Dataset columns: index (int64), repo_id (string, lengths 48–65), file_path (string, lengths 62–122), content (string, lengths 27–3.15M), __index_level_0__ (int64, 0–10k)

repo_id: hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
file_path: hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/layoutxlm.mdx-b857cc95.js
content:
import{S as Bs,i as Hs,s as Gs,e as r,k as d,w as y,t as n,L as Ys,c as s,d as o,m as c,a as i,x as v,h as a,b as l,J as e,g as m,y as L,K as Js,q as b,o as x,B as w}from"../../chunks/vendor-b1433968.js";import{D as B}from"../../chunks/Docstring-ff504c58.js";import{C as Vs}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Jt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Zs(Eo){let $,$e,q,E,ct,ae,Fo,ut,$o,Zt,U,H,mt,re,Po,pt,No,Kt,P,Ao,se,Do,Io,ie,So,Oo,Qt,Pe,Co,eo,Ne,ht,Uo,to,Ae,Ro,oo,le,no,N,jo,De,Wo,Vo,Ie,Bo,Ho,ao,de,ro,z,Go,Se,Yo,Jo,Oe,Zo,Ko,Ce,Qo,en,Ue,tn,on,so,G,nn,Re,an,rn,io,A,sn,ce,ln,dn,ue,cn,un,lo,R,Y,ft,me,mn,_t,pn,co,f,pe,hn,F,fn,je,_n,gn,We,kn,yn,he,vn,Ln,bn,fe,xn,Ve,wn,Tn,zn,j,Mn,gt,Xn,qn,kt,En,Fn,$n,J,_e,Pn,yt,Nn,An,D,ge,Dn,vt,In,Sn,ke,Be,On,Lt,Cn,Un,He,Rn,bt,jn,Wn,Z,ye,Vn,ve,Bn,xt,Hn,Gn,Yn,K,Le,Jn,wt,Zn,Kn,Tt,uo,W,Q,zt,be,Qn,Mt,ea,mo,M,xe,ta,X,oa,Xt,na,aa,Ge,ra,sa,Ye,ia,la,we,da,ca,ua,Te,ma,Je,pa,ha,fa,ee,ze,_a,qt,ga,po,V,te,Et,Me,ka,Ft,ya,ho,T,Xe,va,$t,La,ba,Ze,Ke,xa,wa,Ta,_,za,Qe,Ma,Xa,et,qa,Ea,tt,Fa,$a,Pt,Pa,Na,Nt,Aa,Da,At,Ia,Sa,Dt,Oa,Ca,It,Ua,Ra,St,ja,Wa,Va,I,qe,Ba,p,Ha,Ot,Ga,Ya,Ee,Ct,Ja,Za,Ka,ot,Qa,er,Ut,tr,or,Rt,nr,ar,Fe,jt,rr,sr,ir,Wt,lr,dr,nt,cr,ur,Vt,mr,pr,Bt,hr,fr,Ht,_r,gr,Gt,kr,yr,vr,Yt,Lr,fo;return ae=new Jt({}),re=new Jt({}),le=new Vs({props:{code:`from transformers import LayoutLMv2Model model = LayoutLMv2Model.from_pretrained('microsoft/layoutxlm-base'),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Model model = LayoutLMv2Model.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutxlm-base&#x27;</span>)`}}),de=new Vs({props:{code:`from transformers import LayoutXLMTokenizer tokenizer = LayoutXLMTokenizer.from_pretrained('microsoft/layoutxlm-base'),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutXLMTokenizer tokenizer = LayoutXLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutxlm-base&#x27;</span>)`}}),me=new Jt({}),pe=new B({props:{name:"class transformers.LayoutXLMTokenizer",anchor:"transformers.LayoutXLMTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"cls_token_box",val:" = [0, 0, 0, 0]"},{name:"sep_token_box",val:" = [1000, 1000, 1000, 1000]"},{name:"pad_token_box",val:" = [0, 0, 0, 0]"},{name:"pad_token_label",val:" = -100"},{name:"only_label_first_subword",val:" = True"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm.py#L48",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.LayoutXLMTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. 
Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.LayoutXLMTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.LayoutXLMTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.LayoutXLMTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.LayoutXLMTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.LayoutXLMTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.LayoutXLMTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.LayoutXLMTokenizer.cls_token_box",description:`<strong>cls_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [CLS] token.`,name:"cls_token_box"},{anchor:"transformers.LayoutXLMTokenizer.sep_token_box",description:`<strong>sep_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[1000, 1000, 1000, 1000]</code>) &#x2014; The bounding box to use for the special [SEP] token.`,name:"sep_token_box"},{anchor:"transformers.LayoutXLMTokenizer.pad_token_box",description:`<strong>pad_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [PAD] token.`,name:"pad_token_box"},{anchor:"transformers.LayoutXLMTokenizer.pad_token_label",description:`<strong>pad_token_label</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The label to use for padding tokens. Defaults to -100, which is the <code>ignore_index</code> of PyTorch&#x2019;s CrossEntropyLoss.`,name:"pad_token_label"},{anchor:"transformers.LayoutXLMTokenizer.only_label_first_subword",description:`<strong>only_label_first_subword</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to only label the first subword, in case word labels are provided.`,name:"only_label_first_subword"},{anchor:"transformers.LayoutXLMTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.LayoutXLMTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),_e=new B({props:{name:"__call__",anchor:"transformers.LayoutXLMTokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"boxes",val:": typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None"},{name:"word_labels",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm.py#L337",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words).`,name:"text"},{anchor:"transformers.LayoutXLMTokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string).`,name:"text_pair"},{anchor:"transformers.LayoutXLMTokenizer.__call__.boxes",description:`<strong>boxes</strong> (<code>List[List[int]]</code>, <code>List[List[List[int]]]</code>) &#x2014; Word-level bounding boxes. 
Each bounding box should be normalized to be on a 0-1000 scale.`,name:"boxes"},{anchor:"transformers.LayoutXLMTokenizer.__call__.word_labels",description:`<strong>word_labels</strong> (<code>List[int]</code>, <code>List[List[int]]</code>, <em>optional</em>) &#x2014; Word-level integer labels (for token classification tasks such as FUNSD, CORD).`,name:"word_labels"},{anchor:"transformers.LayoutXLMTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutXLMTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutXLMTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutXLMTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutXLMTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutXLMTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.LayoutXLMTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutXLMTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.LayoutXLMTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutXLMTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutXLMTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutXLMTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutXLMTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutXLMTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutXLMTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}]}}),ge=new B({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.LayoutXLMTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm.py#L213",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.LayoutXLMTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ye=new B({props:{name:"get_special_tokens_mask",anchor:"transformers.LayoutXLMTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm.py#L239",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.LayoutXLMTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.LayoutXLMTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Le=new B({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.LayoutXLMTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm.py#L267",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.LayoutXLMTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),be=new Jt({}),xe=new B({props:{name:"class transformers.LayoutXLMTokenizerFast",anchor:"transformers.LayoutXLMTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"cls_token_box",val:" = [0, 0, 0, 0]"},{name:"sep_token_box",val:" = [1000, 1000, 1000, 1000]"},{name:"pad_token_box",val:" = [0, 0, 0, 0]"},{name:"pad_token_label",val:" = -100"},{name:"only_label_first_subword",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py#L53",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.LayoutXLMTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.LayoutXLMTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.LayoutXLMTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.LayoutXLMTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.LayoutXLMTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.LayoutXLMTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.LayoutXLMTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.LayoutXLMTokenizerFast.cls_token_box",description:`<strong>cls_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [CLS] token.`,name:"cls_token_box"},{anchor:"transformers.LayoutXLMTokenizerFast.sep_token_box",description:`<strong>sep_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[1000, 1000, 1000, 1000]</code>) &#x2014; The bounding box to use for the special [SEP] token.`,name:"sep_token_box"},{anchor:"transformers.LayoutXLMTokenizerFast.pad_token_box",description:`<strong>pad_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [PAD] token.`,name:"pad_token_box"},{anchor:"transformers.LayoutXLMTokenizerFast.pad_token_label",description:`<strong>pad_token_label</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The label to use for padding tokens. Defaults to -100, which is the <code>ignore_index</code> of PyTorch&#x2019;s CrossEntropyLoss.`,name:"pad_token_label"},{anchor:"transformers.LayoutXLMTokenizerFast.only_label_first_subword",description:`<strong>only_label_first_subword</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to only label the first subword, in case word labels are provided.`,name:"only_label_first_subword"},{anchor:"transformers.LayoutXLMTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),ze=new B({props:{name:"__call__",anchor:"transformers.LayoutXLMTokenizerFast.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"boxes",val:": typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None"},{name:"word_labels",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = 
True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py#L169",parametersDescription:[{anchor:"transformers.LayoutXLMTokenizerFast.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words).`,name:"text"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.text_pair",description:`<strong>text_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string).`,name:"text_pair"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.boxes",description:`<strong>boxes</strong> (<code>List[List[int]]</code>, <code>List[List[List[int]]]</code>) &#x2014; Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.`,name:"boxes"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.word_labels",description:`<strong>word_labels</strong> (<code>List[int]</code>, <code>List[List[int]]</code>, <em>optional</em>) &#x2014; Word-level integer labels (for token classification tasks such as FUNSD, CORD).`,name:"word_labels"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutXLMTokenizerFast.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}]}}),Me=new Jt({}),Xe=new B({props:{name:"class transformers.LayoutXLMProcessor",anchor:"transformers.LayoutXLMProcessor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/processing_layoutxlm.py#L28",parametersDescription:[{anchor:"transformers.LayoutXLMProcessor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>LayoutLMv2FeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.LayoutXLMProcessor.tokenizer",description:`<strong>tokenizer</strong> (<code>LayoutXLMTokenizer</code> or <code>LayoutXLMTokenizerFast</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer">LayoutXLMTokenizer</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizerFast">LayoutXLMTokenizerFast</a>. 
The tokenizer is a required input.`,name:"tokenizer"}]}}),qe=new B({props:{name:"__call__",anchor:"transformers.LayoutXLMProcessor.__call__",parameters:[{name:"images",val:""},{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"boxes",val:": typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None"},{name:"word_labels",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutxlm/processing_layoutxlm.py#L129"}}),{c(){$=r("meta"),$e=d(),q=r("h1"),E=r("a"),ct=r("span"),y(ae.$$.fragment),Fo=d(),ut=r("span"),$o=n("LayoutXLM"),Zt=d(),U=r("h2"),H=r("a"),mt=r("span"),y(re.$$.fragment),Po=d(),pt=r("span"),No=n("Overview"),Kt=d(),P=r("p"),Ao=n("LayoutXLM was proposed in "),se=r("a"),Do=n("LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding"),Io=n(` by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. It\u2019s a multilingual extension of the `),ie=r("a"),So=n("LayoutLMv2 model"),Oo=n(` trained on 53 languages.`),Qt=d(),Pe=r("p"),Co=n("The abstract from the paper is the following:"),eo=d(),Ne=r("p"),ht=r("em"),Uo=n(`Multimodal pre-training with text, layout, and image has achieved SOTA performance for visually-rich document understanding tasks recently, which demonstrates the great potential for joint learning across different modalities. In this paper, we present LayoutXLM, a multimodal pre-trained model for multilingual document understanding, which aims to bridge the language barriers for visually-rich document understanding. To accurately evaluate LayoutXLM, we also introduce a multilingual form understanding benchmark dataset named XFUN, which includes form understanding samples in 7 languages (Chinese, Japanese, Spanish, French, Italian, German, Portuguese), and key-value pairs are manually labeled for each language. Experiment results show that the LayoutXLM model has significantly outperformed the existing SOTA cross-lingual pre-trained models on the XFUN dataset.`),to=d(),Ae=r("p"),Ro=n("One can directly plug in the weights of LayoutXLM into a LayoutLMv2 model, like so:"),oo=d(),y(le.$$.fragment),no=d(),N=r("p"),jo=n(`Note that LayoutXLM has its own tokenizer, based on `),De=r("a"),Wo=n("LayoutXLMTokenizer"),Vo=n("/"),Ie=r("a"),Bo=n("LayoutXLMTokenizerFast"),Ho=n(`. 
You can initialize it as follows:`),ao=d(),y(de.$$.fragment),ro=d(),z=r("p"),Go=n("Similar to LayoutLMv2, you can use "),Se=r("a"),Yo=n("LayoutXLMProcessor"),Jo=n(` (which internally applies `),Oe=r("a"),Zo=n("LayoutLMv2FeatureExtractor"),Ko=n(` and `),Ce=r("a"),Qo=n("LayoutXLMTokenizer"),en=n("/"),Ue=r("a"),tn=n("LayoutXLMTokenizerFast"),on=n(` in sequence) to prepare all data for the model.`),so=d(),G=r("p"),nn=n("As LayoutXLM\u2019s architecture is equivalent to that of LayoutLMv2, one can refer to "),Re=r("a"),an=n("LayoutLMv2\u2019s documentation page"),rn=n(" for all tips, code examples and notebooks."),io=d(),A=r("p"),sn=n("This model was contributed by "),ce=r("a"),ln=n("nielsr"),dn=n(". The original code can be found "),ue=r("a"),cn=n("here"),un=n("."),lo=d(),R=r("h2"),Y=r("a"),ft=r("span"),y(me.$$.fragment),mn=d(),_t=r("span"),pn=n("LayoutXLMTokenizer"),co=d(),f=r("div"),y(pe.$$.fragment),hn=d(),F=r("p"),fn=n("Adapted from "),je=r("a"),_n=n("RobertaTokenizer"),gn=n(" and "),We=r("a"),kn=n("XLNetTokenizer"),yn=n(`. Based on `),he=r("a"),vn=n("SentencePiece"),Ln=n("."),bn=d(),fe=r("p"),xn=n("This tokenizer inherits from "),Ve=r("a"),wn=n("PreTrainedTokenizer"),Tn=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),zn=d(),j=r("p"),Mn=n(`Attributes: sp_model (`),gt=r("code"),Xn=n("SentencePieceProcessor"),qn=n(`): The `),kt=r("em"),En=n("SentencePiece"),Fn=n(" processor that is used for every conversion (string, tokens and IDs)."),$n=d(),J=r("div"),y(_e.$$.fragment),Pn=d(),yt=r("p"),Nn=n(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),An=d(),D=r("div"),y(ge.$$.fragment),Dn=d(),vt=r("p"),In=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:`),Sn=d(),ke=r("ul"),Be=r("li"),On=n("single sequence: "),Lt=r("code"),Cn=n("<s> X </s>"),Un=d(),He=r("li"),Rn=n("pair of sequences: "),bt=r("code"),jn=n("<s> A </s></s> B </s>"),Wn=d(),Z=r("div"),y(ye.$$.fragment),Vn=d(),ve=r("p"),Bn=n(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),xt=r("code"),Hn=n("prepare_for_model"),Gn=n(" method."),Yn=d(),K=r("div"),y(Le.$$.fragment),Jn=d(),wt=r("p"),Zn=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),Kn=d(),Tt=r("div"),uo=d(),W=r("h2"),Q=r("a"),zt=r("span"),y(be.$$.fragment),Qn=d(),Mt=r("span"),ea=n("LayoutXLMTokenizerFast"),mo=d(),M=r("div"),y(xe.$$.fragment),ta=d(),X=r("p"),oa=n("Construct a \u201Cfast\u201D LayoutXLM tokenizer (backed by HuggingFace\u2019s "),Xt=r("em"),na=n("tokenizers"),aa=n(` library). Adapted from `),Ge=r("a"),ra=n("RobertaTokenizer"),sa=n(" and "),Ye=r("a"),ia=n("XLNetTokenizer"),la=n(". Based on "),we=r("a"),da=n("BPE"),ca=n("."),ua=d(),Te=r("p"),ma=n("This tokenizer inherits from "),Je=r("a"),pa=n("PreTrainedTokenizerFast"),ha=n(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),fa=d(),ee=r("div"),y(ze.$$.fragment),_a=d(),qt=r("p"),ga=n(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),po=d(),V=r("h2"),te=r("a"),Et=r("span"),y(Me.$$.fragment),ka=d(),Ft=r("span"),ya=n("LayoutXLMProcessor"),ho=d(),T=r("div"),y(Xe.$$.fragment),va=d(),$t=r("p"),La=n(`Constructs a LayoutXLM processor which combines a LayoutXLM feature extractor and a LayoutXLM tokenizer into a single processor.`),ba=d(),Ze=r("p"),Ke=r("a"),xa=n("LayoutXLMProcessor"),wa=n(" offers all the functionalities you need to prepare data for the model."),Ta=d(),_=r("p"),za=n("It first uses "),Qe=r("a"),Ma=n("LayoutLMv2FeatureExtractor"),Xa=n(` to resize document images to a fixed size, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to `),et=r("a"),qa=n("LayoutXLMTokenizer"),Ea=n(" or "),tt=r("a"),Fa=n("LayoutXLMTokenizerFast"),$a=n(`, which turns the words and bounding boxes into token-level `),Pt=r("code"),Pa=n("input_ids"),Na=n(", "),Nt=r("code"),Aa=n("attention_mask"),Da=n(", "),At=r("code"),Ia=n("token_type_ids"),Sa=n(", "),Dt=r("code"),Oa=n("bbox"),Ca=n(`. Optionally, one can provide integer `),It=r("code"),Ua=n("word_labels"),Ra=n(", which are turned into token-level "),St=r("code"),ja=n("labels"),Wa=n(` for token classification tasks (such as FUNSD, CORD).`),Va=d(),I=r("div"),y(qe.$$.fragment),Ba=d(),p=r("p"),Ha=n("This method first forwards the "),Ot=r("code"),Ga=n("images"),Ya=n(` argument to `),Ee=r("a"),Ct=r("strong"),Ja=n("call"),Za=n("()"),Ka=n(". In case "),ot=r("a"),Qa=n("LayoutLMv2FeatureExtractor"),er=n(` was initialized with `),Ut=r("code"),tr=n("apply_ocr"),or=n(" set to "),Rt=r("code"),nr=n("True"),ar=n(`, it passes the obtained words and bounding boxes along with the additional arguments to `),Fe=r("a"),jt=r("strong"),rr=n("call"),sr=n("()"),ir=n(` and returns the output, together with resized `),Wt=r("code"),lr=n("images"),dr=n(". 
In case "),nt=r("a"),cr=n("LayoutLMv2FeatureExtractor"),ur=n(" was initialized with "),Vt=r("code"),mr=n("apply_ocr"),pr=n(` set to `),Bt=r("code"),hr=n("False"),fr=n(", it passes the words ("),Ht=r("code"),_r=n("text"),gr=n("/"),Gt=r("code"),kr=n("text_pair`) and `boxes` specified by the user along with the additional arguments to [__call__()](/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer.__call__) and returns the output, together with resized `images"),yr=n("."),vr=d(),Yt=r("p"),Lr=n("Please refer to the docstring of the above two methods for more information."),this.h()},l(t){const u=Ys('[data-svelte="svelte-1phssyn"]',document.head);$=s(u,"META",{name:!0,content:!0}),u.forEach(o),$e=c(t),q=s(t,"H1",{class:!0});var _o=i(q);E=s(_o,"A",{id:!0,class:!0,href:!0});var Mr=i(E);ct=s(Mr,"SPAN",{});var Xr=i(ct);v(ae.$$.fragment,Xr),Xr.forEach(o),Mr.forEach(o),Fo=c(_o),ut=s(_o,"SPAN",{});var qr=i(ut);$o=a(qr,"LayoutXLM"),qr.forEach(o),_o.forEach(o),Zt=c(t),U=s(t,"H2",{class:!0});var go=i(U);H=s(go,"A",{id:!0,class:!0,href:!0});var Er=i(H);mt=s(Er,"SPAN",{});var Fr=i(mt);v(re.$$.fragment,Fr),Fr.forEach(o),Er.forEach(o),Po=c(go),pt=s(go,"SPAN",{});var $r=i(pt);No=a($r,"Overview"),$r.forEach(o),go.forEach(o),Kt=c(t),P=s(t,"P",{});var at=i(P);Ao=a(at,"LayoutXLM was proposed in "),se=s(at,"A",{href:!0,rel:!0});var Pr=i(se);Do=a(Pr,"LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding"),Pr.forEach(o),Io=a(at,` by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. It\u2019s a multilingual extension of the `),ie=s(at,"A",{href:!0,rel:!0});var Nr=i(ie);So=a(Nr,"LayoutLMv2 model"),Nr.forEach(o),Oo=a(at,` trained on 53 languages.`),at.forEach(o),Qt=c(t),Pe=s(t,"P",{});var Ar=i(Pe);Co=a(Ar,"The abstract from the paper is the following:"),Ar.forEach(o),eo=c(t),Ne=s(t,"P",{});var Dr=i(Ne);ht=s(Dr,"EM",{});var Ir=i(ht);Uo=a(Ir,`Multimodal pre-training with text, layout, and image has achieved SOTA performance for visually-rich document understanding tasks recently, which demonstrates the great potential for joint learning across different modalities. In this paper, we present LayoutXLM, a multimodal pre-trained model for multilingual document understanding, which aims to bridge the language barriers for visually-rich document understanding. To accurately evaluate LayoutXLM, we also introduce a multilingual form understanding benchmark dataset named XFUN, which includes form understanding samples in 7 languages (Chinese, Japanese, Spanish, French, Italian, German, Portuguese), and key-value pairs are manually labeled for each language. Experiment results show that the LayoutXLM model has significantly outperformed the existing SOTA cross-lingual pre-trained models on the XFUN dataset.`),Ir.forEach(o),Dr.forEach(o),to=c(t),Ae=s(t,"P",{});var Sr=i(Ae);Ro=a(Sr,"One can directly plug in the weights of LayoutXLM into a LayoutLMv2 model, like so:"),Sr.forEach(o),oo=c(t),v(le.$$.fragment,t),no=c(t),N=s(t,"P",{});var rt=i(N);jo=a(rt,`Note that LayoutXLM has its own tokenizer, based on `),De=s(rt,"A",{href:!0});var Or=i(De);Wo=a(Or,"LayoutXLMTokenizer"),Or.forEach(o),Vo=a(rt,"/"),Ie=s(rt,"A",{href:!0});var Cr=i(Ie);Bo=a(Cr,"LayoutXLMTokenizerFast"),Cr.forEach(o),Ho=a(rt,`. 
You can initialize it as follows:`),rt.forEach(o),ao=c(t),v(de.$$.fragment,t),ro=c(t),z=s(t,"P",{});var S=i(z);Go=a(S,"Similar to LayoutLMv2, you can use "),Se=s(S,"A",{href:!0});var Ur=i(Se);Yo=a(Ur,"LayoutXLMProcessor"),Ur.forEach(o),Jo=a(S,` (which internally applies `),Oe=s(S,"A",{href:!0});var Rr=i(Oe);Zo=a(Rr,"LayoutLMv2FeatureExtractor"),Rr.forEach(o),Ko=a(S,` and `),Ce=s(S,"A",{href:!0});var jr=i(Ce);Qo=a(jr,"LayoutXLMTokenizer"),jr.forEach(o),en=a(S,"/"),Ue=s(S,"A",{href:!0});var Wr=i(Ue);tn=a(Wr,"LayoutXLMTokenizerFast"),Wr.forEach(o),on=a(S,` in sequence) to prepare all data for the model.`),S.forEach(o),so=c(t),G=s(t,"P",{});var ko=i(G);nn=a(ko,"As LayoutXLM\u2019s architecture is equivalent to that of LayoutLMv2, one can refer to "),Re=s(ko,"A",{href:!0});var Vr=i(Re);an=a(Vr,"LayoutLMv2\u2019s documentation page"),Vr.forEach(o),rn=a(ko," for all tips, code examples and notebooks."),ko.forEach(o),io=c(t),A=s(t,"P",{});var st=i(A);sn=a(st,"This model was contributed by "),ce=s(st,"A",{href:!0,rel:!0});var Br=i(ce);ln=a(Br,"nielsr"),Br.forEach(o),dn=a(st,". The original code can be found "),ue=s(st,"A",{href:!0,rel:!0});var Hr=i(ue);cn=a(Hr,"here"),Hr.forEach(o),un=a(st,"."),st.forEach(o),lo=c(t),R=s(t,"H2",{class:!0});var yo=i(R);Y=s(yo,"A",{id:!0,class:!0,href:!0});var Gr=i(Y);ft=s(Gr,"SPAN",{});var Yr=i(ft);v(me.$$.fragment,Yr),Yr.forEach(o),Gr.forEach(o),mn=c(yo),_t=s(yo,"SPAN",{});var Jr=i(_t);pn=a(Jr,"LayoutXLMTokenizer"),Jr.forEach(o),yo.forEach(o),co=c(t),f=s(t,"DIV",{class:!0});var k=i(f);v(pe.$$.fragment,k),hn=c(k),F=s(k,"P",{});var oe=i(F);fn=a(oe,"Adapted from "),je=s(oe,"A",{href:!0});var Zr=i(je);_n=a(Zr,"RobertaTokenizer"),Zr.forEach(o),gn=a(oe," and "),We=s(oe,"A",{href:!0});var Kr=i(We);kn=a(Kr,"XLNetTokenizer"),Kr.forEach(o),yn=a(oe,`. Based on `),he=s(oe,"A",{href:!0,rel:!0});var Qr=i(he);vn=a(Qr,"SentencePiece"),Qr.forEach(o),Ln=a(oe,"."),oe.forEach(o),bn=c(k),fe=s(k,"P",{});var vo=i(fe);xn=a(vo,"This tokenizer inherits from "),Ve=s(vo,"A",{href:!0});var es=i(Ve);wn=a(es,"PreTrainedTokenizer"),es.forEach(o),Tn=a(vo,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),vo.forEach(o),zn=c(k),j=s(k,"P",{});var it=i(j);Mn=a(it,`Attributes: sp_model (`),gt=s(it,"CODE",{});var ts=i(gt);Xn=a(ts,"SentencePieceProcessor"),ts.forEach(o),qn=a(it,`): The `),kt=s(it,"EM",{});var os=i(kt);En=a(os,"SentencePiece"),os.forEach(o),Fn=a(it," processor that is used for every conversion (string, tokens and IDs)."),it.forEach(o),$n=c(k),J=s(k,"DIV",{class:!0});var Lo=i(J);v(_e.$$.fragment,Lo),Pn=c(Lo),yt=s(Lo,"P",{});var ns=i(yt);Nn=a(ns,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),ns.forEach(o),Lo.forEach(o),An=c(k),D=s(k,"DIV",{class:!0});var lt=i(D);v(ge.$$.fragment,lt),Dn=c(lt),vt=s(lt,"P",{});var as=i(vt);In=a(as,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An XLM-RoBERTa sequence has the following format:`),as.forEach(o),Sn=c(lt),ke=s(lt,"UL",{});var bo=i(ke);Be=s(bo,"LI",{});var br=i(Be);On=a(br,"single sequence: "),Lt=s(br,"CODE",{});var rs=i(Lt);Cn=a(rs,"<s> X </s>"),rs.forEach(o),br.forEach(o),Un=c(bo),He=s(bo,"LI",{});var xr=i(He);Rn=a(xr,"pair of sequences: "),bt=s(xr,"CODE",{});var ss=i(bt);jn=a(ss,"<s> A </s></s> B </s>"),ss.forEach(o),xr.forEach(o),bo.forEach(o),lt.forEach(o),Wn=c(k),Z=s(k,"DIV",{class:!0});var xo=i(Z);v(ye.$$.fragment,xo),Vn=c(xo),ve=s(xo,"P",{});var wo=i(ve);Bn=a(wo,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),xt=s(wo,"CODE",{});var is=i(xt);Hn=a(is,"prepare_for_model"),is.forEach(o),Gn=a(wo," method."),wo.forEach(o),xo.forEach(o),Yn=c(k),K=s(k,"DIV",{class:!0});var To=i(K);v(Le.$$.fragment,To),Jn=c(To),wt=s(To,"P",{});var ls=i(wt);Zn=a(ls,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),ls.forEach(o),To.forEach(o),Kn=c(k),Tt=s(k,"DIV",{class:!0}),i(Tt).forEach(o),k.forEach(o),uo=c(t),W=s(t,"H2",{class:!0});var zo=i(W);Q=s(zo,"A",{id:!0,class:!0,href:!0});var ds=i(Q);zt=s(ds,"SPAN",{});var cs=i(zt);v(be.$$.fragment,cs),cs.forEach(o),ds.forEach(o),Qn=c(zo),Mt=s(zo,"SPAN",{});var us=i(Mt);ea=a(us,"LayoutXLMTokenizerFast"),us.forEach(o),zo.forEach(o),mo=c(t),M=s(t,"DIV",{class:!0});var ne=i(M);v(xe.$$.fragment,ne),ta=c(ne),X=s(ne,"P",{});var O=i(X);oa=a(O,"Construct a \u201Cfast\u201D LayoutXLM tokenizer (backed by HuggingFace\u2019s "),Xt=s(O,"EM",{});var ms=i(Xt);na=a(ms,"tokenizers"),ms.forEach(o),aa=a(O,` library). Adapted from `),Ge=s(O,"A",{href:!0});var ps=i(Ge);ra=a(ps,"RobertaTokenizer"),ps.forEach(o),sa=a(O," and "),Ye=s(O,"A",{href:!0});var hs=i(Ye);ia=a(hs,"XLNetTokenizer"),hs.forEach(o),la=a(O,". Based on "),we=s(O,"A",{href:!0,rel:!0});var fs=i(we);da=a(fs,"BPE"),fs.forEach(o),ca=a(O,"."),O.forEach(o),ua=c(ne),Te=s(ne,"P",{});var Mo=i(Te);ma=a(Mo,"This tokenizer inherits from "),Je=s(Mo,"A",{href:!0});var _s=i(Je);pa=a(_s,"PreTrainedTokenizerFast"),_s.forEach(o),ha=a(Mo,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Mo.forEach(o),fa=c(ne),ee=s(ne,"DIV",{class:!0});var Xo=i(ee);v(ze.$$.fragment,Xo),_a=c(Xo),qt=s(Xo,"P",{});var gs=i(qt);ga=a(gs,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),gs.forEach(o),Xo.forEach(o),ne.forEach(o),po=c(t),V=s(t,"H2",{class:!0});var qo=i(V);te=s(qo,"A",{id:!0,class:!0,href:!0});var ks=i(te);Et=s(ks,"SPAN",{});var ys=i(Et);v(Me.$$.fragment,ys),ys.forEach(o),ks.forEach(o),ka=c(qo),Ft=s(qo,"SPAN",{});var vs=i(Ft);ya=a(vs,"LayoutXLMProcessor"),vs.forEach(o),qo.forEach(o),ho=c(t),T=s(t,"DIV",{class:!0});var C=i(T);v(Xe.$$.fragment,C),va=c(C),$t=s(C,"P",{});var Ls=i($t);La=a(Ls,`Constructs a LayoutXLM processor which combines a LayoutXLM feature extractor and a LayoutXLM tokenizer into a single processor.`),Ls.forEach(o),ba=c(C),Ze=s(C,"P",{});var wr=i(Ze);Ke=s(wr,"A",{href:!0});var bs=i(Ke);xa=a(bs,"LayoutXLMProcessor"),bs.forEach(o),wa=a(wr," offers all the functionalities you need to prepare data for the model."),wr.forEach(o),Ta=c(C),_=s(C,"P",{});var g=i(_);za=a(g,"It first uses "),Qe=s(g,"A",{href:!0});var xs=i(Qe);Ma=a(xs,"LayoutLMv2FeatureExtractor"),xs.forEach(o),Xa=a(g,` to resize document images to a fixed size, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to `),et=s(g,"A",{href:!0});var ws=i(et);qa=a(ws,"LayoutXLMTokenizer"),ws.forEach(o),Ea=a(g," or "),tt=s(g,"A",{href:!0});var Ts=i(tt);Fa=a(Ts,"LayoutXLMTokenizerFast"),Ts.forEach(o),$a=a(g,`, which turns the words and bounding boxes into token-level `),Pt=s(g,"CODE",{});var zs=i(Pt);Pa=a(zs,"input_ids"),zs.forEach(o),Na=a(g,", "),Nt=s(g,"CODE",{});var Ms=i(Nt);Aa=a(Ms,"attention_mask"),Ms.forEach(o),Da=a(g,", "),At=s(g,"CODE",{});var Xs=i(At);Ia=a(Xs,"token_type_ids"),Xs.forEach(o),Sa=a(g,", "),Dt=s(g,"CODE",{});var qs=i(Dt);Oa=a(qs,"bbox"),qs.forEach(o),Ca=a(g,`. Optionally, one can provide integer `),It=s(g,"CODE",{});var Es=i(It);Ua=a(Es,"word_labels"),Es.forEach(o),Ra=a(g,", which are turned into token-level "),St=s(g,"CODE",{});var Fs=i(St);ja=a(Fs,"labels"),Fs.forEach(o),Wa=a(g,` for token classification tasks (such as FUNSD, CORD).`),g.forEach(o),Va=c(C),I=s(C,"DIV",{class:!0});var dt=i(I);v(qe.$$.fragment,dt),Ba=c(dt),p=s(dt,"P",{});var h=i(p);Ha=a(h,"This method first forwards the "),Ot=s(h,"CODE",{});var $s=i(Ot);Ga=a($s,"images"),$s.forEach(o),Ya=a(h,` argument to `),Ee=s(h,"A",{href:!0});var Tr=i(Ee);Ct=s(Tr,"STRONG",{});var Ps=i(Ct);Ja=a(Ps,"call"),Ps.forEach(o),Za=a(Tr,"()"),Tr.forEach(o),Ka=a(h,". In case "),ot=s(h,"A",{href:!0});var Ns=i(ot);Qa=a(Ns,"LayoutLMv2FeatureExtractor"),Ns.forEach(o),er=a(h,` was initialized with `),Ut=s(h,"CODE",{});var As=i(Ut);tr=a(As,"apply_ocr"),As.forEach(o),or=a(h," set to "),Rt=s(h,"CODE",{});var Ds=i(Rt);nr=a(Ds,"True"),Ds.forEach(o),ar=a(h,`, it passes the obtained words and bounding boxes along with the additional arguments to `),Fe=s(h,"A",{href:!0});var zr=i(Fe);jt=s(zr,"STRONG",{});var Is=i(jt);rr=a(Is,"call"),Is.forEach(o),sr=a(zr,"()"),zr.forEach(o),ir=a(h,` and returns the output, together with resized `),Wt=s(h,"CODE",{});var Ss=i(Wt);lr=a(Ss,"images"),Ss.forEach(o),dr=a(h,". 
In case "),nt=s(h,"A",{href:!0});var Os=i(nt);cr=a(Os,"LayoutLMv2FeatureExtractor"),Os.forEach(o),ur=a(h," was initialized with "),Vt=s(h,"CODE",{});var Cs=i(Vt);mr=a(Cs,"apply_ocr"),Cs.forEach(o),pr=a(h,` set to `),Bt=s(h,"CODE",{});var Us=i(Bt);hr=a(Us,"False"),Us.forEach(o),fr=a(h,", it passes the words ("),Ht=s(h,"CODE",{});var Rs=i(Ht);_r=a(Rs,"text"),Rs.forEach(o),gr=a(h,"/"),Gt=s(h,"CODE",{});var js=i(Gt);kr=a(js,"text_pair`) and `boxes` specified by the user along with the additional arguments to [__call__()](/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer.__call__) and returns the output, together with resized `images"),js.forEach(o),yr=a(h,"."),h.forEach(o),vr=c(dt),Yt=s(dt,"P",{});var Ws=i(Yt);Lr=a(Ws,"Please refer to the docstring of the above two methods for more information."),Ws.forEach(o),dt.forEach(o),C.forEach(o),this.h()},h(){l($,"name","hf:doc:metadata"),l($,"content",JSON.stringify(Ks)),l(E,"id","layoutxlm"),l(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(E,"href","#layoutxlm"),l(q,"class","relative group"),l(H,"id","overview"),l(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(H,"href","#overview"),l(U,"class","relative group"),l(se,"href","https://arxiv.org/abs/2104.08836"),l(se,"rel","nofollow"),l(ie,"href","https://arxiv.org/abs/2012.14740"),l(ie,"rel","nofollow"),l(De,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer"),l(Ie,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizerFast"),l(Se,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMProcessor"),l(Oe,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(Ce,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer"),l(Ue,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizerFast"),l(Re,"href","layoutlmv2"),l(ce,"href","https://huggingface.co/nielsr"),l(ce,"rel","nofollow"),l(ue,"href","https://github.com/microsoft/unilm"),l(ue,"rel","nofollow"),l(Y,"id","transformers.LayoutXLMTokenizer"),l(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Y,"href","#transformers.LayoutXLMTokenizer"),l(R,"class","relative group"),l(je,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),l(We,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),l(he,"href","https://github.com/google/sentencepiece"),l(he,"rel","nofollow"),l(Ve,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(J,"class","docstring"),l(D,"class","docstring"),l(Z,"class","docstring"),l(K,"class","docstring"),l(Tt,"class","docstring"),l(f,"class","docstring"),l(Q,"id","transformers.LayoutXLMTokenizerFast"),l(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Q,"href","#transformers.LayoutXLMTokenizerFast"),l(W,"class","relative 
group"),l(Ge,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),l(Ye,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),l(we,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models"),l(we,"rel","nofollow"),l(Je,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(ee,"class","docstring"),l(M,"class","docstring"),l(te,"id","transformers.LayoutXLMProcessor"),l(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(te,"href","#transformers.LayoutXLMProcessor"),l(V,"class","relative group"),l(Ke,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMProcessor"),l(Qe,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(et,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer"),l(tt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizerFast"),l(Ee,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor.__call__"),l(ot,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(Fe,"href","/docs/transformers/v4.15.0/en/model_doc/layoutxlm#transformers.LayoutXLMTokenizer.__call__"),l(nt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(I,"class","docstring"),l(T,"class","docstring")},m(t,u){e(document.head,$),m(t,$e,u),m(t,q,u),e(q,E),e(E,ct),L(ae,ct,null),e(q,Fo),e(q,ut),e(ut,$o),m(t,Zt,u),m(t,U,u),e(U,H),e(H,mt),L(re,mt,null),e(U,Po),e(U,pt),e(pt,No),m(t,Kt,u),m(t,P,u),e(P,Ao),e(P,se),e(se,Do),e(P,Io),e(P,ie),e(ie,So),e(P,Oo),m(t,Qt,u),m(t,Pe,u),e(Pe,Co),m(t,eo,u),m(t,Ne,u),e(Ne,ht),e(ht,Uo),m(t,to,u),m(t,Ae,u),e(Ae,Ro),m(t,oo,u),L(le,t,u),m(t,no,u),m(t,N,u),e(N,jo),e(N,De),e(De,Wo),e(N,Vo),e(N,Ie),e(Ie,Bo),e(N,Ho),m(t,ao,u),L(de,t,u),m(t,ro,u),m(t,z,u),e(z,Go),e(z,Se),e(Se,Yo),e(z,Jo),e(z,Oe),e(Oe,Zo),e(z,Ko),e(z,Ce),e(Ce,Qo),e(z,en),e(z,Ue),e(Ue,tn),e(z,on),m(t,so,u),m(t,G,u),e(G,nn),e(G,Re),e(Re,an),e(G,rn),m(t,io,u),m(t,A,u),e(A,sn),e(A,ce),e(ce,ln),e(A,dn),e(A,ue),e(ue,cn),e(A,un),m(t,lo,u),m(t,R,u),e(R,Y),e(Y,ft),L(me,ft,null),e(R,mn),e(R,_t),e(_t,pn),m(t,co,u),m(t,f,u),L(pe,f,null),e(f,hn),e(f,F),e(F,fn),e(F,je),e(je,_n),e(F,gn),e(F,We),e(We,kn),e(F,yn),e(F,he),e(he,vn),e(F,Ln),e(f,bn),e(f,fe),e(fe,xn),e(fe,Ve),e(Ve,wn),e(fe,Tn),e(f,zn),e(f,j),e(j,Mn),e(j,gt),e(gt,Xn),e(j,qn),e(j,kt),e(kt,En),e(j,Fn),e(f,$n),e(f,J),L(_e,J,null),e(J,Pn),e(J,yt),e(yt,Nn),e(f,An),e(f,D),L(ge,D,null),e(D,Dn),e(D,vt),e(vt,In),e(D,Sn),e(D,ke),e(ke,Be),e(Be,On),e(Be,Lt),e(Lt,Cn),e(ke,Un),e(ke,He),e(He,Rn),e(He,bt),e(bt,jn),e(f,Wn),e(f,Z),L(ye,Z,null),e(Z,Vn),e(Z,ve),e(ve,Bn),e(ve,xt),e(xt,Hn),e(ve,Gn),e(f,Yn),e(f,K),L(Le,K,null),e(K,Jn),e(K,wt),e(wt,Zn),e(f,Kn),e(f,Tt),m(t,uo,u),m(t,W,u),e(W,Q),e(Q,zt),L(be,zt,null),e(W,Qn),e(W,Mt),e(Mt,ea),m(t,mo,u),m(t,M,u),L(xe,M,null),e(M,ta),e(M,X),e(X,oa),e(X,Xt),e(Xt,na),e(X,aa),e(X,Ge),e(Ge,ra),e(X,sa),e(X,Ye),e(Ye,ia),e(X,la),e(X,we),e(we,da),e(X,ca),e(M,ua),e(M,Te),e(Te,ma),e(Te,Je),e(Je,pa),e(Te,ha),e(M,fa),e(M,ee),L(ze,ee,null),e(ee,_a),e(ee,qt),e(qt,ga),m(t,po,u),m(t,V,u),e(V,te),e(te,Et),L(Me,Et,null),e(V,ka),e(V,Ft),e(Ft,ya),m(t,ho,u),m(t,T,u),L(Xe,T,null),e(T,va),e(T,$t),e($t,La),e(T,ba),e(T,Ze),e(Ze,Ke),e
(Ke,xa),e(Ze,wa),e(T,Ta),e(T,_),e(_,za),e(_,Qe),e(Qe,Ma),e(_,Xa),e(_,et),e(et,qa),e(_,Ea),e(_,tt),e(tt,Fa),e(_,$a),e(_,Pt),e(Pt,Pa),e(_,Na),e(_,Nt),e(Nt,Aa),e(_,Da),e(_,At),e(At,Ia),e(_,Sa),e(_,Dt),e(Dt,Oa),e(_,Ca),e(_,It),e(It,Ua),e(_,Ra),e(_,St),e(St,ja),e(_,Wa),e(T,Va),e(T,I),L(qe,I,null),e(I,Ba),e(I,p),e(p,Ha),e(p,Ot),e(Ot,Ga),e(p,Ya),e(p,Ee),e(Ee,Ct),e(Ct,Ja),e(Ee,Za),e(p,Ka),e(p,ot),e(ot,Qa),e(p,er),e(p,Ut),e(Ut,tr),e(p,or),e(p,Rt),e(Rt,nr),e(p,ar),e(p,Fe),e(Fe,jt),e(jt,rr),e(Fe,sr),e(p,ir),e(p,Wt),e(Wt,lr),e(p,dr),e(p,nt),e(nt,cr),e(p,ur),e(p,Vt),e(Vt,mr),e(p,pr),e(p,Bt),e(Bt,hr),e(p,fr),e(p,Ht),e(Ht,_r),e(p,gr),e(p,Gt),e(Gt,kr),e(p,yr),e(I,vr),e(I,Yt),e(Yt,Lr),fo=!0},p:Js,i(t){fo||(b(ae.$$.fragment,t),b(re.$$.fragment,t),b(le.$$.fragment,t),b(de.$$.fragment,t),b(me.$$.fragment,t),b(pe.$$.fragment,t),b(_e.$$.fragment,t),b(ge.$$.fragment,t),b(ye.$$.fragment,t),b(Le.$$.fragment,t),b(be.$$.fragment,t),b(xe.$$.fragment,t),b(ze.$$.fragment,t),b(Me.$$.fragment,t),b(Xe.$$.fragment,t),b(qe.$$.fragment,t),fo=!0)},o(t){x(ae.$$.fragment,t),x(re.$$.fragment,t),x(le.$$.fragment,t),x(de.$$.fragment,t),x(me.$$.fragment,t),x(pe.$$.fragment,t),x(_e.$$.fragment,t),x(ge.$$.fragment,t),x(ye.$$.fragment,t),x(Le.$$.fragment,t),x(be.$$.fragment,t),x(xe.$$.fragment,t),x(ze.$$.fragment,t),x(Me.$$.fragment,t),x(Xe.$$.fragment,t),x(qe.$$.fragment,t),fo=!1},d(t){o($),t&&o($e),t&&o(q),w(ae),t&&o(Zt),t&&o(U),w(re),t&&o(Kt),t&&o(P),t&&o(Qt),t&&o(Pe),t&&o(eo),t&&o(Ne),t&&o(to),t&&o(Ae),t&&o(oo),w(le,t),t&&o(no),t&&o(N),t&&o(ao),w(de,t),t&&o(ro),t&&o(z),t&&o(so),t&&o(G),t&&o(io),t&&o(A),t&&o(lo),t&&o(R),w(me),t&&o(co),t&&o(f),w(pe),w(_e),w(ge),w(ye),w(Le),t&&o(uo),t&&o(W),w(be),t&&o(mo),t&&o(M),w(xe),w(ze),t&&o(po),t&&o(V),w(Me),t&&o(ho),t&&o(T),w(Xe),w(qe)}}}const Ks={local:"layoutxlm",sections:[{local:"overview",title:"Overview"},{local:"transformers.LayoutXLMTokenizer",title:"LayoutXLMTokenizer"},{local:"transformers.LayoutXLMTokenizerFast",title:"LayoutXLMTokenizerFast"},{local:"transformers.LayoutXLMProcessor",title:"LayoutXLMProcessor"}],title:"LayoutXLM"};function Qs(Eo,$,$e){let{fw:q}=$;return Eo.$$set=E=>{"fw"in E&&$e(0,q=E.fw)},[q]}class ri extends Bs{constructor($){super();Hs(this,$,Qs,Zs,Gs,{fw:0})}}export{ri as default,Ks as metadata};
9,900
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/longformer.mdx-21448f89.js
import{S as e1,i as t1,s as o1,e as a,k as l,w as v,t as n,T as Po,L as n1,c as r,d as t,m as d,a as i,x as k,h as s,U as Oo,b as c,J as e,g as h,y as b,q as T,o as y,B as L}from"../../chunks/vendor-b1433968.js";import{T as Oe}from"../../chunks/Tip-c3840994.js";import{D as S}from"../../chunks/Docstring-ff504c58.js";import{C as rt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function s1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function a1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function r1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function i1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function l1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function d1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function c1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe;return{c(){p=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=a("ul"),x=a("li"),w=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),E=a("p"),ne=n("This second option is useful when using "),Q=a("code"),ce=n("tf.keras.Model.fit"),Z=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),he=n("model(inputs)"),ae=n("."),R=l(),P=a("p"),se=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),z=a("ul"),q=a("li"),me=n("a single Tensor with "),D=a("code"),Y=n("input_ids"),fe=n(" only and nothing else: "),I=a("code"),pe=n("model(inputs_ids)"),ge=l(),C=a("li"),ee=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=a("code"),re=n("model([input_ids, attention_mask])"),te=n(" or "),N=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),ie=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){p=r(m,"P",{});var $=i(p);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),g=d(m),u=r(m,"UL",{});var J=i(u);x=r(J,"LI",{});var Le=i(x);w=s(Le,"having all inputs as keyword arguments (like PyTorch models), or"),Le.forEach(t),_=d(J),M=r(J,"LI",{});var xe=i(M);de=s(xe,"having all inputs as a list, tuple or dict in the first positional arguments."),xe.forEach(t),J.forEach(t),X=d(m),E=r(m,"P",{});var j=i(E);ne=s(j,"This second option is useful when using 
"),Q=r(j,"CODE",{});var $e=i(Q);ce=s($e,"tf.keras.Model.fit"),$e.forEach(t),Z=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(j,"CODE",{});var Fe=i(H);he=s(Fe,"model(inputs)"),Fe.forEach(t),ae=s(j,"."),j.forEach(t),R=d(m),P=r(m,"P",{});var be=i(P);se=s(be,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),be.forEach(t),K=d(m),z=r(m,"UL",{});var A=i(z);q=r(A,"LI",{});var V=i(q);me=s(V,"a single Tensor with "),D=r(V,"CODE",{});var Te=i(D);Y=s(Te,"input_ids"),Te.forEach(t),fe=s(V," only and nothing else: "),I=r(V,"CODE",{});var le=i(I);pe=s(le,"model(inputs_ids)"),le.forEach(t),V.forEach(t),ge=d(A),C=r(A,"LI",{});var G=i(C);ee=s(G,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=r(G,"CODE",{});var ve=i(U);re=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),te=s(G," or "),N=r(G,"CODE",{});var Me=i(N);ue=s(Me,"model([input_ids, attention_mask, token_type_ids])"),Me.forEach(t),G.forEach(t),ie=d(A),O=r(A,"LI",{});var we=i(O);_e=s(we,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(we,"CODE",{});var ke=i(B);oe=s(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),we.forEach(t),A.forEach(t)},m(m,$){h(m,p,$),e(p,F),h(m,g,$),h(m,u,$),e(u,x),e(x,w),e(u,_),e(u,M),e(M,de),h(m,X,$),h(m,E,$),e(E,ne),e(E,Q),e(Q,ce),e(E,Z),e(E,H),e(H,he),e(E,ae),h(m,R,$),h(m,P,$),e(P,se),h(m,K,$),h(m,z,$),e(z,q),e(q,me),e(q,D),e(D,Y),e(q,fe),e(q,I),e(I,pe),e(z,ge),e(z,C),e(C,ee),e(C,U),e(U,re),e(C,te),e(C,N),e(N,ue),e(z,ie),e(z,O),e(O,_e),e(O,B),e(B,oe)},d(m){m&&t(p),m&&t(g),m&&t(u),m&&t(X),m&&t(E),m&&t(R),m&&t(P),m&&t(K),m&&t(z)}}}function h1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function m1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe;return{c(){p=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=a("ul"),x=a("li"),w=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),E=a("p"),ne=n("This second option is useful when using "),Q=a("code"),ce=n("tf.keras.Model.fit"),Z=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),he=n("model(inputs)"),ae=n("."),R=l(),P=a("p"),se=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),z=a("ul"),q=a("li"),me=n("a single Tensor with "),D=a("code"),Y=n("input_ids"),fe=n(" only and nothing else: 
"),I=a("code"),pe=n("model(inputs_ids)"),ge=l(),C=a("li"),ee=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=a("code"),re=n("model([input_ids, attention_mask])"),te=n(" or "),N=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),ie=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){p=r(m,"P",{});var $=i(p);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),g=d(m),u=r(m,"UL",{});var J=i(u);x=r(J,"LI",{});var Le=i(x);w=s(Le,"having all inputs as keyword arguments (like PyTorch models), or"),Le.forEach(t),_=d(J),M=r(J,"LI",{});var xe=i(M);de=s(xe,"having all inputs as a list, tuple or dict in the first positional arguments."),xe.forEach(t),J.forEach(t),X=d(m),E=r(m,"P",{});var j=i(E);ne=s(j,"This second option is useful when using "),Q=r(j,"CODE",{});var $e=i(Q);ce=s($e,"tf.keras.Model.fit"),$e.forEach(t),Z=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(j,"CODE",{});var Fe=i(H);he=s(Fe,"model(inputs)"),Fe.forEach(t),ae=s(j,"."),j.forEach(t),R=d(m),P=r(m,"P",{});var be=i(P);se=s(be,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),be.forEach(t),K=d(m),z=r(m,"UL",{});var A=i(z);q=r(A,"LI",{});var V=i(q);me=s(V,"a single Tensor with "),D=r(V,"CODE",{});var Te=i(D);Y=s(Te,"input_ids"),Te.forEach(t),fe=s(V," only and nothing else: "),I=r(V,"CODE",{});var le=i(I);pe=s(le,"model(inputs_ids)"),le.forEach(t),V.forEach(t),ge=d(A),C=r(A,"LI",{});var G=i(C);ee=s(G,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=r(G,"CODE",{});var ve=i(U);re=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),te=s(G," or "),N=r(G,"CODE",{});var Me=i(N);ue=s(Me,"model([input_ids, attention_mask, token_type_ids])"),Me.forEach(t),G.forEach(t),ie=d(A),O=r(A,"LI",{});var we=i(O);_e=s(we,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(we,"CODE",{});var ke=i(B);oe=s(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),we.forEach(t),A.forEach(t)},m(m,$){h(m,p,$),e(p,F),h(m,g,$),h(m,u,$),e(u,x),e(x,w),e(u,_),e(u,M),e(M,de),h(m,X,$),h(m,E,$),e(E,ne),e(E,Q),e(Q,ce),e(E,Z),e(E,H),e(H,he),e(E,ae),h(m,R,$),h(m,P,$),e(P,se),h(m,K,$),h(m,z,$),e(z,q),e(q,me),e(q,D),e(D,Y),e(q,fe),e(q,I),e(I,pe),e(z,ge),e(z,C),e(C,ee),e(C,U),e(U,re),e(C,te),e(C,N),e(N,ue),e(z,ie),e(z,O),e(O,_e),e(O,B),e(B,oe)},d(m){m&&t(p),m&&t(g),m&&t(u),m&&t(X),m&&t(E),m&&t(R),m&&t(P),m&&t(K),m&&t(z)}}}function f1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function p1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe;return{c(){p=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=a("ul"),x=a("li"),w=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),E=a("p"),ne=n("This second option is useful when using "),Q=a("code"),ce=n("tf.keras.Model.fit"),Z=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),he=n("model(inputs)"),ae=n("."),R=l(),P=a("p"),se=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),z=a("ul"),q=a("li"),me=n("a single Tensor with "),D=a("code"),Y=n("input_ids"),fe=n(" only and nothing else: "),I=a("code"),pe=n("model(inputs_ids)"),ge=l(),C=a("li"),ee=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=a("code"),re=n("model([input_ids, attention_mask])"),te=n(" or "),N=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),ie=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){p=r(m,"P",{});var $=i(p);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),g=d(m),u=r(m,"UL",{});var J=i(u);x=r(J,"LI",{});var Le=i(x);w=s(Le,"having all inputs as keyword arguments (like PyTorch models), or"),Le.forEach(t),_=d(J),M=r(J,"LI",{});var xe=i(M);de=s(xe,"having all inputs as a list, tuple or dict in the first positional arguments."),xe.forEach(t),J.forEach(t),X=d(m),E=r(m,"P",{});var j=i(E);ne=s(j,"This second option is useful when using "),Q=r(j,"CODE",{});var $e=i(Q);ce=s($e,"tf.keras.Model.fit"),$e.forEach(t),Z=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(j,"CODE",{});var Fe=i(H);he=s(Fe,"model(inputs)"),Fe.forEach(t),ae=s(j,"."),j.forEach(t),R=d(m),P=r(m,"P",{});var be=i(P);se=s(be,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),be.forEach(t),K=d(m),z=r(m,"UL",{});var A=i(z);q=r(A,"LI",{});var V=i(q);me=s(V,"a single Tensor with "),D=r(V,"CODE",{});var Te=i(D);Y=s(Te,"input_ids"),Te.forEach(t),fe=s(V," only and nothing else: "),I=r(V,"CODE",{});var le=i(I);pe=s(le,"model(inputs_ids)"),le.forEach(t),V.forEach(t),ge=d(A),C=r(A,"LI",{});var G=i(C);ee=s(G,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=r(G,"CODE",{});var ve=i(U);re=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),te=s(G," or "),N=r(G,"CODE",{});var Me=i(N);ue=s(Me,"model([input_ids, attention_mask, token_type_ids])"),Me.forEach(t),G.forEach(t),ie=d(A),O=r(A,"LI",{});var we=i(O);_e=s(we,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(we,"CODE",{});var ke=i(B);oe=s(ke,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),ke.forEach(t),we.forEach(t),A.forEach(t)},m(m,$){h(m,p,$),e(p,F),h(m,g,$),h(m,u,$),e(u,x),e(x,w),e(u,_),e(u,M),e(M,de),h(m,X,$),h(m,E,$),e(E,ne),e(E,Q),e(Q,ce),e(E,Z),e(E,H),e(H,he),e(E,ae),h(m,R,$),h(m,P,$),e(P,se),h(m,K,$),h(m,z,$),e(z,q),e(q,me),e(q,D),e(D,Y),e(q,fe),e(q,I),e(I,pe),e(z,ge),e(z,C),e(C,ee),e(C,U),e(U,re),e(C,te),e(C,N),e(N,ue),e(z,ie),e(z,O),e(O,_e),e(O,B),e(B,oe)},d(m){m&&t(p),m&&t(g),m&&t(u),m&&t(X),m&&t(E),m&&t(R),m&&t(P),m&&t(K),m&&t(z)}}}function g1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function u1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe;return{c(){p=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=a("ul"),x=a("li"),w=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),E=a("p"),ne=n("This second option is useful when using "),Q=a("code"),ce=n("tf.keras.Model.fit"),Z=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),he=n("model(inputs)"),ae=n("."),R=l(),P=a("p"),se=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),z=a("ul"),q=a("li"),me=n("a single Tensor with "),D=a("code"),Y=n("input_ids"),fe=n(" only and nothing else: "),I=a("code"),pe=n("model(inputs_ids)"),ge=l(),C=a("li"),ee=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=a("code"),re=n("model([input_ids, attention_mask])"),te=n(" or "),N=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),ie=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){p=r(m,"P",{});var $=i(p);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),g=d(m),u=r(m,"UL",{});var J=i(u);x=r(J,"LI",{});var Le=i(x);w=s(Le,"having all inputs as keyword arguments (like PyTorch models), or"),Le.forEach(t),_=d(J),M=r(J,"LI",{});var xe=i(M);de=s(xe,"having all inputs as a list, tuple or dict in the first positional arguments."),xe.forEach(t),J.forEach(t),X=d(m),E=r(m,"P",{});var j=i(E);ne=s(j,"This second option is useful when using "),Q=r(j,"CODE",{});var $e=i(Q);ce=s($e,"tf.keras.Model.fit"),$e.forEach(t),Z=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(j,"CODE",{});var Fe=i(H);he=s(Fe,"model(inputs)"),Fe.forEach(t),ae=s(j,"."),j.forEach(t),R=d(m),P=r(m,"P",{});var be=i(P);se=s(be,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),be.forEach(t),K=d(m),z=r(m,"UL",{});var A=i(z);q=r(A,"LI",{});var V=i(q);me=s(V,"a single Tensor with "),D=r(V,"CODE",{});var Te=i(D);Y=s(Te,"input_ids"),Te.forEach(t),fe=s(V," only and nothing else: "),I=r(V,"CODE",{});var le=i(I);pe=s(le,"model(inputs_ids)"),le.forEach(t),V.forEach(t),ge=d(A),C=r(A,"LI",{});var G=i(C);ee=s(G,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=r(G,"CODE",{});var ve=i(U);re=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),te=s(G," or "),N=r(G,"CODE",{});var Me=i(N);ue=s(Me,"model([input_ids, attention_mask, token_type_ids])"),Me.forEach(t),G.forEach(t),ie=d(A),O=r(A,"LI",{});var we=i(O);_e=s(we,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(we,"CODE",{});var ke=i(B);oe=s(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),we.forEach(t),A.forEach(t)},m(m,$){h(m,p,$),e(p,F),h(m,g,$),h(m,u,$),e(u,x),e(x,w),e(u,_),e(u,M),e(M,de),h(m,X,$),h(m,E,$),e(E,ne),e(E,Q),e(Q,ce),e(E,Z),e(E,H),e(H,he),e(E,ae),h(m,R,$),h(m,P,$),e(P,se),h(m,K,$),h(m,z,$),e(z,q),e(q,me),e(q,D),e(D,Y),e(q,fe),e(q,I),e(I,pe),e(z,ge),e(z,C),e(C,ee),e(C,U),e(U,re),e(C,te),e(C,N),e(N,ue),e(z,ie),e(z,O),e(O,_e),e(O,B),e(B,oe)},d(m){m&&t(p),m&&t(g),m&&t(u),m&&t(X),m&&t(E),m&&t(R),m&&t(P),m&&t(K),m&&t(z)}}}function _1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function w1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe;return{c(){p=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=a("ul"),x=a("li"),w=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),E=a("p"),ne=n("This second option is useful when using "),Q=a("code"),ce=n("tf.keras.Model.fit"),Z=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),he=n("model(inputs)"),ae=n("."),R=l(),P=a("p"),se=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),z=a("ul"),q=a("li"),me=n("a single Tensor with "),D=a("code"),Y=n("input_ids"),fe=n(" only and nothing else: "),I=a("code"),pe=n("model(inputs_ids)"),ge=l(),C=a("li"),ee=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=a("code"),re=n("model([input_ids, attention_mask])"),te=n(" or "),N=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),ie=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors 
associated to the input names given in the docstring: `),B=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){p=r(m,"P",{});var $=i(p);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),g=d(m),u=r(m,"UL",{});var J=i(u);x=r(J,"LI",{});var Le=i(x);w=s(Le,"having all inputs as keyword arguments (like PyTorch models), or"),Le.forEach(t),_=d(J),M=r(J,"LI",{});var xe=i(M);de=s(xe,"having all inputs as a list, tuple or dict in the first positional arguments."),xe.forEach(t),J.forEach(t),X=d(m),E=r(m,"P",{});var j=i(E);ne=s(j,"This second option is useful when using "),Q=r(j,"CODE",{});var $e=i(Q);ce=s($e,"tf.keras.Model.fit"),$e.forEach(t),Z=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(j,"CODE",{});var Fe=i(H);he=s(Fe,"model(inputs)"),Fe.forEach(t),ae=s(j,"."),j.forEach(t),R=d(m),P=r(m,"P",{});var be=i(P);se=s(be,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),be.forEach(t),K=d(m),z=r(m,"UL",{});var A=i(z);q=r(A,"LI",{});var V=i(q);me=s(V,"a single Tensor with "),D=r(V,"CODE",{});var Te=i(D);Y=s(Te,"input_ids"),Te.forEach(t),fe=s(V," only and nothing else: "),I=r(V,"CODE",{});var le=i(I);pe=s(le,"model(inputs_ids)"),le.forEach(t),V.forEach(t),ge=d(A),C=r(A,"LI",{});var G=i(C);ee=s(G,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=r(G,"CODE",{});var ve=i(U);re=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),te=s(G," or "),N=r(G,"CODE",{});var Me=i(N);ue=s(Me,"model([input_ids, attention_mask, token_type_ids])"),Me.forEach(t),G.forEach(t),ie=d(A),O=r(A,"LI",{});var we=i(O);_e=s(we,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(we,"CODE",{});var ke=i(B);oe=s(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),we.forEach(t),A.forEach(t)},m(m,$){h(m,p,$),e(p,F),h(m,g,$),h(m,u,$),e(u,x),e(x,w),e(u,_),e(u,M),e(M,de),h(m,X,$),h(m,E,$),e(E,ne),e(E,Q),e(Q,ce),e(E,Z),e(E,H),e(H,he),e(E,ae),h(m,R,$),h(m,P,$),e(P,se),h(m,K,$),h(m,z,$),e(z,q),e(q,me),e(q,D),e(D,Y),e(q,fe),e(q,I),e(I,pe),e(z,ge),e(z,C),e(C,ee),e(C,U),e(U,re),e(C,te),e(C,N),e(N,ue),e(z,ie),e(z,O),e(O,_e),e(O,B),e(B,oe)},d(m){m&&t(p),m&&t(g),m&&t(u),m&&t(X),m&&t(E),m&&t(R),m&&t(P),m&&t(K),m&&t(z)}}}function v1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function k1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe;return{c(){p=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=a("ul"),x=a("li"),w=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=a("li"),de=n("having 
all inputs as a list, tuple or dict in the first positional arguments."),X=l(),E=a("p"),ne=n("This second option is useful when using "),Q=a("code"),ce=n("tf.keras.Model.fit"),Z=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),he=n("model(inputs)"),ae=n("."),R=l(),P=a("p"),se=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),z=a("ul"),q=a("li"),me=n("a single Tensor with "),D=a("code"),Y=n("input_ids"),fe=n(" only and nothing else: "),I=a("code"),pe=n("model(inputs_ids)"),ge=l(),C=a("li"),ee=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=a("code"),re=n("model([input_ids, attention_mask])"),te=n(" or "),N=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),ie=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){p=r(m,"P",{});var $=i(p);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),g=d(m),u=r(m,"UL",{});var J=i(u);x=r(J,"LI",{});var Le=i(x);w=s(Le,"having all inputs as keyword arguments (like PyTorch models), or"),Le.forEach(t),_=d(J),M=r(J,"LI",{});var xe=i(M);de=s(xe,"having all inputs as a list, tuple or dict in the first positional arguments."),xe.forEach(t),J.forEach(t),X=d(m),E=r(m,"P",{});var j=i(E);ne=s(j,"This second option is useful when using "),Q=r(j,"CODE",{});var $e=i(Q);ce=s($e,"tf.keras.Model.fit"),$e.forEach(t),Z=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(j,"CODE",{});var Fe=i(H);he=s(Fe,"model(inputs)"),Fe.forEach(t),ae=s(j,"."),j.forEach(t),R=d(m),P=r(m,"P",{});var be=i(P);se=s(be,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),be.forEach(t),K=d(m),z=r(m,"UL",{});var A=i(z);q=r(A,"LI",{});var V=i(q);me=s(V,"a single Tensor with "),D=r(V,"CODE",{});var Te=i(D);Y=s(Te,"input_ids"),Te.forEach(t),fe=s(V," only and nothing else: "),I=r(V,"CODE",{});var le=i(I);pe=s(le,"model(inputs_ids)"),le.forEach(t),V.forEach(t),ge=d(A),C=r(A,"LI",{});var G=i(C);ee=s(G,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),U=r(G,"CODE",{});var ve=i(U);re=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),te=s(G," or "),N=r(G,"CODE",{});var Me=i(N);ue=s(Me,"model([input_ids, attention_mask, token_type_ids])"),Me.forEach(t),G.forEach(t),ie=d(A),O=r(A,"LI",{});var we=i(O);_e=s(we,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(we,"CODE",{});var ke=i(B);oe=s(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),we.forEach(t),A.forEach(t)},m(m,$){h(m,p,$),e(p,F),h(m,g,$),h(m,u,$),e(u,x),e(x,w),e(u,_),e(u,M),e(M,de),h(m,X,$),h(m,E,$),e(E,ne),e(E,Q),e(Q,ce),e(E,Z),e(E,H),e(H,he),e(E,ae),h(m,R,$),h(m,P,$),e(P,se),h(m,K,$),h(m,z,$),e(z,q),e(q,me),e(q,D),e(D,Y),e(q,fe),e(q,I),e(I,pe),e(z,ge),e(z,C),e(C,ee),e(C,U),e(U,re),e(C,te),e(C,N),e(N,ue),e(z,ie),e(z,O),e(O,_e),e(O,B),e(B,oe)},d(m){m&&t(p),m&&t(g),m&&t(u),m&&t(X),m&&t(E),m&&t(R),m&&t(P),m&&t(K),m&&t(z)}}}function b1(W){let p,F,g,u,x;return{c(){p=a("p"),F=n("Although the recipe for forward pass needs to be 
defined within this function, one should call the "),g=a("code"),u=n("Module"),x=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=r(w,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(t),x=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(w,_){h(w,p,_),e(p,F),e(p,g),e(g,u),e(p,x)},d(w){w&&t(p)}}}function T1(W){let p,F,g,u,x,w,_,M,de,X,E,ne,Q,ce,Z,H,he,ae,R,P,se,K,z,q,me,D,Y,fe,I,pe,ge,C,ee,U,re,te,N,ue,ie,O,_e,B,oe,m,$,J,Le,xe,j,$e,Fe,be,A,V,Te,le,G,ve,Me,we,ke,jh,Ah,sd,Ot,jo,Vr,xn,Ih,Gr,Nh,ad,ye,Sh,rd,RT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mfrac><mn>1</mn><mn>2</mn></mfrac><mi>w</mi></mrow><annotation encoding="application/x-tex">\\frac{1}{2} w</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.1901em;vertical-align:-0.345em;"></span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8451em;"><span style="top:-2.655em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.394em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.345em;"><span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span><span class="mord mathnormal" style="margin-right:0.02691em;">w</span></span></span></span>',id,ld,UT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mfrac><mn>1</mn><mn>2</mn></mfrac><mi>w</mi></mrow><annotation encoding="application/x-tex">\\frac{1}{2} w</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.1901em;vertical-align:-0.345em;"></span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8451em;"><span style="top:-2.655em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.394em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" 
style="height:0.345em;"><span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span><span class="mord mathnormal" style="margin-right:0.02691em;">w</span></span></span></span>',dd,cd,VT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>w</mi></mrow><annotation encoding="application/x-tex">w</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.4306em;"></span><span class="mord mathnormal" style="margin-right:0.02691em;">w</span></span></span></span>',hd,Kr,Dh,Bh,Jr,Wh,Qh,Xr,Hh,Rh,md,GT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>w</mi></mrow><annotation encoding="application/x-tex">w</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.4306em;"></span><span class="mord mathnormal" style="margin-right:0.02691em;">w</span></span></span></span>',fd,Yr,Uh,Vh,pd,gt,Gh,gd,KT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>w</mi></mrow><annotation encoding="application/x-tex">w</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.4306em;"></span><span class="mord mathnormal" style="margin-right:0.02691em;">w</span></span></span></span>',ud,Zr,Kh,Jh,_d,ut,Xh,ei,Yh,Zh,ti,em,tm,wd,Ao,oi,om,nm,ni,sm,vd,Io,am,Na,rm,im,kd,Ve,lm,bd,JT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi mathvariant="script">O</mi><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo>\xD7</mo><msub><mi>n</mi><mi>s</mi></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\mathcal{O}(n_s \\times n_s)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathcal" style="margin-right:0.02778em;">O</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" 
style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',Td,yd,XT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi mathvariant="script">O</mi><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo>\xD7</mo><mi>w</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\mathcal{O}(n_s \\times w)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathcal" style="margin-right:0.02778em;">O</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathnormal" style="margin-right:0.02691em;">w</span><span class="mclose">)</span></span></span></span>',Ld,xd,YT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>n</mi><mi>s</mi></msub></mrow><annotation encoding="application/x-tex">n_s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',$d,Fd,ZT='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>w</mi></mrow><annotation encoding="application/x-tex">w</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.4306em;"></span><span class="mord mathnormal" 
style="margin-right:0.02691em;">w</span></span></span></span>',Md,Ed,No,dm,$n,cm,hm,zd,jt,So,si,Fn,mm,ai,fm,qd,At,Sa,pm,gm,Da,um,_m,Cd,Mn,Pd,It,Do,ri,En,wm,ii,vm,Od,je,zn,km,Nt,bm,Ba,Tm,ym,Wa,Lm,xm,$m,St,Fm,Qa,Mm,Em,qn,zm,qm,Cm,Dt,Pm,Ha,Om,jm,Ra,Am,Im,Nm,li,Sm,Dm,Cn,jd,Bt,Bo,di,Pn,Bm,ci,Wm,Ad,ft,On,Qm,hi,Hm,Rm,Wo,Ua,Um,Vm,Va,Gm,Km,Id,Wt,Qo,mi,jn,Jm,fi,Xm,Nd,pt,An,Ym,In,Zm,pi,ef,tf,of,Ho,Ga,nf,sf,Ka,af,rf,Sd,Qt,Ro,gi,Nn,lf,ui,df,Dd,Ht,Sn,cf,_i,hf,Bd,Rt,Dn,mf,wi,ff,Wd,Ut,Bn,pf,vi,gf,Qd,Vt,Wn,uf,ki,_f,Hd,Gt,Qn,wf,bi,vf,Rd,Kt,Hn,kf,Ti,bf,Ud,Jt,Rn,Tf,yi,yf,Vd,Xt,Un,Lf,Li,xf,Gd,Yt,Vn,$f,xi,Ff,Kd,Zt,Gn,Mf,$i,Ef,Jd,eo,Kn,zf,Fi,qf,Xd,to,Jn,Cf,Mi,Pf,Yd,oo,Xn,Of,Ei,jf,Zd,no,Yn,Af,zi,If,ec,so,Uo,qi,Zn,Nf,Ci,Sf,tc,qe,es,Df,Pi,Bf,Wf,ts,Qf,Ja,Hf,Rf,Uf,os,Vf,ns,Gf,Kf,Jf,ao,Xf,Xa,Yf,Zf,ss,ep,tp,op,as,np,Oi,sp,ap,rp,Ge,rs,ip,ro,lp,Ya,dp,cp,ji,hp,mp,fp,Vo,pp,Ai,gp,up,is,oc,io,Go,Ii,ls,_p,Ni,wp,nc,We,ds,vp,cs,kp,Si,bp,Tp,yp,hs,Lp,Za,xp,$p,Fp,ms,Mp,fs,Ep,zp,qp,Ke,ps,Cp,lo,Pp,er,Op,jp,Di,Ap,Ip,Np,Ko,Sp,Bi,Dp,Bp,gs,sc,co,Jo,Wi,us,Wp,Qi,Qp,ac,Qe,_s,Hp,Hi,Rp,Up,ws,Vp,tr,Gp,Kp,Jp,vs,Xp,ks,Yp,Zp,eg,Ce,bs,tg,ho,og,or,ng,sg,Ri,ag,rg,ig,Xo,lg,Ui,dg,cg,Ts,hg,Vi,mg,fg,ys,rc,mo,Yo,Gi,Ls,pg,Ki,gg,ic,He,xs,ug,Ji,_g,wg,$s,vg,nr,kg,bg,Tg,Fs,yg,Ms,Lg,xg,$g,Je,Es,Fg,fo,Mg,sr,Eg,zg,Xi,qg,Cg,Pg,Zo,Og,Yi,jg,Ag,zs,lc,po,en,Zi,qs,Ig,el,Ng,dc,Re,Cs,Sg,tl,Dg,Bg,Ps,Wg,ar,Qg,Hg,Rg,Os,Ug,js,Vg,Gg,Kg,Xe,As,Jg,go,Xg,rr,Yg,Zg,ol,eu,tu,ou,tn,nu,nl,su,au,Is,cc,uo,on,sl,Ns,ru,al,iu,hc,Ue,Ss,lu,_o,du,rl,cu,hu,il,mu,fu,pu,Ds,gu,ir,uu,_u,wu,Bs,vu,Ws,ku,bu,Tu,Ye,Qs,yu,wo,Lu,lr,xu,$u,ll,Fu,Mu,Eu,nn,zu,dl,qu,Cu,Hs,mc,vo,sn,cl,Rs,Pu,hl,Ou,fc,Ee,Us,ju,ml,Au,Iu,Vs,Nu,dr,Su,Du,Bu,Gs,Wu,Ks,Qu,Hu,Ru,an,Uu,ko,Vu,cr,Gu,Ku,Js,Ju,Xu,Yu,Xs,Zu,fl,e_,t_,o_,_t,Ys,n_,bo,s_,hr,a_,r_,pl,i_,l_,d_,rn,pc,To,ln,gl,Zs,c_,ul,h_,gc,Ae,ea,m_,ta,f_,_l,p_,g_,u_,oa,__,mr,w_,v_,k_,na,b_,sa,T_,y_,L_,dn,x_,Ze,aa,$_,yo,F_,fr,M_,E_,wl,z_,q_,C_,cn,P_,vl,O_,j_,ra,uc,Lo,hn,kl,ia,A_,bl,I_,_c,Ie,la,N_,xo,S_,Tl,D_,B_,yl,W_,Q_,H_,da,R_,pr,U_,V_,G_,ca,K_,ha,J_,X_,Y_,mn,Z_,et,ma,ew,$o,tw,gr,ow,nw,Ll,sw,aw,rw,fn,iw,xl,lw,dw,fa,wc,Fo,pn,$l,pa,cw,Fl,hw,vc,Ne,ga,mw,Ml,fw,pw,ua,gw,ur,uw,_w,ww,_a,vw,wa,kw,bw,Tw,gn,yw,tt,va,Lw,Mo,xw,_r,$w,Fw,El,Mw,Ew,zw,un,qw,zl,Cw,Pw,ka,kc,Eo,_n,ql,ba,Ow,Cl,jw,bc,Se,Ta,Aw,Pl,Iw,Nw,ya,Sw,wr,Dw,Bw,Ww,La,Qw,xa,Hw,Rw,Uw,wn,Vw,ot,$a,Gw,zo,Kw,vr,Jw,Xw,Ol,Yw,Zw,ev,vn,tv,jl,ov,nv,Fa,Tc,qo,kn,Al,Ma,sv,Il,av,yc,De,Ea,rv,Nl,iv,lv,za,dv,kr,cv,hv,mv,qa,fv,Ca,pv,gv,uv,bn,_v,nt,Pa,wv,Co,vv,br,kv,bv,Sl,Tv,yv,Lv,Tn,xv,Dl,$v,Fv,Oa,Lc;return w=new ze({}),K=new ze({}),xn=new ze({}),Fn=new ze({}),Mn=new rt({props:{code:`input_ids = tokenizer.encode('This is a sentence from [MASK] training data', return_tensors='pt') mlm_labels = tokenizer.encode('This is a sentence from the training data', return_tensors='pt') loss = model(input_ids, labels=input_ids, masked_lm_labels=mlm_labels)[0],`,highlighted:`input_ids = tokenizer.encode(<span class="hljs-string">&#x27;This is a sentence from [MASK] training data&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) mlm_labels = tokenizer.encode(<span class="hljs-string">&#x27;This is a sentence from the training data&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) loss = model(input_ids, labels=input_ids, masked_lm_labels=mlm_labels)[<span class="hljs-number">0</span>]`}}),En=new ze({}),zn=new S({props:{name:"class transformers.LongformerConfig",anchor:"transformers.LongformerConfig",parameters:[{name:"attention_window",val:": typing.Union[typing.List[int], int] = 
512"},{name:"sep_token_id",val:": int = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/configuration_longformer.py#L35",parametersDescription:[{anchor:"transformers.LongformerConfig.attention_window",description:`<strong>attention_window</strong> (<code>int</code> or <code>List[int]</code>, <em>optional</em>, defaults to 512) &#x2014; Size of an attention window around each token. If an <code>int</code>, use the same size for all layers. To specify a different window size for each layer, use a <code>List[int]</code> where <code>len(attention_window) == num_hidden_layers</code>.`,name:"attention_window"}]}}),Cn=new rt({props:{code:`from transformers import LongformerConfig, LongformerModel # Initializing a Longformer configuration configuration = LongformerConfig() # Initializing a model from the configuration model = LongformerModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerConfig, LongformerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Longformer configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = LongformerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Pn=new ze({}),On=new S({props:{name:"class transformers.LongformerTokenizer",anchor:"transformers.LongformerTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/tokenization_longformer.py#L51"}}),jn=new ze({}),An=new S({props:{name:"class transformers.LongformerTokenizerFast",anchor:"transformers.LongformerTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/tokenization_longformer_fast.py#L59"}}),Nn=new ze({}),Sn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput",anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"hidden_states",val:": 
typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L61",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Dn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling",anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"pooler_output",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L102",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.`,name:"pooler_output"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Bn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput",anchor:"transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L148",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. 
If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Wn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput",anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_logits",val:": FloatTensor = None"},{name:"end_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L192",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at 
the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Qn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput",anchor:"transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L239",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). 
Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Hn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput",anchor:"transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L283",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Rn=new S({props:{name:"class transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput",anchor:"transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L329",parametersDescription:[{anchor:"transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. 
If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Un=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L64",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted 
average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Vn=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"pooler_output",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L105",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.`,name:"pooler_output"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Gn=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L151",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). 
Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Kn=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"start_logits",val:": Tensor = None"},{name:"end_logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L195",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before 
SoftMax).`,name:"end_logits"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Jn=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L242",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). 
Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Xn=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L286",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Yn=new S({props:{name:"class transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput",anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L332",parametersDescription:[{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. 
If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Zn=new ze({}),es=new S({props:{name:"class transformers.LongformerModel",anchor:"transformers.LongformerModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1460",parametersDescription:[{anchor:"transformers.LongformerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rs=new S({props:{name:"forward",anchor:"transformers.LongformerModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1571",parametersDescription:[{anchor:"transformers.LongformerModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LongformerModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongformerModel.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LongformerModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongformerModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongformerModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LongformerModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LongformerModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongformerModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongformerModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongformerModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling" >transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling" >transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Vo=new Oe({props:{$$slots:{default:[s1]},$$scope:{ctx:W}}}),is=new rt({props:{code:`import torch from transformers import LongformerModel, LongformerTokenizer model = LongformerModel.from_pretrained('allenai/longformer-base-4096') tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to local attention global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to global attention to be deactivated for all tokens global_attention_mask[:, [1, 4, 21,]] = 1 # Set global attention to random tokens for the sake of this example # Usually, set global attention based on the task. For example, # classification: the <s> token # QA: question tokens # LM: potentially on the beginning of sentences and paragraphs outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) sequence_output = outputs.last_hidden_state pooled_output = outputs.pooler_output,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerModel, LongformerTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerModel.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>SAMPLE_TEXT = <span class="hljs-string">&#x27; &#x27;</span>.join([<span class="hljs-string">&#x27;Hello world! &#x27;</span>] * <span class="hljs-number">1000</span>) <span class="hljs-comment"># long input document</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># batch of size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) <span class="hljs-comment"># initialize to local attention</span> <span class="hljs-meta">&gt;&gt;&gt; </span>global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) <span class="hljs-comment"># initialize to global attention to be deactivated for all tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>global_attention_mask[:, [<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">21</span>,]] = <span class="hljs-number">1</span> <span class="hljs-comment"># Set global attention to random tokens for the sake of this example</span> <span class="hljs-meta">... 
</span> <span class="hljs-comment"># Usually, set global attention based on the task. For example,</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># classification: the &lt;s&gt; token</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># QA: question tokens</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># LM: potentially on the beginning of sentences and paragraphs</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_output = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output`}}),ls=new ze({}),ds=new S({props:{name:"class transformers.LongformerForMaskedLM",anchor:"transformers.LongformerForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1688",parametersDescription:[{anchor:"transformers.LongformerForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ps=new S({props:{name:"forward",anchor:"transformers.LongformerForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1707",parametersDescription:[{anchor:"transformers.LongformerForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LongformerForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongformerForMaskedLM.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LongformerForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongformerForMaskedLM.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongformerForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LongformerForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LongformerForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongformerForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongformerForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongformerForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LongformerForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.LongformerForMaskedLM.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput" >transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput" >transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ko=new Oe({props:{$$slots:{default:[a1]},$$scope:{ctx:W}}}),gs=new rt({props:{code:`import torch from transformers import LongformerForMaskedLM, LongformerTokenizer model = LongformerForMaskedLM.from_pretrained('allenai/longformer-base-4096') tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 attention_mask = None # default is local attention everywhere, which is a good choice for MaskedLM # check \`LongformerModel.forward\` for more details how to set *attention_mask* outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids) loss = outputs.loss prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerForMaskedLM, LongformerTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>SAMPLE_TEXT = <span class="hljs-string">&#x27; &#x27;</span>.join([<span class="hljs-string">&#x27;Hello world! &#x27;</span>] * <span class="hljs-number">1000</span>) <span class="hljs-comment"># long input document</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># batch of size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = <span class="hljs-literal">None</span> <span class="hljs-comment"># default is local attention everywhere, which is a good choice for MaskedLM</span> <span class="hljs-meta">... 
</span> <span class="hljs-comment"># check \`LongformerModel.forward\` for more details how to set *attention_mask*</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),us=new ze({}),_s=new S({props:{name:"class transformers.LongformerForSequenceClassification",anchor:"transformers.LongformerForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1792",parametersDescription:[{anchor:"transformers.LongformerForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bs=new S({props:{name:"forward",anchor:"transformers.LongformerForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1807",parametersDescription:[{anchor:"transformers.LongformerForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LongformerForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongformerForSequenceClassification.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LongformerForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongformerForSequenceClassification.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongformerForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LongformerForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LongformerForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongformerForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongformerForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongformerForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LongformerForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput" >transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput" >transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xo=new Oe({props:{$$slots:{default:[r1]},$$scope:{ctx:W}}}),Ts=new rt({props:{code:`from transformers import LongformerTokenizer, LongformerForSequenceClassification import torch tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = LongformerForSequenceClassification.from_pretrained('allenai/longformer-base-4096') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, LongformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ys=new rt({props:{code:`from transformers import LongformerTokenizer, LongformerForSequenceClassification import torch tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = LongformerForSequenceClassification.from_pretrained('allenai/longformer-base-4096', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = 
outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, LongformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ls=new ze({}),xs=new S({props:{name:"class transformers.LongformerForMultipleChoice",anchor:"transformers.LongformerForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L2149",parametersDescription:[{anchor:"transformers.LongformerForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Es=new S({props:{name:"forward",anchor:"transformers.LongformerForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"labels",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L2160",parametersDescription:[{anchor:"transformers.LongformerForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LongformerForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongformerForMultipleChoice.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LongformerForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongformerForMultipleChoice.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongformerForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LongformerForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LongformerForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongformerForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongformerForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongformerForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LongformerForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput" >transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput" >transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Zo=new Oe({props:{$$slots:{default:[i1]},$$scope:{ctx:W}}}),zs=new rt({props:{code:`from transformers import LongformerTokenizer, LongformerForMultipleChoice import torch tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = LongformerForMultipleChoice.from_pretrained('allenai/longformer-base-4096') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, LongformerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span 
class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qs=new ze({}),Cs=new S({props:{name:"class transformers.LongformerForTokenClassification",anchor:"transformers.LongformerForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L2055",parametersDescription:[{anchor:"transformers.LongformerForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),As=new S({props:{name:"forward",anchor:"transformers.LongformerForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L2070",parametersDescription:[{anchor:"transformers.LongformerForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LongformerForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongformerForTokenClassification.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LongformerForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongformerForTokenClassification.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongformerForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LongformerForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LongformerForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongformerForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongformerForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongformerForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LongformerForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput" >transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput" >transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tn=new Oe({props:{$$slots:{default:[l1]},$$scope:{ctx:W}}}),Is=new rt({props:{code:`from transformers import LongformerTokenizer, LongformerForTokenClassification import torch tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = LongformerForTokenClassification.from_pretrained('allenai/longformer-base-4096') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, LongformerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ns=new ze({}),Ss=new S({props:{name:"class transformers.LongformerForQuestionAnswering",anchor:"transformers.LongformerForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1918",parametersDescription:[{anchor:"transformers.LongformerForQuestionAnswering.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qs=new S({props:{name:"forward",anchor:"transformers.LongformerForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_longformer.py#L1932",parametersDescription:[{anchor:"transformers.LongformerForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LongformerForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongformerForQuestionAnswering.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. 
Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LongformerForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongformerForQuestionAnswering.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongformerForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LongformerForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LongformerForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongformerForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongformerForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongformerForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LongformerForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.LongformerForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput" >transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + 
attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput" >transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nn=new Oe({props:{$$slots:{default:[d1]},$$scope:{ctx:W}}}),Hs=new rt({props:{code:`from transformers import LongformerTokenizer, LongformerForQuestionAnswering import torch tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" encoding = tokenizer(question, text, return_tensors="pt") input_ids = encoding["input_ids"] # default is local attention everywhere # the forward method will automatically set global attention on question tokens attention_mask = encoding["attention_mask"] outputs = model(input_ids, attention_mask=attention_mask) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) answer_tokens = all_tokens[torch.argmax(start_logits) :torch.argmax(end_logits)+1] answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) # remove space prepending space token,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, LongformerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/longformer-large-4096-finetuned-triviaqa&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongformerForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;allenai/longformer-large-4096-finetuned-triviaqa&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># default is local attention everywhere</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the forward method will automatically set global attention on question tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, attention_mask=attention_mask) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_ids[<span class="hljs-number">0</span>].tolist()) <span class="hljs-meta">&gt;&gt;&gt; </span>answer_tokens = 
all_tokens[torch.argmax(start_logits) :torch.argmax(end_logits)+<span class="hljs-number">1</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) <span class="hljs-comment"># remove space prepending space token</span>`}}),Rs=new ze({}),Us=new S({props:{name:"class transformers.TFLongformerModel",anchor:"transformers.TFLongformerModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L1971",parametersDescription:[{anchor:"transformers.TFLongformerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),an=new Oe({props:{$$slots:{default:[c1]},$$scope:{ctx:W}}}),Ys=new S({props:{name:"call",anchor:"transformers.TFLongformerModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L1993",parametersDescription:[{anchor:"transformers.TFLongformerModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLongformerModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLongformerModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLongformerModel.call.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.TFLongformerModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLongformerModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLongformerModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLongformerModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLongformerModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLongformerModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLongformerModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}]}}),rn=new Oe({props:{$$slots:{default:[h1]},$$scope:{ctx:W}}}),Zs=new ze({}),ea=new S({props:{name:"class transformers.TFLongformerForMaskedLM",anchor:"transformers.TFLongformerForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2059",parametersDescription:[{anchor:"transformers.TFLongformerForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),dn=new Oe({props:{$$slots:{default:[m1]},$$scope:{ctx:W}}}),aa=new S({props:{name:"call",anchor:"transformers.TFLongformerForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2076",parametersDescription:[{anchor:"transformers.TFLongformerForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLongformerForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLongformerForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLongformerForMaskedLM.call.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.TFLongformerForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLongformerForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLongformerForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLongformerForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLongformerForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLongformerForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLongformerForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLongformerForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),cn=new Oe({props:{$$slots:{default:[f1]},$$scope:{ctx:W}}}),ra=new rt({props:{code:`from transformers import LongformerTokenizer, TFLongformerForMaskedLM import tensorflow as tf tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = TFLongformerForMaskedLM.from_pretrained('allenai/longformer-base-4096') inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, TFLongformerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLongformerForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ia=new ze({}),la=new S({props:{name:"class 
transformers.TFLongformerForQuestionAnswering",anchor:"transformers.TFLongformerForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2169",parametersDescription:[{anchor:"transformers.TFLongformerForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mn=new Oe({props:{$$slots:{default:[p1]},$$scope:{ctx:W}}}),ma=new S({props:{name:"call",anchor:"transformers.TFLongformerForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2184",parametersDescription:[{anchor:"transformers.TFLongformerForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFLongformerForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. 
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),fn=new Oe({props:{$$slots:{default:[g1]},$$scope:{ctx:W}}}),fa=new rt({props:{code:`from transformers import LongformerTokenizer, TFLongformerForQuestionAnswering import tensorflow as tf tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-large-4096-finetuned-triviaqa') model = TFLongformerForQuestionAnswering.from_pretrained('allenai/longformer-large-4096-finetuned-triviaqa') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, TFLongformerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-large-4096-finetuned-triviaqa&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLongformerForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-large-4096-finetuned-triviaqa&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span 
class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),pa=new ze({}),ga=new S({props:{name:"class transformers.TFLongformerForSequenceClassification",anchor:"transformers.TFLongformerForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2341",parametersDescription:[{anchor:"transformers.TFLongformerForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gn=new Oe({props:{$$slots:{default:[u1]},$$scope:{ctx:W}}}),va=new S({props:{name:"call",anchor:"transformers.TFLongformerForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2353",parametersDescription:[{anchor:"transformers.TFLongformerForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLongformerForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLongformerForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLongformerForSequenceClassification.call.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.TFLongformerForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLongformerForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLongformerForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLongformerForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLongformerForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLongformerForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLongformerForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),un=new Oe({props:{$$slots:{default:[_1]},$$scope:{ctx:W}}}),ka=new rt({props:{code:`from transformers import LongformerTokenizer, TFLongformerForSequenceClassification import tensorflow as tf tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = TFLongformerForSequenceClassification.from_pretrained('allenai/longformer-base-4096') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, TFLongformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLongformerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ba=new ze({}),Ta=new S({props:{name:"class transformers.TFLongformerForTokenClassification",anchor:"transformers.TFLongformerForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2616",parametersDescription:[{anchor:"transformers.TFLongformerForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),wn=new Oe({props:{$$slots:{default:[w1]},$$scope:{ctx:W}}}),$a=new S({props:{name:"call",anchor:"transformers.TFLongformerForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2631",parametersDescription:[{anchor:"transformers.TFLongformerForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLongformerForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLongformerForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLongformerForTokenClassification.call.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. 
Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.TFLongformerForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLongformerForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLongformerForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLongformerForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLongformerForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLongformerForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLongformerForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLongformerForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. 
If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),vn=new Oe({props:{$$slots:{default:[v1]},$$scope:{ctx:W}}}),Fa=new rt({props:{code:`from transformers import LongformerTokenizer, TFLongformerForTokenClassification import tensorflow as tf tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = TFLongformerForTokenClassification.from_pretrained('allenai/longformer-base-4096') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, TFLongformerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLongformerForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ma=new ze({}),Ea=new S({props:{name:"class 
transformers.TFLongformerForMultipleChoice",anchor:"transformers.TFLongformerForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2457",parametersDescription:[{anchor:"transformers.TFLongformerForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bn=new Oe({props:{$$slots:{default:[k1]},$$scope:{ctx:W}}}),Pa=new S({props:{name:"call",anchor:"transformers.TFLongformerForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/longformer/modeling_tf_longformer.py#L2477",parametersDescription:[{anchor:"transformers.TFLongformerForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer">LongformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLongformerForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLongformerForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLongformerForMultipleChoice.call.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.TFLongformerForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLongformerForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLongformerForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLongformerForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLongformerForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLongformerForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLongformerForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLongformerForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig" >LongformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.</p> </li> <li> <p><strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput" >transformers.models.longformer.modeling_tf_longformer.TFLongformerMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Tn=new Oe({props:{$$slots:{default:[b1]},$$scope:{ctx:W}}}),Oa=new rt({props:{code:`from transformers import LongformerTokenizer, TFLongformerForMultipleChoice import tensorflow as tf tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') model = TFLongformerForMultipleChoice.from_pretrained('allenai/longformer-base-4096') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LongformerTokenizer, TFLongformerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LongformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLongformerForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;allenai/longformer-base-4096&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){p=a("meta"),F=l(),g=a("h1"),u=a("a"),x=a("span"),v(w.$$.fragment),_=l(),M=a("span"),de=n("Longformer"),X=l(),E=a("p"),ne=a("strong"),Q=n("DISCLAIMER:"),ce=n(" This model is still a work in progress, if you see something strange, file a "),Z=a("a"),H=n("Github Issue"),he=n("."),ae=l(),R=a("h2"),P=a("a"),se=a("span"),v(K.$$.fragment),z=l(),q=a("span"),me=n("Overview"),D=l(),Y=a("p"),fe=n("The Longformer model was 
presented in "),I=a("a"),pe=n("Longformer: The Long-Document Transformer"),ge=n(" by Iz Beltagy, Matthew E. Peters, Arman Cohan."),C=l(),ee=a("p"),U=n("The abstract from the paper is the following:"),re=l(),te=a("p"),N=a("em"),ue=n(`Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer\u2019s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.`),ie=l(),O=a("p"),_e=n("Tips:"),B=l(),oe=a("ul"),m=a("li"),$=n("Since the Longformer is based on RoBERTa, it doesn\u2019t have "),J=a("code"),Le=n("token_type_ids"),xe=n(`. You don\u2019t need to indicate which token belongs to which segment. Just separate your segments with the separation token `),j=a("code"),$e=n("tokenizer.sep_token"),Fe=n(` (or `),be=a("code"),A=n("</s>"),V=n(")."),Te=l(),le=a("p"),G=n("This model was contributed by "),ve=a("a"),Me=n("beltagy"),we=n(". The Authors\u2019 code can be found "),ke=a("a"),jh=n("here"),Ah=n("."),sd=l(),Ot=a("h2"),jo=a("a"),Vr=a("span"),v(xn.$$.fragment),Ih=l(),Gr=a("span"),Nh=n("Longformer Self Attention"),ad=l(),ye=a("p"),Sh=n(`Longformer self attention employs self attention on both a \u201Clocal\u201D context and a \u201Cglobal\u201D context. Most tokens only attend \u201Clocally\u201D to each other meaning that each token attends to its `),rd=new Po,id=n(` previous tokens and `),ld=new Po,dd=n(" succeding tokens with "),cd=new Po,hd=n(` being the window length as defined in `),Kr=a("code"),Dh=n("config.attention_window"),Bh=n(". Note that "),Jr=a("code"),Wh=n("config.attention_window"),Qh=n(" can be of type "),Xr=a("code"),Hh=n("List"),Rh=n(` to define a different `),md=new Po,fd=n(` for each layer. A selected few tokens attend \u201Cglobally\u201D to all other tokens, as it is conventionally done for all tokens in `),Yr=a("code"),Uh=n("BertSelfAttention"),Vh=n("."),pd=l(),gt=a("p"),Gh=n(`Note that \u201Clocally\u201D and \u201Cglobally\u201D attending tokens are projected by different query, key and value matrices. Also note that every \u201Clocally\u201D attending token not only attends to tokens within its window `),gd=new Po,ud=n(`, but also to all \u201Cglobally\u201D attending tokens so that global attention is `),Zr=a("em"),Kh=n("symmetric"),Jh=n("."),_d=l(),ut=a("p"),Xh=n(`The user can define which tokens attend \u201Clocally\u201D and which tokens attend \u201Cglobally\u201D by setting the tensor `),ei=a("code"),Yh=n("global_attention_mask"),Zh=n(` at run-time appropriately. 
All Longformer models employ the following logic for `),ti=a("code"),em=n("global_attention_mask"),tm=n(":"),wd=l(),Ao=a("ul"),oi=a("li"),om=n("0: the token attends \u201Clocally\u201D,"),nm=l(),ni=a("li"),sm=n("1: the token attends \u201Cglobally\u201D."),vd=l(),Io=a("p"),am=n("For more information please also refer to "),Na=a("a"),rm=n("forward()"),im=n(" method."),kd=l(),Ve=a("p"),lm=n(`Using Longformer self attention, the memory and time complexity of the query-key matmul operation, which usually represents the memory and time bottleneck, can be reduced from `),bd=new Po,Td=n(` to `),yd=new Po,Ld=n(", with "),xd=new Po,$d=n(" being the sequence length and "),Fd=new Po,Md=n(` being the average window size. It is assumed that the number of \u201Cglobally\u201D attending tokens is insignificant as compared to the number of \u201Clocally\u201D attending tokens.`),Ed=l(),No=a("p"),dm=n("For more information, please refer to the official "),$n=a("a"),cm=n("paper"),hm=n("."),zd=l(),jt=a("h2"),So=a("a"),si=a("span"),v(Fn.$$.fragment),mm=l(),ai=a("span"),fm=n("Training"),qd=l(),At=a("p"),Sa=a("a"),pm=n("LongformerForMaskedLM"),gm=n(" is trained the exact same way "),Da=a("a"),um=n("RobertaForMaskedLM"),_m=n(` is trained and should be used as follows:`),Cd=l(),v(Mn.$$.fragment),Pd=l(),It=a("h2"),Do=a("a"),ri=a("span"),v(En.$$.fragment),wm=l(),ii=a("span"),vm=n("LongformerConfig"),Od=l(),je=a("div"),v(zn.$$.fragment),km=l(),Nt=a("p"),bm=n("This is the configuration class to store the configuration of a "),Ba=a("a"),Tm=n("LongformerModel"),ym=n(` or a `),Wa=a("a"),Lm=n("TFLongformerModel"),xm=n(`. It is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.`),$m=l(),St=a("p"),Fm=n("This is the configuration class to store the configuration of a "),Qa=a("a"),Mm=n("LongformerModel"),Em=n(`. It is used to instantiate an Longformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa `),qn=a("a"),zm=n("roberta-base"),qm=n(" architecture with a sequence length 4,096."),Cm=l(),Dt=a("p"),Pm=n("The "),Ha=a("a"),Om=n("LongformerConfig"),jm=n(" class directly inherits "),Ra=a("a"),Am=n("RobertaConfig"),Im=n(`. It reuses the same defaults. Please check the parent class for more information.`),Nm=l(),li=a("p"),Sm=n("Example:"),Dm=l(),v(Cn.$$.fragment),jd=l(),Bt=a("h2"),Bo=a("a"),di=a("span"),v(Pn.$$.fragment),Bm=l(),ci=a("span"),Wm=n("LongformerTokenizer"),Ad=l(),ft=a("div"),v(On.$$.fragment),Qm=l(),hi=a("p"),Hm=n("Construct a Longformer tokenizer."),Rm=l(),Wo=a("p"),Ua=a("a"),Um=n("LongformerTokenizer"),Vm=n(" is identical to "),Va=a("a"),Gm=n("RobertaTokenizer"),Km=n(`. Refer to the superclass for usage examples and documentation concerning parameters.`),Id=l(),Wt=a("h2"),Qo=a("a"),mi=a("span"),v(jn.$$.fragment),Jm=l(),fi=a("span"),Xm=n("LongformerTokenizerFast"),Nd=l(),pt=a("div"),v(An.$$.fragment),Ym=l(),In=a("p"),Zm=n("Construct a \u201Cfast\u201D Longformer tokenizer (backed by HuggingFace\u2019s "),pi=a("em"),ef=n("tokenizers"),tf=n(" library)."),of=l(),Ho=a("p"),Ga=a("a"),nf=n("LongformerTokenizerFast"),sf=n(" is identical to "),Ka=a("a"),af=n("RobertaTokenizerFast"),rf=n(`. 
Refer to the superclass for usage examples and documentation concerning parameters.`),Sd=l(),Qt=a("h2"),Ro=a("a"),gi=a("span"),v(Nn.$$.fragment),lf=l(),ui=a("span"),df=n("Longformer specific outputs"),Dd=l(),Ht=a("div"),v(Sn.$$.fragment),cf=l(),_i=a("p"),hf=n("Base class for Longformer\u2019s outputs, with potential hidden states, local and global attentions."),Bd=l(),Rt=a("div"),v(Dn.$$.fragment),mf=l(),wi=a("p"),ff=n("Base class for Longformer\u2019s outputs that also contains a pooling of the last hidden states."),Wd=l(),Ut=a("div"),v(Bn.$$.fragment),pf=l(),vi=a("p"),gf=n("Base class for masked language models outputs."),Qd=l(),Vt=a("div"),v(Wn.$$.fragment),uf=l(),ki=a("p"),_f=n("Base class for outputs of question answering Longformer models."),Hd=l(),Gt=a("div"),v(Qn.$$.fragment),wf=l(),bi=a("p"),vf=n("Base class for outputs of sentence classification models."),Rd=l(),Kt=a("div"),v(Hn.$$.fragment),kf=l(),Ti=a("p"),bf=n("Base class for outputs of multiple choice Longformer models."),Ud=l(),Jt=a("div"),v(Rn.$$.fragment),Tf=l(),yi=a("p"),yf=n("Base class for outputs of token classification models."),Vd=l(),Xt=a("div"),v(Un.$$.fragment),Lf=l(),Li=a("p"),xf=n("Base class for Longformer\u2019s outputs, with potential hidden states, local and global attentions."),Gd=l(),Yt=a("div"),v(Vn.$$.fragment),$f=l(),xi=a("p"),Ff=n("Base class for Longformer\u2019s outputs that also contains a pooling of the last hidden states."),Kd=l(),Zt=a("div"),v(Gn.$$.fragment),Mf=l(),$i=a("p"),Ef=n("Base class for masked language models outputs."),Jd=l(),eo=a("div"),v(Kn.$$.fragment),zf=l(),Fi=a("p"),qf=n("Base class for outputs of question answering Longformer models."),Xd=l(),to=a("div"),v(Jn.$$.fragment),Cf=l(),Mi=a("p"),Pf=n("Base class for outputs of sentence classification models."),Yd=l(),oo=a("div"),v(Xn.$$.fragment),Of=l(),Ei=a("p"),jf=n("Base class for outputs of multiple choice models."),Zd=l(),no=a("div"),v(Yn.$$.fragment),Af=l(),zi=a("p"),If=n("Base class for outputs of token classification models."),ec=l(),so=a("h2"),Uo=a("a"),qi=a("span"),v(Zn.$$.fragment),Nf=l(),Ci=a("span"),Sf=n("LongformerModel"),tc=l(),qe=a("div"),v(es.$$.fragment),Df=l(),Pi=a("p"),Bf=n("The bare Longformer Model outputting raw hidden-states without any specific head on top."),Wf=l(),ts=a("p"),Qf=n("This model inherits from "),Ja=a("a"),Hf=n("PreTrainedModel"),Rf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Uf=l(),os=a("p"),Vf=n("This model is also a PyTorch "),ns=a("a"),Gf=n("torch.nn.Module"),Kf=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Jf=l(),ao=a("p"),Xf=n("This class copied code from "),Xa=a("a"),Yf=n("RobertaModel"),Zf=n(` and overwrote standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in `),ss=a("a"),ep=n("Longformer: the Long-Document Transformer"),tp=n(` by Iz Beltagy, Matthew E. Peters, and Arman Cohan. 
Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute.`),op=l(),as=a("p"),np=n("The self-attention module "),Oi=a("code"),sp=n("LongformerSelfAttention"),ap=n(` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient.`),rp=l(),Ge=a("div"),v(rs.$$.fragment),ip=l(),ro=a("p"),lp=n("The "),Ya=a("a"),dp=n("LongformerModel"),cp=n(" forward method, overrides the "),ji=a("code"),hp=n("__call__"),mp=n(" special method."),fp=l(),v(Vo.$$.fragment),pp=l(),Ai=a("p"),gp=n("Examples:"),up=l(),v(is.$$.fragment),oc=l(),io=a("h2"),Go=a("a"),Ii=a("span"),v(ls.$$.fragment),_p=l(),Ni=a("span"),wp=n("LongformerForMaskedLM"),nc=l(),We=a("div"),v(ds.$$.fragment),vp=l(),cs=a("p"),kp=n("Longformer Model with a "),Si=a("code"),bp=n("language modeling"),Tp=n(" head on top."),yp=l(),hs=a("p"),Lp=n("This model inherits from "),Za=a("a"),xp=n("PreTrainedModel"),$p=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fp=l(),ms=a("p"),Mp=n("This model is also a PyTorch "),fs=a("a"),Ep=n("torch.nn.Module"),zp=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qp=l(),Ke=a("div"),v(ps.$$.fragment),Cp=l(),lo=a("p"),Pp=n("The "),er=a("a"),Op=n("LongformerForMaskedLM"),jp=n(" forward method, overrides the "),Di=a("code"),Ap=n("__call__"),Ip=n(" special method."),Np=l(),v(Ko.$$.fragment),Sp=l(),Bi=a("p"),Dp=n("Examples:"),Bp=l(),v(gs.$$.fragment),sc=l(),co=a("h2"),Jo=a("a"),Wi=a("span"),v(us.$$.fragment),Wp=l(),Qi=a("span"),Qp=n("LongformerForSequenceClassification"),ac=l(),Qe=a("div"),v(_s.$$.fragment),Hp=l(),Hi=a("p"),Rp=n(`Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Up=l(),ws=a("p"),Vp=n("This model inherits from "),tr=a("a"),Gp=n("PreTrainedModel"),Kp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jp=l(),vs=a("p"),Xp=n("This model is also a PyTorch "),ks=a("a"),Yp=n("torch.nn.Module"),Zp=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),eg=l(),Ce=a("div"),v(bs.$$.fragment),tg=l(),ho=a("p"),og=n("The "),or=a("a"),ng=n("LongformerForSequenceClassification"),sg=n(" forward method, overrides the "),Ri=a("code"),ag=n("__call__"),rg=n(" special method."),ig=l(),v(Xo.$$.fragment),lg=l(),Ui=a("p"),dg=n("Example of single-label classification:"),cg=l(),v(Ts.$$.fragment),hg=l(),Vi=a("p"),mg=n("Example of multi-label classification:"),fg=l(),v(ys.$$.fragment),rc=l(),mo=a("h2"),Yo=a("a"),Gi=a("span"),v(Ls.$$.fragment),pg=l(),Ki=a("span"),gg=n("LongformerForMultipleChoice"),ic=l(),He=a("div"),v(xs.$$.fragment),ug=l(),Ji=a("p"),_g=n(`Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),wg=l(),$s=a("p"),vg=n("This model inherits from "),nr=a("a"),kg=n("PreTrainedModel"),bg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tg=l(),Fs=a("p"),yg=n("This model is also a PyTorch "),Ms=a("a"),Lg=n("torch.nn.Module"),xg=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$g=l(),Je=a("div"),v(Es.$$.fragment),Fg=l(),fo=a("p"),Mg=n("The "),sr=a("a"),Eg=n("LongformerForMultipleChoice"),zg=n(" forward method, overrides the "),Xi=a("code"),qg=n("__call__"),Cg=n(" special method."),Pg=l(),v(Zo.$$.fragment),Og=l(),Yi=a("p"),jg=n("Example:"),Ag=l(),v(zs.$$.fragment),lc=l(),po=a("h2"),en=a("a"),Zi=a("span"),v(qs.$$.fragment),Ig=l(),el=a("span"),Ng=n("LongformerForTokenClassification"),dc=l(),Re=a("div"),v(Cs.$$.fragment),Sg=l(),tl=a("p"),Dg=n(`Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Bg=l(),Ps=a("p"),Wg=n("This model inherits from "),ar=a("a"),Qg=n("PreTrainedModel"),Hg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rg=l(),Os=a("p"),Ug=n("This model is also a PyTorch "),js=a("a"),Vg=n("torch.nn.Module"),Gg=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kg=l(),Xe=a("div"),v(As.$$.fragment),Jg=l(),go=a("p"),Xg=n("The "),rr=a("a"),Yg=n("LongformerForTokenClassification"),Zg=n(" forward method, overrides the "),ol=a("code"),eu=n("__call__"),tu=n(" special method."),ou=l(),v(tn.$$.fragment),nu=l(),nl=a("p"),su=n("Example:"),au=l(),v(Is.$$.fragment),cc=l(),uo=a("h2"),on=a("a"),sl=a("span"),v(Ns.$$.fragment),ru=l(),al=a("span"),iu=n("LongformerForQuestionAnswering"),hc=l(),Ue=a("div"),v(Ss.$$.fragment),lu=l(),_o=a("p"),du=n(`Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layers on top of the hidden-states output to compute `),rl=a("code"),cu=n("span start logits"),hu=n(" and "),il=a("code"),mu=n("span end logits"),fu=n(")."),pu=l(),Ds=a("p"),gu=n("This model inherits from "),ir=a("a"),uu=n("PreTrainedModel"),_u=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wu=l(),Bs=a("p"),vu=n("This model is also a PyTorch "),Ws=a("a"),ku=n("torch.nn.Module"),bu=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tu=l(),Ye=a("div"),v(Qs.$$.fragment),yu=l(),wo=a("p"),Lu=n("The "),lr=a("a"),xu=n("LongformerForQuestionAnswering"),$u=n(" forward method, overrides the "),ll=a("code"),Fu=n("__call__"),Mu=n(" special method."),Eu=l(),v(nn.$$.fragment),zu=l(),dl=a("p"),qu=n("Examples:"),Cu=l(),v(Hs.$$.fragment),mc=l(),vo=a("h2"),sn=a("a"),cl=a("span"),v(Rs.$$.fragment),Pu=l(),hl=a("span"),Ou=n("TFLongformerModel"),fc=l(),Ee=a("div"),v(Us.$$.fragment),ju=l(),ml=a("p"),Au=n("The bare Longformer Model outputting raw hidden-states without any specific head on top."),Iu=l(),Vs=a("p"),Nu=n("This model inherits from "),dr=a("a"),Su=n("TFPreTrainedModel"),Du=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bu=l(),Gs=a("p"),Wu=n("This model is also a "),Ks=a("a"),Qu=n("tf.keras.Model"),Hu=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ru=l(),v(an.$$.fragment),Uu=l(),ko=a("p"),Vu=n("This class copies code from "),cr=a("a"),Gu=n("TFRobertaModel"),Ku=n(` and overwrites standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in `),Js=a("a"),Ju=n("Longformer: the Long-Document Transformer"),Xu=n(` by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute.`),Yu=l(),Xs=a("p"),Zu=n("The self-attention module "),fl=a("code"),e_=n("TFLongformerSelfAttention"),t_=n(` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient.`),o_=l(),_t=a("div"),v(Ys.$$.fragment),n_=l(),bo=a("p"),s_=n("The "),hr=a("a"),a_=n("TFLongformerModel"),r_=n(" forward method, overrides the "),pl=a("code"),i_=n("__call__"),l_=n(" special method."),d_=l(),v(rn.$$.fragment),pc=l(),To=a("h2"),ln=a("a"),gl=a("span"),v(Zs.$$.fragment),c_=l(),ul=a("span"),h_=n("TFLongformerForMaskedLM"),gc=l(),Ae=a("div"),v(ea.$$.fragment),m_=l(),ta=a("p"),f_=n("Longformer Model with a "),_l=a("code"),p_=n("language modeling"),g_=n(" head on top."),u_=l(),oa=a("p"),__=n("This model inherits from "),mr=a("a"),w_=n("TFPreTrainedModel"),v_=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),k_=l(),na=a("p"),b_=n("This model is also a "),sa=a("a"),T_=n("tf.keras.Model"),y_=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),L_=l(),v(dn.$$.fragment),x_=l(),Ze=a("div"),v(aa.$$.fragment),$_=l(),yo=a("p"),F_=n("The "),fr=a("a"),M_=n("TFLongformerForMaskedLM"),E_=n(" forward method, overrides the "),wl=a("code"),z_=n("__call__"),q_=n(" special method."),C_=l(),v(cn.$$.fragment),P_=l(),vl=a("p"),O_=n("Example:"),j_=l(),v(ra.$$.fragment),uc=l(),Lo=a("h2"),hn=a("a"),kl=a("span"),v(ia.$$.fragment),A_=l(),bl=a("span"),I_=n("TFLongformerForQuestionAnswering"),_c=l(),Ie=a("div"),v(la.$$.fragment),N_=l(),xo=a("p"),S_=n(`Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layer on top of the hidden-states output to compute `),Tl=a("code"),D_=n("span start logits"),B_=n(" and "),yl=a("code"),W_=n("span end logits"),Q_=n(")."),H_=l(),da=a("p"),R_=n("This model inherits from "),pr=a("a"),U_=n("TFPreTrainedModel"),V_=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),G_=l(),ca=a("p"),K_=n("This model is also a "),ha=a("a"),J_=n("tf.keras.Model"),X_=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Y_=l(),v(mn.$$.fragment),Z_=l(),et=a("div"),v(ma.$$.fragment),ew=l(),$o=a("p"),tw=n("The "),gr=a("a"),ow=n("TFLongformerForQuestionAnswering"),nw=n(" forward method, overrides the "),Ll=a("code"),sw=n("__call__"),aw=n(" special method."),rw=l(),v(fn.$$.fragment),iw=l(),xl=a("p"),lw=n("Example:"),dw=l(),v(fa.$$.fragment),wc=l(),Fo=a("h2"),pn=a("a"),$l=a("span"),v(pa.$$.fragment),cw=l(),Fl=a("span"),hw=n("TFLongformerForSequenceClassification"),vc=l(),Ne=a("div"),v(ga.$$.fragment),mw=l(),Ml=a("p"),fw=n(`Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),pw=l(),ua=a("p"),gw=n("This model inherits from "),ur=a("a"),uw=n("TFPreTrainedModel"),_w=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ww=l(),_a=a("p"),vw=n("This model is also a "),wa=a("a"),kw=n("tf.keras.Model"),bw=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Tw=l(),v(gn.$$.fragment),yw=l(),tt=a("div"),v(va.$$.fragment),Lw=l(),Mo=a("p"),xw=n("The "),_r=a("a"),$w=n("TFLongformerForSequenceClassification"),Fw=n(" forward method, overrides the "),El=a("code"),Mw=n("__call__"),Ew=n(" special method."),zw=l(),v(un.$$.fragment),qw=l(),zl=a("p"),Cw=n("Example:"),Pw=l(),v(ka.$$.fragment),kc=l(),Eo=a("h2"),_n=a("a"),ql=a("span"),v(ba.$$.fragment),Ow=l(),Cl=a("span"),jw=n("TFLongformerForTokenClassification"),bc=l(),Se=a("div"),v(Ta.$$.fragment),Aw=l(),Pl=a("p"),Iw=n(`Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Nw=l(),ya=a("p"),Sw=n("This model inherits from "),wr=a("a"),Dw=n("TFPreTrainedModel"),Bw=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ww=l(),La=a("p"),Qw=n("This model is also a "),xa=a("a"),Hw=n("tf.keras.Model"),Rw=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Uw=l(),v(wn.$$.fragment),Vw=l(),ot=a("div"),v($a.$$.fragment),Gw=l(),zo=a("p"),Kw=n("The "),vr=a("a"),Jw=n("TFLongformerForTokenClassification"),Xw=n(" forward method, overrides the "),Ol=a("code"),Yw=n("__call__"),Zw=n(" special method."),ev=l(),v(vn.$$.fragment),tv=l(),jl=a("p"),ov=n("Example:"),nv=l(),v(Fa.$$.fragment),Tc=l(),qo=a("h2"),kn=a("a"),Al=a("span"),v(Ma.$$.fragment),sv=l(),Il=a("span"),av=n("TFLongformerForMultipleChoice"),yc=l(),De=a("div"),v(Ea.$$.fragment),rv=l(),Nl=a("p"),iv=n(`Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),lv=l(),za=a("p"),dv=n("This model inherits from "),kr=a("a"),cv=n("TFPreTrainedModel"),hv=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mv=l(),qa=a("p"),fv=n("This model is also a "),Ca=a("a"),pv=n("tf.keras.Model"),gv=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),uv=l(),v(bn.$$.fragment),_v=l(),nt=a("div"),v(Pa.$$.fragment),wv=l(),Co=a("p"),vv=n("The "),br=a("a"),kv=n("TFLongformerForMultipleChoice"),bv=n(" forward method, overrides the "),Sl=a("code"),Tv=n("__call__"),yv=n(" special method."),Lv=l(),v(Tn.$$.fragment),xv=l(),Dl=a("p"),$v=n("Example:"),Fv=l(),v(Oa.$$.fragment),this.h()},l(o){const f=n1('[data-svelte="svelte-1phssyn"]',document.head);p=r(f,"META",{name:!0,content:!0}),f.forEach(t),F=d(o),g=r(o,"H1",{class:!0});var ja=i(g);u=r(ja,"A",{id:!0,class:!0,href:!0});var Bl=i(u);x=r(Bl,"SPAN",{});var Wl=i(x);k(w.$$.fragment,Wl),Wl.forEach(t),Bl.forEach(t),_=d(ja),M=r(ja,"SPAN",{});var Ql=i(M);de=s(Ql,"Longformer"),Ql.forEach(t),ja.forEach(t),X=d(o),E=r(o,"P",{});var yn=i(E);ne=r(yn,"STRONG",{});var Hl=i(ne);Q=s(Hl,"DISCLAIMER:"),Hl.forEach(t),ce=s(yn," This model is still a work in progress, if you see something strange, file a "),Z=r(yn,"A",{href:!0,rel:!0});var Rl=i(Z);H=s(Rl,"Github Issue"),Rl.forEach(t),he=s(yn,"."),yn.forEach(t),ae=d(o),R=r(o,"H2",{class:!0});var Aa=i(R);P=r(Aa,"A",{id:!0,class:!0,href:!0});var Ul=i(P);se=r(Ul,"SPAN",{});var Vl=i(se);k(K.$$.fragment,Vl),Vl.forEach(t),Ul.forEach(t),z=d(Aa),q=r(Aa,"SPAN",{});var Gl=i(q);me=s(Gl,"Overview"),Gl.forEach(t),Aa.forEach(t),D=d(o),Y=r(o,"P",{});var Ia=i(Y);fe=s(Ia,"The Longformer model was presented in "),I=r(Ia,"A",{href:!0,rel:!0});var Kl=i(I);pe=s(Kl,"Longformer: The Long-Document Transformer"),Kl.forEach(t),ge=s(Ia," by Iz Beltagy, Matthew E. Peters, Arman Cohan."),Ia.forEach(t),C=d(o),ee=r(o,"P",{});var Jl=i(ee);U=s(Jl,"The abstract from the paper is the following:"),Jl.forEach(t),re=d(o),te=r(o,"P",{});var Xl=i(te);N=r(Xl,"EM",{});var Yl=i(N);ue=s(Yl,`Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. 
To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer\u2019s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.`),Yl.forEach(t),Xl.forEach(t),ie=d(o),O=r(o,"P",{});var Zl=i(O);_e=s(Zl,"Tips:"),Zl.forEach(t),B=d(o),oe=r(o,"UL",{});var ed=i(oe);m=r(ed,"LI",{});var Ln=i(m);$=s(Ln,"Since the Longformer is based on RoBERTa, it doesn\u2019t have "),J=r(Ln,"CODE",{});var Mv=i(J);Le=s(Mv,"token_type_ids"),Mv.forEach(t),xe=s(Ln,`. You don\u2019t need to indicate which token belongs to which segment. Just separate your segments with the separation token `),j=r(Ln,"CODE",{});var Ev=i(j);$e=s(Ev,"tokenizer.sep_token"),Ev.forEach(t),Fe=s(Ln,` (or `),be=r(Ln,"CODE",{});var zv=i(be);A=s(zv,"</s>"),zv.forEach(t),V=s(Ln,")."),Ln.forEach(t),ed.forEach(t),Te=d(o),le=r(o,"P",{});var Tr=i(le);G=s(Tr,"This model was contributed by "),ve=r(Tr,"A",{href:!0,rel:!0});var qv=i(ve);Me=s(qv,"beltagy"),qv.forEach(t),we=s(Tr,". The Authors\u2019 code can be found "),ke=r(Tr,"A",{href:!0,rel:!0});var Cv=i(ke);jh=s(Cv,"here"),Cv.forEach(t),Ah=s(Tr,"."),Tr.forEach(t),sd=d(o),Ot=r(o,"H2",{class:!0});var xc=i(Ot);jo=r(xc,"A",{id:!0,class:!0,href:!0});var Pv=i(jo);Vr=r(Pv,"SPAN",{});var Ov=i(Vr);k(xn.$$.fragment,Ov),Ov.forEach(t),Pv.forEach(t),Ih=d(xc),Gr=r(xc,"SPAN",{});var jv=i(Gr);Nh=s(jv,"Longformer Self Attention"),jv.forEach(t),xc.forEach(t),ad=d(o),ye=r(o,"P",{});var Pe=i(ye);Sh=s(Pe,`Longformer self attention employs self attention on both a \u201Clocal\u201D context and a \u201Cglobal\u201D context. Most tokens only attend \u201Clocally\u201D to each other meaning that each token attends to its `),rd=Oo(Pe),id=s(Pe,` previous tokens and `),ld=Oo(Pe),dd=s(Pe," succeding tokens with "),cd=Oo(Pe),hd=s(Pe,` being the window length as defined in `),Kr=r(Pe,"CODE",{});var Av=i(Kr);Dh=s(Av,"config.attention_window"),Av.forEach(t),Bh=s(Pe,". Note that "),Jr=r(Pe,"CODE",{});var Iv=i(Jr);Wh=s(Iv,"config.attention_window"),Iv.forEach(t),Qh=s(Pe," can be of type "),Xr=r(Pe,"CODE",{});var Nv=i(Xr);Hh=s(Nv,"List"),Nv.forEach(t),Rh=s(Pe,` to define a different `),md=Oo(Pe),fd=s(Pe,` for each layer. A selected few tokens attend \u201Cglobally\u201D to all other tokens, as it is conventionally done for all tokens in `),Yr=r(Pe,"CODE",{});var Sv=i(Yr);Uh=s(Sv,"BertSelfAttention"),Sv.forEach(t),Vh=s(Pe,"."),Pe.forEach(t),pd=d(o),gt=r(o,"P",{});var yr=i(gt);Gh=s(yr,`Note that \u201Clocally\u201D and \u201Cglobally\u201D attending tokens are projected by different query, key and value matrices. 
Also note that every \u201Clocally\u201D attending token not only attends to tokens within its window `),gd=Oo(yr),ud=s(yr,`, but also to all \u201Cglobally\u201D attending tokens so that global attention is `),Zr=r(yr,"EM",{});var Dv=i(Zr);Kh=s(Dv,"symmetric"),Dv.forEach(t),Jh=s(yr,"."),yr.forEach(t),_d=d(o),ut=r(o,"P",{});var Lr=i(ut);Xh=s(Lr,`The user can define which tokens attend \u201Clocally\u201D and which tokens attend \u201Cglobally\u201D by setting the tensor `),ei=r(Lr,"CODE",{});var Bv=i(ei);Yh=s(Bv,"global_attention_mask"),Bv.forEach(t),Zh=s(Lr,` at run-time appropriately. All Longformer models employ the following logic for `),ti=r(Lr,"CODE",{});var Wv=i(ti);em=s(Wv,"global_attention_mask"),Wv.forEach(t),tm=s(Lr,":"),Lr.forEach(t),wd=d(o),Ao=r(o,"UL",{});var $c=i(Ao);oi=r($c,"LI",{});var Qv=i(oi);om=s(Qv,"0: the token attends \u201Clocally\u201D,"),Qv.forEach(t),nm=d($c),ni=r($c,"LI",{});var Hv=i(ni);sm=s(Hv,"1: the token attends \u201Cglobally\u201D."),Hv.forEach(t),$c.forEach(t),vd=d(o),Io=r(o,"P",{});var Fc=i(Io);am=s(Fc,"For more information please also refer to "),Na=r(Fc,"A",{href:!0});var Rv=i(Na);rm=s(Rv,"forward()"),Rv.forEach(t),im=s(Fc," method."),Fc.forEach(t),kd=d(o),Ve=r(o,"P",{});var wt=i(Ve);lm=s(wt,`Using Longformer self attention, the memory and time complexity of the query-key matmul operation, which usually represents the memory and time bottleneck, can be reduced from `),bd=Oo(wt),Td=s(wt,` to `),yd=Oo(wt),Ld=s(wt,", with "),xd=Oo(wt),$d=s(wt," being the sequence length and "),Fd=Oo(wt),Md=s(wt,` being the average window size. It is assumed that the number of \u201Cglobally\u201D attending tokens is insignificant as compared to the number of \u201Clocally\u201D attending tokens.`),wt.forEach(t),Ed=d(o),No=r(o,"P",{});var Mc=i(No);dm=s(Mc,"For more information, please refer to the official "),$n=r(Mc,"A",{href:!0,rel:!0});var Uv=i($n);cm=s(Uv,"paper"),Uv.forEach(t),hm=s(Mc,"."),Mc.forEach(t),zd=d(o),jt=r(o,"H2",{class:!0});var Ec=i(jt);So=r(Ec,"A",{id:!0,class:!0,href:!0});var Vv=i(So);si=r(Vv,"SPAN",{});var Gv=i(si);k(Fn.$$.fragment,Gv),Gv.forEach(t),Vv.forEach(t),mm=d(Ec),ai=r(Ec,"SPAN",{});var Kv=i(ai);fm=s(Kv,"Training"),Kv.forEach(t),Ec.forEach(t),qd=d(o),At=r(o,"P",{});var td=i(At);Sa=r(td,"A",{href:!0});var Jv=i(Sa);pm=s(Jv,"LongformerForMaskedLM"),Jv.forEach(t),gm=s(td," is trained the exact same way "),Da=r(td,"A",{href:!0});var Xv=i(Da);um=s(Xv,"RobertaForMaskedLM"),Xv.forEach(t),_m=s(td,` is trained and should be used as follows:`),td.forEach(t),Cd=d(o),k(Mn.$$.fragment,o),Pd=d(o),It=r(o,"H2",{class:!0});var zc=i(It);Do=r(zc,"A",{id:!0,class:!0,href:!0});var Yv=i(Do);ri=r(Yv,"SPAN",{});var Zv=i(ri);k(En.$$.fragment,Zv),Zv.forEach(t),Yv.forEach(t),wm=d(zc),ii=r(zc,"SPAN",{});var ek=i(ii);vm=s(ek,"LongformerConfig"),ek.forEach(t),zc.forEach(t),Od=d(o),je=r(o,"DIV",{class:!0});var it=i(je);k(zn.$$.fragment,it),km=d(it),Nt=r(it,"P",{});var xr=i(Nt);bm=s(xr,"This is the configuration class to store the configuration of a "),Ba=r(xr,"A",{href:!0});var tk=i(Ba);Tm=s(tk,"LongformerModel"),tk.forEach(t),ym=s(xr,` or a `),Wa=r(xr,"A",{href:!0});var ok=i(Wa);Lm=s(ok,"TFLongformerModel"),ok.forEach(t),xm=s(xr,`. It is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.`),xr.forEach(t),$m=d(it),St=r(it,"P",{});var $r=i(St);Fm=s($r,"This is the configuration class to store the configuration of a "),Qa=r($r,"A",{href:!0});var nk=i(Qa);Mm=s(nk,"LongformerModel"),nk.forEach(t),Em=s($r,`. 
It is used to instantiate an Longformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa `),qn=r($r,"A",{href:!0,rel:!0});var sk=i(qn);zm=s(sk,"roberta-base"),sk.forEach(t),qm=s($r," architecture with a sequence length 4,096."),$r.forEach(t),Cm=d(it),Dt=r(it,"P",{});var Fr=i(Dt);Pm=s(Fr,"The "),Ha=r(Fr,"A",{href:!0});var ak=i(Ha);Om=s(ak,"LongformerConfig"),ak.forEach(t),jm=s(Fr," class directly inherits "),Ra=r(Fr,"A",{href:!0});var rk=i(Ra);Am=s(rk,"RobertaConfig"),rk.forEach(t),Im=s(Fr,`. It reuses the same defaults. Please check the parent class for more information.`),Fr.forEach(t),Nm=d(it),li=r(it,"P",{});var ik=i(li);Sm=s(ik,"Example:"),ik.forEach(t),Dm=d(it),k(Cn.$$.fragment,it),it.forEach(t),jd=d(o),Bt=r(o,"H2",{class:!0});var qc=i(Bt);Bo=r(qc,"A",{id:!0,class:!0,href:!0});var lk=i(Bo);di=r(lk,"SPAN",{});var dk=i(di);k(Pn.$$.fragment,dk),dk.forEach(t),lk.forEach(t),Bm=d(qc),ci=r(qc,"SPAN",{});var ck=i(ci);Wm=s(ck,"LongformerTokenizer"),ck.forEach(t),qc.forEach(t),Ad=d(o),ft=r(o,"DIV",{class:!0});var Mr=i(ft);k(On.$$.fragment,Mr),Qm=d(Mr),hi=r(Mr,"P",{});var hk=i(hi);Hm=s(hk,"Construct a Longformer tokenizer."),hk.forEach(t),Rm=d(Mr),Wo=r(Mr,"P",{});var od=i(Wo);Ua=r(od,"A",{href:!0});var mk=i(Ua);Um=s(mk,"LongformerTokenizer"),mk.forEach(t),Vm=s(od," is identical to "),Va=r(od,"A",{href:!0});var fk=i(Va);Gm=s(fk,"RobertaTokenizer"),fk.forEach(t),Km=s(od,`. Refer to the superclass for usage examples and documentation concerning parameters.`),od.forEach(t),Mr.forEach(t),Id=d(o),Wt=r(o,"H2",{class:!0});var Cc=i(Wt);Qo=r(Cc,"A",{id:!0,class:!0,href:!0});var pk=i(Qo);mi=r(pk,"SPAN",{});var gk=i(mi);k(jn.$$.fragment,gk),gk.forEach(t),pk.forEach(t),Jm=d(Cc),fi=r(Cc,"SPAN",{});var uk=i(fi);Xm=s(uk,"LongformerTokenizerFast"),uk.forEach(t),Cc.forEach(t),Nd=d(o),pt=r(o,"DIV",{class:!0});var Er=i(pt);k(An.$$.fragment,Er),Ym=d(Er),In=r(Er,"P",{});var Pc=i(In);Zm=s(Pc,"Construct a \u201Cfast\u201D Longformer tokenizer (backed by HuggingFace\u2019s "),pi=r(Pc,"EM",{});var _k=i(pi);ef=s(_k,"tokenizers"),_k.forEach(t),tf=s(Pc," library)."),Pc.forEach(t),of=d(Er),Ho=r(Er,"P",{});var nd=i(Ho);Ga=r(nd,"A",{href:!0});var wk=i(Ga);nf=s(wk,"LongformerTokenizerFast"),wk.forEach(t),sf=s(nd," is identical to "),Ka=r(nd,"A",{href:!0});var vk=i(Ka);af=s(vk,"RobertaTokenizerFast"),vk.forEach(t),rf=s(nd,`. 
Refer to the superclass for usage examples and documentation concerning parameters.`),nd.forEach(t),Er.forEach(t),Sd=d(o),Qt=r(o,"H2",{class:!0});var Oc=i(Qt);Ro=r(Oc,"A",{id:!0,class:!0,href:!0});var kk=i(Ro);gi=r(kk,"SPAN",{});var bk=i(gi);k(Nn.$$.fragment,bk),bk.forEach(t),kk.forEach(t),lf=d(Oc),ui=r(Oc,"SPAN",{});var Tk=i(ui);df=s(Tk,"Longformer specific outputs"),Tk.forEach(t),Oc.forEach(t),Dd=d(o),Ht=r(o,"DIV",{class:!0});var jc=i(Ht);k(Sn.$$.fragment,jc),cf=d(jc),_i=r(jc,"P",{});var yk=i(_i);hf=s(yk,"Base class for Longformer\u2019s outputs, with potential hidden states, local and global attentions."),yk.forEach(t),jc.forEach(t),Bd=d(o),Rt=r(o,"DIV",{class:!0});var Ac=i(Rt);k(Dn.$$.fragment,Ac),mf=d(Ac),wi=r(Ac,"P",{});var Lk=i(wi);ff=s(Lk,"Base class for Longformer\u2019s outputs that also contains a pooling of the last hidden states."),Lk.forEach(t),Ac.forEach(t),Wd=d(o),Ut=r(o,"DIV",{class:!0});var Ic=i(Ut);k(Bn.$$.fragment,Ic),pf=d(Ic),vi=r(Ic,"P",{});var xk=i(vi);gf=s(xk,"Base class for masked language models outputs."),xk.forEach(t),Ic.forEach(t),Qd=d(o),Vt=r(o,"DIV",{class:!0});var Nc=i(Vt);k(Wn.$$.fragment,Nc),uf=d(Nc),ki=r(Nc,"P",{});var $k=i(ki);_f=s($k,"Base class for outputs of question answering Longformer models."),$k.forEach(t),Nc.forEach(t),Hd=d(o),Gt=r(o,"DIV",{class:!0});var Sc=i(Gt);k(Qn.$$.fragment,Sc),wf=d(Sc),bi=r(Sc,"P",{});var Fk=i(bi);vf=s(Fk,"Base class for outputs of sentence classification models."),Fk.forEach(t),Sc.forEach(t),Rd=d(o),Kt=r(o,"DIV",{class:!0});var Dc=i(Kt);k(Hn.$$.fragment,Dc),kf=d(Dc),Ti=r(Dc,"P",{});var Mk=i(Ti);bf=s(Mk,"Base class for outputs of multiple choice Longformer models."),Mk.forEach(t),Dc.forEach(t),Ud=d(o),Jt=r(o,"DIV",{class:!0});var Bc=i(Jt);k(Rn.$$.fragment,Bc),Tf=d(Bc),yi=r(Bc,"P",{});var Ek=i(yi);yf=s(Ek,"Base class for outputs of token classification models."),Ek.forEach(t),Bc.forEach(t),Vd=d(o),Xt=r(o,"DIV",{class:!0});var Wc=i(Xt);k(Un.$$.fragment,Wc),Lf=d(Wc),Li=r(Wc,"P",{});var zk=i(Li);xf=s(zk,"Base class for Longformer\u2019s outputs, with potential hidden states, local and global attentions."),zk.forEach(t),Wc.forEach(t),Gd=d(o),Yt=r(o,"DIV",{class:!0});var Qc=i(Yt);k(Vn.$$.fragment,Qc),$f=d(Qc),xi=r(Qc,"P",{});var qk=i(xi);Ff=s(qk,"Base class for Longformer\u2019s outputs that also contains a pooling of the last hidden states."),qk.forEach(t),Qc.forEach(t),Kd=d(o),Zt=r(o,"DIV",{class:!0});var Hc=i(Zt);k(Gn.$$.fragment,Hc),Mf=d(Hc),$i=r(Hc,"P",{});var Ck=i($i);Ef=s(Ck,"Base class for masked language models outputs."),Ck.forEach(t),Hc.forEach(t),Jd=d(o),eo=r(o,"DIV",{class:!0});var Rc=i(eo);k(Kn.$$.fragment,Rc),zf=d(Rc),Fi=r(Rc,"P",{});var Pk=i(Fi);qf=s(Pk,"Base class for outputs of question answering Longformer models."),Pk.forEach(t),Rc.forEach(t),Xd=d(o),to=r(o,"DIV",{class:!0});var Uc=i(to);k(Jn.$$.fragment,Uc),Cf=d(Uc),Mi=r(Uc,"P",{});var Ok=i(Mi);Pf=s(Ok,"Base class for outputs of sentence classification models."),Ok.forEach(t),Uc.forEach(t),Yd=d(o),oo=r(o,"DIV",{class:!0});var Vc=i(oo);k(Xn.$$.fragment,Vc),Of=d(Vc),Ei=r(Vc,"P",{});var jk=i(Ei);jf=s(jk,"Base class for outputs of multiple choice models."),jk.forEach(t),Vc.forEach(t),Zd=d(o),no=r(o,"DIV",{class:!0});var Gc=i(no);k(Yn.$$.fragment,Gc),Af=d(Gc),zi=r(Gc,"P",{});var Ak=i(zi);If=s(Ak,"Base class for outputs of token classification models."),Ak.forEach(t),Gc.forEach(t),ec=d(o),so=r(o,"H2",{class:!0});var Kc=i(so);Uo=r(Kc,"A",{id:!0,class:!0,href:!0});var Ik=i(Uo);qi=r(Ik,"SPAN",{});var 
Nk=i(qi);k(Zn.$$.fragment,Nk),Nk.forEach(t),Ik.forEach(t),Nf=d(Kc),Ci=r(Kc,"SPAN",{});var Sk=i(Ci);Sf=s(Sk,"LongformerModel"),Sk.forEach(t),Kc.forEach(t),tc=d(o),qe=r(o,"DIV",{class:!0});var st=i(qe);k(es.$$.fragment,st),Df=d(st),Pi=r(st,"P",{});var Dk=i(Pi);Bf=s(Dk,"The bare Longformer Model outputting raw hidden-states without any specific head on top."),Dk.forEach(t),Wf=d(st),ts=r(st,"P",{});var Jc=i(ts);Qf=s(Jc,"This model inherits from "),Ja=r(Jc,"A",{href:!0});var Bk=i(Ja);Hf=s(Bk,"PreTrainedModel"),Bk.forEach(t),Rf=s(Jc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jc.forEach(t),Uf=d(st),os=r(st,"P",{});var Xc=i(os);Vf=s(Xc,"This model is also a PyTorch "),ns=r(Xc,"A",{href:!0,rel:!0});var Wk=i(ns);Gf=s(Wk,"torch.nn.Module"),Wk.forEach(t),Kf=s(Xc,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xc.forEach(t),Jf=d(st),ao=r(st,"P",{});var zr=i(ao);Xf=s(zr,"This class copied code from "),Xa=r(zr,"A",{href:!0});var Qk=i(Xa);Yf=s(Qk,"RobertaModel"),Qk.forEach(t),Zf=s(zr,` and overwrote standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in `),ss=r(zr,"A",{href:!0,rel:!0});var Hk=i(ss);ep=s(Hk,"Longformer: the Long-Document Transformer"),Hk.forEach(t),tp=s(zr,` by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute.`),zr.forEach(t),op=d(st),as=r(st,"P",{});var Yc=i(as);np=s(Yc,"The self-attention module "),Oi=r(Yc,"CODE",{});var Rk=i(Oi);sp=s(Rk,"LongformerSelfAttention"),Rk.forEach(t),ap=s(Yc,` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. 
Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient.`),Yc.forEach(t),rp=d(st),Ge=r(st,"DIV",{class:!0});var vt=i(Ge);k(rs.$$.fragment,vt),ip=d(vt),ro=r(vt,"P",{});var qr=i(ro);lp=s(qr,"The "),Ya=r(qr,"A",{href:!0});var Uk=i(Ya);dp=s(Uk,"LongformerModel"),Uk.forEach(t),cp=s(qr," forward method, overrides the "),ji=r(qr,"CODE",{});var Vk=i(ji);hp=s(Vk,"__call__"),Vk.forEach(t),mp=s(qr," special method."),qr.forEach(t),fp=d(vt),k(Vo.$$.fragment,vt),pp=d(vt),Ai=r(vt,"P",{});var Gk=i(Ai);gp=s(Gk,"Examples:"),Gk.forEach(t),up=d(vt),k(is.$$.fragment,vt),vt.forEach(t),st.forEach(t),oc=d(o),io=r(o,"H2",{class:!0});var Zc=i(io);Go=r(Zc,"A",{id:!0,class:!0,href:!0});var Kk=i(Go);Ii=r(Kk,"SPAN",{});var Jk=i(Ii);k(ls.$$.fragment,Jk),Jk.forEach(t),Kk.forEach(t),_p=d(Zc),Ni=r(Zc,"SPAN",{});var Xk=i(Ni);wp=s(Xk,"LongformerForMaskedLM"),Xk.forEach(t),Zc.forEach(t),nc=d(o),We=r(o,"DIV",{class:!0});var kt=i(We);k(ds.$$.fragment,kt),vp=d(kt),cs=r(kt,"P",{});var eh=i(cs);kp=s(eh,"Longformer Model with a "),Si=r(eh,"CODE",{});var Yk=i(Si);bp=s(Yk,"language modeling"),Yk.forEach(t),Tp=s(eh," head on top."),eh.forEach(t),yp=d(kt),hs=r(kt,"P",{});var th=i(hs);Lp=s(th,"This model inherits from "),Za=r(th,"A",{href:!0});var Zk=i(Za);xp=s(Zk,"PreTrainedModel"),Zk.forEach(t),$p=s(th,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),th.forEach(t),Fp=d(kt),ms=r(kt,"P",{});var oh=i(ms);Mp=s(oh,"This model is also a PyTorch "),fs=r(oh,"A",{href:!0,rel:!0});var eb=i(fs);Ep=s(eb,"torch.nn.Module"),eb.forEach(t),zp=s(oh,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),oh.forEach(t),qp=d(kt),Ke=r(kt,"DIV",{class:!0});var bt=i(Ke);k(ps.$$.fragment,bt),Cp=d(bt),lo=r(bt,"P",{});var Cr=i(lo);Pp=s(Cr,"The "),er=r(Cr,"A",{href:!0});var tb=i(er);Op=s(tb,"LongformerForMaskedLM"),tb.forEach(t),jp=s(Cr," forward method, overrides the "),Di=r(Cr,"CODE",{});var ob=i(Di);Ap=s(ob,"__call__"),ob.forEach(t),Ip=s(Cr," special method."),Cr.forEach(t),Np=d(bt),k(Ko.$$.fragment,bt),Sp=d(bt),Bi=r(bt,"P",{});var nb=i(Bi);Dp=s(nb,"Examples:"),nb.forEach(t),Bp=d(bt),k(gs.$$.fragment,bt),bt.forEach(t),kt.forEach(t),sc=d(o),co=r(o,"H2",{class:!0});var nh=i(co);Jo=r(nh,"A",{id:!0,class:!0,href:!0});var sb=i(Jo);Wi=r(sb,"SPAN",{});var ab=i(Wi);k(us.$$.fragment,ab),ab.forEach(t),sb.forEach(t),Wp=d(nh),Qi=r(nh,"SPAN",{});var rb=i(Qi);Qp=s(rb,"LongformerForSequenceClassification"),rb.forEach(t),nh.forEach(t),ac=d(o),Qe=r(o,"DIV",{class:!0});var Tt=i(Qe);k(_s.$$.fragment,Tt),Hp=d(Tt),Hi=r(Tt,"P",{});var ib=i(Hi);Rp=s(ib,`Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),ib.forEach(t),Up=d(Tt),ws=r(Tt,"P",{});var sh=i(ws);Vp=s(sh,"This model inherits from "),tr=r(sh,"A",{href:!0});var lb=i(tr);Gp=s(lb,"PreTrainedModel"),lb.forEach(t),Kp=s(sh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sh.forEach(t),Jp=d(Tt),vs=r(Tt,"P",{});var ah=i(vs);Xp=s(ah,"This model is also a PyTorch "),ks=r(ah,"A",{href:!0,rel:!0});var db=i(ks);Yp=s(db,"torch.nn.Module"),db.forEach(t),Zp=s(ah,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ah.forEach(t),eg=d(Tt),Ce=r(Tt,"DIV",{class:!0});var at=i(Ce);k(bs.$$.fragment,at),tg=d(at),ho=r(at,"P",{});var Pr=i(ho);og=s(Pr,"The "),or=r(Pr,"A",{href:!0});var cb=i(or);ng=s(cb,"LongformerForSequenceClassification"),cb.forEach(t),sg=s(Pr," forward method, overrides the "),Ri=r(Pr,"CODE",{});var hb=i(Ri);ag=s(hb,"__call__"),hb.forEach(t),rg=s(Pr," special method."),Pr.forEach(t),ig=d(at),k(Xo.$$.fragment,at),lg=d(at),Ui=r(at,"P",{});var mb=i(Ui);dg=s(mb,"Example of single-label classification:"),mb.forEach(t),cg=d(at),k(Ts.$$.fragment,at),hg=d(at),Vi=r(at,"P",{});var fb=i(Vi);mg=s(fb,"Example of multi-label classification:"),fb.forEach(t),fg=d(at),k(ys.$$.fragment,at),at.forEach(t),Tt.forEach(t),rc=d(o),mo=r(o,"H2",{class:!0});var rh=i(mo);Yo=r(rh,"A",{id:!0,class:!0,href:!0});var pb=i(Yo);Gi=r(pb,"SPAN",{});var gb=i(Gi);k(Ls.$$.fragment,gb),gb.forEach(t),pb.forEach(t),pg=d(rh),Ki=r(rh,"SPAN",{});var ub=i(Ki);gg=s(ub,"LongformerForMultipleChoice"),ub.forEach(t),rh.forEach(t),ic=d(o),He=r(o,"DIV",{class:!0});var yt=i(He);k(xs.$$.fragment,yt),ug=d(yt),Ji=r(yt,"P",{});var _b=i(Ji);_g=s(_b,`Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),_b.forEach(t),wg=d(yt),$s=r(yt,"P",{});var ih=i($s);vg=s(ih,"This model inherits from "),nr=r(ih,"A",{href:!0});var wb=i(nr);kg=s(wb,"PreTrainedModel"),wb.forEach(t),bg=s(ih,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ih.forEach(t),Tg=d(yt),Fs=r(yt,"P",{});var lh=i(Fs);yg=s(lh,"This model is also a PyTorch "),Ms=r(lh,"A",{href:!0,rel:!0});var vb=i(Ms);Lg=s(vb,"torch.nn.Module"),vb.forEach(t),xg=s(lh,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),lh.forEach(t),$g=d(yt),Je=r(yt,"DIV",{class:!0});var Lt=i(Je);k(Es.$$.fragment,Lt),Fg=d(Lt),fo=r(Lt,"P",{});var Or=i(fo);Mg=s(Or,"The "),sr=r(Or,"A",{href:!0});var kb=i(sr);Eg=s(kb,"LongformerForMultipleChoice"),kb.forEach(t),zg=s(Or," forward method, overrides the "),Xi=r(Or,"CODE",{});var bb=i(Xi);qg=s(bb,"__call__"),bb.forEach(t),Cg=s(Or," special method."),Or.forEach(t),Pg=d(Lt),k(Zo.$$.fragment,Lt),Og=d(Lt),Yi=r(Lt,"P",{});var Tb=i(Yi);jg=s(Tb,"Example:"),Tb.forEach(t),Ag=d(Lt),k(zs.$$.fragment,Lt),Lt.forEach(t),yt.forEach(t),lc=d(o),po=r(o,"H2",{class:!0});var dh=i(po);en=r(dh,"A",{id:!0,class:!0,href:!0});var yb=i(en);Zi=r(yb,"SPAN",{});var Lb=i(Zi);k(qs.$$.fragment,Lb),Lb.forEach(t),yb.forEach(t),Ig=d(dh),el=r(dh,"SPAN",{});var xb=i(el);Ng=s(xb,"LongformerForTokenClassification"),xb.forEach(t),dh.forEach(t),dc=d(o),Re=r(o,"DIV",{class:!0});var xt=i(Re);k(Cs.$$.fragment,xt),Sg=d(xt),tl=r(xt,"P",{});var $b=i(tl);Dg=s($b,`Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),$b.forEach(t),Bg=d(xt),Ps=r(xt,"P",{});var ch=i(Ps);Wg=s(ch,"This model inherits from "),ar=r(ch,"A",{href:!0});var Fb=i(ar);Qg=s(Fb,"PreTrainedModel"),Fb.forEach(t),Hg=s(ch,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ch.forEach(t),Rg=d(xt),Os=r(xt,"P",{});var hh=i(Os);Ug=s(hh,"This model is also a PyTorch "),js=r(hh,"A",{href:!0,rel:!0});var Mb=i(js);Vg=s(Mb,"torch.nn.Module"),Mb.forEach(t),Gg=s(hh,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hh.forEach(t),Kg=d(xt),Xe=r(xt,"DIV",{class:!0});var $t=i(Xe);k(As.$$.fragment,$t),Jg=d($t),go=r($t,"P",{});var jr=i(go);Xg=s(jr,"The "),rr=r(jr,"A",{href:!0});var Eb=i(rr);Yg=s(Eb,"LongformerForTokenClassification"),Eb.forEach(t),Zg=s(jr," forward method, overrides the "),ol=r(jr,"CODE",{});var zb=i(ol);eu=s(zb,"__call__"),zb.forEach(t),tu=s(jr," special method."),jr.forEach(t),ou=d($t),k(tn.$$.fragment,$t),nu=d($t),nl=r($t,"P",{});var qb=i(nl);su=s(qb,"Example:"),qb.forEach(t),au=d($t),k(Is.$$.fragment,$t),$t.forEach(t),xt.forEach(t),cc=d(o),uo=r(o,"H2",{class:!0});var mh=i(uo);on=r(mh,"A",{id:!0,class:!0,href:!0});var Cb=i(on);sl=r(Cb,"SPAN",{});var Pb=i(sl);k(Ns.$$.fragment,Pb),Pb.forEach(t),Cb.forEach(t),ru=d(mh),al=r(mh,"SPAN",{});var Ob=i(al);iu=s(Ob,"LongformerForQuestionAnswering"),Ob.forEach(t),mh.forEach(t),hc=d(o),Ue=r(o,"DIV",{class:!0});var Ft=i(Ue);k(Ss.$$.fragment,Ft),lu=d(Ft),_o=r(Ft,"P",{});var Ar=i(_o);du=s(Ar,`Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layers on top of the hidden-states output to compute `),rl=r(Ar,"CODE",{});var jb=i(rl);cu=s(jb,"span start logits"),jb.forEach(t),hu=s(Ar," and "),il=r(Ar,"CODE",{});var Ab=i(il);mu=s(Ab,"span end logits"),Ab.forEach(t),fu=s(Ar,")."),Ar.forEach(t),pu=d(Ft),Ds=r(Ft,"P",{});var fh=i(Ds);gu=s(fh,"This model inherits from "),ir=r(fh,"A",{href:!0});var Ib=i(ir);uu=s(Ib,"PreTrainedModel"),Ib.forEach(t),_u=s(fh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fh.forEach(t),wu=d(Ft),Bs=r(Ft,"P",{});var ph=i(Bs);vu=s(ph,"This model is also a PyTorch "),Ws=r(ph,"A",{href:!0,rel:!0});var Nb=i(Ws);ku=s(Nb,"torch.nn.Module"),Nb.forEach(t),bu=s(ph,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ph.forEach(t),Tu=d(Ft),Ye=r(Ft,"DIV",{class:!0});var Mt=i(Ye);k(Qs.$$.fragment,Mt),yu=d(Mt),wo=r(Mt,"P",{});var Ir=i(wo);Lu=s(Ir,"The "),lr=r(Ir,"A",{href:!0});var Sb=i(lr);xu=s(Sb,"LongformerForQuestionAnswering"),Sb.forEach(t),$u=s(Ir," forward method, overrides the "),ll=r(Ir,"CODE",{});var Db=i(ll);Fu=s(Db,"__call__"),Db.forEach(t),Mu=s(Ir," special method."),Ir.forEach(t),Eu=d(Mt),k(nn.$$.fragment,Mt),zu=d(Mt),dl=r(Mt,"P",{});var Bb=i(dl);qu=s(Bb,"Examples:"),Bb.forEach(t),Cu=d(Mt),k(Hs.$$.fragment,Mt),Mt.forEach(t),Ft.forEach(t),mc=d(o),vo=r(o,"H2",{class:!0});var gh=i(vo);sn=r(gh,"A",{id:!0,class:!0,href:!0});var Wb=i(sn);cl=r(Wb,"SPAN",{});var Qb=i(cl);k(Rs.$$.fragment,Qb),Qb.forEach(t),Wb.forEach(t),Pu=d(gh),hl=r(gh,"SPAN",{});var Hb=i(hl);Ou=s(Hb,"TFLongformerModel"),Hb.forEach(t),gh.forEach(t),fc=d(o),Ee=r(o,"DIV",{class:!0});var Be=i(Ee);k(Us.$$.fragment,Be),ju=d(Be),ml=r(Be,"P",{});var Rb=i(ml);Au=s(Rb,"The bare Longformer Model outputting raw hidden-states without any specific head on top."),Rb.forEach(t),Iu=d(Be),Vs=r(Be,"P",{});var uh=i(Vs);Nu=s(uh,"This model inherits from "),dr=r(uh,"A",{href:!0});var Ub=i(dr);Su=s(Ub,"TFPreTrainedModel"),Ub.forEach(t),Du=s(uh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uh.forEach(t),Bu=d(Be),Gs=r(Be,"P",{});var _h=i(Gs);Wu=s(_h,"This model is also a "),Ks=r(_h,"A",{href:!0,rel:!0});var Vb=i(Ks);Qu=s(Vb,"tf.keras.Model"),Vb.forEach(t),Hu=s(_h,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_h.forEach(t),Ru=d(Be),k(an.$$.fragment,Be),Uu=d(Be),ko=r(Be,"P",{});var Nr=i(ko);Vu=s(Nr,"This class copies code from "),cr=r(Nr,"A",{href:!0});var Gb=i(cr);Gu=s(Gb,"TFRobertaModel"),Gb.forEach(t),Ku=s(Nr,` and overwrites standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in `),Js=r(Nr,"A",{href:!0,rel:!0});var Kb=i(Js);Ju=s(Kb,"Longformer: the Long-Document Transformer"),Kb.forEach(t),Xu=s(Nr,` by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute.`),Nr.forEach(t),Yu=d(Be),Xs=r(Be,"P",{});var wh=i(Xs);Zu=s(wh,"The self-attention module "),fl=r(wh,"CODE",{});var Jb=i(fl);e_=s(Jb,"TFLongformerSelfAttention"),Jb.forEach(t),t_=s(wh,` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. 
Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient.`),wh.forEach(t),o_=d(Be),_t=r(Be,"DIV",{class:!0});var Sr=i(_t);k(Ys.$$.fragment,Sr),n_=d(Sr),bo=r(Sr,"P",{});var Dr=i(bo);s_=s(Dr,"The "),hr=r(Dr,"A",{href:!0});var Xb=i(hr);a_=s(Xb,"TFLongformerModel"),Xb.forEach(t),r_=s(Dr," forward method, overrides the "),pl=r(Dr,"CODE",{});var Yb=i(pl);i_=s(Yb,"__call__"),Yb.forEach(t),l_=s(Dr," special method."),Dr.forEach(t),d_=d(Sr),k(rn.$$.fragment,Sr),Sr.forEach(t),Be.forEach(t),pc=d(o),To=r(o,"H2",{class:!0});var vh=i(To);ln=r(vh,"A",{id:!0,class:!0,href:!0});var Zb=i(ln);gl=r(Zb,"SPAN",{});var eT=i(gl);k(Zs.$$.fragment,eT),eT.forEach(t),Zb.forEach(t),c_=d(vh),ul=r(vh,"SPAN",{});var tT=i(ul);h_=s(tT,"TFLongformerForMaskedLM"),tT.forEach(t),vh.forEach(t),gc=d(o),Ae=r(o,"DIV",{class:!0});var lt=i(Ae);k(ea.$$.fragment,lt),m_=d(lt),ta=r(lt,"P",{});var kh=i(ta);f_=s(kh,"Longformer Model with a "),_l=r(kh,"CODE",{});var oT=i(_l);p_=s(oT,"language modeling"),oT.forEach(t),g_=s(kh," head on top."),kh.forEach(t),u_=d(lt),oa=r(lt,"P",{});var bh=i(oa);__=s(bh,"This model inherits from "),mr=r(bh,"A",{href:!0});var nT=i(mr);w_=s(nT,"TFPreTrainedModel"),nT.forEach(t),v_=s(bh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bh.forEach(t),k_=d(lt),na=r(lt,"P",{});var Th=i(na);b_=s(Th,"This model is also a "),sa=r(Th,"A",{href:!0,rel:!0});var sT=i(sa);T_=s(sT,"tf.keras.Model"),sT.forEach(t),y_=s(Th,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Th.forEach(t),L_=d(lt),k(dn.$$.fragment,lt),x_=d(lt),Ze=r(lt,"DIV",{class:!0});var Et=i(Ze);k(aa.$$.fragment,Et),$_=d(Et),yo=r(Et,"P",{});var Br=i(yo);F_=s(Br,"The "),fr=r(Br,"A",{href:!0});var aT=i(fr);M_=s(aT,"TFLongformerForMaskedLM"),aT.forEach(t),E_=s(Br," forward method, overrides the "),wl=r(Br,"CODE",{});var rT=i(wl);z_=s(rT,"__call__"),rT.forEach(t),q_=s(Br," special method."),Br.forEach(t),C_=d(Et),k(cn.$$.fragment,Et),P_=d(Et),vl=r(Et,"P",{});var iT=i(vl);O_=s(iT,"Example:"),iT.forEach(t),j_=d(Et),k(ra.$$.fragment,Et),Et.forEach(t),lt.forEach(t),uc=d(o),Lo=r(o,"H2",{class:!0});var yh=i(Lo);hn=r(yh,"A",{id:!0,class:!0,href:!0});var lT=i(hn);kl=r(lT,"SPAN",{});var dT=i(kl);k(ia.$$.fragment,dT),dT.forEach(t),lT.forEach(t),A_=d(yh),bl=r(yh,"SPAN",{});var cT=i(bl);I_=s(cT,"TFLongformerForQuestionAnswering"),cT.forEach(t),yh.forEach(t),_c=d(o),Ie=r(o,"DIV",{class:!0});var dt=i(Ie);k(la.$$.fragment,dt),N_=d(dt),xo=r(dt,"P",{});var Wr=i(xo);S_=s(Wr,`Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layer on top of the hidden-states output to compute `),Tl=r(Wr,"CODE",{});var hT=i(Tl);D_=s(hT,"span start logits"),hT.forEach(t),B_=s(Wr," and "),yl=r(Wr,"CODE",{});var mT=i(yl);W_=s(mT,"span end logits"),mT.forEach(t),Q_=s(Wr,")."),Wr.forEach(t),H_=d(dt),da=r(dt,"P",{});var Lh=i(da);R_=s(Lh,"This model inherits from "),pr=r(Lh,"A",{href:!0});var fT=i(pr);U_=s(fT,"TFPreTrainedModel"),fT.forEach(t),V_=s(Lh,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lh.forEach(t),G_=d(dt),ca=r(dt,"P",{});var xh=i(ca);K_=s(xh,"This model is also a "),ha=r(xh,"A",{href:!0,rel:!0});var pT=i(ha);J_=s(pT,"tf.keras.Model"),pT.forEach(t),X_=s(xh,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),xh.forEach(t),Y_=d(dt),k(mn.$$.fragment,dt),Z_=d(dt),et=r(dt,"DIV",{class:!0});var zt=i(et);k(ma.$$.fragment,zt),ew=d(zt),$o=r(zt,"P",{});var Qr=i($o);tw=s(Qr,"The "),gr=r(Qr,"A",{href:!0});var gT=i(gr);ow=s(gT,"TFLongformerForQuestionAnswering"),gT.forEach(t),nw=s(Qr," forward method, overrides the "),Ll=r(Qr,"CODE",{});var uT=i(Ll);sw=s(uT,"__call__"),uT.forEach(t),aw=s(Qr," special method."),Qr.forEach(t),rw=d(zt),k(fn.$$.fragment,zt),iw=d(zt),xl=r(zt,"P",{});var _T=i(xl);lw=s(_T,"Example:"),_T.forEach(t),dw=d(zt),k(fa.$$.fragment,zt),zt.forEach(t),dt.forEach(t),wc=d(o),Fo=r(o,"H2",{class:!0});var $h=i(Fo);pn=r($h,"A",{id:!0,class:!0,href:!0});var wT=i(pn);$l=r(wT,"SPAN",{});var vT=i($l);k(pa.$$.fragment,vT),vT.forEach(t),wT.forEach(t),cw=d($h),Fl=r($h,"SPAN",{});var kT=i(Fl);hw=s(kT,"TFLongformerForSequenceClassification"),kT.forEach(t),$h.forEach(t),vc=d(o),Ne=r(o,"DIV",{class:!0});var ct=i(Ne);k(ga.$$.fragment,ct),mw=d(ct),Ml=r(ct,"P",{});var bT=i(Ml);fw=s(bT,`Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),bT.forEach(t),pw=d(ct),ua=r(ct,"P",{});var Fh=i(ua);gw=s(Fh,"This model inherits from "),ur=r(Fh,"A",{href:!0});var TT=i(ur);uw=s(TT,"TFPreTrainedModel"),TT.forEach(t),_w=s(Fh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fh.forEach(t),ww=d(ct),_a=r(ct,"P",{});var Mh=i(_a);vw=s(Mh,"This model is also a "),wa=r(Mh,"A",{href:!0,rel:!0});var yT=i(wa);kw=s(yT,"tf.keras.Model"),yT.forEach(t),bw=s(Mh,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Mh.forEach(t),Tw=d(ct),k(gn.$$.fragment,ct),yw=d(ct),tt=r(ct,"DIV",{class:!0});var qt=i(tt);k(va.$$.fragment,qt),Lw=d(qt),Mo=r(qt,"P",{});var Hr=i(Mo);xw=s(Hr,"The "),_r=r(Hr,"A",{href:!0});var LT=i(_r);$w=s(LT,"TFLongformerForSequenceClassification"),LT.forEach(t),Fw=s(Hr," forward method, overrides the "),El=r(Hr,"CODE",{});var xT=i(El);Mw=s(xT,"__call__"),xT.forEach(t),Ew=s(Hr," special method."),Hr.forEach(t),zw=d(qt),k(un.$$.fragment,qt),qw=d(qt),zl=r(qt,"P",{});var $T=i(zl);Cw=s($T,"Example:"),$T.forEach(t),Pw=d(qt),k(ka.$$.fragment,qt),qt.forEach(t),ct.forEach(t),kc=d(o),Eo=r(o,"H2",{class:!0});var Eh=i(Eo);_n=r(Eh,"A",{id:!0,class:!0,href:!0});var FT=i(_n);ql=r(FT,"SPAN",{});var MT=i(ql);k(ba.$$.fragment,MT),MT.forEach(t),FT.forEach(t),Ow=d(Eh),Cl=r(Eh,"SPAN",{});var ET=i(Cl);jw=s(ET,"TFLongformerForTokenClassification"),ET.forEach(t),Eh.forEach(t),bc=d(o),Se=r(o,"DIV",{class:!0});var ht=i(Se);k(Ta.$$.fragment,ht),Aw=d(ht),Pl=r(ht,"P",{});var zT=i(Pl);Iw=s(zT,`Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),zT.forEach(t),Nw=d(ht),ya=r(ht,"P",{});var zh=i(ya);Sw=s(zh,"This model inherits from "),wr=r(zh,"A",{href:!0});var qT=i(wr);Dw=s(qT,"TFPreTrainedModel"),qT.forEach(t),Bw=s(zh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zh.forEach(t),Ww=d(ht),La=r(ht,"P",{});var qh=i(La);Qw=s(qh,"This model is also a "),xa=r(qh,"A",{href:!0,rel:!0});var CT=i(xa);Hw=s(CT,"tf.keras.Model"),CT.forEach(t),Rw=s(qh,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),qh.forEach(t),Uw=d(ht),k(wn.$$.fragment,ht),Vw=d(ht),ot=r(ht,"DIV",{class:!0});var Ct=i(ot);k($a.$$.fragment,Ct),Gw=d(Ct),zo=r(Ct,"P",{});var Rr=i(zo);Kw=s(Rr,"The "),vr=r(Rr,"A",{href:!0});var PT=i(vr);Jw=s(PT,"TFLongformerForTokenClassification"),PT.forEach(t),Xw=s(Rr," forward method, overrides the "),Ol=r(Rr,"CODE",{});var OT=i(Ol);Yw=s(OT,"__call__"),OT.forEach(t),Zw=s(Rr," special method."),Rr.forEach(t),ev=d(Ct),k(vn.$$.fragment,Ct),tv=d(Ct),jl=r(Ct,"P",{});var jT=i(jl);ov=s(jT,"Example:"),jT.forEach(t),nv=d(Ct),k(Fa.$$.fragment,Ct),Ct.forEach(t),ht.forEach(t),Tc=d(o),qo=r(o,"H2",{class:!0});var Ch=i(qo);kn=r(Ch,"A",{id:!0,class:!0,href:!0});var AT=i(kn);Al=r(AT,"SPAN",{});var IT=i(Al);k(Ma.$$.fragment,IT),IT.forEach(t),AT.forEach(t),sv=d(Ch),Il=r(Ch,"SPAN",{});var NT=i(Il);av=s(NT,"TFLongformerForMultipleChoice"),NT.forEach(t),Ch.forEach(t),yc=d(o),De=r(o,"DIV",{class:!0});var mt=i(De);k(Ea.$$.fragment,mt),rv=d(mt),Nl=r(mt,"P",{});var ST=i(Nl);iv=s(ST,`Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ST.forEach(t),lv=d(mt),za=r(mt,"P",{});var Ph=i(za);dv=s(Ph,"This model inherits from "),kr=r(Ph,"A",{href:!0});var DT=i(kr);cv=s(DT,"TFPreTrainedModel"),DT.forEach(t),hv=s(Ph,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ph.forEach(t),mv=d(mt),qa=r(mt,"P",{});var Oh=i(qa);fv=s(Oh,"This model is also a "),Ca=r(Oh,"A",{href:!0,rel:!0});var BT=i(Ca);pv=s(BT,"tf.keras.Model"),BT.forEach(t),gv=s(Oh,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Oh.forEach(t),uv=d(mt),k(bn.$$.fragment,mt),_v=d(mt),nt=r(mt,"DIV",{class:!0});var Pt=i(nt);k(Pa.$$.fragment,Pt),wv=d(Pt),Co=r(Pt,"P",{});var Ur=i(Co);vv=s(Ur,"The "),br=r(Ur,"A",{href:!0});var WT=i(br);kv=s(WT,"TFLongformerForMultipleChoice"),WT.forEach(t),bv=s(Ur," forward method, overrides the "),Sl=r(Ur,"CODE",{});var QT=i(Sl);Tv=s(QT,"__call__"),QT.forEach(t),yv=s(Ur," special method."),Ur.forEach(t),Lv=d(Pt),k(Tn.$$.fragment,Pt),xv=d(Pt),Dl=r(Pt,"P",{});var HT=i(Dl);$v=s(HT,"Example:"),HT.forEach(t),Fv=d(Pt),k(Oa.$$.fragment,Pt),Pt.forEach(t),mt.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(y1)),c(u,"id","longformer"),c(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(u,"href","#longformer"),c(g,"class","relative group"),c(Z,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),c(Z,"rel","nofollow"),c(P,"id","overview"),c(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(P,"href","#overview"),c(R,"class","relative group"),c(I,"href","https://arxiv.org/pdf/2004.05150.pdf"),c(I,"rel","nofollow"),c(ve,"href","https://huggingface.co/beltagy"),c(ve,"rel","nofollow"),c(ke,"href","https://github.com/allenai/longformer"),c(ke,"rel","nofollow"),c(jo,"id","longformer-self-attention"),c(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jo,"href","#longformer-self-attention"),c(Ot,"class","relative group"),rd.a=id,ld.a=dd,cd.a=hd,md.a=fd,gd.a=ud,c(Na,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel.forward"),bd.a=Td,yd.a=Ld,xd.a=$d,Fd.a=Md,c($n,"href","https://arxiv.org/pdf/2004.05150.pdf"),c($n,"rel","nofollow"),c(So,"id","training"),c(So,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(So,"href","#training"),c(jt,"class","relative group"),c(Sa,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMaskedLM"),c(Da,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM"),c(Do,"id","transformers.LongformerConfig"),c(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Do,"href","#transformers.LongformerConfig"),c(It,"class","relative 
group"),c(Ba,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel"),c(Wa,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerModel"),c(Qa,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel"),c(qn,"href","https://huggingface.co/roberta-base"),c(qn,"rel","nofollow"),c(Ha,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig"),c(Ra,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig"),c(je,"class","docstring"),c(Bo,"id","transformers.LongformerTokenizer"),c(Bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bo,"href","#transformers.LongformerTokenizer"),c(Bt,"class","relative group"),c(Ua,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer"),c(Va,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(ft,"class","docstring"),c(Qo,"id","transformers.LongformerTokenizerFast"),c(Qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qo,"href","#transformers.LongformerTokenizerFast"),c(Wt,"class","relative group"),c(Ga,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizerFast"),c(Ka,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(pt,"class","docstring"),c(Ro,"id","transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput"),c(Ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ro,"href","#transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput"),c(Qt,"class","relative group"),c(Ht,"class","docstring"),c(Rt,"class","docstring"),c(Ut,"class","docstring"),c(Vt,"class","docstring"),c(Gt,"class","docstring"),c(Kt,"class","docstring"),c(Jt,"class","docstring"),c(Xt,"class","docstring"),c(Yt,"class","docstring"),c(Zt,"class","docstring"),c(eo,"class","docstring"),c(to,"class","docstring"),c(oo,"class","docstring"),c(no,"class","docstring"),c(Uo,"id","transformers.LongformerModel"),c(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Uo,"href","#transformers.LongformerModel"),c(so,"class","relative group"),c(Ja,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ns,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ns,"rel","nofollow"),c(Xa,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel"),c(ss,"href","https://arxiv.org/abs/2004.05150"),c(ss,"rel","nofollow"),c(Ya,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel"),c(Ge,"class","docstring"),c(qe,"class","docstring"),c(Go,"id","transformers.LongformerForMaskedLM"),c(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Go,"href","#transformers.LongformerForMaskedLM"),c(io,"class","relative 
group"),c(Za,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(fs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(fs,"rel","nofollow"),c(er,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMaskedLM"),c(Ke,"class","docstring"),c(We,"class","docstring"),c(Jo,"id","transformers.LongformerForSequenceClassification"),c(Jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jo,"href","#transformers.LongformerForSequenceClassification"),c(co,"class","relative group"),c(tr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ks,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ks,"rel","nofollow"),c(or,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForSequenceClassification"),c(Ce,"class","docstring"),c(Qe,"class","docstring"),c(Yo,"id","transformers.LongformerForMultipleChoice"),c(Yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yo,"href","#transformers.LongformerForMultipleChoice"),c(mo,"class","relative group"),c(nr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ms,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ms,"rel","nofollow"),c(sr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMultipleChoice"),c(Je,"class","docstring"),c(He,"class","docstring"),c(en,"id","transformers.LongformerForTokenClassification"),c(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(en,"href","#transformers.LongformerForTokenClassification"),c(po,"class","relative group"),c(ar,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(js,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(js,"rel","nofollow"),c(rr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForTokenClassification"),c(Xe,"class","docstring"),c(Re,"class","docstring"),c(on,"id","transformers.LongformerForQuestionAnswering"),c(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(on,"href","#transformers.LongformerForQuestionAnswering"),c(uo,"class","relative group"),c(ir,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ws,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ws,"rel","nofollow"),c(lr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForQuestionAnswering"),c(Ye,"class","docstring"),c(Ue,"class","docstring"),c(sn,"id","transformers.TFLongformerModel"),c(sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(sn,"href","#transformers.TFLongformerModel"),c(vo,"class","relative 
group"),c(dr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ks,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ks,"rel","nofollow"),c(cr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaModel"),c(Js,"href","https://arxiv.org/abs/2004.05150"),c(Js,"rel","nofollow"),c(hr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerModel"),c(_t,"class","docstring"),c(Ee,"class","docstring"),c(ln,"id","transformers.TFLongformerForMaskedLM"),c(ln,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ln,"href","#transformers.TFLongformerForMaskedLM"),c(To,"class","relative group"),c(mr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(sa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(sa,"rel","nofollow"),c(fr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForMaskedLM"),c(Ze,"class","docstring"),c(Ae,"class","docstring"),c(hn,"id","transformers.TFLongformerForQuestionAnswering"),c(hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(hn,"href","#transformers.TFLongformerForQuestionAnswering"),c(Lo,"class","relative group"),c(pr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ha,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ha,"rel","nofollow"),c(gr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForQuestionAnswering"),c(et,"class","docstring"),c(Ie,"class","docstring"),c(pn,"id","transformers.TFLongformerForSequenceClassification"),c(pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(pn,"href","#transformers.TFLongformerForSequenceClassification"),c(Fo,"class","relative group"),c(ur,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(wa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(wa,"rel","nofollow"),c(_r,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForSequenceClassification"),c(tt,"class","docstring"),c(Ne,"class","docstring"),c(_n,"id","transformers.TFLongformerForTokenClassification"),c(_n,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_n,"href","#transformers.TFLongformerForTokenClassification"),c(Eo,"class","relative group"),c(wr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(xa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(xa,"rel","nofollow"),c(vr,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForTokenClassification"),c(ot,"class","docstring"),c(Se,"class","docstring"),c(kn,"id","transformers.TFLongformerForMultipleChoice"),c(kn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(kn,"href","#transformers.TFLongformerForMultipleChoice"),c(qo,"class","relative group"),c(kr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ca,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ca,"rel","nofollow"),c(br,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForMultipleChoice"),c(nt,"class","docstring"),c(De,"class","docstring")},m(o,f){e(document.head,p),h(o,F,f),h(o,g,f),e(g,u),e(u,x),b(w,x,null),e(g,_),e(g,M),e(M,de),h(o,X,f),h(o,E,f),e(E,ne),e(ne,Q),e(E,ce),e(E,Z),e(Z,H),e(E,he),h(o,ae,f),h(o,R,f),e(R,P),e(P,se),b(K,se,null),e(R,z),e(R,q),e(q,me),h(o,D,f),h(o,Y,f),e(Y,fe),e(Y,I),e(I,pe),e(Y,ge),h(o,C,f),h(o,ee,f),e(ee,U),h(o,re,f),h(o,te,f),e(te,N),e(N,ue),h(o,ie,f),h(o,O,f),e(O,_e),h(o,B,f),h(o,oe,f),e(oe,m),e(m,$),e(m,J),e(J,Le),e(m,xe),e(m,j),e(j,$e),e(m,Fe),e(m,be),e(be,A),e(m,V),h(o,Te,f),h(o,le,f),e(le,G),e(le,ve),e(ve,Me),e(le,we),e(le,ke),e(ke,jh),e(le,Ah),h(o,sd,f),h(o,Ot,f),e(Ot,jo),e(jo,Vr),b(xn,Vr,null),e(Ot,Ih),e(Ot,Gr),e(Gr,Nh),h(o,ad,f),h(o,ye,f),e(ye,Sh),rd.m(RT,ye),e(ye,id),ld.m(UT,ye),e(ye,dd),cd.m(VT,ye),e(ye,hd),e(ye,Kr),e(Kr,Dh),e(ye,Bh),e(ye,Jr),e(Jr,Wh),e(ye,Qh),e(ye,Xr),e(Xr,Hh),e(ye,Rh),md.m(GT,ye),e(ye,fd),e(ye,Yr),e(Yr,Uh),e(ye,Vh),h(o,pd,f),h(o,gt,f),e(gt,Gh),gd.m(KT,gt),e(gt,ud),e(gt,Zr),e(Zr,Kh),e(gt,Jh),h(o,_d,f),h(o,ut,f),e(ut,Xh),e(ut,ei),e(ei,Yh),e(ut,Zh),e(ut,ti),e(ti,em),e(ut,tm),h(o,wd,f),h(o,Ao,f),e(Ao,oi),e(oi,om),e(Ao,nm),e(Ao,ni),e(ni,sm),h(o,vd,f),h(o,Io,f),e(Io,am),e(Io,Na),e(Na,rm),e(Io,im),h(o,kd,f),h(o,Ve,f),e(Ve,lm),bd.m(JT,Ve),e(Ve,Td),yd.m(XT,Ve),e(Ve,Ld),xd.m(YT,Ve),e(Ve,$d),Fd.m(ZT,Ve),e(Ve,Md),h(o,Ed,f),h(o,No,f),e(No,dm),e(No,$n),e($n,cm),e(No,hm),h(o,zd,f),h(o,jt,f),e(jt,So),e(So,si),b(Fn,si,null),e(jt,mm),e(jt,ai),e(ai,fm),h(o,qd,f),h(o,At,f),e(At,Sa),e(Sa,pm),e(At,gm),e(At,Da),e(Da,um),e(At,_m),h(o,Cd,f),b(Mn,o,f),h(o,Pd,f),h(o,It,f),e(It,Do),e(Do,ri),b(En,ri,null),e(It,wm),e(It,ii),e(ii,vm),h(o,Od,f),h(o,je,f),b(zn,je,null),e(je,km),e(je,Nt),e(Nt,bm),e(Nt,Ba),e(Ba,Tm),e(Nt,ym),e(Nt,Wa),e(Wa,Lm),e(Nt,xm),e(je,$m),e(je,St),e(St,Fm),e(St,Qa),e(Qa,Mm),e(St,Em),e(St,qn),e(qn,zm),e(St,qm),e(je,Cm),e(je,Dt),e(Dt,Pm),e(Dt,Ha),e(Ha,Om),e(Dt,jm),e(Dt,Ra),e(Ra,Am),e(Dt,Im),e(je,Nm),e(je,li),e(li,Sm),e(je,Dm),b(Cn,je,null),h(o,jd,f),h(o,Bt,f),e(Bt,Bo),e(Bo,di),b(Pn,di,null),e(Bt,Bm),e(Bt,ci),e(ci,Wm),h(o,Ad,f),h(o,ft,f),b(On,ft,null),e(ft,Qm),e(ft,hi),e(hi,Hm),e(ft,Rm),e(ft,Wo),e(Wo,Ua),e(Ua,Um),e(Wo,Vm),e(Wo,Va),e(Va,Gm),e(Wo,Km),h(o,Id,f),h(o,Wt,f),e(Wt,Qo),e(Qo,mi),b(jn,mi,null),e(Wt,Jm),e(Wt,fi),e(fi,Xm),h(o,Nd,f),h(o,pt,f),b(An,pt,null),e(pt,Ym),e(pt,In),e(In,Zm),e(In,pi),e(pi,ef),e(In,tf),e(pt,of),e(pt,Ho),e(Ho,Ga),e(Ga,nf),e(Ho,sf),e(Ho,Ka),e(Ka,af),e(Ho,rf),h(o,Sd,f),h(o,Qt,f),e(Qt,Ro),e(Ro,gi),b(Nn,gi,null),e(Qt,lf),e(Qt,ui),e(ui,df),h(o,Dd,f),h(o,Ht,f),b(Sn,Ht,null),e(Ht,cf),e(Ht,_i),e(_i,hf),h(o,Bd,f),h(o,Rt,f),b(Dn,Rt,null),e(Rt,mf),e(Rt,wi),e(wi,ff),h(o,Wd,f),h(o,Ut,f),b(Bn,Ut,null),e(Ut,pf),e(Ut,vi),e(vi,gf),h(o,Qd,f),h(o,Vt,f),b(Wn,Vt,null),e(Vt,uf),e(Vt,ki),e(ki,_f),h(o,Hd,f),h(o,Gt,f),b(Qn,Gt,null),e(Gt,wf),e(Gt,bi),e(bi,vf),h(o,Rd,f),h(o,Kt,f),b(Hn,Kt,null),e(Kt,kf),e(Kt,Ti),e(Ti,bf),h(o,Ud,f),h(o,Jt,f),b(Rn,Jt,null),e(Jt,Tf),e(Jt,yi),e(yi,yf),h(o,Vd,f),h(o,Xt,f),b(Un,Xt,null),e(Xt,Lf),e(Xt,Li),e(Li,xf),h(o,Gd,f),h(o,Yt,f),b(Vn,Yt,null),e(Yt,$f),e(Yt,xi),e(xi,Ff),h(o,Kd,f),h(o,Zt,f),b(Gn,Zt,null),e(Zt,Mf),e(Zt,$i),e($i,Ef),h(o,Jd,f),h(o,eo,f),b(Kn,eo,null),e(eo,zf),e(eo,Fi),e(Fi,qf),h(o,Xd,f)
,h(o,to,f),b(Jn,to,null),e(to,Cf),e(to,Mi),e(Mi,Pf),h(o,Yd,f),h(o,oo,f),b(Xn,oo,null),e(oo,Of),e(oo,Ei),e(Ei,jf),h(o,Zd,f),h(o,no,f),b(Yn,no,null),e(no,Af),e(no,zi),e(zi,If),h(o,ec,f),h(o,so,f),e(so,Uo),e(Uo,qi),b(Zn,qi,null),e(so,Nf),e(so,Ci),e(Ci,Sf),h(o,tc,f),h(o,qe,f),b(es,qe,null),e(qe,Df),e(qe,Pi),e(Pi,Bf),e(qe,Wf),e(qe,ts),e(ts,Qf),e(ts,Ja),e(Ja,Hf),e(ts,Rf),e(qe,Uf),e(qe,os),e(os,Vf),e(os,ns),e(ns,Gf),e(os,Kf),e(qe,Jf),e(qe,ao),e(ao,Xf),e(ao,Xa),e(Xa,Yf),e(ao,Zf),e(ao,ss),e(ss,ep),e(ao,tp),e(qe,op),e(qe,as),e(as,np),e(as,Oi),e(Oi,sp),e(as,ap),e(qe,rp),e(qe,Ge),b(rs,Ge,null),e(Ge,ip),e(Ge,ro),e(ro,lp),e(ro,Ya),e(Ya,dp),e(ro,cp),e(ro,ji),e(ji,hp),e(ro,mp),e(Ge,fp),b(Vo,Ge,null),e(Ge,pp),e(Ge,Ai),e(Ai,gp),e(Ge,up),b(is,Ge,null),h(o,oc,f),h(o,io,f),e(io,Go),e(Go,Ii),b(ls,Ii,null),e(io,_p),e(io,Ni),e(Ni,wp),h(o,nc,f),h(o,We,f),b(ds,We,null),e(We,vp),e(We,cs),e(cs,kp),e(cs,Si),e(Si,bp),e(cs,Tp),e(We,yp),e(We,hs),e(hs,Lp),e(hs,Za),e(Za,xp),e(hs,$p),e(We,Fp),e(We,ms),e(ms,Mp),e(ms,fs),e(fs,Ep),e(ms,zp),e(We,qp),e(We,Ke),b(ps,Ke,null),e(Ke,Cp),e(Ke,lo),e(lo,Pp),e(lo,er),e(er,Op),e(lo,jp),e(lo,Di),e(Di,Ap),e(lo,Ip),e(Ke,Np),b(Ko,Ke,null),e(Ke,Sp),e(Ke,Bi),e(Bi,Dp),e(Ke,Bp),b(gs,Ke,null),h(o,sc,f),h(o,co,f),e(co,Jo),e(Jo,Wi),b(us,Wi,null),e(co,Wp),e(co,Qi),e(Qi,Qp),h(o,ac,f),h(o,Qe,f),b(_s,Qe,null),e(Qe,Hp),e(Qe,Hi),e(Hi,Rp),e(Qe,Up),e(Qe,ws),e(ws,Vp),e(ws,tr),e(tr,Gp),e(ws,Kp),e(Qe,Jp),e(Qe,vs),e(vs,Xp),e(vs,ks),e(ks,Yp),e(vs,Zp),e(Qe,eg),e(Qe,Ce),b(bs,Ce,null),e(Ce,tg),e(Ce,ho),e(ho,og),e(ho,or),e(or,ng),e(ho,sg),e(ho,Ri),e(Ri,ag),e(ho,rg),e(Ce,ig),b(Xo,Ce,null),e(Ce,lg),e(Ce,Ui),e(Ui,dg),e(Ce,cg),b(Ts,Ce,null),e(Ce,hg),e(Ce,Vi),e(Vi,mg),e(Ce,fg),b(ys,Ce,null),h(o,rc,f),h(o,mo,f),e(mo,Yo),e(Yo,Gi),b(Ls,Gi,null),e(mo,pg),e(mo,Ki),e(Ki,gg),h(o,ic,f),h(o,He,f),b(xs,He,null),e(He,ug),e(He,Ji),e(Ji,_g),e(He,wg),e(He,$s),e($s,vg),e($s,nr),e(nr,kg),e($s,bg),e(He,Tg),e(He,Fs),e(Fs,yg),e(Fs,Ms),e(Ms,Lg),e(Fs,xg),e(He,$g),e(He,Je),b(Es,Je,null),e(Je,Fg),e(Je,fo),e(fo,Mg),e(fo,sr),e(sr,Eg),e(fo,zg),e(fo,Xi),e(Xi,qg),e(fo,Cg),e(Je,Pg),b(Zo,Je,null),e(Je,Og),e(Je,Yi),e(Yi,jg),e(Je,Ag),b(zs,Je,null),h(o,lc,f),h(o,po,f),e(po,en),e(en,Zi),b(qs,Zi,null),e(po,Ig),e(po,el),e(el,Ng),h(o,dc,f),h(o,Re,f),b(Cs,Re,null),e(Re,Sg),e(Re,tl),e(tl,Dg),e(Re,Bg),e(Re,Ps),e(Ps,Wg),e(Ps,ar),e(ar,Qg),e(Ps,Hg),e(Re,Rg),e(Re,Os),e(Os,Ug),e(Os,js),e(js,Vg),e(Os,Gg),e(Re,Kg),e(Re,Xe),b(As,Xe,null),e(Xe,Jg),e(Xe,go),e(go,Xg),e(go,rr),e(rr,Yg),e(go,Zg),e(go,ol),e(ol,eu),e(go,tu),e(Xe,ou),b(tn,Xe,null),e(Xe,nu),e(Xe,nl),e(nl,su),e(Xe,au),b(Is,Xe,null),h(o,cc,f),h(o,uo,f),e(uo,on),e(on,sl),b(Ns,sl,null),e(uo,ru),e(uo,al),e(al,iu),h(o,hc,f),h(o,Ue,f),b(Ss,Ue,null),e(Ue,lu),e(Ue,_o),e(_o,du),e(_o,rl),e(rl,cu),e(_o,hu),e(_o,il),e(il,mu),e(_o,fu),e(Ue,pu),e(Ue,Ds),e(Ds,gu),e(Ds,ir),e(ir,uu),e(Ds,_u),e(Ue,wu),e(Ue,Bs),e(Bs,vu),e(Bs,Ws),e(Ws,ku),e(Bs,bu),e(Ue,Tu),e(Ue,Ye),b(Qs,Ye,null),e(Ye,yu),e(Ye,wo),e(wo,Lu),e(wo,lr),e(lr,xu),e(wo,$u),e(wo,ll),e(ll,Fu),e(wo,Mu),e(Ye,Eu),b(nn,Ye,null),e(Ye,zu),e(Ye,dl),e(dl,qu),e(Ye,Cu),b(Hs,Ye,null),h(o,mc,f),h(o,vo,f),e(vo,sn),e(sn,cl),b(Rs,cl,null),e(vo,Pu),e(vo,hl),e(hl,Ou),h(o,fc,f),h(o,Ee,f),b(Us,Ee,null),e(Ee,ju),e(Ee,ml),e(ml,Au),e(Ee,Iu),e(Ee,Vs),e(Vs,Nu),e(Vs,dr),e(dr,Su),e(Vs,Du),e(Ee,Bu),e(Ee,Gs),e(Gs,Wu),e(Gs,Ks),e(Ks,Qu),e(Gs,Hu),e(Ee,Ru),b(an,Ee,null),e(Ee,Uu),e(Ee,ko),e(ko,Vu),e(ko,cr),e(cr,Gu),e(ko,Ku),e(ko,Js),e(Js,Ju),e(ko,Xu),e(Ee,Yu),e(Ee,Xs),e(Xs,Zu),e(Xs,fl),e(fl,e_),e(Xs,t_),e(Ee,o_),e(Ee,_t),b(Ys,_t,null),e(_t,n_),e(_t,bo),e(bo,s_),e(bo,hr),e(hr,a_),e(bo,r_),e(bo,pl),e(pl,i_),e
(bo,l_),e(_t,d_),b(rn,_t,null),h(o,pc,f),h(o,To,f),e(To,ln),e(ln,gl),b(Zs,gl,null),e(To,c_),e(To,ul),e(ul,h_),h(o,gc,f),h(o,Ae,f),b(ea,Ae,null),e(Ae,m_),e(Ae,ta),e(ta,f_),e(ta,_l),e(_l,p_),e(ta,g_),e(Ae,u_),e(Ae,oa),e(oa,__),e(oa,mr),e(mr,w_),e(oa,v_),e(Ae,k_),e(Ae,na),e(na,b_),e(na,sa),e(sa,T_),e(na,y_),e(Ae,L_),b(dn,Ae,null),e(Ae,x_),e(Ae,Ze),b(aa,Ze,null),e(Ze,$_),e(Ze,yo),e(yo,F_),e(yo,fr),e(fr,M_),e(yo,E_),e(yo,wl),e(wl,z_),e(yo,q_),e(Ze,C_),b(cn,Ze,null),e(Ze,P_),e(Ze,vl),e(vl,O_),e(Ze,j_),b(ra,Ze,null),h(o,uc,f),h(o,Lo,f),e(Lo,hn),e(hn,kl),b(ia,kl,null),e(Lo,A_),e(Lo,bl),e(bl,I_),h(o,_c,f),h(o,Ie,f),b(la,Ie,null),e(Ie,N_),e(Ie,xo),e(xo,S_),e(xo,Tl),e(Tl,D_),e(xo,B_),e(xo,yl),e(yl,W_),e(xo,Q_),e(Ie,H_),e(Ie,da),e(da,R_),e(da,pr),e(pr,U_),e(da,V_),e(Ie,G_),e(Ie,ca),e(ca,K_),e(ca,ha),e(ha,J_),e(ca,X_),e(Ie,Y_),b(mn,Ie,null),e(Ie,Z_),e(Ie,et),b(ma,et,null),e(et,ew),e(et,$o),e($o,tw),e($o,gr),e(gr,ow),e($o,nw),e($o,Ll),e(Ll,sw),e($o,aw),e(et,rw),b(fn,et,null),e(et,iw),e(et,xl),e(xl,lw),e(et,dw),b(fa,et,null),h(o,wc,f),h(o,Fo,f),e(Fo,pn),e(pn,$l),b(pa,$l,null),e(Fo,cw),e(Fo,Fl),e(Fl,hw),h(o,vc,f),h(o,Ne,f),b(ga,Ne,null),e(Ne,mw),e(Ne,Ml),e(Ml,fw),e(Ne,pw),e(Ne,ua),e(ua,gw),e(ua,ur),e(ur,uw),e(ua,_w),e(Ne,ww),e(Ne,_a),e(_a,vw),e(_a,wa),e(wa,kw),e(_a,bw),e(Ne,Tw),b(gn,Ne,null),e(Ne,yw),e(Ne,tt),b(va,tt,null),e(tt,Lw),e(tt,Mo),e(Mo,xw),e(Mo,_r),e(_r,$w),e(Mo,Fw),e(Mo,El),e(El,Mw),e(Mo,Ew),e(tt,zw),b(un,tt,null),e(tt,qw),e(tt,zl),e(zl,Cw),e(tt,Pw),b(ka,tt,null),h(o,kc,f),h(o,Eo,f),e(Eo,_n),e(_n,ql),b(ba,ql,null),e(Eo,Ow),e(Eo,Cl),e(Cl,jw),h(o,bc,f),h(o,Se,f),b(Ta,Se,null),e(Se,Aw),e(Se,Pl),e(Pl,Iw),e(Se,Nw),e(Se,ya),e(ya,Sw),e(ya,wr),e(wr,Dw),e(ya,Bw),e(Se,Ww),e(Se,La),e(La,Qw),e(La,xa),e(xa,Hw),e(La,Rw),e(Se,Uw),b(wn,Se,null),e(Se,Vw),e(Se,ot),b($a,ot,null),e(ot,Gw),e(ot,zo),e(zo,Kw),e(zo,vr),e(vr,Jw),e(zo,Xw),e(zo,Ol),e(Ol,Yw),e(zo,Zw),e(ot,ev),b(vn,ot,null),e(ot,tv),e(ot,jl),e(jl,ov),e(ot,nv),b(Fa,ot,null),h(o,Tc,f),h(o,qo,f),e(qo,kn),e(kn,Al),b(Ma,Al,null),e(qo,sv),e(qo,Il),e(Il,av),h(o,yc,f),h(o,De,f),b(Ea,De,null),e(De,rv),e(De,Nl),e(Nl,iv),e(De,lv),e(De,za),e(za,dv),e(za,kr),e(kr,cv),e(za,hv),e(De,mv),e(De,qa),e(qa,fv),e(qa,Ca),e(Ca,pv),e(qa,gv),e(De,uv),b(bn,De,null),e(De,_v),e(De,nt),b(Pa,nt,null),e(nt,wv),e(nt,Co),e(Co,vv),e(Co,br),e(br,kv),e(Co,bv),e(Co,Sl),e(Sl,Tv),e(Co,yv),e(nt,Lv),b(Tn,nt,null),e(nt,xv),e(nt,Dl),e(Dl,$v),e(nt,Fv),b(Oa,nt,null),Lc=!0},p(o,[f]){const ja={};f&2&&(ja.$$scope={dirty:f,ctx:o}),Vo.$set(ja);const Bl={};f&2&&(Bl.$$scope={dirty:f,ctx:o}),Ko.$set(Bl);const Wl={};f&2&&(Wl.$$scope={dirty:f,ctx:o}),Xo.$set(Wl);const Ql={};f&2&&(Ql.$$scope={dirty:f,ctx:o}),Zo.$set(Ql);const yn={};f&2&&(yn.$$scope={dirty:f,ctx:o}),tn.$set(yn);const Hl={};f&2&&(Hl.$$scope={dirty:f,ctx:o}),nn.$set(Hl);const Rl={};f&2&&(Rl.$$scope={dirty:f,ctx:o}),an.$set(Rl);const Aa={};f&2&&(Aa.$$scope={dirty:f,ctx:o}),rn.$set(Aa);const Ul={};f&2&&(Ul.$$scope={dirty:f,ctx:o}),dn.$set(Ul);const Vl={};f&2&&(Vl.$$scope={dirty:f,ctx:o}),cn.$set(Vl);const Gl={};f&2&&(Gl.$$scope={dirty:f,ctx:o}),mn.$set(Gl);const Ia={};f&2&&(Ia.$$scope={dirty:f,ctx:o}),fn.$set(Ia);const Kl={};f&2&&(Kl.$$scope={dirty:f,ctx:o}),gn.$set(Kl);const Jl={};f&2&&(Jl.$$scope={dirty:f,ctx:o}),un.$set(Jl);const Xl={};f&2&&(Xl.$$scope={dirty:f,ctx:o}),wn.$set(Xl);const Yl={};f&2&&(Yl.$$scope={dirty:f,ctx:o}),vn.$set(Yl);const Zl={};f&2&&(Zl.$$scope={dirty:f,ctx:o}),bn.$set(Zl);const 
ed={};f&2&&(ed.$$scope={dirty:f,ctx:o}),Tn.$set(ed)},i(o){Lc||(T(w.$$.fragment,o),T(K.$$.fragment,o),T(xn.$$.fragment,o),T(Fn.$$.fragment,o),T(Mn.$$.fragment,o),T(En.$$.fragment,o),T(zn.$$.fragment,o),T(Cn.$$.fragment,o),T(Pn.$$.fragment,o),T(On.$$.fragment,o),T(jn.$$.fragment,o),T(An.$$.fragment,o),T(Nn.$$.fragment,o),T(Sn.$$.fragment,o),T(Dn.$$.fragment,o),T(Bn.$$.fragment,o),T(Wn.$$.fragment,o),T(Qn.$$.fragment,o),T(Hn.$$.fragment,o),T(Rn.$$.fragment,o),T(Un.$$.fragment,o),T(Vn.$$.fragment,o),T(Gn.$$.fragment,o),T(Kn.$$.fragment,o),T(Jn.$$.fragment,o),T(Xn.$$.fragment,o),T(Yn.$$.fragment,o),T(Zn.$$.fragment,o),T(es.$$.fragment,o),T(rs.$$.fragment,o),T(Vo.$$.fragment,o),T(is.$$.fragment,o),T(ls.$$.fragment,o),T(ds.$$.fragment,o),T(ps.$$.fragment,o),T(Ko.$$.fragment,o),T(gs.$$.fragment,o),T(us.$$.fragment,o),T(_s.$$.fragment,o),T(bs.$$.fragment,o),T(Xo.$$.fragment,o),T(Ts.$$.fragment,o),T(ys.$$.fragment,o),T(Ls.$$.fragment,o),T(xs.$$.fragment,o),T(Es.$$.fragment,o),T(Zo.$$.fragment,o),T(zs.$$.fragment,o),T(qs.$$.fragment,o),T(Cs.$$.fragment,o),T(As.$$.fragment,o),T(tn.$$.fragment,o),T(Is.$$.fragment,o),T(Ns.$$.fragment,o),T(Ss.$$.fragment,o),T(Qs.$$.fragment,o),T(nn.$$.fragment,o),T(Hs.$$.fragment,o),T(Rs.$$.fragment,o),T(Us.$$.fragment,o),T(an.$$.fragment,o),T(Ys.$$.fragment,o),T(rn.$$.fragment,o),T(Zs.$$.fragment,o),T(ea.$$.fragment,o),T(dn.$$.fragment,o),T(aa.$$.fragment,o),T(cn.$$.fragment,o),T(ra.$$.fragment,o),T(ia.$$.fragment,o),T(la.$$.fragment,o),T(mn.$$.fragment,o),T(ma.$$.fragment,o),T(fn.$$.fragment,o),T(fa.$$.fragment,o),T(pa.$$.fragment,o),T(ga.$$.fragment,o),T(gn.$$.fragment,o),T(va.$$.fragment,o),T(un.$$.fragment,o),T(ka.$$.fragment,o),T(ba.$$.fragment,o),T(Ta.$$.fragment,o),T(wn.$$.fragment,o),T($a.$$.fragment,o),T(vn.$$.fragment,o),T(Fa.$$.fragment,o),T(Ma.$$.fragment,o),T(Ea.$$.fragment,o),T(bn.$$.fragment,o),T(Pa.$$.fragment,o),T(Tn.$$.fragment,o),T(Oa.$$.fragment,o),Lc=!0)},o(o){y(w.$$.fragment,o),y(K.$$.fragment,o),y(xn.$$.fragment,o),y(Fn.$$.fragment,o),y(Mn.$$.fragment,o),y(En.$$.fragment,o),y(zn.$$.fragment,o),y(Cn.$$.fragment,o),y(Pn.$$.fragment,o),y(On.$$.fragment,o),y(jn.$$.fragment,o),y(An.$$.fragment,o),y(Nn.$$.fragment,o),y(Sn.$$.fragment,o),y(Dn.$$.fragment,o),y(Bn.$$.fragment,o),y(Wn.$$.fragment,o),y(Qn.$$.fragment,o),y(Hn.$$.fragment,o),y(Rn.$$.fragment,o),y(Un.$$.fragment,o),y(Vn.$$.fragment,o),y(Gn.$$.fragment,o),y(Kn.$$.fragment,o),y(Jn.$$.fragment,o),y(Xn.$$.fragment,o),y(Yn.$$.fragment,o),y(Zn.$$.fragment,o),y(es.$$.fragment,o),y(rs.$$.fragment,o),y(Vo.$$.fragment,o),y(is.$$.fragment,o),y(ls.$$.fragment,o),y(ds.$$.fragment,o),y(ps.$$.fragment,o),y(Ko.$$.fragment,o),y(gs.$$.fragment,o),y(us.$$.fragment,o),y(_s.$$.fragment,o),y(bs.$$.fragment,o),y(Xo.$$.fragment,o),y(Ts.$$.fragment,o),y(ys.$$.fragment,o),y(Ls.$$.fragment,o),y(xs.$$.fragment,o),y(Es.$$.fragment,o),y(Zo.$$.fragment,o),y(zs.$$.fragment,o),y(qs.$$.fragment,o),y(Cs.$$.fragment,o),y(As.$$.fragment,o),y(tn.$$.fragment,o),y(Is.$$.fragment,o),y(Ns.$$.fragment,o),y(Ss.$$.fragment,o),y(Qs.$$.fragment,o),y(nn.$$.fragment,o),y(Hs.$$.fragment,o),y(Rs.$$.fragment,o),y(Us.$$.fragment,o),y(an.$$.fragment,o),y(Ys.$$.fragment,o),y(rn.$$.fragment,o),y(Zs.$$.fragment,o),y(ea.$$.fragment,o),y(dn.$$.fragment,o),y(aa.$$.fragment,o),y(cn.$$.fragment,o),y(ra.$$.fragment,o),y(ia.$$.fragment,o),y(la.$$.fragment,o),y(mn.$$.fragment,o),y(ma.$$.fragment,o),y(fn.$$.fragment,o),y(fa.$$.fragment,o),y(pa.$$.fragment,o),y(ga.$$.fragment,o),y(gn.$$.fragment,o),y(va.$$.fragment,o),y(un.$$.fragment,o),y(ka.$$.fragment,o),y(b
a.$$.fragment,o),y(Ta.$$.fragment,o),y(wn.$$.fragment,o),y($a.$$.fragment,o),y(vn.$$.fragment,o),y(Fa.$$.fragment,o),y(Ma.$$.fragment,o),y(Ea.$$.fragment,o),y(bn.$$.fragment,o),y(Pa.$$.fragment,o),y(Tn.$$.fragment,o),y(Oa.$$.fragment,o),Lc=!1},d(o){t(p),o&&t(F),o&&t(g),L(w),o&&t(X),o&&t(E),o&&t(ae),o&&t(R),L(K),o&&t(D),o&&t(Y),o&&t(C),o&&t(ee),o&&t(re),o&&t(te),o&&t(ie),o&&t(O),o&&t(B),o&&t(oe),o&&t(Te),o&&t(le),o&&t(sd),o&&t(Ot),L(xn),o&&t(ad),o&&t(ye),o&&t(pd),o&&t(gt),o&&t(_d),o&&t(ut),o&&t(wd),o&&t(Ao),o&&t(vd),o&&t(Io),o&&t(kd),o&&t(Ve),o&&t(Ed),o&&t(No),o&&t(zd),o&&t(jt),L(Fn),o&&t(qd),o&&t(At),o&&t(Cd),L(Mn,o),o&&t(Pd),o&&t(It),L(En),o&&t(Od),o&&t(je),L(zn),L(Cn),o&&t(jd),o&&t(Bt),L(Pn),o&&t(Ad),o&&t(ft),L(On),o&&t(Id),o&&t(Wt),L(jn),o&&t(Nd),o&&t(pt),L(An),o&&t(Sd),o&&t(Qt),L(Nn),o&&t(Dd),o&&t(Ht),L(Sn),o&&t(Bd),o&&t(Rt),L(Dn),o&&t(Wd),o&&t(Ut),L(Bn),o&&t(Qd),o&&t(Vt),L(Wn),o&&t(Hd),o&&t(Gt),L(Qn),o&&t(Rd),o&&t(Kt),L(Hn),o&&t(Ud),o&&t(Jt),L(Rn),o&&t(Vd),o&&t(Xt),L(Un),o&&t(Gd),o&&t(Yt),L(Vn),o&&t(Kd),o&&t(Zt),L(Gn),o&&t(Jd),o&&t(eo),L(Kn),o&&t(Xd),o&&t(to),L(Jn),o&&t(Yd),o&&t(oo),L(Xn),o&&t(Zd),o&&t(no),L(Yn),o&&t(ec),o&&t(so),L(Zn),o&&t(tc),o&&t(qe),L(es),L(rs),L(Vo),L(is),o&&t(oc),o&&t(io),L(ls),o&&t(nc),o&&t(We),L(ds),L(ps),L(Ko),L(gs),o&&t(sc),o&&t(co),L(us),o&&t(ac),o&&t(Qe),L(_s),L(bs),L(Xo),L(Ts),L(ys),o&&t(rc),o&&t(mo),L(Ls),o&&t(ic),o&&t(He),L(xs),L(Es),L(Zo),L(zs),o&&t(lc),o&&t(po),L(qs),o&&t(dc),o&&t(Re),L(Cs),L(As),L(tn),L(Is),o&&t(cc),o&&t(uo),L(Ns),o&&t(hc),o&&t(Ue),L(Ss),L(Qs),L(nn),L(Hs),o&&t(mc),o&&t(vo),L(Rs),o&&t(fc),o&&t(Ee),L(Us),L(an),L(Ys),L(rn),o&&t(pc),o&&t(To),L(Zs),o&&t(gc),o&&t(Ae),L(ea),L(dn),L(aa),L(cn),L(ra),o&&t(uc),o&&t(Lo),L(ia),o&&t(_c),o&&t(Ie),L(la),L(mn),L(ma),L(fn),L(fa),o&&t(wc),o&&t(Fo),L(pa),o&&t(vc),o&&t(Ne),L(ga),L(gn),L(va),L(un),L(ka),o&&t(kc),o&&t(Eo),L(ba),o&&t(bc),o&&t(Se),L(Ta),L(wn),L($a),L(vn),L(Fa),o&&t(Tc),o&&t(qo),L(Ma),o&&t(yc),o&&t(De),L(Ea),L(bn),L(Pa),L(Tn),L(Oa)}}}const y1={local:"longformer",sections:[{local:"overview",title:"Overview"},{local:"longformer-self-attention",title:"Longformer Self Attention"},{local:"training",title:"Training"},{local:"transformers.LongformerConfig",title:"LongformerConfig"},{local:"transformers.LongformerTokenizer",title:"LongformerTokenizer"},{local:"transformers.LongformerTokenizerFast",title:"LongformerTokenizerFast"},{local:"transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput",title:"Longformer specific 
outputs"},{local:"transformers.LongformerModel",title:"LongformerModel"},{local:"transformers.LongformerForMaskedLM",title:"LongformerForMaskedLM"},{local:"transformers.LongformerForSequenceClassification",title:"LongformerForSequenceClassification"},{local:"transformers.LongformerForMultipleChoice",title:"LongformerForMultipleChoice"},{local:"transformers.LongformerForTokenClassification",title:"LongformerForTokenClassification"},{local:"transformers.LongformerForQuestionAnswering",title:"LongformerForQuestionAnswering"},{local:"transformers.TFLongformerModel",title:"TFLongformerModel"},{local:"transformers.TFLongformerForMaskedLM",title:"TFLongformerForMaskedLM"},{local:"transformers.TFLongformerForQuestionAnswering",title:"TFLongformerForQuestionAnswering"},{local:"transformers.TFLongformerForSequenceClassification",title:"TFLongformerForSequenceClassification"},{local:"transformers.TFLongformerForTokenClassification",title:"TFLongformerForTokenClassification"},{local:"transformers.TFLongformerForMultipleChoice",title:"TFLongformerForMultipleChoice"}],title:"Longformer"};function L1(W,p,F){let{fw:g}=p;return W.$$set=u=>{"fw"in u&&F(0,g=u.fw)},[g]}class q1 extends e1{constructor(p){super();t1(this,p,L1,T1,o1,{fw:0})}}export{q1 as default,y1 as metadata};
9,901
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/mt5.mdx-bab03e20.js
import{S as ap,i as ip,s as lp,e as n,k as d,w as f,t as a,L as dp,c as r,d as s,m as p,a as o,x as h,h as i,b as l,J as e,g as c,y as u,K as pp,q as g,o as _,B as v}from"../../chunks/vendor-b1433968.js";import{D as T}from"../../chunks/Docstring-ff504c58.js";import{C as Ce}from"../../chunks/CodeBlock-a320dbd7.js";import{I as P}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function mp(Gr){let D,Wt,C,A,ws,Ae,Or,ys,Vr,En,U,re,bs,Se,Ur,zs,Wr,Mn,oe,Br,Le,Hr,Rr,xn,Bt,Xr,qn,Ht,Es,Kr,jn,ae,Jr,De,Qr,Yr,Fn,Rt,Zr,Pn,y,Ms,xs,Ne,eo,to,qs,js,Ie,so,no,Fs,Ps,Ge,ro,oo,Cs,As,Oe,ao,io,Ss,Xt,Ve,lo,po,Cn,N,mo,Ue,co,fo,We,ho,uo,An,W,ie,Ls,Be,go,Ds,_o,Sn,S,He,vo,L,ko,Kt,To,$o,Jt,wo,yo,Re,bo,zo,Eo,B,Mo,Qt,xo,qo,Yt,jo,Fo,Ln,H,le,Ns,Xe,Po,Is,Co,Dn,k,Ke,Ao,Je,So,Qe,Lo,Do,No,Ye,Io,Zt,Go,Oo,Vo,R,Uo,Gs,Wo,Bo,Os,Ho,Ro,Xo,I,Ze,Ko,Vs,Jo,Qo,et,es,Yo,Us,Zo,ea,ts,ta,Ws,sa,na,de,tt,ra,Bs,oa,aa,pe,st,ia,Hs,la,da,me,nt,pa,rt,ma,Rs,ca,fa,Nn,ce,ha,ss,ua,ga,In,X,fe,Xs,ot,_a,Ks,va,Gn,w,at,ka,K,Ta,Js,$a,wa,it,ya,ba,za,lt,Ea,ns,Ma,xa,qa,G,dt,ja,Qs,Fa,Pa,pt,rs,Ca,Ys,Aa,Sa,os,La,Zs,Da,Na,he,mt,Ia,en,Ga,On,ue,Oa,as,Va,Ua,Vn,J,ge,tn,ct,Wa,sn,Ba,Un,b,ft,Ha,ht,Ra,is,Xa,Ka,Ja,nn,Qa,Ya,ut,Wn,Q,_e,rn,gt,Za,on,ei,Bn,z,_t,ti,vt,si,ls,ni,ri,oi,an,ai,ii,kt,Hn,Y,ve,ln,Tt,li,dn,di,Rn,E,$t,pi,wt,mi,ds,ci,fi,hi,pn,ui,gi,yt,Xn,Z,ke,mn,bt,_i,cn,vi,Kn,M,zt,ki,Et,Ti,ps,$i,wi,yi,fn,bi,zi,Mt,Jn,ee,Te,hn,xt,Ei,un,Mi,Qn,x,qt,xi,jt,qi,ms,ji,Fi,Pi,gn,Ci,Ai,Ft,Yn,te,$e,_n,Pt,Si,vn,Li,Zn,q,Ct,Di,At,Ni,cs,Ii,Gi,Oi,kn,Vi,Ui,St,er,se,we,Tn,Lt,Wi,$n,Bi,tr,j,Dt,Hi,Nt,Ri,fs,Xi,Ki,Ji,wn,Qi,Yi,It,sr,ne,ye,yn,Gt,Zi,bn,el,nr,F,Ot,tl,Vt,sl,hs,nl,rl,ol,zn,al,il,Ut,rr;return Ae=new P({}),Se=new P({}),Be=new P({}),He=new T({props:{name:"class transformers.MT5Config",anchor:"transformers.MT5Config",parameters:[{name:"vocab_size",val:" = 250112"},{name:"d_model",val:" = 512"},{name:"d_kv",val:" = 64"},{name:"d_ff",val:" = 1024"},{name:"num_layers",val:" = 8"},{name:"num_decoder_layers",val:" = None"},{name:"num_heads",val:" = 6"},{name:"relative_attention_num_buckets",val:" = 32"},{name:"dropout_rate",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 1e-06"},{name:"initializer_factor",val:" = 1.0"},{name:"feed_forward_proj",val:" = 'gated-gelu'"},{name:"is_encoder_decoder",val:" = True"},{name:"use_cache",val:" = True"},{name:"tokenizer_class",val:" = 'T5Tokenizer'"},{name:"tie_word_embeddings",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"eos_token_id",val:" = 1"},{name:"decoder_start_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/configuration_mt5.py#L24",parametersDescription:[{anchor:"transformers.MT5Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 250112) &#x2014; Vocabulary size of the T5 model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model">T5Model</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5Model">TFT5Model</a>.`,name:"vocab_size"},{anchor:"transformers.MT5Config.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Size of the encoder layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.MT5Config.d_kv",description:`<strong>d_kv</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Size of the key, query, value projections per attention head. <code>d_kv</code> has to be equal to <code>d_model // num_heads</code>.`,name:"d_kv"},{anchor:"transformers.MT5Config.d_ff",description:`<strong>d_ff</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Size of the intermediate feed forward layer in each <code>T5Block</code>.`,name:"d_ff"},{anchor:"transformers.MT5Config.num_layers",description:`<strong>num_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_layers"},{anchor:"transformers.MT5Config.num_decoder_layers",description:`<strong>num_decoder_layers</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of hidden layers in the Transformer decoder. Will use the same value as <code>num_layers</code> if not set.`,name:"num_decoder_layers"},{anchor:"transformers.MT5Config.num_heads",description:`<strong>num_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 6) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_heads"},{anchor:"transformers.MT5Config.relative_attention_num_buckets",description:`<strong>relative_attention_num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer.`,name:"relative_attention_num_buckets"},{anchor:"transformers.MT5Config.dropout_rate",description:`<strong>dropout_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The ratio for all dropout layers.`,name:"dropout_rate"},{anchor:"transformers.MT5Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.MT5Config.initializer_factor",description:`<strong>initializer_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"},{anchor:"transformers.MT5Config.feed_forward_proj",description:`<strong>feed_forward_proj</strong> (<code>string</code>, <em>optional</em>, defaults to <code>&quot;gated-gelu&quot;</code>) &#x2014; Type of feed forward layer to be used. 
Should be one of <code>&quot;relu&quot;</code> or <code>&quot;gated-gelu&quot;</code>.`,name:"feed_forward_proj"},{anchor:"transformers.MT5Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),Xe=new P({}),Ke=new T({props:{name:"class transformers.T5Tokenizer",anchor:"transformers.T5Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"extra_ids",val:" = 100"},{name:"additional_special_tokens",val:" = None"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L53",parametersDescription:[{anchor:"transformers.T5Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.T5Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.T5Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.T5Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.T5Tokenizer.extra_ids",description:`<strong>extra_ids</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as &#x201C;<extra<em>id{%d}&gt;&#x201D; where &#x201D;{%d}&#x201D; is a number between 0 and extra_ids-1. 
Extra tokens are indexed from the end of the vocabulary up to beginning (&#x201C;<extra_id_0>&#x201D; is the last token in the vocabulary like in T5 preprocessing see <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117" rel="nofollow">here</a>).</extra_id_0></extra<em>`,name:"extra_ids"},{anchor:"transformers.T5Tokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.T5Tokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Ze=new T({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.T5Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L220",parametersDescription:[{anchor:"transformers.T5Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.T5Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),tt=new T({props:{name:"convert_tokens_to_string",anchor:"transformers.T5Tokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L281"}}),st=new T({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.T5Tokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L198",parametersDescription:[{anchor:"transformers.T5Tokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.T5Tokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),nt=new T({props:{name:"get_special_tokens_mask",anchor:"transformers.T5Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L160",parametersDescription:[{anchor:"transformers.T5Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.T5Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.T5Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ot=new P({}),at=new T({props:{name:"class transformers.T5TokenizerFast",anchor:"transformers.T5TokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"extra_ids",val:" = 100"},{name:"additional_special_tokens",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5_fast.py#L63",parametersDescription:[{anchor:"transformers.T5TokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.T5TokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.T5TokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.T5TokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.T5TokenizerFast.extra_ids",description:`<strong>extra_ids</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as &#x201C;<extra<em>id{%d}&gt;&#x201D; where &#x201D;{%d}&#x201D; is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginning (&#x201C;<extra_id_0>&#x201D; is the last token in the vocabulary like in T5 preprocessing see <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117" rel="nofollow">here</a>).</extra_id_0></extra<em>`,name:"extra_ids"},{anchor:"transformers.T5TokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),dt=new T({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.T5TokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5_fast.py#L164",parametersDescription:[{anchor:"transformers.T5TokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.T5TokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),mt=new T({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.T5TokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5_fast.py#L190",parametersDescription:[{anchor:"transformers.T5TokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.T5TokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, 
<em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ct=new P({}),ft=new T({props:{name:"class transformers.MT5Model",anchor:"transformers.MT5Model",parameters:[{name:"config",val:": T5Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_mt5.py#L28"}}),ut=new Ce({props:{code:`from transformers import MT5Model, T5Tokenizer model = MT5Model.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." summary = "Weiter Verhandlung in Syrien." inputs = tokenizer(article, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(summary, return_tensors="pt") outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MT5Model, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = MT5Model.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary = <span class="hljs-string">&quot;Weiter Verhandlung in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(summary, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], decoder_input_ids=labels[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = outputs.last_hidden_state`}}),gt=new P({}),_t=new T({props:{name:"class transformers.MT5ForConditionalGeneration",anchor:"transformers.MT5ForConditionalGeneration",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_mt5.py#L61"}}),kt=new Ce({props:{code:`from transformers import MT5ForConditionalGeneration, T5Tokenizer model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." summary = "Weiter Verhandlung in Syrien." 
inputs = tokenizer(article, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(summary, return_tensors="pt") outputs = model(**inputs,labels=labels["input_ids"]) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MT5ForConditionalGeneration, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = MT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary = <span class="hljs-string">&quot;Weiter Verhandlung in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(summary, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs,labels=labels[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),Tt=new P({}),$t=new T({props:{name:"class transformers.MT5EncoderModel",anchor:"transformers.MT5EncoderModel",parameters:[{name:"config",val:": T5Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_mt5.py#L92"}}),yt=new Ce({props:{code:`from transformers import MT5EncoderModel, T5Tokenizer model = MT5EncoderModel.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." 
input_ids = tokenizer(article, return_tensors="pt").input_ids outputs = model(input_ids) hidden_state = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MT5EncoderModel, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = MT5EncoderModel.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(article, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_state = outputs.last_hidden_state`}}),bt=new P({}),zt=new T({props:{name:"class transformers.TFMT5Model",anchor:"transformers.TFMT5Model",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_tf_mt5.py#L28"}}),Mt=new Ce({props:{code:`from transformers import TFMT5Model, T5Tokenizer model = TFMT5Model.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." summary = "Weiter Verhandlung in Syrien." inputs = tokenizer(article, return_tensors="tf") with tokenizer.as_target_tokenizer(): labels = tokenizer(summary, return_tensors="tf") outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFMT5Model, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMT5Model.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary = <span class="hljs-string">&quot;Weiter Verhandlung in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(summary, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], decoder_input_ids=labels[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = outputs.last_hidden_state`}}),xt=new P({}),qt=new T({props:{name:"class transformers.TFMT5ForConditionalGeneration",anchor:"transformers.TFMT5ForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_tf_mt5.py#L52"}}),Ft=new Ce({props:{code:`from transformers import TFMT5ForConditionalGeneration, T5Tokenizer model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." summary = "Weiter Verhandlung in Syrien." inputs = tokenizer(article, return_tensors="tf") with tokenizer.as_target_tokenizer(): labels = tokenizer(summary, return_tensors="tf") outputs = model(**inputs,labels=labels["input_ids"]) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFMT5ForConditionalGeneration, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary = <span class="hljs-string">&quot;Weiter Verhandlung in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(summary, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs,labels=labels[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),Pt=new P({}),Ct=new T({props:{name:"class transformers.TFMT5EncoderModel",anchor:"transformers.TFMT5EncoderModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_tf_mt5.py#L77"}}),St=new Ce({props:{code:`from transformers import TFMT5EncoderModel, T5Tokenizer model = TFMT5EncoderModel.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." 
input_ids = tokenizer(article, return_tensors="tf").input_ids outputs = model(input_ids) hidden_state = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFMT5EncoderModel, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMT5EncoderModel.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(article, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_state = outputs.last_hidden_state`}}),Lt=new P({}),Dt=new T({props:{name:"class transformers.FlaxMT5Model",anchor:"transformers.FlaxMT5Model",parameters:[{name:"config",val:": T5Config"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_flax_mt5.py#L28"}}),It=new Ce({props:{code:`from transformers import FlaxMT5Model, T5Tokenizer model = FlaxMT5Model.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." summary = "Weiter Verhandlung in Syrien." inputs = tokenizer(article, return_tensors="np") with tokenizer.as_target_tokenizer(): decoder_input_ids = tokenizer(summary, return_tensors="np").input_ids outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids) hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxMT5Model, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMT5Model.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary = <span class="hljs-string">&quot;Weiter Verhandlung in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> decoder_input_ids = tokenizer(summary, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = outputs.last_hidden_state`}}),Gt=new P({}),Ot=new T({props:{name:"class transformers.FlaxMT5ForConditionalGeneration",anchor:"transformers.FlaxMT5ForConditionalGeneration",parameters:[{name:"config",val:": T5Config"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mt5/modeling_flax_mt5.py#L55"}}),Ut=new Ce({props:{code:`from transformers import FlaxMT5ForConditionalGeneration, T5Tokenizer model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small") tokenizer = T5Tokenizer.from_pretrained("google/mt5-small") article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." summary = "Weiter Verhandlung in Syrien." inputs = tokenizer(article, return_tensors="np") with tokenizer.as_target_tokenizer(): decoder_input_ids = tokenizer(summary, return_tensors="np").input_ids outputs = model(**inputs, decoder_input_ids=decoder_input_ids) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxMT5ForConditionalGeneration, T5Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/mt5-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary = <span class="hljs-string">&quot;Weiter Verhandlung in Syrien.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> decoder_input_ids = tokenizer(summary, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){D=n("meta"),Wt=d(),C=n("h1"),A=n("a"),ws=n("span"),f(Ae.$$.fragment),Or=d(),ys=n("span"),Vr=a("mT5"),En=d(),U=n("h2"),re=n("a"),bs=n("span"),f(Se.$$.fragment),Ur=d(),zs=n("span"),Wr=a("Overview"),Mn=d(),oe=n("p"),Br=a("The mT5 model was presented in "),Le=n("a"),Hr=a("mT5: A massively multilingual pre-trained text-to-text transformer"),Rr=a(` by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.`),xn=d(),Bt=n("p"),Xr=a("The abstract from the paper is the following:"),qn=d(),Ht=n("p"),Es=n("em"),Kr=a(`The recent \u201CText-to-Text Transfer Transformer\u201D (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We detail the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. We also describe a simple technique to prevent \u201Caccidental translation\u201D in the zero-shot setting, where a generative model chooses to (partially) translate its prediction into the wrong language. All of the code and model checkpoints used in this work are publicly available.`),jn=d(),ae=n("p"),Jr=a("Note: mT5 was only pre-trained on "),De=n("a"),Qr=a("mC4"),Yr=a(` excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task, unlike the original T5 model. Since mT5 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix.`),Fn=d(),Rt=n("p"),Zr=a("Google has released the following variants:"),Pn=d(),y=n("ul"),Ms=n("li"),xs=n("p"),Ne=n("a"),eo=a("google/mt5-small"),to=d(),qs=n("li"),js=n("p"),Ie=n("a"),so=a("google/mt5-base"),no=d(),Fs=n("li"),Ps=n("p"),Ge=n("a"),ro=a("google/mt5-large"),oo=d(),Cs=n("li"),As=n("p"),Oe=n("a"),ao=a("google/mt5-xl"),io=d(),Ss=n("li"),Xt=n("p"),Ve=n("a"),lo=a("google/mt5-xxl"),po=a("."),Cn=d(),N=n("p"),mo=a("This model was contributed by "),Ue=n("a"),co=a("patrickvonplaten"),fo=a(`. The original code can be found `),We=n("a"),ho=a("here"),uo=a("."),An=d(),W=n("h2"),ie=n("a"),Ls=n("span"),f(Be.$$.fragment),go=d(),Ds=n("span"),_o=a("MT5Config"),Sn=d(),S=n("div"),f(He.$$.fragment),vo=d(),L=n("p"),ko=a("This is the configuration class to store the configuration of a "),Kt=n("a"),To=a("MT5Model"),$o=a(` or a `),Jt=n("a"),wo=a("TFMT5Model"),yo=a(`. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 `),Re=n("a"),bo=a("google/mt5-small"),zo=a(" architecture."),Eo=d(),B=n("p"),Mo=a("Configuration objects inherit from "),Qt=n("a"),xo=a("PretrainedConfig"),qo=a(` and can be used to control the model outputs. 
Read the documentation from `),Yt=n("a"),jo=a("PretrainedConfig"),Fo=a(" for more information."),Ln=d(),H=n("h2"),le=n("a"),Ns=n("span"),f(Xe.$$.fragment),Po=d(),Is=n("span"),Co=a("MT5Tokenizer"),Dn=d(),k=n("div"),f(Ke.$$.fragment),Ao=d(),Je=n("p"),So=a("Construct a T5 tokenizer. Based on "),Qe=n("a"),Lo=a("SentencePiece"),Do=a("."),No=d(),Ye=n("p"),Io=a("This tokenizer inherits from "),Zt=n("a"),Go=a("PreTrainedTokenizer"),Oo=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Vo=d(),R=n("p"),Uo=a(`Attributes: sp_model (`),Gs=n("code"),Wo=a("SentencePieceProcessor"),Bo=a(`): The `),Os=n("em"),Ho=a("SentencePiece"),Ro=a(" processor that is used for every conversion (string, tokens and IDs)."),Xo=d(),I=n("div"),f(Ze.$$.fragment),Ko=d(),Vs=n("p"),Jo=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Qo=d(),et=n("ul"),es=n("li"),Yo=a("single sequence: "),Us=n("code"),Zo=a("X </s>"),ea=d(),ts=n("li"),ta=a("pair of sequences: "),Ws=n("code"),sa=a("A </s> B </s>"),na=d(),de=n("div"),f(tt.$$.fragment),ra=d(),Bs=n("p"),oa=a("Converts a sequence of tokens (string) in a single string."),aa=d(),pe=n("div"),f(st.$$.fragment),ia=d(),Hs=n("p"),la=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned.`),da=d(),me=n("div"),f(nt.$$.fragment),pa=d(),rt=n("p"),ma=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Rs=n("code"),ca=a("prepare_for_model"),fa=a(" method."),Nn=d(),ce=n("p"),ha=a("See "),ss=n("a"),ua=a("T5Tokenizer"),ga=a(" for all details."),In=d(),X=n("h2"),fe=n("a"),Xs=n("span"),f(ot.$$.fragment),_a=d(),Ks=n("span"),va=a("MT5TokenizerFast"),Gn=d(),w=n("div"),f(at.$$.fragment),ka=d(),K=n("p"),Ta=a("Construct a \u201Cfast\u201D T5 tokenizer (backed by HuggingFace\u2019s "),Js=n("em"),$a=a("tokenizers"),wa=a(" library). Based on "),it=n("a"),ya=a("Unigram"),ba=a("."),za=d(),lt=n("p"),Ea=a("This tokenizer inherits from "),ns=n("a"),Ma=a("PreTrainedTokenizerFast"),xa=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),qa=d(),G=n("div"),f(dt.$$.fragment),ja=d(),Qs=n("p"),Fa=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Pa=d(),pt=n("ul"),rs=n("li"),Ca=a("single sequence: "),Ys=n("code"),Aa=a("X </s>"),Sa=d(),os=n("li"),La=a("pair of sequences: "),Zs=n("code"),Da=a("A </s> B </s>"),Na=d(),he=n("div"),f(mt.$$.fragment),Ia=d(),en=n("p"),Ga=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned.`),On=d(),ue=n("p"),Oa=a("See "),as=n("a"),Va=a("T5TokenizerFast"),Ua=a(" for all details."),Vn=d(),J=n("h2"),ge=n("a"),tn=n("span"),f(ct.$$.fragment),Wa=d(),sn=n("span"),Ba=a("MT5Model"),Un=d(),b=n("div"),f(ft.$$.fragment),Ha=d(),ht=n("p"),Ra=a("This class overrides "),is=n("a"),Xa=a("T5Model"),Ka=a(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ja=d(),nn=n("p"),Qa=a("Examples:"),Ya=d(),f(ut.$$.fragment),Wn=d(),Q=n("h2"),_e=n("a"),rn=n("span"),f(gt.$$.fragment),Za=d(),on=n("span"),ei=a("MT5ForConditionalGeneration"),Bn=d(),z=n("div"),f(_t.$$.fragment),ti=d(),vt=n("p"),si=a("This class overrides "),ls=n("a"),ni=a("T5ForConditionalGeneration"),ri=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),oi=d(),an=n("p"),ai=a("Examples:"),ii=d(),f(kt.$$.fragment),Hn=d(),Y=n("h2"),ve=n("a"),ln=n("span"),f(Tt.$$.fragment),li=d(),dn=n("span"),di=a("MT5EncoderModel"),Rn=d(),E=n("div"),f($t.$$.fragment),pi=d(),wt=n("p"),mi=a("This class overrides "),ds=n("a"),ci=a("T5EncoderModel"),fi=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),hi=d(),pn=n("p"),ui=a("Examples:"),gi=d(),f(yt.$$.fragment),Xn=d(),Z=n("h2"),ke=n("a"),mn=n("span"),f(bt.$$.fragment),_i=d(),cn=n("span"),vi=a("TFMT5Model"),Kn=d(),M=n("div"),f(zt.$$.fragment),ki=d(),Et=n("p"),Ti=a("This class overrides "),ps=n("a"),$i=a("TFT5Model"),wi=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),yi=d(),fn=n("p"),bi=a("Examples:"),zi=d(),f(Mt.$$.fragment),Jn=d(),ee=n("h2"),Te=n("a"),hn=n("span"),f(xt.$$.fragment),Ei=d(),un=n("span"),Mi=a("TFMT5ForConditionalGeneration"),Qn=d(),x=n("div"),f(qt.$$.fragment),xi=d(),jt=n("p"),qi=a("This class overrides "),ms=n("a"),ji=a("TFT5ForConditionalGeneration"),Fi=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Pi=d(),gn=n("p"),Ci=a("Examples:"),Ai=d(),f(Ft.$$.fragment),Yn=d(),te=n("h2"),$e=n("a"),_n=n("span"),f(Pt.$$.fragment),Si=d(),vn=n("span"),Li=a("TFMT5EncoderModel"),Zn=d(),q=n("div"),f(Ct.$$.fragment),Di=d(),At=n("p"),Ni=a("This class overrides "),cs=n("a"),Ii=a("TFT5EncoderModel"),Gi=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Oi=d(),kn=n("p"),Vi=a("Examples:"),Ui=d(),f(St.$$.fragment),er=d(),se=n("h2"),we=n("a"),Tn=n("span"),f(Lt.$$.fragment),Wi=d(),$n=n("span"),Bi=a("FlaxMT5Model"),tr=d(),j=n("div"),f(Dt.$$.fragment),Hi=d(),Nt=n("p"),Ri=a("This class overrides "),fs=n("a"),Xi=a("FlaxT5Model"),Ki=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Ji=d(),wn=n("p"),Qi=a("Examples:"),Yi=d(),f(It.$$.fragment),sr=d(),ne=n("h2"),ye=n("a"),yn=n("span"),f(Gt.$$.fragment),Zi=d(),bn=n("span"),el=a("FlaxMT5ForConditionalGeneration"),nr=d(),F=n("div"),f(Ot.$$.fragment),tl=d(),Vt=n("p"),sl=a("This class overrides "),hs=n("a"),nl=a("FlaxT5ForConditionalGeneration"),rl=a(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),ol=d(),zn=n("p"),al=a("Examples:"),il=d(),f(Ut.$$.fragment),this.h()},l(t){const m=dp('[data-svelte="svelte-1phssyn"]',document.head);D=r(m,"META",{name:!0,content:!0}),m.forEach(s),Wt=p(t),C=r(t,"H1",{class:!0});var or=o(C);A=r(or,"A",{id:!0,class:!0,href:!0});var fl=o(A);ws=r(fl,"SPAN",{});var hl=o(ws);h(Ae.$$.fragment,hl),hl.forEach(s),fl.forEach(s),Or=p(or),ys=r(or,"SPAN",{});var ul=o(ys);Vr=i(ul,"mT5"),ul.forEach(s),or.forEach(s),En=p(t),U=r(t,"H2",{class:!0});var ar=o(U);re=r(ar,"A",{id:!0,class:!0,href:!0});var gl=o(re);bs=r(gl,"SPAN",{});var _l=o(bs);h(Se.$$.fragment,_l),_l.forEach(s),gl.forEach(s),Ur=p(ar),zs=r(ar,"SPAN",{});var vl=o(zs);Wr=i(vl,"Overview"),vl.forEach(s),ar.forEach(s),Mn=p(t),oe=r(t,"P",{});var ir=o(oe);Br=i(ir,"The mT5 model was presented in "),Le=r(ir,"A",{href:!0,rel:!0});var kl=o(Le);Hr=i(kl,"mT5: A massively multilingual pre-trained text-to-text transformer"),kl.forEach(s),Rr=i(ir,` by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.`),ir.forEach(s),xn=p(t),Bt=r(t,"P",{});var Tl=o(Bt);Xr=i(Tl,"The abstract from the paper is the following:"),Tl.forEach(s),qn=p(t),Ht=r(t,"P",{});var $l=o(Ht);Es=r($l,"EM",{});var wl=o(Es);Kr=i(wl,`The recent \u201CText-to-Text Transfer Transformer\u201D (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We detail the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. We also describe a simple technique to prevent \u201Caccidental translation\u201D in the zero-shot setting, where a generative model chooses to (partially) translate its prediction into the wrong language. All of the code and model checkpoints used in this work are publicly available.`),wl.forEach(s),$l.forEach(s),jn=p(t),ae=r(t,"P",{});var lr=o(ae);Jr=i(lr,"Note: mT5 was only pre-trained on "),De=r(lr,"A",{href:!0,rel:!0});var yl=o(De);Qr=i(yl,"mC4"),yl.forEach(s),Yr=i(lr,` excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task, unlike the original T5 model. Since mT5 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. 
If you are doing multi-task fine-tuning, you should use a prefix.`),lr.forEach(s),Fn=p(t),Rt=r(t,"P",{});var bl=o(Rt);Zr=i(bl,"Google has released the following variants:"),bl.forEach(s),Pn=p(t),y=r(t,"UL",{});var O=o(y);Ms=r(O,"LI",{});var zl=o(Ms);xs=r(zl,"P",{});var El=o(xs);Ne=r(El,"A",{href:!0,rel:!0});var Ml=o(Ne);eo=i(Ml,"google/mt5-small"),Ml.forEach(s),El.forEach(s),zl.forEach(s),to=p(O),qs=r(O,"LI",{});var xl=o(qs);js=r(xl,"P",{});var ql=o(js);Ie=r(ql,"A",{href:!0,rel:!0});var jl=o(Ie);so=i(jl,"google/mt5-base"),jl.forEach(s),ql.forEach(s),xl.forEach(s),no=p(O),Fs=r(O,"LI",{});var Fl=o(Fs);Ps=r(Fl,"P",{});var Pl=o(Ps);Ge=r(Pl,"A",{href:!0,rel:!0});var Cl=o(Ge);ro=i(Cl,"google/mt5-large"),Cl.forEach(s),Pl.forEach(s),Fl.forEach(s),oo=p(O),Cs=r(O,"LI",{});var Al=o(Cs);As=r(Al,"P",{});var Sl=o(As);Oe=r(Sl,"A",{href:!0,rel:!0});var Ll=o(Oe);ao=i(Ll,"google/mt5-xl"),Ll.forEach(s),Sl.forEach(s),Al.forEach(s),io=p(O),Ss=r(O,"LI",{});var Dl=o(Ss);Xt=r(Dl,"P",{});var ll=o(Xt);Ve=r(ll,"A",{href:!0,rel:!0});var Nl=o(Ve);lo=i(Nl,"google/mt5-xxl"),Nl.forEach(s),po=i(ll,"."),ll.forEach(s),Dl.forEach(s),O.forEach(s),Cn=p(t),N=r(t,"P",{});var us=o(N);mo=i(us,"This model was contributed by "),Ue=r(us,"A",{href:!0,rel:!0});var Il=o(Ue);co=i(Il,"patrickvonplaten"),Il.forEach(s),fo=i(us,`. The original code can be found `),We=r(us,"A",{href:!0,rel:!0});var Gl=o(We);ho=i(Gl,"here"),Gl.forEach(s),uo=i(us,"."),us.forEach(s),An=p(t),W=r(t,"H2",{class:!0});var dr=o(W);ie=r(dr,"A",{id:!0,class:!0,href:!0});var Ol=o(ie);Ls=r(Ol,"SPAN",{});var Vl=o(Ls);h(Be.$$.fragment,Vl),Vl.forEach(s),Ol.forEach(s),go=p(dr),Ds=r(dr,"SPAN",{});var Ul=o(Ds);_o=i(Ul,"MT5Config"),Ul.forEach(s),dr.forEach(s),Sn=p(t),S=r(t,"DIV",{class:!0});var gs=o(S);h(He.$$.fragment,gs),vo=p(gs),L=r(gs,"P",{});var be=o(L);ko=i(be,"This is the configuration class to store the configuration of a "),Kt=r(be,"A",{href:!0});var Wl=o(Kt);To=i(Wl,"MT5Model"),Wl.forEach(s),$o=i(be,` or a `),Jt=r(be,"A",{href:!0});var Bl=o(Jt);wo=i(Bl,"TFMT5Model"),Bl.forEach(s),yo=i(be,`. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 `),Re=r(be,"A",{href:!0,rel:!0});var Hl=o(Re);bo=i(Hl,"google/mt5-small"),Hl.forEach(s),zo=i(be," architecture."),be.forEach(s),Eo=p(gs),B=r(gs,"P",{});var _s=o(B);Mo=i(_s,"Configuration objects inherit from "),Qt=r(_s,"A",{href:!0});var Rl=o(Qt);xo=i(Rl,"PretrainedConfig"),Rl.forEach(s),qo=i(_s,` and can be used to control the model outputs. Read the documentation from `),Yt=r(_s,"A",{href:!0});var Xl=o(Yt);jo=i(Xl,"PretrainedConfig"),Xl.forEach(s),Fo=i(_s," for more information."),_s.forEach(s),gs.forEach(s),Ln=p(t),H=r(t,"H2",{class:!0});var pr=o(H);le=r(pr,"A",{id:!0,class:!0,href:!0});var Kl=o(le);Ns=r(Kl,"SPAN",{});var Jl=o(Ns);h(Xe.$$.fragment,Jl),Jl.forEach(s),Kl.forEach(s),Po=p(pr),Is=r(pr,"SPAN",{});var Ql=o(Is);Co=i(Ql,"MT5Tokenizer"),Ql.forEach(s),pr.forEach(s),Dn=p(t),k=r(t,"DIV",{class:!0});var $=o(k);h(Ke.$$.fragment,$),Ao=p($),Je=r($,"P",{});var mr=o(Je);So=i(mr,"Construct a T5 tokenizer. Based on "),Qe=r(mr,"A",{href:!0,rel:!0});var Yl=o(Qe);Lo=i(Yl,"SentencePiece"),Yl.forEach(s),Do=i(mr,"."),mr.forEach(s),No=p($),Ye=r($,"P",{});var cr=o(Ye);Io=i(cr,"This tokenizer inherits from "),Zt=r(cr,"A",{href:!0});var Zl=o(Zt);Go=i(Zl,"PreTrainedTokenizer"),Zl.forEach(s),Oo=i(cr,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),cr.forEach(s),Vo=p($),R=r($,"P",{});var vs=o(R);Uo=i(vs,`Attributes: sp_model (`),Gs=r(vs,"CODE",{});var ed=o(Gs);Wo=i(ed,"SentencePieceProcessor"),ed.forEach(s),Bo=i(vs,`): The `),Os=r(vs,"EM",{});var td=o(Os);Ho=i(td,"SentencePiece"),td.forEach(s),Ro=i(vs," processor that is used for every conversion (string, tokens and IDs)."),vs.forEach(s),Xo=p($),I=r($,"DIV",{class:!0});var ks=o(I);h(Ze.$$.fragment,ks),Ko=p(ks),Vs=r(ks,"P",{});var sd=o(Vs);Jo=i(sd,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),sd.forEach(s),Qo=p(ks),et=r(ks,"UL",{});var fr=o(et);es=r(fr,"LI",{});var dl=o(es);Yo=i(dl,"single sequence: "),Us=r(dl,"CODE",{});var nd=o(Us);Zo=i(nd,"X </s>"),nd.forEach(s),dl.forEach(s),ea=p(fr),ts=r(fr,"LI",{});var pl=o(ts);ta=i(pl,"pair of sequences: "),Ws=r(pl,"CODE",{});var rd=o(Ws);sa=i(rd,"A </s> B </s>"),rd.forEach(s),pl.forEach(s),fr.forEach(s),ks.forEach(s),na=p($),de=r($,"DIV",{class:!0});var hr=o(de);h(tt.$$.fragment,hr),ra=p(hr),Bs=r(hr,"P",{});var od=o(Bs);oa=i(od,"Converts a sequence of tokens (string) in a single string."),od.forEach(s),hr.forEach(s),aa=p($),pe=r($,"DIV",{class:!0});var ur=o(pe);h(st.$$.fragment,ur),ia=p(ur),Hs=r(ur,"P",{});var ad=o(Hs);la=i(ad,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned.`),ad.forEach(s),ur.forEach(s),da=p($),me=r($,"DIV",{class:!0});var gr=o(me);h(nt.$$.fragment,gr),pa=p(gr),rt=r(gr,"P",{});var _r=o(rt);ma=i(_r,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Rs=r(_r,"CODE",{});var id=o(Rs);ca=i(id,"prepare_for_model"),id.forEach(s),fa=i(_r," method."),_r.forEach(s),gr.forEach(s),$.forEach(s),Nn=p(t),ce=r(t,"P",{});var vr=o(ce);ha=i(vr,"See "),ss=r(vr,"A",{href:!0});var ld=o(ss);ua=i(ld,"T5Tokenizer"),ld.forEach(s),ga=i(vr," for all details."),vr.forEach(s),In=p(t),X=r(t,"H2",{class:!0});var kr=o(X);fe=r(kr,"A",{id:!0,class:!0,href:!0});var dd=o(fe);Xs=r(dd,"SPAN",{});var pd=o(Xs);h(ot.$$.fragment,pd),pd.forEach(s),dd.forEach(s),_a=p(kr),Ks=r(kr,"SPAN",{});var md=o(Ks);va=i(md,"MT5TokenizerFast"),md.forEach(s),kr.forEach(s),Gn=p(t),w=r(t,"DIV",{class:!0});var V=o(w);h(at.$$.fragment,V),ka=p(V),K=r(V,"P",{});var Ts=o(K);Ta=i(Ts,"Construct a \u201Cfast\u201D T5 tokenizer (backed by HuggingFace\u2019s "),Js=r(Ts,"EM",{});var cd=o(Js);$a=i(cd,"tokenizers"),cd.forEach(s),wa=i(Ts," library). Based on "),it=r(Ts,"A",{href:!0,rel:!0});var fd=o(it);ya=i(fd,"Unigram"),fd.forEach(s),ba=i(Ts,"."),Ts.forEach(s),za=p(V),lt=r(V,"P",{});var Tr=o(lt);Ea=i(Tr,"This tokenizer inherits from "),ns=r(Tr,"A",{href:!0});var hd=o(ns);Ma=i(hd,"PreTrainedTokenizerFast"),hd.forEach(s),xa=i(Tr,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Tr.forEach(s),qa=p(V),G=r(V,"DIV",{class:!0});var $s=o(G);h(dt.$$.fragment,$s),ja=p($s),Qs=r($s,"P",{});var ud=o(Qs);Fa=i(ud,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A sequence has the following format:`),ud.forEach(s),Pa=p($s),pt=r($s,"UL",{});var $r=o(pt);rs=r($r,"LI",{});var ml=o(rs);Ca=i(ml,"single sequence: "),Ys=r(ml,"CODE",{});var gd=o(Ys);Aa=i(gd,"X </s>"),gd.forEach(s),ml.forEach(s),Sa=p($r),os=r($r,"LI",{});var cl=o(os);La=i(cl,"pair of sequences: "),Zs=r(cl,"CODE",{});var _d=o(Zs);Da=i(_d,"A </s> B </s>"),_d.forEach(s),cl.forEach(s),$r.forEach(s),$s.forEach(s),Na=p(V),he=r(V,"DIV",{class:!0});var wr=o(he);h(mt.$$.fragment,wr),Ia=p(wr),en=r(wr,"P",{});var vd=o(en);Ga=i(vd,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned.`),vd.forEach(s),wr.forEach(s),V.forEach(s),On=p(t),ue=r(t,"P",{});var yr=o(ue);Oa=i(yr,"See "),as=r(yr,"A",{href:!0});var kd=o(as);Va=i(kd,"T5TokenizerFast"),kd.forEach(s),Ua=i(yr," for all details."),yr.forEach(s),Vn=p(t),J=r(t,"H2",{class:!0});var br=o(J);ge=r(br,"A",{id:!0,class:!0,href:!0});var Td=o(ge);tn=r(Td,"SPAN",{});var $d=o(tn);h(ct.$$.fragment,$d),$d.forEach(s),Td.forEach(s),Wa=p(br),sn=r(br,"SPAN",{});var wd=o(sn);Ba=i(wd,"MT5Model"),wd.forEach(s),br.forEach(s),Un=p(t),b=r(t,"DIV",{class:!0});var ze=o(b);h(ft.$$.fragment,ze),Ha=p(ze),ht=r(ze,"P",{});var zr=o(ht);Ra=i(zr,"This class overrides "),is=r(zr,"A",{href:!0});var yd=o(is);Xa=i(yd,"T5Model"),yd.forEach(s),Ka=i(zr,`. Please check the superclass for the appropriate documentation alongside usage examples.`),zr.forEach(s),Ja=p(ze),nn=r(ze,"P",{});var bd=o(nn);Qa=i(bd,"Examples:"),bd.forEach(s),Ya=p(ze),h(ut.$$.fragment,ze),ze.forEach(s),Wn=p(t),Q=r(t,"H2",{class:!0});var Er=o(Q);_e=r(Er,"A",{id:!0,class:!0,href:!0});var zd=o(_e);rn=r(zd,"SPAN",{});var Ed=o(rn);h(gt.$$.fragment,Ed),Ed.forEach(s),zd.forEach(s),Za=p(Er),on=r(Er,"SPAN",{});var Md=o(on);ei=i(Md,"MT5ForConditionalGeneration"),Md.forEach(s),Er.forEach(s),Bn=p(t),z=r(t,"DIV",{class:!0});var Ee=o(z);h(_t.$$.fragment,Ee),ti=p(Ee),vt=r(Ee,"P",{});var Mr=o(vt);si=i(Mr,"This class overrides "),ls=r(Mr,"A",{href:!0});var xd=o(ls);ni=i(xd,"T5ForConditionalGeneration"),xd.forEach(s),ri=i(Mr,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Mr.forEach(s),oi=p(Ee),an=r(Ee,"P",{});var qd=o(an);ai=i(qd,"Examples:"),qd.forEach(s),ii=p(Ee),h(kt.$$.fragment,Ee),Ee.forEach(s),Hn=p(t),Y=r(t,"H2",{class:!0});var xr=o(Y);ve=r(xr,"A",{id:!0,class:!0,href:!0});var jd=o(ve);ln=r(jd,"SPAN",{});var Fd=o(ln);h(Tt.$$.fragment,Fd),Fd.forEach(s),jd.forEach(s),li=p(xr),dn=r(xr,"SPAN",{});var Pd=o(dn);di=i(Pd,"MT5EncoderModel"),Pd.forEach(s),xr.forEach(s),Rn=p(t),E=r(t,"DIV",{class:!0});var Me=o(E);h($t.$$.fragment,Me),pi=p(Me),wt=r(Me,"P",{});var qr=o(wt);mi=i(qr,"This class overrides "),ds=r(qr,"A",{href:!0});var Cd=o(ds);ci=i(Cd,"T5EncoderModel"),Cd.forEach(s),fi=i(qr,`. Please check the superclass for the appropriate documentation alongside usage examples.`),qr.forEach(s),hi=p(Me),pn=r(Me,"P",{});var Ad=o(pn);ui=i(Ad,"Examples:"),Ad.forEach(s),gi=p(Me),h(yt.$$.fragment,Me),Me.forEach(s),Xn=p(t),Z=r(t,"H2",{class:!0});var jr=o(Z);ke=r(jr,"A",{id:!0,class:!0,href:!0});var Sd=o(ke);mn=r(Sd,"SPAN",{});var Ld=o(mn);h(bt.$$.fragment,Ld),Ld.forEach(s),Sd.forEach(s),_i=p(jr),cn=r(jr,"SPAN",{});var Dd=o(cn);vi=i(Dd,"TFMT5Model"),Dd.forEach(s),jr.forEach(s),Kn=p(t),M=r(t,"DIV",{class:!0});var xe=o(M);h(zt.$$.fragment,xe),ki=p(xe),Et=r(xe,"P",{});var Fr=o(Et);Ti=i(Fr,"This class overrides "),ps=r(Fr,"A",{href:!0});var Nd=o(ps);$i=i(Nd,"TFT5Model"),Nd.forEach(s),wi=i(Fr,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Fr.forEach(s),yi=p(xe),fn=r(xe,"P",{});var Id=o(fn);bi=i(Id,"Examples:"),Id.forEach(s),zi=p(xe),h(Mt.$$.fragment,xe),xe.forEach(s),Jn=p(t),ee=r(t,"H2",{class:!0});var Pr=o(ee);Te=r(Pr,"A",{id:!0,class:!0,href:!0});var Gd=o(Te);hn=r(Gd,"SPAN",{});var Od=o(hn);h(xt.$$.fragment,Od),Od.forEach(s),Gd.forEach(s),Ei=p(Pr),un=r(Pr,"SPAN",{});var Vd=o(un);Mi=i(Vd,"TFMT5ForConditionalGeneration"),Vd.forEach(s),Pr.forEach(s),Qn=p(t),x=r(t,"DIV",{class:!0});var qe=o(x);h(qt.$$.fragment,qe),xi=p(qe),jt=r(qe,"P",{});var Cr=o(jt);qi=i(Cr,"This class overrides "),ms=r(Cr,"A",{href:!0});var Ud=o(ms);ji=i(Ud,"TFT5ForConditionalGeneration"),Ud.forEach(s),Fi=i(Cr,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Cr.forEach(s),Pi=p(qe),gn=r(qe,"P",{});var Wd=o(gn);Ci=i(Wd,"Examples:"),Wd.forEach(s),Ai=p(qe),h(Ft.$$.fragment,qe),qe.forEach(s),Yn=p(t),te=r(t,"H2",{class:!0});var Ar=o(te);$e=r(Ar,"A",{id:!0,class:!0,href:!0});var Bd=o($e);_n=r(Bd,"SPAN",{});var Hd=o(_n);h(Pt.$$.fragment,Hd),Hd.forEach(s),Bd.forEach(s),Si=p(Ar),vn=r(Ar,"SPAN",{});var Rd=o(vn);Li=i(Rd,"TFMT5EncoderModel"),Rd.forEach(s),Ar.forEach(s),Zn=p(t),q=r(t,"DIV",{class:!0});var je=o(q);h(Ct.$$.fragment,je),Di=p(je),At=r(je,"P",{});var Sr=o(At);Ni=i(Sr,"This class overrides "),cs=r(Sr,"A",{href:!0});var Xd=o(cs);Ii=i(Xd,"TFT5EncoderModel"),Xd.forEach(s),Gi=i(Sr,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Sr.forEach(s),Oi=p(je),kn=r(je,"P",{});var Kd=o(kn);Vi=i(Kd,"Examples:"),Kd.forEach(s),Ui=p(je),h(St.$$.fragment,je),je.forEach(s),er=p(t),se=r(t,"H2",{class:!0});var Lr=o(se);we=r(Lr,"A",{id:!0,class:!0,href:!0});var Jd=o(we);Tn=r(Jd,"SPAN",{});var Qd=o(Tn);h(Lt.$$.fragment,Qd),Qd.forEach(s),Jd.forEach(s),Wi=p(Lr),$n=r(Lr,"SPAN",{});var Yd=o($n);Bi=i(Yd,"FlaxMT5Model"),Yd.forEach(s),Lr.forEach(s),tr=p(t),j=r(t,"DIV",{class:!0});var Fe=o(j);h(Dt.$$.fragment,Fe),Hi=p(Fe),Nt=r(Fe,"P",{});var Dr=o(Nt);Ri=i(Dr,"This class overrides "),fs=r(Dr,"A",{href:!0});var Zd=o(fs);Xi=i(Zd,"FlaxT5Model"),Zd.forEach(s),Ki=i(Dr,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Dr.forEach(s),Ji=p(Fe),wn=r(Fe,"P",{});var ep=o(wn);Qi=i(ep,"Examples:"),ep.forEach(s),Yi=p(Fe),h(It.$$.fragment,Fe),Fe.forEach(s),sr=p(t),ne=r(t,"H2",{class:!0});var Nr=o(ne);ye=r(Nr,"A",{id:!0,class:!0,href:!0});var tp=o(ye);yn=r(tp,"SPAN",{});var sp=o(yn);h(Gt.$$.fragment,sp),sp.forEach(s),tp.forEach(s),Zi=p(Nr),bn=r(Nr,"SPAN",{});var np=o(bn);el=i(np,"FlaxMT5ForConditionalGeneration"),np.forEach(s),Nr.forEach(s),nr=p(t),F=r(t,"DIV",{class:!0});var Pe=o(F);h(Ot.$$.fragment,Pe),tl=p(Pe),Vt=r(Pe,"P",{});var Ir=o(Vt);sl=i(Ir,"This class overrides "),hs=r(Ir,"A",{href:!0});var rp=o(hs);nl=i(rp,"FlaxT5ForConditionalGeneration"),rp.forEach(s),rl=i(Ir,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ir.forEach(s),ol=p(Pe),zn=r(Pe,"P",{});var op=o(zn);al=i(op,"Examples:"),op.forEach(s),il=p(Pe),h(Ut.$$.fragment,Pe),Pe.forEach(s),this.h()},h(){l(D,"name","hf:doc:metadata"),l(D,"content",JSON.stringify(cp)),l(A,"id","mt5"),l(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(A,"href","#mt5"),l(C,"class","relative group"),l(re,"id","overview"),l(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(re,"href","#overview"),l(U,"class","relative group"),l(Le,"href","https://arxiv.org/abs/2010.11934"),l(Le,"rel","nofollow"),l(De,"href","https://huggingface.co/datasets/mc4"),l(De,"rel","nofollow"),l(Ne,"href","https://huggingface.co/google/mt5-small"),l(Ne,"rel","nofollow"),l(Ie,"href","https://huggingface.co/google/mt5-base"),l(Ie,"rel","nofollow"),l(Ge,"href","https://huggingface.co/google/mt5-large"),l(Ge,"rel","nofollow"),l(Oe,"href","https://huggingface.co/google/mt5-xl"),l(Oe,"rel","nofollow"),l(Ve,"href","https://huggingface.co/google/mt5-xxl"),l(Ve,"rel","nofollow"),l(Ue,"href","https://huggingface.co/patrickvonplaten"),l(Ue,"rel","nofollow"),l(We,"href","https://github.com/google-research/multilingual-t5"),l(We,"rel","nofollow"),l(ie,"id","transformers.MT5Config"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#transformers.MT5Config"),l(W,"class","relative group"),l(Kt,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Model"),l(Jt,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.TFMT5Model"),l(Re,"href","https://huggingface.co/google/mt5-small"),l(Re,"rel","nofollow"),l(Qt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Yt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(S,"class","docstring"),l(le,"id","transformers.T5Tokenizer"),l(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(le,"href","#transformers.T5Tokenizer"),l(H,"class","relative group"),l(Qe,"href","https://github.com/google/sentencepiece"),l(Qe,"rel","nofollow"),l(Zt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(I,"class","docstring"),l(de,"class","docstring"),l(pe,"class","docstring"),l(me,"class","docstring"),l(k,"class","docstring"),l(ss,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer"),l(fe,"id","transformers.T5TokenizerFast"),l(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(fe,"href","#transformers.T5TokenizerFast"),l(X,"class","relative 
group"),l(it,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),l(it,"rel","nofollow"),l(ns,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(G,"class","docstring"),l(he,"class","docstring"),l(w,"class","docstring"),l(as,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5TokenizerFast"),l(ge,"id","transformers.MT5Model"),l(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ge,"href","#transformers.MT5Model"),l(J,"class","relative group"),l(is,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model"),l(b,"class","docstring"),l(_e,"id","transformers.MT5ForConditionalGeneration"),l(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_e,"href","#transformers.MT5ForConditionalGeneration"),l(Q,"class","relative group"),l(ls,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),l(z,"class","docstring"),l(ve,"id","transformers.MT5EncoderModel"),l(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ve,"href","#transformers.MT5EncoderModel"),l(Y,"class","relative group"),l(ds,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5EncoderModel"),l(E,"class","docstring"),l(ke,"id","transformers.TFMT5Model"),l(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ke,"href","#transformers.TFMT5Model"),l(Z,"class","relative group"),l(ps,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5Model"),l(M,"class","docstring"),l(Te,"id","transformers.TFMT5ForConditionalGeneration"),l(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Te,"href","#transformers.TFMT5ForConditionalGeneration"),l(ee,"class","relative group"),l(ms,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5ForConditionalGeneration"),l(x,"class","docstring"),l($e,"id","transformers.TFMT5EncoderModel"),l($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l($e,"href","#transformers.TFMT5EncoderModel"),l(te,"class","relative group"),l(cs,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5EncoderModel"),l(q,"class","docstring"),l(we,"id","transformers.FlaxMT5Model"),l(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(we,"href","#transformers.FlaxMT5Model"),l(se,"class","relative group"),l(fs,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5Model"),l(j,"class","docstring"),l(ye,"id","transformers.FlaxMT5ForConditionalGeneration"),l(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(ye,"href","#transformers.FlaxMT5ForConditionalGeneration"),l(ne,"class","relative group"),l(hs,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5ForConditionalGeneration"),l(F,"class","docstring")},m(t,m){e(document.head,D),c(t,Wt,m),c(t,C,m),e(C,A),e(A,ws),u(Ae,ws,null),e(C,Or),e(C,ys),e(ys,Vr),c(t,En,m),c(t,U,m),e(U,re),e(re,bs),u(Se,bs,null),e(U,Ur),e(U,zs),e(zs,Wr),c(t,Mn,m),c(t,oe,m),e(oe,Br),e(oe,Le),e(Le,Hr),e(oe,Rr),c(t,xn,m),c(t,Bt,m),e(Bt,Xr),c(t,qn,m),c(t,Ht,m),e(Ht,Es),e(Es,Kr),c(t,jn,m),c(t,ae,m),e(ae,Jr),e(ae,De),e(De,Qr),e(ae,Yr),c(t,Fn,m),c(t,Rt,m),e(Rt,Zr),c(t,Pn,m),c(t,y,m),e(y,Ms),e(Ms,xs),e(xs,Ne),e(Ne,eo),e(y,to),e(y,qs),e(qs,js),e(js,Ie),e(Ie,so),e(y,no),e(y,Fs),e(Fs,Ps),e(Ps,Ge),e(Ge,ro),e(y,oo),e(y,Cs),e(Cs,As),e(As,Oe),e(Oe,ao),e(y,io),e(y,Ss),e(Ss,Xt),e(Xt,Ve),e(Ve,lo),e(Xt,po),c(t,Cn,m),c(t,N,m),e(N,mo),e(N,Ue),e(Ue,co),e(N,fo),e(N,We),e(We,ho),e(N,uo),c(t,An,m),c(t,W,m),e(W,ie),e(ie,Ls),u(Be,Ls,null),e(W,go),e(W,Ds),e(Ds,_o),c(t,Sn,m),c(t,S,m),u(He,S,null),e(S,vo),e(S,L),e(L,ko),e(L,Kt),e(Kt,To),e(L,$o),e(L,Jt),e(Jt,wo),e(L,yo),e(L,Re),e(Re,bo),e(L,zo),e(S,Eo),e(S,B),e(B,Mo),e(B,Qt),e(Qt,xo),e(B,qo),e(B,Yt),e(Yt,jo),e(B,Fo),c(t,Ln,m),c(t,H,m),e(H,le),e(le,Ns),u(Xe,Ns,null),e(H,Po),e(H,Is),e(Is,Co),c(t,Dn,m),c(t,k,m),u(Ke,k,null),e(k,Ao),e(k,Je),e(Je,So),e(Je,Qe),e(Qe,Lo),e(Je,Do),e(k,No),e(k,Ye),e(Ye,Io),e(Ye,Zt),e(Zt,Go),e(Ye,Oo),e(k,Vo),e(k,R),e(R,Uo),e(R,Gs),e(Gs,Wo),e(R,Bo),e(R,Os),e(Os,Ho),e(R,Ro),e(k,Xo),e(k,I),u(Ze,I,null),e(I,Ko),e(I,Vs),e(Vs,Jo),e(I,Qo),e(I,et),e(et,es),e(es,Yo),e(es,Us),e(Us,Zo),e(et,ea),e(et,ts),e(ts,ta),e(ts,Ws),e(Ws,sa),e(k,na),e(k,de),u(tt,de,null),e(de,ra),e(de,Bs),e(Bs,oa),e(k,aa),e(k,pe),u(st,pe,null),e(pe,ia),e(pe,Hs),e(Hs,la),e(k,da),e(k,me),u(nt,me,null),e(me,pa),e(me,rt),e(rt,ma),e(rt,Rs),e(Rs,ca),e(rt,fa),c(t,Nn,m),c(t,ce,m),e(ce,ha),e(ce,ss),e(ss,ua),e(ce,ga),c(t,In,m),c(t,X,m),e(X,fe),e(fe,Xs),u(ot,Xs,null),e(X,_a),e(X,Ks),e(Ks,va),c(t,Gn,m),c(t,w,m),u(at,w,null),e(w,ka),e(w,K),e(K,Ta),e(K,Js),e(Js,$a),e(K,wa),e(K,it),e(it,ya),e(K,ba),e(w,za),e(w,lt),e(lt,Ea),e(lt,ns),e(ns,Ma),e(lt,xa),e(w,qa),e(w,G),u(dt,G,null),e(G,ja),e(G,Qs),e(Qs,Fa),e(G,Pa),e(G,pt),e(pt,rs),e(rs,Ca),e(rs,Ys),e(Ys,Aa),e(pt,Sa),e(pt,os),e(os,La),e(os,Zs),e(Zs,Da),e(w,Na),e(w,he),u(mt,he,null),e(he,Ia),e(he,en),e(en,Ga),c(t,On,m),c(t,ue,m),e(ue,Oa),e(ue,as),e(as,Va),e(ue,Ua),c(t,Vn,m),c(t,J,m),e(J,ge),e(ge,tn),u(ct,tn,null),e(J,Wa),e(J,sn),e(sn,Ba),c(t,Un,m),c(t,b,m),u(ft,b,null),e(b,Ha),e(b,ht),e(ht,Ra),e(ht,is),e(is,Xa),e(ht,Ka),e(b,Ja),e(b,nn),e(nn,Qa),e(b,Ya),u(ut,b,null),c(t,Wn,m),c(t,Q,m),e(Q,_e),e(_e,rn),u(gt,rn,null),e(Q,Za),e(Q,on),e(on,ei),c(t,Bn,m),c(t,z,m),u(_t,z,null),e(z,ti),e(z,vt),e(vt,si),e(vt,ls),e(ls,ni),e(vt,ri),e(z,oi),e(z,an),e(an,ai),e(z,ii),u(kt,z,null),c(t,Hn,m),c(t,Y,m),e(Y,ve),e(ve,ln),u(Tt,ln,null),e(Y,li),e(Y,dn),e(dn,di),c(t,Rn,m),c(t,E,m),u($t,E,null),e(E,pi),e(E,wt),e(wt,mi),e(wt,ds),e(ds,ci),e(wt,fi),e(E,hi),e(E,pn),e(pn,ui),e(E,gi),u(yt,E,null),c(t,Xn,m),c(t,Z,m),e(Z,ke),e(ke,mn),u(bt,mn,null),e(Z,_i),e(Z,cn),e(cn,vi),c(t,Kn,m),c(t,M,m),u(zt,M,null),e(M,ki),e(M,Et),e(Et,Ti),e(Et,ps),e(ps,$i),e(Et,wi),e(M,yi),e(M,fn),e(fn,bi),e(M,zi),u(Mt,M,null),c(t,Jn,m),c(t,ee,m),e(ee,Te),e(Te,hn),u(xt,hn,null),e(ee,Ei),e(ee,un),e(un,Mi),c(t,Qn,m),c(t,x,m),u(qt,x,null),e(x,xi),e(x,jt),e(jt,qi),e(jt,ms),e(ms,ji),e(jt,Fi),e(x,Pi),e(x,gn),e(gn,Ci),e(x,Ai),u(Ft,x,null),c(t,Yn,m),c(t,te,m),e(te,$e),e($e,_n),u(Pt,_n,null),e(te,Si),e(te,vn),e(vn,Li),c(t,Zn,m),c(t,q,m),u(Ct,q,null),e(q,Di),e(q,At),
e(At,Ni),e(At,cs),e(cs,Ii),e(At,Gi),e(q,Oi),e(q,kn),e(kn,Vi),e(q,Ui),u(St,q,null),c(t,er,m),c(t,se,m),e(se,we),e(we,Tn),u(Lt,Tn,null),e(se,Wi),e(se,$n),e($n,Bi),c(t,tr,m),c(t,j,m),u(Dt,j,null),e(j,Hi),e(j,Nt),e(Nt,Ri),e(Nt,fs),e(fs,Xi),e(Nt,Ki),e(j,Ji),e(j,wn),e(wn,Qi),e(j,Yi),u(It,j,null),c(t,sr,m),c(t,ne,m),e(ne,ye),e(ye,yn),u(Gt,yn,null),e(ne,Zi),e(ne,bn),e(bn,el),c(t,nr,m),c(t,F,m),u(Ot,F,null),e(F,tl),e(F,Vt),e(Vt,sl),e(Vt,hs),e(hs,nl),e(Vt,rl),e(F,ol),e(F,zn),e(zn,al),e(F,il),u(Ut,F,null),rr=!0},p:pp,i(t){rr||(g(Ae.$$.fragment,t),g(Se.$$.fragment,t),g(Be.$$.fragment,t),g(He.$$.fragment,t),g(Xe.$$.fragment,t),g(Ke.$$.fragment,t),g(Ze.$$.fragment,t),g(tt.$$.fragment,t),g(st.$$.fragment,t),g(nt.$$.fragment,t),g(ot.$$.fragment,t),g(at.$$.fragment,t),g(dt.$$.fragment,t),g(mt.$$.fragment,t),g(ct.$$.fragment,t),g(ft.$$.fragment,t),g(ut.$$.fragment,t),g(gt.$$.fragment,t),g(_t.$$.fragment,t),g(kt.$$.fragment,t),g(Tt.$$.fragment,t),g($t.$$.fragment,t),g(yt.$$.fragment,t),g(bt.$$.fragment,t),g(zt.$$.fragment,t),g(Mt.$$.fragment,t),g(xt.$$.fragment,t),g(qt.$$.fragment,t),g(Ft.$$.fragment,t),g(Pt.$$.fragment,t),g(Ct.$$.fragment,t),g(St.$$.fragment,t),g(Lt.$$.fragment,t),g(Dt.$$.fragment,t),g(It.$$.fragment,t),g(Gt.$$.fragment,t),g(Ot.$$.fragment,t),g(Ut.$$.fragment,t),rr=!0)},o(t){_(Ae.$$.fragment,t),_(Se.$$.fragment,t),_(Be.$$.fragment,t),_(He.$$.fragment,t),_(Xe.$$.fragment,t),_(Ke.$$.fragment,t),_(Ze.$$.fragment,t),_(tt.$$.fragment,t),_(st.$$.fragment,t),_(nt.$$.fragment,t),_(ot.$$.fragment,t),_(at.$$.fragment,t),_(dt.$$.fragment,t),_(mt.$$.fragment,t),_(ct.$$.fragment,t),_(ft.$$.fragment,t),_(ut.$$.fragment,t),_(gt.$$.fragment,t),_(_t.$$.fragment,t),_(kt.$$.fragment,t),_(Tt.$$.fragment,t),_($t.$$.fragment,t),_(yt.$$.fragment,t),_(bt.$$.fragment,t),_(zt.$$.fragment,t),_(Mt.$$.fragment,t),_(xt.$$.fragment,t),_(qt.$$.fragment,t),_(Ft.$$.fragment,t),_(Pt.$$.fragment,t),_(Ct.$$.fragment,t),_(St.$$.fragment,t),_(Lt.$$.fragment,t),_(Dt.$$.fragment,t),_(It.$$.fragment,t),_(Gt.$$.fragment,t),_(Ot.$$.fragment,t),_(Ut.$$.fragment,t),rr=!1},d(t){s(D),t&&s(Wt),t&&s(C),v(Ae),t&&s(En),t&&s(U),v(Se),t&&s(Mn),t&&s(oe),t&&s(xn),t&&s(Bt),t&&s(qn),t&&s(Ht),t&&s(jn),t&&s(ae),t&&s(Fn),t&&s(Rt),t&&s(Pn),t&&s(y),t&&s(Cn),t&&s(N),t&&s(An),t&&s(W),v(Be),t&&s(Sn),t&&s(S),v(He),t&&s(Ln),t&&s(H),v(Xe),t&&s(Dn),t&&s(k),v(Ke),v(Ze),v(tt),v(st),v(nt),t&&s(Nn),t&&s(ce),t&&s(In),t&&s(X),v(ot),t&&s(Gn),t&&s(w),v(at),v(dt),v(mt),t&&s(On),t&&s(ue),t&&s(Vn),t&&s(J),v(ct),t&&s(Un),t&&s(b),v(ft),v(ut),t&&s(Wn),t&&s(Q),v(gt),t&&s(Bn),t&&s(z),v(_t),v(kt),t&&s(Hn),t&&s(Y),v(Tt),t&&s(Rn),t&&s(E),v($t),v(yt),t&&s(Xn),t&&s(Z),v(bt),t&&s(Kn),t&&s(M),v(zt),v(Mt),t&&s(Jn),t&&s(ee),v(xt),t&&s(Qn),t&&s(x),v(qt),v(Ft),t&&s(Yn),t&&s(te),v(Pt),t&&s(Zn),t&&s(q),v(Ct),v(St),t&&s(er),t&&s(se),v(Lt),t&&s(tr),t&&s(j),v(Dt),v(It),t&&s(sr),t&&s(ne),v(Gt),t&&s(nr),t&&s(F),v(Ot),v(Ut)}}}const 
cp={local:"mt5",sections:[{local:"overview",title:"Overview"},{local:"transformers.MT5Config",title:"MT5Config"},{local:"transformers.T5Tokenizer",title:"MT5Tokenizer"},{local:"transformers.T5TokenizerFast",title:"MT5TokenizerFast"},{local:"transformers.MT5Model",title:"MT5Model"},{local:"transformers.MT5ForConditionalGeneration",title:"MT5ForConditionalGeneration"},{local:"transformers.MT5EncoderModel",title:"MT5EncoderModel"},{local:"transformers.TFMT5Model",title:"TFMT5Model"},{local:"transformers.TFMT5ForConditionalGeneration",title:"TFMT5ForConditionalGeneration"},{local:"transformers.TFMT5EncoderModel",title:"TFMT5EncoderModel"},{local:"transformers.FlaxMT5Model",title:"FlaxMT5Model"},{local:"transformers.FlaxMT5ForConditionalGeneration",title:"FlaxMT5ForConditionalGeneration"}],title:"mT5"};function fp(Gr,D,Wt){let{fw:C}=D;return Gr.$$set=A=>{"fw"in A&&Wt(0,C=A.fw)},[C]}class kp extends ap{constructor(D){super();ip(this,D,fp,mp,lp,{fw:0})}}export{kp as default,cp as metadata};
9,902
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/gptj.mdx-cf0c2373.js
import{S as Mc,i as Fc,s as zc,e as n,k as d,w as _,t as r,L as Ec,c as s,d as o,m as c,a,x as T,h as i,b as l,J as e,g as u,y as v,q as P,o as k,B as w}from"../../chunks/vendor-b1433968.js";import{T as zo}from"../../chunks/Tip-c3840994.js";import{D as Q}from"../../chunks/Docstring-ff504c58.js";import{C as Y}from"../../chunks/CodeBlock-a320dbd7.js";import{I as le}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ac(F){let h,y,f,b,$;return{c(){h=n("p"),y=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var m=a(h);y=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),$=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,h,m),e(h,y),e(h,f),e(f,b),e(h,$)},d(g){g&&o(h)}}}function Cc(F){let h,y,f,b,$;return{c(){h=n("p"),y=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var m=a(h);y=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),$=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,h,m),e(h,y),e(h,f),e(f,b),e(h,$)},d(g){g&&o(h)}}}function qc(F){let h,y,f,b,$;return{c(){h=n("p"),y=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var m=a(h);y=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),$=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,h,m),e(h,y),e(h,f),e(f,b),e(h,$)},d(g){g&&o(h)}}}function Ic(F){let h,y,f,b,$;return{c(){h=n("p"),y=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var m=a(h);y=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),$=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,h,m),e(h,y),e(h,f),e(f,b),e(h,$)},d(g){g&&o(h)}}}function Lc(F){let 
h,y,f,b,$;return{c(){h=n("p"),y=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var m=a(h);y=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),$=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,h,m),e(h,y),e(h,f),e(f,b),e(h,$)},d(g){g&&o(h)}}}function Sc(F){let h,y,f,b,$;return{c(){h=n("p"),y=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var m=a(h);y=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),$=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,h,m),e(h,y),e(h,f),e(f,b),e(h,$)},d(g){g&&o(h)}}}function Nc(F){let h,y,f,b,$,g,m,x,qs,Wn,de,$e,Eo,Ve,Is,Ao,Ls,Un,Z,Ss,Ke,Ns,Ds,Xe,Bs,Os,Qn,xe,Ws,Ye,Us,Qs,Hn,ro,Hs,Rn,io,N,Rs,Ze,Vs,Ks,Co,Xs,Ys,qo,Zs,ea,et,ta,oa,Vn,tt,Kn,Ge,Io,lo,na,ot,sa,aa,Lo,R,ra,nt,ia,la,So,da,ca,No,pa,ha,Xn,ce,je,Do,st,ua,Bo,fa,Yn,Je,ma,co,ga,_a,Zn,at,es,po,Ta,ts,rt,os,pe,Me,Oo,it,va,Wo,Pa,ns,D,lt,ka,B,wa,ho,ba,ya,dt,$a,xa,uo,Ga,ja,fo,Ja,Ma,Fa,Uo,za,Ea,ct,ss,he,Fe,Qo,pt,Aa,Ho,Ca,as,V,ht,qa,ut,Ia,ft,La,Sa,Na,z,mt,Da,ue,Ba,mo,Oa,Wa,Ro,Ua,Qa,Ha,ze,Ra,Vo,Va,Ka,gt,rs,fe,Ee,Ko,_t,Xa,Xo,Ya,is,O,Tt,Za,Yo,er,tr,vt,or,Pt,nr,sr,ar,E,kt,rr,me,ir,go,lr,dr,Zo,cr,pr,hr,Ae,ur,en,fr,mr,wt,ls,ge,Ce,tn,bt,gr,on,_r,ds,M,yt,Tr,nn,vr,Pr,_o,To,kr,wr,br,W,yr,sn,$r,xr,an,Gr,jr,rn,Jr,Mr,ln,Fr,zr,Er,$t,Ar,xt,Cr,qr,Ir,J,Gt,Lr,_e,Sr,vo,Nr,Dr,dn,Br,Or,Wr,qe,Ur,cn,Qr,Hr,jt,Rr,pn,Vr,Kr,Jt,cs,Te,Ie,hn,Mt,Xr,un,Yr,ps,U,Ft,Zr,ve,ei,fn,ti,oi,mn,ni,si,ai,zt,ri,Et,ii,li,di,A,At,ci,Pe,pi,Po,hi,ui,gn,fi,mi,gi,Le,_i,_n,Ti,vi,Ct,hs,ke,Se,Tn,qt,Pi,vn,ki,us,G,It,wi,Pn,bi,yi,Lt,$i,ko,xi,Gi,ji,St,Ji,Nt,Mi,Fi,zi,kn,Ei,Ai,K,wn,Dt,Ci,qi,bn,Bt,Ii,Li,yn,Ot,Si,Ni,$n,Wt,Di,Bi,C,Ut,Oi,we,Wi,xn,Ui,Qi,Gn,Hi,Ri,Vi,Ne,Ki,jn,Xi,Yi,Qt,fs,be,De,Jn,Ht,Zi,Mn,el,ms,j,Rt,tl,Fn,ol,nl,Vt,sl,wo,al,rl,il,Kt,ll,Xt,dl,cl,pl,zn,hl,ul,X,En,Yt,fl,ml,An,Zt,gl,_l,Cn,eo,Tl,vl,qn,to,Pl,kl,q,oo,wl,ye,bl,In,yl,$l,Ln,xl,Gl,jl,Be,Jl,Sn,Ml,Fl,no,gs;return g=new le({}),Ve=new le({}),tt=new Y({props:{code:`from transformers import GPTJForCausalLM import torch model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16, low_cpu_mem_usage=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTJForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJForCausalLM.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-j-6B&quot;</span>, revision=<span class="hljs-string">&quot;float16&quot;</span>, 
torch_dtype=torch.float16, low_cpu_mem_usage=<span class="hljs-literal">True</span>)`}}),st=new le({}),at=new Y({props:{code:`from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B") prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \\ "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \\ "researchers was the fact that the unicorns spoke perfect English." input_ids = tokenizer(prompt, return_tensors="pt").input_ids gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100,) gen_text = tokenizer.batch_decode(gen_tokens)[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-j-6B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-j-6B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In a shocking finding, scientists discovered a herd of unicorns living in a remote, &quot;</span> \\ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;previously unexplored valley, in the Andes Mountains. Even more surprising to the &quot;</span> \\ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;researchers was the fact that the unicorns spoke perfect English.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>gen_tokens = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, temperature=<span class="hljs-number">0.9</span>, max_length=<span class="hljs-number">100</span>,) <span class="hljs-meta">&gt;&gt;&gt; </span>gen_text = tokenizer.batch_decode(gen_tokens)[<span class="hljs-number">0</span>]`}}),rt=new Y({props:{code:`from transformers import GPTJForCausalLM, AutoTokenizer import torch model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.float16) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B") prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \\ "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \\ "researchers was the fact that the unicorns spoke perfect English." 
input_ids = tokenizer(prompt, return_tensors="pt").input_ids gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100,) gen_text = tokenizer.batch_decode(gen_tokens)[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTJForCausalLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJForCausalLM.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-j-6B&quot;</span>, torch_dtype=torch.float16) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-j-6B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In a shocking finding, scientists discovered a herd of unicorns living in a remote, &quot;</span> \\ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;previously unexplored valley, in the Andes Mountains. Even more surprising to the &quot;</span> \\ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;researchers was the fact that the unicorns spoke perfect English.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>gen_tokens = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, temperature=<span class="hljs-number">0.9</span>, max_length=<span class="hljs-number">100</span>,) <span class="hljs-meta">&gt;&gt;&gt; </span>gen_text = tokenizer.batch_decode(gen_tokens)[<span class="hljs-number">0</span>]`}}),it=new le({}),lt=new Q({props:{name:"class transformers.GPTJConfig",anchor:"transformers.GPTJConfig",parameters:[{name:"vocab_size",val:" = 50400"},{name:"n_positions",val:" = 2048"},{name:"n_embd",val:" = 4096"},{name:"n_layer",val:" = 28"},{name:"n_head",val:" = 16"},{name:"rotary_dim",val:" = 64"},{name:"n_inner",val:" = None"},{name:"activation_function",val:" = 'gelu_new'"},{name:"resid_pdrop",val:" = 0.0"},{name:"embd_pdrop",val:" = 0.0"},{name:"attn_pdrop",val:" = 0.0"},{name:"layer_norm_epsilon",val:" = 1e-05"},{name:"initializer_range",val:" = 0.02"},{name:"scale_attn_weights",val:" = True"},{name:"use_cache",val:" = True"},{name:"bos_token_id",val:" = 50256"},{name:"eos_token_id",val:" = 50256"},{name:"tie_word_embeddings",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/configuration_gptj.py#L29",parametersDescription:[{anchor:"transformers.GPTJConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50400) &#x2014; Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJModel">GPTJModel</a>.`,name:"vocab_size"},{anchor:"transformers.GPTJConfig.n_positions",description:`<strong>n_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"n_positions"},{anchor:"transformers.GPTJConfig.n_embd",description:`<strong>n_embd</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the embeddings and hidden states.`,name:"n_embd"},{anchor:"transformers.GPTJConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 28) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.GPTJConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.GPTJConfig.rotary_dim",description:`<strong>rotary_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Number of dimensions in the embedding that Rotary Position Embedding is applied to.`,name:"rotary_dim"},{anchor:"transformers.GPTJConfig.n_inner",description:`<strong>n_inner</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; Dimensionality of the inner feed-forward layers. <code>None</code> will set it to 4 times n_embd`,name:"n_inner"},{anchor:"transformers.GPTJConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; Activation function, to be selected in the list <code>[&quot;relu&quot;, &quot;silu&quot;, &quot;gelu&quot;, &quot;tanh&quot;, &quot;gelu_new&quot;]</code>.`,name:"activation_function"},{anchor:"transformers.GPTJConfig.resid_pdrop",description:`<strong>resid_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"resid_pdrop"},{anchor:"transformers.GPTJConfig.embd_pdrop",description:`<strong>embd_pdrop</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the embeddings.`,name:"embd_pdrop"},{anchor:"transformers.GPTJConfig.attn_pdrop",description:`<strong>attn_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention.`,name:"attn_pdrop"},{anchor:"transformers.GPTJConfig.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon to use in the layer normalization layers.`,name:"layer_norm_epsilon"},{anchor:"transformers.GPTJConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.GPTJConfig.scale_attn_weights",description:`<strong>scale_attn_weights</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Scale attention weights by dividing by sqrt(hidden_size).`,name:"scale_attn_weights"},{anchor:"transformers.GPTJConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),ct=new Y({props:{code:`from transformers import GPTJModel, GPTJConfig # Initializing a GPT-J 6B configuration 
configuration = GPTJConfig() # Initializing a model from the configuration model = GPTJModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTJModel, GPTJConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a GPT-J 6B configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = GPTJConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),pt=new le({}),ht=new Q({props:{name:"class transformers.GPTJModel",anchor:"transformers.GPTJModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L446",parametersDescription:[{anchor:"transformers.GPTJModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mt=new Q({props:{name:"forward",anchor:"transformers.GPTJModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L502",parametersDescription:[{anchor:"transformers.GPTJModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTJTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTJModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTJModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTJModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.n_positions - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTJModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_attention_heads,)</code> or <code>(n_layer, num_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTJModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_dim)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.GPTJModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTJModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTJModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPast" >transformers.modeling_outputs.BaseModelOutputWithPast</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig" >GPTJConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPast" >transformers.modeling_outputs.BaseModelOutputWithPast</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ze=new zo({props:{$$slots:{default:[Ac]},$$scope:{ctx:F}}}),gt=new Y({props:{code:`from transformers import 
GPT2Tokenizer, GPTJModel import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-j-6B') model = GPTJModel.from_pretrained('EleutherAI/gpt-j-6B') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTJModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJModel.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),_t=new le({}),Tt=new Q({props:{name:"class transformers.GPTJForCausalLM",anchor:"transformers.GPTJForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L687",parametersDescription:[{anchor:"transformers.GPTJForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kt=new Q({props:{name:"forward",anchor:"transformers.GPTJForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L756",parametersDescription:[{anchor:"transformers.GPTJForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTJTokenizer</code>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTJForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTJForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTJForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.n_positions - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTJForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_attention_heads,)</code> or <code>(n_layer, num_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTJForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_dim)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.GPTJForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTJForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTJForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPTJForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithPast" >transformers.modeling_outputs.CausalLMOutputWithPast</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig" >GPTJConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithPast" >transformers.modeling_outputs.CausalLMOutputWithPast</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new zo({props:{$$slots:{default:[Cc]},$$scope:{ctx:F}}}),wt=new Y({props:{code:`import torch from transformers import GPT2Tokenizer, GPTJForCausalLM tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-j-6B') model = GPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTJForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJForCausalLM.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),bt=new le({}),yt=new Q({props:{name:"class transformers.GPTJForSequenceClassification",anchor:"transformers.GPTJForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L862",parametersDescription:[{anchor:"transformers.GPTJForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gt=new Q({props:{name:"forward",anchor:"transformers.GPTJForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L878",parametersDescription:[{anchor:"transformers.GPTJForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTJTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTJForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTJForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTJForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.n_positions - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTJForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_attention_heads,)</code> or <code>(n_layer, num_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTJForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_dim)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.GPTJForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTJForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTJForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPTJForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig" >GPTJConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),qe=new zo({props:{$$slots:{default:[qc]},$$scope:{ctx:F}}}),jt=new Y({props:{code:`from transformers import GPT2Tokenizer, GPTJForSequenceClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-j-6B') model = GPTJForSequenceClassification.from_pretrained('EleutherAI/gpt-j-6B') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTJForSequenceClassification <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Jt=new Y({props:{code:`from transformers import GPT2Tokenizer, GPTJForSequenceClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-j-6B') model = GPTJForSequenceClassification.from_pretrained('EleutherAI/gpt-j-6B', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTJForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Mt=new le({}),Ft=new Q({props:{name:"class transformers.GPTJForQuestionAnswering",anchor:"transformers.GPTJForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L987",parametersDescription:[{anchor:"transformers.GPTJForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),At=new Q({props:{name:"forward",anchor:"transformers.GPTJForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_gptj.py#L1003",parametersDescription:[{anchor:"transformers.GPTJForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTJTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTJForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTJForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTJForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.n_positions - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTJForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_attention_heads,)</code> or <code>(n_layer, num_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTJForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_dim)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.GPTJForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTJForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTJForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPTJForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.GPTJForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig" >GPTJConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Le=new zo({props:{$$slots:{default:[Ic]},$$scope:{ctx:F}}}),Ct=new Y({props:{code:`from transformers import GPT2Tokenizer, GPTJForQuestionAnswering import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-j-6B') model = GPTJForQuestionAnswering.from_pretrained('EleutherAI/gpt-j-6B') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTJForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span 
class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTJForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-j-6B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),qt=new le({}),It=new Q({props:{name:"class transformers.FlaxGPTJModel",anchor:"transformers.FlaxGPTJModel",parameters:[{name:"config",val:": GPTJConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_flax_gptj.py#L608",parametersDescription:[{anchor:"transformers.FlaxGPTJModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxGPTJModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Ut=new Q({props:{name:"__call__",anchor:"transformers.FlaxGPTJPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_flax_gptj.py#L426",parametersDescription:[{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code>. Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTJTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig" >GPTJConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new zo({props:{$$slots:{default:[Lc]},$$scope:{ctx:F}}}),Qt=new Y({props:{code:`from transformers import GPTJTokenizer, FlaxGPTJModel tokenizer = GPTJTokenizer.from_pretrained('gptj') model = FlaxGPTJModel.from_pretrained('gptj') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTJTokenizer, FlaxGPTJModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPTJTokenizer.from_pretrained(<span class="hljs-string">&#x27;gptj&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxGPTJModel.from_pretrained(<span class="hljs-string">&#x27;gptj&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ht=new le({}),Rt=new Q({props:{name:"class transformers.FlaxGPTJForCausalLM",anchor:"transformers.FlaxGPTJForCausalLM",parameters:[{name:"config",val:": GPTJConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_flax_gptj.py#L676",parametersDescription:[{anchor:"transformers.FlaxGPTJForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxGPTJForCausalLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),oo=new Q({props:{name:"__call__",anchor:"transformers.FlaxGPTJPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gptj/modeling_flax_gptj.py#L426",parametersDescription:[{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code>. Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTJTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxGPTJPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig" >GPTJConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Be=new zo({props:{$$slots:{default:[Sc]},$$scope:{ctx:F}}}),no=new Y({props:{code:`from transformers import GPTJTokenizer, FlaxGPTJForCausalLM tokenizer = GPTJTokenizer.from_pretrained('gptj') model = FlaxGPTJForCausalLM.from_pretrained('gptj') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) # retrieve logts for next token next_token_logits = outputs.logits[:, -1],`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTJTokenizer, FlaxGPTJForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPTJTokenizer.from_pretrained(<span class="hljs-string">&#x27;gptj&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxGPTJForCausalLM.from_pretrained(<span class="hljs-string">&#x27;gptj&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve logts for next token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs.logits[:, -<span class="hljs-number">1</span>]`}}),{c(){h=n("meta"),y=d(),f=n("h1"),b=n("a"),$=n("span"),_(g.$$.fragment),m=d(),x=n("span"),qs=r("GPT-J"),Wn=d(),de=n("h2"),$e=n("a"),Eo=n("span"),_(Ve.$$.fragment),Is=d(),Ao=n("span"),Ls=r("Overview"),Un=d(),Z=n("p"),Ss=r("The GPT-J model was released in the "),Ke=n("a"),Ns=r("kingoflolz/mesh-transformer-jax"),Ds=r(` repository by Ben Wang and Aran Komatsuzaki. It is a GPT-2-like causal language model trained on `),Xe=n("a"),Bs=r("the Pile"),Os=r(" dataset."),Qn=d(),xe=n("p"),Ws=r("This model was contributed by "),Ye=n("a"),Us=r("Stella Biderman"),Qs=r("."),Hn=d(),ro=n("p"),Hs=r("Tips:"),Rn=d(),io=n("ul"),N=n("li"),Rs=r("To load "),Ze=n("a"),Vs=r("GPT-J"),Ks=r(` in float32 one would need at least 2x model size CPU RAM: 1x for initial weights and another 1x to load the checkpoint. So for GPT-J it would take at least 48GB of CPU RAM to just load the model. To reduce the CPU RAM usage there are a few options. The `),Co=n("code"),Xs=r("torch_dtype"),Ys=r(` argument can be used to initialize the model in half-precision. And the `),qo=n("code"),Zs=r("low_cpu_mem_usage"),ea=r(` argument can be used to keep the RAM usage to 1x. There is also a `),et=n("a"),ta=r("fp16 branch"),oa=r(` which stores the fp16 weights, which could be used to further minimize the RAM usage. Combining all this it should take roughly 12.1GB of CPU RAM to load the model.`),Vn=d(),_(tt.$$.fragment),Kn=d(),Ge=n("ul"),Io=n("li"),lo=n("p"),na=r(`The model should fit on 16GB GPU for inference. For training/fine-tuning it would take much more GPU RAM. Adam optimizer for example makes four copies of the model: model, gradients, average and squared average of the gradients. So it would need at least 4x model size GPU memory, even with mixed precision as gradient updates are in fp32. This is not including the activations and data batches, which would again require some more GPU RAM. So one should explore solutions such as DeepSpeed, to train/fine-tune the model. Another option is to use the original codebase to train/fine-tune the model on TPU and then convert the model to Transformers format for inference. Instructions for that could be found `),ot=n("a"),sa=r("here"),aa=d(),Lo=n("li"),R=n("p"),ra=r(`Although the embedding matrix has a size of 50400, only 50257 entries are used by the GPT-2 tokenizer. These extra tokens are added for the sake of efficiency on TPUs. To avoid the mis-match between embedding matrix size and vocab size, the tokenizer for `),nt=n("a"),ia=r("GPT-J"),la=r(` contains 143 extra tokens `),So=n("code"),da=r("<|extratoken_1|>... 
<|extratoken_143|>"),ca=r(", so the "),No=n("code"),pa=r("vocab_size"),ha=r(" of tokenizer also becomes 50400."),Xn=d(),ce=n("h3"),je=n("a"),Do=n("span"),_(st.$$.fragment),ua=d(),Bo=n("span"),fa=r("Generation"),Yn=d(),Je=n("p"),ma=r("The "),co=n("a"),ga=r("generate()"),_a=r(` method can be used to generate text using GPT-J model.`),Zn=d(),_(at.$$.fragment),es=d(),po=n("p"),Ta=r("\u2026or in float16 precision:"),ts=d(),_(rt.$$.fragment),os=d(),pe=n("h2"),Me=n("a"),Oo=n("span"),_(it.$$.fragment),va=d(),Wo=n("span"),Pa=r("GPTJConfig"),ns=d(),D=n("div"),_(lt.$$.fragment),ka=d(),B=n("p"),wa=r("This is the configuration class to store the configuration of a "),ho=n("a"),ba=r("GPTJModel"),ya=r(`. It is used to instantiate a GPT-J model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-J `),dt=n("a"),$a=r("gpt-j-6B"),xa=r(` architecture. Configuration objects inherit from `),uo=n("a"),Ga=r("PretrainedConfig"),ja=r(` and can be used to control the model outputs. Read the documentation from `),fo=n("a"),Ja=r("PretrainedConfig"),Ma=r(" for more information."),Fa=d(),Uo=n("p"),za=r("Example:"),Ea=d(),_(ct.$$.fragment),ss=d(),he=n("h2"),Fe=n("a"),Qo=n("span"),_(pt.$$.fragment),Aa=d(),Ho=n("span"),Ca=r("GPTJModel"),as=d(),V=n("div"),_(ht.$$.fragment),qa=d(),ut=n("p"),Ia=r(`The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),ft=n("a"),La=r("torch.nn.Module"),Sa=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Na=d(),z=n("div"),_(mt.$$.fragment),Da=d(),ue=n("p"),Ba=r("The "),mo=n("a"),Oa=r("GPTJModel"),Wa=r(" forward method, overrides the "),Ro=n("code"),Ua=r("__call__"),Qa=r(" special method."),Ha=d(),_(ze.$$.fragment),Ra=d(),Vo=n("p"),Va=r("Example:"),Ka=d(),_(gt.$$.fragment),rs=d(),fe=n("h2"),Ee=n("a"),Ko=n("span"),_(_t.$$.fragment),Xa=d(),Xo=n("span"),Ya=r("GPTJForCausalLM"),is=d(),O=n("div"),_(Tt.$$.fragment),Za=d(),Yo=n("p"),er=r(`The GPT-J Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),tr=d(),vt=n("p"),or=r("This model is a PyTorch "),Pt=n("a"),nr=r("torch.nn.Module"),sr=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ar=d(),E=n("div"),_(kt.$$.fragment),rr=d(),me=n("p"),ir=r("The "),go=n("a"),lr=r("GPTJForCausalLM"),dr=r(" forward method, overrides the "),Zo=n("code"),cr=r("__call__"),pr=r(" special method."),hr=d(),_(Ae.$$.fragment),ur=d(),en=n("p"),fr=r("Example:"),mr=d(),_(wt.$$.fragment),ls=d(),ge=n("h2"),Ce=n("a"),tn=n("span"),_(bt.$$.fragment),gr=d(),on=n("span"),_r=r("GPTJForSequenceClassification"),ds=d(),M=n("div"),_(yt.$$.fragment),Tr=d(),nn=n("p"),vr=r("The GPT-J Model transformer with a sequence classification head on top (linear layer)."),Pr=d(),_o=n("p"),To=n("a"),kr=r("GPTJForSequenceClassification"),wr=r(` uses the last token in order to do the classification, as other causal models (e.g. GPT, GPT-2, GPT-Neo) do.`),br=d(),W=n("p"),yr=r(`Since it does classification on the last token, it requires to know the position of the last token. If a `),sn=n("code"),$r=r("pad_token_id"),xr=r(` is defined in the configuration, it finds the last token that is not a padding token in each row. 
If no `),an=n("code"),Gr=r("pad_token_id"),jr=r(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),rn=n("code"),Jr=r("inputs_embeds"),Mr=r(" are passed instead of "),ln=n("code"),Fr=r("input_ids"),zr=r(`, it does the same (take the last value in each row of the batch).`),Er=d(),$t=n("p"),Ar=r("This model is a PyTorch "),xt=n("a"),Cr=r("torch.nn.Module"),qr=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ir=d(),J=n("div"),_(Gt.$$.fragment),Lr=d(),_e=n("p"),Sr=r("The "),vo=n("a"),Nr=r("GPTJForSequenceClassification"),Dr=r(" forward method, overrides the "),dn=n("code"),Br=r("__call__"),Or=r(" special method."),Wr=d(),_(qe.$$.fragment),Ur=d(),cn=n("p"),Qr=r("Example of single-label classification:"),Hr=d(),_(jt.$$.fragment),Rr=d(),pn=n("p"),Vr=r("Example of multi-label classification:"),Kr=d(),_(Jt.$$.fragment),cs=d(),Te=n("h2"),Ie=n("a"),hn=n("span"),_(Mt.$$.fragment),Xr=d(),un=n("span"),Yr=r("GPTJForQuestionAnswering"),ps=d(),U=n("div"),_(Ft.$$.fragment),Zr=d(),ve=n("p"),ei=r(`The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),fn=n("code"),ti=r("span start logits"),oi=r(" and "),mn=n("code"),ni=r("span end logits"),si=r(")."),ai=d(),zt=n("p"),ri=r("This model is a PyTorch "),Et=n("a"),ii=r("torch.nn.Module"),li=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),di=d(),A=n("div"),_(At.$$.fragment),ci=d(),Pe=n("p"),pi=r("The "),Po=n("a"),hi=r("GPTJForQuestionAnswering"),ui=r(" forward method, overrides the "),gn=n("code"),fi=r("__call__"),mi=r(" special method."),gi=d(),_(Le.$$.fragment),_i=d(),_n=n("p"),Ti=r("Example:"),vi=d(),_(Ct.$$.fragment),hs=d(),ke=n("h2"),Se=n("a"),Tn=n("span"),_(qt.$$.fragment),Pi=d(),vn=n("span"),ki=r("FlaxGPTJModel"),us=d(),G=n("div"),_(It.$$.fragment),wi=d(),Pn=n("p"),bi=r("The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top."),yi=d(),Lt=n("p"),$i=r("This model inherits from "),ko=n("a"),xi=r("FlaxPreTrainedModel"),Gi=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ji=d(),St=n("p"),Ji=r("This model is also a Flax Linen "),Nt=n("a"),Mi=r("flax.nn.Module"),Fi=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),zi=d(),kn=n("p"),Ei=r("Finally, this model supports inherent JAX features such as:"),Ai=d(),K=n("ul"),wn=n("li"),Dt=n("a"),Ci=r("Just-In-Time (JIT) compilation"),qi=d(),bn=n("li"),Bt=n("a"),Ii=r("Automatic Differentiation"),Li=d(),yn=n("li"),Ot=n("a"),Si=r("Vectorization"),Ni=d(),$n=n("li"),Wt=n("a"),Di=r("Parallelization"),Bi=d(),C=n("div"),_(Ut.$$.fragment),Oi=d(),we=n("p"),Wi=r("The "),xn=n("code"),Ui=r("FlaxGPTJPreTrainedModel"),Qi=r(" forward method, overrides the "),Gn=n("code"),Hi=r("__call__"),Ri=r(" special method."),Vi=d(),_(Ne.$$.fragment),Ki=d(),jn=n("p"),Xi=r("Example:"),Yi=d(),_(Qt.$$.fragment),fs=d(),be=n("h2"),De=n("a"),Jn=n("span"),_(Ht.$$.fragment),Zi=d(),Mn=n("span"),el=r("FlaxGPTJForCausalLM"),ms=d(),j=n("div"),_(Rt.$$.fragment),tl=d(),Fn=n("p"),ol=r(`The GPTJ Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),nl=d(),Vt=n("p"),sl=r("This model inherits from "),wo=n("a"),al=r("FlaxPreTrainedModel"),rl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),il=d(),Kt=n("p"),ll=r("This model is also a Flax Linen "),Xt=n("a"),dl=r("flax.nn.Module"),cl=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),pl=d(),zn=n("p"),hl=r("Finally, this model supports inherent JAX features such as:"),ul=d(),X=n("ul"),En=n("li"),Yt=n("a"),fl=r("Just-In-Time (JIT) compilation"),ml=d(),An=n("li"),Zt=n("a"),gl=r("Automatic Differentiation"),_l=d(),Cn=n("li"),eo=n("a"),Tl=r("Vectorization"),vl=d(),qn=n("li"),to=n("a"),Pl=r("Parallelization"),kl=d(),q=n("div"),_(oo.$$.fragment),wl=d(),ye=n("p"),bl=r("The "),In=n("code"),yl=r("FlaxGPTJPreTrainedModel"),$l=r(" forward method, overrides the "),Ln=n("code"),xl=r("__call__"),Gl=r(" special method."),jl=d(),_(Be.$$.fragment),Jl=d(),Sn=n("p"),Ml=r("Example:"),Fl=d(),_(no.$$.fragment),this.h()},l(t){const p=Ec('[data-svelte="svelte-1phssyn"]',document.head);h=s(p,"META",{name:!0,content:!0}),p.forEach(o),y=c(t),f=s(t,"H1",{class:!0});var so=a(f);b=s(so,"A",{id:!0,class:!0,href:!0});var Nn=a(b);$=s(Nn,"SPAN",{});var Dn=a($);T(g.$$.fragment,Dn),Dn.forEach(o),Nn.forEach(o),m=c(so),x=s(so,"SPAN",{});var Bn=a(x);qs=i(Bn,"GPT-J"),Bn.forEach(o),so.forEach(o),Wn=c(t),de=s(t,"H2",{class:!0});var ao=a(de);$e=s(ao,"A",{id:!0,class:!0,href:!0});var On=a($e);Eo=s(On,"SPAN",{});var Al=a(Eo);T(Ve.$$.fragment,Al),Al.forEach(o),On.forEach(o),Is=c(ao),Ao=s(ao,"SPAN",{});var Cl=a(Ao);Ls=i(Cl,"Overview"),Cl.forEach(o),ao.forEach(o),Un=c(t),Z=s(t,"P",{});var bo=a(Z);Ss=i(bo,"The GPT-J model was released in the "),Ke=s(bo,"A",{href:!0,rel:!0});var ql=a(Ke);Ns=i(ql,"kingoflolz/mesh-transformer-jax"),ql.forEach(o),Ds=i(bo,` repository by Ben Wang and Aran Komatsuzaki. 
It is a GPT-2-like causal language model trained on `),Xe=s(bo,"A",{href:!0,rel:!0});var Il=a(Xe);Bs=i(Il,"the Pile"),Il.forEach(o),Os=i(bo," dataset."),bo.forEach(o),Qn=c(t),xe=s(t,"P",{});var _s=a(xe);Ws=i(_s,"This model was contributed by "),Ye=s(_s,"A",{href:!0,rel:!0});var Ll=a(Ye);Us=i(Ll,"Stella Biderman"),Ll.forEach(o),Qs=i(_s,"."),_s.forEach(o),Hn=c(t),ro=s(t,"P",{});var Sl=a(ro);Hs=i(Sl,"Tips:"),Sl.forEach(o),Rn=c(t),io=s(t,"UL",{});var Nl=a(io);N=s(Nl,"LI",{});var ee=a(N);Rs=i(ee,"To load "),Ze=s(ee,"A",{href:!0,rel:!0});var Dl=a(Ze);Vs=i(Dl,"GPT-J"),Dl.forEach(o),Ks=i(ee,` in float32 one would need at least 2x model size CPU RAM: 1x for initial weights and another 1x to load the checkpoint. So for GPT-J it would take at least 48GB of CPU RAM to just load the model. To reduce the CPU RAM usage there are a few options. The `),Co=s(ee,"CODE",{});var Bl=a(Co);Xs=i(Bl,"torch_dtype"),Bl.forEach(o),Ys=i(ee,` argument can be used to initialize the model in half-precision. And the `),qo=s(ee,"CODE",{});var Ol=a(qo);Zs=i(Ol,"low_cpu_mem_usage"),Ol.forEach(o),ea=i(ee,` argument can be used to keep the RAM usage to 1x. There is also a `),et=s(ee,"A",{href:!0,rel:!0});var Wl=a(et);ta=i(Wl,"fp16 branch"),Wl.forEach(o),oa=i(ee,` which stores the fp16 weights, which could be used to further minimize the RAM usage. Combining all this it should take roughly 12.1GB of CPU RAM to load the model.`),ee.forEach(o),Nl.forEach(o),Vn=c(t),T(tt.$$.fragment,t),Kn=c(t),Ge=s(t,"UL",{});var Ts=a(Ge);Io=s(Ts,"LI",{});var Ul=a(Io);lo=s(Ul,"P",{});var zl=a(lo);na=i(zl,`The model should fit on 16GB GPU for inference. For training/fine-tuning it would take much more GPU RAM. Adam optimizer for example makes four copies of the model: model, gradients, average and squared average of the gradients. So it would need at least 4x model size GPU memory, even with mixed precision as gradient updates are in fp32. This is not including the activations and data batches, which would again require some more GPU RAM. So one should explore solutions such as DeepSpeed, to train/fine-tune the model. Another option is to use the original codebase to train/fine-tune the model on TPU and then convert the model to Transformers format for inference. Instructions for that could be found `),ot=s(zl,"A",{href:!0,rel:!0});var Ql=a(ot);sa=i(Ql,"here"),Ql.forEach(o),zl.forEach(o),Ul.forEach(o),aa=c(Ts),Lo=s(Ts,"LI",{});var Hl=a(Lo);R=s(Hl,"P",{});var Oe=a(R);ra=i(Oe,`Although the embedding matrix has a size of 50400, only 50257 entries are used by the GPT-2 tokenizer. These extra tokens are added for the sake of efficiency on TPUs. To avoid the mis-match between embedding matrix size and vocab size, the tokenizer for `),nt=s(Oe,"A",{href:!0,rel:!0});var Rl=a(nt);ia=i(Rl,"GPT-J"),Rl.forEach(o),la=i(Oe,` contains 143 extra tokens `),So=s(Oe,"CODE",{});var Vl=a(So);da=i(Vl,"<|extratoken_1|>... 
<|extratoken_143|>"),Vl.forEach(o),ca=i(Oe,", so the "),No=s(Oe,"CODE",{});var Kl=a(No);pa=i(Kl,"vocab_size"),Kl.forEach(o),ha=i(Oe," of tokenizer also becomes 50400."),Oe.forEach(o),Hl.forEach(o),Ts.forEach(o),Xn=c(t),ce=s(t,"H3",{class:!0});var vs=a(ce);je=s(vs,"A",{id:!0,class:!0,href:!0});var Xl=a(je);Do=s(Xl,"SPAN",{});var Yl=a(Do);T(st.$$.fragment,Yl),Yl.forEach(o),Xl.forEach(o),ua=c(vs),Bo=s(vs,"SPAN",{});var Zl=a(Bo);fa=i(Zl,"Generation"),Zl.forEach(o),vs.forEach(o),Yn=c(t),Je=s(t,"P",{});var Ps=a(Je);ma=i(Ps,"The "),co=s(Ps,"A",{href:!0});var ed=a(co);ga=i(ed,"generate()"),ed.forEach(o),_a=i(Ps,` method can be used to generate text using GPT-J model.`),Ps.forEach(o),Zn=c(t),T(at.$$.fragment,t),es=c(t),po=s(t,"P",{});var td=a(po);Ta=i(td,"\u2026or in float16 precision:"),td.forEach(o),ts=c(t),T(rt.$$.fragment,t),os=c(t),pe=s(t,"H2",{class:!0});var ks=a(pe);Me=s(ks,"A",{id:!0,class:!0,href:!0});var od=a(Me);Oo=s(od,"SPAN",{});var nd=a(Oo);T(it.$$.fragment,nd),nd.forEach(o),od.forEach(o),va=c(ks),Wo=s(ks,"SPAN",{});var sd=a(Wo);Pa=i(sd,"GPTJConfig"),sd.forEach(o),ks.forEach(o),ns=c(t),D=s(t,"DIV",{class:!0});var We=a(D);T(lt.$$.fragment,We),ka=c(We),B=s(We,"P",{});var te=a(B);wa=i(te,"This is the configuration class to store the configuration of a "),ho=s(te,"A",{href:!0});var ad=a(ho);ba=i(ad,"GPTJModel"),ad.forEach(o),ya=i(te,`. It is used to instantiate a GPT-J model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-J `),dt=s(te,"A",{href:!0,rel:!0});var rd=a(dt);$a=i(rd,"gpt-j-6B"),rd.forEach(o),xa=i(te,` architecture. Configuration objects inherit from `),uo=s(te,"A",{href:!0});var id=a(uo);Ga=i(id,"PretrainedConfig"),id.forEach(o),ja=i(te,` and can be used to control the model outputs. Read the documentation from `),fo=s(te,"A",{href:!0});var ld=a(fo);Ja=i(ld,"PretrainedConfig"),ld.forEach(o),Ma=i(te," for more information."),te.forEach(o),Fa=c(We),Uo=s(We,"P",{});var dd=a(Uo);za=i(dd,"Example:"),dd.forEach(o),Ea=c(We),T(ct.$$.fragment,We),We.forEach(o),ss=c(t),he=s(t,"H2",{class:!0});var ws=a(he);Fe=s(ws,"A",{id:!0,class:!0,href:!0});var cd=a(Fe);Qo=s(cd,"SPAN",{});var pd=a(Qo);T(pt.$$.fragment,pd),pd.forEach(o),cd.forEach(o),Aa=c(ws),Ho=s(ws,"SPAN",{});var hd=a(Ho);Ca=i(hd,"GPTJModel"),hd.forEach(o),ws.forEach(o),as=c(t),V=s(t,"DIV",{class:!0});var yo=a(V);T(ht.$$.fragment,yo),qa=c(yo),ut=s(yo,"P",{});var bs=a(ut);Ia=i(bs,`The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),ft=s(bs,"A",{href:!0,rel:!0});var ud=a(ft);La=i(ud,"torch.nn.Module"),ud.forEach(o),Sa=i(bs,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bs.forEach(o),Na=c(yo),z=s(yo,"DIV",{class:!0});var oe=a(z);T(mt.$$.fragment,oe),Da=c(oe),ue=s(oe,"P",{});var $o=a(ue);Ba=i($o,"The "),mo=s($o,"A",{href:!0});var fd=a(mo);Oa=i(fd,"GPTJModel"),fd.forEach(o),Wa=i($o," forward method, overrides the "),Ro=s($o,"CODE",{});var md=a(Ro);Ua=i(md,"__call__"),md.forEach(o),Qa=i($o," special method."),$o.forEach(o),Ha=c(oe),T(ze.$$.fragment,oe),Ra=c(oe),Vo=s(oe,"P",{});var gd=a(Vo);Va=i(gd,"Example:"),gd.forEach(o),Ka=c(oe),T(gt.$$.fragment,oe),oe.forEach(o),yo.forEach(o),rs=c(t),fe=s(t,"H2",{class:!0});var ys=a(fe);Ee=s(ys,"A",{id:!0,class:!0,href:!0});var _d=a(Ee);Ko=s(_d,"SPAN",{});var Td=a(Ko);T(_t.$$.fragment,Td),Td.forEach(o),_d.forEach(o),Xa=c(ys),Xo=s(ys,"SPAN",{});var vd=a(Xo);Ya=i(vd,"GPTJForCausalLM"),vd.forEach(o),ys.forEach(o),is=c(t),O=s(t,"DIV",{class:!0});var Ue=a(O);T(Tt.$$.fragment,Ue),Za=c(Ue),Yo=s(Ue,"P",{});var Pd=a(Yo);er=i(Pd,`The GPT-J Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Pd.forEach(o),tr=c(Ue),vt=s(Ue,"P",{});var $s=a(vt);or=i($s,"This model is a PyTorch "),Pt=s($s,"A",{href:!0,rel:!0});var kd=a(Pt);nr=i(kd,"torch.nn.Module"),kd.forEach(o),sr=i($s,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$s.forEach(o),ar=c(Ue),E=s(Ue,"DIV",{class:!0});var ne=a(E);T(kt.$$.fragment,ne),rr=c(ne),me=s(ne,"P",{});var xo=a(me);ir=i(xo,"The "),go=s(xo,"A",{href:!0});var wd=a(go);lr=i(wd,"GPTJForCausalLM"),wd.forEach(o),dr=i(xo," forward method, overrides the "),Zo=s(xo,"CODE",{});var bd=a(Zo);cr=i(bd,"__call__"),bd.forEach(o),pr=i(xo," special method."),xo.forEach(o),hr=c(ne),T(Ae.$$.fragment,ne),ur=c(ne),en=s(ne,"P",{});var yd=a(en);fr=i(yd,"Example:"),yd.forEach(o),mr=c(ne),T(wt.$$.fragment,ne),ne.forEach(o),Ue.forEach(o),ls=c(t),ge=s(t,"H2",{class:!0});var xs=a(ge);Ce=s(xs,"A",{id:!0,class:!0,href:!0});var $d=a(Ce);tn=s($d,"SPAN",{});var xd=a(tn);T(bt.$$.fragment,xd),xd.forEach(o),$d.forEach(o),gr=c(xs),on=s(xs,"SPAN",{});var Gd=a(on);_r=i(Gd,"GPTJForSequenceClassification"),Gd.forEach(o),xs.forEach(o),ds=c(t),M=s(t,"DIV",{class:!0});var H=a(M);T(yt.$$.fragment,H),Tr=c(H),nn=s(H,"P",{});var jd=a(nn);vr=i(jd,"The GPT-J Model transformer with a sequence classification head on top (linear layer)."),jd.forEach(o),Pr=c(H),_o=s(H,"P",{});var El=a(_o);To=s(El,"A",{href:!0});var Jd=a(To);kr=i(Jd,"GPTJForSequenceClassification"),Jd.forEach(o),wr=i(El,` uses the last token in order to do the classification, as other causal models (e.g. GPT, GPT-2, GPT-Neo) do.`),El.forEach(o),br=c(H),W=s(H,"P",{});var se=a(W);yr=i(se,`Since it does classification on the last token, it requires to know the position of the last token. If a `),sn=s(se,"CODE",{});var Md=a(sn);$r=i(Md,"pad_token_id"),Md.forEach(o),xr=i(se,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),an=s(se,"CODE",{});var Fd=a(an);Gr=i(Fd,"pad_token_id"),Fd.forEach(o),jr=i(se,` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the padding tokens when `),rn=s(se,"CODE",{});var zd=a(rn);Jr=i(zd,"inputs_embeds"),zd.forEach(o),Mr=i(se," are passed instead of "),ln=s(se,"CODE",{});var Ed=a(ln);Fr=i(Ed,"input_ids"),Ed.forEach(o),zr=i(se,`, it does the same (take the last value in each row of the batch).`),se.forEach(o),Er=c(H),$t=s(H,"P",{});var Gs=a($t);Ar=i(Gs,"This model is a PyTorch "),xt=s(Gs,"A",{href:!0,rel:!0});var Ad=a(xt);Cr=i(Ad,"torch.nn.Module"),Ad.forEach(o),qr=i(Gs,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gs.forEach(o),Ir=c(H),J=s(H,"DIV",{class:!0});var I=a(J);T(Gt.$$.fragment,I),Lr=c(I),_e=s(I,"P",{});var Go=a(_e);Sr=i(Go,"The "),vo=s(Go,"A",{href:!0});var Cd=a(vo);Nr=i(Cd,"GPTJForSequenceClassification"),Cd.forEach(o),Dr=i(Go," forward method, overrides the "),dn=s(Go,"CODE",{});var qd=a(dn);Br=i(qd,"__call__"),qd.forEach(o),Or=i(Go," special method."),Go.forEach(o),Wr=c(I),T(qe.$$.fragment,I),Ur=c(I),cn=s(I,"P",{});var Id=a(cn);Qr=i(Id,"Example of single-label classification:"),Id.forEach(o),Hr=c(I),T(jt.$$.fragment,I),Rr=c(I),pn=s(I,"P",{});var Ld=a(pn);Vr=i(Ld,"Example of multi-label classification:"),Ld.forEach(o),Kr=c(I),T(Jt.$$.fragment,I),I.forEach(o),H.forEach(o),cs=c(t),Te=s(t,"H2",{class:!0});var js=a(Te);Ie=s(js,"A",{id:!0,class:!0,href:!0});var Sd=a(Ie);hn=s(Sd,"SPAN",{});var Nd=a(hn);T(Mt.$$.fragment,Nd),Nd.forEach(o),Sd.forEach(o),Xr=c(js),un=s(js,"SPAN",{});var Dd=a(un);Yr=i(Dd,"GPTJForQuestionAnswering"),Dd.forEach(o),js.forEach(o),ps=c(t),U=s(t,"DIV",{class:!0});var Qe=a(U);T(Ft.$$.fragment,Qe),Zr=c(Qe),ve=s(Qe,"P",{});var jo=a(ve);ei=i(jo,`The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),fn=s(jo,"CODE",{});var Bd=a(fn);ti=i(Bd,"span start logits"),Bd.forEach(o),oi=i(jo," and "),mn=s(jo,"CODE",{});var Od=a(mn);ni=i(Od,"span end logits"),Od.forEach(o),si=i(jo,")."),jo.forEach(o),ai=c(Qe),zt=s(Qe,"P",{});var Js=a(zt);ri=i(Js,"This model is a PyTorch "),Et=s(Js,"A",{href:!0,rel:!0});var Wd=a(Et);ii=i(Wd,"torch.nn.Module"),Wd.forEach(o),li=i(Js,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Js.forEach(o),di=c(Qe),A=s(Qe,"DIV",{class:!0});var ae=a(A);T(At.$$.fragment,ae),ci=c(ae),Pe=s(ae,"P",{});var Jo=a(Pe);pi=i(Jo,"The "),Po=s(Jo,"A",{href:!0});var Ud=a(Po);hi=i(Ud,"GPTJForQuestionAnswering"),Ud.forEach(o),ui=i(Jo," forward method, overrides the "),gn=s(Jo,"CODE",{});var Qd=a(gn);fi=i(Qd,"__call__"),Qd.forEach(o),mi=i(Jo," special method."),Jo.forEach(o),gi=c(ae),T(Le.$$.fragment,ae),_i=c(ae),_n=s(ae,"P",{});var Hd=a(_n);Ti=i(Hd,"Example:"),Hd.forEach(o),vi=c(ae),T(Ct.$$.fragment,ae),ae.forEach(o),Qe.forEach(o),hs=c(t),ke=s(t,"H2",{class:!0});var Ms=a(ke);Se=s(Ms,"A",{id:!0,class:!0,href:!0});var Rd=a(Se);Tn=s(Rd,"SPAN",{});var Vd=a(Tn);T(qt.$$.fragment,Vd),Vd.forEach(o),Rd.forEach(o),Pi=c(Ms),vn=s(Ms,"SPAN",{});var Kd=a(vn);ki=i(Kd,"FlaxGPTJModel"),Kd.forEach(o),Ms.forEach(o),us=c(t),G=s(t,"DIV",{class:!0});var L=a(G);T(It.$$.fragment,L),wi=c(L),Pn=s(L,"P",{});var Xd=a(Pn);bi=i(Xd,"The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top."),Xd.forEach(o),yi=c(L),Lt=s(L,"P",{});var Fs=a(Lt);$i=i(Fs,"This model inherits from "),ko=s(Fs,"A",{href:!0});var Yd=a(ko);xi=i(Yd,"FlaxPreTrainedModel"),Yd.forEach(o),Gi=i(Fs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fs.forEach(o),ji=c(L),St=s(L,"P",{});var zs=a(St);Ji=i(zs,"This model is also a Flax Linen "),Nt=s(zs,"A",{href:!0,rel:!0});var Zd=a(Nt);Mi=i(Zd,"flax.nn.Module"),Zd.forEach(o),Fi=i(zs,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),zs.forEach(o),zi=c(L),kn=s(L,"P",{});var ec=a(kn);Ei=i(ec,"Finally, this model supports inherent JAX features such as:"),ec.forEach(o),Ai=c(L),K=s(L,"UL",{});var He=a(K);wn=s(He,"LI",{});var tc=a(wn);Dt=s(tc,"A",{href:!0,rel:!0});var oc=a(Dt);Ci=i(oc,"Just-In-Time (JIT) compilation"),oc.forEach(o),tc.forEach(o),qi=c(He),bn=s(He,"LI",{});var nc=a(bn);Bt=s(nc,"A",{href:!0,rel:!0});var sc=a(Bt);Ii=i(sc,"Automatic Differentiation"),sc.forEach(o),nc.forEach(o),Li=c(He),yn=s(He,"LI",{});var ac=a(yn);Ot=s(ac,"A",{href:!0,rel:!0});var rc=a(Ot);Si=i(rc,"Vectorization"),rc.forEach(o),ac.forEach(o),Ni=c(He),$n=s(He,"LI",{});var ic=a($n);Wt=s(ic,"A",{href:!0,rel:!0});var lc=a(Wt);Di=i(lc,"Parallelization"),lc.forEach(o),ic.forEach(o),He.forEach(o),Bi=c(L),C=s(L,"DIV",{class:!0});var re=a(C);T(Ut.$$.fragment,re),Oi=c(re),we=s(re,"P",{});var Mo=a(we);Wi=i(Mo,"The "),xn=s(Mo,"CODE",{});var dc=a(xn);Ui=i(dc,"FlaxGPTJPreTrainedModel"),dc.forEach(o),Qi=i(Mo," forward method, overrides the "),Gn=s(Mo,"CODE",{});var cc=a(Gn);Hi=i(cc,"__call__"),cc.forEach(o),Ri=i(Mo," special method."),Mo.forEach(o),Vi=c(re),T(Ne.$$.fragment,re),Ki=c(re),jn=s(re,"P",{});var pc=a(jn);Xi=i(pc,"Example:"),pc.forEach(o),Yi=c(re),T(Qt.$$.fragment,re),re.forEach(o),L.forEach(o),fs=c(t),be=s(t,"H2",{class:!0});var Es=a(be);De=s(Es,"A",{id:!0,class:!0,href:!0});var hc=a(De);Jn=s(hc,"SPAN",{});var uc=a(Jn);T(Ht.$$.fragment,uc),uc.forEach(o),hc.forEach(o),Zi=c(Es),Mn=s(Es,"SPAN",{});var fc=a(Mn);el=i(fc,"FlaxGPTJForCausalLM"),fc.forEach(o),Es.forEach(o),ms=c(t),j=s(t,"DIV",{class:!0});var S=a(j);T(Rt.$$.fragment,S),tl=c(S),Fn=s(S,"P",{});var mc=a(Fn);ol=i(mc,`The GPTJ Model transformer with a language modeling head on top (linear layer with weights 
tied to the input embeddings).`),mc.forEach(o),nl=c(S),Vt=s(S,"P",{});var As=a(Vt);sl=i(As,"This model inherits from "),wo=s(As,"A",{href:!0});var gc=a(wo);al=i(gc,"FlaxPreTrainedModel"),gc.forEach(o),rl=i(As,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),As.forEach(o),il=c(S),Kt=s(S,"P",{});var Cs=a(Kt);ll=i(Cs,"This model is also a Flax Linen "),Xt=s(Cs,"A",{href:!0,rel:!0});var _c=a(Xt);dl=i(_c,"flax.nn.Module"),_c.forEach(o),cl=i(Cs,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Cs.forEach(o),pl=c(S),zn=s(S,"P",{});var Tc=a(zn);hl=i(Tc,"Finally, this model supports inherent JAX features such as:"),Tc.forEach(o),ul=c(S),X=s(S,"UL",{});var Re=a(X);En=s(Re,"LI",{});var vc=a(En);Yt=s(vc,"A",{href:!0,rel:!0});var Pc=a(Yt);fl=i(Pc,"Just-In-Time (JIT) compilation"),Pc.forEach(o),vc.forEach(o),ml=c(Re),An=s(Re,"LI",{});var kc=a(An);Zt=s(kc,"A",{href:!0,rel:!0});var wc=a(Zt);gl=i(wc,"Automatic Differentiation"),wc.forEach(o),kc.forEach(o),_l=c(Re),Cn=s(Re,"LI",{});var bc=a(Cn);eo=s(bc,"A",{href:!0,rel:!0});var yc=a(eo);Tl=i(yc,"Vectorization"),yc.forEach(o),bc.forEach(o),vl=c(Re),qn=s(Re,"LI",{});var $c=a(qn);to=s($c,"A",{href:!0,rel:!0});var xc=a(to);Pl=i(xc,"Parallelization"),xc.forEach(o),$c.forEach(o),Re.forEach(o),kl=c(S),q=s(S,"DIV",{class:!0});var ie=a(q);T(oo.$$.fragment,ie),wl=c(ie),ye=s(ie,"P",{});var Fo=a(ye);bl=i(Fo,"The "),In=s(Fo,"CODE",{});var Gc=a(In);yl=i(Gc,"FlaxGPTJPreTrainedModel"),Gc.forEach(o),$l=i(Fo," forward method, overrides the "),Ln=s(Fo,"CODE",{});var jc=a(Ln);xl=i(jc,"__call__"),jc.forEach(o),Gl=i(Fo," special method."),Fo.forEach(o),jl=c(ie),T(Be.$$.fragment,ie),Jl=c(ie),Sn=s(ie,"P",{});var Jc=a(Sn);Ml=i(Jc,"Example:"),Jc.forEach(o),Fl=c(ie),T(no.$$.fragment,ie),ie.forEach(o),S.forEach(o),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(Dc)),l(b,"id","gptj"),l(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(b,"href","#gptj"),l(f,"class","relative group"),l($e,"id","overview"),l($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l($e,"href","#overview"),l(de,"class","relative group"),l(Ke,"href","https://github.com/kingoflolz/mesh-transformer-jax"),l(Ke,"rel","nofollow"),l(Xe,"href","https://pile.eleuther.ai/"),l(Xe,"rel","nofollow"),l(Ye,"href","https://huggingface.co/stellaathena"),l(Ye,"rel","nofollow"),l(Ze,"href","https://huggingface.co/EleutherAI/gpt-j-6B"),l(Ze,"rel","nofollow"),l(et,"href","https://huggingface.co/EleutherAI/gpt-j-6B/tree/float16"),l(et,"rel","nofollow"),l(ot,"href","https://github.com/kingoflolz/mesh-transformer-jax/blob/master/howto_finetune.md"),l(ot,"rel","nofollow"),l(nt,"href","https://huggingface.co/EleutherAI/gpt-j-6B"),l(nt,"rel","nofollow"),l(je,"id","generation"),l(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(je,"href","#generation"),l(ce,"class","relative 
group"),l(co,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),l(Me,"id","transformers.GPTJConfig"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.GPTJConfig"),l(pe,"class","relative group"),l(ho,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJModel"),l(dt,"href","https://huggingface.co/EleutherAI/gpt-j-6B"),l(dt,"rel","nofollow"),l(uo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(fo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(D,"class","docstring"),l(Fe,"id","transformers.GPTJModel"),l(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Fe,"href","#transformers.GPTJModel"),l(he,"class","relative group"),l(ft,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(ft,"rel","nofollow"),l(mo,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJModel"),l(z,"class","docstring"),l(V,"class","docstring"),l(Ee,"id","transformers.GPTJForCausalLM"),l(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ee,"href","#transformers.GPTJForCausalLM"),l(fe,"class","relative group"),l(Pt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Pt,"rel","nofollow"),l(go,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForCausalLM"),l(E,"class","docstring"),l(O,"class","docstring"),l(Ce,"id","transformers.GPTJForSequenceClassification"),l(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ce,"href","#transformers.GPTJForSequenceClassification"),l(ge,"class","relative group"),l(To,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForSequenceClassification"),l(xt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(xt,"rel","nofollow"),l(vo,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForSequenceClassification"),l(J,"class","docstring"),l(M,"class","docstring"),l(Ie,"id","transformers.GPTJForQuestionAnswering"),l(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ie,"href","#transformers.GPTJForQuestionAnswering"),l(Te,"class","relative group"),l(Et,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Et,"rel","nofollow"),l(Po,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForQuestionAnswering"),l(A,"class","docstring"),l(U,"class","docstring"),l(Se,"id","transformers.FlaxGPTJModel"),l(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Se,"href","#transformers.FlaxGPTJModel"),l(ke,"class","relative 
group"),l(ko,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(Nt,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(Nt,"rel","nofollow"),l(Dt,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(Dt,"rel","nofollow"),l(Bt,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Bt,"rel","nofollow"),l(Ot,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Ot,"rel","nofollow"),l(Wt,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(Wt,"rel","nofollow"),l(C,"class","docstring"),l(G,"class","docstring"),l(De,"id","transformers.FlaxGPTJForCausalLM"),l(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(De,"href","#transformers.FlaxGPTJForCausalLM"),l(be,"class","relative group"),l(wo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(Xt,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(Xt,"rel","nofollow"),l(Yt,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(Yt,"rel","nofollow"),l(Zt,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Zt,"rel","nofollow"),l(eo,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(eo,"rel","nofollow"),l(to,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(to,"rel","nofollow"),l(q,"class","docstring"),l(j,"class","docstring")},m(t,p){e(document.head,h),u(t,y,p),u(t,f,p),e(f,b),e(b,$),v(g,$,null),e(f,m),e(f,x),e(x,qs),u(t,Wn,p),u(t,de,p),e(de,$e),e($e,Eo),v(Ve,Eo,null),e(de,Is),e(de,Ao),e(Ao,Ls),u(t,Un,p),u(t,Z,p),e(Z,Ss),e(Z,Ke),e(Ke,Ns),e(Z,Ds),e(Z,Xe),e(Xe,Bs),e(Z,Os),u(t,Qn,p),u(t,xe,p),e(xe,Ws),e(xe,Ye),e(Ye,Us),e(xe,Qs),u(t,Hn,p),u(t,ro,p),e(ro,Hs),u(t,Rn,p),u(t,io,p),e(io,N),e(N,Rs),e(N,Ze),e(Ze,Vs),e(N,Ks),e(N,Co),e(Co,Xs),e(N,Ys),e(N,qo),e(qo,Zs),e(N,ea),e(N,et),e(et,ta),e(N,oa),u(t,Vn,p),v(tt,t,p),u(t,Kn,p),u(t,Ge,p),e(Ge,Io),e(Io,lo),e(lo,na),e(lo,ot),e(ot,sa),e(Ge,aa),e(Ge,Lo),e(Lo,R),e(R,ra),e(R,nt),e(nt,ia),e(R,la),e(R,So),e(So,da),e(R,ca),e(R,No),e(No,pa),e(R,ha),u(t,Xn,p),u(t,ce,p),e(ce,je),e(je,Do),v(st,Do,null),e(ce,ua),e(ce,Bo),e(Bo,fa),u(t,Yn,p),u(t,Je,p),e(Je,ma),e(Je,co),e(co,ga),e(Je,_a),u(t,Zn,p),v(at,t,p),u(t,es,p),u(t,po,p),e(po,Ta),u(t,ts,p),v(rt,t,p),u(t,os,p),u(t,pe,p),e(pe,Me),e(Me,Oo),v(it,Oo,null),e(pe,va),e(pe,Wo),e(Wo,Pa),u(t,ns,p),u(t,D,p),v(lt,D,null),e(D,ka),e(D,B),e(B,wa),e(B,ho),e(ho,ba),e(B,ya),e(B,dt),e(dt,$a),e(B,xa),e(B,uo),e(uo,Ga),e(B,ja),e(B,fo),e(fo,Ja),e(B,Ma),e(D,Fa),e(D,Uo),e(Uo,za),e(D,Ea),v(ct,D,null),u(t,ss,p),u(t,he,p),e(he,Fe),e(Fe,Qo),v(pt,Qo,null),e(he,Aa),e(he,Ho),e(Ho,Ca),u(t,as,p),u(t,V,p),v(ht,V,null),e(V,qa),e(V,ut),e(ut,Ia),e(ut,ft),e(ft,La),e(ut,Sa),e(V,Na),e(V,z),v(mt,z,null),e(z,Da),e(z,ue),e(ue,Ba),e(ue,mo),e(mo,Oa),e(ue,Wa),e(ue,Ro),e(Ro,Ua),e(ue,Qa),e(z,Ha),v(ze,z,null),e(z,Ra),e(z,Vo),e(Vo,Va),e(z,Ka),v(gt,z,null),u(t,rs,p),u(t,fe,p),e(fe,Ee),e(Ee,Ko),v(_t,Ko,null),e(fe,Xa),e(fe,Xo),e(Xo,Ya),u(t,is,p),u(t,O,p),v(Tt,O,null),e(O,Za),e(O,Yo),e(Yo,er),e(O,tr),e(O,vt),e(vt,or),e(vt,Pt),e(Pt,nr),e(vt,sr),e(O,ar),e(O,E),v(kt,E,null),e(E,rr),e(E,me),e(me,ir),e(me,go),e(go,lr),e(me,dr),e(me,Zo),e(Zo,cr),e(me,pr),e(E,hr),v(Ae,E,null),e(E,ur),e(E,en),e(en,fr),e(E,mr),v(wt,E,null),u(t,ls,p),u(t,ge,p),
e(ge,Ce),e(Ce,tn),v(bt,tn,null),e(ge,gr),e(ge,on),e(on,_r),u(t,ds,p),u(t,M,p),v(yt,M,null),e(M,Tr),e(M,nn),e(nn,vr),e(M,Pr),e(M,_o),e(_o,To),e(To,kr),e(_o,wr),e(M,br),e(M,W),e(W,yr),e(W,sn),e(sn,$r),e(W,xr),e(W,an),e(an,Gr),e(W,jr),e(W,rn),e(rn,Jr),e(W,Mr),e(W,ln),e(ln,Fr),e(W,zr),e(M,Er),e(M,$t),e($t,Ar),e($t,xt),e(xt,Cr),e($t,qr),e(M,Ir),e(M,J),v(Gt,J,null),e(J,Lr),e(J,_e),e(_e,Sr),e(_e,vo),e(vo,Nr),e(_e,Dr),e(_e,dn),e(dn,Br),e(_e,Or),e(J,Wr),v(qe,J,null),e(J,Ur),e(J,cn),e(cn,Qr),e(J,Hr),v(jt,J,null),e(J,Rr),e(J,pn),e(pn,Vr),e(J,Kr),v(Jt,J,null),u(t,cs,p),u(t,Te,p),e(Te,Ie),e(Ie,hn),v(Mt,hn,null),e(Te,Xr),e(Te,un),e(un,Yr),u(t,ps,p),u(t,U,p),v(Ft,U,null),e(U,Zr),e(U,ve),e(ve,ei),e(ve,fn),e(fn,ti),e(ve,oi),e(ve,mn),e(mn,ni),e(ve,si),e(U,ai),e(U,zt),e(zt,ri),e(zt,Et),e(Et,ii),e(zt,li),e(U,di),e(U,A),v(At,A,null),e(A,ci),e(A,Pe),e(Pe,pi),e(Pe,Po),e(Po,hi),e(Pe,ui),e(Pe,gn),e(gn,fi),e(Pe,mi),e(A,gi),v(Le,A,null),e(A,_i),e(A,_n),e(_n,Ti),e(A,vi),v(Ct,A,null),u(t,hs,p),u(t,ke,p),e(ke,Se),e(Se,Tn),v(qt,Tn,null),e(ke,Pi),e(ke,vn),e(vn,ki),u(t,us,p),u(t,G,p),v(It,G,null),e(G,wi),e(G,Pn),e(Pn,bi),e(G,yi),e(G,Lt),e(Lt,$i),e(Lt,ko),e(ko,xi),e(Lt,Gi),e(G,ji),e(G,St),e(St,Ji),e(St,Nt),e(Nt,Mi),e(St,Fi),e(G,zi),e(G,kn),e(kn,Ei),e(G,Ai),e(G,K),e(K,wn),e(wn,Dt),e(Dt,Ci),e(K,qi),e(K,bn),e(bn,Bt),e(Bt,Ii),e(K,Li),e(K,yn),e(yn,Ot),e(Ot,Si),e(K,Ni),e(K,$n),e($n,Wt),e(Wt,Di),e(G,Bi),e(G,C),v(Ut,C,null),e(C,Oi),e(C,we),e(we,Wi),e(we,xn),e(xn,Ui),e(we,Qi),e(we,Gn),e(Gn,Hi),e(we,Ri),e(C,Vi),v(Ne,C,null),e(C,Ki),e(C,jn),e(jn,Xi),e(C,Yi),v(Qt,C,null),u(t,fs,p),u(t,be,p),e(be,De),e(De,Jn),v(Ht,Jn,null),e(be,Zi),e(be,Mn),e(Mn,el),u(t,ms,p),u(t,j,p),v(Rt,j,null),e(j,tl),e(j,Fn),e(Fn,ol),e(j,nl),e(j,Vt),e(Vt,sl),e(Vt,wo),e(wo,al),e(Vt,rl),e(j,il),e(j,Kt),e(Kt,ll),e(Kt,Xt),e(Xt,dl),e(Kt,cl),e(j,pl),e(j,zn),e(zn,hl),e(j,ul),e(j,X),e(X,En),e(En,Yt),e(Yt,fl),e(X,ml),e(X,An),e(An,Zt),e(Zt,gl),e(X,_l),e(X,Cn),e(Cn,eo),e(eo,Tl),e(X,vl),e(X,qn),e(qn,to),e(to,Pl),e(j,kl),e(j,q),v(oo,q,null),e(q,wl),e(q,ye),e(ye,bl),e(ye,In),e(In,yl),e(ye,$l),e(ye,Ln),e(Ln,xl),e(ye,Gl),e(q,jl),v(Be,q,null),e(q,Jl),e(q,Sn),e(Sn,Ml),e(q,Fl),v(no,q,null),gs=!0},p(t,[p]){const so={};p&2&&(so.$$scope={dirty:p,ctx:t}),ze.$set(so);const Nn={};p&2&&(Nn.$$scope={dirty:p,ctx:t}),Ae.$set(Nn);const Dn={};p&2&&(Dn.$$scope={dirty:p,ctx:t}),qe.$set(Dn);const Bn={};p&2&&(Bn.$$scope={dirty:p,ctx:t}),Le.$set(Bn);const ao={};p&2&&(ao.$$scope={dirty:p,ctx:t}),Ne.$set(ao);const 
On={};p&2&&(On.$$scope={dirty:p,ctx:t}),Be.$set(On)},i(t){gs||(P(g.$$.fragment,t),P(Ve.$$.fragment,t),P(tt.$$.fragment,t),P(st.$$.fragment,t),P(at.$$.fragment,t),P(rt.$$.fragment,t),P(it.$$.fragment,t),P(lt.$$.fragment,t),P(ct.$$.fragment,t),P(pt.$$.fragment,t),P(ht.$$.fragment,t),P(mt.$$.fragment,t),P(ze.$$.fragment,t),P(gt.$$.fragment,t),P(_t.$$.fragment,t),P(Tt.$$.fragment,t),P(kt.$$.fragment,t),P(Ae.$$.fragment,t),P(wt.$$.fragment,t),P(bt.$$.fragment,t),P(yt.$$.fragment,t),P(Gt.$$.fragment,t),P(qe.$$.fragment,t),P(jt.$$.fragment,t),P(Jt.$$.fragment,t),P(Mt.$$.fragment,t),P(Ft.$$.fragment,t),P(At.$$.fragment,t),P(Le.$$.fragment,t),P(Ct.$$.fragment,t),P(qt.$$.fragment,t),P(It.$$.fragment,t),P(Ut.$$.fragment,t),P(Ne.$$.fragment,t),P(Qt.$$.fragment,t),P(Ht.$$.fragment,t),P(Rt.$$.fragment,t),P(oo.$$.fragment,t),P(Be.$$.fragment,t),P(no.$$.fragment,t),gs=!0)},o(t){k(g.$$.fragment,t),k(Ve.$$.fragment,t),k(tt.$$.fragment,t),k(st.$$.fragment,t),k(at.$$.fragment,t),k(rt.$$.fragment,t),k(it.$$.fragment,t),k(lt.$$.fragment,t),k(ct.$$.fragment,t),k(pt.$$.fragment,t),k(ht.$$.fragment,t),k(mt.$$.fragment,t),k(ze.$$.fragment,t),k(gt.$$.fragment,t),k(_t.$$.fragment,t),k(Tt.$$.fragment,t),k(kt.$$.fragment,t),k(Ae.$$.fragment,t),k(wt.$$.fragment,t),k(bt.$$.fragment,t),k(yt.$$.fragment,t),k(Gt.$$.fragment,t),k(qe.$$.fragment,t),k(jt.$$.fragment,t),k(Jt.$$.fragment,t),k(Mt.$$.fragment,t),k(Ft.$$.fragment,t),k(At.$$.fragment,t),k(Le.$$.fragment,t),k(Ct.$$.fragment,t),k(qt.$$.fragment,t),k(It.$$.fragment,t),k(Ut.$$.fragment,t),k(Ne.$$.fragment,t),k(Qt.$$.fragment,t),k(Ht.$$.fragment,t),k(Rt.$$.fragment,t),k(oo.$$.fragment,t),k(Be.$$.fragment,t),k(no.$$.fragment,t),gs=!1},d(t){o(h),t&&o(y),t&&o(f),w(g),t&&o(Wn),t&&o(de),w(Ve),t&&o(Un),t&&o(Z),t&&o(Qn),t&&o(xe),t&&o(Hn),t&&o(ro),t&&o(Rn),t&&o(io),t&&o(Vn),w(tt,t),t&&o(Kn),t&&o(Ge),t&&o(Xn),t&&o(ce),w(st),t&&o(Yn),t&&o(Je),t&&o(Zn),w(at,t),t&&o(es),t&&o(po),t&&o(ts),w(rt,t),t&&o(os),t&&o(pe),w(it),t&&o(ns),t&&o(D),w(lt),w(ct),t&&o(ss),t&&o(he),w(pt),t&&o(as),t&&o(V),w(ht),w(mt),w(ze),w(gt),t&&o(rs),t&&o(fe),w(_t),t&&o(is),t&&o(O),w(Tt),w(kt),w(Ae),w(wt),t&&o(ls),t&&o(ge),w(bt),t&&o(ds),t&&o(M),w(yt),w(Gt),w(qe),w(jt),w(Jt),t&&o(cs),t&&o(Te),w(Mt),t&&o(ps),t&&o(U),w(Ft),w(At),w(Le),w(Ct),t&&o(hs),t&&o(ke),w(qt),t&&o(us),t&&o(G),w(It),w(Ut),w(Ne),w(Qt),t&&o(fs),t&&o(be),w(Ht),t&&o(ms),t&&o(j),w(Rt),w(oo),w(Be),w(no)}}}const Dc={local:"gptj",sections:[{local:"overview",sections:[{local:"generation",title:"Generation"}],title:"Overview"},{local:"transformers.GPTJConfig",title:"GPTJConfig"},{local:"transformers.GPTJModel",title:"GPTJModel"},{local:"transformers.GPTJForCausalLM",title:"GPTJForCausalLM"},{local:"transformers.GPTJForSequenceClassification",title:"GPTJForSequenceClassification"},{local:"transformers.GPTJForQuestionAnswering",title:"GPTJForQuestionAnswering"},{local:"transformers.FlaxGPTJModel",title:"FlaxGPTJModel"},{local:"transformers.FlaxGPTJForCausalLM",title:"FlaxGPTJForCausalLM"}],title:"GPT-J"};function Bc(F,h,y){let{fw:f}=h;return F.$$set=b=>{"fw"in b&&y(0,f=b.fw)},[f]}class Vc extends Mc{constructor(h){super();Fc(this,h,Bc,Nc,zc,{fw:0})}}export{Vc as default,Dc as metadata};
9,903
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/m2m_100.mdx-6607852b.js
import{S as td,i as od,s as nd,e as n,k as d,w as u,t as r,L as sd,c as s,d as o,m as l,a,x as f,h as i,b as c,J as e,g as p,y as _,q as g,o as k,B as M}from"../../chunks/vendor-b1433968.js";import{T as ed}from"../../chunks/Tip-c3840994.js";import{D as ee}from"../../chunks/Docstring-ff504c58.js";import{C as Et}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ke}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ad(Me){let m,C,v,T,F;return{c(){m=n("p"),C=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n("code"),T=r("Module"),F=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(z){m=s(z,"P",{});var $=a(m);C=i($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=s($,"CODE",{});var N=a(v);T=i(N,"Module"),N.forEach(o),F=i($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(z,$){p(z,m,$),e(m,C),e(m,v),e(v,T),e(m,F)},d(z){z&&o(m)}}}function rd(Me){let m,C,v,T,F;return{c(){m=n("p"),C=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n("code"),T=r("Module"),F=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(z){m=s(z,"P",{});var $=a(m);C=i($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=s($,"CODE",{});var N=a(v);T=i(N,"Module"),N.forEach(o),F=i($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(z,$){p(z,m,$),e(m,C),e(m,v),e(v,T),e(m,F)},d(z){z&&o(m)}}}function id(Me){let m,C,v,T,F,z,$,N,$n,Ao,W,te,xt,ve,En,Ct,xn,Oo,oe,Cn,be,Pn,jn,Do,dt,Fn,No,lt,Pt,Ln,Io,ne,Sn,ye,An,On,Go,H,se,jt,we,Dn,Ft,Nn,Bo,A,In,Lt,Gn,Bn,St,Un,Wn,At,Hn,Vn,Uo,P,Kn,ct,Qn,Xn,Ot,Rn,Jn,Dt,Zn,Yn,Nt,es,ts,Wo,ht,It,os,Ho,Te,Vo,pt,ze,Gt,ns,ss,E,as,Bt,rs,is,Ut,ds,ls,Wt,cs,hs,Ht,ps,ms,Vt,us,fs,Ko,qe,Qo,V,ae,Kt,$e,_s,Qt,gs,Xo,x,Ee,ks,K,Ms,mt,vs,bs,xe,ys,ws,Ts,Q,zs,ut,qs,$s,ft,Es,xs,Cs,Xt,Ps,js,Ce,Ro,X,re,Rt,Pe,Fs,Jt,Ls,Jo,y,je,Ss,Fe,As,Le,Os,Ds,Ns,Se,Is,_t,Gs,Bs,Us,Zt,Ws,Hs,Ae,Vs,O,Oe,Ks,De,Qs,Yt,Xs,Rs,Js,Ne,Ie,eo,Zs,Ys,to,ea,ta,Ge,oo,oa,na,no,sa,aa,so,ra,ia,ie,Be,da,Ue,la,ao,ca,ha,pa,I,We,ma,gt,ua,kt,fa,_a,ro,ga,ka,io,Zo,R,de,lo,He,Ma,co,va,Yo,L,Ve,ba,Ke,ya,Mt,wa,Ta,za,Qe,qa,Xe,$a,Ea,xa,j,Re,Ca,J,Pa,vt,ja,Fa,ho,La,Sa,Aa,le,Oa,po,Da,Na,Je,en,Z,ce,mo,Ze,Ia,uo,Ga,tn,S,Ye,Ba,et,Ua,bt,Wa,Ha,Va,tt,Ka,ot,Qa,Xa,Ra,b,nt,Ja,Y,Za,yt,Ya,er,fo,tr,or,nr,he,sr,_o,ar,rr,st,ir,go,dr,lr,ko,Mo,vo,bo,cr,hr,yo,wo,To,zo,pr,mr,qo,$o,Eo,xo,ur,fr,Co,Po,at,pe,me,jo,rt,_r,Fo,gr,kr,Lo,Mr,on;return z=new ke({}),ve=new ke({}),we=new ke({}),Te=new Et({props:{code:`from transformers import M2M100Config, M2M100ForConditionalGeneration, M2M100Tokenizer model = M2M100ForConditionalGeneration.from_pretrained('facebook/m2m100_418M') tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang="en", tgt_lang="fr") src_text = "Life is like a box of chocolates." tgt_text = "La vie est comme une bo\xEEte de chocolat." 
model_inputs = tokenizer(src_text, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors="pt").input_ids loss = model(**model_inputs, labels=labels) # forward pass,`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100Config, M2M100ForConditionalGeneration, M2M100Tokenizer model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/m2m100_418M&#x27;</span>) tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/m2m100_418M&#x27;</span>, src_lang=<span class="hljs-string">&quot;en&quot;</span>, tgt_lang=<span class="hljs-string">&quot;fr&quot;</span>) src_text = <span class="hljs-string">&quot;Life is like a box of chocolates.&quot;</span> tgt_text = <span class="hljs-string">&quot;La vie est comme une bo\xEEte de chocolat.&quot;</span> model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids loss = model(**model_inputs, labels=labels) <span class="hljs-comment"># forward pass</span>`}}),qe=new Et({props:{code:`from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer hi_text = "\u091C\u0940\u0935\u0928 \u090F\u0915 \u091A\u0949\u0915\u0932\u0947\u091F \u092C\u0949\u0915\u094D\u0938 \u0915\u0940 \u0924\u0930\u0939 \u0939\u0948\u0964" chinese_text = "\u751F\u6D3B\u5C31\u50CF\u4E00\u76D2\u5DE7\u514B\u529B\u3002" model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M") # translate Hindi to French tokenizer.src_lang = "hi" encoded_hi = tokenizer(hi_text, return_tensors="pt") generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.get_lang_id("fr")) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) # translate Chinese to English tokenizer.src_lang = "zh" encoded_zh = tokenizer(chinese_text, return_tensors="pt") generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100ForConditionalGeneration, M2M100Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>hi_text = <span class="hljs-string">&quot;\u091C\u0940\u0935\u0928 \u090F\u0915 \u091A\u0949\u0915\u0932\u0947\u091F \u092C\u0949\u0915\u094D\u0938 \u0915\u0940 \u0924\u0930\u0939 \u0939\u0948\u0964&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chinese_text = <span class="hljs-string">&quot;\u751F\u6D3B\u5C31\u50CF\u4E00\u76D2\u5DE7\u514B\u529B\u3002&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># translate Hindi to French</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.src_lang = <span class="hljs-string">&quot;hi&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_hi = 
tokenizer(hi_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;fr&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;La vie est comme une bo\xEEte de chocolat.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># translate Chinese to English</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.src_lang = <span class="hljs-string">&quot;zh&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_zh = tokenizer(chinese_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;en&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Life is like a box of chocolate.&quot;</span>`}}),$e=new ke({}),Ee=new ee({props:{name:"class transformers.M2M100Config",anchor:"transformers.M2M100Config",parameters:[{name:"vocab_size",val:" = 128112"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.05"},{name:"decoder_layerdrop",val:" = 0.05"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'relu'"},{name:"d_model",val:" = 1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"scale_embedding",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/configuration_m2m_100.py#L29",parametersDescription:[{anchor:"transformers.M2M100Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the M2M100 model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Model">M2M100Model</a> or`,name:"vocab_size"},{anchor:"transformers.M2M100Config.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.M2M100Config.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.M2M100Config.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.M2M100Config.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.M2M100Config.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.M2M100Config.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.M2M100Config.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.M2M100Config.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.M2M100Config.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.M2M100Config.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.M2M100Config.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.M2M100Config.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.M2M100Config.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.M2M100Config.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.M2M100Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),Ce=new Et({props:{code:`from transformers import M2M100Model, M2M100Config # Initializing a M2M100 facebook/m2m100_418M style configuration configuration = M2M100Config() # Initializing a model from the facebook/m2m100_418M style configuration model = M2M100Model(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100Model, M2M100Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a M2M100 facebook/m2m100_418M style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = M2M100Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/m2m100_418M style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100Model(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Pe=new ke({}),je=new ee({props:{name:"class transformers.M2M100Tokenizer",anchor:"transformers.M2M100Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"spm_file",val:""},{name:"src_lang",val:" = None"},{name:"tgt_lang",val:" = None"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"unk_token",val:" = '<unk>'"},{name:"language_codes",val:" = 'm2m100'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"num_madeup_words",val:" = 8"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/tokenization_m2m_100.py#L64",parametersDescription:[{anchor:"transformers.M2M100Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.M2M100Tokenizer.spm_file",description:`<strong>spm_file</strong> (<code>str</code>) &#x2014; Path to <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary.`,name:"spm_file"},{anchor:"transformers.M2M100Tokenizer.src_lang",description:`<strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.`,name:"src_lang"},{anchor:"transformers.M2M100Tokenizer.tgt_lang",description:`<strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.`,name:"tgt_lang"},{anchor:"transformers.M2M100Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.M2M100Tokenizer.sep_token",description:`<strong>sep_token</strong> 
(<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.M2M100Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.M2M100Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.M2M100Tokenizer.language_codes",description:`<strong>language_codes</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;m2m100&quot;</code>) &#x2014; What language codes to use. Should be one of <code>&quot;m2m100&quot;</code> or <code>&quot;wmt21&quot;</code>.`,name:"language_codes"},{anchor:"transformers.M2M100Tokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Ae=new Et({props:{code:`from transformers import M2M100Tokenizer tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M, src_lang="en", tgt_lang="ro") src_text = " UN Chief Says There Is No Military Solution in Syria" tgt_text = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" model_inputs = tokenizer(src_text, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors="pt").input_ids # model(**model_inputs, labels=labels) should work,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M, src_lang=&quot;</span>en<span class="hljs-string">&quot;, tgt_lang=&quot;</span>ro<span class="hljs-string">&quot;) &gt;&gt;&gt; src_text = &quot;</span> UN Chief Says There Is No Military Solution <span class="hljs-keyword">in</span> Syria<span class="hljs-string">&quot; &gt;&gt;&gt; tgt_text = &quot;</span>\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria<span class="hljs-string">&quot; &gt;&gt;&gt; model_inputs = tokenizer(src_text, return_tensors=&quot;</span>pt<span class="hljs-string">&quot;) &gt;&gt;&gt; with tokenizer.as_target_tokenizer(): ... 
labels = tokenizer(tgt_text, return_tensors=&quot;</span>pt<span class="hljs-string">&quot;).input_ids &gt;&gt;&gt; # model(**model_inputs, labels=labels) should work</span>`}}),Oe=new ee({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.M2M100Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/tokenization_m2m_100.py#L254",parametersDescription:[{anchor:"transformers.M2M100Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.M2M100Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Be=new ee({props:{name:"get_special_tokens_mask",anchor:"transformers.M2M100Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/tokenization_m2m_100.py#L224",parametersDescription:[{anchor:"transformers.M2M100Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.M2M100Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.M2M100Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),We=new ee({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2818",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The token type ids.</p> 
`,returnType:` <p><code>List[int]</code></p> `}}),He=new ke({}),Ve=new ee({props:{name:"class transformers.M2M100Model",anchor:"transformers.M2M100Model",parameters:[{name:"config",val:": M2M100Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/modeling_m2m_100.py#L1101",parametersDescription:[{anchor:"transformers.M2M100Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config">M2M100Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Re=new ee({props:{name:"forward",anchor:"transformers.M2M100Model.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/modeling_m2m_100.py#L1128",parametersDescription:[{anchor:"transformers.M2M100Model.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Tokenizer">M2M100Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.M2M100Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.M2M100Model.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Tokenizer">M2M100Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>M2M100 uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.M2M100Model.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.M2M100Model.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.M2M100Model.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.M2M100Model.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.M2M100Model.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.M2M100Model.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.M2M100Model.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.M2M100Model.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.M2M100Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.M2M100Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.M2M100Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config" >M2M100Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),le=new ed({props:{$$slots:{default:[ad]},$$scope:{ctx:Me}}}),Je=new Et({props:{code:`from transformers import M2M100Tokenizer, M2M100Model import torch tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M') model = M2M100Model.from_pretrained('facebook/m2m100_418M') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100Tokenizer, M2M100Model <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/m2m100_418M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100Model.from_pretrained(<span class="hljs-string">&#x27;facebook/m2m100_418M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ze=new ke({}),Ye=new ee({props:{name:"class transformers.M2M100ForConditionalGeneration",anchor:"transformers.M2M100ForConditionalGeneration",parameters:[{name:"config",val:": M2M100Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/modeling_m2m_100.py#L1212",parametersDescription:[{anchor:"transformers.M2M100ForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config">M2M100Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),nt=new ee({props:{name:"forward",anchor:"transformers.M2M100ForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/m2m_100/modeling_m2m_100.py#L1250",parametersDescription:[{anchor:"transformers.M2M100ForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Tokenizer">M2M100Tokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Tokenizer">M2M100Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>M2M100 uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.M2M100ForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config" >M2M100Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),he=new ed({props:{$$slots:{default:[rd]},$$scope:{ctx:Me}}}),st=new Et({props:{code:`from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration model = M2M100ForConditionalGeneration.from_pretrained('facebook/m2m100_418M') tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M') text_to_translate = "Life is like a box of chocolates" model_inputs = tokenizer(text_to_translate, return_tensors='pt') # translate to French gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr")) print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100Tokenizer, M2M100ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/m2m100_418M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/m2m100_418M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text_to_translate = <span class="hljs-string">&quot;Life is like a box of chocolates&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_inputs = tokenizer(text_to_translate, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># translate to French</span> <span class="hljs-meta">&gt;&gt;&gt; </span>gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;fr&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.batch_decode(gen_tokens, skip_special_tokens=<span class="hljs-literal">True</span>))`}}),rt=new 
ke({}),{c(){m=n("meta"),C=d(),v=n("h1"),T=n("a"),F=n("span"),u(z.$$.fragment),$=d(),N=n("span"),$n=r("M2M100"),Ao=d(),W=n("h2"),te=n("a"),xt=n("span"),u(ve.$$.fragment),En=d(),Ct=n("span"),xn=r("Overview"),Oo=d(),oe=n("p"),Cn=r("The M2M100 model was proposed in "),be=n("a"),Pn=r("Beyond English-Centric Multilingual Machine Translation"),jn=r(` by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.`),Do=d(),dt=n("p"),Fn=r("The abstract from the paper is the following:"),No=d(),lt=n("p"),Pt=n("em"),Ln=r(`Existing work in translation demonstrated the potential of massively multilingual machine translation by training a single model able to translate between any pair of languages. However, much of this work is English-Centric by training only on data which was translated from or to English. While this is supported by large sources of training data, it does not reflect translation needs worldwide. In this work, we create a true Many-to-Many multilingual translation model that can translate directly between any pair of 100 languages. We build and open source a training dataset that covers thousands of language directions with supervised data, created through large-scale mining. Then, we explore how to effectively increase model capacity through a combination of dense scaling and language-specific sparse parameters to create high quality models. Our focus on non-English-Centric models brings gains of more than 10 BLEU when directly translating between non-English directions while performing competitively to the best single systems of WMT. We open-source our scripts so that others may reproduce the data, evaluation, and final M2M-100 model.`),Io=d(),ne=n("p"),Sn=r("This model was contributed by "),ye=n("a"),An=r("valhalla"),On=r("."),Go=d(),H=n("h3"),se=n("a"),jt=n("span"),u(we.$$.fragment),Dn=d(),Ft=n("span"),Nn=r("Training and Generation"),Bo=d(),A=n("p"),In=r(`M2M100 is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation tasks. As the model is multilingual it expects the sequences in a certain format: A special language id token is used as prefix in both the source and target text. The source text format is `),Lt=n("code"),Gn=r("[lang_code] X [eos]"),Bn=r(", where "),St=n("code"),Un=r("lang_code"),Wn=r(` is source language id for source text and target language id for target text, with `),At=n("code"),Hn=r("X"),Vn=r(" being the source or target text."),Uo=d(),P=n("p"),Kn=r("The "),ct=n("a"),Qn=r("M2M100Tokenizer"),Xn=r(" depends on "),Ot=n("code"),Rn=r("sentencepiece"),Jn=r(` so be sure to install it before running the examples. To install `),Dt=n("code"),Zn=r("sentencepiece"),Yn=r(" run "),Nt=n("code"),es=r("pip install sentencepiece"),ts=r("."),Wo=d(),ht=n("ul"),It=n("li"),os=r("Supervised Training"),Ho=d(),u(Te.$$.fragment),Vo=d(),pt=n("ul"),ze=n("li"),Gt=n("p"),ns=r("Generation"),ss=d(),E=n("p"),as=r("M2M100 uses the "),Bt=n("code"),rs=r("eos_token_id"),is=r(" as the "),Ut=n("code"),ds=r("decoder_start_token_id"),ls=r(` for generation with the target language id being forced as the first generated token. To force the target language id as the first generated token, pass the `),Wt=n("em"),cs=r("forced_bos_token_id"),hs=r(" parameter to the "),Ht=n("em"),ps=r("generate"),ms=r(` method. 
The following example shows how to translate between Hindi to French and Chinese to English using the `),Vt=n("em"),us=r("facebook/m2m100_418M"),fs=r(" checkpoint."),Ko=d(),u(qe.$$.fragment),Qo=d(),V=n("h2"),ae=n("a"),Kt=n("span"),u($e.$$.fragment),_s=d(),Qt=n("span"),gs=r("M2M100Config"),Xo=d(),x=n("div"),u(Ee.$$.fragment),ks=d(),K=n("p"),Ms=r("This is the configuration class to store the configuration of a "),mt=n("a"),vs=r("M2M100Model"),bs=r(`. It is used to instantiate an M2M100 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the M2M100 `),xe=n("a"),ys=r("m2m100_418M"),ws=r(" architecture."),Ts=d(),Q=n("p"),zs=r("Configuration objects inherit from "),ut=n("a"),qs=r("PretrainedConfig"),$s=r(` and can be used to control the model outputs. Read the documentation from `),ft=n("a"),Es=r("PretrainedConfig"),xs=r(" for more information."),Cs=d(),Xt=n("p"),Ps=r("Example:"),js=d(),u(Ce.$$.fragment),Ro=d(),X=n("h2"),re=n("a"),Rt=n("span"),u(Pe.$$.fragment),Fs=d(),Jt=n("span"),Ls=r("M2M100Tokenizer"),Jo=d(),y=n("div"),u(je.$$.fragment),Ss=d(),Fe=n("p"),As=r("Construct an M2M100 tokenizer. Based on "),Le=n("a"),Os=r("SentencePiece"),Ds=r("."),Ns=d(),Se=n("p"),Is=r("This tokenizer inherits from "),_t=n("a"),Gs=r("PreTrainedTokenizer"),Bs=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Us=d(),Zt=n("p"),Ws=r("Examples:"),Hs=d(),u(Ae.$$.fragment),Vs=d(),O=n("div"),u(Oe.$$.fragment),Ks=d(),De=n("p"),Qs=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART sequence has the following format, where `),Yt=n("code"),Xs=r("X"),Rs=r(" represents the sequence:"),Js=d(),Ne=n("ul"),Ie=n("li"),eo=n("code"),Zs=r("input_ids"),Ys=r(" (for encoder) "),to=n("code"),ea=r("X [eos, src_lang_code]"),ta=d(),Ge=n("li"),oo=n("code"),oa=r("decoder_input_ids"),na=r(": (for decoder) "),no=n("code"),sa=r("X [eos, tgt_lang_code]"),aa=d(),so=n("p"),ra=r(`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),ia=d(),ie=n("div"),u(Be.$$.fragment),da=d(),Ue=n("p"),la=r(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ao=n("code"),ca=r("prepare_for_model"),ha=r(" method."),pa=d(),I=n("div"),u(We.$$.fragment),ma=d(),gt=n("p"),ua=r("Create the token type IDs corresponding to the sequences passed. "),kt=n("a"),fa=r("What are token type IDs?"),_a=d(),ro=n("p"),ga=r("Should be overridden in a subclass if the model has a special way of building those."),ka=d(),io=n("div"),Zo=d(),R=n("h2"),de=n("a"),lo=n("span"),u(He.$$.fragment),Ma=d(),co=n("span"),va=r("M2M100Model"),Yo=d(),L=n("div"),u(Ve.$$.fragment),ba=d(),Ke=n("p"),ya=r(`The bare M2M100 Model outputting raw hidden-states without any specific head on top. This model inherits from `),Mt=n("a"),wa=r("PreTrainedModel"),Ta=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),za=d(),Qe=n("p"),qa=r("This model is also a PyTorch "),Xe=n("a"),$a=r("torch.nn.Module"),Ea=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xa=d(),j=n("div"),u(Re.$$.fragment),Ca=d(),J=n("p"),Pa=r("The "),vt=n("a"),ja=r("M2M100Model"),Fa=r(" forward method, overrides the "),ho=n("code"),La=r("__call__"),Sa=r(" special method."),Aa=d(),u(le.$$.fragment),Oa=d(),po=n("p"),Da=r("Example:"),Na=d(),u(Je.$$.fragment),en=d(),Z=n("h2"),ce=n("a"),mo=n("span"),u(Ze.$$.fragment),Ia=d(),uo=n("span"),Ga=r("M2M100ForConditionalGeneration"),tn=d(),S=n("div"),u(Ye.$$.fragment),Ba=d(),et=n("p"),Ua=r(`The M2M100 Model with a language modeling head. Can be used for summarization. This model inherits from `),bt=n("a"),Wa=r("PreTrainedModel"),Ha=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Va=d(),tt=n("p"),Ka=r("This model is also a PyTorch "),ot=n("a"),Qa=r("torch.nn.Module"),Xa=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ra=d(),b=n("div"),u(nt.$$.fragment),Ja=d(),Y=n("p"),Za=r("The "),yt=n("a"),Ya=r("M2M100ForConditionalGeneration"),er=r(" forward method, overrides the "),fo=n("code"),tr=r("__call__"),or=r(" special method."),nr=d(),u(he.$$.fragment),sr=d(),_o=n("p"),ar=r("Example:"),rr=d(),u(st.$$.fragment),ir=d(),go=n("p"),dr=r("Translation example::"),lr=d(),ko=n("blockquote"),Mo=n("blockquote"),vo=n("blockquote"),bo=n("p"),cr=r("from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration"),hr=d(),yo=n("blockquote"),wo=n("blockquote"),To=n("blockquote"),zo=n("p"),pr=r(`model = M2M100ForConditionalGeneration.from_pretrained(\u2018facebook/m2m100_418M\u2019) tokenizer = M2M100Tokenizer.from_pretrained(\u2018facebook/m2m100_418M\u2019)`),mr=d(),qo=n("blockquote"),$o=n("blockquote"),Eo=n("blockquote"),xo=n("p"),ur=r(`text_to_translate = \u201CLife is like a box of chocolates\u201D model_inputs = tokenizer(text_to_translate, return_tensors=\u2018pt\u2019)`),fr=d(),Co=n("blockquote"),Po=n("blockquote"),at=n("blockquote"),pe=n("h1"),me=n("a"),jo=n("span"),u(rt.$$.fragment),_r=d(),Fo=n("span"),gr=r("translate to French"),kr=d(),Lo=n("p"),Mr=r(`gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tokenizer.get_lang_id(\u201Cfr\u201D)) print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))`),this.h()},l(t){const h=sd('[data-svelte="svelte-1phssyn"]',document.head);m=s(h,"META",{name:!0,content:!0}),h.forEach(o),C=l(t),v=s(t,"H1",{class:!0});var it=a(v);T=s(it,"A",{id:!0,class:!0,href:!0});var So=a(T);F=s(So,"SPAN",{});var br=a(F);f(z.$$.fragment,br),br.forEach(o),So.forEach(o),$=l(it),N=s(it,"SPAN",{});var yr=a(N);$n=i(yr,"M2M100"),yr.forEach(o),it.forEach(o),Ao=l(t),W=s(t,"H2",{class:!0});var nn=a(W);te=s(nn,"A",{id:!0,class:!0,href:!0});var wr=a(te);xt=s(wr,"SPAN",{});var Tr=a(xt);f(ve.$$.fragment,Tr),Tr.forEach(o),wr.forEach(o),En=l(nn),Ct=s(nn,"SPAN",{});var zr=a(Ct);xn=i(zr,"Overview"),zr.forEach(o),nn.forEach(o),Oo=l(t),oe=s(t,"P",{});var sn=a(oe);Cn=i(sn,"The M2M100 model was proposed in "),be=s(sn,"A",{href:!0,rel:!0});var qr=a(be);Pn=i(qr,"Beyond English-Centric Multilingual Machine Translation"),qr.forEach(o),jn=i(sn,` by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, 
Edouard Grave, Michael Auli, Armand Joulin.`),sn.forEach(o),Do=l(t),dt=s(t,"P",{});var $r=a(dt);Fn=i($r,"The abstract from the paper is the following:"),$r.forEach(o),No=l(t),lt=s(t,"P",{});var Er=a(lt);Pt=s(Er,"EM",{});var xr=a(Pt);Ln=i(xr,`Existing work in translation demonstrated the potential of massively multilingual machine translation by training a single model able to translate between any pair of languages. However, much of this work is English-Centric by training only on data which was translated from or to English. While this is supported by large sources of training data, it does not reflect translation needs worldwide. In this work, we create a true Many-to-Many multilingual translation model that can translate directly between any pair of 100 languages. We build and open source a training dataset that covers thousands of language directions with supervised data, created through large-scale mining. Then, we explore how to effectively increase model capacity through a combination of dense scaling and language-specific sparse parameters to create high quality models. Our focus on non-English-Centric models brings gains of more than 10 BLEU when directly translating between non-English directions while performing competitively to the best single systems of WMT. We open-source our scripts so that others may reproduce the data, evaluation, and final M2M-100 model.`),xr.forEach(o),Er.forEach(o),Io=l(t),ne=s(t,"P",{});var an=a(ne);Sn=i(an,"This model was contributed by "),ye=s(an,"A",{href:!0,rel:!0});var Cr=a(ye);An=i(Cr,"valhalla"),Cr.forEach(o),On=i(an,"."),an.forEach(o),Go=l(t),H=s(t,"H3",{class:!0});var rn=a(H);se=s(rn,"A",{id:!0,class:!0,href:!0});var Pr=a(se);jt=s(Pr,"SPAN",{});var jr=a(jt);f(we.$$.fragment,jr),jr.forEach(o),Pr.forEach(o),Dn=l(rn),Ft=s(rn,"SPAN",{});var Fr=a(Ft);Nn=i(Fr,"Training and Generation"),Fr.forEach(o),rn.forEach(o),Bo=l(t),A=s(t,"P",{});var ue=a(A);In=i(ue,`M2M100 is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation tasks. As the model is multilingual it expects the sequences in a certain format: A special language id token is used as prefix in both the source and target text. The source text format is `),Lt=s(ue,"CODE",{});var Lr=a(Lt);Gn=i(Lr,"[lang_code] X [eos]"),Lr.forEach(o),Bn=i(ue,", where "),St=s(ue,"CODE",{});var Sr=a(St);Un=i(Sr,"lang_code"),Sr.forEach(o),Wn=i(ue,` is source language id for source text and target language id for target text, with `),At=s(ue,"CODE",{});var Ar=a(At);Hn=i(Ar,"X"),Ar.forEach(o),Vn=i(ue," being the source or target text."),ue.forEach(o),Uo=l(t),P=s(t,"P",{});var G=a(P);Kn=i(G,"The "),ct=s(G,"A",{href:!0});var Or=a(ct);Qn=i(Or,"M2M100Tokenizer"),Or.forEach(o),Xn=i(G," depends on "),Ot=s(G,"CODE",{});var Dr=a(Ot);Rn=i(Dr,"sentencepiece"),Dr.forEach(o),Jn=i(G,` so be sure to install it before running the examples. 
To install `),Dt=s(G,"CODE",{});var Nr=a(Dt);Zn=i(Nr,"sentencepiece"),Nr.forEach(o),Yn=i(G," run "),Nt=s(G,"CODE",{});var Ir=a(Nt);es=i(Ir,"pip install sentencepiece"),Ir.forEach(o),ts=i(G,"."),G.forEach(o),Wo=l(t),ht=s(t,"UL",{});var Gr=a(ht);It=s(Gr,"LI",{});var Br=a(It);os=i(Br,"Supervised Training"),Br.forEach(o),Gr.forEach(o),Ho=l(t),f(Te.$$.fragment,t),Vo=l(t),pt=s(t,"UL",{});var Ur=a(pt);ze=s(Ur,"LI",{});var dn=a(ze);Gt=s(dn,"P",{});var Wr=a(Gt);ns=i(Wr,"Generation"),Wr.forEach(o),ss=l(dn),E=s(dn,"P",{});var D=a(E);as=i(D,"M2M100 uses the "),Bt=s(D,"CODE",{});var Hr=a(Bt);rs=i(Hr,"eos_token_id"),Hr.forEach(o),is=i(D," as the "),Ut=s(D,"CODE",{});var Vr=a(Ut);ds=i(Vr,"decoder_start_token_id"),Vr.forEach(o),ls=i(D,` for generation with the target language id being forced as the first generated token. To force the target language id as the first generated token, pass the `),Wt=s(D,"EM",{});var Kr=a(Wt);cs=i(Kr,"forced_bos_token_id"),Kr.forEach(o),hs=i(D," parameter to the "),Ht=s(D,"EM",{});var Qr=a(Ht);ps=i(Qr,"generate"),Qr.forEach(o),ms=i(D,` method. The following example shows how to translate between Hindi to French and Chinese to English using the `),Vt=s(D,"EM",{});var Xr=a(Vt);us=i(Xr,"facebook/m2m100_418M"),Xr.forEach(o),fs=i(D," checkpoint."),D.forEach(o),dn.forEach(o),Ur.forEach(o),Ko=l(t),f(qe.$$.fragment,t),Qo=l(t),V=s(t,"H2",{class:!0});var ln=a(V);ae=s(ln,"A",{id:!0,class:!0,href:!0});var Rr=a(ae);Kt=s(Rr,"SPAN",{});var Jr=a(Kt);f($e.$$.fragment,Jr),Jr.forEach(o),Rr.forEach(o),_s=l(ln),Qt=s(ln,"SPAN",{});var Zr=a(Qt);gs=i(Zr,"M2M100Config"),Zr.forEach(o),ln.forEach(o),Xo=l(t),x=s(t,"DIV",{class:!0});var B=a(x);f(Ee.$$.fragment,B),ks=l(B),K=s(B,"P",{});var wt=a(K);Ms=i(wt,"This is the configuration class to store the configuration of a "),mt=s(wt,"A",{href:!0});var Yr=a(mt);vs=i(Yr,"M2M100Model"),Yr.forEach(o),bs=i(wt,`. It is used to instantiate an M2M100 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the M2M100 `),xe=s(wt,"A",{href:!0,rel:!0});var ei=a(xe);ys=i(ei,"m2m100_418M"),ei.forEach(o),ws=i(wt," architecture."),wt.forEach(o),Ts=l(B),Q=s(B,"P",{});var Tt=a(Q);zs=i(Tt,"Configuration objects inherit from "),ut=s(Tt,"A",{href:!0});var ti=a(ut);qs=i(ti,"PretrainedConfig"),ti.forEach(o),$s=i(Tt,` and can be used to control the model outputs. Read the documentation from `),ft=s(Tt,"A",{href:!0});var oi=a(ft);Es=i(oi,"PretrainedConfig"),oi.forEach(o),xs=i(Tt," for more information."),Tt.forEach(o),Cs=l(B),Xt=s(B,"P",{});var ni=a(Xt);Ps=i(ni,"Example:"),ni.forEach(o),js=l(B),f(Ce.$$.fragment,B),B.forEach(o),Ro=l(t),X=s(t,"H2",{class:!0});var cn=a(X);re=s(cn,"A",{id:!0,class:!0,href:!0});var si=a(re);Rt=s(si,"SPAN",{});var ai=a(Rt);f(Pe.$$.fragment,ai),ai.forEach(o),si.forEach(o),Fs=l(cn),Jt=s(cn,"SPAN",{});var ri=a(Jt);Ls=i(ri,"M2M100Tokenizer"),ri.forEach(o),cn.forEach(o),Jo=l(t),y=s(t,"DIV",{class:!0});var q=a(y);f(je.$$.fragment,q),Ss=l(q),Fe=s(q,"P",{});var hn=a(Fe);As=i(hn,"Construct an M2M100 tokenizer. Based on "),Le=s(hn,"A",{href:!0,rel:!0});var ii=a(Le);Os=i(ii,"SentencePiece"),ii.forEach(o),Ds=i(hn,"."),hn.forEach(o),Ns=l(q),Se=s(q,"P",{});var pn=a(Se);Is=i(pn,"This tokenizer inherits from "),_t=s(pn,"A",{href:!0});var di=a(_t);Gs=i(di,"PreTrainedTokenizer"),di.forEach(o),Bs=i(pn,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),pn.forEach(o),Us=l(q),Zt=s(q,"P",{});var li=a(Zt);Ws=i(li,"Examples:"),li.forEach(o),Hs=l(q),f(Ae.$$.fragment,q),Vs=l(q),O=s(q,"DIV",{class:!0});var fe=a(O);f(Oe.$$.fragment,fe),Ks=l(fe),De=s(fe,"P",{});var mn=a(De);Qs=i(mn,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART sequence has the following format, where `),Yt=s(mn,"CODE",{});var ci=a(Yt);Xs=i(ci,"X"),ci.forEach(o),Rs=i(mn," represents the sequence:"),mn.forEach(o),Js=l(fe),Ne=s(fe,"UL",{});var un=a(Ne);Ie=s(un,"LI",{});var fn=a(Ie);eo=s(fn,"CODE",{});var hi=a(eo);Zs=i(hi,"input_ids"),hi.forEach(o),Ys=i(fn," (for encoder) "),to=s(fn,"CODE",{});var pi=a(to);ea=i(pi,"X [eos, src_lang_code]"),pi.forEach(o),fn.forEach(o),ta=l(un),Ge=s(un,"LI",{});var _n=a(Ge);oo=s(_n,"CODE",{});var mi=a(oo);oa=i(mi,"decoder_input_ids"),mi.forEach(o),na=i(_n,": (for decoder) "),no=s(_n,"CODE",{});var ui=a(no);sa=i(ui,"X [eos, tgt_lang_code]"),ui.forEach(o),_n.forEach(o),un.forEach(o),aa=l(fe),so=s(fe,"P",{});var fi=a(so);ra=i(fi,`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),fi.forEach(o),fe.forEach(o),ia=l(q),ie=s(q,"DIV",{class:!0});var gn=a(ie);f(Be.$$.fragment,gn),da=l(gn),Ue=s(gn,"P",{});var kn=a(Ue);la=i(kn,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ao=s(kn,"CODE",{});var _i=a(ao);ca=i(_i,"prepare_for_model"),_i.forEach(o),ha=i(kn," method."),kn.forEach(o),gn.forEach(o),pa=l(q),I=s(q,"DIV",{class:!0});var zt=a(I);f(We.$$.fragment,zt),ma=l(zt),gt=s(zt,"P",{});var vr=a(gt);ua=i(vr,"Create the token type IDs corresponding to the sequences passed. "),kt=s(vr,"A",{href:!0});var gi=a(kt);fa=i(gi,"What are token type IDs?"),gi.forEach(o),vr.forEach(o),_a=l(zt),ro=s(zt,"P",{});var ki=a(ro);ga=i(ki,"Should be overridden in a subclass if the model has a special way of building those."),ki.forEach(o),zt.forEach(o),ka=l(q),io=s(q,"DIV",{class:!0}),a(io).forEach(o),q.forEach(o),Zo=l(t),R=s(t,"H2",{class:!0});var Mn=a(R);de=s(Mn,"A",{id:!0,class:!0,href:!0});var Mi=a(de);lo=s(Mi,"SPAN",{});var vi=a(lo);f(He.$$.fragment,vi),vi.forEach(o),Mi.forEach(o),Ma=l(Mn),co=s(Mn,"SPAN",{});var bi=a(co);va=i(bi,"M2M100Model"),bi.forEach(o),Mn.forEach(o),Yo=l(t),L=s(t,"DIV",{class:!0});var _e=a(L);f(Ve.$$.fragment,_e),ba=l(_e),Ke=s(_e,"P",{});var vn=a(Ke);ya=i(vn,`The bare M2M100 Model outputting raw hidden-states without any specific head on top. This model inherits from `),Mt=s(vn,"A",{href:!0});var yi=a(Mt);wa=i(yi,"PreTrainedModel"),yi.forEach(o),Ta=i(vn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vn.forEach(o),za=l(_e),Qe=s(_e,"P",{});var bn=a(Qe);qa=i(bn,"This model is also a PyTorch "),Xe=s(bn,"A",{href:!0,rel:!0});var wi=a(Xe);$a=i(wi,"torch.nn.Module"),wi.forEach(o),Ea=i(bn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bn.forEach(o),xa=l(_e),j=s(_e,"DIV",{class:!0});var U=a(j);f(Re.$$.fragment,U),Ca=l(U),J=s(U,"P",{});var qt=a(J);Pa=i(qt,"The "),vt=s(qt,"A",{href:!0});var Ti=a(vt);ja=i(Ti,"M2M100Model"),Ti.forEach(o),Fa=i(qt," forward method, overrides the "),ho=s(qt,"CODE",{});var zi=a(ho);La=i(zi,"__call__"),zi.forEach(o),Sa=i(qt," special method."),qt.forEach(o),Aa=l(U),f(le.$$.fragment,U),Oa=l(U),po=s(U,"P",{});var qi=a(po);Da=i(qi,"Example:"),qi.forEach(o),Na=l(U),f(Je.$$.fragment,U),U.forEach(o),_e.forEach(o),en=l(t),Z=s(t,"H2",{class:!0});var yn=a(Z);ce=s(yn,"A",{id:!0,class:!0,href:!0});var $i=a(ce);mo=s($i,"SPAN",{});var Ei=a(mo);f(Ze.$$.fragment,Ei),Ei.forEach(o),$i.forEach(o),Ia=l(yn),uo=s(yn,"SPAN",{});var xi=a(uo);Ga=i(xi,"M2M100ForConditionalGeneration"),xi.forEach(o),yn.forEach(o),tn=l(t),S=s(t,"DIV",{class:!0});var ge=a(S);f(Ye.$$.fragment,ge),Ba=l(ge),et=s(ge,"P",{});var wn=a(et);Ua=i(wn,`The M2M100 Model with a language modeling head. Can be used for summarization. This model inherits from `),bt=s(wn,"A",{href:!0});var Ci=a(bt);Wa=i(Ci,"PreTrainedModel"),Ci.forEach(o),Ha=i(wn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wn.forEach(o),Va=l(ge),tt=s(ge,"P",{});var Tn=a(tt);Ka=i(Tn,"This model is also a PyTorch "),ot=s(Tn,"A",{href:!0,rel:!0});var Pi=a(ot);Qa=i(Pi,"torch.nn.Module"),Pi.forEach(o),Xa=i(Tn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tn.forEach(o),Ra=l(ge),b=s(ge,"DIV",{class:!0});var w=a(b);f(nt.$$.fragment,w),Ja=l(w),Y=s(w,"P",{});var $t=a(Y);Za=i($t,"The "),yt=s($t,"A",{href:!0});var ji=a(yt);Ya=i(ji,"M2M100ForConditionalGeneration"),ji.forEach(o),er=i($t," forward method, overrides the "),fo=s($t,"CODE",{});var Fi=a(fo);tr=i(Fi,"__call__"),Fi.forEach(o),or=i($t," special method."),$t.forEach(o),nr=l(w),f(he.$$.fragment,w),sr=l(w),_o=s(w,"P",{});var Li=a(_o);ar=i(Li,"Example:"),Li.forEach(o),rr=l(w),f(st.$$.fragment,w),ir=l(w),go=s(w,"P",{});var Si=a(go);dr=i(Si,"Translation example::"),Si.forEach(o),lr=l(w),ko=s(w,"BLOCKQUOTE",{});var Ai=a(ko);Mo=s(Ai,"BLOCKQUOTE",{});var Oi=a(Mo);vo=s(Oi,"BLOCKQUOTE",{});var Di=a(vo);bo=s(Di,"P",{});var Ni=a(bo);cr=i(Ni,"from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration"),Ni.forEach(o),Di.forEach(o),Oi.forEach(o),Ai.forEach(o),hr=l(w),yo=s(w,"BLOCKQUOTE",{});var Ii=a(yo);wo=s(Ii,"BLOCKQUOTE",{});var Gi=a(wo);To=s(Gi,"BLOCKQUOTE",{});var Bi=a(To);zo=s(Bi,"P",{});var Ui=a(zo);pr=i(Ui,`model = M2M100ForConditionalGeneration.from_pretrained(\u2018facebook/m2m100_418M\u2019) tokenizer = M2M100Tokenizer.from_pretrained(\u2018facebook/m2m100_418M\u2019)`),Ui.forEach(o),Bi.forEach(o),Gi.forEach(o),Ii.forEach(o),mr=l(w),qo=s(w,"BLOCKQUOTE",{});var Wi=a(qo);$o=s(Wi,"BLOCKQUOTE",{});var Hi=a($o);Eo=s(Hi,"BLOCKQUOTE",{});var Vi=a(Eo);xo=s(Vi,"P",{});var Ki=a(xo);ur=i(Ki,`text_to_translate = \u201CLife is like a box of chocolates\u201D model_inputs = tokenizer(text_to_translate, return_tensors=\u2018pt\u2019)`),Ki.forEach(o),Vi.forEach(o),Hi.forEach(o),Wi.forEach(o),fr=l(w),Co=s(w,"BLOCKQUOTE",{});var Qi=a(Co);Po=s(Qi,"BLOCKQUOTE",{});var Xi=a(Po);at=s(Xi,"BLOCKQUOTE",{});var zn=a(at);pe=s(zn,"H1",{class:!0});var qn=a(pe);me=s(qn,"A",{id:!0,class:!0,href:!0});var 
Ri=a(me);jo=s(Ri,"SPAN",{});var Ji=a(jo);f(rt.$$.fragment,Ji),Ji.forEach(o),Ri.forEach(o),_r=l(qn),Fo=s(qn,"SPAN",{});var Zi=a(Fo);gr=i(Zi,"translate to French"),Zi.forEach(o),qn.forEach(o),kr=l(zn),Lo=s(zn,"P",{});var Yi=a(Lo);Mr=i(Yi,`gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tokenizer.get_lang_id(\u201Cfr\u201D)) print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))`),Yi.forEach(o),zn.forEach(o),Xi.forEach(o),Qi.forEach(o),w.forEach(o),ge.forEach(o),this.h()},h(){c(m,"name","hf:doc:metadata"),c(m,"content",JSON.stringify(dd)),c(T,"id","m2m100"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#m2m100"),c(v,"class","relative group"),c(te,"id","overview"),c(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(te,"href","#overview"),c(W,"class","relative group"),c(be,"href","https://arxiv.org/abs/2010.11125"),c(be,"rel","nofollow"),c(ye,"href","https://huggingface.co/valhalla"),c(ye,"rel","nofollow"),c(se,"id","training-and-generation"),c(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(se,"href","#training-and-generation"),c(H,"class","relative group"),c(ct,"href","/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Tokenizer"),c(ae,"id","transformers.M2M100Config"),c(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ae,"href","#transformers.M2M100Config"),c(V,"class","relative group"),c(mt,"href","/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Model"),c(xe,"href","https://huggingface.co/facebook/m2m100_418M"),c(xe,"rel","nofollow"),c(ut,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(ft,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(x,"class","docstring"),c(re,"id","transformers.M2M100Tokenizer"),c(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(re,"href","#transformers.M2M100Tokenizer"),c(X,"class","relative group"),c(Le,"href","https://github.com/google/sentencepiece"),c(Le,"rel","nofollow"),c(_t,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(O,"class","docstring"),c(ie,"class","docstring"),c(kt,"href","../glossary#token-type-ids"),c(I,"class","docstring"),c(io,"class","docstring"),c(y,"class","docstring"),c(de,"id","transformers.M2M100Model"),c(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(de,"href","#transformers.M2M100Model"),c(R,"class","relative 
group"),c(Mt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Xe,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Xe,"rel","nofollow"),c(vt,"href","/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Model"),c(j,"class","docstring"),c(L,"class","docstring"),c(ce,"id","transformers.M2M100ForConditionalGeneration"),c(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ce,"href","#transformers.M2M100ForConditionalGeneration"),c(Z,"class","relative group"),c(bt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ot,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ot,"rel","nofollow"),c(yt,"href","/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100ForConditionalGeneration"),c(me,"id","translate-to-french"),c(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(me,"href","#translate-to-french"),c(pe,"class","relative group"),c(b,"class","docstring"),c(S,"class","docstring")},m(t,h){e(document.head,m),p(t,C,h),p(t,v,h),e(v,T),e(T,F),_(z,F,null),e(v,$),e(v,N),e(N,$n),p(t,Ao,h),p(t,W,h),e(W,te),e(te,xt),_(ve,xt,null),e(W,En),e(W,Ct),e(Ct,xn),p(t,Oo,h),p(t,oe,h),e(oe,Cn),e(oe,be),e(be,Pn),e(oe,jn),p(t,Do,h),p(t,dt,h),e(dt,Fn),p(t,No,h),p(t,lt,h),e(lt,Pt),e(Pt,Ln),p(t,Io,h),p(t,ne,h),e(ne,Sn),e(ne,ye),e(ye,An),e(ne,On),p(t,Go,h),p(t,H,h),e(H,se),e(se,jt),_(we,jt,null),e(H,Dn),e(H,Ft),e(Ft,Nn),p(t,Bo,h),p(t,A,h),e(A,In),e(A,Lt),e(Lt,Gn),e(A,Bn),e(A,St),e(St,Un),e(A,Wn),e(A,At),e(At,Hn),e(A,Vn),p(t,Uo,h),p(t,P,h),e(P,Kn),e(P,ct),e(ct,Qn),e(P,Xn),e(P,Ot),e(Ot,Rn),e(P,Jn),e(P,Dt),e(Dt,Zn),e(P,Yn),e(P,Nt),e(Nt,es),e(P,ts),p(t,Wo,h),p(t,ht,h),e(ht,It),e(It,os),p(t,Ho,h),_(Te,t,h),p(t,Vo,h),p(t,pt,h),e(pt,ze),e(ze,Gt),e(Gt,ns),e(ze,ss),e(ze,E),e(E,as),e(E,Bt),e(Bt,rs),e(E,is),e(E,Ut),e(Ut,ds),e(E,ls),e(E,Wt),e(Wt,cs),e(E,hs),e(E,Ht),e(Ht,ps),e(E,ms),e(E,Vt),e(Vt,us),e(E,fs),p(t,Ko,h),_(qe,t,h),p(t,Qo,h),p(t,V,h),e(V,ae),e(ae,Kt),_($e,Kt,null),e(V,_s),e(V,Qt),e(Qt,gs),p(t,Xo,h),p(t,x,h),_(Ee,x,null),e(x,ks),e(x,K),e(K,Ms),e(K,mt),e(mt,vs),e(K,bs),e(K,xe),e(xe,ys),e(K,ws),e(x,Ts),e(x,Q),e(Q,zs),e(Q,ut),e(ut,qs),e(Q,$s),e(Q,ft),e(ft,Es),e(Q,xs),e(x,Cs),e(x,Xt),e(Xt,Ps),e(x,js),_(Ce,x,null),p(t,Ro,h),p(t,X,h),e(X,re),e(re,Rt),_(Pe,Rt,null),e(X,Fs),e(X,Jt),e(Jt,Ls),p(t,Jo,h),p(t,y,h),_(je,y,null),e(y,Ss),e(y,Fe),e(Fe,As),e(Fe,Le),e(Le,Os),e(Fe,Ds),e(y,Ns),e(y,Se),e(Se,Is),e(Se,_t),e(_t,Gs),e(Se,Bs),e(y,Us),e(y,Zt),e(Zt,Ws),e(y,Hs),_(Ae,y,null),e(y,Vs),e(y,O),_(Oe,O,null),e(O,Ks),e(O,De),e(De,Qs),e(De,Yt),e(Yt,Xs),e(De,Rs),e(O,Js),e(O,Ne),e(Ne,Ie),e(Ie,eo),e(eo,Zs),e(Ie,Ys),e(Ie,to),e(to,ea),e(Ne,ta),e(Ne,Ge),e(Ge,oo),e(oo,oa),e(Ge,na),e(Ge,no),e(no,sa),e(O,aa),e(O,so),e(so,ra),e(y,ia),e(y,ie),_(Be,ie,null),e(ie,da),e(ie,Ue),e(Ue,la),e(Ue,ao),e(ao,ca),e(Ue,ha),e(y,pa),e(y,I),_(We,I,null),e(I,ma),e(I,gt),e(gt,ua),e(gt,kt),e(kt,fa),e(I,_a),e(I,ro),e(ro,ga),e(y,ka),e(y,io),p(t,Zo,h),p(t,R,h),e(R,de),e(de,lo),_(He,lo,null),e(R,Ma),e(R,co),e(co,va),p(t,Yo,h),p(t,L,h),_(Ve,L,null),e(L,ba),e(L,Ke),e(Ke,ya),e(Ke,Mt),e(Mt,wa),e(Ke,Ta),e(L,za),e(L,Qe),e(Qe,qa),e(Qe,Xe),e(Xe,$a),e(Qe,Ea),e(L,xa),e(L,j),_(Re,j,null),e(j,Ca),e(j,J),e(J,Pa),e(J,vt),e(vt,ja),e(J,Fa),e(J,ho),e(ho,La),e(J,Sa),e(j,Aa),_(le,j,null),e(j,Oa),e(j,po),e(po,D
a),e(j,Na),_(Je,j,null),p(t,en,h),p(t,Z,h),e(Z,ce),e(ce,mo),_(Ze,mo,null),e(Z,Ia),e(Z,uo),e(uo,Ga),p(t,tn,h),p(t,S,h),_(Ye,S,null),e(S,Ba),e(S,et),e(et,Ua),e(et,bt),e(bt,Wa),e(et,Ha),e(S,Va),e(S,tt),e(tt,Ka),e(tt,ot),e(ot,Qa),e(tt,Xa),e(S,Ra),e(S,b),_(nt,b,null),e(b,Ja),e(b,Y),e(Y,Za),e(Y,yt),e(yt,Ya),e(Y,er),e(Y,fo),e(fo,tr),e(Y,or),e(b,nr),_(he,b,null),e(b,sr),e(b,_o),e(_o,ar),e(b,rr),_(st,b,null),e(b,ir),e(b,go),e(go,dr),e(b,lr),e(b,ko),e(ko,Mo),e(Mo,vo),e(vo,bo),e(bo,cr),e(b,hr),e(b,yo),e(yo,wo),e(wo,To),e(To,zo),e(zo,pr),e(b,mr),e(b,qo),e(qo,$o),e($o,Eo),e(Eo,xo),e(xo,ur),e(b,fr),e(b,Co),e(Co,Po),e(Po,at),e(at,pe),e(pe,me),e(me,jo),_(rt,jo,null),e(pe,_r),e(pe,Fo),e(Fo,gr),e(at,kr),e(at,Lo),e(Lo,Mr),on=!0},p(t,[h]){const it={};h&2&&(it.$$scope={dirty:h,ctx:t}),le.$set(it);const So={};h&2&&(So.$$scope={dirty:h,ctx:t}),he.$set(So)},i(t){on||(g(z.$$.fragment,t),g(ve.$$.fragment,t),g(we.$$.fragment,t),g(Te.$$.fragment,t),g(qe.$$.fragment,t),g($e.$$.fragment,t),g(Ee.$$.fragment,t),g(Ce.$$.fragment,t),g(Pe.$$.fragment,t),g(je.$$.fragment,t),g(Ae.$$.fragment,t),g(Oe.$$.fragment,t),g(Be.$$.fragment,t),g(We.$$.fragment,t),g(He.$$.fragment,t),g(Ve.$$.fragment,t),g(Re.$$.fragment,t),g(le.$$.fragment,t),g(Je.$$.fragment,t),g(Ze.$$.fragment,t),g(Ye.$$.fragment,t),g(nt.$$.fragment,t),g(he.$$.fragment,t),g(st.$$.fragment,t),g(rt.$$.fragment,t),on=!0)},o(t){k(z.$$.fragment,t),k(ve.$$.fragment,t),k(we.$$.fragment,t),k(Te.$$.fragment,t),k(qe.$$.fragment,t),k($e.$$.fragment,t),k(Ee.$$.fragment,t),k(Ce.$$.fragment,t),k(Pe.$$.fragment,t),k(je.$$.fragment,t),k(Ae.$$.fragment,t),k(Oe.$$.fragment,t),k(Be.$$.fragment,t),k(We.$$.fragment,t),k(He.$$.fragment,t),k(Ve.$$.fragment,t),k(Re.$$.fragment,t),k(le.$$.fragment,t),k(Je.$$.fragment,t),k(Ze.$$.fragment,t),k(Ye.$$.fragment,t),k(nt.$$.fragment,t),k(he.$$.fragment,t),k(st.$$.fragment,t),k(rt.$$.fragment,t),on=!1},d(t){o(m),t&&o(C),t&&o(v),M(z),t&&o(Ao),t&&o(W),M(ve),t&&o(Oo),t&&o(oe),t&&o(Do),t&&o(dt),t&&o(No),t&&o(lt),t&&o(Io),t&&o(ne),t&&o(Go),t&&o(H),M(we),t&&o(Bo),t&&o(A),t&&o(Uo),t&&o(P),t&&o(Wo),t&&o(ht),t&&o(Ho),M(Te,t),t&&o(Vo),t&&o(pt),t&&o(Ko),M(qe,t),t&&o(Qo),t&&o(V),M($e),t&&o(Xo),t&&o(x),M(Ee),M(Ce),t&&o(Ro),t&&o(X),M(Pe),t&&o(Jo),t&&o(y),M(je),M(Ae),M(Oe),M(Be),M(We),t&&o(Zo),t&&o(R),M(He),t&&o(Yo),t&&o(L),M(Ve),M(Re),M(le),M(Je),t&&o(en),t&&o(Z),M(Ze),t&&o(tn),t&&o(S),M(Ye),M(nt),M(he),M(st),M(rt)}}}const dd={local:"m2m100",sections:[{local:"overview",sections:[{local:"training-and-generation",title:"Training and Generation"}],title:"Overview"},{local:"transformers.M2M100Config",title:"M2M100Config"},{local:"transformers.M2M100Tokenizer",title:"M2M100Tokenizer"},{local:"transformers.M2M100Model",title:"M2M100Model"},{local:"transformers.M2M100ForConditionalGeneration",title:"M2M100ForConditionalGeneration"}],title:"M2M100"};function ld(Me,m,C){let{fw:v}=m;return Me.$$set=T=>{"fw"in T&&C(0,v=T.fw)},[v]}class _d extends td{constructor(m){super();od(this,m,ld,id,nd,{fw:0})}}export{_d as default,dd as metadata};
9,904
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/electra.mdx-1a3d4047.js
import{S as QM,i as UM,s as VM,e as n,k as l,w as T,t as r,L as JM,c as s,d as t,m as d,a,x as E,h as i,b as c,J as e,g as m,y as w,q as y,o as b,B as F}from"../../chunks/vendor-b1433968.js";import{T as Ee}from"../../chunks/Tip-c3840994.js";import{D as W}from"../../chunks/Docstring-ff504c58.js";import{C as Pe}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ye}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function KM(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function GM(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function XM(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function YM(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function ZM(j){let 
h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function ez(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function tz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function oz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function nz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function sz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch 
models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function az(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although 
the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function rz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: 
`),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function iz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function lz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional 
arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function dz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function cz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function hz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes 
care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function pz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function mz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function uz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e;return{c(){h=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),g=l(),v=n("ul"),k=n("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),he=r("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=n("p"),ee=r("This second option is useful when using "),D=n("code"),oe=r("tf.keras.Model.fit"),pe=r(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),me=r("model(inputs)"),le=r("."),J=l(),I=n("p"),ne=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),P=n("ul"),q=n("li"),ae=r("a single Tensor with "),R=n("code"),de=r("input_ids"),re=r(" only and nothing else: "),S=n("code"),ue=r("model(inputs_ids)"),ce=l(),C=n("li"),fe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),te=r("model([input_ids, attention_mask])"),ie=r(" or "),H=n("code"),ge=r("model([input_ids, attention_mask, token_type_ids])"),G=l(),L=n("li"),se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),_e=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=s(p,"P",{});var M=a(h);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),g=d(p),v=s(p,"UL",{});var Y=a(v);k=s(Y,"LI",{});var we=a(k);_=i(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),u=d(Y),x=s(Y,"LI",{});var $e=a(x);he=i($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),Y.forEach(t),K=d(p),z=s(p,"P",{});var N=a(z);ee=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var be=a(D);oe=i(be,"tf.keras.Model.fit"),be.forEach(t),pe=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(N,"CODE",{});var ve=a(O);me=i(ve,"model(inputs)"),ve.forEach(t),le=i(N,"."),N.forEach(t),J=d(p),I=s(p,"P",{});var xe=a(I);ne=i(xe,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),X=d(p),P=s(p,"UL",{});var A=a(P);q=s(A,"LI",{});var U=a(q);ae=i(U,"a single Tensor with "),R=s(U,"CODE",{});var Me=a(R);de=i(Me,"input_ids"),Me.forEach(t),re=i(U," only and nothing else: "),S=s(U,"CODE",{});var Te=a(S);ue=i(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(A),C=s(A,"LI",{});var V=a(C);fe=i(V,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(V,"CODE",{});var ze=a(B);te=i(ze,"model([input_ids, attention_mask])"),ze.forEach(t),ie=i(V," or "),H=s(V,"CODE",{});var Fe=a(H);ge=i(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),V.forEach(t),G=d(A),L=s(A,"LI",{});var Z=a(L);se=i(Z,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(Z,"CODE",{});var ke=a(Q);_e=i(ke,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ke.forEach(t),Z.forEach(t),A.forEach(t)},m(p,M){m(p,h,M),e(h,$),m(p,g,M),m(p,v,M),e(v,k),e(k,_),e(v,u),e(v,x),e(x,he),m(p,K,M),m(p,z,M),e(z,ee),e(z,D),e(D,oe),e(z,pe),e(z,O),e(O,me),e(z,le),m(p,J,M),m(p,I,M),e(I,ne),m(p,X,M),m(p,P,M),e(P,q),e(q,ae),e(q,R),e(R,de),e(q,re),e(q,S),e(S,ue),e(P,ce),e(P,C),e(C,fe),e(C,B),e(B,te),e(C,ie),e(C,H),e(H,ge),e(P,G),e(P,L),e(L,se),e(L,Q),e(Q,_e)},d(p){p&&t(h),p&&t(g),p&&t(v),p&&t(K),p&&t(z),p&&t(J),p&&t(I),p&&t(X),p&&t(P)}}}function fz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function gz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function _z(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` 
instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function vz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function kz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function Tz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function Ez(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function wz(j){let h,$,g,v,k;return{c(){h=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),v=r("Module"),k=r(` instance afterwards instead of this since the former takes care 
of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=s(_,"P",{});var u=a(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=a(g);v=i(x,"Module"),x.forEach(t),k=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){m(_,h,u),e(h,$),e(h,g),e(g,v),e(h,k)},d(_){_&&t(h)}}}function yz(j){let h,$,g,v,k,_,u,x,he,K,z,ee,D,oe,pe,O,me,le,J,I,ne,X,P,q,ae,R,de,re,S,ue,ce,C,fe,B,te,ie,H,ge,G,L,se,Q,_e,p,M,Y,we,$e,N,be,ve,xe,A,U,Me,Te,V,ze,Fe,Z,ke,Pd,Ps,bf,Cd,Ff,rm,Xe,Cs,$f,Wt,xf,pl,Mf,zf,ml,Pf,Cf,qs,qf,jf,Af,Eo,If,ul,Lf,Nf,fl,Df,Of,Sf,qd,Wf,Bf,js,im,wo,Tn,jd,As,Rf,Ad,Hf,lm,Ct,Is,Qf,Id,Uf,Vf,En,gl,Jf,Kf,_l,Gf,Xf,Yf,Ls,Zf,vl,eg,tg,dm,yo,wn,Ld,Ns,og,Nd,ng,cm,qt,Ds,sg,Os,ag,Dd,rg,ig,lg,yn,kl,dg,cg,Tl,hg,pg,mg,Ss,ug,El,fg,gg,hm,bo,bn,Od,Ws,_g,Sd,vg,pm,Fo,Bs,kg,Rs,Tg,wl,Eg,wg,mm,$o,Hs,yg,Qs,bg,yl,Fg,$g,um,xo,Fn,Wd,Us,xg,Bd,Mg,fm,Ye,Vs,zg,Rd,Pg,Cg,Js,qg,bl,jg,Ag,Ig,Ks,Lg,Gs,Ng,Dg,Og,ot,Xs,Sg,Mo,Wg,Fl,Bg,Rg,Hd,Hg,Qg,Ug,$n,Vg,Qd,Jg,Kg,Ys,gm,zo,xn,Ud,Zs,Gg,Vd,Xg,_m,Be,ea,Yg,Jd,Zg,e_,Kd,t_,o_,ta,n_,$l,s_,a_,r_,oa,i_,na,l_,d_,c_,nt,sa,h_,Po,p_,xl,m_,u_,Gd,f_,g_,__,Mn,v_,Xd,k_,T_,aa,vm,Co,zn,Yd,ra,E_,Zd,w_,km,Re,ia,y_,ec,b_,F_,tc,$_,x_,la,M_,Ml,z_,P_,C_,da,q_,ca,j_,A_,I_,st,ha,L_,qo,N_,zl,D_,O_,oc,S_,W_,B_,Pn,R_,nc,H_,Q_,pa,Tm,jo,Cn,sc,ma,U_,ac,V_,Em,Ze,ua,J_,rc,K_,G_,fa,X_,Pl,Y_,Z_,ev,ga,tv,_a,ov,nv,sv,We,va,av,Ao,rv,Cl,iv,lv,ic,dv,cv,hv,qn,pv,lc,mv,uv,ka,fv,dc,gv,_v,Ta,wm,Io,jn,cc,Ea,vv,hc,kv,ym,et,wa,Tv,pc,Ev,wv,ya,yv,ql,bv,Fv,$v,ba,xv,Fa,Mv,zv,Pv,at,$a,Cv,Lo,qv,jl,jv,Av,mc,Iv,Lv,Nv,An,Dv,uc,Ov,Sv,xa,bm,No,In,fc,Ma,Wv,gc,Bv,Fm,He,za,Rv,_c,Hv,Qv,vc,Uv,Vv,Pa,Jv,Al,Kv,Gv,Xv,Ca,Yv,qa,Zv,ek,tk,rt,ja,ok,Do,nk,Il,sk,ak,kc,rk,ik,lk,Ln,dk,Tc,ck,hk,Aa,$m,Oo,Nn,Ec,Ia,pk,wc,mk,xm,tt,La,uk,So,fk,yc,gk,_k,bc,vk,kk,Tk,Na,Ek,Ll,wk,yk,bk,Da,Fk,Oa,$k,xk,Mk,it,Sa,zk,Wo,Pk,Nl,Ck,qk,Fc,jk,Ak,Ik,Dn,Lk,$c,Nk,Dk,Wa,Mm,Bo,On,xc,Ba,Ok,Mc,Sk,zm,Qe,Ra,Wk,zc,Bk,Rk,Ha,Hk,Dl,Qk,Uk,Vk,Qa,Jk,Ua,Kk,Gk,Xk,Sn,Yk,lt,Va,Zk,Ro,eT,Ol,tT,oT,Pc,nT,sT,aT,Wn,rT,Cc,iT,lT,Ja,Pm,Ho,Bn,qc,Ka,dT,jc,cT,Cm,je,Ga,hT,Ac,pT,mT,Ic,uT,fT,Xa,gT,Sl,_T,vT,kT,Ya,TT,Za,ET,wT,yT,Rn,bT,dt,er,FT,Qo,$T,Wl,xT,MT,Lc,zT,PT,CT,Hn,qT,Nc,jT,AT,tr,qm,Uo,Qn,Dc,or,IT,Oc,LT,jm,Ae,nr,NT,Sc,DT,OT,Wc,ST,WT,sr,BT,Bl,RT,HT,QT,ar,UT,rr,VT,JT,KT,Un,GT,ct,ir,XT,Vo,YT,Rl,ZT,e1,Bc,t1,o1,n1,Vn,s1,Rc,a1,r1,lr,Am,Jo,Jn,Hc,dr,i1,Qc,l1,Im,Ue,cr,d1,Uc,c1,h1,hr,p1,Hl,m1,u1,f1,pr,g1,mr,_1,v1,k1,Kn,T1,ht,ur,E1,Ko,w1,Ql,y1,b1,Vc,F1,$1,x1,Gn,M1,Jc,z1,P1,fr,Lm,Go,Xn,Kc,gr,C1,Gc,q1,Nm,Ve,_r,j1,Xc,A1,I1,vr,L1,Ul,N1,D1,O1,kr,S1,Tr,W1,B1,R1,Yn,H1,pt,Er,Q1,Xo,U1,Vl,V1,J1,Yc,K1,G1,X1,Zn,Y1,Zc,Z1,eE,wr,Dm,Yo,es,eh,yr,tE,th,oE,Om,Ie,br,nE,oh,sE,aE,nh,rE,iE,Fr,lE,Jl,dE,cE,hE,$r,pE,xr,mE,uE,fE,ts,gE,mt,Mr,_E,Zo,vE,Kl,kE,TE,sh,EE,wE,yE,os,bE,ah,FE,$E,zr,Sm,en,ns,rh,Pr,xE,ih,ME,Wm,Je,Cr,zE,tn,PE,lh,CE,qE,dh,jE,AE,IE,qr,LE,Gl,NE,DE,OE,jr,SE,Ar,WE,BE,RE,ss,HE,ut,Ir,QE,on,UE,Xl,VE,JE,ch,KE,GE,XE,as,YE,hh,ZE,ew,Lr,Bm,nn,rs,ph,Nr,tw,mh,ow,Rm,Le,Dr,nw,uh,sw,aw,Or,rw,Yl,iw,lw,dw,Sr,cw,Wr,hw,pw,mw,fh,uw,fw,Bt,gh,Br,gw,_w,_h,Rr,vw,kw,vh,Hr,Tw,Ew,kh,Qr,ww,yw,ft,Ur,bw,sn,Fw,Th,$w,xw,Eh,Mw,zw,Pw,is,Cw,wh,qw,jw,Vr,Hm,an,ls,yh,Jr,Aw,bh,Iw,Qm,Ce,Kr,Lw,Fh,Nw,Dw,$h,Ow,Sw,Gr,Ww,Zl,Bw,Rw,Hw,Xr,Qw,Yr,Uw,Vw,Jw,xh,Kw,Gw,Rt,Mh,Zr,Xw,Yw,zh,ei,Zw,ey,Ph,ti,ty,oy,Ch,oi,ny,sy,gt,ni,ay,rn,ry,qh,iy,ly,jh,dy,cy,hy,ds,py,Ah,my,uy,si,Um,ln,cs,Ih,ai,fy,Lh,gy,Vm,Ne,ri,_y,ii,vy,Nh,ky,Ty,Ey,li,wy,ed,yy,by,Fy,di,$
y,ci,xy,My,zy,Dh,Py,Cy,Ht,Oh,hi,qy,jy,Sh,pi,Ay,Iy,Wh,mi,Ly,Ny,Bh,ui,Dy,Oy,_t,fi,Sy,dn,Wy,Rh,By,Ry,Hh,Hy,Qy,Uy,hs,Vy,Qh,Jy,Ky,gi,Jm,cn,ps,Uh,_i,Gy,Vh,Xy,Km,De,vi,Yy,Jh,Zy,eb,ki,tb,td,ob,nb,sb,Ti,ab,Ei,rb,ib,lb,Kh,db,cb,Qt,Gh,wi,hb,pb,Xh,yi,mb,ub,Yh,bi,fb,gb,Zh,Fi,_b,vb,vt,$i,kb,hn,Tb,ep,Eb,wb,tp,yb,bb,Fb,ms,$b,op,xb,Mb,xi,Gm,pn,us,np,Mi,zb,sp,Pb,Xm,Oe,zi,Cb,ap,qb,jb,Pi,Ab,od,Ib,Lb,Nb,Ci,Db,qi,Ob,Sb,Wb,rp,Bb,Rb,Ut,ip,ji,Hb,Qb,lp,Ai,Ub,Vb,dp,Ii,Jb,Kb,cp,Li,Gb,Xb,kt,Ni,Yb,mn,Zb,hp,e0,t0,pp,o0,n0,s0,fs,a0,mp,r0,i0,Di,Ym,un,gs,up,Oi,l0,fp,d0,Zm,qe,Si,c0,gp,h0,p0,_p,m0,u0,Wi,f0,nd,g0,_0,v0,Bi,k0,Ri,T0,E0,w0,vp,y0,b0,Vt,kp,Hi,F0,$0,Tp,Qi,x0,M0,Ep,Ui,z0,P0,wp,Vi,C0,q0,Tt,Ji,j0,fn,A0,yp,I0,L0,bp,N0,D0,O0,_s,S0,Fp,W0,B0,Ki,eu,gn,vs,$p,Gi,R0,xp,H0,tu,Se,Xi,Q0,_n,U0,Mp,V0,J0,zp,K0,G0,X0,Yi,Y0,sd,Z0,e2,t2,Zi,o2,el,n2,s2,a2,Pp,r2,i2,Jt,Cp,tl,l2,d2,qp,ol,c2,h2,jp,nl,p2,m2,Ap,sl,u2,f2,Et,al,g2,vn,_2,Ip,v2,k2,Lp,T2,E2,w2,ks,y2,Np,b2,F2,rl,ou;return _=new ye({}),oe=new ye({}),Ps=new ye({}),Cs=new W({props:{name:"class transformers.ElectraConfig",anchor:"transformers.ElectraConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"embedding_size",val:" = 128"},{name:"hidden_size",val:" = 256"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 4"},{name:"intermediate_size",val:" = 1024"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"summary_type",val:" = 'first'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = 'gelu'"},{name:"summary_last_dropout",val:" = 0.1"},{name:"pad_token_id",val:" = 0"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"use_cache",val:" = True"},{name:"classifier_dropout",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/configuration_electra.py#L34",parametersDescription:[{anchor:"transformers.ElectraConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the ELECTRA model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraModel">ElectraModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraModel">TFElectraModel</a>.`,name:"vocab_size"},{anchor:"transformers.ElectraConfig.embedding_size",description:`<strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"embedding_size"},{anchor:"transformers.ElectraConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.ElectraConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.ElectraConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.ElectraConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.ElectraConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.ElectraConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.ElectraConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.ElectraConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.ElectraConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraModel">ElectraModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraModel">TFElectraModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.ElectraConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.ElectraConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.ElectraConfig.summary_type",description:`<strong>summary_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;first&quot;</code>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Has to be one of the following options:</p> <ul> <li><code>&quot;last&quot;</code>: Take the last token hidden state (like XLNet).</li> <li><code>&quot;first&quot;</code>: Take the first token hidden state (like BERT).</li> <li><code>&quot;mean&quot;</code>: Take the mean of all tokens hidden states.</li> <li><code>&quot;cls_index&quot;</code>: Supply a Tensor of classification token position (like GPT/GPT-2).</li> <li><code>&quot;attn&quot;</code>: Not implemented now, use multi-head attention.</li> </ul>`,name:"summary_type"},{anchor:"transformers.ElectraConfig.summary_use_proj",description:`<strong>summary_use_proj</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Whether or not to add a projection after the vector extraction.`,name:"summary_use_proj"},{anchor:"transformers.ElectraConfig.summary_activation",description:`<strong>summary_activation</strong> (<code>str</code>, <em>optional</em>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Pass <code>&quot;gelu&quot;</code> for a gelu activation to the output, any other value will result in no activation.`,name:"summary_activation"},{anchor:"transformers.ElectraConfig.summary_last_dropout",description:`<strong>summary_last_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>The dropout ratio to be used after the projection and activation.`,name:"summary_last_dropout"},{anchor:"transformers.ElectraConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. 
For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.ElectraConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"},{anchor:"transformers.ElectraConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),js=new Pe({props:{code:`from transformers import ElectraModel, ElectraConfig # Initializing a ELECTRA electra-base-uncased style configuration configuration = ElectraConfig() # Initializing a model from the electra-base-uncased style configuration model = ElectraModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraModel, ElectraConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ELECTRA electra-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ElectraConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the electra-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),As=new ye({}),Is=new W({props:{name:"class transformers.ElectraTokenizer",anchor:"transformers.ElectraTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/tokenization_electra.py#L52"}}),Ns=new ye({}),Ds=new W({props:{name:"class transformers.ElectraTokenizerFast",anchor:"transformers.ElectraTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/tokenization_electra_fast.py#L61"}}),Ws=new ye({}),Bs=new W({props:{name:"class transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput",anchor:"transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L696",parametersDescription:[{anchor:"transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss of the ELECTRA objective.`,name:"loss"},{anchor:"transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Prediction scores of the head (scores for each token before SoftMax).`,name:"logits"},{anchor:"transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Hs=new W({props:{name:"class transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput",anchor:"transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput",parameters:[{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L823",parametersDescription:[{anchor:"transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>tf.Tensor</code> of shape <code>(1,)</code>) &#x2014; Total loss of the ELECTRA 
objective.`,name:"loss"},{anchor:"transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Prediction scores of the head (scores for each token before SoftMax).`,name:"logits"},{anchor:"transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Us=new ye({}),Vs=new W({props:{name:"class transformers.ElectraModel",anchor:"transformers.ElectraModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L807",parametersDescription:[{anchor:"transformers.ElectraModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xs=new W({props:{name:"forward",anchor:"transformers.ElectraModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L834",parametersDescription:[{anchor:"transformers.ElectraModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$n=new Ee({props:{$$slots:{default:[KM]},$$scope:{ctx:j}}}),Ys=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraModel import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraModel.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraModel.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Zs=new ye({}),ea=new W({props:{name:"class transformers.ElectraForPreTraining",anchor:"transformers.ElectraForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1028",parametersDescription:[{anchor:"transformers.ElectraForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),sa=new W({props:{name:"forward",anchor:"transformers.ElectraForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1037",parametersDescription:[{anchor:"transformers.ElectraForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraForPreTraining.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraForPreTraining.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ElectraForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the ELECTRA loss. 
Input should be a sequence of tokens (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates the token is an original token,</li> <li>1 indicates the token was replaced.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput" >transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss of the ELECTRA objective.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Prediction scores of the head (scores for each token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput" >transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mn=new Ee({props:{$$slots:{default:[GM]},$$scope:{ctx:j}}}),aa=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForPreTraining import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraForPreTraining.from_pretrained('google/electra-small-discriminator') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 logits = model(input_ids).logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForPreTraining.from_pretrained(<span 
class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_ids).logits`}}),ra=new ye({}),ia=new W({props:{name:"class transformers.ElectraForMaskedLM",anchor:"transformers.ElectraForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1123",parametersDescription:[{anchor:"transformers.ElectraForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ha=new W({props:{name:"forward",anchor:"transformers.ElectraForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1140",parametersDescription:[{anchor:"transformers.ElectraForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraForMaskedLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraForMaskedLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ElectraForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, 
sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Pn=new Ee({props:{$$slots:{default:[XM]},$$scope:{ctx:j}}}),pa=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForMaskedLM import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraForMaskedLM.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ma=new ye({}),ua=new W({props:{name:"class transformers.ElectraForSequenceClassification",anchor:"transformers.ElectraForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L932",parametersDescription:[{anchor:"transformers.ElectraForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),va=new W({props:{name:"forward",anchor:"transformers.ElectraForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L943",parametersDescription:[{anchor:"transformers.ElectraForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraForSequenceClassification.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraForSequenceClassification.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ElectraForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qn=new Ee({props:{$$slots:{default:[YM]},$$scope:{ctx:j}}}),ka=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForSequenceClassification import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = 
ElectraForSequenceClassification.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ta=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForSequenceClassification import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraForSequenceClassification.from_pretrained('google/electra-small-discriminator', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ea=new 
ye({}),wa=new W({props:{name:"class transformers.ElectraForMultipleChoice",anchor:"transformers.ElectraForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1402",parametersDescription:[{anchor:"transformers.ElectraForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$a=new W({props:{name:"forward",anchor:"transformers.ElectraForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1413",parametersDescription:[{anchor:"transformers.ElectraForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraForMultipleChoice.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraForMultipleChoice.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ElectraForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),An=new Ee({props:{$$slots:{default:[ZM]},$$scope:{ctx:j}}}),xa=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForMultipleChoice import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraForMultipleChoice.from_pretrained('google/electra-small-discriminator') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ma=new ye({}),za=new W({props:{name:"class transformers.ElectraForTokenClassification",anchor:"transformers.ElectraForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1209",parametersDescription:[{anchor:"transformers.ElectraForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ja=new W({props:{name:"forward",anchor:"transformers.ElectraForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1222",parametersDescription:[{anchor:"transformers.ElectraForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraForTokenClassification.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraForTokenClassification.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ElectraForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ln=new Ee({props:{$$slots:{default:[ez]},$$scope:{ctx:j}}}),Aa=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForTokenClassification import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraForTokenClassification.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my 
dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ia=new ye({}),La=new W({props:{name:"class transformers.ElectraForQuestionAnswering",anchor:"transformers.ElectraForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1297",parametersDescription:[{anchor:"transformers.ElectraForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Sa=new W({props:{name:"forward",anchor:"transformers.ElectraForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_electra.py#L1311",parametersDescription:[{anchor:"transformers.ElectraForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ElectraForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ElectraForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ElectraForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ElectraForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ElectraForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ElectraForQuestionAnswering.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ElectraForQuestionAnswering.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.ElectraForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ElectraForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ElectraForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ElectraForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.ElectraForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Dn=new Ee({props:{$$slots:{default:[tz]},$$scope:{ctx:j}}}),Wa=new Pe({props:{code:`from transformers import ElectraTokenizer, ElectraForQuestionAnswering import torch tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = ElectraForQuestionAnswering.from_pretrained('google/electra-small-discriminator') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, ElectraForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ElectraForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Ba=new ye({}),Ra=new W({props:{name:"class transformers.TFElectraModel",anchor:"transformers.TFElectraModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L943",parametersDescription:[{anchor:"transformers.TFElectraModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Sn=new Ee({props:{$$slots:{default:[oz]},$$scope:{ctx:j}}}),Va=new W({props:{name:"call",anchor:"transformers.TFElectraModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L949",parametersDescription:[{anchor:"transformers.TFElectraModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFElectraModel.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFElectraModel.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFElectraModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFElectraModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Wn=new Ee({props:{$$slots:{default:[nz]},$$scope:{ctx:j}}}),Ja=new Pe({props:{code:`from transformers import ElectraTokenizer, TFElectraModel import tensorflow as tf tokenizer = 
ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraModel.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFElectraModel.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ka=new ye({}),Ga=new W({props:{name:"class transformers.TFElectraForPreTraining",anchor:"transformers.TFElectraForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1059",parametersDescription:[{anchor:"transformers.TFElectraForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Rn=new Ee({props:{$$slots:{default:[sz]},$$scope:{ctx:j}}}),er=new W({props:{name:"call",anchor:"transformers.TFElectraForPreTraining.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1066",parametersDescription:[{anchor:"transformers.TFElectraForPreTraining.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraForPreTraining.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraForPreTraining.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraForPreTraining.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraForPreTraining.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput" >transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>tf.Tensor</code> of shape <code>(1,)</code>) \u2014 Total loss of the ELECTRA objective.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Prediction scores of the head (scores for each token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput" >transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Hn=new Ee({props:{$$slots:{default:[az]},$$scope:{ctx:j}}}),tr=new Pe({props:{code:`import tensorflow as tf from transformers import ElectraTokenizer, TFElectraForPreTraining tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraForPreTraining.from_pretrained('google/electra-small-discriminator') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 
outputs = model(input_ids) scores = outputs[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFElectraForPreTraining.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tf.constant(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>))[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>scores = outputs[<span class="hljs-number">0</span>]`}}),or=new ye({}),nr=new W({props:{name:"class transformers.TFElectraForMaskedLM",anchor:"transformers.TFElectraForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1189",parametersDescription:[{anchor:"transformers.TFElectraForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Un=new Ee({props:{$$slots:{default:[rz]},$$scope:{ctx:j}}}),ir=new W({props:{name:"call",anchor:"transformers.TFElectraForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1211",parametersDescription:[{anchor:"transformers.TFElectraForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFElectraForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" 
>transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Vn=new Ee({props:{$$slots:{default:[iz]},$$scope:{ctx:j}}}),lr=new Pe({props:{code:`from transformers import ElectraTokenizer, TFElectraForMaskedLM import tensorflow as tf tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraForMaskedLM.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFElectraForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),dr=new ye({}),cr=new W({props:{name:"class transformers.TFElectraForSequenceClassification",anchor:"transformers.TFElectraForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1328",parametersDescription:[{anchor:"transformers.TFElectraForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kn=new Ee({props:{$$slots:{default:[lz]},$$scope:{ctx:j}}}),ur=new W({props:{name:"call",anchor:"transformers.TFElectraForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1335",parametersDescription:[{anchor:"transformers.TFElectraForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFElectraForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Gn=new Ee({props:{$$slots:{default:[dz]},$$scope:{ctx:j}}}),fr=new Pe({props:{code:`from transformers import ElectraTokenizer, TFElectraForSequenceClassification import tensorflow as tf tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraForSequenceClassification.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
TFElectraForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),gr=new ye({}),_r=new W({props:{name:"class transformers.TFElectraForMultipleChoice",anchor:"transformers.TFElectraForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1420",parametersDescription:[{anchor:"transformers.TFElectraForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Yn=new Ee({props:{$$slots:{default:[cz]},$$scope:{ctx:j}}}),Er=new W({props:{name:"call",anchor:"transformers.TFElectraForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1442",parametersDescription:[{anchor:"transformers.TFElectraForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFElectraForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Zn=new Ee({props:{$$slots:{default:[hz]},$$scope:{ctx:j}}}),wr=new Pe({props:{code:`from transformers import ElectraTokenizer, TFElectraForMultipleChoice import tensorflow as tf tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraForMultipleChoice.from_pretrained('google/electra-small-discriminator') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFElectraForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),yr=new ye({}),br=new W({props:{name:"class transformers.TFElectraForTokenClassification",anchor:"transformers.TFElectraForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1568",parametersDescription:[{anchor:"transformers.TFElectraForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ts=new Ee({props:{$$slots:{default:[pz]},$$scope:{ctx:j}}}),Mr=new W({props:{name:"call",anchor:"transformers.TFElectraForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1581",parametersDescription:[{anchor:"transformers.TFElectraForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFElectraForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),os=new Ee({props:{$$slots:{default:[mz]},$$scope:{ctx:j}}}),zr=new Pe({props:{code:`from transformers import ElectraTokenizer, TFElectraForTokenClassification import tensorflow as tf tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraForTokenClassification.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFElectraForTokenClassification.from_pretrained(<span 
class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pr=new ye({}),Cr=new W({props:{name:"class transformers.TFElectraForQuestionAnswering",anchor:"transformers.TFElectraForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1667",parametersDescription:[{anchor:"transformers.TFElectraForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ss=new Ee({props:{$$slots:{default:[uz]},$$scope:{ctx:j}}}),Ir=new W({props:{name:"call",anchor:"transformers.TFElectraForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_tf_electra.py#L1677",parametersDescription:[{anchor:"transformers.TFElectraForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFElectraForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFElectraForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFElectraForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFElectraForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFElectraForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFElectraForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFElectraForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFElectraForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFElectraForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFElectraForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),as=new Ee({props:{$$slots:{default:[fz]},$$scope:{ctx:j}}}),Lr=new Pe({props:{code:`from transformers import ElectraTokenizer, TFElectraForQuestionAnswering import tensorflow as tf tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = TFElectraForQuestionAnswering.from_pretrained('google/electra-small-discriminator') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, TFElectraForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFElectraForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),Nr=new ye({}),Dr=new W({props:{name:"class transformers.FlaxElectraModel",anchor:"transformers.FlaxElectraModel",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L661",parametersDescription:[{anchor:"transformers.FlaxElectraModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ur=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),is=new Ee({props:{$$slots:{default:[gz]},$$scope:{ctx:j}}}),Vr=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraModel tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraModel.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraModel.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Jr=new ye({}),Kr=new W({props:{name:"class transformers.FlaxElectraForPreTraining",anchor:"transformers.FlaxElectraForPreTraining",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L810",parametersDescription:[{anchor:"transformers.FlaxElectraForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ni=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.electra.modeling_flax_electra.FlaxElectraForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.electra.modeling_flax_electra.FlaxElectraForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),ds=new Ee({props:{$$slots:{default:[_z]},$$scope:{ctx:j}}}),si=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraForPreTraining tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraForPreTraining.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraForPreTraining.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),ai=new ye({}),ri=new W({props:{name:"class transformers.FlaxElectraForMaskedLM",anchor:"transformers.FlaxElectraForMaskedLM",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L747",parametersDescription:[{anchor:"transformers.FlaxElectraForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fi=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),hs=new Ee({props:{$$slots:{default:[vz]},$$scope:{ctx:j}}}),gi=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraForMaskedLM tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraForMaskedLM.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("The capital of France is [MASK].", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),_i=new ye({}),vi=new W({props:{name:"class transformers.FlaxElectraForSequenceClassification",anchor:"transformers.FlaxElectraForSequenceClassification",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L1214",parametersDescription:[{anchor:"transformers.FlaxElectraForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$i=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each 
layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ms=new Ee({props:{$$slots:{default:[kz]},$$scope:{ctx:j}}}),xi=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraForSequenceClassification tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraForSequenceClassification.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Mi=new ye({}),zi=new W({props:{name:"class transformers.FlaxElectraForMultipleChoice",anchor:"transformers.FlaxElectraForMultipleChoice",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L1050",parametersDescription:[{anchor:"transformers.FlaxElectraForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ni=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),fs=new Ee({props:{$$slots:{default:[Tz]},$$scope:{ctx:j}}}),Di=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraForMultipleChoice tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraForMultipleChoice.from_pretrained('google/electra-small-discriminator') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='jax', padding=True) outputs = model(**{k: v[None, :] for k,v in encoding.items()}) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Oi=new ye({}),Si=new W({props:{name:"class transformers.FlaxElectraForTokenClassification",anchor:"transformers.FlaxElectraForTokenClassification",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L902",parametersDescription:[{anchor:"transformers.FlaxElectraForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ji=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_s=new Ee({props:{$$slots:{default:[Ez]},$$scope:{ctx:j}}}),Ki=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraForTokenClassification tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraForTokenClassification.from_pretrained('google/electra-small-discriminator') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Gi=new ye({}),Xi=new W({props:{name:"class transformers.FlaxElectraForQuestionAnswering",anchor:"transformers.FlaxElectraForQuestionAnswering",parameters:[{name:"config",val:": ElectraConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L1123",parametersDescription:[{anchor:"transformers.FlaxElectraForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),al=new W({props:{name:"__call__",anchor:"transformers.FlaxElectraPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/electra/modeling_flax_electra.py#L563",parametersDescription:[{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer">ElectraTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxElectraPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig" >ElectraConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ks=new Ee({props:{$$slots:{default:[wz]},$$scope:{ctx:j}}}),rl=new Pe({props:{code:`from transformers import ElectraTokenizer, FlaxElectraForQuestionAnswering tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') model = FlaxElectraForQuestionAnswering.from_pretrained('google/electra-small-discriminator') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='jax') outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ElectraTokenizer, FlaxElectraForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ElectraTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxElectraForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/electra-small-discriminator&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){h=n("meta"),$=l(),g=n("h1"),v=n("a"),k=n("span"),T(_.$$.fragment),u=l(),x=n("span"),he=r("ELECTRA"),K=l(),z=n("h2"),ee=n("a"),D=n("span"),T(oe.$$.fragment),pe=l(),O=n("span"),me=r("Overview"),le=l(),J=n("p"),I=r("The ELECTRA model was proposed in the paper "),ne=n("a"),X=r(`ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators`),P=r(`. ELECTRA is a new pretraining approach which trains two transformer models: the generator and the discriminator. The generator\u2019s role is to replace tokens in a sequence, and is therefore trained as a masked language model. The discriminator, which is the model we\u2019re interested in, tries to identify which tokens were replaced by the generator in the sequence.`),q=l(),ae=n("p"),R=r("The abstract from the paper is the following:"),de=l(),re=n("p"),S=n("em"),ue=r(`Masked language modeling (MLM) pretraining methods such as BERT corrupt the input by replacing some tokens with [MASK] and then train a model to reconstruct the original tokens. While they produce good results when transferred to downstream NLP tasks, they generally require large amounts of compute to be effective. 
As an alternative, we propose a more sample-efficient pretraining task called replaced token detection. Instead of masking the input, our approach corrupts it by replacing some tokens with plausible alternatives sampled from a small generator network. Then, instead of training a model that predicts the original identities of the corrupted tokens, we train a discriminative model that predicts whether each token in the corrupted input was replaced by a generator sample or not. Thorough experiments demonstrate this new pretraining task is more efficient than MLM because the task is defined over all input tokens rather than just the small subset that was masked out. As a result, the contextual representations learned by our approach substantially outperform the ones learned by BERT given the same model size, data, and compute. The gains are particularly strong for small models; for example, we train a model on one GPU for 4 days that outperforms GPT (trained using 30x more compute) on the GLUE natural language understanding benchmark. Our approach also works well at scale, where it performs comparably to RoBERTa and XLNet while using less than 1/4 of their compute and outperforms them when using the same amount of compute.`),ce=l(),C=n("p"),fe=r("Tips:"),B=l(),te=n("ul"),ie=n("li"),H=r(`ELECTRA is the pretraining approach, therefore there is nearly no changes done to the underlying model: BERT. The only change is the separation of the embedding size and the hidden size: the embedding size is generally smaller, while the hidden size is larger. An additional projection layer (linear) is used to project the embeddings from their embedding size to the hidden size. In the case where the embedding size is the same as the hidden size, no projection layer is used.`),ge=l(),G=n("li"),L=r("The ELECTRA checkpoints saved using "),se=n("a"),Q=r("Google Research\u2019s implementation"),_e=r(` contain both the generator and discriminator. The conversion script requires the user to name which model to export into the correct architecture. Once converted to the HuggingFace format, these checkpoints may be loaded into all available ELECTRA models, however. This means that the discriminator may be loaded in the `),p=n("a"),M=r("ElectraForMaskedLM"),Y=r(` model, and the generator may be loaded in the `),we=n("a"),$e=r("ElectraForPreTraining"),N=r(` model (the classification head will be randomly initialized as it doesn\u2019t exist in the generator).`),be=l(),ve=n("p"),xe=r("This model was contributed by "),A=n("a"),U=r("lysandre"),Me=r(". The original code can be found "),Te=n("a"),V=r("here"),ze=r("."),Fe=l(),Z=n("h2"),ke=n("a"),Pd=n("span"),T(Ps.$$.fragment),bf=l(),Cd=n("span"),Ff=r("ElectraConfig"),rm=l(),Xe=n("div"),T(Cs.$$.fragment),$f=l(),Wt=n("p"),xf=r("This is the configuration class to store the configuration of a "),pl=n("a"),Mf=r("ElectraModel"),zf=r(` or a `),ml=n("a"),Pf=r("TFElectraModel"),Cf=r(`. It is used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA `),qs=n("a"),qf=r("google/electra-small-discriminator"),jf=r(" architecture."),Af=l(),Eo=n("p"),If=r("Configuration objects inherit from "),ul=n("a"),Lf=r("PretrainedConfig"),Nf=r(` and can be used to control the model outputs. 
Read the documentation from `),fl=n("a"),Df=r("PretrainedConfig"),Of=r(" for more information."),Sf=l(),qd=n("p"),Wf=r("Examples:"),Bf=l(),T(js.$$.fragment),im=l(),wo=n("h2"),Tn=n("a"),jd=n("span"),T(As.$$.fragment),Rf=l(),Ad=n("span"),Hf=r("ElectraTokenizer"),lm=l(),Ct=n("div"),T(Is.$$.fragment),Qf=l(),Id=n("p"),Uf=r("Construct an ELECTRA tokenizer."),Vf=l(),En=n("p"),gl=n("a"),Jf=r("ElectraTokenizer"),Kf=r(" is identical to "),_l=n("a"),Gf=r("BertTokenizer"),Xf=r(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Yf=l(),Ls=n("p"),Zf=r("Refer to superclass "),vl=n("a"),eg=r("BertTokenizer"),tg=r(` for usage examples and documentation concerning parameters.`),dm=l(),yo=n("h2"),wn=n("a"),Ld=n("span"),T(Ns.$$.fragment),og=l(),Nd=n("span"),ng=r("ElectraTokenizerFast"),cm=l(),qt=n("div"),T(Ds.$$.fragment),sg=l(),Os=n("p"),ag=r("Construct a \u201Cfast\u201D ELECTRA tokenizer (backed by HuggingFace\u2019s "),Dd=n("em"),rg=r("tokenizers"),ig=r(" library)."),lg=l(),yn=n("p"),kl=n("a"),dg=r("ElectraTokenizerFast"),cg=r(" is identical to "),Tl=n("a"),hg=r("BertTokenizerFast"),pg=r(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),mg=l(),Ss=n("p"),ug=r("Refer to superclass "),El=n("a"),fg=r("BertTokenizerFast"),gg=r(` for usage examples and documentation concerning parameters.`),hm=l(),bo=n("h2"),bn=n("a"),Od=n("span"),T(Ws.$$.fragment),_g=l(),Sd=n("span"),vg=r("Electra specific outputs"),pm=l(),Fo=n("div"),T(Bs.$$.fragment),kg=l(),Rs=n("p"),Tg=r("Output type of "),wl=n("a"),Eg=r("ElectraForPreTraining"),wg=r("."),mm=l(),$o=n("div"),T(Hs.$$.fragment),yg=l(),Qs=n("p"),bg=r("Output type of "),yl=n("a"),Fg=r("TFElectraForPreTraining"),$g=r("."),um=l(),xo=n("h2"),Fn=n("a"),Wd=n("span"),T(Us.$$.fragment),xg=l(),Bd=n("span"),Mg=r("ElectraModel"),fm=l(),Ye=n("div"),T(Vs.$$.fragment),zg=l(),Rd=n("p"),Pg=r("The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different. Both the generator and discriminator checkpoints may be loaded into this model."),Cg=l(),Js=n("p"),qg=r("This model inherits from "),bl=n("a"),jg=r("PreTrainedModel"),Ag=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ig=l(),Ks=n("p"),Lg=r("This model is also a PyTorch "),Gs=n("a"),Ng=r("torch.nn.Module"),Dg=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Og=l(),ot=n("div"),T(Xs.$$.fragment),Sg=l(),Mo=n("p"),Wg=r("The "),Fl=n("a"),Bg=r("ElectraModel"),Rg=r(" forward method, overrides the "),Hd=n("code"),Hg=r("__call__"),Qg=r(" special method."),Ug=l(),T($n.$$.fragment),Vg=l(),Qd=n("p"),Jg=r("Example:"),Kg=l(),T(Ys.$$.fragment),gm=l(),zo=n("h2"),xn=n("a"),Ud=n("span"),T(Zs.$$.fragment),Gg=l(),Vd=n("span"),Xg=r("ElectraForPreTraining"),_m=l(),Be=n("div"),T(ea.$$.fragment),Yg=l(),Jd=n("p"),Zg=r("Electra model with a binary classification head on top as used during pretraining for identifying generated tokens."),e_=l(),Kd=n("p"),t_=r("It is recommended to load the discriminator checkpoint into that model."),o_=l(),ta=n("p"),n_=r("This model inherits from "),$l=n("a"),s_=r("PreTrainedModel"),a_=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),r_=l(),oa=n("p"),i_=r("This model is also a PyTorch "),na=n("a"),l_=r("torch.nn.Module"),d_=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),c_=l(),nt=n("div"),T(sa.$$.fragment),h_=l(),Po=n("p"),p_=r("The "),xl=n("a"),m_=r("ElectraForPreTraining"),u_=r(" forward method, overrides the "),Gd=n("code"),f_=r("__call__"),g_=r(" special method."),__=l(),T(Mn.$$.fragment),v_=l(),Xd=n("p"),k_=r("Examples:"),T_=l(),T(aa.$$.fragment),vm=l(),Co=n("h2"),zn=n("a"),Yd=n("span"),T(ra.$$.fragment),E_=l(),Zd=n("span"),w_=r("ElectraForMaskedLM"),km=l(),Re=n("div"),T(ia.$$.fragment),y_=l(),ec=n("p"),b_=r("Electra model with a language modeling head on top."),F_=l(),tc=n("p"),$_=r(`Even though both the discriminator and generator may be loaded into this model, the generator is the only model of the two to have been trained for the masked language modeling task.`),x_=l(),la=n("p"),M_=r("This model inherits from "),Ml=n("a"),z_=r("PreTrainedModel"),P_=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),C_=l(),da=n("p"),q_=r("This model is also a PyTorch "),ca=n("a"),j_=r("torch.nn.Module"),A_=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),I_=l(),st=n("div"),T(ha.$$.fragment),L_=l(),qo=n("p"),N_=r("The "),zl=n("a"),D_=r("ElectraForMaskedLM"),O_=r(" forward method, overrides the "),oc=n("code"),S_=r("__call__"),W_=r(" special method."),B_=l(),T(Pn.$$.fragment),R_=l(),nc=n("p"),H_=r("Example:"),Q_=l(),T(pa.$$.fragment),Tm=l(),jo=n("h2"),Cn=n("a"),sc=n("span"),T(ma.$$.fragment),U_=l(),ac=n("span"),V_=r("ElectraForSequenceClassification"),Em=l(),Ze=n("div"),T(ua.$$.fragment),J_=l(),rc=n("p"),K_=r(`ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),G_=l(),fa=n("p"),X_=r("This model inherits from "),Pl=n("a"),Y_=r("PreTrainedModel"),Z_=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ev=l(),ga=n("p"),tv=r("This model is also a PyTorch "),_a=n("a"),ov=r("torch.nn.Module"),nv=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sv=l(),We=n("div"),T(va.$$.fragment),av=l(),Ao=n("p"),rv=r("The "),Cl=n("a"),iv=r("ElectraForSequenceClassification"),lv=r(" forward method, overrides the "),ic=n("code"),dv=r("__call__"),cv=r(" special method."),hv=l(),T(qn.$$.fragment),pv=l(),lc=n("p"),mv=r("Example of single-label classification:"),uv=l(),T(ka.$$.fragment),fv=l(),dc=n("p"),gv=r("Example of multi-label classification:"),_v=l(),T(Ta.$$.fragment),wm=l(),Io=n("h2"),jn=n("a"),cc=n("span"),T(Ea.$$.fragment),vv=l(),hc=n("span"),kv=r("ElectraForMultipleChoice"),ym=l(),et=n("div"),T(wa.$$.fragment),Tv=l(),pc=n("p"),Ev=r(`ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.`),wv=l(),ya=n("p"),yv=r("This model inherits from "),ql=n("a"),bv=r("PreTrainedModel"),Fv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$v=l(),ba=n("p"),xv=r("This model is also a PyTorch "),Fa=n("a"),Mv=r("torch.nn.Module"),zv=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pv=l(),at=n("div"),T($a.$$.fragment),Cv=l(),Lo=n("p"),qv=r("The "),jl=n("a"),jv=r("ElectraForMultipleChoice"),Av=r(" forward method, overrides the "),mc=n("code"),Iv=r("__call__"),Lv=r(" special method."),Nv=l(),T(An.$$.fragment),Dv=l(),uc=n("p"),Ov=r("Example:"),Sv=l(),T(xa.$$.fragment),bm=l(),No=n("h2"),In=n("a"),fc=n("span"),T(Ma.$$.fragment),Wv=l(),gc=n("span"),Bv=r("ElectraForTokenClassification"),Fm=l(),He=n("div"),T(za.$$.fragment),Rv=l(),_c=n("p"),Hv=r("Electra model with a token classification head on top."),Qv=l(),vc=n("p"),Uv=r("Both the discriminator and generator may be loaded into this model."),Vv=l(),Pa=n("p"),Jv=r("This model inherits from "),Al=n("a"),Kv=r("PreTrainedModel"),Gv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xv=l(),Ca=n("p"),Yv=r("This model is also a PyTorch "),qa=n("a"),Zv=r("torch.nn.Module"),ek=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),tk=l(),rt=n("div"),T(ja.$$.fragment),ok=l(),Do=n("p"),nk=r("The "),Il=n("a"),sk=r("ElectraForTokenClassification"),ak=r(" forward method, overrides the "),kc=n("code"),rk=r("__call__"),ik=r(" special method."),lk=l(),T(Ln.$$.fragment),dk=l(),Tc=n("p"),ck=r("Example:"),hk=l(),T(Aa.$$.fragment),$m=l(),Oo=n("h2"),Nn=n("a"),Ec=n("span"),T(Ia.$$.fragment),pk=l(),wc=n("span"),mk=r("ElectraForQuestionAnswering"),xm=l(),tt=n("div"),T(La.$$.fragment),uk=l(),So=n("p"),fk=r(`ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),yc=n("code"),gk=r("span start logits"),_k=r(" and "),bc=n("code"),vk=r("span end logits"),kk=r(")."),Tk=l(),Na=n("p"),Ek=r("This model inherits from "),Ll=n("a"),wk=r("PreTrainedModel"),yk=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bk=l(),Da=n("p"),Fk=r("This model is also a PyTorch "),Oa=n("a"),$k=r("torch.nn.Module"),xk=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Mk=l(),it=n("div"),T(Sa.$$.fragment),zk=l(),Wo=n("p"),Pk=r("The "),Nl=n("a"),Ck=r("ElectraForQuestionAnswering"),qk=r(" forward method, overrides the "),Fc=n("code"),jk=r("__call__"),Ak=r(" special method."),Ik=l(),T(Dn.$$.fragment),Lk=l(),$c=n("p"),Nk=r("Example:"),Dk=l(),T(Wa.$$.fragment),Mm=l(),Bo=n("h2"),On=n("a"),xc=n("span"),T(Ba.$$.fragment),Ok=l(),Mc=n("span"),Sk=r("TFElectraModel"),zm=l(),Qe=n("div"),T(Ra.$$.fragment),Wk=l(),zc=n("p"),Bk=r("The bare Electra Model transformer outputting raw hidden-states without any specific head on top. 
Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different. Both the generator and discriminator checkpoints may be loaded into this model."),Rk=l(),Ha=n("p"),Hk=r("This model inherits from "),Dl=n("a"),Qk=r("TFPreTrainedModel"),Uk=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Vk=l(),Qa=n("p"),Jk=r("This model is also a "),Ua=n("a"),Kk=r("tf.keras.Model"),Gk=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Xk=l(),T(Sn.$$.fragment),Yk=l(),lt=n("div"),T(Va.$$.fragment),Zk=l(),Ro=n("p"),eT=r("The "),Ol=n("a"),tT=r("TFElectraModel"),oT=r(" forward method, overrides the "),Pc=n("code"),nT=r("__call__"),sT=r(" special method."),aT=l(),T(Wn.$$.fragment),rT=l(),Cc=n("p"),iT=r("Example:"),lT=l(),T(Ja.$$.fragment),Pm=l(),Ho=n("h2"),Bn=n("a"),qc=n("span"),T(Ka.$$.fragment),dT=l(),jc=n("span"),cT=r("TFElectraForPreTraining"),Cm=l(),je=n("div"),T(Ga.$$.fragment),hT=l(),Ac=n("p"),pT=r("Electra model with a binary classification head on top as used during pretraining for identifying generated tokens."),mT=l(),Ic=n("p"),uT=r(`Even though both the discriminator and generator may be loaded into this model, the discriminator is the only model of the two to have the correct classification head to be used for this model.`),fT=l(),Xa=n("p"),gT=r("This model inherits from "),Sl=n("a"),_T=r("TFPreTrainedModel"),vT=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kT=l(),Ya=n("p"),TT=r("This model is also a "),Za=n("a"),ET=r("tf.keras.Model"),wT=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yT=l(),T(Rn.$$.fragment),bT=l(),dt=n("div"),T(er.$$.fragment),FT=l(),Qo=n("p"),$T=r("The "),Wl=n("a"),xT=r("TFElectraForPreTraining"),MT=r(" forward method, overrides the "),Lc=n("code"),zT=r("__call__"),PT=r(" special method."),CT=l(),T(Hn.$$.fragment),qT=l(),Nc=n("p"),jT=r("Examples:"),AT=l(),T(tr.$$.fragment),qm=l(),Uo=n("h2"),Qn=n("a"),Dc=n("span"),T(or.$$.fragment),IT=l(),Oc=n("span"),LT=r("TFElectraForMaskedLM"),jm=l(),Ae=n("div"),T(nr.$$.fragment),NT=l(),Sc=n("p"),DT=r("Electra model with a language modeling head on top."),OT=l(),Wc=n("p"),ST=r(`Even though both the discriminator and generator may be loaded into this model, the generator is the only model of the two to have been trained for the masked language modeling task.`),WT=l(),sr=n("p"),BT=r("This model inherits from "),Bl=n("a"),RT=r("TFPreTrainedModel"),HT=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),QT=l(),ar=n("p"),UT=r("This model is also a "),rr=n("a"),VT=r("tf.keras.Model"),JT=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),KT=l(),T(Un.$$.fragment),GT=l(),ct=n("div"),T(ir.$$.fragment),XT=l(),Vo=n("p"),YT=r("The "),Rl=n("a"),ZT=r("TFElectraForMaskedLM"),e1=r(" forward method, overrides the "),Bc=n("code"),t1=r("__call__"),o1=r(" special method."),n1=l(),T(Vn.$$.fragment),s1=l(),Rc=n("p"),a1=r("Example:"),r1=l(),T(lr.$$.fragment),Am=l(),Jo=n("h2"),Jn=n("a"),Hc=n("span"),T(dr.$$.fragment),i1=l(),Qc=n("span"),l1=r("TFElectraForSequenceClassification"),Im=l(),Ue=n("div"),T(cr.$$.fragment),d1=l(),Uc=n("p"),c1=r(`ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),h1=l(),hr=n("p"),p1=r("This model inherits from "),Hl=n("a"),m1=r("TFPreTrainedModel"),u1=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),f1=l(),pr=n("p"),g1=r("This model is also a "),mr=n("a"),_1=r("tf.keras.Model"),v1=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),k1=l(),T(Kn.$$.fragment),T1=l(),ht=n("div"),T(ur.$$.fragment),E1=l(),Ko=n("p"),w1=r("The "),Ql=n("a"),y1=r("TFElectraForSequenceClassification"),b1=r(" forward method, overrides the "),Vc=n("code"),F1=r("__call__"),$1=r(" special method."),x1=l(),T(Gn.$$.fragment),M1=l(),Jc=n("p"),z1=r("Example:"),P1=l(),T(fr.$$.fragment),Lm=l(),Go=n("h2"),Xn=n("a"),Kc=n("span"),T(gr.$$.fragment),C1=l(),Gc=n("span"),q1=r("TFElectraForMultipleChoice"),Nm=l(),Ve=n("div"),T(_r.$$.fragment),j1=l(),Xc=n("p"),A1=r(`ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),I1=l(),vr=n("p"),L1=r("This model inherits from "),Ul=n("a"),N1=r("TFPreTrainedModel"),D1=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),O1=l(),kr=n("p"),S1=r("This model is also a "),Tr=n("a"),W1=r("tf.keras.Model"),B1=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),R1=l(),T(Yn.$$.fragment),H1=l(),pt=n("div"),T(Er.$$.fragment),Q1=l(),Xo=n("p"),U1=r("The "),Vl=n("a"),V1=r("TFElectraForMultipleChoice"),J1=r(" forward method, overrides the "),Yc=n("code"),K1=r("__call__"),G1=r(" special method."),X1=l(),T(Zn.$$.fragment),Y1=l(),Zc=n("p"),Z1=r("Example:"),eE=l(),T(wr.$$.fragment),Dm=l(),Yo=n("h2"),es=n("a"),eh=n("span"),T(yr.$$.fragment),tE=l(),th=n("span"),oE=r("TFElectraForTokenClassification"),Om=l(),Ie=n("div"),T(br.$$.fragment),nE=l(),oh=n("p"),sE=r("Electra model with a token classification head on top."),aE=l(),nh=n("p"),rE=r("Both the discriminator and generator may be loaded into this model."),iE=l(),Fr=n("p"),lE=r("This model inherits from "),Jl=n("a"),dE=r("TFPreTrainedModel"),cE=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hE=l(),$r=n("p"),pE=r("This model is also a "),xr=n("a"),mE=r("tf.keras.Model"),uE=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fE=l(),T(ts.$$.fragment),gE=l(),mt=n("div"),T(Mr.$$.fragment),_E=l(),Zo=n("p"),vE=r("The "),Kl=n("a"),kE=r("TFElectraForTokenClassification"),TE=r(" forward method, overrides the "),sh=n("code"),EE=r("__call__"),wE=r(" special method."),yE=l(),T(os.$$.fragment),bE=l(),ah=n("p"),FE=r("Example:"),$E=l(),T(zr.$$.fragment),Sm=l(),en=n("h2"),ns=n("a"),rh=n("span"),T(Pr.$$.fragment),xE=l(),ih=n("span"),ME=r("TFElectraForQuestionAnswering"),Wm=l(),Je=n("div"),T(Cr.$$.fragment),zE=l(),tn=n("p"),PE=r(`Electra Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),lh=n("code"),CE=r("span start logits"),qE=r(" and "),dh=n("code"),jE=r("span end logits"),AE=r(")."),IE=l(),qr=n("p"),LE=r("This model inherits from "),Gl=n("a"),NE=r("TFPreTrainedModel"),DE=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),OE=l(),jr=n("p"),SE=r("This model is also a "),Ar=n("a"),WE=r("tf.keras.Model"),BE=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),RE=l(),T(ss.$$.fragment),HE=l(),ut=n("div"),T(Ir.$$.fragment),QE=l(),on=n("p"),UE=r("The "),Xl=n("a"),VE=r("TFElectraForQuestionAnswering"),JE=r(" forward method, overrides the "),ch=n("code"),KE=r("__call__"),GE=r(" special method."),XE=l(),T(as.$$.fragment),YE=l(),hh=n("p"),ZE=r("Example:"),ew=l(),T(Lr.$$.fragment),Bm=l(),nn=n("h2"),rs=n("a"),ph=n("span"),T(Nr.$$.fragment),tw=l(),mh=n("span"),ow=r("FlaxElectraModel"),Rm=l(),Le=n("div"),T(Dr.$$.fragment),nw=l(),uh=n("p"),sw=r("The bare Electra Model transformer outputting raw hidden-states without any specific head on top."),aw=l(),Or=n("p"),rw=r("This model inherits from "),Yl=n("a"),iw=r("FlaxPreTrainedModel"),lw=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),dw=l(),Sr=n("p"),cw=r("This model is also a Flax Linen "),Wr=n("a"),hw=r("flax.nn.Module"),pw=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),mw=l(),fh=n("p"),uw=r("Finally, this model supports inherent JAX features such as:"),fw=l(),Bt=n("ul"),gh=n("li"),Br=n("a"),gw=r("Just-In-Time (JIT) compilation"),_w=l(),_h=n("li"),Rr=n("a"),vw=r("Automatic Differentiation"),kw=l(),vh=n("li"),Hr=n("a"),Tw=r("Vectorization"),Ew=l(),kh=n("li"),Qr=n("a"),ww=r("Parallelization"),yw=l(),ft=n("div"),T(Ur.$$.fragment),bw=l(),sn=n("p"),Fw=r("The "),Th=n("code"),$w=r("FlaxElectraPreTrainedModel"),xw=r(" forward method, overrides the "),Eh=n("code"),Mw=r("__call__"),zw=r(" special method."),Pw=l(),T(is.$$.fragment),Cw=l(),wh=n("p"),qw=r("Example:"),jw=l(),T(Vr.$$.fragment),Hm=l(),an=n("h2"),ls=n("a"),yh=n("span"),T(Jr.$$.fragment),Aw=l(),bh=n("span"),Iw=r("FlaxElectraForPreTraining"),Qm=l(),Ce=n("div"),T(Kr.$$.fragment),Lw=l(),Fh=n("p"),Nw=r("Electra model with a binary classification head on top as used during pretraining for identifying generated tokens."),Dw=l(),$h=n("p"),Ow=r("It is recommended to load the discriminator checkpoint into that model."),Sw=l(),Gr=n("p"),Ww=r("This model inherits from "),Zl=n("a"),Bw=r("FlaxPreTrainedModel"),Rw=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Hw=l(),Xr=n("p"),Qw=r("This model is also a Flax Linen "),Yr=n("a"),Uw=r("flax.nn.Module"),Vw=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Jw=l(),xh=n("p"),Kw=r("Finally, this model supports inherent JAX features such as:"),Gw=l(),Rt=n("ul"),Mh=n("li"),Zr=n("a"),Xw=r("Just-In-Time (JIT) compilation"),Yw=l(),zh=n("li"),ei=n("a"),Zw=r("Automatic Differentiation"),ey=l(),Ph=n("li"),ti=n("a"),ty=r("Vectorization"),oy=l(),Ch=n("li"),oi=n("a"),ny=r("Parallelization"),sy=l(),gt=n("div"),T(ni.$$.fragment),ay=l(),rn=n("p"),ry=r("The "),qh=n("code"),iy=r("FlaxElectraPreTrainedModel"),ly=r(" forward method, overrides the "),jh=n("code"),dy=r("__call__"),cy=r(" special method."),hy=l(),T(ds.$$.fragment),py=l(),Ah=n("p"),my=r("Example:"),uy=l(),T(si.$$.fragment),Um=l(),ln=n("h2"),cs=n("a"),Ih=n("span"),T(ai.$$.fragment),fy=l(),Lh=n("span"),gy=r("FlaxElectraForMaskedLM"),Vm=l(),Ne=n("div"),T(ri.$$.fragment),_y=l(),ii=n("p"),vy=r("Electra Model with a "),Nh=n("code"),ky=r("language modeling"),Ty=r(" head on top."),Ey=l(),li=n("p"),wy=r("This model inherits from "),ed=n("a"),yy=r("FlaxPreTrainedModel"),by=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Fy=l(),di=n("p"),$y=r("This model is also a Flax Linen "),ci=n("a"),xy=r("flax.nn.Module"),My=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),zy=l(),Dh=n("p"),Py=r("Finally, this model supports inherent JAX features such as:"),Cy=l(),Ht=n("ul"),Oh=n("li"),hi=n("a"),qy=r("Just-In-Time (JIT) compilation"),jy=l(),Sh=n("li"),pi=n("a"),Ay=r("Automatic Differentiation"),Iy=l(),Wh=n("li"),mi=n("a"),Ly=r("Vectorization"),Ny=l(),Bh=n("li"),ui=n("a"),Dy=r("Parallelization"),Oy=l(),_t=n("div"),T(fi.$$.fragment),Sy=l(),dn=n("p"),Wy=r("The "),Rh=n("code"),By=r("FlaxElectraPreTrainedModel"),Ry=r(" forward method, overrides the "),Hh=n("code"),Hy=r("__call__"),Qy=r(" special method."),Uy=l(),T(hs.$$.fragment),Vy=l(),Qh=n("p"),Jy=r("Example:"),Ky=l(),T(gi.$$.fragment),Jm=l(),cn=n("h2"),ps=n("a"),Uh=n("span"),T(_i.$$.fragment),Gy=l(),Vh=n("span"),Xy=r("FlaxElectraForSequenceClassification"),Km=l(),De=n("div"),T(vi.$$.fragment),Yy=l(),Jh=n("p"),Zy=r(`Electra Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),eb=l(),ki=n("p"),tb=r("This model inherits from "),td=n("a"),ob=r("FlaxPreTrainedModel"),nb=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),sb=l(),Ti=n("p"),ab=r("This model is also a Flax Linen "),Ei=n("a"),rb=r("flax.nn.Module"),ib=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),lb=l(),Kh=n("p"),db=r("Finally, this model supports inherent JAX features such as:"),cb=l(),Qt=n("ul"),Gh=n("li"),wi=n("a"),hb=r("Just-In-Time (JIT) compilation"),pb=l(),Xh=n("li"),yi=n("a"),mb=r("Automatic Differentiation"),ub=l(),Yh=n("li"),bi=n("a"),fb=r("Vectorization"),gb=l(),Zh=n("li"),Fi=n("a"),_b=r("Parallelization"),vb=l(),vt=n("div"),T($i.$$.fragment),kb=l(),hn=n("p"),Tb=r("The "),ep=n("code"),Eb=r("FlaxElectraPreTrainedModel"),wb=r(" forward method, overrides the "),tp=n("code"),yb=r("__call__"),bb=r(" special method."),Fb=l(),T(ms.$$.fragment),$b=l(),op=n("p"),xb=r("Example:"),Mb=l(),T(xi.$$.fragment),Gm=l(),pn=n("h2"),us=n("a"),np=n("span"),T(Mi.$$.fragment),zb=l(),sp=n("span"),Pb=r("FlaxElectraForMultipleChoice"),Xm=l(),Oe=n("div"),T(zi.$$.fragment),Cb=l(),ap=n("p"),qb=r(`ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),jb=l(),Pi=n("p"),Ab=r("This model inherits from "),od=n("a"),Ib=r("FlaxPreTrainedModel"),Lb=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Nb=l(),Ci=n("p"),Db=r("This model is also a Flax Linen "),qi=n("a"),Ob=r("flax.nn.Module"),Sb=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Wb=l(),rp=n("p"),Bb=r("Finally, this model supports inherent JAX features such as:"),Rb=l(),Ut=n("ul"),ip=n("li"),ji=n("a"),Hb=r("Just-In-Time (JIT) compilation"),Qb=l(),lp=n("li"),Ai=n("a"),Ub=r("Automatic Differentiation"),Vb=l(),dp=n("li"),Ii=n("a"),Jb=r("Vectorization"),Kb=l(),cp=n("li"),Li=n("a"),Gb=r("Parallelization"),Xb=l(),kt=n("div"),T(Ni.$$.fragment),Yb=l(),mn=n("p"),Zb=r("The "),hp=n("code"),e0=r("FlaxElectraPreTrainedModel"),t0=r(" forward method, overrides the "),pp=n("code"),o0=r("__call__"),n0=r(" special method."),s0=l(),T(fs.$$.fragment),a0=l(),mp=n("p"),r0=r("Example:"),i0=l(),T(Di.$$.fragment),Ym=l(),un=n("h2"),gs=n("a"),up=n("span"),T(Oi.$$.fragment),l0=l(),fp=n("span"),d0=r("FlaxElectraForTokenClassification"),Zm=l(),qe=n("div"),T(Si.$$.fragment),c0=l(),gp=n("p"),h0=r("Electra model with a token classification head on top."),p0=l(),_p=n("p"),m0=r("Both the discriminator and generator may be loaded into this model."),u0=l(),Wi=n("p"),f0=r("This model inherits from "),nd=n("a"),g0=r("FlaxPreTrainedModel"),_0=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),v0=l(),Bi=n("p"),k0=r("This model is also a Flax Linen "),Ri=n("a"),T0=r("flax.nn.Module"),E0=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),w0=l(),vp=n("p"),y0=r("Finally, this model supports inherent JAX features such as:"),b0=l(),Vt=n("ul"),kp=n("li"),Hi=n("a"),F0=r("Just-In-Time (JIT) compilation"),$0=l(),Tp=n("li"),Qi=n("a"),x0=r("Automatic Differentiation"),M0=l(),Ep=n("li"),Ui=n("a"),z0=r("Vectorization"),P0=l(),wp=n("li"),Vi=n("a"),C0=r("Parallelization"),q0=l(),Tt=n("div"),T(Ji.$$.fragment),j0=l(),fn=n("p"),A0=r("The "),yp=n("code"),I0=r("FlaxElectraPreTrainedModel"),L0=r(" forward method, overrides the "),bp=n("code"),N0=r("__call__"),D0=r(" special method."),O0=l(),T(_s.$$.fragment),S0=l(),Fp=n("p"),W0=r("Example:"),B0=l(),T(Ki.$$.fragment),eu=l(),gn=n("h2"),vs=n("a"),$p=n("span"),T(Gi.$$.fragment),R0=l(),xp=n("span"),H0=r("FlaxElectraForQuestionAnswering"),tu=l(),Se=n("div"),T(Xi.$$.fragment),Q0=l(),_n=n("p"),U0=r(`ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Mp=n("code"),V0=r("span start logits"),J0=r(" and "),zp=n("code"),K0=r("span end logits"),G0=r(")."),X0=l(),Yi=n("p"),Y0=r("This model inherits from "),sd=n("a"),Z0=r("FlaxPreTrainedModel"),e2=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),t2=l(),Zi=n("p"),o2=r("This model is also a Flax Linen "),el=n("a"),n2=r("flax.nn.Module"),s2=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),a2=l(),Pp=n("p"),r2=r("Finally, this model supports inherent JAX features such as:"),i2=l(),Jt=n("ul"),Cp=n("li"),tl=n("a"),l2=r("Just-In-Time (JIT) compilation"),d2=l(),qp=n("li"),ol=n("a"),c2=r("Automatic Differentiation"),h2=l(),jp=n("li"),nl=n("a"),p2=r("Vectorization"),m2=l(),Ap=n("li"),sl=n("a"),u2=r("Parallelization"),f2=l(),Et=n("div"),T(al.$$.fragment),g2=l(),vn=n("p"),_2=r("The "),Ip=n("code"),v2=r("FlaxElectraPreTrainedModel"),k2=r(" forward method, overrides the "),Lp=n("code"),T2=r("__call__"),E2=r(" special method."),w2=l(),T(ks.$$.fragment),y2=l(),Np=n("p"),b2=r("Example:"),F2=l(),T(rl.$$.fragment),this.h()},l(o){const f=JM('[data-svelte="svelte-1phssyn"]',document.head);h=s(f,"META",{name:!0,content:!0}),f.forEach(t),$=d(o),g=s(o,"H1",{class:!0});var il=a(g);v=s(il,"A",{id:!0,class:!0,href:!0});var Dp=a(v);k=s(Dp,"SPAN",{});var Op=a(k);E(_.$$.fragment,Op),Op.forEach(t),Dp.forEach(t),u=d(il),x=s(il,"SPAN",{});var Sp=a(x);he=i(Sp,"ELECTRA"),Sp.forEach(t),il.forEach(t),K=d(o),z=s(o,"H2",{class:!0});var ll=a(z);ee=s(ll,"A",{id:!0,class:!0,href:!0});var Wp=a(ee);D=s(Wp,"SPAN",{});var Bp=a(D);E(oe.$$.fragment,Bp),Bp.forEach(t),Wp.forEach(t),pe=d(ll),O=s(ll,"SPAN",{});var Rp=a(O);me=i(Rp,"Overview"),Rp.forEach(t),ll.forEach(t),le=d(o),J=s(o,"P",{});var dl=a(J);I=i(dl,"The ELECTRA model was proposed in the paper "),ne=s(dl,"A",{href:!0,rel:!0});var Hp=a(ne);X=i(Hp,`ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators`),Hp.forEach(t),P=i(dl,`. ELECTRA is a new pretraining approach which trains two transformer models: the generator and the discriminator. The generator\u2019s role is to replace tokens in a sequence, and is therefore trained as a masked language model. The discriminator, which is the model we\u2019re interested in, tries to identify which tokens were replaced by the generator in the sequence.`),dl.forEach(t),q=d(o),ae=s(o,"P",{});var Qp=a(ae);R=i(Qp,"The abstract from the paper is the following:"),Qp.forEach(t),de=d(o),re=s(o,"P",{});var Up=a(re);S=s(Up,"EM",{});var Vp=a(S);ue=i(Vp,`Masked language modeling (MLM) pretraining methods such as BERT corrupt the input by replacing some tokens with [MASK] and then train a model to reconstruct the original tokens. While they produce good results when transferred to downstream NLP tasks, they generally require large amounts of compute to be effective. As an alternative, we propose a more sample-efficient pretraining task called replaced token detection. Instead of masking the input, our approach corrupts it by replacing some tokens with plausible alternatives sampled from a small generator network. Then, instead of training a model that predicts the original identities of the corrupted tokens, we train a discriminative model that predicts whether each token in the corrupted input was replaced by a generator sample or not. Thorough experiments demonstrate this new pretraining task is more efficient than MLM because the task is defined over all input tokens rather than just the small subset that was masked out. As a result, the contextual representations learned by our approach substantially outperform the ones learned by BERT given the same model size, data, and compute. The gains are particularly strong for small models; for example, we train a model on one GPU for 4 days that outperforms GPT (trained using 30x more compute) on the GLUE natural language understanding benchmark. 
Our approach also works well at scale, where it performs comparably to RoBERTa and XLNet while using less than 1/4 of their compute and outperforms them when using the same amount of compute.`),Vp.forEach(t),Up.forEach(t),ce=d(o),C=s(o,"P",{});var Jp=a(C);fe=i(Jp,"Tips:"),Jp.forEach(t),B=d(o),te=s(o,"UL",{});var cl=a(te);ie=s(cl,"LI",{});var Kp=a(ie);H=i(Kp,`ELECTRA is the pretraining approach, therefore there is nearly no changes done to the underlying model: BERT. The only change is the separation of the embedding size and the hidden size: the embedding size is generally smaller, while the hidden size is larger. An additional projection layer (linear) is used to project the embeddings from their embedding size to the hidden size. In the case where the embedding size is the same as the hidden size, no projection layer is used.`),Kp.forEach(t),ge=d(cl),G=s(cl,"LI",{});var Kt=a(G);L=i(Kt,"The ELECTRA checkpoints saved using "),se=s(Kt,"A",{href:!0,rel:!0});var Gp=a(se);Q=i(Gp,"Google Research\u2019s implementation"),Gp.forEach(t),_e=i(Kt,` contain both the generator and discriminator. The conversion script requires the user to name which model to export into the correct architecture. Once converted to the HuggingFace format, these checkpoints may be loaded into all available ELECTRA models, however. This means that the discriminator may be loaded in the `),p=s(Kt,"A",{href:!0});var Xp=a(p);M=i(Xp,"ElectraForMaskedLM"),Xp.forEach(t),Y=i(Kt,` model, and the generator may be loaded in the `),we=s(Kt,"A",{href:!0});var Yp=a(we);$e=i(Yp,"ElectraForPreTraining"),Yp.forEach(t),N=i(Kt,` model (the classification head will be randomly initialized as it doesn\u2019t exist in the generator).`),Kt.forEach(t),cl.forEach(t),be=d(o),ve=s(o,"P",{});var kn=a(ve);xe=i(kn,"This model was contributed by "),A=s(kn,"A",{href:!0,rel:!0});var Zp=a(A);U=i(Zp,"lysandre"),Zp.forEach(t),Me=i(kn,". The original code can be found "),Te=s(kn,"A",{href:!0,rel:!0});var em=a(Te);V=i(em,"here"),em.forEach(t),ze=i(kn,"."),kn.forEach(t),Fe=d(o),Z=s(o,"H2",{class:!0});var hl=a(Z);ke=s(hl,"A",{id:!0,class:!0,href:!0});var tm=a(ke);Pd=s(tm,"SPAN",{});var om=a(Pd);E(Ps.$$.fragment,om),om.forEach(t),tm.forEach(t),bf=d(hl),Cd=s(hl,"SPAN",{});var nm=a(Cd);Ff=i(nm,"ElectraConfig"),nm.forEach(t),hl.forEach(t),rm=d(o),Xe=s(o,"DIV",{class:!0});var jt=a(Xe);E(Cs.$$.fragment,jt),$f=d(jt),Wt=s(jt,"P",{});var Ts=a(Wt);xf=i(Ts,"This is the configuration class to store the configuration of a "),pl=s(Ts,"A",{href:!0});var $2=a(pl);Mf=i($2,"ElectraModel"),$2.forEach(t),zf=i(Ts,` or a `),ml=s(Ts,"A",{href:!0});var x2=a(ml);Pf=i(x2,"TFElectraModel"),x2.forEach(t),Cf=i(Ts,`. It is used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA `),qs=s(Ts,"A",{href:!0,rel:!0});var M2=a(qs);qf=i(M2,"google/electra-small-discriminator"),M2.forEach(t),jf=i(Ts," architecture."),Ts.forEach(t),Af=d(jt),Eo=s(jt,"P",{});var ad=a(Eo);If=i(ad,"Configuration objects inherit from "),ul=s(ad,"A",{href:!0});var z2=a(ul);Lf=i(z2,"PretrainedConfig"),z2.forEach(t),Nf=i(ad,` and can be used to control the model outputs. 
Read the documentation from `),fl=s(ad,"A",{href:!0});var P2=a(fl);Df=i(P2,"PretrainedConfig"),P2.forEach(t),Of=i(ad," for more information."),ad.forEach(t),Sf=d(jt),qd=s(jt,"P",{});var C2=a(qd);Wf=i(C2,"Examples:"),C2.forEach(t),Bf=d(jt),E(js.$$.fragment,jt),jt.forEach(t),im=d(o),wo=s(o,"H2",{class:!0});var nu=a(wo);Tn=s(nu,"A",{id:!0,class:!0,href:!0});var q2=a(Tn);jd=s(q2,"SPAN",{});var j2=a(jd);E(As.$$.fragment,j2),j2.forEach(t),q2.forEach(t),Rf=d(nu),Ad=s(nu,"SPAN",{});var A2=a(Ad);Hf=i(A2,"ElectraTokenizer"),A2.forEach(t),nu.forEach(t),lm=d(o),Ct=s(o,"DIV",{class:!0});var Es=a(Ct);E(Is.$$.fragment,Es),Qf=d(Es),Id=s(Es,"P",{});var I2=a(Id);Uf=i(I2,"Construct an ELECTRA tokenizer."),I2.forEach(t),Vf=d(Es),En=s(Es,"P",{});var sm=a(En);gl=s(sm,"A",{href:!0});var L2=a(gl);Jf=i(L2,"ElectraTokenizer"),L2.forEach(t),Kf=i(sm," is identical to "),_l=s(sm,"A",{href:!0});var N2=a(_l);Gf=i(N2,"BertTokenizer"),N2.forEach(t),Xf=i(sm,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),sm.forEach(t),Yf=d(Es),Ls=s(Es,"P",{});var su=a(Ls);Zf=i(su,"Refer to superclass "),vl=s(su,"A",{href:!0});var D2=a(vl);eg=i(D2,"BertTokenizer"),D2.forEach(t),tg=i(su,` for usage examples and documentation concerning parameters.`),su.forEach(t),Es.forEach(t),dm=d(o),yo=s(o,"H2",{class:!0});var au=a(yo);wn=s(au,"A",{id:!0,class:!0,href:!0});var O2=a(wn);Ld=s(O2,"SPAN",{});var S2=a(Ld);E(Ns.$$.fragment,S2),S2.forEach(t),O2.forEach(t),og=d(au),Nd=s(au,"SPAN",{});var W2=a(Nd);ng=i(W2,"ElectraTokenizerFast"),W2.forEach(t),au.forEach(t),cm=d(o),qt=s(o,"DIV",{class:!0});var ws=a(qt);E(Ds.$$.fragment,ws),sg=d(ws),Os=s(ws,"P",{});var ru=a(Os);ag=i(ru,"Construct a \u201Cfast\u201D ELECTRA tokenizer (backed by HuggingFace\u2019s "),Dd=s(ru,"EM",{});var B2=a(Dd);rg=i(B2,"tokenizers"),B2.forEach(t),ig=i(ru," library)."),ru.forEach(t),lg=d(ws),yn=s(ws,"P",{});var am=a(yn);kl=s(am,"A",{href:!0});var R2=a(kl);dg=i(R2,"ElectraTokenizerFast"),R2.forEach(t),cg=i(am," is identical to "),Tl=s(am,"A",{href:!0});var H2=a(Tl);hg=i(H2,"BertTokenizerFast"),H2.forEach(t),pg=i(am,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),am.forEach(t),mg=d(ws),Ss=s(ws,"P",{});var iu=a(Ss);ug=i(iu,"Refer to superclass "),El=s(iu,"A",{href:!0});var Q2=a(El);fg=i(Q2,"BertTokenizerFast"),Q2.forEach(t),gg=i(iu,` for usage examples and documentation concerning parameters.`),iu.forEach(t),ws.forEach(t),hm=d(o),bo=s(o,"H2",{class:!0});var lu=a(bo);bn=s(lu,"A",{id:!0,class:!0,href:!0});var U2=a(bn);Od=s(U2,"SPAN",{});var V2=a(Od);E(Ws.$$.fragment,V2),V2.forEach(t),U2.forEach(t),_g=d(lu),Sd=s(lu,"SPAN",{});var J2=a(Sd);vg=i(J2,"Electra specific outputs"),J2.forEach(t),lu.forEach(t),pm=d(o),Fo=s(o,"DIV",{class:!0});var du=a(Fo);E(Bs.$$.fragment,du),kg=d(du),Rs=s(du,"P",{});var cu=a(Rs);Tg=i(cu,"Output type of "),wl=s(cu,"A",{href:!0});var K2=a(wl);Eg=i(K2,"ElectraForPreTraining"),K2.forEach(t),wg=i(cu,"."),cu.forEach(t),du.forEach(t),mm=d(o),$o=s(o,"DIV",{class:!0});var hu=a($o);E(Hs.$$.fragment,hu),yg=d(hu),Qs=s(hu,"P",{});var pu=a(Qs);bg=i(pu,"Output type of "),yl=s(pu,"A",{href:!0});var G2=a(yl);Fg=i(G2,"TFElectraForPreTraining"),G2.forEach(t),$g=i(pu,"."),pu.forEach(t),hu.forEach(t),um=d(o),xo=s(o,"H2",{class:!0});var mu=a(xo);Fn=s(mu,"A",{id:!0,class:!0,href:!0});var X2=a(Fn);Wd=s(X2,"SPAN",{});var Y2=a(Wd);E(Us.$$.fragment,Y2),Y2.forEach(t),X2.forEach(t),xg=d(mu),Bd=s(mu,"SPAN",{});var Z2=a(Bd);Mg=i(Z2,"ElectraModel"),Z2.forEach(t),mu.forEach(t),fm=d(o),Ye=s(o,"DIV",{class:!0});var 
Gt=a(Ye);E(Vs.$$.fragment,Gt),zg=d(Gt),Rd=s(Gt,"P",{});var eF=a(Rd);Pg=i(eF,"The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different. Both the generator and discriminator checkpoints may be loaded into this model."),eF.forEach(t),Cg=d(Gt),Js=s(Gt,"P",{});var uu=a(Js);qg=i(uu,"This model inherits from "),bl=s(uu,"A",{href:!0});var tF=a(bl);jg=i(tF,"PreTrainedModel"),tF.forEach(t),Ag=i(uu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uu.forEach(t),Ig=d(Gt),Ks=s(Gt,"P",{});var fu=a(Ks);Lg=i(fu,"This model is also a PyTorch "),Gs=s(fu,"A",{href:!0,rel:!0});var oF=a(Gs);Ng=i(oF,"torch.nn.Module"),oF.forEach(t),Dg=i(fu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fu.forEach(t),Og=d(Gt),ot=s(Gt,"DIV",{class:!0});var Xt=a(ot);E(Xs.$$.fragment,Xt),Sg=d(Xt),Mo=s(Xt,"P",{});var rd=a(Mo);Wg=i(rd,"The "),Fl=s(rd,"A",{href:!0});var nF=a(Fl);Bg=i(nF,"ElectraModel"),nF.forEach(t),Rg=i(rd," forward method, overrides the "),Hd=s(rd,"CODE",{});var sF=a(Hd);Hg=i(sF,"__call__"),sF.forEach(t),Qg=i(rd," special method."),rd.forEach(t),Ug=d(Xt),E($n.$$.fragment,Xt),Vg=d(Xt),Qd=s(Xt,"P",{});var aF=a(Qd);Jg=i(aF,"Example:"),aF.forEach(t),Kg=d(Xt),E(Ys.$$.fragment,Xt),Xt.forEach(t),Gt.forEach(t),gm=d(o),zo=s(o,"H2",{class:!0});var gu=a(zo);xn=s(gu,"A",{id:!0,class:!0,href:!0});var rF=a(xn);Ud=s(rF,"SPAN",{});var iF=a(Ud);E(Zs.$$.fragment,iF),iF.forEach(t),rF.forEach(t),Gg=d(gu),Vd=s(gu,"SPAN",{});var lF=a(Vd);Xg=i(lF,"ElectraForPreTraining"),lF.forEach(t),gu.forEach(t),_m=d(o),Be=s(o,"DIV",{class:!0});var At=a(Be);E(ea.$$.fragment,At),Yg=d(At),Jd=s(At,"P",{});var dF=a(Jd);Zg=i(dF,"Electra model with a binary classification head on top as used during pretraining for identifying generated tokens."),dF.forEach(t),e_=d(At),Kd=s(At,"P",{});var cF=a(Kd);t_=i(cF,"It is recommended to load the discriminator checkpoint into that model."),cF.forEach(t),o_=d(At),ta=s(At,"P",{});var _u=a(ta);n_=i(_u,"This model inherits from "),$l=s(_u,"A",{href:!0});var hF=a($l);s_=i(hF,"PreTrainedModel"),hF.forEach(t),a_=i(_u,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_u.forEach(t),r_=d(At),oa=s(At,"P",{});var vu=a(oa);i_=i(vu,"This model is also a PyTorch "),na=s(vu,"A",{href:!0,rel:!0});var pF=a(na);l_=i(pF,"torch.nn.Module"),pF.forEach(t),d_=i(vu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vu.forEach(t),c_=d(At),nt=s(At,"DIV",{class:!0});var Yt=a(nt);E(sa.$$.fragment,Yt),h_=d(Yt),Po=s(Yt,"P",{});var id=a(Po);p_=i(id,"The "),xl=s(id,"A",{href:!0});var mF=a(xl);m_=i(mF,"ElectraForPreTraining"),mF.forEach(t),u_=i(id," forward method, overrides the "),Gd=s(id,"CODE",{});var uF=a(Gd);f_=i(uF,"__call__"),uF.forEach(t),g_=i(id," special method."),id.forEach(t),__=d(Yt),E(Mn.$$.fragment,Yt),v_=d(Yt),Xd=s(Yt,"P",{});var fF=a(Xd);k_=i(fF,"Examples:"),fF.forEach(t),T_=d(Yt),E(aa.$$.fragment,Yt),Yt.forEach(t),At.forEach(t),vm=d(o),Co=s(o,"H2",{class:!0});var ku=a(Co);zn=s(ku,"A",{id:!0,class:!0,href:!0});var gF=a(zn);Yd=s(gF,"SPAN",{});var _F=a(Yd);E(ra.$$.fragment,_F),_F.forEach(t),gF.forEach(t),E_=d(ku),Zd=s(ku,"SPAN",{});var vF=a(Zd);w_=i(vF,"ElectraForMaskedLM"),vF.forEach(t),ku.forEach(t),km=d(o),Re=s(o,"DIV",{class:!0});var It=a(Re);E(ia.$$.fragment,It),y_=d(It),ec=s(It,"P",{});var kF=a(ec);b_=i(kF,"Electra model with a language modeling head on top."),kF.forEach(t),F_=d(It),tc=s(It,"P",{});var TF=a(tc);$_=i(TF,`Even though both the discriminator and generator may be loaded into this model, the generator is the only model of the two to have been trained for the masked language modeling task.`),TF.forEach(t),x_=d(It),la=s(It,"P",{});var Tu=a(la);M_=i(Tu,"This model inherits from "),Ml=s(Tu,"A",{href:!0});var EF=a(Ml);z_=i(EF,"PreTrainedModel"),EF.forEach(t),P_=i(Tu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tu.forEach(t),C_=d(It),da=s(It,"P",{});var Eu=a(da);q_=i(Eu,"This model is also a PyTorch "),ca=s(Eu,"A",{href:!0,rel:!0});var wF=a(ca);j_=i(wF,"torch.nn.Module"),wF.forEach(t),A_=i(Eu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Eu.forEach(t),I_=d(It),st=s(It,"DIV",{class:!0});var Zt=a(st);E(ha.$$.fragment,Zt),L_=d(Zt),qo=s(Zt,"P",{});var ld=a(qo);N_=i(ld,"The "),zl=s(ld,"A",{href:!0});var yF=a(zl);D_=i(yF,"ElectraForMaskedLM"),yF.forEach(t),O_=i(ld," forward method, overrides the "),oc=s(ld,"CODE",{});var bF=a(oc);S_=i(bF,"__call__"),bF.forEach(t),W_=i(ld," special method."),ld.forEach(t),B_=d(Zt),E(Pn.$$.fragment,Zt),R_=d(Zt),nc=s(Zt,"P",{});var FF=a(nc);H_=i(FF,"Example:"),FF.forEach(t),Q_=d(Zt),E(pa.$$.fragment,Zt),Zt.forEach(t),It.forEach(t),Tm=d(o),jo=s(o,"H2",{class:!0});var wu=a(jo);Cn=s(wu,"A",{id:!0,class:!0,href:!0});var $F=a(Cn);sc=s($F,"SPAN",{});var xF=a(sc);E(ma.$$.fragment,xF),xF.forEach(t),$F.forEach(t),U_=d(wu),ac=s(wu,"SPAN",{});var MF=a(ac);V_=i(MF,"ElectraForSequenceClassification"),MF.forEach(t),wu.forEach(t),Em=d(o),Ze=s(o,"DIV",{class:!0});var eo=a(Ze);E(ua.$$.fragment,eo),J_=d(eo),rc=s(eo,"P",{});var zF=a(rc);K_=i(zF,`ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),zF.forEach(t),G_=d(eo),fa=s(eo,"P",{});var yu=a(fa);X_=i(yu,"This model inherits from "),Pl=s(yu,"A",{href:!0});var PF=a(Pl);Y_=i(PF,"PreTrainedModel"),PF.forEach(t),Z_=i(yu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yu.forEach(t),ev=d(eo),ga=s(eo,"P",{});var bu=a(ga);tv=i(bu,"This model is also a PyTorch "),_a=s(bu,"A",{href:!0,rel:!0});var CF=a(_a);ov=i(CF,"torch.nn.Module"),CF.forEach(t),nv=i(bu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bu.forEach(t),sv=d(eo),We=s(eo,"DIV",{class:!0});var wt=a(We);E(va.$$.fragment,wt),av=d(wt),Ao=s(wt,"P",{});var dd=a(Ao);rv=i(dd,"The "),Cl=s(dd,"A",{href:!0});var qF=a(Cl);iv=i(qF,"ElectraForSequenceClassification"),qF.forEach(t),lv=i(dd," forward method, overrides the "),ic=s(dd,"CODE",{});var jF=a(ic);dv=i(jF,"__call__"),jF.forEach(t),cv=i(dd," special method."),dd.forEach(t),hv=d(wt),E(qn.$$.fragment,wt),pv=d(wt),lc=s(wt,"P",{});var AF=a(lc);mv=i(AF,"Example of single-label classification:"),AF.forEach(t),uv=d(wt),E(ka.$$.fragment,wt),fv=d(wt),dc=s(wt,"P",{});var IF=a(dc);gv=i(IF,"Example of multi-label classification:"),IF.forEach(t),_v=d(wt),E(Ta.$$.fragment,wt),wt.forEach(t),eo.forEach(t),wm=d(o),Io=s(o,"H2",{class:!0});var Fu=a(Io);jn=s(Fu,"A",{id:!0,class:!0,href:!0});var LF=a(jn);cc=s(LF,"SPAN",{});var NF=a(cc);E(Ea.$$.fragment,NF),NF.forEach(t),LF.forEach(t),vv=d(Fu),hc=s(Fu,"SPAN",{});var DF=a(hc);kv=i(DF,"ElectraForMultipleChoice"),DF.forEach(t),Fu.forEach(t),ym=d(o),et=s(o,"DIV",{class:!0});var to=a(et);E(wa.$$.fragment,to),Tv=d(to),pc=s(to,"P",{});var OF=a(pc);Ev=i(OF,`ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),OF.forEach(t),wv=d(to),ya=s(to,"P",{});var $u=a(ya);yv=i($u,"This model inherits from "),ql=s($u,"A",{href:!0});var SF=a(ql);bv=i(SF,"PreTrainedModel"),SF.forEach(t),Fv=i($u,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$u.forEach(t),$v=d(to),ba=s(to,"P",{});var xu=a(ba);xv=i(xu,"This model is also a PyTorch "),Fa=s(xu,"A",{href:!0,rel:!0});var WF=a(Fa);Mv=i(WF,"torch.nn.Module"),WF.forEach(t),zv=i(xu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xu.forEach(t),Pv=d(to),at=s(to,"DIV",{class:!0});var oo=a(at);E($a.$$.fragment,oo),Cv=d(oo),Lo=s(oo,"P",{});var cd=a(Lo);qv=i(cd,"The "),jl=s(cd,"A",{href:!0});var BF=a(jl);jv=i(BF,"ElectraForMultipleChoice"),BF.forEach(t),Av=i(cd," forward method, overrides the "),mc=s(cd,"CODE",{});var RF=a(mc);Iv=i(RF,"__call__"),RF.forEach(t),Lv=i(cd," special method."),cd.forEach(t),Nv=d(oo),E(An.$$.fragment,oo),Dv=d(oo),uc=s(oo,"P",{});var HF=a(uc);Ov=i(HF,"Example:"),HF.forEach(t),Sv=d(oo),E(xa.$$.fragment,oo),oo.forEach(t),to.forEach(t),bm=d(o),No=s(o,"H2",{class:!0});var Mu=a(No);In=s(Mu,"A",{id:!0,class:!0,href:!0});var QF=a(In);fc=s(QF,"SPAN",{});var UF=a(fc);E(Ma.$$.fragment,UF),UF.forEach(t),QF.forEach(t),Wv=d(Mu),gc=s(Mu,"SPAN",{});var VF=a(gc);Bv=i(VF,"ElectraForTokenClassification"),VF.forEach(t),Mu.forEach(t),Fm=d(o),He=s(o,"DIV",{class:!0});var Lt=a(He);E(za.$$.fragment,Lt),Rv=d(Lt),_c=s(Lt,"P",{});var JF=a(_c);Hv=i(JF,"Electra model with a token classification head on top."),JF.forEach(t),Qv=d(Lt),vc=s(Lt,"P",{});var KF=a(vc);Uv=i(KF,"Both the discriminator and generator may be loaded into this model."),KF.forEach(t),Vv=d(Lt),Pa=s(Lt,"P",{});var zu=a(Pa);Jv=i(zu,"This model inherits from "),Al=s(zu,"A",{href:!0});var GF=a(Al);Kv=i(GF,"PreTrainedModel"),GF.forEach(t),Gv=i(zu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zu.forEach(t),Xv=d(Lt),Ca=s(Lt,"P",{});var Pu=a(Ca);Yv=i(Pu,"This model is also a PyTorch "),qa=s(Pu,"A",{href:!0,rel:!0});var XF=a(qa);Zv=i(XF,"torch.nn.Module"),XF.forEach(t),ek=i(Pu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pu.forEach(t),tk=d(Lt),rt=s(Lt,"DIV",{class:!0});var no=a(rt);E(ja.$$.fragment,no),ok=d(no),Do=s(no,"P",{});var hd=a(Do);nk=i(hd,"The "),Il=s(hd,"A",{href:!0});var YF=a(Il);sk=i(YF,"ElectraForTokenClassification"),YF.forEach(t),ak=i(hd," forward method, overrides the "),kc=s(hd,"CODE",{});var ZF=a(kc);rk=i(ZF,"__call__"),ZF.forEach(t),ik=i(hd," special method."),hd.forEach(t),lk=d(no),E(Ln.$$.fragment,no),dk=d(no),Tc=s(no,"P",{});var e$=a(Tc);ck=i(e$,"Example:"),e$.forEach(t),hk=d(no),E(Aa.$$.fragment,no),no.forEach(t),Lt.forEach(t),$m=d(o),Oo=s(o,"H2",{class:!0});var Cu=a(Oo);Nn=s(Cu,"A",{id:!0,class:!0,href:!0});var t$=a(Nn);Ec=s(t$,"SPAN",{});var o$=a(Ec);E(Ia.$$.fragment,o$),o$.forEach(t),t$.forEach(t),pk=d(Cu),wc=s(Cu,"SPAN",{});var n$=a(wc);mk=i(n$,"ElectraForQuestionAnswering"),n$.forEach(t),Cu.forEach(t),xm=d(o),tt=s(o,"DIV",{class:!0});var so=a(tt);E(La.$$.fragment,so),uk=d(so),So=s(so,"P",{});var pd=a(So);fk=i(pd,`ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),yc=s(pd,"CODE",{});var s$=a(yc);gk=i(s$,"span start logits"),s$.forEach(t),_k=i(pd," and "),bc=s(pd,"CODE",{});var a$=a(bc);vk=i(a$,"span end logits"),a$.forEach(t),kk=i(pd,")."),pd.forEach(t),Tk=d(so),Na=s(so,"P",{});var qu=a(Na);Ek=i(qu,"This model inherits from "),Ll=s(qu,"A",{href:!0});var r$=a(Ll);wk=i(r$,"PreTrainedModel"),r$.forEach(t),yk=i(qu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qu.forEach(t),bk=d(so),Da=s(so,"P",{});var ju=a(Da);Fk=i(ju,"This model is also a PyTorch "),Oa=s(ju,"A",{href:!0,rel:!0});var i$=a(Oa);$k=i(i$,"torch.nn.Module"),i$.forEach(t),xk=i(ju,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ju.forEach(t),Mk=d(so),it=s(so,"DIV",{class:!0});var ao=a(it);E(Sa.$$.fragment,ao),zk=d(ao),Wo=s(ao,"P",{});var md=a(Wo);Pk=i(md,"The "),Nl=s(md,"A",{href:!0});var l$=a(Nl);Ck=i(l$,"ElectraForQuestionAnswering"),l$.forEach(t),qk=i(md," forward method, overrides the "),Fc=s(md,"CODE",{});var d$=a(Fc);jk=i(d$,"__call__"),d$.forEach(t),Ak=i(md," special method."),md.forEach(t),Ik=d(ao),E(Dn.$$.fragment,ao),Lk=d(ao),$c=s(ao,"P",{});var c$=a($c);Nk=i(c$,"Example:"),c$.forEach(t),Dk=d(ao),E(Wa.$$.fragment,ao),ao.forEach(t),so.forEach(t),Mm=d(o),Bo=s(o,"H2",{class:!0});var Au=a(Bo);On=s(Au,"A",{id:!0,class:!0,href:!0});var h$=a(On);xc=s(h$,"SPAN",{});var p$=a(xc);E(Ba.$$.fragment,p$),p$.forEach(t),h$.forEach(t),Ok=d(Au),Mc=s(Au,"SPAN",{});var m$=a(Mc);Sk=i(m$,"TFElectraModel"),m$.forEach(t),Au.forEach(t),zm=d(o),Qe=s(o,"DIV",{class:!0});var Nt=a(Qe);E(Ra.$$.fragment,Nt),Wk=d(Nt),zc=s(Nt,"P",{});var u$=a(zc);Bk=i(u$,"The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different. Both the generator and discriminator checkpoints may be loaded into this model."),u$.forEach(t),Rk=d(Nt),Ha=s(Nt,"P",{});var Iu=a(Ha);Hk=i(Iu,"This model inherits from "),Dl=s(Iu,"A",{href:!0});var f$=a(Dl);Qk=i(f$,"TFPreTrainedModel"),f$.forEach(t),Uk=i(Iu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Iu.forEach(t),Vk=d(Nt),Qa=s(Nt,"P",{});var Lu=a(Qa);Jk=i(Lu,"This model is also a "),Ua=s(Lu,"A",{href:!0,rel:!0});var g$=a(Ua);Kk=i(g$,"tf.keras.Model"),g$.forEach(t),Gk=i(Lu,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Lu.forEach(t),Xk=d(Nt),E(Sn.$$.fragment,Nt),Yk=d(Nt),lt=s(Nt,"DIV",{class:!0});var ro=a(lt);E(Va.$$.fragment,ro),Zk=d(ro),Ro=s(ro,"P",{});var ud=a(Ro);eT=i(ud,"The "),Ol=s(ud,"A",{href:!0});var _$=a(Ol);tT=i(_$,"TFElectraModel"),_$.forEach(t),oT=i(ud," forward method, overrides the "),Pc=s(ud,"CODE",{});var v$=a(Pc);nT=i(v$,"__call__"),v$.forEach(t),sT=i(ud," special method."),ud.forEach(t),aT=d(ro),E(Wn.$$.fragment,ro),rT=d(ro),Cc=s(ro,"P",{});var k$=a(Cc);iT=i(k$,"Example:"),k$.forEach(t),lT=d(ro),E(Ja.$$.fragment,ro),ro.forEach(t),Nt.forEach(t),Pm=d(o),Ho=s(o,"H2",{class:!0});var Nu=a(Ho);Bn=s(Nu,"A",{id:!0,class:!0,href:!0});var T$=a(Bn);qc=s(T$,"SPAN",{});var E$=a(qc);E(Ka.$$.fragment,E$),E$.forEach(t),T$.forEach(t),dT=d(Nu),jc=s(Nu,"SPAN",{});var w$=a(jc);cT=i(w$,"TFElectraForPreTraining"),w$.forEach(t),Nu.forEach(t),Cm=d(o),je=s(o,"DIV",{class:!0});var yt=a(je);E(Ga.$$.fragment,yt),hT=d(yt),Ac=s(yt,"P",{});var y$=a(Ac);pT=i(y$,"Electra model with a binary classification head on top as used during pretraining for identifying generated tokens."),y$.forEach(t),mT=d(yt),Ic=s(yt,"P",{});var b$=a(Ic);uT=i(b$,`Even though both the discriminator and generator may be loaded into this model, the discriminator is the only model of the two to have the correct classification head to be used for this model.`),b$.forEach(t),fT=d(yt),Xa=s(yt,"P",{});var Du=a(Xa);gT=i(Du,"This model inherits from "),Sl=s(Du,"A",{href:!0});var F$=a(Sl);_T=i(F$,"TFPreTrainedModel"),F$.forEach(t),vT=i(Du,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Du.forEach(t),kT=d(yt),Ya=s(yt,"P",{});var Ou=a(Ya);TT=i(Ou,"This model is also a "),Za=s(Ou,"A",{href:!0,rel:!0});var $$=a(Za);ET=i($$,"tf.keras.Model"),$$.forEach(t),wT=i(Ou,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ou.forEach(t),yT=d(yt),E(Rn.$$.fragment,yt),bT=d(yt),dt=s(yt,"DIV",{class:!0});var io=a(dt);E(er.$$.fragment,io),FT=d(io),Qo=s(io,"P",{});var fd=a(Qo);$T=i(fd,"The "),Wl=s(fd,"A",{href:!0});var x$=a(Wl);xT=i(x$,"TFElectraForPreTraining"),x$.forEach(t),MT=i(fd," forward method, overrides the "),Lc=s(fd,"CODE",{});var M$=a(Lc);zT=i(M$,"__call__"),M$.forEach(t),PT=i(fd," special method."),fd.forEach(t),CT=d(io),E(Hn.$$.fragment,io),qT=d(io),Nc=s(io,"P",{});var z$=a(Nc);jT=i(z$,"Examples:"),z$.forEach(t),AT=d(io),E(tr.$$.fragment,io),io.forEach(t),yt.forEach(t),qm=d(o),Uo=s(o,"H2",{class:!0});var Su=a(Uo);Qn=s(Su,"A",{id:!0,class:!0,href:!0});var P$=a(Qn);Dc=s(P$,"SPAN",{});var C$=a(Dc);E(or.$$.fragment,C$),C$.forEach(t),P$.forEach(t),IT=d(Su),Oc=s(Su,"SPAN",{});var q$=a(Oc);LT=i(q$,"TFElectraForMaskedLM"),q$.forEach(t),Su.forEach(t),jm=d(o),Ae=s(o,"DIV",{class:!0});var bt=a(Ae);E(nr.$$.fragment,bt),NT=d(bt),Sc=s(bt,"P",{});var j$=a(Sc);DT=i(j$,"Electra model with a language modeling head on top."),j$.forEach(t),OT=d(bt),Wc=s(bt,"P",{});var A$=a(Wc);ST=i(A$,`Even though both the discriminator and generator may be loaded into this model, the generator is the only model of the two to have been trained for the masked language modeling task.`),A$.forEach(t),WT=d(bt),sr=s(bt,"P",{});var Wu=a(sr);BT=i(Wu,"This model inherits from "),Bl=s(Wu,"A",{href:!0});var I$=a(Bl);RT=i(I$,"TFPreTrainedModel"),I$.forEach(t),HT=i(Wu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wu.forEach(t),QT=d(bt),ar=s(bt,"P",{});var Bu=a(ar);UT=i(Bu,"This model is also a "),rr=s(Bu,"A",{href:!0,rel:!0});var L$=a(rr);VT=i(L$,"tf.keras.Model"),L$.forEach(t),JT=i(Bu,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Bu.forEach(t),KT=d(bt),E(Un.$$.fragment,bt),GT=d(bt),ct=s(bt,"DIV",{class:!0});var lo=a(ct);E(ir.$$.fragment,lo),XT=d(lo),Vo=s(lo,"P",{});var gd=a(Vo);YT=i(gd,"The "),Rl=s(gd,"A",{href:!0});var N$=a(Rl);ZT=i(N$,"TFElectraForMaskedLM"),N$.forEach(t),e1=i(gd," forward method, overrides the "),Bc=s(gd,"CODE",{});var D$=a(Bc);t1=i(D$,"__call__"),D$.forEach(t),o1=i(gd," special method."),gd.forEach(t),n1=d(lo),E(Vn.$$.fragment,lo),s1=d(lo),Rc=s(lo,"P",{});var O$=a(Rc);a1=i(O$,"Example:"),O$.forEach(t),r1=d(lo),E(lr.$$.fragment,lo),lo.forEach(t),bt.forEach(t),Am=d(o),Jo=s(o,"H2",{class:!0});var Ru=a(Jo);Jn=s(Ru,"A",{id:!0,class:!0,href:!0});var S$=a(Jn);Hc=s(S$,"SPAN",{});var W$=a(Hc);E(dr.$$.fragment,W$),W$.forEach(t),S$.forEach(t),i1=d(Ru),Qc=s(Ru,"SPAN",{});var B$=a(Qc);l1=i(B$,"TFElectraForSequenceClassification"),B$.forEach(t),Ru.forEach(t),Im=d(o),Ue=s(o,"DIV",{class:!0});var Dt=a(Ue);E(cr.$$.fragment,Dt),d1=d(Dt),Uc=s(Dt,"P",{});var R$=a(Uc);c1=i(R$,`ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),R$.forEach(t),h1=d(Dt),hr=s(Dt,"P",{});var Hu=a(hr);p1=i(Hu,"This model inherits from "),Hl=s(Hu,"A",{href:!0});var H$=a(Hl);m1=i(H$,"TFPreTrainedModel"),H$.forEach(t),u1=i(Hu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hu.forEach(t),f1=d(Dt),pr=s(Dt,"P",{});var Qu=a(pr);g1=i(Qu,"This model is also a "),mr=s(Qu,"A",{href:!0,rel:!0});var Q$=a(mr);_1=i(Q$,"tf.keras.Model"),Q$.forEach(t),v1=i(Qu,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Qu.forEach(t),k1=d(Dt),E(Kn.$$.fragment,Dt),T1=d(Dt),ht=s(Dt,"DIV",{class:!0});var co=a(ht);E(ur.$$.fragment,co),E1=d(co),Ko=s(co,"P",{});var _d=a(Ko);w1=i(_d,"The "),Ql=s(_d,"A",{href:!0});var U$=a(Ql);y1=i(U$,"TFElectraForSequenceClassification"),U$.forEach(t),b1=i(_d," forward method, overrides the "),Vc=s(_d,"CODE",{});var V$=a(Vc);F1=i(V$,"__call__"),V$.forEach(t),$1=i(_d," special method."),_d.forEach(t),x1=d(co),E(Gn.$$.fragment,co),M1=d(co),Jc=s(co,"P",{});var J$=a(Jc);z1=i(J$,"Example:"),J$.forEach(t),P1=d(co),E(fr.$$.fragment,co),co.forEach(t),Dt.forEach(t),Lm=d(o),Go=s(o,"H2",{class:!0});var Uu=a(Go);Xn=s(Uu,"A",{id:!0,class:!0,href:!0});var K$=a(Xn);Kc=s(K$,"SPAN",{});var G$=a(Kc);E(gr.$$.fragment,G$),G$.forEach(t),K$.forEach(t),C1=d(Uu),Gc=s(Uu,"SPAN",{});var X$=a(Gc);q1=i(X$,"TFElectraForMultipleChoice"),X$.forEach(t),Uu.forEach(t),Nm=d(o),Ve=s(o,"DIV",{class:!0});var Ot=a(Ve);E(_r.$$.fragment,Ot),j1=d(Ot),Xc=s(Ot,"P",{});var Y$=a(Xc);A1=i(Y$,`ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Y$.forEach(t),I1=d(Ot),vr=s(Ot,"P",{});var Vu=a(vr);L1=i(Vu,"This model inherits from "),Ul=s(Vu,"A",{href:!0});var Z$=a(Ul);N1=i(Z$,"TFPreTrainedModel"),Z$.forEach(t),D1=i(Vu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Vu.forEach(t),O1=d(Ot),kr=s(Ot,"P",{});var Ju=a(kr);S1=i(Ju,"This model is also a "),Tr=s(Ju,"A",{href:!0,rel:!0});var e4=a(Tr);W1=i(e4,"tf.keras.Model"),e4.forEach(t),B1=i(Ju,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ju.forEach(t),R1=d(Ot),E(Yn.$$.fragment,Ot),H1=d(Ot),pt=s(Ot,"DIV",{class:!0});var ho=a(pt);E(Er.$$.fragment,ho),Q1=d(ho),Xo=s(ho,"P",{});var vd=a(Xo);U1=i(vd,"The "),Vl=s(vd,"A",{href:!0});var t4=a(Vl);V1=i(t4,"TFElectraForMultipleChoice"),t4.forEach(t),J1=i(vd," forward method, overrides the "),Yc=s(vd,"CODE",{});var o4=a(Yc);K1=i(o4,"__call__"),o4.forEach(t),G1=i(vd," special method."),vd.forEach(t),X1=d(ho),E(Zn.$$.fragment,ho),Y1=d(ho),Zc=s(ho,"P",{});var n4=a(Zc);Z1=i(n4,"Example:"),n4.forEach(t),eE=d(ho),E(wr.$$.fragment,ho),ho.forEach(t),Ot.forEach(t),Dm=d(o),Yo=s(o,"H2",{class:!0});var Ku=a(Yo);es=s(Ku,"A",{id:!0,class:!0,href:!0});var s4=a(es);eh=s(s4,"SPAN",{});var a4=a(eh);E(yr.$$.fragment,a4),a4.forEach(t),s4.forEach(t),tE=d(Ku),th=s(Ku,"SPAN",{});var r4=a(th);oE=i(r4,"TFElectraForTokenClassification"),r4.forEach(t),Ku.forEach(t),Om=d(o),Ie=s(o,"DIV",{class:!0});var Ft=a(Ie);E(br.$$.fragment,Ft),nE=d(Ft),oh=s(Ft,"P",{});var i4=a(oh);sE=i(i4,"Electra model with a token classification head on top."),i4.forEach(t),aE=d(Ft),nh=s(Ft,"P",{});var l4=a(nh);rE=i(l4,"Both the discriminator and generator may be loaded into this model."),l4.forEach(t),iE=d(Ft),Fr=s(Ft,"P",{});var Gu=a(Fr);lE=i(Gu,"This model inherits from "),Jl=s(Gu,"A",{href:!0});var d4=a(Jl);dE=i(d4,"TFPreTrainedModel"),d4.forEach(t),cE=i(Gu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gu.forEach(t),hE=d(Ft),$r=s(Ft,"P",{});var Xu=a($r);pE=i(Xu,"This model is also a "),xr=s(Xu,"A",{href:!0,rel:!0});var c4=a(xr);mE=i(c4,"tf.keras.Model"),c4.forEach(t),uE=i(Xu,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Xu.forEach(t),fE=d(Ft),E(ts.$$.fragment,Ft),gE=d(Ft),mt=s(Ft,"DIV",{class:!0});var po=a(mt);E(Mr.$$.fragment,po),_E=d(po),Zo=s(po,"P",{});var kd=a(Zo);vE=i(kd,"The "),Kl=s(kd,"A",{href:!0});var h4=a(Kl);kE=i(h4,"TFElectraForTokenClassification"),h4.forEach(t),TE=i(kd," forward method, overrides the "),sh=s(kd,"CODE",{});var p4=a(sh);EE=i(p4,"__call__"),p4.forEach(t),wE=i(kd," special method."),kd.forEach(t),yE=d(po),E(os.$$.fragment,po),bE=d(po),ah=s(po,"P",{});var m4=a(ah);FE=i(m4,"Example:"),m4.forEach(t),$E=d(po),E(zr.$$.fragment,po),po.forEach(t),Ft.forEach(t),Sm=d(o),en=s(o,"H2",{class:!0});var Yu=a(en);ns=s(Yu,"A",{id:!0,class:!0,href:!0});var u4=a(ns);rh=s(u4,"SPAN",{});var f4=a(rh);E(Pr.$$.fragment,f4),f4.forEach(t),u4.forEach(t),xE=d(Yu),ih=s(Yu,"SPAN",{});var g4=a(ih);ME=i(g4,"TFElectraForQuestionAnswering"),g4.forEach(t),Yu.forEach(t),Wm=d(o),Je=s(o,"DIV",{class:!0});var St=a(Je);E(Cr.$$.fragment,St),zE=d(St),tn=s(St,"P",{});var Td=a(tn);PE=i(Td,`Electra Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),lh=s(Td,"CODE",{});var _4=a(lh);CE=i(_4,"span start logits"),_4.forEach(t),qE=i(Td," and "),dh=s(Td,"CODE",{});var v4=a(dh);jE=i(v4,"span end logits"),v4.forEach(t),AE=i(Td,")."),Td.forEach(t),IE=d(St),qr=s(St,"P",{});var Zu=a(qr);LE=i(Zu,"This model inherits from "),Gl=s(Zu,"A",{href:!0});var k4=a(Gl);NE=i(k4,"TFPreTrainedModel"),k4.forEach(t),DE=i(Zu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zu.forEach(t),OE=d(St),jr=s(St,"P",{});var ef=a(jr);SE=i(ef,"This model is also a "),Ar=s(ef,"A",{href:!0,rel:!0});var T4=a(Ar);WE=i(T4,"tf.keras.Model"),T4.forEach(t),BE=i(ef,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ef.forEach(t),RE=d(St),E(ss.$$.fragment,St),HE=d(St),ut=s(St,"DIV",{class:!0});var mo=a(ut);E(Ir.$$.fragment,mo),QE=d(mo),on=s(mo,"P",{});var Ed=a(on);UE=i(Ed,"The "),Xl=s(Ed,"A",{href:!0});var E4=a(Xl);VE=i(E4,"TFElectraForQuestionAnswering"),E4.forEach(t),JE=i(Ed," forward method, overrides the "),ch=s(Ed,"CODE",{});var w4=a(ch);KE=i(w4,"__call__"),w4.forEach(t),GE=i(Ed," special method."),Ed.forEach(t),XE=d(mo),E(as.$$.fragment,mo),YE=d(mo),hh=s(mo,"P",{});var y4=a(hh);ZE=i(y4,"Example:"),y4.forEach(t),ew=d(mo),E(Lr.$$.fragment,mo),mo.forEach(t),St.forEach(t),Bm=d(o),nn=s(o,"H2",{class:!0});var tf=a(nn);rs=s(tf,"A",{id:!0,class:!0,href:!0});var b4=a(rs);ph=s(b4,"SPAN",{});var F4=a(ph);E(Nr.$$.fragment,F4),F4.forEach(t),b4.forEach(t),tw=d(tf),mh=s(tf,"SPAN",{});var $4=a(mh);ow=i($4,"FlaxElectraModel"),$4.forEach(t),tf.forEach(t),Rm=d(o),Le=s(o,"DIV",{class:!0});var $t=a(Le);E(Dr.$$.fragment,$t),nw=d($t),uh=s($t,"P",{});var x4=a(uh);sw=i(x4,"The bare Electra Model transformer outputting raw hidden-states without any specific head on top."),x4.forEach(t),aw=d($t),Or=s($t,"P",{});var of=a(Or);rw=i(of,"This model inherits from "),Yl=s(of,"A",{href:!0});var M4=a(Yl);iw=i(M4,"FlaxPreTrainedModel"),M4.forEach(t),lw=i(of,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),of.forEach(t),dw=d($t),Sr=s($t,"P",{});var nf=a(Sr);cw=i(nf,"This model is also a Flax Linen "),Wr=s(nf,"A",{href:!0,rel:!0});var z4=a(Wr);hw=i(z4,"flax.nn.Module"),z4.forEach(t),pw=i(nf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),nf.forEach(t),mw=d($t),fh=s($t,"P",{});var P4=a(fh);uw=i(P4,"Finally, this model supports inherent JAX features such as:"),P4.forEach(t),fw=d($t),Bt=s($t,"UL",{});var ys=a(Bt);gh=s(ys,"LI",{});var C4=a(gh);Br=s(C4,"A",{href:!0,rel:!0});var q4=a(Br);gw=i(q4,"Just-In-Time (JIT) compilation"),q4.forEach(t),C4.forEach(t),_w=d(ys),_h=s(ys,"LI",{});var j4=a(_h);Rr=s(j4,"A",{href:!0,rel:!0});var A4=a(Rr);vw=i(A4,"Automatic Differentiation"),A4.forEach(t),j4.forEach(t),kw=d(ys),vh=s(ys,"LI",{});var I4=a(vh);Hr=s(I4,"A",{href:!0,rel:!0});var L4=a(Hr);Tw=i(L4,"Vectorization"),L4.forEach(t),I4.forEach(t),Ew=d(ys),kh=s(ys,"LI",{});var N4=a(kh);Qr=s(N4,"A",{href:!0,rel:!0});var D4=a(Qr);ww=i(D4,"Parallelization"),D4.forEach(t),N4.forEach(t),ys.forEach(t),yw=d($t),ft=s($t,"DIV",{class:!0});var uo=a(ft);E(Ur.$$.fragment,uo),bw=d(uo),sn=s(uo,"P",{});var wd=a(sn);Fw=i(wd,"The "),Th=s(wd,"CODE",{});var O4=a(Th);$w=i(O4,"FlaxElectraPreTrainedModel"),O4.forEach(t),xw=i(wd," forward method, overrides the "),Eh=s(wd,"CODE",{});var S4=a(Eh);Mw=i(S4,"__call__"),S4.forEach(t),zw=i(wd," special method."),wd.forEach(t),Pw=d(uo),E(is.$$.fragment,uo),Cw=d(uo),wh=s(uo,"P",{});var W4=a(wh);qw=i(W4,"Example:"),W4.forEach(t),jw=d(uo),E(Vr.$$.fragment,uo),uo.forEach(t),$t.forEach(t),Hm=d(o),an=s(o,"H2",{class:!0});var sf=a(an);ls=s(sf,"A",{id:!0,class:!0,href:!0});var B4=a(ls);yh=s(B4,"SPAN",{});var R4=a(yh);E(Jr.$$.fragment,R4),R4.forEach(t),B4.forEach(t),Aw=d(sf),bh=s(sf,"SPAN",{});var H4=a(bh);Iw=i(H4,"FlaxElectraForPreTraining"),H4.forEach(t),sf.forEach(t),Qm=d(o),Ce=s(o,"DIV",{class:!0});var Ke=a(Ce);E(Kr.$$.fragment,Ke),Lw=d(Ke),Fh=s(Ke,"P",{});var Q4=a(Fh);Nw=i(Q4,"Electra model with a binary classification head on top as used during pretraining for identifying generated tokens."),Q4.forEach(t),Dw=d(Ke),$h=s(Ke,"P",{});var U4=a($h);Ow=i(U4,"It is recommended to load the discriminator checkpoint into that model."),U4.forEach(t),Sw=d(Ke),Gr=s(Ke,"P",{});var af=a(Gr);Ww=i(af,"This model inherits from "),Zl=s(af,"A",{href:!0});var V4=a(Zl);Bw=i(V4,"FlaxPreTrainedModel"),V4.forEach(t),Rw=i(af,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),af.forEach(t),Hw=d(Ke),Xr=s(Ke,"P",{});var rf=a(Xr);Qw=i(rf,"This model is also a Flax Linen "),Yr=s(rf,"A",{href:!0,rel:!0});var J4=a(Yr);Uw=i(J4,"flax.nn.Module"),J4.forEach(t),Vw=i(rf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),rf.forEach(t),Jw=d(Ke),xh=s(Ke,"P",{});var K4=a(xh);Kw=i(K4,"Finally, this model supports inherent JAX features such as:"),K4.forEach(t),Gw=d(Ke),Rt=s(Ke,"UL",{});var bs=a(Rt);Mh=s(bs,"LI",{});var G4=a(Mh);Zr=s(G4,"A",{href:!0,rel:!0});var X4=a(Zr);Xw=i(X4,"Just-In-Time (JIT) compilation"),X4.forEach(t),G4.forEach(t),Yw=d(bs),zh=s(bs,"LI",{});var Y4=a(zh);ei=s(Y4,"A",{href:!0,rel:!0});var Z4=a(ei);Zw=i(Z4,"Automatic Differentiation"),Z4.forEach(t),Y4.forEach(t),ey=d(bs),Ph=s(bs,"LI",{});var ex=a(Ph);ti=s(ex,"A",{href:!0,rel:!0});var tx=a(ti);ty=i(tx,"Vectorization"),tx.forEach(t),ex.forEach(t),oy=d(bs),Ch=s(bs,"LI",{});var ox=a(Ch);oi=s(ox,"A",{href:!0,rel:!0});var nx=a(oi);ny=i(nx,"Parallelization"),nx.forEach(t),ox.forEach(t),bs.forEach(t),sy=d(Ke),gt=s(Ke,"DIV",{class:!0});var fo=a(gt);E(ni.$$.fragment,fo),ay=d(fo),rn=s(fo,"P",{});var yd=a(rn);ry=i(yd,"The "),qh=s(yd,"CODE",{});var sx=a(qh);iy=i(sx,"FlaxElectraPreTrainedModel"),sx.forEach(t),ly=i(yd," forward method, overrides the "),jh=s(yd,"CODE",{});var ax=a(jh);dy=i(ax,"__call__"),ax.forEach(t),cy=i(yd," special method."),yd.forEach(t),hy=d(fo),E(ds.$$.fragment,fo),py=d(fo),Ah=s(fo,"P",{});var rx=a(Ah);my=i(rx,"Example:"),rx.forEach(t),uy=d(fo),E(si.$$.fragment,fo),fo.forEach(t),Ke.forEach(t),Um=d(o),ln=s(o,"H2",{class:!0});var lf=a(ln);cs=s(lf,"A",{id:!0,class:!0,href:!0});var ix=a(cs);Ih=s(ix,"SPAN",{});var lx=a(Ih);E(ai.$$.fragment,lx),lx.forEach(t),ix.forEach(t),fy=d(lf),Lh=s(lf,"SPAN",{});var dx=a(Lh);gy=i(dx,"FlaxElectraForMaskedLM"),dx.forEach(t),lf.forEach(t),Vm=d(o),Ne=s(o,"DIV",{class:!0});var xt=a(Ne);E(ri.$$.fragment,xt),_y=d(xt),ii=s(xt,"P",{});var df=a(ii);vy=i(df,"Electra Model with a "),Nh=s(df,"CODE",{});var cx=a(Nh);ky=i(cx,"language modeling"),cx.forEach(t),Ty=i(df," head on top."),df.forEach(t),Ey=d(xt),li=s(xt,"P",{});var cf=a(li);wy=i(cf,"This model inherits from "),ed=s(cf,"A",{href:!0});var hx=a(ed);yy=i(hx,"FlaxPreTrainedModel"),hx.forEach(t),by=i(cf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),cf.forEach(t),Fy=d(xt),di=s(xt,"P",{});var hf=a(di);$y=i(hf,"This model is also a Flax Linen "),ci=s(hf,"A",{href:!0,rel:!0});var px=a(ci);xy=i(px,"flax.nn.Module"),px.forEach(t),My=i(hf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),hf.forEach(t),zy=d(xt),Dh=s(xt,"P",{});var mx=a(Dh);Py=i(mx,"Finally, this model supports inherent JAX features such as:"),mx.forEach(t),Cy=d(xt),Ht=s(xt,"UL",{});var Fs=a(Ht);Oh=s(Fs,"LI",{});var ux=a(Oh);hi=s(ux,"A",{href:!0,rel:!0});var fx=a(hi);qy=i(fx,"Just-In-Time (JIT) compilation"),fx.forEach(t),ux.forEach(t),jy=d(Fs),Sh=s(Fs,"LI",{});var gx=a(Sh);pi=s(gx,"A",{href:!0,rel:!0});var _x=a(pi);Ay=i(_x,"Automatic Differentiation"),_x.forEach(t),gx.forEach(t),Iy=d(Fs),Wh=s(Fs,"LI",{});var vx=a(Wh);mi=s(vx,"A",{href:!0,rel:!0});var kx=a(mi);Ly=i(kx,"Vectorization"),kx.forEach(t),vx.forEach(t),Ny=d(Fs),Bh=s(Fs,"LI",{});var Tx=a(Bh);ui=s(Tx,"A",{href:!0,rel:!0});var Ex=a(ui);Dy=i(Ex,"Parallelization"),Ex.forEach(t),Tx.forEach(t),Fs.forEach(t),Oy=d(xt),_t=s(xt,"DIV",{class:!0});var go=a(_t);E(fi.$$.fragment,go),Sy=d(go),dn=s(go,"P",{});var bd=a(dn);Wy=i(bd,"The "),Rh=s(bd,"CODE",{});var wx=a(Rh);By=i(wx,"FlaxElectraPreTrainedModel"),wx.forEach(t),Ry=i(bd," forward method, overrides the "),Hh=s(bd,"CODE",{});var yx=a(Hh);Hy=i(yx,"__call__"),yx.forEach(t),Qy=i(bd," special method."),bd.forEach(t),Uy=d(go),E(hs.$$.fragment,go),Vy=d(go),Qh=s(go,"P",{});var bx=a(Qh);Jy=i(bx,"Example:"),bx.forEach(t),Ky=d(go),E(gi.$$.fragment,go),go.forEach(t),xt.forEach(t),Jm=d(o),cn=s(o,"H2",{class:!0});var pf=a(cn);ps=s(pf,"A",{id:!0,class:!0,href:!0});var Fx=a(ps);Uh=s(Fx,"SPAN",{});var $x=a(Uh);E(_i.$$.fragment,$x),$x.forEach(t),Fx.forEach(t),Gy=d(pf),Vh=s(pf,"SPAN",{});var xx=a(Vh);Xy=i(xx,"FlaxElectraForSequenceClassification"),xx.forEach(t),pf.forEach(t),Km=d(o),De=s(o,"DIV",{class:!0});var Mt=a(De);E(vi.$$.fragment,Mt),Yy=d(Mt),Jh=s(Mt,"P",{});var Mx=a(Jh);Zy=i(Mx,`Electra Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Mx.forEach(t),eb=d(Mt),ki=s(Mt,"P",{});var mf=a(ki);tb=i(mf,"This model inherits from "),td=s(mf,"A",{href:!0});var zx=a(td);ob=i(zx,"FlaxPreTrainedModel"),zx.forEach(t),nb=i(mf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),mf.forEach(t),sb=d(Mt),Ti=s(Mt,"P",{});var uf=a(Ti);ab=i(uf,"This model is also a Flax Linen "),Ei=s(uf,"A",{href:!0,rel:!0});var Px=a(Ei);rb=i(Px,"flax.nn.Module"),Px.forEach(t),ib=i(uf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),uf.forEach(t),lb=d(Mt),Kh=s(Mt,"P",{});var Cx=a(Kh);db=i(Cx,"Finally, this model supports inherent JAX features such as:"),Cx.forEach(t),cb=d(Mt),Qt=s(Mt,"UL",{});var $s=a(Qt);Gh=s($s,"LI",{});var qx=a(Gh);wi=s(qx,"A",{href:!0,rel:!0});var jx=a(wi);hb=i(jx,"Just-In-Time (JIT) compilation"),jx.forEach(t),qx.forEach(t),pb=d($s),Xh=s($s,"LI",{});var Ax=a(Xh);yi=s(Ax,"A",{href:!0,rel:!0});var Ix=a(yi);mb=i(Ix,"Automatic Differentiation"),Ix.forEach(t),Ax.forEach(t),ub=d($s),Yh=s($s,"LI",{});var Lx=a(Yh);bi=s(Lx,"A",{href:!0,rel:!0});var Nx=a(bi);fb=i(Nx,"Vectorization"),Nx.forEach(t),Lx.forEach(t),gb=d($s),Zh=s($s,"LI",{});var Dx=a(Zh);Fi=s(Dx,"A",{href:!0,rel:!0});var Ox=a(Fi);_b=i(Ox,"Parallelization"),Ox.forEach(t),Dx.forEach(t),$s.forEach(t),vb=d(Mt),vt=s(Mt,"DIV",{class:!0});var _o=a(vt);E($i.$$.fragment,_o),kb=d(_o),hn=s(_o,"P",{});var Fd=a(hn);Tb=i(Fd,"The "),ep=s(Fd,"CODE",{});var Sx=a(ep);Eb=i(Sx,"FlaxElectraPreTrainedModel"),Sx.forEach(t),wb=i(Fd," forward method, overrides the "),tp=s(Fd,"CODE",{});var Wx=a(tp);yb=i(Wx,"__call__"),Wx.forEach(t),bb=i(Fd," special method."),Fd.forEach(t),Fb=d(_o),E(ms.$$.fragment,_o),$b=d(_o),op=s(_o,"P",{});var Bx=a(op);xb=i(Bx,"Example:"),Bx.forEach(t),Mb=d(_o),E(xi.$$.fragment,_o),_o.forEach(t),Mt.forEach(t),Gm=d(o),pn=s(o,"H2",{class:!0});var ff=a(pn);us=s(ff,"A",{id:!0,class:!0,href:!0});var Rx=a(us);np=s(Rx,"SPAN",{});var Hx=a(np);E(Mi.$$.fragment,Hx),Hx.forEach(t),Rx.forEach(t),zb=d(ff),sp=s(ff,"SPAN",{});var Qx=a(sp);Pb=i(Qx,"FlaxElectraForMultipleChoice"),Qx.forEach(t),ff.forEach(t),Xm=d(o),Oe=s(o,"DIV",{class:!0});var zt=a(Oe);E(zi.$$.fragment,zt),Cb=d(zt),ap=s(zt,"P",{});var Ux=a(ap);qb=i(Ux,`ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Ux.forEach(t),jb=d(zt),Pi=s(zt,"P",{});var gf=a(Pi);Ab=i(gf,"This model inherits from "),od=s(gf,"A",{href:!0});var Vx=a(od);Ib=i(Vx,"FlaxPreTrainedModel"),Vx.forEach(t),Lb=i(gf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),gf.forEach(t),Nb=d(zt),Ci=s(zt,"P",{});var _f=a(Ci);Db=i(_f,"This model is also a Flax Linen "),qi=s(_f,"A",{href:!0,rel:!0});var Jx=a(qi);Ob=i(Jx,"flax.nn.Module"),Jx.forEach(t),Sb=i(_f,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),_f.forEach(t),Wb=d(zt),rp=s(zt,"P",{});var Kx=a(rp);Bb=i(Kx,"Finally, this model supports inherent JAX features such as:"),Kx.forEach(t),Rb=d(zt),Ut=s(zt,"UL",{});var xs=a(Ut);ip=s(xs,"LI",{});var Gx=a(ip);ji=s(Gx,"A",{href:!0,rel:!0});var Xx=a(ji);Hb=i(Xx,"Just-In-Time (JIT) compilation"),Xx.forEach(t),Gx.forEach(t),Qb=d(xs),lp=s(xs,"LI",{});var Yx=a(lp);Ai=s(Yx,"A",{href:!0,rel:!0});var Zx=a(Ai);Ub=i(Zx,"Automatic Differentiation"),Zx.forEach(t),Yx.forEach(t),Vb=d(xs),dp=s(xs,"LI",{});var eM=a(dp);Ii=s(eM,"A",{href:!0,rel:!0});var tM=a(Ii);Jb=i(tM,"Vectorization"),tM.forEach(t),eM.forEach(t),Kb=d(xs),cp=s(xs,"LI",{});var oM=a(cp);Li=s(oM,"A",{href:!0,rel:!0});var nM=a(Li);Gb=i(nM,"Parallelization"),nM.forEach(t),oM.forEach(t),xs.forEach(t),Xb=d(zt),kt=s(zt,"DIV",{class:!0});var vo=a(kt);E(Ni.$$.fragment,vo),Yb=d(vo),mn=s(vo,"P",{});var $d=a(mn);Zb=i($d,"The "),hp=s($d,"CODE",{});var sM=a(hp);e0=i(sM,"FlaxElectraPreTrainedModel"),sM.forEach(t),t0=i($d," forward method, overrides the "),pp=s($d,"CODE",{});var aM=a(pp);o0=i(aM,"__call__"),aM.forEach(t),n0=i($d," special method."),$d.forEach(t),s0=d(vo),E(fs.$$.fragment,vo),a0=d(vo),mp=s(vo,"P",{});var rM=a(mp);r0=i(rM,"Example:"),rM.forEach(t),i0=d(vo),E(Di.$$.fragment,vo),vo.forEach(t),zt.forEach(t),Ym=d(o),un=s(o,"H2",{class:!0});var vf=a(un);gs=s(vf,"A",{id:!0,class:!0,href:!0});var iM=a(gs);up=s(iM,"SPAN",{});var lM=a(up);E(Oi.$$.fragment,lM),lM.forEach(t),iM.forEach(t),l0=d(vf),fp=s(vf,"SPAN",{});var dM=a(fp);d0=i(dM,"FlaxElectraForTokenClassification"),dM.forEach(t),vf.forEach(t),Zm=d(o),qe=s(o,"DIV",{class:!0});var Ge=a(qe);E(Si.$$.fragment,Ge),c0=d(Ge),gp=s(Ge,"P",{});var cM=a(gp);h0=i(cM,"Electra model with a token classification head on top."),cM.forEach(t),p0=d(Ge),_p=s(Ge,"P",{});var hM=a(_p);m0=i(hM,"Both the discriminator and generator may be loaded into this model."),hM.forEach(t),u0=d(Ge),Wi=s(Ge,"P",{});var kf=a(Wi);f0=i(kf,"This model inherits from "),nd=s(kf,"A",{href:!0});var pM=a(nd);g0=i(pM,"FlaxPreTrainedModel"),pM.forEach(t),_0=i(kf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),kf.forEach(t),v0=d(Ge),Bi=s(Ge,"P",{});var Tf=a(Bi);k0=i(Tf,"This model is also a Flax Linen "),Ri=s(Tf,"A",{href:!0,rel:!0});var mM=a(Ri);T0=i(mM,"flax.nn.Module"),mM.forEach(t),E0=i(Tf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Tf.forEach(t),w0=d(Ge),vp=s(Ge,"P",{});var uM=a(vp);y0=i(uM,"Finally, this model supports inherent JAX features such as:"),uM.forEach(t),b0=d(Ge),Vt=s(Ge,"UL",{});var Ms=a(Vt);kp=s(Ms,"LI",{});var fM=a(kp);Hi=s(fM,"A",{href:!0,rel:!0});var gM=a(Hi);F0=i(gM,"Just-In-Time (JIT) compilation"),gM.forEach(t),fM.forEach(t),$0=d(Ms),Tp=s(Ms,"LI",{});var _M=a(Tp);Qi=s(_M,"A",{href:!0,rel:!0});var vM=a(Qi);x0=i(vM,"Automatic Differentiation"),vM.forEach(t),_M.forEach(t),M0=d(Ms),Ep=s(Ms,"LI",{});var kM=a(Ep);Ui=s(kM,"A",{href:!0,rel:!0});var TM=a(Ui);z0=i(TM,"Vectorization"),TM.forEach(t),kM.forEach(t),P0=d(Ms),wp=s(Ms,"LI",{});var EM=a(wp);Vi=s(EM,"A",{href:!0,rel:!0});var wM=a(Vi);C0=i(wM,"Parallelization"),wM.forEach(t),EM.forEach(t),Ms.forEach(t),q0=d(Ge),Tt=s(Ge,"DIV",{class:!0});var ko=a(Tt);E(Ji.$$.fragment,ko),j0=d(ko),fn=s(ko,"P",{});var xd=a(fn);A0=i(xd,"The "),yp=s(xd,"CODE",{});var yM=a(yp);I0=i(yM,"FlaxElectraPreTrainedModel"),yM.forEach(t),L0=i(xd," forward method, overrides the "),bp=s(xd,"CODE",{});var bM=a(bp);N0=i(bM,"__call__"),bM.forEach(t),D0=i(xd," special method."),xd.forEach(t),O0=d(ko),E(_s.$$.fragment,ko),S0=d(ko),Fp=s(ko,"P",{});var FM=a(Fp);W0=i(FM,"Example:"),FM.forEach(t),B0=d(ko),E(Ki.$$.fragment,ko),ko.forEach(t),Ge.forEach(t),eu=d(o),gn=s(o,"H2",{class:!0});var Ef=a(gn);vs=s(Ef,"A",{id:!0,class:!0,href:!0});var $M=a(vs);$p=s($M,"SPAN",{});var xM=a($p);E(Gi.$$.fragment,xM),xM.forEach(t),$M.forEach(t),R0=d(Ef),xp=s(Ef,"SPAN",{});var MM=a(xp);H0=i(MM,"FlaxElectraForQuestionAnswering"),MM.forEach(t),Ef.forEach(t),tu=d(o),Se=s(o,"DIV",{class:!0});var Pt=a(Se);E(Xi.$$.fragment,Pt),Q0=d(Pt),_n=s(Pt,"P",{});var Md=a(_n);U0=i(Md,`ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Mp=s(Md,"CODE",{});var zM=a(Mp);V0=i(zM,"span start logits"),zM.forEach(t),J0=i(Md," and "),zp=s(Md,"CODE",{});var PM=a(zp);K0=i(PM,"span end logits"),PM.forEach(t),G0=i(Md,")."),Md.forEach(t),X0=d(Pt),Yi=s(Pt,"P",{});var wf=a(Yi);Y0=i(wf,"This model inherits from "),sd=s(wf,"A",{href:!0});var CM=a(sd);Z0=i(CM,"FlaxPreTrainedModel"),CM.forEach(t),e2=i(wf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),wf.forEach(t),t2=d(Pt),Zi=s(Pt,"P",{});var yf=a(Zi);o2=i(yf,"This model is also a Flax Linen "),el=s(yf,"A",{href:!0,rel:!0});var qM=a(el);n2=i(qM,"flax.nn.Module"),qM.forEach(t),s2=i(yf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),yf.forEach(t),a2=d(Pt),Pp=s(Pt,"P",{});var jM=a(Pp);r2=i(jM,"Finally, this model supports inherent JAX features such as:"),jM.forEach(t),i2=d(Pt),Jt=s(Pt,"UL",{});var zs=a(Jt);Cp=s(zs,"LI",{});var AM=a(Cp);tl=s(AM,"A",{href:!0,rel:!0});var IM=a(tl);l2=i(IM,"Just-In-Time (JIT) compilation"),IM.forEach(t),AM.forEach(t),d2=d(zs),qp=s(zs,"LI",{});var LM=a(qp);ol=s(LM,"A",{href:!0,rel:!0});var NM=a(ol);c2=i(NM,"Automatic Differentiation"),NM.forEach(t),LM.forEach(t),h2=d(zs),jp=s(zs,"LI",{});var DM=a(jp);nl=s(DM,"A",{href:!0,rel:!0});var OM=a(nl);p2=i(OM,"Vectorization"),OM.forEach(t),DM.forEach(t),m2=d(zs),Ap=s(zs,"LI",{});var SM=a(Ap);sl=s(SM,"A",{href:!0,rel:!0});var WM=a(sl);u2=i(WM,"Parallelization"),WM.forEach(t),SM.forEach(t),zs.forEach(t),f2=d(Pt),Et=s(Pt,"DIV",{class:!0});var To=a(Et);E(al.$$.fragment,To),g2=d(To),vn=s(To,"P",{});var zd=a(vn);_2=i(zd,"The "),Ip=s(zd,"CODE",{});var BM=a(Ip);v2=i(BM,"FlaxElectraPreTrainedModel"),BM.forEach(t),k2=i(zd," forward method, overrides the "),Lp=s(zd,"CODE",{});var RM=a(Lp);T2=i(RM,"__call__"),RM.forEach(t),E2=i(zd," special method."),zd.forEach(t),w2=d(To),E(ks.$$.fragment,To),y2=d(To),Np=s(To,"P",{});var HM=a(Np);b2=i(HM,"Example:"),HM.forEach(t),F2=d(To),E(rl.$$.fragment,To),To.forEach(t),Pt.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(bz)),c(v,"id","electra"),c(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(v,"href","#electra"),c(g,"class","relative group"),c(ee,"id","overview"),c(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ee,"href","#overview"),c(z,"class","relative group"),c(ne,"href","https://openreview.net/pdf?id=r1xMH1BtvB"),c(ne,"rel","nofollow"),c(se,"href","https://github.com/google-research/electra"),c(se,"rel","nofollow"),c(p,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMaskedLM"),c(we,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForPreTraining"),c(A,"href","https://huggingface.co/lysandre"),c(A,"rel","nofollow"),c(Te,"href","https://github.com/google-research/electra"),c(Te,"rel","nofollow"),c(ke,"id","transformers.ElectraConfig"),c(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ke,"href","#transformers.ElectraConfig"),c(Z,"class","relative group"),c(pl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraModel"),c(ml,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraModel"),c(qs,"href","https://huggingface.co/google/electra-small-discriminator"),c(qs,"rel","nofollow"),c(ul,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(fl,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Xe,"class","docstring"),c(Tn,"id","transformers.ElectraTokenizer"),c(Tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Tn,"href","#transformers.ElectraTokenizer"),c(wo,"class","relative group"),c(gl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer"),c(_l,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(vl,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(Ct,"class","docstring"),c(wn,"id","transformers.ElectraTokenizerFast"),c(wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wn,"href","#transformers.ElectraTokenizerFast"),c(yo,"class","relative group"),c(kl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizerFast"),c(Tl,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(El,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(qt,"class","docstring"),c(bn,"id","transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput"),c(bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bn,"href","#transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput"),c(bo,"class","relative group"),c(wl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForPreTraining"),c(Fo,"class","docstring"),c(yl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForPreTraining"),c($o,"class","docstring"),c(Fn,"id","transformers.ElectraModel"),c(Fn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fn,"href","#transformers.ElectraModel"),c(xo,"class","relative group"),c(bl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Gs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Gs,"rel","nofollow"),c(Fl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraModel"),c(ot,"class","docstring"),c(Ye,"class","docstring"),c(xn,"id","transformers.ElectraForPreTraining"),c(xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xn,"href","#transformers.ElectraForPreTraining"),c(zo,"class","relative group"),c($l,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(na,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(na,"rel","nofollow"),c(xl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForPreTraining"),c(nt,"class","docstring"),c(Be,"class","docstring"),c(zn,"id","transformers.ElectraForMaskedLM"),c(zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zn,"href","#transformers.ElectraForMaskedLM"),c(Co,"class","relative 
group"),c(Ml,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ca,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ca,"rel","nofollow"),c(zl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMaskedLM"),c(st,"class","docstring"),c(Re,"class","docstring"),c(Cn,"id","transformers.ElectraForSequenceClassification"),c(Cn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Cn,"href","#transformers.ElectraForSequenceClassification"),c(jo,"class","relative group"),c(Pl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(_a,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(_a,"rel","nofollow"),c(Cl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForSequenceClassification"),c(We,"class","docstring"),c(Ze,"class","docstring"),c(jn,"id","transformers.ElectraForMultipleChoice"),c(jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jn,"href","#transformers.ElectraForMultipleChoice"),c(Io,"class","relative group"),c(ql,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Fa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Fa,"rel","nofollow"),c(jl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMultipleChoice"),c(at,"class","docstring"),c(et,"class","docstring"),c(In,"id","transformers.ElectraForTokenClassification"),c(In,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(In,"href","#transformers.ElectraForTokenClassification"),c(No,"class","relative group"),c(Al,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(qa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(qa,"rel","nofollow"),c(Il,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForTokenClassification"),c(rt,"class","docstring"),c(He,"class","docstring"),c(Nn,"id","transformers.ElectraForQuestionAnswering"),c(Nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nn,"href","#transformers.ElectraForQuestionAnswering"),c(Oo,"class","relative group"),c(Ll,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Oa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Oa,"rel","nofollow"),c(Nl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForQuestionAnswering"),c(it,"class","docstring"),c(tt,"class","docstring"),c(On,"id","transformers.TFElectraModel"),c(On,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(On,"href","#transformers.TFElectraModel"),c(Bo,"class","relative 
group"),c(Dl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ua,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ua,"rel","nofollow"),c(Ol,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraModel"),c(lt,"class","docstring"),c(Qe,"class","docstring"),c(Bn,"id","transformers.TFElectraForPreTraining"),c(Bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bn,"href","#transformers.TFElectraForPreTraining"),c(Ho,"class","relative group"),c(Sl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Za,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Za,"rel","nofollow"),c(Wl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForPreTraining"),c(dt,"class","docstring"),c(je,"class","docstring"),c(Qn,"id","transformers.TFElectraForMaskedLM"),c(Qn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qn,"href","#transformers.TFElectraForMaskedLM"),c(Uo,"class","relative group"),c(Bl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(rr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(rr,"rel","nofollow"),c(Rl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForMaskedLM"),c(ct,"class","docstring"),c(Ae,"class","docstring"),c(Jn,"id","transformers.TFElectraForSequenceClassification"),c(Jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jn,"href","#transformers.TFElectraForSequenceClassification"),c(Jo,"class","relative group"),c(Hl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(mr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(mr,"rel","nofollow"),c(Ql,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForSequenceClassification"),c(ht,"class","docstring"),c(Ue,"class","docstring"),c(Xn,"id","transformers.TFElectraForMultipleChoice"),c(Xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xn,"href","#transformers.TFElectraForMultipleChoice"),c(Go,"class","relative group"),c(Ul,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Tr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Tr,"rel","nofollow"),c(Vl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForMultipleChoice"),c(pt,"class","docstring"),c(Ve,"class","docstring"),c(es,"id","transformers.TFElectraForTokenClassification"),c(es,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(es,"href","#transformers.TFElectraForTokenClassification"),c(Yo,"class","relative 
group"),c(Jl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(xr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(xr,"rel","nofollow"),c(Kl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForTokenClassification"),c(mt,"class","docstring"),c(Ie,"class","docstring"),c(ns,"id","transformers.TFElectraForQuestionAnswering"),c(ns,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ns,"href","#transformers.TFElectraForQuestionAnswering"),c(en,"class","relative group"),c(Gl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ar,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ar,"rel","nofollow"),c(Xl,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForQuestionAnswering"),c(ut,"class","docstring"),c(Je,"class","docstring"),c(rs,"id","transformers.FlaxElectraModel"),c(rs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(rs,"href","#transformers.FlaxElectraModel"),c(nn,"class","relative group"),c(Yl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Wr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Wr,"rel","nofollow"),c(Br,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Br,"rel","nofollow"),c(Rr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Rr,"rel","nofollow"),c(Hr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Hr,"rel","nofollow"),c(Qr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Qr,"rel","nofollow"),c(ft,"class","docstring"),c(Le,"class","docstring"),c(ls,"id","transformers.FlaxElectraForPreTraining"),c(ls,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ls,"href","#transformers.FlaxElectraForPreTraining"),c(an,"class","relative group"),c(Zl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Yr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Yr,"rel","nofollow"),c(Zr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Zr,"rel","nofollow"),c(ei,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(ei,"rel","nofollow"),c(ti,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(ti,"rel","nofollow"),c(oi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(oi,"rel","nofollow"),c(gt,"class","docstring"),c(Ce,"class","docstring"),c(cs,"id","transformers.FlaxElectraForMaskedLM"),c(cs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(cs,"href","#transformers.FlaxElectraForMaskedLM"),c(ln,"class","relative 
group"),c(ed,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ci,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(ci,"rel","nofollow"),c(hi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(hi,"rel","nofollow"),c(pi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(pi,"rel","nofollow"),c(mi,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(mi,"rel","nofollow"),c(ui,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(ui,"rel","nofollow"),c(_t,"class","docstring"),c(Ne,"class","docstring"),c(ps,"id","transformers.FlaxElectraForSequenceClassification"),c(ps,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ps,"href","#transformers.FlaxElectraForSequenceClassification"),c(cn,"class","relative group"),c(td,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ei,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Ei,"rel","nofollow"),c(wi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(wi,"rel","nofollow"),c(yi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(yi,"rel","nofollow"),c(bi,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(bi,"rel","nofollow"),c(Fi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Fi,"rel","nofollow"),c(vt,"class","docstring"),c(De,"class","docstring"),c(us,"id","transformers.FlaxElectraForMultipleChoice"),c(us,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(us,"href","#transformers.FlaxElectraForMultipleChoice"),c(pn,"class","relative group"),c(od,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(qi,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(qi,"rel","nofollow"),c(ji,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(ji,"rel","nofollow"),c(Ai,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ai,"rel","nofollow"),c(Ii,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ii,"rel","nofollow"),c(Li,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Li,"rel","nofollow"),c(kt,"class","docstring"),c(Oe,"class","docstring"),c(gs,"id","transformers.FlaxElectraForTokenClassification"),c(gs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(gs,"href","#transformers.FlaxElectraForTokenClassification"),c(un,"class","relative 
group"),c(nd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ri,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Ri,"rel","nofollow"),c(Hi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Hi,"rel","nofollow"),c(Qi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Qi,"rel","nofollow"),c(Ui,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ui,"rel","nofollow"),c(Vi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Vi,"rel","nofollow"),c(Tt,"class","docstring"),c(qe,"class","docstring"),c(vs,"id","transformers.FlaxElectraForQuestionAnswering"),c(vs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vs,"href","#transformers.FlaxElectraForQuestionAnswering"),c(gn,"class","relative group"),c(sd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(el,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(el,"rel","nofollow"),c(tl,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(tl,"rel","nofollow"),c(ol,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(ol,"rel","nofollow"),c(nl,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(nl,"rel","nofollow"),c(sl,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(sl,"rel","nofollow"),c(Et,"class","docstring"),c(Se,"class","docstring")},m(o,f){e(document.head,h),m(o,$,f),m(o,g,f),e(g,v),e(v,k),w(_,k,null),e(g,u),e(g,x),e(x,he),m(o,K,f),m(o,z,f),e(z,ee),e(ee,D),w(oe,D,null),e(z,pe),e(z,O),e(O,me),m(o,le,f),m(o,J,f),e(J,I),e(J,ne),e(ne,X),e(J,P),m(o,q,f),m(o,ae,f),e(ae,R),m(o,de,f),m(o,re,f),e(re,S),e(S,ue),m(o,ce,f),m(o,C,f),e(C,fe),m(o,B,f),m(o,te,f),e(te,ie),e(ie,H),e(te,ge),e(te,G),e(G,L),e(G,se),e(se,Q),e(G,_e),e(G,p),e(p,M),e(G,Y),e(G,we),e(we,$e),e(G,N),m(o,be,f),m(o,ve,f),e(ve,xe),e(ve,A),e(A,U),e(ve,Me),e(ve,Te),e(Te,V),e(ve,ze),m(o,Fe,f),m(o,Z,f),e(Z,ke),e(ke,Pd),w(Ps,Pd,null),e(Z,bf),e(Z,Cd),e(Cd,Ff),m(o,rm,f),m(o,Xe,f),w(Cs,Xe,null),e(Xe,$f),e(Xe,Wt),e(Wt,xf),e(Wt,pl),e(pl,Mf),e(Wt,zf),e(Wt,ml),e(ml,Pf),e(Wt,Cf),e(Wt,qs),e(qs,qf),e(Wt,jf),e(Xe,Af),e(Xe,Eo),e(Eo,If),e(Eo,ul),e(ul,Lf),e(Eo,Nf),e(Eo,fl),e(fl,Df),e(Eo,Of),e(Xe,Sf),e(Xe,qd),e(qd,Wf),e(Xe,Bf),w(js,Xe,null),m(o,im,f),m(o,wo,f),e(wo,Tn),e(Tn,jd),w(As,jd,null),e(wo,Rf),e(wo,Ad),e(Ad,Hf),m(o,lm,f),m(o,Ct,f),w(Is,Ct,null),e(Ct,Qf),e(Ct,Id),e(Id,Uf),e(Ct,Vf),e(Ct,En),e(En,gl),e(gl,Jf),e(En,Kf),e(En,_l),e(_l,Gf),e(En,Xf),e(Ct,Yf),e(Ct,Ls),e(Ls,Zf),e(Ls,vl),e(vl,eg),e(Ls,tg),m(o,dm,f),m(o,yo,f),e(yo,wn),e(wn,Ld),w(Ns,Ld,null),e(yo,og),e(yo,Nd),e(Nd,ng),m(o,cm,f),m(o,qt,f),w(Ds,qt,null),e(qt,sg),e(qt,Os),e(Os,ag),e(Os,Dd),e(Dd,rg),e(Os,ig),e(qt,lg),e(qt,yn),e(yn,kl),e(kl,dg),e(yn,cg),e(yn,Tl),e(Tl,hg),e(yn,pg),e(qt,mg),e(qt,Ss),e(Ss,ug),e(Ss,El),e(El,fg),e(Ss,gg),m(o,hm,f),m(o,bo,f),e(bo,bn),e(bn,Od),w(Ws,Od,null),e(bo,_g),e(bo,Sd),e(Sd,vg),m(o,pm,f),m(o,Fo,f),w(Bs,Fo,null),e(Fo,kg),e(Fo,Rs),e(Rs,Tg),e(Rs,wl),e(wl,Eg),e(Rs,wg),m(o,mm,f),m(o,$o,f),w(Hs,$o,null),e($o,yg),e($o,Qs),e(Qs,bg),e(Qs,yl),e(yl,Fg),e(Qs,$g),m(o,um,f),m(o,xo,f),e(xo,Fn),e(Fn,Wd),w(Us,Wd,null),e(xo,xg),e(xo,Bd),e(Bd,Mg),m(o,fm,f),m(o,Ye,f),w(Vs,Ye,null),e(Ye,zg),e(Ye,Rd),e(Rd,Pg),e(Ye,Cg),e(Ye,Js),e(Js,qg),
e(Js,bl),e(bl,jg),e(Js,Ag),e(Ye,Ig),e(Ye,Ks),e(Ks,Lg),e(Ks,Gs),e(Gs,Ng),e(Ks,Dg),e(Ye,Og),e(Ye,ot),w(Xs,ot,null),e(ot,Sg),e(ot,Mo),e(Mo,Wg),e(Mo,Fl),e(Fl,Bg),e(Mo,Rg),e(Mo,Hd),e(Hd,Hg),e(Mo,Qg),e(ot,Ug),w($n,ot,null),e(ot,Vg),e(ot,Qd),e(Qd,Jg),e(ot,Kg),w(Ys,ot,null),m(o,gm,f),m(o,zo,f),e(zo,xn),e(xn,Ud),w(Zs,Ud,null),e(zo,Gg),e(zo,Vd),e(Vd,Xg),m(o,_m,f),m(o,Be,f),w(ea,Be,null),e(Be,Yg),e(Be,Jd),e(Jd,Zg),e(Be,e_),e(Be,Kd),e(Kd,t_),e(Be,o_),e(Be,ta),e(ta,n_),e(ta,$l),e($l,s_),e(ta,a_),e(Be,r_),e(Be,oa),e(oa,i_),e(oa,na),e(na,l_),e(oa,d_),e(Be,c_),e(Be,nt),w(sa,nt,null),e(nt,h_),e(nt,Po),e(Po,p_),e(Po,xl),e(xl,m_),e(Po,u_),e(Po,Gd),e(Gd,f_),e(Po,g_),e(nt,__),w(Mn,nt,null),e(nt,v_),e(nt,Xd),e(Xd,k_),e(nt,T_),w(aa,nt,null),m(o,vm,f),m(o,Co,f),e(Co,zn),e(zn,Yd),w(ra,Yd,null),e(Co,E_),e(Co,Zd),e(Zd,w_),m(o,km,f),m(o,Re,f),w(ia,Re,null),e(Re,y_),e(Re,ec),e(ec,b_),e(Re,F_),e(Re,tc),e(tc,$_),e(Re,x_),e(Re,la),e(la,M_),e(la,Ml),e(Ml,z_),e(la,P_),e(Re,C_),e(Re,da),e(da,q_),e(da,ca),e(ca,j_),e(da,A_),e(Re,I_),e(Re,st),w(ha,st,null),e(st,L_),e(st,qo),e(qo,N_),e(qo,zl),e(zl,D_),e(qo,O_),e(qo,oc),e(oc,S_),e(qo,W_),e(st,B_),w(Pn,st,null),e(st,R_),e(st,nc),e(nc,H_),e(st,Q_),w(pa,st,null),m(o,Tm,f),m(o,jo,f),e(jo,Cn),e(Cn,sc),w(ma,sc,null),e(jo,U_),e(jo,ac),e(ac,V_),m(o,Em,f),m(o,Ze,f),w(ua,Ze,null),e(Ze,J_),e(Ze,rc),e(rc,K_),e(Ze,G_),e(Ze,fa),e(fa,X_),e(fa,Pl),e(Pl,Y_),e(fa,Z_),e(Ze,ev),e(Ze,ga),e(ga,tv),e(ga,_a),e(_a,ov),e(ga,nv),e(Ze,sv),e(Ze,We),w(va,We,null),e(We,av),e(We,Ao),e(Ao,rv),e(Ao,Cl),e(Cl,iv),e(Ao,lv),e(Ao,ic),e(ic,dv),e(Ao,cv),e(We,hv),w(qn,We,null),e(We,pv),e(We,lc),e(lc,mv),e(We,uv),w(ka,We,null),e(We,fv),e(We,dc),e(dc,gv),e(We,_v),w(Ta,We,null),m(o,wm,f),m(o,Io,f),e(Io,jn),e(jn,cc),w(Ea,cc,null),e(Io,vv),e(Io,hc),e(hc,kv),m(o,ym,f),m(o,et,f),w(wa,et,null),e(et,Tv),e(et,pc),e(pc,Ev),e(et,wv),e(et,ya),e(ya,yv),e(ya,ql),e(ql,bv),e(ya,Fv),e(et,$v),e(et,ba),e(ba,xv),e(ba,Fa),e(Fa,Mv),e(ba,zv),e(et,Pv),e(et,at),w($a,at,null),e(at,Cv),e(at,Lo),e(Lo,qv),e(Lo,jl),e(jl,jv),e(Lo,Av),e(Lo,mc),e(mc,Iv),e(Lo,Lv),e(at,Nv),w(An,at,null),e(at,Dv),e(at,uc),e(uc,Ov),e(at,Sv),w(xa,at,null),m(o,bm,f),m(o,No,f),e(No,In),e(In,fc),w(Ma,fc,null),e(No,Wv),e(No,gc),e(gc,Bv),m(o,Fm,f),m(o,He,f),w(za,He,null),e(He,Rv),e(He,_c),e(_c,Hv),e(He,Qv),e(He,vc),e(vc,Uv),e(He,Vv),e(He,Pa),e(Pa,Jv),e(Pa,Al),e(Al,Kv),e(Pa,Gv),e(He,Xv),e(He,Ca),e(Ca,Yv),e(Ca,qa),e(qa,Zv),e(Ca,ek),e(He,tk),e(He,rt),w(ja,rt,null),e(rt,ok),e(rt,Do),e(Do,nk),e(Do,Il),e(Il,sk),e(Do,ak),e(Do,kc),e(kc,rk),e(Do,ik),e(rt,lk),w(Ln,rt,null),e(rt,dk),e(rt,Tc),e(Tc,ck),e(rt,hk),w(Aa,rt,null),m(o,$m,f),m(o,Oo,f),e(Oo,Nn),e(Nn,Ec),w(Ia,Ec,null),e(Oo,pk),e(Oo,wc),e(wc,mk),m(o,xm,f),m(o,tt,f),w(La,tt,null),e(tt,uk),e(tt,So),e(So,fk),e(So,yc),e(yc,gk),e(So,_k),e(So,bc),e(bc,vk),e(So,kk),e(tt,Tk),e(tt,Na),e(Na,Ek),e(Na,Ll),e(Ll,wk),e(Na,yk),e(tt,bk),e(tt,Da),e(Da,Fk),e(Da,Oa),e(Oa,$k),e(Da,xk),e(tt,Mk),e(tt,it),w(Sa,it,null),e(it,zk),e(it,Wo),e(Wo,Pk),e(Wo,Nl),e(Nl,Ck),e(Wo,qk),e(Wo,Fc),e(Fc,jk),e(Wo,Ak),e(it,Ik),w(Dn,it,null),e(it,Lk),e(it,$c),e($c,Nk),e(it,Dk),w(Wa,it,null),m(o,Mm,f),m(o,Bo,f),e(Bo,On),e(On,xc),w(Ba,xc,null),e(Bo,Ok),e(Bo,Mc),e(Mc,Sk),m(o,zm,f),m(o,Qe,f),w(Ra,Qe,null),e(Qe,Wk),e(Qe,zc),e(zc,Bk),e(Qe,Rk),e(Qe,Ha),e(Ha,Hk),e(Ha,Dl),e(Dl,Qk),e(Ha,Uk),e(Qe,Vk),e(Qe,Qa),e(Qa,Jk),e(Qa,Ua),e(Ua,Kk),e(Qa,Gk),e(Qe,Xk),w(Sn,Qe,null),e(Qe,Yk),e(Qe,lt),w(Va,lt,null),e(lt,Zk),e(lt,Ro),e(Ro,eT),e(Ro,Ol),e(Ol,tT),e(Ro,oT),e(Ro,Pc),e(Pc,nT),e(Ro,sT),e(lt,aT),w(Wn,lt,null),e(lt,rT),e(lt,Cc),e(Cc,iT),e(lt,lT),w(Ja,lt,null),m(o,Pm,f),m(o,Ho,f),e(Ho,Bn),e(Bn,qc),w(Ka
,qc,null),e(Ho,dT),e(Ho,jc),e(jc,cT),m(o,Cm,f),m(o,je,f),w(Ga,je,null),e(je,hT),e(je,Ac),e(Ac,pT),e(je,mT),e(je,Ic),e(Ic,uT),e(je,fT),e(je,Xa),e(Xa,gT),e(Xa,Sl),e(Sl,_T),e(Xa,vT),e(je,kT),e(je,Ya),e(Ya,TT),e(Ya,Za),e(Za,ET),e(Ya,wT),e(je,yT),w(Rn,je,null),e(je,bT),e(je,dt),w(er,dt,null),e(dt,FT),e(dt,Qo),e(Qo,$T),e(Qo,Wl),e(Wl,xT),e(Qo,MT),e(Qo,Lc),e(Lc,zT),e(Qo,PT),e(dt,CT),w(Hn,dt,null),e(dt,qT),e(dt,Nc),e(Nc,jT),e(dt,AT),w(tr,dt,null),m(o,qm,f),m(o,Uo,f),e(Uo,Qn),e(Qn,Dc),w(or,Dc,null),e(Uo,IT),e(Uo,Oc),e(Oc,LT),m(o,jm,f),m(o,Ae,f),w(nr,Ae,null),e(Ae,NT),e(Ae,Sc),e(Sc,DT),e(Ae,OT),e(Ae,Wc),e(Wc,ST),e(Ae,WT),e(Ae,sr),e(sr,BT),e(sr,Bl),e(Bl,RT),e(sr,HT),e(Ae,QT),e(Ae,ar),e(ar,UT),e(ar,rr),e(rr,VT),e(ar,JT),e(Ae,KT),w(Un,Ae,null),e(Ae,GT),e(Ae,ct),w(ir,ct,null),e(ct,XT),e(ct,Vo),e(Vo,YT),e(Vo,Rl),e(Rl,ZT),e(Vo,e1),e(Vo,Bc),e(Bc,t1),e(Vo,o1),e(ct,n1),w(Vn,ct,null),e(ct,s1),e(ct,Rc),e(Rc,a1),e(ct,r1),w(lr,ct,null),m(o,Am,f),m(o,Jo,f),e(Jo,Jn),e(Jn,Hc),w(dr,Hc,null),e(Jo,i1),e(Jo,Qc),e(Qc,l1),m(o,Im,f),m(o,Ue,f),w(cr,Ue,null),e(Ue,d1),e(Ue,Uc),e(Uc,c1),e(Ue,h1),e(Ue,hr),e(hr,p1),e(hr,Hl),e(Hl,m1),e(hr,u1),e(Ue,f1),e(Ue,pr),e(pr,g1),e(pr,mr),e(mr,_1),e(pr,v1),e(Ue,k1),w(Kn,Ue,null),e(Ue,T1),e(Ue,ht),w(ur,ht,null),e(ht,E1),e(ht,Ko),e(Ko,w1),e(Ko,Ql),e(Ql,y1),e(Ko,b1),e(Ko,Vc),e(Vc,F1),e(Ko,$1),e(ht,x1),w(Gn,ht,null),e(ht,M1),e(ht,Jc),e(Jc,z1),e(ht,P1),w(fr,ht,null),m(o,Lm,f),m(o,Go,f),e(Go,Xn),e(Xn,Kc),w(gr,Kc,null),e(Go,C1),e(Go,Gc),e(Gc,q1),m(o,Nm,f),m(o,Ve,f),w(_r,Ve,null),e(Ve,j1),e(Ve,Xc),e(Xc,A1),e(Ve,I1),e(Ve,vr),e(vr,L1),e(vr,Ul),e(Ul,N1),e(vr,D1),e(Ve,O1),e(Ve,kr),e(kr,S1),e(kr,Tr),e(Tr,W1),e(kr,B1),e(Ve,R1),w(Yn,Ve,null),e(Ve,H1),e(Ve,pt),w(Er,pt,null),e(pt,Q1),e(pt,Xo),e(Xo,U1),e(Xo,Vl),e(Vl,V1),e(Xo,J1),e(Xo,Yc),e(Yc,K1),e(Xo,G1),e(pt,X1),w(Zn,pt,null),e(pt,Y1),e(pt,Zc),e(Zc,Z1),e(pt,eE),w(wr,pt,null),m(o,Dm,f),m(o,Yo,f),e(Yo,es),e(es,eh),w(yr,eh,null),e(Yo,tE),e(Yo,th),e(th,oE),m(o,Om,f),m(o,Ie,f),w(br,Ie,null),e(Ie,nE),e(Ie,oh),e(oh,sE),e(Ie,aE),e(Ie,nh),e(nh,rE),e(Ie,iE),e(Ie,Fr),e(Fr,lE),e(Fr,Jl),e(Jl,dE),e(Fr,cE),e(Ie,hE),e(Ie,$r),e($r,pE),e($r,xr),e(xr,mE),e($r,uE),e(Ie,fE),w(ts,Ie,null),e(Ie,gE),e(Ie,mt),w(Mr,mt,null),e(mt,_E),e(mt,Zo),e(Zo,vE),e(Zo,Kl),e(Kl,kE),e(Zo,TE),e(Zo,sh),e(sh,EE),e(Zo,wE),e(mt,yE),w(os,mt,null),e(mt,bE),e(mt,ah),e(ah,FE),e(mt,$E),w(zr,mt,null),m(o,Sm,f),m(o,en,f),e(en,ns),e(ns,rh),w(Pr,rh,null),e(en,xE),e(en,ih),e(ih,ME),m(o,Wm,f),m(o,Je,f),w(Cr,Je,null),e(Je,zE),e(Je,tn),e(tn,PE),e(tn,lh),e(lh,CE),e(tn,qE),e(tn,dh),e(dh,jE),e(tn,AE),e(Je,IE),e(Je,qr),e(qr,LE),e(qr,Gl),e(Gl,NE),e(qr,DE),e(Je,OE),e(Je,jr),e(jr,SE),e(jr,Ar),e(Ar,WE),e(jr,BE),e(Je,RE),w(ss,Je,null),e(Je,HE),e(Je,ut),w(Ir,ut,null),e(ut,QE),e(ut,on),e(on,UE),e(on,Xl),e(Xl,VE),e(on,JE),e(on,ch),e(ch,KE),e(on,GE),e(ut,XE),w(as,ut,null),e(ut,YE),e(ut,hh),e(hh,ZE),e(ut,ew),w(Lr,ut,null),m(o,Bm,f),m(o,nn,f),e(nn,rs),e(rs,ph),w(Nr,ph,null),e(nn,tw),e(nn,mh),e(mh,ow),m(o,Rm,f),m(o,Le,f),w(Dr,Le,null),e(Le,nw),e(Le,uh),e(uh,sw),e(Le,aw),e(Le,Or),e(Or,rw),e(Or,Yl),e(Yl,iw),e(Or,lw),e(Le,dw),e(Le,Sr),e(Sr,cw),e(Sr,Wr),e(Wr,hw),e(Sr,pw),e(Le,mw),e(Le,fh),e(fh,uw),e(Le,fw),e(Le,Bt),e(Bt,gh),e(gh,Br),e(Br,gw),e(Bt,_w),e(Bt,_h),e(_h,Rr),e(Rr,vw),e(Bt,kw),e(Bt,vh),e(vh,Hr),e(Hr,Tw),e(Bt,Ew),e(Bt,kh),e(kh,Qr),e(Qr,ww),e(Le,yw),e(Le,ft),w(Ur,ft,null),e(ft,bw),e(ft,sn),e(sn,Fw),e(sn,Th),e(Th,$w),e(sn,xw),e(sn,Eh),e(Eh,Mw),e(sn,zw),e(ft,Pw),w(is,ft,null),e(ft,Cw),e(ft,wh),e(wh,qw),e(ft,jw),w(Vr,ft,null),m(o,Hm,f),m(o,an,f),e(an,ls),e(ls,yh),w(Jr,yh,null),e(an,Aw),e(an,bh),e(bh,Iw),m(o,Qm,f),m(o,Ce,f),w(Kr,Ce
,null),e(Ce,Lw),e(Ce,Fh),e(Fh,Nw),e(Ce,Dw),e(Ce,$h),e($h,Ow),e(Ce,Sw),e(Ce,Gr),e(Gr,Ww),e(Gr,Zl),e(Zl,Bw),e(Gr,Rw),e(Ce,Hw),e(Ce,Xr),e(Xr,Qw),e(Xr,Yr),e(Yr,Uw),e(Xr,Vw),e(Ce,Jw),e(Ce,xh),e(xh,Kw),e(Ce,Gw),e(Ce,Rt),e(Rt,Mh),e(Mh,Zr),e(Zr,Xw),e(Rt,Yw),e(Rt,zh),e(zh,ei),e(ei,Zw),e(Rt,ey),e(Rt,Ph),e(Ph,ti),e(ti,ty),e(Rt,oy),e(Rt,Ch),e(Ch,oi),e(oi,ny),e(Ce,sy),e(Ce,gt),w(ni,gt,null),e(gt,ay),e(gt,rn),e(rn,ry),e(rn,qh),e(qh,iy),e(rn,ly),e(rn,jh),e(jh,dy),e(rn,cy),e(gt,hy),w(ds,gt,null),e(gt,py),e(gt,Ah),e(Ah,my),e(gt,uy),w(si,gt,null),m(o,Um,f),m(o,ln,f),e(ln,cs),e(cs,Ih),w(ai,Ih,null),e(ln,fy),e(ln,Lh),e(Lh,gy),m(o,Vm,f),m(o,Ne,f),w(ri,Ne,null),e(Ne,_y),e(Ne,ii),e(ii,vy),e(ii,Nh),e(Nh,ky),e(ii,Ty),e(Ne,Ey),e(Ne,li),e(li,wy),e(li,ed),e(ed,yy),e(li,by),e(Ne,Fy),e(Ne,di),e(di,$y),e(di,ci),e(ci,xy),e(di,My),e(Ne,zy),e(Ne,Dh),e(Dh,Py),e(Ne,Cy),e(Ne,Ht),e(Ht,Oh),e(Oh,hi),e(hi,qy),e(Ht,jy),e(Ht,Sh),e(Sh,pi),e(pi,Ay),e(Ht,Iy),e(Ht,Wh),e(Wh,mi),e(mi,Ly),e(Ht,Ny),e(Ht,Bh),e(Bh,ui),e(ui,Dy),e(Ne,Oy),e(Ne,_t),w(fi,_t,null),e(_t,Sy),e(_t,dn),e(dn,Wy),e(dn,Rh),e(Rh,By),e(dn,Ry),e(dn,Hh),e(Hh,Hy),e(dn,Qy),e(_t,Uy),w(hs,_t,null),e(_t,Vy),e(_t,Qh),e(Qh,Jy),e(_t,Ky),w(gi,_t,null),m(o,Jm,f),m(o,cn,f),e(cn,ps),e(ps,Uh),w(_i,Uh,null),e(cn,Gy),e(cn,Vh),e(Vh,Xy),m(o,Km,f),m(o,De,f),w(vi,De,null),e(De,Yy),e(De,Jh),e(Jh,Zy),e(De,eb),e(De,ki),e(ki,tb),e(ki,td),e(td,ob),e(ki,nb),e(De,sb),e(De,Ti),e(Ti,ab),e(Ti,Ei),e(Ei,rb),e(Ti,ib),e(De,lb),e(De,Kh),e(Kh,db),e(De,cb),e(De,Qt),e(Qt,Gh),e(Gh,wi),e(wi,hb),e(Qt,pb),e(Qt,Xh),e(Xh,yi),e(yi,mb),e(Qt,ub),e(Qt,Yh),e(Yh,bi),e(bi,fb),e(Qt,gb),e(Qt,Zh),e(Zh,Fi),e(Fi,_b),e(De,vb),e(De,vt),w($i,vt,null),e(vt,kb),e(vt,hn),e(hn,Tb),e(hn,ep),e(ep,Eb),e(hn,wb),e(hn,tp),e(tp,yb),e(hn,bb),e(vt,Fb),w(ms,vt,null),e(vt,$b),e(vt,op),e(op,xb),e(vt,Mb),w(xi,vt,null),m(o,Gm,f),m(o,pn,f),e(pn,us),e(us,np),w(Mi,np,null),e(pn,zb),e(pn,sp),e(sp,Pb),m(o,Xm,f),m(o,Oe,f),w(zi,Oe,null),e(Oe,Cb),e(Oe,ap),e(ap,qb),e(Oe,jb),e(Oe,Pi),e(Pi,Ab),e(Pi,od),e(od,Ib),e(Pi,Lb),e(Oe,Nb),e(Oe,Ci),e(Ci,Db),e(Ci,qi),e(qi,Ob),e(Ci,Sb),e(Oe,Wb),e(Oe,rp),e(rp,Bb),e(Oe,Rb),e(Oe,Ut),e(Ut,ip),e(ip,ji),e(ji,Hb),e(Ut,Qb),e(Ut,lp),e(lp,Ai),e(Ai,Ub),e(Ut,Vb),e(Ut,dp),e(dp,Ii),e(Ii,Jb),e(Ut,Kb),e(Ut,cp),e(cp,Li),e(Li,Gb),e(Oe,Xb),e(Oe,kt),w(Ni,kt,null),e(kt,Yb),e(kt,mn),e(mn,Zb),e(mn,hp),e(hp,e0),e(mn,t0),e(mn,pp),e(pp,o0),e(mn,n0),e(kt,s0),w(fs,kt,null),e(kt,a0),e(kt,mp),e(mp,r0),e(kt,i0),w(Di,kt,null),m(o,Ym,f),m(o,un,f),e(un,gs),e(gs,up),w(Oi,up,null),e(un,l0),e(un,fp),e(fp,d0),m(o,Zm,f),m(o,qe,f),w(Si,qe,null),e(qe,c0),e(qe,gp),e(gp,h0),e(qe,p0),e(qe,_p),e(_p,m0),e(qe,u0),e(qe,Wi),e(Wi,f0),e(Wi,nd),e(nd,g0),e(Wi,_0),e(qe,v0),e(qe,Bi),e(Bi,k0),e(Bi,Ri),e(Ri,T0),e(Bi,E0),e(qe,w0),e(qe,vp),e(vp,y0),e(qe,b0),e(qe,Vt),e(Vt,kp),e(kp,Hi),e(Hi,F0),e(Vt,$0),e(Vt,Tp),e(Tp,Qi),e(Qi,x0),e(Vt,M0),e(Vt,Ep),e(Ep,Ui),e(Ui,z0),e(Vt,P0),e(Vt,wp),e(wp,Vi),e(Vi,C0),e(qe,q0),e(qe,Tt),w(Ji,Tt,null),e(Tt,j0),e(Tt,fn),e(fn,A0),e(fn,yp),e(yp,I0),e(fn,L0),e(fn,bp),e(bp,N0),e(fn,D0),e(Tt,O0),w(_s,Tt,null),e(Tt,S0),e(Tt,Fp),e(Fp,W0),e(Tt,B0),w(Ki,Tt,null),m(o,eu,f),m(o,gn,f),e(gn,vs),e(vs,$p),w(Gi,$p,null),e(gn,R0),e(gn,xp),e(xp,H0),m(o,tu,f),m(o,Se,f),w(Xi,Se,null),e(Se,Q0),e(Se,_n),e(_n,U0),e(_n,Mp),e(Mp,V0),e(_n,J0),e(_n,zp),e(zp,K0),e(_n,G0),e(Se,X0),e(Se,Yi),e(Yi,Y0),e(Yi,sd),e(sd,Z0),e(Yi,e2),e(Se,t2),e(Se,Zi),e(Zi,o2),e(Zi,el),e(el,n2),e(Zi,s2),e(Se,a2),e(Se,Pp),e(Pp,r2),e(Se,i2),e(Se,Jt),e(Jt,Cp),e(Cp,tl),e(tl,l2),e(Jt,d2),e(Jt,qp),e(qp,ol),e(ol,c2),e(Jt,h2),e(Jt,jp),e(jp,nl),e(nl,p2),e(Jt,m2),e(Jt,Ap),e(Ap,sl),e(sl,u2),e(Se,f2),e(Se,Et),w(al,Et,nu
ll),e(Et,g2),e(Et,vn),e(vn,_2),e(vn,Ip),e(Ip,v2),e(vn,k2),e(vn,Lp),e(Lp,T2),e(vn,E2),e(Et,w2),w(ks,Et,null),e(Et,y2),e(Et,Np),e(Np,b2),e(Et,F2),w(rl,Et,null),ou=!0},p(o,[f]){const il={};f&2&&(il.$$scope={dirty:f,ctx:o}),$n.$set(il);const Dp={};f&2&&(Dp.$$scope={dirty:f,ctx:o}),Mn.$set(Dp);const Op={};f&2&&(Op.$$scope={dirty:f,ctx:o}),Pn.$set(Op);const Sp={};f&2&&(Sp.$$scope={dirty:f,ctx:o}),qn.$set(Sp);const ll={};f&2&&(ll.$$scope={dirty:f,ctx:o}),An.$set(ll);const Wp={};f&2&&(Wp.$$scope={dirty:f,ctx:o}),Ln.$set(Wp);const Bp={};f&2&&(Bp.$$scope={dirty:f,ctx:o}),Dn.$set(Bp);const Rp={};f&2&&(Rp.$$scope={dirty:f,ctx:o}),Sn.$set(Rp);const dl={};f&2&&(dl.$$scope={dirty:f,ctx:o}),Wn.$set(dl);const Hp={};f&2&&(Hp.$$scope={dirty:f,ctx:o}),Rn.$set(Hp);const Qp={};f&2&&(Qp.$$scope={dirty:f,ctx:o}),Hn.$set(Qp);const Up={};f&2&&(Up.$$scope={dirty:f,ctx:o}),Un.$set(Up);const Vp={};f&2&&(Vp.$$scope={dirty:f,ctx:o}),Vn.$set(Vp);const Jp={};f&2&&(Jp.$$scope={dirty:f,ctx:o}),Kn.$set(Jp);const cl={};f&2&&(cl.$$scope={dirty:f,ctx:o}),Gn.$set(cl);const Kp={};f&2&&(Kp.$$scope={dirty:f,ctx:o}),Yn.$set(Kp);const Kt={};f&2&&(Kt.$$scope={dirty:f,ctx:o}),Zn.$set(Kt);const Gp={};f&2&&(Gp.$$scope={dirty:f,ctx:o}),ts.$set(Gp);const Xp={};f&2&&(Xp.$$scope={dirty:f,ctx:o}),os.$set(Xp);const Yp={};f&2&&(Yp.$$scope={dirty:f,ctx:o}),ss.$set(Yp);const kn={};f&2&&(kn.$$scope={dirty:f,ctx:o}),as.$set(kn);const Zp={};f&2&&(Zp.$$scope={dirty:f,ctx:o}),is.$set(Zp);const em={};f&2&&(em.$$scope={dirty:f,ctx:o}),ds.$set(em);const hl={};f&2&&(hl.$$scope={dirty:f,ctx:o}),hs.$set(hl);const tm={};f&2&&(tm.$$scope={dirty:f,ctx:o}),ms.$set(tm);const om={};f&2&&(om.$$scope={dirty:f,ctx:o}),fs.$set(om);const nm={};f&2&&(nm.$$scope={dirty:f,ctx:o}),_s.$set(nm);const jt={};f&2&&(jt.$$scope={dirty:f,ctx:o}),ks.$set(jt)},i(o){ou||(y(_.$$.fragment,o),y(oe.$$.fragment,o),y(Ps.$$.fragment,o),y(Cs.$$.fragment,o),y(js.$$.fragment,o),y(As.$$.fragment,o),y(Is.$$.fragment,o),y(Ns.$$.fragment,o),y(Ds.$$.fragment,o),y(Ws.$$.fragment,o),y(Bs.$$.fragment,o),y(Hs.$$.fragment,o),y(Us.$$.fragment,o),y(Vs.$$.fragment,o),y(Xs.$$.fragment,o),y($n.$$.fragment,o),y(Ys.$$.fragment,o),y(Zs.$$.fragment,o),y(ea.$$.fragment,o),y(sa.$$.fragment,o),y(Mn.$$.fragment,o),y(aa.$$.fragment,o),y(ra.$$.fragment,o),y(ia.$$.fragment,o),y(ha.$$.fragment,o),y(Pn.$$.fragment,o),y(pa.$$.fragment,o),y(ma.$$.fragment,o),y(ua.$$.fragment,o),y(va.$$.fragment,o),y(qn.$$.fragment,o),y(ka.$$.fragment,o),y(Ta.$$.fragment,o),y(Ea.$$.fragment,o),y(wa.$$.fragment,o),y($a.$$.fragment,o),y(An.$$.fragment,o),y(xa.$$.fragment,o),y(Ma.$$.fragment,o),y(za.$$.fragment,o),y(ja.$$.fragment,o),y(Ln.$$.fragment,o),y(Aa.$$.fragment,o),y(Ia.$$.fragment,o),y(La.$$.fragment,o),y(Sa.$$.fragment,o),y(Dn.$$.fragment,o),y(Wa.$$.fragment,o),y(Ba.$$.fragment,o),y(Ra.$$.fragment,o),y(Sn.$$.fragment,o),y(Va.$$.fragment,o),y(Wn.$$.fragment,o),y(Ja.$$.fragment,o),y(Ka.$$.fragment,o),y(Ga.$$.fragment,o),y(Rn.$$.fragment,o),y(er.$$.fragment,o),y(Hn.$$.fragment,o),y(tr.$$.fragment,o),y(or.$$.fragment,o),y(nr.$$.fragment,o),y(Un.$$.fragment,o),y(ir.$$.fragment,o),y(Vn.$$.fragment,o),y(lr.$$.fragment,o),y(dr.$$.fragment,o),y(cr.$$.fragment,o),y(Kn.$$.fragment,o),y(ur.$$.fragment,o),y(Gn.$$.fragment,o),y(fr.$$.fragment,o),y(gr.$$.fragment,o),y(_r.$$.fragment,o),y(Yn.$$.fragment,o),y(Er.$$.fragment,o),y(Zn.$$.fragment,o),y(wr.$$.fragment,o),y(yr.$$.fragment,o),y(br.$$.fragment,o),y(ts.$$.fragment,o),y(Mr.$$.fragment,o),y(os.$$.fragment,o),y(zr.$$.fragment,o),y(Pr.$$.fragment,o),y(Cr.$$.fragment,o),y(ss.$$.fragment,o),y(Ir.$$
.fragment,o),y(as.$$.fragment,o),y(Lr.$$.fragment,o),y(Nr.$$.fragment,o),y(Dr.$$.fragment,o),y(Ur.$$.fragment,o),y(is.$$.fragment,o),y(Vr.$$.fragment,o),y(Jr.$$.fragment,o),y(Kr.$$.fragment,o),y(ni.$$.fragment,o),y(ds.$$.fragment,o),y(si.$$.fragment,o),y(ai.$$.fragment,o),y(ri.$$.fragment,o),y(fi.$$.fragment,o),y(hs.$$.fragment,o),y(gi.$$.fragment,o),y(_i.$$.fragment,o),y(vi.$$.fragment,o),y($i.$$.fragment,o),y(ms.$$.fragment,o),y(xi.$$.fragment,o),y(Mi.$$.fragment,o),y(zi.$$.fragment,o),y(Ni.$$.fragment,o),y(fs.$$.fragment,o),y(Di.$$.fragment,o),y(Oi.$$.fragment,o),y(Si.$$.fragment,o),y(Ji.$$.fragment,o),y(_s.$$.fragment,o),y(Ki.$$.fragment,o),y(Gi.$$.fragment,o),y(Xi.$$.fragment,o),y(al.$$.fragment,o),y(ks.$$.fragment,o),y(rl.$$.fragment,o),ou=!0)},o(o){b(_.$$.fragment,o),b(oe.$$.fragment,o),b(Ps.$$.fragment,o),b(Cs.$$.fragment,o),b(js.$$.fragment,o),b(As.$$.fragment,o),b(Is.$$.fragment,o),b(Ns.$$.fragment,o),b(Ds.$$.fragment,o),b(Ws.$$.fragment,o),b(Bs.$$.fragment,o),b(Hs.$$.fragment,o),b(Us.$$.fragment,o),b(Vs.$$.fragment,o),b(Xs.$$.fragment,o),b($n.$$.fragment,o),b(Ys.$$.fragment,o),b(Zs.$$.fragment,o),b(ea.$$.fragment,o),b(sa.$$.fragment,o),b(Mn.$$.fragment,o),b(aa.$$.fragment,o),b(ra.$$.fragment,o),b(ia.$$.fragment,o),b(ha.$$.fragment,o),b(Pn.$$.fragment,o),b(pa.$$.fragment,o),b(ma.$$.fragment,o),b(ua.$$.fragment,o),b(va.$$.fragment,o),b(qn.$$.fragment,o),b(ka.$$.fragment,o),b(Ta.$$.fragment,o),b(Ea.$$.fragment,o),b(wa.$$.fragment,o),b($a.$$.fragment,o),b(An.$$.fragment,o),b(xa.$$.fragment,o),b(Ma.$$.fragment,o),b(za.$$.fragment,o),b(ja.$$.fragment,o),b(Ln.$$.fragment,o),b(Aa.$$.fragment,o),b(Ia.$$.fragment,o),b(La.$$.fragment,o),b(Sa.$$.fragment,o),b(Dn.$$.fragment,o),b(Wa.$$.fragment,o),b(Ba.$$.fragment,o),b(Ra.$$.fragment,o),b(Sn.$$.fragment,o),b(Va.$$.fragment,o),b(Wn.$$.fragment,o),b(Ja.$$.fragment,o),b(Ka.$$.fragment,o),b(Ga.$$.fragment,o),b(Rn.$$.fragment,o),b(er.$$.fragment,o),b(Hn.$$.fragment,o),b(tr.$$.fragment,o),b(or.$$.fragment,o),b(nr.$$.fragment,o),b(Un.$$.fragment,o),b(ir.$$.fragment,o),b(Vn.$$.fragment,o),b(lr.$$.fragment,o),b(dr.$$.fragment,o),b(cr.$$.fragment,o),b(Kn.$$.fragment,o),b(ur.$$.fragment,o),b(Gn.$$.fragment,o),b(fr.$$.fragment,o),b(gr.$$.fragment,o),b(_r.$$.fragment,o),b(Yn.$$.fragment,o),b(Er.$$.fragment,o),b(Zn.$$.fragment,o),b(wr.$$.fragment,o),b(yr.$$.fragment,o),b(br.$$.fragment,o),b(ts.$$.fragment,o),b(Mr.$$.fragment,o),b(os.$$.fragment,o),b(zr.$$.fragment,o),b(Pr.$$.fragment,o),b(Cr.$$.fragment,o),b(ss.$$.fragment,o),b(Ir.$$.fragment,o),b(as.$$.fragment,o),b(Lr.$$.fragment,o),b(Nr.$$.fragment,o),b(Dr.$$.fragment,o),b(Ur.$$.fragment,o),b(is.$$.fragment,o),b(Vr.$$.fragment,o),b(Jr.$$.fragment,o),b(Kr.$$.fragment,o),b(ni.$$.fragment,o),b(ds.$$.fragment,o),b(si.$$.fragment,o),b(ai.$$.fragment,o),b(ri.$$.fragment,o),b(fi.$$.fragment,o),b(hs.$$.fragment,o),b(gi.$$.fragment,o),b(_i.$$.fragment,o),b(vi.$$.fragment,o),b($i.$$.fragment,o),b(ms.$$.fragment,o),b(xi.$$.fragment,o),b(Mi.$$.fragment,o),b(zi.$$.fragment,o),b(Ni.$$.fragment,o),b(fs.$$.fragment,o),b(Di.$$.fragment,o),b(Oi.$$.fragment,o),b(Si.$$.fragment,o),b(Ji.$$.fragment,o),b(_s.$$.fragment,o),b(Ki.$$.fragment,o),b(Gi.$$.fragment,o),b(Xi.$$.fragment,o),b(al.$$.fragment,o),b(ks.$$.fragment,o),b(rl.$$.fragment,o),ou=!1},d(o){t(h),o&&t($),o&&t(g),F(_),o&&t(K),o&&t(z),F(oe),o&&t(le),o&&t(J),o&&t(q),o&&t(ae),o&&t(de),o&&t(re),o&&t(ce),o&&t(C),o&&t(B),o&&t(te),o&&t(be),o&&t(ve),o&&t(Fe),o&&t(Z),F(Ps),o&&t(rm),o&&t(Xe),F(Cs),F(js),o&&t(im),o&&t(wo),F(As),o&&t(lm),o&&t(Ct),F(Is),o&&t(dm),o&&t(yo),F(Ns),o
&&t(cm),o&&t(qt),F(Ds),o&&t(hm),o&&t(bo),F(Ws),o&&t(pm),o&&t(Fo),F(Bs),o&&t(mm),o&&t($o),F(Hs),o&&t(um),o&&t(xo),F(Us),o&&t(fm),o&&t(Ye),F(Vs),F(Xs),F($n),F(Ys),o&&t(gm),o&&t(zo),F(Zs),o&&t(_m),o&&t(Be),F(ea),F(sa),F(Mn),F(aa),o&&t(vm),o&&t(Co),F(ra),o&&t(km),o&&t(Re),F(ia),F(ha),F(Pn),F(pa),o&&t(Tm),o&&t(jo),F(ma),o&&t(Em),o&&t(Ze),F(ua),F(va),F(qn),F(ka),F(Ta),o&&t(wm),o&&t(Io),F(Ea),o&&t(ym),o&&t(et),F(wa),F($a),F(An),F(xa),o&&t(bm),o&&t(No),F(Ma),o&&t(Fm),o&&t(He),F(za),F(ja),F(Ln),F(Aa),o&&t($m),o&&t(Oo),F(Ia),o&&t(xm),o&&t(tt),F(La),F(Sa),F(Dn),F(Wa),o&&t(Mm),o&&t(Bo),F(Ba),o&&t(zm),o&&t(Qe),F(Ra),F(Sn),F(Va),F(Wn),F(Ja),o&&t(Pm),o&&t(Ho),F(Ka),o&&t(Cm),o&&t(je),F(Ga),F(Rn),F(er),F(Hn),F(tr),o&&t(qm),o&&t(Uo),F(or),o&&t(jm),o&&t(Ae),F(nr),F(Un),F(ir),F(Vn),F(lr),o&&t(Am),o&&t(Jo),F(dr),o&&t(Im),o&&t(Ue),F(cr),F(Kn),F(ur),F(Gn),F(fr),o&&t(Lm),o&&t(Go),F(gr),o&&t(Nm),o&&t(Ve),F(_r),F(Yn),F(Er),F(Zn),F(wr),o&&t(Dm),o&&t(Yo),F(yr),o&&t(Om),o&&t(Ie),F(br),F(ts),F(Mr),F(os),F(zr),o&&t(Sm),o&&t(en),F(Pr),o&&t(Wm),o&&t(Je),F(Cr),F(ss),F(Ir),F(as),F(Lr),o&&t(Bm),o&&t(nn),F(Nr),o&&t(Rm),o&&t(Le),F(Dr),F(Ur),F(is),F(Vr),o&&t(Hm),o&&t(an),F(Jr),o&&t(Qm),o&&t(Ce),F(Kr),F(ni),F(ds),F(si),o&&t(Um),o&&t(ln),F(ai),o&&t(Vm),o&&t(Ne),F(ri),F(fi),F(hs),F(gi),o&&t(Jm),o&&t(cn),F(_i),o&&t(Km),o&&t(De),F(vi),F($i),F(ms),F(xi),o&&t(Gm),o&&t(pn),F(Mi),o&&t(Xm),o&&t(Oe),F(zi),F(Ni),F(fs),F(Di),o&&t(Ym),o&&t(un),F(Oi),o&&t(Zm),o&&t(qe),F(Si),F(Ji),F(_s),F(Ki),o&&t(eu),o&&t(gn),F(Gi),o&&t(tu),o&&t(Se),F(Xi),F(al),F(ks),F(rl)}}}const bz={local:"electra",sections:[{local:"overview",title:"Overview"},{local:"transformers.ElectraConfig",title:"ElectraConfig"},{local:"transformers.ElectraTokenizer",title:"ElectraTokenizer"},{local:"transformers.ElectraTokenizerFast",title:"ElectraTokenizerFast"},{local:"transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput",title:"Electra specific 
outputs"},{local:"transformers.ElectraModel",title:"ElectraModel"},{local:"transformers.ElectraForPreTraining",title:"ElectraForPreTraining"},{local:"transformers.ElectraForMaskedLM",title:"ElectraForMaskedLM"},{local:"transformers.ElectraForSequenceClassification",title:"ElectraForSequenceClassification"},{local:"transformers.ElectraForMultipleChoice",title:"ElectraForMultipleChoice"},{local:"transformers.ElectraForTokenClassification",title:"ElectraForTokenClassification"},{local:"transformers.ElectraForQuestionAnswering",title:"ElectraForQuestionAnswering"},{local:"transformers.TFElectraModel",title:"TFElectraModel"},{local:"transformers.TFElectraForPreTraining",title:"TFElectraForPreTraining"},{local:"transformers.TFElectraForMaskedLM",title:"TFElectraForMaskedLM"},{local:"transformers.TFElectraForSequenceClassification",title:"TFElectraForSequenceClassification"},{local:"transformers.TFElectraForMultipleChoice",title:"TFElectraForMultipleChoice"},{local:"transformers.TFElectraForTokenClassification",title:"TFElectraForTokenClassification"},{local:"transformers.TFElectraForQuestionAnswering",title:"TFElectraForQuestionAnswering"},{local:"transformers.FlaxElectraModel",title:"FlaxElectraModel"},{local:"transformers.FlaxElectraForPreTraining",title:"FlaxElectraForPreTraining"},{local:"transformers.FlaxElectraForMaskedLM",title:"FlaxElectraForMaskedLM"},{local:"transformers.FlaxElectraForSequenceClassification",title:"FlaxElectraForSequenceClassification"},{local:"transformers.FlaxElectraForMultipleChoice",title:"FlaxElectraForMultipleChoice"},{local:"transformers.FlaxElectraForTokenClassification",title:"FlaxElectraForTokenClassification"},{local:"transformers.FlaxElectraForQuestionAnswering",title:"FlaxElectraForQuestionAnswering"}],title:"ELECTRA"};function Fz(j,h,$){let{fw:g}=h;return j.$$set=v=>{"fw"in v&&$(0,g=v.fw)},[g]}class qz extends QM{constructor(h){super();UM(this,h,Fz,yz,VM,{fw:0})}}export{qz as default,bz as metadata};
9,905
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/fsmt.mdx-11aefc06.js
import{S as fi,i as pi,s as ui,e as n,k as d,w as u,t as a,L as _i,c as s,d as o,m as c,a as r,x as _,h as i,b as l,J as e,g as m,y as g,q as v,o as k,B as T}from"../../chunks/vendor-b1433968.js";import{T as mi}from"../../chunks/Tip-c3840994.js";import{D as H}from"../../chunks/Docstring-ff504c58.js";import{C as Qr}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Te}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function gi(be){let f,x,p,w,A;return{c(){f=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),p=n("code"),w=a("Module"),A=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(M){f=s(M,"P",{});var F=r(f);x=i(F,"Although the recipe for forward pass needs to be defined within this function, one should call the "),p=s(F,"CODE",{});var D=r(p);w=i(D,"Module"),D.forEach(o),A=i(F,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),F.forEach(o)},m(M,F){m(M,f,F),e(f,x),e(f,p),e(p,w),e(f,A)},d(M){M&&o(f)}}}function vi(be){let f,x,p,w,A;return{c(){f=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),p=n("code"),w=a("Module"),A=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(M){f=s(M,"P",{});var F=r(f);x=i(F,"Although the recipe for forward pass needs to be defined within this function, one should call the "),p=s(F,"CODE",{});var D=r(p);w=i(D,"Module"),D.forEach(o),A=i(F,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),F.forEach(o)},m(M,F){m(M,f,F),e(f,x),e(f,p),e(p,w),e(f,A)},d(M){M&&o(f)}}}function ki(be){let f,x,p,w,A,M,F,D,on,To,U,$t,nn,sn,ye,rn,an,bo,V,re,zt,we,dn,Et,cn,yo,ae,ln,Me,hn,mn,wo,st,fn,Mo,rt,qt,pn,Fo,O,un,Fe,_n,gn,Se,vn,kn,So,Q,ie,xt,$e,Tn,Ct,bn,$o,at,X,yn,it,wn,Mn,dt,Fn,Sn,zo,J,de,Pt,ze,$n,At,zn,Eo,S,Ee,En,qe,qn,ct,xn,Cn,Pn,K,An,lt,In,Dn,ht,Nn,Ln,On,It,Gn,jn,xe,Bn,ce,Ce,Wn,Y,Rn,Dt,Hn,Un,Nt,Vn,Qn,qo,Z,le,Lt,Pe,Xn,Ot,Jn,xo,b,Ae,Kn,Gt,Yn,Zn,N,jt,es,ts,Bt,os,ns,L,ss,Wt,rs,as,Rt,is,ds,Ht,cs,ls,hs,Ie,ms,Ut,fs,ps,us,De,_s,mt,gs,vs,ks,G,Ne,Ts,Vt,bs,ys,Le,ft,ws,Qt,Ms,Fs,pt,Ss,Xt,$s,zs,he,Oe,Es,Ge,qs,Jt,xs,Cs,Ps,C,je,As,Kt,Is,Ds,Be,Ns,ee,Ls,Yt,Os,Gs,Zt,js,Bs,Ws,eo,Rs,Hs,to,Co,te,me,oo,We,Us,no,Vs,Po,E,Re,Qs,so,Xs,Js,He,Ks,ut,Ys,Zs,er,Ue,tr,Ve,or,nr,sr,P,Qe,rr,oe,ar,_t,ir,dr,ro,cr,lr,hr,fe,mr,ao,fr,pr,Xe,Ao,ne,pe,io,Je,ur,co,_r,Io,q,Ke,gr,lo,vr,kr,Ye,Tr,gt,br,yr,wr,Ze,Mr,et,Fr,Sr,$r,y,tt,zr,se,Er,vt,qr,xr,ho,Cr,Pr,Ar,ue,Ir,mo,Dr,Nr,fo,Lr,Or,po,Gr,jr,uo,Br,Wr,_e,ge,_o,ot,Rr,go,Hr,Do;return M=new Te({}),we=new Te({}),$e=new Te({}),ze=new Te({}),Ee=new H({props:{name:"class transformers.FSMTConfig",anchor:"transformers.FSMTConfig",parameters:[{name:"langs",val:" = ['en', 'de']"},{name:"src_vocab_size",val:" = 42024"},{name:"tgt_vocab_size",val:" = 42024"},{name:"activation_function",val:" = 'relu'"},{name:"d_model",val:" = 1024"},{name:"max_length",val:" = 200"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_layers",val:" = 12"},{name:"encoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_ffn_dim",val:" = 
4096"},{name:"decoder_layers",val:" = 12"},{name:"decoder_attention_heads",val:" = 16"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"attention_dropout",val:" = 0.0"},{name:"dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"is_encoder_decoder",val:" = True"},{name:"scale_embedding",val:" = True"},{name:"tie_word_embeddings",val:" = False"},{name:"num_beams",val:" = 5"},{name:"length_penalty",val:" = 1.0"},{name:"early_stopping",val:" = False"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"forced_eos_token_id",val:" = 2"},{name:"**common_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/configuration_fsmt.py#L41",parametersDescription:[{anchor:"transformers.FSMTConfig.langs",description:`<strong>langs</strong> (<code>List[str]</code>) &#x2014; A list with source language and target_language (e.g., [&#x2018;en&#x2019;, &#x2018;ru&#x2019;]).`,name:"langs"},{anchor:"transformers.FSMTConfig.src_vocab_size",description:`<strong>src_vocab_size</strong> (<code>int</code>) &#x2014; Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed to the forward method in the encoder.`,name:"src_vocab_size"},{anchor:"transformers.FSMTConfig.tgt_vocab_size",description:`<strong>tgt_vocab_size</strong> (<code>int</code>) &#x2014; Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed to the forward method in the decoder.`,name:"tgt_vocab_size"},{anchor:"transformers.FSMTConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.FSMTConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.FSMTConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.FSMTConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.FSMTConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.FSMTConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.FSMTConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in 
decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.FSMTConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.FSMTConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.FSMTConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.FSMTConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.FSMTConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.FSMTConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"init_std"},{anchor:"transformers.FSMTConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.FSMTConfig.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Beginning of stream token id.`,name:"bos_token_id"},{anchor:"transformers.FSMTConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Padding token id.`,name:"pad_token_id"},{anchor:"transformers.FSMTConfig.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; End of stream token id.`,name:"eos_token_id"},{anchor:"transformers.FSMTConfig.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; This model starts decoding with <code>eos_token_id</code> encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): Google &#x201C;layerdrop arxiv&#x201D;, as its not explainable in one line. 
decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): Google &#x201C;layerdrop arxiv&#x201D;, as its not explainable in one line.`,name:"decoder_start_token_id"},{anchor:"transformers.FSMTConfig.is_encoder_decoder",description:`<strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether this is an encoder/decoder model.`,name:"is_encoder_decoder"},{anchor:"transformers.FSMTConfig.tie_word_embeddings",description:`<strong>tie_word_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to tie input and output embeddings.`,name:"tie_word_embeddings"},{anchor:"transformers.FSMTConfig.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of beams for beam search that will be used by default in the <code>generate</code> method of the model. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.FSMTConfig.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Exponential penalty to the length that will be used by default in the <code>generate</code> method of the model.`,name:"length_penalty"},{anchor:"transformers.FSMTConfig.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag that will be used by default in the <code>generate</code> method of the model. Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"early_stopping"},{anchor:"transformers.FSMTConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.FSMTConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),xe=new Qr({props:{code:`from transformers import FSMTConfig, FSMTModel config = FSMTConfig.from_pretrained('facebook/wmt19-en-ru') model = FSMTModel(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FSMTConfig, FSMTModel <span class="hljs-meta">&gt;&gt;&gt; </span>config = FSMTConfig.from_pretrained(<span class="hljs-string">&#x27;facebook/wmt19-en-ru&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FSMTModel(config)`}}),Ce=new H({props:{name:"to_dict",anchor:"transformers.FSMTConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/configuration_fsmt.py#L209",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),Pe=new Te({}),Ae=new H({props:{name:"class transformers.FSMTTokenizer",anchor:"transformers.FSMTTokenizer",parameters:[{name:"langs",val:" = None"},{name:"src_vocab_file",val:" = None"},{name:"tgt_vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"do_lower_case",val:" = False"},{name:"unk_token",val:" = '<unk>'"},{name:"bos_token",val:" = '<s>'"},{name:"sep_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/tokenization_fsmt.py#L137",parametersDescription:[{anchor:"transformers.FSMTTokenizer.langs",description:`<strong>langs</strong> (<code>List[str]</code>) &#x2014; A list of two languages to translate from and to, for instance <code>[&quot;en&quot;, &quot;ru&quot;]</code>.`,name:"langs"},{anchor:"transformers.FSMTTokenizer.src_vocab_file",description:`<strong>src_vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary for the source language.`,name:"src_vocab_file"},{anchor:"transformers.FSMTTokenizer.tgt_vocab_file",description:`<strong>tgt_vocab_file</strong> (<code>st</code>) &#x2014; File containing the vocabulary for the target language.`,name:"tgt_vocab_file"},{anchor:"transformers.FSMTTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; File containing the merges.`,name:"merges_file"},{anchor:"transformers.FSMTTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.FSMTTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.FSMTTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. 
Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.FSMTTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.FSMTTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"}]}}),Ne=new H({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.FSMTTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/tokenization_fsmt.py#L397",parametersDescription:[{anchor:"transformers.FSMTTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.FSMTTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Oe=new H({props:{name:"get_special_tokens_mask",anchor:"transformers.FSMTTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/tokenization_fsmt.py#L423",parametersDescription:[{anchor:"transformers.FSMTTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FSMTTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.FSMTTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a 
special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),je=new H({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FSMTTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/tokenization_fsmt.py#L451",parametersDescription:[{anchor:"transformers.FSMTTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FSMTTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Be=new Qr({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),We=new Te({}),Re=new H({props:{name:"class transformers.FSMTModel",anchor:"transformers.FSMTModel",parameters:[{name:"config",val:": FSMTConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/modeling_fsmt.py#L994",parametersDescription:[{anchor:"transformers.FSMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig">FSMTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qe=new H({props:{name:"forward",anchor:"transformers.FSMTModel.forward",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[typing.Tuple] = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/modeling_fsmt.py#L1008",parametersDescription:[{anchor:"transformers.FSMTModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>IIndices can be obtained using <code>FSTMTokenizer</code>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FSMTModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FSMTModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTTokenizer">FSMTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>FSMT uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.FSMTModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FSMTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FSMTModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.FSMTModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.FSMTModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>Tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FSMTModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.FSMTModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.FSMTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FSMTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FSMTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig" >FSMTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, 
after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),fe=new mi({props:{$$slots:{default:[gi]},$$scope:{ctx:be}}}),Xe=new Qr({props:{code:`from transformers import FSMTTokenizer, FSMTModel import torch tokenizer = FSMTTokenizer.from_pretrained('facebook/wmt19-ru-en') model = FSMTModel.from_pretrained('facebook/wmt19-ru-en') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FSMTTokenizer, FSMTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FSMTTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/wmt19-ru-en&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FSMTModel.from_pretrained(<span class="hljs-string">&#x27;facebook/wmt19-ru-en&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Je=new Te({}),Ke=new H({props:{name:"class transformers.FSMTForConditionalGeneration",anchor:"transformers.FSMTForConditionalGeneration",parameters:[{name:"config",val:": FSMTConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/modeling_fsmt.py#L1118",parametersDescription:[{anchor:"transformers.FSMTForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig">FSMTConfig</a>) &#x2014; Model configuration class with all the 
parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),tt=new H({props:{name:"forward",anchor:"transformers.FSMTForConditionalGeneration.forward",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fsmt/modeling_fsmt.py#L1134",parametersDescription:[{anchor:"transformers.FSMTForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>IIndices can be obtained using <code>FSTMTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FSMTForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FSMTForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTTokenizer">FSMTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>FSMT uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.FSMTForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FSMTForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FSMTForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.FSMTForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.FSMTForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>Tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FSMTForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.FSMTForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.FSMTForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FSMTForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FSMTForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FSMTForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig" >FSMTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ue=new mi({props:{$$slots:{default:[vi]},$$scope:{ctx:be}}}),ot=new Te({}),{c(){f=n("meta"),x=d(),p=n("h1"),w=n("a"),A=n("span"),u(M.$$.fragment),F=d(),D=n("span"),on=a("FSMT"),To=d(),U=n("p"),$t=n("strong"),nn=a("DISCLAIMER:"),sn=a(" If you see something strange, file a "),ye=n("a"),rn=a("Github Issue"),an=a(` and assign @stas00.`),bo=d(),V=n("h2"),re=n("a"),zt=n("span"),u(we.$$.fragment),dn=d(),Et=n("span"),cn=a("Overview"),yo=d(),ae=n("p"),ln=a("FSMT (FairSeq MachineTranslation) models were introduced in "),Me=n("a"),hn=a("Facebook FAIR\u2019s WMT19 News Translation Task Submission"),mn=a(" by Nathan Ng, Kyra Yee, Alexei Baevski, Myle Ott, Michael Auli, Sergey Edunov."),wo=d(),st=n("p"),fn=a("The abstract of the paper is the following:"),Mo=d(),rt=n("p"),qt=n("em"),pn=a(`This paper describes Facebook FAIR\u2019s submission to the WMT19 shared news translation task. We participate in two language pairs and four language directions, English <-> German and English <-> Russian. Following our submission from last year, our baseline systems are large BPE-based transformer models trained with the Fairseq sequence modeling toolkit which rely on sampled back-translations. This year we experiment with different bitext data filtering schemes, as well as with adding filtered back-translated data. We also ensemble and fine-tune our models on domain-specific data, then decode using noisy channel model reranking. Our submissions are ranked first in all four directions of the human evaluation campaign. On En->De, our system significantly outperforms other systems as well as human translations. This system improves upon our WMT\u201918 submission by 4.5 BLEU points.`),Fo=d(),O=n("p"),un=a("This model was contributed by "),Fe=n("a"),_n=a("stas"),gn=a(`. The original code can be found `),Se=n("a"),vn=a("here"),kn=a("."),So=d(),Q=n("h2"),ie=n("a"),xt=n("span"),u($e.$$.fragment),Tn=d(),Ct=n("span"),bn=a("Implementation Notes"),$o=d(),at=n("ul"),X=n("li"),yn=a(`FSMT uses source and target vocabulary pairs that aren\u2019t combined into one. It doesn\u2019t share embeddings tokens either. 
Its tokenizer is very similar to `),it=n("a"),wn=a("XLMTokenizer"),Mn=a(` and the main model is derived from `),dt=n("a"),Fn=a("BartModel"),Sn=a("."),zo=d(),J=n("h2"),de=n("a"),Pt=n("span"),u(ze.$$.fragment),$n=d(),At=n("span"),zn=a("FSMTConfig"),Eo=d(),S=n("div"),u(Ee.$$.fragment),En=d(),qe=n("p"),qn=a("This is the configuration class to store the configuration of a "),ct=n("a"),xn=a("FSMTModel"),Cn=a(`. It is used to instantiate a FSMT model according to the specified arguments, defining the model architecture.`),Pn=d(),K=n("p"),An=a("Configuration objects inherit from "),lt=n("a"),In=a("PretrainedConfig"),Dn=a(` and can be used to control the model outputs. Read the documentation from `),ht=n("a"),Nn=a("PretrainedConfig"),Ln=a(" for more information."),On=d(),It=n("p"),Gn=a("Examples:"),jn=d(),u(xe.$$.fragment),Bn=d(),ce=n("div"),u(Ce.$$.fragment),Wn=d(),Y=n("p"),Rn=a("Serializes this instance to a Python dictionary. Override the default "),Dt=n("em"),Hn=a("to_dict()"),Un=a(" from "),Nt=n("em"),Vn=a("PretrainedConfig"),Qn=a("."),qo=d(),Z=n("h2"),le=n("a"),Lt=n("span"),u(Pe.$$.fragment),Xn=d(),Ot=n("span"),Jn=a("FSMTTokenizer"),xo=d(),b=n("div"),u(Ae.$$.fragment),Kn=d(),Gt=n("p"),Yn=a("Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:"),Zn=d(),N=n("ul"),jt=n("li"),es=a("Moses preprocessing and tokenization."),ts=d(),Bt=n("li"),os=a("Normalizing all inputs text."),ns=d(),L=n("li"),ss=a("The arguments "),Wt=n("code"),rs=a("special_tokens"),as=a(" and the function "),Rt=n("code"),is=a("set_special_tokens"),ds=a(`, can be used to add additional symbols (like \u201D`),Ht=n("strong"),cs=a("classify"),ls=a("\u201D) to a vocabulary."),hs=d(),Ie=n("li"),ms=a("The argument "),Ut=n("code"),fs=a("langs"),ps=a(" defines a pair of languages."),us=d(),De=n("p"),_s=a("This tokenizer inherits from "),mt=n("a"),gs=a("PreTrainedTokenizer"),vs=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ks=d(),G=n("div"),u(Ne.$$.fragment),Ts=d(),Vt=n("p"),bs=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A FAIRSEQ Transformer sequence has the following format:`),ys=d(),Le=n("ul"),ft=n("li"),ws=a("single sequence: "),Qt=n("code"),Ms=a("<s> X </s>"),Fs=d(),pt=n("li"),Ss=a("pair of sequences: "),Xt=n("code"),$s=a("<s> A </s> B </s>"),zs=d(),he=n("div"),u(Oe.$$.fragment),Es=d(),Ge=n("p"),qs=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Jt=n("code"),xs=a("prepare_for_model"),Cs=a(" method."),Ps=d(),C=n("div"),u(je.$$.fragment),As=d(),Kt=n("p"),Is=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ Transformer sequence pair mask has the following format:`),Ds=d(),u(Be.$$.fragment),Ns=d(),ee=n("p"),Ls=a("If "),Yt=n("code"),Os=a("token_ids_1"),Gs=a(" is "),Zt=n("code"),js=a("None"),Bs=a(", this method only returns the first portion of the mask (0s)."),Ws=d(),eo=n("p"),Rs=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. 
An FAIRSEQ_TRANSFORMER sequence pair mask has the following format:`),Hs=d(),to=n("div"),Co=d(),te=n("h2"),me=n("a"),oo=n("span"),u(We.$$.fragment),Us=d(),no=n("span"),Vs=a("FSMTModel"),Po=d(),E=n("div"),u(Re.$$.fragment),Qs=d(),so=n("p"),Xs=a("The bare FSMT Model outputting raw hidden-states without any specific head on top."),Js=d(),He=n("p"),Ks=a("This model inherits from "),ut=n("a"),Ys=a("PreTrainedModel"),Zs=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),er=d(),Ue=n("p"),tr=a("This model is also a PyTorch "),Ve=n("a"),or=a("torch.nn.Module"),nr=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sr=d(),P=n("div"),u(Qe.$$.fragment),rr=d(),oe=n("p"),ar=a("The "),_t=n("a"),ir=a("FSMTModel"),dr=a(" forward method, overrides the "),ro=n("code"),cr=a("__call__"),lr=a(" special method."),hr=d(),u(fe.$$.fragment),mr=d(),ao=n("p"),fr=a("Example:"),pr=d(),u(Xe.$$.fragment),Ao=d(),ne=n("h2"),pe=n("a"),io=n("span"),u(Je.$$.fragment),ur=d(),co=n("span"),_r=a("FSMTForConditionalGeneration"),Io=d(),q=n("div"),u(Ke.$$.fragment),gr=d(),lo=n("p"),vr=a("The FSMT Model with a language modeling head. Can be used for summarization."),kr=d(),Ye=n("p"),Tr=a("This model inherits from "),gt=n("a"),br=a("PreTrainedModel"),yr=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wr=d(),Ze=n("p"),Mr=a("This model is also a PyTorch "),et=n("a"),Fr=a("torch.nn.Module"),Sr=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$r=d(),y=n("div"),u(tt.$$.fragment),zr=d(),se=n("p"),Er=a("The "),vt=n("a"),qr=a("FSMTForConditionalGeneration"),xr=a(" forward method, overrides the "),ho=n("code"),Cr=a("__call__"),Pr=a(" special method."),Ar=d(),u(ue.$$.fragment),Ir=d(),mo=n("p"),Dr=a("Translation example::"),Nr=d(),fo=n("p"),Lr=a("from transformers import FSMTTokenizer, FSMTForConditionalGeneration"),Or=d(),po=n("p"),Gr=a(`mname = \u201Cfacebook/wmt19-ru-en\u201D model = FSMTForConditionalGeneration.from_pretrained(mname) tokenizer = FSMTTokenizer.from_pretrained(mname)`),jr=d(),uo=n("p"),Br=a(`src_text = \u201C\u041C\u0430\u0448\u0438\u043D\u043D\u043E\u0435 \u043E\u0431\u0443\u0447\u0435\u043D\u0438\u0435 - \u044D\u0442\u043E \u0437\u0434\u043E\u0440\u043E\u0432\u043E, \u043D\u0435 \u0442\u0430\u043A \u043B\u0438?\u201D input_ids = tokenizer.encode(src_text, return_tensors=\u2018pt\u2019) outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3) for i, output in enumerate(outputs): decoded = tokenizer.decode(output, skip_special_tokens=True) print(f\u201D{i}: {decoded})`),Wr=d(),_e=n("h1"),ge=n("a"),_o=n("span"),u(ot.$$.fragment),Rr=d(),go=n("span"),Hr=a("1: Machine learning is great, isn't it? 
..."),this.h()},l(t){const h=_i('[data-svelte="svelte-1phssyn"]',document.head);f=s(h,"META",{name:!0,content:!0}),h.forEach(o),x=c(t),p=s(t,"H1",{class:!0});var nt=r(p);w=s(nt,"A",{id:!0,class:!0,href:!0});var vo=r(w);A=s(vo,"SPAN",{});var Xr=r(A);_(M.$$.fragment,Xr),Xr.forEach(o),vo.forEach(o),F=c(nt),D=s(nt,"SPAN",{});var Jr=r(D);on=i(Jr,"FSMT"),Jr.forEach(o),nt.forEach(o),To=c(t),U=s(t,"P",{});var ko=r(U);$t=s(ko,"STRONG",{});var Kr=r($t);nn=i(Kr,"DISCLAIMER:"),Kr.forEach(o),sn=i(ko," If you see something strange, file a "),ye=s(ko,"A",{href:!0,rel:!0});var Yr=r(ye);rn=i(Yr,"Github Issue"),Yr.forEach(o),an=i(ko,` and assign @stas00.`),ko.forEach(o),bo=c(t),V=s(t,"H2",{class:!0});var No=r(V);re=s(No,"A",{id:!0,class:!0,href:!0});var Zr=r(re);zt=s(Zr,"SPAN",{});var ea=r(zt);_(we.$$.fragment,ea),ea.forEach(o),Zr.forEach(o),dn=c(No),Et=s(No,"SPAN",{});var ta=r(Et);cn=i(ta,"Overview"),ta.forEach(o),No.forEach(o),yo=c(t),ae=s(t,"P",{});var Lo=r(ae);ln=i(Lo,"FSMT (FairSeq MachineTranslation) models were introduced in "),Me=s(Lo,"A",{href:!0,rel:!0});var oa=r(Me);hn=i(oa,"Facebook FAIR\u2019s WMT19 News Translation Task Submission"),oa.forEach(o),mn=i(Lo," by Nathan Ng, Kyra Yee, Alexei Baevski, Myle Ott, Michael Auli, Sergey Edunov."),Lo.forEach(o),wo=c(t),st=s(t,"P",{});var na=r(st);fn=i(na,"The abstract of the paper is the following:"),na.forEach(o),Mo=c(t),rt=s(t,"P",{});var sa=r(rt);qt=s(sa,"EM",{});var ra=r(qt);pn=i(ra,`This paper describes Facebook FAIR\u2019s submission to the WMT19 shared news translation task. We participate in two language pairs and four language directions, English <-> German and English <-> Russian. Following our submission from last year, our baseline systems are large BPE-based transformer models trained with the Fairseq sequence modeling toolkit which rely on sampled back-translations. This year we experiment with different bitext data filtering schemes, as well as with adding filtered back-translated data. We also ensemble and fine-tune our models on domain-specific data, then decode using noisy channel model reranking. Our submissions are ranked first in all four directions of the human evaluation campaign. On En->De, our system significantly outperforms other systems as well as human translations. This system improves upon our WMT\u201918 submission by 4.5 BLEU points.`),ra.forEach(o),sa.forEach(o),Fo=c(t),O=s(t,"P",{});var kt=r(O);un=i(kt,"This model was contributed by "),Fe=s(kt,"A",{href:!0,rel:!0});var aa=r(Fe);_n=i(aa,"stas"),aa.forEach(o),gn=i(kt,`. The original code can be found `),Se=s(kt,"A",{href:!0,rel:!0});var ia=r(Se);vn=i(ia,"here"),ia.forEach(o),kn=i(kt,"."),kt.forEach(o),So=c(t),Q=s(t,"H2",{class:!0});var Oo=r(Q);ie=s(Oo,"A",{id:!0,class:!0,href:!0});var da=r(ie);xt=s(da,"SPAN",{});var ca=r(xt);_($e.$$.fragment,ca),ca.forEach(o),da.forEach(o),Tn=c(Oo),Ct=s(Oo,"SPAN",{});var la=r(Ct);bn=i(la,"Implementation Notes"),la.forEach(o),Oo.forEach(o),$o=c(t),at=s(t,"UL",{});var ha=r(at);X=s(ha,"LI",{});var Tt=r(X);yn=i(Tt,`FSMT uses source and target vocabulary pairs that aren\u2019t combined into one. It doesn\u2019t share embeddings tokens either. 
Its tokenizer is very similar to `),it=s(Tt,"A",{href:!0});var ma=r(it);wn=i(ma,"XLMTokenizer"),ma.forEach(o),Mn=i(Tt,` and the main model is derived from `),dt=s(Tt,"A",{href:!0});var fa=r(dt);Fn=i(fa,"BartModel"),fa.forEach(o),Sn=i(Tt,"."),Tt.forEach(o),ha.forEach(o),zo=c(t),J=s(t,"H2",{class:!0});var Go=r(J);de=s(Go,"A",{id:!0,class:!0,href:!0});var pa=r(de);Pt=s(pa,"SPAN",{});var ua=r(Pt);_(ze.$$.fragment,ua),ua.forEach(o),pa.forEach(o),$n=c(Go),At=s(Go,"SPAN",{});var _a=r(At);zn=i(_a,"FSMTConfig"),_a.forEach(o),Go.forEach(o),Eo=c(t),S=s(t,"DIV",{class:!0});var I=r(S);_(Ee.$$.fragment,I),En=c(I),qe=s(I,"P",{});var jo=r(qe);qn=i(jo,"This is the configuration class to store the configuration of a "),ct=s(jo,"A",{href:!0});var ga=r(ct);xn=i(ga,"FSMTModel"),ga.forEach(o),Cn=i(jo,`. It is used to instantiate a FSMT model according to the specified arguments, defining the model architecture.`),jo.forEach(o),Pn=c(I),K=s(I,"P",{});var bt=r(K);An=i(bt,"Configuration objects inherit from "),lt=s(bt,"A",{href:!0});var va=r(lt);In=i(va,"PretrainedConfig"),va.forEach(o),Dn=i(bt,` and can be used to control the model outputs. Read the documentation from `),ht=s(bt,"A",{href:!0});var ka=r(ht);Nn=i(ka,"PretrainedConfig"),ka.forEach(o),Ln=i(bt," for more information."),bt.forEach(o),On=c(I),It=s(I,"P",{});var Ta=r(It);Gn=i(Ta,"Examples:"),Ta.forEach(o),jn=c(I),_(xe.$$.fragment,I),Bn=c(I),ce=s(I,"DIV",{class:!0});var Bo=r(ce);_(Ce.$$.fragment,Bo),Wn=c(Bo),Y=s(Bo,"P",{});var yt=r(Y);Rn=i(yt,"Serializes this instance to a Python dictionary. Override the default "),Dt=s(yt,"EM",{});var ba=r(Dt);Hn=i(ba,"to_dict()"),ba.forEach(o),Un=i(yt," from "),Nt=s(yt,"EM",{});var ya=r(Nt);Vn=i(ya,"PretrainedConfig"),ya.forEach(o),Qn=i(yt,"."),yt.forEach(o),Bo.forEach(o),I.forEach(o),qo=c(t),Z=s(t,"H2",{class:!0});var Wo=r(Z);le=s(Wo,"A",{id:!0,class:!0,href:!0});var wa=r(le);Lt=s(wa,"SPAN",{});var Ma=r(Lt);_(Pe.$$.fragment,Ma),Ma.forEach(o),wa.forEach(o),Xn=c(Wo),Ot=s(Wo,"SPAN",{});var Fa=r(Ot);Jn=i(Fa,"FSMTTokenizer"),Fa.forEach(o),Wo.forEach(o),xo=c(t),b=s(t,"DIV",{class:!0});var $=r(b);_(Ae.$$.fragment,$),Kn=c($),Gt=s($,"P",{});var Sa=r(Gt);Yn=i(Sa,"Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:"),Sa.forEach(o),Zn=c($),N=s($,"UL",{});var ve=r(N);jt=s(ve,"LI",{});var $a=r(jt);es=i($a,"Moses preprocessing and tokenization."),$a.forEach(o),ts=c(ve),Bt=s(ve,"LI",{});var za=r(Bt);os=i(za,"Normalizing all inputs text."),za.forEach(o),ns=c(ve),L=s(ve,"LI",{});var ke=r(L);ss=i(ke,"The arguments "),Wt=s(ke,"CODE",{});var Ea=r(Wt);rs=i(Ea,"special_tokens"),Ea.forEach(o),as=i(ke," and the function "),Rt=s(ke,"CODE",{});var qa=r(Rt);is=i(qa,"set_special_tokens"),qa.forEach(o),ds=i(ke,`, can be used to add additional symbols (like \u201D`),Ht=s(ke,"STRONG",{});var xa=r(Ht);cs=i(xa,"classify"),xa.forEach(o),ls=i(ke,"\u201D) to a vocabulary."),ke.forEach(o),hs=c(ve),Ie=s(ve,"LI",{});var Ro=r(Ie);ms=i(Ro,"The argument "),Ut=s(Ro,"CODE",{});var Ca=r(Ut);fs=i(Ca,"langs"),Ca.forEach(o),ps=i(Ro," defines a pair of languages."),Ro.forEach(o),ve.forEach(o),us=c($),De=s($,"P",{});var Ho=r(De);_s=i(Ho,"This tokenizer inherits from "),mt=s(Ho,"A",{href:!0});var Pa=r(mt);gs=i(Pa,"PreTrainedTokenizer"),Pa.forEach(o),vs=i(Ho,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ho.forEach(o),ks=c($),G=s($,"DIV",{class:!0});var wt=r(G);_(Ne.$$.fragment,wt),Ts=c(wt),Vt=s(wt,"P",{});var Aa=r(Vt);bs=i(Aa,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A FAIRSEQ Transformer sequence has the following format:`),Aa.forEach(o),ys=c(wt),Le=s(wt,"UL",{});var Uo=r(Le);ft=s(Uo,"LI",{});var Ur=r(ft);ws=i(Ur,"single sequence: "),Qt=s(Ur,"CODE",{});var Ia=r(Qt);Ms=i(Ia,"<s> X </s>"),Ia.forEach(o),Ur.forEach(o),Fs=c(Uo),pt=s(Uo,"LI",{});var Vr=r(pt);Ss=i(Vr,"pair of sequences: "),Xt=s(Vr,"CODE",{});var Da=r(Xt);$s=i(Da,"<s> A </s> B </s>"),Da.forEach(o),Vr.forEach(o),Uo.forEach(o),wt.forEach(o),zs=c($),he=s($,"DIV",{class:!0});var Vo=r(he);_(Oe.$$.fragment,Vo),Es=c(Vo),Ge=s(Vo,"P",{});var Qo=r(Ge);qs=i(Qo,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Jt=s(Qo,"CODE",{});var Na=r(Jt);xs=i(Na,"prepare_for_model"),Na.forEach(o),Cs=i(Qo," method."),Qo.forEach(o),Vo.forEach(o),Ps=c($),C=s($,"DIV",{class:!0});var j=r(C);_(je.$$.fragment,j),As=c(j),Kt=s(j,"P",{});var La=r(Kt);Is=i(La,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ Transformer sequence pair mask has the following format:`),La.forEach(o),Ds=c(j),_(Be.$$.fragment,j),Ns=c(j),ee=s(j,"P",{});var Mt=r(ee);Ls=i(Mt,"If "),Yt=s(Mt,"CODE",{});var Oa=r(Yt);Os=i(Oa,"token_ids_1"),Oa.forEach(o),Gs=i(Mt," is "),Zt=s(Mt,"CODE",{});var Ga=r(Zt);js=i(Ga,"None"),Ga.forEach(o),Bs=i(Mt,", this method only returns the first portion of the mask (0s)."),Mt.forEach(o),Ws=c(j),eo=s(j,"P",{});var ja=r(eo);Rs=i(ja,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FAIRSEQ_TRANSFORMER sequence pair mask has the following format:`),ja.forEach(o),j.forEach(o),Hs=c($),to=s($,"DIV",{class:!0}),r(to).forEach(o),$.forEach(o),Co=c(t),te=s(t,"H2",{class:!0});var Xo=r(te);me=s(Xo,"A",{id:!0,class:!0,href:!0});var Ba=r(me);oo=s(Ba,"SPAN",{});var Wa=r(oo);_(We.$$.fragment,Wa),Wa.forEach(o),Ba.forEach(o),Us=c(Xo),no=s(Xo,"SPAN",{});var Ra=r(no);Vs=i(Ra,"FSMTModel"),Ra.forEach(o),Xo.forEach(o),Po=c(t),E=s(t,"DIV",{class:!0});var B=r(E);_(Re.$$.fragment,B),Qs=c(B),so=s(B,"P",{});var Ha=r(so);Xs=i(Ha,"The bare FSMT Model outputting raw hidden-states without any specific head on top."),Ha.forEach(o),Js=c(B),He=s(B,"P",{});var Jo=r(He);Ks=i(Jo,"This model inherits from "),ut=s(Jo,"A",{href:!0});var Ua=r(ut);Ys=i(Ua,"PreTrainedModel"),Ua.forEach(o),Zs=i(Jo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jo.forEach(o),er=c(B),Ue=s(B,"P",{});var Ko=r(Ue);tr=i(Ko,"This model is also a PyTorch "),Ve=s(Ko,"A",{href:!0,rel:!0});var Va=r(Ve);or=i(Va,"torch.nn.Module"),Va.forEach(o),nr=i(Ko,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ko.forEach(o),sr=c(B),P=s(B,"DIV",{class:!0});var W=r(P);_(Qe.$$.fragment,W),rr=c(W),oe=s(W,"P",{});var Ft=r(oe);ar=i(Ft,"The "),_t=s(Ft,"A",{href:!0});var Qa=r(_t);ir=i(Qa,"FSMTModel"),Qa.forEach(o),dr=i(Ft," forward method, overrides the "),ro=s(Ft,"CODE",{});var Xa=r(ro);cr=i(Xa,"__call__"),Xa.forEach(o),lr=i(Ft," special method."),Ft.forEach(o),hr=c(W),_(fe.$$.fragment,W),mr=c(W),ao=s(W,"P",{});var Ja=r(ao);fr=i(Ja,"Example:"),Ja.forEach(o),pr=c(W),_(Xe.$$.fragment,W),W.forEach(o),B.forEach(o),Ao=c(t),ne=s(t,"H2",{class:!0});var Yo=r(ne);pe=s(Yo,"A",{id:!0,class:!0,href:!0});var Ka=r(pe);io=s(Ka,"SPAN",{});var Ya=r(io);_(Je.$$.fragment,Ya),Ya.forEach(o),Ka.forEach(o),ur=c(Yo),co=s(Yo,"SPAN",{});var Za=r(co);_r=i(Za,"FSMTForConditionalGeneration"),Za.forEach(o),Yo.forEach(o),Io=c(t),q=s(t,"DIV",{class:!0});var R=r(q);_(Ke.$$.fragment,R),gr=c(R),lo=s(R,"P",{});var ei=r(lo);vr=i(ei,"The FSMT Model with a language modeling head. Can be used for summarization."),ei.forEach(o),kr=c(R),Ye=s(R,"P",{});var Zo=r(Ye);Tr=i(Zo,"This model inherits from "),gt=s(Zo,"A",{href:!0});var ti=r(gt);br=i(ti,"PreTrainedModel"),ti.forEach(o),yr=i(Zo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zo.forEach(o),wr=c(R),Ze=s(R,"P",{});var en=r(Ze);Mr=i(en,"This model is also a PyTorch "),et=s(en,"A",{href:!0,rel:!0});var oi=r(et);Fr=i(oi,"torch.nn.Module"),oi.forEach(o),Sr=i(en,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),en.forEach(o),$r=c(R),y=s(R,"DIV",{class:!0});var z=r(y);_(tt.$$.fragment,z),zr=c(z),se=s(z,"P",{});var St=r(se);Er=i(St,"The "),vt=s(St,"A",{href:!0});var ni=r(vt);qr=i(ni,"FSMTForConditionalGeneration"),ni.forEach(o),xr=i(St," forward method, overrides the "),ho=s(St,"CODE",{});var si=r(ho);Cr=i(si,"__call__"),si.forEach(o),Pr=i(St," special method."),St.forEach(o),Ar=c(z),_(ue.$$.fragment,z),Ir=c(z),mo=s(z,"P",{});var ri=r(mo);Dr=i(ri,"Translation example::"),ri.forEach(o),Nr=c(z),fo=s(z,"P",{});var ai=r(fo);Lr=i(ai,"from transformers import FSMTTokenizer, FSMTForConditionalGeneration"),ai.forEach(o),Or=c(z),po=s(z,"P",{});var ii=r(po);Gr=i(ii,`mname = \u201Cfacebook/wmt19-ru-en\u201D model = FSMTForConditionalGeneration.from_pretrained(mname) tokenizer = FSMTTokenizer.from_pretrained(mname)`),ii.forEach(o),jr=c(z),uo=s(z,"P",{});var di=r(uo);Br=i(di,`src_text = \u201C\u041C\u0430\u0448\u0438\u043D\u043D\u043E\u0435 \u043E\u0431\u0443\u0447\u0435\u043D\u0438\u0435 - \u044D\u0442\u043E \u0437\u0434\u043E\u0440\u043E\u0432\u043E, \u043D\u0435 \u0442\u0430\u043A \u043B\u0438?\u201D input_ids = tokenizer.encode(src_text, return_tensors=\u2018pt\u2019) outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3) for i, output in enumerate(outputs): decoded = tokenizer.decode(output, skip_special_tokens=True) print(f\u201D{i}: {decoded})`),di.forEach(o),Wr=c(z),_e=s(z,"H1",{class:!0});var tn=r(_e);ge=s(tn,"A",{id:!0,class:!0,href:!0});var ci=r(ge);_o=s(ci,"SPAN",{});var li=r(_o);_(ot.$$.fragment,li),li.forEach(o),ci.forEach(o),Rr=c(tn),go=s(tn,"SPAN",{});var hi=r(go);Hr=i(hi,"1: Machine learning is great, isn't it? 
..."),hi.forEach(o),tn.forEach(o),z.forEach(o),R.forEach(o),this.h()},h(){l(f,"name","hf:doc:metadata"),l(f,"content",JSON.stringify(Ti)),l(w,"id","fsmt"),l(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(w,"href","#fsmt"),l(p,"class","relative group"),l(ye,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),l(ye,"rel","nofollow"),l(re,"id","overview"),l(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(re,"href","#overview"),l(V,"class","relative group"),l(Me,"href","https://arxiv.org/abs/1907.06616"),l(Me,"rel","nofollow"),l(Fe,"href","https://huggingface.co/stas"),l(Fe,"rel","nofollow"),l(Se,"href","https://github.com/pytorch/fairseq/tree/master/examples/wmt19"),l(Se,"rel","nofollow"),l(ie,"id","implementation-notes"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#implementation-notes"),l(Q,"class","relative group"),l(it,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer"),l(dt,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel"),l(de,"id","transformers.FSMTConfig"),l(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(de,"href","#transformers.FSMTConfig"),l(J,"class","relative group"),l(ct,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTModel"),l(lt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(ht,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(ce,"class","docstring"),l(S,"class","docstring"),l(le,"id","transformers.FSMTTokenizer"),l(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(le,"href","#transformers.FSMTTokenizer"),l(Z,"class","relative group"),l(mt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(G,"class","docstring"),l(he,"class","docstring"),l(C,"class","docstring"),l(to,"class","docstring"),l(b,"class","docstring"),l(me,"id","transformers.FSMTModel"),l(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(me,"href","#transformers.FSMTModel"),l(te,"class","relative group"),l(ut,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ve,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ve,"rel","nofollow"),l(_t,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTModel"),l(P,"class","docstring"),l(E,"class","docstring"),l(pe,"id","transformers.FSMTForConditionalGeneration"),l(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(pe,"href","#transformers.FSMTForConditionalGeneration"),l(ne,"class","relative 
group"),l(gt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(et,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(et,"rel","nofollow"),l(vt,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTForConditionalGeneration"),l(ge,"id","1-machine-learning-is-great-isnt-it"),l(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ge,"href","#1-machine-learning-is-great-isnt-it"),l(_e,"class","relative group"),l(y,"class","docstring"),l(q,"class","docstring")},m(t,h){e(document.head,f),m(t,x,h),m(t,p,h),e(p,w),e(w,A),g(M,A,null),e(p,F),e(p,D),e(D,on),m(t,To,h),m(t,U,h),e(U,$t),e($t,nn),e(U,sn),e(U,ye),e(ye,rn),e(U,an),m(t,bo,h),m(t,V,h),e(V,re),e(re,zt),g(we,zt,null),e(V,dn),e(V,Et),e(Et,cn),m(t,yo,h),m(t,ae,h),e(ae,ln),e(ae,Me),e(Me,hn),e(ae,mn),m(t,wo,h),m(t,st,h),e(st,fn),m(t,Mo,h),m(t,rt,h),e(rt,qt),e(qt,pn),m(t,Fo,h),m(t,O,h),e(O,un),e(O,Fe),e(Fe,_n),e(O,gn),e(O,Se),e(Se,vn),e(O,kn),m(t,So,h),m(t,Q,h),e(Q,ie),e(ie,xt),g($e,xt,null),e(Q,Tn),e(Q,Ct),e(Ct,bn),m(t,$o,h),m(t,at,h),e(at,X),e(X,yn),e(X,it),e(it,wn),e(X,Mn),e(X,dt),e(dt,Fn),e(X,Sn),m(t,zo,h),m(t,J,h),e(J,de),e(de,Pt),g(ze,Pt,null),e(J,$n),e(J,At),e(At,zn),m(t,Eo,h),m(t,S,h),g(Ee,S,null),e(S,En),e(S,qe),e(qe,qn),e(qe,ct),e(ct,xn),e(qe,Cn),e(S,Pn),e(S,K),e(K,An),e(K,lt),e(lt,In),e(K,Dn),e(K,ht),e(ht,Nn),e(K,Ln),e(S,On),e(S,It),e(It,Gn),e(S,jn),g(xe,S,null),e(S,Bn),e(S,ce),g(Ce,ce,null),e(ce,Wn),e(ce,Y),e(Y,Rn),e(Y,Dt),e(Dt,Hn),e(Y,Un),e(Y,Nt),e(Nt,Vn),e(Y,Qn),m(t,qo,h),m(t,Z,h),e(Z,le),e(le,Lt),g(Pe,Lt,null),e(Z,Xn),e(Z,Ot),e(Ot,Jn),m(t,xo,h),m(t,b,h),g(Ae,b,null),e(b,Kn),e(b,Gt),e(Gt,Yn),e(b,Zn),e(b,N),e(N,jt),e(jt,es),e(N,ts),e(N,Bt),e(Bt,os),e(N,ns),e(N,L),e(L,ss),e(L,Wt),e(Wt,rs),e(L,as),e(L,Rt),e(Rt,is),e(L,ds),e(L,Ht),e(Ht,cs),e(L,ls),e(N,hs),e(N,Ie),e(Ie,ms),e(Ie,Ut),e(Ut,fs),e(Ie,ps),e(b,us),e(b,De),e(De,_s),e(De,mt),e(mt,gs),e(De,vs),e(b,ks),e(b,G),g(Ne,G,null),e(G,Ts),e(G,Vt),e(Vt,bs),e(G,ys),e(G,Le),e(Le,ft),e(ft,ws),e(ft,Qt),e(Qt,Ms),e(Le,Fs),e(Le,pt),e(pt,Ss),e(pt,Xt),e(Xt,$s),e(b,zs),e(b,he),g(Oe,he,null),e(he,Es),e(he,Ge),e(Ge,qs),e(Ge,Jt),e(Jt,xs),e(Ge,Cs),e(b,Ps),e(b,C),g(je,C,null),e(C,As),e(C,Kt),e(Kt,Is),e(C,Ds),g(Be,C,null),e(C,Ns),e(C,ee),e(ee,Ls),e(ee,Yt),e(Yt,Os),e(ee,Gs),e(ee,Zt),e(Zt,js),e(ee,Bs),e(C,Ws),e(C,eo),e(eo,Rs),e(b,Hs),e(b,to),m(t,Co,h),m(t,te,h),e(te,me),e(me,oo),g(We,oo,null),e(te,Us),e(te,no),e(no,Vs),m(t,Po,h),m(t,E,h),g(Re,E,null),e(E,Qs),e(E,so),e(so,Xs),e(E,Js),e(E,He),e(He,Ks),e(He,ut),e(ut,Ys),e(He,Zs),e(E,er),e(E,Ue),e(Ue,tr),e(Ue,Ve),e(Ve,or),e(Ue,nr),e(E,sr),e(E,P),g(Qe,P,null),e(P,rr),e(P,oe),e(oe,ar),e(oe,_t),e(_t,ir),e(oe,dr),e(oe,ro),e(ro,cr),e(oe,lr),e(P,hr),g(fe,P,null),e(P,mr),e(P,ao),e(ao,fr),e(P,pr),g(Xe,P,null),m(t,Ao,h),m(t,ne,h),e(ne,pe),e(pe,io),g(Je,io,null),e(ne,ur),e(ne,co),e(co,_r),m(t,Io,h),m(t,q,h),g(Ke,q,null),e(q,gr),e(q,lo),e(lo,vr),e(q,kr),e(q,Ye),e(Ye,Tr),e(Ye,gt),e(gt,br),e(Ye,yr),e(q,wr),e(q,Ze),e(Ze,Mr),e(Ze,et),e(et,Fr),e(Ze,Sr),e(q,$r),e(q,y),g(tt,y,null),e(y,zr),e(y,se),e(se,Er),e(se,vt),e(vt,qr),e(se,xr),e(se,ho),e(ho,Cr),e(se,Pr),e(y,Ar),g(ue,y,null),e(y,Ir),e(y,mo),e(mo,Dr),e(y,Nr),e(y,fo),e(fo,Lr),e(y,Or),e(y,po),e(po,Gr),e(y,jr),e(y,uo),e(uo,Br),e(y,Wr),e(y,_e),e(_e,ge),e(ge,_o),g(ot,_o,null),e(_e,Rr),e(_e,go),e(go,Hr),Do=!0},p(t,[h]){const nt={};h&2&&(nt.$$scope={dirty:h,ctx:t}),fe.$set(nt);const 
vo={};h&2&&(vo.$$scope={dirty:h,ctx:t}),ue.$set(vo)},i(t){Do||(v(M.$$.fragment,t),v(we.$$.fragment,t),v($e.$$.fragment,t),v(ze.$$.fragment,t),v(Ee.$$.fragment,t),v(xe.$$.fragment,t),v(Ce.$$.fragment,t),v(Pe.$$.fragment,t),v(Ae.$$.fragment,t),v(Ne.$$.fragment,t),v(Oe.$$.fragment,t),v(je.$$.fragment,t),v(Be.$$.fragment,t),v(We.$$.fragment,t),v(Re.$$.fragment,t),v(Qe.$$.fragment,t),v(fe.$$.fragment,t),v(Xe.$$.fragment,t),v(Je.$$.fragment,t),v(Ke.$$.fragment,t),v(tt.$$.fragment,t),v(ue.$$.fragment,t),v(ot.$$.fragment,t),Do=!0)},o(t){k(M.$$.fragment,t),k(we.$$.fragment,t),k($e.$$.fragment,t),k(ze.$$.fragment,t),k(Ee.$$.fragment,t),k(xe.$$.fragment,t),k(Ce.$$.fragment,t),k(Pe.$$.fragment,t),k(Ae.$$.fragment,t),k(Ne.$$.fragment,t),k(Oe.$$.fragment,t),k(je.$$.fragment,t),k(Be.$$.fragment,t),k(We.$$.fragment,t),k(Re.$$.fragment,t),k(Qe.$$.fragment,t),k(fe.$$.fragment,t),k(Xe.$$.fragment,t),k(Je.$$.fragment,t),k(Ke.$$.fragment,t),k(tt.$$.fragment,t),k(ue.$$.fragment,t),k(ot.$$.fragment,t),Do=!1},d(t){o(f),t&&o(x),t&&o(p),T(M),t&&o(To),t&&o(U),t&&o(bo),t&&o(V),T(we),t&&o(yo),t&&o(ae),t&&o(wo),t&&o(st),t&&o(Mo),t&&o(rt),t&&o(Fo),t&&o(O),t&&o(So),t&&o(Q),T($e),t&&o($o),t&&o(at),t&&o(zo),t&&o(J),T(ze),t&&o(Eo),t&&o(S),T(Ee),T(xe),T(Ce),t&&o(qo),t&&o(Z),T(Pe),t&&o(xo),t&&o(b),T(Ae),T(Ne),T(Oe),T(je),T(Be),t&&o(Co),t&&o(te),T(We),t&&o(Po),t&&o(E),T(Re),T(Qe),T(fe),T(Xe),t&&o(Ao),t&&o(ne),T(Je),t&&o(Io),t&&o(q),T(Ke),T(tt),T(ue),T(ot)}}}const Ti={local:"1-machine-learning-is-great-isnt-it",title:"1: Machine learning is great, isn't it? ..."};function bi(be,f,x){let{fw:p}=f;return be.$$set=w=>{"fw"in w&&x(0,p=w.fw)},[p]}class zi extends fi{constructor(f){super();pi(this,f,bi,ki,ui,{fw:0})}}export{zi as default,Ti as metadata};
9906
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/albert.mdx-dc6ca9ee.js
import{S as Pz,i as qz,s as jz,e as n,k as l,w as v,t as a,L as Cz,c as s,d as t,m as d,a as r,x as k,h as i,b as c,J as e,g as f,y as T,q as y,o as w,B as A}from"../../chunks/vendor-b1433968.js";import{T as ke}from"../../chunks/Tip-c3840994.js";import{D}from"../../chunks/Docstring-ff504c58.js";import{C as we}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Te}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Iz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Lz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Nz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Dz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Sz(C){let 
p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Oz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Wz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Uz(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function Bz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Rz(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch 
models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function Hz(C){let 
p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Qz(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function Vz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Jz(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional 
arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function Kz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Gz(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function Xz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function Zz(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function Yz(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function e5(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re;return{c(){p=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),g=l(),b=n("ul"),F=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),x=n("li"),pe=a("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),M=n("p"),Z=a("This second option is useful when using "),U=n("code"),ee=a("tf.keras.Model.fit"),he=a(` method which currently requires having all the tensors in the first argument of the model call function: `),B=n("code"),fe=a("model(inputs)"),ae=a("."),J=l(),I=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),X=l(),z=n("ul"),q=n("li"),Y=a("a single Tensor with "),R=n("code"),me=a("input_ids"),ue=a(" only and nothing else: "),H=n("code"),ge=a("model(inputs_ids)"),ie=l(),P=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=n("code"),oe=a("model([input_ids, attention_mask])"),se=a(" or "),V=n("code"),le=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),L=n("li"),de=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=n("code"),re=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var E=r(p);$=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),b=s(h,"UL",{});var K=r(b);F=s(K,"LI",{});var Ae=r(F);_=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),u=d(K),x=s(K,"LI",{});var ye=r(x);pe=i(ye,"having all inputs as a list, tuple or dict in the first positional arguments."),ye.forEach(t),K.forEach(t),G=d(h),M=s(h,"P",{});var j=r(M);Z=i(j,"This second option is useful when using "),U=s(j,"CODE",{});var Fe=r(U);ee=i(Fe,"tf.keras.Model.fit"),Fe.forEach(t),he=i(j,` method which currently requires having all the tensors in the first argument of the model call function: `),B=s(j,"CODE",{});var ve=r(B);fe=i(ve,"model(inputs)"),ve.forEach(t),ae=i(j,"."),j.forEach(t),J=d(h),I=s(h,"P",{});var $e=r(I);te=i($e,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),X=d(h),z=s(h,"UL",{});var N=r(z);q=s(N,"LI",{});var O=r(q);Y=i(O,"a single Tensor with "),R=s(O,"CODE",{});var xe=r(R);me=i(xe,"input_ids"),xe.forEach(t),ue=i(O," only and nothing else: "),H=s(O,"CODE",{});var Ee=r(H);ge=i(Ee,"model(inputs_ids)"),Ee.forEach(t),O.forEach(t),ie=d(N),P=s(N,"LI",{});var W=r(P);_e=i(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=s(W,"CODE",{});var Me=r(Q);oe=i(Me,"model([input_ids, attention_mask])"),Me.forEach(t),se=i(W," or "),V=s(W,"CODE",{});var ze=r(V);le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),W.forEach(t),ne=d(N),L=s(N,"LI",{});var ce=r(L);de=i(ce,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=s(ce,"CODE",{});var be=r(S);re=i(be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),be.forEach(t),ce.forEach(t),N.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,b,E),e(b,F),e(F,_),e(b,u),e(b,x),e(x,pe),f(h,G,E),f(h,M,E),e(M,Z),e(M,U),e(U,ee),e(M,he),e(M,B),e(B,fe),e(M,ae),f(h,J,E),f(h,I,E),e(I,te),f(h,X,E),f(h,z,E),e(z,q),e(q,Y),e(q,R),e(R,me),e(q,ue),e(q,H),e(H,ge),e(z,ie),e(z,P),e(P,_e),e(P,Q),e(Q,oe),e(P,se),e(P,V),e(V,le),e(z,ne),e(z,L),e(L,de),e(L,S),e(S,re)},d(h){h&&t(p),h&&t(g),h&&t(b),h&&t(G),h&&t(M),h&&t(J),h&&t(I),h&&t(X),h&&t(z)}}}function t5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function o5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function n5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` 
instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function s5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function r5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function a5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function i5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function l5(C){let p,$,g,b,F;return{c(){p=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),b=a("Module"),F=a(` instance afterwards instead of this since the former takes care 
of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var u=r(p);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var x=r(g);b=i(x,"Module"),x.forEach(t),F=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,b),e(p,F)},d(_){_&&t(p)}}}function d5(C){let p,$,g,b,F,_,u,x,pe,G,M,Z,U,ee,he,B,fe,ae,J,I,te,X,z,q,Y,R,me,ue,H,ge,ie,P,_e,Q,oe,se,V,le,ne,L,de,S,re,h,E,K,Ae,ye,j,Fe,ve,$e,N,O,xe,Ee,W,Me,ze,ce,be,zn,dc,Us,gg,cc,_g,Jf,Je,Bs,bg,Ot,vg,Dl,kg,Tg,Sl,yg,wg,Rs,Ag,Fg,$g,xo,xg,Ol,Eg,Mg,Wl,zg,Pg,qg,pc,jg,Cg,Hs,Kf,Eo,Pn,hc,Qs,Ig,fc,Lg,Gf,Pe,Vs,Ng,Js,Dg,Ks,Sg,Og,Wg,Gs,Ug,Ul,Bg,Rg,Hg,Mo,Qg,mc,Vg,Jg,uc,Kg,Gg,Xg,Gt,Xs,Zg,gc,Yg,e_,Zs,Bl,t_,_c,o_,n_,Rl,s_,bc,r_,a_,qn,Ys,i_,er,l_,vc,d_,c_,p_,Pt,tr,h_,kc,f_,m_,or,u_,zo,g_,Tc,__,b_,yc,v_,k_,T_,wc,Xf,Po,jn,Ac,nr,y_,Fc,w_,Zf,zt,sr,A_,Wt,F_,$c,$_,x_,rr,E_,M_,Hl,z_,P_,q_,Xt,ar,j_,xc,C_,I_,ir,Ql,L_,Ec,N_,D_,Vl,S_,Mc,O_,W_,qt,lr,U_,zc,B_,R_,dr,H_,Pc,Q_,Yf,qo,Cn,qc,cr,V_,jc,J_,em,jo,pr,K_,hr,G_,Jl,X_,Z_,tm,Co,fr,Y_,mr,eb,Kl,tb,ob,om,Io,In,Cc,ur,nb,Ic,sb,nm,Ke,gr,rb,Lc,ab,ib,_r,lb,Gl,db,cb,pb,br,hb,vr,fb,mb,ub,ot,kr,gb,Lo,_b,Xl,bb,vb,Nc,kb,Tb,yb,Ln,wb,Dc,Ab,Fb,Tr,sm,No,Nn,Sc,yr,$b,Oc,xb,rm,Ge,wr,Eb,Do,Mb,Wc,zb,Pb,Uc,qb,jb,Cb,Ar,Ib,Zl,Lb,Nb,Db,Fr,Sb,$r,Ob,Wb,Ub,nt,xr,Bb,So,Rb,Yl,Hb,Qb,Bc,Vb,Jb,Kb,Dn,Gb,Rc,Xb,Zb,Er,am,Oo,Sn,Hc,Mr,Yb,Qc,ev,im,Xe,zr,tv,Pr,ov,Vc,nv,sv,rv,qr,av,ed,iv,lv,dv,jr,cv,Cr,pv,hv,fv,st,Ir,mv,Wo,uv,td,gv,_v,Jc,bv,vv,kv,On,Tv,Kc,yv,wv,Lr,lm,Uo,Wn,Gc,Nr,Av,Xc,Fv,dm,Ze,Dr,$v,Zc,xv,Ev,Sr,Mv,od,zv,Pv,qv,Or,jv,Wr,Cv,Iv,Lv,Se,Ur,Nv,Bo,Dv,nd,Sv,Ov,Yc,Wv,Uv,Bv,Un,Rv,ep,Hv,Qv,Br,Vv,tp,Jv,Kv,Rr,cm,Ro,Bn,op,Hr,Gv,np,Xv,pm,Ye,Qr,Zv,sp,Yv,ek,Vr,tk,sd,ok,nk,sk,Jr,rk,Kr,ak,ik,lk,rt,Gr,dk,Ho,ck,rd,pk,hk,rp,fk,mk,uk,Rn,gk,ap,_k,bk,Xr,hm,Qo,Hn,ip,Zr,vk,lp,kk,fm,et,Yr,Tk,dp,yk,wk,ea,Ak,ad,Fk,$k,xk,ta,Ek,oa,Mk,zk,Pk,at,na,qk,Vo,jk,id,Ck,Ik,cp,Lk,Nk,Dk,Qn,Sk,pp,Ok,Wk,sa,mm,Jo,Vn,hp,ra,Uk,fp,Bk,um,tt,aa,Rk,Ko,Hk,mp,Qk,Vk,up,Jk,Kk,Gk,ia,Xk,ld,Zk,Yk,eT,la,tT,da,oT,nT,sT,it,ca,rT,Go,aT,dd,iT,lT,gp,dT,cT,pT,Jn,hT,_p,fT,mT,pa,gm,Xo,Kn,bp,ha,uT,vp,gT,_m,Oe,fa,_T,kp,bT,vT,ma,kT,cd,TT,yT,wT,ua,AT,ga,FT,$T,xT,Gn,ET,lt,_a,MT,Zo,zT,pd,PT,qT,Tp,jT,CT,IT,Xn,LT,yp,NT,DT,ba,bm,Yo,Zn,wp,va,ST,Ap,OT,vm,We,ka,WT,en,UT,Fp,BT,RT,$p,HT,QT,VT,Ta,JT,hd,KT,GT,XT,ya,ZT,wa,YT,e1,t1,Yn,o1,dt,Aa,n1,tn,s1,fd,r1,a1,xp,i1,l1,d1,es,c1,Ep,p1,h1,Fa,km,on,ts,Mp,$a,f1,zp,m1,Tm,Ue,xa,u1,Ea,g1,Pp,_1,b1,v1,Ma,k1,md,T1,y1,w1,za,A1,Pa,F1,$1,x1,os,E1,ct,qa,M1,nn,z1,ud,P1,q1,qp,j1,C1,I1,ns,L1,jp,N1,D1,ja,ym,sn,ss,Cp,Ca,S1,Ip,O1,wm,Be,Ia,W1,Lp,U1,B1,La,R1,gd,H1,Q1,V1,Na,J1,Da,K1,G1,X1,rs,Z1,pt,Sa,Y1,rn,ey,_d,ty,oy,Np,ny,sy,ry,as,ay,Dp,iy,ly,Oa,Am,an,is,Sp,Wa,dy,Op,cy,Fm,Re,Ua,py,Wp,hy,fy,Ba,my,bd,uy,gy,_y,Ra,by,Ha,vy,ky,Ty,ls,yy,ht,Qa,wy,ln,Ay,vd,Fy,$y,Up,xy,Ey,My,ds,zy,Bp,Py,qy,Va,$m,dn,cs,Rp,Ja,jy,Hp,Cy,xm,He,Ka,Iy,Qp,Ly,Ny,Ga,Dy,kd,Sy,Oy,Wy,Xa,Uy,Za,By,Ry,Hy,ps,Qy,ft,Ya,Vy,cn,Jy,Td,Ky,Gy,Vp,Xy,Zy,Yy,hs,ew,Jp,tw,ow,ei,Em,pn,fs,Kp,ti,nw,Gp,sw,Mm,Qe,oi,rw,hn,aw,Xp,iw,lw,Zp,dw,cw,pw,ni,hw,yd,fw,mw,uw,si,gw,ri,_w,bw,vw,ms,kw,mt,ai,Tw,fn,yw,wd,ww,Aw,Yp,Fw,$w,xw,us,Ew,eh,Mw,zw,ii,zm,mn,gs,th,li,Pw,oh,qw,Pm,qe,di,jw,nh,Cw,Iw,ci,Lw,Ad,Nw,Dw,Sw,pi,Ow,hi,Ww,Uw,Bw,sh,Rw,Hw,Ut,rh,fi,Qw,Vw,ah,mi,Jw,Kw,ih,ui,Gw,Xw,lh,gi,Zw,Yw,ut,_i,e0,un,t0,dh,o0,n0,ch,s0,r0,a0,_s,i0,ph,l0,d0,bi,qm,gn,bs,hh,vi,c0,fh,p0,jm,je,ki,h0,_n,f0,mh,m0,u0,uh,g0,_0,b0,Ti,v0,F
d,k0,T0,y0,yi,w0,wi,A0,F0,$0,gh,x0,E0,Bt,_h,Ai,M0,z0,bh,Fi,P0,q0,vh,$i,j0,C0,kh,xi,I0,L0,gt,Ei,N0,bn,D0,Th,S0,O0,yh,W0,U0,B0,vs,R0,wh,H0,Q0,Mi,Cm,vn,ks,Ah,zi,V0,Fh,J0,Im,Ce,Pi,K0,qi,G0,$h,X0,Z0,Y0,ji,e2,$d,t2,o2,n2,Ci,s2,Ii,r2,a2,i2,xh,l2,d2,Rt,Eh,Li,c2,p2,Mh,Ni,h2,f2,zh,Di,m2,u2,Ph,Si,g2,_2,_t,Oi,b2,kn,v2,qh,k2,T2,jh,y2,w2,A2,Ts,F2,Ch,$2,x2,Wi,Lm,Tn,ys,Ih,Ui,E2,Lh,M2,Nm,Ie,Bi,z2,Nh,P2,q2,Ri,j2,xd,C2,I2,L2,Hi,N2,Qi,D2,S2,O2,Dh,W2,U2,Ht,Sh,Vi,B2,R2,Oh,Ji,H2,Q2,Wh,Ki,V2,J2,Uh,Gi,K2,G2,bt,Xi,X2,yn,Z2,Bh,Y2,eA,Rh,tA,oA,nA,ws,sA,Hh,rA,aA,Zi,Dm,wn,As,Qh,Yi,iA,Vh,lA,Sm,Le,el,dA,Jh,cA,pA,tl,hA,Ed,fA,mA,uA,ol,gA,nl,_A,bA,vA,Kh,kA,TA,Qt,Gh,sl,yA,wA,Xh,rl,AA,FA,Zh,al,$A,xA,Yh,il,EA,MA,vt,ll,zA,An,PA,ef,qA,jA,tf,CA,IA,LA,Fs,NA,of,DA,SA,dl,Om,Fn,$s,nf,cl,OA,sf,WA,Wm,Ne,pl,UA,rf,BA,RA,hl,HA,Md,QA,VA,JA,fl,KA,ml,GA,XA,ZA,af,YA,eF,Vt,lf,ul,tF,oF,df,gl,nF,sF,cf,_l,rF,aF,pf,bl,iF,lF,kt,vl,dF,$n,cF,hf,pF,hF,ff,fF,mF,uF,xs,gF,mf,_F,bF,kl,Um,xn,Es,uf,Tl,vF,gf,kF,Bm,De,yl,TF,En,yF,_f,wF,AF,bf,FF,$F,xF,wl,EF,zd,MF,zF,PF,Al,qF,Fl,jF,CF,IF,vf,LF,NF,Jt,kf,$l,DF,SF,Tf,xl,OF,WF,yf,El,UF,BF,wf,Ml,RF,HF,Tt,zl,QF,Mn,VF,Af,JF,KF,Ff,GF,XF,ZF,Ms,YF,$f,e$,t$,Pl,Rm;return _=new Te({}),ee=new Te({}),Us=new Te({}),Bs=new D({props:{name:"class transformers.AlbertConfig",anchor:"transformers.AlbertConfig",parameters:[{name:"vocab_size",val:" = 30000"},{name:"embedding_size",val:" = 128"},{name:"hidden_size",val:" = 4096"},{name:"num_hidden_layers",val:" = 12"},{name:"num_hidden_groups",val:" = 1"},{name:"num_attention_heads",val:" = 64"},{name:"intermediate_size",val:" = 16384"},{name:"inner_group_num",val:" = 1"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout_prob",val:" = 0"},{name:"attention_probs_dropout_prob",val:" = 0"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"classifier_dropout_prob",val:" = 0.1"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 2"},{name:"eos_token_id",val:" = 3"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/configuration_albert.py#L36",parametersDescription:[{anchor:"transformers.AlbertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30000) &#x2014; Vocabulary size of the ALBERT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertModel">AlbertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertModel">TFAlbertModel</a>.`,name:"vocab_size"},{anchor:"transformers.AlbertConfig.embedding_size",description:`<strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimensionality of vocabulary embeddings.`,name:"embedding_size"},{anchor:"transformers.AlbertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.AlbertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.AlbertConfig.num_hidden_groups",description:`<strong>num_hidden_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups for the hidden layers, parameters in the same group are shared.`,name:"num_hidden_groups"},{anchor:"transformers.AlbertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.AlbertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.AlbertConfig.inner_group_num",description:`<strong>inner_group_num</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of inner repetition of attention and ffn.`,name:"inner_group_num"},{anchor:"transformers.AlbertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.AlbertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.AlbertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.AlbertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.AlbertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertModel">AlbertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertModel">TFAlbertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.AlbertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.AlbertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.AlbertConfig.classifier_dropout_prob",description:`<strong>classifier_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for attached classifiers.`,name:"classifier_dropout_prob"},{anchor:"transformers.AlbertConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"}]}}),Hs=new we({props:{code:`from transformers import AlbertConfig, AlbertModel # Initializing an ALBERT-xxlarge style configuration albert_xxlarge_configuration = AlbertConfig() # Initializing an ALBERT-base style configuration albert_base_configuration = AlbertConfig( hidden_size=768, num_attention_heads=12, intermediate_size=3072, ) # Initializing a model from the ALBERT-base style configuration model = AlbertModel(albert_xxlarge_configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertConfig, AlbertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing an ALBERT-xxlarge style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>albert_xxlarge_configuration = AlbertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing an ALBERT-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>albert_base_configuration = AlbertConfig( <span class="hljs-meta">... </span> hidden_size=<span class="hljs-number">768</span>, <span class="hljs-meta">... 
</span> num_attention_heads=<span class="hljs-number">12</span>, <span class="hljs-meta">... </span> intermediate_size=<span class="hljs-number">3072</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the ALBERT-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertModel(albert_xxlarge_configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Qs=new Te({}),Vs=new D({props:{name:"class transformers.AlbertTokenizer",anchor:"transformers.AlbertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = False"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert.py#L59",parametersDescription:[{anchor:"transformers.AlbertTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.AlbertTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.AlbertTokenizer.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.AlbertTokenizer.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.AlbertTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.AlbertTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.AlbertTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.AlbertTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.AlbertTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.AlbertTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.AlbertTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.AlbertTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Xs=new D({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.AlbertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert.py#L253",parametersDescription:[{anchor:"transformers.AlbertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.AlbertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ys=new D({props:{name:"get_special_tokens_mask",anchor:"transformers.AlbertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert.py#L278",parametersDescription:[{anchor:"transformers.AlbertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.AlbertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.AlbertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),tr=new D({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.AlbertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert.py#L306",parametersDescription:[{anchor:"transformers.AlbertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> 
(<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.AlbertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),or=new we({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),nr=new Te({}),sr=new D({props:{name:"class transformers.AlbertTokenizerFast",anchor:"transformers.AlbertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = False"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert_fast.py#L73",parametersDescription:[{anchor:"transformers.AlbertTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.AlbertTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.AlbertTokenizerFast.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.AlbertTokenizerFast.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.AlbertTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.AlbertTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.`,name:"eos_token"},{anchor:"transformers.AlbertTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.AlbertTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.AlbertTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.AlbertTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.AlbertTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),ar=new D({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.AlbertTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert_fast.py#L170",parametersDescription:[{anchor:"transformers.AlbertTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.AlbertTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),lr=new D({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.AlbertTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/tokenization_albert_fast.py#L195",parametersDescription:[{anchor:"transformers.AlbertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.AlbertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),dr=new we({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),cr=new Te({}),pr=new D({props:{name:"class transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput",anchor:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prediction_logits",val:": FloatTensor = None"},{name:"sop_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L518",parametersDescription:[{anchor:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when 
<code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput.sop_logits",description:`<strong>sop_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"sop_logits"},{anchor:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),fr=new D({props:{name:"class transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput",anchor:"transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput",parameters:[{name:"loss",val:": Tensor = None"},{name:"prediction_logits",val:": Tensor = None"},{name:"sop_logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L649",parametersDescription:[{anchor:"transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput.sop_logits",description:`<strong>sop_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores 
of True/False continuation before SoftMax).`,name:"sop_logits"},{anchor:"transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ur=new Te({}),gr=new D({props:{name:"class transformers.AlbertModel",anchor:"transformers.AlbertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L620",parametersDescription:[{anchor:"transformers.AlbertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kr=new D({props:{name:"forward",anchor:"transformers.AlbertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L664",parametersDescription:[{anchor:"transformers.AlbertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ln=new ke({props:{$$slots:{default:[Iz]},$$scope:{ctx:C}}}),Tr=new we({props:{code:`from transformers import AlbertTokenizer, AlbertModel import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertModel.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> AlbertTokenizer, AlbertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertModel.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),yr=new Te({}),wr=new D({props:{name:"class transformers.AlbertForPreTraining",anchor:"transformers.AlbertForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L750",parametersDescription:[{anchor:"transformers.AlbertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xr=new D({props:{name:"forward",anchor:"transformers.AlbertForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"sentence_order_label",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L770",parametersDescription:[{anchor:"transformers.AlbertForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.AlbertForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.AlbertForPreTraining.forward.sentence_order_label",description:`<strong>sentence_order_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>. <code>0</code> indicates original order (sequence A, then sequence B), <code>1</code> indicates switched order (sequence B, then sequence A).`,name:"sentence_order_label"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput" >transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</p> </li> <li> <p><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>sop_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput" >transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Dn=new ke({props:{$$slots:{default:[Lz]},$$scope:{ctx:C}}}),Er=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForPreTraining import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForPreTraining.from_pretrained('albert-base-v2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids) prediction_logits = outputs.prediction_logits sop_logits = outputs.sop_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>sop_logits = outputs.sop_logits`}}),Mr=new Te({}),zr=new D({props:{name:"class transformers.AlbertForMaskedLM",anchor:"transformers.AlbertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L894",parametersDescription:[{anchor:"transformers.AlbertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ir=new D({props:{name:"forward",anchor:"transformers.AlbertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L916",parametersDescription:[{anchor:"transformers.AlbertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.AlbertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new ke({props:{$$slots:{default:[Nz]},$$scope:{ctx:C}}}),Lr=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForMaskedLM import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForMaskedLM.from_pretrained('albert-base-v2') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Nr=new Te({}),Dr=new D({props:{name:"class transformers.AlbertForSequenceClassification",anchor:"transformers.AlbertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L982",parametersDescription:[{anchor:"transformers.AlbertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ur=new D({props:{name:"forward",anchor:"transformers.AlbertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L995",parametersDescription:[{anchor:"transformers.AlbertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.AlbertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Un=new ke({props:{$$slots:{default:[Dz]},$$scope:{ctx:C}}}),Br=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForSequenceClassification import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForSequenceClassification.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", 
return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Rr=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForSequenceClassification import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForSequenceClassification.from_pretrained('albert-base-v2', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Hr=new Te({}),Qr=new D({props:{name:"class 
transformers.AlbertForMultipleChoice",anchor:"transformers.AlbertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L1280",parametersDescription:[{anchor:"transformers.AlbertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gr=new D({props:{name:"forward",anchor:"transformers.AlbertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L1291",parametersDescription:[{anchor:"transformers.AlbertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.AlbertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <em>num_choices</em> is the size of the second dimension of the input tensors. 
(see <em>input_ids</em> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Rn=new ke({props:{$$slots:{default:[Sz]},$$scope:{ctx:C}}}),Xr=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForMultipleChoice import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForMultipleChoice.from_pretrained('albert-base-v2') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Zr=new Te({}),Yr=new D({props:{name:"class transformers.AlbertForTokenClassification",anchor:"transformers.AlbertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L1081",parametersDescription:[{anchor:"transformers.AlbertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),na=new D({props:{name:"forward",anchor:"transformers.AlbertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L1101",parametersDescription:[{anchor:"transformers.AlbertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.AlbertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qn=new ke({props:{$$slots:{default:[Oz]},$$scope:{ctx:C}}}),sa=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForTokenClassification import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForTokenClassification.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) 
<span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ra=new Te({}),aa=new D({props:{name:"class transformers.AlbertForQuestionAnswering",anchor:"transformers.AlbertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L1177",parametersDescription:[{anchor:"transformers.AlbertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ca=new D({props:{name:"forward",anchor:"transformers.AlbertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_albert.py#L1191",parametersDescription:[{anchor:"transformers.AlbertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.AlbertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.AlbertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.AlbertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.AlbertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.AlbertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.AlbertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.AlbertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.AlbertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.AlbertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.AlbertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Jn=new ke({props:{$$slots:{default:[Wz]},$$scope:{ctx:C}}}),pa=new we({props:{code:`from transformers import AlbertTokenizer, AlbertForQuestionAnswering import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, AlbertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AlbertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),ha=new Te({}),fa=new D({props:{name:"class transformers.TFAlbertModel",anchor:"transformers.TFAlbertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L775",parametersDescription:[{anchor:"transformers.TFAlbertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gn=new ke({props:{$$slots:{default:[Uz]},$$scope:{ctx:C}}}),_a=new D({props:{name:"call",anchor:"transformers.TFAlbertModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L781",parametersDescription:[{anchor:"transformers.TFAlbertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),Xn=new ke({props:{$$slots:{default:[Bz]},$$scope:{ctx:C}}}),ba=new we({props:{code:`from transformers import AlbertTokenizer, TFAlbertModel import tensorflow as tf tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertModel.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertModel.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),va=new Te({}),ka=new D({props:{name:"class transformers.TFAlbertForPreTraining",anchor:"transformers.TFAlbertForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L851",parametersDescription:[{anchor:"transformers.TFAlbertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Yn=new ke({props:{$$slots:{default:[Rz]},$$scope:{ctx:C}}}),Aa=new D({props:{name:"call",anchor:"transformers.TFAlbertForPreTraining.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"sentence_order_label",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L867",parametersDescription:[{anchor:"transformers.TFAlbertForPreTraining.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertForPreTraining.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertForPreTraining.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertForPreTraining.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertForPreTraining.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertForPreTraining.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput" >transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>sop_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput" >transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),es=new ke({props:{$$slots:{default:[Hz]},$$scope:{ctx:C}}}),Fa=new we({props:{code:`import tensorflow as tf from transformers import AlbertTokenizer, TFAlbertForPreTraining tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForPreTraining.from_pretrained('albert-base-v2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 outputs = model(input_ids) prediction_logits = outputs.prediction_logits sop_logits = outputs.sop_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tf.constant(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>))[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>sop_logits = outputs.sop_logits`}}),$a=new Te({}),xa=new D({props:{name:"class transformers.TFAlbertForMaskedLM",anchor:"transformers.TFAlbertForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L986",parametersDescription:[{anchor:"transformers.TFAlbertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),os=new ke({props:{$$slots:{default:[Qz]},$$scope:{ctx:C}}}),qa=new D({props:{name:"call",anchor:"transformers.TFAlbertForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L999",parametersDescription:[{anchor:"transformers.TFAlbertForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFAlbertForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" 
>transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ns=new ke({props:{$$slots:{default:[Vz]},$$scope:{ctx:C}}}),ja=new we({props:{code:`from transformers import AlbertTokenizer, TFAlbertForMaskedLM import tensorflow as tf tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForMaskedLM.from_pretrained('albert-base-v2') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ca=new Te({}),Ia=new D({props:{name:"class transformers.TFAlbertForSequenceClassification",anchor:"transformers.TFAlbertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1087",parametersDescription:[{anchor:"transformers.TFAlbertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rs=new ke({props:{$$slots:{default:[Jz]},$$scope:{ctx:C}}}),Sa=new D({props:{name:"call",anchor:"transformers.TFAlbertForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1103",parametersDescription:[{anchor:"transformers.TFAlbertForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFAlbertForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" 
>transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),as=new ke({props:{$$slots:{default:[Kz]},$$scope:{ctx:C}}}),Oa=new we({props:{code:`from transformers import AlbertTokenizer, TFAlbertForSequenceClassification import tensorflow as tf tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForSequenceClassification.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Wa=new Te({}),Ua=new D({props:{name:"class transformers.TFAlbertForMultipleChoice",anchor:"transformers.TFAlbertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1415",parametersDescription:[{anchor:"transformers.TFAlbertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ls=new ke({props:{$$slots:{default:[Gz]},$$scope:{ctx:C}}}),Qa=new D({props:{name:"call",anchor:"transformers.TFAlbertForMultipleChoice.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1439",parametersDescription:[{anchor:"transformers.TFAlbertForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFAlbertForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ds=new ke({props:{$$slots:{default:[Xz]},$$scope:{ctx:C}}}),Va=new we({props:{code:`from transformers import AlbertTokenizer, TFAlbertForMultipleChoice import tensorflow as tf tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForMultipleChoice.from_pretrained('albert-base-v2') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ja=new Te({}),Ka=new D({props:{name:"class transformers.TFAlbertForTokenClassification",anchor:"transformers.TFAlbertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1190",parametersDescription:[{anchor:"transformers.TFAlbertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ps=new ke({props:{$$slots:{default:[Zz]},$$scope:{ctx:C}}}),Ya=new D({props:{name:"call",anchor:"transformers.TFAlbertForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1211",parametersDescription:[{anchor:"transformers.TFAlbertForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFAlbertForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),hs=new ke({props:{$$slots:{default:[Yz]},$$scope:{ctx:C}}}),ei=new we({props:{code:`from transformers import AlbertTokenizer, 
TFAlbertForTokenClassification import tensorflow as tf tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForTokenClassification.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ti=new Te({}),oi=new D({props:{name:"class transformers.TFAlbertForQuestionAnswering",anchor:"transformers.TFAlbertForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1297",parametersDescription:[{anchor:"transformers.TFAlbertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ms=new ke({props:{$$slots:{default:[e5]},$$scope:{ctx:C}}}),ai=new D({props:{name:"call",anchor:"transformers.TFAlbertForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_tf_albert.py#L1311",parametersDescription:[{anchor:"transformers.TFAlbertForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFAlbertForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),us=new ke({props:{$$slots:{default:[t5]},$$scope:{ctx:C}}}),ii=new we({props:{code:`from transformers import AlbertTokenizer, TFAlbertForQuestionAnswering import tensorflow as tf tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForQuestionAnswering.from_pretrained('albert-base-v2') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, TFAlbertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> 
tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAlbertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),li=new Te({}),di=new D({props:{name:"class transformers.FlaxAlbertModel",anchor:"transformers.FlaxAlbertModel",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L667",parametersDescription:[{anchor:"transformers.FlaxAlbertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),_i=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_s=new ke({props:{$$slots:{default:[o5]},$$scope:{ctx:C}}}),bi=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertModel tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertModel.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertModel.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),vi=new Te({}),ki=new D({props:{name:"class transformers.FlaxAlbertForPreTraining",anchor:"transformers.FlaxAlbertForPreTraining",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L738",parametersDescription:[{anchor:"transformers.FlaxAlbertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertForPreTraining.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Ei=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.albert.modeling_flax_albert.FlaxAlbertForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>prediction_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>sop_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.albert.modeling_flax_albert.FlaxAlbertForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),vs=new ke({props:{$$slots:{default:[n5]},$$scope:{ctx:C}}}),Mi=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertForPreTraining tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertForPreTraining.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.sop_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.sop_logits`}}),zi=new Te({}),Pi=new D({props:{name:"class transformers.FlaxAlbertForMaskedLM",anchor:"transformers.FlaxAlbertForMaskedLM",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L821",parametersDescription:[{anchor:"transformers.FlaxAlbertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertForMaskedLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Oi=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ts=new ke({props:{$$slots:{default:[s5]},$$scope:{ctx:C}}}),Wi=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertForMaskedLM tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertForMaskedLM.from_pretrained('albert-base-v2') inputs = tokenizer("The capital of France is [MASK].", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ui=new Te({}),Bi=new D({props:{name:"class transformers.FlaxAlbertForSequenceClassification",anchor:"transformers.FlaxAlbertForSequenceClassification",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L891",parametersDescription:[{anchor:"transformers.FlaxAlbertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertForSequenceClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Xi=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ws=new ke({props:{$$slots:{default:[r5]},$$scope:{ctx:C}}}),Zi=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertForSequenceClassification tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertForSequenceClassification.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Yi=new Te({}),el=new D({props:{name:"class transformers.FlaxAlbertForMultipleChoice",anchor:"transformers.FlaxAlbertForMultipleChoice",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L965",parametersDescription:[{anchor:"transformers.FlaxAlbertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertForMultipleChoice.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ll=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fs=new ke({props:{$$slots:{default:[a5]},$$scope:{ctx:C}}}),dl=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertForMultipleChoice tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertForMultipleChoice.from_pretrained('albert-base-v2') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='jax', padding=True) outputs = model(**{k: v[None, :] for k,v in encoding.items()}) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),cl=new Te({}),pl=new D({props:{name:"class transformers.FlaxAlbertForTokenClassification",anchor:"transformers.FlaxAlbertForTokenClassification",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L1039",parametersDescription:[{anchor:"transformers.FlaxAlbertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertForTokenClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),vl=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xs=new ke({props:{$$slots:{default:[i5]},$$scope:{ctx:C}}}),kl=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertForTokenClassification tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertForTokenClassification.from_pretrained('albert-base-v2') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Tl=new Te({}),yl=new D({props:{name:"class transformers.FlaxAlbertForQuestionAnswering",anchor:"transformers.FlaxAlbertForQuestionAnswering",parameters:[{name:"config",val:": AlbertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L1108",parametersDescription:[{anchor:"transformers.FlaxAlbertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxAlbertForQuestionAnswering.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),zl=new D({props:{name:"__call__",anchor:"transformers.FlaxAlbertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/albert/modeling_flax_albert.py#L544",parametersDescription:[{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer">AlbertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxAlbertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig" >AlbertConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ms=new ke({props:{$$slots:{default:[l5]},$$scope:{ctx:C}}}),Pl=new we({props:{code:`from transformers import AlbertTokenizer, FlaxAlbertForQuestionAnswering tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = FlaxAlbertForQuestionAnswering.from_pretrained('albert-base-v2') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='jax') outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AlbertTokenizer, FlaxAlbertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AlbertTokenizer.from_pretrained(<span class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAlbertForQuestionAnswering.from_pretrained(<span 
class="hljs-string">&#x27;albert-base-v2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){p=n("meta"),$=l(),g=n("h1"),b=n("a"),F=n("span"),v(_.$$.fragment),u=l(),x=n("span"),pe=a("ALBERT"),G=l(),M=n("h2"),Z=n("a"),U=n("span"),v(ee.$$.fragment),he=l(),B=n("span"),fe=a("Overview"),ae=l(),J=n("p"),I=a("The ALBERT model was proposed in "),te=n("a"),X=a("ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),z=a(` by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. It presents two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT:`),q=l(),Y=n("ul"),R=n("li"),me=a("Splitting the embedding matrix into two smaller matrices."),ue=l(),H=n("li"),ge=a("Using repeating layers split among groups."),ie=l(),P=n("p"),_e=a("The abstract from the paper is the following:"),Q=l(),oe=n("p"),se=n("em"),V=a(`Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations, longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and SQuAD benchmarks while having fewer parameters compared to BERT-large.`),le=l(),ne=n("p"),L=a("Tips:"),de=l(),S=n("ul"),re=n("li"),h=a(`ALBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),E=l(),K=n("li"),Ae=a(`ALBERT uses repeating layers which results in a small memory footprint, however the computational cost remains similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same number of (repeating) layers.`),ye=l(),j=n("p"),Fe=a("This model was contributed by "),ve=n("a"),$e=a("lysandre"),N=a(`. This model jax version was contributed by `),O=n("a"),xe=a("kamalkraj"),Ee=a(". The original code can be found "),W=n("a"),Me=a("here"),ze=a("."),ce=l(),be=n("h2"),zn=n("a"),dc=n("span"),v(Us.$$.fragment),gg=l(),cc=n("span"),_g=a("AlbertConfig"),Jf=l(),Je=n("div"),v(Bs.$$.fragment),bg=l(),Ot=n("p"),vg=a("This is the configuration class to store the configuration of a "),Dl=n("a"),kg=a("AlbertModel"),Tg=a(` or a `),Sl=n("a"),yg=a("TFAlbertModel"),wg=a(`. It is used to instantiate an ALBERT model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the ALBERT `),Rs=n("a"),Ag=a("xxlarge"),Fg=a(" architecture."),$g=l(),xo=n("p"),xg=a("Configuration objects inherit from "),Ol=n("a"),Eg=a("PretrainedConfig"),Mg=a(` and can be used to control the model outputs. Read the documentation from `),Wl=n("a"),zg=a("PretrainedConfig"),Pg=a(" for more information."),qg=l(),pc=n("p"),jg=a("Examples:"),Cg=l(),v(Hs.$$.fragment),Kf=l(),Eo=n("h2"),Pn=n("a"),hc=n("span"),v(Qs.$$.fragment),Ig=l(),fc=n("span"),Lg=a("AlbertTokenizer"),Gf=l(),Pe=n("div"),v(Vs.$$.fragment),Ng=l(),Js=n("p"),Dg=a("Construct an ALBERT tokenizer. Based on "),Ks=n("a"),Sg=a("SentencePiece"),Og=a("."),Wg=l(),Gs=n("p"),Ug=a("This tokenizer inherits from "),Ul=n("a"),Bg=a("PreTrainedTokenizer"),Rg=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Hg=l(),Mo=n("p"),Qg=a(`Attributes: sp_model (`),mc=n("code"),Vg=a("SentencePieceProcessor"),Jg=a(`): The `),uc=n("em"),Kg=a("SentencePiece"),Gg=a(" processor that is used for every conversion (string, tokens and IDs)."),Xg=l(),Gt=n("div"),v(Xs.$$.fragment),Zg=l(),gc=n("p"),Yg=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format:`),e_=l(),Zs=n("ul"),Bl=n("li"),t_=a("single sequence: "),_c=n("code"),o_=a("[CLS] X [SEP]"),n_=l(),Rl=n("li"),s_=a("pair of sequences: "),bc=n("code"),r_=a("[CLS] A [SEP] B [SEP]"),a_=l(),qn=n("div"),v(Ys.$$.fragment),i_=l(),er=n("p"),l_=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),vc=n("code"),d_=a("prepare_for_model"),c_=a(" method."),p_=l(),Pt=n("div"),v(tr.$$.fragment),h_=l(),kc=n("p"),f_=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format:`),m_=l(),v(or.$$.fragment),u_=l(),zo=n("p"),g_=a("If "),Tc=n("code"),__=a("token_ids_1"),b_=a(" is "),yc=n("code"),v_=a("None"),k_=a(", this method only returns the first portion of the mask (0s)."),T_=l(),wc=n("div"),Xf=l(),Po=n("h2"),jn=n("a"),Ac=n("span"),v(nr.$$.fragment),y_=l(),Fc=n("span"),w_=a("AlbertTokenizerFast"),Zf=l(),zt=n("div"),v(sr.$$.fragment),A_=l(),Wt=n("p"),F_=a("Construct a \u201Cfast\u201D ALBERT tokenizer (backed by HuggingFace\u2019s "),$c=n("em"),$_=a("tokenizers"),x_=a(" library). Based on "),rr=n("a"),E_=a("Unigram"),M_=a(`. This tokenizer inherits from `),Hl=n("a"),z_=a("PreTrainedTokenizerFast"),P_=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),q_=l(),Xt=n("div"),v(ar.$$.fragment),j_=l(),xc=n("p"),C_=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format:`),I_=l(),ir=n("ul"),Ql=n("li"),L_=a("single sequence: "),Ec=n("code"),N_=a("[CLS] X [SEP]"),D_=l(),Vl=n("li"),S_=a("pair of sequences: "),Mc=n("code"),O_=a("[CLS] A [SEP] B [SEP]"),W_=l(),qt=n("div"),v(lr.$$.fragment),U_=l(),zc=n("p"),B_=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. 
An ALBERT sequence pair mask has the following format:`),R_=l(),v(dr.$$.fragment),H_=l(),Pc=n("p"),Q_=a("if token_ids_1 is None, only returns the first portion of the mask (0s)."),Yf=l(),qo=n("h2"),Cn=n("a"),qc=n("span"),v(cr.$$.fragment),V_=l(),jc=n("span"),J_=a("Albert specific outputs"),em=l(),jo=n("div"),v(pr.$$.fragment),K_=l(),hr=n("p"),G_=a("Output type of "),Jl=n("a"),X_=a("AlbertForPreTraining"),Z_=a("."),tm=l(),Co=n("div"),v(fr.$$.fragment),Y_=l(),mr=n("p"),eb=a("Output type of "),Kl=n("a"),tb=a("TFAlbertForPreTraining"),ob=a("."),om=l(),Io=n("h2"),In=n("a"),Cc=n("span"),v(ur.$$.fragment),nb=l(),Ic=n("span"),sb=a("AlbertModel"),nm=l(),Ke=n("div"),v(gr.$$.fragment),rb=l(),Lc=n("p"),ab=a("The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top."),ib=l(),_r=n("p"),lb=a("This model inherits from "),Gl=n("a"),db=a("PreTrainedModel"),cb=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pb=l(),br=n("p"),hb=a("This model is also a PyTorch "),vr=n("a"),fb=a("torch.nn.Module"),mb=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ub=l(),ot=n("div"),v(kr.$$.fragment),gb=l(),Lo=n("p"),_b=a("The "),Xl=n("a"),bb=a("AlbertModel"),vb=a(" forward method, overrides the "),Nc=n("code"),kb=a("__call__"),Tb=a(" special method."),yb=l(),v(Ln.$$.fragment),wb=l(),Dc=n("p"),Ab=a("Example:"),Fb=l(),v(Tr.$$.fragment),sm=l(),No=n("h2"),Nn=n("a"),Sc=n("span"),v(yr.$$.fragment),$b=l(),Oc=n("span"),xb=a("AlbertForPreTraining"),rm=l(),Ge=n("div"),v(wr.$$.fragment),Eb=l(),Do=n("p"),Mb=a("Albert Model with two heads on top as done during the pretraining: a "),Wc=n("code"),zb=a("masked language modeling"),Pb=a(` head and a `),Uc=n("code"),qb=a("sentence order prediction (classification)"),jb=a(" head."),Cb=l(),Ar=n("p"),Ib=a("This model inherits from "),Zl=n("a"),Lb=a("PreTrainedModel"),Nb=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Db=l(),Fr=n("p"),Sb=a("This model is also a PyTorch "),$r=n("a"),Ob=a("torch.nn.Module"),Wb=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ub=l(),nt=n("div"),v(xr.$$.fragment),Bb=l(),So=n("p"),Rb=a("The "),Yl=n("a"),Hb=a("AlbertForPreTraining"),Qb=a(" forward method, overrides the "),Bc=n("code"),Vb=a("__call__"),Jb=a(" special method."),Kb=l(),v(Dn.$$.fragment),Gb=l(),Rc=n("p"),Xb=a("Example:"),Zb=l(),v(Er.$$.fragment),am=l(),Oo=n("h2"),Sn=n("a"),Hc=n("span"),v(Mr.$$.fragment),Yb=l(),Qc=n("span"),ev=a("AlbertForMaskedLM"),im=l(),Xe=n("div"),v(zr.$$.fragment),tv=l(),Pr=n("p"),ov=a("Albert Model with a "),Vc=n("code"),nv=a("language modeling"),sv=a(" head on top."),rv=l(),qr=n("p"),av=a("This model inherits from "),ed=n("a"),iv=a("PreTrainedModel"),lv=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dv=l(),jr=n("p"),cv=a("This model is also a PyTorch "),Cr=n("a"),pv=a("torch.nn.Module"),hv=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fv=l(),st=n("div"),v(Ir.$$.fragment),mv=l(),Wo=n("p"),uv=a("The "),td=n("a"),gv=a("AlbertForMaskedLM"),_v=a(" forward method, overrides the "),Jc=n("code"),bv=a("__call__"),vv=a(" special method."),kv=l(),v(On.$$.fragment),Tv=l(),Kc=n("p"),yv=a("Example:"),wv=l(),v(Lr.$$.fragment),lm=l(),Uo=n("h2"),Wn=n("a"),Gc=n("span"),v(Nr.$$.fragment),Av=l(),Xc=n("span"),Fv=a("AlbertForSequenceClassification"),dm=l(),Ze=n("div"),v(Dr.$$.fragment),$v=l(),Zc=n("p"),xv=a(`Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ev=l(),Sr=n("p"),Mv=a("This model inherits from "),od=n("a"),zv=a("PreTrainedModel"),Pv=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qv=l(),Or=n("p"),jv=a("This model is also a PyTorch "),Wr=n("a"),Cv=a("torch.nn.Module"),Iv=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lv=l(),Se=n("div"),v(Ur.$$.fragment),Nv=l(),Bo=n("p"),Dv=a("The "),nd=n("a"),Sv=a("AlbertForSequenceClassification"),Ov=a(" forward method, overrides the "),Yc=n("code"),Wv=a("__call__"),Uv=a(" special method."),Bv=l(),v(Un.$$.fragment),Rv=l(),ep=n("p"),Hv=a("Example of single-label classification:"),Qv=l(),v(Br.$$.fragment),Vv=l(),tp=n("p"),Jv=a("Example of multi-label classification:"),Kv=l(),v(Rr.$$.fragment),cm=l(),Ro=n("h2"),Bn=n("a"),op=n("span"),v(Hr.$$.fragment),Gv=l(),np=n("span"),Xv=a("AlbertForMultipleChoice"),pm=l(),Ye=n("div"),v(Qr.$$.fragment),Zv=l(),sp=n("p"),Yv=a(`Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ek=l(),Vr=n("p"),tk=a("This model inherits from "),sd=n("a"),ok=a("PreTrainedModel"),nk=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sk=l(),Jr=n("p"),rk=a("This model is also a PyTorch "),Kr=n("a"),ak=a("torch.nn.Module"),ik=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),lk=l(),rt=n("div"),v(Gr.$$.fragment),dk=l(),Ho=n("p"),ck=a("The "),rd=n("a"),pk=a("AlbertForMultipleChoice"),hk=a(" forward method, overrides the "),rp=n("code"),fk=a("__call__"),mk=a(" special method."),uk=l(),v(Rn.$$.fragment),gk=l(),ap=n("p"),_k=a("Example:"),bk=l(),v(Xr.$$.fragment),hm=l(),Qo=n("h2"),Hn=n("a"),ip=n("span"),v(Zr.$$.fragment),vk=l(),lp=n("span"),kk=a("AlbertForTokenClassification"),fm=l(),et=n("div"),v(Yr.$$.fragment),Tk=l(),dp=n("p"),yk=a(`Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),wk=l(),ea=n("p"),Ak=a("This model inherits from "),ad=n("a"),Fk=a("PreTrainedModel"),$k=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xk=l(),ta=n("p"),Ek=a("This model is also a PyTorch "),oa=n("a"),Mk=a("torch.nn.Module"),zk=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pk=l(),at=n("div"),v(na.$$.fragment),qk=l(),Vo=n("p"),jk=a("The "),id=n("a"),Ck=a("AlbertForTokenClassification"),Ik=a(" forward method, overrides the "),cp=n("code"),Lk=a("__call__"),Nk=a(" special method."),Dk=l(),v(Qn.$$.fragment),Sk=l(),pp=n("p"),Ok=a("Example:"),Wk=l(),v(sa.$$.fragment),mm=l(),Jo=n("h2"),Vn=n("a"),hp=n("span"),v(ra.$$.fragment),Uk=l(),fp=n("span"),Bk=a("AlbertForQuestionAnswering"),um=l(),tt=n("div"),v(aa.$$.fragment),Rk=l(),Ko=n("p"),Hk=a(`Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),mp=n("code"),Qk=a("span start logits"),Vk=a(" and "),up=n("code"),Jk=a("span end logits"),Kk=a(")."),Gk=l(),ia=n("p"),Xk=a("This model inherits from "),ld=n("a"),Zk=a("PreTrainedModel"),Yk=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),eT=l(),la=n("p"),tT=a("This model is also a PyTorch "),da=n("a"),oT=a("torch.nn.Module"),nT=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sT=l(),it=n("div"),v(ca.$$.fragment),rT=l(),Go=n("p"),aT=a("The "),dd=n("a"),iT=a("AlbertForQuestionAnswering"),lT=a(" forward method, overrides the "),gp=n("code"),dT=a("__call__"),cT=a(" special method."),pT=l(),v(Jn.$$.fragment),hT=l(),_p=n("p"),fT=a("Example:"),mT=l(),v(pa.$$.fragment),gm=l(),Xo=n("h2"),Kn=n("a"),bp=n("span"),v(ha.$$.fragment),uT=l(),vp=n("span"),gT=a("TFAlbertModel"),_m=l(),Oe=n("div"),v(fa.$$.fragment),_T=l(),kp=n("p"),bT=a("The bare Albert Model transformer outputting raw hidden-states without any specific head on top."),vT=l(),ma=n("p"),kT=a("This model inherits from "),cd=n("a"),TT=a("TFPreTrainedModel"),yT=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wT=l(),ua=n("p"),AT=a("This model is also a "),ga=n("a"),FT=a("tf.keras.Model"),$T=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),xT=l(),v(Gn.$$.fragment),ET=l(),lt=n("div"),v(_a.$$.fragment),MT=l(),Zo=n("p"),zT=a("The "),pd=n("a"),PT=a("TFAlbertModel"),qT=a(" forward method, overrides the "),Tp=n("code"),jT=a("__call__"),CT=a(" special method."),IT=l(),v(Xn.$$.fragment),LT=l(),yp=n("p"),NT=a("Example:"),DT=l(),v(ba.$$.fragment),bm=l(),Yo=n("h2"),Zn=n("a"),wp=n("span"),v(va.$$.fragment),ST=l(),Ap=n("span"),OT=a("TFAlbertForPreTraining"),vm=l(),We=n("div"),v(ka.$$.fragment),WT=l(),en=n("p"),UT=a("Albert Model with two heads on top for pretraining: a "),Fp=n("code"),BT=a("masked language modeling"),RT=a(" head and a "),$p=n("code"),HT=a("sentence order prediction"),QT=a(" (classification) head."),VT=l(),Ta=n("p"),JT=a("This model inherits from "),hd=n("a"),KT=a("TFPreTrainedModel"),GT=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),XT=l(),ya=n("p"),ZT=a("This model is also a "),wa=n("a"),YT=a("tf.keras.Model"),e1=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),t1=l(),v(Yn.$$.fragment),o1=l(),dt=n("div"),v(Aa.$$.fragment),n1=l(),tn=n("p"),s1=a("The "),fd=n("a"),r1=a("TFAlbertForPreTraining"),a1=a(" forward method, overrides the "),xp=n("code"),i1=a("__call__"),l1=a(" special method."),d1=l(),v(es.$$.fragment),c1=l(),Ep=n("p"),p1=a("Example:"),h1=l(),v(Fa.$$.fragment),km=l(),on=n("h2"),ts=n("a"),Mp=n("span"),v($a.$$.fragment),f1=l(),zp=n("span"),m1=a("TFAlbertForMaskedLM"),Tm=l(),Ue=n("div"),v(xa.$$.fragment),u1=l(),Ea=n("p"),g1=a("Albert Model with a "),Pp=n("code"),_1=a("language modeling"),b1=a(" head on top."),v1=l(),Ma=n("p"),k1=a("This model inherits from "),md=n("a"),T1=a("TFPreTrainedModel"),y1=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),w1=l(),za=n("p"),A1=a("This model is also a "),Pa=n("a"),F1=a("tf.keras.Model"),$1=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),x1=l(),v(os.$$.fragment),E1=l(),ct=n("div"),v(qa.$$.fragment),M1=l(),nn=n("p"),z1=a("The "),ud=n("a"),P1=a("TFAlbertForMaskedLM"),q1=a(" forward method, overrides the "),qp=n("code"),j1=a("__call__"),C1=a(" special method."),I1=l(),v(ns.$$.fragment),L1=l(),jp=n("p"),N1=a("Example:"),D1=l(),v(ja.$$.fragment),ym=l(),sn=n("h2"),ss=n("a"),Cp=n("span"),v(Ca.$$.fragment),S1=l(),Ip=n("span"),O1=a("TFAlbertForSequenceClassification"),wm=l(),Be=n("div"),v(Ia.$$.fragment),W1=l(),Lp=n("p"),U1=a(`Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),B1=l(),La=n("p"),R1=a("This model inherits from "),gd=n("a"),H1=a("TFPreTrainedModel"),Q1=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),V1=l(),Na=n("p"),J1=a("This model is also a "),Da=n("a"),K1=a("tf.keras.Model"),G1=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),X1=l(),v(rs.$$.fragment),Z1=l(),pt=n("div"),v(Sa.$$.fragment),Y1=l(),rn=n("p"),ey=a("The "),_d=n("a"),ty=a("TFAlbertForSequenceClassification"),oy=a(" forward method, overrides the "),Np=n("code"),ny=a("__call__"),sy=a(" special method."),ry=l(),v(as.$$.fragment),ay=l(),Dp=n("p"),iy=a("Example:"),ly=l(),v(Oa.$$.fragment),Am=l(),an=n("h2"),is=n("a"),Sp=n("span"),v(Wa.$$.fragment),dy=l(),Op=n("span"),cy=a("TFAlbertForMultipleChoice"),Fm=l(),Re=n("div"),v(Ua.$$.fragment),py=l(),Wp=n("p"),hy=a(`Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),fy=l(),Ba=n("p"),my=a("This model inherits from "),bd=n("a"),uy=a("TFPreTrainedModel"),gy=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_y=l(),Ra=n("p"),by=a("This model is also a "),Ha=n("a"),vy=a("tf.keras.Model"),ky=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ty=l(),v(ls.$$.fragment),yy=l(),ht=n("div"),v(Qa.$$.fragment),wy=l(),ln=n("p"),Ay=a("The "),vd=n("a"),Fy=a("TFAlbertForMultipleChoice"),$y=a(" forward method, overrides the "),Up=n("code"),xy=a("__call__"),Ey=a(" special method."),My=l(),v(ds.$$.fragment),zy=l(),Bp=n("p"),Py=a("Example:"),qy=l(),v(Va.$$.fragment),$m=l(),dn=n("h2"),cs=n("a"),Rp=n("span"),v(Ja.$$.fragment),jy=l(),Hp=n("span"),Cy=a("TFAlbertForTokenClassification"),xm=l(),He=n("div"),v(Ka.$$.fragment),Iy=l(),Qp=n("p"),Ly=a(`Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ny=l(),Ga=n("p"),Dy=a("This model inherits from "),kd=n("a"),Sy=a("TFPreTrainedModel"),Oy=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wy=l(),Xa=n("p"),Uy=a("This model is also a "),Za=n("a"),By=a("tf.keras.Model"),Ry=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hy=l(),v(ps.$$.fragment),Qy=l(),ft=n("div"),v(Ya.$$.fragment),Vy=l(),cn=n("p"),Jy=a("The "),Td=n("a"),Ky=a("TFAlbertForTokenClassification"),Gy=a(" forward method, overrides the "),Vp=n("code"),Xy=a("__call__"),Zy=a(" special method."),Yy=l(),v(hs.$$.fragment),ew=l(),Jp=n("p"),tw=a("Example:"),ow=l(),v(ei.$$.fragment),Em=l(),pn=n("h2"),fs=n("a"),Kp=n("span"),v(ti.$$.fragment),nw=l(),Gp=n("span"),sw=a("TFAlbertForQuestionAnswering"),Mm=l(),Qe=n("div"),v(oi.$$.fragment),rw=l(),hn=n("p"),aw=a(`Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Xp=n("code"),iw=a("span start logits"),lw=a(" and "),Zp=n("code"),dw=a("span end logits"),cw=a(")."),pw=l(),ni=n("p"),hw=a("This model inherits from "),yd=n("a"),fw=a("TFPreTrainedModel"),mw=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uw=l(),si=n("p"),gw=a("This model is also a "),ri=n("a"),_w=a("tf.keras.Model"),bw=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vw=l(),v(ms.$$.fragment),kw=l(),mt=n("div"),v(ai.$$.fragment),Tw=l(),fn=n("p"),yw=a("The "),wd=n("a"),ww=a("TFAlbertForQuestionAnswering"),Aw=a(" forward method, overrides the "),Yp=n("code"),Fw=a("__call__"),$w=a(" special method."),xw=l(),v(us.$$.fragment),Ew=l(),eh=n("p"),Mw=a("Example:"),zw=l(),v(ii.$$.fragment),zm=l(),mn=n("h2"),gs=n("a"),th=n("span"),v(li.$$.fragment),Pw=l(),oh=n("span"),qw=a("FlaxAlbertModel"),Pm=l(),qe=n("div"),v(di.$$.fragment),jw=l(),nh=n("p"),Cw=a("The bare Albert Model transformer outputting raw hidden-states without any specific head on top."),Iw=l(),ci=n("p"),Lw=a("This model inherits from "),Ad=n("a"),Nw=a("FlaxPreTrainedModel"),Dw=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Sw=l(),pi=n("p"),Ow=a("This model is also a Flax Linen "),hi=n("a"),Ww=a("flax.linen.Module"),Uw=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Bw=l(),sh=n("p"),Rw=a("Finally, this model supports inherent JAX features such as:"),Hw=l(),Ut=n("ul"),rh=n("li"),fi=n("a"),Qw=a("Just-In-Time (JIT) compilation"),Vw=l(),ah=n("li"),mi=n("a"),Jw=a("Automatic Differentiation"),Kw=l(),ih=n("li"),ui=n("a"),Gw=a("Vectorization"),Xw=l(),lh=n("li"),gi=n("a"),Zw=a("Parallelization"),Yw=l(),ut=n("div"),v(_i.$$.fragment),e0=l(),un=n("p"),t0=a("The "),dh=n("code"),o0=a("FlaxAlbertPreTrainedModel"),n0=a(" forward method, overrides the "),ch=n("code"),s0=a("__call__"),r0=a(" special method."),a0=l(),v(_s.$$.fragment),i0=l(),ph=n("p"),l0=a("Example:"),d0=l(),v(bi.$$.fragment),qm=l(),gn=n("h2"),bs=n("a"),hh=n("span"),v(vi.$$.fragment),c0=l(),fh=n("span"),p0=a("FlaxAlbertForPreTraining"),jm=l(),je=n("div"),v(ki.$$.fragment),h0=l(),_n=n("p"),f0=a("Albert Model with two heads on top as done during the pretraining: a "),mh=n("code"),m0=a("masked language modeling"),u0=a(` head and a `),uh=n("code"),g0=a("sentence order prediction (classification)"),_0=a(" head."),b0=l(),Ti=n("p"),v0=a("This model inherits from "),Fd=n("a"),k0=a("FlaxPreTrainedModel"),T0=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),y0=l(),yi=n("p"),w0=a("This model is also a Flax Linen "),wi=n("a"),A0=a("flax.linen.Module"),F0=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),$0=l(),gh=n("p"),x0=a("Finally, this model supports inherent JAX features such as:"),E0=l(),Bt=n("ul"),_h=n("li"),Ai=n("a"),M0=a("Just-In-Time (JIT) compilation"),z0=l(),bh=n("li"),Fi=n("a"),P0=a("Automatic Differentiation"),q0=l(),vh=n("li"),$i=n("a"),j0=a("Vectorization"),C0=l(),kh=n("li"),xi=n("a"),I0=a("Parallelization"),L0=l(),gt=n("div"),v(Ei.$$.fragment),N0=l(),bn=n("p"),D0=a("The "),Th=n("code"),S0=a("FlaxAlbertPreTrainedModel"),O0=a(" forward method, overrides the "),yh=n("code"),W0=a("__call__"),U0=a(" special method."),B0=l(),v(vs.$$.fragment),R0=l(),wh=n("p"),H0=a("Example:"),Q0=l(),v(Mi.$$.fragment),Cm=l(),vn=n("h2"),ks=n("a"),Ah=n("span"),v(zi.$$.fragment),V0=l(),Fh=n("span"),J0=a("FlaxAlbertForMaskedLM"),Im=l(),Ce=n("div"),v(Pi.$$.fragment),K0=l(),qi=n("p"),G0=a("Albert Model with a "),$h=n("code"),X0=a("language modeling"),Z0=a(" head on top."),Y0=l(),ji=n("p"),e2=a("This model inherits from "),$d=n("a"),t2=a("FlaxPreTrainedModel"),o2=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),n2=l(),Ci=n("p"),s2=a("This model is also a Flax Linen "),Ii=n("a"),r2=a("flax.linen.Module"),a2=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),i2=l(),xh=n("p"),l2=a("Finally, this model supports inherent JAX features such as:"),d2=l(),Rt=n("ul"),Eh=n("li"),Li=n("a"),c2=a("Just-In-Time (JIT) compilation"),p2=l(),Mh=n("li"),Ni=n("a"),h2=a("Automatic Differentiation"),f2=l(),zh=n("li"),Di=n("a"),m2=a("Vectorization"),u2=l(),Ph=n("li"),Si=n("a"),g2=a("Parallelization"),_2=l(),_t=n("div"),v(Oi.$$.fragment),b2=l(),kn=n("p"),v2=a("The "),qh=n("code"),k2=a("FlaxAlbertPreTrainedModel"),T2=a(" forward method, overrides the "),jh=n("code"),y2=a("__call__"),w2=a(" special method."),A2=l(),v(Ts.$$.fragment),F2=l(),Ch=n("p"),$2=a("Example:"),x2=l(),v(Wi.$$.fragment),Lm=l(),Tn=n("h2"),ys=n("a"),Ih=n("span"),v(Ui.$$.fragment),E2=l(),Lh=n("span"),M2=a("FlaxAlbertForSequenceClassification"),Nm=l(),Ie=n("div"),v(Bi.$$.fragment),z2=l(),Nh=n("p"),P2=a(`Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),q2=l(),Ri=n("p"),j2=a("This model inherits from "),xd=n("a"),C2=a("FlaxPreTrainedModel"),I2=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),L2=l(),Hi=n("p"),N2=a("This model is also a Flax Linen "),Qi=n("a"),D2=a("flax.linen.Module"),S2=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),O2=l(),Dh=n("p"),W2=a("Finally, this model supports inherent JAX features such as:"),U2=l(),Ht=n("ul"),Sh=n("li"),Vi=n("a"),B2=a("Just-In-Time (JIT) compilation"),R2=l(),Oh=n("li"),Ji=n("a"),H2=a("Automatic Differentiation"),Q2=l(),Wh=n("li"),Ki=n("a"),V2=a("Vectorization"),J2=l(),Uh=n("li"),Gi=n("a"),K2=a("Parallelization"),G2=l(),bt=n("div"),v(Xi.$$.fragment),X2=l(),yn=n("p"),Z2=a("The "),Bh=n("code"),Y2=a("FlaxAlbertPreTrainedModel"),eA=a(" forward method, overrides the "),Rh=n("code"),tA=a("__call__"),oA=a(" special method."),nA=l(),v(ws.$$.fragment),sA=l(),Hh=n("p"),rA=a("Example:"),aA=l(),v(Zi.$$.fragment),Dm=l(),wn=n("h2"),As=n("a"),Qh=n("span"),v(Yi.$$.fragment),iA=l(),Vh=n("span"),lA=a("FlaxAlbertForMultipleChoice"),Sm=l(),Le=n("div"),v(el.$$.fragment),dA=l(),Jh=n("p"),cA=a(`Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),pA=l(),tl=n("p"),hA=a("This model inherits from "),Ed=n("a"),fA=a("FlaxPreTrainedModel"),mA=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),uA=l(),ol=n("p"),gA=a("This model is also a Flax Linen "),nl=n("a"),_A=a("flax.linen.Module"),bA=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),vA=l(),Kh=n("p"),kA=a("Finally, this model supports inherent JAX features such as:"),TA=l(),Qt=n("ul"),Gh=n("li"),sl=n("a"),yA=a("Just-In-Time (JIT) compilation"),wA=l(),Xh=n("li"),rl=n("a"),AA=a("Automatic Differentiation"),FA=l(),Zh=n("li"),al=n("a"),$A=a("Vectorization"),xA=l(),Yh=n("li"),il=n("a"),EA=a("Parallelization"),MA=l(),vt=n("div"),v(ll.$$.fragment),zA=l(),An=n("p"),PA=a("The "),ef=n("code"),qA=a("FlaxAlbertPreTrainedModel"),jA=a(" forward method, overrides the "),tf=n("code"),CA=a("__call__"),IA=a(" special method."),LA=l(),v(Fs.$$.fragment),NA=l(),of=n("p"),DA=a("Example:"),SA=l(),v(dl.$$.fragment),Om=l(),Fn=n("h2"),$s=n("a"),nf=n("span"),v(cl.$$.fragment),OA=l(),sf=n("span"),WA=a("FlaxAlbertForTokenClassification"),Wm=l(),Ne=n("div"),v(pl.$$.fragment),UA=l(),rf=n("p"),BA=a(`Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),RA=l(),hl=n("p"),HA=a("This model inherits from "),Md=n("a"),QA=a("FlaxPreTrainedModel"),VA=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),JA=l(),fl=n("p"),KA=a("This model is also a Flax Linen "),ml=n("a"),GA=a("flax.linen.Module"),XA=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ZA=l(),af=n("p"),YA=a("Finally, this model supports inherent JAX features such as:"),eF=l(),Vt=n("ul"),lf=n("li"),ul=n("a"),tF=a("Just-In-Time (JIT) compilation"),oF=l(),df=n("li"),gl=n("a"),nF=a("Automatic Differentiation"),sF=l(),cf=n("li"),_l=n("a"),rF=a("Vectorization"),aF=l(),pf=n("li"),bl=n("a"),iF=a("Parallelization"),lF=l(),kt=n("div"),v(vl.$$.fragment),dF=l(),$n=n("p"),cF=a("The "),hf=n("code"),pF=a("FlaxAlbertPreTrainedModel"),hF=a(" forward method, overrides the "),ff=n("code"),fF=a("__call__"),mF=a(" special method."),uF=l(),v(xs.$$.fragment),gF=l(),mf=n("p"),_F=a("Example:"),bF=l(),v(kl.$$.fragment),Um=l(),xn=n("h2"),Es=n("a"),uf=n("span"),v(Tl.$$.fragment),vF=l(),gf=n("span"),kF=a("FlaxAlbertForQuestionAnswering"),Bm=l(),De=n("div"),v(yl.$$.fragment),TF=l(),En=n("p"),yF=a(`Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),_f=n("code"),wF=a("span start logits"),AF=a(" and "),bf=n("code"),FF=a("span end logits"),$F=a(")."),xF=l(),wl=n("p"),EF=a("This model inherits from "),zd=n("a"),MF=a("FlaxPreTrainedModel"),zF=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),PF=l(),Al=n("p"),qF=a("This model is also a Flax Linen "),Fl=n("a"),jF=a("flax.linen.Module"),CF=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),IF=l(),vf=n("p"),LF=a("Finally, this model supports inherent JAX features such as:"),NF=l(),Jt=n("ul"),kf=n("li"),$l=n("a"),DF=a("Just-In-Time (JIT) compilation"),SF=l(),Tf=n("li"),xl=n("a"),OF=a("Automatic Differentiation"),WF=l(),yf=n("li"),El=n("a"),UF=a("Vectorization"),BF=l(),wf=n("li"),Ml=n("a"),RF=a("Parallelization"),HF=l(),Tt=n("div"),v(zl.$$.fragment),QF=l(),Mn=n("p"),VF=a("The "),Af=n("code"),JF=a("FlaxAlbertPreTrainedModel"),KF=a(" forward method, overrides the "),Ff=n("code"),GF=a("__call__"),XF=a(" special method."),ZF=l(),v(Ms.$$.fragment),YF=l(),$f=n("p"),e$=a("Example:"),t$=l(),v(Pl.$$.fragment),this.h()},l(o){const m=Cz('[data-svelte="svelte-1phssyn"]',document.head);p=s(m,"META",{name:!0,content:!0}),m.forEach(t),$=d(o),g=s(o,"H1",{class:!0});var ql=r(g);b=s(ql,"A",{id:!0,class:!0,href:!0});var xf=r(b);F=s(xf,"SPAN",{});var Ef=r(F);k(_.$$.fragment,Ef),Ef.forEach(t),xf.forEach(t),u=d(ql),x=s(ql,"SPAN",{});var Mf=r(x);pe=i(Mf,"ALBERT"),Mf.forEach(t),ql.forEach(t),G=d(o),M=s(o,"H2",{class:!0});var jl=r(M);Z=s(jl,"A",{id:!0,class:!0,href:!0});var zf=r(Z);U=s(zf,"SPAN",{});var Pf=r(U);k(ee.$$.fragment,Pf),Pf.forEach(t),zf.forEach(t),he=d(jl),B=s(jl,"SPAN",{});var qf=r(B);fe=i(qf,"Overview"),qf.forEach(t),jl.forEach(t),ae=d(o),J=s(o,"P",{});var Cl=r(J);I=i(Cl,"The ALBERT model was proposed in "),te=s(Cl,"A",{href:!0,rel:!0});var jf=r(te);X=i(jf,"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),jf.forEach(t),z=i(Cl,` by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. It presents two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT:`),Cl.forEach(t),q=d(o),Y=s(o,"UL",{});var Il=r(Y);R=s(Il,"LI",{});var Cf=r(R);me=i(Cf,"Splitting the embedding matrix into two smaller matrices."),Cf.forEach(t),ue=d(Il),H=s(Il,"LI",{});var If=r(H);ge=i(If,"Using repeating layers split among groups."),If.forEach(t),Il.forEach(t),ie=d(o),P=s(o,"P",{});var Lf=r(P);_e=i(Lf,"The abstract from the paper is the following:"),Lf.forEach(t),Q=d(o),oe=s(o,"P",{});var Nf=r(oe);se=s(Nf,"EM",{});var Df=r(se);V=i(Df,`Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations, longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. 
As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and SQuAD benchmarks while having fewer parameters compared to BERT-large.`),Df.forEach(t),Nf.forEach(t),le=d(o),ne=s(o,"P",{});var Sf=r(ne);L=i(Sf,"Tips:"),Sf.forEach(t),de=d(o),S=s(o,"UL",{});var Ll=r(S);re=s(Ll,"LI",{});var Of=r(re);h=i(Of,`ALBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),Of.forEach(t),E=d(Ll),K=s(Ll,"LI",{});var Wf=r(K);Ae=i(Wf,`ALBERT uses repeating layers which results in a small memory footprint, however the computational cost remains similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same number of (repeating) layers.`),Wf.forEach(t),Ll.forEach(t),ye=d(o),j=s(o,"P",{});var Kt=r(j);Fe=i(Kt,"This model was contributed by "),ve=s(Kt,"A",{href:!0,rel:!0});var Uf=r(ve);$e=i(Uf,"lysandre"),Uf.forEach(t),N=i(Kt,`. This model jax version was contributed by `),O=s(Kt,"A",{href:!0,rel:!0});var Bf=r(O);xe=i(Bf,"kamalkraj"),Bf.forEach(t),Ee=i(Kt,". The original code can be found "),W=s(Kt,"A",{href:!0,rel:!0});var Rf=r(W);Me=i(Rf,"here"),Rf.forEach(t),ze=i(Kt,"."),Kt.forEach(t),ce=d(o),be=s(o,"H2",{class:!0});var Nl=r(be);zn=s(Nl,"A",{id:!0,class:!0,href:!0});var Hf=r(zn);dc=s(Hf,"SPAN",{});var Qf=r(dc);k(Us.$$.fragment,Qf),Qf.forEach(t),Hf.forEach(t),gg=d(Nl),cc=s(Nl,"SPAN",{});var Vf=r(cc);_g=i(Vf,"AlbertConfig"),Vf.forEach(t),Nl.forEach(t),Jf=d(o),Je=s(o,"DIV",{class:!0});var Zt=r(Je);k(Bs.$$.fragment,Zt),bg=d(Zt),Ot=s(Zt,"P",{});var zs=r(Ot);vg=i(zs,"This is the configuration class to store the configuration of a "),Dl=s(zs,"A",{href:!0});var a$=r(Dl);kg=i(a$,"AlbertModel"),a$.forEach(t),Tg=i(zs,` or a `),Sl=s(zs,"A",{href:!0});var i$=r(Sl);yg=i(i$,"TFAlbertModel"),i$.forEach(t),wg=i(zs,`. It is used to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ALBERT `),Rs=s(zs,"A",{href:!0,rel:!0});var l$=r(Rs);Ag=i(l$,"xxlarge"),l$.forEach(t),Fg=i(zs," architecture."),zs.forEach(t),$g=d(Zt),xo=s(Zt,"P",{});var Pd=r(xo);xg=i(Pd,"Configuration objects inherit from "),Ol=s(Pd,"A",{href:!0});var d$=r(Ol);Eg=i(d$,"PretrainedConfig"),d$.forEach(t),Mg=i(Pd,` and can be used to control the model outputs. Read the documentation from `),Wl=s(Pd,"A",{href:!0});var c$=r(Wl);zg=i(c$,"PretrainedConfig"),c$.forEach(t),Pg=i(Pd," for more information."),Pd.forEach(t),qg=d(Zt),pc=s(Zt,"P",{});var p$=r(pc);jg=i(p$,"Examples:"),p$.forEach(t),Cg=d(Zt),k(Hs.$$.fragment,Zt),Zt.forEach(t),Kf=d(o),Eo=s(o,"H2",{class:!0});var Hm=r(Eo);Pn=s(Hm,"A",{id:!0,class:!0,href:!0});var h$=r(Pn);hc=s(h$,"SPAN",{});var f$=r(hc);k(Qs.$$.fragment,f$),f$.forEach(t),h$.forEach(t),Ig=d(Hm),fc=s(Hm,"SPAN",{});var m$=r(fc);Lg=i(m$,"AlbertTokenizer"),m$.forEach(t),Hm.forEach(t),Gf=d(o),Pe=s(o,"DIV",{class:!0});var Ve=r(Pe);k(Vs.$$.fragment,Ve),Ng=d(Ve),Js=s(Ve,"P",{});var Qm=r(Js);Dg=i(Qm,"Construct an ALBERT tokenizer. Based on "),Ks=s(Qm,"A",{href:!0,rel:!0});var u$=r(Ks);Sg=i(u$,"SentencePiece"),u$.forEach(t),Og=i(Qm,"."),Qm.forEach(t),Wg=d(Ve),Gs=s(Ve,"P",{});var Vm=r(Gs);Ug=i(Vm,"This tokenizer inherits from "),Ul=s(Vm,"A",{href:!0});var g$=r(Ul);Bg=i(g$,"PreTrainedTokenizer"),g$.forEach(t),Rg=i(Vm,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Vm.forEach(t),Hg=d(Ve),Mo=s(Ve,"P",{});var qd=r(Mo);Qg=i(qd,`Attributes: sp_model (`),mc=s(qd,"CODE",{});var _$=r(mc);Vg=i(_$,"SentencePieceProcessor"),_$.forEach(t),Jg=i(qd,`): The `),uc=s(qd,"EM",{});var b$=r(uc);Kg=i(b$,"SentencePiece"),b$.forEach(t),Gg=i(qd," processor that is used for every conversion (string, tokens and IDs)."),qd.forEach(t),Xg=d(Ve),Gt=s(Ve,"DIV",{class:!0});var jd=r(Gt);k(Xs.$$.fragment,jd),Zg=d(jd),gc=s(jd,"P",{});var v$=r(gc);Yg=i(v$,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format:`),v$.forEach(t),e_=d(jd),Zs=s(jd,"UL",{});var Jm=r(Zs);Bl=s(Jm,"LI",{});var o$=r(Bl);t_=i(o$,"single sequence: "),_c=s(o$,"CODE",{});var k$=r(_c);o_=i(k$,"[CLS] X [SEP]"),k$.forEach(t),o$.forEach(t),n_=d(Jm),Rl=s(Jm,"LI",{});var n$=r(Rl);s_=i(n$,"pair of sequences: "),bc=s(n$,"CODE",{});var T$=r(bc);r_=i(T$,"[CLS] A [SEP] B [SEP]"),T$.forEach(t),n$.forEach(t),Jm.forEach(t),jd.forEach(t),a_=d(Ve),qn=s(Ve,"DIV",{class:!0});var Km=r(qn);k(Ys.$$.fragment,Km),i_=d(Km),er=s(Km,"P",{});var Gm=r(er);l_=i(Gm,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),vc=s(Gm,"CODE",{});var y$=r(vc);d_=i(y$,"prepare_for_model"),y$.forEach(t),c_=i(Gm," method."),Gm.forEach(t),Km.forEach(t),p_=d(Ve),Pt=s(Ve,"DIV",{class:!0});var Ps=r(Pt);k(tr.$$.fragment,Ps),h_=d(Ps),kc=s(Ps,"P",{});var w$=r(kc);f_=i(w$,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format:`),w$.forEach(t),m_=d(Ps),k(or.$$.fragment,Ps),u_=d(Ps),zo=s(Ps,"P",{});var Cd=r(zo);g_=i(Cd,"If "),Tc=s(Cd,"CODE",{});var A$=r(Tc);__=i(A$,"token_ids_1"),A$.forEach(t),b_=i(Cd," is "),yc=s(Cd,"CODE",{});var F$=r(yc);v_=i(F$,"None"),F$.forEach(t),k_=i(Cd,", this method only returns the first portion of the mask (0s)."),Cd.forEach(t),Ps.forEach(t),T_=d(Ve),wc=s(Ve,"DIV",{class:!0}),r(wc).forEach(t),Ve.forEach(t),Xf=d(o),Po=s(o,"H2",{class:!0});var Xm=r(Po);jn=s(Xm,"A",{id:!0,class:!0,href:!0});var $$=r(jn);Ac=s($$,"SPAN",{});var x$=r(Ac);k(nr.$$.fragment,x$),x$.forEach(t),$$.forEach(t),y_=d(Xm),Fc=s(Xm,"SPAN",{});var E$=r(Fc);w_=i(E$,"AlbertTokenizerFast"),E$.forEach(t),Xm.forEach(t),Zf=d(o),zt=s(o,"DIV",{class:!0});var qs=r(zt);k(sr.$$.fragment,qs),A_=d(qs),Wt=s(qs,"P",{});var js=r(Wt);F_=i(js,"Construct a \u201Cfast\u201D ALBERT tokenizer (backed by HuggingFace\u2019s "),$c=s(js,"EM",{});var M$=r($c);$_=i(M$,"tokenizers"),M$.forEach(t),x_=i(js," library). Based on "),rr=s(js,"A",{href:!0,rel:!0});var z$=r(rr);E_=i(z$,"Unigram"),z$.forEach(t),M_=i(js,`. This tokenizer inherits from `),Hl=s(js,"A",{href:!0});var P$=r(Hl);z_=i(P$,"PreTrainedTokenizerFast"),P$.forEach(t),P_=i(js,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),js.forEach(t),q_=d(qs),Xt=s(qs,"DIV",{class:!0});var Id=r(Xt);k(ar.$$.fragment,Id),j_=d(Id),xc=s(Id,"P",{});var q$=r(xc);C_=i(q$,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An ALBERT sequence has the following format:`),q$.forEach(t),I_=d(Id),ir=s(Id,"UL",{});var Zm=r(ir);Ql=s(Zm,"LI",{});var s$=r(Ql);L_=i(s$,"single sequence: "),Ec=s(s$,"CODE",{});var j$=r(Ec);N_=i(j$,"[CLS] X [SEP]"),j$.forEach(t),s$.forEach(t),D_=d(Zm),Vl=s(Zm,"LI",{});var r$=r(Vl);S_=i(r$,"pair of sequences: "),Mc=s(r$,"CODE",{});var C$=r(Mc);O_=i(C$,"[CLS] A [SEP] B [SEP]"),C$.forEach(t),r$.forEach(t),Zm.forEach(t),Id.forEach(t),W_=d(qs),qt=s(qs,"DIV",{class:!0});var Cs=r(qt);k(lr.$$.fragment,Cs),U_=d(Cs),zc=s(Cs,"P",{});var I$=r(zc);B_=i(I$,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format:`),I$.forEach(t),R_=d(Cs),k(dr.$$.fragment,Cs),H_=d(Cs),Pc=s(Cs,"P",{});var L$=r(Pc);Q_=i(L$,"if token_ids_1 is None, only returns the first portion of the mask (0s)."),L$.forEach(t),Cs.forEach(t),qs.forEach(t),Yf=d(o),qo=s(o,"H2",{class:!0});var Ym=r(qo);Cn=s(Ym,"A",{id:!0,class:!0,href:!0});var N$=r(Cn);qc=s(N$,"SPAN",{});var D$=r(qc);k(cr.$$.fragment,D$),D$.forEach(t),N$.forEach(t),V_=d(Ym),jc=s(Ym,"SPAN",{});var S$=r(jc);J_=i(S$,"Albert specific outputs"),S$.forEach(t),Ym.forEach(t),em=d(o),jo=s(o,"DIV",{class:!0});var eu=r(jo);k(pr.$$.fragment,eu),K_=d(eu),hr=s(eu,"P",{});var tu=r(hr);G_=i(tu,"Output type of "),Jl=s(tu,"A",{href:!0});var O$=r(Jl);X_=i(O$,"AlbertForPreTraining"),O$.forEach(t),Z_=i(tu,"."),tu.forEach(t),eu.forEach(t),tm=d(o),Co=s(o,"DIV",{class:!0});var ou=r(Co);k(fr.$$.fragment,ou),Y_=d(ou),mr=s(ou,"P",{});var nu=r(mr);eb=i(nu,"Output type of "),Kl=s(nu,"A",{href:!0});var W$=r(Kl);tb=i(W$,"TFAlbertForPreTraining"),W$.forEach(t),ob=i(nu,"."),nu.forEach(t),ou.forEach(t),om=d(o),Io=s(o,"H2",{class:!0});var su=r(Io);In=s(su,"A",{id:!0,class:!0,href:!0});var U$=r(In);Cc=s(U$,"SPAN",{});var B$=r(Cc);k(ur.$$.fragment,B$),B$.forEach(t),U$.forEach(t),nb=d(su),Ic=s(su,"SPAN",{});var R$=r(Ic);sb=i(R$,"AlbertModel"),R$.forEach(t),su.forEach(t),nm=d(o),Ke=s(o,"DIV",{class:!0});var Yt=r(Ke);k(gr.$$.fragment,Yt),rb=d(Yt),Lc=s(Yt,"P",{});var H$=r(Lc);ab=i(H$,"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top."),H$.forEach(t),ib=d(Yt),_r=s(Yt,"P",{});var ru=r(_r);lb=i(ru,"This model inherits from "),Gl=s(ru,"A",{href:!0});var Q$=r(Gl);db=i(Q$,"PreTrainedModel"),Q$.forEach(t),cb=i(ru,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ru.forEach(t),pb=d(Yt),br=s(Yt,"P",{});var au=r(br);hb=i(au,"This model is also a PyTorch "),vr=s(au,"A",{href:!0,rel:!0});var V$=r(vr);fb=i(V$,"torch.nn.Module"),V$.forEach(t),mb=i(au,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),au.forEach(t),ub=d(Yt),ot=s(Yt,"DIV",{class:!0});var eo=r(ot);k(kr.$$.fragment,eo),gb=d(eo),Lo=s(eo,"P",{});var Ld=r(Lo);_b=i(Ld,"The "),Xl=s(Ld,"A",{href:!0});var J$=r(Xl);bb=i(J$,"AlbertModel"),J$.forEach(t),vb=i(Ld," forward method, overrides the "),Nc=s(Ld,"CODE",{});var K$=r(Nc);kb=i(K$,"__call__"),K$.forEach(t),Tb=i(Ld," special method."),Ld.forEach(t),yb=d(eo),k(Ln.$$.fragment,eo),wb=d(eo),Dc=s(eo,"P",{});var G$=r(Dc);Ab=i(G$,"Example:"),G$.forEach(t),Fb=d(eo),k(Tr.$$.fragment,eo),eo.forEach(t),Yt.forEach(t),sm=d(o),No=s(o,"H2",{class:!0});var iu=r(No);Nn=s(iu,"A",{id:!0,class:!0,href:!0});var X$=r(Nn);Sc=s(X$,"SPAN",{});var Z$=r(Sc);k(yr.$$.fragment,Z$),Z$.forEach(t),X$.forEach(t),$b=d(iu),Oc=s(iu,"SPAN",{});var Y$=r(Oc);xb=i(Y$,"AlbertForPreTraining"),Y$.forEach(t),iu.forEach(t),rm=d(o),Ge=s(o,"DIV",{class:!0});var to=r(Ge);k(wr.$$.fragment,to),Eb=d(to),Do=s(to,"P",{});var Nd=r(Do);Mb=i(Nd,"Albert Model with two heads on top as done during the pretraining: a "),Wc=s(Nd,"CODE",{});var e4=r(Wc);zb=i(e4,"masked language modeling"),e4.forEach(t),Pb=i(Nd,` head and a `),Uc=s(Nd,"CODE",{});var t4=r(Uc);qb=i(t4,"sentence order prediction (classification)"),t4.forEach(t),jb=i(Nd," head."),Nd.forEach(t),Cb=d(to),Ar=s(to,"P",{});var lu=r(Ar);Ib=i(lu,"This model inherits from "),Zl=s(lu,"A",{href:!0});var o4=r(Zl);Lb=i(o4,"PreTrainedModel"),o4.forEach(t),Nb=i(lu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lu.forEach(t),Db=d(to),Fr=s(to,"P",{});var du=r(Fr);Sb=i(du,"This model is also a PyTorch "),$r=s(du,"A",{href:!0,rel:!0});var n4=r($r);Ob=i(n4,"torch.nn.Module"),n4.forEach(t),Wb=i(du,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),du.forEach(t),Ub=d(to),nt=s(to,"DIV",{class:!0});var oo=r(nt);k(xr.$$.fragment,oo),Bb=d(oo),So=s(oo,"P",{});var Dd=r(So);Rb=i(Dd,"The "),Yl=s(Dd,"A",{href:!0});var s4=r(Yl);Hb=i(s4,"AlbertForPreTraining"),s4.forEach(t),Qb=i(Dd," forward method, overrides the "),Bc=s(Dd,"CODE",{});var r4=r(Bc);Vb=i(r4,"__call__"),r4.forEach(t),Jb=i(Dd," special method."),Dd.forEach(t),Kb=d(oo),k(Dn.$$.fragment,oo),Gb=d(oo),Rc=s(oo,"P",{});var a4=r(Rc);Xb=i(a4,"Example:"),a4.forEach(t),Zb=d(oo),k(Er.$$.fragment,oo),oo.forEach(t),to.forEach(t),am=d(o),Oo=s(o,"H2",{class:!0});var cu=r(Oo);Sn=s(cu,"A",{id:!0,class:!0,href:!0});var i4=r(Sn);Hc=s(i4,"SPAN",{});var l4=r(Hc);k(Mr.$$.fragment,l4),l4.forEach(t),i4.forEach(t),Yb=d(cu),Qc=s(cu,"SPAN",{});var d4=r(Qc);ev=i(d4,"AlbertForMaskedLM"),d4.forEach(t),cu.forEach(t),im=d(o),Xe=s(o,"DIV",{class:!0});var no=r(Xe);k(zr.$$.fragment,no),tv=d(no),Pr=s(no,"P",{});var pu=r(Pr);ov=i(pu,"Albert Model with a "),Vc=s(pu,"CODE",{});var c4=r(Vc);nv=i(c4,"language modeling"),c4.forEach(t),sv=i(pu," head on top."),pu.forEach(t),rv=d(no),qr=s(no,"P",{});var hu=r(qr);av=i(hu,"This model inherits from "),ed=s(hu,"A",{href:!0});var p4=r(ed);iv=i(p4,"PreTrainedModel"),p4.forEach(t),lv=i(hu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hu.forEach(t),dv=d(no),jr=s(no,"P",{});var fu=r(jr);cv=i(fu,"This model is also a PyTorch "),Cr=s(fu,"A",{href:!0,rel:!0});var h4=r(Cr);pv=i(h4,"torch.nn.Module"),h4.forEach(t),hv=i(fu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fu.forEach(t),fv=d(no),st=s(no,"DIV",{class:!0});var so=r(st);k(Ir.$$.fragment,so),mv=d(so),Wo=s(so,"P",{});var Sd=r(Wo);uv=i(Sd,"The "),td=s(Sd,"A",{href:!0});var f4=r(td);gv=i(f4,"AlbertForMaskedLM"),f4.forEach(t),_v=i(Sd," forward method, overrides the "),Jc=s(Sd,"CODE",{});var m4=r(Jc);bv=i(m4,"__call__"),m4.forEach(t),vv=i(Sd," special method."),Sd.forEach(t),kv=d(so),k(On.$$.fragment,so),Tv=d(so),Kc=s(so,"P",{});var u4=r(Kc);yv=i(u4,"Example:"),u4.forEach(t),wv=d(so),k(Lr.$$.fragment,so),so.forEach(t),no.forEach(t),lm=d(o),Uo=s(o,"H2",{class:!0});var mu=r(Uo);Wn=s(mu,"A",{id:!0,class:!0,href:!0});var g4=r(Wn);Gc=s(g4,"SPAN",{});var _4=r(Gc);k(Nr.$$.fragment,_4),_4.forEach(t),g4.forEach(t),Av=d(mu),Xc=s(mu,"SPAN",{});var b4=r(Xc);Fv=i(b4,"AlbertForSequenceClassification"),b4.forEach(t),mu.forEach(t),dm=d(o),Ze=s(o,"DIV",{class:!0});var ro=r(Ze);k(Dr.$$.fragment,ro),$v=d(ro),Zc=s(ro,"P",{});var v4=r(Zc);xv=i(v4,`Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),v4.forEach(t),Ev=d(ro),Sr=s(ro,"P",{});var uu=r(Sr);Mv=i(uu,"This model inherits from "),od=s(uu,"A",{href:!0});var k4=r(od);zv=i(k4,"PreTrainedModel"),k4.forEach(t),Pv=i(uu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uu.forEach(t),qv=d(ro),Or=s(ro,"P",{});var gu=r(Or);jv=i(gu,"This model is also a PyTorch "),Wr=s(gu,"A",{href:!0,rel:!0});var T4=r(Wr);Cv=i(T4,"torch.nn.Module"),T4.forEach(t),Iv=i(gu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gu.forEach(t),Lv=d(ro),Se=s(ro,"DIV",{class:!0});var yt=r(Se);k(Ur.$$.fragment,yt),Nv=d(yt),Bo=s(yt,"P",{});var Od=r(Bo);Dv=i(Od,"The "),nd=s(Od,"A",{href:!0});var y4=r(nd);Sv=i(y4,"AlbertForSequenceClassification"),y4.forEach(t),Ov=i(Od," forward method, overrides the "),Yc=s(Od,"CODE",{});var w4=r(Yc);Wv=i(w4,"__call__"),w4.forEach(t),Uv=i(Od," special method."),Od.forEach(t),Bv=d(yt),k(Un.$$.fragment,yt),Rv=d(yt),ep=s(yt,"P",{});var A4=r(ep);Hv=i(A4,"Example of single-label classification:"),A4.forEach(t),Qv=d(yt),k(Br.$$.fragment,yt),Vv=d(yt),tp=s(yt,"P",{});var F4=r(tp);Jv=i(F4,"Example of multi-label classification:"),F4.forEach(t),Kv=d(yt),k(Rr.$$.fragment,yt),yt.forEach(t),ro.forEach(t),cm=d(o),Ro=s(o,"H2",{class:!0});var _u=r(Ro);Bn=s(_u,"A",{id:!0,class:!0,href:!0});var $4=r(Bn);op=s($4,"SPAN",{});var x4=r(op);k(Hr.$$.fragment,x4),x4.forEach(t),$4.forEach(t),Gv=d(_u),np=s(_u,"SPAN",{});var E4=r(np);Xv=i(E4,"AlbertForMultipleChoice"),E4.forEach(t),_u.forEach(t),pm=d(o),Ye=s(o,"DIV",{class:!0});var ao=r(Ye);k(Qr.$$.fragment,ao),Zv=d(ao),sp=s(ao,"P",{});var M4=r(sp);Yv=i(M4,`Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.`),M4.forEach(t),ek=d(ao),Vr=s(ao,"P",{});var bu=r(Vr);tk=i(bu,"This model inherits from "),sd=s(bu,"A",{href:!0});var z4=r(sd);ok=i(z4,"PreTrainedModel"),z4.forEach(t),nk=i(bu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bu.forEach(t),sk=d(ao),Jr=s(ao,"P",{});var vu=r(Jr);rk=i(vu,"This model is also a PyTorch "),Kr=s(vu,"A",{href:!0,rel:!0});var P4=r(Kr);ak=i(P4,"torch.nn.Module"),P4.forEach(t),ik=i(vu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vu.forEach(t),lk=d(ao),rt=s(ao,"DIV",{class:!0});var io=r(rt);k(Gr.$$.fragment,io),dk=d(io),Ho=s(io,"P",{});var Wd=r(Ho);ck=i(Wd,"The "),rd=s(Wd,"A",{href:!0});var q4=r(rd);pk=i(q4,"AlbertForMultipleChoice"),q4.forEach(t),hk=i(Wd," forward method, overrides the "),rp=s(Wd,"CODE",{});var j4=r(rp);fk=i(j4,"__call__"),j4.forEach(t),mk=i(Wd," special method."),Wd.forEach(t),uk=d(io),k(Rn.$$.fragment,io),gk=d(io),ap=s(io,"P",{});var C4=r(ap);_k=i(C4,"Example:"),C4.forEach(t),bk=d(io),k(Xr.$$.fragment,io),io.forEach(t),ao.forEach(t),hm=d(o),Qo=s(o,"H2",{class:!0});var ku=r(Qo);Hn=s(ku,"A",{id:!0,class:!0,href:!0});var I4=r(Hn);ip=s(I4,"SPAN",{});var L4=r(ip);k(Zr.$$.fragment,L4),L4.forEach(t),I4.forEach(t),vk=d(ku),lp=s(ku,"SPAN",{});var N4=r(lp);kk=i(N4,"AlbertForTokenClassification"),N4.forEach(t),ku.forEach(t),fm=d(o),et=s(o,"DIV",{class:!0});var lo=r(et);k(Yr.$$.fragment,lo),Tk=d(lo),dp=s(lo,"P",{});var D4=r(dp);yk=i(D4,`Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),D4.forEach(t),wk=d(lo),ea=s(lo,"P",{});var Tu=r(ea);Ak=i(Tu,"This model inherits from "),ad=s(Tu,"A",{href:!0});var S4=r(ad);Fk=i(S4,"PreTrainedModel"),S4.forEach(t),$k=i(Tu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tu.forEach(t),xk=d(lo),ta=s(lo,"P",{});var yu=r(ta);Ek=i(yu,"This model is also a PyTorch "),oa=s(yu,"A",{href:!0,rel:!0});var O4=r(oa);Mk=i(O4,"torch.nn.Module"),O4.forEach(t),zk=i(yu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yu.forEach(t),Pk=d(lo),at=s(lo,"DIV",{class:!0});var co=r(at);k(na.$$.fragment,co),qk=d(co),Vo=s(co,"P",{});var Ud=r(Vo);jk=i(Ud,"The "),id=s(Ud,"A",{href:!0});var W4=r(id);Ck=i(W4,"AlbertForTokenClassification"),W4.forEach(t),Ik=i(Ud," forward method, overrides the "),cp=s(Ud,"CODE",{});var U4=r(cp);Lk=i(U4,"__call__"),U4.forEach(t),Nk=i(Ud," special method."),Ud.forEach(t),Dk=d(co),k(Qn.$$.fragment,co),Sk=d(co),pp=s(co,"P",{});var B4=r(pp);Ok=i(B4,"Example:"),B4.forEach(t),Wk=d(co),k(sa.$$.fragment,co),co.forEach(t),lo.forEach(t),mm=d(o),Jo=s(o,"H2",{class:!0});var wu=r(Jo);Vn=s(wu,"A",{id:!0,class:!0,href:!0});var R4=r(Vn);hp=s(R4,"SPAN",{});var H4=r(hp);k(ra.$$.fragment,H4),H4.forEach(t),R4.forEach(t),Uk=d(wu),fp=s(wu,"SPAN",{});var Q4=r(fp);Bk=i(Q4,"AlbertForQuestionAnswering"),Q4.forEach(t),wu.forEach(t),um=d(o),tt=s(o,"DIV",{class:!0});var po=r(tt);k(aa.$$.fragment,po),Rk=d(po),Ko=s(po,"P",{});var Bd=r(Ko);Hk=i(Bd,`Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),mp=s(Bd,"CODE",{});var V4=r(mp);Qk=i(V4,"span start logits"),V4.forEach(t),Vk=i(Bd," and "),up=s(Bd,"CODE",{});var J4=r(up);Jk=i(J4,"span end logits"),J4.forEach(t),Kk=i(Bd,")."),Bd.forEach(t),Gk=d(po),ia=s(po,"P",{});var Au=r(ia);Xk=i(Au,"This model inherits from "),ld=s(Au,"A",{href:!0});var K4=r(ld);Zk=i(K4,"PreTrainedModel"),K4.forEach(t),Yk=i(Au,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Au.forEach(t),eT=d(po),la=s(po,"P",{});var Fu=r(la);tT=i(Fu,"This model is also a PyTorch "),da=s(Fu,"A",{href:!0,rel:!0});var G4=r(da);oT=i(G4,"torch.nn.Module"),G4.forEach(t),nT=i(Fu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fu.forEach(t),sT=d(po),it=s(po,"DIV",{class:!0});var ho=r(it);k(ca.$$.fragment,ho),rT=d(ho),Go=s(ho,"P",{});var Rd=r(Go);aT=i(Rd,"The "),dd=s(Rd,"A",{href:!0});var X4=r(dd);iT=i(X4,"AlbertForQuestionAnswering"),X4.forEach(t),lT=i(Rd," forward method, overrides the "),gp=s(Rd,"CODE",{});var Z4=r(gp);dT=i(Z4,"__call__"),Z4.forEach(t),cT=i(Rd," special method."),Rd.forEach(t),pT=d(ho),k(Jn.$$.fragment,ho),hT=d(ho),_p=s(ho,"P",{});var Y4=r(_p);fT=i(Y4,"Example:"),Y4.forEach(t),mT=d(ho),k(pa.$$.fragment,ho),ho.forEach(t),po.forEach(t),gm=d(o),Xo=s(o,"H2",{class:!0});var $u=r(Xo);Kn=s($u,"A",{id:!0,class:!0,href:!0});var ex=r(Kn);bp=s(ex,"SPAN",{});var tx=r(bp);k(ha.$$.fragment,tx),tx.forEach(t),ex.forEach(t),uT=d($u),vp=s($u,"SPAN",{});var ox=r(vp);gT=i(ox,"TFAlbertModel"),ox.forEach(t),$u.forEach(t),_m=d(o),Oe=s(o,"DIV",{class:!0});var jt=r(Oe);k(fa.$$.fragment,jt),_T=d(jt),kp=s(jt,"P",{});var nx=r(kp);bT=i(nx,"The bare Albert Model transformer outputting raw hidden-states without any specific head on top."),nx.forEach(t),vT=d(jt),ma=s(jt,"P",{});var xu=r(ma);kT=i(xu,"This model inherits from "),cd=s(xu,"A",{href:!0});var sx=r(cd);TT=i(sx,"TFPreTrainedModel"),sx.forEach(t),yT=i(xu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xu.forEach(t),wT=d(jt),ua=s(jt,"P",{});var Eu=r(ua);AT=i(Eu,"This model is also a "),ga=s(Eu,"A",{href:!0,rel:!0});var rx=r(ga);FT=i(rx,"tf.keras.Model"),rx.forEach(t),$T=i(Eu,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Eu.forEach(t),xT=d(jt),k(Gn.$$.fragment,jt),ET=d(jt),lt=s(jt,"DIV",{class:!0});var fo=r(lt);k(_a.$$.fragment,fo),MT=d(fo),Zo=s(fo,"P",{});var Hd=r(Zo);zT=i(Hd,"The "),pd=s(Hd,"A",{href:!0});var ax=r(pd);PT=i(ax,"TFAlbertModel"),ax.forEach(t),qT=i(Hd," forward method, overrides the "),Tp=s(Hd,"CODE",{});var ix=r(Tp);jT=i(ix,"__call__"),ix.forEach(t),CT=i(Hd," special method."),Hd.forEach(t),IT=d(fo),k(Xn.$$.fragment,fo),LT=d(fo),yp=s(fo,"P",{});var lx=r(yp);NT=i(lx,"Example:"),lx.forEach(t),DT=d(fo),k(ba.$$.fragment,fo),fo.forEach(t),jt.forEach(t),bm=d(o),Yo=s(o,"H2",{class:!0});var Mu=r(Yo);Zn=s(Mu,"A",{id:!0,class:!0,href:!0});var dx=r(Zn);wp=s(dx,"SPAN",{});var cx=r(wp);k(va.$$.fragment,cx),cx.forEach(t),dx.forEach(t),ST=d(Mu),Ap=s(Mu,"SPAN",{});var px=r(Ap);OT=i(px,"TFAlbertForPreTraining"),px.forEach(t),Mu.forEach(t),vm=d(o),We=s(o,"DIV",{class:!0});var Ct=r(We);k(ka.$$.fragment,Ct),WT=d(Ct),en=s(Ct,"P",{});var Qd=r(en);UT=i(Qd,"Albert Model with two heads on top for pretraining: a "),Fp=s(Qd,"CODE",{});var hx=r(Fp);BT=i(hx,"masked language modeling"),hx.forEach(t),RT=i(Qd," head and a "),$p=s(Qd,"CODE",{});var fx=r($p);HT=i(fx,"sentence order prediction"),fx.forEach(t),QT=i(Qd," (classification) head."),Qd.forEach(t),VT=d(Ct),Ta=s(Ct,"P",{});var zu=r(Ta);JT=i(zu,"This model inherits from "),hd=s(zu,"A",{href:!0});var mx=r(hd);KT=i(mx,"TFPreTrainedModel"),mx.forEach(t),GT=i(zu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zu.forEach(t),XT=d(Ct),ya=s(Ct,"P",{});var Pu=r(ya);ZT=i(Pu,"This model is also a "),wa=s(Pu,"A",{href:!0,rel:!0});var ux=r(wa);YT=i(ux,"tf.keras.Model"),ux.forEach(t),e1=i(Pu,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Pu.forEach(t),t1=d(Ct),k(Yn.$$.fragment,Ct),o1=d(Ct),dt=s(Ct,"DIV",{class:!0});var mo=r(dt);k(Aa.$$.fragment,mo),n1=d(mo),tn=s(mo,"P",{});var Vd=r(tn);s1=i(Vd,"The "),fd=s(Vd,"A",{href:!0});var gx=r(fd);r1=i(gx,"TFAlbertForPreTraining"),gx.forEach(t),a1=i(Vd," forward method, overrides the "),xp=s(Vd,"CODE",{});var _x=r(xp);i1=i(_x,"__call__"),_x.forEach(t),l1=i(Vd," special method."),Vd.forEach(t),d1=d(mo),k(es.$$.fragment,mo),c1=d(mo),Ep=s(mo,"P",{});var bx=r(Ep);p1=i(bx,"Example:"),bx.forEach(t),h1=d(mo),k(Fa.$$.fragment,mo),mo.forEach(t),Ct.forEach(t),km=d(o),on=s(o,"H2",{class:!0});var qu=r(on);ts=s(qu,"A",{id:!0,class:!0,href:!0});var vx=r(ts);Mp=s(vx,"SPAN",{});var kx=r(Mp);k($a.$$.fragment,kx),kx.forEach(t),vx.forEach(t),f1=d(qu),zp=s(qu,"SPAN",{});var Tx=r(zp);m1=i(Tx,"TFAlbertForMaskedLM"),Tx.forEach(t),qu.forEach(t),Tm=d(o),Ue=s(o,"DIV",{class:!0});var It=r(Ue);k(xa.$$.fragment,It),u1=d(It),Ea=s(It,"P",{});var ju=r(Ea);g1=i(ju,"Albert Model with a "),Pp=s(ju,"CODE",{});var yx=r(Pp);_1=i(yx,"language modeling"),yx.forEach(t),b1=i(ju," head on top."),ju.forEach(t),v1=d(It),Ma=s(It,"P",{});var Cu=r(Ma);k1=i(Cu,"This model inherits from "),md=s(Cu,"A",{href:!0});var wx=r(md);T1=i(wx,"TFPreTrainedModel"),wx.forEach(t),y1=i(Cu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cu.forEach(t),w1=d(It),za=s(It,"P",{});var Iu=r(za);A1=i(Iu,"This model is also a "),Pa=s(Iu,"A",{href:!0,rel:!0});var Ax=r(Pa);F1=i(Ax,"tf.keras.Model"),Ax.forEach(t),$1=i(Iu,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Iu.forEach(t),x1=d(It),k(os.$$.fragment,It),E1=d(It),ct=s(It,"DIV",{class:!0});var uo=r(ct);k(qa.$$.fragment,uo),M1=d(uo),nn=s(uo,"P",{});var Jd=r(nn);z1=i(Jd,"The "),ud=s(Jd,"A",{href:!0});var Fx=r(ud);P1=i(Fx,"TFAlbertForMaskedLM"),Fx.forEach(t),q1=i(Jd," forward method, overrides the "),qp=s(Jd,"CODE",{});var $x=r(qp);j1=i($x,"__call__"),$x.forEach(t),C1=i(Jd," special method."),Jd.forEach(t),I1=d(uo),k(ns.$$.fragment,uo),L1=d(uo),jp=s(uo,"P",{});var xx=r(jp);N1=i(xx,"Example:"),xx.forEach(t),D1=d(uo),k(ja.$$.fragment,uo),uo.forEach(t),It.forEach(t),ym=d(o),sn=s(o,"H2",{class:!0});var Lu=r(sn);ss=s(Lu,"A",{id:!0,class:!0,href:!0});var Ex=r(ss);Cp=s(Ex,"SPAN",{});var Mx=r(Cp);k(Ca.$$.fragment,Mx),Mx.forEach(t),Ex.forEach(t),S1=d(Lu),Ip=s(Lu,"SPAN",{});var zx=r(Ip);O1=i(zx,"TFAlbertForSequenceClassification"),zx.forEach(t),Lu.forEach(t),wm=d(o),Be=s(o,"DIV",{class:!0});var Lt=r(Be);k(Ia.$$.fragment,Lt),W1=d(Lt),Lp=s(Lt,"P",{});var Px=r(Lp);U1=i(Px,`Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Px.forEach(t),B1=d(Lt),La=s(Lt,"P",{});var Nu=r(La);R1=i(Nu,"This model inherits from "),gd=s(Nu,"A",{href:!0});var qx=r(gd);H1=i(qx,"TFPreTrainedModel"),qx.forEach(t),Q1=i(Nu,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nu.forEach(t),V1=d(Lt),Na=s(Lt,"P",{});var Du=r(Na);J1=i(Du,"This model is also a "),Da=s(Du,"A",{href:!0,rel:!0});var jx=r(Da);K1=i(jx,"tf.keras.Model"),jx.forEach(t),G1=i(Du,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Du.forEach(t),X1=d(Lt),k(rs.$$.fragment,Lt),Z1=d(Lt),pt=s(Lt,"DIV",{class:!0});var go=r(pt);k(Sa.$$.fragment,go),Y1=d(go),rn=s(go,"P",{});var Kd=r(rn);ey=i(Kd,"The "),_d=s(Kd,"A",{href:!0});var Cx=r(_d);ty=i(Cx,"TFAlbertForSequenceClassification"),Cx.forEach(t),oy=i(Kd," forward method, overrides the "),Np=s(Kd,"CODE",{});var Ix=r(Np);ny=i(Ix,"__call__"),Ix.forEach(t),sy=i(Kd," special method."),Kd.forEach(t),ry=d(go),k(as.$$.fragment,go),ay=d(go),Dp=s(go,"P",{});var Lx=r(Dp);iy=i(Lx,"Example:"),Lx.forEach(t),ly=d(go),k(Oa.$$.fragment,go),go.forEach(t),Lt.forEach(t),Am=d(o),an=s(o,"H2",{class:!0});var Su=r(an);is=s(Su,"A",{id:!0,class:!0,href:!0});var Nx=r(is);Sp=s(Nx,"SPAN",{});var Dx=r(Sp);k(Wa.$$.fragment,Dx),Dx.forEach(t),Nx.forEach(t),dy=d(Su),Op=s(Su,"SPAN",{});var Sx=r(Op);cy=i(Sx,"TFAlbertForMultipleChoice"),Sx.forEach(t),Su.forEach(t),Fm=d(o),Re=s(o,"DIV",{class:!0});var Nt=r(Re);k(Ua.$$.fragment,Nt),py=d(Nt),Wp=s(Nt,"P",{});var Ox=r(Wp);hy=i(Ox,`Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Ox.forEach(t),fy=d(Nt),Ba=s(Nt,"P",{});var Ou=r(Ba);my=i(Ou,"This model inherits from "),bd=s(Ou,"A",{href:!0});var Wx=r(bd);uy=i(Wx,"TFPreTrainedModel"),Wx.forEach(t),gy=i(Ou,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ou.forEach(t),_y=d(Nt),Ra=s(Nt,"P",{});var Wu=r(Ra);by=i(Wu,"This model is also a "),Ha=s(Wu,"A",{href:!0,rel:!0});var Ux=r(Ha);vy=i(Ux,"tf.keras.Model"),Ux.forEach(t),ky=i(Wu,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Wu.forEach(t),Ty=d(Nt),k(ls.$$.fragment,Nt),yy=d(Nt),ht=s(Nt,"DIV",{class:!0});var _o=r(ht);k(Qa.$$.fragment,_o),wy=d(_o),ln=s(_o,"P",{});var Gd=r(ln);Ay=i(Gd,"The "),vd=s(Gd,"A",{href:!0});var Bx=r(vd);Fy=i(Bx,"TFAlbertForMultipleChoice"),Bx.forEach(t),$y=i(Gd," forward method, overrides the "),Up=s(Gd,"CODE",{});var Rx=r(Up);xy=i(Rx,"__call__"),Rx.forEach(t),Ey=i(Gd," special method."),Gd.forEach(t),My=d(_o),k(ds.$$.fragment,_o),zy=d(_o),Bp=s(_o,"P",{});var Hx=r(Bp);Py=i(Hx,"Example:"),Hx.forEach(t),qy=d(_o),k(Va.$$.fragment,_o),_o.forEach(t),Nt.forEach(t),$m=d(o),dn=s(o,"H2",{class:!0});var Uu=r(dn);cs=s(Uu,"A",{id:!0,class:!0,href:!0});var Qx=r(cs);Rp=s(Qx,"SPAN",{});var Vx=r(Rp);k(Ja.$$.fragment,Vx),Vx.forEach(t),Qx.forEach(t),jy=d(Uu),Hp=s(Uu,"SPAN",{});var Jx=r(Hp);Cy=i(Jx,"TFAlbertForTokenClassification"),Jx.forEach(t),Uu.forEach(t),xm=d(o),He=s(o,"DIV",{class:!0});var Dt=r(He);k(Ka.$$.fragment,Dt),Iy=d(Dt),Qp=s(Dt,"P",{});var Kx=r(Qp);Ly=i(Kx,`Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),Kx.forEach(t),Ny=d(Dt),Ga=s(Dt,"P",{});var Bu=r(Ga);Dy=i(Bu,"This model inherits from "),kd=s(Bu,"A",{href:!0});var Gx=r(kd);Sy=i(Gx,"TFPreTrainedModel"),Gx.forEach(t),Oy=i(Bu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bu.forEach(t),Wy=d(Dt),Xa=s(Dt,"P",{});var Ru=r(Xa);Uy=i(Ru,"This model is also a "),Za=s(Ru,"A",{href:!0,rel:!0});var Xx=r(Za);By=i(Xx,"tf.keras.Model"),Xx.forEach(t),Ry=i(Ru,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ru.forEach(t),Hy=d(Dt),k(ps.$$.fragment,Dt),Qy=d(Dt),ft=s(Dt,"DIV",{class:!0});var bo=r(ft);k(Ya.$$.fragment,bo),Vy=d(bo),cn=s(bo,"P",{});var Xd=r(cn);Jy=i(Xd,"The "),Td=s(Xd,"A",{href:!0});var Zx=r(Td);Ky=i(Zx,"TFAlbertForTokenClassification"),Zx.forEach(t),Gy=i(Xd," forward method, overrides the "),Vp=s(Xd,"CODE",{});var Yx=r(Vp);Xy=i(Yx,"__call__"),Yx.forEach(t),Zy=i(Xd," special method."),Xd.forEach(t),Yy=d(bo),k(hs.$$.fragment,bo),ew=d(bo),Jp=s(bo,"P",{});var eE=r(Jp);tw=i(eE,"Example:"),eE.forEach(t),ow=d(bo),k(ei.$$.fragment,bo),bo.forEach(t),Dt.forEach(t),Em=d(o),pn=s(o,"H2",{class:!0});var Hu=r(pn);fs=s(Hu,"A",{id:!0,class:!0,href:!0});var tE=r(fs);Kp=s(tE,"SPAN",{});var oE=r(Kp);k(ti.$$.fragment,oE),oE.forEach(t),tE.forEach(t),nw=d(Hu),Gp=s(Hu,"SPAN",{});var nE=r(Gp);sw=i(nE,"TFAlbertForQuestionAnswering"),nE.forEach(t),Hu.forEach(t),Mm=d(o),Qe=s(o,"DIV",{class:!0});var St=r(Qe);k(oi.$$.fragment,St),rw=d(St),hn=s(St,"P",{});var Zd=r(hn);aw=i(Zd,`Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Xp=s(Zd,"CODE",{});var sE=r(Xp);iw=i(sE,"span start logits"),sE.forEach(t),lw=i(Zd," and "),Zp=s(Zd,"CODE",{});var rE=r(Zp);dw=i(rE,"span end logits"),rE.forEach(t),cw=i(Zd,")."),Zd.forEach(t),pw=d(St),ni=s(St,"P",{});var Qu=r(ni);hw=i(Qu,"This model inherits from "),yd=s(Qu,"A",{href:!0});var aE=r(yd);fw=i(aE,"TFPreTrainedModel"),aE.forEach(t),mw=i(Qu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qu.forEach(t),uw=d(St),si=s(St,"P",{});var Vu=r(si);gw=i(Vu,"This model is also a "),ri=s(Vu,"A",{href:!0,rel:!0});var iE=r(ri);_w=i(iE,"tf.keras.Model"),iE.forEach(t),bw=i(Vu,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vu.forEach(t),vw=d(St),k(ms.$$.fragment,St),kw=d(St),mt=s(St,"DIV",{class:!0});var vo=r(mt);k(ai.$$.fragment,vo),Tw=d(vo),fn=s(vo,"P",{});var Yd=r(fn);yw=i(Yd,"The "),wd=s(Yd,"A",{href:!0});var lE=r(wd);ww=i(lE,"TFAlbertForQuestionAnswering"),lE.forEach(t),Aw=i(Yd," forward method, overrides the "),Yp=s(Yd,"CODE",{});var dE=r(Yp);Fw=i(dE,"__call__"),dE.forEach(t),$w=i(Yd," special method."),Yd.forEach(t),xw=d(vo),k(us.$$.fragment,vo),Ew=d(vo),eh=s(vo,"P",{});var cE=r(eh);Mw=i(cE,"Example:"),cE.forEach(t),zw=d(vo),k(ii.$$.fragment,vo),vo.forEach(t),St.forEach(t),zm=d(o),mn=s(o,"H2",{class:!0});var Ju=r(mn);gs=s(Ju,"A",{id:!0,class:!0,href:!0});var pE=r(gs);th=s(pE,"SPAN",{});var hE=r(th);k(li.$$.fragment,hE),hE.forEach(t),pE.forEach(t),Pw=d(Ju),oh=s(Ju,"SPAN",{});var fE=r(oh);qw=i(fE,"FlaxAlbertModel"),fE.forEach(t),Ju.forEach(t),Pm=d(o),qe=s(o,"DIV",{class:!0});var wt=r(qe);k(di.$$.fragment,wt),jw=d(wt),nh=s(wt,"P",{});var mE=r(nh);Cw=i(mE,"The bare Albert Model transformer outputting raw hidden-states without any specific head on top."),mE.forEach(t),Iw=d(wt),ci=s(wt,"P",{});var Ku=r(ci);Lw=i(Ku,"This model inherits from "),Ad=s(Ku,"A",{href:!0});var uE=r(Ad);Nw=i(uE,"FlaxPreTrainedModel"),uE.forEach(t),Dw=i(Ku,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Ku.forEach(t),Sw=d(wt),pi=s(wt,"P",{});var Gu=r(pi);Ow=i(Gu,"This model is also a Flax Linen "),hi=s(Gu,"A",{href:!0,rel:!0});var gE=r(hi);Ww=i(gE,"flax.linen.Module"),gE.forEach(t),Uw=i(Gu,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Gu.forEach(t),Bw=d(wt),sh=s(wt,"P",{});var _E=r(sh);Rw=i(_E,"Finally, this model supports inherent JAX features such as:"),_E.forEach(t),Hw=d(wt),Ut=s(wt,"UL",{});var Is=r(Ut);rh=s(Is,"LI",{});var bE=r(rh);fi=s(bE,"A",{href:!0,rel:!0});var vE=r(fi);Qw=i(vE,"Just-In-Time (JIT) compilation"),vE.forEach(t),bE.forEach(t),Vw=d(Is),ah=s(Is,"LI",{});var kE=r(ah);mi=s(kE,"A",{href:!0,rel:!0});var TE=r(mi);Jw=i(TE,"Automatic Differentiation"),TE.forEach(t),kE.forEach(t),Kw=d(Is),ih=s(Is,"LI",{});var yE=r(ih);ui=s(yE,"A",{href:!0,rel:!0});var wE=r(ui);Gw=i(wE,"Vectorization"),wE.forEach(t),yE.forEach(t),Xw=d(Is),lh=s(Is,"LI",{});var AE=r(lh);gi=s(AE,"A",{href:!0,rel:!0});var FE=r(gi);Zw=i(FE,"Parallelization"),FE.forEach(t),AE.forEach(t),Is.forEach(t),Yw=d(wt),ut=s(wt,"DIV",{class:!0});var ko=r(ut);k(_i.$$.fragment,ko),e0=d(ko),un=s(ko,"P",{});var ec=r(un);t0=i(ec,"The "),dh=s(ec,"CODE",{});var $E=r(dh);o0=i($E,"FlaxAlbertPreTrainedModel"),$E.forEach(t),n0=i(ec," forward method, overrides the "),ch=s(ec,"CODE",{});var xE=r(ch);s0=i(xE,"__call__"),xE.forEach(t),r0=i(ec," special method."),ec.forEach(t),a0=d(ko),k(_s.$$.fragment,ko),i0=d(ko),ph=s(ko,"P",{});var EE=r(ph);l0=i(EE,"Example:"),EE.forEach(t),d0=d(ko),k(bi.$$.fragment,ko),ko.forEach(t),wt.forEach(t),qm=d(o),gn=s(o,"H2",{class:!0});var Xu=r(gn);bs=s(Xu,"A",{id:!0,class:!0,href:!0});var ME=r(bs);hh=s(ME,"SPAN",{});var zE=r(hh);k(vi.$$.fragment,zE),zE.forEach(t),ME.forEach(t),c0=d(Xu),fh=s(Xu,"SPAN",{});var PE=r(fh);p0=i(PE,"FlaxAlbertForPreTraining"),PE.forEach(t),Xu.forEach(t),jm=d(o),je=s(o,"DIV",{class:!0});var At=r(je);k(ki.$$.fragment,At),h0=d(At),_n=s(At,"P",{});var tc=r(_n);f0=i(tc,"Albert Model 
with two heads on top as done during the pretraining: a "),mh=s(tc,"CODE",{});var qE=r(mh);m0=i(qE,"masked language modeling"),qE.forEach(t),u0=i(tc,` head and a `),uh=s(tc,"CODE",{});var jE=r(uh);g0=i(jE,"sentence order prediction (classification)"),jE.forEach(t),_0=i(tc," head."),tc.forEach(t),b0=d(At),Ti=s(At,"P",{});var Zu=r(Ti);v0=i(Zu,"This model inherits from "),Fd=s(Zu,"A",{href:!0});var CE=r(Fd);k0=i(CE,"FlaxPreTrainedModel"),CE.forEach(t),T0=i(Zu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Zu.forEach(t),y0=d(At),yi=s(At,"P",{});var Yu=r(yi);w0=i(Yu,"This model is also a Flax Linen "),wi=s(Yu,"A",{href:!0,rel:!0});var IE=r(wi);A0=i(IE,"flax.linen.Module"),IE.forEach(t),F0=i(Yu,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Yu.forEach(t),$0=d(At),gh=s(At,"P",{});var LE=r(gh);x0=i(LE,"Finally, this model supports inherent JAX features such as:"),LE.forEach(t),E0=d(At),Bt=s(At,"UL",{});var Ls=r(Bt);_h=s(Ls,"LI",{});var NE=r(_h);Ai=s(NE,"A",{href:!0,rel:!0});var DE=r(Ai);M0=i(DE,"Just-In-Time (JIT) compilation"),DE.forEach(t),NE.forEach(t),z0=d(Ls),bh=s(Ls,"LI",{});var SE=r(bh);Fi=s(SE,"A",{href:!0,rel:!0});var OE=r(Fi);P0=i(OE,"Automatic Differentiation"),OE.forEach(t),SE.forEach(t),q0=d(Ls),vh=s(Ls,"LI",{});var WE=r(vh);$i=s(WE,"A",{href:!0,rel:!0});var UE=r($i);j0=i(UE,"Vectorization"),UE.forEach(t),WE.forEach(t),C0=d(Ls),kh=s(Ls,"LI",{});var BE=r(kh);xi=s(BE,"A",{href:!0,rel:!0});var RE=r(xi);I0=i(RE,"Parallelization"),RE.forEach(t),BE.forEach(t),Ls.forEach(t),L0=d(At),gt=s(At,"DIV",{class:!0});var To=r(gt);k(Ei.$$.fragment,To),N0=d(To),bn=s(To,"P",{});var oc=r(bn);D0=i(oc,"The "),Th=s(oc,"CODE",{});var HE=r(Th);S0=i(HE,"FlaxAlbertPreTrainedModel"),HE.forEach(t),O0=i(oc," forward method, overrides the "),yh=s(oc,"CODE",{});var QE=r(yh);W0=i(QE,"__call__"),QE.forEach(t),U0=i(oc," special method."),oc.forEach(t),B0=d(To),k(vs.$$.fragment,To),R0=d(To),wh=s(To,"P",{});var VE=r(wh);H0=i(VE,"Example:"),VE.forEach(t),Q0=d(To),k(Mi.$$.fragment,To),To.forEach(t),At.forEach(t),Cm=d(o),vn=s(o,"H2",{class:!0});var eg=r(vn);ks=s(eg,"A",{id:!0,class:!0,href:!0});var JE=r(ks);Ah=s(JE,"SPAN",{});var KE=r(Ah);k(zi.$$.fragment,KE),KE.forEach(t),JE.forEach(t),V0=d(eg),Fh=s(eg,"SPAN",{});var GE=r(Fh);J0=i(GE,"FlaxAlbertForMaskedLM"),GE.forEach(t),eg.forEach(t),Im=d(o),Ce=s(o,"DIV",{class:!0});var Ft=r(Ce);k(Pi.$$.fragment,Ft),K0=d(Ft),qi=s(Ft,"P",{});var tg=r(qi);G0=i(tg,"Albert Model with a "),$h=s(tg,"CODE",{});var XE=r($h);X0=i(XE,"language modeling"),XE.forEach(t),Z0=i(tg," head on top."),tg.forEach(t),Y0=d(Ft),ji=s(Ft,"P",{});var og=r(ji);e2=i(og,"This model inherits from "),$d=s(og,"A",{href:!0});var ZE=r($d);t2=i(ZE,"FlaxPreTrainedModel"),ZE.forEach(t),o2=i(og,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),og.forEach(t),n2=d(Ft),Ci=s(Ft,"P",{});var ng=r(Ci);s2=i(ng,"This model is also a Flax Linen "),Ii=s(ng,"A",{href:!0,rel:!0});var YE=r(Ii);r2=i(YE,"flax.linen.Module"),YE.forEach(t),a2=i(ng,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ng.forEach(t),i2=d(Ft),xh=s(Ft,"P",{});var eM=r(xh);l2=i(eM,"Finally, this model supports inherent JAX features such as:"),eM.forEach(t),d2=d(Ft),Rt=s(Ft,"UL",{});var Ns=r(Rt);Eh=s(Ns,"LI",{});var tM=r(Eh);Li=s(tM,"A",{href:!0,rel:!0});var oM=r(Li);c2=i(oM,"Just-In-Time (JIT) compilation"),oM.forEach(t),tM.forEach(t),p2=d(Ns),Mh=s(Ns,"LI",{});var nM=r(Mh);Ni=s(nM,"A",{href:!0,rel:!0});var sM=r(Ni);h2=i(sM,"Automatic Differentiation"),sM.forEach(t),nM.forEach(t),f2=d(Ns),zh=s(Ns,"LI",{});var rM=r(zh);Di=s(rM,"A",{href:!0,rel:!0});var aM=r(Di);m2=i(aM,"Vectorization"),aM.forEach(t),rM.forEach(t),u2=d(Ns),Ph=s(Ns,"LI",{});var iM=r(Ph);Si=s(iM,"A",{href:!0,rel:!0});var lM=r(Si);g2=i(lM,"Parallelization"),lM.forEach(t),iM.forEach(t),Ns.forEach(t),_2=d(Ft),_t=s(Ft,"DIV",{class:!0});var yo=r(_t);k(Oi.$$.fragment,yo),b2=d(yo),kn=s(yo,"P",{});var nc=r(kn);v2=i(nc,"The "),qh=s(nc,"CODE",{});var dM=r(qh);k2=i(dM,"FlaxAlbertPreTrainedModel"),dM.forEach(t),T2=i(nc," forward method, overrides the "),jh=s(nc,"CODE",{});var cM=r(jh);y2=i(cM,"__call__"),cM.forEach(t),w2=i(nc," special method."),nc.forEach(t),A2=d(yo),k(Ts.$$.fragment,yo),F2=d(yo),Ch=s(yo,"P",{});var pM=r(Ch);$2=i(pM,"Example:"),pM.forEach(t),x2=d(yo),k(Wi.$$.fragment,yo),yo.forEach(t),Ft.forEach(t),Lm=d(o),Tn=s(o,"H2",{class:!0});var sg=r(Tn);ys=s(sg,"A",{id:!0,class:!0,href:!0});var hM=r(ys);Ih=s(hM,"SPAN",{});var fM=r(Ih);k(Ui.$$.fragment,fM),fM.forEach(t),hM.forEach(t),E2=d(sg),Lh=s(sg,"SPAN",{});var mM=r(Lh);M2=i(mM,"FlaxAlbertForSequenceClassification"),mM.forEach(t),sg.forEach(t),Nm=d(o),Ie=s(o,"DIV",{class:!0});var $t=r(Ie);k(Bi.$$.fragment,$t),z2=d($t),Nh=s($t,"P",{});var uM=r(Nh);P2=i(uM,`Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),uM.forEach(t),q2=d($t),Ri=s($t,"P",{});var rg=r(Ri);j2=i(rg,"This model inherits from "),xd=s(rg,"A",{href:!0});var gM=r(xd);C2=i(gM,"FlaxPreTrainedModel"),gM.forEach(t),I2=i(rg,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),rg.forEach(t),L2=d($t),Hi=s($t,"P",{});var ag=r(Hi);N2=i(ag,"This model is also a Flax Linen "),Qi=s(ag,"A",{href:!0,rel:!0});var _M=r(Qi);D2=i(_M,"flax.linen.Module"),_M.forEach(t),S2=i(ag,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ag.forEach(t),O2=d($t),Dh=s($t,"P",{});var bM=r(Dh);W2=i(bM,"Finally, this model supports inherent JAX features such as:"),bM.forEach(t),U2=d($t),Ht=s($t,"UL",{});var Ds=r(Ht);Sh=s(Ds,"LI",{});var vM=r(Sh);Vi=s(vM,"A",{href:!0,rel:!0});var kM=r(Vi);B2=i(kM,"Just-In-Time (JIT) compilation"),kM.forEach(t),vM.forEach(t),R2=d(Ds),Oh=s(Ds,"LI",{});var TM=r(Oh);Ji=s(TM,"A",{href:!0,rel:!0});var yM=r(Ji);H2=i(yM,"Automatic Differentiation"),yM.forEach(t),TM.forEach(t),Q2=d(Ds),Wh=s(Ds,"LI",{});var wM=r(Wh);Ki=s(wM,"A",{href:!0,rel:!0});var AM=r(Ki);V2=i(AM,"Vectorization"),AM.forEach(t),wM.forEach(t),J2=d(Ds),Uh=s(Ds,"LI",{});var FM=r(Uh);Gi=s(FM,"A",{href:!0,rel:!0});var $M=r(Gi);K2=i($M,"Parallelization"),$M.forEach(t),FM.forEach(t),Ds.forEach(t),G2=d($t),bt=s($t,"DIV",{class:!0});var wo=r(bt);k(Xi.$$.fragment,wo),X2=d(wo),yn=s(wo,"P",{});var sc=r(yn);Z2=i(sc,"The "),Bh=s(sc,"CODE",{});var xM=r(Bh);Y2=i(xM,"FlaxAlbertPreTrainedModel"),xM.forEach(t),eA=i(sc," forward method, overrides the "),Rh=s(sc,"CODE",{});var EM=r(Rh);tA=i(EM,"__call__"),EM.forEach(t),oA=i(sc," special method."),sc.forEach(t),nA=d(wo),k(ws.$$.fragment,wo),sA=d(wo),Hh=s(wo,"P",{});var MM=r(Hh);rA=i(MM,"Example:"),MM.forEach(t),aA=d(wo),k(Zi.$$.fragment,wo),wo.forEach(t),$t.forEach(t),Dm=d(o),wn=s(o,"H2",{class:!0});var ig=r(wn);As=s(ig,"A",{id:!0,class:!0,href:!0});var zM=r(As);Qh=s(zM,"SPAN",{});var PM=r(Qh);k(Yi.$$.fragment,PM),PM.forEach(t),zM.forEach(t),iA=d(ig),Vh=s(ig,"SPAN",{});var qM=r(Vh);lA=i(qM,"FlaxAlbertForMultipleChoice"),qM.forEach(t),ig.forEach(t),Sm=d(o),Le=s(o,"DIV",{class:!0});var xt=r(Le);k(el.$$.fragment,xt),dA=d(xt),Jh=s(xt,"P",{});var jM=r(Jh);cA=i(jM,`Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),jM.forEach(t),pA=d(xt),tl=s(xt,"P",{});var lg=r(tl);hA=i(lg,"This model inherits from "),Ed=s(lg,"A",{href:!0});var CM=r(Ed);fA=i(CM,"FlaxPreTrainedModel"),CM.forEach(t),mA=i(lg,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),lg.forEach(t),uA=d(xt),ol=s(xt,"P",{});var dg=r(ol);gA=i(dg,"This model is also a Flax Linen "),nl=s(dg,"A",{href:!0,rel:!0});var IM=r(nl);_A=i(IM,"flax.linen.Module"),IM.forEach(t),bA=i(dg,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),dg.forEach(t),vA=d(xt),Kh=s(xt,"P",{});var LM=r(Kh);kA=i(LM,"Finally, this model supports inherent JAX features such as:"),LM.forEach(t),TA=d(xt),Qt=s(xt,"UL",{});var Ss=r(Qt);Gh=s(Ss,"LI",{});var NM=r(Gh);sl=s(NM,"A",{href:!0,rel:!0});var DM=r(sl);yA=i(DM,"Just-In-Time (JIT) compilation"),DM.forEach(t),NM.forEach(t),wA=d(Ss),Xh=s(Ss,"LI",{});var SM=r(Xh);rl=s(SM,"A",{href:!0,rel:!0});var OM=r(rl);AA=i(OM,"Automatic Differentiation"),OM.forEach(t),SM.forEach(t),FA=d(Ss),Zh=s(Ss,"LI",{});var WM=r(Zh);al=s(WM,"A",{href:!0,rel:!0});var UM=r(al);$A=i(UM,"Vectorization"),UM.forEach(t),WM.forEach(t),xA=d(Ss),Yh=s(Ss,"LI",{});var BM=r(Yh);il=s(BM,"A",{href:!0,rel:!0});var RM=r(il);EA=i(RM,"Parallelization"),RM.forEach(t),BM.forEach(t),Ss.forEach(t),MA=d(xt),vt=s(xt,"DIV",{class:!0});var Ao=r(vt);k(ll.$$.fragment,Ao),zA=d(Ao),An=s(Ao,"P",{});var rc=r(An);PA=i(rc,"The "),ef=s(rc,"CODE",{});var HM=r(ef);qA=i(HM,"FlaxAlbertPreTrainedModel"),HM.forEach(t),jA=i(rc," forward method, overrides the "),tf=s(rc,"CODE",{});var QM=r(tf);CA=i(QM,"__call__"),QM.forEach(t),IA=i(rc," special method."),rc.forEach(t),LA=d(Ao),k(Fs.$$.fragment,Ao),NA=d(Ao),of=s(Ao,"P",{});var VM=r(of);DA=i(VM,"Example:"),VM.forEach(t),SA=d(Ao),k(dl.$$.fragment,Ao),Ao.forEach(t),xt.forEach(t),Om=d(o),Fn=s(o,"H2",{class:!0});var cg=r(Fn);$s=s(cg,"A",{id:!0,class:!0,href:!0});var JM=r($s);nf=s(JM,"SPAN",{});var KM=r(nf);k(cl.$$.fragment,KM),KM.forEach(t),JM.forEach(t),OA=d(cg),sf=s(cg,"SPAN",{});var GM=r(sf);WA=i(GM,"FlaxAlbertForTokenClassification"),GM.forEach(t),cg.forEach(t),Wm=d(o),Ne=s(o,"DIV",{class:!0});var Et=r(Ne);k(pl.$$.fragment,Et),UA=d(Et),rf=s(Et,"P",{});var XM=r(rf);BA=i(XM,`Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),XM.forEach(t),RA=d(Et),hl=s(Et,"P",{});var pg=r(hl);HA=i(pg,"This model inherits from "),Md=s(pg,"A",{href:!0});var ZM=r(Md);QA=i(ZM,"FlaxPreTrainedModel"),ZM.forEach(t),VA=i(pg,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),pg.forEach(t),JA=d(Et),fl=s(Et,"P",{});var hg=r(fl);KA=i(hg,"This model is also a Flax Linen "),ml=s(hg,"A",{href:!0,rel:!0});var YM=r(ml);GA=i(YM,"flax.linen.Module"),YM.forEach(t),XA=i(hg,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),hg.forEach(t),ZA=d(Et),af=s(Et,"P",{});var ez=r(af);YA=i(ez,"Finally, this model supports inherent JAX features such as:"),ez.forEach(t),eF=d(Et),Vt=s(Et,"UL",{});var Os=r(Vt);lf=s(Os,"LI",{});var tz=r(lf);ul=s(tz,"A",{href:!0,rel:!0});var oz=r(ul);tF=i(oz,"Just-In-Time (JIT) compilation"),oz.forEach(t),tz.forEach(t),oF=d(Os),df=s(Os,"LI",{});var nz=r(df);gl=s(nz,"A",{href:!0,rel:!0});var sz=r(gl);nF=i(sz,"Automatic Differentiation"),sz.forEach(t),nz.forEach(t),sF=d(Os),cf=s(Os,"LI",{});var rz=r(cf);_l=s(rz,"A",{href:!0,rel:!0});var az=r(_l);rF=i(az,"Vectorization"),az.forEach(t),rz.forEach(t),aF=d(Os),pf=s(Os,"LI",{});var iz=r(pf);bl=s(iz,"A",{href:!0,rel:!0});var lz=r(bl);iF=i(lz,"Parallelization"),lz.forEach(t),iz.forEach(t),Os.forEach(t),lF=d(Et),kt=s(Et,"DIV",{class:!0});var Fo=r(kt);k(vl.$$.fragment,Fo),dF=d(Fo),$n=s(Fo,"P",{});var ac=r($n);cF=i(ac,"The "),hf=s(ac,"CODE",{});var dz=r(hf);pF=i(dz,"FlaxAlbertPreTrainedModel"),dz.forEach(t),hF=i(ac," forward method, overrides the "),ff=s(ac,"CODE",{});var cz=r(ff);fF=i(cz,"__call__"),cz.forEach(t),mF=i(ac," special method."),ac.forEach(t),uF=d(Fo),k(xs.$$.fragment,Fo),gF=d(Fo),mf=s(Fo,"P",{});var pz=r(mf);_F=i(pz,"Example:"),pz.forEach(t),bF=d(Fo),k(kl.$$.fragment,Fo),Fo.forEach(t),Et.forEach(t),Um=d(o),xn=s(o,"H2",{class:!0});var fg=r(xn);Es=s(fg,"A",{id:!0,class:!0,href:!0});var hz=r(Es);uf=s(hz,"SPAN",{});var fz=r(uf);k(Tl.$$.fragment,fz),fz.forEach(t),hz.forEach(t),vF=d(fg),gf=s(fg,"SPAN",{});var mz=r(gf);kF=i(mz,"FlaxAlbertForQuestionAnswering"),mz.forEach(t),fg.forEach(t),Bm=d(o),De=s(o,"DIV",{class:!0});var Mt=r(De);k(yl.$$.fragment,Mt),TF=d(Mt),En=s(Mt,"P",{});var ic=r(En);yF=i(ic,`Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),_f=s(ic,"CODE",{});var uz=r(_f);wF=i(uz,"span start logits"),uz.forEach(t),AF=i(ic," and "),bf=s(ic,"CODE",{});var gz=r(bf);FF=i(gz,"span end logits"),gz.forEach(t),$F=i(ic,")."),ic.forEach(t),xF=d(Mt),wl=s(Mt,"P",{});var mg=r(wl);EF=i(mg,"This model inherits from "),zd=s(mg,"A",{href:!0});var _z=r(zd);MF=i(_z,"FlaxPreTrainedModel"),_z.forEach(t),zF=i(mg,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),mg.forEach(t),PF=d(Mt),Al=s(Mt,"P",{});var ug=r(Al);qF=i(ug,"This model is also a Flax Linen "),Fl=s(ug,"A",{href:!0,rel:!0});var bz=r(Fl);jF=i(bz,"flax.linen.Module"),bz.forEach(t),CF=i(ug,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ug.forEach(t),IF=d(Mt),vf=s(Mt,"P",{});var vz=r(vf);LF=i(vz,"Finally, this model supports inherent JAX features such as:"),vz.forEach(t),NF=d(Mt),Jt=s(Mt,"UL",{});var Ws=r(Jt);kf=s(Ws,"LI",{});var kz=r(kf);$l=s(kz,"A",{href:!0,rel:!0});var Tz=r($l);DF=i(Tz,"Just-In-Time (JIT) compilation"),Tz.forEach(t),kz.forEach(t),SF=d(Ws),Tf=s(Ws,"LI",{});var yz=r(Tf);xl=s(yz,"A",{href:!0,rel:!0});var wz=r(xl);OF=i(wz,"Automatic Differentiation"),wz.forEach(t),yz.forEach(t),WF=d(Ws),yf=s(Ws,"LI",{});var Az=r(yf);El=s(Az,"A",{href:!0,rel:!0});var Fz=r(El);UF=i(Fz,"Vectorization"),Fz.forEach(t),Az.forEach(t),BF=d(Ws),wf=s(Ws,"LI",{});var $z=r(wf);Ml=s($z,"A",{href:!0,rel:!0});var xz=r(Ml);RF=i(xz,"Parallelization"),xz.forEach(t),$z.forEach(t),Ws.forEach(t),HF=d(Mt),Tt=s(Mt,"DIV",{class:!0});var $o=r(Tt);k(zl.$$.fragment,$o),QF=d($o),Mn=s($o,"P",{});var lc=r(Mn);VF=i(lc,"The "),Af=s(lc,"CODE",{});var Ez=r(Af);JF=i(Ez,"FlaxAlbertPreTrainedModel"),Ez.forEach(t),KF=i(lc," forward method, overrides the "),Ff=s(lc,"CODE",{});var Mz=r(Ff);GF=i(Mz,"__call__"),Mz.forEach(t),XF=i(lc," special method."),lc.forEach(t),ZF=d($o),k(Ms.$$.fragment,$o),YF=d($o),$f=s($o,"P",{});var zz=r($f);e$=i(zz,"Example:"),zz.forEach(t),t$=d($o),k(Pl.$$.fragment,$o),$o.forEach(t),Mt.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(c5)),c(b,"id","albert"),c(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(b,"href","#albert"),c(g,"class","relative group"),c(Z,"id","overview"),c(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Z,"href","#overview"),c(M,"class","relative group"),c(te,"href","https://arxiv.org/abs/1909.11942"),c(te,"rel","nofollow"),c(ve,"href","https://huggingface.co/lysandre"),c(ve,"rel","nofollow"),c(O,"href","https://huggingface.co/kamalkraj"),c(O,"rel","nofollow"),c(W,"href","https://github.com/google-research/ALBERT"),c(W,"rel","nofollow"),c(zn,"id","transformers.AlbertConfig"),c(zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zn,"href","#transformers.AlbertConfig"),c(be,"class","relative group"),c(Dl,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertModel"),c(Sl,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertModel"),c(Rs,"href","https://huggingface.co/albert-xxlarge-v2"),c(Rs,"rel","nofollow"),c(Ol,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Wl,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Je,"class","docstring"),c(Pn,"id","transformers.AlbertTokenizer"),c(Pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Pn,"href","#transformers.AlbertTokenizer"),c(Eo,"class","relative 
group"),c(Ks,"href","https://github.com/google/sentencepiece"),c(Ks,"rel","nofollow"),c(Ul,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Gt,"class","docstring"),c(qn,"class","docstring"),c(Pt,"class","docstring"),c(wc,"class","docstring"),c(Pe,"class","docstring"),c(jn,"id","transformers.AlbertTokenizerFast"),c(jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jn,"href","#transformers.AlbertTokenizerFast"),c(Po,"class","relative group"),c(rr,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),c(rr,"rel","nofollow"),c(Hl,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Xt,"class","docstring"),c(qt,"class","docstring"),c(zt,"class","docstring"),c(Cn,"id","transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput"),c(Cn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Cn,"href","#transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput"),c(qo,"class","relative group"),c(Jl,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForPreTraining"),c(jo,"class","docstring"),c(Kl,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForPreTraining"),c(Co,"class","docstring"),c(In,"id","transformers.AlbertModel"),c(In,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(In,"href","#transformers.AlbertModel"),c(Io,"class","relative group"),c(Gl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(vr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(vr,"rel","nofollow"),c(Xl,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertModel"),c(ot,"class","docstring"),c(Ke,"class","docstring"),c(Nn,"id","transformers.AlbertForPreTraining"),c(Nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nn,"href","#transformers.AlbertForPreTraining"),c(No,"class","relative group"),c(Zl,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c($r,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c($r,"rel","nofollow"),c(Yl,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForPreTraining"),c(nt,"class","docstring"),c(Ge,"class","docstring"),c(Sn,"id","transformers.AlbertForMaskedLM"),c(Sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Sn,"href","#transformers.AlbertForMaskedLM"),c(Oo,"class","relative group"),c(ed,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Cr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Cr,"rel","nofollow"),c(td,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForMaskedLM"),c(st,"class","docstring"),c(Xe,"class","docstring"),c(Wn,"id","transformers.AlbertForSequenceClassification"),c(Wn,"class","header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wn,"href","#transformers.AlbertForSequenceClassification"),c(Uo,"class","relative group"),c(od,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Wr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Wr,"rel","nofollow"),c(nd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForSequenceClassification"),c(Se,"class","docstring"),c(Ze,"class","docstring"),c(Bn,"id","transformers.AlbertForMultipleChoice"),c(Bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bn,"href","#transformers.AlbertForMultipleChoice"),c(Ro,"class","relative group"),c(sd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Kr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Kr,"rel","nofollow"),c(rd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForMultipleChoice"),c(rt,"class","docstring"),c(Ye,"class","docstring"),c(Hn,"id","transformers.AlbertForTokenClassification"),c(Hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Hn,"href","#transformers.AlbertForTokenClassification"),c(Qo,"class","relative group"),c(ad,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(oa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(oa,"rel","nofollow"),c(id,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForTokenClassification"),c(at,"class","docstring"),c(et,"class","docstring"),c(Vn,"id","transformers.AlbertForQuestionAnswering"),c(Vn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vn,"href","#transformers.AlbertForQuestionAnswering"),c(Jo,"class","relative group"),c(ld,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(da,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(da,"rel","nofollow"),c(dd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForQuestionAnswering"),c(it,"class","docstring"),c(tt,"class","docstring"),c(Kn,"id","transformers.TFAlbertModel"),c(Kn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kn,"href","#transformers.TFAlbertModel"),c(Xo,"class","relative group"),c(cd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ga,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ga,"rel","nofollow"),c(pd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertModel"),c(lt,"class","docstring"),c(Oe,"class","docstring"),c(Zn,"id","transformers.TFAlbertForPreTraining"),c(Zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Zn,"href","#transformers.TFAlbertForPreTraining"),c(Yo,"class","relative 
group"),c(hd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(wa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(wa,"rel","nofollow"),c(fd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForPreTraining"),c(dt,"class","docstring"),c(We,"class","docstring"),c(ts,"id","transformers.TFAlbertForMaskedLM"),c(ts,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ts,"href","#transformers.TFAlbertForMaskedLM"),c(on,"class","relative group"),c(md,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Pa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Pa,"rel","nofollow"),c(ud,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForMaskedLM"),c(ct,"class","docstring"),c(Ue,"class","docstring"),c(ss,"id","transformers.TFAlbertForSequenceClassification"),c(ss,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ss,"href","#transformers.TFAlbertForSequenceClassification"),c(sn,"class","relative group"),c(gd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Da,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Da,"rel","nofollow"),c(_d,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForSequenceClassification"),c(pt,"class","docstring"),c(Be,"class","docstring"),c(is,"id","transformers.TFAlbertForMultipleChoice"),c(is,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(is,"href","#transformers.TFAlbertForMultipleChoice"),c(an,"class","relative group"),c(bd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ha,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ha,"rel","nofollow"),c(vd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForMultipleChoice"),c(ht,"class","docstring"),c(Re,"class","docstring"),c(cs,"id","transformers.TFAlbertForTokenClassification"),c(cs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(cs,"href","#transformers.TFAlbertForTokenClassification"),c(dn,"class","relative group"),c(kd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Za,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Za,"rel","nofollow"),c(Td,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForTokenClassification"),c(ft,"class","docstring"),c(He,"class","docstring"),c(fs,"id","transformers.TFAlbertForQuestionAnswering"),c(fs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fs,"href","#transformers.TFAlbertForQuestionAnswering"),c(pn,"class","relative 
group"),c(yd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ri,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ri,"rel","nofollow"),c(wd,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForQuestionAnswering"),c(mt,"class","docstring"),c(Qe,"class","docstring"),c(gs,"id","transformers.FlaxAlbertModel"),c(gs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(gs,"href","#transformers.FlaxAlbertModel"),c(mn,"class","relative group"),c(Ad,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(hi,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(hi,"rel","nofollow"),c(fi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(fi,"rel","nofollow"),c(mi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(mi,"rel","nofollow"),c(ui,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(ui,"rel","nofollow"),c(gi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(gi,"rel","nofollow"),c(ut,"class","docstring"),c(qe,"class","docstring"),c(bs,"id","transformers.FlaxAlbertForPreTraining"),c(bs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bs,"href","#transformers.FlaxAlbertForPreTraining"),c(gn,"class","relative group"),c(Fd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(wi,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(wi,"rel","nofollow"),c(Ai,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Ai,"rel","nofollow"),c(Fi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Fi,"rel","nofollow"),c($i,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c($i,"rel","nofollow"),c(xi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(xi,"rel","nofollow"),c(gt,"class","docstring"),c(je,"class","docstring"),c(ks,"id","transformers.FlaxAlbertForMaskedLM"),c(ks,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ks,"href","#transformers.FlaxAlbertForMaskedLM"),c(vn,"class","relative group"),c($d,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ii,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Ii,"rel","nofollow"),c(Li,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Li,"rel","nofollow"),c(Ni,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ni,"rel","nofollow"),c(Di,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Di,"rel","nofollow"),c(Si,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Si,"rel","nofollow"),c(_t,"class","docstring"),c(Ce,"class","docstring"),c(ys,"id","transformers.FlaxAlbertForSequenceClassification"),c(ys,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(ys,"href","#transformers.FlaxAlbertForSequenceClassification"),c(Tn,"class","relative group"),c(xd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Qi,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Qi,"rel","nofollow"),c(Vi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Vi,"rel","nofollow"),c(Ji,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ji,"rel","nofollow"),c(Ki,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ki,"rel","nofollow"),c(Gi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Gi,"rel","nofollow"),c(bt,"class","docstring"),c(Ie,"class","docstring"),c(As,"id","transformers.FlaxAlbertForMultipleChoice"),c(As,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(As,"href","#transformers.FlaxAlbertForMultipleChoice"),c(wn,"class","relative group"),c(Ed,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(nl,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(nl,"rel","nofollow"),c(sl,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(sl,"rel","nofollow"),c(rl,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(rl,"rel","nofollow"),c(al,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(al,"rel","nofollow"),c(il,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(il,"rel","nofollow"),c(vt,"class","docstring"),c(Le,"class","docstring"),c($s,"id","transformers.FlaxAlbertForTokenClassification"),c($s,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c($s,"href","#transformers.FlaxAlbertForTokenClassification"),c(Fn,"class","relative group"),c(Md,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ml,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(ml,"rel","nofollow"),c(ul,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(ul,"rel","nofollow"),c(gl,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(gl,"rel","nofollow"),c(_l,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(_l,"rel","nofollow"),c(bl,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(bl,"rel","nofollow"),c(kt,"class","docstring"),c(Ne,"class","docstring"),c(Es,"id","transformers.FlaxAlbertForQuestionAnswering"),c(Es,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Es,"href","#transformers.FlaxAlbertForQuestionAnswering"),c(xn,"class","relative 
group"),c(zd,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Fl,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Fl,"rel","nofollow"),c($l,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c($l,"rel","nofollow"),c(xl,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(xl,"rel","nofollow"),c(El,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(El,"rel","nofollow"),c(Ml,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Ml,"rel","nofollow"),c(Tt,"class","docstring"),c(De,"class","docstring")},m(o,m){e(document.head,p),f(o,$,m),f(o,g,m),e(g,b),e(b,F),T(_,F,null),e(g,u),e(g,x),e(x,pe),f(o,G,m),f(o,M,m),e(M,Z),e(Z,U),T(ee,U,null),e(M,he),e(M,B),e(B,fe),f(o,ae,m),f(o,J,m),e(J,I),e(J,te),e(te,X),e(J,z),f(o,q,m),f(o,Y,m),e(Y,R),e(R,me),e(Y,ue),e(Y,H),e(H,ge),f(o,ie,m),f(o,P,m),e(P,_e),f(o,Q,m),f(o,oe,m),e(oe,se),e(se,V),f(o,le,m),f(o,ne,m),e(ne,L),f(o,de,m),f(o,S,m),e(S,re),e(re,h),e(S,E),e(S,K),e(K,Ae),f(o,ye,m),f(o,j,m),e(j,Fe),e(j,ve),e(ve,$e),e(j,N),e(j,O),e(O,xe),e(j,Ee),e(j,W),e(W,Me),e(j,ze),f(o,ce,m),f(o,be,m),e(be,zn),e(zn,dc),T(Us,dc,null),e(be,gg),e(be,cc),e(cc,_g),f(o,Jf,m),f(o,Je,m),T(Bs,Je,null),e(Je,bg),e(Je,Ot),e(Ot,vg),e(Ot,Dl),e(Dl,kg),e(Ot,Tg),e(Ot,Sl),e(Sl,yg),e(Ot,wg),e(Ot,Rs),e(Rs,Ag),e(Ot,Fg),e(Je,$g),e(Je,xo),e(xo,xg),e(xo,Ol),e(Ol,Eg),e(xo,Mg),e(xo,Wl),e(Wl,zg),e(xo,Pg),e(Je,qg),e(Je,pc),e(pc,jg),e(Je,Cg),T(Hs,Je,null),f(o,Kf,m),f(o,Eo,m),e(Eo,Pn),e(Pn,hc),T(Qs,hc,null),e(Eo,Ig),e(Eo,fc),e(fc,Lg),f(o,Gf,m),f(o,Pe,m),T(Vs,Pe,null),e(Pe,Ng),e(Pe,Js),e(Js,Dg),e(Js,Ks),e(Ks,Sg),e(Js,Og),e(Pe,Wg),e(Pe,Gs),e(Gs,Ug),e(Gs,Ul),e(Ul,Bg),e(Gs,Rg),e(Pe,Hg),e(Pe,Mo),e(Mo,Qg),e(Mo,mc),e(mc,Vg),e(Mo,Jg),e(Mo,uc),e(uc,Kg),e(Mo,Gg),e(Pe,Xg),e(Pe,Gt),T(Xs,Gt,null),e(Gt,Zg),e(Gt,gc),e(gc,Yg),e(Gt,e_),e(Gt,Zs),e(Zs,Bl),e(Bl,t_),e(Bl,_c),e(_c,o_),e(Zs,n_),e(Zs,Rl),e(Rl,s_),e(Rl,bc),e(bc,r_),e(Pe,a_),e(Pe,qn),T(Ys,qn,null),e(qn,i_),e(qn,er),e(er,l_),e(er,vc),e(vc,d_),e(er,c_),e(Pe,p_),e(Pe,Pt),T(tr,Pt,null),e(Pt,h_),e(Pt,kc),e(kc,f_),e(Pt,m_),T(or,Pt,null),e(Pt,u_),e(Pt,zo),e(zo,g_),e(zo,Tc),e(Tc,__),e(zo,b_),e(zo,yc),e(yc,v_),e(zo,k_),e(Pe,T_),e(Pe,wc),f(o,Xf,m),f(o,Po,m),e(Po,jn),e(jn,Ac),T(nr,Ac,null),e(Po,y_),e(Po,Fc),e(Fc,w_),f(o,Zf,m),f(o,zt,m),T(sr,zt,null),e(zt,A_),e(zt,Wt),e(Wt,F_),e(Wt,$c),e($c,$_),e(Wt,x_),e(Wt,rr),e(rr,E_),e(Wt,M_),e(Wt,Hl),e(Hl,z_),e(Wt,P_),e(zt,q_),e(zt,Xt),T(ar,Xt,null),e(Xt,j_),e(Xt,xc),e(xc,C_),e(Xt,I_),e(Xt,ir),e(ir,Ql),e(Ql,L_),e(Ql,Ec),e(Ec,N_),e(ir,D_),e(ir,Vl),e(Vl,S_),e(Vl,Mc),e(Mc,O_),e(zt,W_),e(zt,qt),T(lr,qt,null),e(qt,U_),e(qt,zc),e(zc,B_),e(qt,R_),T(dr,qt,null),e(qt,H_),e(qt,Pc),e(Pc,Q_),f(o,Yf,m),f(o,qo,m),e(qo,Cn),e(Cn,qc),T(cr,qc,null),e(qo,V_),e(qo,jc),e(jc,J_),f(o,em,m),f(o,jo,m),T(pr,jo,null),e(jo,K_),e(jo,hr),e(hr,G_),e(hr,Jl),e(Jl,X_),e(hr,Z_),f(o,tm,m),f(o,Co,m),T(fr,Co,null),e(Co,Y_),e(Co,mr),e(mr,eb),e(mr,Kl),e(Kl,tb),e(mr,ob),f(o,om,m),f(o,Io,m),e(Io,In),e(In,Cc),T(ur,Cc,null),e(Io,nb),e(Io,Ic),e(Ic,sb),f(o,nm,m),f(o,Ke,m),T(gr,Ke,null),e(Ke,rb),e(Ke,Lc),e(Lc,ab),e(Ke,ib),e(Ke,_r),e(_r,lb),e(_r,Gl),e(Gl,db),e(_r,cb),e(Ke,pb),e(Ke,br),e(br,hb),e(br,vr),e(vr,fb),e(br,mb),e(Ke,ub),e(Ke,ot),T(kr,ot,null),e(ot,gb),e(ot,Lo),e(Lo,_b),e(Lo,Xl),e(Xl,bb),e(Lo,vb),e(Lo,Nc),e(Nc,kb),e(Lo,Tb),e(ot,yb),T(Ln,ot,null),e(ot,wb),e(ot,Dc),e(Dc,Ab),e(ot,Fb),T(Tr,ot,null),f(o,sm,m),f(o,No,m),e(No,Nn),e(Nn,Sc),T(yr,Sc,null),e(No,$b),e(No,Oc),e(Oc,xb),f(o,rm,m),f(o,Ge,m),T(
wr,Ge,null),e(Ge,Eb),e(Ge,Do),e(Do,Mb),e(Do,Wc),e(Wc,zb),e(Do,Pb),e(Do,Uc),e(Uc,qb),e(Do,jb),e(Ge,Cb),e(Ge,Ar),e(Ar,Ib),e(Ar,Zl),e(Zl,Lb),e(Ar,Nb),e(Ge,Db),e(Ge,Fr),e(Fr,Sb),e(Fr,$r),e($r,Ob),e(Fr,Wb),e(Ge,Ub),e(Ge,nt),T(xr,nt,null),e(nt,Bb),e(nt,So),e(So,Rb),e(So,Yl),e(Yl,Hb),e(So,Qb),e(So,Bc),e(Bc,Vb),e(So,Jb),e(nt,Kb),T(Dn,nt,null),e(nt,Gb),e(nt,Rc),e(Rc,Xb),e(nt,Zb),T(Er,nt,null),f(o,am,m),f(o,Oo,m),e(Oo,Sn),e(Sn,Hc),T(Mr,Hc,null),e(Oo,Yb),e(Oo,Qc),e(Qc,ev),f(o,im,m),f(o,Xe,m),T(zr,Xe,null),e(Xe,tv),e(Xe,Pr),e(Pr,ov),e(Pr,Vc),e(Vc,nv),e(Pr,sv),e(Xe,rv),e(Xe,qr),e(qr,av),e(qr,ed),e(ed,iv),e(qr,lv),e(Xe,dv),e(Xe,jr),e(jr,cv),e(jr,Cr),e(Cr,pv),e(jr,hv),e(Xe,fv),e(Xe,st),T(Ir,st,null),e(st,mv),e(st,Wo),e(Wo,uv),e(Wo,td),e(td,gv),e(Wo,_v),e(Wo,Jc),e(Jc,bv),e(Wo,vv),e(st,kv),T(On,st,null),e(st,Tv),e(st,Kc),e(Kc,yv),e(st,wv),T(Lr,st,null),f(o,lm,m),f(o,Uo,m),e(Uo,Wn),e(Wn,Gc),T(Nr,Gc,null),e(Uo,Av),e(Uo,Xc),e(Xc,Fv),f(o,dm,m),f(o,Ze,m),T(Dr,Ze,null),e(Ze,$v),e(Ze,Zc),e(Zc,xv),e(Ze,Ev),e(Ze,Sr),e(Sr,Mv),e(Sr,od),e(od,zv),e(Sr,Pv),e(Ze,qv),e(Ze,Or),e(Or,jv),e(Or,Wr),e(Wr,Cv),e(Or,Iv),e(Ze,Lv),e(Ze,Se),T(Ur,Se,null),e(Se,Nv),e(Se,Bo),e(Bo,Dv),e(Bo,nd),e(nd,Sv),e(Bo,Ov),e(Bo,Yc),e(Yc,Wv),e(Bo,Uv),e(Se,Bv),T(Un,Se,null),e(Se,Rv),e(Se,ep),e(ep,Hv),e(Se,Qv),T(Br,Se,null),e(Se,Vv),e(Se,tp),e(tp,Jv),e(Se,Kv),T(Rr,Se,null),f(o,cm,m),f(o,Ro,m),e(Ro,Bn),e(Bn,op),T(Hr,op,null),e(Ro,Gv),e(Ro,np),e(np,Xv),f(o,pm,m),f(o,Ye,m),T(Qr,Ye,null),e(Ye,Zv),e(Ye,sp),e(sp,Yv),e(Ye,ek),e(Ye,Vr),e(Vr,tk),e(Vr,sd),e(sd,ok),e(Vr,nk),e(Ye,sk),e(Ye,Jr),e(Jr,rk),e(Jr,Kr),e(Kr,ak),e(Jr,ik),e(Ye,lk),e(Ye,rt),T(Gr,rt,null),e(rt,dk),e(rt,Ho),e(Ho,ck),e(Ho,rd),e(rd,pk),e(Ho,hk),e(Ho,rp),e(rp,fk),e(Ho,mk),e(rt,uk),T(Rn,rt,null),e(rt,gk),e(rt,ap),e(ap,_k),e(rt,bk),T(Xr,rt,null),f(o,hm,m),f(o,Qo,m),e(Qo,Hn),e(Hn,ip),T(Zr,ip,null),e(Qo,vk),e(Qo,lp),e(lp,kk),f(o,fm,m),f(o,et,m),T(Yr,et,null),e(et,Tk),e(et,dp),e(dp,yk),e(et,wk),e(et,ea),e(ea,Ak),e(ea,ad),e(ad,Fk),e(ea,$k),e(et,xk),e(et,ta),e(ta,Ek),e(ta,oa),e(oa,Mk),e(ta,zk),e(et,Pk),e(et,at),T(na,at,null),e(at,qk),e(at,Vo),e(Vo,jk),e(Vo,id),e(id,Ck),e(Vo,Ik),e(Vo,cp),e(cp,Lk),e(Vo,Nk),e(at,Dk),T(Qn,at,null),e(at,Sk),e(at,pp),e(pp,Ok),e(at,Wk),T(sa,at,null),f(o,mm,m),f(o,Jo,m),e(Jo,Vn),e(Vn,hp),T(ra,hp,null),e(Jo,Uk),e(Jo,fp),e(fp,Bk),f(o,um,m),f(o,tt,m),T(aa,tt,null),e(tt,Rk),e(tt,Ko),e(Ko,Hk),e(Ko,mp),e(mp,Qk),e(Ko,Vk),e(Ko,up),e(up,Jk),e(Ko,Kk),e(tt,Gk),e(tt,ia),e(ia,Xk),e(ia,ld),e(ld,Zk),e(ia,Yk),e(tt,eT),e(tt,la),e(la,tT),e(la,da),e(da,oT),e(la,nT),e(tt,sT),e(tt,it),T(ca,it,null),e(it,rT),e(it,Go),e(Go,aT),e(Go,dd),e(dd,iT),e(Go,lT),e(Go,gp),e(gp,dT),e(Go,cT),e(it,pT),T(Jn,it,null),e(it,hT),e(it,_p),e(_p,fT),e(it,mT),T(pa,it,null),f(o,gm,m),f(o,Xo,m),e(Xo,Kn),e(Kn,bp),T(ha,bp,null),e(Xo,uT),e(Xo,vp),e(vp,gT),f(o,_m,m),f(o,Oe,m),T(fa,Oe,null),e(Oe,_T),e(Oe,kp),e(kp,bT),e(Oe,vT),e(Oe,ma),e(ma,kT),e(ma,cd),e(cd,TT),e(ma,yT),e(Oe,wT),e(Oe,ua),e(ua,AT),e(ua,ga),e(ga,FT),e(ua,$T),e(Oe,xT),T(Gn,Oe,null),e(Oe,ET),e(Oe,lt),T(_a,lt,null),e(lt,MT),e(lt,Zo),e(Zo,zT),e(Zo,pd),e(pd,PT),e(Zo,qT),e(Zo,Tp),e(Tp,jT),e(Zo,CT),e(lt,IT),T(Xn,lt,null),e(lt,LT),e(lt,yp),e(yp,NT),e(lt,DT),T(ba,lt,null),f(o,bm,m),f(o,Yo,m),e(Yo,Zn),e(Zn,wp),T(va,wp,null),e(Yo,ST),e(Yo,Ap),e(Ap,OT),f(o,vm,m),f(o,We,m),T(ka,We,null),e(We,WT),e(We,en),e(en,UT),e(en,Fp),e(Fp,BT),e(en,RT),e(en,$p),e($p,HT),e(en,QT),e(We,VT),e(We,Ta),e(Ta,JT),e(Ta,hd),e(hd,KT),e(Ta,GT),e(We,XT),e(We,ya),e(ya,ZT),e(ya,wa),e(wa,YT),e(ya,e1),e(We,t1),T(Yn,We,null),e(We,o1),e(We,dt),T(Aa,dt,null),e(dt,n1),e(dt,tn),e(tn,s1),e(tn,fd),e(fd,r1),e(tn,a1)
,e(tn,xp),e(xp,i1),e(tn,l1),e(dt,d1),T(es,dt,null),e(dt,c1),e(dt,Ep),e(Ep,p1),e(dt,h1),T(Fa,dt,null),f(o,km,m),f(o,on,m),e(on,ts),e(ts,Mp),T($a,Mp,null),e(on,f1),e(on,zp),e(zp,m1),f(o,Tm,m),f(o,Ue,m),T(xa,Ue,null),e(Ue,u1),e(Ue,Ea),e(Ea,g1),e(Ea,Pp),e(Pp,_1),e(Ea,b1),e(Ue,v1),e(Ue,Ma),e(Ma,k1),e(Ma,md),e(md,T1),e(Ma,y1),e(Ue,w1),e(Ue,za),e(za,A1),e(za,Pa),e(Pa,F1),e(za,$1),e(Ue,x1),T(os,Ue,null),e(Ue,E1),e(Ue,ct),T(qa,ct,null),e(ct,M1),e(ct,nn),e(nn,z1),e(nn,ud),e(ud,P1),e(nn,q1),e(nn,qp),e(qp,j1),e(nn,C1),e(ct,I1),T(ns,ct,null),e(ct,L1),e(ct,jp),e(jp,N1),e(ct,D1),T(ja,ct,null),f(o,ym,m),f(o,sn,m),e(sn,ss),e(ss,Cp),T(Ca,Cp,null),e(sn,S1),e(sn,Ip),e(Ip,O1),f(o,wm,m),f(o,Be,m),T(Ia,Be,null),e(Be,W1),e(Be,Lp),e(Lp,U1),e(Be,B1),e(Be,La),e(La,R1),e(La,gd),e(gd,H1),e(La,Q1),e(Be,V1),e(Be,Na),e(Na,J1),e(Na,Da),e(Da,K1),e(Na,G1),e(Be,X1),T(rs,Be,null),e(Be,Z1),e(Be,pt),T(Sa,pt,null),e(pt,Y1),e(pt,rn),e(rn,ey),e(rn,_d),e(_d,ty),e(rn,oy),e(rn,Np),e(Np,ny),e(rn,sy),e(pt,ry),T(as,pt,null),e(pt,ay),e(pt,Dp),e(Dp,iy),e(pt,ly),T(Oa,pt,null),f(o,Am,m),f(o,an,m),e(an,is),e(is,Sp),T(Wa,Sp,null),e(an,dy),e(an,Op),e(Op,cy),f(o,Fm,m),f(o,Re,m),T(Ua,Re,null),e(Re,py),e(Re,Wp),e(Wp,hy),e(Re,fy),e(Re,Ba),e(Ba,my),e(Ba,bd),e(bd,uy),e(Ba,gy),e(Re,_y),e(Re,Ra),e(Ra,by),e(Ra,Ha),e(Ha,vy),e(Ra,ky),e(Re,Ty),T(ls,Re,null),e(Re,yy),e(Re,ht),T(Qa,ht,null),e(ht,wy),e(ht,ln),e(ln,Ay),e(ln,vd),e(vd,Fy),e(ln,$y),e(ln,Up),e(Up,xy),e(ln,Ey),e(ht,My),T(ds,ht,null),e(ht,zy),e(ht,Bp),e(Bp,Py),e(ht,qy),T(Va,ht,null),f(o,$m,m),f(o,dn,m),e(dn,cs),e(cs,Rp),T(Ja,Rp,null),e(dn,jy),e(dn,Hp),e(Hp,Cy),f(o,xm,m),f(o,He,m),T(Ka,He,null),e(He,Iy),e(He,Qp),e(Qp,Ly),e(He,Ny),e(He,Ga),e(Ga,Dy),e(Ga,kd),e(kd,Sy),e(Ga,Oy),e(He,Wy),e(He,Xa),e(Xa,Uy),e(Xa,Za),e(Za,By),e(Xa,Ry),e(He,Hy),T(ps,He,null),e(He,Qy),e(He,ft),T(Ya,ft,null),e(ft,Vy),e(ft,cn),e(cn,Jy),e(cn,Td),e(Td,Ky),e(cn,Gy),e(cn,Vp),e(Vp,Xy),e(cn,Zy),e(ft,Yy),T(hs,ft,null),e(ft,ew),e(ft,Jp),e(Jp,tw),e(ft,ow),T(ei,ft,null),f(o,Em,m),f(o,pn,m),e(pn,fs),e(fs,Kp),T(ti,Kp,null),e(pn,nw),e(pn,Gp),e(Gp,sw),f(o,Mm,m),f(o,Qe,m),T(oi,Qe,null),e(Qe,rw),e(Qe,hn),e(hn,aw),e(hn,Xp),e(Xp,iw),e(hn,lw),e(hn,Zp),e(Zp,dw),e(hn,cw),e(Qe,pw),e(Qe,ni),e(ni,hw),e(ni,yd),e(yd,fw),e(ni,mw),e(Qe,uw),e(Qe,si),e(si,gw),e(si,ri),e(ri,_w),e(si,bw),e(Qe,vw),T(ms,Qe,null),e(Qe,kw),e(Qe,mt),T(ai,mt,null),e(mt,Tw),e(mt,fn),e(fn,yw),e(fn,wd),e(wd,ww),e(fn,Aw),e(fn,Yp),e(Yp,Fw),e(fn,$w),e(mt,xw),T(us,mt,null),e(mt,Ew),e(mt,eh),e(eh,Mw),e(mt,zw),T(ii,mt,null),f(o,zm,m),f(o,mn,m),e(mn,gs),e(gs,th),T(li,th,null),e(mn,Pw),e(mn,oh),e(oh,qw),f(o,Pm,m),f(o,qe,m),T(di,qe,null),e(qe,jw),e(qe,nh),e(nh,Cw),e(qe,Iw),e(qe,ci),e(ci,Lw),e(ci,Ad),e(Ad,Nw),e(ci,Dw),e(qe,Sw),e(qe,pi),e(pi,Ow),e(pi,hi),e(hi,Ww),e(pi,Uw),e(qe,Bw),e(qe,sh),e(sh,Rw),e(qe,Hw),e(qe,Ut),e(Ut,rh),e(rh,fi),e(fi,Qw),e(Ut,Vw),e(Ut,ah),e(ah,mi),e(mi,Jw),e(Ut,Kw),e(Ut,ih),e(ih,ui),e(ui,Gw),e(Ut,Xw),e(Ut,lh),e(lh,gi),e(gi,Zw),e(qe,Yw),e(qe,ut),T(_i,ut,null),e(ut,e0),e(ut,un),e(un,t0),e(un,dh),e(dh,o0),e(un,n0),e(un,ch),e(ch,s0),e(un,r0),e(ut,a0),T(_s,ut,null),e(ut,i0),e(ut,ph),e(ph,l0),e(ut,d0),T(bi,ut,null),f(o,qm,m),f(o,gn,m),e(gn,bs),e(bs,hh),T(vi,hh,null),e(gn,c0),e(gn,fh),e(fh,p0),f(o,jm,m),f(o,je,m),T(ki,je,null),e(je,h0),e(je,_n),e(_n,f0),e(_n,mh),e(mh,m0),e(_n,u0),e(_n,uh),e(uh,g0),e(_n,_0),e(je,b0),e(je,Ti),e(Ti,v0),e(Ti,Fd),e(Fd,k0),e(Ti,T0),e(je,y0),e(je,yi),e(yi,w0),e(yi,wi),e(wi,A0),e(yi,F0),e(je,$0),e(je,gh),e(gh,x0),e(je,E0),e(je,Bt),e(Bt,_h),e(_h,Ai),e(Ai,M0),e(Bt,z0),e(Bt,bh),e(bh,Fi),e(Fi,P0),e(Bt,q0),e(Bt,vh),e(vh,$i),e($i,j0),e(Bt,C0),e(Bt,kh),e(kh,xi),e
(xi,I0),e(je,L0),e(je,gt),T(Ei,gt,null),e(gt,N0),e(gt,bn),e(bn,D0),e(bn,Th),e(Th,S0),e(bn,O0),e(bn,yh),e(yh,W0),e(bn,U0),e(gt,B0),T(vs,gt,null),e(gt,R0),e(gt,wh),e(wh,H0),e(gt,Q0),T(Mi,gt,null),f(o,Cm,m),f(o,vn,m),e(vn,ks),e(ks,Ah),T(zi,Ah,null),e(vn,V0),e(vn,Fh),e(Fh,J0),f(o,Im,m),f(o,Ce,m),T(Pi,Ce,null),e(Ce,K0),e(Ce,qi),e(qi,G0),e(qi,$h),e($h,X0),e(qi,Z0),e(Ce,Y0),e(Ce,ji),e(ji,e2),e(ji,$d),e($d,t2),e(ji,o2),e(Ce,n2),e(Ce,Ci),e(Ci,s2),e(Ci,Ii),e(Ii,r2),e(Ci,a2),e(Ce,i2),e(Ce,xh),e(xh,l2),e(Ce,d2),e(Ce,Rt),e(Rt,Eh),e(Eh,Li),e(Li,c2),e(Rt,p2),e(Rt,Mh),e(Mh,Ni),e(Ni,h2),e(Rt,f2),e(Rt,zh),e(zh,Di),e(Di,m2),e(Rt,u2),e(Rt,Ph),e(Ph,Si),e(Si,g2),e(Ce,_2),e(Ce,_t),T(Oi,_t,null),e(_t,b2),e(_t,kn),e(kn,v2),e(kn,qh),e(qh,k2),e(kn,T2),e(kn,jh),e(jh,y2),e(kn,w2),e(_t,A2),T(Ts,_t,null),e(_t,F2),e(_t,Ch),e(Ch,$2),e(_t,x2),T(Wi,_t,null),f(o,Lm,m),f(o,Tn,m),e(Tn,ys),e(ys,Ih),T(Ui,Ih,null),e(Tn,E2),e(Tn,Lh),e(Lh,M2),f(o,Nm,m),f(o,Ie,m),T(Bi,Ie,null),e(Ie,z2),e(Ie,Nh),e(Nh,P2),e(Ie,q2),e(Ie,Ri),e(Ri,j2),e(Ri,xd),e(xd,C2),e(Ri,I2),e(Ie,L2),e(Ie,Hi),e(Hi,N2),e(Hi,Qi),e(Qi,D2),e(Hi,S2),e(Ie,O2),e(Ie,Dh),e(Dh,W2),e(Ie,U2),e(Ie,Ht),e(Ht,Sh),e(Sh,Vi),e(Vi,B2),e(Ht,R2),e(Ht,Oh),e(Oh,Ji),e(Ji,H2),e(Ht,Q2),e(Ht,Wh),e(Wh,Ki),e(Ki,V2),e(Ht,J2),e(Ht,Uh),e(Uh,Gi),e(Gi,K2),e(Ie,G2),e(Ie,bt),T(Xi,bt,null),e(bt,X2),e(bt,yn),e(yn,Z2),e(yn,Bh),e(Bh,Y2),e(yn,eA),e(yn,Rh),e(Rh,tA),e(yn,oA),e(bt,nA),T(ws,bt,null),e(bt,sA),e(bt,Hh),e(Hh,rA),e(bt,aA),T(Zi,bt,null),f(o,Dm,m),f(o,wn,m),e(wn,As),e(As,Qh),T(Yi,Qh,null),e(wn,iA),e(wn,Vh),e(Vh,lA),f(o,Sm,m),f(o,Le,m),T(el,Le,null),e(Le,dA),e(Le,Jh),e(Jh,cA),e(Le,pA),e(Le,tl),e(tl,hA),e(tl,Ed),e(Ed,fA),e(tl,mA),e(Le,uA),e(Le,ol),e(ol,gA),e(ol,nl),e(nl,_A),e(ol,bA),e(Le,vA),e(Le,Kh),e(Kh,kA),e(Le,TA),e(Le,Qt),e(Qt,Gh),e(Gh,sl),e(sl,yA),e(Qt,wA),e(Qt,Xh),e(Xh,rl),e(rl,AA),e(Qt,FA),e(Qt,Zh),e(Zh,al),e(al,$A),e(Qt,xA),e(Qt,Yh),e(Yh,il),e(il,EA),e(Le,MA),e(Le,vt),T(ll,vt,null),e(vt,zA),e(vt,An),e(An,PA),e(An,ef),e(ef,qA),e(An,jA),e(An,tf),e(tf,CA),e(An,IA),e(vt,LA),T(Fs,vt,null),e(vt,NA),e(vt,of),e(of,DA),e(vt,SA),T(dl,vt,null),f(o,Om,m),f(o,Fn,m),e(Fn,$s),e($s,nf),T(cl,nf,null),e(Fn,OA),e(Fn,sf),e(sf,WA),f(o,Wm,m),f(o,Ne,m),T(pl,Ne,null),e(Ne,UA),e(Ne,rf),e(rf,BA),e(Ne,RA),e(Ne,hl),e(hl,HA),e(hl,Md),e(Md,QA),e(hl,VA),e(Ne,JA),e(Ne,fl),e(fl,KA),e(fl,ml),e(ml,GA),e(fl,XA),e(Ne,ZA),e(Ne,af),e(af,YA),e(Ne,eF),e(Ne,Vt),e(Vt,lf),e(lf,ul),e(ul,tF),e(Vt,oF),e(Vt,df),e(df,gl),e(gl,nF),e(Vt,sF),e(Vt,cf),e(cf,_l),e(_l,rF),e(Vt,aF),e(Vt,pf),e(pf,bl),e(bl,iF),e(Ne,lF),e(Ne,kt),T(vl,kt,null),e(kt,dF),e(kt,$n),e($n,cF),e($n,hf),e(hf,pF),e($n,hF),e($n,ff),e(ff,fF),e($n,mF),e(kt,uF),T(xs,kt,null),e(kt,gF),e(kt,mf),e(mf,_F),e(kt,bF),T(kl,kt,null),f(o,Um,m),f(o,xn,m),e(xn,Es),e(Es,uf),T(Tl,uf,null),e(xn,vF),e(xn,gf),e(gf,kF),f(o,Bm,m),f(o,De,m),T(yl,De,null),e(De,TF),e(De,En),e(En,yF),e(En,_f),e(_f,wF),e(En,AF),e(En,bf),e(bf,FF),e(En,$F),e(De,xF),e(De,wl),e(wl,EF),e(wl,zd),e(zd,MF),e(wl,zF),e(De,PF),e(De,Al),e(Al,qF),e(Al,Fl),e(Fl,jF),e(Al,CF),e(De,IF),e(De,vf),e(vf,LF),e(De,NF),e(De,Jt),e(Jt,kf),e(kf,$l),e($l,DF),e(Jt,SF),e(Jt,Tf),e(Tf,xl),e(xl,OF),e(Jt,WF),e(Jt,yf),e(yf,El),e(El,UF),e(Jt,BF),e(Jt,wf),e(wf,Ml),e(Ml,RF),e(De,HF),e(De,Tt),T(zl,Tt,null),e(Tt,QF),e(Tt,Mn),e(Mn,VF),e(Mn,Af),e(Af,JF),e(Mn,KF),e(Mn,Ff),e(Ff,GF),e(Mn,XF),e(Tt,ZF),T(Ms,Tt,null),e(Tt,YF),e(Tt,$f),e($f,e$),e(Tt,t$),T(Pl,Tt,null),Rm=!0},p(o,[m]){const ql={};m&2&&(ql.$$scope={dirty:m,ctx:o}),Ln.$set(ql);const xf={};m&2&&(xf.$$scope={dirty:m,ctx:o}),Dn.$set(xf);const Ef={};m&2&&(Ef.$$scope={dirty:m,ctx:o}),On.$set(Ef);const 
Mf={};m&2&&(Mf.$$scope={dirty:m,ctx:o}),Un.$set(Mf);const jl={};m&2&&(jl.$$scope={dirty:m,ctx:o}),Rn.$set(jl);const zf={};m&2&&(zf.$$scope={dirty:m,ctx:o}),Qn.$set(zf);const Pf={};m&2&&(Pf.$$scope={dirty:m,ctx:o}),Jn.$set(Pf);const qf={};m&2&&(qf.$$scope={dirty:m,ctx:o}),Gn.$set(qf);const Cl={};m&2&&(Cl.$$scope={dirty:m,ctx:o}),Xn.$set(Cl);const jf={};m&2&&(jf.$$scope={dirty:m,ctx:o}),Yn.$set(jf);const Il={};m&2&&(Il.$$scope={dirty:m,ctx:o}),es.$set(Il);const Cf={};m&2&&(Cf.$$scope={dirty:m,ctx:o}),os.$set(Cf);const If={};m&2&&(If.$$scope={dirty:m,ctx:o}),ns.$set(If);const Lf={};m&2&&(Lf.$$scope={dirty:m,ctx:o}),rs.$set(Lf);const Nf={};m&2&&(Nf.$$scope={dirty:m,ctx:o}),as.$set(Nf);const Df={};m&2&&(Df.$$scope={dirty:m,ctx:o}),ls.$set(Df);const Sf={};m&2&&(Sf.$$scope={dirty:m,ctx:o}),ds.$set(Sf);const Ll={};m&2&&(Ll.$$scope={dirty:m,ctx:o}),ps.$set(Ll);const Of={};m&2&&(Of.$$scope={dirty:m,ctx:o}),hs.$set(Of);const Wf={};m&2&&(Wf.$$scope={dirty:m,ctx:o}),ms.$set(Wf);const Kt={};m&2&&(Kt.$$scope={dirty:m,ctx:o}),us.$set(Kt);const Uf={};m&2&&(Uf.$$scope={dirty:m,ctx:o}),_s.$set(Uf);const Bf={};m&2&&(Bf.$$scope={dirty:m,ctx:o}),vs.$set(Bf);const Rf={};m&2&&(Rf.$$scope={dirty:m,ctx:o}),Ts.$set(Rf);const Nl={};m&2&&(Nl.$$scope={dirty:m,ctx:o}),ws.$set(Nl);const Hf={};m&2&&(Hf.$$scope={dirty:m,ctx:o}),Fs.$set(Hf);const Qf={};m&2&&(Qf.$$scope={dirty:m,ctx:o}),xs.$set(Qf);const Vf={};m&2&&(Vf.$$scope={dirty:m,ctx:o}),Ms.$set(Vf)},i(o){Rm||(y(_.$$.fragment,o),y(ee.$$.fragment,o),y(Us.$$.fragment,o),y(Bs.$$.fragment,o),y(Hs.$$.fragment,o),y(Qs.$$.fragment,o),y(Vs.$$.fragment,o),y(Xs.$$.fragment,o),y(Ys.$$.fragment,o),y(tr.$$.fragment,o),y(or.$$.fragment,o),y(nr.$$.fragment,o),y(sr.$$.fragment,o),y(ar.$$.fragment,o),y(lr.$$.fragment,o),y(dr.$$.fragment,o),y(cr.$$.fragment,o),y(pr.$$.fragment,o),y(fr.$$.fragment,o),y(ur.$$.fragment,o),y(gr.$$.fragment,o),y(kr.$$.fragment,o),y(Ln.$$.fragment,o),y(Tr.$$.fragment,o),y(yr.$$.fragment,o),y(wr.$$.fragment,o),y(xr.$$.fragment,o),y(Dn.$$.fragment,o),y(Er.$$.fragment,o),y(Mr.$$.fragment,o),y(zr.$$.fragment,o),y(Ir.$$.fragment,o),y(On.$$.fragment,o),y(Lr.$$.fragment,o),y(Nr.$$.fragment,o),y(Dr.$$.fragment,o),y(Ur.$$.fragment,o),y(Un.$$.fragment,o),y(Br.$$.fragment,o),y(Rr.$$.fragment,o),y(Hr.$$.fragment,o),y(Qr.$$.fragment,o),y(Gr.$$.fragment,o),y(Rn.$$.fragment,o),y(Xr.$$.fragment,o),y(Zr.$$.fragment,o),y(Yr.$$.fragment,o),y(na.$$.fragment,o),y(Qn.$$.fragment,o),y(sa.$$.fragment,o),y(ra.$$.fragment,o),y(aa.$$.fragment,o),y(ca.$$.fragment,o),y(Jn.$$.fragment,o),y(pa.$$.fragment,o),y(ha.$$.fragment,o),y(fa.$$.fragment,o),y(Gn.$$.fragment,o),y(_a.$$.fragment,o),y(Xn.$$.fragment,o),y(ba.$$.fragment,o),y(va.$$.fragment,o),y(ka.$$.fragment,o),y(Yn.$$.fragment,o),y(Aa.$$.fragment,o),y(es.$$.fragment,o),y(Fa.$$.fragment,o),y($a.$$.fragment,o),y(xa.$$.fragment,o),y(os.$$.fragment,o),y(qa.$$.fragment,o),y(ns.$$.fragment,o),y(ja.$$.fragment,o),y(Ca.$$.fragment,o),y(Ia.$$.fragment,o),y(rs.$$.fragment,o),y(Sa.$$.fragment,o),y(as.$$.fragment,o),y(Oa.$$.fragment,o),y(Wa.$$.fragment,o),y(Ua.$$.fragment,o),y(ls.$$.fragment,o),y(Qa.$$.fragment,o),y(ds.$$.fragment,o),y(Va.$$.fragment,o),y(Ja.$$.fragment,o),y(Ka.$$.fragment,o),y(ps.$$.fragment,o),y(Ya.$$.fragment,o),y(hs.$$.fragment,o),y(ei.$$.fragment,o),y(ti.$$.fragment,o),y(oi.$$.fragment,o),y(ms.$$.fragment,o),y(ai.$$.fragment,o),y(us.$$.fragment,o),y(ii.$$.fragment,o),y(li.$$.fragment,o),y(di.$$.fragment,o),y(_i.$$.fragment,o),y(_s.$$.fragment,o),y(bi.$$.fragment,o),y(vi.$$.fragment,o),y(ki.$$.fragment,o),y(Ei.$$.fragment,o),y
(vs.$$.fragment,o),y(Mi.$$.fragment,o),y(zi.$$.fragment,o),y(Pi.$$.fragment,o),y(Oi.$$.fragment,o),y(Ts.$$.fragment,o),y(Wi.$$.fragment,o),y(Ui.$$.fragment,o),y(Bi.$$.fragment,o),y(Xi.$$.fragment,o),y(ws.$$.fragment,o),y(Zi.$$.fragment,o),y(Yi.$$.fragment,o),y(el.$$.fragment,o),y(ll.$$.fragment,o),y(Fs.$$.fragment,o),y(dl.$$.fragment,o),y(cl.$$.fragment,o),y(pl.$$.fragment,o),y(vl.$$.fragment,o),y(xs.$$.fragment,o),y(kl.$$.fragment,o),y(Tl.$$.fragment,o),y(yl.$$.fragment,o),y(zl.$$.fragment,o),y(Ms.$$.fragment,o),y(Pl.$$.fragment,o),Rm=!0)},o(o){w(_.$$.fragment,o),w(ee.$$.fragment,o),w(Us.$$.fragment,o),w(Bs.$$.fragment,o),w(Hs.$$.fragment,o),w(Qs.$$.fragment,o),w(Vs.$$.fragment,o),w(Xs.$$.fragment,o),w(Ys.$$.fragment,o),w(tr.$$.fragment,o),w(or.$$.fragment,o),w(nr.$$.fragment,o),w(sr.$$.fragment,o),w(ar.$$.fragment,o),w(lr.$$.fragment,o),w(dr.$$.fragment,o),w(cr.$$.fragment,o),w(pr.$$.fragment,o),w(fr.$$.fragment,o),w(ur.$$.fragment,o),w(gr.$$.fragment,o),w(kr.$$.fragment,o),w(Ln.$$.fragment,o),w(Tr.$$.fragment,o),w(yr.$$.fragment,o),w(wr.$$.fragment,o),w(xr.$$.fragment,o),w(Dn.$$.fragment,o),w(Er.$$.fragment,o),w(Mr.$$.fragment,o),w(zr.$$.fragment,o),w(Ir.$$.fragment,o),w(On.$$.fragment,o),w(Lr.$$.fragment,o),w(Nr.$$.fragment,o),w(Dr.$$.fragment,o),w(Ur.$$.fragment,o),w(Un.$$.fragment,o),w(Br.$$.fragment,o),w(Rr.$$.fragment,o),w(Hr.$$.fragment,o),w(Qr.$$.fragment,o),w(Gr.$$.fragment,o),w(Rn.$$.fragment,o),w(Xr.$$.fragment,o),w(Zr.$$.fragment,o),w(Yr.$$.fragment,o),w(na.$$.fragment,o),w(Qn.$$.fragment,o),w(sa.$$.fragment,o),w(ra.$$.fragment,o),w(aa.$$.fragment,o),w(ca.$$.fragment,o),w(Jn.$$.fragment,o),w(pa.$$.fragment,o),w(ha.$$.fragment,o),w(fa.$$.fragment,o),w(Gn.$$.fragment,o),w(_a.$$.fragment,o),w(Xn.$$.fragment,o),w(ba.$$.fragment,o),w(va.$$.fragment,o),w(ka.$$.fragment,o),w(Yn.$$.fragment,o),w(Aa.$$.fragment,o),w(es.$$.fragment,o),w(Fa.$$.fragment,o),w($a.$$.fragment,o),w(xa.$$.fragment,o),w(os.$$.fragment,o),w(qa.$$.fragment,o),w(ns.$$.fragment,o),w(ja.$$.fragment,o),w(Ca.$$.fragment,o),w(Ia.$$.fragment,o),w(rs.$$.fragment,o),w(Sa.$$.fragment,o),w(as.$$.fragment,o),w(Oa.$$.fragment,o),w(Wa.$$.fragment,o),w(Ua.$$.fragment,o),w(ls.$$.fragment,o),w(Qa.$$.fragment,o),w(ds.$$.fragment,o),w(Va.$$.fragment,o),w(Ja.$$.fragment,o),w(Ka.$$.fragment,o),w(ps.$$.fragment,o),w(Ya.$$.fragment,o),w(hs.$$.fragment,o),w(ei.$$.fragment,o),w(ti.$$.fragment,o),w(oi.$$.fragment,o),w(ms.$$.fragment,o),w(ai.$$.fragment,o),w(us.$$.fragment,o),w(ii.$$.fragment,o),w(li.$$.fragment,o),w(di.$$.fragment,o),w(_i.$$.fragment,o),w(_s.$$.fragment,o),w(bi.$$.fragment,o),w(vi.$$.fragment,o),w(ki.$$.fragment,o),w(Ei.$$.fragment,o),w(vs.$$.fragment,o),w(Mi.$$.fragment,o),w(zi.$$.fragment,o),w(Pi.$$.fragment,o),w(Oi.$$.fragment,o),w(Ts.$$.fragment,o),w(Wi.$$.fragment,o),w(Ui.$$.fragment,o),w(Bi.$$.fragment,o),w(Xi.$$.fragment,o),w(ws.$$.fragment,o),w(Zi.$$.fragment,o),w(Yi.$$.fragment,o),w(el.$$.fragment,o),w(ll.$$.fragment,o),w(Fs.$$.fragment,o),w(dl.$$.fragment,o),w(cl.$$.fragment,o),w(pl.$$.fragment,o),w(vl.$$.fragment,o),w(xs.$$.fragment,o),w(kl.$$.fragment,o),w(Tl.$$.fragment,o),w(yl.$$.fragment,o),w(zl.$$.fragment,o),w(Ms.$$.fragment,o),w(Pl.$$.fragment,o),Rm=!1},d(o){t(p),o&&t($),o&&t(g),A(_),o&&t(G),o&&t(M),A(ee),o&&t(ae),o&&t(J),o&&t(q),o&&t(Y),o&&t(ie),o&&t(P),o&&t(Q),o&&t(oe),o&&t(le),o&&t(ne),o&&t(de),o&&t(S),o&&t(ye),o&&t(j),o&&t(ce),o&&t(be),A(Us),o&&t(Jf),o&&t(Je),A(Bs),A(Hs),o&&t(Kf),o&&t(Eo),A(Qs),o&&t(Gf),o&&t(Pe),A(Vs),A(Xs),A(Ys),A(tr),A(or),o&&t(Xf),o&&t(Po),A(nr),o&&t(Zf),o&&t(zt),A(sr),A(ar),A(lr)
,A(dr),o&&t(Yf),o&&t(qo),A(cr),o&&t(em),o&&t(jo),A(pr),o&&t(tm),o&&t(Co),A(fr),o&&t(om),o&&t(Io),A(ur),o&&t(nm),o&&t(Ke),A(gr),A(kr),A(Ln),A(Tr),o&&t(sm),o&&t(No),A(yr),o&&t(rm),o&&t(Ge),A(wr),A(xr),A(Dn),A(Er),o&&t(am),o&&t(Oo),A(Mr),o&&t(im),o&&t(Xe),A(zr),A(Ir),A(On),A(Lr),o&&t(lm),o&&t(Uo),A(Nr),o&&t(dm),o&&t(Ze),A(Dr),A(Ur),A(Un),A(Br),A(Rr),o&&t(cm),o&&t(Ro),A(Hr),o&&t(pm),o&&t(Ye),A(Qr),A(Gr),A(Rn),A(Xr),o&&t(hm),o&&t(Qo),A(Zr),o&&t(fm),o&&t(et),A(Yr),A(na),A(Qn),A(sa),o&&t(mm),o&&t(Jo),A(ra),o&&t(um),o&&t(tt),A(aa),A(ca),A(Jn),A(pa),o&&t(gm),o&&t(Xo),A(ha),o&&t(_m),o&&t(Oe),A(fa),A(Gn),A(_a),A(Xn),A(ba),o&&t(bm),o&&t(Yo),A(va),o&&t(vm),o&&t(We),A(ka),A(Yn),A(Aa),A(es),A(Fa),o&&t(km),o&&t(on),A($a),o&&t(Tm),o&&t(Ue),A(xa),A(os),A(qa),A(ns),A(ja),o&&t(ym),o&&t(sn),A(Ca),o&&t(wm),o&&t(Be),A(Ia),A(rs),A(Sa),A(as),A(Oa),o&&t(Am),o&&t(an),A(Wa),o&&t(Fm),o&&t(Re),A(Ua),A(ls),A(Qa),A(ds),A(Va),o&&t($m),o&&t(dn),A(Ja),o&&t(xm),o&&t(He),A(Ka),A(ps),A(Ya),A(hs),A(ei),o&&t(Em),o&&t(pn),A(ti),o&&t(Mm),o&&t(Qe),A(oi),A(ms),A(ai),A(us),A(ii),o&&t(zm),o&&t(mn),A(li),o&&t(Pm),o&&t(qe),A(di),A(_i),A(_s),A(bi),o&&t(qm),o&&t(gn),A(vi),o&&t(jm),o&&t(je),A(ki),A(Ei),A(vs),A(Mi),o&&t(Cm),o&&t(vn),A(zi),o&&t(Im),o&&t(Ce),A(Pi),A(Oi),A(Ts),A(Wi),o&&t(Lm),o&&t(Tn),A(Ui),o&&t(Nm),o&&t(Ie),A(Bi),A(Xi),A(ws),A(Zi),o&&t(Dm),o&&t(wn),A(Yi),o&&t(Sm),o&&t(Le),A(el),A(ll),A(Fs),A(dl),o&&t(Om),o&&t(Fn),A(cl),o&&t(Wm),o&&t(Ne),A(pl),A(vl),A(xs),A(kl),o&&t(Um),o&&t(xn),A(Tl),o&&t(Bm),o&&t(De),A(yl),A(zl),A(Ms),A(Pl)}}}const c5={local:"albert",sections:[{local:"overview",title:"Overview"},{local:"transformers.AlbertConfig",title:"AlbertConfig"},{local:"transformers.AlbertTokenizer",title:"AlbertTokenizer"},{local:"transformers.AlbertTokenizerFast",title:"AlbertTokenizerFast"},{local:"transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput",title:"Albert specific 
outputs"},{local:"transformers.AlbertModel",title:"AlbertModel"},{local:"transformers.AlbertForPreTraining",title:"AlbertForPreTraining"},{local:"transformers.AlbertForMaskedLM",title:"AlbertForMaskedLM"},{local:"transformers.AlbertForSequenceClassification",title:"AlbertForSequenceClassification"},{local:"transformers.AlbertForMultipleChoice",title:"AlbertForMultipleChoice"},{local:"transformers.AlbertForTokenClassification",title:"AlbertForTokenClassification"},{local:"transformers.AlbertForQuestionAnswering",title:"AlbertForQuestionAnswering"},{local:"transformers.TFAlbertModel",title:"TFAlbertModel"},{local:"transformers.TFAlbertForPreTraining",title:"TFAlbertForPreTraining"},{local:"transformers.TFAlbertForMaskedLM",title:"TFAlbertForMaskedLM"},{local:"transformers.TFAlbertForSequenceClassification",title:"TFAlbertForSequenceClassification"},{local:"transformers.TFAlbertForMultipleChoice",title:"TFAlbertForMultipleChoice"},{local:"transformers.TFAlbertForTokenClassification",title:"TFAlbertForTokenClassification"},{local:"transformers.TFAlbertForQuestionAnswering",title:"TFAlbertForQuestionAnswering"},{local:"transformers.FlaxAlbertModel",title:"FlaxAlbertModel"},{local:"transformers.FlaxAlbertForPreTraining",title:"FlaxAlbertForPreTraining"},{local:"transformers.FlaxAlbertForMaskedLM",title:"FlaxAlbertForMaskedLM"},{local:"transformers.FlaxAlbertForSequenceClassification",title:"FlaxAlbertForSequenceClassification"},{local:"transformers.FlaxAlbertForMultipleChoice",title:"FlaxAlbertForMultipleChoice"},{local:"transformers.FlaxAlbertForTokenClassification",title:"FlaxAlbertForTokenClassification"},{local:"transformers.FlaxAlbertForQuestionAnswering",title:"FlaxAlbertForQuestionAnswering"}],title:"ALBERT"};function p5(C,p,$){let{fw:g}=p;return C.$$set=b=>{"fw"in b&&$(0,g=b.fw)},[g]}class b5 extends Pz{constructor(p){super();qz(this,p,p5,d5,jz,{fw:0})}}export{b5 as default,c5 as metadata};
9,907
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/speech_to_text.mdx-5c38e05f.js
import{S as ll,i as pl,s as hl,e as a,k as d,w as u,t as n,L as fl,c as r,d as o,m as l,a as i,x as _,h as s,b as c,J as e,g as h,y as g,q as v,o as T,B as x}from"../../chunks/vendor-b1433968.js";import{T as zs}from"../../chunks/Tip-c3840994.js";import{D as F}from"../../chunks/Docstring-ff504c58.js";import{C as En}from"../../chunks/CodeBlock-a320dbd7.js";import{I as xe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ml(Y){let f,w,m,k,z,b,y,E;return{c(){f=a("p"),w=n(`This class method is simply calling Speech2TextFeatureExtractor\u2019s `),m=a("code"),k=n("from_pretrained"),z=n(` and Speech2TextTokenizer\u2019s `),b=a("code"),y=n("from_pretrained"),E=n(`. Please refer to the docstrings of the methods above for more information.`)},l(C){f=r(C,"P",{});var $=i(f);w=s($,`This class method is simply calling Speech2TextFeatureExtractor\u2019s `),m=r($,"CODE",{});var M=i(m);k=s(M,"from_pretrained"),M.forEach(o),z=s($,` and Speech2TextTokenizer\u2019s `),b=r($,"CODE",{});var L=i(b);y=s(L,"from_pretrained"),L.forEach(o),E=s($,`. Please refer to the docstrings of the methods above for more information.`),$.forEach(o)},m(C,$){h(C,f,$),e(f,w),e(f,m),e(m,k),e(f,z),e(f,b),e(b,y),e(f,E)},d(C){C&&o(f)}}}function ul(Y){let f,w,m,k,z,b,y,E;return{c(){f=a("p"),w=n("This class method is simply calling "),m=a("code"),k=n("save_pretrained"),z=n(` and `),b=a("code"),y=n("save_pretrained"),E=n(`. Please refer to the docstrings of the methods above for more information.`)},l(C){f=r(C,"P",{});var $=i(f);w=s($,"This class method is simply calling "),m=r($,"CODE",{});var M=i(m);k=s(M,"save_pretrained"),M.forEach(o),z=s($,` and `),b=r($,"CODE",{});var L=i(b);y=s(L,"save_pretrained"),L.forEach(o),E=s($,`. Please refer to the docstrings of the methods above for more information.`),$.forEach(o)},m(C,$){h(C,f,$),e(f,w),e(f,m),e(m,k),e(f,z),e(f,b),e(b,y),e(f,E)},d(C){C&&o(f)}}}function _l(Y){let f,w,m,k,z;return{c(){f=a("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),k=n("Module"),z=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){f=r(b,"P",{});var y=i(f);w=s(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(y,"CODE",{});var E=i(m);k=s(E,"Module"),E.forEach(o),z=s(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(o)},m(b,y){h(b,f,y),e(f,w),e(f,m),e(m,k),e(f,z)},d(b){b&&o(f)}}}function gl(Y){let f,w,m,k,z;return{c(){f=a("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),k=n("Module"),z=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){f=r(b,"P",{});var y=i(f);w=s(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(y,"CODE",{});var E=i(m);k=s(E,"Module"),E.forEach(o),z=s(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(o)},m(b,y){h(b,f,y),e(f,w),e(f,m),e(m,k),e(f,z)},d(b){b&&o(f)}}}function vl(Y){let 
f,w,m,k,z,b,y,E,C,$,M,L,qo,Ue,Es,Po,qs,qn,O,Ps,Be,js,Fs,Re,Cs,Ms,He,As,Ds,Je,Is,Ns,Pn,Q,Ls,Ye,Os,Gs,Xe,Ws,Vs,jn,ce,be,jo,Ke,Us,Fo,Bs,Fn,ke,Rs,Co,Hs,Js,Cn,G,Ys,Ht,Xs,Ks,Jt,Qs,Zs,Yt,ea,ta,Xt,oa,na,Mn,q,sa,Mo,aa,ra,Ao,ia,ca,Do,da,la,Io,pa,ha,No,fa,ma,Qe,ua,_a,Lo,ga,An,Kt,Oo,va,Dn,Ze,In,Qt,et,Go,Ta,xa,A,ba,Wo,ka,ya,Vo,Sa,wa,Uo,$a,za,Bo,Ea,qa,Ro,Pa,ja,Nn,tt,Ln,ye,Fa,ot,Ca,Ma,On,de,Se,Ho,nt,Aa,Jo,Da,Gn,D,st,Ia,le,Na,Zt,La,Oa,at,Ga,Wa,Va,pe,Ua,eo,Ba,Ra,to,Ha,Ja,Ya,Yo,Xa,Ka,rt,Wn,he,we,Xo,it,Qa,Ko,Za,Vn,P,ct,er,Qo,tr,or,dt,nr,oo,sr,ar,rr,$e,lt,ir,Zo,cr,dr,ze,pt,lr,ht,pr,en,hr,fr,mr,Z,ft,ur,no,_r,so,gr,vr,tn,Tr,xr,on,Un,fe,Ee,nn,mt,br,sn,kr,Bn,I,ut,yr,an,Sr,wr,_t,$r,ao,zr,Er,qr,rn,Pr,jr,qe,gt,Fr,cn,Cr,Rn,me,Pe,dn,vt,Mr,ln,Ar,Hn,S,Tt,Dr,pn,Ir,Nr,W,ro,Lr,Or,io,Gr,Wr,co,Vr,Ur,xt,hn,Br,Rr,Hr,lo,Jr,Yr,Xr,je,bt,Kr,X,Qr,kt,fn,Zr,ei,ti,po,oi,ni,yt,mn,si,ai,ri,ii,ee,St,ci,wt,di,ho,li,pi,hi,Fe,fi,te,$t,mi,ue,ui,un,_i,gi,fo,vi,Ti,xi,Ce,bi,Me,zt,ki,Et,yi,mo,Si,wi,$i,Ae,qt,zi,Pt,Ei,uo,qi,Pi,ji,De,jt,Fi,_n,Ci,Jn,_e,Ie,gn,Ft,Mi,vn,Ai,Yn,R,Ct,Di,Mt,Ii,_o,Ni,Li,Oi,At,Gi,Dt,Wi,Vi,Ui,V,It,Bi,ge,Ri,go,Hi,Ji,Tn,Yi,Xi,Ki,Ne,Qi,xn,Zi,ec,Nt,Xn,ve,Le,bn,Lt,tc,kn,oc,Kn,H,Ot,nc,Gt,sc,vo,ac,rc,ic,Wt,cc,Vt,dc,lc,pc,U,Ut,hc,Te,fc,To,mc,uc,yn,_c,gc,vc,Oe,Tc,Sn,xc,bc,Bt,Qn;return b=new xe({}),Ue=new xe({}),Ke=new xe({}),Ze=new En({props:{code:`import torch from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration from datasets import load_dataset import soundfile as sf model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt") generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"]) transcription = processor.batch_decode(generated_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> torch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2TextProcessor, Speech2TextForConditionalGeneration</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">model = Speech2TextForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-small-librispeech-asr&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">processor = Speech2TextProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-small-librispeech-asr&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>):</span> <span class="hljs-meta">...</span> <span 
class="language-python"> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>])</span> <span class="hljs-meta">...</span> <span class="language-python"> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech</span> <span class="hljs-meta">...</span> <span class="language-python"> <span class="hljs-keyword">return</span> batch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">ds = ds.<span class="hljs-built_in">map</span>(map_to_array)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">inputs = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">generated_ids = model.generate(input_ids=inputs[<span class="hljs-string">&quot;input_features&quot;</span>], attention_mask=inputs[<span class="hljs-string">&quot;attention_mask&quot;</span>])</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">transcription = processor.batch_decode(generated_ids)</span>`}}),tt=new En({props:{code:`import torch from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration from datasets import load_dataset import soundfile as sf model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-medium-mustc-multilingual-st") processor = Speech2TextProcessor.from_pretrained("facebook/s2t-medium-mustc-multilingual-st") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt") generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask], forced_bos_token_id=processor.tokenizer.lang_code_to_id["fr"]) translation = processor.batch_decode(generated_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> torch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2TextProcessor, Speech2TextForConditionalGeneration</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">model = Speech2TextForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-medium-mustc-multilingual-st&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">processor = Speech2TextProcessor.from_pretrained(<span 
class="hljs-string">&quot;facebook/s2t-medium-mustc-multilingual-st&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>):</span> <span class="hljs-meta">...</span> <span class="language-python"> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>])</span> <span class="hljs-meta">...</span> <span class="language-python"> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech</span> <span class="hljs-meta">...</span> <span class="language-python"> <span class="hljs-keyword">return</span> batch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">ds = ds.<span class="hljs-built_in">map</span>(map_to_array)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">inputs = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">generated_ids = model.generate(input_ids=inputs[<span class="hljs-string">&quot;input_features&quot;</span>], attention_mask=inputs[<span class="hljs-string">&quot;attention_mask], forced_bos_token_id=processor.tokenizer.lang_code_to_id[&quot;</span><span class="hljs-string">fr&quot;])</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-string">translation = processor.batch_decode(generated_ids)</span></span>`}}),nt=new xe({}),st=new F({props:{name:"class transformers.Speech2TextConfig",anchor:"transformers.Speech2TextConfig",parameters:[{name:"vocab_size",val:" = 10000"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 2048"},{name:"encoder_attention_heads",val:" = 4"},{name:"decoder_layers",val:" = 6"},{name:"decoder_ffn_dim",val:" = 2048"},{name:"decoder_attention_heads",val:" = 4"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'relu'"},{name:"d_model",val:" = 256"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"max_source_positions",val:" = 6000"},{name:"max_target_positions",val:" = 1024"},{name:"num_conv_layers",val:" = 2"},{name:"conv_kernel_sizes",val:" = (5, 5)"},{name:"conv_channels",val:" = 1024"},{name:"input_feat_per_channel",val:" = 80"},{name:"input_channels",val:" = 
1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/configuration_speech_to_text.py#L29",parametersDescription:[{anchor:"transformers.Speech2TextConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextModel">Speech2TextModel</a>`,name:"vocab_size"},{anchor:"transformers.Speech2TextConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.Speech2TextConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.Speech2TextConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.Speech2TextConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.Speech2TextConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.Speech2TextConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.Speech2TextConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.Speech2TextConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.Speech2TextConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.Speech2TextConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.Speech2TextConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.Speech2TextConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.Speech2TextConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.Speech2TextConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.Speech2TextConfig.max_source_positions",description:`<strong>max_source_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 6000) &#x2014; The maximum sequence length of log-mel filter-bank features that this model might ever be used with.`,name:"max_source_positions"},{anchor:"transformers.Speech2TextConfig.max_target_positions",description:`<strong>max_target_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_target_positions"},{anchor:"transformers.Speech2TextConfig.num_conv_layers",description:`<strong>num_conv_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of 1D convolutional layers in the conv module.`,name:"num_conv_layers"},{anchor:"transformers.Speech2TextConfig.conv_kernel_sizes",description:`<strong>conv_kernel_sizes</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 5)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. 
The length of <code>conv_kernel_sizes</code> has to match <code>num_conv_layers</code>.`,name:"conv_kernel_sizes"},{anchor:"transformers.Speech2TextConfig.conv_channels",description:`<strong>conv_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; An integer defining the number of output channels of each convolution layers except the final one in the conv module.`,name:"conv_channels"},{anchor:"transformers.Speech2TextConfig.input_feat_per_channel",description:`<strong>input_feat_per_channel</strong> (<code>int</code>, <em>optional</em>, defaults to 80) &#x2014; An integer specifying the size of feature vector. This is also the dimensions of log-mel filter-bank features.`,name:"input_feat_per_channel"},{anchor:"transformers.Speech2TextConfig.input_channels",description:`<strong>input_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; An integer specifying number of input channels of the input feature vector.`,name:"input_channels"}]}}),rt=new En({props:{code:`from transformers import Speech2TextModel, Speech2TextConfig # Initializing a Speech2Text s2t_transformer_s style configuration configuration = Speech2TextConfig() # Initializing a model from the s2t_transformer_s style configuration model = Speech2TextModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2TextModel, Speech2TextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Speech2Text s2t_transformer_s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Speech2TextConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the s2t_transformer_s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Speech2TextModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),it=new xe({}),ct=new F({props:{name:"class transformers.Speech2TextTokenizer",anchor:"transformers.Speech2TextTokenizer",parameters:[{name:"vocab_file",val:""},{name:"spm_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"unk_token",val:" = '<unk>'"},{name:"do_upper_case",val:" = False"},{name:"do_lower_case",val:" = False"},{name:"tgt_lang",val:" = None"},{name:"lang_codes",val:" = None"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/tokenization_speech_to_text.py#L55",parametersDescription:[{anchor:"transformers.Speech2TextTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.Speech2TextTokenizer.spm_file",description:`<strong>spm_file</strong> (<code>str</code>) &#x2014; Path to the <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> model file`,name:"spm_file"},{anchor:"transformers.Speech2TextTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to 
<code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.Speech2TextTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.Speech2TextTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.Speech2TextTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.Speech2TextTokenizer.do_upper_case",description:`<strong>do_upper_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to uppercase the output when decoding.`,name:"do_upper_case"},{anchor:"transformers.Speech2TextTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.Speech2TextTokenizer.tgt_lang",description:`<strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.`,name:"tgt_lang"},{anchor:"transformers.Speech2TextTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),lt=new F({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.Speech2TextTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/tokenization_speech_to_text.py#L193"}}),pt=new F({props:{name:"get_special_tokens_mask",anchor:"transformers.Speech2TextTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/tokenization_speech_to_text.py#L200",parametersDescription:[{anchor:"transformers.Speech2TextTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.Speech2TextTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.Speech2TextTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ft=new F({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2818",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The token type ids.</p> `,returnType:` <p><code>List[int]</code></p> `}}),mt=new xe({}),ut=new F({props:{name:"class transformers.Speech2TextFeatureExtractor",anchor:"transformers.Speech2TextFeatureExtractor",parameters:[{name:"feature_size",val:" = 80"},{name:"sampling_rate",val:" = 16000"},{name:"num_mel_bins",val:" = 
80"},{name:"padding_value",val:" = 0.0"},{name:"do_ceptral_normalize",val:" = True"},{name:"normalize_means",val:" = True"},{name:"normalize_vars",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py#L34",parametersDescription:[{anchor:"transformers.Speech2TextFeatureExtractor.feature_size",description:`<strong>feature_size</strong> (<code>int</code>, defaults to 80) &#x2014; The feature dimension of the extracted features.`,name:"feature_size"},{anchor:"transformers.Speech2TextFeatureExtractor.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>, defaults to 16000) &#x2014; The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz).`,name:"sampling_rate"},{anchor:"transformers.Speech2TextFeatureExtractor.num_mel_bins",description:`<strong>num_mel_bins</strong> (<code>int</code>, defaults to 80) &#x2014; Number of Mel-frequency bins.`,name:"num_mel_bins"},{anchor:"transformers.Speech2TextFeatureExtractor.padding_value",description:`<strong>padding_value</strong> (<code>float</code>, defaults to 0.0) &#x2014; The value that is used to fill the padding vectors.`,name:"padding_value"},{anchor:"transformers.Speech2TextFeatureExtractor.do_ceptral_normalize",description:`<strong>do_ceptral_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.`,name:"do_ceptral_normalize"},{anchor:"transformers.Speech2TextFeatureExtractor.normalize_means",description:`<strong>normalize_means</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to zero-mean normalize the extracted features.`,name:"normalize_means"},{anchor:"transformers.Speech2TextFeatureExtractor.normalize_vars",description:`<strong>normalize_vars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to unit-variance normalize the extracted features.`,name:"normalize_vars"}]}}),gt=new F({props:{name:"__call__",anchor:"transformers.Speech2TextFeatureExtractor.__call__",parameters:[{name:"raw_speech",val:": typing.Union[numpy.ndarray, typing.List[float], typing.List[numpy.ndarray], typing.List[typing.List[float]]]"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"truncation",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"sampling_rate",val:": typing.Optional[int] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py#L127",parametersDescription:[{anchor:"transformers.Speech2TextFeatureExtractor.__call__.raw_speech",description:`<strong>raw_speech</strong> (<code>np.ndarray</code>, <code>List[float]</code>, <code>List[np.ndarray]</code>, <code>List[List[float]]</code>) &#x2014; The sequence or batch of sequences to be padded. 
Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values.`,name:"raw_speech"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>) &#x2014; Activates truncation to cut input sequences longer than <em>max_length</em> to <em>max_length</em>.`,name:"truncation"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.</p> </blockquote>`,name:"pad_to_multiple_of"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor&#x2019;s default.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>For Speech2TextTransoformer models, <code>attention_mask</code> should alwys be passed for batched inference, to avoid subtle bugs.</p> </div>`,name:"return_attention_mask"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>, <em>optional</em>) &#x2014; The sampling rate at which the <code>raw_speech</code> input was sampled. It is strongly recommended to pass <code>sampling_rate</code> at the forward call to prevent silent errors.`,name:"sampling_rate"},{anchor:"transformers.Speech2TextFeatureExtractor.__call__.padding_value",description:`<strong>padding_value</strong> (<code>float</code>, defaults to 0.0) &#x2014; The value that is used to fill the padding values / vectors.`,name:"padding_value"}]}}),vt=new xe({}),Tt=new F({props:{name:"class transformers.Speech2TextProcessor",anchor:"transformers.Speech2TextProcessor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L24",parametersDescription:[{anchor:"transformers.Speech2TextProcessor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>Speech2TextFeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor">Speech2TextFeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Speech2TextProcessor.tokenizer",description:`<strong>tokenizer</strong> (<code>Speech2TextTokenizer</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer">Speech2TextTokenizer</a>. The tokenizer is a required input.`,name:"tokenizer"}]}}),bt=new F({props:{name:"__call__",anchor:"transformers.Speech2TextProcessor.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L114"}}),St=new F({props:{name:"from_pretrained",anchor:"transformers.Speech2TextProcessor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L79",parametersDescription:[{anchor:"transformers.Speech2TextProcessor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <code>save_pretrained</code> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <code>PreTrainedFeatureExtractor</code> and <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a></li> </ul>`,name:"pretrained_model_name_or_path"}]}}),Fe=new zs({props:{$$slots:{default:[ml]},$$scope:{ctx:Y}}}),$t=new F({props:{name:"save_pretrained",anchor:"transformers.Speech2TextProcessor.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L56",parametersDescription:[{anchor:"transformers.Speech2TextProcessor.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"}]}}),Ce=new zs({props:{$$slots:{default:[ul]},$$scope:{ctx:Y}}}),zt=new F({props:{name:"batch_decode",anchor:"transformers.Speech2TextProcessor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L124"}}),qt=new F({props:{name:"decode",anchor:"transformers.Speech2TextProcessor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L132"}}),jt=new F({props:{name:"as_target_processor",anchor:"transformers.Speech2TextProcessor.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/processing_speech_to_text.py#L140"}}),Ft=new xe({}),Ct=new F({props:{name:"class transformers.Speech2TextModel",anchor:"transformers.Speech2TextModel",parameters:[{name:"config",val:": Speech2TextConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/modeling_speech_to_text.py#L1120",parametersDescription:[{anchor:"transformers.Speech2TextModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig">Speech2TextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),It=new F({props:{name:"forward",anchor:"transformers.Speech2TextModel.forward",parameters:[{name:"input_features",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/modeling_speech_to_text.py#L1142",parametersDescription:[{anchor:"transformers.Speech2TextModel.forward.input_features",description:`<strong>input_features</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, feature_size)</code>) &#x2014; Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a <code>.flac</code> or <code>.wav</code> audio file into an array of type <code>List[float]</code> or a <code>numpy.ndarray</code>, <em>e.g.</em> via the soundfile library (<code>pip install soundfile</code>). To prepare the array into <code>input_features</code>, the <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer">Speech2TextTokenizer</a> should be used for extracting the fbank features, padding and conversion into a tensor of type <code>torch.FloatTensor</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a>`,name:"input_features"},{anchor:"transformers.Speech2TextModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Speech2TextModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>SpeechToTextTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>SpeechToText uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.Speech2TextModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_speech_to_text._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.Speech2TextModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Speech2TextModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.Speech2TextModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.Speech2TextModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.Speech2TextModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. decoder_inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, target_sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>decoder_input_ids<code>you can choose to directly pass an embedded representation. If</code>past_key_values<code>is used, optionally only the last</code>decoder_inputs_embeds<code>have to be input (see</code>past_key_values<code>). This is useful if you want more control over how to convert </code>decoder_input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"past_key_values"},{anchor:"transformers.Speech2TextModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.Speech2TextModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Speech2TextModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Speech2TextModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig" >Speech2TextConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the 
decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new zs({props:{$$slots:{default:[_l]},$$scope:{ctx:Y}}}),Nt=new En({props:{code:`from transformers import Speech2TextTokenizer, Speech2TextModel import torch tokenizer = Speech2TextTokenizer.from_pretrained('facebook/s2t-small-librispeech-asr') model = Speech2TextModel.from_pretrained('facebook/s2t-small-librispeech-asr') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2TextTokenizer, Speech2TextModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Speech2TextTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/s2t-small-librispeech-asr&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Speech2TextModel.from_pretrained(<span class="hljs-string">&#x27;facebook/s2t-small-librispeech-asr&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Lt=new xe({}),Ot=new F({props:{name:"class transformers.Speech2TextForConditionalGeneration",anchor:"transformers.Speech2TextForConditionalGeneration",parameters:[{name:"config",val:": 
Speech2TextConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/modeling_speech_to_text.py#L1233",parametersDescription:[{anchor:"transformers.Speech2TextForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig">Speech2TextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ut=new F({props:{name:"forward",anchor:"transformers.Speech2TextForConditionalGeneration.forward",parameters:[{name:"input_features",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text/modeling_speech_to_text.py#L1270",parametersDescription:[{anchor:"transformers.Speech2TextForConditionalGeneration.forward.input_features",description:`<strong>input_features</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, feature_size)</code>) &#x2014; Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a <code>.flac</code> or <code>.wav</code> audio file into an array of type <code>List[float]</code> or a <code>numpy.ndarray</code>, <em>e.g.</em> via the soundfile library (<code>pip install soundfile</code>). To prepare the array into <code>input_features</code>, the <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer">Speech2TextTokenizer</a> should be used for extracting the fbank features, padding and conversion into a tensor of type <code>torch.FloatTensor</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a>`,name:"input_features"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>SpeechToTextTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>SpeechToText uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_speech_to_text._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. decoder_inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, target_sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>decoder_input_ids<code>you can choose to directly pass an embedded representation. If</code>past_key_values<code>is used, optionally only the last</code>decoder_inputs_embeds<code>have to be input (see</code>past_key_values<code>). This is useful if you want more control over how to convert </code>decoder_input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"past_key_values"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Speech2TextForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig" >Speech2TextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output 
of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Oe=new zs({props:{$$slots:{default:[gl]},$$scope:{ctx:Y}}}),Bt=new En({props:{code:`import torch from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration from datasets import load_dataset import soundfile as sf model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_features = processor(ds["speech"][0], sampling_rate=16000, return_tensors="pt").input_features # Batch size 1 generated_ids = model.generate(input_ids=input_features) transcription = processor.batch_decode(generated_ids),`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2TextProcessor, Speech2TextForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>model = Speech2TextForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-small-librispeech-asr&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Speech2TextProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-small-librispeech-asr&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">&gt;&gt;&gt; </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">&gt;&gt;&gt; </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_features = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16000</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_features <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(input_ids=input_features) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(generated_ids)`}}),{c(){f=a("meta"),w=d(),m=a("h1"),k=a("a"),z=a("span"),u(b.$$.fragment),y=d(),E=a("span"),C=n("Speech2Text"),$=d(),M=a("h2"),L=a("a"),qo=a("span"),u(Ue.$$.fragment),Es=d(),Po=a("span"),qs=n("Overview"),qn=d(),O=a("p"),Ps=n("The Speech2Text model was proposed in "),Be=a("a"),js=n("fairseq S2T: Fast Speech-to-Text Modeling with fairseq"),Fs=n(` by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. It\u2019s a transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the transcripts/translations autoregressively. Speech2Text has been fine-tuned on several datasets for ASR and ST: `),Re=a("a"),Cs=n("LibriSpeech"),Ms=n(", "),He=a("a"),As=n("CoVoST 2"),Ds=n(", "),Je=a("a"),Is=n("MuST-C"),Ns=n("."),Pn=d(),Q=a("p"),Ls=n("This model was contributed by "),Ye=a("a"),Os=n("valhalla"),Gs=n(". 
The original code can be found "),Xe=a("a"),Ws=n("here"),Vs=n("."),jn=d(),ce=a("h2"),be=a("a"),jo=a("span"),u(Ke.$$.fragment),Us=d(),Fo=a("span"),Bs=n("Inference"),Fn=d(),ke=a("p"),Rs=n(`Speech2Text is a speech model that accepts a float tensor of log-mel filter-bank features extracted from the speech signal. It\u2019s a transformer-based seq2seq model, so the transcripts/translations are generated autoregressively. The `),Co=a("code"),Hs=n("generate()"),Js=n(" method can be used for inference."),Cn=d(),G=a("p"),Ys=n("The "),Ht=a("a"),Xs=n("Speech2TextFeatureExtractor"),Ks=n(` class is responsible for extracting the log-mel filter-bank features. The `),Jt=a("a"),Qs=n("Speech2TextProcessor"),Zs=n(" wraps "),Yt=a("a"),ea=n("Speech2TextFeatureExtractor"),ta=n(` and `),Xt=a("a"),oa=n("Speech2TextTokenizer"),na=n(` into a single instance to both extract the input features and decode the predicted token ids.`),Mn=d(),q=a("p"),sa=n("The feature extractor depends on "),Mo=a("code"),aa=n("torchaudio"),ra=n(" and the tokenizer depends on "),Ao=a("code"),ia=n("sentencepiece"),ca=n(` so be sure to install those packages before running the examples. You could either install those as extra speech dependencies with `),Do=a("code"),da=n('pip install transformers"[speech, sentencepiece]"'),la=n(" or install the packages seperately with "),Io=a("code"),pa=n("pip install torchaudio sentencepiece"),ha=n(". Also "),No=a("code"),fa=n("torchaudio"),ma=n(" requires the development version of the "),Qe=a("a"),ua=n("libsndfile"),_a=n(` package which can be installed via a system package manager. On Ubuntu it can be installed as follows: `),Lo=a("code"),ga=n("apt install libsndfile1-dev"),An=d(),Kt=a("ul"),Oo=a("li"),va=n("ASR and Speech Translation"),Dn=d(),u(Ze.$$.fragment),In=d(),Qt=a("ul"),et=a("li"),Go=a("p"),Ta=n("Multilingual speech translation"),xa=d(),A=a("p"),ba=n("For multilingual speech translation models, "),Wo=a("code"),ka=n("eos_token_id"),ya=n(" is used as the "),Vo=a("code"),Sa=n("decoder_start_token_id"),wa=n(` and the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the `),Uo=a("code"),$a=n("forced_bos_token_id"),za=n(" parameter to the "),Bo=a("code"),Ea=n("generate()"),qa=n(` method. The following example shows how to transate English speech to French text using the `),Ro=a("em"),Pa=n("facebook/s2t-medium-mustc-multilingual-st"),ja=n(` checkpoint.`),Nn=d(),u(tt.$$.fragment),Ln=d(),ye=a("p"),Fa=n("See the "),ot=a("a"),Ca=n("model hub"),Ma=n(" to look for Speech2Text checkpoints."),On=d(),de=a("h2"),Se=a("a"),Ho=a("span"),u(nt.$$.fragment),Aa=d(),Jo=a("span"),Da=n("Speech2TextConfig"),Gn=d(),D=a("div"),u(st.$$.fragment),Ia=d(),le=a("p"),Na=n("This is the configuration class to store the configuration of a "),Zt=a("a"),La=n("Speech2TextModel"),Oa=n(`. It is used to instantiate an Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text `),at=a("a"),Ga=n("facebook/s2t-small-librispeech-asr"),Wa=n(" architecture."),Va=d(),pe=a("p"),Ua=n("Configuration objects inherit from "),eo=a("a"),Ba=n("PretrainedConfig"),Ra=n(` and can be used to control the model outputs. 
Read the documentation from `),to=a("a"),Ha=n("PretrainedConfig"),Ja=n(" for more information."),Ya=d(),Yo=a("p"),Xa=n("Example:"),Ka=d(),u(rt.$$.fragment),Wn=d(),he=a("h2"),we=a("a"),Xo=a("span"),u(it.$$.fragment),Qa=d(),Ko=a("span"),Za=n("Speech2TextTokenizer"),Vn=d(),P=a("div"),u(ct.$$.fragment),er=d(),Qo=a("p"),tr=n("Construct an Speech2Text tokenizer."),or=d(),dt=a("p"),nr=n("This tokenizer inherits from "),oo=a("a"),sr=n("PreTrainedTokenizer"),ar=n(` which contains some of the main methods. Users should refer to the superclass for more information regarding such methods.`),rr=d(),$e=a("div"),u(lt.$$.fragment),ir=d(),Zo=a("p"),cr=n("Build model inputs from a sequence by appending eos_token_id."),dr=d(),ze=a("div"),u(pt.$$.fragment),lr=d(),ht=a("p"),pr=n(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),en=a("code"),hr=n("prepare_for_model"),fr=n(" method."),mr=d(),Z=a("div"),u(ft.$$.fragment),ur=d(),no=a("p"),_r=n("Create the token type IDs corresponding to the sequences passed. "),so=a("a"),gr=n("What are token type IDs?"),vr=d(),tn=a("p"),Tr=n("Should be overridden in a subclass if the model has a special way of building those."),xr=d(),on=a("div"),Un=d(),fe=a("h2"),Ee=a("a"),nn=a("span"),u(mt.$$.fragment),br=d(),sn=a("span"),kr=n("Speech2TextFeatureExtractor"),Bn=d(),I=a("div"),u(ut.$$.fragment),yr=d(),an=a("p"),Sr=n("Constructs a Speech2Text feature extractor."),wr=d(),_t=a("p"),$r=n("This feature extractor inherits from "),ao=a("a"),zr=n("Speech2TextFeatureExtractor"),Er=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),qr=d(),rn=a("p"),Pr=n(`This class extracts mel-filter bank features from raw speech using TorchAudio and applies utterance-level cepstral mean and variance normalization to the extracted features.`),jr=d(),qe=a("div"),u(gt.$$.fragment),Fr=d(),cn=a("p"),Cr=n("Main method to featurize and prepare for the model one or several sequence(s). sequences."),Rn=d(),me=a("h2"),Pe=a("a"),dn=a("span"),u(vt.$$.fragment),Mr=d(),ln=a("span"),Ar=n("Speech2TextProcessor"),Hn=d(),S=a("div"),u(Tt.$$.fragment),Dr=d(),pn=a("p"),Ir=n(`Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor.`),Nr=d(),W=a("p"),ro=a("a"),Lr=n("Speech2TextProcessor"),Or=n(` offers all the functionalities of `),io=a("a"),Gr=n("Speech2TextFeatureExtractor"),Wr=n(" and "),co=a("a"),Vr=n("Speech2TextTokenizer"),Ur=n(`. See the `),xt=a("a"),hn=a("strong"),Br=n("call"),Rr=n("()"),Hr=n(" and "),lo=a("a"),Jr=n("decode()"),Yr=n(` for more information.`),Xr=d(),je=a("div"),u(bt.$$.fragment),Kr=d(),X=a("p"),Qr=n(`When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor\u2019s `),kt=a("a"),fn=a("strong"),Zr=n("call"),ei=n("()"),ti=n(` and returns its output. If used in the context `),po=a("a"),oi=n("as_target_processor()"),ni=n(` this method forwards all its arguments to Speech2TextTokenizer\u2019s `),yt=a("a"),mn=a("strong"),si=n("call"),ai=n("()"),ri=n(`. 
Please refer to the doctsring of the above two methods for more information.`),ii=d(),ee=a("div"),u(St.$$.fragment),ci=d(),wt=a("p"),di=n("Instantiate a "),ho=a("a"),li=n("Speech2TextProcessor"),pi=n(" from a pretrained Speech2Text processor."),hi=d(),u(Fe.$$.fragment),fi=d(),te=a("div"),u($t.$$.fragment),mi=d(),ue=a("p"),ui=n(`Save a Speech2Text feature extractor object and Speech2Text tokenizer object to the directory `),un=a("code"),_i=n("save_directory"),gi=n(`, so that it can be re-loaded using the `),fo=a("a"),vi=n("from_pretrained()"),Ti=n(" class method."),xi=d(),u(Ce.$$.fragment),bi=d(),Me=a("div"),u(zt.$$.fragment),ki=d(),Et=a("p"),yi=n(`This method forwards all its arguments to Speech2TextTokenizer\u2019s `),mo=a("a"),Si=n("batch_decode()"),wi=n(`. Please refer to the docstring of this method for more information.`),$i=d(),Ae=a("div"),u(qt.$$.fragment),zi=d(),Pt=a("p"),Ei=n(`This method forwards all its arguments to Speech2TextTokenizer\u2019s `),uo=a("a"),qi=n("decode()"),Pi=n(`. Please refer to the docstring of this method for more information.`),ji=d(),De=a("div"),u(jt.$$.fragment),Fi=d(),_n=a("p"),Ci=n(`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text.`),Jn=d(),_e=a("h2"),Ie=a("a"),gn=a("span"),u(Ft.$$.fragment),Mi=d(),vn=a("span"),Ai=n("Speech2TextModel"),Yn=d(),R=a("div"),u(Ct.$$.fragment),Di=d(),Mt=a("p"),Ii=n(`The bare Speech2Text Model outputting raw hidden-states without any specific head on top. This model inherits from `),_o=a("a"),Ni=n("PreTrainedModel"),Li=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Oi=d(),At=a("p"),Gi=n("This model is also a PyTorch "),Dt=a("a"),Wi=n("torch.nn.Module"),Vi=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ui=d(),V=a("div"),u(It.$$.fragment),Bi=d(),ge=a("p"),Ri=n("The "),go=a("a"),Hi=n("Speech2TextModel"),Ji=n(" forward method, overrides the "),Tn=a("code"),Yi=n("__call__"),Xi=n(" special method."),Ki=d(),u(Ne.$$.fragment),Qi=d(),xn=a("p"),Zi=n("Example:"),ec=d(),u(Nt.$$.fragment),Xn=d(),ve=a("h2"),Le=a("a"),bn=a("span"),u(Lt.$$.fragment),tc=d(),kn=a("span"),oc=n("Speech2TextForConditionalGeneration"),Kn=d(),H=a("div"),u(Ot.$$.fragment),nc=d(),Gt=a("p"),sc=n(`The Speech2Text Model with a language modeling head. Can be used for summarization. This model inherits from `),vo=a("a"),ac=n("PreTrainedModel"),rc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ic=d(),Wt=a("p"),cc=n("This model is also a PyTorch "),Vt=a("a"),dc=n("torch.nn.Module"),lc=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pc=d(),U=a("div"),u(Ut.$$.fragment),hc=d(),Te=a("p"),fc=n("The "),To=a("a"),mc=n("Speech2TextForConditionalGeneration"),uc=n(" forward method, overrides the "),yn=a("code"),_c=n("__call__"),gc=n(" special method."),vc=d(),u(Oe.$$.fragment),Tc=d(),Sn=a("p"),xc=n("Example:"),bc=d(),u(Bt.$$.fragment),this.h()},l(t){const p=fl('[data-svelte="svelte-1phssyn"]',document.head);f=r(p,"META",{name:!0,content:!0}),p.forEach(o),w=l(t),m=r(t,"H1",{class:!0});var Rt=i(m);k=r(Rt,"A",{id:!0,class:!0,href:!0});var wn=i(k);z=r(wn,"SPAN",{});var $n=i(z);_(b.$$.fragment,$n),$n.forEach(o),wn.forEach(o),y=l(Rt),E=r(Rt,"SPAN",{});var zn=i(E);C=s(zn,"Speech2Text"),zn.forEach(o),Rt.forEach(o),$=l(t),M=r(t,"H2",{class:!0});var Zn=i(M);L=r(Zn,"A",{id:!0,class:!0,href:!0});var $c=i(L);qo=r($c,"SPAN",{});var zc=i(qo);_(Ue.$$.fragment,zc),zc.forEach(o),$c.forEach(o),Es=l(Zn),Po=r(Zn,"SPAN",{});var Ec=i(Po);qs=s(Ec,"Overview"),Ec.forEach(o),Zn.forEach(o),qn=l(t),O=r(t,"P",{});var oe=i(O);Ps=s(oe,"The Speech2Text model was proposed in "),Be=r(oe,"A",{href:!0,rel:!0});var qc=i(Be);js=s(qc,"fairseq S2T: Fast Speech-to-Text Modeling with fairseq"),qc.forEach(o),Fs=s(oe,` by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. It\u2019s a transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the transcripts/translations autoregressively. Speech2Text has been fine-tuned on several datasets for ASR and ST: `),Re=r(oe,"A",{href:!0,rel:!0});var Pc=i(Re);Cs=s(Pc,"LibriSpeech"),Pc.forEach(o),Ms=s(oe,", "),He=r(oe,"A",{href:!0,rel:!0});var jc=i(He);As=s(jc,"CoVoST 2"),jc.forEach(o),Ds=s(oe,", "),Je=r(oe,"A",{href:!0,rel:!0});var Fc=i(Je);Is=s(Fc,"MuST-C"),Fc.forEach(o),Ns=s(oe,"."),oe.forEach(o),Pn=l(t),Q=r(t,"P",{});var xo=i(Q);Ls=s(xo,"This model was contributed by "),Ye=r(xo,"A",{href:!0,rel:!0});var Cc=i(Ye);Os=s(Cc,"valhalla"),Cc.forEach(o),Gs=s(xo,". The original code can be found "),Xe=r(xo,"A",{href:!0,rel:!0});var Mc=i(Xe);Ws=s(Mc,"here"),Mc.forEach(o),Vs=s(xo,"."),xo.forEach(o),jn=l(t),ce=r(t,"H2",{class:!0});var es=i(ce);be=r(es,"A",{id:!0,class:!0,href:!0});var Ac=i(be);jo=r(Ac,"SPAN",{});var Dc=i(jo);_(Ke.$$.fragment,Dc),Dc.forEach(o),Ac.forEach(o),Us=l(es),Fo=r(es,"SPAN",{});var Ic=i(Fo);Bs=s(Ic,"Inference"),Ic.forEach(o),es.forEach(o),Fn=l(t),ke=r(t,"P",{});var ts=i(ke);Rs=s(ts,`Speech2Text is a speech model that accepts a float tensor of log-mel filter-bank features extracted from the speech signal. It\u2019s a transformer-based seq2seq model, so the transcripts/translations are generated autoregressively. The `),Co=r(ts,"CODE",{});var Nc=i(Co);Hs=s(Nc,"generate()"),Nc.forEach(o),Js=s(ts," method can be used for inference."),ts.forEach(o),Cn=l(t),G=r(t,"P",{});var ne=i(G);Ys=s(ne,"The "),Ht=r(ne,"A",{href:!0});var Lc=i(Ht);Xs=s(Lc,"Speech2TextFeatureExtractor"),Lc.forEach(o),Ks=s(ne,` class is responsible for extracting the log-mel filter-bank features. 
The `),Jt=r(ne,"A",{href:!0});var Oc=i(Jt);Qs=s(Oc,"Speech2TextProcessor"),Oc.forEach(o),Zs=s(ne," wraps "),Yt=r(ne,"A",{href:!0});var Gc=i(Yt);ea=s(Gc,"Speech2TextFeatureExtractor"),Gc.forEach(o),ta=s(ne,` and `),Xt=r(ne,"A",{href:!0});var Wc=i(Xt);oa=s(Wc,"Speech2TextTokenizer"),Wc.forEach(o),na=s(ne,` into a single instance to both extract the input features and decode the predicted token ids.`),ne.forEach(o),Mn=l(t),q=r(t,"P",{});var N=i(q);sa=s(N,"The feature extractor depends on "),Mo=r(N,"CODE",{});var Vc=i(Mo);aa=s(Vc,"torchaudio"),Vc.forEach(o),ra=s(N," and the tokenizer depends on "),Ao=r(N,"CODE",{});var Uc=i(Ao);ia=s(Uc,"sentencepiece"),Uc.forEach(o),ca=s(N,` so be sure to install those packages before running the examples. You could either install those as extra speech dependencies with `),Do=r(N,"CODE",{});var Bc=i(Do);da=s(Bc,'pip install transformers"[speech, sentencepiece]"'),Bc.forEach(o),la=s(N," or install the packages seperately with "),Io=r(N,"CODE",{});var Rc=i(Io);pa=s(Rc,"pip install torchaudio sentencepiece"),Rc.forEach(o),ha=s(N,". Also "),No=r(N,"CODE",{});var Hc=i(No);fa=s(Hc,"torchaudio"),Hc.forEach(o),ma=s(N," requires the development version of the "),Qe=r(N,"A",{href:!0,rel:!0});var Jc=i(Qe);ua=s(Jc,"libsndfile"),Jc.forEach(o),_a=s(N,` package which can be installed via a system package manager. On Ubuntu it can be installed as follows: `),Lo=r(N,"CODE",{});var Yc=i(Lo);ga=s(Yc,"apt install libsndfile1-dev"),Yc.forEach(o),N.forEach(o),An=l(t),Kt=r(t,"UL",{});var Xc=i(Kt);Oo=r(Xc,"LI",{});var Kc=i(Oo);va=s(Kc,"ASR and Speech Translation"),Kc.forEach(o),Xc.forEach(o),Dn=l(t),_(Ze.$$.fragment,t),In=l(t),Qt=r(t,"UL",{});var Qc=i(Qt);et=r(Qc,"LI",{});var os=i(et);Go=r(os,"P",{});var Zc=i(Go);Ta=s(Zc,"Multilingual speech translation"),Zc.forEach(o),xa=l(os),A=r(os,"P",{});var J=i(A);ba=s(J,"For multilingual speech translation models, "),Wo=r(J,"CODE",{});var ed=i(Wo);ka=s(ed,"eos_token_id"),ed.forEach(o),ya=s(J," is used as the "),Vo=r(J,"CODE",{});var td=i(Vo);Sa=s(td,"decoder_start_token_id"),td.forEach(o),wa=s(J,` and the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the `),Uo=r(J,"CODE",{});var od=i(Uo);$a=s(od,"forced_bos_token_id"),od.forEach(o),za=s(J," parameter to the "),Bo=r(J,"CODE",{});var nd=i(Bo);Ea=s(nd,"generate()"),nd.forEach(o),qa=s(J,` method. The following example shows how to transate English speech to French text using the `),Ro=r(J,"EM",{});var sd=i(Ro);Pa=s(sd,"facebook/s2t-medium-mustc-multilingual-st"),sd.forEach(o),ja=s(J,` checkpoint.`),J.forEach(o),os.forEach(o),Qc.forEach(o),Nn=l(t),_(tt.$$.fragment,t),Ln=l(t),ye=r(t,"P",{});var ns=i(ye);Fa=s(ns,"See the "),ot=r(ns,"A",{href:!0,rel:!0});var ad=i(ot);Ca=s(ad,"model hub"),ad.forEach(o),Ma=s(ns," to look for Speech2Text checkpoints."),ns.forEach(o),On=l(t),de=r(t,"H2",{class:!0});var ss=i(de);Se=r(ss,"A",{id:!0,class:!0,href:!0});var rd=i(Se);Ho=r(rd,"SPAN",{});var id=i(Ho);_(nt.$$.fragment,id),id.forEach(o),rd.forEach(o),Aa=l(ss),Jo=r(ss,"SPAN",{});var cd=i(Jo);Da=s(cd,"Speech2TextConfig"),cd.forEach(o),ss.forEach(o),Gn=l(t),D=r(t,"DIV",{class:!0});var se=i(D);_(st.$$.fragment,se),Ia=l(se),le=r(se,"P",{});var bo=i(le);Na=s(bo,"This is the configuration class to store the configuration of a "),Zt=r(bo,"A",{href:!0});var dd=i(Zt);La=s(dd,"Speech2TextModel"),dd.forEach(o),Oa=s(bo,`. It is used to instantiate an Speech2Text model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text `),at=r(bo,"A",{href:!0,rel:!0});var ld=i(at);Ga=s(ld,"facebook/s2t-small-librispeech-asr"),ld.forEach(o),Wa=s(bo," architecture."),bo.forEach(o),Va=l(se),pe=r(se,"P",{});var ko=i(pe);Ua=s(ko,"Configuration objects inherit from "),eo=r(ko,"A",{href:!0});var pd=i(eo);Ba=s(pd,"PretrainedConfig"),pd.forEach(o),Ra=s(ko,` and can be used to control the model outputs. Read the documentation from `),to=r(ko,"A",{href:!0});var hd=i(to);Ha=s(hd,"PretrainedConfig"),hd.forEach(o),Ja=s(ko," for more information."),ko.forEach(o),Ya=l(se),Yo=r(se,"P",{});var fd=i(Yo);Xa=s(fd,"Example:"),fd.forEach(o),Ka=l(se),_(rt.$$.fragment,se),se.forEach(o),Wn=l(t),he=r(t,"H2",{class:!0});var as=i(he);we=r(as,"A",{id:!0,class:!0,href:!0});var md=i(we);Xo=r(md,"SPAN",{});var ud=i(Xo);_(it.$$.fragment,ud),ud.forEach(o),md.forEach(o),Qa=l(as),Ko=r(as,"SPAN",{});var _d=i(Ko);Za=s(_d,"Speech2TextTokenizer"),_d.forEach(o),as.forEach(o),Vn=l(t),P=r(t,"DIV",{class:!0});var B=i(P);_(ct.$$.fragment,B),er=l(B),Qo=r(B,"P",{});var gd=i(Qo);tr=s(gd,"Construct an Speech2Text tokenizer."),gd.forEach(o),or=l(B),dt=r(B,"P",{});var rs=i(dt);nr=s(rs,"This tokenizer inherits from "),oo=r(rs,"A",{href:!0});var vd=i(oo);sr=s(vd,"PreTrainedTokenizer"),vd.forEach(o),ar=s(rs,` which contains some of the main methods. Users should refer to the superclass for more information regarding such methods.`),rs.forEach(o),rr=l(B),$e=r(B,"DIV",{class:!0});var is=i($e);_(lt.$$.fragment,is),ir=l(is),Zo=r(is,"P",{});var Td=i(Zo);cr=s(Td,"Build model inputs from a sequence by appending eos_token_id."),Td.forEach(o),is.forEach(o),dr=l(B),ze=r(B,"DIV",{class:!0});var cs=i(ze);_(pt.$$.fragment,cs),lr=l(cs),ht=r(cs,"P",{});var ds=i(ht);pr=s(ds,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),en=r(ds,"CODE",{});var xd=i(en);hr=s(xd,"prepare_for_model"),xd.forEach(o),fr=s(ds," method."),ds.forEach(o),cs.forEach(o),mr=l(B),Z=r(B,"DIV",{class:!0});var yo=i(Z);_(ft.$$.fragment,yo),ur=l(yo),no=r(yo,"P",{});var kc=i(no);_r=s(kc,"Create the token type IDs corresponding to the sequences passed. "),so=r(kc,"A",{href:!0});var bd=i(so);gr=s(bd,"What are token type IDs?"),bd.forEach(o),kc.forEach(o),vr=l(yo),tn=r(yo,"P",{});var kd=i(tn);Tr=s(kd,"Should be overridden in a subclass if the model has a special way of building those."),kd.forEach(o),yo.forEach(o),xr=l(B),on=r(B,"DIV",{class:!0}),i(on).forEach(o),B.forEach(o),Un=l(t),fe=r(t,"H2",{class:!0});var ls=i(fe);Ee=r(ls,"A",{id:!0,class:!0,href:!0});var yd=i(Ee);nn=r(yd,"SPAN",{});var Sd=i(nn);_(mt.$$.fragment,Sd),Sd.forEach(o),yd.forEach(o),br=l(ls),sn=r(ls,"SPAN",{});var wd=i(sn);kr=s(wd,"Speech2TextFeatureExtractor"),wd.forEach(o),ls.forEach(o),Bn=l(t),I=r(t,"DIV",{class:!0});var ae=i(I);_(ut.$$.fragment,ae),yr=l(ae),an=r(ae,"P",{});var $d=i(an);Sr=s($d,"Constructs a Speech2Text feature extractor."),$d.forEach(o),wr=l(ae),_t=r(ae,"P",{});var ps=i(_t);$r=s(ps,"This feature extractor inherits from "),ao=r(ps,"A",{href:!0});var zd=i(ao);zr=s(zd,"Speech2TextFeatureExtractor"),zd.forEach(o),Er=s(ps,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),ps.forEach(o),qr=l(ae),rn=r(ae,"P",{});var Ed=i(rn);Pr=s(Ed,`This class extracts mel-filter bank features from raw speech using TorchAudio and applies utterance-level cepstral mean and variance normalization to the extracted features.`),Ed.forEach(o),jr=l(ae),qe=r(ae,"DIV",{class:!0});var hs=i(qe);_(gt.$$.fragment,hs),Fr=l(hs),cn=r(hs,"P",{});var qd=i(cn);Cr=s(qd,"Main method to featurize and prepare for the model one or several sequence(s). sequences."),qd.forEach(o),hs.forEach(o),ae.forEach(o),Rn=l(t),me=r(t,"H2",{class:!0});var fs=i(me);Pe=r(fs,"A",{id:!0,class:!0,href:!0});var Pd=i(Pe);dn=r(Pd,"SPAN",{});var jd=i(dn);_(vt.$$.fragment,jd),jd.forEach(o),Pd.forEach(o),Mr=l(fs),ln=r(fs,"SPAN",{});var Fd=i(ln);Ar=s(Fd,"Speech2TextProcessor"),Fd.forEach(o),fs.forEach(o),Hn=l(t),S=r(t,"DIV",{class:!0});var j=i(S);_(Tt.$$.fragment,j),Dr=l(j),pn=r(j,"P",{});var Cd=i(pn);Ir=s(Cd,`Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor.`),Cd.forEach(o),Nr=l(j),W=r(j,"P",{});var K=i(W);ro=r(K,"A",{href:!0});var Md=i(ro);Lr=s(Md,"Speech2TextProcessor"),Md.forEach(o),Or=s(K,` offers all the functionalities of `),io=r(K,"A",{href:!0});var Ad=i(io);Gr=s(Ad,"Speech2TextFeatureExtractor"),Ad.forEach(o),Wr=s(K," and "),co=r(K,"A",{href:!0});var Dd=i(co);Vr=s(Dd,"Speech2TextTokenizer"),Dd.forEach(o),Ur=s(K,`. See the `),xt=r(K,"A",{href:!0});var yc=i(xt);hn=r(yc,"STRONG",{});var Id=i(hn);Br=s(Id,"call"),Id.forEach(o),Rr=s(yc,"()"),yc.forEach(o),Hr=s(K," and "),lo=r(K,"A",{href:!0});var Nd=i(lo);Jr=s(Nd,"decode()"),Nd.forEach(o),Yr=s(K,` for more information.`),K.forEach(o),Xr=l(j),je=r(j,"DIV",{class:!0});var ms=i(je);_(bt.$$.fragment,ms),Kr=l(ms),X=r(ms,"P",{});var Ge=i(X);Qr=s(Ge,`When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor\u2019s `),kt=r(Ge,"A",{href:!0});var Sc=i(kt);fn=r(Sc,"STRONG",{});var Ld=i(fn);Zr=s(Ld,"call"),Ld.forEach(o),ei=s(Sc,"()"),Sc.forEach(o),ti=s(Ge,` and returns its output. If used in the context `),po=r(Ge,"A",{href:!0});var Od=i(po);oi=s(Od,"as_target_processor()"),Od.forEach(o),ni=s(Ge,` this method forwards all its arguments to Speech2TextTokenizer\u2019s `),yt=r(Ge,"A",{href:!0});var wc=i(yt);mn=r(wc,"STRONG",{});var Gd=i(mn);si=s(Gd,"call"),Gd.forEach(o),ai=s(wc,"()"),wc.forEach(o),ri=s(Ge,`. 
Please refer to the doctsring of the above two methods for more information.`),Ge.forEach(o),ms.forEach(o),ii=l(j),ee=r(j,"DIV",{class:!0});var So=i(ee);_(St.$$.fragment,So),ci=l(So),wt=r(So,"P",{});var us=i(wt);di=s(us,"Instantiate a "),ho=r(us,"A",{href:!0});var Wd=i(ho);li=s(Wd,"Speech2TextProcessor"),Wd.forEach(o),pi=s(us," from a pretrained Speech2Text processor."),us.forEach(o),hi=l(So),_(Fe.$$.fragment,So),So.forEach(o),fi=l(j),te=r(j,"DIV",{class:!0});var wo=i(te);_($t.$$.fragment,wo),mi=l(wo),ue=r(wo,"P",{});var $o=i(ue);ui=s($o,`Save a Speech2Text feature extractor object and Speech2Text tokenizer object to the directory `),un=r($o,"CODE",{});var Vd=i(un);_i=s(Vd,"save_directory"),Vd.forEach(o),gi=s($o,`, so that it can be re-loaded using the `),fo=r($o,"A",{href:!0});var Ud=i(fo);vi=s(Ud,"from_pretrained()"),Ud.forEach(o),Ti=s($o," class method."),$o.forEach(o),xi=l(wo),_(Ce.$$.fragment,wo),wo.forEach(o),bi=l(j),Me=r(j,"DIV",{class:!0});var _s=i(Me);_(zt.$$.fragment,_s),ki=l(_s),Et=r(_s,"P",{});var gs=i(Et);yi=s(gs,`This method forwards all its arguments to Speech2TextTokenizer\u2019s `),mo=r(gs,"A",{href:!0});var Bd=i(mo);Si=s(Bd,"batch_decode()"),Bd.forEach(o),wi=s(gs,`. Please refer to the docstring of this method for more information.`),gs.forEach(o),_s.forEach(o),$i=l(j),Ae=r(j,"DIV",{class:!0});var vs=i(Ae);_(qt.$$.fragment,vs),zi=l(vs),Pt=r(vs,"P",{});var Ts=i(Pt);Ei=s(Ts,`This method forwards all its arguments to Speech2TextTokenizer\u2019s `),uo=r(Ts,"A",{href:!0});var Rd=i(uo);qi=s(Rd,"decode()"),Rd.forEach(o),Pi=s(Ts,`. Please refer to the docstring of this method for more information.`),Ts.forEach(o),vs.forEach(o),ji=l(j),De=r(j,"DIV",{class:!0});var xs=i(De);_(jt.$$.fragment,xs),Fi=l(xs),_n=r(xs,"P",{});var Hd=i(_n);Ci=s(Hd,`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text.`),Hd.forEach(o),xs.forEach(o),j.forEach(o),Jn=l(t),_e=r(t,"H2",{class:!0});var bs=i(_e);Ie=r(bs,"A",{id:!0,class:!0,href:!0});var Jd=i(Ie);gn=r(Jd,"SPAN",{});var Yd=i(gn);_(Ft.$$.fragment,Yd),Yd.forEach(o),Jd.forEach(o),Mi=l(bs),vn=r(bs,"SPAN",{});var Xd=i(vn);Ai=s(Xd,"Speech2TextModel"),Xd.forEach(o),bs.forEach(o),Yn=l(t),R=r(t,"DIV",{class:!0});var We=i(R);_(Ct.$$.fragment,We),Di=l(We),Mt=r(We,"P",{});var ks=i(Mt);Ii=s(ks,`The bare Speech2Text Model outputting raw hidden-states without any specific head on top. This model inherits from `),_o=r(ks,"A",{href:!0});var Kd=i(_o);Ni=s(Kd,"PreTrainedModel"),Kd.forEach(o),Li=s(ks,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ks.forEach(o),Oi=l(We),At=r(We,"P",{});var ys=i(At);Gi=s(ys,"This model is also a PyTorch "),Dt=r(ys,"A",{href:!0,rel:!0});var Qd=i(Dt);Wi=s(Qd,"torch.nn.Module"),Qd.forEach(o),Vi=s(ys,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ys.forEach(o),Ui=l(We),V=r(We,"DIV",{class:!0});var re=i(V);_(It.$$.fragment,re),Bi=l(re),ge=r(re,"P",{});var zo=i(ge);Ri=s(zo,"The "),go=r(zo,"A",{href:!0});var Zd=i(go);Hi=s(Zd,"Speech2TextModel"),Zd.forEach(o),Ji=s(zo," forward method, overrides the "),Tn=r(zo,"CODE",{});var el=i(Tn);Yi=s(el,"__call__"),el.forEach(o),Xi=s(zo," special method."),zo.forEach(o),Ki=l(re),_(Ne.$$.fragment,re),Qi=l(re),xn=r(re,"P",{});var tl=i(xn);Zi=s(tl,"Example:"),tl.forEach(o),ec=l(re),_(Nt.$$.fragment,re),re.forEach(o),We.forEach(o),Xn=l(t),ve=r(t,"H2",{class:!0});var Ss=i(ve);Le=r(Ss,"A",{id:!0,class:!0,href:!0});var ol=i(Le);bn=r(ol,"SPAN",{});var nl=i(bn);_(Lt.$$.fragment,nl),nl.forEach(o),ol.forEach(o),tc=l(Ss),kn=r(Ss,"SPAN",{});var sl=i(kn);oc=s(sl,"Speech2TextForConditionalGeneration"),sl.forEach(o),Ss.forEach(o),Kn=l(t),H=r(t,"DIV",{class:!0});var Ve=i(H);_(Ot.$$.fragment,Ve),nc=l(Ve),Gt=r(Ve,"P",{});var ws=i(Gt);sc=s(ws,`The Speech2Text Model with a language modeling head. Can be used for summarization. This model inherits from `),vo=r(ws,"A",{href:!0});var al=i(vo);ac=s(al,"PreTrainedModel"),al.forEach(o),rc=s(ws,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ws.forEach(o),ic=l(Ve),Wt=r(Ve,"P",{});var $s=i(Wt);cc=s($s,"This model is also a PyTorch "),Vt=r($s,"A",{href:!0,rel:!0});var rl=i(Vt);dc=s(rl,"torch.nn.Module"),rl.forEach(o),lc=s($s,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$s.forEach(o),pc=l(Ve),U=r(Ve,"DIV",{class:!0});var ie=i(U);_(Ut.$$.fragment,ie),hc=l(ie),Te=r(ie,"P",{});var Eo=i(Te);fc=s(Eo,"The "),To=r(Eo,"A",{href:!0});var il=i(To);mc=s(il,"Speech2TextForConditionalGeneration"),il.forEach(o),uc=s(Eo," forward method, overrides the "),yn=r(Eo,"CODE",{});var cl=i(yn);_c=s(cl,"__call__"),cl.forEach(o),gc=s(Eo," special method."),Eo.forEach(o),vc=l(ie),_(Oe.$$.fragment,ie),Tc=l(ie),Sn=r(ie,"P",{});var dl=i(Sn);xc=s(dl,"Example:"),dl.forEach(o),bc=l(ie),_(Bt.$$.fragment,ie),ie.forEach(o),Ve.forEach(o),this.h()},h(){c(f,"name","hf:doc:metadata"),c(f,"content",JSON.stringify(Tl)),c(k,"id","speech2text"),c(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(k,"href","#speech2text"),c(m,"class","relative group"),c(L,"id","overview"),c(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(L,"href","#overview"),c(M,"class","relative group"),c(Be,"href","https://arxiv.org/abs/2010.05171"),c(Be,"rel","nofollow"),c(Re,"href","http://www.openslr.org/12"),c(Re,"rel","nofollow"),c(He,"href","https://github.com/facebookresearch/covost"),c(He,"rel","nofollow"),c(Je,"href","https://ict.fbk.eu/must-c/"),c(Je,"rel","nofollow"),c(Ye,"href","https://huggingface.co/valhalla"),c(Ye,"rel","nofollow"),c(Xe,"href","https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text"),c(Xe,"rel","nofollow"),c(be,"id","inference"),c(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(be,"href","#inference"),c(ce,"class","relative group"),c(Ht,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor"),c(Jt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor"),c(Yt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor"),c(Xt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer"),c(Qe,"href","http://www.mega-nerd.com/libsndfile/"),c(Qe,"rel","nofollow"),c(ot,"href","https://huggingface.co/models?filter=speech_to_text"),c(ot,"rel","nofollow"),c(Se,"id","transformers.Speech2TextConfig"),c(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Se,"href","#transformers.Speech2TextConfig"),c(de,"class","relative group"),c(Zt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextModel"),c(at,"href","https://huggingface.co/facebook/s2t-small-librispeech-asr"),c(at,"rel","nofollow"),c(eo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(to,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(D,"class","docstring"),c(we,"id","transformers.Speech2TextTokenizer"),c(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(we,"href","#transformers.Speech2TextTokenizer"),c(he,"class","relative group"),c(oo,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c($e,"class","docstring"),c(ze,"class","docstring"),c(so,"href","../glossary#token-type-ids"),c(Z,"class","docstring"),c(on,"class","docstring"),c(P,"class","docstring"),c(Ee,"id","transformers.Speech2TextFeatureExtractor"),c(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ee,"href","#transformers.Speech2TextFeatureExtractor"),c(fe,"class","relative group"),c(ao,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor"),c(qe,"class","docstring"),c(I,"class","docstring"),c(Pe,"id","transformers.Speech2TextProcessor"),c(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Pe,"href","#transformers.Speech2TextProcessor"),c(me,"class","relative 
group"),c(ro,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor"),c(io,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor"),c(co,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer"),c(xt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor.__call__"),c(lo,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor.decode"),c(kt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor.__call__"),c(po,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor.as_target_processor"),c(yt,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),c(je,"class","docstring"),c(ho,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor"),c(ee,"class","docstring"),c(fo,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor.from_pretrained"),c(te,"class","docstring"),c(mo,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),c(Me,"class","docstring"),c(uo,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),c(Ae,"class","docstring"),c(De,"class","docstring"),c(S,"class","docstring"),c(Ie,"id","transformers.Speech2TextModel"),c(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ie,"href","#transformers.Speech2TextModel"),c(_e,"class","relative group"),c(_o,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Dt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Dt,"rel","nofollow"),c(go,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextModel"),c(V,"class","docstring"),c(R,"class","docstring"),c(Le,"id","transformers.Speech2TextForConditionalGeneration"),c(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Le,"href","#transformers.Speech2TextForConditionalGeneration"),c(ve,"class","relative 
group"),c(vo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Vt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Vt,"rel","nofollow"),c(To,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextForConditionalGeneration"),c(U,"class","docstring"),c(H,"class","docstring")},m(t,p){e(document.head,f),h(t,w,p),h(t,m,p),e(m,k),e(k,z),g(b,z,null),e(m,y),e(m,E),e(E,C),h(t,$,p),h(t,M,p),e(M,L),e(L,qo),g(Ue,qo,null),e(M,Es),e(M,Po),e(Po,qs),h(t,qn,p),h(t,O,p),e(O,Ps),e(O,Be),e(Be,js),e(O,Fs),e(O,Re),e(Re,Cs),e(O,Ms),e(O,He),e(He,As),e(O,Ds),e(O,Je),e(Je,Is),e(O,Ns),h(t,Pn,p),h(t,Q,p),e(Q,Ls),e(Q,Ye),e(Ye,Os),e(Q,Gs),e(Q,Xe),e(Xe,Ws),e(Q,Vs),h(t,jn,p),h(t,ce,p),e(ce,be),e(be,jo),g(Ke,jo,null),e(ce,Us),e(ce,Fo),e(Fo,Bs),h(t,Fn,p),h(t,ke,p),e(ke,Rs),e(ke,Co),e(Co,Hs),e(ke,Js),h(t,Cn,p),h(t,G,p),e(G,Ys),e(G,Ht),e(Ht,Xs),e(G,Ks),e(G,Jt),e(Jt,Qs),e(G,Zs),e(G,Yt),e(Yt,ea),e(G,ta),e(G,Xt),e(Xt,oa),e(G,na),h(t,Mn,p),h(t,q,p),e(q,sa),e(q,Mo),e(Mo,aa),e(q,ra),e(q,Ao),e(Ao,ia),e(q,ca),e(q,Do),e(Do,da),e(q,la),e(q,Io),e(Io,pa),e(q,ha),e(q,No),e(No,fa),e(q,ma),e(q,Qe),e(Qe,ua),e(q,_a),e(q,Lo),e(Lo,ga),h(t,An,p),h(t,Kt,p),e(Kt,Oo),e(Oo,va),h(t,Dn,p),g(Ze,t,p),h(t,In,p),h(t,Qt,p),e(Qt,et),e(et,Go),e(Go,Ta),e(et,xa),e(et,A),e(A,ba),e(A,Wo),e(Wo,ka),e(A,ya),e(A,Vo),e(Vo,Sa),e(A,wa),e(A,Uo),e(Uo,$a),e(A,za),e(A,Bo),e(Bo,Ea),e(A,qa),e(A,Ro),e(Ro,Pa),e(A,ja),h(t,Nn,p),g(tt,t,p),h(t,Ln,p),h(t,ye,p),e(ye,Fa),e(ye,ot),e(ot,Ca),e(ye,Ma),h(t,On,p),h(t,de,p),e(de,Se),e(Se,Ho),g(nt,Ho,null),e(de,Aa),e(de,Jo),e(Jo,Da),h(t,Gn,p),h(t,D,p),g(st,D,null),e(D,Ia),e(D,le),e(le,Na),e(le,Zt),e(Zt,La),e(le,Oa),e(le,at),e(at,Ga),e(le,Wa),e(D,Va),e(D,pe),e(pe,Ua),e(pe,eo),e(eo,Ba),e(pe,Ra),e(pe,to),e(to,Ha),e(pe,Ja),e(D,Ya),e(D,Yo),e(Yo,Xa),e(D,Ka),g(rt,D,null),h(t,Wn,p),h(t,he,p),e(he,we),e(we,Xo),g(it,Xo,null),e(he,Qa),e(he,Ko),e(Ko,Za),h(t,Vn,p),h(t,P,p),g(ct,P,null),e(P,er),e(P,Qo),e(Qo,tr),e(P,or),e(P,dt),e(dt,nr),e(dt,oo),e(oo,sr),e(dt,ar),e(P,rr),e(P,$e),g(lt,$e,null),e($e,ir),e($e,Zo),e(Zo,cr),e(P,dr),e(P,ze),g(pt,ze,null),e(ze,lr),e(ze,ht),e(ht,pr),e(ht,en),e(en,hr),e(ht,fr),e(P,mr),e(P,Z),g(ft,Z,null),e(Z,ur),e(Z,no),e(no,_r),e(no,so),e(so,gr),e(Z,vr),e(Z,tn),e(tn,Tr),e(P,xr),e(P,on),h(t,Un,p),h(t,fe,p),e(fe,Ee),e(Ee,nn),g(mt,nn,null),e(fe,br),e(fe,sn),e(sn,kr),h(t,Bn,p),h(t,I,p),g(ut,I,null),e(I,yr),e(I,an),e(an,Sr),e(I,wr),e(I,_t),e(_t,$r),e(_t,ao),e(ao,zr),e(_t,Er),e(I,qr),e(I,rn),e(rn,Pr),e(I,jr),e(I,qe),g(gt,qe,null),e(qe,Fr),e(qe,cn),e(cn,Cr),h(t,Rn,p),h(t,me,p),e(me,Pe),e(Pe,dn),g(vt,dn,null),e(me,Mr),e(me,ln),e(ln,Ar),h(t,Hn,p),h(t,S,p),g(Tt,S,null),e(S,Dr),e(S,pn),e(pn,Ir),e(S,Nr),e(S,W),e(W,ro),e(ro,Lr),e(W,Or),e(W,io),e(io,Gr),e(W,Wr),e(W,co),e(co,Vr),e(W,Ur),e(W,xt),e(xt,hn),e(hn,Br),e(xt,Rr),e(W,Hr),e(W,lo),e(lo,Jr),e(W,Yr),e(S,Xr),e(S,je),g(bt,je,null),e(je,Kr),e(je,X),e(X,Qr),e(X,kt),e(kt,fn),e(fn,Zr),e(kt,ei),e(X,ti),e(X,po),e(po,oi),e(X,ni),e(X,yt),e(yt,mn),e(mn,si),e(yt,ai),e(X,ri),e(S,ii),e(S,ee),g(St,ee,null),e(ee,ci),e(ee,wt),e(wt,di),e(wt,ho),e(ho,li),e(wt,pi),e(ee,hi),g(Fe,ee,null),e(S,fi),e(S,te),g($t,te,null),e(te,mi),e(te,ue),e(ue,ui),e(ue,un),e(un,_i),e(ue,gi),e(ue,fo),e(fo,vi),e(ue,Ti),e(te,xi),g(Ce,te,null),e(S,bi),e(S,Me),g(zt,Me,null),e(Me,ki),e(Me,Et),e(Et,yi),e(Et,mo),e(mo,Si),e(Et,wi),e(S,$i),e(S,Ae),g(qt,Ae,null),e(Ae,zi),e(Ae,Pt),e(Pt,Ei),e(Pt,uo),e(uo,qi),e(Pt,Pi),e(S,ji),e(S,De),g(jt,De,null),e(De,Fi),e(De,_n),e(_n,Ci),h(t,Jn,p),h(t,_e,p),e(_e,Ie),e(Ie,gn),g(Ft,gn,null),e(_e,Mi),e(_e,vn),e(vn,Ai),h(t,Y
n,p),h(t,R,p),g(Ct,R,null),e(R,Di),e(R,Mt),e(Mt,Ii),e(Mt,_o),e(_o,Ni),e(Mt,Li),e(R,Oi),e(R,At),e(At,Gi),e(At,Dt),e(Dt,Wi),e(At,Vi),e(R,Ui),e(R,V),g(It,V,null),e(V,Bi),e(V,ge),e(ge,Ri),e(ge,go),e(go,Hi),e(ge,Ji),e(ge,Tn),e(Tn,Yi),e(ge,Xi),e(V,Ki),g(Ne,V,null),e(V,Qi),e(V,xn),e(xn,Zi),e(V,ec),g(Nt,V,null),h(t,Xn,p),h(t,ve,p),e(ve,Le),e(Le,bn),g(Lt,bn,null),e(ve,tc),e(ve,kn),e(kn,oc),h(t,Kn,p),h(t,H,p),g(Ot,H,null),e(H,nc),e(H,Gt),e(Gt,sc),e(Gt,vo),e(vo,ac),e(Gt,rc),e(H,ic),e(H,Wt),e(Wt,cc),e(Wt,Vt),e(Vt,dc),e(Wt,lc),e(H,pc),e(H,U),g(Ut,U,null),e(U,hc),e(U,Te),e(Te,fc),e(Te,To),e(To,mc),e(Te,uc),e(Te,yn),e(yn,_c),e(Te,gc),e(U,vc),g(Oe,U,null),e(U,Tc),e(U,Sn),e(Sn,xc),e(U,bc),g(Bt,U,null),Qn=!0},p(t,[p]){const Rt={};p&2&&(Rt.$$scope={dirty:p,ctx:t}),Fe.$set(Rt);const wn={};p&2&&(wn.$$scope={dirty:p,ctx:t}),Ce.$set(wn);const $n={};p&2&&($n.$$scope={dirty:p,ctx:t}),Ne.$set($n);const zn={};p&2&&(zn.$$scope={dirty:p,ctx:t}),Oe.$set(zn)},i(t){Qn||(v(b.$$.fragment,t),v(Ue.$$.fragment,t),v(Ke.$$.fragment,t),v(Ze.$$.fragment,t),v(tt.$$.fragment,t),v(nt.$$.fragment,t),v(st.$$.fragment,t),v(rt.$$.fragment,t),v(it.$$.fragment,t),v(ct.$$.fragment,t),v(lt.$$.fragment,t),v(pt.$$.fragment,t),v(ft.$$.fragment,t),v(mt.$$.fragment,t),v(ut.$$.fragment,t),v(gt.$$.fragment,t),v(vt.$$.fragment,t),v(Tt.$$.fragment,t),v(bt.$$.fragment,t),v(St.$$.fragment,t),v(Fe.$$.fragment,t),v($t.$$.fragment,t),v(Ce.$$.fragment,t),v(zt.$$.fragment,t),v(qt.$$.fragment,t),v(jt.$$.fragment,t),v(Ft.$$.fragment,t),v(Ct.$$.fragment,t),v(It.$$.fragment,t),v(Ne.$$.fragment,t),v(Nt.$$.fragment,t),v(Lt.$$.fragment,t),v(Ot.$$.fragment,t),v(Ut.$$.fragment,t),v(Oe.$$.fragment,t),v(Bt.$$.fragment,t),Qn=!0)},o(t){T(b.$$.fragment,t),T(Ue.$$.fragment,t),T(Ke.$$.fragment,t),T(Ze.$$.fragment,t),T(tt.$$.fragment,t),T(nt.$$.fragment,t),T(st.$$.fragment,t),T(rt.$$.fragment,t),T(it.$$.fragment,t),T(ct.$$.fragment,t),T(lt.$$.fragment,t),T(pt.$$.fragment,t),T(ft.$$.fragment,t),T(mt.$$.fragment,t),T(ut.$$.fragment,t),T(gt.$$.fragment,t),T(vt.$$.fragment,t),T(Tt.$$.fragment,t),T(bt.$$.fragment,t),T(St.$$.fragment,t),T(Fe.$$.fragment,t),T($t.$$.fragment,t),T(Ce.$$.fragment,t),T(zt.$$.fragment,t),T(qt.$$.fragment,t),T(jt.$$.fragment,t),T(Ft.$$.fragment,t),T(Ct.$$.fragment,t),T(It.$$.fragment,t),T(Ne.$$.fragment,t),T(Nt.$$.fragment,t),T(Lt.$$.fragment,t),T(Ot.$$.fragment,t),T(Ut.$$.fragment,t),T(Oe.$$.fragment,t),T(Bt.$$.fragment,t),Qn=!1},d(t){o(f),t&&o(w),t&&o(m),x(b),t&&o($),t&&o(M),x(Ue),t&&o(qn),t&&o(O),t&&o(Pn),t&&o(Q),t&&o(jn),t&&o(ce),x(Ke),t&&o(Fn),t&&o(ke),t&&o(Cn),t&&o(G),t&&o(Mn),t&&o(q),t&&o(An),t&&o(Kt),t&&o(Dn),x(Ze,t),t&&o(In),t&&o(Qt),t&&o(Nn),x(tt,t),t&&o(Ln),t&&o(ye),t&&o(On),t&&o(de),x(nt),t&&o(Gn),t&&o(D),x(st),x(rt),t&&o(Wn),t&&o(he),x(it),t&&o(Vn),t&&o(P),x(ct),x(lt),x(pt),x(ft),t&&o(Un),t&&o(fe),x(mt),t&&o(Bn),t&&o(I),x(ut),x(gt),t&&o(Rn),t&&o(me),x(vt),t&&o(Hn),t&&o(S),x(Tt),x(bt),x(St),x(Fe),x($t),x(Ce),x(zt),x(qt),x(jt),t&&o(Jn),t&&o(_e),x(Ft),t&&o(Yn),t&&o(R),x(Ct),x(It),x(Ne),x(Nt),t&&o(Xn),t&&o(ve),x(Lt),t&&o(Kn),t&&o(H),x(Ot),x(Ut),x(Oe),x(Bt)}}}const 
Tl={local:"speech2text",sections:[{local:"overview",title:"Overview"},{local:"inference",title:"Inference"},{local:"transformers.Speech2TextConfig",title:"Speech2TextConfig"},{local:"transformers.Speech2TextTokenizer",title:"Speech2TextTokenizer"},{local:"transformers.Speech2TextFeatureExtractor",title:"Speech2TextFeatureExtractor"},{local:"transformers.Speech2TextProcessor",title:"Speech2TextProcessor"},{local:"transformers.Speech2TextModel",title:"Speech2TextModel"},{local:"transformers.Speech2TextForConditionalGeneration",title:"Speech2TextForConditionalGeneration"}],title:"Speech2Text"};function xl(Y,f,w){let{fw:m}=f;return Y.$$set=k=>{"fw"in k&&w(0,m=k.fw)},[m]}class zl extends ll{constructor(f){super();pl(this,f,xl,vl,hl,{fw:0})}}export{zl as default,Tl as metadata};
9908
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/deberta_v2.mdx-f34757fb.js
import{S as m_,i as g_,s as __,e as a,k as l,w as T,t as n,L as b_,c as r,d as t,m as d,a as i,x as k,h as s,b as c,J as e,g as f,y as w,q as y,o as D,B as E}from"../../chunks/vendor-b1433968.js";import{T as Ce}from"../../chunks/Tip-c3840994.js";import{D as X}from"../../chunks/Docstring-ff504c58.js";import{C as it}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Re}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function v_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function T_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n(`Although the recipe for forward pass needs to be defined within this function, one should call the `),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the registered hooks while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,`Although the recipe for forward pass needs to be defined within this function, one should call the `),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the registered hooks while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function k_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function w_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function y_(L){let 
h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function D_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function E_(L){let h,$,m,g,v,b,_,V,de,K,z,Y,I,ee,ce,S,pe,re,O,P,te,Z,q,x,ne,W,ie,se,H,le,ae,M,he,j,fe,ue,N,J,me,R,ge,U,oe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),V=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=a("p"),Y=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),S=a("code"),pe=n("model(inputs)"),re=n("."),O=l(),P=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=a("ul"),x=a("li"),ne=n("a single Tensor with "),W=a("code"),ie=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),le=n("model(inputs_ids)"),ae=l(),M=a("li"),he=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=a("code"),fe=n("model([input_ids, attention_mask])"),ue=n(" or "),N=a("code"),J=n("model([input_ids, attention_mask, token_type_ids])"),me=l(),R=a("li"),ge=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var F=i(h);$=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),m=d(p),g=r(p,"UL",{});var G=i(g);v=r(G,"LI",{});var ye=i(v);b=s(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(G),V=r(G,"LI",{});var we=i(V);de=s(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),G.forEach(t),K=d(p),z=r(p,"P",{});var C=i(z);Y=s(C,"This second option is useful when using "),I=r(C,"CODE",{});var _e=i(I);ee=s(_e,"tf.keras.Model.fit"),_e.forEach(t),ce=s(C,` method which currently 
requires having all the tensors in the first argument of the model call function: `),S=r(C,"CODE",{});var Te=i(S);pe=s(Te,"model(inputs)"),Te.forEach(t),re=s(C,"."),C.forEach(t),O=d(p),P=r(p,"P",{});var De=i(P);te=s(De,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),De.forEach(t),Z=d(p),q=r(p,"UL",{});var B=i(q);x=r(B,"LI",{});var A=i(x);ne=s(A,"a single Tensor with "),W=r(A,"CODE",{});var Ee=i(W);ie=s(Ee,"input_ids"),Ee.forEach(t),se=s(A," only and nothing else: "),H=r(A,"CODE",{});var $e=i(H);le=s($e,"model(inputs_ids)"),$e.forEach(t),A.forEach(t),ae=d(B),M=r(B,"LI",{});var Q=i(M);he=s(Q,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=r(Q,"CODE",{});var ve=i(j);fe=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),ue=s(Q," or "),N=r(Q,"CODE",{});var ke=i(N);J=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Q.forEach(t),me=d(B),R=r(B,"LI",{});var be=i(R);ge=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var Fe=i(U);oe=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),be.forEach(t),B.forEach(t)},m(p,F){f(p,h,F),e(h,$),f(p,m,F),f(p,g,F),e(g,v),e(v,b),e(g,_),e(g,V),e(V,de),f(p,K,F),f(p,z,F),e(z,Y),e(z,I),e(I,ee),e(z,ce),e(z,S),e(S,pe),e(z,re),f(p,O,F),f(p,P,F),e(P,te),f(p,Z,F),f(p,q,F),e(q,x),e(x,ne),e(x,W),e(W,ie),e(x,se),e(x,H),e(H,le),e(q,ae),e(q,M),e(M,he),e(M,j),e(j,fe),e(M,ue),e(M,N),e(N,J),e(q,me),e(q,R),e(R,ge),e(R,U),e(U,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(K),p&&t(z),p&&t(O),p&&t(P),p&&t(Z),p&&t(q)}}}function $_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function F_(L){let h,$,m,g,v,b,_,V,de,K,z,Y,I,ee,ce,S,pe,re,O,P,te,Z,q,x,ne,W,ie,se,H,le,ae,M,he,j,fe,ue,N,J,me,R,ge,U,oe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),V=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=a("p"),Y=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),S=a("code"),pe=n("model(inputs)"),re=n("."),O=l(),P=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=a("ul"),x=a("li"),ne=n("a single Tensor with "),W=a("code"),ie=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),le=n("model(inputs_ids)"),ae=l(),M=a("li"),he=n(`a list of varying length with one or several 
input Tensors IN THE ORDER given in the docstring: `),j=a("code"),fe=n("model([input_ids, attention_mask])"),ue=n(" or "),N=a("code"),J=n("model([input_ids, attention_mask, token_type_ids])"),me=l(),R=a("li"),ge=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var F=i(h);$=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),m=d(p),g=r(p,"UL",{});var G=i(g);v=r(G,"LI",{});var ye=i(v);b=s(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(G),V=r(G,"LI",{});var we=i(V);de=s(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),G.forEach(t),K=d(p),z=r(p,"P",{});var C=i(z);Y=s(C,"This second option is useful when using "),I=r(C,"CODE",{});var _e=i(I);ee=s(_e,"tf.keras.Model.fit"),_e.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(C,"CODE",{});var Te=i(S);pe=s(Te,"model(inputs)"),Te.forEach(t),re=s(C,"."),C.forEach(t),O=d(p),P=r(p,"P",{});var De=i(P);te=s(De,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),De.forEach(t),Z=d(p),q=r(p,"UL",{});var B=i(q);x=r(B,"LI",{});var A=i(x);ne=s(A,"a single Tensor with "),W=r(A,"CODE",{});var Ee=i(W);ie=s(Ee,"input_ids"),Ee.forEach(t),se=s(A," only and nothing else: "),H=r(A,"CODE",{});var $e=i(H);le=s($e,"model(inputs_ids)"),$e.forEach(t),A.forEach(t),ae=d(B),M=r(B,"LI",{});var Q=i(M);he=s(Q,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=r(Q,"CODE",{});var ve=i(j);fe=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),ue=s(Q," or "),N=r(Q,"CODE",{});var ke=i(N);J=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Q.forEach(t),me=d(B),R=r(B,"LI",{});var be=i(R);ge=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var Fe=i(U);oe=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),be.forEach(t),B.forEach(t)},m(p,F){f(p,h,F),e(h,$),f(p,m,F),f(p,g,F),e(g,v),e(v,b),e(g,_),e(g,V),e(V,de),f(p,K,F),f(p,z,F),e(z,Y),e(z,I),e(I,ee),e(z,ce),e(z,S),e(S,pe),e(z,re),f(p,O,F),f(p,P,F),e(P,te),f(p,Z,F),f(p,q,F),e(q,x),e(x,ne),e(x,W),e(W,ie),e(x,se),e(x,H),e(H,le),e(q,ae),e(q,M),e(M,he),e(M,j),e(j,fe),e(M,ue),e(M,N),e(N,J),e(q,me),e(q,R),e(R,ge),e(R,U),e(U,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(K),p&&t(z),p&&t(O),p&&t(P),p&&t(Z),p&&t(q)}}}function V_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function z_(L){let 
h,$,m,g,v,b,_,V,de,K,z,Y,I,ee,ce,S,pe,re,O,P,te,Z,q,x,ne,W,ie,se,H,le,ae,M,he,j,fe,ue,N,J,me,R,ge,U,oe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),V=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=a("p"),Y=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),S=a("code"),pe=n("model(inputs)"),re=n("."),O=l(),P=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=a("ul"),x=a("li"),ne=n("a single Tensor with "),W=a("code"),ie=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),le=n("model(inputs_ids)"),ae=l(),M=a("li"),he=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=a("code"),fe=n("model([input_ids, attention_mask])"),ue=n(" or "),N=a("code"),J=n("model([input_ids, attention_mask, token_type_ids])"),me=l(),R=a("li"),ge=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var F=i(h);$=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),m=d(p),g=r(p,"UL",{});var G=i(g);v=r(G,"LI",{});var ye=i(v);b=s(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(G),V=r(G,"LI",{});var we=i(V);de=s(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),G.forEach(t),K=d(p),z=r(p,"P",{});var C=i(z);Y=s(C,"This second option is useful when using "),I=r(C,"CODE",{});var _e=i(I);ee=s(_e,"tf.keras.Model.fit"),_e.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(C,"CODE",{});var Te=i(S);pe=s(Te,"model(inputs)"),Te.forEach(t),re=s(C,"."),C.forEach(t),O=d(p),P=r(p,"P",{});var De=i(P);te=s(De,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),De.forEach(t),Z=d(p),q=r(p,"UL",{});var B=i(q);x=r(B,"LI",{});var A=i(x);ne=s(A,"a single Tensor with "),W=r(A,"CODE",{});var Ee=i(W);ie=s(Ee,"input_ids"),Ee.forEach(t),se=s(A," only and nothing else: "),H=r(A,"CODE",{});var $e=i(H);le=s($e,"model(inputs_ids)"),$e.forEach(t),A.forEach(t),ae=d(B),M=r(B,"LI",{});var Q=i(M);he=s(Q,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=r(Q,"CODE",{});var ve=i(j);fe=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),ue=s(Q," or "),N=r(Q,"CODE",{});var ke=i(N);J=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Q.forEach(t),me=d(B),R=r(B,"LI",{});var be=i(R);ge=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var Fe=i(U);oe=s(Fe,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Fe.forEach(t),be.forEach(t),B.forEach(t)},m(p,F){f(p,h,F),e(h,$),f(p,m,F),f(p,g,F),e(g,v),e(v,b),e(g,_),e(g,V),e(V,de),f(p,K,F),f(p,z,F),e(z,Y),e(z,I),e(I,ee),e(z,ce),e(z,S),e(S,pe),e(z,re),f(p,O,F),f(p,P,F),e(P,te),f(p,Z,F),f(p,q,F),e(q,x),e(x,ne),e(x,W),e(W,ie),e(x,se),e(x,H),e(H,le),e(q,ae),e(q,M),e(M,he),e(M,j),e(j,fe),e(M,ue),e(M,N),e(N,J),e(q,me),e(q,R),e(R,ge),e(R,U),e(U,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(K),p&&t(z),p&&t(O),p&&t(P),p&&t(Z),p&&t(q)}}}function q_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function M_(L){let h,$,m,g,v,b,_,V,de,K,z,Y,I,ee,ce,S,pe,re,O,P,te,Z,q,x,ne,W,ie,se,H,le,ae,M,he,j,fe,ue,N,J,me,R,ge,U,oe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),V=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=a("p"),Y=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),S=a("code"),pe=n("model(inputs)"),re=n("."),O=l(),P=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=a("ul"),x=a("li"),ne=n("a single Tensor with "),W=a("code"),ie=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),le=n("model(inputs_ids)"),ae=l(),M=a("li"),he=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=a("code"),fe=n("model([input_ids, attention_mask])"),ue=n(" or "),N=a("code"),J=n("model([input_ids, attention_mask, token_type_ids])"),me=l(),R=a("li"),ge=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var F=i(h);$=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),m=d(p),g=r(p,"UL",{});var G=i(g);v=r(G,"LI",{});var ye=i(v);b=s(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(G),V=r(G,"LI",{});var we=i(V);de=s(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),G.forEach(t),K=d(p),z=r(p,"P",{});var C=i(z);Y=s(C,"This second option is useful when using "),I=r(C,"CODE",{});var _e=i(I);ee=s(_e,"tf.keras.Model.fit"),_e.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(C,"CODE",{});var Te=i(S);pe=s(Te,"model(inputs)"),Te.forEach(t),re=s(C,"."),C.forEach(t),O=d(p),P=r(p,"P",{});var De=i(P);te=s(De,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),De.forEach(t),Z=d(p),q=r(p,"UL",{});var B=i(q);x=r(B,"LI",{});var A=i(x);ne=s(A,"a single Tensor with "),W=r(A,"CODE",{});var Ee=i(W);ie=s(Ee,"input_ids"),Ee.forEach(t),se=s(A," only and nothing else: "),H=r(A,"CODE",{});var $e=i(H);le=s($e,"model(inputs_ids)"),$e.forEach(t),A.forEach(t),ae=d(B),M=r(B,"LI",{});var Q=i(M);he=s(Q,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=r(Q,"CODE",{});var ve=i(j);fe=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),ue=s(Q," or "),N=r(Q,"CODE",{});var ke=i(N);J=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Q.forEach(t),me=d(B),R=r(B,"LI",{});var be=i(R);ge=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var Fe=i(U);oe=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),be.forEach(t),B.forEach(t)},m(p,F){f(p,h,F),e(h,$),f(p,m,F),f(p,g,F),e(g,v),e(v,b),e(g,_),e(g,V),e(V,de),f(p,K,F),f(p,z,F),e(z,Y),e(z,I),e(I,ee),e(z,ce),e(z,S),e(S,pe),e(z,re),f(p,O,F),f(p,P,F),e(P,te),f(p,Z,F),f(p,q,F),e(q,x),e(x,ne),e(x,W),e(W,ie),e(x,se),e(x,H),e(H,le),e(q,ae),e(q,M),e(M,he),e(M,j),e(j,fe),e(M,ue),e(M,N),e(N,J),e(q,me),e(q,R),e(R,ge),e(R,U),e(U,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(K),p&&t(z),p&&t(O),p&&t(P),p&&t(Z),p&&t(q)}}}function x_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function C_(L){let h,$,m,g,v,b,_,V,de,K,z,Y,I,ee,ce,S,pe,re,O,P,te,Z,q,x,ne,W,ie,se,H,le,ae,M,he,j,fe,ue,N,J,me,R,ge,U,oe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),V=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=a("p"),Y=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),S=a("code"),pe=n("model(inputs)"),re=n("."),O=l(),P=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=a("ul"),x=a("li"),ne=n("a single Tensor with "),W=a("code"),ie=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),le=n("model(inputs_ids)"),ae=l(),M=a("li"),he=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=a("code"),fe=n("model([input_ids, attention_mask])"),ue=n(" or "),N=a("code"),J=n("model([input_ids, attention_mask, token_type_ids])"),me=l(),R=a("li"),ge=n(`a dictionary with one or several input Tensors 
associated to the input names given in the docstring: `),U=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var F=i(h);$=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),m=d(p),g=r(p,"UL",{});var G=i(g);v=r(G,"LI",{});var ye=i(v);b=s(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(G),V=r(G,"LI",{});var we=i(V);de=s(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),G.forEach(t),K=d(p),z=r(p,"P",{});var C=i(z);Y=s(C,"This second option is useful when using "),I=r(C,"CODE",{});var _e=i(I);ee=s(_e,"tf.keras.Model.fit"),_e.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(C,"CODE",{});var Te=i(S);pe=s(Te,"model(inputs)"),Te.forEach(t),re=s(C,"."),C.forEach(t),O=d(p),P=r(p,"P",{});var De=i(P);te=s(De,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),De.forEach(t),Z=d(p),q=r(p,"UL",{});var B=i(q);x=r(B,"LI",{});var A=i(x);ne=s(A,"a single Tensor with "),W=r(A,"CODE",{});var Ee=i(W);ie=s(Ee,"input_ids"),Ee.forEach(t),se=s(A," only and nothing else: "),H=r(A,"CODE",{});var $e=i(H);le=s($e,"model(inputs_ids)"),$e.forEach(t),A.forEach(t),ae=d(B),M=r(B,"LI",{});var Q=i(M);he=s(Q,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),j=r(Q,"CODE",{});var ve=i(j);fe=s(ve,"model([input_ids, attention_mask])"),ve.forEach(t),ue=s(Q," or "),N=r(Q,"CODE",{});var ke=i(N);J=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Q.forEach(t),me=d(B),R=r(B,"LI",{});var be=i(R);ge=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var Fe=i(U);oe=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),be.forEach(t),B.forEach(t)},m(p,F){f(p,h,F),e(h,$),f(p,m,F),f(p,g,F),e(g,v),e(v,b),e(g,_),e(g,V),e(V,de),f(p,K,F),f(p,z,F),e(z,Y),e(z,I),e(I,ee),e(z,ce),e(z,S),e(S,pe),e(z,re),f(p,O,F),f(p,P,F),e(P,te),f(p,Z,F),f(p,q,F),e(q,x),e(x,ne),e(x,W),e(W,ie),e(x,se),e(x,H),e(H,le),e(q,ae),e(q,M),e(M,he),e(M,j),e(j,fe),e(M,ue),e(M,N),e(N,J),e(q,me),e(q,R),e(R,ge),e(R,U),e(U,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(K),p&&t(z),p&&t(O),p&&t(P),p&&t(Z),p&&t(q)}}}function R_(L){let h,$,m,g,v;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var V=i(m);g=s(V,"Module"),V.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,$),e(h,m),e(m,g),e(h,v)},d(b){b&&t(h)}}}function P_(L){let 
h,$,m,g,v,b,_,V,de,K,z,Y,I,ee,ce,S,pe,re,O,P,te,Z,q,x,ne,W,ie,se,H,le,ae,M,he,j,fe,ue,N,J,me,R,ge,U,oe,p,F,G,ye,we,C,_e,Te,De,B,A,Ee,$e,Q,ve,ke,be,Fe,Dl,Os,Fa,El,$l,Fl,Ws,Va,Vl,zl,ql,Hs,za,Ml,xl,hi,Ze,Cl,Ho,Rl,Pl,Uo,Bl,jl,Qo,Al,Ll,fi,Vt,ro,qa,Go,Il,Ma,Sl,ui,lt,Ko,Nl,zt,Ol,Us,Wl,Hl,Jo,Ul,Ql,Gl,qt,Kl,Qs,Jl,Xl,Gs,Yl,Zl,mi,Mt,io,xa,Xo,ed,Ca,td,gi,ze,Yo,od,Zo,nd,en,sd,ad,rd,pt,tn,id,Ra,ld,dd,on,Pa,cd,pd,Ba,hd,fd,lo,nn,ud,xt,md,ja,gd,_d,Aa,bd,vd,Td,et,sn,kd,La,wd,yd,an,Dd,Ct,Ed,Ia,$d,Fd,Sa,Vd,zd,qd,Na,_i,Rt,co,Oa,rn,Md,Wa,xd,bi,Je,ln,Cd,dn,Rd,cn,Pd,Bd,jd,pn,Ad,hn,Ld,Id,Sd,Ie,fn,Nd,Pt,Od,Ks,Wd,Hd,Ha,Ud,Qd,Gd,po,Kd,Ua,Jd,Xd,un,vi,Bt,ho,Qa,mn,Yd,Ga,Zd,Ti,dt,gn,ec,Ka,tc,oc,tt,_n,nc,Ja,sc,ac,Xa,rc,ic,fo,ki,jt,uo,Ya,bn,lc,Za,dc,wi,Xe,vn,cc,At,pc,er,hc,fc,Tn,uc,mc,gc,kn,_c,wn,bc,vc,Tc,Se,yn,kc,Lt,wc,Js,yc,Dc,tr,Ec,$c,Fc,mo,Vc,or,zc,qc,Dn,yi,It,go,nr,En,Mc,sr,xc,Di,Pe,$n,Cc,ar,Rc,Pc,Fn,Bc,Vn,jc,Ac,Lc,zn,Ic,qn,Sc,Nc,Oc,Ve,Mn,Wc,St,Hc,Xs,Uc,Qc,rr,Gc,Kc,Jc,_o,Xc,ir,Yc,Zc,xn,ep,lr,tp,op,Cn,Ei,Nt,bo,dr,Rn,np,cr,sp,$i,Be,Pn,ap,pr,rp,ip,Bn,lp,jn,dp,cp,pp,An,hp,Ln,fp,up,mp,Ne,In,gp,Ot,_p,Ys,bp,vp,hr,Tp,kp,wp,vo,yp,fr,Dp,Ep,Sn,Fi,Wt,To,ur,Nn,$p,mr,Fp,Vi,je,On,Vp,Ht,zp,gr,qp,Mp,_r,xp,Cp,Rp,Wn,Pp,Hn,Bp,jp,Ap,Un,Lp,Qn,Ip,Sp,Np,Oe,Gn,Op,Ut,Wp,Zs,Hp,Up,br,Qp,Gp,Kp,ko,Jp,vr,Xp,Yp,Kn,zi,Qt,wo,Tr,Jn,Zp,kr,eh,qi,Ae,Xn,th,Yn,oh,Zn,nh,sh,ah,es,rh,ts,ih,lh,dh,yo,ch,We,os,ph,Gt,hh,ea,fh,uh,wr,mh,gh,_h,Do,bh,yr,vh,Th,ns,Mi,Kt,Eo,Dr,ss,kh,Er,wh,xi,ct,as,yh,$r,Dh,Eh,ot,rs,$h,Fr,Fh,Vh,is,zh,Vr,qh,Mh,xh,Ye,Ch,zr,Rh,Ph,qr,Bh,jh,Mr,Ah,Lh,xr,Ih,Sh,Ci,Jt,$o,Cr,ls,Nh,Rr,Oh,Ri,Le,ds,Wh,Xt,Hh,Pr,Uh,Qh,cs,Gh,Kh,Jh,ps,Xh,hs,Yh,Zh,ef,Fo,tf,He,fs,of,Yt,nf,ta,sf,af,Br,rf,lf,df,Vo,cf,jr,pf,hf,us,Pi,Zt,zo,Ar,ms,ff,Lr,uf,Bi,qe,gs,mf,Ir,gf,_f,_s,bf,bs,vf,Tf,kf,vs,wf,Ts,yf,Df,Ef,qo,$f,Ue,ks,Ff,eo,Vf,oa,zf,qf,Sr,Mf,xf,Cf,Mo,Rf,Nr,Pf,Bf,ws,ji,to,xo,Or,ys,jf,Wr,Af,Ai,Me,Ds,Lf,Hr,If,Sf,Es,Nf,$s,Of,Wf,Hf,Fs,Uf,Vs,Qf,Gf,Kf,Co,Jf,Qe,zs,Xf,oo,Yf,na,Zf,eu,Ur,tu,ou,nu,Ro,su,Qr,au,ru,qs,Li,no,Po,Gr,Ms,iu,Kr,lu,Ii,xe,xs,du,so,cu,Jr,pu,hu,Xr,fu,uu,mu,Cs,gu,Rs,_u,bu,vu,Ps,Tu,Bs,ku,wu,yu,Bo,Du,Ge,js,Eu,ao,$u,sa,Fu,Vu,Yr,zu,qu,Mu,jo,xu,Zr,Cu,Ru,As,Si;return b=new Re({}),ee=new Re({}),Go=new Re({}),Ko=new X({props:{name:"class transformers.DebertaV2Config",anchor:"transformers.DebertaV2Config",parameters:[{name:"vocab_size",val:" = 128100"},{name:"hidden_size",val:" = 1536"},{name:"num_hidden_layers",val:" = 24"},{name:"num_attention_heads",val:" = 24"},{name:"intermediate_size",val:" = 6144"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 0"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-07"},{name:"relative_attention",val:" = False"},{name:"max_relative_positions",val:" = -1"},{name:"pad_token_id",val:" = 0"},{name:"position_biased_input",val:" = True"},{name:"pos_att_type",val:" = None"},{name:"pooler_dropout",val:" = 0"},{name:"pooler_hidden_act",val:" = 'gelu'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/configuration_deberta_v2.py#L31",parametersDescription:[{anchor:"transformers.DebertaV2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128100) &#x2014; Vocabulary size of the DeBERTa-v2 model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Model">DebertaV2Model</a>.`,name:"vocab_size"},{anchor:"transformers.DebertaV2Config.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1536) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.DebertaV2Config.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.DebertaV2Config.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.DebertaV2Config.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 6144) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.DebertaV2Config.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code>, <code>&quot;gelu&quot;</code>, <code>&quot;tanh&quot;</code>, <code>&quot;gelu_fast&quot;</code>, <code>&quot;mish&quot;</code>, <code>&quot;linear&quot;</code>, <code>&quot;sigmoid&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.DebertaV2Config.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.DebertaV2Config.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.DebertaV2Config.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.DebertaV2Config.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.DebertaV2Config.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.DebertaV2Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.DebertaV2Config.relative_attention",description:`<strong>relative_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether use relative position encoding.`,name:"relative_attention"},{anchor:"transformers.DebertaV2Config.max_relative_positions",description:`<strong>max_relative_positions</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The range of relative positions <code>[-max_position_embeddings, max_position_embeddings]</code>. Use the same value as <code>max_position_embeddings</code>.`,name:"max_relative_positions"},{anchor:"transformers.DebertaV2Config.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The value used to pad input_ids.`,name:"pad_token_id"},{anchor:"transformers.DebertaV2Config.position_biased_input",description:`<strong>position_biased_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether add absolute position embedding to content embedding.`,name:"position_biased_input"},{anchor:"transformers.DebertaV2Config.pos_att_type",description:`<strong>pos_att_type</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The type of relative position attention, it can be a combination of <code>[&quot;p2c&quot;, &quot;c2p&quot;, &quot;p2p&quot;]</code>, e.g. 
<code>[&quot;p2c&quot;]</code>, <code>[&quot;p2c&quot;, &quot;c2p&quot;]</code>, <code>[&quot;p2c&quot;, &quot;c2p&quot;, &apos;p2p&quot;]</code>.`,name:"pos_att_type"},{anchor:"transformers.DebertaV2Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, optional, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"}]}}),Xo=new Re({}),Yo=new X({props:{name:"class transformers.DebertaV2Tokenizer",anchor:"transformers.DebertaV2Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = False"},{name:"split_by_punct",val:" = False"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/tokenization_deberta_v2.py#L53",parametersDescription:[{anchor:"transformers.DebertaV2Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.DebertaV2Tokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.DebertaV2Tokenizer.bos_token",description:`<strong>bos_token</strong> (<code>string</code>, <em>optional</em>, defaults to &#x201D;[CLS]&#x201D;) &#x2014; The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.`,name:"bos_token"},{anchor:"transformers.DebertaV2Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>string</code>, <em>optional</em>, defaults to &#x201D;[SEP]&#x201D;) &#x2014; The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.`,name:"eos_token"},{anchor:"transformers.DebertaV2Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.DebertaV2Tokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.DebertaV2Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.DebertaV2Tokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.DebertaV2Tokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.DebertaV2Tokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),tn=new X({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.DebertaV2Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/tokenization_deberta_v2.py#L176",parametersDescription:[{anchor:"transformers.DebertaV2Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.DebertaV2Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),nn=new X({props:{name:"get_special_tokens_mask",anchor:"transformers.DebertaV2Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"},{name:"already_has_special_tokens",val:" = 
False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/tokenization_deberta_v2.py#L200",parametersDescription:[{anchor:"transformers.DebertaV2Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.DebertaV2Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.DebertaV2Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),sn=new X({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.DebertaV2Tokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/tokenization_deberta_v2.py#L226",parametersDescription:[{anchor:"transformers.DebertaV2Tokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.DebertaV2Tokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),an=new it({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),rn=new Re({}),ln=new X({props:{name:"class transformers.DebertaV2Model",anchor:"transformers.DebertaV2Model",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L989",parametersDescription:[{anchor:"transformers.DebertaV2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fn=new X({props:{name:"forward",anchor:"transformers.DebertaV2Model.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1013",parametersDescription:[{anchor:"transformers.DebertaV2Model.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaV2Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaV2Model.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaV2Model.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaV2Model.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaV2Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaV2Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaV2Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),po=new Ce({props:{$$slots:{default:[v_]},$$scope:{ctx:L}}}),un=new it({props:{code:`from transformers import DebertaV2Tokenizer, DebertaV2Model 
import torch tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge') model = DebertaV2Model.from_pretrained('microsoft/deberta-v2-xlarge') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, DebertaV2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaV2Model.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),mn=new Re({}),gn=new X({props:{name:"class transformers.DebertaV2PreTrainedModel",anchor:"transformers.DebertaV2PreTrainedModel",parameters:[{name:"config",val:": PretrainedConfig"},{name:"*inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L894"}}),_n=new X({props:{name:"_forward_unimplemented",anchor:"None",parameters:[{name:"*input",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/torch/nn/modules/module.py#L190"}}),fo=new Ce({props:{$$slots:{default:[T_]},$$scope:{ctx:L}}}),bn=new Re({}),vn=new X({props:{name:"class transformers.DebertaV2ForMaskedLM",anchor:"transformers.DebertaV2ForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1102",parametersDescription:[{anchor:"transformers.DebertaV2ForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yn=new X({props:{name:"forward",anchor:"transformers.DebertaV2ForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1121",parametersDescription:[{anchor:"transformers.DebertaV2ForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaV2ForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, 
sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),mo=new Ce({props:{$$slots:{default:[k_]},$$scope:{ctx:L}}}),Dn=new it({props:{code:`from transformers import DebertaV2Tokenizer, DebertaV2ForMaskedLM import torch tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge') model = DebertaV2ForMaskedLM.from_pretrained('microsoft/deberta-v2-xlarge') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, DebertaV2ForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaV2ForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),En=new Re({}),$n=new X({props:{name:"class transformers.DebertaV2ForSequenceClassification",anchor:"transformers.DebertaV2ForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1237",parametersDescription:[{anchor:"transformers.DebertaV2ForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mn=new X({props:{name:"forward",anchor:"transformers.DebertaV2ForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1262",parametersDescription:[{anchor:"transformers.DebertaV2ForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaV2ForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_o=new Ce({props:{$$slots:{default:[w_]},$$scope:{ctx:L}}}),xn=new it({props:{code:`from transformers import DebertaV2Tokenizer, DebertaV2ForSequenceClassification import torch tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge') model = DebertaV2ForSequenceClassification.from_pretrained('microsoft/deberta-v2-xlarge') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, DebertaV2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaV2ForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Cn=new it({props:{code:`from transformers import DebertaV2Tokenizer, DebertaV2ForSequenceClassification import torch tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge') model = DebertaV2ForSequenceClassification.from_pretrained('microsoft/deberta-v2-xlarge', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, DebertaV2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaV2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Rn=new Re({}),Pn=new X({props:{name:"class transformers.DebertaV2ForTokenClassification",anchor:"transformers.DebertaV2ForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1356",parametersDescription:[{anchor:"transformers.DebertaV2ForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),In=new X({props:{name:"forward",anchor:"transformers.DebertaV2ForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1370",parametersDescription:[{anchor:"transformers.DebertaV2ForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaV2ForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vo=new Ce({props:{$$slots:{default:[y_]},$$scope:{ctx:L}}}),Sn=new it({props:{code:`from transformers import DebertaV2Tokenizer, DebertaV2ForTokenClassification import torch tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge') model = DebertaV2ForTokenClassification.from_pretrained('microsoft/deberta-v2-xlarge') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, DebertaV2ForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaV2ForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog 
is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Nn=new Re({}),On=new X({props:{name:"class transformers.DebertaV2ForQuestionAnswering",anchor:"transformers.DebertaV2ForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1442",parametersDescription:[{anchor:"transformers.DebertaV2ForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gn=new X({props:{name:"forward",anchor:"transformers.DebertaV2ForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L1455",parametersDescription:[{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.DebertaV2ForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ko=new Ce({props:{$$slots:{default:[D_]},$$scope:{ctx:L}}}),Kn=new it({props:{code:`from transformers import DebertaV2Tokenizer, DebertaV2ForQuestionAnswering import torch tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge') model = DebertaV2ForQuestionAnswering.from_pretrained('microsoft/deberta-v2-xlarge') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = 
torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, DebertaV2ForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaV2ForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Jn=new Re({}),Xn=new X({props:{name:"class transformers.TFDebertaV2Model",anchor:"transformers.TFDebertaV2Model",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1213",parametersDescription:[{anchor:"transformers.TFDebertaV2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yo=new Ce({props:{$$slots:{default:[E_]},$$scope:{ctx:L}}}),os=new X({props:{name:"call",anchor:"transformers.TFDebertaV2Model.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1219",parametersDescription:[{anchor:"transformers.TFDebertaV2Model.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaV2Model.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaV2Model.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaV2Model.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaV2Model.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaV2Model.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaV2Model.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaV2Model.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Do=new Ce({props:{$$slots:{default:[$_]},$$scope:{ctx:L}}}),ns=new it({props:{code:`from transformers import DebertaV2Tokenizer, TFDebertaV2Model import tensorflow as tf tokenizer = DebertaV2Tokenizer.from_pretrained('kamalkraj/deberta-v2-xlarge') model = TFDebertaV2Model.from_pretrained('kamalkraj/deberta-v2-xlarge') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, TFDebertaV2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaV2Model.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, 
return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ss=new Re({}),as=new X({props:{name:"class transformers.TFDebertaV2PreTrainedModel",anchor:"transformers.TFDebertaV2PreTrainedModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1118"}}),rs=new X({props:{name:"call",anchor:"None",parameters:[{name:"inputs",val:""},{name:"training",val:" = None"},{name:"mask",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/keras/engine/training.py#L450",returnDescription:` <p>A tensor if there is a single output, or a list of tensors if there are more than one outputs.</p> `}}),ls=new Re({}),ds=new X({props:{name:"class transformers.TFDebertaV2ForMaskedLM",anchor:"transformers.TFDebertaV2ForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1276",parametersDescription:[{anchor:"transformers.TFDebertaV2ForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fo=new Ce({props:{$$slots:{default:[F_]},$$scope:{ctx:L}}}),fs=new X({props:{name:"call",anchor:"transformers.TFDebertaV2ForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1292",parametersDescription:[{anchor:"transformers.TFDebertaV2ForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaV2ForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> 
<p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Vo=new Ce({props:{$$slots:{default:[V_]},$$scope:{ctx:L}}}),us=new it({props:{code:`from transformers import DebertaV2Tokenizer, TFDebertaV2ForMaskedLM import tensorflow as tf tokenizer = DebertaV2Tokenizer.from_pretrained('kamalkraj/deberta-v2-xlarge') model = TFDebertaV2ForMaskedLM.from_pretrained('kamalkraj/deberta-v2-xlarge') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, TFDebertaV2ForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaV2ForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ms=new Re({}),gs=new X({props:{name:"class transformers.TFDebertaV2ForSequenceClassification",anchor:"transformers.TFDebertaV2ForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1376",parametersDescription:[{anchor:"transformers.TFDebertaV2ForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qo=new Ce({props:{$$slots:{default:[z_]},$$scope:{ctx:L}}}),ks=new X({props:{name:"call",anchor:"transformers.TFDebertaV2ForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1394",parametersDescription:[{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaV2ForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Mo=new Ce({props:{$$slots:{default:[q_]},$$scope:{ctx:L}}}),ws=new it({props:{code:`from transformers import DebertaV2Tokenizer, TFDebertaV2ForSequenceClassification import tensorflow as tf tokenizer = DebertaV2Tokenizer.from_pretrained('kamalkraj/deberta-v2-xlarge') model = TFDebertaV2ForSequenceClassification.from_pretrained('kamalkraj/deberta-v2-xlarge') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, TFDebertaV2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
TFDebertaV2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ys=new Re({}),Ds=new X({props:{name:"class transformers.TFDebertaV2ForTokenClassification",anchor:"transformers.TFDebertaV2ForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1479",parametersDescription:[{anchor:"transformers.TFDebertaV2ForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Co=new Ce({props:{$$slots:{default:[M_]},$$scope:{ctx:L}}}),zs=new X({props:{name:"call",anchor:"transformers.TFDebertaV2ForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1491",parametersDescription:[{anchor:"transformers.TFDebertaV2ForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, 
<code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaV2ForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ro=new Ce({props:{$$slots:{default:[x_]},$$scope:{ctx:L}}}),qs=new it({props:{code:`from transformers import DebertaV2Tokenizer, TFDebertaV2ForTokenClassification import tensorflow as tf tokenizer = DebertaV2Tokenizer.from_pretrained('kamalkraj/deberta-v2-xlarge') model = 
TFDebertaV2ForTokenClassification.from_pretrained('kamalkraj/deberta-v2-xlarge') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, TFDebertaV2ForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaV2ForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ms=new Re({}),xs=new X({props:{name:"class transformers.TFDebertaV2ForQuestionAnswering",anchor:"transformers.TFDebertaV2ForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1573",parametersDescription:[{anchor:"transformers.TFDebertaV2ForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bo=new Ce({props:{$$slots:{default:[C_]},$$scope:{ctx:L}}}),js=new X({props:{name:"call",anchor:"transformers.TFDebertaV2ForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py#L1584",parametersDescription:[{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer">DebertaV2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFDebertaV2ForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config" >DebertaV2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),jo=new Ce({props:{$$slots:{default:[R_]},$$scope:{ctx:L}}}),As=new it({props:{code:`from transformers import DebertaV2Tokenizer, TFDebertaV2ForQuestionAnswering import tensorflow as tf tokenizer = DebertaV2Tokenizer.from_pretrained('kamalkraj/deberta-v2-xlarge') model = TFDebertaV2ForQuestionAnswering.from_pretrained('kamalkraj/deberta-v2-xlarge') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) 
start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaV2Tokenizer, TFDebertaV2ForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaV2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaV2ForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-v2-xlarge&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){h=a("meta"),$=l(),m=a("h1"),g=a("a"),v=a("span"),T(b.$$.fragment),_=l(),V=a("span"),de=n("DeBERTa-v2"),K=l(),z=a("h2"),Y=a("a"),I=a("span"),T(ee.$$.fragment),ce=l(),S=a("span"),pe=n("Overview"),re=l(),O=a("p"),P=n("The DeBERTa model was proposed in "),te=a("a"),Z=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),q=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen It is based on Google\u2019s BERT model released in 2018 and Facebook\u2019s RoBERTa model released in 2019.`),x=l(),ne=a("p"),W=n(`It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa.`),ie=l(),se=a("p"),H=n("The abstract from the paper is the following:"),le=l(),ae=a("p"),M=a("em"),he=n(`Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. 
We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at `),j=a("a"),fe=n("https://github.com/microsoft/DeBERTa"),ue=n("."),N=l(),J=a("p"),me=n("The following information is visible directly on the "),R=a("a"),ge=n(`original implementation repository`),U=n(`. DeBERTa v2 is the second version of the DeBERTa model. It includes the 1.5B model used for the SuperGLUE single-model submission and achieving 89.9, versus human baseline 89.8. You can find more details about this submission in the authors\u2019 `),oe=a("a"),p=n("blog"),F=l(),G=a("p"),ye=n("New in v2:"),we=l(),C=a("ul"),_e=a("li"),Te=a("strong"),De=n("Vocabulary"),B=n(` In v2 the tokenizer is changed to use a new vocabulary of size 128K built from the training data. Instead of a GPT2-based tokenizer, the tokenizer is now `),A=a("a"),Ee=n("sentencepiece-based"),$e=n(" tokenizer."),Q=l(),ve=a("li"),ke=a("strong"),be=n("nGiE(nGram Induced Input Encoding)"),Fe=n(` The DeBERTa-v2 model uses an additional convolution layer aside with the first transformer layer to better learn the local dependency of input tokens.`),Dl=l(),Os=a("li"),Fa=a("strong"),El=n("Sharing position projection matrix with content projection matrix in attention layer"),$l=n(` Based on previous experiments, this can save parameters without affecting the performance.`),Fl=l(),Ws=a("li"),Va=a("strong"),Vl=n("Apply bucket to encode relative positions"),zl=n(` The DeBERTa-v2 model uses log bucket to encode relative positions similar to T5.`),ql=l(),Hs=a("li"),za=a("strong"),Ml=n("900M model & 1.5B model"),xl=n(` Two additional model sizes are available: 900M and 1.5B, which significantly improves the performance of downstream tasks.`),hi=l(),Ze=a("p"),Cl=n("This model was contributed by "),Ho=a("a"),Rl=n("DeBERTa"),Pl=n(`. This model TF 2.0 implementation was contributed by `),Uo=a("a"),Bl=n("kamalkraj"),jl=n(". The original code can be found "),Qo=a("a"),Al=n("here"),Ll=n("."),fi=l(),Vt=a("h2"),ro=a("a"),qa=a("span"),T(Go.$$.fragment),Il=l(),Ma=a("span"),Sl=n("DebertaV2Config"),ui=l(),lt=a("div"),T(Ko.$$.fragment),Nl=l(),zt=a("p"),Ol=n("This is the configuration class to store the configuration of a "),Us=a("a"),Wl=n("DebertaV2Model"),Hl=n(`. It is used to instantiate a DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa `),Jo=a("a"),Ul=n("microsoft/deberta-v2-xlarge"),Ql=n(" architecture."),Gl=l(),qt=a("p"),Kl=n("Configuration objects inherit from "),Qs=a("a"),Jl=n("PretrainedConfig"),Xl=n(` and can be used to control the model outputs. Read the documentation from `),Gs=a("a"),Yl=n("PretrainedConfig"),Zl=n(" for more information."),mi=l(),Mt=a("h2"),io=a("a"),xa=a("span"),T(Xo.$$.fragment),ed=l(),Ca=a("span"),td=n("DebertaV2Tokenizer"),gi=l(),ze=a("div"),T(Yo.$$.fragment),od=l(),Zo=a("p"),nd=n("Constructs a DeBERTa-v2 tokenizer. 
Based on "),en=a("a"),sd=n("SentencePiece"),ad=n("."),rd=l(),pt=a("div"),T(tn.$$.fragment),id=l(),Ra=a("p"),ld=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:`),dd=l(),on=a("ul"),Pa=a("li"),cd=n("single sequence: [CLS] X [SEP]"),pd=l(),Ba=a("li"),hd=n("pair of sequences: [CLS] A [SEP] B [SEP]"),fd=l(),lo=a("div"),T(nn.$$.fragment),ud=l(),xt=a("p"),md=n(`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ja=a("code"),gd=n("prepare_for_model"),_d=n(" or "),Aa=a("code"),bd=n("encode_plus"),vd=n(" methods."),Td=l(),et=a("div"),T(sn.$$.fragment),kd=l(),La=a("p"),wd=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:`),yd=l(),T(an.$$.fragment),Dd=l(),Ct=a("p"),Ed=n("If "),Ia=a("code"),$d=n("token_ids_1"),Fd=n(" is "),Sa=a("code"),Vd=n("None"),zd=n(", this method only returns the first portion of the mask (0s)."),qd=l(),Na=a("div"),_i=l(),Rt=a("h2"),co=a("a"),Oa=a("span"),T(rn.$$.fragment),Md=l(),Wa=a("span"),xd=n("DebertaV2Model"),bi=l(),Je=a("div"),T(ln.$$.fragment),Cd=l(),dn=a("p"),Rd=n(`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),cn=a("a"),Pd=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Bd=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),jd=l(),pn=a("p"),Ad=n("This model is also a PyTorch "),hn=a("a"),Ld=n("torch.nn.Module"),Id=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Sd=l(),Ie=a("div"),T(fn.$$.fragment),Nd=l(),Pt=a("p"),Od=n("The "),Ks=a("a"),Wd=n("DebertaV2Model"),Hd=n(" forward method, overrides the "),Ha=a("code"),Ud=n("__call__"),Qd=n(" special method."),Gd=l(),T(po.$$.fragment),Kd=l(),Ua=a("p"),Jd=n("Example:"),Xd=l(),T(un.$$.fragment),vi=l(),Bt=a("h2"),ho=a("a"),Qa=a("span"),T(mn.$$.fragment),Yd=l(),Ga=a("span"),Zd=n("DebertaV2PreTrainedModel"),Ti=l(),dt=a("div"),T(gn.$$.fragment),ec=l(),Ka=a("p"),tc=n(`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),oc=l(),tt=a("div"),T(_n.$$.fragment),nc=l(),Ja=a("p"),sc=n("Defines the computation performed at every call."),ac=l(),Xa=a("p"),rc=n("Should be overridden by all subclasses."),ic=l(),T(fo.$$.fragment),ki=l(),jt=a("h2"),uo=a("a"),Ya=a("span"),T(bn.$$.fragment),lc=l(),Za=a("span"),dc=n("DebertaV2ForMaskedLM"),wi=l(),Xe=a("div"),T(vn.$$.fragment),cc=l(),At=a("p"),pc=n("DeBERTa Model with a "),er=a("code"),hc=n("language modeling"),fc=n(` head on top. The DeBERTa model was proposed in `),Tn=a("a"),uc=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),mc=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. 
With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),gc=l(),kn=a("p"),_c=n("This model is also a PyTorch "),wn=a("a"),bc=n("torch.nn.Module"),vc=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Tc=l(),Se=a("div"),T(yn.$$.fragment),kc=l(),Lt=a("p"),wc=n("The "),Js=a("a"),yc=n("DebertaV2ForMaskedLM"),Dc=n(" forward method, overrides the "),tr=a("code"),Ec=n("__call__"),$c=n(" special method."),Fc=l(),T(mo.$$.fragment),Vc=l(),or=a("p"),zc=n("Example:"),qc=l(),T(Dn.$$.fragment),yi=l(),It=a("h2"),go=a("a"),nr=a("span"),T(En.$$.fragment),Mc=l(),sr=a("span"),xc=n("DebertaV2ForSequenceClassification"),Di=l(),Pe=a("div"),T($n.$$.fragment),Cc=l(),ar=a("p"),Rc=n(`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Pc=l(),Fn=a("p"),Bc=n("The DeBERTa model was proposed in "),Vn=a("a"),jc=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Ac=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Lc=l(),zn=a("p"),Ic=n("This model is also a PyTorch "),qn=a("a"),Sc=n("torch.nn.Module"),Nc=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Oc=l(),Ve=a("div"),T(Mn.$$.fragment),Wc=l(),St=a("p"),Hc=n("The "),Xs=a("a"),Uc=n("DebertaV2ForSequenceClassification"),Qc=n(" forward method, overrides the "),rr=a("code"),Gc=n("__call__"),Kc=n(" special method."),Jc=l(),T(_o.$$.fragment),Xc=l(),ir=a("p"),Yc=n("Example of single-label classification:"),Zc=l(),T(xn.$$.fragment),ep=l(),lr=a("p"),tp=n("Example of multi-label classification:"),op=l(),T(Cn.$$.fragment),Ei=l(),Nt=a("h2"),bo=a("a"),dr=a("span"),T(Rn.$$.fragment),np=l(),cr=a("span"),sp=n("DebertaV2ForTokenClassification"),$i=l(),Be=a("div"),T(Pn.$$.fragment),ap=l(),pr=a("p"),rp=n(`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ip=l(),Bn=a("p"),lp=n("The DeBERTa model was proposed in "),jn=a("a"),dp=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),cp=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),pp=l(),An=a("p"),hp=n("This model is also a PyTorch "),Ln=a("a"),fp=n("torch.nn.Module"),up=n("\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),mp=l(),Ne=a("div"),T(In.$$.fragment),gp=l(),Ot=a("p"),_p=n("The "),Ys=a("a"),bp=n("DebertaV2ForTokenClassification"),vp=n(" forward method, overrides the "),hr=a("code"),Tp=n("__call__"),kp=n(" special method."),wp=l(),T(vo.$$.fragment),yp=l(),fr=a("p"),Dp=n("Example:"),Ep=l(),T(Sn.$$.fragment),Fi=l(),Wt=a("h2"),To=a("a"),ur=a("span"),T(Nn.$$.fragment),$p=l(),mr=a("span"),Fp=n("DebertaV2ForQuestionAnswering"),Vi=l(),je=a("div"),T(On.$$.fragment),Vp=l(),Ht=a("p"),zp=n(`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),gr=a("code"),qp=n("span start logits"),Mp=n(" and "),_r=a("code"),xp=n("span end logits"),Cp=n(")."),Rp=l(),Wn=a("p"),Pp=n("The DeBERTa model was proposed in "),Hn=a("a"),Bp=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),jp=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Ap=l(),Un=a("p"),Lp=n("This model is also a PyTorch "),Qn=a("a"),Ip=n("torch.nn.Module"),Sp=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Np=l(),Oe=a("div"),T(Gn.$$.fragment),Op=l(),Ut=a("p"),Wp=n("The "),Zs=a("a"),Hp=n("DebertaV2ForQuestionAnswering"),Up=n(" forward method, overrides the "),br=a("code"),Qp=n("__call__"),Gp=n(" special method."),Kp=l(),T(ko.$$.fragment),Jp=l(),vr=a("p"),Xp=n("Example:"),Yp=l(),T(Kn.$$.fragment),zi=l(),Qt=a("h2"),wo=a("a"),Tr=a("span"),T(Jn.$$.fragment),Zp=l(),kr=a("span"),eh=n("TFDebertaV2Model"),qi=l(),Ae=a("div"),T(Xn.$$.fragment),th=l(),Yn=a("p"),oh=n(`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),Zn=a("a"),nh=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),sh=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),ah=l(),es=a("p"),rh=n("This model is also a "),ts=a("a"),ih=n("tf.keras.Model"),lh=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),dh=l(),T(yo.$$.fragment),ch=l(),We=a("div"),T(os.$$.fragment),ph=l(),Gt=a("p"),hh=n("The "),ea=a("a"),fh=n("TFDebertaV2Model"),uh=n(" forward method, overrides the "),wr=a("code"),mh=n("__call__"),gh=n(" special method."),_h=l(),T(Do.$$.fragment),bh=l(),yr=a("p"),vh=n("Example:"),Th=l(),T(ns.$$.fragment),Mi=l(),Kt=a("h2"),Eo=a("a"),Dr=a("span"),T(ss.$$.fragment),kh=l(),Er=a("span"),wh=n("TFDebertaV2PreTrainedModel"),xi=l(),ct=a("div"),T(as.$$.fragment),yh=l(),$r=a("p"),Dh=n(`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),Eh=l(),ot=a("div"),T(rs.$$.fragment),$h=l(),Fr=a("p"),Fh=n("Calls the model on new inputs and returns the outputs as tensors."),Vh=l(),is=a("p"),zh=n("In this case "),Vr=a("code"),qh=n("call()"),Mh=n(` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs).`),xh=l(),Ye=a("p"),Ch=n(`Note: This method should not be called directly. It is only meant to be overridden when subclassing `),zr=a("code"),Rh=n("tf.keras.Model"),Ph=n(`. To call a model on an input, always use the `),qr=a("code"),Bh=n("__call__()"),jh=n(` method, i.e. `),Mr=a("code"),Ah=n("model(inputs)"),Lh=n(", which relies on the underlying "),xr=a("code"),Ih=n("call()"),Sh=n(" method."),Ci=l(),Jt=a("h2"),$o=a("a"),Cr=a("span"),T(ls.$$.fragment),Nh=l(),Rr=a("span"),Oh=n("TFDebertaV2ForMaskedLM"),Ri=l(),Le=a("div"),T(ds.$$.fragment),Wh=l(),Xt=a("p"),Hh=n("DeBERTa Model with a "),Pr=a("code"),Uh=n("language modeling"),Qh=n(` head on top. The DeBERTa model was proposed in `),cs=a("a"),Gh=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Kh=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Jh=l(),ps=a("p"),Xh=n("This model is also a "),hs=a("a"),Yh=n("tf.keras.Model"),Zh=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ef=l(),T(Fo.$$.fragment),tf=l(),He=a("div"),T(fs.$$.fragment),of=l(),Yt=a("p"),nf=n("The "),ta=a("a"),sf=n("TFDebertaV2ForMaskedLM"),af=n(" forward method, overrides the "),Br=a("code"),rf=n("__call__"),lf=n(" special method."),df=l(),T(Vo.$$.fragment),cf=l(),jr=a("p"),pf=n("Example:"),hf=l(),T(us.$$.fragment),Pi=l(),Zt=a("h2"),zo=a("a"),Ar=a("span"),T(ms.$$.fragment),ff=l(),Lr=a("span"),uf=n("TFDebertaV2ForSequenceClassification"),Bi=l(),qe=a("div"),T(gs.$$.fragment),mf=l(),Ir=a("p"),gf=n(`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),_f=l(),_s=a("p"),bf=n("The DeBERTa model was proposed in "),bs=a("a"),vf=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Tf=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),kf=l(),vs=a("p"),wf=n("This model is also a "),Ts=a("a"),yf=n("tf.keras.Model"),Df=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ef=l(),T(qo.$$.fragment),$f=l(),Ue=a("div"),T(ks.$$.fragment),Ff=l(),eo=a("p"),Vf=n("The "),oa=a("a"),zf=n("TFDebertaV2ForSequenceClassification"),qf=n(" forward method, overrides the "),Sr=a("code"),Mf=n("__call__"),xf=n(" special method."),Cf=l(),T(Mo.$$.fragment),Rf=l(),Nr=a("p"),Pf=n("Example:"),Bf=l(),T(ws.$$.fragment),ji=l(),to=a("h2"),xo=a("a"),Or=a("span"),T(ys.$$.fragment),jf=l(),Wr=a("span"),Af=n("TFDebertaV2ForTokenClassification"),Ai=l(),Me=a("div"),T(Ds.$$.fragment),Lf=l(),Hr=a("p"),If=n(`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Sf=l(),Es=a("p"),Nf=n("The DeBERTa model was proposed in "),$s=a("a"),Of=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Wf=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Hf=l(),Fs=a("p"),Uf=n("This model is also a "),Vs=a("a"),Qf=n("tf.keras.Model"),Gf=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Kf=l(),T(Co.$$.fragment),Jf=l(),Qe=a("div"),T(zs.$$.fragment),Xf=l(),oo=a("p"),Yf=n("The "),na=a("a"),Zf=n("TFDebertaV2ForTokenClassification"),eu=n(" forward method, overrides the "),Ur=a("code"),tu=n("__call__"),ou=n(" special method."),nu=l(),T(Ro.$$.fragment),su=l(),Qr=a("p"),au=n("Example:"),ru=l(),T(qs.$$.fragment),Li=l(),no=a("h2"),Po=a("a"),Gr=a("span"),T(Ms.$$.fragment),iu=l(),Kr=a("span"),lu=n("TFDebertaV2ForQuestionAnswering"),Ii=l(),xe=a("div"),T(xs.$$.fragment),du=l(),so=a("p"),cu=n(`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Jr=a("code"),pu=n("span start logits"),hu=n(" and "),Xr=a("code"),fu=n("span end logits"),uu=n(")."),mu=l(),Cs=a("p"),gu=n("The DeBERTa model was proposed in "),Rs=a("a"),_u=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),bu=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),vu=l(),Ps=a("p"),Tu=n("This model is also a "),Bs=a("a"),ku=n("tf.keras.Model"),wu=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yu=l(),T(Bo.$$.fragment),Du=l(),Ge=a("div"),T(js.$$.fragment),Eu=l(),ao=a("p"),$u=n("The "),sa=a("a"),Fu=n("TFDebertaV2ForQuestionAnswering"),Vu=n(" forward method, overrides the "),Yr=a("code"),zu=n("__call__"),qu=n(" special method."),Mu=l(),T(jo.$$.fragment),xu=l(),Zr=a("p"),Cu=n("Example:"),Ru=l(),T(As.$$.fragment),this.h()},l(o){const u=b_('[data-svelte="svelte-1phssyn"]',document.head);h=r(u,"META",{name:!0,content:!0}),u.forEach(t),$=d(o),m=r(o,"H1",{class:!0});var Ls=i(m);g=r(Ls,"A",{id:!0,class:!0,href:!0});var ei=i(g);v=r(ei,"SPAN",{});var ti=i(v);k(b.$$.fragment,ti),ti.forEach(t),ei.forEach(t),_=d(Ls),V=r(Ls,"SPAN",{});var oi=i(V);de=s(oi,"DeBERTa-v2"),oi.forEach(t),Ls.forEach(t),K=d(o),z=r(o,"H2",{class:!0});var Is=i(z);Y=r(Is,"A",{id:!0,class:!0,href:!0});var ni=i(Y);I=r(ni,"SPAN",{});var si=i(I);k(ee.$$.fragment,si),si.forEach(t),ni.forEach(t),ce=d(Is),S=r(Is,"SPAN",{});var ai=i(S);pe=s(ai,"Overview"),ai.forEach(t),Is.forEach(t),re=d(o),O=r(o,"P",{});var Ss=i(O);P=s(Ss,"The DeBERTa model was proposed in "),te=r(Ss,"A",{href:!0,rel:!0});var ri=i(te);Z=s(ri,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),ri.forEach(t),q=s(Ss,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen It is based on Google\u2019s BERT model released in 2018 and Facebook\u2019s RoBERTa model released in 2019.`),Ss.forEach(t),x=d(o),ne=r(o,"P",{});var ii=i(ne);W=s(ii,`It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa.`),ii.forEach(t),ie=d(o),se=r(o,"P",{});var li=i(se);H=s(li,"The abstract from the paper is the following:"),li.forEach(t),le=d(o),ae=r(o,"P",{});var di=i(ae);M=r(di,"EM",{});var Ns=i(M);he=s(Ns,`Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at `),j=r(Ns,"A",{href:!0,rel:!0});var ci=i(j);fe=s(ci,"https://github.com/microsoft/DeBERTa"),ci.forEach(t),ue=s(Ns,"."),Ns.forEach(t),di.forEach(t),N=d(o),J=r(o,"P",{});var Ao=i(J);me=s(Ao,"The following information is visible directly on the "),R=r(Ao,"A",{href:!0,rel:!0});var Lu=i(R);ge=s(Lu,`original implementation repository`),Lu.forEach(t),U=s(Ao,`. DeBERTa v2 is the second version of the DeBERTa model. 
It includes the 1.5B model used for the SuperGLUE single-model submission and achieving 89.9, versus human baseline 89.8. You can find more details about this submission in the authors\u2019 `),oe=r(Ao,"A",{href:!0,rel:!0});var Iu=i(oe);p=s(Iu,"blog"),Iu.forEach(t),Ao.forEach(t),F=d(o),G=r(o,"P",{});var Su=i(G);ye=s(Su,"New in v2:"),Su.forEach(t),we=d(o),C=r(o,"UL",{});var ht=i(C);_e=r(ht,"LI",{});var pi=i(_e);Te=r(pi,"STRONG",{});var Nu=i(Te);De=s(Nu,"Vocabulary"),Nu.forEach(t),B=s(pi,` In v2 the tokenizer is changed to use a new vocabulary of size 128K built from the training data. Instead of a GPT2-based tokenizer, the tokenizer is now `),A=r(pi,"A",{href:!0,rel:!0});var Ou=i(A);Ee=s(Ou,"sentencepiece-based"),Ou.forEach(t),$e=s(pi," tokenizer."),pi.forEach(t),Q=d(ht),ve=r(ht,"LI",{});var Pu=i(ve);ke=r(Pu,"STRONG",{});var Wu=i(ke);be=s(Wu,"nGiE(nGram Induced Input Encoding)"),Wu.forEach(t),Fe=s(Pu,` The DeBERTa-v2 model uses an additional convolution layer aside with the first transformer layer to better learn the local dependency of input tokens.`),Pu.forEach(t),Dl=d(ht),Os=r(ht,"LI",{});var Bu=i(Os);Fa=r(Bu,"STRONG",{});var Hu=i(Fa);El=s(Hu,"Sharing position projection matrix with content projection matrix in attention layer"),Hu.forEach(t),$l=s(Bu,` Based on previous experiments, this can save parameters without affecting the performance.`),Bu.forEach(t),Fl=d(ht),Ws=r(ht,"LI",{});var ju=i(Ws);Va=r(ju,"STRONG",{});var Uu=i(Va);Vl=s(Uu,"Apply bucket to encode relative positions"),Uu.forEach(t),zl=s(ju,` The DeBERTa-v2 model uses log bucket to encode relative positions similar to T5.`),ju.forEach(t),ql=d(ht),Hs=r(ht,"LI",{});var Au=i(Hs);za=r(Au,"STRONG",{});var Qu=i(za);Ml=s(Qu,"900M model & 1.5B model"),Qu.forEach(t),xl=s(Au,` Two additional model sizes are available: 900M and 1.5B, which significantly improves the performance of downstream tasks.`),Au.forEach(t),ht.forEach(t),hi=d(o),Ze=r(o,"P",{});var Lo=i(Ze);Cl=s(Lo,"This model was contributed by "),Ho=r(Lo,"A",{href:!0,rel:!0});var Gu=i(Ho);Rl=s(Gu,"DeBERTa"),Gu.forEach(t),Pl=s(Lo,`. This model TF 2.0 implementation was contributed by `),Uo=r(Lo,"A",{href:!0,rel:!0});var Ku=i(Uo);Bl=s(Ku,"kamalkraj"),Ku.forEach(t),jl=s(Lo,". The original code can be found "),Qo=r(Lo,"A",{href:!0,rel:!0});var Ju=i(Qo);Al=s(Ju,"here"),Ju.forEach(t),Ll=s(Lo,"."),Lo.forEach(t),fi=d(o),Vt=r(o,"H2",{class:!0});var Ni=i(Vt);ro=r(Ni,"A",{id:!0,class:!0,href:!0});var Xu=i(ro);qa=r(Xu,"SPAN",{});var Yu=i(qa);k(Go.$$.fragment,Yu),Yu.forEach(t),Xu.forEach(t),Il=d(Ni),Ma=r(Ni,"SPAN",{});var Zu=i(Ma);Sl=s(Zu,"DebertaV2Config"),Zu.forEach(t),Ni.forEach(t),ui=d(o),lt=r(o,"DIV",{class:!0});var aa=i(lt);k(Ko.$$.fragment,aa),Nl=d(aa),zt=r(aa,"P",{});var ra=i(zt);Ol=s(ra,"This is the configuration class to store the configuration of a "),Us=r(ra,"A",{href:!0});var em=i(Us);Wl=s(em,"DebertaV2Model"),em.forEach(t),Hl=s(ra,`. It is used to instantiate a DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa `),Jo=r(ra,"A",{href:!0,rel:!0});var tm=i(Jo);Ul=s(tm,"microsoft/deberta-v2-xlarge"),tm.forEach(t),Ql=s(ra," architecture."),ra.forEach(t),Gl=d(aa),qt=r(aa,"P",{});var ia=i(qt);Kl=s(ia,"Configuration objects inherit from "),Qs=r(ia,"A",{href:!0});var om=i(Qs);Jl=s(om,"PretrainedConfig"),om.forEach(t),Xl=s(ia,` and can be used to control the model outputs. 
Read the documentation from `),Gs=r(ia,"A",{href:!0});var nm=i(Gs);Yl=s(nm,"PretrainedConfig"),nm.forEach(t),Zl=s(ia," for more information."),ia.forEach(t),aa.forEach(t),mi=d(o),Mt=r(o,"H2",{class:!0});var Oi=i(Mt);io=r(Oi,"A",{id:!0,class:!0,href:!0});var sm=i(io);xa=r(sm,"SPAN",{});var am=i(xa);k(Xo.$$.fragment,am),am.forEach(t),sm.forEach(t),ed=d(Oi),Ca=r(Oi,"SPAN",{});var rm=i(Ca);td=s(rm,"DebertaV2Tokenizer"),rm.forEach(t),Oi.forEach(t),gi=d(o),ze=r(o,"DIV",{class:!0});var nt=i(ze);k(Yo.$$.fragment,nt),od=d(nt),Zo=r(nt,"P",{});var Wi=i(Zo);nd=s(Wi,"Constructs a DeBERTa-v2 tokenizer. Based on "),en=r(Wi,"A",{href:!0,rel:!0});var im=i(en);sd=s(im,"SentencePiece"),im.forEach(t),ad=s(Wi,"."),Wi.forEach(t),rd=d(nt),pt=r(nt,"DIV",{class:!0});var la=i(pt);k(tn.$$.fragment,la),id=d(la),Ra=r(la,"P",{});var lm=i(Ra);ld=s(lm,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:`),lm.forEach(t),dd=d(la),on=r(la,"UL",{});var Hi=i(on);Pa=r(Hi,"LI",{});var dm=i(Pa);cd=s(dm,"single sequence: [CLS] X [SEP]"),dm.forEach(t),pd=d(Hi),Ba=r(Hi,"LI",{});var cm=i(Ba);hd=s(cm,"pair of sequences: [CLS] A [SEP] B [SEP]"),cm.forEach(t),Hi.forEach(t),la.forEach(t),fd=d(nt),lo=r(nt,"DIV",{class:!0});var Ui=i(lo);k(nn.$$.fragment,Ui),ud=d(Ui),xt=r(Ui,"P",{});var da=i(xt);md=s(da,`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ja=r(da,"CODE",{});var pm=i(ja);gd=s(pm,"prepare_for_model"),pm.forEach(t),_d=s(da," or "),Aa=r(da,"CODE",{});var hm=i(Aa);bd=s(hm,"encode_plus"),hm.forEach(t),vd=s(da," methods."),da.forEach(t),Ui.forEach(t),Td=d(nt),et=r(nt,"DIV",{class:!0});var Io=i(et);k(sn.$$.fragment,Io),kd=d(Io),La=r(Io,"P",{});var fm=i(La);wd=s(fm,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:`),fm.forEach(t),yd=d(Io),k(an.$$.fragment,Io),Dd=d(Io),Ct=r(Io,"P",{});var ca=i(Ct);Ed=s(ca,"If "),Ia=r(ca,"CODE",{});var um=i(Ia);$d=s(um,"token_ids_1"),um.forEach(t),Fd=s(ca," is "),Sa=r(ca,"CODE",{});var mm=i(Sa);Vd=s(mm,"None"),mm.forEach(t),zd=s(ca,", this method only returns the first portion of the mask (0s)."),ca.forEach(t),Io.forEach(t),qd=d(nt),Na=r(nt,"DIV",{class:!0}),i(Na).forEach(t),nt.forEach(t),_i=d(o),Rt=r(o,"H2",{class:!0});var Qi=i(Rt);co=r(Qi,"A",{id:!0,class:!0,href:!0});var gm=i(co);Oa=r(gm,"SPAN",{});var _m=i(Oa);k(rn.$$.fragment,_m),_m.forEach(t),gm.forEach(t),Md=d(Qi),Wa=r(Qi,"SPAN",{});var bm=i(Wa);xd=s(bm,"DebertaV2Model"),bm.forEach(t),Qi.forEach(t),bi=d(o),Je=r(o,"DIV",{class:!0});var So=i(Je);k(ln.$$.fragment,So),Cd=d(So),dn=r(So,"P",{});var Gi=i(dn);Rd=s(Gi,`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),cn=r(Gi,"A",{href:!0,rel:!0});var vm=i(cn);Pd=s(vm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),vm.forEach(t),Bd=s(Gi,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. 
With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Gi.forEach(t),jd=d(So),pn=r(So,"P",{});var Ki=i(pn);Ad=s(Ki,"This model is also a PyTorch "),hn=r(Ki,"A",{href:!0,rel:!0});var Tm=i(hn);Ld=s(Tm,"torch.nn.Module"),Tm.forEach(t),Id=s(Ki,"\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Ki.forEach(t),Sd=d(So),Ie=r(So,"DIV",{class:!0});var ft=i(Ie);k(fn.$$.fragment,ft),Nd=d(ft),Pt=r(ft,"P",{});var pa=i(Pt);Od=s(pa,"The "),Ks=r(pa,"A",{href:!0});var km=i(Ks);Wd=s(km,"DebertaV2Model"),km.forEach(t),Hd=s(pa," forward method, overrides the "),Ha=r(pa,"CODE",{});var wm=i(Ha);Ud=s(wm,"__call__"),wm.forEach(t),Qd=s(pa," special method."),pa.forEach(t),Gd=d(ft),k(po.$$.fragment,ft),Kd=d(ft),Ua=r(ft,"P",{});var ym=i(Ua);Jd=s(ym,"Example:"),ym.forEach(t),Xd=d(ft),k(un.$$.fragment,ft),ft.forEach(t),So.forEach(t),vi=d(o),Bt=r(o,"H2",{class:!0});var Ji=i(Bt);ho=r(Ji,"A",{id:!0,class:!0,href:!0});var Dm=i(ho);Qa=r(Dm,"SPAN",{});var Em=i(Qa);k(mn.$$.fragment,Em),Em.forEach(t),Dm.forEach(t),Yd=d(Ji),Ga=r(Ji,"SPAN",{});var $m=i(Ga);Zd=s($m,"DebertaV2PreTrainedModel"),$m.forEach(t),Ji.forEach(t),Ti=d(o),dt=r(o,"DIV",{class:!0});var ha=i(dt);k(gn.$$.fragment,ha),ec=d(ha),Ka=r(ha,"P",{});var Fm=i(Ka);tc=s(Fm,`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),Fm.forEach(t),oc=d(ha),tt=r(ha,"DIV",{class:!0});var No=i(tt);k(_n.$$.fragment,No),nc=d(No),Ja=r(No,"P",{});var Vm=i(Ja);sc=s(Vm,"Defines the computation performed at every call."),Vm.forEach(t),ac=d(No),Xa=r(No,"P",{});var zm=i(Xa);rc=s(zm,"Should be overridden by all subclasses."),zm.forEach(t),ic=d(No),k(fo.$$.fragment,No),No.forEach(t),ha.forEach(t),ki=d(o),jt=r(o,"H2",{class:!0});var Xi=i(jt);uo=r(Xi,"A",{id:!0,class:!0,href:!0});var qm=i(uo);Ya=r(qm,"SPAN",{});var Mm=i(Ya);k(bn.$$.fragment,Mm),Mm.forEach(t),qm.forEach(t),lc=d(Xi),Za=r(Xi,"SPAN",{});var xm=i(Za);dc=s(xm,"DebertaV2ForMaskedLM"),xm.forEach(t),Xi.forEach(t),wi=d(o),Xe=r(o,"DIV",{class:!0});var Oo=i(Xe);k(vn.$$.fragment,Oo),cc=d(Oo),At=r(Oo,"P",{});var fa=i(At);pc=s(fa,"DeBERTa Model with a "),er=r(fa,"CODE",{});var Cm=i(er);hc=s(Cm,"language modeling"),Cm.forEach(t),fc=s(fa,` head on top. The DeBERTa model was proposed in `),Tn=r(fa,"A",{href:!0,rel:!0});var Rm=i(Tn);uc=s(Rm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Rm.forEach(t),mc=s(fa,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),fa.forEach(t),gc=d(Oo),kn=r(Oo,"P",{});var Yi=i(kn);_c=s(Yi,"This model is also a PyTorch "),wn=r(Yi,"A",{href:!0,rel:!0});var Pm=i(wn);bc=s(Pm,"torch.nn.Module"),Pm.forEach(t),vc=s(Yi,"\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Yi.forEach(t),Tc=d(Oo),Se=r(Oo,"DIV",{class:!0});var ut=i(Se);k(yn.$$.fragment,ut),kc=d(ut),Lt=r(ut,"P",{});var ua=i(Lt);wc=s(ua,"The "),Js=r(ua,"A",{href:!0});var Bm=i(Js);yc=s(Bm,"DebertaV2ForMaskedLM"),Bm.forEach(t),Dc=s(ua," forward method, overrides the "),tr=r(ua,"CODE",{});var jm=i(tr);Ec=s(jm,"__call__"),jm.forEach(t),$c=s(ua," special method."),ua.forEach(t),Fc=d(ut),k(mo.$$.fragment,ut),Vc=d(ut),or=r(ut,"P",{});var Am=i(or);zc=s(Am,"Example:"),Am.forEach(t),qc=d(ut),k(Dn.$$.fragment,ut),ut.forEach(t),Oo.forEach(t),yi=d(o),It=r(o,"H2",{class:!0});var Zi=i(It);go=r(Zi,"A",{id:!0,class:!0,href:!0});var Lm=i(go);nr=r(Lm,"SPAN",{});var Im=i(nr);k(En.$$.fragment,Im),Im.forEach(t),Lm.forEach(t),Mc=d(Zi),sr=r(Zi,"SPAN",{});var Sm=i(sr);xc=s(Sm,"DebertaV2ForSequenceClassification"),Sm.forEach(t),Zi.forEach(t),Di=d(o),Pe=r(o,"DIV",{class:!0});var mt=i(Pe);k($n.$$.fragment,mt),Cc=d(mt),ar=r(mt,"P",{});var Nm=i(ar);Rc=s(Nm,`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Nm.forEach(t),Pc=d(mt),Fn=r(mt,"P",{});var el=i(Fn);Bc=s(el,"The DeBERTa model was proposed in "),Vn=r(el,"A",{href:!0,rel:!0});var Om=i(Vn);jc=s(Om,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Om.forEach(t),Ac=s(el,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),el.forEach(t),Lc=d(mt),zn=r(mt,"P",{});var tl=i(zn);Ic=s(tl,"This model is also a PyTorch "),qn=r(tl,"A",{href:!0,rel:!0});var Wm=i(qn);Sc=s(Wm,"torch.nn.Module"),Wm.forEach(t),Nc=s(tl,"\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),tl.forEach(t),Oc=d(mt),Ve=r(mt,"DIV",{class:!0});var Ke=i(Ve);k(Mn.$$.fragment,Ke),Wc=d(Ke),St=r(Ke,"P",{});var ma=i(St);Hc=s(ma,"The "),Xs=r(ma,"A",{href:!0});var Hm=i(Xs);Uc=s(Hm,"DebertaV2ForSequenceClassification"),Hm.forEach(t),Qc=s(ma," forward method, overrides the "),rr=r(ma,"CODE",{});var Um=i(rr);Gc=s(Um,"__call__"),Um.forEach(t),Kc=s(ma," special method."),ma.forEach(t),Jc=d(Ke),k(_o.$$.fragment,Ke),Xc=d(Ke),ir=r(Ke,"P",{});var Qm=i(ir);Yc=s(Qm,"Example of single-label classification:"),Qm.forEach(t),Zc=d(Ke),k(xn.$$.fragment,Ke),ep=d(Ke),lr=r(Ke,"P",{});var Gm=i(lr);tp=s(Gm,"Example of multi-label classification:"),Gm.forEach(t),op=d(Ke),k(Cn.$$.fragment,Ke),Ke.forEach(t),mt.forEach(t),Ei=d(o),Nt=r(o,"H2",{class:!0});var ol=i(Nt);bo=r(ol,"A",{id:!0,class:!0,href:!0});var Km=i(bo);dr=r(Km,"SPAN",{});var Jm=i(dr);k(Rn.$$.fragment,Jm),Jm.forEach(t),Km.forEach(t),np=d(ol),cr=r(ol,"SPAN",{});var Xm=i(cr);sp=s(Xm,"DebertaV2ForTokenClassification"),Xm.forEach(t),ol.forEach(t),$i=d(o),Be=r(o,"DIV",{class:!0});var gt=i(Be);k(Pn.$$.fragment,gt),ap=d(gt),pr=r(gt,"P",{});var Ym=i(pr);rp=s(Ym,`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),Ym.forEach(t),ip=d(gt),Bn=r(gt,"P",{});var nl=i(Bn);lp=s(nl,"The DeBERTa model was proposed in "),jn=r(nl,"A",{href:!0,rel:!0});var Zm=i(jn);dp=s(Zm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Zm.forEach(t),cp=s(nl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),nl.forEach(t),pp=d(gt),An=r(gt,"P",{});var sl=i(An);hp=s(sl,"This model is also a PyTorch "),Ln=r(sl,"A",{href:!0,rel:!0});var eg=i(Ln);fp=s(eg,"torch.nn.Module"),eg.forEach(t),up=s(sl,"\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),sl.forEach(t),mp=d(gt),Ne=r(gt,"DIV",{class:!0});var _t=i(Ne);k(In.$$.fragment,_t),gp=d(_t),Ot=r(_t,"P",{});var ga=i(Ot);_p=s(ga,"The "),Ys=r(ga,"A",{href:!0});var tg=i(Ys);bp=s(tg,"DebertaV2ForTokenClassification"),tg.forEach(t),vp=s(ga," forward method, overrides the "),hr=r(ga,"CODE",{});var og=i(hr);Tp=s(og,"__call__"),og.forEach(t),kp=s(ga," special method."),ga.forEach(t),wp=d(_t),k(vo.$$.fragment,_t),yp=d(_t),fr=r(_t,"P",{});var ng=i(fr);Dp=s(ng,"Example:"),ng.forEach(t),Ep=d(_t),k(Sn.$$.fragment,_t),_t.forEach(t),gt.forEach(t),Fi=d(o),Wt=r(o,"H2",{class:!0});var al=i(Wt);To=r(al,"A",{id:!0,class:!0,href:!0});var sg=i(To);ur=r(sg,"SPAN",{});var ag=i(ur);k(Nn.$$.fragment,ag),ag.forEach(t),sg.forEach(t),$p=d(al),mr=r(al,"SPAN",{});var rg=i(mr);Fp=s(rg,"DebertaV2ForQuestionAnswering"),rg.forEach(t),al.forEach(t),Vi=d(o),je=r(o,"DIV",{class:!0});var bt=i(je);k(On.$$.fragment,bt),Vp=d(bt),Ht=r(bt,"P",{});var _a=i(Ht);zp=s(_a,`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),gr=r(_a,"CODE",{});var ig=i(gr);qp=s(ig,"span start logits"),ig.forEach(t),Mp=s(_a," and "),_r=r(_a,"CODE",{});var lg=i(_r);xp=s(lg,"span end logits"),lg.forEach(t),Cp=s(_a,")."),_a.forEach(t),Rp=d(bt),Wn=r(bt,"P",{});var rl=i(Wn);Pp=s(rl,"The DeBERTa model was proposed in "),Hn=r(rl,"A",{href:!0,rel:!0});var dg=i(Hn);Bp=s(dg,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),dg.forEach(t),jp=s(rl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),rl.forEach(t),Ap=d(bt),Un=r(bt,"P",{});var il=i(Un);Lp=s(il,"This model is also a PyTorch "),Qn=r(il,"A",{href:!0,rel:!0});var cg=i(Qn);Ip=s(cg,"torch.nn.Module"),cg.forEach(t),Sp=s(il,"\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),il.forEach(t),Np=d(bt),Oe=r(bt,"DIV",{class:!0});var vt=i(Oe);k(Gn.$$.fragment,vt),Op=d(vt),Ut=r(vt,"P",{});var ba=i(Ut);Wp=s(ba,"The "),Zs=r(ba,"A",{href:!0});var pg=i(Zs);Hp=s(pg,"DebertaV2ForQuestionAnswering"),pg.forEach(t),Up=s(ba," forward method, overrides the "),br=r(ba,"CODE",{});var hg=i(br);Qp=s(hg,"__call__"),hg.forEach(t),Gp=s(ba," special method."),ba.forEach(t),Kp=d(vt),k(ko.$$.fragment,vt),Jp=d(vt),vr=r(vt,"P",{});var fg=i(vr);Xp=s(fg,"Example:"),fg.forEach(t),Yp=d(vt),k(Kn.$$.fragment,vt),vt.forEach(t),bt.forEach(t),zi=d(o),Qt=r(o,"H2",{class:!0});var ll=i(Qt);wo=r(ll,"A",{id:!0,class:!0,href:!0});var ug=i(wo);Tr=r(ug,"SPAN",{});var mg=i(Tr);k(Jn.$$.fragment,mg),mg.forEach(t),ug.forEach(t),Zp=d(ll),kr=r(ll,"SPAN",{});var gg=i(kr);eh=s(gg,"TFDebertaV2Model"),gg.forEach(t),ll.forEach(t),qi=d(o),Ae=r(o,"DIV",{class:!0});var Tt=i(Ae);k(Xn.$$.fragment,Tt),th=d(Tt),Yn=r(Tt,"P",{});var dl=i(Yn);oh=s(dl,`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),Zn=r(dl,"A",{href:!0,rel:!0});var _g=i(Zn);nh=s(_g,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),_g.forEach(t),sh=s(dl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),dl.forEach(t),ah=d(Tt),es=r(Tt,"P",{});var cl=i(es);rh=s(cl,"This model is also a "),ts=r(cl,"A",{href:!0,rel:!0});var bg=i(ts);ih=s(bg,"tf.keras.Model"),bg.forEach(t),lh=s(cl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cl.forEach(t),dh=d(Tt),k(yo.$$.fragment,Tt),ch=d(Tt),We=r(Tt,"DIV",{class:!0});var kt=i(We);k(os.$$.fragment,kt),ph=d(kt),Gt=r(kt,"P",{});var va=i(Gt);hh=s(va,"The "),ea=r(va,"A",{href:!0});var vg=i(ea);fh=s(vg,"TFDebertaV2Model"),vg.forEach(t),uh=s(va," forward method, overrides the "),wr=r(va,"CODE",{});var Tg=i(wr);mh=s(Tg,"__call__"),Tg.forEach(t),gh=s(va," special method."),va.forEach(t),_h=d(kt),k(Do.$$.fragment,kt),bh=d(kt),yr=r(kt,"P",{});var kg=i(yr);vh=s(kg,"Example:"),kg.forEach(t),Th=d(kt),k(ns.$$.fragment,kt),kt.forEach(t),Tt.forEach(t),Mi=d(o),Kt=r(o,"H2",{class:!0});var pl=i(Kt);Eo=r(pl,"A",{id:!0,class:!0,href:!0});var wg=i(Eo);Dr=r(wg,"SPAN",{});var yg=i(Dr);k(ss.$$.fragment,yg),yg.forEach(t),wg.forEach(t),kh=d(pl),Er=r(pl,"SPAN",{});var Dg=i(Er);wh=s(Dg,"TFDebertaV2PreTrainedModel"),Dg.forEach(t),pl.forEach(t),xi=d(o),ct=r(o,"DIV",{class:!0});var Ta=i(ct);k(as.$$.fragment,Ta),yh=d(Ta),$r=r(Ta,"P",{});var Eg=i($r);Dh=s(Eg,`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),Eg.forEach(t),Eh=d(Ta),ot=r(Ta,"DIV",{class:!0});var Wo=i(ot);k(rs.$$.fragment,Wo),$h=d(Wo),Fr=r(Wo,"P",{});var $g=i(Fr);Fh=s($g,"Calls the model on new inputs and returns the outputs as tensors."),$g.forEach(t),Vh=d(Wo),is=r(Wo,"P",{});var hl=i(is);zh=s(hl,"In this case "),Vr=r(hl,"CODE",{});var Fg=i(Vr);qh=s(Fg,"call()"),Fg.forEach(t),Mh=s(hl,` just reapplies all ops in the graph to the new inputs (e.g. 
build a new computational graph from the provided inputs).`),hl.forEach(t),xh=d(Wo),Ye=r(Wo,"P",{});var wt=i(Ye);Ch=s(wt,`Note: This method should not be called directly. It is only meant to be overridden when subclassing `),zr=r(wt,"CODE",{});var Vg=i(zr);Rh=s(Vg,"tf.keras.Model"),Vg.forEach(t),Ph=s(wt,`. To call a model on an input, always use the `),qr=r(wt,"CODE",{});var zg=i(qr);Bh=s(zg,"__call__()"),zg.forEach(t),jh=s(wt,` method, i.e. `),Mr=r(wt,"CODE",{});var qg=i(Mr);Ah=s(qg,"model(inputs)"),qg.forEach(t),Lh=s(wt,", which relies on the underlying "),xr=r(wt,"CODE",{});var Mg=i(xr);Ih=s(Mg,"call()"),Mg.forEach(t),Sh=s(wt," method."),wt.forEach(t),Wo.forEach(t),Ta.forEach(t),Ci=d(o),Jt=r(o,"H2",{class:!0});var fl=i(Jt);$o=r(fl,"A",{id:!0,class:!0,href:!0});var xg=i($o);Cr=r(xg,"SPAN",{});var Cg=i(Cr);k(ls.$$.fragment,Cg),Cg.forEach(t),xg.forEach(t),Nh=d(fl),Rr=r(fl,"SPAN",{});var Rg=i(Rr);Oh=s(Rg,"TFDebertaV2ForMaskedLM"),Rg.forEach(t),fl.forEach(t),Ri=d(o),Le=r(o,"DIV",{class:!0});var yt=i(Le);k(ds.$$.fragment,yt),Wh=d(yt),Xt=r(yt,"P",{});var ka=i(Xt);Hh=s(ka,"DeBERTa Model with a "),Pr=r(ka,"CODE",{});var Pg=i(Pr);Uh=s(Pg,"language modeling"),Pg.forEach(t),Qh=s(ka,` head on top. The DeBERTa model was proposed in `),cs=r(ka,"A",{href:!0,rel:!0});var Bg=i(cs);Gh=s(Bg,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Bg.forEach(t),Kh=s(ka,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),ka.forEach(t),Jh=d(yt),ps=r(yt,"P",{});var ul=i(ps);Xh=s(ul,"This model is also a "),hs=r(ul,"A",{href:!0,rel:!0});var jg=i(hs);Yh=s(jg,"tf.keras.Model"),jg.forEach(t),Zh=s(ul,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ul.forEach(t),ef=d(yt),k(Fo.$$.fragment,yt),tf=d(yt),He=r(yt,"DIV",{class:!0});var Dt=i(He);k(fs.$$.fragment,Dt),of=d(Dt),Yt=r(Dt,"P",{});var wa=i(Yt);nf=s(wa,"The "),ta=r(wa,"A",{href:!0});var Ag=i(ta);sf=s(Ag,"TFDebertaV2ForMaskedLM"),Ag.forEach(t),af=s(wa," forward method, overrides the "),Br=r(wa,"CODE",{});var Lg=i(Br);rf=s(Lg,"__call__"),Lg.forEach(t),lf=s(wa," special method."),wa.forEach(t),df=d(Dt),k(Vo.$$.fragment,Dt),cf=d(Dt),jr=r(Dt,"P",{});var Ig=i(jr);pf=s(Ig,"Example:"),Ig.forEach(t),hf=d(Dt),k(us.$$.fragment,Dt),Dt.forEach(t),yt.forEach(t),Pi=d(o),Zt=r(o,"H2",{class:!0});var ml=i(Zt);zo=r(ml,"A",{id:!0,class:!0,href:!0});var Sg=i(zo);Ar=r(Sg,"SPAN",{});var Ng=i(Ar);k(ms.$$.fragment,Ng),Ng.forEach(t),Sg.forEach(t),ff=d(ml),Lr=r(ml,"SPAN",{});var Og=i(Lr);uf=s(Og,"TFDebertaV2ForSequenceClassification"),Og.forEach(t),ml.forEach(t),Bi=d(o),qe=r(o,"DIV",{class:!0});var st=i(qe);k(gs.$$.fragment,st),mf=d(st),Ir=r(st,"P",{});var Wg=i(Ir);gf=s(Wg,`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Wg.forEach(t),_f=d(st),_s=r(st,"P",{});var gl=i(_s);bf=s(gl,"The DeBERTa model was proposed in "),bs=r(gl,"A",{href:!0,rel:!0});var Hg=i(bs);vf=s(Hg,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Hg.forEach(t),Tf=s(gl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. 
With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),gl.forEach(t),kf=d(st),vs=r(st,"P",{});var _l=i(vs);wf=s(_l,"This model is also a "),Ts=r(_l,"A",{href:!0,rel:!0});var Ug=i(Ts);yf=s(Ug,"tf.keras.Model"),Ug.forEach(t),Df=s(_l,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_l.forEach(t),Ef=d(st),k(qo.$$.fragment,st),$f=d(st),Ue=r(st,"DIV",{class:!0});var Et=i(Ue);k(ks.$$.fragment,Et),Ff=d(Et),eo=r(Et,"P",{});var ya=i(eo);Vf=s(ya,"The "),oa=r(ya,"A",{href:!0});var Qg=i(oa);zf=s(Qg,"TFDebertaV2ForSequenceClassification"),Qg.forEach(t),qf=s(ya," forward method, overrides the "),Sr=r(ya,"CODE",{});var Gg=i(Sr);Mf=s(Gg,"__call__"),Gg.forEach(t),xf=s(ya," special method."),ya.forEach(t),Cf=d(Et),k(Mo.$$.fragment,Et),Rf=d(Et),Nr=r(Et,"P",{});var Kg=i(Nr);Pf=s(Kg,"Example:"),Kg.forEach(t),Bf=d(Et),k(ws.$$.fragment,Et),Et.forEach(t),st.forEach(t),ji=d(o),to=r(o,"H2",{class:!0});var bl=i(to);xo=r(bl,"A",{id:!0,class:!0,href:!0});var Jg=i(xo);Or=r(Jg,"SPAN",{});var Xg=i(Or);k(ys.$$.fragment,Xg),Xg.forEach(t),Jg.forEach(t),jf=d(bl),Wr=r(bl,"SPAN",{});var Yg=i(Wr);Af=s(Yg,"TFDebertaV2ForTokenClassification"),Yg.forEach(t),bl.forEach(t),Ai=d(o),Me=r(o,"DIV",{class:!0});var at=i(Me);k(Ds.$$.fragment,at),Lf=d(at),Hr=r(at,"P",{});var Zg=i(Hr);If=s(Zg,`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zg.forEach(t),Sf=d(at),Es=r(at,"P",{});var vl=i(Es);Nf=s(vl,"The DeBERTa model was proposed in "),$s=r(vl,"A",{href:!0,rel:!0});var e_=i($s);Of=s(e_,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),e_.forEach(t),Wf=s(vl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),vl.forEach(t),Hf=d(at),Fs=r(at,"P",{});var Tl=i(Fs);Uf=s(Tl,"This model is also a "),Vs=r(Tl,"A",{href:!0,rel:!0});var t_=i(Vs);Qf=s(t_,"tf.keras.Model"),t_.forEach(t),Gf=s(Tl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Tl.forEach(t),Kf=d(at),k(Co.$$.fragment,at),Jf=d(at),Qe=r(at,"DIV",{class:!0});var $t=i(Qe);k(zs.$$.fragment,$t),Xf=d($t),oo=r($t,"P",{});var Da=i(oo);Yf=s(Da,"The "),na=r(Da,"A",{href:!0});var o_=i(na);Zf=s(o_,"TFDebertaV2ForTokenClassification"),o_.forEach(t),eu=s(Da," forward method, overrides the "),Ur=r(Da,"CODE",{});var n_=i(Ur);tu=s(n_,"__call__"),n_.forEach(t),ou=s(Da," special method."),Da.forEach(t),nu=d($t),k(Ro.$$.fragment,$t),su=d($t),Qr=r($t,"P",{});var s_=i(Qr);au=s(s_,"Example:"),s_.forEach(t),ru=d($t),k(qs.$$.fragment,$t),$t.forEach(t),at.forEach(t),Li=d(o),no=r(o,"H2",{class:!0});var kl=i(no);Po=r(kl,"A",{id:!0,class:!0,href:!0});var a_=i(Po);Gr=r(a_,"SPAN",{});var r_=i(Gr);k(Ms.$$.fragment,r_),r_.forEach(t),a_.forEach(t),iu=d(kl),Kr=r(kl,"SPAN",{});var i_=i(Kr);lu=s(i_,"TFDebertaV2ForQuestionAnswering"),i_.forEach(t),kl.forEach(t),Ii=d(o),xe=r(o,"DIV",{class:!0});var rt=i(xe);k(xs.$$.fragment,rt),du=d(rt),so=r(rt,"P",{});var Ea=i(so);cu=s(Ea,`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Jr=r(Ea,"CODE",{});var l_=i(Jr);pu=s(l_,"span start logits"),l_.forEach(t),hu=s(Ea," and "),Xr=r(Ea,"CODE",{});var d_=i(Xr);fu=s(d_,"span end logits"),d_.forEach(t),uu=s(Ea,")."),Ea.forEach(t),mu=d(rt),Cs=r(rt,"P",{});var wl=i(Cs);gu=s(wl,"The DeBERTa model was proposed in "),Rs=r(wl,"A",{href:!0,rel:!0});var c_=i(Rs);_u=s(c_,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),c_.forEach(t),bu=s(wl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),wl.forEach(t),vu=d(rt),Ps=r(rt,"P",{});var yl=i(Ps);Tu=s(yl,"This model is also a "),Bs=r(yl,"A",{href:!0,rel:!0});var p_=i(Bs);ku=s(p_,"tf.keras.Model"),p_.forEach(t),wu=s(yl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yl.forEach(t),yu=d(rt),k(Bo.$$.fragment,rt),Du=d(rt),Ge=r(rt,"DIV",{class:!0});var Ft=i(Ge);k(js.$$.fragment,Ft),Eu=d(Ft),ao=r(Ft,"P",{});var $a=i(ao);$u=s($a,"The "),sa=r($a,"A",{href:!0});var h_=i(sa);Fu=s(h_,"TFDebertaV2ForQuestionAnswering"),h_.forEach(t),Vu=s($a," forward method, overrides the "),Yr=r($a,"CODE",{});var f_=i(Yr);zu=s(f_,"__call__"),f_.forEach(t),qu=s($a," special method."),$a.forEach(t),Mu=d(Ft),k(jo.$$.fragment,Ft),xu=d(Ft),Zr=r(Ft,"P",{});var u_=i(Zr);Cu=s(u_,"Example:"),u_.forEach(t),Ru=d(Ft),k(As.$$.fragment,Ft),Ft.forEach(t),rt.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(B_)),c(g,"id","debertav2"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#debertav2"),c(m,"class","relative group"),c(Y,"id","overview"),c(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Y,"href","#overview"),c(z,"class","relative group"),c(te,"href","https://arxiv.org/abs/2006.03654"),c(te,"rel","nofollow"),c(j,"href","https://github.com/microsoft/DeBERTa"),c(j,"rel","nofollow"),c(R,"href","https://github.com/microsoft/DeBERTa"),c(R,"rel","nofollow"),c(oe,"href","https://www.microsoft.com/en-us/research/blog/microsoft-deberta-surpasses-human-performance-on-the-superglue-benchmark/"),c(oe,"rel","nofollow"),c(A,"href","https://github.com/google/sentencepiece"),c(A,"rel","nofollow"),c(Ho,"href","https://huggingface.co/DeBERTa"),c(Ho,"rel","nofollow"),c(Uo,"href","https://huggingface.co/kamalkraj"),c(Uo,"rel","nofollow"),c(Qo,"href","https://github.com/microsoft/DeBERTa"),c(Qo,"rel","nofollow"),c(ro,"id","transformers.DebertaV2Config"),c(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ro,"href","#transformers.DebertaV2Config"),c(Vt,"class","relative group"),c(Us,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Model"),c(Jo,"href","https://huggingface.co/microsoft/deberta-base"),c(Jo,"rel","nofollow"),c(Qs,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Gs,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(lt,"class","docstring"),c(io,"id","transformers.DebertaV2Tokenizer"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.DebertaV2Tokenizer"),c(Mt,"class","relative group"),c(en,"href","https://github.com/google/sentencepiece"),c(en,"rel","nofollow"),c(pt,"class","docstring"),c(lo,"class","docstring"),c(et,"class","docstring"),c(Na,"class","docstring"),c(ze,"class","docstring"),c(co,"id","transformers.DebertaV2Model"),c(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(co,"href","#transformers.DebertaV2Model"),c(Rt,"class","relative 
group"),c(cn,"href","https://arxiv.org/abs/2006.03654"),c(cn,"rel","nofollow"),c(hn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(hn,"rel","nofollow"),c(Ks,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Model"),c(Ie,"class","docstring"),c(Je,"class","docstring"),c(ho,"id","transformers.DebertaV2PreTrainedModel"),c(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ho,"href","#transformers.DebertaV2PreTrainedModel"),c(Bt,"class","relative group"),c(tt,"class","docstring"),c(dt,"class","docstring"),c(uo,"id","transformers.DebertaV2ForMaskedLM"),c(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(uo,"href","#transformers.DebertaV2ForMaskedLM"),c(jt,"class","relative group"),c(Tn,"href","https://arxiv.org/abs/2006.03654"),c(Tn,"rel","nofollow"),c(wn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(wn,"rel","nofollow"),c(Js,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForMaskedLM"),c(Se,"class","docstring"),c(Xe,"class","docstring"),c(go,"id","transformers.DebertaV2ForSequenceClassification"),c(go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(go,"href","#transformers.DebertaV2ForSequenceClassification"),c(It,"class","relative group"),c(Vn,"href","https://arxiv.org/abs/2006.03654"),c(Vn,"rel","nofollow"),c(qn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(qn,"rel","nofollow"),c(Xs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForSequenceClassification"),c(Ve,"class","docstring"),c(Pe,"class","docstring"),c(bo,"id","transformers.DebertaV2ForTokenClassification"),c(bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bo,"href","#transformers.DebertaV2ForTokenClassification"),c(Nt,"class","relative group"),c(jn,"href","https://arxiv.org/abs/2006.03654"),c(jn,"rel","nofollow"),c(Ln,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ln,"rel","nofollow"),c(Ys,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForTokenClassification"),c(Ne,"class","docstring"),c(Be,"class","docstring"),c(To,"id","transformers.DebertaV2ForQuestionAnswering"),c(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(To,"href","#transformers.DebertaV2ForQuestionAnswering"),c(Wt,"class","relative group"),c(Hn,"href","https://arxiv.org/abs/2006.03654"),c(Hn,"rel","nofollow"),c(Qn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Qn,"rel","nofollow"),c(Zs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForQuestionAnswering"),c(Oe,"class","docstring"),c(je,"class","docstring"),c(wo,"id","transformers.TFDebertaV2Model"),c(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(wo,"href","#transformers.TFDebertaV2Model"),c(Qt,"class","relative group"),c(Zn,"href","https://arxiv.org/abs/2006.03654"),c(Zn,"rel","nofollow"),c(ts,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ts,"rel","nofollow"),c(ea,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2Model"),c(We,"class","docstring"),c(Ae,"class","docstring"),c(Eo,"id","transformers.TFDebertaV2PreTrainedModel"),c(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Eo,"href","#transformers.TFDebertaV2PreTrainedModel"),c(Kt,"class","relative group"),c(ot,"class","docstring"),c(ct,"class","docstring"),c($o,"id","transformers.TFDebertaV2ForMaskedLM"),c($o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c($o,"href","#transformers.TFDebertaV2ForMaskedLM"),c(Jt,"class","relative group"),c(cs,"href","https://arxiv.org/abs/2006.03654"),c(cs,"rel","nofollow"),c(hs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(hs,"rel","nofollow"),c(ta,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForMaskedLM"),c(He,"class","docstring"),c(Le,"class","docstring"),c(zo,"id","transformers.TFDebertaV2ForSequenceClassification"),c(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zo,"href","#transformers.TFDebertaV2ForSequenceClassification"),c(Zt,"class","relative group"),c(bs,"href","https://arxiv.org/abs/2006.03654"),c(bs,"rel","nofollow"),c(Ts,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ts,"rel","nofollow"),c(oa,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForSequenceClassification"),c(Ue,"class","docstring"),c(qe,"class","docstring"),c(xo,"id","transformers.TFDebertaV2ForTokenClassification"),c(xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xo,"href","#transformers.TFDebertaV2ForTokenClassification"),c(to,"class","relative group"),c($s,"href","https://arxiv.org/abs/2006.03654"),c($s,"rel","nofollow"),c(Vs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Vs,"rel","nofollow"),c(na,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForTokenClassification"),c(Qe,"class","docstring"),c(Me,"class","docstring"),c(Po,"id","transformers.TFDebertaV2ForQuestionAnswering"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.TFDebertaV2ForQuestionAnswering"),c(no,"class","relative 
group"),c(Rs,"href","https://arxiv.org/abs/2006.03654"),c(Rs,"rel","nofollow"),c(Bs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Bs,"rel","nofollow"),c(sa,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForQuestionAnswering"),c(Ge,"class","docstring"),c(xe,"class","docstring")},m(o,u){e(document.head,h),f(o,$,u),f(o,m,u),e(m,g),e(g,v),w(b,v,null),e(m,_),e(m,V),e(V,de),f(o,K,u),f(o,z,u),e(z,Y),e(Y,I),w(ee,I,null),e(z,ce),e(z,S),e(S,pe),f(o,re,u),f(o,O,u),e(O,P),e(O,te),e(te,Z),e(O,q),f(o,x,u),f(o,ne,u),e(ne,W),f(o,ie,u),f(o,se,u),e(se,H),f(o,le,u),f(o,ae,u),e(ae,M),e(M,he),e(M,j),e(j,fe),e(M,ue),f(o,N,u),f(o,J,u),e(J,me),e(J,R),e(R,ge),e(J,U),e(J,oe),e(oe,p),f(o,F,u),f(o,G,u),e(G,ye),f(o,we,u),f(o,C,u),e(C,_e),e(_e,Te),e(Te,De),e(_e,B),e(_e,A),e(A,Ee),e(_e,$e),e(C,Q),e(C,ve),e(ve,ke),e(ke,be),e(ve,Fe),e(C,Dl),e(C,Os),e(Os,Fa),e(Fa,El),e(Os,$l),e(C,Fl),e(C,Ws),e(Ws,Va),e(Va,Vl),e(Ws,zl),e(C,ql),e(C,Hs),e(Hs,za),e(za,Ml),e(Hs,xl),f(o,hi,u),f(o,Ze,u),e(Ze,Cl),e(Ze,Ho),e(Ho,Rl),e(Ze,Pl),e(Ze,Uo),e(Uo,Bl),e(Ze,jl),e(Ze,Qo),e(Qo,Al),e(Ze,Ll),f(o,fi,u),f(o,Vt,u),e(Vt,ro),e(ro,qa),w(Go,qa,null),e(Vt,Il),e(Vt,Ma),e(Ma,Sl),f(o,ui,u),f(o,lt,u),w(Ko,lt,null),e(lt,Nl),e(lt,zt),e(zt,Ol),e(zt,Us),e(Us,Wl),e(zt,Hl),e(zt,Jo),e(Jo,Ul),e(zt,Ql),e(lt,Gl),e(lt,qt),e(qt,Kl),e(qt,Qs),e(Qs,Jl),e(qt,Xl),e(qt,Gs),e(Gs,Yl),e(qt,Zl),f(o,mi,u),f(o,Mt,u),e(Mt,io),e(io,xa),w(Xo,xa,null),e(Mt,ed),e(Mt,Ca),e(Ca,td),f(o,gi,u),f(o,ze,u),w(Yo,ze,null),e(ze,od),e(ze,Zo),e(Zo,nd),e(Zo,en),e(en,sd),e(Zo,ad),e(ze,rd),e(ze,pt),w(tn,pt,null),e(pt,id),e(pt,Ra),e(Ra,ld),e(pt,dd),e(pt,on),e(on,Pa),e(Pa,cd),e(on,pd),e(on,Ba),e(Ba,hd),e(ze,fd),e(ze,lo),w(nn,lo,null),e(lo,ud),e(lo,xt),e(xt,md),e(xt,ja),e(ja,gd),e(xt,_d),e(xt,Aa),e(Aa,bd),e(xt,vd),e(ze,Td),e(ze,et),w(sn,et,null),e(et,kd),e(et,La),e(La,wd),e(et,yd),w(an,et,null),e(et,Dd),e(et,Ct),e(Ct,Ed),e(Ct,Ia),e(Ia,$d),e(Ct,Fd),e(Ct,Sa),e(Sa,Vd),e(Ct,zd),e(ze,qd),e(ze,Na),f(o,_i,u),f(o,Rt,u),e(Rt,co),e(co,Oa),w(rn,Oa,null),e(Rt,Md),e(Rt,Wa),e(Wa,xd),f(o,bi,u),f(o,Je,u),w(ln,Je,null),e(Je,Cd),e(Je,dn),e(dn,Rd),e(dn,cn),e(cn,Pd),e(dn,Bd),e(Je,jd),e(Je,pn),e(pn,Ad),e(pn,hn),e(hn,Ld),e(pn,Id),e(Je,Sd),e(Je,Ie),w(fn,Ie,null),e(Ie,Nd),e(Ie,Pt),e(Pt,Od),e(Pt,Ks),e(Ks,Wd),e(Pt,Hd),e(Pt,Ha),e(Ha,Ud),e(Pt,Qd),e(Ie,Gd),w(po,Ie,null),e(Ie,Kd),e(Ie,Ua),e(Ua,Jd),e(Ie,Xd),w(un,Ie,null),f(o,vi,u),f(o,Bt,u),e(Bt,ho),e(ho,Qa),w(mn,Qa,null),e(Bt,Yd),e(Bt,Ga),e(Ga,Zd),f(o,Ti,u),f(o,dt,u),w(gn,dt,null),e(dt,ec),e(dt,Ka),e(Ka,tc),e(dt,oc),e(dt,tt),w(_n,tt,null),e(tt,nc),e(tt,Ja),e(Ja,sc),e(tt,ac),e(tt,Xa),e(Xa,rc),e(tt,ic),w(fo,tt,null),f(o,ki,u),f(o,jt,u),e(jt,uo),e(uo,Ya),w(bn,Ya,null),e(jt,lc),e(jt,Za),e(Za,dc),f(o,wi,u),f(o,Xe,u),w(vn,Xe,null),e(Xe,cc),e(Xe,At),e(At,pc),e(At,er),e(er,hc),e(At,fc),e(At,Tn),e(Tn,uc),e(At,mc),e(Xe,gc),e(Xe,kn),e(kn,_c),e(kn,wn),e(wn,bc),e(kn,vc),e(Xe,Tc),e(Xe,Se),w(yn,Se,null),e(Se,kc),e(Se,Lt),e(Lt,wc),e(Lt,Js),e(Js,yc),e(Lt,Dc),e(Lt,tr),e(tr,Ec),e(Lt,$c),e(Se,Fc),w(mo,Se,null),e(Se,Vc),e(Se,or),e(or,zc),e(Se,qc),w(Dn,Se,null),f(o,yi,u),f(o,It,u),e(It,go),e(go,nr),w(En,nr,null),e(It,Mc),e(It,sr),e(sr,xc),f(o,Di,u),f(o,Pe,u),w($n,Pe,null),e(Pe,Cc),e(Pe,ar),e(ar,Rc),e(Pe,Pc),e(Pe,Fn),e(Fn,Bc),e(Fn,Vn),e(Vn,jc),e(Fn,Ac),e(Pe,Lc),e(Pe,zn),e(zn,Ic),e(zn,qn),e(qn,Sc),e(zn,Nc),e(Pe,Oc),e(Pe,Ve),w(Mn,Ve,null),e(Ve,Wc),e(Ve,St),e(St,Hc),e(St,Xs),e(Xs,Uc),e(St,Qc),e(St,rr),e(rr,Gc),e(St,Kc),e(Ve,Jc),w(_o,Ve,null),e(Ve,Xc),e(Ve,ir),e(ir,Yc),e(Ve,Zc),w(xn,Ve,null),e(Ve,ep),e(Ve,lr),e(lr,tp),e(Ve,op),w(Cn,Ve,null),f(o,Ei,u),f(
o,Nt,u),e(Nt,bo),e(bo,dr),w(Rn,dr,null),e(Nt,np),e(Nt,cr),e(cr,sp),f(o,$i,u),f(o,Be,u),w(Pn,Be,null),e(Be,ap),e(Be,pr),e(pr,rp),e(Be,ip),e(Be,Bn),e(Bn,lp),e(Bn,jn),e(jn,dp),e(Bn,cp),e(Be,pp),e(Be,An),e(An,hp),e(An,Ln),e(Ln,fp),e(An,up),e(Be,mp),e(Be,Ne),w(In,Ne,null),e(Ne,gp),e(Ne,Ot),e(Ot,_p),e(Ot,Ys),e(Ys,bp),e(Ot,vp),e(Ot,hr),e(hr,Tp),e(Ot,kp),e(Ne,wp),w(vo,Ne,null),e(Ne,yp),e(Ne,fr),e(fr,Dp),e(Ne,Ep),w(Sn,Ne,null),f(o,Fi,u),f(o,Wt,u),e(Wt,To),e(To,ur),w(Nn,ur,null),e(Wt,$p),e(Wt,mr),e(mr,Fp),f(o,Vi,u),f(o,je,u),w(On,je,null),e(je,Vp),e(je,Ht),e(Ht,zp),e(Ht,gr),e(gr,qp),e(Ht,Mp),e(Ht,_r),e(_r,xp),e(Ht,Cp),e(je,Rp),e(je,Wn),e(Wn,Pp),e(Wn,Hn),e(Hn,Bp),e(Wn,jp),e(je,Ap),e(je,Un),e(Un,Lp),e(Un,Qn),e(Qn,Ip),e(Un,Sp),e(je,Np),e(je,Oe),w(Gn,Oe,null),e(Oe,Op),e(Oe,Ut),e(Ut,Wp),e(Ut,Zs),e(Zs,Hp),e(Ut,Up),e(Ut,br),e(br,Qp),e(Ut,Gp),e(Oe,Kp),w(ko,Oe,null),e(Oe,Jp),e(Oe,vr),e(vr,Xp),e(Oe,Yp),w(Kn,Oe,null),f(o,zi,u),f(o,Qt,u),e(Qt,wo),e(wo,Tr),w(Jn,Tr,null),e(Qt,Zp),e(Qt,kr),e(kr,eh),f(o,qi,u),f(o,Ae,u),w(Xn,Ae,null),e(Ae,th),e(Ae,Yn),e(Yn,oh),e(Yn,Zn),e(Zn,nh),e(Yn,sh),e(Ae,ah),e(Ae,es),e(es,rh),e(es,ts),e(ts,ih),e(es,lh),e(Ae,dh),w(yo,Ae,null),e(Ae,ch),e(Ae,We),w(os,We,null),e(We,ph),e(We,Gt),e(Gt,hh),e(Gt,ea),e(ea,fh),e(Gt,uh),e(Gt,wr),e(wr,mh),e(Gt,gh),e(We,_h),w(Do,We,null),e(We,bh),e(We,yr),e(yr,vh),e(We,Th),w(ns,We,null),f(o,Mi,u),f(o,Kt,u),e(Kt,Eo),e(Eo,Dr),w(ss,Dr,null),e(Kt,kh),e(Kt,Er),e(Er,wh),f(o,xi,u),f(o,ct,u),w(as,ct,null),e(ct,yh),e(ct,$r),e($r,Dh),e(ct,Eh),e(ct,ot),w(rs,ot,null),e(ot,$h),e(ot,Fr),e(Fr,Fh),e(ot,Vh),e(ot,is),e(is,zh),e(is,Vr),e(Vr,qh),e(is,Mh),e(ot,xh),e(ot,Ye),e(Ye,Ch),e(Ye,zr),e(zr,Rh),e(Ye,Ph),e(Ye,qr),e(qr,Bh),e(Ye,jh),e(Ye,Mr),e(Mr,Ah),e(Ye,Lh),e(Ye,xr),e(xr,Ih),e(Ye,Sh),f(o,Ci,u),f(o,Jt,u),e(Jt,$o),e($o,Cr),w(ls,Cr,null),e(Jt,Nh),e(Jt,Rr),e(Rr,Oh),f(o,Ri,u),f(o,Le,u),w(ds,Le,null),e(Le,Wh),e(Le,Xt),e(Xt,Hh),e(Xt,Pr),e(Pr,Uh),e(Xt,Qh),e(Xt,cs),e(cs,Gh),e(Xt,Kh),e(Le,Jh),e(Le,ps),e(ps,Xh),e(ps,hs),e(hs,Yh),e(ps,Zh),e(Le,ef),w(Fo,Le,null),e(Le,tf),e(Le,He),w(fs,He,null),e(He,of),e(He,Yt),e(Yt,nf),e(Yt,ta),e(ta,sf),e(Yt,af),e(Yt,Br),e(Br,rf),e(Yt,lf),e(He,df),w(Vo,He,null),e(He,cf),e(He,jr),e(jr,pf),e(He,hf),w(us,He,null),f(o,Pi,u),f(o,Zt,u),e(Zt,zo),e(zo,Ar),w(ms,Ar,null),e(Zt,ff),e(Zt,Lr),e(Lr,uf),f(o,Bi,u),f(o,qe,u),w(gs,qe,null),e(qe,mf),e(qe,Ir),e(Ir,gf),e(qe,_f),e(qe,_s),e(_s,bf),e(_s,bs),e(bs,vf),e(_s,Tf),e(qe,kf),e(qe,vs),e(vs,wf),e(vs,Ts),e(Ts,yf),e(vs,Df),e(qe,Ef),w(qo,qe,null),e(qe,$f),e(qe,Ue),w(ks,Ue,null),e(Ue,Ff),e(Ue,eo),e(eo,Vf),e(eo,oa),e(oa,zf),e(eo,qf),e(eo,Sr),e(Sr,Mf),e(eo,xf),e(Ue,Cf),w(Mo,Ue,null),e(Ue,Rf),e(Ue,Nr),e(Nr,Pf),e(Ue,Bf),w(ws,Ue,null),f(o,ji,u),f(o,to,u),e(to,xo),e(xo,Or),w(ys,Or,null),e(to,jf),e(to,Wr),e(Wr,Af),f(o,Ai,u),f(o,Me,u),w(Ds,Me,null),e(Me,Lf),e(Me,Hr),e(Hr,If),e(Me,Sf),e(Me,Es),e(Es,Nf),e(Es,$s),e($s,Of),e(Es,Wf),e(Me,Hf),e(Me,Fs),e(Fs,Uf),e(Fs,Vs),e(Vs,Qf),e(Fs,Gf),e(Me,Kf),w(Co,Me,null),e(Me,Jf),e(Me,Qe),w(zs,Qe,null),e(Qe,Xf),e(Qe,oo),e(oo,Yf),e(oo,na),e(na,Zf),e(oo,eu),e(oo,Ur),e(Ur,tu),e(oo,ou),e(Qe,nu),w(Ro,Qe,null),e(Qe,su),e(Qe,Qr),e(Qr,au),e(Qe,ru),w(qs,Qe,null),f(o,Li,u),f(o,no,u),e(no,Po),e(Po,Gr),w(Ms,Gr,null),e(no,iu),e(no,Kr),e(Kr,lu),f(o,Ii,u),f(o,xe,u),w(xs,xe,null),e(xe,du),e(xe,so),e(so,cu),e(so,Jr),e(Jr,pu),e(so,hu),e(so,Xr),e(Xr,fu),e(so,uu),e(xe,mu),e(xe,Cs),e(Cs,gu),e(Cs,Rs),e(Rs,_u),e(Cs,bu),e(xe,vu),e(xe,Ps),e(Ps,Tu),e(Ps,Bs),e(Bs,ku),e(Ps,wu),e(xe,yu),w(Bo,xe,null),e(xe,Du),e(xe,Ge),w(js,Ge,null),e(Ge,Eu),e(Ge,ao),e(ao,$u),e(ao,sa),e(sa,Fu),e(ao,Vu),e(ao,Yr),e(Yr,zu),e(ao,qu),e(Ge,Mu),w(jo,Ge,n
ull),e(Ge,xu),e(Ge,Zr),e(Zr,Cu),e(Ge,Ru),w(As,Ge,null),Si=!0},p(o,[u]){const Ls={};u&2&&(Ls.$$scope={dirty:u,ctx:o}),po.$set(Ls);const ei={};u&2&&(ei.$$scope={dirty:u,ctx:o}),fo.$set(ei);const ti={};u&2&&(ti.$$scope={dirty:u,ctx:o}),mo.$set(ti);const oi={};u&2&&(oi.$$scope={dirty:u,ctx:o}),_o.$set(oi);const Is={};u&2&&(Is.$$scope={dirty:u,ctx:o}),vo.$set(Is);const ni={};u&2&&(ni.$$scope={dirty:u,ctx:o}),ko.$set(ni);const si={};u&2&&(si.$$scope={dirty:u,ctx:o}),yo.$set(si);const ai={};u&2&&(ai.$$scope={dirty:u,ctx:o}),Do.$set(ai);const Ss={};u&2&&(Ss.$$scope={dirty:u,ctx:o}),Fo.$set(Ss);const ri={};u&2&&(ri.$$scope={dirty:u,ctx:o}),Vo.$set(ri);const ii={};u&2&&(ii.$$scope={dirty:u,ctx:o}),qo.$set(ii);const li={};u&2&&(li.$$scope={dirty:u,ctx:o}),Mo.$set(li);const di={};u&2&&(di.$$scope={dirty:u,ctx:o}),Co.$set(di);const Ns={};u&2&&(Ns.$$scope={dirty:u,ctx:o}),Ro.$set(Ns);const ci={};u&2&&(ci.$$scope={dirty:u,ctx:o}),Bo.$set(ci);const Ao={};u&2&&(Ao.$$scope={dirty:u,ctx:o}),jo.$set(Ao)},i(o){Si||(y(b.$$.fragment,o),y(ee.$$.fragment,o),y(Go.$$.fragment,o),y(Ko.$$.fragment,o),y(Xo.$$.fragment,o),y(Yo.$$.fragment,o),y(tn.$$.fragment,o),y(nn.$$.fragment,o),y(sn.$$.fragment,o),y(an.$$.fragment,o),y(rn.$$.fragment,o),y(ln.$$.fragment,o),y(fn.$$.fragment,o),y(po.$$.fragment,o),y(un.$$.fragment,o),y(mn.$$.fragment,o),y(gn.$$.fragment,o),y(_n.$$.fragment,o),y(fo.$$.fragment,o),y(bn.$$.fragment,o),y(vn.$$.fragment,o),y(yn.$$.fragment,o),y(mo.$$.fragment,o),y(Dn.$$.fragment,o),y(En.$$.fragment,o),y($n.$$.fragment,o),y(Mn.$$.fragment,o),y(_o.$$.fragment,o),y(xn.$$.fragment,o),y(Cn.$$.fragment,o),y(Rn.$$.fragment,o),y(Pn.$$.fragment,o),y(In.$$.fragment,o),y(vo.$$.fragment,o),y(Sn.$$.fragment,o),y(Nn.$$.fragment,o),y(On.$$.fragment,o),y(Gn.$$.fragment,o),y(ko.$$.fragment,o),y(Kn.$$.fragment,o),y(Jn.$$.fragment,o),y(Xn.$$.fragment,o),y(yo.$$.fragment,o),y(os.$$.fragment,o),y(Do.$$.fragment,o),y(ns.$$.fragment,o),y(ss.$$.fragment,o),y(as.$$.fragment,o),y(rs.$$.fragment,o),y(ls.$$.fragment,o),y(ds.$$.fragment,o),y(Fo.$$.fragment,o),y(fs.$$.fragment,o),y(Vo.$$.fragment,o),y(us.$$.fragment,o),y(ms.$$.fragment,o),y(gs.$$.fragment,o),y(qo.$$.fragment,o),y(ks.$$.fragment,o),y(Mo.$$.fragment,o),y(ws.$$.fragment,o),y(ys.$$.fragment,o),y(Ds.$$.fragment,o),y(Co.$$.fragment,o),y(zs.$$.fragment,o),y(Ro.$$.fragment,o),y(qs.$$.fragment,o),y(Ms.$$.fragment,o),y(xs.$$.fragment,o),y(Bo.$$.fragment,o),y(js.$$.fragment,o),y(jo.$$.fragment,o),y(As.$$.fragment,o),Si=!0)},o(o){D(b.$$.fragment,o),D(ee.$$.fragment,o),D(Go.$$.fragment,o),D(Ko.$$.fragment,o),D(Xo.$$.fragment,o),D(Yo.$$.fragment,o),D(tn.$$.fragment,o),D(nn.$$.fragment,o),D(sn.$$.fragment,o),D(an.$$.fragment,o),D(rn.$$.fragment,o),D(ln.$$.fragment,o),D(fn.$$.fragment,o),D(po.$$.fragment,o),D(un.$$.fragment,o),D(mn.$$.fragment,o),D(gn.$$.fragment,o),D(_n.$$.fragment,o),D(fo.$$.fragment,o),D(bn.$$.fragment,o),D(vn.$$.fragment,o),D(yn.$$.fragment,o),D(mo.$$.fragment,o),D(Dn.$$.fragment,o),D(En.$$.fragment,o),D($n.$$.fragment,o),D(Mn.$$.fragment,o),D(_o.$$.fragment,o),D(xn.$$.fragment,o),D(Cn.$$.fragment,o),D(Rn.$$.fragment,o),D(Pn.$$.fragment,o),D(In.$$.fragment,o),D(vo.$$.fragment,o),D(Sn.$$.fragment,o),D(Nn.$$.fragment,o),D(On.$$.fragment,o),D(Gn.$$.fragment,o),D(ko.$$.fragment,o),D(Kn.$$.fragment,o),D(Jn.$$.fragment,o),D(Xn.$$.fragment,o),D(yo.$$.fragment,o),D(os.$$.fragment,o),D(Do.$$.fragment,o),D(ns.$$.fragment,o),D(ss.$$.fragment,o),D(as.$$.fragment,o),D(rs.$$.fragment,o),D(ls.$$.fragment,o),D(ds.$$.fragment,o),D(Fo.$$.fragment,o),D(fs.$$.fragment,o),D(Vo.$$.fragme
nt,o),D(us.$$.fragment,o),D(ms.$$.fragment,o),D(gs.$$.fragment,o),D(qo.$$.fragment,o),D(ks.$$.fragment,o),D(Mo.$$.fragment,o),D(ws.$$.fragment,o),D(ys.$$.fragment,o),D(Ds.$$.fragment,o),D(Co.$$.fragment,o),D(zs.$$.fragment,o),D(Ro.$$.fragment,o),D(qs.$$.fragment,o),D(Ms.$$.fragment,o),D(xs.$$.fragment,o),D(Bo.$$.fragment,o),D(js.$$.fragment,o),D(jo.$$.fragment,o),D(As.$$.fragment,o),Si=!1},d(o){t(h),o&&t($),o&&t(m),E(b),o&&t(K),o&&t(z),E(ee),o&&t(re),o&&t(O),o&&t(x),o&&t(ne),o&&t(ie),o&&t(se),o&&t(le),o&&t(ae),o&&t(N),o&&t(J),o&&t(F),o&&t(G),o&&t(we),o&&t(C),o&&t(hi),o&&t(Ze),o&&t(fi),o&&t(Vt),E(Go),o&&t(ui),o&&t(lt),E(Ko),o&&t(mi),o&&t(Mt),E(Xo),o&&t(gi),o&&t(ze),E(Yo),E(tn),E(nn),E(sn),E(an),o&&t(_i),o&&t(Rt),E(rn),o&&t(bi),o&&t(Je),E(ln),E(fn),E(po),E(un),o&&t(vi),o&&t(Bt),E(mn),o&&t(Ti),o&&t(dt),E(gn),E(_n),E(fo),o&&t(ki),o&&t(jt),E(bn),o&&t(wi),o&&t(Xe),E(vn),E(yn),E(mo),E(Dn),o&&t(yi),o&&t(It),E(En),o&&t(Di),o&&t(Pe),E($n),E(Mn),E(_o),E(xn),E(Cn),o&&t(Ei),o&&t(Nt),E(Rn),o&&t($i),o&&t(Be),E(Pn),E(In),E(vo),E(Sn),o&&t(Fi),o&&t(Wt),E(Nn),o&&t(Vi),o&&t(je),E(On),E(Gn),E(ko),E(Kn),o&&t(zi),o&&t(Qt),E(Jn),o&&t(qi),o&&t(Ae),E(Xn),E(yo),E(os),E(Do),E(ns),o&&t(Mi),o&&t(Kt),E(ss),o&&t(xi),o&&t(ct),E(as),E(rs),o&&t(Ci),o&&t(Jt),E(ls),o&&t(Ri),o&&t(Le),E(ds),E(Fo),E(fs),E(Vo),E(us),o&&t(Pi),o&&t(Zt),E(ms),o&&t(Bi),o&&t(qe),E(gs),E(qo),E(ks),E(Mo),E(ws),o&&t(ji),o&&t(to),E(ys),o&&t(Ai),o&&t(Me),E(Ds),E(Co),E(zs),E(Ro),E(qs),o&&t(Li),o&&t(no),E(Ms),o&&t(Ii),o&&t(xe),E(xs),E(Bo),E(js),E(jo),E(As)}}}const B_={local:"debertav2",sections:[{local:"overview",title:"Overview"},{local:"transformers.DebertaV2Config",title:"DebertaV2Config"},{local:"transformers.DebertaV2Tokenizer",title:"DebertaV2Tokenizer"},{local:"transformers.DebertaV2Model",title:"DebertaV2Model"},{local:"transformers.DebertaV2PreTrainedModel",title:"DebertaV2PreTrainedModel"},{local:"transformers.DebertaV2ForMaskedLM",title:"DebertaV2ForMaskedLM"},{local:"transformers.DebertaV2ForSequenceClassification",title:"DebertaV2ForSequenceClassification"},{local:"transformers.DebertaV2ForTokenClassification",title:"DebertaV2ForTokenClassification"},{local:"transformers.DebertaV2ForQuestionAnswering",title:"DebertaV2ForQuestionAnswering"},{local:"transformers.TFDebertaV2Model",title:"TFDebertaV2Model"},{local:"transformers.TFDebertaV2PreTrainedModel",title:"TFDebertaV2PreTrainedModel"},{local:"transformers.TFDebertaV2ForMaskedLM",title:"TFDebertaV2ForMaskedLM"},{local:"transformers.TFDebertaV2ForSequenceClassification",title:"TFDebertaV2ForSequenceClassification"},{local:"transformers.TFDebertaV2ForTokenClassification",title:"TFDebertaV2ForTokenClassification"},{local:"transformers.TFDebertaV2ForQuestionAnswering",title:"TFDebertaV2ForQuestionAnswering"}],title:"DeBERTa-v2"};function j_(L,h,$){let{fw:m}=h;return L.$$set=g=>{"fw"in g&&$(0,m=g.fw)},[m]}class W_ extends m_{constructor(h){super();g_(this,h,j_,P_,__,{fw:0})}}export{W_ as default,B_ as metadata};
9,909
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/auto.mdx-91311c32.js
import{S as NVt,i as DVt,s as GVt,e as a,k as l,w as f,t as o,L as OVt,c as n,d as r,m as i,a as s,x as c,h as t,b as m,J as e,g as v,y as g,q as h,o as u,B as p}from"../../chunks/vendor-b1433968.js";import{T as Qct}from"../../chunks/Tip-c3840994.js";import{D as C}from"../../chunks/Docstring-ff504c58.js";import{C as w}from"../../chunks/CodeBlock-a320dbd7.js";import{I as X}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function qVt(Wl){let J,ye,se,de,Ue,ie,ue,Bo,Vl,km,zr,Ql,Hl,RF,Rm,Fe,Ze,Ul,gn,SF,hn,un,PF,Jl,pn,$F,Kl,Sm,ba;return{c(){J=a("p"),ye=o("If your "),se=a("code"),de=o("NewModelConfig"),Ue=o(" is a subclass of "),ie=a("code"),ue=o("PretrainedConfig"),Bo=o(`, make sure its `),Vl=a("code"),km=o("model_type"),zr=o(" attribute is set to the same key you use when registering the config (here "),Ql=a("code"),Hl=o('"new-model"'),RF=o(")."),Rm=l(),Fe=a("p"),Ze=o("Likewise, if your "),Ul=a("code"),gn=o("NewModel"),SF=o(" is a subclass of "),hn=a("a"),un=o("PreTrainedModel"),PF=o(`, make sure its `),Jl=a("code"),pn=o("config_class"),$F=o(` attribute is set to the same class you use when registering the model (here `),Kl=a("code"),Sm=o("NewModelConfig"),ba=o(")."),this.h()},l(eo){J=n(eo,"P",{});var me=s(J);ye=t(me,"If your "),se=n(me,"CODE",{});var IA=s(se);de=t(IA,"NewModelConfig"),IA.forEach(r),Ue=t(me," is a subclass of "),ie=n(me,"CODE",{});var Yl=s(ie);ue=t(Yl,"PretrainedConfig"),Yl.forEach(r),Bo=t(me,`, make sure its `),Vl=n(me,"CODE",{});var jA=s(Vl);km=t(jA,"model_type"),jA.forEach(r),zr=t(me," attribute is set to the same key you use when registering the config (here "),Ql=n(me,"CODE",{});var NA=s(Ql);Hl=t(NA,'"new-model"'),NA.forEach(r),RF=t(me,")."),me.forEach(r),Rm=i(eo),Fe=n(eo,"P",{});var ko=s(Fe);Ze=t(ko,"Likewise, if your "),Ul=n(ko,"CODE",{});var Ta=s(Ul);gn=t(Ta,"NewModel"),Ta.forEach(r),SF=t(ko," is a subclass of "),hn=n(ko,"A",{href:!0});var DA=s(hn);un=t(DA,"PreTrainedModel"),DA.forEach(r),PF=t(ko,`, make sure its `),Jl=n(ko,"CODE",{});var Pm=s(Jl);pn=t(Pm,"config_class"),Pm.forEach(r),$F=t(ko,` attribute is set to the same class you use when registering the model (here `),Kl=n(ko,"CODE",{});var GA=s(Kl);Sm=t(GA,"NewModelConfig"),GA.forEach(r),ba=t(ko,")."),ko.forEach(r),this.h()},h(){m(hn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel")},m(eo,me){v(eo,J,me),e(J,ye),e(J,se),e(se,de),e(J,Ue),e(J,ie),e(ie,ue),e(J,Bo),e(J,Vl),e(Vl,km),e(J,zr),e(J,Ql),e(Ql,Hl),e(J,RF),v(eo,Rm,me),v(eo,Fe,me),e(Fe,Ze),e(Fe,Ul),e(Ul,gn),e(Fe,SF),e(Fe,hn),e(hn,un),e(Fe,PF),e(Fe,Jl),e(Jl,pn),e(Fe,$F),e(Fe,Kl),e(Kl,Sm),e(Fe,ba)},d(eo){eo&&r(J),eo&&r(Rm),eo&&r(Fe)}}}function zVt(Wl){let J,ye,se,de,Ue;return{c(){J=a("p"),ye=o("Passing "),se=a("em"),de=o("use_auth_token=True"),Ue=o(" is required when you want to use a private model.")},l(ie){J=n(ie,"P",{});var ue=s(J);ye=t(ue,"Passing "),se=n(ue,"EM",{});var Bo=s(se);de=t(Bo,"use_auth_token=True"),Bo.forEach(r),Ue=t(ue," is required when you want to use a private model."),ue.forEach(r)},m(ie,ue){v(ie,J,ue),e(J,ye),e(J,se),e(se,de),e(J,Ue)},d(ie){ie&&r(J)}}}function XVt(Wl){let J,ye,se,de,Ue;return{c(){J=a("p"),ye=o("Passing "),se=a("em"),de=o("use_auth_token=True"),Ue=o(" is required when you want to use a private model.")},l(ie){J=n(ie,"P",{});var ue=s(J);ye=t(ue,"Passing "),se=n(ue,"EM",{});var Bo=s(se);de=t(Bo,"use_auth_token=True"),Bo.forEach(r),Ue=t(ue," is required when you want to use a private 
model."),ue.forEach(r)},m(ie,ue){v(ie,J,ue),e(J,ye),e(J,se),e(se,de),e(J,Ue)},d(ie){ie&&r(J)}}}function WVt(Wl){let J,ye,se,de,Ue,ie,ue,Bo,Vl,km,zr,Ql,Hl,RF,Rm,Fe,Ze,Ul,gn,SF,hn,un,PF,Jl,pn,$F,Kl,Sm,ba,eo,me,IA,Yl,jA,NA,ko,Ta,DA,Pm,GA,dwe,bEe,Zl,$m,YG,IF,mwe,ZG,fwe,TEe,_n,cwe,eO,gwe,hwe,oO,uwe,pwe,FEe,jF,MEe,OA,_we,EEe,Im,CEe,ei,jm,tO,NF,vwe,rO,bwe,yEe,Ro,DF,Twe,GF,Fwe,qA,Mwe,Ewe,Cwe,OF,ywe,aO,wwe,Awe,xwe,oo,qF,Lwe,nO,Bwe,kwe,oi,Rwe,sO,Swe,Pwe,lO,$we,Iwe,jwe,b,Nm,iO,Nwe,Dwe,zA,Gwe,Owe,qwe,Dm,dO,zwe,Xwe,XA,Wwe,Vwe,Qwe,Gm,mO,Hwe,Uwe,WA,Jwe,Kwe,Ywe,Om,fO,Zwe,eAe,VA,oAe,tAe,rAe,qm,cO,aAe,nAe,QA,sAe,lAe,iAe,zm,gO,dAe,mAe,HA,fAe,cAe,gAe,Xm,hO,hAe,uAe,UA,pAe,_Ae,vAe,Wm,uO,bAe,TAe,JA,FAe,MAe,EAe,Vm,pO,CAe,yAe,KA,wAe,AAe,xAe,Qm,_O,LAe,BAe,YA,kAe,RAe,SAe,Hm,vO,PAe,$Ae,ZA,IAe,jAe,NAe,Um,bO,DAe,GAe,e7,OAe,qAe,zAe,Jm,TO,XAe,WAe,o7,VAe,QAe,HAe,Km,FO,UAe,JAe,t7,KAe,YAe,ZAe,Ym,MO,e7e,o7e,r7,t7e,r7e,a7e,Zm,EO,n7e,s7e,a7,l7e,i7e,d7e,ef,CO,m7e,f7e,n7,c7e,g7e,h7e,of,yO,u7e,p7e,s7,_7e,v7e,b7e,tf,wO,T7e,F7e,l7,M7e,E7e,C7e,rf,AO,y7e,w7e,i7,A7e,x7e,L7e,af,xO,B7e,k7e,d7,R7e,S7e,P7e,nf,LO,$7e,I7e,m7,j7e,N7e,D7e,sf,BO,G7e,O7e,f7,q7e,z7e,X7e,lf,kO,W7e,V7e,c7,Q7e,H7e,U7e,df,RO,J7e,K7e,g7,Y7e,Z7e,exe,mf,SO,oxe,txe,h7,rxe,axe,nxe,ff,PO,sxe,lxe,u7,ixe,dxe,mxe,cf,$O,fxe,cxe,p7,gxe,hxe,uxe,gf,IO,pxe,_xe,_7,vxe,bxe,Txe,hf,jO,Fxe,Mxe,v7,Exe,Cxe,yxe,uf,NO,wxe,Axe,b7,xxe,Lxe,Bxe,pf,DO,kxe,Rxe,T7,Sxe,Pxe,$xe,_f,GO,Ixe,jxe,F7,Nxe,Dxe,Gxe,vf,OO,Oxe,qxe,M7,zxe,Xxe,Wxe,bf,qO,Vxe,Qxe,E7,Hxe,Uxe,Jxe,Tf,zO,Kxe,Yxe,C7,Zxe,e6e,o6e,Ff,XO,t6e,r6e,y7,a6e,n6e,s6e,Mf,WO,l6e,i6e,w7,d6e,m6e,f6e,Ef,VO,c6e,g6e,A7,h6e,u6e,p6e,Cf,QO,_6e,v6e,x7,b6e,T6e,F6e,yf,HO,M6e,E6e,L7,C6e,y6e,w6e,wf,UO,A6e,x6e,B7,L6e,B6e,k6e,Af,JO,R6e,S6e,k7,P6e,$6e,I6e,xf,KO,j6e,N6e,R7,D6e,G6e,O6e,Lf,YO,q6e,z6e,S7,X6e,W6e,V6e,Bf,ZO,Q6e,H6e,P7,U6e,J6e,K6e,kf,eq,Y6e,Z6e,$7,e8e,o8e,t8e,Rf,oq,r8e,a8e,I7,n8e,s8e,l8e,Sf,tq,i8e,d8e,j7,m8e,f8e,c8e,Pf,rq,g8e,h8e,N7,u8e,p8e,_8e,$f,aq,v8e,b8e,D7,T8e,F8e,M8e,If,nq,E8e,C8e,G7,y8e,w8e,A8e,jf,sq,x8e,L8e,O7,B8e,k8e,R8e,Nf,lq,S8e,P8e,q7,$8e,I8e,j8e,Df,iq,N8e,D8e,z7,G8e,O8e,q8e,Gf,dq,z8e,X8e,X7,W8e,V8e,Q8e,Of,mq,H8e,U8e,W7,J8e,K8e,Y8e,qf,fq,Z8e,eLe,V7,oLe,tLe,rLe,zf,cq,aLe,nLe,Q7,sLe,lLe,iLe,Xf,gq,dLe,mLe,H7,fLe,cLe,gLe,Wf,hq,hLe,uLe,U7,pLe,_Le,vLe,Vf,uq,bLe,TLe,J7,FLe,MLe,ELe,Qf,pq,CLe,yLe,K7,wLe,ALe,xLe,Hf,_q,LLe,BLe,Y7,kLe,RLe,SLe,Uf,vq,PLe,$Le,Z7,ILe,jLe,NLe,Jf,bq,DLe,GLe,ex,OLe,qLe,zLe,Kf,Tq,XLe,WLe,ox,VLe,QLe,HLe,Yf,Fq,ULe,JLe,tx,KLe,YLe,ZLe,Zf,Mq,eBe,oBe,rx,tBe,rBe,aBe,ec,Eq,nBe,sBe,ax,lBe,iBe,dBe,oc,Cq,mBe,fBe,nx,cBe,gBe,hBe,tc,yq,uBe,pBe,sx,_Be,vBe,bBe,rc,wq,TBe,FBe,lx,MBe,EBe,CBe,ac,Aq,yBe,wBe,ix,ABe,xBe,LBe,nc,xq,BBe,kBe,dx,RBe,SBe,PBe,sc,Lq,$Be,IBe,mx,jBe,NBe,DBe,lc,Bq,GBe,OBe,fx,qBe,zBe,XBe,ic,kq,WBe,VBe,cx,QBe,HBe,UBe,dc,Rq,JBe,KBe,gx,YBe,ZBe,e9e,mc,Sq,o9e,t9e,hx,r9e,a9e,n9e,Pq,s9e,l9e,zF,i9e,fc,XF,d9e,$q,m9e,wEe,ti,cc,Iq,WF,f9e,jq,c9e,AEe,So,VF,g9e,QF,h9e,ux,u9e,p9e,_9e,HF,v9e,Nq,b9e,T9e,F9e,to,UF,M9e,Dq,E9e,C9e,Fa,y9e,Gq,w9e,A9e,Oq,x9e,L9e,qq,B9e,k9e,R9e,E,vn,zq,S9e,P9e,px,$9e,I9e,_x,j9e,N9e,D9e,bn,Xq,G9e,O9e,vx,q9e,z9e,bx,X9e,W9e,V9e,Tn,Wq,Q9e,H9e,Tx,U9e,J9e,Fx,K9e,Y9e,Z9e,gc,Vq,eke,oke,Mx,tke,rke,ake,Fn,Qq,nke,ske,Ex,lke,ike,Cx,dke,mke,fke,hc,Hq,cke,gke,yx,hke,uke,pke,uc,Uq,_ke,vke,wx,bke,Tke,Fke,pc,Jq,Mke,Eke,Ax,Cke,yke,wke,Mn,Kq,Ake,xke,xx,Lke,Bke,Lx,kke,Rke,Ske,En,Yq,Pke,$ke,Bx,Ike,jke,kx,Nke,Dke,Gke,Cn,Zq,Oke,qke,Rx,zke,Xke,Sx,Wke,Vke,Qke,_c,ez,Hke,Uke,Px,Jke,Kke,Yke,vc,oz,Zke,eRe,$x,oRe,tRe,rRe,yn,tz,aRe,nRe,Ix,sRe,lRe,jx,iRe,dRe,mRe,bc,rz,fRe,cRe,Nx,gRe,hRe,uRe,wn,az,pRe,_Re,Dx,vRe,bRe,Gx,TRe,FRe,MRe,An,nz,ERe,CRe,Ox,yRe
,wRe,qx,ARe,xRe,LRe,xn,sz,BRe,kRe,zx,RRe,SRe,lz,PRe,$Re,IRe,Tc,iz,jRe,NRe,Xx,DRe,GRe,ORe,Ln,dz,qRe,zRe,Wx,XRe,WRe,Vx,VRe,QRe,HRe,Fc,mz,URe,JRe,Qx,KRe,YRe,ZRe,Bn,fz,eSe,oSe,Hx,tSe,rSe,Ux,aSe,nSe,sSe,kn,cz,lSe,iSe,Jx,dSe,mSe,Kx,fSe,cSe,gSe,Rn,gz,hSe,uSe,Yx,pSe,_Se,Zx,vSe,bSe,TSe,Mc,hz,FSe,MSe,e6,ESe,CSe,ySe,Sn,uz,wSe,ASe,o6,xSe,LSe,t6,BSe,kSe,RSe,Ec,pz,SSe,PSe,r6,$Se,ISe,jSe,Pn,_z,NSe,DSe,a6,GSe,OSe,n6,qSe,zSe,XSe,$n,vz,WSe,VSe,s6,QSe,HSe,l6,USe,JSe,KSe,In,bz,YSe,ZSe,i6,ePe,oPe,d6,tPe,rPe,aPe,Cc,Tz,nPe,sPe,m6,lPe,iPe,dPe,jn,Fz,mPe,fPe,f6,cPe,gPe,c6,hPe,uPe,pPe,Nn,Mz,_Pe,vPe,g6,bPe,TPe,h6,FPe,MPe,EPe,Dn,Ez,CPe,yPe,u6,wPe,APe,p6,xPe,LPe,BPe,Gn,Cz,kPe,RPe,_6,SPe,PPe,v6,$Pe,IPe,jPe,On,yz,NPe,DPe,b6,GPe,OPe,T6,qPe,zPe,XPe,yc,wz,WPe,VPe,F6,QPe,HPe,UPe,qn,Az,JPe,KPe,M6,YPe,ZPe,E6,e$e,o$e,t$e,wc,xz,r$e,a$e,C6,n$e,s$e,l$e,Ac,Lz,i$e,d$e,y6,m$e,f$e,c$e,zn,Bz,g$e,h$e,w6,u$e,p$e,A6,_$e,v$e,b$e,Xn,kz,T$e,F$e,x6,M$e,E$e,L6,C$e,y$e,w$e,Wn,Rz,A$e,x$e,B6,L$e,B$e,k6,k$e,R$e,S$e,Vn,Sz,P$e,$$e,R6,I$e,j$e,S6,N$e,D$e,G$e,Qn,Pz,O$e,q$e,P6,z$e,X$e,$6,W$e,V$e,Q$e,Hn,$z,H$e,U$e,I6,J$e,K$e,j6,Y$e,Z$e,eIe,Un,Iz,oIe,tIe,N6,rIe,aIe,D6,nIe,sIe,lIe,xc,jz,iIe,dIe,G6,mIe,fIe,cIe,Lc,Nz,gIe,hIe,O6,uIe,pIe,_Ie,Bc,Dz,vIe,bIe,q6,TIe,FIe,MIe,Jn,Gz,EIe,CIe,z6,yIe,wIe,X6,AIe,xIe,LIe,kc,Oz,BIe,kIe,W6,RIe,SIe,PIe,Kn,qz,$Ie,IIe,V6,jIe,NIe,Q6,DIe,GIe,OIe,Yn,zz,qIe,zIe,H6,XIe,WIe,U6,VIe,QIe,HIe,Zn,Xz,UIe,JIe,J6,KIe,YIe,K6,ZIe,eje,oje,es,Wz,tje,rje,Y6,aje,nje,Z6,sje,lje,ije,os,Vz,dje,mje,e8,fje,cje,o8,gje,hje,uje,Rc,Qz,pje,_je,t8,vje,bje,Tje,Sc,Hz,Fje,Mje,r8,Eje,Cje,yje,ts,Uz,wje,Aje,a8,xje,Lje,n8,Bje,kje,Rje,rs,Jz,Sje,Pje,s8,$je,Ije,l8,jje,Nje,Dje,as,Kz,Gje,Oje,i8,qje,zje,d8,Xje,Wje,Vje,Pc,Yz,Qje,Hje,m8,Uje,Jje,Kje,$c,Zz,Yje,Zje,f8,eNe,oNe,tNe,Ic,eX,rNe,aNe,c8,nNe,sNe,lNe,jc,oX,iNe,dNe,g8,mNe,fNe,cNe,Nc,tX,gNe,hNe,h8,uNe,pNe,_Ne,ns,rX,vNe,bNe,u8,TNe,FNe,p8,MNe,ENe,CNe,ss,aX,yNe,wNe,_8,ANe,xNe,v8,LNe,BNe,kNe,nX,RNe,SNe,JF,PNe,Dc,KF,$Ne,sX,INe,xEe,ri,Gc,lX,YF,jNe,iX,NNe,LEe,Gr,ZF,DNe,eM,GNe,b8,ONe,qNe,zNe,oM,XNe,dX,WNe,VNe,QNe,we,tM,HNe,mX,UNe,JNe,Ma,KNe,fX,YNe,ZNe,cX,eDe,oDe,gX,tDe,rDe,aDe,fe,Oc,hX,nDe,sDe,T8,lDe,iDe,dDe,qc,uX,mDe,fDe,F8,cDe,gDe,hDe,zc,pX,uDe,pDe,M8,_De,vDe,bDe,Xc,_X,TDe,FDe,E8,MDe,EDe,CDe,Wc,vX,yDe,wDe,C8,ADe,xDe,LDe,Vc,bX,BDe,kDe,y8,RDe,SDe,PDe,Qc,TX,$De,IDe,w8,jDe,NDe,DDe,Hc,FX,GDe,ODe,A8,qDe,zDe,XDe,Uc,MX,WDe,VDe,x8,QDe,HDe,UDe,Jc,EX,JDe,KDe,L8,YDe,ZDe,eGe,Kc,oGe,CX,tGe,rGe,rM,BEe,ai,Yc,yX,aM,aGe,wX,nGe,kEe,Or,nM,sGe,sM,lGe,B8,iGe,dGe,mGe,lM,fGe,AX,cGe,gGe,hGe,Ae,iM,uGe,xX,pGe,_Ge,ni,vGe,LX,bGe,TGe,BX,FGe,MGe,EGe,Je,Zc,kX,CGe,yGe,k8,wGe,AGe,xGe,eg,RX,LGe,BGe,R8,kGe,RGe,SGe,og,SX,PGe,$Ge,S8,IGe,jGe,NGe,tg,PX,DGe,GGe,P8,OGe,qGe,zGe,rg,$X,XGe,WGe,$8,VGe,QGe,HGe,ag,IX,UGe,JGe,I8,KGe,YGe,ZGe,ng,jX,eOe,oOe,j8,tOe,rOe,aOe,sg,nOe,NX,sOe,lOe,dM,REe,si,lg,DX,mM,iOe,GX,dOe,SEe,Po,fM,mOe,li,fOe,OX,cOe,gOe,qX,hOe,uOe,pOe,cM,_Oe,zX,vOe,bOe,TOe,wt,gM,FOe,XX,MOe,EOe,ii,COe,WX,yOe,wOe,VX,AOe,xOe,LOe,QX,BOe,kOe,hM,ROe,xe,uM,SOe,HX,POe,$Oe,Ea,IOe,UX,jOe,NOe,JX,DOe,GOe,KX,OOe,qOe,zOe,F,ig,YX,XOe,WOe,N8,VOe,QOe,HOe,dg,ZX,UOe,JOe,D8,KOe,YOe,ZOe,mg,eW,eqe,oqe,G8,tqe,rqe,aqe,fg,oW,nqe,sqe,O8,lqe,iqe,dqe,cg,tW,mqe,fqe,q8,cqe,gqe,hqe,gg,rW,uqe,pqe,z8,_qe,vqe,bqe,hg,aW,Tqe,Fqe,X8,Mqe,Eqe,Cqe,ug,nW,yqe,wqe,W8,Aqe,xqe,Lqe,pg,sW,Bqe,kqe,V8,Rqe,Sqe,Pqe,_g,lW,$qe,Iqe,Q8,jqe,Nqe,Dqe,vg,iW,Gqe,Oqe,H8,qqe,zqe,Xqe,bg,dW,Wqe,Vqe,U8,Qqe,Hqe,Uqe,Tg,mW,Jqe,Kqe,J8,Yqe,Zqe,eze,Fg,fW,oze,tze,K8,rze,aze,nze,Mg,cW,sze,lze,Y8,ize,dze,mze,Eg,gW,fze,cze,Z8,gze,hze,uze,Cg,hW,pze,_ze,eL,vze,bze,Tze,yg,uW,Fze,Mze,oL,Eze,Cze,yze,wg,pW,wze,Aze,tL,xze,Lze,Bze,Ag,_W,kze,Rze
,rL,Sze,Pze,$ze,xg,vW,Ize,jze,aL,Nze,Dze,Gze,Lg,bW,Oze,qze,nL,zze,Xze,Wze,Bg,TW,Vze,Qze,sL,Hze,Uze,Jze,kg,FW,Kze,Yze,lL,Zze,eXe,oXe,ls,MW,tXe,rXe,iL,aXe,nXe,dL,sXe,lXe,iXe,Rg,EW,dXe,mXe,mL,fXe,cXe,gXe,Sg,CW,hXe,uXe,fL,pXe,_Xe,vXe,Pg,yW,bXe,TXe,cL,FXe,MXe,EXe,$g,wW,CXe,yXe,gL,wXe,AXe,xXe,Ig,AW,LXe,BXe,hL,kXe,RXe,SXe,jg,xW,PXe,$Xe,uL,IXe,jXe,NXe,Ng,LW,DXe,GXe,pL,OXe,qXe,zXe,Dg,BW,XXe,WXe,_L,VXe,QXe,HXe,Gg,kW,UXe,JXe,vL,KXe,YXe,ZXe,Og,RW,eWe,oWe,bL,tWe,rWe,aWe,qg,SW,nWe,sWe,TL,lWe,iWe,dWe,zg,PW,mWe,fWe,FL,cWe,gWe,hWe,Xg,$W,uWe,pWe,ML,_We,vWe,bWe,Wg,IW,TWe,FWe,EL,MWe,EWe,CWe,Vg,jW,yWe,wWe,CL,AWe,xWe,LWe,Qg,NW,BWe,kWe,yL,RWe,SWe,PWe,Hg,DW,$We,IWe,wL,jWe,NWe,DWe,Ug,GW,GWe,OWe,AL,qWe,zWe,XWe,Jg,OW,WWe,VWe,xL,QWe,HWe,UWe,Kg,qW,JWe,KWe,LL,YWe,ZWe,eVe,Yg,zW,oVe,tVe,BL,rVe,aVe,nVe,Zg,XW,sVe,lVe,kL,iVe,dVe,mVe,eh,WW,fVe,cVe,RL,gVe,hVe,uVe,oh,VW,pVe,_Ve,SL,vVe,bVe,TVe,th,QW,FVe,MVe,PL,EVe,CVe,yVe,rh,HW,wVe,AVe,$L,xVe,LVe,BVe,ah,UW,kVe,RVe,IL,SVe,PVe,$Ve,nh,JW,IVe,jVe,jL,NVe,DVe,GVe,sh,KW,OVe,qVe,NL,zVe,XVe,WVe,lh,YW,VVe,QVe,DL,HVe,UVe,JVe,ih,ZW,KVe,YVe,GL,ZVe,eQe,oQe,dh,eV,tQe,rQe,OL,aQe,nQe,sQe,mh,oV,lQe,iQe,qL,dQe,mQe,fQe,fh,tV,cQe,gQe,zL,hQe,uQe,pQe,ch,rV,_Qe,vQe,XL,bQe,TQe,FQe,gh,aV,MQe,EQe,WL,CQe,yQe,wQe,hh,nV,AQe,xQe,VL,LQe,BQe,kQe,uh,sV,RQe,SQe,QL,PQe,$Qe,IQe,ph,lV,jQe,NQe,HL,DQe,GQe,OQe,_h,iV,qQe,zQe,UL,XQe,WQe,VQe,vh,dV,QQe,HQe,JL,UQe,JQe,KQe,bh,mV,YQe,ZQe,KL,eHe,oHe,tHe,Th,fV,rHe,aHe,YL,nHe,sHe,lHe,Fh,cV,iHe,dHe,ZL,mHe,fHe,cHe,Mh,gV,gHe,hHe,eB,uHe,pHe,_He,Eh,hV,vHe,bHe,oB,THe,FHe,MHe,Ch,uV,EHe,CHe,tB,yHe,wHe,AHe,yh,pV,xHe,LHe,rB,BHe,kHe,RHe,wh,_V,SHe,PHe,aB,$He,IHe,jHe,Ah,NHe,vV,DHe,GHe,bV,OHe,qHe,TV,zHe,XHe,pM,PEe,di,xh,FV,_M,WHe,MV,VHe,$Ee,$o,vM,QHe,mi,HHe,EV,UHe,JHe,CV,KHe,YHe,ZHe,bM,eUe,yV,oUe,tUe,rUe,At,TM,aUe,wV,nUe,sUe,fi,lUe,AV,iUe,dUe,xV,mUe,fUe,cUe,LV,gUe,hUe,FM,uUe,Le,MM,pUe,BV,_Ue,vUe,Ca,bUe,kV,TUe,FUe,RV,MUe,EUe,SV,CUe,yUe,wUe,k,Lh,PV,AUe,xUe,nB,LUe,BUe,kUe,Bh,$V,RUe,SUe,sB,PUe,$Ue,IUe,kh,IV,jUe,NUe,lB,DUe,GUe,OUe,Rh,jV,qUe,zUe,iB,XUe,WUe,VUe,Sh,NV,QUe,HUe,dB,UUe,JUe,KUe,Ph,DV,YUe,ZUe,mB,eJe,oJe,tJe,$h,GV,rJe,aJe,fB,nJe,sJe,lJe,Ih,OV,iJe,dJe,cB,mJe,fJe,cJe,jh,qV,gJe,hJe,gB,uJe,pJe,_Je,Nh,zV,vJe,bJe,hB,TJe,FJe,MJe,Dh,XV,EJe,CJe,uB,yJe,wJe,AJe,Gh,WV,xJe,LJe,pB,BJe,kJe,RJe,Oh,VV,SJe,PJe,_B,$Je,IJe,jJe,qh,QV,NJe,DJe,vB,GJe,OJe,qJe,zh,HV,zJe,XJe,bB,WJe,VJe,QJe,Xh,UV,HJe,UJe,TB,JJe,KJe,YJe,Wh,JV,ZJe,eKe,FB,oKe,tKe,rKe,Vh,KV,aKe,nKe,MB,sKe,lKe,iKe,Qh,YV,dKe,mKe,EB,fKe,cKe,gKe,Hh,ZV,hKe,uKe,CB,pKe,_Ke,vKe,Uh,eQ,bKe,TKe,yB,FKe,MKe,EKe,Jh,oQ,CKe,yKe,wB,wKe,AKe,xKe,Kh,tQ,LKe,BKe,AB,kKe,RKe,SKe,Yh,rQ,PKe,$Ke,xB,IKe,jKe,NKe,Zh,aQ,DKe,GKe,LB,OKe,qKe,zKe,eu,nQ,XKe,WKe,BB,VKe,QKe,HKe,ou,sQ,UKe,JKe,kB,KKe,YKe,ZKe,tu,lQ,eYe,oYe,RB,tYe,rYe,aYe,ru,iQ,nYe,sYe,SB,lYe,iYe,dYe,au,dQ,mYe,fYe,PB,cYe,gYe,hYe,nu,mQ,uYe,pYe,$B,_Ye,vYe,bYe,su,fQ,TYe,FYe,IB,MYe,EYe,CYe,lu,cQ,yYe,wYe,jB,AYe,xYe,LYe,iu,gQ,BYe,kYe,NB,RYe,SYe,PYe,du,hQ,$Ye,IYe,DB,jYe,NYe,DYe,mu,uQ,GYe,OYe,GB,qYe,zYe,XYe,fu,WYe,pQ,VYe,QYe,_Q,HYe,UYe,vQ,JYe,KYe,EM,IEe,ci,cu,bQ,CM,YYe,TQ,ZYe,jEe,Io,yM,eZe,gi,oZe,FQ,tZe,rZe,MQ,aZe,nZe,sZe,wM,lZe,EQ,iZe,dZe,mZe,xt,AM,fZe,CQ,cZe,gZe,hi,hZe,yQ,uZe,pZe,wQ,_Ze,vZe,bZe,AQ,TZe,FZe,xM,MZe,Be,LM,EZe,xQ,CZe,yZe,ya,wZe,LQ,AZe,xZe,BQ,LZe,BZe,kQ,kZe,RZe,SZe,I,gu,RQ,PZe,$Ze,OB,IZe,jZe,NZe,hu,SQ,DZe,GZe,qB,OZe,qZe,zZe,uu,PQ,XZe,WZe,zB,VZe,QZe,HZe,pu,$Q,UZe,JZe,XB,KZe,YZe,ZZe,_u,IQ,eeo,oeo,WB,teo,reo,aeo,vu,jQ,neo,seo,VB,leo,ieo,deo,bu,NQ,meo,feo,QB,ceo,geo,heo,Tu,DQ,ueo,peo,HB,_eo,veo,beo,Fu,GQ,Teo,Feo,UB,Meo,Eeo,Ceo,Mu,OQ,yeo,weo,JB,Aeo,xeo,Leo,Eu,qQ,Beo,keo,KB,Reo,Seo,Peo,Cu,zQ,$eo,Ieo,YB,jeo,Neo,Deo,yu,XQ,Geo,Oeo,
ZB,qeo,zeo,Xeo,wu,WQ,Weo,Veo,e9,Qeo,Heo,Ueo,Au,VQ,Jeo,Keo,o9,Yeo,Zeo,eoo,xu,QQ,ooo,too,t9,roo,aoo,noo,Lu,HQ,soo,loo,r9,ioo,doo,moo,Bu,UQ,foo,coo,a9,goo,hoo,uoo,ku,JQ,poo,_oo,n9,voo,boo,Too,Ru,KQ,Foo,Moo,s9,Eoo,Coo,yoo,Su,YQ,woo,Aoo,l9,xoo,Loo,Boo,Pu,ZQ,koo,Roo,i9,Soo,Poo,$oo,$u,eH,Ioo,joo,d9,Noo,Doo,Goo,Iu,oH,Ooo,qoo,m9,zoo,Xoo,Woo,ju,tH,Voo,Qoo,f9,Hoo,Uoo,Joo,Nu,rH,Koo,Yoo,c9,Zoo,eto,oto,Du,aH,tto,rto,g9,ato,nto,sto,Gu,nH,lto,ito,h9,dto,mto,fto,Ou,sH,cto,gto,u9,hto,uto,pto,qu,lH,_to,vto,p9,bto,Tto,Fto,zu,Mto,iH,Eto,Cto,dH,yto,wto,mH,Ato,xto,BM,NEe,ui,Xu,fH,kM,Lto,cH,Bto,DEe,jo,RM,kto,pi,Rto,gH,Sto,Pto,hH,$to,Ito,jto,SM,Nto,uH,Dto,Gto,Oto,Lt,PM,qto,pH,zto,Xto,_i,Wto,_H,Vto,Qto,vH,Hto,Uto,Jto,bH,Kto,Yto,$M,Zto,ke,IM,ero,TH,oro,tro,wa,rro,FH,aro,nro,MH,sro,lro,EH,iro,dro,mro,$,Wu,CH,fro,cro,_9,gro,hro,uro,Vu,yH,pro,_ro,v9,vro,bro,Tro,Qu,wH,Fro,Mro,b9,Ero,Cro,yro,Hu,AH,wro,Aro,T9,xro,Lro,Bro,Uu,xH,kro,Rro,F9,Sro,Pro,$ro,Ju,LH,Iro,jro,M9,Nro,Dro,Gro,Ku,BH,Oro,qro,E9,zro,Xro,Wro,Yu,kH,Vro,Qro,C9,Hro,Uro,Jro,Zu,RH,Kro,Yro,y9,Zro,eao,oao,ep,SH,tao,rao,w9,aao,nao,sao,op,PH,lao,iao,A9,dao,mao,fao,tp,$H,cao,gao,x9,hao,uao,pao,rp,IH,_ao,vao,L9,bao,Tao,Fao,ap,jH,Mao,Eao,B9,Cao,yao,wao,np,NH,Aao,xao,k9,Lao,Bao,kao,sp,DH,Rao,Sao,R9,Pao,$ao,Iao,lp,GH,jao,Nao,S9,Dao,Gao,Oao,ip,OH,qao,zao,P9,Xao,Wao,Vao,dp,qH,Qao,Hao,$9,Uao,Jao,Kao,mp,zH,Yao,Zao,I9,eno,ono,tno,fp,XH,rno,ano,j9,nno,sno,lno,cp,WH,ino,dno,N9,mno,fno,cno,gp,VH,gno,hno,D9,uno,pno,_no,hp,QH,vno,bno,G9,Tno,Fno,Mno,up,HH,Eno,Cno,O9,yno,wno,Ano,pp,UH,xno,Lno,q9,Bno,kno,Rno,_p,JH,Sno,Pno,z9,$no,Ino,jno,vp,KH,Nno,Dno,X9,Gno,Ono,qno,bp,YH,zno,Xno,ZH,Wno,Vno,Qno,Tp,eU,Hno,Uno,W9,Jno,Kno,Yno,Fp,oU,Zno,eso,V9,oso,tso,rso,Mp,aso,tU,nso,sso,rU,lso,iso,aU,dso,mso,jM,GEe,vi,Ep,nU,NM,fso,sU,cso,OEe,No,DM,gso,bi,hso,lU,uso,pso,iU,_so,vso,bso,GM,Tso,dU,Fso,Mso,Eso,Bt,OM,Cso,mU,yso,wso,Ti,Aso,fU,xso,Lso,cU,Bso,kso,Rso,gU,Sso,Pso,qM,$so,Re,zM,Iso,hU,jso,Nso,Aa,Dso,uU,Gso,Oso,pU,qso,zso,_U,Xso,Wso,Vso,ne,Cp,vU,Qso,Hso,Q9,Uso,Jso,Kso,yp,bU,Yso,Zso,H9,elo,olo,tlo,wp,TU,rlo,alo,U9,nlo,slo,llo,Ap,FU,ilo,dlo,J9,mlo,flo,clo,xp,MU,glo,hlo,K9,ulo,plo,_lo,Lp,EU,vlo,blo,Y9,Tlo,Flo,Mlo,Bp,CU,Elo,Clo,Z9,ylo,wlo,Alo,kp,yU,xlo,Llo,ek,Blo,klo,Rlo,Rp,wU,Slo,Plo,ok,$lo,Ilo,jlo,Sp,AU,Nlo,Dlo,tk,Glo,Olo,qlo,Pp,xU,zlo,Xlo,rk,Wlo,Vlo,Qlo,$p,LU,Hlo,Ulo,ak,Jlo,Klo,Ylo,Ip,BU,Zlo,eio,nk,oio,tio,rio,jp,kU,aio,nio,sk,sio,lio,iio,Np,RU,dio,mio,lk,fio,cio,gio,Dp,hio,SU,uio,pio,PU,_io,vio,$U,bio,Tio,XM,qEe,Fi,Gp,IU,WM,Fio,jU,Mio,zEe,Do,VM,Eio,Mi,Cio,NU,yio,wio,DU,Aio,xio,Lio,QM,Bio,GU,kio,Rio,Sio,kt,HM,Pio,OU,$io,Iio,Ei,jio,qU,Nio,Dio,zU,Gio,Oio,qio,XU,zio,Xio,UM,Wio,Se,JM,Vio,WU,Qio,Hio,xa,Uio,VU,Jio,Kio,QU,Yio,Zio,HU,edo,odo,tdo,A,Op,UU,rdo,ado,ik,ndo,sdo,ldo,qp,JU,ido,ddo,dk,mdo,fdo,cdo,zp,KU,gdo,hdo,mk,udo,pdo,_do,Xp,YU,vdo,bdo,fk,Tdo,Fdo,Mdo,Wp,ZU,Edo,Cdo,ck,ydo,wdo,Ado,Vp,eJ,xdo,Ldo,gk,Bdo,kdo,Rdo,Qp,oJ,Sdo,Pdo,hk,$do,Ido,jdo,Hp,tJ,Ndo,Ddo,uk,Gdo,Odo,qdo,Up,rJ,zdo,Xdo,pk,Wdo,Vdo,Qdo,Jp,aJ,Hdo,Udo,_k,Jdo,Kdo,Ydo,Kp,nJ,Zdo,emo,vk,omo,tmo,rmo,Yp,sJ,amo,nmo,bk,smo,lmo,imo,Zp,lJ,dmo,mmo,Tk,fmo,cmo,gmo,e_,iJ,hmo,umo,Fk,pmo,_mo,vmo,o_,dJ,bmo,Tmo,Mk,Fmo,Mmo,Emo,t_,mJ,Cmo,ymo,Ek,wmo,Amo,xmo,r_,fJ,Lmo,Bmo,Ck,kmo,Rmo,Smo,a_,cJ,Pmo,$mo,yk,Imo,jmo,Nmo,n_,gJ,Dmo,Gmo,wk,Omo,qmo,zmo,s_,hJ,Xmo,Wmo,Ak,Vmo,Qmo,Hmo,l_,uJ,Umo,Jmo,xk,Kmo,Ymo,Zmo,i_,pJ,efo,ofo,Lk,tfo,rfo,afo,d_,_J,nfo,sfo,Bk,lfo,ifo,dfo,m_,vJ,mfo,ffo,kk,cfo,gfo,hfo,f_,bJ,ufo,pfo,Rk,_fo,vfo,bfo,c_,TJ,Tfo,Ffo,Sk,Mfo,Efo,Cfo,g_,FJ,yfo,wfo,Pk,Afo,xfo,Lfo,h_,MJ,Bfo,kfo,$k,Rfo,Sfo,Pfo,u_,EJ,$fo,Ifo,Ik,jfo,Nfo,Dfo,p_,CJ,Gfo,Ofo,jk,qfo,zfo,Xfo,__,yJ,Wf
o,Vfo,Nk,Qfo,Hfo,Ufo,v_,wJ,Jfo,Kfo,Dk,Yfo,Zfo,eco,b_,AJ,oco,tco,Gk,rco,aco,nco,T_,xJ,sco,lco,Ok,ico,dco,mco,F_,LJ,fco,cco,qk,gco,hco,uco,M_,BJ,pco,_co,zk,vco,bco,Tco,E_,kJ,Fco,Mco,Xk,Eco,Cco,yco,C_,RJ,wco,Aco,Wk,xco,Lco,Bco,y_,SJ,kco,Rco,Vk,Sco,Pco,$co,w_,PJ,Ico,jco,Qk,Nco,Dco,Gco,A_,$J,Oco,qco,Hk,zco,Xco,Wco,x_,Vco,IJ,Qco,Hco,jJ,Uco,Jco,NJ,Kco,Yco,KM,XEe,Ci,L_,DJ,YM,Zco,GJ,ego,WEe,Go,ZM,ogo,yi,tgo,OJ,rgo,ago,qJ,ngo,sgo,lgo,eE,igo,zJ,dgo,mgo,fgo,Rt,oE,cgo,XJ,ggo,hgo,wi,ugo,WJ,pgo,_go,VJ,vgo,bgo,Tgo,QJ,Fgo,Mgo,tE,Ego,Pe,rE,Cgo,HJ,ygo,wgo,La,Ago,UJ,xgo,Lgo,JJ,Bgo,kgo,KJ,Rgo,Sgo,Pgo,q,B_,YJ,$go,Igo,Uk,jgo,Ngo,Dgo,k_,ZJ,Ggo,Ogo,Jk,qgo,zgo,Xgo,R_,eK,Wgo,Vgo,Kk,Qgo,Hgo,Ugo,S_,oK,Jgo,Kgo,Yk,Ygo,Zgo,eho,P_,tK,oho,tho,Zk,rho,aho,nho,$_,rK,sho,lho,eR,iho,dho,mho,I_,aK,fho,cho,oR,gho,hho,uho,j_,nK,pho,_ho,tR,vho,bho,Tho,N_,sK,Fho,Mho,rR,Eho,Cho,yho,D_,lK,who,Aho,aR,xho,Lho,Bho,G_,iK,kho,Rho,nR,Sho,Pho,$ho,O_,dK,Iho,jho,sR,Nho,Dho,Gho,q_,mK,Oho,qho,lR,zho,Xho,Who,z_,fK,Vho,Qho,iR,Hho,Uho,Jho,X_,cK,Kho,Yho,dR,Zho,euo,ouo,W_,gK,tuo,ruo,mR,auo,nuo,suo,V_,hK,luo,iuo,fR,duo,muo,fuo,Q_,uK,cuo,guo,cR,huo,uuo,puo,H_,pK,_uo,vuo,gR,buo,Tuo,Fuo,U_,_K,Muo,Euo,hR,Cuo,yuo,wuo,J_,vK,Auo,xuo,uR,Luo,Buo,kuo,K_,bK,Ruo,Suo,pR,Puo,$uo,Iuo,Y_,TK,juo,Nuo,_R,Duo,Guo,Ouo,Z_,FK,quo,zuo,vR,Xuo,Wuo,Vuo,ev,Quo,MK,Huo,Uuo,EK,Juo,Kuo,CK,Yuo,Zuo,aE,VEe,Ai,ov,yK,nE,epo,wK,opo,QEe,Oo,sE,tpo,xi,rpo,AK,apo,npo,xK,spo,lpo,ipo,lE,dpo,LK,mpo,fpo,cpo,St,iE,gpo,BK,hpo,upo,Li,ppo,kK,_po,vpo,RK,bpo,Tpo,Fpo,SK,Mpo,Epo,dE,Cpo,$e,mE,ypo,PK,wpo,Apo,Ba,xpo,$K,Lpo,Bpo,IK,kpo,Rpo,jK,Spo,Ppo,$po,qr,tv,NK,Ipo,jpo,bR,Npo,Dpo,Gpo,rv,DK,Opo,qpo,TR,zpo,Xpo,Wpo,av,GK,Vpo,Qpo,FR,Hpo,Upo,Jpo,nv,OK,Kpo,Ypo,MR,Zpo,e_o,o_o,sv,qK,t_o,r_o,ER,a_o,n_o,s_o,lv,l_o,zK,i_o,d_o,XK,m_o,f_o,WK,c_o,g_o,fE,HEe,Bi,iv,VK,cE,h_o,QK,u_o,UEe,qo,gE,p_o,ki,__o,HK,v_o,b_o,UK,T_o,F_o,M_o,hE,E_o,JK,C_o,y_o,w_o,Pt,uE,A_o,KK,x_o,L_o,Ri,B_o,YK,k_o,R_o,ZK,S_o,P_o,$_o,eY,I_o,j_o,pE,N_o,Ie,_E,D_o,oY,G_o,O_o,ka,q_o,tY,z_o,X_o,rY,W_o,V_o,aY,Q_o,H_o,U_o,N,dv,nY,J_o,K_o,CR,Y_o,Z_o,evo,mv,sY,ovo,tvo,yR,rvo,avo,nvo,fv,lY,svo,lvo,wR,ivo,dvo,mvo,cv,iY,fvo,cvo,AR,gvo,hvo,uvo,gv,dY,pvo,_vo,xR,vvo,bvo,Tvo,hv,mY,Fvo,Mvo,LR,Evo,Cvo,yvo,uv,fY,wvo,Avo,BR,xvo,Lvo,Bvo,pv,cY,kvo,Rvo,kR,Svo,Pvo,$vo,_v,gY,Ivo,jvo,RR,Nvo,Dvo,Gvo,vv,hY,Ovo,qvo,SR,zvo,Xvo,Wvo,bv,uY,Vvo,Qvo,PR,Hvo,Uvo,Jvo,Tv,pY,Kvo,Yvo,$R,Zvo,e1o,o1o,Fv,_Y,t1o,r1o,IR,a1o,n1o,s1o,Mv,vY,l1o,i1o,jR,d1o,m1o,f1o,Ev,bY,c1o,g1o,NR,h1o,u1o,p1o,Cv,TY,_1o,v1o,DR,b1o,T1o,F1o,yv,FY,M1o,E1o,GR,C1o,y1o,w1o,wv,MY,A1o,x1o,OR,L1o,B1o,k1o,Av,EY,R1o,S1o,qR,P1o,$1o,I1o,xv,CY,j1o,N1o,zR,D1o,G1o,O1o,Lv,yY,q1o,z1o,XR,X1o,W1o,V1o,Bv,wY,Q1o,H1o,WR,U1o,J1o,K1o,kv,AY,Y1o,Z1o,VR,e2o,o2o,t2o,Rv,xY,r2o,a2o,QR,n2o,s2o,l2o,Sv,LY,i2o,d2o,HR,m2o,f2o,c2o,Pv,BY,g2o,h2o,UR,u2o,p2o,_2o,$v,kY,v2o,b2o,JR,T2o,F2o,M2o,Iv,RY,E2o,C2o,KR,y2o,w2o,A2o,jv,SY,x2o,L2o,YR,B2o,k2o,R2o,Nv,S2o,PY,P2o,$2o,$Y,I2o,j2o,IY,N2o,D2o,vE,JEe,Si,Dv,jY,bE,G2o,NY,O2o,KEe,zo,TE,q2o,Pi,z2o,DY,X2o,W2o,GY,V2o,Q2o,H2o,FE,U2o,OY,J2o,K2o,Y2o,$t,ME,Z2o,qY,ebo,obo,$i,tbo,zY,rbo,abo,XY,nbo,sbo,lbo,WY,ibo,dbo,EE,mbo,je,CE,fbo,VY,cbo,gbo,Ra,hbo,QY,ubo,pbo,HY,_bo,vbo,UY,bbo,Tbo,Fbo,R,Gv,JY,Mbo,Ebo,ZR,Cbo,ybo,wbo,Ov,KY,Abo,xbo,eS,Lbo,Bbo,kbo,qv,YY,Rbo,Sbo,oS,Pbo,$bo,Ibo,zv,ZY,jbo,Nbo,tS,Dbo,Gbo,Obo,Xv,eZ,qbo,zbo,rS,Xbo,Wbo,Vbo,Wv,oZ,Qbo,Hbo,aS,Ubo,Jbo,Kbo,Vv,tZ,Ybo,Zbo,nS,e4o,o4o,t4o,Qv,rZ,r4o,a4o,sS,n4o,s4o,l4o,Hv,aZ,i4o,d4o,lS,m4o,f4o,c4o,Uv,nZ,g4o,h4o,iS,u4o,p4o,_4o,Jv,sZ,v4o,b4o,dS,T4o,F4o,M4o,Kv,lZ,E4o,C4o,mS,y4o,w4o,A4o,Yv,iZ,x4o,L4o,fS,B4o,k4o,R4o,Zv,dZ,S4o,P4o,cS,$4o,I4o,j4o,e1,mZ,N4o,D4o,gS,G4o,O4o,q4o,o1,fZ,z4o,X4o,
hS,W4o,V4o,Q4o,t1,cZ,H4o,U4o,uS,J4o,K4o,Y4o,r1,gZ,Z4o,e5o,pS,o5o,t5o,r5o,a1,hZ,a5o,n5o,_S,s5o,l5o,i5o,n1,uZ,d5o,m5o,vS,f5o,c5o,g5o,s1,pZ,h5o,u5o,bS,p5o,_5o,v5o,l1,_Z,b5o,T5o,TS,F5o,M5o,E5o,i1,vZ,C5o,y5o,FS,w5o,A5o,x5o,d1,bZ,L5o,B5o,MS,k5o,R5o,S5o,m1,TZ,P5o,$5o,ES,I5o,j5o,N5o,f1,FZ,D5o,G5o,CS,O5o,q5o,z5o,c1,MZ,X5o,W5o,yS,V5o,Q5o,H5o,g1,EZ,U5o,J5o,wS,K5o,Y5o,Z5o,h1,CZ,e0o,o0o,AS,t0o,r0o,a0o,u1,yZ,n0o,s0o,xS,l0o,i0o,d0o,p1,wZ,m0o,f0o,LS,c0o,g0o,h0o,_1,AZ,u0o,p0o,BS,_0o,v0o,b0o,v1,xZ,T0o,F0o,kS,M0o,E0o,C0o,b1,LZ,y0o,w0o,RS,A0o,x0o,L0o,T1,BZ,B0o,k0o,SS,R0o,S0o,P0o,F1,$0o,kZ,I0o,j0o,RZ,N0o,D0o,SZ,G0o,O0o,yE,YEe,Ii,M1,PZ,wE,q0o,$Z,z0o,ZEe,Xo,AE,X0o,ji,W0o,IZ,V0o,Q0o,jZ,H0o,U0o,J0o,xE,K0o,NZ,Y0o,Z0o,eTo,It,LE,oTo,DZ,tTo,rTo,Ni,aTo,GZ,nTo,sTo,OZ,lTo,iTo,dTo,qZ,mTo,fTo,BE,cTo,Ne,kE,gTo,zZ,hTo,uTo,Sa,pTo,XZ,_To,vTo,WZ,bTo,TTo,VZ,FTo,MTo,ETo,QZ,E1,HZ,CTo,yTo,PS,wTo,ATo,xTo,C1,LTo,UZ,BTo,kTo,JZ,RTo,STo,KZ,PTo,$To,RE,eCe,Di,y1,YZ,SE,ITo,ZZ,jTo,oCe,Wo,PE,NTo,Gi,DTo,eee,GTo,OTo,oee,qTo,zTo,XTo,$E,WTo,tee,VTo,QTo,HTo,jt,IE,UTo,ree,JTo,KTo,Oi,YTo,aee,ZTo,eFo,nee,oFo,tFo,rFo,see,aFo,nFo,jE,sFo,De,NE,lFo,lee,iFo,dFo,Pa,mFo,iee,fFo,cFo,dee,gFo,hFo,mee,uFo,pFo,_Fo,Vo,w1,fee,vFo,bFo,$S,TFo,FFo,MFo,is,cee,EFo,CFo,IS,yFo,wFo,jS,AFo,xFo,LFo,A1,gee,BFo,kFo,NS,RFo,SFo,PFo,Xr,hee,$Fo,IFo,DS,jFo,NFo,GS,DFo,GFo,OS,OFo,qFo,zFo,x1,uee,XFo,WFo,qS,VFo,QFo,HFo,L1,pee,UFo,JFo,zS,KFo,YFo,ZFo,B1,eMo,_ee,oMo,tMo,vee,rMo,aMo,bee,nMo,sMo,DE,tCe,qi,k1,Tee,GE,lMo,Fee,iMo,rCe,Qo,OE,dMo,zi,mMo,Mee,fMo,cMo,Eee,gMo,hMo,uMo,qE,pMo,Cee,_Mo,vMo,bMo,Nt,zE,TMo,yee,FMo,MMo,Xi,EMo,wee,CMo,yMo,Aee,wMo,AMo,xMo,xee,LMo,BMo,XE,kMo,Ge,WE,RMo,Lee,SMo,PMo,$a,$Mo,Bee,IMo,jMo,kee,NMo,DMo,Ree,GMo,OMo,qMo,See,R1,Pee,zMo,XMo,XS,WMo,VMo,QMo,S1,HMo,$ee,UMo,JMo,Iee,KMo,YMo,jee,ZMo,eEo,VE,aCe,Wi,P1,Nee,QE,oEo,Dee,tEo,nCe,Ho,HE,rEo,Vi,aEo,Gee,nEo,sEo,Oee,lEo,iEo,dEo,UE,mEo,qee,fEo,cEo,gEo,Dt,JE,hEo,zee,uEo,pEo,Qi,_Eo,Xee,vEo,bEo,Wee,TEo,FEo,MEo,Vee,EEo,CEo,KE,yEo,Oe,YE,wEo,Qee,AEo,xEo,Ia,LEo,Hee,BEo,kEo,Uee,REo,SEo,Jee,PEo,$Eo,IEo,Ke,$1,Kee,jEo,NEo,WS,DEo,GEo,OEo,I1,Yee,qEo,zEo,VS,XEo,WEo,VEo,j1,Zee,QEo,HEo,QS,UEo,JEo,KEo,N1,eoe,YEo,ZEo,HS,eCo,oCo,tCo,D1,ooe,rCo,aCo,US,nCo,sCo,lCo,G1,toe,iCo,dCo,JS,mCo,fCo,cCo,O1,roe,gCo,hCo,KS,uCo,pCo,_Co,q1,vCo,aoe,bCo,TCo,noe,FCo,MCo,soe,ECo,CCo,ZE,sCe,Hi,z1,loe,eC,yCo,ioe,wCo,lCe,Uo,oC,ACo,Ui,xCo,doe,LCo,BCo,moe,kCo,RCo,SCo,tC,PCo,foe,$Co,ICo,jCo,Gt,rC,NCo,coe,DCo,GCo,Ji,OCo,goe,qCo,zCo,hoe,XCo,WCo,VCo,uoe,QCo,HCo,aC,UCo,qe,nC,JCo,poe,KCo,YCo,ja,ZCo,_oe,e3o,o3o,voe,t3o,r3o,boe,a3o,n3o,s3o,Ki,X1,Toe,l3o,i3o,YS,d3o,m3o,f3o,W1,Foe,c3o,g3o,ZS,h3o,u3o,p3o,V1,Moe,_3o,v3o,eP,b3o,T3o,F3o,Q1,M3o,Eoe,E3o,C3o,Coe,y3o,w3o,yoe,A3o,x3o,sC,iCe,Yi,H1,woe,lC,L3o,Aoe,B3o,dCe,Jo,iC,k3o,Zi,R3o,xoe,S3o,P3o,Loe,$3o,I3o,j3o,dC,N3o,Boe,D3o,G3o,O3o,Ot,mC,q3o,koe,z3o,X3o,ed,W3o,Roe,V3o,Q3o,Soe,H3o,U3o,J3o,Poe,K3o,Y3o,fC,Z3o,ze,cC,eyo,$oe,oyo,tyo,Na,ryo,Ioe,ayo,nyo,joe,syo,lyo,Noe,iyo,dyo,myo,Ye,U1,Doe,fyo,cyo,oP,gyo,hyo,uyo,J1,Goe,pyo,_yo,tP,vyo,byo,Tyo,K1,Ooe,Fyo,Myo,rP,Eyo,Cyo,yyo,Y1,qoe,wyo,Ayo,aP,xyo,Lyo,Byo,Z1,zoe,kyo,Ryo,nP,Syo,Pyo,$yo,e2,Xoe,Iyo,jyo,sP,Nyo,Dyo,Gyo,o2,Woe,Oyo,qyo,lP,zyo,Xyo,Wyo,t2,Vyo,Voe,Qyo,Hyo,Qoe,Uyo,Jyo,Hoe,Kyo,Yyo,gC,mCe,od,r2,Uoe,hC,Zyo,Joe,ewo,fCe,Ko,uC,owo,td,two,Koe,rwo,awo,Yoe,nwo,swo,lwo,pC,iwo,Zoe,dwo,mwo,fwo,qt,_C,cwo,ete,gwo,hwo,rd,uwo,ote,pwo,_wo,tte,vwo,bwo,Two,rte,Fwo,Mwo,vC,Ewo,Xe,bC,Cwo,ate,ywo,wwo,Da,Awo,nte,xwo,Lwo,ste,Bwo,kwo,lte,Rwo,Swo,Pwo,TC,a2,ite,$wo,Iwo,iP,jwo,Nwo,Dwo,n2,dte,Gwo,Owo,dP,qwo,zwo,Xwo,s2,Wwo,mte,Vwo,Qwo,fte,Hwo,Uwo,cte,Jwo,Kwo,FC,cCe,ad,l2,gte,MC,Ywo,hte,Zwo,gCe,Yo,EC,eAo,nd,oAo,ute,tA
o,rAo,pte,aAo,nAo,sAo,CC,lAo,_te,iAo,dAo,mAo,zt,yC,fAo,vte,cAo,gAo,sd,hAo,bte,uAo,pAo,Tte,_Ao,vAo,bAo,Fte,TAo,FAo,wC,MAo,We,AC,EAo,Mte,CAo,yAo,Ga,wAo,Ete,AAo,xAo,Cte,LAo,BAo,yte,kAo,RAo,SAo,ld,i2,wte,PAo,$Ao,mP,IAo,jAo,NAo,d2,Ate,DAo,GAo,fP,OAo,qAo,zAo,m2,xte,XAo,WAo,cP,VAo,QAo,HAo,f2,UAo,Lte,JAo,KAo,Bte,YAo,ZAo,kte,e7o,o7o,xC,hCe,id,c2,Rte,LC,t7o,Ste,r7o,uCe,Zo,BC,a7o,dd,n7o,Pte,s7o,l7o,$te,i7o,d7o,m7o,kC,f7o,Ite,c7o,g7o,h7o,Xt,RC,u7o,jte,p7o,_7o,md,v7o,Nte,b7o,T7o,Dte,F7o,M7o,E7o,Gte,C7o,y7o,SC,w7o,Ve,PC,A7o,Ote,x7o,L7o,Oa,B7o,qte,k7o,R7o,zte,S7o,P7o,Xte,$7o,I7o,j7o,Wte,g2,Vte,N7o,D7o,gP,G7o,O7o,q7o,h2,z7o,Qte,X7o,W7o,Hte,V7o,Q7o,Ute,H7o,U7o,$C,pCe,fd,u2,Jte,IC,J7o,Kte,K7o,_Ce,et,jC,Y7o,cd,Z7o,Yte,exo,oxo,Zte,txo,rxo,axo,NC,nxo,ere,sxo,lxo,ixo,Wt,DC,dxo,ore,mxo,fxo,gd,cxo,tre,gxo,hxo,rre,uxo,pxo,_xo,are,vxo,bxo,GC,Txo,Qe,OC,Fxo,nre,Mxo,Exo,qa,Cxo,sre,yxo,wxo,lre,Axo,xxo,ire,Lxo,Bxo,kxo,dre,p2,mre,Rxo,Sxo,hP,Pxo,$xo,Ixo,_2,jxo,fre,Nxo,Dxo,cre,Gxo,Oxo,gre,qxo,zxo,qC,vCe,hd,v2,hre,zC,Xxo,ure,Wxo,bCe,ot,XC,Vxo,ud,Qxo,pre,Hxo,Uxo,_re,Jxo,Kxo,Yxo,WC,Zxo,vre,e6o,o6o,t6o,Vt,VC,r6o,bre,a6o,n6o,pd,s6o,Tre,l6o,i6o,Fre,d6o,m6o,f6o,Mre,c6o,g6o,QC,h6o,ro,HC,u6o,Ere,p6o,_6o,za,v6o,Cre,b6o,T6o,yre,F6o,M6o,wre,E6o,C6o,y6o,L,b2,Are,w6o,A6o,uP,x6o,L6o,B6o,T2,xre,k6o,R6o,pP,S6o,P6o,$6o,F2,Lre,I6o,j6o,_P,N6o,D6o,G6o,M2,Bre,O6o,q6o,vP,z6o,X6o,W6o,E2,kre,V6o,Q6o,bP,H6o,U6o,J6o,C2,Rre,K6o,Y6o,TP,Z6o,e8o,o8o,y2,Sre,t8o,r8o,FP,a8o,n8o,s8o,w2,Pre,l8o,i8o,MP,d8o,m8o,f8o,A2,$re,c8o,g8o,EP,h8o,u8o,p8o,x2,Ire,_8o,v8o,CP,b8o,T8o,F8o,L2,jre,M8o,E8o,yP,C8o,y8o,w8o,B2,Nre,A8o,x8o,wP,L8o,B8o,k8o,k2,Dre,R8o,S8o,AP,P8o,$8o,I8o,R2,Gre,j8o,N8o,xP,D8o,G8o,O8o,ds,Ore,q8o,z8o,LP,X8o,W8o,BP,V8o,Q8o,H8o,S2,qre,U8o,J8o,kP,K8o,Y8o,Z8o,P2,zre,eLo,oLo,RP,tLo,rLo,aLo,$2,Xre,nLo,sLo,SP,lLo,iLo,dLo,I2,Wre,mLo,fLo,PP,cLo,gLo,hLo,j2,Vre,uLo,pLo,$P,_Lo,vLo,bLo,N2,Qre,TLo,FLo,IP,MLo,ELo,CLo,D2,Hre,yLo,wLo,jP,ALo,xLo,LLo,G2,Ure,BLo,kLo,NP,RLo,SLo,PLo,O2,Jre,$Lo,ILo,DP,jLo,NLo,DLo,q2,Kre,GLo,OLo,GP,qLo,zLo,XLo,z2,Yre,WLo,VLo,OP,QLo,HLo,ULo,X2,Zre,JLo,KLo,qP,YLo,ZLo,eBo,W2,eae,oBo,tBo,zP,rBo,aBo,nBo,V2,oae,sBo,lBo,XP,iBo,dBo,mBo,Q2,tae,fBo,cBo,WP,gBo,hBo,uBo,H2,rae,pBo,_Bo,VP,vBo,bBo,TBo,U2,aae,FBo,MBo,QP,EBo,CBo,yBo,J2,nae,wBo,ABo,HP,xBo,LBo,BBo,K2,sae,kBo,RBo,UP,SBo,PBo,$Bo,Y2,lae,IBo,jBo,JP,NBo,DBo,GBo,Z2,iae,OBo,qBo,KP,zBo,XBo,WBo,eb,dae,VBo,QBo,YP,HBo,UBo,JBo,ob,mae,KBo,YBo,ZP,ZBo,e9o,o9o,tb,fae,t9o,r9o,e$,a9o,n9o,s9o,cae,l9o,i9o,UC,TCe,_d,rb,gae,JC,d9o,hae,m9o,FCe,tt,KC,f9o,vd,c9o,uae,g9o,h9o,pae,u9o,p9o,_9o,YC,v9o,_ae,b9o,T9o,F9o,Qt,ZC,M9o,vae,E9o,C9o,bd,y9o,bae,w9o,A9o,Tae,x9o,L9o,B9o,Fae,k9o,R9o,e3,S9o,ao,o3,P9o,Mae,$9o,I9o,Xa,j9o,Eae,N9o,D9o,Cae,G9o,O9o,yae,q9o,z9o,X9o,V,ab,wae,W9o,V9o,o$,Q9o,H9o,U9o,nb,Aae,J9o,K9o,t$,Y9o,Z9o,eko,sb,xae,oko,tko,r$,rko,ako,nko,lb,Lae,sko,lko,a$,iko,dko,mko,ib,Bae,fko,cko,n$,gko,hko,uko,db,kae,pko,_ko,s$,vko,bko,Tko,mb,Rae,Fko,Mko,l$,Eko,Cko,yko,fb,Sae,wko,Ako,i$,xko,Lko,Bko,cb,Pae,kko,Rko,d$,Sko,Pko,$ko,gb,$ae,Iko,jko,m$,Nko,Dko,Gko,hb,Iae,Oko,qko,f$,zko,Xko,Wko,ub,jae,Vko,Qko,c$,Hko,Uko,Jko,pb,Nae,Kko,Yko,g$,Zko,eRo,oRo,_b,Dae,tRo,rRo,h$,aRo,nRo,sRo,vb,Gae,lRo,iRo,u$,dRo,mRo,fRo,bb,Oae,cRo,gRo,p$,hRo,uRo,pRo,Tb,qae,_Ro,vRo,_$,bRo,TRo,FRo,Fb,zae,MRo,ERo,v$,CRo,yRo,wRo,Mb,Xae,ARo,xRo,b$,LRo,BRo,kRo,Eb,Wae,RRo,SRo,T$,PRo,$Ro,IRo,Cb,Vae,jRo,NRo,F$,DRo,GRo,ORo,yb,Qae,qRo,zRo,M$,XRo,WRo,VRo,Hae,QRo,HRo,t3,MCe,Td,wb,Uae,r3,URo,Jae,JRo,ECe,rt,a3,KRo,Fd,YRo,Kae,ZRo,eSo,Yae,oSo,tSo,rSo,n3,aSo,Zae,nSo,sSo,lSo,Ht,s3,iSo,ene,dSo,mSo,Md,fSo,one,cSo,gSo,tne,hSo,uSo,pSo,rne,_So,vSo,l3,bSo,no,i3,TSo,ane,FSo,MSo,Wa,ESo,n
ne,CSo,ySo,sne,wSo,ASo,lne,xSo,LSo,BSo,ce,Ab,ine,kSo,RSo,E$,SSo,PSo,$So,xb,dne,ISo,jSo,C$,NSo,DSo,GSo,Lb,mne,OSo,qSo,y$,zSo,XSo,WSo,Bb,fne,VSo,QSo,w$,HSo,USo,JSo,kb,cne,KSo,YSo,A$,ZSo,ePo,oPo,Rb,gne,tPo,rPo,x$,aPo,nPo,sPo,Sb,hne,lPo,iPo,L$,dPo,mPo,fPo,Pb,une,cPo,gPo,B$,hPo,uPo,pPo,$b,pne,_Po,vPo,k$,bPo,TPo,FPo,Ib,_ne,MPo,EPo,R$,CPo,yPo,wPo,vne,APo,xPo,d3,CCe,Ed,jb,bne,m3,LPo,Tne,BPo,yCe,at,f3,kPo,Cd,RPo,Fne,SPo,PPo,Mne,$Po,IPo,jPo,c3,NPo,Ene,DPo,GPo,OPo,Ut,g3,qPo,Cne,zPo,XPo,yd,WPo,yne,VPo,QPo,wne,HPo,UPo,JPo,Ane,KPo,YPo,h3,ZPo,so,u3,e$o,xne,o$o,t$o,Va,r$o,Lne,a$o,n$o,Bne,s$o,l$o,kne,i$o,d$o,m$o,Rne,Nb,Sne,f$o,c$o,S$,g$o,h$o,u$o,Pne,p$o,_$o,p3,wCe,wd,Db,$ne,_3,v$o,Ine,b$o,ACe,nt,v3,T$o,Ad,F$o,jne,M$o,E$o,Nne,C$o,y$o,w$o,b3,A$o,Dne,x$o,L$o,B$o,Jt,T3,k$o,Gne,R$o,S$o,xd,P$o,One,$$o,I$o,qne,j$o,N$o,D$o,zne,G$o,O$o,F3,q$o,lo,M3,z$o,Xne,X$o,W$o,Qa,V$o,Wne,Q$o,H$o,Vne,U$o,J$o,Qne,K$o,Y$o,Z$o,K,Gb,Hne,eIo,oIo,P$,tIo,rIo,aIo,Ob,Une,nIo,sIo,$$,lIo,iIo,dIo,qb,Jne,mIo,fIo,I$,cIo,gIo,hIo,zb,Kne,uIo,pIo,j$,_Io,vIo,bIo,Xb,Yne,TIo,FIo,N$,MIo,EIo,CIo,Wb,Zne,yIo,wIo,D$,AIo,xIo,LIo,Vb,ese,BIo,kIo,G$,RIo,SIo,PIo,Qb,ose,$Io,IIo,O$,jIo,NIo,DIo,Hb,tse,GIo,OIo,q$,qIo,zIo,XIo,Ub,rse,WIo,VIo,z$,QIo,HIo,UIo,Jb,ase,JIo,KIo,X$,YIo,ZIo,ejo,Kb,nse,ojo,tjo,W$,rjo,ajo,njo,Yb,sse,sjo,ljo,V$,ijo,djo,mjo,Zb,lse,fjo,cjo,Q$,gjo,hjo,ujo,e4,ise,pjo,_jo,H$,vjo,bjo,Tjo,o4,dse,Fjo,Mjo,U$,Ejo,Cjo,yjo,t4,mse,wjo,Ajo,J$,xjo,Ljo,Bjo,r4,fse,kjo,Rjo,K$,Sjo,Pjo,$jo,a4,cse,Ijo,jjo,Y$,Njo,Djo,Gjo,n4,gse,Ojo,qjo,Z$,zjo,Xjo,Wjo,hse,Vjo,Qjo,E3,xCe,Ld,s4,use,C3,Hjo,pse,Ujo,LCe,st,y3,Jjo,Bd,Kjo,_se,Yjo,Zjo,vse,eNo,oNo,tNo,w3,rNo,bse,aNo,nNo,sNo,Kt,A3,lNo,Tse,iNo,dNo,kd,mNo,Fse,fNo,cNo,Mse,gNo,hNo,uNo,Ese,pNo,_No,x3,vNo,io,L3,bNo,Cse,TNo,FNo,Ha,MNo,yse,ENo,CNo,wse,yNo,wNo,Ase,ANo,xNo,LNo,ge,l4,xse,BNo,kNo,eI,RNo,SNo,PNo,i4,Lse,$No,INo,oI,jNo,NNo,DNo,d4,Bse,GNo,ONo,tI,qNo,zNo,XNo,m4,kse,WNo,VNo,rI,QNo,HNo,UNo,f4,Rse,JNo,KNo,aI,YNo,ZNo,eDo,c4,Sse,oDo,tDo,nI,rDo,aDo,nDo,g4,Pse,sDo,lDo,sI,iDo,dDo,mDo,h4,$se,fDo,cDo,lI,gDo,hDo,uDo,u4,Ise,pDo,_Do,iI,vDo,bDo,TDo,p4,jse,FDo,MDo,dI,EDo,CDo,yDo,Nse,wDo,ADo,B3,BCe,Rd,_4,Dse,k3,xDo,Gse,LDo,kCe,lt,R3,BDo,Sd,kDo,Ose,RDo,SDo,qse,PDo,$Do,IDo,S3,jDo,zse,NDo,DDo,GDo,Yt,P3,ODo,Xse,qDo,zDo,Pd,XDo,Wse,WDo,VDo,Vse,QDo,HDo,UDo,Qse,JDo,KDo,$3,YDo,mo,I3,ZDo,Hse,eGo,oGo,Ua,tGo,Use,rGo,aGo,Jse,nGo,sGo,Kse,lGo,iGo,dGo,O,v4,Yse,mGo,fGo,mI,cGo,gGo,hGo,b4,Zse,uGo,pGo,fI,_Go,vGo,bGo,T4,ele,TGo,FGo,cI,MGo,EGo,CGo,F4,ole,yGo,wGo,gI,AGo,xGo,LGo,M4,tle,BGo,kGo,hI,RGo,SGo,PGo,E4,rle,$Go,IGo,uI,jGo,NGo,DGo,C4,ale,GGo,OGo,pI,qGo,zGo,XGo,y4,nle,WGo,VGo,_I,QGo,HGo,UGo,w4,sle,JGo,KGo,vI,YGo,ZGo,eOo,A4,lle,oOo,tOo,bI,rOo,aOo,nOo,x4,ile,sOo,lOo,TI,iOo,dOo,mOo,L4,dle,fOo,cOo,FI,gOo,hOo,uOo,B4,mle,pOo,_Oo,MI,vOo,bOo,TOo,k4,fle,FOo,MOo,EI,EOo,COo,yOo,R4,cle,wOo,AOo,CI,xOo,LOo,BOo,S4,gle,kOo,ROo,yI,SOo,POo,$Oo,P4,hle,IOo,jOo,wI,NOo,DOo,GOo,$4,ule,OOo,qOo,AI,zOo,XOo,WOo,I4,ple,VOo,QOo,xI,HOo,UOo,JOo,j4,_le,KOo,YOo,LI,ZOo,eqo,oqo,N4,vle,tqo,rqo,BI,aqo,nqo,sqo,D4,ble,lqo,iqo,kI,dqo,mqo,fqo,G4,Tle,cqo,gqo,RI,hqo,uqo,pqo,O4,Fle,_qo,vqo,SI,bqo,Tqo,Fqo,q4,Mle,Mqo,Eqo,PI,Cqo,yqo,wqo,Ele,Aqo,xqo,j3,RCe,$d,z4,Cle,N3,Lqo,yle,Bqo,SCe,it,D3,kqo,Id,Rqo,wle,Sqo,Pqo,Ale,$qo,Iqo,jqo,G3,Nqo,xle,Dqo,Gqo,Oqo,Zt,O3,qqo,Lle,zqo,Xqo,jd,Wqo,Ble,Vqo,Qqo,kle,Hqo,Uqo,Jqo,Rle,Kqo,Yqo,q3,Zqo,fo,z3,ezo,Sle,ozo,tzo,Ja,rzo,Ple,azo,nzo,$le,szo,lzo,Ile,izo,dzo,mzo,re,X4,jle,fzo,czo,$I,gzo,hzo,uzo,W4,Nle,pzo,_zo,II,vzo,bzo,Tzo,V4,Dle,Fzo,Mzo,jI,Ezo,Czo,yzo,Q4,Gle,wzo,Azo,NI,xzo,Lzo,Bzo,H4,Ole,kzo,Rzo,DI,Szo,Pzo,$zo,U4,qle,Izo,jzo,GI,Nzo,Dzo,Gzo,J4,zle,Ozo,qzo,OI,zzo,Xzo,Wzo,K4,X
le,Vzo,Qzo,qI,Hzo,Uzo,Jzo,Y4,Wle,Kzo,Yzo,zI,Zzo,eXo,oXo,Z4,Vle,tXo,rXo,XI,aXo,nXo,sXo,e5,Qle,lXo,iXo,WI,dXo,mXo,fXo,o5,Hle,cXo,gXo,VI,hXo,uXo,pXo,t5,Ule,_Xo,vXo,QI,bXo,TXo,FXo,r5,Jle,MXo,EXo,HI,CXo,yXo,wXo,a5,Kle,AXo,xXo,UI,LXo,BXo,kXo,n5,Yle,RXo,SXo,JI,PXo,$Xo,IXo,s5,Zle,jXo,NXo,KI,DXo,GXo,OXo,eie,qXo,zXo,X3,PCe,Nd,l5,oie,W3,XXo,tie,WXo,$Ce,dt,V3,VXo,Dd,QXo,rie,HXo,UXo,aie,JXo,KXo,YXo,Q3,ZXo,nie,eWo,oWo,tWo,er,H3,rWo,sie,aWo,nWo,Gd,sWo,lie,lWo,iWo,iie,dWo,mWo,fWo,die,cWo,gWo,U3,hWo,co,J3,uWo,mie,pWo,_Wo,Ka,vWo,fie,bWo,TWo,cie,FWo,MWo,gie,EWo,CWo,yWo,hie,i5,uie,wWo,AWo,YI,xWo,LWo,BWo,pie,kWo,RWo,K3,ICe,Od,d5,_ie,Y3,SWo,vie,PWo,jCe,mt,Z3,$Wo,qd,IWo,bie,jWo,NWo,Tie,DWo,GWo,OWo,ey,qWo,Fie,zWo,XWo,WWo,or,oy,VWo,Mie,QWo,HWo,zd,UWo,Eie,JWo,KWo,Cie,YWo,ZWo,eVo,yie,oVo,tVo,ty,rVo,go,ry,aVo,wie,nVo,sVo,Ya,lVo,Aie,iVo,dVo,xie,mVo,fVo,Lie,cVo,gVo,hVo,Y,m5,Bie,uVo,pVo,ZI,_Vo,vVo,bVo,f5,kie,TVo,FVo,ej,MVo,EVo,CVo,c5,Rie,yVo,wVo,oj,AVo,xVo,LVo,g5,Sie,BVo,kVo,tj,RVo,SVo,PVo,h5,Pie,$Vo,IVo,rj,jVo,NVo,DVo,u5,$ie,GVo,OVo,aj,qVo,zVo,XVo,p5,Iie,WVo,VVo,nj,QVo,HVo,UVo,_5,jie,JVo,KVo,sj,YVo,ZVo,eQo,v5,Nie,oQo,tQo,lj,rQo,aQo,nQo,b5,Die,sQo,lQo,ij,iQo,dQo,mQo,T5,Gie,fQo,cQo,dj,gQo,hQo,uQo,F5,Oie,pQo,_Qo,mj,vQo,bQo,TQo,M5,qie,FQo,MQo,fj,EQo,CQo,yQo,E5,zie,wQo,AQo,cj,xQo,LQo,BQo,C5,Xie,kQo,RQo,gj,SQo,PQo,$Qo,y5,Wie,IQo,jQo,hj,NQo,DQo,GQo,w5,Vie,OQo,qQo,uj,zQo,XQo,WQo,A5,Qie,VQo,QQo,pj,HQo,UQo,JQo,x5,Hie,KQo,YQo,_j,ZQo,eHo,oHo,L5,Uie,tHo,rHo,vj,aHo,nHo,sHo,Jie,lHo,iHo,ay,NCe,Xd,B5,Kie,ny,dHo,Yie,mHo,DCe,ft,sy,fHo,Wd,cHo,Zie,gHo,hHo,ede,uHo,pHo,_Ho,ly,vHo,ode,bHo,THo,FHo,tr,iy,MHo,tde,EHo,CHo,Vd,yHo,rde,wHo,AHo,ade,xHo,LHo,BHo,nde,kHo,RHo,dy,SHo,ho,my,PHo,sde,$Ho,IHo,Za,jHo,lde,NHo,DHo,ide,GHo,OHo,dde,qHo,zHo,XHo,Z,k5,mde,WHo,VHo,bj,QHo,HHo,UHo,R5,fde,JHo,KHo,Tj,YHo,ZHo,eUo,S5,cde,oUo,tUo,Fj,rUo,aUo,nUo,P5,gde,sUo,lUo,Mj,iUo,dUo,mUo,$5,hde,fUo,cUo,Ej,gUo,hUo,uUo,I5,ude,pUo,_Uo,Cj,vUo,bUo,TUo,j5,pde,FUo,MUo,yj,EUo,CUo,yUo,N5,_de,wUo,AUo,wj,xUo,LUo,BUo,D5,vde,kUo,RUo,Aj,SUo,PUo,$Uo,G5,bde,IUo,jUo,xj,NUo,DUo,GUo,O5,Tde,OUo,qUo,Lj,zUo,XUo,WUo,q5,Fde,VUo,QUo,Bj,HUo,UUo,JUo,z5,Mde,KUo,YUo,kj,ZUo,eJo,oJo,X5,Ede,tJo,rJo,Rj,aJo,nJo,sJo,W5,Cde,lJo,iJo,Sj,dJo,mJo,fJo,V5,yde,cJo,gJo,Pj,hJo,uJo,pJo,Q5,wde,_Jo,vJo,$j,bJo,TJo,FJo,H5,Ade,MJo,EJo,Ij,CJo,yJo,wJo,U5,xde,AJo,xJo,jj,LJo,BJo,kJo,Lde,RJo,SJo,fy,GCe,Qd,J5,Bde,cy,PJo,kde,$Jo,OCe,ct,gy,IJo,Hd,jJo,Rde,NJo,DJo,Sde,GJo,OJo,qJo,hy,zJo,Pde,XJo,WJo,VJo,rr,uy,QJo,$de,HJo,UJo,Ud,JJo,Ide,KJo,YJo,jde,ZJo,eKo,oKo,Nde,tKo,rKo,py,aKo,uo,_y,nKo,Dde,sKo,lKo,en,iKo,Gde,dKo,mKo,Ode,fKo,cKo,qde,gKo,hKo,uKo,Q,K5,zde,pKo,_Ko,Nj,vKo,bKo,TKo,Y5,Xde,FKo,MKo,Dj,EKo,CKo,yKo,Z5,Wde,wKo,AKo,Gj,xKo,LKo,BKo,e0,Vde,kKo,RKo,Oj,SKo,PKo,$Ko,o0,Qde,IKo,jKo,qj,NKo,DKo,GKo,t0,Hde,OKo,qKo,zj,zKo,XKo,WKo,r0,Ude,VKo,QKo,Xj,HKo,UKo,JKo,a0,Jde,KKo,YKo,Wj,ZKo,eYo,oYo,n0,Kde,tYo,rYo,Vj,aYo,nYo,sYo,s0,Yde,lYo,iYo,Qj,dYo,mYo,fYo,l0,Zde,cYo,gYo,Hj,hYo,uYo,pYo,i0,eme,_Yo,vYo,Uj,bYo,TYo,FYo,d0,ome,MYo,EYo,Jj,CYo,yYo,wYo,m0,tme,AYo,xYo,Kj,LYo,BYo,kYo,f0,rme,RYo,SYo,Yj,PYo,$Yo,IYo,c0,ame,jYo,NYo,Zj,DYo,GYo,OYo,g0,nme,qYo,zYo,eN,XYo,WYo,VYo,h0,sme,QYo,HYo,oN,UYo,JYo,KYo,u0,lme,YYo,ZYo,tN,eZo,oZo,tZo,p0,ime,rZo,aZo,rN,nZo,sZo,lZo,_0,dme,iZo,dZo,aN,mZo,fZo,cZo,v0,mme,gZo,hZo,nN,uZo,pZo,_Zo,fme,vZo,bZo,vy,qCe,Jd,b0,cme,by,TZo,gme,FZo,zCe,gt,Ty,MZo,Kd,EZo,hme,CZo,yZo,ume,wZo,AZo,xZo,Fy,LZo,pme,BZo,kZo,RZo,ar,My,SZo,_me,PZo,$Zo,Yd,IZo,vme,jZo,NZo,bme,DZo,GZo,OZo,Tme,qZo,zZo,Ey,XZo,po,Cy,WZo,Fme,VZo,QZo,on,HZo,Mme,UZo,JZo,Eme,KZo,YZo,Cme,ZZo,eet,oet,Zd,T0,yme,tet,ret,sN,aet,net,set,F0,wme,iet,det,lN,met,fet,cet,M0,Ame,get,het,iN,uet
,pet,_et,xme,vet,bet,yy,XCe,em,E0,Lme,wy,Tet,Bme,Fet,WCe,ht,Ay,Met,om,Eet,kme,Cet,yet,Rme,wet,Aet,xet,xy,Let,Sme,Bet,ket,Ret,nr,Ly,Set,Pme,Pet,$et,tm,Iet,$me,jet,Net,Ime,Det,Get,Oet,jme,qet,zet,By,Xet,_o,ky,Wet,Nme,Vet,Qet,tn,Het,Dme,Uet,Jet,Gme,Ket,Yet,Ome,Zet,eot,oot,he,C0,qme,tot,rot,dN,aot,not,sot,y0,zme,lot,iot,mN,dot,mot,fot,w0,Xme,cot,got,fN,hot,uot,pot,A0,Wme,_ot,vot,cN,bot,Tot,Fot,x0,Vme,Mot,Eot,gN,Cot,yot,wot,L0,Qme,Aot,xot,hN,Lot,Bot,kot,B0,Hme,Rot,Sot,uN,Pot,$ot,Iot,k0,Ume,jot,Not,pN,Dot,Got,Oot,R0,Jme,qot,zot,_N,Xot,Wot,Vot,S0,Kme,Qot,Hot,vN,Uot,Jot,Kot,Yme,Yot,Zot,Ry,VCe,rm,P0,Zme,Sy,ett,efe,ott,QCe,ut,Py,ttt,am,rtt,ofe,att,ntt,tfe,stt,ltt,itt,$y,dtt,rfe,mtt,ftt,ctt,sr,Iy,gtt,afe,htt,utt,nm,ptt,nfe,_tt,vtt,sfe,btt,Ttt,Ftt,lfe,Mtt,Ett,jy,Ctt,vo,Ny,ytt,ife,wtt,Att,rn,xtt,dfe,Ltt,Btt,mfe,ktt,Rtt,ffe,Stt,Ptt,$tt,Me,$0,cfe,Itt,jtt,bN,Ntt,Dtt,Gtt,I0,gfe,Ott,qtt,TN,ztt,Xtt,Wtt,j0,hfe,Vtt,Qtt,FN,Htt,Utt,Jtt,N0,ufe,Ktt,Ytt,MN,Ztt,ert,ort,D0,pfe,trt,rrt,EN,art,nrt,srt,G0,_fe,lrt,irt,CN,drt,mrt,frt,O0,vfe,crt,grt,yN,hrt,urt,prt,q0,bfe,_rt,vrt,wN,brt,Trt,Frt,Tfe,Mrt,Ert,Dy,HCe,sm,z0,Ffe,Gy,Crt,Mfe,yrt,UCe,pt,Oy,wrt,lm,Art,Efe,xrt,Lrt,Cfe,Brt,krt,Rrt,qy,Srt,yfe,Prt,$rt,Irt,lr,zy,jrt,wfe,Nrt,Drt,im,Grt,Afe,Ort,qrt,xfe,zrt,Xrt,Wrt,Lfe,Vrt,Qrt,Xy,Hrt,bo,Wy,Urt,Bfe,Jrt,Krt,an,Yrt,kfe,Zrt,eat,Rfe,oat,tat,Sfe,rat,aat,nat,pe,X0,Pfe,sat,lat,AN,iat,dat,mat,W0,$fe,fat,cat,xN,gat,hat,uat,V0,Ife,pat,_at,LN,vat,bat,Tat,Q0,jfe,Fat,Mat,BN,Eat,Cat,yat,H0,Nfe,wat,Aat,kN,xat,Lat,Bat,U0,Dfe,kat,Rat,RN,Sat,Pat,$at,J0,Gfe,Iat,jat,SN,Nat,Dat,Gat,K0,Ofe,Oat,qat,PN,zat,Xat,Wat,Y0,qfe,Vat,Qat,$N,Hat,Uat,Jat,zfe,Kat,Yat,Vy,JCe,dm,Z0,Xfe,Qy,Zat,Wfe,ent,KCe,_t,Hy,ont,mm,tnt,Vfe,rnt,ant,Qfe,nnt,snt,lnt,Uy,int,Hfe,dnt,mnt,fnt,ir,Jy,cnt,Ufe,gnt,hnt,fm,unt,Jfe,pnt,_nt,Kfe,vnt,bnt,Tnt,Yfe,Fnt,Mnt,Ky,Ent,To,Yy,Cnt,Zfe,ynt,wnt,nn,Ant,ece,xnt,Lnt,oce,Bnt,knt,tce,Rnt,Snt,Pnt,Ee,eT,rce,$nt,Int,IN,jnt,Nnt,Dnt,oT,ace,Gnt,Ont,jN,qnt,znt,Xnt,tT,nce,Wnt,Vnt,NN,Qnt,Hnt,Unt,rT,sce,Jnt,Knt,DN,Ynt,Znt,est,aT,lce,ost,tst,GN,rst,ast,nst,nT,ice,sst,lst,ON,ist,dst,mst,sT,dce,fst,cst,qN,gst,hst,ust,lT,mce,pst,_st,zN,vst,bst,Tst,fce,Fst,Mst,Zy,YCe,cm,iT,cce,ew,Est,gce,Cst,ZCe,vt,ow,yst,gm,wst,hce,Ast,xst,uce,Lst,Bst,kst,tw,Rst,pce,Sst,Pst,$st,dr,rw,Ist,_ce,jst,Nst,hm,Dst,vce,Gst,Ost,bce,qst,zst,Xst,Tce,Wst,Vst,aw,Qst,Fo,nw,Hst,Fce,Ust,Jst,sn,Kst,Mce,Yst,Zst,Ece,elt,olt,Cce,tlt,rlt,alt,Ce,dT,yce,nlt,slt,XN,llt,ilt,dlt,mT,wce,mlt,flt,WN,clt,glt,hlt,fT,Ace,ult,plt,VN,_lt,vlt,blt,cT,xce,Tlt,Flt,QN,Mlt,Elt,Clt,gT,Lce,ylt,wlt,HN,Alt,xlt,Llt,hT,Bce,Blt,klt,UN,Rlt,Slt,Plt,uT,kce,$lt,Ilt,JN,jlt,Nlt,Dlt,pT,Rce,Glt,Olt,KN,qlt,zlt,Xlt,Sce,Wlt,Vlt,sw,e3e,um,_T,Pce,lw,Qlt,$ce,Hlt,o3e,bt,iw,Ult,pm,Jlt,Ice,Klt,Ylt,jce,Zlt,eit,oit,dw,tit,Nce,rit,ait,nit,mr,mw,sit,Dce,lit,iit,_m,dit,Gce,mit,fit,Oce,cit,git,hit,qce,uit,pit,fw,_it,Mo,cw,vit,zce,bit,Tit,ln,Fit,Xce,Mit,Eit,Wce,Cit,yit,Vce,wit,Ait,xit,Tt,vT,Qce,Lit,Bit,YN,kit,Rit,Sit,bT,Hce,Pit,$it,ZN,Iit,jit,Nit,TT,Uce,Dit,Git,eD,Oit,qit,zit,FT,Jce,Xit,Wit,oD,Vit,Qit,Hit,MT,Kce,Uit,Jit,tD,Kit,Yit,Zit,ET,Yce,edt,odt,rD,tdt,rdt,adt,Zce,ndt,sdt,gw,t3e,vm,CT,ege,hw,ldt,oge,idt,r3e,Ft,uw,ddt,bm,mdt,tge,fdt,cdt,rge,gdt,hdt,udt,pw,pdt,age,_dt,vdt,bdt,fr,_w,Tdt,nge,Fdt,Mdt,Tm,Edt,sge,Cdt,ydt,lge,wdt,Adt,xdt,ige,Ldt,Bdt,vw,kdt,Eo,bw,Rdt,dge,Sdt,Pdt,dn,$dt,mge,Idt,jdt,fge,Ndt,Ddt,cge,Gdt,Odt,qdt,Mt,yT,gge,zdt,Xdt,aD,Wdt,Vdt,Qdt,wT,hge,Hdt,Udt,nD,Jdt,Kdt,Ydt,AT,uge,Zdt,emt,sD,omt,tmt,rmt,xT,pge,amt,nmt,lD,smt,lmt,imt,LT,_ge,dmt,mmt,iD,fmt,cmt,gmt,BT,vge,hmt,umt,dD,pmt,_mt,vmt,bge,bmt,Tmt,Tw,a3e,Fm,kT,Tge,Fw,Fmt,Fge,Mmt,n3e,
Et,Mw,Emt,Mm,Cmt,Mge,ymt,wmt,Ege,Amt,xmt,Lmt,Ew,Bmt,Cge,kmt,Rmt,Smt,cr,Cw,Pmt,yge,$mt,Imt,Em,jmt,wge,Nmt,Dmt,Age,Gmt,Omt,qmt,xge,zmt,Xmt,yw,Wmt,Co,ww,Vmt,Lge,Qmt,Hmt,mn,Umt,Bge,Jmt,Kmt,kge,Ymt,Zmt,Rge,eft,oft,tft,Sge,RT,Pge,rft,aft,mD,nft,sft,lft,$ge,ift,dft,Aw,s3e,Cm,ST,Ige,xw,mft,jge,fft,l3e,Ct,Lw,cft,ym,gft,Nge,hft,uft,Dge,pft,_ft,vft,Bw,bft,Gge,Tft,Fft,Mft,gr,kw,Eft,Oge,Cft,yft,wm,wft,qge,Aft,xft,zge,Lft,Bft,kft,Xge,Rft,Sft,Rw,Pft,yo,Sw,$ft,Wge,Ift,jft,fn,Nft,Vge,Dft,Gft,Qge,Oft,qft,Hge,zft,Xft,Wft,Pw,PT,Uge,Vft,Qft,fD,Hft,Uft,Jft,$T,Jge,Kft,Yft,cD,Zft,ect,oct,Kge,tct,rct,$w,i3e,Am,IT,Yge,Iw,act,Zge,nct,d3e,yt,jw,sct,xm,lct,ehe,ict,dct,ohe,mct,fct,cct,Nw,gct,the,hct,uct,pct,hr,Dw,_ct,rhe,vct,bct,Lm,Tct,ahe,Fct,Mct,nhe,Ect,Cct,yct,she,wct,Act,Gw,xct,wo,Ow,Lct,lhe,Bct,kct,cn,Rct,ihe,Sct,Pct,dhe,$ct,Ict,mhe,jct,Nct,Dct,fhe,jT,che,Gct,Oct,gD,qct,zct,Xct,ghe,Wct,Vct,qw,m3e;return ie=new X({}),ba=new w({props:{code:"model = AutoModel.from_pretrained('bert-base-cased'),",highlighted:'model = AutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>)'}}),IF=new X({}),jF=new w({props:{code:`from transformers import AutoConfig, AutoModel AutoConfig.register("new-model", NewModelConfig) AutoModel.register(NewModelConfig, NewModel),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModel AutoConfig.register(<span class="hljs-string">&quot;new-model&quot;</span>, NewModelConfig) AutoModel.register(NewModelConfig, NewModel)`}}),Im=new Qct({props:{warning:!0,$$slots:{default:[qVt]},$$scope:{ctx:Wl}}}),NF=new X({}),DF=new C({props:{name:"class transformers.AutoConfig",anchor:"transformers.AutoConfig",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/configuration_auto.py#L475"}}),qF=new C({props:{name:"from_pretrained",anchor:"transformers.AutoConfig.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/configuration_auto.py#L498",parametersDescription:[{anchor:"transformers.AutoConfig.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model configuration hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing a configuration file saved using the [<em>~PretrainedConfig.save_pretrained</em>] method, or the [<em>~PreTrainedModel.save_pretrained</em>] method, e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a saved configuration JSON <em>file</em>, e.g., <em>./my_model_directory/configuration.json</em>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.AutoConfig.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.AutoConfig.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.AutoConfig.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.AutoConfig.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.AutoConfig.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.AutoConfig.from_pretrained.return_unused_kwargs",description:`<strong>return_unused_kwargs</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; If <em>False</em>, then this function returns just the final configuration object.</p> <p>If <em>True</em>, then this functions returns a <em>Tuple(config, unused_kwargs)</em> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of <em>kwargs</em> which has not been used to update <em>config</em> and is otherwise ignored.`,name:"return_unused_kwargs"},{anchor:"transformers.AutoConfig.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.AutoConfig.from_pretrained.kwargs(additional",description:`<strong>kwargs(additional</strong> keyword arguments, <em>optional</em>) &#x2014; The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are <em>not</em> configuration attributes is controlled by the <em>return_unused_kwargs</em> keyword parameter.`,name:"kwargs(additional"}]}}),zF=new w({props:{code:`from transformers import AutoConfig # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-uncased') # Download configuration from huggingface.co (user-uploaded) and cache. config = AutoConfig.from_pretrained('dbmdz/bert-base-german-cased') # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*). config = AutoConfig.from_pretrained('./test/bert_saved_model/') # Load a specific configuration file. config = AutoConfig.from_pretrained('./test/bert_saved_model/my_configuration.json') # Change some config attributes when loading a pretrained config. config = AutoConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False) config.output_attentions config, unused_kwargs = AutoConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True) config.output_attentions config.unused_kwargs,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co (user-uploaded) and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;dbmdz/bert-base-german-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If configuration file is in a directory (e.g., was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./test/bert_saved_model/&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Load a specific configuration file.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./test/bert_saved_model/my_configuration.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Change some config attributes when loading a pretrained config.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.output_attentions <span class="hljs-literal">True</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>config, unused_kwargs = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config.unused_kwargs {<span class="hljs-string">&#x27;foo&#x27;</span>: <span class="hljs-literal">False</span>}`}}),XF=new C({props:{name:"register",anchor:"transformers.AutoConfig.register",parameters:[{name:"model_type",val:""},{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/configuration_auto.py#L616",parametersDescription:[{anchor:"transformers.AutoConfig.register.model_type",description:"<strong>model_type</strong> (<code>str</code>) &#x2014; The model type like &#x201C;bert&#x201D; or &#x201C;gpt&#x201D;.",name:"model_type"},{anchor:"transformers.AutoConfig.register.config",description:'<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config to register.',name:"config"}]}}),WF=new X({}),VF=new C({props:{name:"class transformers.AutoTokenizer",anchor:"transformers.AutoTokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/tokenization_auto.py#L361"}}),UF=new C({props:{name:"from_pretrained",anchor:"transformers.AutoTokenizer.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"*inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/tokenization_auto.py#L375",parametersDescription:[{anchor:"transformers.AutoTokenizer.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a predefined tokenizer hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing vocabulary files required by the tokenizer, for instance saved using the [<em>~PreTrainedTokenizer.save_pretrained</em>] method, e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (like Bert or XLNet), e.g.: <em>./my_model_directory/vocab.txt</em>. 
(Not applicable to all derived classes)</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.AutoTokenizer.from_pretrained.inputs",description:`<strong>inputs</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the Tokenizer <em><strong>init</strong>()</em> method.`,name:"inputs"},{anchor:"transformers.AutoTokenizer.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; The configuration object used to dertermine the tokenizer class to instantiate.`,name:"config"},{anchor:"transformers.AutoTokenizer.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.AutoTokenizer.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.AutoTokenizer.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.AutoTokenizer.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.AutoTokenizer.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.AutoTokenizer.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<em>str</em>, <em>optional</em>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here.`,name:"subfolder"},{anchor:"transformers.AutoTokenizer.from_pretrained.use_fast",description:`<strong>use_fast</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>True</em>) &#x2014; Whether or not to try to load the fast version of the tokenizer.`,name:"use_fast"},{anchor:"transformers.AutoTokenizer.from_pretrained.tokenizer_type",description:`<strong>tokenizer_type</strong> (<em>str</em>, <em>optional</em>) &#x2014; Tokenizer type to be loaded.`,name:"tokenizer_type"},{anchor:"transformers.AutoTokenizer.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.AutoTokenizer.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the Tokenizer <em><strong>init</strong>()</em> method. Can be used to set special tokens like <em>bos_token</em>, <em>eos_token</em>, <em>unk_token</em>, <em>sep_token</em>, <em>pad_token</em>, <em>cls_token</em>, <em>mask_token</em>, <em>additional_special_tokens</em>. See parameters in the <em><strong>init</strong>()</em> for more details.`,name:"kwargs"}]}}),JF=new w({props:{code:`from transformers import AutoTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased') # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/'),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download vocabulary from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download vocabulary from huggingface.co (user-uploaded) and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&#x27;dbmdz/bert-base-german-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If vocabulary files are in a directory (e.g. 
tokenizer was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&#x27;./test/bert_saved_model/&#x27;</span>)`}}),KF=new C({props:{name:"register",anchor:"transformers.AutoTokenizer.register",parameters:[{name:"config_class",val:""},{name:"slow_tokenizer_class",val:" = None"},{name:"fast_tokenizer_class",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/tokenization_auto.py#L565",parametersDescription:[{anchor:"transformers.AutoTokenizer.register.config_class",description:`<strong>config_class</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The configuration corresponding to the model to register.`,name:"config_class"},{anchor:"transformers.AutoTokenizer.register.slow_tokenizer_class",description:`<strong>slow_tokenizer_class</strong> (<code>PretrainedTokenizer</code>, <em>optional</em>) &#x2014; The slow tokenizer to register.`,name:"slow_tokenizer_class"},{anchor:"transformers.AutoTokenizer.register.slow_tokenizer_class",description:`<strong>slow_tokenizer_class</strong> (<code>PretrainedTokenizerFast</code>, <em>optional</em>) &#x2014; The fast tokenizer to register.`,name:"slow_tokenizer_class"}]}}),YF=new X({}),ZF=new C({props:{name:"class transformers.AutoFeatureExtractor",anchor:"transformers.AutoFeatureExtractor",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/feature_extraction_auto.py#L65"}}),tM=new C({props:{name:"from_pretrained",anchor:"transformers.AutoFeatureExtractor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/feature_extraction_auto.py#L79",parametersDescription:[{anchor:"transformers.AutoFeatureExtractor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the [<em>~feature_extraction_utils.FeatureExtractionMixin.save_pretrained</em>] method, e.g., <em>./my_model_directory/</em>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <em>./my_model_directory/preprocessor_config.json</em>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}.</em> The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<em>str</em> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <em>True</em>, will use the token generated when running <em>transformers-cli login</em> (stored in <em>~/.huggingface</em>).`,name:"use_auth_token"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.return_unused_kwargs",description:`<strong>return_unused_kwargs</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; If <em>False</em>, then this function returns just the final feature extractor object. 
If <em>True</em>, then this functions returns a <em>Tuple(feature_extractor, unused_kwargs)</em> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of <em>kwargs</em> which has not been used to update <em>feature_extractor</em> and is otherwise ignored.`,name:"return_unused_kwargs"},{anchor:"transformers.AutoFeatureExtractor.from_pretrained.kwargs",description:`<strong>kwargs</strong> (<em>Dict[str, Any]</em>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are <em>not</em> feature extractor attributes is controlled by the <em>return_unused_kwargs</em> keyword parameter.`,name:"kwargs"}]}}),Kc=new Qct({props:{$$slots:{default:[zVt]},$$scope:{ctx:Wl}}}),rM=new w({props:{code:`from transformers import AutoFeatureExtractor # Download feature extractor from huggingface.co and cache. feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h') # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*) feature_extractor = AutoFeatureExtractor.from_pretrained('./test/saved_model/'),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download feature extractor from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;./test/saved_model/&#x27;</span>)`}}),aM=new X({}),nM=new C({props:{name:"class transformers.AutoProcessor",anchor:"transformers.AutoProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/processing_auto.py#L62"}}),iM=new C({props:{name:"from_pretrained",anchor:"transformers.AutoProcessor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/processing_auto.py#L76",parametersDescription:[{anchor:"transformers.AutoProcessor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>a path to a <em>directory</em> containing a processor files saved using the <em>save_pretrained()</em> method, e.g., <em>./my_model_directory/</em>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.AutoProcessor.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.AutoProcessor.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.AutoProcessor.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.AutoProcessor.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}.</em> The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.AutoProcessor.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<em>str</em> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <em>True</em>, will use the token generated when running <em>transformers-cli login</em> (stored in <em>~/.huggingface</em>).`,name:"use_auth_token"},{anchor:"transformers.AutoProcessor.from_pretrained.revision",description:`<strong>revision</strong> (<em>str</em>, <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.AutoProcessor.from_pretrained.return_unused_kwargs",description:`<strong>return_unused_kwargs</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; If <em>False</em>, then this function returns just the final feature extractor object. If <em>True</em>, then this functions returns a <em>Tuple(feature_extractor, unused_kwargs)</em> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of <em>kwargs</em> which has not been used to update <em>feature_extractor</em> and is otherwise ignored.`,name:"return_unused_kwargs"},{anchor:"transformers.AutoProcessor.from_pretrained.kwargs",description:`<strong>kwargs</strong> (<em>Dict[str, Any]</em>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. 
Behavior concerning key/value pairs whose keys are <em>not</em> feature extractor attributes is controlled by the <em>return_unused_kwargs</em> keyword parameter.`,name:"kwargs"}]}}),sg=new Qct({props:{$$slots:{default:[XVt]},$$scope:{ctx:Wl}}}),dM=new w({props:{code:`from transformers import AutoProcessor # Download processor from huggingface.co and cache. processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h') # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*) processor = AutoProcessor.from_pretrained('./test/saved_model/'),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download processor from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If processor files are in a directory (e.g. processor was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&#x27;./test/saved_model/&#x27;</span>)`}}),mM=new X({}),fM=new C({props:{name:"class transformers.AutoModel",anchor:"transformers.AutoModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L608"}}),gM=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertModel">AlbertModel</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel">BartModel</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitModel">BeitModel</a> (BEiT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel">BertModel</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig">BertGenerationConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationEncoder">BertGenerationEncoder</a> (Bert Generation model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdModel">BigBirdModel</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusModel">BigBirdPegasusModel</a> (BigBirdPegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotModel">BlenderbotModel</a> (Blenderbot model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallModel">BlenderbotSmallModel</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel">CLIPModel</a> (CLIP model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLModel">CTRLModel</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertModel">CamembertModel</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineModel">CanineModel</a> (Canine model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder">DPRQuestionEncoder</a> (DPR model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Model">DebertaV2Model</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig">DeiTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTModel">DeiTModel</a> (DeiT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig">DetrConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrModel">DetrModel</a> (DETR 
model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertModel">DistilBertModel</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraModel">ElectraModel</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetModel">FNetModel</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig">FSMTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTModel">FSMTModel</a> (FairSeq Machine-Translation model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertModel">FlaubertModel</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel">FunnelModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelBaseModel">FunnelBaseModel</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Model">GPT2Model</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJModel">GPTJModel</a> (GPT-J model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel">GPTNeoModel</a> (GPT Neo model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertModel">HubertModel</a> (Hubert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertModel">IBertModel</a> (I-BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTModel">ImageGPTModel</a> (ImageGPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDModel">LEDModel</a> (LED model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel">LayoutLMModel</a> (LayoutLM 
model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> (LayoutLMv2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel">LongformerModel</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel">LukeModel</a> (LUKE model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertModel">LxmertModel</a> (LXMERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config">M2M100Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Model">M2M100Model</a> (M2M100 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartModel">MBartModel</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetModel">MPNetModel</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Model">MT5Model</a> (mT5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianModel">MarianModel</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertModel">MegatronBertModel</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertModel">MobileBertModel</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTModel">OpenAIGPTModel</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusModel">PegasusModel</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a> (Perceiver model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetModel">ProphetNetModel</a> (ProphetNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertModel">QDQBertModel</a> (QDQBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModel">ReformerModel</a> (Reformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertConfig">RetriBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertModel">RetriBertModel</a> (RetriBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel">RobertaModel</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWModel">SEWModel</a> (SEW model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig">SEWDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDModel">SEWDModel</a> (SEW-D model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerModel">SegformerModel</a> (SegFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig">Speech2TextConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextModel">Speech2TextModel</a> (Speech2Text model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig">SplinterConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterModel">SplinterModel</a> (Splinter model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertModel">SqueezeBertModel</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model">T5Model</a> (T5 model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasModel">TapasModel</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLModel">TransfoXLModel</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechModel">UniSpeechModel</a> (UniSpeech model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatModel">UniSpeechSatModel</a> (UniSpeechSat model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTModel">ViTModel</a> (ViT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderModel">VisionTextDualEncoderModel</a> (VisionTextDualEncoder model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel">VisualBertModel</a> (VisualBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Model">Wav2Vec2Model</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMModel">WavLMModel</a> (WavLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMModel">XLMModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetConfig">XLMProphetNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetModel">XLMProphetNetModel</a> (XLMProphetNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaModel">XLMRobertaModel</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetModel">XLNetModel</a> (XLNet model)</li> </ul>`,name:"config"}]}}),hM=new w({props:{code:`from transformers import AutoConfig, AutoModel # Download configuration from 
huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModel.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_config(config)`}}),uM=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),pM=new w({props:{code:`from transformers import AutoConfig, AutoModel # Download model and configuration from huggingface.co and cache.
model = AutoModel.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModel.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),_M=new X({}),vM=new C({props:{name:"class transformers.AutoModelForPreTraining",anchor:"transformers.AutoModelForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L615"}}),TM=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForPreTraining">AlbertForPreTraining</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration">BartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForPreTraining">BertForPreTraining</a> (BERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForPreTraining">BigBirdForPreTraining</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLLMHeadModel">CTRLLMHeadModel</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForMaskedLM">CamembertForMaskedLM</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForMaskedLM">DebertaForMaskedLM</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForMaskedLM">DebertaV2ForMaskedLM</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForMaskedLM">DistilBertForMaskedLM</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForPreTraining">ElectraForPreTraining</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForPreTraining">FNetForPreTraining</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig">FSMTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTForConditionalGeneration">FSMTForConditionalGeneration</a> (FairSeq Machine-Translation model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertWithLMHeadModel">FlaubertWithLMHeadModel</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForPreTraining">FunnelForPreTraining</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2LMHeadModel">GPT2LMHeadModel</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMaskedLM">IBertForMaskedLM</a> (I-BERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForMaskedLM">LayoutLMForMaskedLM</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMaskedLM">LongformerForMaskedLM</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForPreTraining">LxmertForPreTraining</a> (LXMERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMaskedLM">MPNetForMaskedLM</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForPreTraining">MegatronBertForPreTraining</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForPreTraining">MobileBertForPreTraining</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTLMHeadModel">OpenAIGPTLMHeadModel</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertConfig">RetriBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertModel">RetriBertModel</a> (RetriBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM">RobertaForMaskedLM</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMaskedLM">SqueezeBertForMaskedLM</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration">T5ForConditionalGeneration</a> (T5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForMaskedLM">TapasForMaskedLM</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLLMHeadModel">TransfoXLLMHeadModel</a> (Transformer-XL model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForPreTraining">UniSpeechForPreTraining</a> (UniSpeech model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForPreTraining">UniSpeechSatForPreTraining</a> (UniSpeechSat model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForPreTraining">VisualBertForPreTraining</a> (VisualBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForPreTraining">Wav2Vec2ForPreTraining</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel">XLMWithLMHeadModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForMaskedLM">XLMRobertaForMaskedLM</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetLMHeadModel">XLNetLMHeadModel</a> (XLNet model)</li> </ul>`,name:"config"}]}}),FM=new w({props:{code:`from transformers import AutoConfig, AutoModelForPreTraining # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForPreTraining.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForPreTraining.from_config(config)`}}),MM=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files.
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),EM=new w({props:{code:`from transformers import AutoConfig, AutoModelForPreTraining # Download model and configuration from huggingface.co and cache. model = AutoModelForPreTraining.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForPreTraining.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),CM=new
X({}),yM=new C({props:{name:"class transformers.AutoModelForCausalLM",anchor:"transformers.AutoModelForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L630"}}),AM=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForCausalLM">BartForCausalLM</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertLMHeadModel">BertLMHeadModel</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig">BertGenerationConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationDecoder">BertGenerationDecoder</a> (Bert Generation model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForCausalLM">BigBirdForCausalLM</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForCausalLM">BigBirdPegasusForCausalLM</a> (BigBirdPegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotForCausalLM">BlenderbotForCausalLM</a> (Blenderbot model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallForCausalLM">BlenderbotSmallForCausalLM</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLLMHeadModel">CTRLLMHeadModel</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForCausalLM">CamembertForCausalLM</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2LMHeadModel">GPT2LMHeadModel</a> (OpenAI GPT-2 
model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForCausalLM">GPTJForCausalLM</a> (GPT-J model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForCausalLM">GPTNeoForCausalLM</a> (GPT Neo model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForCausalLM">MBartForCausalLM</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianForCausalLM">MarianForCausalLM</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForCausalLM">MegatronBertForCausalLM</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTLMHeadModel">OpenAIGPTLMHeadModel</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusForCausalLM">PegasusForCausalLM</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForCausalLM">ProphetNetForCausalLM</a> (ProphetNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertLMHeadModel">QDQBertLMHeadModel</a> (QDQBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModelWithLMHead">ReformerModelWithLMHead</a> (Reformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForCausalLM">RemBertForCausalLM</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForCausalLM">RoFormerForCausalLM</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForCausalLM">RobertaForCausalLM</a> (RoBERTa model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Config">Speech2Text2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2ForCausalLM">Speech2Text2ForCausalLM</a> (Speech2Text2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/trocr#transformers.TrOCRConfig">TrOCRConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/trocr#transformers.TrOCRForCausalLM">TrOCRForCausalLM</a> (TrOCR model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLLMHeadModel">TransfoXLLMHeadModel</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel">XLMWithLMHeadModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetConfig">XLMProphetNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetForCausalLM">XLMProphetNetForCausalLM</a> (XLMProphetNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForCausalLM">XLMRobertaForCausalLM</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetLMHeadModel">XLNetLMHeadModel</a> (XLNet model)</li> </ul>`,name:"config"}]}}),xM=new w({props:{code:`from transformers import AutoConfig, AutoModelForCausalLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForCausalLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_config(config)`}}),LM=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g., <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint into a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files.
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),BM=new w({props:{code:`from transformers import AutoConfig, AutoModelForCausalLM # Download model and configuration from huggingface.co and cache. model = AutoModelForCausalLM.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForCausalLM.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForCausalLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),kM=new X({}),RM=new C({props:{name:"class
transformers.AutoModelForMaskedLM",anchor:"transformers.AutoModelForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L637"}}),PM=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForMaskedLM">AlbertForMaskedLM</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration">BartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForMaskedLM">BertForMaskedLM</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForMaskedLM">BigBirdForMaskedLM</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForMaskedLM">CamembertForMaskedLM</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForMaskedLM">ConvBertForMaskedLM</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForMaskedLM">DebertaForMaskedLM</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForMaskedLM">DebertaV2ForMaskedLM</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForMaskedLM">DistilBertForMaskedLM</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMaskedLM">ElectraForMaskedLM</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> 
configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForMaskedLM">FNetForMaskedLM</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertWithLMHeadModel">FlaubertWithLMHeadModel</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMaskedLM">FunnelForMaskedLM</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMaskedLM">IBertForMaskedLM</a> (I-BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForMaskedLM">LayoutLMForMaskedLM</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMaskedLM">LongformerForMaskedLM</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForConditionalGeneration">MBartForConditionalGeneration</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMaskedLM">MPNetForMaskedLM</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForMaskedLM">MegatronBertForMaskedLM</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForMaskedLM">MobileBertForMaskedLM</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForMaskedLM">PerceiverForMaskedLM</a> (Perceiver model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForMaskedLM">QDQBertForMaskedLM</a> (QDQBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForMaskedLM">ReformerForMaskedLM</a> (Reformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForMaskedLM">RemBertForMaskedLM</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForMaskedLM">RoFormerForMaskedLM</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM">RobertaForMaskedLM</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMaskedLM">SqueezeBertForMaskedLM</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForMaskedLM">TapasForMaskedLM</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <code>Wav2Vec2ForMaskedLM</code> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel">XLMWithLMHeadModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForMaskedLM">XLMRobertaForMaskedLM</a> (XLM-RoBERTa model)</li> </ul>`,name:"config"}]}}),$M=new w({props:{code:`from transformers import AutoConfig, AutoModelForMaskedLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForMaskedLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_config(config)`}}),IM=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g., <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint into a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files.
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),jM=new w({props:{code:`from transformers import AutoConfig, AutoModelForMaskedLM # Download model and configuration from huggingface.co and cache. model = AutoModelForMaskedLM.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForMaskedLM.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),NM=new X({}),DM=new C({props:{name:"class
transformers.AutoModelForSeq2SeqLM",anchor:"transformers.AutoModelForSeq2SeqLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L644"}}),OM=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration">BartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForConditionalGeneration">BigBirdPegasusForConditionalGeneration</a> (BigBirdPegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotForConditionalGeneration">BlenderbotForConditionalGeneration</a> (Blenderbot model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallForConditionalGeneration">BlenderbotSmallForConditionalGeneration</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> (Encoder decoder model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig">FSMTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTForConditionalGeneration">FSMTForConditionalGeneration</a> (FairSeq Machine-Translation model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForConditionalGeneration">LEDForConditionalGeneration</a> (LED model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config">M2M100Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100ForConditionalGeneration">M2M100ForConditionalGeneration</a> (M2M100 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForConditionalGeneration">MBartForConditionalGeneration</a> (mBART model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5ForConditionalGeneration">MT5ForConditionalGeneration</a> (mT5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianMTModel">MarianMTModel</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusForConditionalGeneration">PegasusForConditionalGeneration</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration">ProphetNetForConditionalGeneration</a> (ProphetNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration">T5ForConditionalGeneration</a> (T5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetConfig">XLMProphetNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetForConditionalGeneration">XLMProphetNetForConditionalGeneration</a> (XLMProphetNet model)</li> </ul>`,name:"config"}]}}),qM=new w({props:{code:`from transformers import AutoConfig, AutoModelForSeq2SeqLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('t5-base') model = AutoModelForSeq2SeqLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_config(config)`}}),zM=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g., <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint into a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files.
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),XM=new w({props:{code:`from transformers import AutoConfig, AutoModelForSeq2SeqLM # Download model and configuration from huggingface.co and cache. model = AutoModelForSeq2SeqLM.from_pretrained('t5-base') # Update configuration during loading model = AutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/t5_tf_model_config.json') model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/t5_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;./tf_model/t5_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),WM=new X({}),VM=new C({props:{name:"class
transformers.AutoModelForSequenceClassification",anchor:"transformers.AutoModelForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L653"}}),HM=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForSequenceClassification">AlbertForSequenceClassification</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForSequenceClassification">BartForSequenceClassification</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForSequenceClassification">BertForSequenceClassification</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForSequenceClassification">BigBirdForSequenceClassification</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForSequenceClassification">BigBirdPegasusForSequenceClassification</a> (BigBirdPegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLForSequenceClassification">CTRLForSequenceClassification</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForSequenceClassification">CamembertForSequenceClassification</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForSequenceClassification">CanineForSequenceClassification</a> (Canine model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForSequenceClassification">ConvBertForSequenceClassification</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration 
class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForSequenceClassification">DebertaForSequenceClassification</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForSequenceClassification">DebertaV2ForSequenceClassification</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForSequenceClassification">DistilBertForSequenceClassification</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForSequenceClassification">ElectraForSequenceClassification</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForSequenceClassification">FNetForSequenceClassification</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForSequenceClassification">FlaubertForSequenceClassification</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForSequenceClassification">FunnelForSequenceClassification</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForSequenceClassification">GPT2ForSequenceClassification</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForSequenceClassification">GPTJForSequenceClassification</a> (GPT-J model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForSequenceClassification">GPTNeoForSequenceClassification</a> (GPT Neo model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForSequenceClassification">IBertForSequenceClassification</a> (I-BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForSequenceClassification">LEDForSequenceClassification</a> (LED model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForSequenceClassification">LayoutLMForSequenceClassification</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForSequenceClassification">LayoutLMv2ForSequenceClassification</a> (LayoutLMv2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForSequenceClassification">LongformerForSequenceClassification</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForSequenceClassification">MBartForSequenceClassification</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForSequenceClassification">MPNetForSequenceClassification</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForSequenceClassification">MegatronBertForSequenceClassification</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForSequenceClassification">MobileBertForSequenceClassification</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTForSequenceClassification">OpenAIGPTForSequenceClassification</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForSequenceClassification">PerceiverForSequenceClassification</a> (Perceiver model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForSequenceClassification">QDQBertForSequenceClassification</a> (QDQBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForSequenceClassification">ReformerForSequenceClassification</a> (Reformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForSequenceClassification">RemBertForSequenceClassification</a> (RemBERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForSequenceClassification">RoFormerForSequenceClassification</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForSequenceClassification">RobertaForSequenceClassification</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForSequenceClassification">SqueezeBertForSequenceClassification</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForSequenceClassification">TapasForSequenceClassification</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLForSequenceClassification">TransfoXLForSequenceClassification</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForSequenceClassification">XLMForSequenceClassification</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForSequenceClassification">XLMRobertaForSequenceClassification</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForSequenceClassification">XLNetForSequenceClassification</a> (XLNet model)</li> </ul>`,name:"config"}]}}),UM=new w({props:{code:`from transformers import AutoConfig, AutoModelForSequenceClassification # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForSequenceClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_config(config)`}}),JM=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),KM=new w({props:{code:`from transformers import AutoConfig, AutoModelForSequenceClassification # Download model and configuration from huggingface.co and cache. 
model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),YM=new X({}),ZM=new C({props:{name:"class transformers.AutoModelForMultipleChoice",anchor:"transformers.AutoModelForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L687"}}),oE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForMultipleChoice">AlbertForMultipleChoice</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForMultipleChoice">BertForMultipleChoice</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration 
class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForMultipleChoice">BigBirdForMultipleChoice</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForMultipleChoice">CamembertForMultipleChoice</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForMultipleChoice">CanineForMultipleChoice</a> (Canine model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForMultipleChoice">ConvBertForMultipleChoice</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForMultipleChoice">DistilBertForMultipleChoice</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMultipleChoice">ElectraForMultipleChoice</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForMultipleChoice">FNetForMultipleChoice</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForMultipleChoice">FlaubertForMultipleChoice</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMultipleChoice">FunnelForMultipleChoice</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMultipleChoice">IBertForMultipleChoice</a> (I-BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMultipleChoice">LongformerForMultipleChoice</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMultipleChoice">MPNetForMultipleChoice</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForMultipleChoice">MegatronBertForMultipleChoice</a> (MegatronBert model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForMultipleChoice">MobileBertForMultipleChoice</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForMultipleChoice">QDQBertForMultipleChoice</a> (QDQBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForMultipleChoice">RemBertForMultipleChoice</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForMultipleChoice">RoFormerForMultipleChoice</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMultipleChoice">RobertaForMultipleChoice</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMultipleChoice">SqueezeBertForMultipleChoice</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForMultipleChoice">XLMForMultipleChoice</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForMultipleChoice">XLMRobertaForMultipleChoice</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForMultipleChoice">XLNetForMultipleChoice</a> (XLNet model)</li> </ul>`,name:"config"}]}}),tE=new w({props:{code:`from transformers import AutoConfig, AutoModelForMultipleChoice # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForMultipleChoice.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_config(config)`}}),rE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),aE=new w({props:{code:`from transformers import AutoConfig, AutoModelForMultipleChoice # Download model and configuration from huggingface.co and cache. 
model = AutoModelForMultipleChoice.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForMultipleChoice.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),nE=new X({}),sE=new C({props:{name:"class transformers.AutoModelForNextSentencePrediction",anchor:"transformers.AutoModelForNextSentencePrediction",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L694"}}),iE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForNextSentencePrediction">BertForNextSentencePrediction</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForNextSentencePrediction">FNetForNextSentencePrediction</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: 
<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForNextSentencePrediction">MegatronBertForNextSentencePrediction</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForNextSentencePrediction">MobileBertForNextSentencePrediction</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForNextSentencePrediction">QDQBertForNextSentencePrediction</a> (QDQBert model)</li> </ul>`,name:"config"}]}}),dE=new w({props:{code:`from transformers import AutoConfig, AutoModelForNextSentencePrediction # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForNextSentencePrediction.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForNextSentencePrediction.from_config(config)`}}),mE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),fE=new w({props:{code:`from transformers import AutoConfig, AutoModelForNextSentencePrediction # Download model and configuration from huggingface.co and cache. model = AutoModelForNextSentencePrediction.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForNextSentencePrediction.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForNextSentencePrediction.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),cE=new X({}),gE=new C({props:{name:"class transformers.AutoModelForTokenClassification",anchor:"transformers.AutoModelForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L680"}}),uE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForTokenClassification">AlbertForTokenClassification</a> (ALBERT model)</li> 
<li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForTokenClassification">BertForTokenClassification</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForTokenClassification">BigBirdForTokenClassification</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForTokenClassification">CamembertForTokenClassification</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForTokenClassification">CanineForTokenClassification</a> (Canine model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForTokenClassification">ConvBertForTokenClassification</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForTokenClassification">DebertaForTokenClassification</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForTokenClassification">DebertaV2ForTokenClassification</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForTokenClassification">DistilBertForTokenClassification</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForTokenClassification">ElectraForTokenClassification</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForTokenClassification">FNetForTokenClassification</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForTokenClassification">FlaubertForTokenClassification</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForTokenClassification">FunnelForTokenClassification</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForTokenClassification">GPT2ForTokenClassification</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForTokenClassification">IBertForTokenClassification</a> (I-BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForTokenClassification">LayoutLMForTokenClassification</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForTokenClassification">LayoutLMv2ForTokenClassification</a> (LayoutLMv2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForTokenClassification">LongformerForTokenClassification</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForTokenClassification">MPNetForTokenClassification</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForTokenClassification">MegatronBertForTokenClassification</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForTokenClassification">MobileBertForTokenClassification</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForTokenClassification">QDQBertForTokenClassification</a> (QDQBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForTokenClassification">RemBertForTokenClassification</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForTokenClassification">RoFormerForTokenClassification</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForTokenClassification">RobertaForTokenClassification</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForTokenClassification">SqueezeBertForTokenClassification</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForTokenClassification">XLMForTokenClassification</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForTokenClassification">XLMRobertaForTokenClassification</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForTokenClassification">XLNetForTokenClassification</a> (XLNet model)</li> </ul>`,name:"config"}]}}),pE=new w({props:{code:`from transformers import AutoConfig, AutoModelForTokenClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForTokenClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_config(config)`}}),_E=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),vE=new w({props:{code:`from transformers import AutoConfig, AutoModelForTokenClassification # Download model and configuration from huggingface.co and cache. model = AutoModelForTokenClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForTokenClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),bE=new X({}),TE=new C({props:{name:"class transformers.AutoModelForQuestionAnswering",anchor:"transformers.AutoModelForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L662"}}),ME=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForQuestionAnswering">AlbertForQuestionAnswering</a> (ALBERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForQuestionAnswering">BartForQuestionAnswering</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForQuestionAnswering">BertForQuestionAnswering</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForQuestionAnswering">BigBirdForQuestionAnswering</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForQuestionAnswering">BigBirdPegasusForQuestionAnswering</a> (BigBirdPegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForQuestionAnswering">CamembertForQuestionAnswering</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForQuestionAnswering">CanineForQuestionAnswering</a> (Canine model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForQuestionAnswering">ConvBertForQuestionAnswering</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForQuestionAnswering">DebertaForQuestionAnswering</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForQuestionAnswering">DebertaV2ForQuestionAnswering</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForQuestionAnswering">DistilBertForQuestionAnswering</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForQuestionAnswering">ElectraForQuestionAnswering</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForQuestionAnswering">FNetForQuestionAnswering</a> (FNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForQuestionAnsweringSimple">FlaubertForQuestionAnsweringSimple</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForQuestionAnswering">FunnelForQuestionAnswering</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForQuestionAnswering">GPTJForQuestionAnswering</a> (GPT-J model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForQuestionAnswering">IBertForQuestionAnswering</a> (I-BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForQuestionAnswering">LEDForQuestionAnswering</a> (LED model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForQuestionAnswering">LayoutLMv2ForQuestionAnswering</a> (LayoutLMv2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForQuestionAnswering">LongformerForQuestionAnswering</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForQuestionAnswering">LxmertForQuestionAnswering</a> (LXMERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForQuestionAnswering">MBartForQuestionAnswering</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForQuestionAnswering">MPNetForQuestionAnswering</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForQuestionAnswering">MegatronBertForQuestionAnswering</a> (MegatronBert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForQuestionAnswering">MobileBertForQuestionAnswering</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForQuestionAnswering">QDQBertForQuestionAnswering</a> (QDQBert model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForQuestionAnswering">ReformerForQuestionAnswering</a> (Reformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForQuestionAnswering">RemBertForQuestionAnswering</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForQuestionAnswering">RoFormerForQuestionAnswering</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForQuestionAnswering">RobertaForQuestionAnswering</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig">SplinterConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterForQuestionAnswering">SplinterForQuestionAnswering</a> (Splinter model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForQuestionAnswering">SqueezeBertForQuestionAnswering</a> (SqueezeBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnsweringSimple">XLMForQuestionAnsweringSimple</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForQuestionAnswering">XLMRobertaForQuestionAnswering</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForQuestionAnsweringSimple">XLNetForQuestionAnsweringSimple</a> (XLNet model)</li> </ul>`,name:"config"}]}}),EE=new w({props:{code:`from transformers import AutoConfig, AutoModelForQuestionAnswering # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForQuestionAnswering.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_config(config)`}}),CE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),yE=new w({props:{code:`from transformers import AutoConfig, AutoModelForQuestionAnswering # Download model and configuration from huggingface.co and cache. 
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForQuestionAnswering.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),wE=new X({}),AE=new C({props:{name:"class transformers.AutoModelForTableQuestionAnswering",anchor:"transformers.AutoModelForTableQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L669"}}),LE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering">TapasForQuestionAnswering</a> (TAPAS model)</li> </ul>`,name:"config"}]}}),BE=new w({props:{code:`from transformers import AutoConfig, AutoModelForTableQuestionAnswering # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('google/tapas-base-finetuned-wtq') model = AutoModelForTableQuestionAnswering.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForTableQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTableQuestionAnswering.from_config(config)`}}),kE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),RE=new w({props:{code:`from transformers import AutoConfig, AutoModelForTableQuestionAnswering # Download model and configuration from huggingface.co and cache. 
model = AutoModelForTableQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq') # Update configuration during loading model = AutoModelForTableQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/tapas_tf_model_config.json') model = AutoModelForTableQuestionAnswering.from_pretrained('./tf_model/tapas_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForTableQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTableQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTableQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/tapas_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTableQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;./tf_model/tapas_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),SE=new X({}),PE=new C({props:{name:"class transformers.AutoModelForImageClassification",anchor:"transformers.AutoModelForImageClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L703"}}),IE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitForImageClassification">BeitForImageClassification</a> (BEiT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig">DeiTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassification">DeiTForImageClassification</a> or <a 
href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassificationWithTeacher">DeiTForImageClassificationWithTeacher</a> (DeiT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification">ImageGPTForImageClassification</a> (ImageGPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationFourier">PerceiverForImageClassificationFourier</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationConvProcessing">PerceiverForImageClassificationConvProcessing</a> (Perceiver model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForImageClassification">SegformerForImageClassification</a> (SegFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTForImageClassification">ViTForImageClassification</a> (ViT model)</li> </ul>`,name:"config"}]}}),jE=new w({props:{code:`from transformers import AutoConfig, AutoModelForImageClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForImageClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_config(config)`}}),NE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),DE=new w({props:{code:`from transformers import AutoConfig, AutoModelForImageClassification # Download model and configuration from huggingface.co and cache. model = AutoModelForImageClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForImageClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForImageClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span 
class="hljs-literal">True</span>, config=config)`}}),GE=new X({}),OE=new C({props:{name:"class transformers.AutoModelForVision2Seq",anchor:"transformers.AutoModelForVision2Seq",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L724"}}),zE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel">VisionEncoderDecoderModel</a> (Vision Encoder decoder model)</li> </ul>`,name:"config"}]}}),XE=new w({props:{code:`from transformers import AutoConfig, AutoModelForVision2Seq # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForVision2Seq.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForVision2Seq <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForVision2Seq.from_config(config)`}}),WE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),VE=new w({props:{code:`from transformers import AutoConfig, AutoModelForVision2Seq # Download model and configuration from huggingface.co and cache. model = AutoModelForVision2Seq.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForVision2Seq.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForVision2Seq.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForVision2Seq <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForVision2Seq.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForVision2Seq.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForVision2Seq.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),QE=new X({}),HE=new C({props:{name:"class transformers.AutoModelForAudioClassification",anchor:"transformers.AutoModelForAudioClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L731"}}),JE=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForSequenceClassification">HubertForSequenceClassification</a> (Hubert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a> 
configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForSequenceClassification">SEWForSequenceClassification</a> (SEW model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig">SEWDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForSequenceClassification">SEWDForSequenceClassification</a> (SEW-D model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForSequenceClassification">UniSpeechForSequenceClassification</a> (UniSpeech model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForSequenceClassification">UniSpeechSatForSequenceClassification</a> (UniSpeechSat model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification">Wav2Vec2ForSequenceClassification</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForSequenceClassification">WavLMForSequenceClassification</a> (WavLM model)</li> </ul>`,name:"config"}]}}),KE=new w({props:{code:`from transformers import AutoConfig, AutoModelForAudioClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForAudioClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForAudioClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_config(config)`}}),YE=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),ZE=new w({props:{code:`from transformers import AutoConfig, AutoModelForAudioClassification # Download model and configuration from huggingface.co and cache. model = AutoModelForAudioClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForAudioClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForAudioClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForAudioClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span 
class="hljs-literal">True</span>, config=config)`}}),eC=new X({}),oC=new C({props:{name:"class transformers.AutoModelForAudioFrameClassification",anchor:"transformers.AutoModelForAudioFrameClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L754"}}),rC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForAudioFrameClassification">UniSpeechSatForAudioFrameClassification</a> (UniSpeechSat model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForAudioFrameClassification">Wav2Vec2ForAudioFrameClassification</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForAudioFrameClassification">WavLMForAudioFrameClassification</a> (WavLM model)</li> </ul>`,name:"config"}]}}),aC=new w({props:{code:`from transformers import AutoConfig, AutoModelForAudioFrameClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForAudioFrameClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioFrameClassification.from_config(config)`}}),nC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),sC=new w({props:{code:`from transformers import AutoConfig, AutoModelForAudioFrameClassification # Download model and configuration from huggingface.co and cache. model = AutoModelForAudioFrameClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForAudioFrameClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForAudioFrameClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioFrameClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioFrameClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioFrameClassification.from_pretrained(<span 
class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),lC=new X({}),iC=new C({props:{name:"class transformers.AutoModelForCTC",anchor:"transformers.AutoModelForCTC",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L738"}}),mC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForCTC">HubertForCTC</a> (Hubert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a> (SEW model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig">SEWDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForCTC">SEWDForCTC</a> (SEW-D model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForCTC">UniSpeechForCTC</a> (UniSpeech model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForCTC">UniSpeechSatForCTC</a> (UniSpeechSat model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForCTC">WavLMForCTC</a> (WavLM model)</li> </ul>`,name:"config"}]}}),fC=new w({props:{code:`from transformers import AutoConfig, AutoModelForCTC # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForCTC.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_config(config)`}}),cC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),gC=new w({props:{code:`from transformers import AutoConfig, AutoModelForCTC # Download model and configuration from huggingface.co and cache. 
model = AutoModelForCTC.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForCTC.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForCTC.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),hC=new X({}),uC=new C({props:{name:"class transformers.AutoModelForSpeechSeq2Seq",anchor:"transformers.AutoModelForSpeechSeq2Seq",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L745"}}),_C=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig">Speech2TextConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextForConditionalGeneration">Speech2TextForConditionalGeneration</a> (Speech2Text model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig">SpeechEncoderDecoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel">SpeechEncoderDecoderModel</a> (Speech Encoder decoder model)</li> </ul>`,name:"config"}]}}),vC=new w({props:{code:`from transformers import AutoConfig, AutoModelForSpeechSeq2Seq 
# Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForSpeechSeq2Seq.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForSpeechSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSpeechSeq2Seq.from_config(config)`}}),bC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),FC=new w({props:{code:`from transformers import AutoConfig, AutoModelForSpeechSeq2Seq # Download model and configuration from huggingface.co and cache. 
model = AutoModelForSpeechSeq2Seq.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForSpeechSeq2Seq.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForSpeechSeq2Seq.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForSpeechSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSpeechSeq2Seq.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSpeechSeq2Seq.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSpeechSeq2Seq.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),MC=new X({}),EC=new C({props:{name:"class transformers.AutoModelForAudioXVector",anchor:"transformers.AutoModelForAudioXVector",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L763"}}),yC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForXVector">UniSpeechSatForXVector</a> (UniSpeechSat model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForXVector">Wav2Vec2ForXVector</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForXVector">WavLMForXVector</a> (WavLM model)</li> </ul>`,name:"config"}]}}),wC=new w({props:{code:`from transformers import AutoConfig, AutoModelForAudioXVector # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForAudioXVector.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForAudioXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioXVector.from_config(config)`}}),AC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),xC=new w({props:{code:`from transformers import AutoConfig, AutoModelForAudioXVector # Download model and configuration from huggingface.co and cache. 
model = AutoModelForAudioXVector.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForAudioXVector.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForAudioXVector.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForAudioXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioXVector.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioXVector.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioXVector.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),LC=new X({}),BC=new C({props:{name:"class transformers.AutoModelForObjectDetection",anchor:"transformers.AutoModelForObjectDetection",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L717"}}),RC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig">DetrConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection">DetrForObjectDetection</a> (DETR model)</li> </ul>`,name:"config"}]}}),SC=new w({props:{code:`from transformers import AutoConfig, AutoModelForObjectDetection # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForObjectDetection.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForObjectDetection <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForObjectDetection.from_config(config)`}}),PC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),$C=new w({props:{code:`from transformers import AutoConfig, AutoModelForObjectDetection # Download model and configuration from huggingface.co and cache. 
model = AutoModelForObjectDetection.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForObjectDetection.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForObjectDetection.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForObjectDetection <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForObjectDetection.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForObjectDetection.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForObjectDetection.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),IC=new X({}),jC=new C({props:{name:"class transformers.AutoModelForImageSegmentation",anchor:"transformers.AutoModelForImageSegmentation",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_auto.py#L710"}}),DC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig">DetrConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation">DetrForSegmentation</a> (DETR model)</li> </ul>`,name:"config"}]}}),GC=new w({props:{code:`from transformers import AutoConfig, AutoModelForImageSegmentation # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = AutoModelForImageSegmentation.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForImageSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageSegmentation.from_config(config)`}}),OC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <em>./tf_model/model.ckpt.index</em>). In this case, <em>from_tf</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<em>Dict[str, torch.Tensor]</em>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [<em>~PreTrainedModel.save_pretrained</em>] and [<em>~PreTrainedModel.from_pretrained</em>] is not a simpler option.`,name:"state_dict"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_tf"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),qC=new w({props:{code:`from transformers import AutoConfig, AutoModelForImageSegmentation # Download model and configuration from huggingface.co and cache. 
model = AutoModelForImageSegmentation.from_pretrained('bert-base-cased') # Update configuration during loading model = AutoModelForImageSegmentation.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = AutoModelForImageSegmentation.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModelForImageSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageSegmentation.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageSegmentation.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageSegmentation.from_pretrained(<span class="hljs-string">&#x27;./tf_model/bert_tf_checkpoint.ckpt.index&#x27;</span>, from_tf=<span class="hljs-literal">True</span>, config=config)`}}),zC=new X({}),XC=new C({props:{name:"class transformers.TFAutoModel",anchor:"transformers.TFAutoModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L353"}}),VC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertModel">TFAlbertModel</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartModel">TFBartModel</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel">TFBertModel</a> (BERT 
model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotModel">TFBlenderbotModel</a> (Blenderbot model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallModel">TFBlenderbotSmallModel</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLModel">TFCTRLModel</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertModel">TFCamembertModel</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertModel">TFConvBertModel</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder">TFDPRQuestionEncoder</a> (DPR model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2Model">TFDebertaV2Model</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertModel">TFDistilBertModel</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraModel">TFElectraModel</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertModel">TFFlaubertModel</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelModel">TFFunnelModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelBaseModel">TFFunnelBaseModel</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2Model">TFGPT2Model</a> (OpenAI GPT-2 model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.TFHubertModel">TFHubertModel</a> (Hubert model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDModel">TFLEDModel</a> (LED model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMModel">TFLayoutLMModel</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerModel">TFLongformerModel</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertModel">TFLxmertModel</a> (LXMERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartModel">TFMBartModel</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetModel">TFMPNetModel</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.TFMT5Model">TFMT5Model</a> (mT5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianModel">TFMarianModel</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertModel">TFMobileBertModel</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTModel">TFOpenAIGPTModel</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusModel">TFPegasusModel</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertModel">TFRemBertModel</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerModel">TFRoFormerModel</a> (RoFormer model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaModel">TFRobertaModel</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5Model">TFT5Model</a> (T5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasModel">TFTapasModel</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLModel">TFTransfoXLModel</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.TFViTModel">TFViTModel</a> (ViT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.TFWav2Vec2Model">TFWav2Vec2Model</a> (Wav2Vec2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMModel">TFXLMModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaModel">TFXLMRobertaModel</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetModel">TFXLNetModel</a> (XLNet model)</li> </ul>`,name:"config"}]}}),QC=new w({props:{code:`from transformers import AutoConfig, TFAutoModel # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModel.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModel.from_config(config)`}}),HC=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),UC=new w({props:{code:`from transformers import AutoConfig, TFAutoModel # Download model and configuration from huggingface.co and cache. 
model = TFAutoModel.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModel.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModel.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),JC=new X({}),KC=new C({props:{name:"class transformers.TFAutoModelForPreTraining",anchor:"transformers.TFAutoModelForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L360"}}),ZC=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForPreTraining">TFAlbertForPreTraining</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration">TFBartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForPreTraining">TFBertForPreTraining</a> (BERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLLMHeadModel">TFCTRLLMHeadModel</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForMaskedLM">TFCamembertForMaskedLM</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForMaskedLM">TFDistilBertForMaskedLM</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForPreTraining">TFElectraForPreTraining</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertWithLMHeadModel">TFFlaubertWithLMHeadModel</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForPreTraining">TFFunnelForPreTraining</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2LMHeadModel">TFGPT2LMHeadModel</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForMaskedLM">TFLayoutLMForMaskedLM</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertForPreTraining">TFLxmertForPreTraining</a> (LXMERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMaskedLM">TFMPNetForMaskedLM</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForPreTraining">TFMobileBertForPreTraining</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTLMHeadModel">TFOpenAIGPTLMHeadModel</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM">TFRobertaForMaskedLM</a> (RoBERTa model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5ForConditionalGeneration">TFT5ForConditionalGeneration</a> (T5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForMaskedLM">TFTapasForMaskedLM</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLLMHeadModel">TFTransfoXLLMHeadModel</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel">TFXLMWithLMHeadModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForMaskedLM">TFXLMRobertaForMaskedLM</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetLMHeadModel">TFXLNetLMHeadModel</a> (XLNet model)</li> </ul>`,name:"config"}]}}),e3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForPreTraining # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForPreTraining.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForPreTraining.from_config(config)`}}),o3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). 
In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),t3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForPreTraining # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForPreTraining.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForPreTraining.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForPreTraining.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),r3=new X({}),a3=new C({props:{name:"class transformers.TFAutoModelForCausalLM",anchor:"transformers.TFAutoModelForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L375"}}),s3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertLMHeadModel">TFBertLMHeadModel</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLLMHeadModel">TFCTRLLMHeadModel</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2LMHeadModel">TFGPT2LMHeadModel</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTLMHeadModel">TFOpenAIGPTLMHeadModel</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForCausalLM">TFRemBertForCausalLM</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForCausalLM">TFRoFormerForCausalLM</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForCausalLM">TFRobertaForCausalLM</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLLMHeadModel">TFTransfoXLLMHeadModel</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel">TFXLMWithLMHeadModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetLMHeadModel">TFXLNetLMHeadModel</a> (XLNet model)</li> </ul>`,name:"config"}]}}),l3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForCausalLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForCausalLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_config(config)`}}),i3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),d3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForCausalLM # Download model and configuration from huggingface.co and cache. model = TFAutoModelForCausalLM.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForCausalLM.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForCausalLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),m3=new X({}),f3=new C({props:{name:"class transformers.TFAutoModelForImageClassification",anchor:"transformers.TFAutoModelForImageClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L382"}}),g3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.TFViTForImageClassification">TFViTForImageClassification</a> (ViT model)</li> </ul>`,name:"config"}]}}),h3=new w({props:{code:`from transformers import AutoConfig, 
TFAutoModelForImageClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForImageClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForImageClassification.from_config(config)`}}),u3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),p3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForImageClassification # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForImageClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForImageClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForImageClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),_3=new X({}),v3=new C({props:{name:"class transformers.TFAutoModelForMaskedLM",anchor:"transformers.TFAutoModelForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L389"}}),T3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForMaskedLM">TFAlbertForMaskedLM</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForMaskedLM">TFBertForMaskedLM</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForMaskedLM">TFCamembertForMaskedLM</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForMaskedLM">TFConvBertForMaskedLM</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForMaskedLM">TFDebertaForMaskedLM</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForMaskedLM">TFDebertaV2ForMaskedLM</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForMaskedLM">TFDistilBertForMaskedLM</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForMaskedLM">TFElectraForMaskedLM</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertWithLMHeadModel">TFFlaubertWithLMHeadModel</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForMaskedLM">TFFunnelForMaskedLM</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForMaskedLM">TFLayoutLMForMaskedLM</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForMaskedLM">TFLongformerForMaskedLM</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMaskedLM">TFMPNetForMaskedLM</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForMaskedLM">TFMobileBertForMaskedLM</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForMaskedLM">TFRemBertForMaskedLM</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> 
configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForMaskedLM">TFRoFormerForMaskedLM</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM">TFRobertaForMaskedLM</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForMaskedLM">TFTapasForMaskedLM</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel">TFXLMWithLMHeadModel</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForMaskedLM">TFXLMRobertaForMaskedLM</a> (XLM-RoBERTa model)</li> </ul>`,name:"config"}]}}),F3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForMaskedLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForMaskedLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMaskedLM.from_config(config)`}}),M3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. 
This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),E3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForMaskedLM # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForMaskedLM.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForMaskedLM.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForMaskedLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),C3=new X({}),y3=new C({props:{name:"class transformers.TFAutoModelForSeq2SeqLM",anchor:"transformers.TFAutoModelForSeq2SeqLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L396"}}),A3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration">TFBartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotForConditionalGeneration">TFBlenderbotForConditionalGeneration</a> (Blenderbot model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> 
configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallForConditionalGeneration">TFBlenderbotSmallForConditionalGeneration</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.TFEncoderDecoderModel">TFEncoderDecoderModel</a> (Encoder decoder model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDForConditionalGeneration">TFLEDForConditionalGeneration</a> (LED model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartForConditionalGeneration">TFMBartForConditionalGeneration</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.TFMT5ForConditionalGeneration">TFMT5ForConditionalGeneration</a> (mT5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianMTModel">TFMarianMTModel</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusForConditionalGeneration">TFPegasusForConditionalGeneration</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5ForConditionalGeneration">TFT5ForConditionalGeneration</a> (T5 model)</li> </ul>`,name:"config"}]}}),x3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForSeq2SeqLM # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('t5-base') model = TFAutoModelForSeq2SeqLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_config(config)`}}),L3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),B3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForSeq2SeqLM # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForSeq2SeqLM.from_pretrained('t5-base') # Update configuration during loading model = TFAutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/t5_pt_model_config.json') model = TFAutoModelForSeq2SeqLM.from_pretrained('./pt_model/t5_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/t5_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;./pt_model/t5_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),k3=new X({}),R3=new C({props:{name:"class transformers.TFAutoModelForSequenceClassification",anchor:"transformers.TFAutoModelForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L405"}}),P3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForSequenceClassification">TFAlbertForSequenceClassification</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForSequenceClassification">TFBertForSequenceClassification</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLForSequenceClassification">TFCTRLForSequenceClassification</a> (CTRL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForSequenceClassification">TFCamembertForSequenceClassification</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForSequenceClassification">TFConvBertForSequenceClassification</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForSequenceClassification">TFDebertaForSequenceClassification</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForSequenceClassification">TFDebertaV2ForSequenceClassification</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForSequenceClassification">TFDistilBertForSequenceClassification</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForSequenceClassification">TFElectraForSequenceClassification</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForSequenceClassification">TFFlaubertForSequenceClassification</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForSequenceClassification">TFFunnelForSequenceClassification</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2ForSequenceClassification">TFGPT2ForSequenceClassification</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForSequenceClassification">TFLayoutLMForSequenceClassification</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForSequenceClassification">TFLongformerForSequenceClassification</a> (Longformer model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForSequenceClassification">TFMPNetForSequenceClassification</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForSequenceClassification">TFMobileBertForSequenceClassification</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTForSequenceClassification">TFOpenAIGPTForSequenceClassification</a> (OpenAI GPT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForSequenceClassification">TFRemBertForSequenceClassification</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForSequenceClassification">TFRoFormerForSequenceClassification</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification">TFRobertaForSequenceClassification</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForSequenceClassification">TFTapasForSequenceClassification</a> (TAPAS model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLForSequenceClassification">TFTransfoXLForSequenceClassification</a> (Transformer-XL model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForSequenceClassification">TFXLMForSequenceClassification</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForSequenceClassification">TFXLMRobertaForSequenceClassification</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForSequenceClassification">TFXLNetForSequenceClassification</a> (XLNet model)</li> </ul>`,name:"config"}]}}),$3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForSequenceClassification # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForSequenceClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_config(config)`}}),I3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),j3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForSequenceClassification # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),N3=new X({}),D3=new C({props:{name:"class transformers.TFAutoModelForMultipleChoice",anchor:"transformers.TFAutoModelForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L441"}}),O3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForMultipleChoice">TFAlbertForMultipleChoice</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForMultipleChoice">TFBertForMultipleChoice</a> (BERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForMultipleChoice">TFCamembertForMultipleChoice</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForMultipleChoice">TFConvBertForMultipleChoice</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForMultipleChoice">TFDistilBertForMultipleChoice</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForMultipleChoice">TFElectraForMultipleChoice</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForMultipleChoice">TFFlaubertForMultipleChoice</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForMultipleChoice">TFFunnelForMultipleChoice</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForMultipleChoice">TFLongformerForMultipleChoice</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMultipleChoice">TFMPNetForMultipleChoice</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForMultipleChoice">TFMobileBertForMultipleChoice</a> (MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForMultipleChoice">TFRemBertForMultipleChoice</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForMultipleChoice">TFRoFormerForMultipleChoice</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice">TFRobertaForMultipleChoice</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForMultipleChoice">TFXLMForMultipleChoice</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForMultipleChoice">TFXLMRobertaForMultipleChoice</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForMultipleChoice">TFXLNetForMultipleChoice</a> (XLNet model)</li> </ul>`,name:"config"}]}}),q3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForMultipleChoice # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForMultipleChoice.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_config(config)`}}),z3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. 
This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),X3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForMultipleChoice # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForMultipleChoice.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForMultipleChoice.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForMultipleChoice.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),W3=new X({}),V3=new C({props:{name:"class transformers.TFAutoModelForTableQuestionAnswering",anchor:"transformers.TFAutoModelForTableQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L421"}}),H3=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForQuestionAnswering">TFTapasForQuestionAnswering</a> (TAPAS model)</li> </ul>`,name:"config"}]}}),U3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForTableQuestionAnswering # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('google/tapas-base-finetuned-wtq') model = TFAutoModelForTableQuestionAnswering.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForTableQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTableQuestionAnswering.from_config(config)`}}),J3=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g., <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model into a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),K3=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForTableQuestionAnswering # Download model and configuration from huggingface.co and cache. 
model = TFAutoModelForTableQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq') # Update configuration during loading model = TFAutoModelForTableQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/tapas_pt_model_config.json') model = TFAutoModelForTableQuestionAnswering.from_pretrained('./pt_model/tapas_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForTableQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTableQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTableQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/tapas_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTableQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;./pt_model/tapas_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Y3=new X({}),Z3=new C({props:{name:"class transformers.TFAutoModelForTokenClassification",anchor:"transformers.TFAutoModelForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L432"}}),oy=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForTokenClassification">TFAlbertForTokenClassification</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForTokenClassification">TFBertForTokenClassification</a> (BERT 
model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForTokenClassification">TFCamembertForTokenClassification</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForTokenClassification">TFConvBertForTokenClassification</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForTokenClassification">TFDebertaForTokenClassification</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForTokenClassification">TFDebertaV2ForTokenClassification</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForTokenClassification">TFDistilBertForTokenClassification</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForTokenClassification">TFElectraForTokenClassification</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForTokenClassification">TFFlaubertForTokenClassification</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForTokenClassification">TFFunnelForTokenClassification</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForTokenClassification">TFLayoutLMForTokenClassification</a> (LayoutLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForTokenClassification">TFLongformerForTokenClassification</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForTokenClassification">TFMPNetForTokenClassification</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForTokenClassification">TFMobileBertForTokenClassification</a> 
(MobileBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForTokenClassification">TFRemBertForTokenClassification</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForTokenClassification">TFRoFormerForTokenClassification</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForTokenClassification">TFRobertaForTokenClassification</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForTokenClassification">TFXLMForTokenClassification</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForTokenClassification">TFXLMRobertaForTokenClassification</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForTokenClassification">TFXLNetForTokenClassification</a> (XLNet model)</li> </ul>`,name:"config"}]}}),ty=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForTokenClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForTokenClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_config(config)`}}),ry=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g., <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model into a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),ay=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForTokenClassification # Download model and configuration from huggingface.co and cache. model = TFAutoModelForTokenClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForTokenClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForTokenClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),ny=new X({}),sy=new C({props:{name:"class transformers.TFAutoModelForQuestionAnswering",anchor:"transformers.TFAutoModelForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_tf_auto.py#L414"}}),iy=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForQuestionAnswering">TFAlbertForQuestionAnswering</a> (ALBERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForQuestionAnswering">TFBertForQuestionAnswering</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForQuestionAnswering">TFCamembertForQuestionAnswering</a> (CamemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForQuestionAnswering">TFConvBertForQuestionAnswering</a> (ConvBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForQuestionAnswering">TFDebertaForQuestionAnswering</a> (DeBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config">DebertaV2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForQuestionAnswering">TFDebertaV2ForQuestionAnswering</a> (DeBERTa-v2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForQuestionAnswering">TFDistilBertForQuestionAnswering</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForQuestionAnswering">TFElectraForQuestionAnswering</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForQuestionAnsweringSimple">TFFlaubertForQuestionAnsweringSimple</a> (FlauBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForQuestionAnswering">TFFunnelForQuestionAnswering</a> (Funnel Transformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig">LongformerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForQuestionAnswering">TFLongformerForQuestionAnswering</a> (Longformer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForQuestionAnswering">TFMPNetForQuestionAnswering</a> (MPNet model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForQuestionAnswering">TFMobileBertForQuestionAnswering</a> (MobileBERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForQuestionAnswering">TFRemBertForQuestionAnswering</a> (RemBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForQuestionAnswering">TFRoFormerForQuestionAnswering</a> (RoFormer model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForQuestionAnswering">TFRobertaForQuestionAnswering</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForQuestionAnsweringSimple">TFXLMForQuestionAnsweringSimple</a> (XLM model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForQuestionAnswering">TFXLMRobertaForQuestionAnswering</a> (XLM-RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForQuestionAnsweringSimple">TFXLNetForQuestionAnsweringSimple</a> (XLNet model)</li> </ul>`,name:"config"}]}}),dy=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForQuestionAnswering # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = TFAutoModelForQuestionAnswering.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering.from_config(config)`}}),my=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g., <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model into a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),fy=new w({props:{code:`from transformers import AutoConfig, TFAutoModelForQuestionAnswering # Download model and configuration from huggingface.co and cache. model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-cased') # Update configuration during loading model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),cy=new X({}),gy=new C({props:{name:"class transformers.FlaxAutoModel",anchor:"transformers.FlaxAutoModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L211"}}),uy=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertModel">FlaxAlbertModel</a> (ALBERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartModel">FlaxBartModel</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.FlaxBeitModel">FlaxBeitModel</a> (BEiT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertModel">FlaxBertModel</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdModel">FlaxBigBirdModel</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.FlaxBlenderbotModel">FlaxBlenderbotModel</a> (Blenderbot model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.FlaxBlenderbotSmallModel">FlaxBlenderbotSmallModel</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPModel">FlaxCLIPModel</a> (CLIP model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertModel">FlaxDistilBertModel</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraModel">FlaxElectraModel</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.FlaxGPT2Model">FlaxGPT2Model</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.FlaxGPTJModel">FlaxGPTJModel</a> (GPT-J model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.FlaxGPTNeoModel">FlaxGPTNeoModel</a> (GPT Neo model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartModel">FlaxMBartModel</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.FlaxMT5Model">FlaxMT5Model</a> (mT5 model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.FlaxMarianModel">FlaxMarianModel</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.FlaxPegasusModel">FlaxPegasusModel</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaModel">FlaxRobertaModel</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5Model">FlaxT5Model</a> (T5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.FlaxViTModel">FlaxViTModel</a> (ViT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.FlaxVisionTextDualEncoderModel">FlaxVisionTextDualEncoderModel</a> (VisionTextDualEncoder model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.FlaxWav2Vec2Model">FlaxWav2Vec2Model</a> (Wav2Vec2 model)</li> </ul>`,name:"config"}]}}),py=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModel # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModel.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModel.from_config(config)`}}),_y=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),vy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModel # Download model and configuration from huggingface.co and cache. model = FlaxAutoModel.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModel.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),by=new X({}),Ty=new C({props:{name:"class transformers.FlaxAutoModelForCausalLM",anchor:"transformers.FlaxAutoModelForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L225"}}),My=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.FlaxGPT2LMHeadModel">FlaxGPT2LMHeadModel</a> (OpenAI GPT-2 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig">GPTJConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.FlaxGPTJForCausalLM">FlaxGPTJForCausalLM</a> (GPT-J model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.FlaxGPTNeoForCausalLM">FlaxGPTNeoForCausalLM</a> (GPT Neo model)</li> </ul>`,name:"config"}]}}),Ey=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForCausalLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForCausalLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_config(config)`}}),Cy=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use.
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),yy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForCausalLM # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForCausalLM.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForCausalLM.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForCausalLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),wy=new X({}),Ay=new C({props:{name:"class transformers.FlaxAutoModelForPreTraining",anchor:"transformers.FlaxAutoModelForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L218"}}),Ly=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForPreTraining">FlaxAlbertForPreTraining</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForConditionalGeneration">FlaxBartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForPreTraining">FlaxBertForPreTraining</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForPreTraining">FlaxBigBirdForPreTraining</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForPreTraining">FlaxElectraForPreTraining</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForConditionalGeneration">FlaxMBartForConditionalGeneration</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.FlaxMT5ForConditionalGeneration">FlaxMT5ForConditionalGeneration</a> (mT5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForMaskedLM">FlaxRobertaForMaskedLM</a> (RoBERTa model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5ForConditionalGeneration">FlaxT5ForConditionalGeneration</a> (T5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.FlaxWav2Vec2ForPreTraining">FlaxWav2Vec2ForPreTraining</a> (Wav2Vec2 model)</li> </ul>`,name:"config"}]}}),By=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForPreTraining # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForPreTraining.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForPreTraining.from_config(config)`}}),ky=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use.
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),Ry=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForPreTraining # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForPreTraining.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForPreTraining.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForPreTraining.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Sy=new X({}),Py=new C({props:{name:"class transformers.FlaxAutoModelForMaskedLM",anchor:"transformers.FlaxAutoModelForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L232"}}),Iy=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForMaskedLM">FlaxAlbertForMaskedLM</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForConditionalGeneration">FlaxBartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a 
href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForMaskedLM">FlaxBertForMaskedLM</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForMaskedLM">FlaxBigBirdForMaskedLM</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForMaskedLM">FlaxDistilBertForMaskedLM</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForMaskedLM">FlaxElectraForMaskedLM</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForConditionalGeneration">FlaxMBartForConditionalGeneration</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForMaskedLM">FlaxRobertaForMaskedLM</a> (RoBERTa model)</li> </ul>`,name:"config"}]}}),jy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForMaskedLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForMaskedLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMaskedLM.from_config(config)`}}),Ny=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). 
In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),Dy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForMaskedLM # Download model and configuration from huggingface.co and cache.
model = FlaxAutoModelForMaskedLM.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForMaskedLM.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForMaskedLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Gy=new X({}),Oy=new C({props:{name:"class transformers.FlaxAutoModelForSeq2SeqLM",anchor:"transformers.FlaxAutoModelForSeq2SeqLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L239"}}),zy=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForConditionalGeneration">FlaxBartForConditionalGeneration</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.FlaxBlenderbotForConditionalGeneration">FlaxBlenderbotForConditionalGeneration</a> (Blenderbot model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.FlaxBlenderbotSmallForConditionalGeneration">FlaxBlenderbotSmallForConditionalGeneration</a> (BlenderbotSmall model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.FlaxEncoderDecoderModel">FlaxEncoderDecoderModel</a> (Encoder decoder model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForConditionalGeneration">FlaxMBartForConditionalGeneration</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config">MT5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.FlaxMT5ForConditionalGeneration">FlaxMT5ForConditionalGeneration</a> (mT5 model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.FlaxMarianMTModel">FlaxMarianMTModel</a> (Marian model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.FlaxPegasusForConditionalGeneration">FlaxPegasusForConditionalGeneration</a> (Pegasus model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5ForConditionalGeneration">FlaxT5ForConditionalGeneration</a> (T5 model)</li> </ul>`,name:"config"}]}}),Xy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('t5-base') model = FlaxAutoModelForSeq2SeqLM.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSeq2SeqLM.from_config(config)`}}),Wy=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value.
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),Vy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM # Download model and configuration from huggingface.co and cache. model = FlaxAutoModelForSeq2SeqLM.from_pretrained('t5-base') # Update configuration during loading model = FlaxAutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/t5_pt_model_config.json') model = FlaxAutoModelForSeq2SeqLM.from_pretrained('./pt_model/t5_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;t5-base&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/t5_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&#x27;./pt_model/t5_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Qy=new X({}),Hy=new C({props:{name:"class transformers.FlaxAutoModelForSequenceClassification",anchor:"transformers.FlaxAutoModelForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L248"}}),Jy=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForSequenceClassification">FlaxAlbertForSequenceClassification</a> (ALBERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForSequenceClassification">FlaxBartForSequenceClassification</a> (BART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForSequenceClassification">FlaxBertForSequenceClassification</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForSequenceClassification">FlaxBigBirdForSequenceClassification</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForSequenceClassification">FlaxDistilBertForSequenceClassification</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForSequenceClassification">FlaxElectraForSequenceClassification</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForSequenceClassification">FlaxMBartForSequenceClassification</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForSequenceClassification">FlaxRobertaForSequenceClassification</a> (RoBERTa model)</li> </ul>`,name:"config"}]}}),Ky=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForSequenceClassification # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForSequenceClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSequenceClassification.from_config(config)`}}),Yy=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use.
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),Zy=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForSequenceClassification # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForSequenceClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForSequenceClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),ew=new X({}),ow=new C({props:{name:"class transformers.FlaxAutoModelForQuestionAnswering",anchor:"transformers.FlaxAutoModelForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L257"}}),rw=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForQuestionAnswering">FlaxAlbertForQuestionAnswering</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForQuestionAnswering">FlaxBartForQuestionAnswering</a> (BART model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForQuestionAnswering">FlaxBertForQuestionAnswering</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForQuestionAnswering">FlaxBigBirdForQuestionAnswering</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForQuestionAnswering">FlaxDistilBertForQuestionAnswering</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForQuestionAnswering">FlaxElectraForQuestionAnswering</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForQuestionAnswering">FlaxMBartForQuestionAnswering</a> (mBART model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForQuestionAnswering">FlaxRobertaForQuestionAnswering</a> (RoBERTa model)</li> </ul>`,name:"config"}]}}),aw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForQuestionAnswering # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForQuestionAnswering.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForQuestionAnswering.from_config(config)`}}),nw=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),sw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForQuestionAnswering # Download model and configuration from huggingface.co and cache. model = FlaxAutoModelForQuestionAnswering.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForQuestionAnswering.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),lw=new X({}),iw=new C({props:{name:"class transformers.FlaxAutoModelForTokenClassification",anchor:"transformers.FlaxAutoModelForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L264"}}),mw=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForTokenClassification">FlaxAlbertForTokenClassification</a> (ALBERT 
model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForTokenClassification">FlaxBertForTokenClassification</a> (BERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForTokenClassification">FlaxBigBirdForTokenClassification</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForTokenClassification">FlaxDistilBertForTokenClassification</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForTokenClassification">FlaxElectraForTokenClassification</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForTokenClassification">FlaxRobertaForTokenClassification</a> (RoBERTa model)</li> </ul>`,name:"config"}]}}),fw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForTokenClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForTokenClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForTokenClassification.from_config(config)`}}),cw=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). 
In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),gw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForTokenClassification # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForTokenClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForTokenClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForTokenClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),hw=new X({}),uw=new C({props:{name:"class transformers.FlaxAutoModelForMultipleChoice",anchor:"transformers.FlaxAutoModelForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L273"}}),_w=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig">AlbertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForMultipleChoice">FlaxAlbertForMultipleChoice</a> (ALBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForMultipleChoice">FlaxBertForMultipleChoice</a> (BERT model)</li> <li><a 
href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForMultipleChoice">FlaxBigBirdForMultipleChoice</a> (BigBird model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig">DistilBertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForMultipleChoice">FlaxDistilBertForMultipleChoice</a> (DistilBERT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig">ElectraConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForMultipleChoice">FlaxElectraForMultipleChoice</a> (ELECTRA model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForMultipleChoice">FlaxRobertaForMultipleChoice</a> (RoBERTa model)</li> </ul>`,name:"config"}]}}),vw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForMultipleChoice # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForMultipleChoice.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMultipleChoice.from_config(config)`}}),bw=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. 
This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),Tw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForMultipleChoice # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForMultipleChoice.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForMultipleChoice.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForMultipleChoice.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Fw=new X({}),Mw=new C({props:{name:"class transformers.FlaxAutoModelForNextSentencePrediction",anchor:"transformers.FlaxAutoModelForNextSentencePrediction",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L280"}}),Cw=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForNextSentencePrediction">FlaxBertForNextSentencePrediction</a> (BERT model)</li> </ul>`,name:"config"}]}}),yw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForNextSentencePrediction # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForNextSentencePrediction.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForNextSentencePrediction.from_config(config)`}}),ww=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),Aw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForNextSentencePrediction # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForNextSentencePrediction.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForNextSentencePrediction.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForNextSentencePrediction.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),xw=new X({}),Lw=new C({props:{name:"class transformers.FlaxAutoModelForImageClassification",anchor:"transformers.FlaxAutoModelForImageClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L289"}}),kw=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.FlaxBeitForImageClassification">FlaxBeitForImageClassification</a> (BEiT model)</li> <li><a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.FlaxViTForImageClassification">FlaxViTForImageClassification</a> (ViT model)</li> </ul>`,name:"config"}]}}),Rw=new w({props:{code:`from 
transformers import AutoConfig, FlaxAutoModelForImageClassification # Download configuration from huggingface.co and cache. config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForImageClassification.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForImageClassification.from_config(config)`}}),Sw=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),$w=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForImageClassification # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForImageClassification.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForImageClassification.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForImageClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForImageClassification.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Iw=new X({}),jw=new C({props:{name:"class transformers.FlaxAutoModelForVision2Seq",anchor:"transformers.FlaxAutoModelForVision2Seq",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/modeling_flax_auto.py#L298"}}),Dw=new C({props:{name:"from_config",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L384",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_config.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>]) &#x2014; The model class to instantiate is selected based on the configuration class:</p> <ul> <li><a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a> configuration class: <a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel">FlaxVisionEncoderDecoderModel</a> (Vision Encoder decoder model)</li> </ul>`,name:"config"}]}}),Gw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForVision2Seq # Download configuration from huggingface.co and cache. 
config = AutoConfig.from_pretrained('bert-base-cased') model = FlaxAutoModelForVision2Seq.from_config(config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForVision2Seq <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForVision2Seq.from_config(config)`}}),Ow=new C({props:{name:"from_pretrained",anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained",parameters:[{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/auto/auto_factory.py#L412",parametersDescription:[{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<em>str</em> or <em>os.PathLike</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <em>bert-base-uncased</em>, or namespaced under a user or organization name, like <em>dbmdz/bert-base-german-cased</em>.</li> <li>A path to a <em>directory</em> containing model weights saved using [<em>~PreTrainedModel.save_pretrained</em>], e.g., <em>./my_model_directory/</em>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <em>./pt_model/pytorch_model.bin</em>). In this case, <em>from_pt</em> should be set to <em>True</em> and a configuration object should be provided as <em>config</em> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.model_args",description:`<strong>model_args</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the underlying model <em><strong>init</strong>()</em> method.`,name:"model_args"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.config",description:`<strong>config</strong> ([<em>PretrainedConfig</em>], <em>optional</em>) &#x2014; Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using [<em>~PreTrainedModel.save_pretrained</em>] and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <em>pretrained_model_name_or_path</em> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<em>str</em> or <em>os.PathLike</em>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <em>pretrained_model_name_or_path</em> argument).`,name:"from_pt"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.force_download",description:`<strong>force_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.proxies",description:`<strong>proxies</strong> (<em>Dict[str, str]</em>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <em>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</em>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<em>bool</em>,</strong> <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.revision(str,",description:`<strong>revision(<em>str</em>,</strong> <em>optional</em>, defaults to <em>&#x201C;main&#x201D;</em>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <em>revision</em> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.trust_remote_code",description:`<strong>trust_remote_code</strong> (<em>bool</em>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to <em>True</em> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"},{anchor:"transformers.models.auto.auto_factory._BaseAutoModelClass.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <em>output_attentions=True</em>). Behaves differently depending on whether a <em>config</em> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <em>config</em>, <em>**kwargs</em> will be directly passed to the underlying model&#x2019;s <em><strong>init</strong></em> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <em>kwargs</em> will be first passed to the configuration class initialization function ([<em>~PretrainedConfig.from_pretrained</em>]). Each key of <em>kwargs</em> that corresponds to a configuration attribute will be used to override said attribute with the supplied <em>kwargs</em> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <em><strong>init</strong></em> function.</li> </ul>`,name:"kwargs"}]}}),qw=new w({props:{code:`from transformers import AutoConfig, FlaxAutoModelForVision2Seq # Download model and configuration from huggingface.co and cache. 
model = FlaxAutoModelForVision2Seq.from_pretrained('bert-base-cased') # Update configuration during loading model = FlaxAutoModelForVision2Seq.from_pretrained('bert-base-cased', output_attentions=True) model.config.output_attentions # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json') model = FlaxAutoModelForVision2Seq.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, FlaxAutoModelForVision2Seq <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForVision2Seq.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForVision2Seq.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.output_attentions <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pt_model_config.json&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForVision2Seq.from_pretrained(<span class="hljs-string">&#x27;./pt_model/bert_pytorch_model.bin&#x27;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),{c(){J=a("meta"),ye=l(),se=a("h1"),de=a("a"),Ue=a("span"),f(ie.$$.fragment),ue=l(),Bo=a("span"),Vl=o("Auto Classes"),km=l(),zr=a("p"),Ql=o(`In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you are supplying to the `),Hl=a("code"),RF=o("from_pretrained()"),Rm=o(` method. AutoClasses are here to do this job for you so that you automatically retrieve the relevant model given the name/path to the pretrained weights/config/vocabulary.`),Fe=l(),Ze=a("p"),Ul=o("Instantiating one of "),gn=a("a"),SF=o("AutoConfig"),hn=o(", "),un=a("a"),PF=o("AutoModel"),Jl=o(`, and `),pn=a("a"),$F=o("AutoTokenizer"),Kl=o(" will directly create a class of the relevant architecture. For instance"),Sm=l(),f(ba.$$.fragment),eo=l(),me=a("p"),IA=o("will create a model that is an instance of "),Yl=a("a"),jA=o("BertModel"),NA=o("."),ko=l(),Ta=a("p"),DA=o("There is one class of "),Pm=a("code"),GA=o("AutoModel"),dwe=o(" for each task, and for each backend (PyTorch, TensorFlow, or Flax)."),bEe=l(),Zl=a("h2"),$m=a("a"),YG=a("span"),f(IF.$$.fragment),mwe=l(),ZG=a("span"),fwe=o("Extending the Auto Classes"),TEe=l(),_n=a("p"),cwe=o(`Each of the auto classes has a method to be extended with your custom classes. 
For instance, if you have defined a custom class of model `),eO=a("code"),gwe=o("NewModel"),hwe=o(", make sure you have a "),oO=a("code"),uwe=o("NewModelConfig"),pwe=o(` then you can add those to the auto classes like this:`),FEe=l(),f(jF.$$.fragment),MEe=l(),OA=a("p"),_we=o("You will then be able to use the auto classes like you would usually do!"),EEe=l(),f(Im.$$.fragment),CEe=l(),ei=a("h2"),jm=a("a"),tO=a("span"),f(NF.$$.fragment),vwe=l(),rO=a("span"),bwe=o("AutoConfig"),yEe=l(),Ro=a("div"),f(DF.$$.fragment),Twe=l(),GF=a("p"),Fwe=o(`This is a generic configuration class that will be instantiated as one of the configuration classes of the library when created with the `),qA=a("a"),Mwe=o("from_pretrained()"),Ewe=o(" class method."),Cwe=l(),OF=a("p"),ywe=o("This class cannot be instantiated directly using "),aO=a("code"),wwe=o("__init__()"),Awe=o(" (throws an error)."),xwe=l(),oo=a("div"),f(qF.$$.fragment),Lwe=l(),nO=a("p"),Bwe=o("Instantiate one of the configuration classes of the library from a pretrained model configuration."),kwe=l(),oi=a("p"),Rwe=o("The configuration class to instantiate is selected based on the "),sO=a("em"),Swe=o("model_type"),Pwe=o(` property of the config object that is loaded, or when it\u2019s missing, by falling back to using pattern matching on `),lO=a("em"),$we=o("pretrained_model_name_or_path"),Iwe=o(":"),jwe=l(),b=a("ul"),Nm=a("li"),iO=a("strong"),Nwe=o("albert"),Dwe=o(" \u2014 "),zA=a("a"),Gwe=o("AlbertConfig"),Owe=o(" (ALBERT model)"),qwe=l(),Dm=a("li"),dO=a("strong"),zwe=o("bart"),Xwe=o(" \u2014 "),XA=a("a"),Wwe=o("BartConfig"),Vwe=o(" (BART model)"),Qwe=l(),Gm=a("li"),mO=a("strong"),Hwe=o("beit"),Uwe=o(" \u2014 "),WA=a("a"),Jwe=o("BeitConfig"),Kwe=o(" (BEiT model)"),Ywe=l(),Om=a("li"),fO=a("strong"),Zwe=o("bert"),eAe=o(" \u2014 "),VA=a("a"),oAe=o("BertConfig"),tAe=o(" (BERT model)"),rAe=l(),qm=a("li"),cO=a("strong"),aAe=o("bert-generation"),nAe=o(" \u2014 "),QA=a("a"),sAe=o("BertGenerationConfig"),lAe=o(" (Bert Generation model)"),iAe=l(),zm=a("li"),gO=a("strong"),dAe=o("big_bird"),mAe=o(" \u2014 "),HA=a("a"),fAe=o("BigBirdConfig"),cAe=o(" (BigBird model)"),gAe=l(),Xm=a("li"),hO=a("strong"),hAe=o("bigbird_pegasus"),uAe=o(" \u2014 "),UA=a("a"),pAe=o("BigBirdPegasusConfig"),_Ae=o(" (BigBirdPegasus model)"),vAe=l(),Wm=a("li"),uO=a("strong"),bAe=o("blenderbot"),TAe=o(" \u2014 "),JA=a("a"),FAe=o("BlenderbotConfig"),MAe=o(" (Blenderbot model)"),EAe=l(),Vm=a("li"),pO=a("strong"),CAe=o("blenderbot-small"),yAe=o(" \u2014 "),KA=a("a"),wAe=o("BlenderbotSmallConfig"),AAe=o(" (BlenderbotSmall model)"),xAe=l(),Qm=a("li"),_O=a("strong"),LAe=o("camembert"),BAe=o(" \u2014 "),YA=a("a"),kAe=o("CamembertConfig"),RAe=o(" (CamemBERT model)"),SAe=l(),Hm=a("li"),vO=a("strong"),PAe=o("canine"),$Ae=o(" \u2014 "),ZA=a("a"),IAe=o("CanineConfig"),jAe=o(" (Canine model)"),NAe=l(),Um=a("li"),bO=a("strong"),DAe=o("clip"),GAe=o(" \u2014 "),e7=a("a"),OAe=o("CLIPConfig"),qAe=o(" (CLIP model)"),zAe=l(),Jm=a("li"),TO=a("strong"),XAe=o("convbert"),WAe=o(" \u2014 "),o7=a("a"),VAe=o("ConvBertConfig"),QAe=o(" (ConvBERT model)"),HAe=l(),Km=a("li"),FO=a("strong"),UAe=o("ctrl"),JAe=o(" \u2014 "),t7=a("a"),KAe=o("CTRLConfig"),YAe=o(" (CTRL model)"),ZAe=l(),Ym=a("li"),MO=a("strong"),e7e=o("deberta"),o7e=o(" \u2014 "),r7=a("a"),t7e=o("DebertaConfig"),r7e=o(" (DeBERTa model)"),a7e=l(),Zm=a("li"),EO=a("strong"),n7e=o("deberta-v2"),s7e=o(" \u2014 "),a7=a("a"),l7e=o("DebertaV2Config"),i7e=o(" (DeBERTa-v2 model)"),d7e=l(),ef=a("li"),CO=a("strong"),m7e=o("deit"),f7e=o(" \u2014 
"),n7=a("a"),c7e=o("DeiTConfig"),g7e=o(" (DeiT model)"),h7e=l(),of=a("li"),yO=a("strong"),u7e=o("detr"),p7e=o(" \u2014 "),s7=a("a"),_7e=o("DetrConfig"),v7e=o(" (DETR model)"),b7e=l(),tf=a("li"),wO=a("strong"),T7e=o("distilbert"),F7e=o(" \u2014 "),l7=a("a"),M7e=o("DistilBertConfig"),E7e=o(" (DistilBERT model)"),C7e=l(),rf=a("li"),AO=a("strong"),y7e=o("dpr"),w7e=o(" \u2014 "),i7=a("a"),A7e=o("DPRConfig"),x7e=o(" (DPR model)"),L7e=l(),af=a("li"),xO=a("strong"),B7e=o("electra"),k7e=o(" \u2014 "),d7=a("a"),R7e=o("ElectraConfig"),S7e=o(" (ELECTRA model)"),P7e=l(),nf=a("li"),LO=a("strong"),$7e=o("encoder-decoder"),I7e=o(" \u2014 "),m7=a("a"),j7e=o("EncoderDecoderConfig"),N7e=o(" (Encoder decoder model)"),D7e=l(),sf=a("li"),BO=a("strong"),G7e=o("flaubert"),O7e=o(" \u2014 "),f7=a("a"),q7e=o("FlaubertConfig"),z7e=o(" (FlauBERT model)"),X7e=l(),lf=a("li"),kO=a("strong"),W7e=o("fnet"),V7e=o(" \u2014 "),c7=a("a"),Q7e=o("FNetConfig"),H7e=o(" (FNet model)"),U7e=l(),df=a("li"),RO=a("strong"),J7e=o("fsmt"),K7e=o(" \u2014 "),g7=a("a"),Y7e=o("FSMTConfig"),Z7e=o(" (FairSeq Machine-Translation model)"),exe=l(),mf=a("li"),SO=a("strong"),oxe=o("funnel"),txe=o(" \u2014 "),h7=a("a"),rxe=o("FunnelConfig"),axe=o(" (Funnel Transformer model)"),nxe=l(),ff=a("li"),PO=a("strong"),sxe=o("gpt2"),lxe=o(" \u2014 "),u7=a("a"),ixe=o("GPT2Config"),dxe=o(" (OpenAI GPT-2 model)"),mxe=l(),cf=a("li"),$O=a("strong"),fxe=o("gpt_neo"),cxe=o(" \u2014 "),p7=a("a"),gxe=o("GPTNeoConfig"),hxe=o(" (GPT Neo model)"),uxe=l(),gf=a("li"),IO=a("strong"),pxe=o("gptj"),_xe=o(" \u2014 "),_7=a("a"),vxe=o("GPTJConfig"),bxe=o(" (GPT-J model)"),Txe=l(),hf=a("li"),jO=a("strong"),Fxe=o("hubert"),Mxe=o(" \u2014 "),v7=a("a"),Exe=o("HubertConfig"),Cxe=o(" (Hubert model)"),yxe=l(),uf=a("li"),NO=a("strong"),wxe=o("ibert"),Axe=o(" \u2014 "),b7=a("a"),xxe=o("IBertConfig"),Lxe=o(" (I-BERT model)"),Bxe=l(),pf=a("li"),DO=a("strong"),kxe=o("imagegpt"),Rxe=o(" \u2014 "),T7=a("a"),Sxe=o("ImageGPTConfig"),Pxe=o(" (ImageGPT model)"),$xe=l(),_f=a("li"),GO=a("strong"),Ixe=o("layoutlm"),jxe=o(" \u2014 "),F7=a("a"),Nxe=o("LayoutLMConfig"),Dxe=o(" (LayoutLM model)"),Gxe=l(),vf=a("li"),OO=a("strong"),Oxe=o("layoutlmv2"),qxe=o(" \u2014 "),M7=a("a"),zxe=o("LayoutLMv2Config"),Xxe=o(" (LayoutLMv2 model)"),Wxe=l(),bf=a("li"),qO=a("strong"),Vxe=o("led"),Qxe=o(" \u2014 "),E7=a("a"),Hxe=o("LEDConfig"),Uxe=o(" (LED model)"),Jxe=l(),Tf=a("li"),zO=a("strong"),Kxe=o("longformer"),Yxe=o(" \u2014 "),C7=a("a"),Zxe=o("LongformerConfig"),e6e=o(" (Longformer model)"),o6e=l(),Ff=a("li"),XO=a("strong"),t6e=o("luke"),r6e=o(" \u2014 "),y7=a("a"),a6e=o("LukeConfig"),n6e=o(" (LUKE model)"),s6e=l(),Mf=a("li"),WO=a("strong"),l6e=o("lxmert"),i6e=o(" \u2014 "),w7=a("a"),d6e=o("LxmertConfig"),m6e=o(" (LXMERT model)"),f6e=l(),Ef=a("li"),VO=a("strong"),c6e=o("m2m_100"),g6e=o(" \u2014 "),A7=a("a"),h6e=o("M2M100Config"),u6e=o(" (M2M100 model)"),p6e=l(),Cf=a("li"),QO=a("strong"),_6e=o("marian"),v6e=o(" \u2014 "),x7=a("a"),b6e=o("MarianConfig"),T6e=o(" (Marian model)"),F6e=l(),yf=a("li"),HO=a("strong"),M6e=o("mbart"),E6e=o(" \u2014 "),L7=a("a"),C6e=o("MBartConfig"),y6e=o(" (mBART model)"),w6e=l(),wf=a("li"),UO=a("strong"),A6e=o("megatron-bert"),x6e=o(" \u2014 "),B7=a("a"),L6e=o("MegatronBertConfig"),B6e=o(" (MegatronBert model)"),k6e=l(),Af=a("li"),JO=a("strong"),R6e=o("mobilebert"),S6e=o(" \u2014 "),k7=a("a"),P6e=o("MobileBertConfig"),$6e=o(" (MobileBERT model)"),I6e=l(),xf=a("li"),KO=a("strong"),j6e=o("mpnet"),N6e=o(" \u2014 "),R7=a("a"),D6e=o("MPNetConfig"),G6e=o(" (MPNet 
model)"),O6e=l(),Lf=a("li"),YO=a("strong"),q6e=o("mt5"),z6e=o(" \u2014 "),S7=a("a"),X6e=o("MT5Config"),W6e=o(" (mT5 model)"),V6e=l(),Bf=a("li"),ZO=a("strong"),Q6e=o("openai-gpt"),H6e=o(" \u2014 "),P7=a("a"),U6e=o("OpenAIGPTConfig"),J6e=o(" (OpenAI GPT model)"),K6e=l(),kf=a("li"),eq=a("strong"),Y6e=o("pegasus"),Z6e=o(" \u2014 "),$7=a("a"),e8e=o("PegasusConfig"),o8e=o(" (Pegasus model)"),t8e=l(),Rf=a("li"),oq=a("strong"),r8e=o("perceiver"),a8e=o(" \u2014 "),I7=a("a"),n8e=o("PerceiverConfig"),s8e=o(" (Perceiver model)"),l8e=l(),Sf=a("li"),tq=a("strong"),i8e=o("prophetnet"),d8e=o(" \u2014 "),j7=a("a"),m8e=o("ProphetNetConfig"),f8e=o(" (ProphetNet model)"),c8e=l(),Pf=a("li"),rq=a("strong"),g8e=o("qdqbert"),h8e=o(" \u2014 "),N7=a("a"),u8e=o("QDQBertConfig"),p8e=o(" (QDQBert model)"),_8e=l(),$f=a("li"),aq=a("strong"),v8e=o("rag"),b8e=o(" \u2014 "),D7=a("a"),T8e=o("RagConfig"),F8e=o(" (RAG model)"),M8e=l(),If=a("li"),nq=a("strong"),E8e=o("reformer"),C8e=o(" \u2014 "),G7=a("a"),y8e=o("ReformerConfig"),w8e=o(" (Reformer model)"),A8e=l(),jf=a("li"),sq=a("strong"),x8e=o("rembert"),L8e=o(" \u2014 "),O7=a("a"),B8e=o("RemBertConfig"),k8e=o(" (RemBERT model)"),R8e=l(),Nf=a("li"),lq=a("strong"),S8e=o("retribert"),P8e=o(" \u2014 "),q7=a("a"),$8e=o("RetriBertConfig"),I8e=o(" (RetriBERT model)"),j8e=l(),Df=a("li"),iq=a("strong"),N8e=o("roberta"),D8e=o(" \u2014 "),z7=a("a"),G8e=o("RobertaConfig"),O8e=o(" (RoBERTa model)"),q8e=l(),Gf=a("li"),dq=a("strong"),z8e=o("roformer"),X8e=o(" \u2014 "),X7=a("a"),W8e=o("RoFormerConfig"),V8e=o(" (RoFormer model)"),Q8e=l(),Of=a("li"),mq=a("strong"),H8e=o("segformer"),U8e=o(" \u2014 "),W7=a("a"),J8e=o("SegformerConfig"),K8e=o(" (SegFormer model)"),Y8e=l(),qf=a("li"),fq=a("strong"),Z8e=o("sew"),eLe=o(" \u2014 "),V7=a("a"),oLe=o("SEWConfig"),tLe=o(" (SEW model)"),rLe=l(),zf=a("li"),cq=a("strong"),aLe=o("sew-d"),nLe=o(" \u2014 "),Q7=a("a"),sLe=o("SEWDConfig"),lLe=o(" (SEW-D model)"),iLe=l(),Xf=a("li"),gq=a("strong"),dLe=o("speech-encoder-decoder"),mLe=o(" \u2014 "),H7=a("a"),fLe=o("SpeechEncoderDecoderConfig"),cLe=o(" (Speech Encoder decoder model)"),gLe=l(),Wf=a("li"),hq=a("strong"),hLe=o("speech_to_text"),uLe=o(" \u2014 "),U7=a("a"),pLe=o("Speech2TextConfig"),_Le=o(" (Speech2Text model)"),vLe=l(),Vf=a("li"),uq=a("strong"),bLe=o("speech_to_text_2"),TLe=o(" \u2014 "),J7=a("a"),FLe=o("Speech2Text2Config"),MLe=o(" (Speech2Text2 model)"),ELe=l(),Qf=a("li"),pq=a("strong"),CLe=o("splinter"),yLe=o(" \u2014 "),K7=a("a"),wLe=o("SplinterConfig"),ALe=o(" (Splinter model)"),xLe=l(),Hf=a("li"),_q=a("strong"),LLe=o("squeezebert"),BLe=o(" \u2014 "),Y7=a("a"),kLe=o("SqueezeBertConfig"),RLe=o(" (SqueezeBERT model)"),SLe=l(),Uf=a("li"),vq=a("strong"),PLe=o("t5"),$Le=o(" \u2014 "),Z7=a("a"),ILe=o("T5Config"),jLe=o(" (T5 model)"),NLe=l(),Jf=a("li"),bq=a("strong"),DLe=o("tapas"),GLe=o(" \u2014 "),ex=a("a"),OLe=o("TapasConfig"),qLe=o(" (TAPAS model)"),zLe=l(),Kf=a("li"),Tq=a("strong"),XLe=o("transfo-xl"),WLe=o(" \u2014 "),ox=a("a"),VLe=o("TransfoXLConfig"),QLe=o(" (Transformer-XL model)"),HLe=l(),Yf=a("li"),Fq=a("strong"),ULe=o("trocr"),JLe=o(" \u2014 "),tx=a("a"),KLe=o("TrOCRConfig"),YLe=o(" (TrOCR model)"),ZLe=l(),Zf=a("li"),Mq=a("strong"),eBe=o("unispeech"),oBe=o(" \u2014 "),rx=a("a"),tBe=o("UniSpeechConfig"),rBe=o(" (UniSpeech model)"),aBe=l(),ec=a("li"),Eq=a("strong"),nBe=o("unispeech-sat"),sBe=o(" \u2014 "),ax=a("a"),lBe=o("UniSpeechSatConfig"),iBe=o(" (UniSpeechSat model)"),dBe=l(),oc=a("li"),Cq=a("strong"),mBe=o("vision-encoder-decoder"),fBe=o(" \u2014 
"),nx=a("a"),cBe=o("VisionEncoderDecoderConfig"),gBe=o(" (Vision Encoder decoder model)"),hBe=l(),tc=a("li"),yq=a("strong"),uBe=o("vision-text-dual-encoder"),pBe=o(" \u2014 "),sx=a("a"),_Be=o("VisionTextDualEncoderConfig"),vBe=o(" (VisionTextDualEncoder model)"),bBe=l(),rc=a("li"),wq=a("strong"),TBe=o("visual_bert"),FBe=o(" \u2014 "),lx=a("a"),MBe=o("VisualBertConfig"),EBe=o(" (VisualBert model)"),CBe=l(),ac=a("li"),Aq=a("strong"),yBe=o("vit"),wBe=o(" \u2014 "),ix=a("a"),ABe=o("ViTConfig"),xBe=o(" (ViT model)"),LBe=l(),nc=a("li"),xq=a("strong"),BBe=o("wav2vec2"),kBe=o(" \u2014 "),dx=a("a"),RBe=o("Wav2Vec2Config"),SBe=o(" (Wav2Vec2 model)"),PBe=l(),sc=a("li"),Lq=a("strong"),$Be=o("wavlm"),IBe=o(" \u2014 "),mx=a("a"),jBe=o("WavLMConfig"),NBe=o(" (WavLM model)"),DBe=l(),lc=a("li"),Bq=a("strong"),GBe=o("xlm"),OBe=o(" \u2014 "),fx=a("a"),qBe=o("XLMConfig"),zBe=o(" (XLM model)"),XBe=l(),ic=a("li"),kq=a("strong"),WBe=o("xlm-prophetnet"),VBe=o(" \u2014 "),cx=a("a"),QBe=o("XLMProphetNetConfig"),HBe=o(" (XLMProphetNet model)"),UBe=l(),dc=a("li"),Rq=a("strong"),JBe=o("xlm-roberta"),KBe=o(" \u2014 "),gx=a("a"),YBe=o("XLMRobertaConfig"),ZBe=o(" (XLM-RoBERTa model)"),e9e=l(),mc=a("li"),Sq=a("strong"),o9e=o("xlnet"),t9e=o(" \u2014 "),hx=a("a"),r9e=o("XLNetConfig"),a9e=o(" (XLNet model)"),n9e=l(),Pq=a("p"),s9e=o("Examples:"),l9e=l(),f(zF.$$.fragment),i9e=l(),fc=a("div"),f(XF.$$.fragment),d9e=l(),$q=a("p"),m9e=o("Register a new configuration for this class."),wEe=l(),ti=a("h2"),cc=a("a"),Iq=a("span"),f(WF.$$.fragment),f9e=l(),jq=a("span"),c9e=o("AutoTokenizer"),AEe=l(),So=a("div"),f(VF.$$.fragment),g9e=l(),QF=a("p"),h9e=o(`This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when created with the `),ux=a("a"),u9e=o("AutoTokenizer.from_pretrained()"),p9e=o(" class method."),_9e=l(),HF=a("p"),v9e=o("This class cannot be instantiated directly using "),Nq=a("code"),b9e=o("__init__()"),T9e=o(" (throws an error)."),F9e=l(),to=a("div"),f(UF.$$.fragment),M9e=l(),Dq=a("p"),E9e=o("Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary."),C9e=l(),Fa=a("p"),y9e=o("The tokenizer class to instantiate is selected based on the "),Gq=a("em"),w9e=o("model_type"),A9e=o(` property of the config object (either passed as an argument or loaded from `),Oq=a("em"),x9e=o("pretrained_model_name_or_path"),L9e=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),qq=a("em"),B9e=o("pretrained_model_name_or_path"),k9e=o(":"),R9e=l(),E=a("ul"),vn=a("li"),zq=a("strong"),S9e=o("albert"),P9e=o(" \u2014 "),px=a("a"),$9e=o("AlbertTokenizer"),I9e=o(" or "),_x=a("a"),j9e=o("AlbertTokenizerFast"),N9e=o(" (ALBERT model)"),D9e=l(),bn=a("li"),Xq=a("strong"),G9e=o("bart"),O9e=o(" \u2014 "),vx=a("a"),q9e=o("BartTokenizer"),z9e=o(" or "),bx=a("a"),X9e=o("BartTokenizerFast"),W9e=o(" (BART model)"),V9e=l(),Tn=a("li"),Wq=a("strong"),Q9e=o("barthez"),H9e=o(" \u2014 "),Tx=a("a"),U9e=o("BarthezTokenizer"),J9e=o(" or "),Fx=a("a"),K9e=o("BarthezTokenizerFast"),Y9e=o(" (BARThez model)"),Z9e=l(),gc=a("li"),Vq=a("strong"),eke=o("bartpho"),oke=o(" \u2014 "),Mx=a("a"),tke=o("BartphoTokenizer"),rke=o(" (BARTpho model)"),ake=l(),Fn=a("li"),Qq=a("strong"),nke=o("bert"),ske=o(" \u2014 "),Ex=a("a"),lke=o("BertTokenizer"),ike=o(" or "),Cx=a("a"),dke=o("BertTokenizerFast"),mke=o(" (BERT model)"),fke=l(),hc=a("li"),Hq=a("strong"),cke=o("bert-generation"),gke=o(" \u2014 "),yx=a("a"),hke=o("BertGenerationTokenizer"),uke=o(" (Bert Generation 
model)"),pke=l(),uc=a("li"),Uq=a("strong"),_ke=o("bert-japanese"),vke=o(" \u2014 "),wx=a("a"),bke=o("BertJapaneseTokenizer"),Tke=o(" (BertJapanese model)"),Fke=l(),pc=a("li"),Jq=a("strong"),Mke=o("bertweet"),Eke=o(" \u2014 "),Ax=a("a"),Cke=o("BertweetTokenizer"),yke=o(" (Bertweet model)"),wke=l(),Mn=a("li"),Kq=a("strong"),Ake=o("big_bird"),xke=o(" \u2014 "),xx=a("a"),Lke=o("BigBirdTokenizer"),Bke=o(" or "),Lx=a("a"),kke=o("BigBirdTokenizerFast"),Rke=o(" (BigBird model)"),Ske=l(),En=a("li"),Yq=a("strong"),Pke=o("bigbird_pegasus"),$ke=o(" \u2014 "),Bx=a("a"),Ike=o("PegasusTokenizer"),jke=o(" or "),kx=a("a"),Nke=o("PegasusTokenizerFast"),Dke=o(" (BigBirdPegasus model)"),Gke=l(),Cn=a("li"),Zq=a("strong"),Oke=o("blenderbot"),qke=o(" \u2014 "),Rx=a("a"),zke=o("BlenderbotTokenizer"),Xke=o(" or "),Sx=a("a"),Wke=o("BlenderbotTokenizerFast"),Vke=o(" (Blenderbot model)"),Qke=l(),_c=a("li"),ez=a("strong"),Hke=o("blenderbot-small"),Uke=o(" \u2014 "),Px=a("a"),Jke=o("BlenderbotSmallTokenizer"),Kke=o(" (BlenderbotSmall model)"),Yke=l(),vc=a("li"),oz=a("strong"),Zke=o("byt5"),eRe=o(" \u2014 "),$x=a("a"),oRe=o("ByT5Tokenizer"),tRe=o(" (ByT5 model)"),rRe=l(),yn=a("li"),tz=a("strong"),aRe=o("camembert"),nRe=o(" \u2014 "),Ix=a("a"),sRe=o("CamembertTokenizer"),lRe=o(" or "),jx=a("a"),iRe=o("CamembertTokenizerFast"),dRe=o(" (CamemBERT model)"),mRe=l(),bc=a("li"),rz=a("strong"),fRe=o("canine"),cRe=o(" \u2014 "),Nx=a("a"),gRe=o("CanineTokenizer"),hRe=o(" (Canine model)"),uRe=l(),wn=a("li"),az=a("strong"),pRe=o("clip"),_Re=o(" \u2014 "),Dx=a("a"),vRe=o("CLIPTokenizer"),bRe=o(" or "),Gx=a("a"),TRe=o("CLIPTokenizerFast"),FRe=o(" (CLIP model)"),MRe=l(),An=a("li"),nz=a("strong"),ERe=o("convbert"),CRe=o(" \u2014 "),Ox=a("a"),yRe=o("ConvBertTokenizer"),wRe=o(" or "),qx=a("a"),ARe=o("ConvBertTokenizerFast"),xRe=o(" (ConvBERT model)"),LRe=l(),xn=a("li"),sz=a("strong"),BRe=o("cpm"),kRe=o(" \u2014 "),zx=a("a"),RRe=o("CpmTokenizer"),SRe=o(" or "),lz=a("code"),PRe=o("CpmTokenizerFast"),$Re=o(" (CPM model)"),IRe=l(),Tc=a("li"),iz=a("strong"),jRe=o("ctrl"),NRe=o(" \u2014 "),Xx=a("a"),DRe=o("CTRLTokenizer"),GRe=o(" (CTRL model)"),ORe=l(),Ln=a("li"),dz=a("strong"),qRe=o("deberta"),zRe=o(" \u2014 "),Wx=a("a"),XRe=o("DebertaTokenizer"),WRe=o(" or "),Vx=a("a"),VRe=o("DebertaTokenizerFast"),QRe=o(" (DeBERTa model)"),HRe=l(),Fc=a("li"),mz=a("strong"),URe=o("deberta-v2"),JRe=o(" \u2014 "),Qx=a("a"),KRe=o("DebertaV2Tokenizer"),YRe=o(" (DeBERTa-v2 model)"),ZRe=l(),Bn=a("li"),fz=a("strong"),eSe=o("distilbert"),oSe=o(" \u2014 "),Hx=a("a"),tSe=o("DistilBertTokenizer"),rSe=o(" or "),Ux=a("a"),aSe=o("DistilBertTokenizerFast"),nSe=o(" (DistilBERT model)"),sSe=l(),kn=a("li"),cz=a("strong"),lSe=o("dpr"),iSe=o(" \u2014 "),Jx=a("a"),dSe=o("DPRQuestionEncoderTokenizer"),mSe=o(" or "),Kx=a("a"),fSe=o("DPRQuestionEncoderTokenizerFast"),cSe=o(" (DPR model)"),gSe=l(),Rn=a("li"),gz=a("strong"),hSe=o("electra"),uSe=o(" \u2014 "),Yx=a("a"),pSe=o("ElectraTokenizer"),_Se=o(" or "),Zx=a("a"),vSe=o("ElectraTokenizerFast"),bSe=o(" (ELECTRA model)"),TSe=l(),Mc=a("li"),hz=a("strong"),FSe=o("flaubert"),MSe=o(" \u2014 "),e6=a("a"),ESe=o("FlaubertTokenizer"),CSe=o(" (FlauBERT model)"),ySe=l(),Sn=a("li"),uz=a("strong"),wSe=o("fnet"),ASe=o(" \u2014 "),o6=a("a"),xSe=o("FNetTokenizer"),LSe=o(" or "),t6=a("a"),BSe=o("FNetTokenizerFast"),kSe=o(" (FNet model)"),RSe=l(),Ec=a("li"),pz=a("strong"),SSe=o("fsmt"),PSe=o(" \u2014 "),r6=a("a"),$Se=o("FSMTTokenizer"),ISe=o(" (FairSeq Machine-Translation model)"),jSe=l(),Pn=a("li"),_z=a("strong"),NSe=o("funnel"),DSe=o(" \u2014 
"),a6=a("a"),GSe=o("FunnelTokenizer"),OSe=o(" or "),n6=a("a"),qSe=o("FunnelTokenizerFast"),zSe=o(" (Funnel Transformer model)"),XSe=l(),$n=a("li"),vz=a("strong"),WSe=o("gpt2"),VSe=o(" \u2014 "),s6=a("a"),QSe=o("GPT2Tokenizer"),HSe=o(" or "),l6=a("a"),USe=o("GPT2TokenizerFast"),JSe=o(" (OpenAI GPT-2 model)"),KSe=l(),In=a("li"),bz=a("strong"),YSe=o("gpt_neo"),ZSe=o(" \u2014 "),i6=a("a"),ePe=o("GPT2Tokenizer"),oPe=o(" or "),d6=a("a"),tPe=o("GPT2TokenizerFast"),rPe=o(" (GPT Neo model)"),aPe=l(),Cc=a("li"),Tz=a("strong"),nPe=o("hubert"),sPe=o(" \u2014 "),m6=a("a"),lPe=o("Wav2Vec2CTCTokenizer"),iPe=o(" (Hubert model)"),dPe=l(),jn=a("li"),Fz=a("strong"),mPe=o("ibert"),fPe=o(" \u2014 "),f6=a("a"),cPe=o("RobertaTokenizer"),gPe=o(" or "),c6=a("a"),hPe=o("RobertaTokenizerFast"),uPe=o(" (I-BERT model)"),pPe=l(),Nn=a("li"),Mz=a("strong"),_Pe=o("layoutlm"),vPe=o(" \u2014 "),g6=a("a"),bPe=o("LayoutLMTokenizer"),TPe=o(" or "),h6=a("a"),FPe=o("LayoutLMTokenizerFast"),MPe=o(" (LayoutLM model)"),EPe=l(),Dn=a("li"),Ez=a("strong"),CPe=o("layoutlmv2"),yPe=o(" \u2014 "),u6=a("a"),wPe=o("LayoutLMv2Tokenizer"),APe=o(" or "),p6=a("a"),xPe=o("LayoutLMv2TokenizerFast"),LPe=o(" (LayoutLMv2 model)"),BPe=l(),Gn=a("li"),Cz=a("strong"),kPe=o("led"),RPe=o(" \u2014 "),_6=a("a"),SPe=o("LEDTokenizer"),PPe=o(" or "),v6=a("a"),$Pe=o("LEDTokenizerFast"),IPe=o(" (LED model)"),jPe=l(),On=a("li"),yz=a("strong"),NPe=o("longformer"),DPe=o(" \u2014 "),b6=a("a"),GPe=o("LongformerTokenizer"),OPe=o(" or "),T6=a("a"),qPe=o("LongformerTokenizerFast"),zPe=o(" (Longformer model)"),XPe=l(),yc=a("li"),wz=a("strong"),WPe=o("luke"),VPe=o(" \u2014 "),F6=a("a"),QPe=o("LukeTokenizer"),HPe=o(" (LUKE model)"),UPe=l(),qn=a("li"),Az=a("strong"),JPe=o("lxmert"),KPe=o(" \u2014 "),M6=a("a"),YPe=o("LxmertTokenizer"),ZPe=o(" or "),E6=a("a"),e$e=o("LxmertTokenizerFast"),o$e=o(" (LXMERT model)"),t$e=l(),wc=a("li"),xz=a("strong"),r$e=o("m2m_100"),a$e=o(" \u2014 "),C6=a("a"),n$e=o("M2M100Tokenizer"),s$e=o(" (M2M100 model)"),l$e=l(),Ac=a("li"),Lz=a("strong"),i$e=o("marian"),d$e=o(" \u2014 "),y6=a("a"),m$e=o("MarianTokenizer"),f$e=o(" (Marian model)"),c$e=l(),zn=a("li"),Bz=a("strong"),g$e=o("mbart"),h$e=o(" \u2014 "),w6=a("a"),u$e=o("MBartTokenizer"),p$e=o(" or "),A6=a("a"),_$e=o("MBartTokenizerFast"),v$e=o(" (mBART model)"),b$e=l(),Xn=a("li"),kz=a("strong"),T$e=o("mbart50"),F$e=o(" \u2014 "),x6=a("a"),M$e=o("MBart50Tokenizer"),E$e=o(" or "),L6=a("a"),C$e=o("MBart50TokenizerFast"),y$e=o(" (mBART-50 model)"),w$e=l(),Wn=a("li"),Rz=a("strong"),A$e=o("mobilebert"),x$e=o(" \u2014 "),B6=a("a"),L$e=o("MobileBertTokenizer"),B$e=o(" or "),k6=a("a"),k$e=o("MobileBertTokenizerFast"),R$e=o(" (MobileBERT model)"),S$e=l(),Vn=a("li"),Sz=a("strong"),P$e=o("mpnet"),$$e=o(" \u2014 "),R6=a("a"),I$e=o("MPNetTokenizer"),j$e=o(" or "),S6=a("a"),N$e=o("MPNetTokenizerFast"),D$e=o(" (MPNet model)"),G$e=l(),Qn=a("li"),Pz=a("strong"),O$e=o("mt5"),q$e=o(" \u2014 "),P6=a("a"),z$e=o("MT5Tokenizer"),X$e=o(" or "),$6=a("a"),W$e=o("MT5TokenizerFast"),V$e=o(" (mT5 model)"),Q$e=l(),Hn=a("li"),$z=a("strong"),H$e=o("openai-gpt"),U$e=o(" \u2014 "),I6=a("a"),J$e=o("OpenAIGPTTokenizer"),K$e=o(" or "),j6=a("a"),Y$e=o("OpenAIGPTTokenizerFast"),Z$e=o(" (OpenAI GPT model)"),eIe=l(),Un=a("li"),Iz=a("strong"),oIe=o("pegasus"),tIe=o(" \u2014 "),N6=a("a"),rIe=o("PegasusTokenizer"),aIe=o(" or "),D6=a("a"),nIe=o("PegasusTokenizerFast"),sIe=o(" (Pegasus model)"),lIe=l(),xc=a("li"),jz=a("strong"),iIe=o("perceiver"),dIe=o(" \u2014 "),G6=a("a"),mIe=o("PerceiverTokenizer"),fIe=o(" (Perceiver 
model)"),cIe=l(),Lc=a("li"),Nz=a("strong"),gIe=o("phobert"),hIe=o(" \u2014 "),O6=a("a"),uIe=o("PhobertTokenizer"),pIe=o(" (PhoBERT model)"),_Ie=l(),Bc=a("li"),Dz=a("strong"),vIe=o("prophetnet"),bIe=o(" \u2014 "),q6=a("a"),TIe=o("ProphetNetTokenizer"),FIe=o(" (ProphetNet model)"),MIe=l(),Jn=a("li"),Gz=a("strong"),EIe=o("qdqbert"),CIe=o(" \u2014 "),z6=a("a"),yIe=o("BertTokenizer"),wIe=o(" or "),X6=a("a"),AIe=o("BertTokenizerFast"),xIe=o(" (QDQBert model)"),LIe=l(),kc=a("li"),Oz=a("strong"),BIe=o("rag"),kIe=o(" \u2014 "),W6=a("a"),RIe=o("RagTokenizer"),SIe=o(" (RAG model)"),PIe=l(),Kn=a("li"),qz=a("strong"),$Ie=o("reformer"),IIe=o(" \u2014 "),V6=a("a"),jIe=o("ReformerTokenizer"),NIe=o(" or "),Q6=a("a"),DIe=o("ReformerTokenizerFast"),GIe=o(" (Reformer model)"),OIe=l(),Yn=a("li"),zz=a("strong"),qIe=o("rembert"),zIe=o(" \u2014 "),H6=a("a"),XIe=o("RemBertTokenizer"),WIe=o(" or "),U6=a("a"),VIe=o("RemBertTokenizerFast"),QIe=o(" (RemBERT model)"),HIe=l(),Zn=a("li"),Xz=a("strong"),UIe=o("retribert"),JIe=o(" \u2014 "),J6=a("a"),KIe=o("RetriBertTokenizer"),YIe=o(" or "),K6=a("a"),ZIe=o("RetriBertTokenizerFast"),eje=o(" (RetriBERT model)"),oje=l(),es=a("li"),Wz=a("strong"),tje=o("roberta"),rje=o(" \u2014 "),Y6=a("a"),aje=o("RobertaTokenizer"),nje=o(" or "),Z6=a("a"),sje=o("RobertaTokenizerFast"),lje=o(" (RoBERTa model)"),ije=l(),os=a("li"),Vz=a("strong"),dje=o("roformer"),mje=o(" \u2014 "),e8=a("a"),fje=o("RoFormerTokenizer"),cje=o(" or "),o8=a("a"),gje=o("RoFormerTokenizerFast"),hje=o(" (RoFormer model)"),uje=l(),Rc=a("li"),Qz=a("strong"),pje=o("speech_to_text"),_je=o(" \u2014 "),t8=a("a"),vje=o("Speech2TextTokenizer"),bje=o(" (Speech2Text model)"),Tje=l(),Sc=a("li"),Hz=a("strong"),Fje=o("speech_to_text_2"),Mje=o(" \u2014 "),r8=a("a"),Eje=o("Speech2Text2Tokenizer"),Cje=o(" (Speech2Text2 model)"),yje=l(),ts=a("li"),Uz=a("strong"),wje=o("splinter"),Aje=o(" \u2014 "),a8=a("a"),xje=o("SplinterTokenizer"),Lje=o(" or "),n8=a("a"),Bje=o("SplinterTokenizerFast"),kje=o(" (Splinter model)"),Rje=l(),rs=a("li"),Jz=a("strong"),Sje=o("squeezebert"),Pje=o(" \u2014 "),s8=a("a"),$je=o("SqueezeBertTokenizer"),Ije=o(" or "),l8=a("a"),jje=o("SqueezeBertTokenizerFast"),Nje=o(" (SqueezeBERT model)"),Dje=l(),as=a("li"),Kz=a("strong"),Gje=o("t5"),Oje=o(" \u2014 "),i8=a("a"),qje=o("T5Tokenizer"),zje=o(" or "),d8=a("a"),Xje=o("T5TokenizerFast"),Wje=o(" (T5 model)"),Vje=l(),Pc=a("li"),Yz=a("strong"),Qje=o("tapas"),Hje=o(" \u2014 "),m8=a("a"),Uje=o("TapasTokenizer"),Jje=o(" (TAPAS model)"),Kje=l(),$c=a("li"),Zz=a("strong"),Yje=o("transfo-xl"),Zje=o(" \u2014 "),f8=a("a"),eNe=o("TransfoXLTokenizer"),oNe=o(" (Transformer-XL model)"),tNe=l(),Ic=a("li"),eX=a("strong"),rNe=o("wav2vec2"),aNe=o(" \u2014 "),c8=a("a"),nNe=o("Wav2Vec2CTCTokenizer"),sNe=o(" (Wav2Vec2 model)"),lNe=l(),jc=a("li"),oX=a("strong"),iNe=o("xlm"),dNe=o(" \u2014 "),g8=a("a"),mNe=o("XLMTokenizer"),fNe=o(" (XLM model)"),cNe=l(),Nc=a("li"),tX=a("strong"),gNe=o("xlm-prophetnet"),hNe=o(" \u2014 "),h8=a("a"),uNe=o("XLMProphetNetTokenizer"),pNe=o(" (XLMProphetNet model)"),_Ne=l(),ns=a("li"),rX=a("strong"),vNe=o("xlm-roberta"),bNe=o(" \u2014 "),u8=a("a"),TNe=o("XLMRobertaTokenizer"),FNe=o(" or "),p8=a("a"),MNe=o("XLMRobertaTokenizerFast"),ENe=o(" (XLM-RoBERTa model)"),CNe=l(),ss=a("li"),aX=a("strong"),yNe=o("xlnet"),wNe=o(" \u2014 "),_8=a("a"),ANe=o("XLNetTokenizer"),xNe=o(" or "),v8=a("a"),LNe=o("XLNetTokenizerFast"),BNe=o(" (XLNet model)"),kNe=l(),nX=a("p"),RNe=o("Examples:"),SNe=l(),f(JF.$$.fragment),PNe=l(),Dc=a("div"),f(KF.$$.fragment),$Ne=l(),sX=a("p"),INe=o("Register 
a new tokenizer in this mapping."),xEe=l(),ri=a("h2"),Gc=a("a"),lX=a("span"),f(YF.$$.fragment),jNe=l(),iX=a("span"),NNe=o("AutoFeatureExtractor"),LEe=l(),Gr=a("div"),f(ZF.$$.fragment),DNe=l(),eM=a("p"),GNe=o(`This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the library when created with the `),b8=a("a"),ONe=o("AutoFeatureExtractor.from_pretrained()"),qNe=o(" class method."),zNe=l(),oM=a("p"),XNe=o("This class cannot be instantiated directly using "),dX=a("code"),WNe=o("__init__()"),VNe=o(" (throws an error)."),QNe=l(),we=a("div"),f(tM.$$.fragment),HNe=l(),mX=a("p"),UNe=o("Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary."),JNe=l(),Ma=a("p"),KNe=o("The feature extractor class to instantiate is selected based on the "),fX=a("em"),YNe=o("model_type"),ZNe=o(` property of the config object (either passed as an argument or loaded from `),cX=a("em"),eDe=o("pretrained_model_name_or_path"),oDe=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),gX=a("em"),tDe=o("pretrained_model_name_or_path"),rDe=o(":"),aDe=l(),fe=a("ul"),Oc=a("li"),hX=a("strong"),nDe=o("beit"),sDe=o(" \u2014 "),T8=a("a"),lDe=o("BeitFeatureExtractor"),iDe=o(" (BEiT model)"),dDe=l(),qc=a("li"),uX=a("strong"),mDe=o("clip"),fDe=o(" \u2014 "),F8=a("a"),cDe=o("CLIPFeatureExtractor"),gDe=o(" (CLIP model)"),hDe=l(),zc=a("li"),pX=a("strong"),uDe=o("deit"),pDe=o(" \u2014 "),M8=a("a"),_De=o("DeiTFeatureExtractor"),vDe=o(" (DeiT model)"),bDe=l(),Xc=a("li"),_X=a("strong"),TDe=o("detr"),FDe=o(" \u2014 "),E8=a("a"),MDe=o("DetrFeatureExtractor"),EDe=o(" (DETR model)"),CDe=l(),Wc=a("li"),vX=a("strong"),yDe=o("hubert"),wDe=o(" \u2014 "),C8=a("a"),ADe=o("Wav2Vec2FeatureExtractor"),xDe=o(" (Hubert model)"),LDe=l(),Vc=a("li"),bX=a("strong"),BDe=o("layoutlmv2"),kDe=o(" \u2014 "),y8=a("a"),RDe=o("LayoutLMv2FeatureExtractor"),SDe=o(" (LayoutLMv2 model)"),PDe=l(),Qc=a("li"),TX=a("strong"),$De=o("perceiver"),IDe=o(" \u2014 "),w8=a("a"),jDe=o("PerceiverFeatureExtractor"),NDe=o(" (Perceiver model)"),DDe=l(),Hc=a("li"),FX=a("strong"),GDe=o("speech_to_text"),ODe=o(" \u2014 "),A8=a("a"),qDe=o("Speech2TextFeatureExtractor"),zDe=o(" (Speech2Text model)"),XDe=l(),Uc=a("li"),MX=a("strong"),WDe=o("vit"),VDe=o(" \u2014 "),x8=a("a"),QDe=o("ViTFeatureExtractor"),HDe=o(" (ViT model)"),UDe=l(),Jc=a("li"),EX=a("strong"),JDe=o("wav2vec2"),KDe=o(" \u2014 "),L8=a("a"),YDe=o("Wav2Vec2FeatureExtractor"),ZDe=o(" (Wav2Vec2 model)"),eGe=l(),f(Kc.$$.fragment),oGe=l(),CX=a("p"),tGe=o("Examples:"),rGe=l(),f(rM.$$.fragment),BEe=l(),ai=a("h2"),Yc=a("a"),yX=a("span"),f(aM.$$.fragment),aGe=l(),wX=a("span"),nGe=o("AutoProcessor"),kEe=l(),Or=a("div"),f(nM.$$.fragment),sGe=l(),sM=a("p"),lGe=o(`This is a generic processor class that will be instantiated as one of the processor classes of the library when created with the `),B8=a("a"),iGe=o("AutoProcessor.from_pretrained()"),dGe=o(" class method."),mGe=l(),lM=a("p"),fGe=o("This class cannot be instantiated directly using "),AX=a("code"),cGe=o("__init__()"),gGe=o(" (throws an error)."),hGe=l(),Ae=a("div"),f(iM.$$.fragment),uGe=l(),xX=a("p"),pGe=o("Instantiate one of the processor classes of the library from a pretrained model vocabulary."),_Ge=l(),ni=a("p"),vGe=o("The processor class to instantiate is selected based on the "),LX=a("em"),bGe=o("model_type"),TGe=o(` property of the config object (either passed as an argument or loaded from 
`),BX=a("em"),FGe=o("pretrained_model_name_or_path"),MGe=o(" if possible):"),EGe=l(),Je=a("ul"),Zc=a("li"),kX=a("strong"),CGe=o("clip"),yGe=o(" \u2014 "),k8=a("a"),wGe=o("CLIPProcessor"),AGe=o(" (CLIP model)"),xGe=l(),eg=a("li"),RX=a("strong"),LGe=o("layoutlmv2"),BGe=o(" \u2014 "),R8=a("a"),kGe=o("LayoutLMv2Processor"),RGe=o(" (LayoutLMv2 model)"),SGe=l(),og=a("li"),SX=a("strong"),PGe=o("speech_to_text"),$Ge=o(" \u2014 "),S8=a("a"),IGe=o("Speech2TextProcessor"),jGe=o(" (Speech2Text model)"),NGe=l(),tg=a("li"),PX=a("strong"),DGe=o("speech_to_text_2"),GGe=o(" \u2014 "),P8=a("a"),OGe=o("Speech2Text2Processor"),qGe=o(" (Speech2Text2 model)"),zGe=l(),rg=a("li"),$X=a("strong"),XGe=o("trocr"),WGe=o(" \u2014 "),$8=a("a"),VGe=o("TrOCRProcessor"),QGe=o(" (TrOCR model)"),HGe=l(),ag=a("li"),IX=a("strong"),UGe=o("vision-text-dual-encoder"),JGe=o(" \u2014 "),I8=a("a"),KGe=o("VisionTextDualEncoderProcessor"),YGe=o(" (VisionTextDualEncoder model)"),ZGe=l(),ng=a("li"),jX=a("strong"),eOe=o("wav2vec2"),oOe=o(" \u2014 "),j8=a("a"),tOe=o("Wav2Vec2Processor"),rOe=o(" (Wav2Vec2 model)"),aOe=l(),f(sg.$$.fragment),nOe=l(),NX=a("p"),sOe=o("Examples:"),lOe=l(),f(dM.$$.fragment),REe=l(),si=a("h2"),lg=a("a"),DX=a("span"),f(mM.$$.fragment),iOe=l(),GX=a("span"),dOe=o("AutoModel"),SEe=l(),Po=a("div"),f(fM.$$.fragment),mOe=l(),li=a("p"),fOe=o(`This is a generic model class that will be instantiated as one of the base model classes of the library when created with the `),OX=a("code"),cOe=o("from_pretrained()"),gOe=o(` class method or the `),qX=a("code"),hOe=o("from_config()"),uOe=o(" class method."),pOe=l(),cM=a("p"),_Oe=o("This class cannot be instantiated directly using "),zX=a("code"),vOe=o("__init__()"),bOe=o(" (throws an error)."),TOe=l(),wt=a("div"),f(gM.$$.fragment),FOe=l(),XX=a("p"),MOe=o("Instantiates one of the base model classes of the library from a configuration."),EOe=l(),ii=a("p"),COe=o(`Note: Loading a model from its configuration file does `),WX=a("strong"),yOe=o("not"),wOe=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),VX=a("em"),AOe=o("~AutoModel.from_pretrained"),xOe=o(`] to load the model weights.`),LOe=l(),QX=a("p"),BOe=o("Examples:"),kOe=l(),f(hM.$$.fragment),ROe=l(),xe=a("div"),f(uM.$$.fragment),SOe=l(),HX=a("p"),POe=o("Instantiate one of the base model classes of the library from a pretrained model."),$Oe=l(),Ea=a("p"),IOe=o("The model class to instantiate is selected based on the "),UX=a("em"),jOe=o("model_type"),NOe=o(` property of the config object (either passed as an argument or loaded from `),JX=a("em"),DOe=o("pretrained_model_name_or_path"),GOe=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),KX=a("em"),OOe=o("pretrained_model_name_or_path"),qOe=o(":"),zOe=l(),F=a("ul"),ig=a("li"),YX=a("strong"),XOe=o("albert"),WOe=o(" \u2014 "),N8=a("a"),VOe=o("AlbertModel"),QOe=o(" (ALBERT model)"),HOe=l(),dg=a("li"),ZX=a("strong"),UOe=o("bart"),JOe=o(" \u2014 "),D8=a("a"),KOe=o("BartModel"),YOe=o(" (BART model)"),ZOe=l(),mg=a("li"),eW=a("strong"),eqe=o("beit"),oqe=o(" \u2014 "),G8=a("a"),tqe=o("BeitModel"),rqe=o(" (BEiT model)"),aqe=l(),fg=a("li"),oW=a("strong"),nqe=o("bert"),sqe=o(" \u2014 "),O8=a("a"),lqe=o("BertModel"),iqe=o(" (BERT model)"),dqe=l(),cg=a("li"),tW=a("strong"),mqe=o("bert-generation"),fqe=o(" \u2014 "),q8=a("a"),cqe=o("BertGenerationEncoder"),gqe=o(" (Bert Generation model)"),hqe=l(),gg=a("li"),rW=a("strong"),uqe=o("big_bird"),pqe=o(" \u2014 "),z8=a("a"),_qe=o("BigBirdModel"),vqe=o(" (BigBird model)"),bqe=l(),hg=a("li"),aW=a("strong"),Tqe=o("bigbird_pegasus"),Fqe=o(" \u2014 "),X8=a("a"),Mqe=o("BigBirdPegasusModel"),Eqe=o(" (BigBirdPegasus model)"),Cqe=l(),ug=a("li"),nW=a("strong"),yqe=o("blenderbot"),wqe=o(" \u2014 "),W8=a("a"),Aqe=o("BlenderbotModel"),xqe=o(" (Blenderbot model)"),Lqe=l(),pg=a("li"),sW=a("strong"),Bqe=o("blenderbot-small"),kqe=o(" \u2014 "),V8=a("a"),Rqe=o("BlenderbotSmallModel"),Sqe=o(" (BlenderbotSmall model)"),Pqe=l(),_g=a("li"),lW=a("strong"),$qe=o("camembert"),Iqe=o(" \u2014 "),Q8=a("a"),jqe=o("CamembertModel"),Nqe=o(" (CamemBERT model)"),Dqe=l(),vg=a("li"),iW=a("strong"),Gqe=o("canine"),Oqe=o(" \u2014 "),H8=a("a"),qqe=o("CanineModel"),zqe=o(" (Canine model)"),Xqe=l(),bg=a("li"),dW=a("strong"),Wqe=o("clip"),Vqe=o(" \u2014 "),U8=a("a"),Qqe=o("CLIPModel"),Hqe=o(" (CLIP model)"),Uqe=l(),Tg=a("li"),mW=a("strong"),Jqe=o("convbert"),Kqe=o(" \u2014 "),J8=a("a"),Yqe=o("ConvBertModel"),Zqe=o(" (ConvBERT model)"),eze=l(),Fg=a("li"),fW=a("strong"),oze=o("ctrl"),tze=o(" \u2014 "),K8=a("a"),rze=o("CTRLModel"),aze=o(" (CTRL model)"),nze=l(),Mg=a("li"),cW=a("strong"),sze=o("deberta"),lze=o(" \u2014 "),Y8=a("a"),ize=o("DebertaModel"),dze=o(" (DeBERTa model)"),mze=l(),Eg=a("li"),gW=a("strong"),fze=o("deberta-v2"),cze=o(" \u2014 "),Z8=a("a"),gze=o("DebertaV2Model"),hze=o(" (DeBERTa-v2 model)"),uze=l(),Cg=a("li"),hW=a("strong"),pze=o("deit"),_ze=o(" \u2014 "),eL=a("a"),vze=o("DeiTModel"),bze=o(" (DeiT model)"),Tze=l(),yg=a("li"),uW=a("strong"),Fze=o("detr"),Mze=o(" \u2014 "),oL=a("a"),Eze=o("DetrModel"),Cze=o(" (DETR model)"),yze=l(),wg=a("li"),pW=a("strong"),wze=o("distilbert"),Aze=o(" \u2014 "),tL=a("a"),xze=o("DistilBertModel"),Lze=o(" (DistilBERT model)"),Bze=l(),Ag=a("li"),_W=a("strong"),kze=o("dpr"),Rze=o(" \u2014 "),rL=a("a"),Sze=o("DPRQuestionEncoder"),Pze=o(" (DPR model)"),$ze=l(),xg=a("li"),vW=a("strong"),Ize=o("electra"),jze=o(" \u2014 "),aL=a("a"),Nze=o("ElectraModel"),Dze=o(" (ELECTRA model)"),Gze=l(),Lg=a("li"),bW=a("strong"),Oze=o("flaubert"),qze=o(" \u2014 "),nL=a("a"),zze=o("FlaubertModel"),Xze=o(" (FlauBERT 
model)"),Wze=l(),Bg=a("li"),TW=a("strong"),Vze=o("fnet"),Qze=o(" \u2014 "),sL=a("a"),Hze=o("FNetModel"),Uze=o(" (FNet model)"),Jze=l(),kg=a("li"),FW=a("strong"),Kze=o("fsmt"),Yze=o(" \u2014 "),lL=a("a"),Zze=o("FSMTModel"),eXe=o(" (FairSeq Machine-Translation model)"),oXe=l(),ls=a("li"),MW=a("strong"),tXe=o("funnel"),rXe=o(" \u2014 "),iL=a("a"),aXe=o("FunnelModel"),nXe=o(" or "),dL=a("a"),sXe=o("FunnelBaseModel"),lXe=o(" (Funnel Transformer model)"),iXe=l(),Rg=a("li"),EW=a("strong"),dXe=o("gpt2"),mXe=o(" \u2014 "),mL=a("a"),fXe=o("GPT2Model"),cXe=o(" (OpenAI GPT-2 model)"),gXe=l(),Sg=a("li"),CW=a("strong"),hXe=o("gpt_neo"),uXe=o(" \u2014 "),fL=a("a"),pXe=o("GPTNeoModel"),_Xe=o(" (GPT Neo model)"),vXe=l(),Pg=a("li"),yW=a("strong"),bXe=o("gptj"),TXe=o(" \u2014 "),cL=a("a"),FXe=o("GPTJModel"),MXe=o(" (GPT-J model)"),EXe=l(),$g=a("li"),wW=a("strong"),CXe=o("hubert"),yXe=o(" \u2014 "),gL=a("a"),wXe=o("HubertModel"),AXe=o(" (Hubert model)"),xXe=l(),Ig=a("li"),AW=a("strong"),LXe=o("ibert"),BXe=o(" \u2014 "),hL=a("a"),kXe=o("IBertModel"),RXe=o(" (I-BERT model)"),SXe=l(),jg=a("li"),xW=a("strong"),PXe=o("imagegpt"),$Xe=o(" \u2014 "),uL=a("a"),IXe=o("ImageGPTModel"),jXe=o(" (ImageGPT model)"),NXe=l(),Ng=a("li"),LW=a("strong"),DXe=o("layoutlm"),GXe=o(" \u2014 "),pL=a("a"),OXe=o("LayoutLMModel"),qXe=o(" (LayoutLM model)"),zXe=l(),Dg=a("li"),BW=a("strong"),XXe=o("layoutlmv2"),WXe=o(" \u2014 "),_L=a("a"),VXe=o("LayoutLMv2Model"),QXe=o(" (LayoutLMv2 model)"),HXe=l(),Gg=a("li"),kW=a("strong"),UXe=o("led"),JXe=o(" \u2014 "),vL=a("a"),KXe=o("LEDModel"),YXe=o(" (LED model)"),ZXe=l(),Og=a("li"),RW=a("strong"),eWe=o("longformer"),oWe=o(" \u2014 "),bL=a("a"),tWe=o("LongformerModel"),rWe=o(" (Longformer model)"),aWe=l(),qg=a("li"),SW=a("strong"),nWe=o("luke"),sWe=o(" \u2014 "),TL=a("a"),lWe=o("LukeModel"),iWe=o(" (LUKE model)"),dWe=l(),zg=a("li"),PW=a("strong"),mWe=o("lxmert"),fWe=o(" \u2014 "),FL=a("a"),cWe=o("LxmertModel"),gWe=o(" (LXMERT model)"),hWe=l(),Xg=a("li"),$W=a("strong"),uWe=o("m2m_100"),pWe=o(" \u2014 "),ML=a("a"),_We=o("M2M100Model"),vWe=o(" (M2M100 model)"),bWe=l(),Wg=a("li"),IW=a("strong"),TWe=o("marian"),FWe=o(" \u2014 "),EL=a("a"),MWe=o("MarianModel"),EWe=o(" (Marian model)"),CWe=l(),Vg=a("li"),jW=a("strong"),yWe=o("mbart"),wWe=o(" \u2014 "),CL=a("a"),AWe=o("MBartModel"),xWe=o(" (mBART model)"),LWe=l(),Qg=a("li"),NW=a("strong"),BWe=o("megatron-bert"),kWe=o(" \u2014 "),yL=a("a"),RWe=o("MegatronBertModel"),SWe=o(" (MegatronBert model)"),PWe=l(),Hg=a("li"),DW=a("strong"),$We=o("mobilebert"),IWe=o(" \u2014 "),wL=a("a"),jWe=o("MobileBertModel"),NWe=o(" (MobileBERT model)"),DWe=l(),Ug=a("li"),GW=a("strong"),GWe=o("mpnet"),OWe=o(" \u2014 "),AL=a("a"),qWe=o("MPNetModel"),zWe=o(" (MPNet model)"),XWe=l(),Jg=a("li"),OW=a("strong"),WWe=o("mt5"),VWe=o(" \u2014 "),xL=a("a"),QWe=o("MT5Model"),HWe=o(" (mT5 model)"),UWe=l(),Kg=a("li"),qW=a("strong"),JWe=o("openai-gpt"),KWe=o(" \u2014 "),LL=a("a"),YWe=o("OpenAIGPTModel"),ZWe=o(" (OpenAI GPT model)"),eVe=l(),Yg=a("li"),zW=a("strong"),oVe=o("pegasus"),tVe=o(" \u2014 "),BL=a("a"),rVe=o("PegasusModel"),aVe=o(" (Pegasus model)"),nVe=l(),Zg=a("li"),XW=a("strong"),sVe=o("perceiver"),lVe=o(" \u2014 "),kL=a("a"),iVe=o("PerceiverModel"),dVe=o(" (Perceiver model)"),mVe=l(),eh=a("li"),WW=a("strong"),fVe=o("prophetnet"),cVe=o(" \u2014 "),RL=a("a"),gVe=o("ProphetNetModel"),hVe=o(" (ProphetNet model)"),uVe=l(),oh=a("li"),VW=a("strong"),pVe=o("qdqbert"),_Ve=o(" \u2014 "),SL=a("a"),vVe=o("QDQBertModel"),bVe=o(" (QDQBert 
model)"),TVe=l(),th=a("li"),QW=a("strong"),FVe=o("reformer"),MVe=o(" \u2014 "),PL=a("a"),EVe=o("ReformerModel"),CVe=o(" (Reformer model)"),yVe=l(),rh=a("li"),HW=a("strong"),wVe=o("rembert"),AVe=o(" \u2014 "),$L=a("a"),xVe=o("RemBertModel"),LVe=o(" (RemBERT model)"),BVe=l(),ah=a("li"),UW=a("strong"),kVe=o("retribert"),RVe=o(" \u2014 "),IL=a("a"),SVe=o("RetriBertModel"),PVe=o(" (RetriBERT model)"),$Ve=l(),nh=a("li"),JW=a("strong"),IVe=o("roberta"),jVe=o(" \u2014 "),jL=a("a"),NVe=o("RobertaModel"),DVe=o(" (RoBERTa model)"),GVe=l(),sh=a("li"),KW=a("strong"),OVe=o("roformer"),qVe=o(" \u2014 "),NL=a("a"),zVe=o("RoFormerModel"),XVe=o(" (RoFormer model)"),WVe=l(),lh=a("li"),YW=a("strong"),VVe=o("segformer"),QVe=o(" \u2014 "),DL=a("a"),HVe=o("SegformerModel"),UVe=o(" (SegFormer model)"),JVe=l(),ih=a("li"),ZW=a("strong"),KVe=o("sew"),YVe=o(" \u2014 "),GL=a("a"),ZVe=o("SEWModel"),eQe=o(" (SEW model)"),oQe=l(),dh=a("li"),eV=a("strong"),tQe=o("sew-d"),rQe=o(" \u2014 "),OL=a("a"),aQe=o("SEWDModel"),nQe=o(" (SEW-D model)"),sQe=l(),mh=a("li"),oV=a("strong"),lQe=o("speech_to_text"),iQe=o(" \u2014 "),qL=a("a"),dQe=o("Speech2TextModel"),mQe=o(" (Speech2Text model)"),fQe=l(),fh=a("li"),tV=a("strong"),cQe=o("splinter"),gQe=o(" \u2014 "),zL=a("a"),hQe=o("SplinterModel"),uQe=o(" (Splinter model)"),pQe=l(),ch=a("li"),rV=a("strong"),_Qe=o("squeezebert"),vQe=o(" \u2014 "),XL=a("a"),bQe=o("SqueezeBertModel"),TQe=o(" (SqueezeBERT model)"),FQe=l(),gh=a("li"),aV=a("strong"),MQe=o("t5"),EQe=o(" \u2014 "),WL=a("a"),CQe=o("T5Model"),yQe=o(" (T5 model)"),wQe=l(),hh=a("li"),nV=a("strong"),AQe=o("tapas"),xQe=o(" \u2014 "),VL=a("a"),LQe=o("TapasModel"),BQe=o(" (TAPAS model)"),kQe=l(),uh=a("li"),sV=a("strong"),RQe=o("transfo-xl"),SQe=o(" \u2014 "),QL=a("a"),PQe=o("TransfoXLModel"),$Qe=o(" (Transformer-XL model)"),IQe=l(),ph=a("li"),lV=a("strong"),jQe=o("unispeech"),NQe=o(" \u2014 "),HL=a("a"),DQe=o("UniSpeechModel"),GQe=o(" (UniSpeech model)"),OQe=l(),_h=a("li"),iV=a("strong"),qQe=o("unispeech-sat"),zQe=o(" \u2014 "),UL=a("a"),XQe=o("UniSpeechSatModel"),WQe=o(" (UniSpeechSat model)"),VQe=l(),vh=a("li"),dV=a("strong"),QQe=o("vision-text-dual-encoder"),HQe=o(" \u2014 "),JL=a("a"),UQe=o("VisionTextDualEncoderModel"),JQe=o(" (VisionTextDualEncoder model)"),KQe=l(),bh=a("li"),mV=a("strong"),YQe=o("visual_bert"),ZQe=o(" \u2014 "),KL=a("a"),eHe=o("VisualBertModel"),oHe=o(" (VisualBert model)"),tHe=l(),Th=a("li"),fV=a("strong"),rHe=o("vit"),aHe=o(" \u2014 "),YL=a("a"),nHe=o("ViTModel"),sHe=o(" (ViT model)"),lHe=l(),Fh=a("li"),cV=a("strong"),iHe=o("wav2vec2"),dHe=o(" \u2014 "),ZL=a("a"),mHe=o("Wav2Vec2Model"),fHe=o(" (Wav2Vec2 model)"),cHe=l(),Mh=a("li"),gV=a("strong"),gHe=o("wavlm"),hHe=o(" \u2014 "),eB=a("a"),uHe=o("WavLMModel"),pHe=o(" (WavLM model)"),_He=l(),Eh=a("li"),hV=a("strong"),vHe=o("xlm"),bHe=o(" \u2014 "),oB=a("a"),THe=o("XLMModel"),FHe=o(" (XLM model)"),MHe=l(),Ch=a("li"),uV=a("strong"),EHe=o("xlm-prophetnet"),CHe=o(" \u2014 "),tB=a("a"),yHe=o("XLMProphetNetModel"),wHe=o(" (XLMProphetNet model)"),AHe=l(),yh=a("li"),pV=a("strong"),xHe=o("xlm-roberta"),LHe=o(" \u2014 "),rB=a("a"),BHe=o("XLMRobertaModel"),kHe=o(" (XLM-RoBERTa model)"),RHe=l(),wh=a("li"),_V=a("strong"),SHe=o("xlnet"),PHe=o(" \u2014 "),aB=a("a"),$He=o("XLNetModel"),IHe=o(" (XLNet model)"),jHe=l(),Ah=a("p"),NHe=o("The model is set in evaluation mode by default using "),vV=a("em"),DHe=o("model.eval()"),GHe=o(` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),bV=a("em"),OHe=o("model.train()"),qHe=l(),TV=a("p"),zHe=o("Examples:"),XHe=l(),f(pM.$$.fragment),PEe=l(),di=a("h2"),xh=a("a"),FV=a("span"),f(_M.$$.fragment),WHe=l(),MV=a("span"),VHe=o("AutoModelForPreTraining"),$Ee=l(),$o=a("div"),f(vM.$$.fragment),QHe=l(),mi=a("p"),HHe=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a pretraining head) when created with the `),EV=a("code"),UHe=o("from_pretrained()"),JHe=o(` class method or the `),CV=a("code"),KHe=o("from_config()"),YHe=o(" class method."),ZHe=l(),bM=a("p"),eUe=o("This class cannot be instantiated directly using "),yV=a("code"),oUe=o("__init__()"),tUe=o(" (throws an error)."),rUe=l(),At=a("div"),f(TM.$$.fragment),aUe=l(),wV=a("p"),nUe=o("Instantiates one of the model classes of the library (with a pretraining head) from a configuration."),sUe=l(),fi=a("p"),lUe=o(`Note: Loading a model from its configuration file does `),AV=a("strong"),iUe=o("not"),dUe=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),xV=a("em"),mUe=o("~AutoModelForPreTraining.from_pretrained"),fUe=o(`] to load the model weights.`),cUe=l(),LV=a("p"),gUe=o("Examples:"),hUe=l(),f(FM.$$.fragment),uUe=l(),Le=a("div"),f(MM.$$.fragment),pUe=l(),BV=a("p"),_Ue=o("Instantiate one of the model classes of the library (with a pretraining head) from a pretrained model."),vUe=l(),Ca=a("p"),bUe=o("The model class to instantiate is selected based on the "),kV=a("em"),TUe=o("model_type"),FUe=o(` property of the config object (either passed as an argument or loaded from `),RV=a("em"),MUe=o("pretrained_model_name_or_path"),EUe=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),SV=a("em"),CUe=o("pretrained_model_name_or_path"),yUe=o(":"),wUe=l(),k=a("ul"),Lh=a("li"),PV=a("strong"),AUe=o("albert"),xUe=o(" \u2014 "),nB=a("a"),LUe=o("AlbertForPreTraining"),BUe=o(" (ALBERT model)"),kUe=l(),Bh=a("li"),$V=a("strong"),RUe=o("bart"),SUe=o(" \u2014 "),sB=a("a"),PUe=o("BartForConditionalGeneration"),$Ue=o(" (BART model)"),IUe=l(),kh=a("li"),IV=a("strong"),jUe=o("bert"),NUe=o(" \u2014 "),lB=a("a"),DUe=o("BertForPreTraining"),GUe=o(" (BERT model)"),OUe=l(),Rh=a("li"),jV=a("strong"),qUe=o("big_bird"),zUe=o(" \u2014 "),iB=a("a"),XUe=o("BigBirdForPreTraining"),WUe=o(" (BigBird model)"),VUe=l(),Sh=a("li"),NV=a("strong"),QUe=o("camembert"),HUe=o(" \u2014 "),dB=a("a"),UUe=o("CamembertForMaskedLM"),JUe=o(" (CamemBERT model)"),KUe=l(),Ph=a("li"),DV=a("strong"),YUe=o("ctrl"),ZUe=o(" \u2014 "),mB=a("a"),eJe=o("CTRLLMHeadModel"),oJe=o(" (CTRL model)"),tJe=l(),$h=a("li"),GV=a("strong"),rJe=o("deberta"),aJe=o(" \u2014 "),fB=a("a"),nJe=o("DebertaForMaskedLM"),sJe=o(" (DeBERTa model)"),lJe=l(),Ih=a("li"),OV=a("strong"),iJe=o("deberta-v2"),dJe=o(" \u2014 "),cB=a("a"),mJe=o("DebertaV2ForMaskedLM"),fJe=o(" (DeBERTa-v2 model)"),cJe=l(),jh=a("li"),qV=a("strong"),gJe=o("distilbert"),hJe=o(" \u2014 "),gB=a("a"),uJe=o("DistilBertForMaskedLM"),pJe=o(" (DistilBERT model)"),_Je=l(),Nh=a("li"),zV=a("strong"),vJe=o("electra"),bJe=o(" \u2014 "),hB=a("a"),TJe=o("ElectraForPreTraining"),FJe=o(" (ELECTRA model)"),MJe=l(),Dh=a("li"),XV=a("strong"),EJe=o("flaubert"),CJe=o(" \u2014 "),uB=a("a"),yJe=o("FlaubertWithLMHeadModel"),wJe=o(" (FlauBERT model)"),AJe=l(),Gh=a("li"),WV=a("strong"),xJe=o("fnet"),LJe=o(" \u2014 "),pB=a("a"),BJe=o("FNetForPreTraining"),kJe=o(" (FNet model)"),RJe=l(),Oh=a("li"),VV=a("strong"),SJe=o("fsmt"),PJe=o(" 
\u2014 "),_B=a("a"),$Je=o("FSMTForConditionalGeneration"),IJe=o(" (FairSeq Machine-Translation model)"),jJe=l(),qh=a("li"),QV=a("strong"),NJe=o("funnel"),DJe=o(" \u2014 "),vB=a("a"),GJe=o("FunnelForPreTraining"),OJe=o(" (Funnel Transformer model)"),qJe=l(),zh=a("li"),HV=a("strong"),zJe=o("gpt2"),XJe=o(" \u2014 "),bB=a("a"),WJe=o("GPT2LMHeadModel"),VJe=o(" (OpenAI GPT-2 model)"),QJe=l(),Xh=a("li"),UV=a("strong"),HJe=o("ibert"),UJe=o(" \u2014 "),TB=a("a"),JJe=o("IBertForMaskedLM"),KJe=o(" (I-BERT model)"),YJe=l(),Wh=a("li"),JV=a("strong"),ZJe=o("layoutlm"),eKe=o(" \u2014 "),FB=a("a"),oKe=o("LayoutLMForMaskedLM"),tKe=o(" (LayoutLM model)"),rKe=l(),Vh=a("li"),KV=a("strong"),aKe=o("longformer"),nKe=o(" \u2014 "),MB=a("a"),sKe=o("LongformerForMaskedLM"),lKe=o(" (Longformer model)"),iKe=l(),Qh=a("li"),YV=a("strong"),dKe=o("lxmert"),mKe=o(" \u2014 "),EB=a("a"),fKe=o("LxmertForPreTraining"),cKe=o(" (LXMERT model)"),gKe=l(),Hh=a("li"),ZV=a("strong"),hKe=o("megatron-bert"),uKe=o(" \u2014 "),CB=a("a"),pKe=o("MegatronBertForPreTraining"),_Ke=o(" (MegatronBert model)"),vKe=l(),Uh=a("li"),eQ=a("strong"),bKe=o("mobilebert"),TKe=o(" \u2014 "),yB=a("a"),FKe=o("MobileBertForPreTraining"),MKe=o(" (MobileBERT model)"),EKe=l(),Jh=a("li"),oQ=a("strong"),CKe=o("mpnet"),yKe=o(" \u2014 "),wB=a("a"),wKe=o("MPNetForMaskedLM"),AKe=o(" (MPNet model)"),xKe=l(),Kh=a("li"),tQ=a("strong"),LKe=o("openai-gpt"),BKe=o(" \u2014 "),AB=a("a"),kKe=o("OpenAIGPTLMHeadModel"),RKe=o(" (OpenAI GPT model)"),SKe=l(),Yh=a("li"),rQ=a("strong"),PKe=o("retribert"),$Ke=o(" \u2014 "),xB=a("a"),IKe=o("RetriBertModel"),jKe=o(" (RetriBERT model)"),NKe=l(),Zh=a("li"),aQ=a("strong"),DKe=o("roberta"),GKe=o(" \u2014 "),LB=a("a"),OKe=o("RobertaForMaskedLM"),qKe=o(" (RoBERTa model)"),zKe=l(),eu=a("li"),nQ=a("strong"),XKe=o("squeezebert"),WKe=o(" \u2014 "),BB=a("a"),VKe=o("SqueezeBertForMaskedLM"),QKe=o(" (SqueezeBERT model)"),HKe=l(),ou=a("li"),sQ=a("strong"),UKe=o("t5"),JKe=o(" \u2014 "),kB=a("a"),KKe=o("T5ForConditionalGeneration"),YKe=o(" (T5 model)"),ZKe=l(),tu=a("li"),lQ=a("strong"),eYe=o("tapas"),oYe=o(" \u2014 "),RB=a("a"),tYe=o("TapasForMaskedLM"),rYe=o(" (TAPAS model)"),aYe=l(),ru=a("li"),iQ=a("strong"),nYe=o("transfo-xl"),sYe=o(" \u2014 "),SB=a("a"),lYe=o("TransfoXLLMHeadModel"),iYe=o(" (Transformer-XL model)"),dYe=l(),au=a("li"),dQ=a("strong"),mYe=o("unispeech"),fYe=o(" \u2014 "),PB=a("a"),cYe=o("UniSpeechForPreTraining"),gYe=o(" (UniSpeech model)"),hYe=l(),nu=a("li"),mQ=a("strong"),uYe=o("unispeech-sat"),pYe=o(" \u2014 "),$B=a("a"),_Ye=o("UniSpeechSatForPreTraining"),vYe=o(" (UniSpeechSat model)"),bYe=l(),su=a("li"),fQ=a("strong"),TYe=o("visual_bert"),FYe=o(" \u2014 "),IB=a("a"),MYe=o("VisualBertForPreTraining"),EYe=o(" (VisualBert model)"),CYe=l(),lu=a("li"),cQ=a("strong"),yYe=o("wav2vec2"),wYe=o(" \u2014 "),jB=a("a"),AYe=o("Wav2Vec2ForPreTraining"),xYe=o(" (Wav2Vec2 model)"),LYe=l(),iu=a("li"),gQ=a("strong"),BYe=o("xlm"),kYe=o(" \u2014 "),NB=a("a"),RYe=o("XLMWithLMHeadModel"),SYe=o(" (XLM model)"),PYe=l(),du=a("li"),hQ=a("strong"),$Ye=o("xlm-roberta"),IYe=o(" \u2014 "),DB=a("a"),jYe=o("XLMRobertaForMaskedLM"),NYe=o(" (XLM-RoBERTa model)"),DYe=l(),mu=a("li"),uQ=a("strong"),GYe=o("xlnet"),OYe=o(" \u2014 "),GB=a("a"),qYe=o("XLNetLMHeadModel"),zYe=o(" (XLNet model)"),XYe=l(),fu=a("p"),WYe=o("The model is set in evaluation mode by default using "),pQ=a("em"),VYe=o("model.eval()"),QYe=o(` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),_Q=a("em"),HYe=o("model.train()"),UYe=l(),vQ=a("p"),JYe=o("Examples:"),KYe=l(),f(EM.$$.fragment),IEe=l(),ci=a("h2"),cu=a("a"),bQ=a("span"),f(CM.$$.fragment),YYe=l(),TQ=a("span"),ZYe=o("AutoModelForCausalLM"),jEe=l(),Io=a("div"),f(yM.$$.fragment),eZe=l(),gi=a("p"),oZe=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a causal language modeling head) when created with the `),FQ=a("code"),tZe=o("from_pretrained()"),rZe=o(` class method or the `),MQ=a("code"),aZe=o("from_config()"),nZe=o(" class method."),sZe=l(),wM=a("p"),lZe=o("This class cannot be instantiated directly using "),EQ=a("code"),iZe=o("__init__()"),dZe=o(" (throws an error)."),mZe=l(),xt=a("div"),f(AM.$$.fragment),fZe=l(),CQ=a("p"),cZe=o("Instantiates one of the model classes of the library (with a causal language modeling head) from a configuration."),gZe=l(),hi=a("p"),hZe=o(`Note: Loading a model from its configuration file does `),yQ=a("strong"),uZe=o("not"),pZe=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),wQ=a("em"),_Ze=o("~AutoModelForCausalLM.from_pretrained"),vZe=o(`] to load the model weights.`),bZe=l(),AQ=a("p"),TZe=o("Examples:"),FZe=l(),f(xM.$$.fragment),MZe=l(),Be=a("div"),f(LM.$$.fragment),EZe=l(),xQ=a("p"),CZe=o("Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model."),yZe=l(),ya=a("p"),wZe=o("The model class to instantiate is selected based on the "),LQ=a("em"),AZe=o("model_type"),xZe=o(` property of the config object (either passed as an argument or loaded from `),BQ=a("em"),LZe=o("pretrained_model_name_or_path"),BZe=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),kQ=a("em"),kZe=o("pretrained_model_name_or_path"),RZe=o(":"),SZe=l(),I=a("ul"),gu=a("li"),RQ=a("strong"),PZe=o("bart"),$Ze=o(" \u2014 "),OB=a("a"),IZe=o("BartForCausalLM"),jZe=o(" (BART model)"),NZe=l(),hu=a("li"),SQ=a("strong"),DZe=o("bert"),GZe=o(" \u2014 "),qB=a("a"),OZe=o("BertLMHeadModel"),qZe=o(" (BERT model)"),zZe=l(),uu=a("li"),PQ=a("strong"),XZe=o("bert-generation"),WZe=o(" \u2014 "),zB=a("a"),VZe=o("BertGenerationDecoder"),QZe=o(" (Bert Generation model)"),HZe=l(),pu=a("li"),$Q=a("strong"),UZe=o("big_bird"),JZe=o(" \u2014 "),XB=a("a"),KZe=o("BigBirdForCausalLM"),YZe=o(" (BigBird model)"),ZZe=l(),_u=a("li"),IQ=a("strong"),eeo=o("bigbird_pegasus"),oeo=o(" \u2014 "),WB=a("a"),teo=o("BigBirdPegasusForCausalLM"),reo=o(" (BigBirdPegasus model)"),aeo=l(),vu=a("li"),jQ=a("strong"),neo=o("blenderbot"),seo=o(" \u2014 "),VB=a("a"),leo=o("BlenderbotForCausalLM"),ieo=o(" (Blenderbot model)"),deo=l(),bu=a("li"),NQ=a("strong"),meo=o("blenderbot-small"),feo=o(" \u2014 "),QB=a("a"),ceo=o("BlenderbotSmallForCausalLM"),geo=o(" (BlenderbotSmall model)"),heo=l(),Tu=a("li"),DQ=a("strong"),ueo=o("camembert"),peo=o(" \u2014 "),HB=a("a"),_eo=o("CamembertForCausalLM"),veo=o(" (CamemBERT model)"),beo=l(),Fu=a("li"),GQ=a("strong"),Teo=o("ctrl"),Feo=o(" \u2014 "),UB=a("a"),Meo=o("CTRLLMHeadModel"),Eeo=o(" (CTRL model)"),Ceo=l(),Mu=a("li"),OQ=a("strong"),yeo=o("gpt2"),weo=o(" \u2014 "),JB=a("a"),Aeo=o("GPT2LMHeadModel"),xeo=o(" (OpenAI GPT-2 model)"),Leo=l(),Eu=a("li"),qQ=a("strong"),Beo=o("gpt_neo"),keo=o(" \u2014 "),KB=a("a"),Reo=o("GPTNeoForCausalLM"),Seo=o(" (GPT Neo model)"),Peo=l(),Cu=a("li"),zQ=a("strong"),$eo=o("gptj"),Ieo=o(" \u2014 "),YB=a("a"),jeo=o("GPTJForCausalLM"),Neo=o(" (GPT-J 
model)"),Deo=l(),yu=a("li"),XQ=a("strong"),Geo=o("marian"),Oeo=o(" \u2014 "),ZB=a("a"),qeo=o("MarianForCausalLM"),zeo=o(" (Marian model)"),Xeo=l(),wu=a("li"),WQ=a("strong"),Weo=o("mbart"),Veo=o(" \u2014 "),e9=a("a"),Qeo=o("MBartForCausalLM"),Heo=o(" (mBART model)"),Ueo=l(),Au=a("li"),VQ=a("strong"),Jeo=o("megatron-bert"),Keo=o(" \u2014 "),o9=a("a"),Yeo=o("MegatronBertForCausalLM"),Zeo=o(" (MegatronBert model)"),eoo=l(),xu=a("li"),QQ=a("strong"),ooo=o("openai-gpt"),too=o(" \u2014 "),t9=a("a"),roo=o("OpenAIGPTLMHeadModel"),aoo=o(" (OpenAI GPT model)"),noo=l(),Lu=a("li"),HQ=a("strong"),soo=o("pegasus"),loo=o(" \u2014 "),r9=a("a"),ioo=o("PegasusForCausalLM"),doo=o(" (Pegasus model)"),moo=l(),Bu=a("li"),UQ=a("strong"),foo=o("prophetnet"),coo=o(" \u2014 "),a9=a("a"),goo=o("ProphetNetForCausalLM"),hoo=o(" (ProphetNet model)"),uoo=l(),ku=a("li"),JQ=a("strong"),poo=o("qdqbert"),_oo=o(" \u2014 "),n9=a("a"),voo=o("QDQBertLMHeadModel"),boo=o(" (QDQBert model)"),Too=l(),Ru=a("li"),KQ=a("strong"),Foo=o("reformer"),Moo=o(" \u2014 "),s9=a("a"),Eoo=o("ReformerModelWithLMHead"),Coo=o(" (Reformer model)"),yoo=l(),Su=a("li"),YQ=a("strong"),woo=o("rembert"),Aoo=o(" \u2014 "),l9=a("a"),xoo=o("RemBertForCausalLM"),Loo=o(" (RemBERT model)"),Boo=l(),Pu=a("li"),ZQ=a("strong"),koo=o("roberta"),Roo=o(" \u2014 "),i9=a("a"),Soo=o("RobertaForCausalLM"),Poo=o(" (RoBERTa model)"),$oo=l(),$u=a("li"),eH=a("strong"),Ioo=o("roformer"),joo=o(" \u2014 "),d9=a("a"),Noo=o("RoFormerForCausalLM"),Doo=o(" (RoFormer model)"),Goo=l(),Iu=a("li"),oH=a("strong"),Ooo=o("speech_to_text_2"),qoo=o(" \u2014 "),m9=a("a"),zoo=o("Speech2Text2ForCausalLM"),Xoo=o(" (Speech2Text2 model)"),Woo=l(),ju=a("li"),tH=a("strong"),Voo=o("transfo-xl"),Qoo=o(" \u2014 "),f9=a("a"),Hoo=o("TransfoXLLMHeadModel"),Uoo=o(" (Transformer-XL model)"),Joo=l(),Nu=a("li"),rH=a("strong"),Koo=o("trocr"),Yoo=o(" \u2014 "),c9=a("a"),Zoo=o("TrOCRForCausalLM"),eto=o(" (TrOCR model)"),oto=l(),Du=a("li"),aH=a("strong"),tto=o("xlm"),rto=o(" \u2014 "),g9=a("a"),ato=o("XLMWithLMHeadModel"),nto=o(" (XLM model)"),sto=l(),Gu=a("li"),nH=a("strong"),lto=o("xlm-prophetnet"),ito=o(" \u2014 "),h9=a("a"),dto=o("XLMProphetNetForCausalLM"),mto=o(" (XLMProphetNet model)"),fto=l(),Ou=a("li"),sH=a("strong"),cto=o("xlm-roberta"),gto=o(" \u2014 "),u9=a("a"),hto=o("XLMRobertaForCausalLM"),uto=o(" (XLM-RoBERTa model)"),pto=l(),qu=a("li"),lH=a("strong"),_to=o("xlnet"),vto=o(" \u2014 "),p9=a("a"),bto=o("XLNetLMHeadModel"),Tto=o(" (XLNet model)"),Fto=l(),zu=a("p"),Mto=o("The model is set in evaluation mode by default using "),iH=a("em"),Eto=o("model.eval()"),Cto=o(` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),dH=a("em"),yto=o("model.train()"),wto=l(),mH=a("p"),Ato=o("Examples:"),xto=l(),f(BM.$$.fragment),NEe=l(),ui=a("h2"),Xu=a("a"),fH=a("span"),f(kM.$$.fragment),Lto=l(),cH=a("span"),Bto=o("AutoModelForMaskedLM"),DEe=l(),jo=a("div"),f(RM.$$.fragment),kto=l(),pi=a("p"),Rto=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a masked language modeling head) when created with the `),gH=a("code"),Sto=o("from_pretrained()"),Pto=o(` class method or the `),hH=a("code"),$to=o("from_config()"),Ito=o(" class method."),jto=l(),SM=a("p"),Nto=o("This class cannot be instantiated directly using "),uH=a("code"),Dto=o("__init__()"),Gto=o(" (throws an error)."),Oto=l(),Lt=a("div"),f(PM.$$.fragment),qto=l(),pH=a("p"),zto=o("Instantiates one of the model classes of the library (with a masked language modeling head) from a configuration."),Xto=l(),_i=a("p"),Wto=o(`Note: Loading a model from its configuration file does `),_H=a("strong"),Vto=o("not"),Qto=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),vH=a("em"),Hto=o("~AutoModelForMaskedLM.from_pretrained"),Uto=o(`] to load the model weights.`),Jto=l(),bH=a("p"),Kto=o("Examples:"),Yto=l(),f($M.$$.fragment),Zto=l(),ke=a("div"),f(IM.$$.fragment),ero=l(),TH=a("p"),oro=o("Instantiate one of the model classes of the library (with a masked language modeling head) from a pretrained model."),tro=l(),wa=a("p"),rro=o("The model class to instantiate is selected based on the "),FH=a("em"),aro=o("model_type"),nro=o(` property of the config object (either passed as an argument or loaded from `),MH=a("em"),sro=o("pretrained_model_name_or_path"),lro=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),EH=a("em"),iro=o("pretrained_model_name_or_path"),dro=o(":"),mro=l(),$=a("ul"),Wu=a("li"),CH=a("strong"),fro=o("albert"),cro=o(" \u2014 "),_9=a("a"),gro=o("AlbertForMaskedLM"),hro=o(" (ALBERT model)"),uro=l(),Vu=a("li"),yH=a("strong"),pro=o("bart"),_ro=o(" \u2014 "),v9=a("a"),vro=o("BartForConditionalGeneration"),bro=o(" (BART model)"),Tro=l(),Qu=a("li"),wH=a("strong"),Fro=o("bert"),Mro=o(" \u2014 "),b9=a("a"),Ero=o("BertForMaskedLM"),Cro=o(" (BERT model)"),yro=l(),Hu=a("li"),AH=a("strong"),wro=o("big_bird"),Aro=o(" \u2014 "),T9=a("a"),xro=o("BigBirdForMaskedLM"),Lro=o(" (BigBird model)"),Bro=l(),Uu=a("li"),xH=a("strong"),kro=o("camembert"),Rro=o(" \u2014 "),F9=a("a"),Sro=o("CamembertForMaskedLM"),Pro=o(" (CamemBERT model)"),$ro=l(),Ju=a("li"),LH=a("strong"),Iro=o("convbert"),jro=o(" \u2014 "),M9=a("a"),Nro=o("ConvBertForMaskedLM"),Dro=o(" (ConvBERT model)"),Gro=l(),Ku=a("li"),BH=a("strong"),Oro=o("deberta"),qro=o(" \u2014 "),E9=a("a"),zro=o("DebertaForMaskedLM"),Xro=o(" (DeBERTa model)"),Wro=l(),Yu=a("li"),kH=a("strong"),Vro=o("deberta-v2"),Qro=o(" \u2014 "),C9=a("a"),Hro=o("DebertaV2ForMaskedLM"),Uro=o(" (DeBERTa-v2 model)"),Jro=l(),Zu=a("li"),RH=a("strong"),Kro=o("distilbert"),Yro=o(" \u2014 "),y9=a("a"),Zro=o("DistilBertForMaskedLM"),eao=o(" (DistilBERT model)"),oao=l(),ep=a("li"),SH=a("strong"),tao=o("electra"),rao=o(" \u2014 "),w9=a("a"),aao=o("ElectraForMaskedLM"),nao=o(" (ELECTRA model)"),sao=l(),op=a("li"),PH=a("strong"),lao=o("flaubert"),iao=o(" \u2014 "),A9=a("a"),dao=o("FlaubertWithLMHeadModel"),mao=o(" (FlauBERT model)"),fao=l(),tp=a("li"),$H=a("strong"),cao=o("fnet"),gao=o(" \u2014 "),x9=a("a"),hao=o("FNetForMaskedLM"),uao=o(" (FNet 
model)"),pao=l(),rp=a("li"),IH=a("strong"),_ao=o("funnel"),vao=o(" \u2014 "),L9=a("a"),bao=o("FunnelForMaskedLM"),Tao=o(" (Funnel Transformer model)"),Fao=l(),ap=a("li"),jH=a("strong"),Mao=o("ibert"),Eao=o(" \u2014 "),B9=a("a"),Cao=o("IBertForMaskedLM"),yao=o(" (I-BERT model)"),wao=l(),np=a("li"),NH=a("strong"),Aao=o("layoutlm"),xao=o(" \u2014 "),k9=a("a"),Lao=o("LayoutLMForMaskedLM"),Bao=o(" (LayoutLM model)"),kao=l(),sp=a("li"),DH=a("strong"),Rao=o("longformer"),Sao=o(" \u2014 "),R9=a("a"),Pao=o("LongformerForMaskedLM"),$ao=o(" (Longformer model)"),Iao=l(),lp=a("li"),GH=a("strong"),jao=o("mbart"),Nao=o(" \u2014 "),S9=a("a"),Dao=o("MBartForConditionalGeneration"),Gao=o(" (mBART model)"),Oao=l(),ip=a("li"),OH=a("strong"),qao=o("megatron-bert"),zao=o(" \u2014 "),P9=a("a"),Xao=o("MegatronBertForMaskedLM"),Wao=o(" (MegatronBert model)"),Vao=l(),dp=a("li"),qH=a("strong"),Qao=o("mobilebert"),Hao=o(" \u2014 "),$9=a("a"),Uao=o("MobileBertForMaskedLM"),Jao=o(" (MobileBERT model)"),Kao=l(),mp=a("li"),zH=a("strong"),Yao=o("mpnet"),Zao=o(" \u2014 "),I9=a("a"),eno=o("MPNetForMaskedLM"),ono=o(" (MPNet model)"),tno=l(),fp=a("li"),XH=a("strong"),rno=o("perceiver"),ano=o(" \u2014 "),j9=a("a"),nno=o("PerceiverForMaskedLM"),sno=o(" (Perceiver model)"),lno=l(),cp=a("li"),WH=a("strong"),ino=o("qdqbert"),dno=o(" \u2014 "),N9=a("a"),mno=o("QDQBertForMaskedLM"),fno=o(" (QDQBert model)"),cno=l(),gp=a("li"),VH=a("strong"),gno=o("reformer"),hno=o(" \u2014 "),D9=a("a"),uno=o("ReformerForMaskedLM"),pno=o(" (Reformer model)"),_no=l(),hp=a("li"),QH=a("strong"),vno=o("rembert"),bno=o(" \u2014 "),G9=a("a"),Tno=o("RemBertForMaskedLM"),Fno=o(" (RemBERT model)"),Mno=l(),up=a("li"),HH=a("strong"),Eno=o("roberta"),Cno=o(" \u2014 "),O9=a("a"),yno=o("RobertaForMaskedLM"),wno=o(" (RoBERTa model)"),Ano=l(),pp=a("li"),UH=a("strong"),xno=o("roformer"),Lno=o(" \u2014 "),q9=a("a"),Bno=o("RoFormerForMaskedLM"),kno=o(" (RoFormer model)"),Rno=l(),_p=a("li"),JH=a("strong"),Sno=o("squeezebert"),Pno=o(" \u2014 "),z9=a("a"),$no=o("SqueezeBertForMaskedLM"),Ino=o(" (SqueezeBERT model)"),jno=l(),vp=a("li"),KH=a("strong"),Nno=o("tapas"),Dno=o(" \u2014 "),X9=a("a"),Gno=o("TapasForMaskedLM"),Ono=o(" (TAPAS model)"),qno=l(),bp=a("li"),YH=a("strong"),zno=o("wav2vec2"),Xno=o(" \u2014 "),ZH=a("code"),Wno=o("Wav2Vec2ForMaskedLM"),Vno=o(" (Wav2Vec2 model)"),Qno=l(),Tp=a("li"),eU=a("strong"),Hno=o("xlm"),Uno=o(" \u2014 "),W9=a("a"),Jno=o("XLMWithLMHeadModel"),Kno=o(" (XLM model)"),Yno=l(),Fp=a("li"),oU=a("strong"),Zno=o("xlm-roberta"),eso=o(" \u2014 "),V9=a("a"),oso=o("XLMRobertaForMaskedLM"),tso=o(" (XLM-RoBERTa model)"),rso=l(),Mp=a("p"),aso=o("The model is set in evaluation mode by default using "),tU=a("em"),nso=o("model.eval()"),sso=o(` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),rU=a("em"),lso=o("model.train()"),iso=l(),aU=a("p"),dso=o("Examples:"),mso=l(),f(jM.$$.fragment),GEe=l(),vi=a("h2"),Ep=a("a"),nU=a("span"),f(NM.$$.fragment),fso=l(),sU=a("span"),cso=o("AutoModelForSeq2SeqLM"),OEe=l(),No=a("div"),f(DM.$$.fragment),gso=l(),bi=a("p"),hso=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence language modeling head) when created with the `),lU=a("code"),uso=o("from_pretrained()"),pso=o(` class method or the `),iU=a("code"),_so=o("from_config()"),vso=o(" class method."),bso=l(),GM=a("p"),Tso=o("This class cannot be instantiated directly using "),dU=a("code"),Fso=o("__init__()"),Mso=o(" (throws an error)."),Eso=l(),Bt=a("div"),f(OM.$$.fragment),Cso=l(),mU=a("p"),yso=o("Instantiates one of the model classes of the library (with a sequence-to-sequence language modeling head) from a configuration."),wso=l(),Ti=a("p"),Aso=o(`Note: Loading a model from its configuration file does `),fU=a("strong"),xso=o("not"),Lso=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),cU=a("em"),Bso=o("~AutoModelForSeq2SeqLM.from_pretrained"),kso=o(`] to load the model weights.`),Rso=l(),gU=a("p"),Sso=o("Examples:"),Pso=l(),f(qM.$$.fragment),$so=l(),Re=a("div"),f(zM.$$.fragment),Iso=l(),hU=a("p"),jso=o("Instantiate one of the model classes of the library (with a sequence-to-sequence language modeling head) from a pretrained model."),Nso=l(),Aa=a("p"),Dso=o("The model class to instantiate is selected based on the "),uU=a("em"),Gso=o("model_type"),Oso=o(` property of the config object (either passed as an argument or loaded from `),pU=a("em"),qso=o("pretrained_model_name_or_path"),zso=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),_U=a("em"),Xso=o("pretrained_model_name_or_path"),Wso=o(":"),Vso=l(),ne=a("ul"),Cp=a("li"),vU=a("strong"),Qso=o("bart"),Hso=o(" \u2014 "),Q9=a("a"),Uso=o("BartForConditionalGeneration"),Jso=o(" (BART model)"),Kso=l(),yp=a("li"),bU=a("strong"),Yso=o("bigbird_pegasus"),Zso=o(" \u2014 "),H9=a("a"),elo=o("BigBirdPegasusForConditionalGeneration"),olo=o(" (BigBirdPegasus model)"),tlo=l(),wp=a("li"),TU=a("strong"),rlo=o("blenderbot"),alo=o(" \u2014 "),U9=a("a"),nlo=o("BlenderbotForConditionalGeneration"),slo=o(" (Blenderbot model)"),llo=l(),Ap=a("li"),FU=a("strong"),ilo=o("blenderbot-small"),dlo=o(" \u2014 "),J9=a("a"),mlo=o("BlenderbotSmallForConditionalGeneration"),flo=o(" (BlenderbotSmall model)"),clo=l(),xp=a("li"),MU=a("strong"),glo=o("encoder-decoder"),hlo=o(" \u2014 "),K9=a("a"),ulo=o("EncoderDecoderModel"),plo=o(" (Encoder decoder model)"),_lo=l(),Lp=a("li"),EU=a("strong"),vlo=o("fsmt"),blo=o(" \u2014 "),Y9=a("a"),Tlo=o("FSMTForConditionalGeneration"),Flo=o(" (FairSeq Machine-Translation model)"),Mlo=l(),Bp=a("li"),CU=a("strong"),Elo=o("led"),Clo=o(" \u2014 "),Z9=a("a"),ylo=o("LEDForConditionalGeneration"),wlo=o(" (LED model)"),Alo=l(),kp=a("li"),yU=a("strong"),xlo=o("m2m_100"),Llo=o(" \u2014 "),ek=a("a"),Blo=o("M2M100ForConditionalGeneration"),klo=o(" (M2M100 model)"),Rlo=l(),Rp=a("li"),wU=a("strong"),Slo=o("marian"),Plo=o(" \u2014 "),ok=a("a"),$lo=o("MarianMTModel"),Ilo=o(" (Marian model)"),jlo=l(),Sp=a("li"),AU=a("strong"),Nlo=o("mbart"),Dlo=o(" \u2014 "),tk=a("a"),Glo=o("MBartForConditionalGeneration"),Olo=o(" (mBART model)"),qlo=l(),Pp=a("li"),xU=a("strong"),zlo=o("mt5"),Xlo=o(" \u2014 
"),rk=a("a"),Wlo=o("MT5ForConditionalGeneration"),Vlo=o(" (mT5 model)"),Qlo=l(),$p=a("li"),LU=a("strong"),Hlo=o("pegasus"),Ulo=o(" \u2014 "),ak=a("a"),Jlo=o("PegasusForConditionalGeneration"),Klo=o(" (Pegasus model)"),Ylo=l(),Ip=a("li"),BU=a("strong"),Zlo=o("prophetnet"),eio=o(" \u2014 "),nk=a("a"),oio=o("ProphetNetForConditionalGeneration"),tio=o(" (ProphetNet model)"),rio=l(),jp=a("li"),kU=a("strong"),aio=o("t5"),nio=o(" \u2014 "),sk=a("a"),sio=o("T5ForConditionalGeneration"),lio=o(" (T5 model)"),iio=l(),Np=a("li"),RU=a("strong"),dio=o("xlm-prophetnet"),mio=o(" \u2014 "),lk=a("a"),fio=o("XLMProphetNetForConditionalGeneration"),cio=o(" (XLMProphetNet model)"),gio=l(),Dp=a("p"),hio=o("The model is set in evaluation mode by default using "),SU=a("em"),uio=o("model.eval()"),pio=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),PU=a("em"),_io=o("model.train()"),vio=l(),$U=a("p"),bio=o("Examples:"),Tio=l(),f(XM.$$.fragment),qEe=l(),Fi=a("h2"),Gp=a("a"),IU=a("span"),f(WM.$$.fragment),Fio=l(),jU=a("span"),Mio=o("AutoModelForSequenceClassification"),zEe=l(),Do=a("div"),f(VM.$$.fragment),Eio=l(),Mi=a("p"),Cio=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence classification head) when created with the `),NU=a("code"),yio=o("from_pretrained()"),wio=o(` class method or the `),DU=a("code"),Aio=o("from_config()"),xio=o(" class method."),Lio=l(),QM=a("p"),Bio=o("This class cannot be instantiated directly using "),GU=a("code"),kio=o("__init__()"),Rio=o(" (throws an error)."),Sio=l(),kt=a("div"),f(HM.$$.fragment),Pio=l(),OU=a("p"),$io=o("Instantiates one of the model classes of the library (with a sequence classification head) from a configuration."),Iio=l(),Ei=a("p"),jio=o(`Note: Loading a model from its configuration file does `),qU=a("strong"),Nio=o("not"),Dio=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),zU=a("em"),Gio=o("~AutoModelForSequenceClassification.from_pretrained"),Oio=o(`] to load the model weights.`),qio=l(),XU=a("p"),zio=o("Examples:"),Xio=l(),f(UM.$$.fragment),Wio=l(),Se=a("div"),f(JM.$$.fragment),Vio=l(),WU=a("p"),Qio=o("Instantiate one of the model classes of the library (with a sequence classification head) from a pretrained model."),Hio=l(),xa=a("p"),Uio=o("The model class to instantiate is selected based on the "),VU=a("em"),Jio=o("model_type"),Kio=o(` property of the config object (either passed as an argument or loaded from `),QU=a("em"),Yio=o("pretrained_model_name_or_path"),Zio=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),HU=a("em"),edo=o("pretrained_model_name_or_path"),odo=o(":"),tdo=l(),A=a("ul"),Op=a("li"),UU=a("strong"),rdo=o("albert"),ado=o(" \u2014 "),ik=a("a"),ndo=o("AlbertForSequenceClassification"),sdo=o(" (ALBERT model)"),ldo=l(),qp=a("li"),JU=a("strong"),ido=o("bart"),ddo=o(" \u2014 "),dk=a("a"),mdo=o("BartForSequenceClassification"),fdo=o(" (BART model)"),cdo=l(),zp=a("li"),KU=a("strong"),gdo=o("bert"),hdo=o(" \u2014 "),mk=a("a"),udo=o("BertForSequenceClassification"),pdo=o(" (BERT model)"),_do=l(),Xp=a("li"),YU=a("strong"),vdo=o("big_bird"),bdo=o(" \u2014 "),fk=a("a"),Tdo=o("BigBirdForSequenceClassification"),Fdo=o(" (BigBird model)"),Mdo=l(),Wp=a("li"),ZU=a("strong"),Edo=o("bigbird_pegasus"),Cdo=o(" \u2014 "),ck=a("a"),ydo=o("BigBirdPegasusForSequenceClassification"),wdo=o(" (BigBirdPegasus model)"),Ado=l(),Vp=a("li"),eJ=a("strong"),xdo=o("camembert"),Ldo=o(" \u2014 "),gk=a("a"),Bdo=o("CamembertForSequenceClassification"),kdo=o(" (CamemBERT model)"),Rdo=l(),Qp=a("li"),oJ=a("strong"),Sdo=o("canine"),Pdo=o(" \u2014 "),hk=a("a"),$do=o("CanineForSequenceClassification"),Ido=o(" (Canine model)"),jdo=l(),Hp=a("li"),tJ=a("strong"),Ndo=o("convbert"),Ddo=o(" \u2014 "),uk=a("a"),Gdo=o("ConvBertForSequenceClassification"),Odo=o(" (ConvBERT model)"),qdo=l(),Up=a("li"),rJ=a("strong"),zdo=o("ctrl"),Xdo=o(" \u2014 "),pk=a("a"),Wdo=o("CTRLForSequenceClassification"),Vdo=o(" (CTRL model)"),Qdo=l(),Jp=a("li"),aJ=a("strong"),Hdo=o("deberta"),Udo=o(" \u2014 "),_k=a("a"),Jdo=o("DebertaForSequenceClassification"),Kdo=o(" (DeBERTa model)"),Ydo=l(),Kp=a("li"),nJ=a("strong"),Zdo=o("deberta-v2"),emo=o(" \u2014 "),vk=a("a"),omo=o("DebertaV2ForSequenceClassification"),tmo=o(" (DeBERTa-v2 model)"),rmo=l(),Yp=a("li"),sJ=a("strong"),amo=o("distilbert"),nmo=o(" \u2014 "),bk=a("a"),smo=o("DistilBertForSequenceClassification"),lmo=o(" (DistilBERT model)"),imo=l(),Zp=a("li"),lJ=a("strong"),dmo=o("electra"),mmo=o(" \u2014 "),Tk=a("a"),fmo=o("ElectraForSequenceClassification"),cmo=o(" (ELECTRA model)"),gmo=l(),e_=a("li"),iJ=a("strong"),hmo=o("flaubert"),umo=o(" \u2014 "),Fk=a("a"),pmo=o("FlaubertForSequenceClassification"),_mo=o(" (FlauBERT model)"),vmo=l(),o_=a("li"),dJ=a("strong"),bmo=o("fnet"),Tmo=o(" \u2014 "),Mk=a("a"),Fmo=o("FNetForSequenceClassification"),Mmo=o(" (FNet model)"),Emo=l(),t_=a("li"),mJ=a("strong"),Cmo=o("funnel"),ymo=o(" \u2014 "),Ek=a("a"),wmo=o("FunnelForSequenceClassification"),Amo=o(" (Funnel Transformer model)"),xmo=l(),r_=a("li"),fJ=a("strong"),Lmo=o("gpt2"),Bmo=o(" \u2014 "),Ck=a("a"),kmo=o("GPT2ForSequenceClassification"),Rmo=o(" (OpenAI GPT-2 model)"),Smo=l(),a_=a("li"),cJ=a("strong"),Pmo=o("gpt_neo"),$mo=o(" \u2014 "),yk=a("a"),Imo=o("GPTNeoForSequenceClassification"),jmo=o(" (GPT Neo model)"),Nmo=l(),n_=a("li"),gJ=a("strong"),Dmo=o("gptj"),Gmo=o(" \u2014 
"),wk=a("a"),Omo=o("GPTJForSequenceClassification"),qmo=o(" (GPT-J model)"),zmo=l(),s_=a("li"),hJ=a("strong"),Xmo=o("ibert"),Wmo=o(" \u2014 "),Ak=a("a"),Vmo=o("IBertForSequenceClassification"),Qmo=o(" (I-BERT model)"),Hmo=l(),l_=a("li"),uJ=a("strong"),Umo=o("layoutlm"),Jmo=o(" \u2014 "),xk=a("a"),Kmo=o("LayoutLMForSequenceClassification"),Ymo=o(" (LayoutLM model)"),Zmo=l(),i_=a("li"),pJ=a("strong"),efo=o("layoutlmv2"),ofo=o(" \u2014 "),Lk=a("a"),tfo=o("LayoutLMv2ForSequenceClassification"),rfo=o(" (LayoutLMv2 model)"),afo=l(),d_=a("li"),_J=a("strong"),nfo=o("led"),sfo=o(" \u2014 "),Bk=a("a"),lfo=o("LEDForSequenceClassification"),ifo=o(" (LED model)"),dfo=l(),m_=a("li"),vJ=a("strong"),mfo=o("longformer"),ffo=o(" \u2014 "),kk=a("a"),cfo=o("LongformerForSequenceClassification"),gfo=o(" (Longformer model)"),hfo=l(),f_=a("li"),bJ=a("strong"),ufo=o("mbart"),pfo=o(" \u2014 "),Rk=a("a"),_fo=o("MBartForSequenceClassification"),vfo=o(" (mBART model)"),bfo=l(),c_=a("li"),TJ=a("strong"),Tfo=o("megatron-bert"),Ffo=o(" \u2014 "),Sk=a("a"),Mfo=o("MegatronBertForSequenceClassification"),Efo=o(" (MegatronBert model)"),Cfo=l(),g_=a("li"),FJ=a("strong"),yfo=o("mobilebert"),wfo=o(" \u2014 "),Pk=a("a"),Afo=o("MobileBertForSequenceClassification"),xfo=o(" (MobileBERT model)"),Lfo=l(),h_=a("li"),MJ=a("strong"),Bfo=o("mpnet"),kfo=o(" \u2014 "),$k=a("a"),Rfo=o("MPNetForSequenceClassification"),Sfo=o(" (MPNet model)"),Pfo=l(),u_=a("li"),EJ=a("strong"),$fo=o("openai-gpt"),Ifo=o(" \u2014 "),Ik=a("a"),jfo=o("OpenAIGPTForSequenceClassification"),Nfo=o(" (OpenAI GPT model)"),Dfo=l(),p_=a("li"),CJ=a("strong"),Gfo=o("perceiver"),Ofo=o(" \u2014 "),jk=a("a"),qfo=o("PerceiverForSequenceClassification"),zfo=o(" (Perceiver model)"),Xfo=l(),__=a("li"),yJ=a("strong"),Wfo=o("qdqbert"),Vfo=o(" \u2014 "),Nk=a("a"),Qfo=o("QDQBertForSequenceClassification"),Hfo=o(" (QDQBert model)"),Ufo=l(),v_=a("li"),wJ=a("strong"),Jfo=o("reformer"),Kfo=o(" \u2014 "),Dk=a("a"),Yfo=o("ReformerForSequenceClassification"),Zfo=o(" (Reformer model)"),eco=l(),b_=a("li"),AJ=a("strong"),oco=o("rembert"),tco=o(" \u2014 "),Gk=a("a"),rco=o("RemBertForSequenceClassification"),aco=o(" (RemBERT model)"),nco=l(),T_=a("li"),xJ=a("strong"),sco=o("roberta"),lco=o(" \u2014 "),Ok=a("a"),ico=o("RobertaForSequenceClassification"),dco=o(" (RoBERTa model)"),mco=l(),F_=a("li"),LJ=a("strong"),fco=o("roformer"),cco=o(" \u2014 "),qk=a("a"),gco=o("RoFormerForSequenceClassification"),hco=o(" (RoFormer model)"),uco=l(),M_=a("li"),BJ=a("strong"),pco=o("squeezebert"),_co=o(" \u2014 "),zk=a("a"),vco=o("SqueezeBertForSequenceClassification"),bco=o(" (SqueezeBERT model)"),Tco=l(),E_=a("li"),kJ=a("strong"),Fco=o("tapas"),Mco=o(" \u2014 "),Xk=a("a"),Eco=o("TapasForSequenceClassification"),Cco=o(" (TAPAS model)"),yco=l(),C_=a("li"),RJ=a("strong"),wco=o("transfo-xl"),Aco=o(" \u2014 "),Wk=a("a"),xco=o("TransfoXLForSequenceClassification"),Lco=o(" (Transformer-XL model)"),Bco=l(),y_=a("li"),SJ=a("strong"),kco=o("xlm"),Rco=o(" \u2014 "),Vk=a("a"),Sco=o("XLMForSequenceClassification"),Pco=o(" (XLM model)"),$co=l(),w_=a("li"),PJ=a("strong"),Ico=o("xlm-roberta"),jco=o(" \u2014 "),Qk=a("a"),Nco=o("XLMRobertaForSequenceClassification"),Dco=o(" (XLM-RoBERTa model)"),Gco=l(),A_=a("li"),$J=a("strong"),Oco=o("xlnet"),qco=o(" \u2014 "),Hk=a("a"),zco=o("XLNetForSequenceClassification"),Xco=o(" (XLNet model)"),Wco=l(),x_=a("p"),Vco=o("The model is set in evaluation mode by default using "),IJ=a("em"),Qco=o("model.eval()"),Hco=o(` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),jJ=a("em"),Uco=o("model.train()"),Jco=l(),NJ=a("p"),Kco=o("Examples:"),Yco=l(),f(KM.$$.fragment),XEe=l(),Ci=a("h2"),L_=a("a"),DJ=a("span"),f(YM.$$.fragment),Zco=l(),GJ=a("span"),ego=o("AutoModelForMultipleChoice"),WEe=l(),Go=a("div"),f(ZM.$$.fragment),ogo=l(),yi=a("p"),tgo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a multiple choice head) when created with the `),OJ=a("code"),rgo=o("from_pretrained()"),ago=o(` class method or the `),qJ=a("code"),ngo=o("from_config()"),sgo=o(" class method."),lgo=l(),eE=a("p"),igo=o("This class cannot be instantiated directly using "),zJ=a("code"),dgo=o("__init__()"),mgo=o(" (throws an error)."),fgo=l(),Rt=a("div"),f(oE.$$.fragment),cgo=l(),XJ=a("p"),ggo=o("Instantiates one of the model classes of the library (with a multiple choice head) from a configuration."),hgo=l(),wi=a("p"),ugo=o(`Note: Loading a model from its configuration file does `),WJ=a("strong"),pgo=o("not"),_go=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),VJ=a("em"),vgo=o("~AutoModelForMultipleChoice.from_pretrained"),bgo=o(`] to load the model weights.`),Tgo=l(),QJ=a("p"),Fgo=o("Examples:"),Mgo=l(),f(tE.$$.fragment),Ego=l(),Pe=a("div"),f(rE.$$.fragment),Cgo=l(),HJ=a("p"),ygo=o("Instantiate one of the model classes of the library (with a multiple choice head) from a pretrained model."),wgo=l(),La=a("p"),Ago=o("The model class to instantiate is selected based on the "),UJ=a("em"),xgo=o("model_type"),Lgo=o(` property of the config object (either passed as an argument or loaded from `),JJ=a("em"),Bgo=o("pretrained_model_name_or_path"),kgo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),KJ=a("em"),Rgo=o("pretrained_model_name_or_path"),Sgo=o(":"),Pgo=l(),q=a("ul"),B_=a("li"),YJ=a("strong"),$go=o("albert"),Igo=o(" \u2014 "),Uk=a("a"),jgo=o("AlbertForMultipleChoice"),Ngo=o(" (ALBERT model)"),Dgo=l(),k_=a("li"),ZJ=a("strong"),Ggo=o("bert"),Ogo=o(" \u2014 "),Jk=a("a"),qgo=o("BertForMultipleChoice"),zgo=o(" (BERT model)"),Xgo=l(),R_=a("li"),eK=a("strong"),Wgo=o("big_bird"),Vgo=o(" \u2014 "),Kk=a("a"),Qgo=o("BigBirdForMultipleChoice"),Hgo=o(" (BigBird model)"),Ugo=l(),S_=a("li"),oK=a("strong"),Jgo=o("camembert"),Kgo=o(" \u2014 "),Yk=a("a"),Ygo=o("CamembertForMultipleChoice"),Zgo=o(" (CamemBERT model)"),eho=l(),P_=a("li"),tK=a("strong"),oho=o("canine"),tho=o(" \u2014 "),Zk=a("a"),rho=o("CanineForMultipleChoice"),aho=o(" (Canine model)"),nho=l(),$_=a("li"),rK=a("strong"),sho=o("convbert"),lho=o(" \u2014 "),eR=a("a"),iho=o("ConvBertForMultipleChoice"),dho=o(" (ConvBERT model)"),mho=l(),I_=a("li"),aK=a("strong"),fho=o("distilbert"),cho=o(" \u2014 "),oR=a("a"),gho=o("DistilBertForMultipleChoice"),hho=o(" (DistilBERT model)"),uho=l(),j_=a("li"),nK=a("strong"),pho=o("electra"),_ho=o(" \u2014 "),tR=a("a"),vho=o("ElectraForMultipleChoice"),bho=o(" (ELECTRA model)"),Tho=l(),N_=a("li"),sK=a("strong"),Fho=o("flaubert"),Mho=o(" \u2014 "),rR=a("a"),Eho=o("FlaubertForMultipleChoice"),Cho=o(" (FlauBERT model)"),yho=l(),D_=a("li"),lK=a("strong"),who=o("fnet"),Aho=o(" \u2014 "),aR=a("a"),xho=o("FNetForMultipleChoice"),Lho=o(" (FNet model)"),Bho=l(),G_=a("li"),iK=a("strong"),kho=o("funnel"),Rho=o(" \u2014 "),nR=a("a"),Sho=o("FunnelForMultipleChoice"),Pho=o(" (Funnel Transformer model)"),$ho=l(),O_=a("li"),dK=a("strong"),Iho=o("ibert"),jho=o(" \u2014 "),sR=a("a"),Nho=o("IBertForMultipleChoice"),Dho=o(" 
(I-BERT model)"),Gho=l(),q_=a("li"),mK=a("strong"),Oho=o("longformer"),qho=o(" \u2014 "),lR=a("a"),zho=o("LongformerForMultipleChoice"),Xho=o(" (Longformer model)"),Who=l(),z_=a("li"),fK=a("strong"),Vho=o("megatron-bert"),Qho=o(" \u2014 "),iR=a("a"),Hho=o("MegatronBertForMultipleChoice"),Uho=o(" (MegatronBert model)"),Jho=l(),X_=a("li"),cK=a("strong"),Kho=o("mobilebert"),Yho=o(" \u2014 "),dR=a("a"),Zho=o("MobileBertForMultipleChoice"),euo=o(" (MobileBERT model)"),ouo=l(),W_=a("li"),gK=a("strong"),tuo=o("mpnet"),ruo=o(" \u2014 "),mR=a("a"),auo=o("MPNetForMultipleChoice"),nuo=o(" (MPNet model)"),suo=l(),V_=a("li"),hK=a("strong"),luo=o("qdqbert"),iuo=o(" \u2014 "),fR=a("a"),duo=o("QDQBertForMultipleChoice"),muo=o(" (QDQBert model)"),fuo=l(),Q_=a("li"),uK=a("strong"),cuo=o("rembert"),guo=o(" \u2014 "),cR=a("a"),huo=o("RemBertForMultipleChoice"),uuo=o(" (RemBERT model)"),puo=l(),H_=a("li"),pK=a("strong"),_uo=o("roberta"),vuo=o(" \u2014 "),gR=a("a"),buo=o("RobertaForMultipleChoice"),Tuo=o(" (RoBERTa model)"),Fuo=l(),U_=a("li"),_K=a("strong"),Muo=o("roformer"),Euo=o(" \u2014 "),hR=a("a"),Cuo=o("RoFormerForMultipleChoice"),yuo=o(" (RoFormer model)"),wuo=l(),J_=a("li"),vK=a("strong"),Auo=o("squeezebert"),xuo=o(" \u2014 "),uR=a("a"),Luo=o("SqueezeBertForMultipleChoice"),Buo=o(" (SqueezeBERT model)"),kuo=l(),K_=a("li"),bK=a("strong"),Ruo=o("xlm"),Suo=o(" \u2014 "),pR=a("a"),Puo=o("XLMForMultipleChoice"),$uo=o(" (XLM model)"),Iuo=l(),Y_=a("li"),TK=a("strong"),juo=o("xlm-roberta"),Nuo=o(" \u2014 "),_R=a("a"),Duo=o("XLMRobertaForMultipleChoice"),Guo=o(" (XLM-RoBERTa model)"),Ouo=l(),Z_=a("li"),FK=a("strong"),quo=o("xlnet"),zuo=o(" \u2014 "),vR=a("a"),Xuo=o("XLNetForMultipleChoice"),Wuo=o(" (XLNet model)"),Vuo=l(),ev=a("p"),Quo=o("The model is set in evaluation mode by default using "),MK=a("em"),Huo=o("model.eval()"),Uuo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),EK=a("em"),Juo=o("model.train()"),Kuo=l(),CK=a("p"),Yuo=o("Examples:"),Zuo=l(),f(aE.$$.fragment),VEe=l(),Ai=a("h2"),ov=a("a"),yK=a("span"),f(nE.$$.fragment),epo=l(),wK=a("span"),opo=o("AutoModelForNextSentencePrediction"),QEe=l(),Oo=a("div"),f(sE.$$.fragment),tpo=l(),xi=a("p"),rpo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a next sentence prediction head) when created with the `),AK=a("code"),apo=o("from_pretrained()"),npo=o(` class method or the `),xK=a("code"),spo=o("from_config()"),lpo=o(" class method."),ipo=l(),lE=a("p"),dpo=o("This class cannot be instantiated directly using "),LK=a("code"),mpo=o("__init__()"),fpo=o(" (throws an error)."),cpo=l(),St=a("div"),f(iE.$$.fragment),gpo=l(),BK=a("p"),hpo=o("Instantiates one of the model classes of the library (with a next sentence prediction head) from a configuration."),upo=l(),Li=a("p"),ppo=o(`Note: Loading a model from its configuration file does `),kK=a("strong"),_po=o("not"),vpo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),RK=a("em"),bpo=o("~AutoModelForNextSentencePrediction.from_pretrained"),Tpo=o(`] to load the model weights.`),Fpo=l(),SK=a("p"),Mpo=o("Examples:"),Epo=l(),f(dE.$$.fragment),Cpo=l(),$e=a("div"),f(mE.$$.fragment),ypo=l(),PK=a("p"),wpo=o("Instantiate one of the model classes of the library (with a next sentence prediction head) from a pretrained model."),Apo=l(),Ba=a("p"),xpo=o("The model class to instantiate is selected based on the "),$K=a("em"),Lpo=o("model_type"),Bpo=o(` property of the config object (either passed as an argument or loaded from `),IK=a("em"),kpo=o("pretrained_model_name_or_path"),Rpo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),jK=a("em"),Spo=o("pretrained_model_name_or_path"),Ppo=o(":"),$po=l(),qr=a("ul"),tv=a("li"),NK=a("strong"),Ipo=o("bert"),jpo=o(" \u2014 "),bR=a("a"),Npo=o("BertForNextSentencePrediction"),Dpo=o(" (BERT model)"),Gpo=l(),rv=a("li"),DK=a("strong"),Opo=o("fnet"),qpo=o(" \u2014 "),TR=a("a"),zpo=o("FNetForNextSentencePrediction"),Xpo=o(" (FNet model)"),Wpo=l(),av=a("li"),GK=a("strong"),Vpo=o("megatron-bert"),Qpo=o(" \u2014 "),FR=a("a"),Hpo=o("MegatronBertForNextSentencePrediction"),Upo=o(" (MegatronBert model)"),Jpo=l(),nv=a("li"),OK=a("strong"),Kpo=o("mobilebert"),Ypo=o(" \u2014 "),MR=a("a"),Zpo=o("MobileBertForNextSentencePrediction"),e_o=o(" (MobileBERT model)"),o_o=l(),sv=a("li"),qK=a("strong"),t_o=o("qdqbert"),r_o=o(" \u2014 "),ER=a("a"),a_o=o("QDQBertForNextSentencePrediction"),n_o=o(" (QDQBert model)"),s_o=l(),lv=a("p"),l_o=o("The model is set in evaluation mode by default using "),zK=a("em"),i_o=o("model.eval()"),d_o=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),XK=a("em"),m_o=o("model.train()"),f_o=l(),WK=a("p"),c_o=o("Examples:"),g_o=l(),f(fE.$$.fragment),HEe=l(),Bi=a("h2"),iv=a("a"),VK=a("span"),f(cE.$$.fragment),h_o=l(),QK=a("span"),u_o=o("AutoModelForTokenClassification"),UEe=l(),qo=a("div"),f(gE.$$.fragment),p_o=l(),ki=a("p"),__o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a token classification head) when created with the `),HK=a("code"),v_o=o("from_pretrained()"),b_o=o(` class method or the `),UK=a("code"),T_o=o("from_config()"),F_o=o(" class method."),M_o=l(),hE=a("p"),E_o=o("This class cannot be instantiated directly using "),JK=a("code"),C_o=o("__init__()"),y_o=o(" (throws an error)."),w_o=l(),Pt=a("div"),f(uE.$$.fragment),A_o=l(),KK=a("p"),x_o=o("Instantiates one of the model classes of the library (with a token classification head) from a configuration."),L_o=l(),Ri=a("p"),B_o=o(`Note: Loading a model from its configuration file does `),YK=a("strong"),k_o=o("not"),R_o=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),ZK=a("em"),S_o=o("~AutoModelForTokenClassification.from_pretrained"),P_o=o(`] to load the model weights.`),$_o=l(),eY=a("p"),I_o=o("Examples:"),j_o=l(),f(pE.$$.fragment),N_o=l(),Ie=a("div"),f(_E.$$.fragment),D_o=l(),oY=a("p"),G_o=o("Instantiate one of the model classes of the library (with a token classification head) from a pretrained model."),O_o=l(),ka=a("p"),q_o=o("The model class to instantiate is selected based on the "),tY=a("em"),z_o=o("model_type"),X_o=o(` property of the config object (either passed as an argument or loaded from `),rY=a("em"),W_o=o("pretrained_model_name_or_path"),V_o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),aY=a("em"),Q_o=o("pretrained_model_name_or_path"),H_o=o(":"),U_o=l(),N=a("ul"),dv=a("li"),nY=a("strong"),J_o=o("albert"),K_o=o(" \u2014 "),CR=a("a"),Y_o=o("AlbertForTokenClassification"),Z_o=o(" (ALBERT model)"),evo=l(),mv=a("li"),sY=a("strong"),ovo=o("bert"),tvo=o(" \u2014 "),yR=a("a"),rvo=o("BertForTokenClassification"),avo=o(" (BERT model)"),nvo=l(),fv=a("li"),lY=a("strong"),svo=o("big_bird"),lvo=o(" \u2014 "),wR=a("a"),ivo=o("BigBirdForTokenClassification"),dvo=o(" (BigBird model)"),mvo=l(),cv=a("li"),iY=a("strong"),fvo=o("camembert"),cvo=o(" \u2014 "),AR=a("a"),gvo=o("CamembertForTokenClassification"),hvo=o(" (CamemBERT model)"),uvo=l(),gv=a("li"),dY=a("strong"),pvo=o("canine"),_vo=o(" \u2014 "),xR=a("a"),vvo=o("CanineForTokenClassification"),bvo=o(" (Canine model)"),Tvo=l(),hv=a("li"),mY=a("strong"),Fvo=o("convbert"),Mvo=o(" \u2014 "),LR=a("a"),Evo=o("ConvBertForTokenClassification"),Cvo=o(" (ConvBERT model)"),yvo=l(),uv=a("li"),fY=a("strong"),wvo=o("deberta"),Avo=o(" \u2014 "),BR=a("a"),xvo=o("DebertaForTokenClassification"),Lvo=o(" (DeBERTa model)"),Bvo=l(),pv=a("li"),cY=a("strong"),kvo=o("deberta-v2"),Rvo=o(" \u2014 "),kR=a("a"),Svo=o("DebertaV2ForTokenClassification"),Pvo=o(" (DeBERTa-v2 model)"),$vo=l(),_v=a("li"),gY=a("strong"),Ivo=o("distilbert"),jvo=o(" \u2014 "),RR=a("a"),Nvo=o("DistilBertForTokenClassification"),Dvo=o(" (DistilBERT model)"),Gvo=l(),vv=a("li"),hY=a("strong"),Ovo=o("electra"),qvo=o(" \u2014 "),SR=a("a"),zvo=o("ElectraForTokenClassification"),Xvo=o(" (ELECTRA model)"),Wvo=l(),bv=a("li"),uY=a("strong"),Vvo=o("flaubert"),Qvo=o(" \u2014 "),PR=a("a"),Hvo=o("FlaubertForTokenClassification"),Uvo=o(" (FlauBERT model)"),Jvo=l(),Tv=a("li"),pY=a("strong"),Kvo=o("fnet"),Yvo=o(" \u2014 "),$R=a("a"),Zvo=o("FNetForTokenClassification"),e1o=o(" (FNet model)"),o1o=l(),Fv=a("li"),_Y=a("strong"),t1o=o("funnel"),r1o=o(" \u2014 "),IR=a("a"),a1o=o("FunnelForTokenClassification"),n1o=o(" (Funnel Transformer model)"),s1o=l(),Mv=a("li"),vY=a("strong"),l1o=o("gpt2"),i1o=o(" \u2014 "),jR=a("a"),d1o=o("GPT2ForTokenClassification"),m1o=o(" (OpenAI GPT-2 model)"),f1o=l(),Ev=a("li"),bY=a("strong"),c1o=o("ibert"),g1o=o(" \u2014 "),NR=a("a"),h1o=o("IBertForTokenClassification"),u1o=o(" (I-BERT model)"),p1o=l(),Cv=a("li"),TY=a("strong"),_1o=o("layoutlm"),v1o=o(" \u2014 "),DR=a("a"),b1o=o("LayoutLMForTokenClassification"),T1o=o(" (LayoutLM model)"),F1o=l(),yv=a("li"),FY=a("strong"),M1o=o("layoutlmv2"),E1o=o(" \u2014 "),GR=a("a"),C1o=o("LayoutLMv2ForTokenClassification"),y1o=o(" (LayoutLMv2 model)"),w1o=l(),wv=a("li"),MY=a("strong"),A1o=o("longformer"),x1o=o(" \u2014 "),OR=a("a"),L1o=o("LongformerForTokenClassification"),B1o=o(" (Longformer model)"),k1o=l(),Av=a("li"),EY=a("strong"),R1o=o("megatron-bert"),S1o=o(" \u2014 "),qR=a("a"),P1o=o("MegatronBertForTokenClassification"),$1o=o(" (MegatronBert 
model)"),I1o=l(),xv=a("li"),CY=a("strong"),j1o=o("mobilebert"),N1o=o(" \u2014 "),zR=a("a"),D1o=o("MobileBertForTokenClassification"),G1o=o(" (MobileBERT model)"),O1o=l(),Lv=a("li"),yY=a("strong"),q1o=o("mpnet"),z1o=o(" \u2014 "),XR=a("a"),X1o=o("MPNetForTokenClassification"),W1o=o(" (MPNet model)"),V1o=l(),Bv=a("li"),wY=a("strong"),Q1o=o("qdqbert"),H1o=o(" \u2014 "),WR=a("a"),U1o=o("QDQBertForTokenClassification"),J1o=o(" (QDQBert model)"),K1o=l(),kv=a("li"),AY=a("strong"),Y1o=o("rembert"),Z1o=o(" \u2014 "),VR=a("a"),e2o=o("RemBertForTokenClassification"),o2o=o(" (RemBERT model)"),t2o=l(),Rv=a("li"),xY=a("strong"),r2o=o("roberta"),a2o=o(" \u2014 "),QR=a("a"),n2o=o("RobertaForTokenClassification"),s2o=o(" (RoBERTa model)"),l2o=l(),Sv=a("li"),LY=a("strong"),i2o=o("roformer"),d2o=o(" \u2014 "),HR=a("a"),m2o=o("RoFormerForTokenClassification"),f2o=o(" (RoFormer model)"),c2o=l(),Pv=a("li"),BY=a("strong"),g2o=o("squeezebert"),h2o=o(" \u2014 "),UR=a("a"),u2o=o("SqueezeBertForTokenClassification"),p2o=o(" (SqueezeBERT model)"),_2o=l(),$v=a("li"),kY=a("strong"),v2o=o("xlm"),b2o=o(" \u2014 "),JR=a("a"),T2o=o("XLMForTokenClassification"),F2o=o(" (XLM model)"),M2o=l(),Iv=a("li"),RY=a("strong"),E2o=o("xlm-roberta"),C2o=o(" \u2014 "),KR=a("a"),y2o=o("XLMRobertaForTokenClassification"),w2o=o(" (XLM-RoBERTa model)"),A2o=l(),jv=a("li"),SY=a("strong"),x2o=o("xlnet"),L2o=o(" \u2014 "),YR=a("a"),B2o=o("XLNetForTokenClassification"),k2o=o(" (XLNet model)"),R2o=l(),Nv=a("p"),S2o=o("The model is set in evaluation mode by default using "),PY=a("em"),P2o=o("model.eval()"),$2o=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),$Y=a("em"),I2o=o("model.train()"),j2o=l(),IY=a("p"),N2o=o("Examples:"),D2o=l(),f(vE.$$.fragment),JEe=l(),Si=a("h2"),Dv=a("a"),jY=a("span"),f(bE.$$.fragment),G2o=l(),NY=a("span"),O2o=o("AutoModelForQuestionAnswering"),KEe=l(),zo=a("div"),f(TE.$$.fragment),q2o=l(),Pi=a("p"),z2o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a question answering head) when created with the `),DY=a("code"),X2o=o("from_pretrained()"),W2o=o(` class method or the `),GY=a("code"),V2o=o("from_config()"),Q2o=o(" class method."),H2o=l(),FE=a("p"),U2o=o("This class cannot be instantiated directly using "),OY=a("code"),J2o=o("__init__()"),K2o=o(" (throws an error)."),Y2o=l(),$t=a("div"),f(ME.$$.fragment),Z2o=l(),qY=a("p"),ebo=o("Instantiates one of the model classes of the library (with a question answering head) from a configuration."),obo=l(),$i=a("p"),tbo=o(`Note: Loading a model from its configuration file does `),zY=a("strong"),rbo=o("not"),abo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),XY=a("em"),nbo=o("~AutoModelForQuestionAnswering.from_pretrained"),sbo=o(`] to load the model weights.`),lbo=l(),WY=a("p"),ibo=o("Examples:"),dbo=l(),f(EE.$$.fragment),mbo=l(),je=a("div"),f(CE.$$.fragment),fbo=l(),VY=a("p"),cbo=o("Instantiate one of the model classes of the library (with a question answering head) from a pretrained model."),gbo=l(),Ra=a("p"),hbo=o("The model class to instantiate is selected based on the "),QY=a("em"),ubo=o("model_type"),pbo=o(` property of the config object (either passed as an argument or loaded from `),HY=a("em"),_bo=o("pretrained_model_name_or_path"),vbo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),UY=a("em"),bbo=o("pretrained_model_name_or_path"),Tbo=o(":"),Fbo=l(),R=a("ul"),Gv=a("li"),JY=a("strong"),Mbo=o("albert"),Ebo=o(" \u2014 "),ZR=a("a"),Cbo=o("AlbertForQuestionAnswering"),ybo=o(" (ALBERT model)"),wbo=l(),Ov=a("li"),KY=a("strong"),Abo=o("bart"),xbo=o(" \u2014 "),eS=a("a"),Lbo=o("BartForQuestionAnswering"),Bbo=o(" (BART model)"),kbo=l(),qv=a("li"),YY=a("strong"),Rbo=o("bert"),Sbo=o(" \u2014 "),oS=a("a"),Pbo=o("BertForQuestionAnswering"),$bo=o(" (BERT model)"),Ibo=l(),zv=a("li"),ZY=a("strong"),jbo=o("big_bird"),Nbo=o(" \u2014 "),tS=a("a"),Dbo=o("BigBirdForQuestionAnswering"),Gbo=o(" (BigBird model)"),Obo=l(),Xv=a("li"),eZ=a("strong"),qbo=o("bigbird_pegasus"),zbo=o(" \u2014 "),rS=a("a"),Xbo=o("BigBirdPegasusForQuestionAnswering"),Wbo=o(" (BigBirdPegasus model)"),Vbo=l(),Wv=a("li"),oZ=a("strong"),Qbo=o("camembert"),Hbo=o(" \u2014 "),aS=a("a"),Ubo=o("CamembertForQuestionAnswering"),Jbo=o(" (CamemBERT model)"),Kbo=l(),Vv=a("li"),tZ=a("strong"),Ybo=o("canine"),Zbo=o(" \u2014 "),nS=a("a"),e4o=o("CanineForQuestionAnswering"),o4o=o(" (Canine model)"),t4o=l(),Qv=a("li"),rZ=a("strong"),r4o=o("convbert"),a4o=o(" \u2014 "),sS=a("a"),n4o=o("ConvBertForQuestionAnswering"),s4o=o(" (ConvBERT model)"),l4o=l(),Hv=a("li"),aZ=a("strong"),i4o=o("deberta"),d4o=o(" \u2014 "),lS=a("a"),m4o=o("DebertaForQuestionAnswering"),f4o=o(" (DeBERTa model)"),c4o=l(),Uv=a("li"),nZ=a("strong"),g4o=o("deberta-v2"),h4o=o(" \u2014 "),iS=a("a"),u4o=o("DebertaV2ForQuestionAnswering"),p4o=o(" (DeBERTa-v2 model)"),_4o=l(),Jv=a("li"),sZ=a("strong"),v4o=o("distilbert"),b4o=o(" \u2014 "),dS=a("a"),T4o=o("DistilBertForQuestionAnswering"),F4o=o(" (DistilBERT model)"),M4o=l(),Kv=a("li"),lZ=a("strong"),E4o=o("electra"),C4o=o(" \u2014 "),mS=a("a"),y4o=o("ElectraForQuestionAnswering"),w4o=o(" (ELECTRA model)"),A4o=l(),Yv=a("li"),iZ=a("strong"),x4o=o("flaubert"),L4o=o(" \u2014 "),fS=a("a"),B4o=o("FlaubertForQuestionAnsweringSimple"),k4o=o(" (FlauBERT model)"),R4o=l(),Zv=a("li"),dZ=a("strong"),S4o=o("fnet"),P4o=o(" \u2014 "),cS=a("a"),$4o=o("FNetForQuestionAnswering"),I4o=o(" (FNet model)"),j4o=l(),e1=a("li"),mZ=a("strong"),N4o=o("funnel"),D4o=o(" \u2014 "),gS=a("a"),G4o=o("FunnelForQuestionAnswering"),O4o=o(" (Funnel Transformer model)"),q4o=l(),o1=a("li"),fZ=a("strong"),z4o=o("gptj"),X4o=o(" \u2014 "),hS=a("a"),W4o=o("GPTJForQuestionAnswering"),V4o=o(" (GPT-J model)"),Q4o=l(),t1=a("li"),cZ=a("strong"),H4o=o("ibert"),U4o=o(" \u2014 "),uS=a("a"),J4o=o("IBertForQuestionAnswering"),K4o=o(" (I-BERT model)"),Y4o=l(),r1=a("li"),gZ=a("strong"),Z4o=o("layoutlmv2"),e5o=o(" \u2014 "),pS=a("a"),o5o=o("LayoutLMv2ForQuestionAnswering"),t5o=o(" (LayoutLMv2 model)"),r5o=l(),a1=a("li"),hZ=a("strong"),a5o=o("led"),n5o=o(" \u2014 "),_S=a("a"),s5o=o("LEDForQuestionAnswering"),l5o=o(" (LED model)"),i5o=l(),n1=a("li"),uZ=a("strong"),d5o=o("longformer"),m5o=o(" \u2014 
"),vS=a("a"),f5o=o("LongformerForQuestionAnswering"),c5o=o(" (Longformer model)"),g5o=l(),s1=a("li"),pZ=a("strong"),h5o=o("lxmert"),u5o=o(" \u2014 "),bS=a("a"),p5o=o("LxmertForQuestionAnswering"),_5o=o(" (LXMERT model)"),v5o=l(),l1=a("li"),_Z=a("strong"),b5o=o("mbart"),T5o=o(" \u2014 "),TS=a("a"),F5o=o("MBartForQuestionAnswering"),M5o=o(" (mBART model)"),E5o=l(),i1=a("li"),vZ=a("strong"),C5o=o("megatron-bert"),y5o=o(" \u2014 "),FS=a("a"),w5o=o("MegatronBertForQuestionAnswering"),A5o=o(" (MegatronBert model)"),x5o=l(),d1=a("li"),bZ=a("strong"),L5o=o("mobilebert"),B5o=o(" \u2014 "),MS=a("a"),k5o=o("MobileBertForQuestionAnswering"),R5o=o(" (MobileBERT model)"),S5o=l(),m1=a("li"),TZ=a("strong"),P5o=o("mpnet"),$5o=o(" \u2014 "),ES=a("a"),I5o=o("MPNetForQuestionAnswering"),j5o=o(" (MPNet model)"),N5o=l(),f1=a("li"),FZ=a("strong"),D5o=o("qdqbert"),G5o=o(" \u2014 "),CS=a("a"),O5o=o("QDQBertForQuestionAnswering"),q5o=o(" (QDQBert model)"),z5o=l(),c1=a("li"),MZ=a("strong"),X5o=o("reformer"),W5o=o(" \u2014 "),yS=a("a"),V5o=o("ReformerForQuestionAnswering"),Q5o=o(" (Reformer model)"),H5o=l(),g1=a("li"),EZ=a("strong"),U5o=o("rembert"),J5o=o(" \u2014 "),wS=a("a"),K5o=o("RemBertForQuestionAnswering"),Y5o=o(" (RemBERT model)"),Z5o=l(),h1=a("li"),CZ=a("strong"),e0o=o("roberta"),o0o=o(" \u2014 "),AS=a("a"),t0o=o("RobertaForQuestionAnswering"),r0o=o(" (RoBERTa model)"),a0o=l(),u1=a("li"),yZ=a("strong"),n0o=o("roformer"),s0o=o(" \u2014 "),xS=a("a"),l0o=o("RoFormerForQuestionAnswering"),i0o=o(" (RoFormer model)"),d0o=l(),p1=a("li"),wZ=a("strong"),m0o=o("splinter"),f0o=o(" \u2014 "),LS=a("a"),c0o=o("SplinterForQuestionAnswering"),g0o=o(" (Splinter model)"),h0o=l(),_1=a("li"),AZ=a("strong"),u0o=o("squeezebert"),p0o=o(" \u2014 "),BS=a("a"),_0o=o("SqueezeBertForQuestionAnswering"),v0o=o(" (SqueezeBERT model)"),b0o=l(),v1=a("li"),xZ=a("strong"),T0o=o("xlm"),F0o=o(" \u2014 "),kS=a("a"),M0o=o("XLMForQuestionAnsweringSimple"),E0o=o(" (XLM model)"),C0o=l(),b1=a("li"),LZ=a("strong"),y0o=o("xlm-roberta"),w0o=o(" \u2014 "),RS=a("a"),A0o=o("XLMRobertaForQuestionAnswering"),x0o=o(" (XLM-RoBERTa model)"),L0o=l(),T1=a("li"),BZ=a("strong"),B0o=o("xlnet"),k0o=o(" \u2014 "),SS=a("a"),R0o=o("XLNetForQuestionAnsweringSimple"),S0o=o(" (XLNet model)"),P0o=l(),F1=a("p"),$0o=o("The model is set in evaluation mode by default using "),kZ=a("em"),I0o=o("model.eval()"),j0o=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),RZ=a("em"),N0o=o("model.train()"),D0o=l(),SZ=a("p"),G0o=o("Examples:"),O0o=l(),f(yE.$$.fragment),YEe=l(),Ii=a("h2"),M1=a("a"),PZ=a("span"),f(wE.$$.fragment),q0o=l(),$Z=a("span"),z0o=o("AutoModelForTableQuestionAnswering"),ZEe=l(),Xo=a("div"),f(AE.$$.fragment),X0o=l(),ji=a("p"),W0o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a table question answering head) when created with the `),IZ=a("code"),V0o=o("from_pretrained()"),Q0o=o(` class method or the `),jZ=a("code"),H0o=o("from_config()"),U0o=o(" class method."),J0o=l(),xE=a("p"),K0o=o("This class cannot be instantiated directly using "),NZ=a("code"),Y0o=o("__init__()"),Z0o=o(" (throws an error)."),eTo=l(),It=a("div"),f(LE.$$.fragment),oTo=l(),DZ=a("p"),tTo=o("Instantiates one of the model classes of the library (with a table question answering head) from a configuration."),rTo=l(),Ni=a("p"),aTo=o(`Note: Loading a model from its configuration file does `),GZ=a("strong"),nTo=o("not"),sTo=o(` load the model weights. 
It only affects the model\u2019s configuration. Use [`),OZ=a("em"),lTo=o("~AutoModelForTableQuestionAnswering.from_pretrained"),iTo=o(`] to load the model weights.`),dTo=l(),qZ=a("p"),mTo=o("Examples:"),fTo=l(),f(BE.$$.fragment),cTo=l(),Ne=a("div"),f(kE.$$.fragment),gTo=l(),zZ=a("p"),hTo=o("Instantiate one of the model classes of the library (with a table question answering head) from a pretrained model."),uTo=l(),Sa=a("p"),pTo=o("The model class to instantiate is selected based on the "),XZ=a("em"),_To=o("model_type"),vTo=o(` property of the config object (either passed as an argument or loaded from `),WZ=a("em"),bTo=o("pretrained_model_name_or_path"),TTo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),VZ=a("em"),FTo=o("pretrained_model_name_or_path"),MTo=o(":"),ETo=l(),QZ=a("ul"),E1=a("li"),HZ=a("strong"),CTo=o("tapas"),yTo=o(" \u2014 "),PS=a("a"),wTo=o("TapasForQuestionAnswering"),ATo=o(" (TAPAS model)"),xTo=l(),C1=a("p"),LTo=o("The model is set in evaluation mode by default using "),UZ=a("em"),BTo=o("model.eval()"),kTo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),JZ=a("em"),RTo=o("model.train()"),STo=l(),KZ=a("p"),PTo=o("Examples:"),$To=l(),f(RE.$$.fragment),eCe=l(),Di=a("h2"),y1=a("a"),YZ=a("span"),f(SE.$$.fragment),ITo=l(),ZZ=a("span"),jTo=o("AutoModelForImageClassification"),oCe=l(),Wo=a("div"),f(PE.$$.fragment),NTo=l(),Gi=a("p"),DTo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a image classification head) when created with the `),eee=a("code"),GTo=o("from_pretrained()"),OTo=o(` class method or the `),oee=a("code"),qTo=o("from_config()"),zTo=o(" class method."),XTo=l(),$E=a("p"),WTo=o("This class cannot be instantiated directly using "),tee=a("code"),VTo=o("__init__()"),QTo=o(" (throws an error)."),HTo=l(),jt=a("div"),f(IE.$$.fragment),UTo=l(),ree=a("p"),JTo=o("Instantiates one of the model classes of the library (with a image classification head) from a configuration."),KTo=l(),Oi=a("p"),YTo=o(`Note: Loading a model from its configuration file does `),aee=a("strong"),ZTo=o("not"),eFo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),nee=a("em"),oFo=o("~AutoModelForImageClassification.from_pretrained"),tFo=o(`] to load the model weights.`),rFo=l(),see=a("p"),aFo=o("Examples:"),nFo=l(),f(jE.$$.fragment),sFo=l(),De=a("div"),f(NE.$$.fragment),lFo=l(),lee=a("p"),iFo=o("Instantiate one of the model classes of the library (with a image classification head) from a pretrained model."),dFo=l(),Pa=a("p"),mFo=o("The model class to instantiate is selected based on the "),iee=a("em"),fFo=o("model_type"),cFo=o(` property of the config object (either passed as an argument or loaded from `),dee=a("em"),gFo=o("pretrained_model_name_or_path"),hFo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),mee=a("em"),uFo=o("pretrained_model_name_or_path"),pFo=o(":"),_Fo=l(),Vo=a("ul"),w1=a("li"),fee=a("strong"),vFo=o("beit"),bFo=o(" \u2014 "),$S=a("a"),TFo=o("BeitForImageClassification"),FFo=o(" (BEiT model)"),MFo=l(),is=a("li"),cee=a("strong"),EFo=o("deit"),CFo=o(" \u2014 "),IS=a("a"),yFo=o("DeiTForImageClassification"),wFo=o(" or "),jS=a("a"),AFo=o("DeiTForImageClassificationWithTeacher"),xFo=o(" (DeiT model)"),LFo=l(),A1=a("li"),gee=a("strong"),BFo=o("imagegpt"),kFo=o(" \u2014 "),NS=a("a"),RFo=o("ImageGPTForImageClassification"),SFo=o(" (ImageGPT model)"),PFo=l(),Xr=a("li"),hee=a("strong"),$Fo=o("perceiver"),IFo=o(" \u2014 "),DS=a("a"),jFo=o("PerceiverForImageClassificationLearned"),NFo=o(" or "),GS=a("a"),DFo=o("PerceiverForImageClassificationFourier"),GFo=o(" or "),OS=a("a"),OFo=o("PerceiverForImageClassificationConvProcessing"),qFo=o(" (Perceiver model)"),zFo=l(),x1=a("li"),uee=a("strong"),XFo=o("segformer"),WFo=o(" \u2014 "),qS=a("a"),VFo=o("SegformerForImageClassification"),QFo=o(" (SegFormer model)"),HFo=l(),L1=a("li"),pee=a("strong"),UFo=o("vit"),JFo=o(" \u2014 "),zS=a("a"),KFo=o("ViTForImageClassification"),YFo=o(" (ViT model)"),ZFo=l(),B1=a("p"),eMo=o("The model is set in evaluation mode by default using "),_ee=a("em"),oMo=o("model.eval()"),tMo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),vee=a("em"),rMo=o("model.train()"),aMo=l(),bee=a("p"),nMo=o("Examples:"),sMo=l(),f(DE.$$.fragment),tCe=l(),qi=a("h2"),k1=a("a"),Tee=a("span"),f(GE.$$.fragment),lMo=l(),Fee=a("span"),iMo=o("AutoModelForVision2Seq"),rCe=l(),Qo=a("div"),f(OE.$$.fragment),dMo=l(),zi=a("p"),mMo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a vision-to-text modeling head) when created with the `),Mee=a("code"),fMo=o("from_pretrained()"),cMo=o(` class method or the `),Eee=a("code"),gMo=o("from_config()"),hMo=o(" class method."),uMo=l(),qE=a("p"),pMo=o("This class cannot be instantiated directly using "),Cee=a("code"),_Mo=o("__init__()"),vMo=o(" (throws an error)."),bMo=l(),Nt=a("div"),f(zE.$$.fragment),TMo=l(),yee=a("p"),FMo=o("Instantiates one of the model classes of the library (with a vision-to-text modeling head) from a configuration."),MMo=l(),Xi=a("p"),EMo=o(`Note: Loading a model from its configuration file does `),wee=a("strong"),CMo=o("not"),yMo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Aee=a("em"),wMo=o("~AutoModelForVision2Seq.from_pretrained"),AMo=o(`] to load the model weights.`),xMo=l(),xee=a("p"),LMo=o("Examples:"),BMo=l(),f(XE.$$.fragment),kMo=l(),Ge=a("div"),f(WE.$$.fragment),RMo=l(),Lee=a("p"),SMo=o("Instantiate one of the model classes of the library (with a vision-to-text modeling head) from a pretrained model."),PMo=l(),$a=a("p"),$Mo=o("The model class to instantiate is selected based on the "),Bee=a("em"),IMo=o("model_type"),jMo=o(` property of the config object (either passed as an argument or loaded from `),kee=a("em"),NMo=o("pretrained_model_name_or_path"),DMo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ree=a("em"),GMo=o("pretrained_model_name_or_path"),OMo=o(":"),qMo=l(),See=a("ul"),R1=a("li"),Pee=a("strong"),zMo=o("vision-encoder-decoder"),XMo=o(" \u2014 "),XS=a("a"),WMo=o("VisionEncoderDecoderModel"),VMo=o(" (Vision Encoder decoder model)"),QMo=l(),S1=a("p"),HMo=o("The model is set in evaluation mode by default using "),$ee=a("em"),UMo=o("model.eval()"),JMo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Iee=a("em"),KMo=o("model.train()"),YMo=l(),jee=a("p"),ZMo=o("Examples:"),eEo=l(),f(VE.$$.fragment),aCe=l(),Wi=a("h2"),P1=a("a"),Nee=a("span"),f(QE.$$.fragment),oEo=l(),Dee=a("span"),tEo=o("AutoModelForAudioClassification"),nCe=l(),Ho=a("div"),f(HE.$$.fragment),rEo=l(),Vi=a("p"),aEo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a audio classification head) when created with the `),Gee=a("code"),nEo=o("from_pretrained()"),sEo=o(` class method or the `),Oee=a("code"),lEo=o("from_config()"),iEo=o(" class method."),dEo=l(),UE=a("p"),mEo=o("This class cannot be instantiated directly using "),qee=a("code"),fEo=o("__init__()"),cEo=o(" (throws an error)."),gEo=l(),Dt=a("div"),f(JE.$$.fragment),hEo=l(),zee=a("p"),uEo=o("Instantiates one of the model classes of the library (with a audio classification head) from a configuration."),pEo=l(),Qi=a("p"),_Eo=o(`Note: Loading a model from its configuration file does `),Xee=a("strong"),vEo=o("not"),bEo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Wee=a("em"),TEo=o("~AutoModelForAudioClassification.from_pretrained"),FEo=o(`] to load the model weights.`),MEo=l(),Vee=a("p"),EEo=o("Examples:"),CEo=l(),f(KE.$$.fragment),yEo=l(),Oe=a("div"),f(YE.$$.fragment),wEo=l(),Qee=a("p"),AEo=o("Instantiate one of the model classes of the library (with a audio classification head) from a pretrained model."),xEo=l(),Ia=a("p"),LEo=o("The model class to instantiate is selected based on the "),Hee=a("em"),BEo=o("model_type"),kEo=o(` property of the config object (either passed as an argument or loaded from `),Uee=a("em"),REo=o("pretrained_model_name_or_path"),SEo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Jee=a("em"),PEo=o("pretrained_model_name_or_path"),$Eo=o(":"),IEo=l(),Ke=a("ul"),$1=a("li"),Kee=a("strong"),jEo=o("hubert"),NEo=o(" \u2014 "),WS=a("a"),DEo=o("HubertForSequenceClassification"),GEo=o(" (Hubert model)"),OEo=l(),I1=a("li"),Yee=a("strong"),qEo=o("sew"),zEo=o(" \u2014 "),VS=a("a"),XEo=o("SEWForSequenceClassification"),WEo=o(" (SEW model)"),VEo=l(),j1=a("li"),Zee=a("strong"),QEo=o("sew-d"),HEo=o(" \u2014 "),QS=a("a"),UEo=o("SEWDForSequenceClassification"),JEo=o(" (SEW-D model)"),KEo=l(),N1=a("li"),eoe=a("strong"),YEo=o("unispeech"),ZEo=o(" \u2014 "),HS=a("a"),eCo=o("UniSpeechForSequenceClassification"),oCo=o(" (UniSpeech model)"),tCo=l(),D1=a("li"),ooe=a("strong"),rCo=o("unispeech-sat"),aCo=o(" \u2014 "),US=a("a"),nCo=o("UniSpeechSatForSequenceClassification"),sCo=o(" (UniSpeechSat model)"),lCo=l(),G1=a("li"),toe=a("strong"),iCo=o("wav2vec2"),dCo=o(" \u2014 "),JS=a("a"),mCo=o("Wav2Vec2ForSequenceClassification"),fCo=o(" (Wav2Vec2 model)"),cCo=l(),O1=a("li"),roe=a("strong"),gCo=o("wavlm"),hCo=o(" \u2014 "),KS=a("a"),uCo=o("WavLMForSequenceClassification"),pCo=o(" (WavLM model)"),_Co=l(),q1=a("p"),vCo=o("The model is set in evaluation mode by default using "),aoe=a("em"),bCo=o("model.eval()"),TCo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),noe=a("em"),FCo=o("model.train()"),MCo=l(),soe=a("p"),ECo=o("Examples:"),CCo=l(),f(ZE.$$.fragment),sCe=l(),Hi=a("h2"),z1=a("a"),loe=a("span"),f(eC.$$.fragment),yCo=l(),ioe=a("span"),wCo=o("AutoModelForAudioFrameClassification"),lCe=l(),Uo=a("div"),f(oC.$$.fragment),ACo=l(),Ui=a("p"),xCo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a audio frame (token) classification head) when created with the `),doe=a("code"),LCo=o("from_pretrained()"),BCo=o(` class method or the `),moe=a("code"),kCo=o("from_config()"),RCo=o(" class method."),SCo=l(),tC=a("p"),PCo=o("This class cannot be instantiated directly using "),foe=a("code"),$Co=o("__init__()"),ICo=o(" (throws an error)."),jCo=l(),Gt=a("div"),f(rC.$$.fragment),NCo=l(),coe=a("p"),DCo=o("Instantiates one of the model classes of the library (with a audio frame (token) classification head) from a configuration."),GCo=l(),Ji=a("p"),OCo=o(`Note: Loading a model from its configuration file does `),goe=a("strong"),qCo=o("not"),zCo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),hoe=a("em"),XCo=o("~AutoModelForAudioFrameClassification.from_pretrained"),WCo=o(`] to load the model weights.`),VCo=l(),uoe=a("p"),QCo=o("Examples:"),HCo=l(),f(aC.$$.fragment),UCo=l(),qe=a("div"),f(nC.$$.fragment),JCo=l(),poe=a("p"),KCo=o("Instantiate one of the model classes of the library (with a audio frame (token) classification head) from a pretrained model."),YCo=l(),ja=a("p"),ZCo=o("The model class to instantiate is selected based on the "),_oe=a("em"),e3o=o("model_type"),o3o=o(` property of the config object (either passed as an argument or loaded from `),voe=a("em"),t3o=o("pretrained_model_name_or_path"),r3o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),boe=a("em"),a3o=o("pretrained_model_name_or_path"),n3o=o(":"),s3o=l(),Ki=a("ul"),X1=a("li"),Toe=a("strong"),l3o=o("unispeech-sat"),i3o=o(" \u2014 "),YS=a("a"),d3o=o("UniSpeechSatForAudioFrameClassification"),m3o=o(" (UniSpeechSat model)"),f3o=l(),W1=a("li"),Foe=a("strong"),c3o=o("wav2vec2"),g3o=o(" \u2014 "),ZS=a("a"),h3o=o("Wav2Vec2ForAudioFrameClassification"),u3o=o(" (Wav2Vec2 model)"),p3o=l(),V1=a("li"),Moe=a("strong"),_3o=o("wavlm"),v3o=o(" \u2014 "),eP=a("a"),b3o=o("WavLMForAudioFrameClassification"),T3o=o(" (WavLM model)"),F3o=l(),Q1=a("p"),M3o=o("The model is set in evaluation mode by default using "),Eoe=a("em"),E3o=o("model.eval()"),C3o=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Coe=a("em"),y3o=o("model.train()"),w3o=l(),yoe=a("p"),A3o=o("Examples:"),x3o=l(),f(sC.$$.fragment),iCe=l(),Yi=a("h2"),H1=a("a"),woe=a("span"),f(lC.$$.fragment),L3o=l(),Aoe=a("span"),B3o=o("AutoModelForCTC"),dCe=l(),Jo=a("div"),f(iC.$$.fragment),k3o=l(),Zi=a("p"),R3o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a connectionist temporal classification head) when created with the `),xoe=a("code"),S3o=o("from_pretrained()"),P3o=o(` class method or the `),Loe=a("code"),$3o=o("from_config()"),I3o=o(" class method."),j3o=l(),dC=a("p"),N3o=o("This class cannot be instantiated directly using "),Boe=a("code"),D3o=o("__init__()"),G3o=o(" (throws an error)."),O3o=l(),Ot=a("div"),f(mC.$$.fragment),q3o=l(),koe=a("p"),z3o=o("Instantiates one of the model classes of the library (with a connectionist temporal classification head) from a configuration."),X3o=l(),ed=a("p"),W3o=o(`Note: Loading a model from its configuration file does `),Roe=a("strong"),V3o=o("not"),Q3o=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Soe=a("em"),H3o=o("~AutoModelForCTC.from_pretrained"),U3o=o(`] to load the model weights.`),J3o=l(),Poe=a("p"),K3o=o("Examples:"),Y3o=l(),f(fC.$$.fragment),Z3o=l(),ze=a("div"),f(cC.$$.fragment),eyo=l(),$oe=a("p"),oyo=o("Instantiate one of the model classes of the library (with a connectionist temporal classification head) from a pretrained model."),tyo=l(),Na=a("p"),ryo=o("The model class to instantiate is selected based on the "),Ioe=a("em"),ayo=o("model_type"),nyo=o(` property of the config object (either passed as an argument or loaded from `),joe=a("em"),syo=o("pretrained_model_name_or_path"),lyo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Noe=a("em"),iyo=o("pretrained_model_name_or_path"),dyo=o(":"),myo=l(),Ye=a("ul"),U1=a("li"),Doe=a("strong"),fyo=o("hubert"),cyo=o(" \u2014 "),oP=a("a"),gyo=o("HubertForCTC"),hyo=o(" (Hubert model)"),uyo=l(),J1=a("li"),Goe=a("strong"),pyo=o("sew"),_yo=o(" \u2014 "),tP=a("a"),vyo=o("SEWForCTC"),byo=o(" (SEW model)"),Tyo=l(),K1=a("li"),Ooe=a("strong"),Fyo=o("sew-d"),Myo=o(" \u2014 "),rP=a("a"),Eyo=o("SEWDForCTC"),Cyo=o(" (SEW-D model)"),yyo=l(),Y1=a("li"),qoe=a("strong"),wyo=o("unispeech"),Ayo=o(" \u2014 "),aP=a("a"),xyo=o("UniSpeechForCTC"),Lyo=o(" (UniSpeech model)"),Byo=l(),Z1=a("li"),zoe=a("strong"),kyo=o("unispeech-sat"),Ryo=o(" \u2014 "),nP=a("a"),Syo=o("UniSpeechSatForCTC"),Pyo=o(" (UniSpeechSat model)"),$yo=l(),e2=a("li"),Xoe=a("strong"),Iyo=o("wav2vec2"),jyo=o(" \u2014 "),sP=a("a"),Nyo=o("Wav2Vec2ForCTC"),Dyo=o(" (Wav2Vec2 model)"),Gyo=l(),o2=a("li"),Woe=a("strong"),Oyo=o("wavlm"),qyo=o(" \u2014 "),lP=a("a"),zyo=o("WavLMForCTC"),Xyo=o(" (WavLM model)"),Wyo=l(),t2=a("p"),Vyo=o("The model is set in evaluation mode by default using "),Voe=a("em"),Qyo=o("model.eval()"),Hyo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Qoe=a("em"),Uyo=o("model.train()"),Jyo=l(),Hoe=a("p"),Kyo=o("Examples:"),Yyo=l(),f(gC.$$.fragment),mCe=l(),od=a("h2"),r2=a("a"),Uoe=a("span"),f(hC.$$.fragment),Zyo=l(),Joe=a("span"),ewo=o("AutoModelForSpeechSeq2Seq"),fCe=l(),Ko=a("div"),f(uC.$$.fragment),owo=l(),td=a("p"),two=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence speech-to-text modeing head) when created with the `),Koe=a("code"),rwo=o("from_pretrained()"),awo=o(` class method or the `),Yoe=a("code"),nwo=o("from_config()"),swo=o(" class method."),lwo=l(),pC=a("p"),iwo=o("This class cannot be instantiated directly using "),Zoe=a("code"),dwo=o("__init__()"),mwo=o(" (throws an error)."),fwo=l(),qt=a("div"),f(_C.$$.fragment),cwo=l(),ete=a("p"),gwo=o("Instantiates one of the model classes of the library (with a sequence-to-sequence speech-to-text modeing head) from a configuration."),hwo=l(),rd=a("p"),uwo=o(`Note: Loading a model from its configuration file does `),ote=a("strong"),pwo=o("not"),_wo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),tte=a("em"),vwo=o("~AutoModelForSpeechSeq2Seq.from_pretrained"),bwo=o(`] to load the model weights.`),Two=l(),rte=a("p"),Fwo=o("Examples:"),Mwo=l(),f(vC.$$.fragment),Ewo=l(),Xe=a("div"),f(bC.$$.fragment),Cwo=l(),ate=a("p"),ywo=o("Instantiate one of the model classes of the library (with a sequence-to-sequence speech-to-text modeing head) from a pretrained model."),wwo=l(),Da=a("p"),Awo=o("The model class to instantiate is selected based on the "),nte=a("em"),xwo=o("model_type"),Lwo=o(` property of the config object (either passed as an argument or loaded from `),ste=a("em"),Bwo=o("pretrained_model_name_or_path"),kwo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),lte=a("em"),Rwo=o("pretrained_model_name_or_path"),Swo=o(":"),Pwo=l(),TC=a("ul"),a2=a("li"),ite=a("strong"),$wo=o("speech-encoder-decoder"),Iwo=o(" \u2014 "),iP=a("a"),jwo=o("SpeechEncoderDecoderModel"),Nwo=o(" (Speech Encoder decoder model)"),Dwo=l(),n2=a("li"),dte=a("strong"),Gwo=o("speech_to_text"),Owo=o(" \u2014 "),dP=a("a"),qwo=o("Speech2TextForConditionalGeneration"),zwo=o(" (Speech2Text model)"),Xwo=l(),s2=a("p"),Wwo=o("The model is set in evaluation mode by default using "),mte=a("em"),Vwo=o("model.eval()"),Qwo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),fte=a("em"),Hwo=o("model.train()"),Uwo=l(),cte=a("p"),Jwo=o("Examples:"),Kwo=l(),f(FC.$$.fragment),cCe=l(),ad=a("h2"),l2=a("a"),gte=a("span"),f(MC.$$.fragment),Ywo=l(),hte=a("span"),Zwo=o("AutoModelForAudioXVector"),gCe=l(),Yo=a("div"),f(EC.$$.fragment),eAo=l(),nd=a("p"),oAo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a audio retrieval via x-vector head) when created with the `),ute=a("code"),tAo=o("from_pretrained()"),rAo=o(` class method or the `),pte=a("code"),aAo=o("from_config()"),nAo=o(" class method."),sAo=l(),CC=a("p"),lAo=o("This class cannot be instantiated directly using "),_te=a("code"),iAo=o("__init__()"),dAo=o(" (throws an error)."),mAo=l(),zt=a("div"),f(yC.$$.fragment),fAo=l(),vte=a("p"),cAo=o("Instantiates one of the model classes of the library (with a audio retrieval via x-vector head) from a configuration."),gAo=l(),sd=a("p"),hAo=o(`Note: Loading a model from its configuration file does `),bte=a("strong"),uAo=o("not"),pAo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Tte=a("em"),_Ao=o("~AutoModelForAudioXVector.from_pretrained"),vAo=o(`] to load the model weights.`),bAo=l(),Fte=a("p"),TAo=o("Examples:"),FAo=l(),f(wC.$$.fragment),MAo=l(),We=a("div"),f(AC.$$.fragment),EAo=l(),Mte=a("p"),CAo=o("Instantiate one of the model classes of the library (with a audio retrieval via x-vector head) from a pretrained model."),yAo=l(),Ga=a("p"),wAo=o("The model class to instantiate is selected based on the "),Ete=a("em"),AAo=o("model_type"),xAo=o(` property of the config object (either passed as an argument or loaded from `),Cte=a("em"),LAo=o("pretrained_model_name_or_path"),BAo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),yte=a("em"),kAo=o("pretrained_model_name_or_path"),RAo=o(":"),SAo=l(),ld=a("ul"),i2=a("li"),wte=a("strong"),PAo=o("unispeech-sat"),$Ao=o(" \u2014 "),mP=a("a"),IAo=o("UniSpeechSatForXVector"),jAo=o(" (UniSpeechSat model)"),NAo=l(),d2=a("li"),Ate=a("strong"),DAo=o("wav2vec2"),GAo=o(" \u2014 "),fP=a("a"),OAo=o("Wav2Vec2ForXVector"),qAo=o(" (Wav2Vec2 model)"),zAo=l(),m2=a("li"),xte=a("strong"),XAo=o("wavlm"),WAo=o(" \u2014 "),cP=a("a"),VAo=o("WavLMForXVector"),QAo=o(" (WavLM model)"),HAo=l(),f2=a("p"),UAo=o("The model is set in evaluation mode by default using "),Lte=a("em"),JAo=o("model.eval()"),KAo=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Bte=a("em"),YAo=o("model.train()"),ZAo=l(),kte=a("p"),e7o=o("Examples:"),o7o=l(),f(xC.$$.fragment),hCe=l(),id=a("h2"),c2=a("a"),Rte=a("span"),f(LC.$$.fragment),t7o=l(),Ste=a("span"),r7o=o("AutoModelForObjectDetection"),uCe=l(),Zo=a("div"),f(BC.$$.fragment),a7o=l(),dd=a("p"),n7o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a object detection head) when created with the `),Pte=a("code"),s7o=o("from_pretrained()"),l7o=o(` class method or the `),$te=a("code"),i7o=o("from_config()"),d7o=o(" class method."),m7o=l(),kC=a("p"),f7o=o("This class cannot be instantiated directly using "),Ite=a("code"),c7o=o("__init__()"),g7o=o(" (throws an error)."),h7o=l(),Xt=a("div"),f(RC.$$.fragment),u7o=l(),jte=a("p"),p7o=o("Instantiates one of the model classes of the library (with a object detection head) from a configuration."),_7o=l(),md=a("p"),v7o=o(`Note: Loading a model from its configuration file does `),Nte=a("strong"),b7o=o("not"),T7o=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Dte=a("em"),F7o=o("~AutoModelForObjectDetection.from_pretrained"),M7o=o(`] to load the model weights.`),E7o=l(),Gte=a("p"),C7o=o("Examples:"),y7o=l(),f(SC.$$.fragment),w7o=l(),Ve=a("div"),f(PC.$$.fragment),A7o=l(),Ote=a("p"),x7o=o("Instantiate one of the model classes of the library (with a object detection head) from a pretrained model."),L7o=l(),Oa=a("p"),B7o=o("The model class to instantiate is selected based on the "),qte=a("em"),k7o=o("model_type"),R7o=o(` property of the config object (either passed as an argument or loaded from `),zte=a("em"),S7o=o("pretrained_model_name_or_path"),P7o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Xte=a("em"),$7o=o("pretrained_model_name_or_path"),I7o=o(":"),j7o=l(),Wte=a("ul"),g2=a("li"),Vte=a("strong"),N7o=o("detr"),D7o=o(" \u2014 "),gP=a("a"),G7o=o("DetrForObjectDetection"),O7o=o(" (DETR model)"),q7o=l(),h2=a("p"),z7o=o("The model is set in evaluation mode by default using "),Qte=a("em"),X7o=o("model.eval()"),W7o=o(` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Hte=a("em"),V7o=o("model.train()"),Q7o=l(),Ute=a("p"),H7o=o("Examples:"),U7o=l(),f($C.$$.fragment),pCe=l(),fd=a("h2"),u2=a("a"),Jte=a("span"),f(IC.$$.fragment),J7o=l(),Kte=a("span"),K7o=o("AutoModelForImageSegmentation"),_Ce=l(),et=a("div"),f(jC.$$.fragment),Y7o=l(),cd=a("p"),Z7o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a image segmentation head) when created with the `),Yte=a("code"),exo=o("from_pretrained()"),oxo=o(` class method or the `),Zte=a("code"),txo=o("from_config()"),rxo=o(" class method."),axo=l(),NC=a("p"),nxo=o("This class cannot be instantiated directly using "),ere=a("code"),sxo=o("__init__()"),lxo=o(" (throws an error)."),ixo=l(),Wt=a("div"),f(DC.$$.fragment),dxo=l(),ore=a("p"),mxo=o("Instantiates one of the model classes of the library (with a image segmentation head) from a configuration."),fxo=l(),gd=a("p"),cxo=o(`Note: Loading a model from its configuration file does `),tre=a("strong"),gxo=o("not"),hxo=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),rre=a("em"),uxo=o("~AutoModelForImageSegmentation.from_pretrained"),pxo=o(`] to load the model weights.`),_xo=l(),are=a("p"),vxo=o("Examples:"),bxo=l(),f(GC.$$.fragment),Txo=l(),Qe=a("div"),f(OC.$$.fragment),Fxo=l(),nre=a("p"),Mxo=o("Instantiate one of the model classes of the library (with a image segmentation head) from a pretrained model."),Exo=l(),qa=a("p"),Cxo=o("The model class to instantiate is selected based on the "),sre=a("em"),yxo=o("model_type"),wxo=o(` property of the config object (either passed as an argument or loaded from `),lre=a("em"),Axo=o("pretrained_model_name_or_path"),xxo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),ire=a("em"),Lxo=o("pretrained_model_name_or_path"),Bxo=o(":"),kxo=l(),dre=a("ul"),p2=a("li"),mre=a("strong"),Rxo=o("detr"),Sxo=o(" \u2014 "),hP=a("a"),Pxo=o("DetrForSegmentation"),$xo=o(" (DETR model)"),Ixo=l(),_2=a("p"),jxo=o("The model is set in evaluation mode by default using "),fre=a("em"),Nxo=o("model.eval()"),Dxo=o(` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),cre=a("em"),Gxo=o("model.train()"),Oxo=l(),gre=a("p"),qxo=o("Examples:"),zxo=l(),f(qC.$$.fragment),vCe=l(),hd=a("h2"),v2=a("a"),hre=a("span"),f(zC.$$.fragment),Xxo=l(),ure=a("span"),Wxo=o("TFAutoModel"),bCe=l(),ot=a("div"),f(XC.$$.fragment),Vxo=l(),ud=a("p"),Qxo=o(`This is a generic model class that will be instantiated as one of the base model classes of the library when created with the `),pre=a("code"),Hxo=o("from_pretrained()"),Uxo=o(` class method or the `),_re=a("code"),Jxo=o("from_config()"),Kxo=o(" class method."),Yxo=l(),WC=a("p"),Zxo=o("This class cannot be instantiated directly using "),vre=a("code"),e6o=o("__init__()"),o6o=o(" (throws an error)."),t6o=l(),Vt=a("div"),f(VC.$$.fragment),r6o=l(),bre=a("p"),a6o=o("Instantiates one of the base model classes of the library from a configuration."),n6o=l(),pd=a("p"),s6o=o(`Note: Loading a model from its configuration file does `),Tre=a("strong"),l6o=o("not"),i6o=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),Fre=a("em"),d6o=o("~TFAutoModel.from_pretrained"),m6o=o(`] to load the model weights.`),f6o=l(),Mre=a("p"),c6o=o("Examples:"),g6o=l(),f(QC.$$.fragment),h6o=l(),ro=a("div"),f(HC.$$.fragment),u6o=l(),Ere=a("p"),p6o=o("Instantiate one of the base model classes of the library from a pretrained model."),_6o=l(),za=a("p"),v6o=o("The model class to instantiate is selected based on the "),Cre=a("em"),b6o=o("model_type"),T6o=o(` property of the config object (either passed as an argument or loaded from `),yre=a("em"),F6o=o("pretrained_model_name_or_path"),M6o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),wre=a("em"),E6o=o("pretrained_model_name_or_path"),C6o=o(":"),y6o=l(),L=a("ul"),b2=a("li"),Are=a("strong"),w6o=o("albert"),A6o=o(" \u2014 "),uP=a("a"),x6o=o("TFAlbertModel"),L6o=o(" (ALBERT model)"),B6o=l(),T2=a("li"),xre=a("strong"),k6o=o("bart"),R6o=o(" \u2014 "),pP=a("a"),S6o=o("TFBartModel"),P6o=o(" (BART model)"),$6o=l(),F2=a("li"),Lre=a("strong"),I6o=o("bert"),j6o=o(" \u2014 "),_P=a("a"),N6o=o("TFBertModel"),D6o=o(" (BERT model)"),G6o=l(),M2=a("li"),Bre=a("strong"),O6o=o("blenderbot"),q6o=o(" \u2014 "),vP=a("a"),z6o=o("TFBlenderbotModel"),X6o=o(" (Blenderbot model)"),W6o=l(),E2=a("li"),kre=a("strong"),V6o=o("blenderbot-small"),Q6o=o(" \u2014 "),bP=a("a"),H6o=o("TFBlenderbotSmallModel"),U6o=o(" (BlenderbotSmall model)"),J6o=l(),C2=a("li"),Rre=a("strong"),K6o=o("camembert"),Y6o=o(" \u2014 "),TP=a("a"),Z6o=o("TFCamembertModel"),e8o=o(" (CamemBERT model)"),o8o=l(),y2=a("li"),Sre=a("strong"),t8o=o("convbert"),r8o=o(" \u2014 "),FP=a("a"),a8o=o("TFConvBertModel"),n8o=o(" (ConvBERT model)"),s8o=l(),w2=a("li"),Pre=a("strong"),l8o=o("ctrl"),i8o=o(" \u2014 "),MP=a("a"),d8o=o("TFCTRLModel"),m8o=o(" (CTRL model)"),f8o=l(),A2=a("li"),$re=a("strong"),c8o=o("deberta"),g8o=o(" \u2014 "),EP=a("a"),h8o=o("TFDebertaModel"),u8o=o(" (DeBERTa model)"),p8o=l(),x2=a("li"),Ire=a("strong"),_8o=o("deberta-v2"),v8o=o(" \u2014 "),CP=a("a"),b8o=o("TFDebertaV2Model"),T8o=o(" (DeBERTa-v2 model)"),F8o=l(),L2=a("li"),jre=a("strong"),M8o=o("distilbert"),E8o=o(" \u2014 "),yP=a("a"),C8o=o("TFDistilBertModel"),y8o=o(" (DistilBERT model)"),w8o=l(),B2=a("li"),Nre=a("strong"),A8o=o("dpr"),x8o=o(" \u2014 "),wP=a("a"),L8o=o("TFDPRQuestionEncoder"),B8o=o(" (DPR model)"),k8o=l(),k2=a("li"),Dre=a("strong"),R8o=o("electra"),S8o=o(" \u2014 "),AP=a("a"),P8o=o("TFElectraModel"),$8o=o(" (ELECTRA 
model)"),I8o=l(),R2=a("li"),Gre=a("strong"),j8o=o("flaubert"),N8o=o(" \u2014 "),xP=a("a"),D8o=o("TFFlaubertModel"),G8o=o(" (FlauBERT model)"),O8o=l(),ds=a("li"),Ore=a("strong"),q8o=o("funnel"),z8o=o(" \u2014 "),LP=a("a"),X8o=o("TFFunnelModel"),W8o=o(" or "),BP=a("a"),V8o=o("TFFunnelBaseModel"),Q8o=o(" (Funnel Transformer model)"),H8o=l(),S2=a("li"),qre=a("strong"),U8o=o("gpt2"),J8o=o(" \u2014 "),kP=a("a"),K8o=o("TFGPT2Model"),Y8o=o(" (OpenAI GPT-2 model)"),Z8o=l(),P2=a("li"),zre=a("strong"),eLo=o("hubert"),oLo=o(" \u2014 "),RP=a("a"),tLo=o("TFHubertModel"),rLo=o(" (Hubert model)"),aLo=l(),$2=a("li"),Xre=a("strong"),nLo=o("layoutlm"),sLo=o(" \u2014 "),SP=a("a"),lLo=o("TFLayoutLMModel"),iLo=o(" (LayoutLM model)"),dLo=l(),I2=a("li"),Wre=a("strong"),mLo=o("led"),fLo=o(" \u2014 "),PP=a("a"),cLo=o("TFLEDModel"),gLo=o(" (LED model)"),hLo=l(),j2=a("li"),Vre=a("strong"),uLo=o("longformer"),pLo=o(" \u2014 "),$P=a("a"),_Lo=o("TFLongformerModel"),vLo=o(" (Longformer model)"),bLo=l(),N2=a("li"),Qre=a("strong"),TLo=o("lxmert"),FLo=o(" \u2014 "),IP=a("a"),MLo=o("TFLxmertModel"),ELo=o(" (LXMERT model)"),CLo=l(),D2=a("li"),Hre=a("strong"),yLo=o("marian"),wLo=o(" \u2014 "),jP=a("a"),ALo=o("TFMarianModel"),xLo=o(" (Marian model)"),LLo=l(),G2=a("li"),Ure=a("strong"),BLo=o("mbart"),kLo=o(" \u2014 "),NP=a("a"),RLo=o("TFMBartModel"),SLo=o(" (mBART model)"),PLo=l(),O2=a("li"),Jre=a("strong"),$Lo=o("mobilebert"),ILo=o(" \u2014 "),DP=a("a"),jLo=o("TFMobileBertModel"),NLo=o(" (MobileBERT model)"),DLo=l(),q2=a("li"),Kre=a("strong"),GLo=o("mpnet"),OLo=o(" \u2014 "),GP=a("a"),qLo=o("TFMPNetModel"),zLo=o(" (MPNet model)"),XLo=l(),z2=a("li"),Yre=a("strong"),WLo=o("mt5"),VLo=o(" \u2014 "),OP=a("a"),QLo=o("TFMT5Model"),HLo=o(" (mT5 model)"),ULo=l(),X2=a("li"),Zre=a("strong"),JLo=o("openai-gpt"),KLo=o(" \u2014 "),qP=a("a"),YLo=o("TFOpenAIGPTModel"),ZLo=o(" (OpenAI GPT model)"),eBo=l(),W2=a("li"),eae=a("strong"),oBo=o("pegasus"),tBo=o(" \u2014 "),zP=a("a"),rBo=o("TFPegasusModel"),aBo=o(" (Pegasus model)"),nBo=l(),V2=a("li"),oae=a("strong"),sBo=o("rembert"),lBo=o(" \u2014 "),XP=a("a"),iBo=o("TFRemBertModel"),dBo=o(" (RemBERT model)"),mBo=l(),Q2=a("li"),tae=a("strong"),fBo=o("roberta"),cBo=o(" \u2014 "),WP=a("a"),gBo=o("TFRobertaModel"),hBo=o(" (RoBERTa model)"),uBo=l(),H2=a("li"),rae=a("strong"),pBo=o("roformer"),_Bo=o(" \u2014 "),VP=a("a"),vBo=o("TFRoFormerModel"),bBo=o(" (RoFormer model)"),TBo=l(),U2=a("li"),aae=a("strong"),FBo=o("t5"),MBo=o(" \u2014 "),QP=a("a"),EBo=o("TFT5Model"),CBo=o(" (T5 model)"),yBo=l(),J2=a("li"),nae=a("strong"),wBo=o("tapas"),ABo=o(" \u2014 "),HP=a("a"),xBo=o("TFTapasModel"),LBo=o(" (TAPAS model)"),BBo=l(),K2=a("li"),sae=a("strong"),kBo=o("transfo-xl"),RBo=o(" \u2014 "),UP=a("a"),SBo=o("TFTransfoXLModel"),PBo=o(" (Transformer-XL model)"),$Bo=l(),Y2=a("li"),lae=a("strong"),IBo=o("vit"),jBo=o(" \u2014 "),JP=a("a"),NBo=o("TFViTModel"),DBo=o(" (ViT model)"),GBo=l(),Z2=a("li"),iae=a("strong"),OBo=o("wav2vec2"),qBo=o(" \u2014 "),KP=a("a"),zBo=o("TFWav2Vec2Model"),XBo=o(" (Wav2Vec2 model)"),WBo=l(),eb=a("li"),dae=a("strong"),VBo=o("xlm"),QBo=o(" \u2014 "),YP=a("a"),HBo=o("TFXLMModel"),UBo=o(" (XLM model)"),JBo=l(),ob=a("li"),mae=a("strong"),KBo=o("xlm-roberta"),YBo=o(" \u2014 "),ZP=a("a"),ZBo=o("TFXLMRobertaModel"),e9o=o(" (XLM-RoBERTa model)"),o9o=l(),tb=a("li"),fae=a("strong"),t9o=o("xlnet"),r9o=o(" \u2014 "),e$=a("a"),a9o=o("TFXLNetModel"),n9o=o(" (XLNet 
model)"),s9o=l(),cae=a("p"),l9o=o("Examples:"),i9o=l(),f(UC.$$.fragment),TCe=l(),_d=a("h2"),rb=a("a"),gae=a("span"),f(JC.$$.fragment),d9o=l(),hae=a("span"),m9o=o("TFAutoModelForPreTraining"),FCe=l(),tt=a("div"),f(KC.$$.fragment),f9o=l(),vd=a("p"),c9o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a pretraining head) when created with the `),uae=a("code"),g9o=o("from_pretrained()"),h9o=o(` class method or the `),pae=a("code"),u9o=o("from_config()"),p9o=o(" class method."),_9o=l(),YC=a("p"),v9o=o("This class cannot be instantiated directly using "),_ae=a("code"),b9o=o("__init__()"),T9o=o(" (throws an error)."),F9o=l(),Qt=a("div"),f(ZC.$$.fragment),M9o=l(),vae=a("p"),E9o=o("Instantiates one of the model classes of the library (with a pretraining head) from a configuration."),C9o=l(),bd=a("p"),y9o=o(`Note: Loading a model from its configuration file does `),bae=a("strong"),w9o=o("not"),A9o=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),Tae=a("em"),x9o=o("~TFAutoModelForPreTraining.from_pretrained"),L9o=o(`] to load the model weights.`),B9o=l(),Fae=a("p"),k9o=o("Examples:"),R9o=l(),f(e3.$$.fragment),S9o=l(),ao=a("div"),f(o3.$$.fragment),P9o=l(),Mae=a("p"),$9o=o("Instantiate one of the model classes of the library (with a pretraining head) from a pretrained model."),I9o=l(),Xa=a("p"),j9o=o("The model class to instantiate is selected based on the "),Eae=a("em"),N9o=o("model_type"),D9o=o(` property of the config object (either passed as an argument or loaded from `),Cae=a("em"),G9o=o("pretrained_model_name_or_path"),O9o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),yae=a("em"),q9o=o("pretrained_model_name_or_path"),z9o=o(":"),X9o=l(),V=a("ul"),ab=a("li"),wae=a("strong"),W9o=o("albert"),V9o=o(" \u2014 "),o$=a("a"),Q9o=o("TFAlbertForPreTraining"),H9o=o(" (ALBERT model)"),U9o=l(),nb=a("li"),Aae=a("strong"),J9o=o("bart"),K9o=o(" \u2014 "),t$=a("a"),Y9o=o("TFBartForConditionalGeneration"),Z9o=o(" (BART model)"),eko=l(),sb=a("li"),xae=a("strong"),oko=o("bert"),tko=o(" \u2014 "),r$=a("a"),rko=o("TFBertForPreTraining"),ako=o(" (BERT model)"),nko=l(),lb=a("li"),Lae=a("strong"),sko=o("camembert"),lko=o(" \u2014 "),a$=a("a"),iko=o("TFCamembertForMaskedLM"),dko=o(" (CamemBERT model)"),mko=l(),ib=a("li"),Bae=a("strong"),fko=o("ctrl"),cko=o(" \u2014 "),n$=a("a"),gko=o("TFCTRLLMHeadModel"),hko=o(" (CTRL model)"),uko=l(),db=a("li"),kae=a("strong"),pko=o("distilbert"),_ko=o(" \u2014 "),s$=a("a"),vko=o("TFDistilBertForMaskedLM"),bko=o(" (DistilBERT model)"),Tko=l(),mb=a("li"),Rae=a("strong"),Fko=o("electra"),Mko=o(" \u2014 "),l$=a("a"),Eko=o("TFElectraForPreTraining"),Cko=o(" (ELECTRA model)"),yko=l(),fb=a("li"),Sae=a("strong"),wko=o("flaubert"),Ako=o(" \u2014 "),i$=a("a"),xko=o("TFFlaubertWithLMHeadModel"),Lko=o(" (FlauBERT model)"),Bko=l(),cb=a("li"),Pae=a("strong"),kko=o("funnel"),Rko=o(" \u2014 "),d$=a("a"),Sko=o("TFFunnelForPreTraining"),Pko=o(" (Funnel Transformer model)"),$ko=l(),gb=a("li"),$ae=a("strong"),Iko=o("gpt2"),jko=o(" \u2014 "),m$=a("a"),Nko=o("TFGPT2LMHeadModel"),Dko=o(" (OpenAI GPT-2 model)"),Gko=l(),hb=a("li"),Iae=a("strong"),Oko=o("layoutlm"),qko=o(" \u2014 "),f$=a("a"),zko=o("TFLayoutLMForMaskedLM"),Xko=o(" (LayoutLM model)"),Wko=l(),ub=a("li"),jae=a("strong"),Vko=o("lxmert"),Qko=o(" \u2014 "),c$=a("a"),Hko=o("TFLxmertForPreTraining"),Uko=o(" (LXMERT model)"),Jko=l(),pb=a("li"),Nae=a("strong"),Kko=o("mobilebert"),Yko=o(" \u2014 
"),g$=a("a"),Zko=o("TFMobileBertForPreTraining"),eRo=o(" (MobileBERT model)"),oRo=l(),_b=a("li"),Dae=a("strong"),tRo=o("mpnet"),rRo=o(" \u2014 "),h$=a("a"),aRo=o("TFMPNetForMaskedLM"),nRo=o(" (MPNet model)"),sRo=l(),vb=a("li"),Gae=a("strong"),lRo=o("openai-gpt"),iRo=o(" \u2014 "),u$=a("a"),dRo=o("TFOpenAIGPTLMHeadModel"),mRo=o(" (OpenAI GPT model)"),fRo=l(),bb=a("li"),Oae=a("strong"),cRo=o("roberta"),gRo=o(" \u2014 "),p$=a("a"),hRo=o("TFRobertaForMaskedLM"),uRo=o(" (RoBERTa model)"),pRo=l(),Tb=a("li"),qae=a("strong"),_Ro=o("t5"),vRo=o(" \u2014 "),_$=a("a"),bRo=o("TFT5ForConditionalGeneration"),TRo=o(" (T5 model)"),FRo=l(),Fb=a("li"),zae=a("strong"),MRo=o("tapas"),ERo=o(" \u2014 "),v$=a("a"),CRo=o("TFTapasForMaskedLM"),yRo=o(" (TAPAS model)"),wRo=l(),Mb=a("li"),Xae=a("strong"),ARo=o("transfo-xl"),xRo=o(" \u2014 "),b$=a("a"),LRo=o("TFTransfoXLLMHeadModel"),BRo=o(" (Transformer-XL model)"),kRo=l(),Eb=a("li"),Wae=a("strong"),RRo=o("xlm"),SRo=o(" \u2014 "),T$=a("a"),PRo=o("TFXLMWithLMHeadModel"),$Ro=o(" (XLM model)"),IRo=l(),Cb=a("li"),Vae=a("strong"),jRo=o("xlm-roberta"),NRo=o(" \u2014 "),F$=a("a"),DRo=o("TFXLMRobertaForMaskedLM"),GRo=o(" (XLM-RoBERTa model)"),ORo=l(),yb=a("li"),Qae=a("strong"),qRo=o("xlnet"),zRo=o(" \u2014 "),M$=a("a"),XRo=o("TFXLNetLMHeadModel"),WRo=o(" (XLNet model)"),VRo=l(),Hae=a("p"),QRo=o("Examples:"),HRo=l(),f(t3.$$.fragment),MCe=l(),Td=a("h2"),wb=a("a"),Uae=a("span"),f(r3.$$.fragment),URo=l(),Jae=a("span"),JRo=o("TFAutoModelForCausalLM"),ECe=l(),rt=a("div"),f(a3.$$.fragment),KRo=l(),Fd=a("p"),YRo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a causal language modeling head) when created with the `),Kae=a("code"),ZRo=o("from_pretrained()"),eSo=o(` class method or the `),Yae=a("code"),oSo=o("from_config()"),tSo=o(" class method."),rSo=l(),n3=a("p"),aSo=o("This class cannot be instantiated directly using "),Zae=a("code"),nSo=o("__init__()"),sSo=o(" (throws an error)."),lSo=l(),Ht=a("div"),f(s3.$$.fragment),iSo=l(),ene=a("p"),dSo=o("Instantiates one of the model classes of the library (with a causal language modeling head) from a configuration."),mSo=l(),Md=a("p"),fSo=o(`Note: Loading a model from its configuration file does `),one=a("strong"),cSo=o("not"),gSo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),tne=a("em"),hSo=o("~TFAutoModelForCausalLM.from_pretrained"),uSo=o(`] to load the model weights.`),pSo=l(),rne=a("p"),_So=o("Examples:"),vSo=l(),f(l3.$$.fragment),bSo=l(),no=a("div"),f(i3.$$.fragment),TSo=l(),ane=a("p"),FSo=o("Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model."),MSo=l(),Wa=a("p"),ESo=o("The model class to instantiate is selected based on the "),nne=a("em"),CSo=o("model_type"),ySo=o(` property of the config object (either passed as an argument or loaded from `),sne=a("em"),wSo=o("pretrained_model_name_or_path"),ASo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),lne=a("em"),xSo=o("pretrained_model_name_or_path"),LSo=o(":"),BSo=l(),ce=a("ul"),Ab=a("li"),ine=a("strong"),kSo=o("bert"),RSo=o(" \u2014 "),E$=a("a"),SSo=o("TFBertLMHeadModel"),PSo=o(" (BERT model)"),$So=l(),xb=a("li"),dne=a("strong"),ISo=o("ctrl"),jSo=o(" \u2014 "),C$=a("a"),NSo=o("TFCTRLLMHeadModel"),DSo=o(" (CTRL model)"),GSo=l(),Lb=a("li"),mne=a("strong"),OSo=o("gpt2"),qSo=o(" \u2014 "),y$=a("a"),zSo=o("TFGPT2LMHeadModel"),XSo=o(" (OpenAI GPT-2 model)"),WSo=l(),Bb=a("li"),fne=a("strong"),VSo=o("openai-gpt"),QSo=o(" \u2014 "),w$=a("a"),HSo=o("TFOpenAIGPTLMHeadModel"),USo=o(" (OpenAI GPT model)"),JSo=l(),kb=a("li"),cne=a("strong"),KSo=o("rembert"),YSo=o(" \u2014 "),A$=a("a"),ZSo=o("TFRemBertForCausalLM"),ePo=o(" (RemBERT model)"),oPo=l(),Rb=a("li"),gne=a("strong"),tPo=o("roberta"),rPo=o(" \u2014 "),x$=a("a"),aPo=o("TFRobertaForCausalLM"),nPo=o(" (RoBERTa model)"),sPo=l(),Sb=a("li"),hne=a("strong"),lPo=o("roformer"),iPo=o(" \u2014 "),L$=a("a"),dPo=o("TFRoFormerForCausalLM"),mPo=o(" (RoFormer model)"),fPo=l(),Pb=a("li"),une=a("strong"),cPo=o("transfo-xl"),gPo=o(" \u2014 "),B$=a("a"),hPo=o("TFTransfoXLLMHeadModel"),uPo=o(" (Transformer-XL model)"),pPo=l(),$b=a("li"),pne=a("strong"),_Po=o("xlm"),vPo=o(" \u2014 "),k$=a("a"),bPo=o("TFXLMWithLMHeadModel"),TPo=o(" (XLM model)"),FPo=l(),Ib=a("li"),_ne=a("strong"),MPo=o("xlnet"),EPo=o(" \u2014 "),R$=a("a"),CPo=o("TFXLNetLMHeadModel"),yPo=o(" (XLNet model)"),wPo=l(),vne=a("p"),APo=o("Examples:"),xPo=l(),f(d3.$$.fragment),CCe=l(),Ed=a("h2"),jb=a("a"),bne=a("span"),f(m3.$$.fragment),LPo=l(),Tne=a("span"),BPo=o("TFAutoModelForImageClassification"),yCe=l(),at=a("div"),f(f3.$$.fragment),kPo=l(),Cd=a("p"),RPo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a image classification head) when created with the `),Fne=a("code"),SPo=o("from_pretrained()"),PPo=o(` class method or the `),Mne=a("code"),$Po=o("from_config()"),IPo=o(" class method."),jPo=l(),c3=a("p"),NPo=o("This class cannot be instantiated directly using "),Ene=a("code"),DPo=o("__init__()"),GPo=o(" (throws an error)."),OPo=l(),Ut=a("div"),f(g3.$$.fragment),qPo=l(),Cne=a("p"),zPo=o("Instantiates one of the model classes of the library (with a image classification head) from a configuration."),XPo=l(),yd=a("p"),WPo=o(`Note: Loading a model from its configuration file does `),yne=a("strong"),VPo=o("not"),QPo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),wne=a("em"),HPo=o("~TFAutoModelForImageClassification.from_pretrained"),UPo=o(`] to load the model weights.`),JPo=l(),Ane=a("p"),KPo=o("Examples:"),YPo=l(),f(h3.$$.fragment),ZPo=l(),so=a("div"),f(u3.$$.fragment),e$o=l(),xne=a("p"),o$o=o("Instantiate one of the model classes of the library (with a image classification head) from a pretrained model."),t$o=l(),Va=a("p"),r$o=o("The model class to instantiate is selected based on the "),Lne=a("em"),a$o=o("model_type"),n$o=o(` property of the config object (either passed as an argument or loaded from `),Bne=a("em"),s$o=o("pretrained_model_name_or_path"),l$o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),kne=a("em"),i$o=o("pretrained_model_name_or_path"),d$o=o(":"),m$o=l(),Rne=a("ul"),Nb=a("li"),Sne=a("strong"),f$o=o("vit"),c$o=o(" \u2014 "),S$=a("a"),g$o=o("TFViTForImageClassification"),h$o=o(" (ViT model)"),u$o=l(),Pne=a("p"),p$o=o("Examples:"),_$o=l(),f(p3.$$.fragment),wCe=l(),wd=a("h2"),Db=a("a"),$ne=a("span"),f(_3.$$.fragment),v$o=l(),Ine=a("span"),b$o=o("TFAutoModelForMaskedLM"),ACe=l(),nt=a("div"),f(v3.$$.fragment),T$o=l(),Ad=a("p"),F$o=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a masked language modeling head) when created with the `),jne=a("code"),M$o=o("from_pretrained()"),E$o=o(` class method or the `),Nne=a("code"),C$o=o("from_config()"),y$o=o(" class method."),w$o=l(),b3=a("p"),A$o=o("This class cannot be instantiated directly using "),Dne=a("code"),x$o=o("__init__()"),L$o=o(" (throws an error)."),B$o=l(),Jt=a("div"),f(T3.$$.fragment),k$o=l(),Gne=a("p"),R$o=o("Instantiates one of the model classes of the library (with a masked language modeling head) from a configuration."),S$o=l(),xd=a("p"),P$o=o(`Note: Loading a model from its configuration file does `),One=a("strong"),$$o=o("not"),I$o=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),qne=a("em"),j$o=o("~TFAutoModelForMaskedLM.from_pretrained"),N$o=o(`] to load the model weights.`),D$o=l(),zne=a("p"),G$o=o("Examples:"),O$o=l(),f(F3.$$.fragment),q$o=l(),lo=a("div"),f(M3.$$.fragment),z$o=l(),Xne=a("p"),X$o=o("Instantiate one of the model classes of the library (with a masked language modeling head) from a pretrained model."),W$o=l(),Qa=a("p"),V$o=o("The model class to instantiate is selected based on the "),Wne=a("em"),Q$o=o("model_type"),H$o=o(` property of the config object (either passed as an argument or loaded from `),Vne=a("em"),U$o=o("pretrained_model_name_or_path"),J$o=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Qne=a("em"),K$o=o("pretrained_model_name_or_path"),Y$o=o(":"),Z$o=l(),K=a("ul"),Gb=a("li"),Hne=a("strong"),eIo=o("albert"),oIo=o(" \u2014 "),P$=a("a"),tIo=o("TFAlbertForMaskedLM"),rIo=o(" (ALBERT model)"),aIo=l(),Ob=a("li"),Une=a("strong"),nIo=o("bert"),sIo=o(" \u2014 "),$$=a("a"),lIo=o("TFBertForMaskedLM"),iIo=o(" (BERT model)"),dIo=l(),qb=a("li"),Jne=a("strong"),mIo=o("camembert"),fIo=o(" \u2014 "),I$=a("a"),cIo=o("TFCamembertForMaskedLM"),gIo=o(" (CamemBERT model)"),hIo=l(),zb=a("li"),Kne=a("strong"),uIo=o("convbert"),pIo=o(" \u2014 "),j$=a("a"),_Io=o("TFConvBertForMaskedLM"),vIo=o(" (ConvBERT model)"),bIo=l(),Xb=a("li"),Yne=a("strong"),TIo=o("deberta"),FIo=o(" \u2014 "),N$=a("a"),MIo=o("TFDebertaForMaskedLM"),EIo=o(" (DeBERTa model)"),CIo=l(),Wb=a("li"),Zne=a("strong"),yIo=o("deberta-v2"),wIo=o(" \u2014 "),D$=a("a"),AIo=o("TFDebertaV2ForMaskedLM"),xIo=o(" (DeBERTa-v2 model)"),LIo=l(),Vb=a("li"),ese=a("strong"),BIo=o("distilbert"),kIo=o(" \u2014 "),G$=a("a"),RIo=o("TFDistilBertForMaskedLM"),SIo=o(" (DistilBERT model)"),PIo=l(),Qb=a("li"),ose=a("strong"),$Io=o("electra"),IIo=o(" \u2014 "),O$=a("a"),jIo=o("TFElectraForMaskedLM"),NIo=o(" (ELECTRA model)"),DIo=l(),Hb=a("li"),tse=a("strong"),GIo=o("flaubert"),OIo=o(" \u2014 "),q$=a("a"),qIo=o("TFFlaubertWithLMHeadModel"),zIo=o(" (FlauBERT model)"),XIo=l(),Ub=a("li"),rse=a("strong"),WIo=o("funnel"),VIo=o(" \u2014 "),z$=a("a"),QIo=o("TFFunnelForMaskedLM"),HIo=o(" (Funnel Transformer model)"),UIo=l(),Jb=a("li"),ase=a("strong"),JIo=o("layoutlm"),KIo=o(" \u2014 "),X$=a("a"),YIo=o("TFLayoutLMForMaskedLM"),ZIo=o(" (LayoutLM model)"),ejo=l(),Kb=a("li"),nse=a("strong"),ojo=o("longformer"),tjo=o(" \u2014 "),W$=a("a"),rjo=o("TFLongformerForMaskedLM"),ajo=o(" (Longformer model)"),njo=l(),Yb=a("li"),sse=a("strong"),sjo=o("mobilebert"),ljo=o(" \u2014 "),V$=a("a"),ijo=o("TFMobileBertForMaskedLM"),djo=o(" (MobileBERT model)"),mjo=l(),Zb=a("li"),lse=a("strong"),fjo=o("mpnet"),cjo=o(" \u2014 "),Q$=a("a"),gjo=o("TFMPNetForMaskedLM"),hjo=o(" (MPNet model)"),ujo=l(),e4=a("li"),ise=a("strong"),pjo=o("rembert"),_jo=o(" \u2014 "),H$=a("a"),vjo=o("TFRemBertForMaskedLM"),bjo=o(" (RemBERT model)"),Tjo=l(),o4=a("li"),dse=a("strong"),Fjo=o("roberta"),Mjo=o(" \u2014 "),U$=a("a"),Ejo=o("TFRobertaForMaskedLM"),Cjo=o(" (RoBERTa model)"),yjo=l(),t4=a("li"),mse=a("strong"),wjo=o("roformer"),Ajo=o(" \u2014 "),J$=a("a"),xjo=o("TFRoFormerForMaskedLM"),Ljo=o(" (RoFormer model)"),Bjo=l(),r4=a("li"),fse=a("strong"),kjo=o("tapas"),Rjo=o(" \u2014 "),K$=a("a"),Sjo=o("TFTapasForMaskedLM"),Pjo=o(" (TAPAS model)"),$jo=l(),a4=a("li"),cse=a("strong"),Ijo=o("xlm"),jjo=o(" \u2014 "),Y$=a("a"),Njo=o("TFXLMWithLMHeadModel"),Djo=o(" (XLM model)"),Gjo=l(),n4=a("li"),gse=a("strong"),Ojo=o("xlm-roberta"),qjo=o(" \u2014 "),Z$=a("a"),zjo=o("TFXLMRobertaForMaskedLM"),Xjo=o(" (XLM-RoBERTa 
model)"),Wjo=l(),hse=a("p"),Vjo=o("Examples:"),Qjo=l(),f(E3.$$.fragment),xCe=l(),Ld=a("h2"),s4=a("a"),use=a("span"),f(C3.$$.fragment),Hjo=l(),pse=a("span"),Ujo=o("TFAutoModelForSeq2SeqLM"),LCe=l(),st=a("div"),f(y3.$$.fragment),Jjo=l(),Bd=a("p"),Kjo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence language modeling head) when created with the `),_se=a("code"),Yjo=o("from_pretrained()"),Zjo=o(` class method or the `),vse=a("code"),eNo=o("from_config()"),oNo=o(" class method."),tNo=l(),w3=a("p"),rNo=o("This class cannot be instantiated directly using "),bse=a("code"),aNo=o("__init__()"),nNo=o(" (throws an error)."),sNo=l(),Kt=a("div"),f(A3.$$.fragment),lNo=l(),Tse=a("p"),iNo=o("Instantiates one of the model classes of the library (with a sequence-to-sequence language modeling head) from a configuration."),dNo=l(),kd=a("p"),mNo=o(`Note: Loading a model from its configuration file does `),Fse=a("strong"),fNo=o("not"),cNo=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),Mse=a("em"),gNo=o("~TFAutoModelForSeq2SeqLM.from_pretrained"),hNo=o(`] to load the model weights.`),uNo=l(),Ese=a("p"),pNo=o("Examples:"),_No=l(),f(x3.$$.fragment),vNo=l(),io=a("div"),f(L3.$$.fragment),bNo=l(),Cse=a("p"),TNo=o("Instantiate one of the model classes of the library (with a sequence-to-sequence language modeling head) from a pretrained model."),FNo=l(),Ha=a("p"),MNo=o("The model class to instantiate is selected based on the "),yse=a("em"),ENo=o("model_type"),CNo=o(` property of the config object (either passed as an argument or loaded from `),wse=a("em"),yNo=o("pretrained_model_name_or_path"),wNo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ase=a("em"),ANo=o("pretrained_model_name_or_path"),xNo=o(":"),LNo=l(),ge=a("ul"),l4=a("li"),xse=a("strong"),BNo=o("bart"),kNo=o(" \u2014 "),eI=a("a"),RNo=o("TFBartForConditionalGeneration"),SNo=o(" (BART model)"),PNo=l(),i4=a("li"),Lse=a("strong"),$No=o("blenderbot"),INo=o(" \u2014 "),oI=a("a"),jNo=o("TFBlenderbotForConditionalGeneration"),NNo=o(" (Blenderbot model)"),DNo=l(),d4=a("li"),Bse=a("strong"),GNo=o("blenderbot-small"),ONo=o(" \u2014 "),tI=a("a"),qNo=o("TFBlenderbotSmallForConditionalGeneration"),zNo=o(" (BlenderbotSmall model)"),XNo=l(),m4=a("li"),kse=a("strong"),WNo=o("encoder-decoder"),VNo=o(" \u2014 "),rI=a("a"),QNo=o("TFEncoderDecoderModel"),HNo=o(" (Encoder decoder model)"),UNo=l(),f4=a("li"),Rse=a("strong"),JNo=o("led"),KNo=o(" \u2014 "),aI=a("a"),YNo=o("TFLEDForConditionalGeneration"),ZNo=o(" (LED model)"),eDo=l(),c4=a("li"),Sse=a("strong"),oDo=o("marian"),tDo=o(" \u2014 "),nI=a("a"),rDo=o("TFMarianMTModel"),aDo=o(" (Marian model)"),nDo=l(),g4=a("li"),Pse=a("strong"),sDo=o("mbart"),lDo=o(" \u2014 "),sI=a("a"),iDo=o("TFMBartForConditionalGeneration"),dDo=o(" (mBART model)"),mDo=l(),h4=a("li"),$se=a("strong"),fDo=o("mt5"),cDo=o(" \u2014 "),lI=a("a"),gDo=o("TFMT5ForConditionalGeneration"),hDo=o(" (mT5 model)"),uDo=l(),u4=a("li"),Ise=a("strong"),pDo=o("pegasus"),_Do=o(" \u2014 "),iI=a("a"),vDo=o("TFPegasusForConditionalGeneration"),bDo=o(" (Pegasus model)"),TDo=l(),p4=a("li"),jse=a("strong"),FDo=o("t5"),MDo=o(" \u2014 "),dI=a("a"),EDo=o("TFT5ForConditionalGeneration"),CDo=o(" (T5 
model)"),yDo=l(),Nse=a("p"),wDo=o("Examples:"),ADo=l(),f(B3.$$.fragment),BCe=l(),Rd=a("h2"),_4=a("a"),Dse=a("span"),f(k3.$$.fragment),xDo=l(),Gse=a("span"),LDo=o("TFAutoModelForSequenceClassification"),kCe=l(),lt=a("div"),f(R3.$$.fragment),BDo=l(),Sd=a("p"),kDo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence classification head) when created with the `),Ose=a("code"),RDo=o("from_pretrained()"),SDo=o(` class method or the `),qse=a("code"),PDo=o("from_config()"),$Do=o(" class method."),IDo=l(),S3=a("p"),jDo=o("This class cannot be instantiated directly using "),zse=a("code"),NDo=o("__init__()"),DDo=o(" (throws an error)."),GDo=l(),Yt=a("div"),f(P3.$$.fragment),ODo=l(),Xse=a("p"),qDo=o("Instantiates one of the model classes of the library (with a sequence classification head) from a configuration."),zDo=l(),Pd=a("p"),XDo=o(`Note: Loading a model from its configuration file does `),Wse=a("strong"),WDo=o("not"),VDo=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),Vse=a("em"),QDo=o("~TFAutoModelForSequenceClassification.from_pretrained"),HDo=o(`] to load the model weights.`),UDo=l(),Qse=a("p"),JDo=o("Examples:"),KDo=l(),f($3.$$.fragment),YDo=l(),mo=a("div"),f(I3.$$.fragment),ZDo=l(),Hse=a("p"),eGo=o("Instantiate one of the model classes of the library (with a sequence classification head) from a pretrained model."),oGo=l(),Ua=a("p"),tGo=o("The model class to instantiate is selected based on the "),Use=a("em"),rGo=o("model_type"),aGo=o(` property of the config object (either passed as an argument or loaded from `),Jse=a("em"),nGo=o("pretrained_model_name_or_path"),sGo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Kse=a("em"),lGo=o("pretrained_model_name_or_path"),iGo=o(":"),dGo=l(),O=a("ul"),v4=a("li"),Yse=a("strong"),mGo=o("albert"),fGo=o(" \u2014 "),mI=a("a"),cGo=o("TFAlbertForSequenceClassification"),gGo=o(" (ALBERT model)"),hGo=l(),b4=a("li"),Zse=a("strong"),uGo=o("bert"),pGo=o(" \u2014 "),fI=a("a"),_Go=o("TFBertForSequenceClassification"),vGo=o(" (BERT model)"),bGo=l(),T4=a("li"),ele=a("strong"),TGo=o("camembert"),FGo=o(" \u2014 "),cI=a("a"),MGo=o("TFCamembertForSequenceClassification"),EGo=o(" (CamemBERT model)"),CGo=l(),F4=a("li"),ole=a("strong"),yGo=o("convbert"),wGo=o(" \u2014 "),gI=a("a"),AGo=o("TFConvBertForSequenceClassification"),xGo=o(" (ConvBERT model)"),LGo=l(),M4=a("li"),tle=a("strong"),BGo=o("ctrl"),kGo=o(" \u2014 "),hI=a("a"),RGo=o("TFCTRLForSequenceClassification"),SGo=o(" (CTRL model)"),PGo=l(),E4=a("li"),rle=a("strong"),$Go=o("deberta"),IGo=o(" \u2014 "),uI=a("a"),jGo=o("TFDebertaForSequenceClassification"),NGo=o(" (DeBERTa model)"),DGo=l(),C4=a("li"),ale=a("strong"),GGo=o("deberta-v2"),OGo=o(" \u2014 "),pI=a("a"),qGo=o("TFDebertaV2ForSequenceClassification"),zGo=o(" (DeBERTa-v2 model)"),XGo=l(),y4=a("li"),nle=a("strong"),WGo=o("distilbert"),VGo=o(" \u2014 "),_I=a("a"),QGo=o("TFDistilBertForSequenceClassification"),HGo=o(" (DistilBERT model)"),UGo=l(),w4=a("li"),sle=a("strong"),JGo=o("electra"),KGo=o(" \u2014 "),vI=a("a"),YGo=o("TFElectraForSequenceClassification"),ZGo=o(" (ELECTRA model)"),eOo=l(),A4=a("li"),lle=a("strong"),oOo=o("flaubert"),tOo=o(" \u2014 "),bI=a("a"),rOo=o("TFFlaubertForSequenceClassification"),aOo=o(" (FlauBERT model)"),nOo=l(),x4=a("li"),ile=a("strong"),sOo=o("funnel"),lOo=o(" \u2014 "),TI=a("a"),iOo=o("TFFunnelForSequenceClassification"),dOo=o(" (Funnel Transformer 
model)"),mOo=l(),L4=a("li"),dle=a("strong"),fOo=o("gpt2"),cOo=o(" \u2014 "),FI=a("a"),gOo=o("TFGPT2ForSequenceClassification"),hOo=o(" (OpenAI GPT-2 model)"),uOo=l(),B4=a("li"),mle=a("strong"),pOo=o("layoutlm"),_Oo=o(" \u2014 "),MI=a("a"),vOo=o("TFLayoutLMForSequenceClassification"),bOo=o(" (LayoutLM model)"),TOo=l(),k4=a("li"),fle=a("strong"),FOo=o("longformer"),MOo=o(" \u2014 "),EI=a("a"),EOo=o("TFLongformerForSequenceClassification"),COo=o(" (Longformer model)"),yOo=l(),R4=a("li"),cle=a("strong"),wOo=o("mobilebert"),AOo=o(" \u2014 "),CI=a("a"),xOo=o("TFMobileBertForSequenceClassification"),LOo=o(" (MobileBERT model)"),BOo=l(),S4=a("li"),gle=a("strong"),kOo=o("mpnet"),ROo=o(" \u2014 "),yI=a("a"),SOo=o("TFMPNetForSequenceClassification"),POo=o(" (MPNet model)"),$Oo=l(),P4=a("li"),hle=a("strong"),IOo=o("openai-gpt"),jOo=o(" \u2014 "),wI=a("a"),NOo=o("TFOpenAIGPTForSequenceClassification"),DOo=o(" (OpenAI GPT model)"),GOo=l(),$4=a("li"),ule=a("strong"),OOo=o("rembert"),qOo=o(" \u2014 "),AI=a("a"),zOo=o("TFRemBertForSequenceClassification"),XOo=o(" (RemBERT model)"),WOo=l(),I4=a("li"),ple=a("strong"),VOo=o("roberta"),QOo=o(" \u2014 "),xI=a("a"),HOo=o("TFRobertaForSequenceClassification"),UOo=o(" (RoBERTa model)"),JOo=l(),j4=a("li"),_le=a("strong"),KOo=o("roformer"),YOo=o(" \u2014 "),LI=a("a"),ZOo=o("TFRoFormerForSequenceClassification"),eqo=o(" (RoFormer model)"),oqo=l(),N4=a("li"),vle=a("strong"),tqo=o("tapas"),rqo=o(" \u2014 "),BI=a("a"),aqo=o("TFTapasForSequenceClassification"),nqo=o(" (TAPAS model)"),sqo=l(),D4=a("li"),ble=a("strong"),lqo=o("transfo-xl"),iqo=o(" \u2014 "),kI=a("a"),dqo=o("TFTransfoXLForSequenceClassification"),mqo=o(" (Transformer-XL model)"),fqo=l(),G4=a("li"),Tle=a("strong"),cqo=o("xlm"),gqo=o(" \u2014 "),RI=a("a"),hqo=o("TFXLMForSequenceClassification"),uqo=o(" (XLM model)"),pqo=l(),O4=a("li"),Fle=a("strong"),_qo=o("xlm-roberta"),vqo=o(" \u2014 "),SI=a("a"),bqo=o("TFXLMRobertaForSequenceClassification"),Tqo=o(" (XLM-RoBERTa model)"),Fqo=l(),q4=a("li"),Mle=a("strong"),Mqo=o("xlnet"),Eqo=o(" \u2014 "),PI=a("a"),Cqo=o("TFXLNetForSequenceClassification"),yqo=o(" (XLNet model)"),wqo=l(),Ele=a("p"),Aqo=o("Examples:"),xqo=l(),f(j3.$$.fragment),RCe=l(),$d=a("h2"),z4=a("a"),Cle=a("span"),f(N3.$$.fragment),Lqo=l(),yle=a("span"),Bqo=o("TFAutoModelForMultipleChoice"),SCe=l(),it=a("div"),f(D3.$$.fragment),kqo=l(),Id=a("p"),Rqo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a multiple choice head) when created with the `),wle=a("code"),Sqo=o("from_pretrained()"),Pqo=o(` class method or the `),Ale=a("code"),$qo=o("from_config()"),Iqo=o(" class method."),jqo=l(),G3=a("p"),Nqo=o("This class cannot be instantiated directly using "),xle=a("code"),Dqo=o("__init__()"),Gqo=o(" (throws an error)."),Oqo=l(),Zt=a("div"),f(O3.$$.fragment),qqo=l(),Lle=a("p"),zqo=o("Instantiates one of the model classes of the library (with a multiple choice head) from a configuration."),Xqo=l(),jd=a("p"),Wqo=o(`Note: Loading a model from its configuration file does `),Ble=a("strong"),Vqo=o("not"),Qqo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),kle=a("em"),Hqo=o("~TFAutoModelForMultipleChoice.from_pretrained"),Uqo=o(`] to load the model weights.`),Jqo=l(),Rle=a("p"),Kqo=o("Examples:"),Yqo=l(),f(q3.$$.fragment),Zqo=l(),fo=a("div"),f(z3.$$.fragment),ezo=l(),Sle=a("p"),ozo=o("Instantiate one of the model classes of the library (with a multiple choice head) from a pretrained model."),tzo=l(),Ja=a("p"),rzo=o("The model class to instantiate is selected based on the "),Ple=a("em"),azo=o("model_type"),nzo=o(` property of the config object (either passed as an argument or loaded from `),$le=a("em"),szo=o("pretrained_model_name_or_path"),lzo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ile=a("em"),izo=o("pretrained_model_name_or_path"),dzo=o(":"),mzo=l(),re=a("ul"),X4=a("li"),jle=a("strong"),fzo=o("albert"),czo=o(" \u2014 "),$I=a("a"),gzo=o("TFAlbertForMultipleChoice"),hzo=o(" (ALBERT model)"),uzo=l(),W4=a("li"),Nle=a("strong"),pzo=o("bert"),_zo=o(" \u2014 "),II=a("a"),vzo=o("TFBertForMultipleChoice"),bzo=o(" (BERT model)"),Tzo=l(),V4=a("li"),Dle=a("strong"),Fzo=o("camembert"),Mzo=o(" \u2014 "),jI=a("a"),Ezo=o("TFCamembertForMultipleChoice"),Czo=o(" (CamemBERT model)"),yzo=l(),Q4=a("li"),Gle=a("strong"),wzo=o("convbert"),Azo=o(" \u2014 "),NI=a("a"),xzo=o("TFConvBertForMultipleChoice"),Lzo=o(" (ConvBERT model)"),Bzo=l(),H4=a("li"),Ole=a("strong"),kzo=o("distilbert"),Rzo=o(" \u2014 "),DI=a("a"),Szo=o("TFDistilBertForMultipleChoice"),Pzo=o(" (DistilBERT model)"),$zo=l(),U4=a("li"),qle=a("strong"),Izo=o("electra"),jzo=o(" \u2014 "),GI=a("a"),Nzo=o("TFElectraForMultipleChoice"),Dzo=o(" (ELECTRA model)"),Gzo=l(),J4=a("li"),zle=a("strong"),Ozo=o("flaubert"),qzo=o(" \u2014 "),OI=a("a"),zzo=o("TFFlaubertForMultipleChoice"),Xzo=o(" (FlauBERT model)"),Wzo=l(),K4=a("li"),Xle=a("strong"),Vzo=o("funnel"),Qzo=o(" \u2014 "),qI=a("a"),Hzo=o("TFFunnelForMultipleChoice"),Uzo=o(" (Funnel Transformer model)"),Jzo=l(),Y4=a("li"),Wle=a("strong"),Kzo=o("longformer"),Yzo=o(" \u2014 "),zI=a("a"),Zzo=o("TFLongformerForMultipleChoice"),eXo=o(" (Longformer model)"),oXo=l(),Z4=a("li"),Vle=a("strong"),tXo=o("mobilebert"),rXo=o(" \u2014 "),XI=a("a"),aXo=o("TFMobileBertForMultipleChoice"),nXo=o(" (MobileBERT model)"),sXo=l(),e5=a("li"),Qle=a("strong"),lXo=o("mpnet"),iXo=o(" \u2014 "),WI=a("a"),dXo=o("TFMPNetForMultipleChoice"),mXo=o(" (MPNet model)"),fXo=l(),o5=a("li"),Hle=a("strong"),cXo=o("rembert"),gXo=o(" \u2014 "),VI=a("a"),hXo=o("TFRemBertForMultipleChoice"),uXo=o(" (RemBERT model)"),pXo=l(),t5=a("li"),Ule=a("strong"),_Xo=o("roberta"),vXo=o(" \u2014 "),QI=a("a"),bXo=o("TFRobertaForMultipleChoice"),TXo=o(" (RoBERTa model)"),FXo=l(),r5=a("li"),Jle=a("strong"),MXo=o("roformer"),EXo=o(" \u2014 "),HI=a("a"),CXo=o("TFRoFormerForMultipleChoice"),yXo=o(" (RoFormer model)"),wXo=l(),a5=a("li"),Kle=a("strong"),AXo=o("xlm"),xXo=o(" \u2014 "),UI=a("a"),LXo=o("TFXLMForMultipleChoice"),BXo=o(" (XLM model)"),kXo=l(),n5=a("li"),Yle=a("strong"),RXo=o("xlm-roberta"),SXo=o(" \u2014 "),JI=a("a"),PXo=o("TFXLMRobertaForMultipleChoice"),$Xo=o(" (XLM-RoBERTa model)"),IXo=l(),s5=a("li"),Zle=a("strong"),jXo=o("xlnet"),NXo=o(" \u2014 "),KI=a("a"),DXo=o("TFXLNetForMultipleChoice"),GXo=o(" (XLNet model)"),OXo=l(),eie=a("p"),qXo=o("Examples:"),zXo=l(),f(X3.$$.fragment),PCe=l(),Nd=a("h2"),l5=a("a"),oie=a("span"),f(W3.$$.fragment),XXo=l(),tie=a("span"),WXo=o("TFAutoModelForTableQuestionAnswering"),$Ce=l(),dt=a("div"),f(V3.$$.fragment),VXo=l(),Dd=a("p"),QXo=o(`This is a generic model class that will be instantiated as one of the model classes 
of the library (with a table question answering head) when created with the `),rie=a("code"),HXo=o("from_pretrained()"),UXo=o(` class method or the `),aie=a("code"),JXo=o("from_config()"),KXo=o(" class method."),YXo=l(),Q3=a("p"),ZXo=o("This class cannot be instantiated directly using "),nie=a("code"),eWo=o("__init__()"),oWo=o(" (throws an error)."),tWo=l(),er=a("div"),f(H3.$$.fragment),rWo=l(),sie=a("p"),aWo=o("Instantiates one of the model classes of the library (with a table question answering head) from a configuration."),nWo=l(),Gd=a("p"),sWo=o(`Note: Loading a model from its configuration file does `),lie=a("strong"),lWo=o("not"),iWo=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),iie=a("em"),dWo=o("~TFAutoModelForTableQuestionAnswering.from_pretrained"),mWo=o(`] to load the model weights.`),fWo=l(),die=a("p"),cWo=o("Examples:"),gWo=l(),f(U3.$$.fragment),hWo=l(),co=a("div"),f(J3.$$.fragment),uWo=l(),mie=a("p"),pWo=o("Instantiate one of the model classes of the library (with a table question answering head) from a pretrained model."),_Wo=l(),Ka=a("p"),vWo=o("The model class to instantiate is selected based on the "),fie=a("em"),bWo=o("model_type"),TWo=o(` property of the config object (either passed as an argument or loaded from `),cie=a("em"),FWo=o("pretrained_model_name_or_path"),MWo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),gie=a("em"),EWo=o("pretrained_model_name_or_path"),CWo=o(":"),yWo=l(),hie=a("ul"),i5=a("li"),uie=a("strong"),wWo=o("tapas"),AWo=o(" \u2014 "),YI=a("a"),xWo=o("TFTapasForQuestionAnswering"),LWo=o(" (TAPAS model)"),BWo=l(),pie=a("p"),kWo=o("Examples:"),RWo=l(),f(K3.$$.fragment),ICe=l(),Od=a("h2"),d5=a("a"),_ie=a("span"),f(Y3.$$.fragment),SWo=l(),vie=a("span"),PWo=o("TFAutoModelForTokenClassification"),jCe=l(),mt=a("div"),f(Z3.$$.fragment),$Wo=l(),qd=a("p"),IWo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a token classification head) when created with the `),bie=a("code"),jWo=o("from_pretrained()"),NWo=o(` class method or the `),Tie=a("code"),DWo=o("from_config()"),GWo=o(" class method."),OWo=l(),ey=a("p"),qWo=o("This class cannot be instantiated directly using "),Fie=a("code"),zWo=o("__init__()"),XWo=o(" (throws an error)."),WWo=l(),or=a("div"),f(oy.$$.fragment),VWo=l(),Mie=a("p"),QWo=o("Instantiates one of the model classes of the library (with a token classification head) from a configuration."),HWo=l(),zd=a("p"),UWo=o(`Note: Loading a model from its configuration file does `),Eie=a("strong"),JWo=o("not"),KWo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Cie=a("em"),YWo=o("~TFAutoModelForTokenClassification.from_pretrained"),ZWo=o(`] to load the model weights.`),eVo=l(),yie=a("p"),oVo=o("Examples:"),tVo=l(),f(ty.$$.fragment),rVo=l(),go=a("div"),f(ry.$$.fragment),aVo=l(),wie=a("p"),nVo=o("Instantiate one of the model classes of the library (with a token classification head) from a pretrained model."),sVo=l(),Ya=a("p"),lVo=o("The model class to instantiate is selected based on the "),Aie=a("em"),iVo=o("model_type"),dVo=o(` property of the config object (either passed as an argument or loaded from `),xie=a("em"),mVo=o("pretrained_model_name_or_path"),fVo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Lie=a("em"),cVo=o("pretrained_model_name_or_path"),gVo=o(":"),hVo=l(),Y=a("ul"),m5=a("li"),Bie=a("strong"),uVo=o("albert"),pVo=o(" \u2014 "),ZI=a("a"),_Vo=o("TFAlbertForTokenClassification"),vVo=o(" (ALBERT model)"),bVo=l(),f5=a("li"),kie=a("strong"),TVo=o("bert"),FVo=o(" \u2014 "),ej=a("a"),MVo=o("TFBertForTokenClassification"),EVo=o(" (BERT model)"),CVo=l(),c5=a("li"),Rie=a("strong"),yVo=o("camembert"),wVo=o(" \u2014 "),oj=a("a"),AVo=o("TFCamembertForTokenClassification"),xVo=o(" (CamemBERT model)"),LVo=l(),g5=a("li"),Sie=a("strong"),BVo=o("convbert"),kVo=o(" \u2014 "),tj=a("a"),RVo=o("TFConvBertForTokenClassification"),SVo=o(" (ConvBERT model)"),PVo=l(),h5=a("li"),Pie=a("strong"),$Vo=o("deberta"),IVo=o(" \u2014 "),rj=a("a"),jVo=o("TFDebertaForTokenClassification"),NVo=o(" (DeBERTa model)"),DVo=l(),u5=a("li"),$ie=a("strong"),GVo=o("deberta-v2"),OVo=o(" \u2014 "),aj=a("a"),qVo=o("TFDebertaV2ForTokenClassification"),zVo=o(" (DeBERTa-v2 model)"),XVo=l(),p5=a("li"),Iie=a("strong"),WVo=o("distilbert"),VVo=o(" \u2014 "),nj=a("a"),QVo=o("TFDistilBertForTokenClassification"),HVo=o(" (DistilBERT model)"),UVo=l(),_5=a("li"),jie=a("strong"),JVo=o("electra"),KVo=o(" \u2014 "),sj=a("a"),YVo=o("TFElectraForTokenClassification"),ZVo=o(" (ELECTRA model)"),eQo=l(),v5=a("li"),Nie=a("strong"),oQo=o("flaubert"),tQo=o(" \u2014 "),lj=a("a"),rQo=o("TFFlaubertForTokenClassification"),aQo=o(" (FlauBERT model)"),nQo=l(),b5=a("li"),Die=a("strong"),sQo=o("funnel"),lQo=o(" \u2014 "),ij=a("a"),iQo=o("TFFunnelForTokenClassification"),dQo=o(" (Funnel Transformer model)"),mQo=l(),T5=a("li"),Gie=a("strong"),fQo=o("layoutlm"),cQo=o(" \u2014 "),dj=a("a"),gQo=o("TFLayoutLMForTokenClassification"),hQo=o(" (LayoutLM model)"),uQo=l(),F5=a("li"),Oie=a("strong"),pQo=o("longformer"),_Qo=o(" \u2014 "),mj=a("a"),vQo=o("TFLongformerForTokenClassification"),bQo=o(" (Longformer model)"),TQo=l(),M5=a("li"),qie=a("strong"),FQo=o("mobilebert"),MQo=o(" \u2014 "),fj=a("a"),EQo=o("TFMobileBertForTokenClassification"),CQo=o(" (MobileBERT model)"),yQo=l(),E5=a("li"),zie=a("strong"),wQo=o("mpnet"),AQo=o(" \u2014 "),cj=a("a"),xQo=o("TFMPNetForTokenClassification"),LQo=o(" (MPNet model)"),BQo=l(),C5=a("li"),Xie=a("strong"),kQo=o("rembert"),RQo=o(" \u2014 "),gj=a("a"),SQo=o("TFRemBertForTokenClassification"),PQo=o(" (RemBERT model)"),$Qo=l(),y5=a("li"),Wie=a("strong"),IQo=o("roberta"),jQo=o(" \u2014 "),hj=a("a"),NQo=o("TFRobertaForTokenClassification"),DQo=o(" (RoBERTa model)"),GQo=l(),w5=a("li"),Vie=a("strong"),OQo=o("roformer"),qQo=o(" \u2014 "),uj=a("a"),zQo=o("TFRoFormerForTokenClassification"),XQo=o(" (RoFormer model)"),WQo=l(),A5=a("li"),Qie=a("strong"),VQo=o("xlm"),QQo=o(" \u2014 "),pj=a("a"),HQo=o("TFXLMForTokenClassification"),UQo=o(" (XLM model)"),JQo=l(),x5=a("li"),Hie=a("strong"),KQo=o("xlm-roberta"),YQo=o(" \u2014 
"),_j=a("a"),ZQo=o("TFXLMRobertaForTokenClassification"),eHo=o(" (XLM-RoBERTa model)"),oHo=l(),L5=a("li"),Uie=a("strong"),tHo=o("xlnet"),rHo=o(" \u2014 "),vj=a("a"),aHo=o("TFXLNetForTokenClassification"),nHo=o(" (XLNet model)"),sHo=l(),Jie=a("p"),lHo=o("Examples:"),iHo=l(),f(ay.$$.fragment),NCe=l(),Xd=a("h2"),B5=a("a"),Kie=a("span"),f(ny.$$.fragment),dHo=l(),Yie=a("span"),mHo=o("TFAutoModelForQuestionAnswering"),DCe=l(),ft=a("div"),f(sy.$$.fragment),fHo=l(),Wd=a("p"),cHo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a question answering head) when created with the `),Zie=a("code"),gHo=o("from_pretrained()"),hHo=o(` class method or the `),ede=a("code"),uHo=o("from_config()"),pHo=o(" class method."),_Ho=l(),ly=a("p"),vHo=o("This class cannot be instantiated directly using "),ode=a("code"),bHo=o("__init__()"),THo=o(" (throws an error)."),FHo=l(),tr=a("div"),f(iy.$$.fragment),MHo=l(),tde=a("p"),EHo=o("Instantiates one of the model classes of the library (with a question answering head) from a configuration."),CHo=l(),Vd=a("p"),yHo=o(`Note: Loading a model from its configuration file does `),rde=a("strong"),wHo=o("not"),AHo=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),ade=a("em"),xHo=o("~TFAutoModelForQuestionAnswering.from_pretrained"),LHo=o(`] to load the model weights.`),BHo=l(),nde=a("p"),kHo=o("Examples:"),RHo=l(),f(dy.$$.fragment),SHo=l(),ho=a("div"),f(my.$$.fragment),PHo=l(),sde=a("p"),$Ho=o("Instantiate one of the model classes of the library (with a question answering head) from a pretrained model."),IHo=l(),Za=a("p"),jHo=o("The model class to instantiate is selected based on the "),lde=a("em"),NHo=o("model_type"),DHo=o(` property of the config object (either passed as an argument or loaded from `),ide=a("em"),GHo=o("pretrained_model_name_or_path"),OHo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),dde=a("em"),qHo=o("pretrained_model_name_or_path"),zHo=o(":"),XHo=l(),Z=a("ul"),k5=a("li"),mde=a("strong"),WHo=o("albert"),VHo=o(" \u2014 "),bj=a("a"),QHo=o("TFAlbertForQuestionAnswering"),HHo=o(" (ALBERT model)"),UHo=l(),R5=a("li"),fde=a("strong"),JHo=o("bert"),KHo=o(" \u2014 "),Tj=a("a"),YHo=o("TFBertForQuestionAnswering"),ZHo=o(" (BERT model)"),eUo=l(),S5=a("li"),cde=a("strong"),oUo=o("camembert"),tUo=o(" \u2014 "),Fj=a("a"),rUo=o("TFCamembertForQuestionAnswering"),aUo=o(" (CamemBERT model)"),nUo=l(),P5=a("li"),gde=a("strong"),sUo=o("convbert"),lUo=o(" \u2014 "),Mj=a("a"),iUo=o("TFConvBertForQuestionAnswering"),dUo=o(" (ConvBERT model)"),mUo=l(),$5=a("li"),hde=a("strong"),fUo=o("deberta"),cUo=o(" \u2014 "),Ej=a("a"),gUo=o("TFDebertaForQuestionAnswering"),hUo=o(" (DeBERTa model)"),uUo=l(),I5=a("li"),ude=a("strong"),pUo=o("deberta-v2"),_Uo=o(" \u2014 "),Cj=a("a"),vUo=o("TFDebertaV2ForQuestionAnswering"),bUo=o(" (DeBERTa-v2 model)"),TUo=l(),j5=a("li"),pde=a("strong"),FUo=o("distilbert"),MUo=o(" \u2014 "),yj=a("a"),EUo=o("TFDistilBertForQuestionAnswering"),CUo=o(" (DistilBERT model)"),yUo=l(),N5=a("li"),_de=a("strong"),wUo=o("electra"),AUo=o(" \u2014 "),wj=a("a"),xUo=o("TFElectraForQuestionAnswering"),LUo=o(" (ELECTRA model)"),BUo=l(),D5=a("li"),vde=a("strong"),kUo=o("flaubert"),RUo=o(" \u2014 "),Aj=a("a"),SUo=o("TFFlaubertForQuestionAnsweringSimple"),PUo=o(" (FlauBERT model)"),$Uo=l(),G5=a("li"),bde=a("strong"),IUo=o("funnel"),jUo=o(" \u2014 "),xj=a("a"),NUo=o("TFFunnelForQuestionAnswering"),DUo=o(" (Funnel Transformer 
model)"),GUo=l(),O5=a("li"),Tde=a("strong"),OUo=o("longformer"),qUo=o(" \u2014 "),Lj=a("a"),zUo=o("TFLongformerForQuestionAnswering"),XUo=o(" (Longformer model)"),WUo=l(),q5=a("li"),Fde=a("strong"),VUo=o("mobilebert"),QUo=o(" \u2014 "),Bj=a("a"),HUo=o("TFMobileBertForQuestionAnswering"),UUo=o(" (MobileBERT model)"),JUo=l(),z5=a("li"),Mde=a("strong"),KUo=o("mpnet"),YUo=o(" \u2014 "),kj=a("a"),ZUo=o("TFMPNetForQuestionAnswering"),eJo=o(" (MPNet model)"),oJo=l(),X5=a("li"),Ede=a("strong"),tJo=o("rembert"),rJo=o(" \u2014 "),Rj=a("a"),aJo=o("TFRemBertForQuestionAnswering"),nJo=o(" (RemBERT model)"),sJo=l(),W5=a("li"),Cde=a("strong"),lJo=o("roberta"),iJo=o(" \u2014 "),Sj=a("a"),dJo=o("TFRobertaForQuestionAnswering"),mJo=o(" (RoBERTa model)"),fJo=l(),V5=a("li"),yde=a("strong"),cJo=o("roformer"),gJo=o(" \u2014 "),Pj=a("a"),hJo=o("TFRoFormerForQuestionAnswering"),uJo=o(" (RoFormer model)"),pJo=l(),Q5=a("li"),wde=a("strong"),_Jo=o("xlm"),vJo=o(" \u2014 "),$j=a("a"),bJo=o("TFXLMForQuestionAnsweringSimple"),TJo=o(" (XLM model)"),FJo=l(),H5=a("li"),Ade=a("strong"),MJo=o("xlm-roberta"),EJo=o(" \u2014 "),Ij=a("a"),CJo=o("TFXLMRobertaForQuestionAnswering"),yJo=o(" (XLM-RoBERTa model)"),wJo=l(),U5=a("li"),xde=a("strong"),AJo=o("xlnet"),xJo=o(" \u2014 "),jj=a("a"),LJo=o("TFXLNetForQuestionAnsweringSimple"),BJo=o(" (XLNet model)"),kJo=l(),Lde=a("p"),RJo=o("Examples:"),SJo=l(),f(fy.$$.fragment),GCe=l(),Qd=a("h2"),J5=a("a"),Bde=a("span"),f(cy.$$.fragment),PJo=l(),kde=a("span"),$Jo=o("FlaxAutoModel"),OCe=l(),ct=a("div"),f(gy.$$.fragment),IJo=l(),Hd=a("p"),jJo=o(`This is a generic model class that will be instantiated as one of the base model classes of the library when created with the `),Rde=a("code"),NJo=o("from_pretrained()"),DJo=o(` class method or the `),Sde=a("code"),GJo=o("from_config()"),OJo=o(" class method."),qJo=l(),hy=a("p"),zJo=o("This class cannot be instantiated directly using "),Pde=a("code"),XJo=o("__init__()"),WJo=o(" (throws an error)."),VJo=l(),rr=a("div"),f(uy.$$.fragment),QJo=l(),$de=a("p"),HJo=o("Instantiates one of the base model classes of the library from a configuration."),UJo=l(),Ud=a("p"),JJo=o(`Note: Loading a model from its configuration file does `),Ide=a("strong"),KJo=o("not"),YJo=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),jde=a("em"),ZJo=o("~FlaxAutoModel.from_pretrained"),eKo=o(`] to load the model weights.`),oKo=l(),Nde=a("p"),tKo=o("Examples:"),rKo=l(),f(py.$$.fragment),aKo=l(),uo=a("div"),f(_y.$$.fragment),nKo=l(),Dde=a("p"),sKo=o("Instantiate one of the base model classes of the library from a pretrained model."),lKo=l(),en=a("p"),iKo=o("The model class to instantiate is selected based on the "),Gde=a("em"),dKo=o("model_type"),mKo=o(` property of the config object (either passed as an argument or loaded from `),Ode=a("em"),fKo=o("pretrained_model_name_or_path"),cKo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),qde=a("em"),gKo=o("pretrained_model_name_or_path"),hKo=o(":"),uKo=l(),Q=a("ul"),K5=a("li"),zde=a("strong"),pKo=o("albert"),_Ko=o(" \u2014 "),Nj=a("a"),vKo=o("FlaxAlbertModel"),bKo=o(" (ALBERT model)"),TKo=l(),Y5=a("li"),Xde=a("strong"),FKo=o("bart"),MKo=o(" \u2014 "),Dj=a("a"),EKo=o("FlaxBartModel"),CKo=o(" (BART model)"),yKo=l(),Z5=a("li"),Wde=a("strong"),wKo=o("beit"),AKo=o(" \u2014 "),Gj=a("a"),xKo=o("FlaxBeitModel"),LKo=o(" (BEiT model)"),BKo=l(),e0=a("li"),Vde=a("strong"),kKo=o("bert"),RKo=o(" \u2014 "),Oj=a("a"),SKo=o("FlaxBertModel"),PKo=o(" (BERT model)"),$Ko=l(),o0=a("li"),Qde=a("strong"),IKo=o("big_bird"),jKo=o(" \u2014 "),qj=a("a"),NKo=o("FlaxBigBirdModel"),DKo=o(" (BigBird model)"),GKo=l(),t0=a("li"),Hde=a("strong"),OKo=o("blenderbot"),qKo=o(" \u2014 "),zj=a("a"),zKo=o("FlaxBlenderbotModel"),XKo=o(" (Blenderbot model)"),WKo=l(),r0=a("li"),Ude=a("strong"),VKo=o("blenderbot-small"),QKo=o(" \u2014 "),Xj=a("a"),HKo=o("FlaxBlenderbotSmallModel"),UKo=o(" (BlenderbotSmall model)"),JKo=l(),a0=a("li"),Jde=a("strong"),KKo=o("clip"),YKo=o(" \u2014 "),Wj=a("a"),ZKo=o("FlaxCLIPModel"),eYo=o(" (CLIP model)"),oYo=l(),n0=a("li"),Kde=a("strong"),tYo=o("distilbert"),rYo=o(" \u2014 "),Vj=a("a"),aYo=o("FlaxDistilBertModel"),nYo=o(" (DistilBERT model)"),sYo=l(),s0=a("li"),Yde=a("strong"),lYo=o("electra"),iYo=o(" \u2014 "),Qj=a("a"),dYo=o("FlaxElectraModel"),mYo=o(" (ELECTRA model)"),fYo=l(),l0=a("li"),Zde=a("strong"),cYo=o("gpt2"),gYo=o(" \u2014 "),Hj=a("a"),hYo=o("FlaxGPT2Model"),uYo=o(" (OpenAI GPT-2 model)"),pYo=l(),i0=a("li"),eme=a("strong"),_Yo=o("gpt_neo"),vYo=o(" \u2014 "),Uj=a("a"),bYo=o("FlaxGPTNeoModel"),TYo=o(" (GPT Neo model)"),FYo=l(),d0=a("li"),ome=a("strong"),MYo=o("gptj"),EYo=o(" \u2014 "),Jj=a("a"),CYo=o("FlaxGPTJModel"),yYo=o(" (GPT-J model)"),wYo=l(),m0=a("li"),tme=a("strong"),AYo=o("marian"),xYo=o(" \u2014 "),Kj=a("a"),LYo=o("FlaxMarianModel"),BYo=o(" (Marian model)"),kYo=l(),f0=a("li"),rme=a("strong"),RYo=o("mbart"),SYo=o(" \u2014 "),Yj=a("a"),PYo=o("FlaxMBartModel"),$Yo=o(" (mBART model)"),IYo=l(),c0=a("li"),ame=a("strong"),jYo=o("mt5"),NYo=o(" \u2014 "),Zj=a("a"),DYo=o("FlaxMT5Model"),GYo=o(" (mT5 model)"),OYo=l(),g0=a("li"),nme=a("strong"),qYo=o("pegasus"),zYo=o(" \u2014 "),eN=a("a"),XYo=o("FlaxPegasusModel"),WYo=o(" (Pegasus model)"),VYo=l(),h0=a("li"),sme=a("strong"),QYo=o("roberta"),HYo=o(" \u2014 "),oN=a("a"),UYo=o("FlaxRobertaModel"),JYo=o(" (RoBERTa model)"),KYo=l(),u0=a("li"),lme=a("strong"),YYo=o("t5"),ZYo=o(" \u2014 "),tN=a("a"),eZo=o("FlaxT5Model"),oZo=o(" (T5 model)"),tZo=l(),p0=a("li"),ime=a("strong"),rZo=o("vision-text-dual-encoder"),aZo=o(" \u2014 "),rN=a("a"),nZo=o("FlaxVisionTextDualEncoderModel"),sZo=o(" (VisionTextDualEncoder model)"),lZo=l(),_0=a("li"),dme=a("strong"),iZo=o("vit"),dZo=o(" \u2014 "),aN=a("a"),mZo=o("FlaxViTModel"),fZo=o(" (ViT 
model)"),cZo=l(),v0=a("li"),mme=a("strong"),gZo=o("wav2vec2"),hZo=o(" \u2014 "),nN=a("a"),uZo=o("FlaxWav2Vec2Model"),pZo=o(" (Wav2Vec2 model)"),_Zo=l(),fme=a("p"),vZo=o("Examples:"),bZo=l(),f(vy.$$.fragment),qCe=l(),Jd=a("h2"),b0=a("a"),cme=a("span"),f(by.$$.fragment),TZo=l(),gme=a("span"),FZo=o("FlaxAutoModelForCausalLM"),zCe=l(),gt=a("div"),f(Ty.$$.fragment),MZo=l(),Kd=a("p"),EZo=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a causal language modeling head) when created with the `),hme=a("code"),CZo=o("from_pretrained()"),yZo=o(` class method or the `),ume=a("code"),wZo=o("from_config()"),AZo=o(" class method."),xZo=l(),Fy=a("p"),LZo=o("This class cannot be instantiated directly using "),pme=a("code"),BZo=o("__init__()"),kZo=o(" (throws an error)."),RZo=l(),ar=a("div"),f(My.$$.fragment),SZo=l(),_me=a("p"),PZo=o("Instantiates one of the model classes of the library (with a causal language modeling head) from a configuration."),$Zo=l(),Yd=a("p"),IZo=o(`Note: Loading a model from its configuration file does `),vme=a("strong"),jZo=o("not"),NZo=o(` load the model weights. It only affects the model\u2019s configuration. Use [`),bme=a("em"),DZo=o("~FlaxAutoModelForCausalLM.from_pretrained"),GZo=o(`] to load the model weights.`),OZo=l(),Tme=a("p"),qZo=o("Examples:"),zZo=l(),f(Ey.$$.fragment),XZo=l(),po=a("div"),f(Cy.$$.fragment),WZo=l(),Fme=a("p"),VZo=o("Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model."),QZo=l(),on=a("p"),HZo=o("The model class to instantiate is selected based on the "),Mme=a("em"),UZo=o("model_type"),JZo=o(` property of the config object (either passed as an argument or loaded from `),Eme=a("em"),KZo=o("pretrained_model_name_or_path"),YZo=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Cme=a("em"),ZZo=o("pretrained_model_name_or_path"),eet=o(":"),oet=l(),Zd=a("ul"),T0=a("li"),yme=a("strong"),tet=o("gpt2"),ret=o(" \u2014 "),sN=a("a"),aet=o("FlaxGPT2LMHeadModel"),net=o(" (OpenAI GPT-2 model)"),set=l(),F0=a("li"),wme=a("strong"),iet=o("gpt_neo"),det=o(" \u2014 "),lN=a("a"),met=o("FlaxGPTNeoForCausalLM"),fet=o(" (GPT Neo model)"),cet=l(),M0=a("li"),Ame=a("strong"),get=o("gptj"),het=o(" \u2014 "),iN=a("a"),uet=o("FlaxGPTJForCausalLM"),pet=o(" (GPT-J model)"),_et=l(),xme=a("p"),vet=o("Examples:"),bet=l(),f(yy.$$.fragment),XCe=l(),em=a("h2"),E0=a("a"),Lme=a("span"),f(wy.$$.fragment),Tet=l(),Bme=a("span"),Fet=o("FlaxAutoModelForPreTraining"),WCe=l(),ht=a("div"),f(Ay.$$.fragment),Met=l(),om=a("p"),Eet=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a pretraining head) when created with the `),kme=a("code"),Cet=o("from_pretrained()"),yet=o(` class method or the `),Rme=a("code"),wet=o("from_config()"),Aet=o(" class method."),xet=l(),xy=a("p"),Let=o("This class cannot be instantiated directly using "),Sme=a("code"),Bet=o("__init__()"),ket=o(" (throws an error)."),Ret=l(),nr=a("div"),f(Ly.$$.fragment),Set=l(),Pme=a("p"),Pet=o("Instantiates one of the model classes of the library (with a pretraining head) from a configuration."),$et=l(),tm=a("p"),Iet=o(`Note: Loading a model from its configuration file does `),$me=a("strong"),jet=o("not"),Net=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Ime=a("em"),Det=o("~FlaxAutoModelForPreTraining.from_pretrained"),Get=o(`] to load the model weights.`),Oet=l(),jme=a("p"),qet=o("Examples:"),zet=l(),f(By.$$.fragment),Xet=l(),_o=a("div"),f(ky.$$.fragment),Wet=l(),Nme=a("p"),Vet=o("Instantiate one of the model classes of the library (with a pretraining head) from a pretrained model."),Qet=l(),tn=a("p"),Het=o("The model class to instantiate is selected based on the "),Dme=a("em"),Uet=o("model_type"),Jet=o(` property of the config object (either passed as an argument or loaded from `),Gme=a("em"),Ket=o("pretrained_model_name_or_path"),Yet=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ome=a("em"),Zet=o("pretrained_model_name_or_path"),eot=o(":"),oot=l(),he=a("ul"),C0=a("li"),qme=a("strong"),tot=o("albert"),rot=o(" \u2014 "),dN=a("a"),aot=o("FlaxAlbertForPreTraining"),not=o(" (ALBERT model)"),sot=l(),y0=a("li"),zme=a("strong"),lot=o("bart"),iot=o(" \u2014 "),mN=a("a"),dot=o("FlaxBartForConditionalGeneration"),mot=o(" (BART model)"),fot=l(),w0=a("li"),Xme=a("strong"),cot=o("bert"),got=o(" \u2014 "),fN=a("a"),hot=o("FlaxBertForPreTraining"),uot=o(" (BERT model)"),pot=l(),A0=a("li"),Wme=a("strong"),_ot=o("big_bird"),vot=o(" \u2014 "),cN=a("a"),bot=o("FlaxBigBirdForPreTraining"),Tot=o(" (BigBird model)"),Fot=l(),x0=a("li"),Vme=a("strong"),Mot=o("electra"),Eot=o(" \u2014 "),gN=a("a"),Cot=o("FlaxElectraForPreTraining"),yot=o(" (ELECTRA model)"),wot=l(),L0=a("li"),Qme=a("strong"),Aot=o("mbart"),xot=o(" \u2014 "),hN=a("a"),Lot=o("FlaxMBartForConditionalGeneration"),Bot=o(" (mBART model)"),kot=l(),B0=a("li"),Hme=a("strong"),Rot=o("mt5"),Sot=o(" \u2014 "),uN=a("a"),Pot=o("FlaxMT5ForConditionalGeneration"),$ot=o(" (mT5 model)"),Iot=l(),k0=a("li"),Ume=a("strong"),jot=o("roberta"),Not=o(" \u2014 "),pN=a("a"),Dot=o("FlaxRobertaForMaskedLM"),Got=o(" (RoBERTa model)"),Oot=l(),R0=a("li"),Jme=a("strong"),qot=o("t5"),zot=o(" \u2014 "),_N=a("a"),Xot=o("FlaxT5ForConditionalGeneration"),Wot=o(" (T5 model)"),Vot=l(),S0=a("li"),Kme=a("strong"),Qot=o("wav2vec2"),Hot=o(" \u2014 "),vN=a("a"),Uot=o("FlaxWav2Vec2ForPreTraining"),Jot=o(" (Wav2Vec2 model)"),Kot=l(),Yme=a("p"),Yot=o("Examples:"),Zot=l(),f(Ry.$$.fragment),VCe=l(),rm=a("h2"),P0=a("a"),Zme=a("span"),f(Sy.$$.fragment),ett=l(),efe=a("span"),ott=o("FlaxAutoModelForMaskedLM"),QCe=l(),ut=a("div"),f(Py.$$.fragment),ttt=l(),am=a("p"),rtt=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a masked language modeling head) when created with the `),ofe=a("code"),att=o("from_pretrained()"),ntt=o(` class method or the `),tfe=a("code"),stt=o("from_config()"),ltt=o(" class method."),itt=l(),$y=a("p"),dtt=o("This class cannot be instantiated directly using "),rfe=a("code"),mtt=o("__init__()"),ftt=o(" (throws an error)."),ctt=l(),sr=a("div"),f(Iy.$$.fragment),gtt=l(),afe=a("p"),htt=o("Instantiates one of the model classes of the library (with a masked language modeling head) from a configuration."),utt=l(),nm=a("p"),ptt=o(`Note: Loading a model from its configuration file does `),nfe=a("strong"),_tt=o("not"),vtt=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),sfe=a("em"),btt=o("~FlaxAutoModelForMaskedLM.from_pretrained"),Ttt=o(`] to load the model weights.`),Ftt=l(),lfe=a("p"),Mtt=o("Examples:"),Ett=l(),f(jy.$$.fragment),Ctt=l(),vo=a("div"),f(Ny.$$.fragment),ytt=l(),ife=a("p"),wtt=o("Instantiate one of the model classes of the library (with a masked language modeling head) from a pretrained model."),Att=l(),rn=a("p"),xtt=o("The model class to instantiate is selected based on the "),dfe=a("em"),Ltt=o("model_type"),Btt=o(` property of the config object (either passed as an argument or loaded from `),mfe=a("em"),ktt=o("pretrained_model_name_or_path"),Rtt=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),ffe=a("em"),Stt=o("pretrained_model_name_or_path"),Ptt=o(":"),$tt=l(),Me=a("ul"),$0=a("li"),cfe=a("strong"),Itt=o("albert"),jtt=o(" \u2014 "),bN=a("a"),Ntt=o("FlaxAlbertForMaskedLM"),Dtt=o(" (ALBERT model)"),Gtt=l(),I0=a("li"),gfe=a("strong"),Ott=o("bart"),qtt=o(" \u2014 "),TN=a("a"),ztt=o("FlaxBartForConditionalGeneration"),Xtt=o(" (BART model)"),Wtt=l(),j0=a("li"),hfe=a("strong"),Vtt=o("bert"),Qtt=o(" \u2014 "),FN=a("a"),Htt=o("FlaxBertForMaskedLM"),Utt=o(" (BERT model)"),Jtt=l(),N0=a("li"),ufe=a("strong"),Ktt=o("big_bird"),Ytt=o(" \u2014 "),MN=a("a"),Ztt=o("FlaxBigBirdForMaskedLM"),ert=o(" (BigBird model)"),ort=l(),D0=a("li"),pfe=a("strong"),trt=o("distilbert"),rrt=o(" \u2014 "),EN=a("a"),art=o("FlaxDistilBertForMaskedLM"),nrt=o(" (DistilBERT model)"),srt=l(),G0=a("li"),_fe=a("strong"),lrt=o("electra"),irt=o(" \u2014 "),CN=a("a"),drt=o("FlaxElectraForMaskedLM"),mrt=o(" (ELECTRA model)"),frt=l(),O0=a("li"),vfe=a("strong"),crt=o("mbart"),grt=o(" \u2014 "),yN=a("a"),hrt=o("FlaxMBartForConditionalGeneration"),urt=o(" (mBART model)"),prt=l(),q0=a("li"),bfe=a("strong"),_rt=o("roberta"),vrt=o(" \u2014 "),wN=a("a"),brt=o("FlaxRobertaForMaskedLM"),Trt=o(" (RoBERTa model)"),Frt=l(),Tfe=a("p"),Mrt=o("Examples:"),Ert=l(),f(Dy.$$.fragment),HCe=l(),sm=a("h2"),z0=a("a"),Ffe=a("span"),f(Gy.$$.fragment),Crt=l(),Mfe=a("span"),yrt=o("FlaxAutoModelForSeq2SeqLM"),UCe=l(),pt=a("div"),f(Oy.$$.fragment),wrt=l(),lm=a("p"),Art=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence language modeling head) when created with the `),Efe=a("code"),xrt=o("from_pretrained()"),Lrt=o(` class method or the `),Cfe=a("code"),Brt=o("from_config()"),krt=o(" class method."),Rrt=l(),qy=a("p"),Srt=o("This class cannot be instantiated directly using "),yfe=a("code"),Prt=o("__init__()"),$rt=o(" (throws an error)."),Irt=l(),lr=a("div"),f(zy.$$.fragment),jrt=l(),wfe=a("p"),Nrt=o("Instantiates one of the model classes of the library (with a sequence-to-sequence language modeling head) from a configuration."),Drt=l(),im=a("p"),Grt=o(`Note: Loading a model from its configuration file does `),Afe=a("strong"),Ort=o("not"),qrt=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),xfe=a("em"),zrt=o("~FlaxAutoModelForSeq2SeqLM.from_pretrained"),Xrt=o(`] to load the model weights.`),Wrt=l(),Lfe=a("p"),Vrt=o("Examples:"),Qrt=l(),f(Xy.$$.fragment),Hrt=l(),bo=a("div"),f(Wy.$$.fragment),Urt=l(),Bfe=a("p"),Jrt=o("Instantiate one of the model classes of the library (with a sequence-to-sequence language modeling head) from a pretrained model."),Krt=l(),an=a("p"),Yrt=o("The model class to instantiate is selected based on the "),kfe=a("em"),Zrt=o("model_type"),eat=o(` property of the config object (either passed as an argument or loaded from `),Rfe=a("em"),oat=o("pretrained_model_name_or_path"),tat=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Sfe=a("em"),rat=o("pretrained_model_name_or_path"),aat=o(":"),nat=l(),pe=a("ul"),X0=a("li"),Pfe=a("strong"),sat=o("bart"),lat=o(" \u2014 "),AN=a("a"),iat=o("FlaxBartForConditionalGeneration"),dat=o(" (BART model)"),mat=l(),W0=a("li"),$fe=a("strong"),fat=o("blenderbot"),cat=o(" \u2014 "),xN=a("a"),gat=o("FlaxBlenderbotForConditionalGeneration"),hat=o(" (Blenderbot model)"),uat=l(),V0=a("li"),Ife=a("strong"),pat=o("blenderbot-small"),_at=o(" \u2014 "),LN=a("a"),vat=o("FlaxBlenderbotSmallForConditionalGeneration"),bat=o(" (BlenderbotSmall model)"),Tat=l(),Q0=a("li"),jfe=a("strong"),Fat=o("encoder-decoder"),Mat=o(" \u2014 "),BN=a("a"),Eat=o("FlaxEncoderDecoderModel"),Cat=o(" (Encoder decoder model)"),yat=l(),H0=a("li"),Nfe=a("strong"),wat=o("marian"),Aat=o(" \u2014 "),kN=a("a"),xat=o("FlaxMarianMTModel"),Lat=o(" (Marian model)"),Bat=l(),U0=a("li"),Dfe=a("strong"),kat=o("mbart"),Rat=o(" \u2014 "),RN=a("a"),Sat=o("FlaxMBartForConditionalGeneration"),Pat=o(" (mBART model)"),$at=l(),J0=a("li"),Gfe=a("strong"),Iat=o("mt5"),jat=o(" \u2014 "),SN=a("a"),Nat=o("FlaxMT5ForConditionalGeneration"),Dat=o(" (mT5 model)"),Gat=l(),K0=a("li"),Ofe=a("strong"),Oat=o("pegasus"),qat=o(" \u2014 "),PN=a("a"),zat=o("FlaxPegasusForConditionalGeneration"),Xat=o(" (Pegasus model)"),Wat=l(),Y0=a("li"),qfe=a("strong"),Vat=o("t5"),Qat=o(" \u2014 "),$N=a("a"),Hat=o("FlaxT5ForConditionalGeneration"),Uat=o(" (T5 model)"),Jat=l(),zfe=a("p"),Kat=o("Examples:"),Yat=l(),f(Vy.$$.fragment),JCe=l(),dm=a("h2"),Z0=a("a"),Xfe=a("span"),f(Qy.$$.fragment),Zat=l(),Wfe=a("span"),ent=o("FlaxAutoModelForSequenceClassification"),KCe=l(),_t=a("div"),f(Hy.$$.fragment),ont=l(),mm=a("p"),tnt=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence classification head) when created with the `),Vfe=a("code"),rnt=o("from_pretrained()"),ant=o(` class method or the `),Qfe=a("code"),nnt=o("from_config()"),snt=o(" class method."),lnt=l(),Uy=a("p"),int=o("This class cannot be instantiated directly using "),Hfe=a("code"),dnt=o("__init__()"),mnt=o(" (throws an error)."),fnt=l(),ir=a("div"),f(Jy.$$.fragment),cnt=l(),Ufe=a("p"),gnt=o("Instantiates one of the model classes of the library (with a sequence classification head) from a configuration."),hnt=l(),fm=a("p"),unt=o(`Note: Loading a model from its configuration file does `),Jfe=a("strong"),pnt=o("not"),_nt=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Kfe=a("em"),vnt=o("~FlaxAutoModelForSequenceClassification.from_pretrained"),bnt=o(`] to load the model weights.`),Tnt=l(),Yfe=a("p"),Fnt=o("Examples:"),Mnt=l(),f(Ky.$$.fragment),Ent=l(),To=a("div"),f(Yy.$$.fragment),Cnt=l(),Zfe=a("p"),ynt=o("Instantiate one of the model classes of the library (with a sequence classification head) from a pretrained model."),wnt=l(),nn=a("p"),Ant=o("The model class to instantiate is selected based on the "),ece=a("em"),xnt=o("model_type"),Lnt=o(` property of the config object (either passed as an argument or loaded from `),oce=a("em"),Bnt=o("pretrained_model_name_or_path"),knt=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),tce=a("em"),Rnt=o("pretrained_model_name_or_path"),Snt=o(":"),Pnt=l(),Ee=a("ul"),eT=a("li"),rce=a("strong"),$nt=o("albert"),Int=o(" \u2014 "),IN=a("a"),jnt=o("FlaxAlbertForSequenceClassification"),Nnt=o(" (ALBERT model)"),Dnt=l(),oT=a("li"),ace=a("strong"),Gnt=o("bart"),Ont=o(" \u2014 "),jN=a("a"),qnt=o("FlaxBartForSequenceClassification"),znt=o(" (BART model)"),Xnt=l(),tT=a("li"),nce=a("strong"),Wnt=o("bert"),Vnt=o(" \u2014 "),NN=a("a"),Qnt=o("FlaxBertForSequenceClassification"),Hnt=o(" (BERT model)"),Unt=l(),rT=a("li"),sce=a("strong"),Jnt=o("big_bird"),Knt=o(" \u2014 "),DN=a("a"),Ynt=o("FlaxBigBirdForSequenceClassification"),Znt=o(" (BigBird model)"),est=l(),aT=a("li"),lce=a("strong"),ost=o("distilbert"),tst=o(" \u2014 "),GN=a("a"),rst=o("FlaxDistilBertForSequenceClassification"),ast=o(" (DistilBERT model)"),nst=l(),nT=a("li"),ice=a("strong"),sst=o("electra"),lst=o(" \u2014 "),ON=a("a"),ist=o("FlaxElectraForSequenceClassification"),dst=o(" (ELECTRA model)"),mst=l(),sT=a("li"),dce=a("strong"),fst=o("mbart"),cst=o(" \u2014 "),qN=a("a"),gst=o("FlaxMBartForSequenceClassification"),hst=o(" (mBART model)"),ust=l(),lT=a("li"),mce=a("strong"),pst=o("roberta"),_st=o(" \u2014 "),zN=a("a"),vst=o("FlaxRobertaForSequenceClassification"),bst=o(" (RoBERTa model)"),Tst=l(),fce=a("p"),Fst=o("Examples:"),Mst=l(),f(Zy.$$.fragment),YCe=l(),cm=a("h2"),iT=a("a"),cce=a("span"),f(ew.$$.fragment),Est=l(),gce=a("span"),Cst=o("FlaxAutoModelForQuestionAnswering"),ZCe=l(),vt=a("div"),f(ow.$$.fragment),yst=l(),gm=a("p"),wst=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a question answering head) when created with the `),hce=a("code"),Ast=o("from_pretrained()"),xst=o(` class method or the `),uce=a("code"),Lst=o("from_config()"),Bst=o(" class method."),kst=l(),tw=a("p"),Rst=o("This class cannot be instantiated directly using "),pce=a("code"),Sst=o("__init__()"),Pst=o(" (throws an error)."),$st=l(),dr=a("div"),f(rw.$$.fragment),Ist=l(),_ce=a("p"),jst=o("Instantiates one of the model classes of the library (with a question answering head) from a configuration."),Nst=l(),hm=a("p"),Dst=o(`Note: Loading a model from its configuration file does `),vce=a("strong"),Gst=o("not"),Ost=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),bce=a("em"),qst=o("~FlaxAutoModelForQuestionAnswering.from_pretrained"),zst=o(`] to load the model weights.`),Xst=l(),Tce=a("p"),Wst=o("Examples:"),Vst=l(),f(aw.$$.fragment),Qst=l(),Fo=a("div"),f(nw.$$.fragment),Hst=l(),Fce=a("p"),Ust=o("Instantiate one of the model classes of the library (with a question answering head) from a pretrained model."),Jst=l(),sn=a("p"),Kst=o("The model class to instantiate is selected based on the "),Mce=a("em"),Yst=o("model_type"),Zst=o(` property of the config object (either passed as an argument or loaded from `),Ece=a("em"),elt=o("pretrained_model_name_or_path"),olt=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Cce=a("em"),tlt=o("pretrained_model_name_or_path"),rlt=o(":"),alt=l(),Ce=a("ul"),dT=a("li"),yce=a("strong"),nlt=o("albert"),slt=o(" \u2014 "),XN=a("a"),llt=o("FlaxAlbertForQuestionAnswering"),ilt=o(" (ALBERT model)"),dlt=l(),mT=a("li"),wce=a("strong"),mlt=o("bart"),flt=o(" \u2014 "),WN=a("a"),clt=o("FlaxBartForQuestionAnswering"),glt=o(" (BART model)"),hlt=l(),fT=a("li"),Ace=a("strong"),ult=o("bert"),plt=o(" \u2014 "),VN=a("a"),_lt=o("FlaxBertForQuestionAnswering"),vlt=o(" (BERT model)"),blt=l(),cT=a("li"),xce=a("strong"),Tlt=o("big_bird"),Flt=o(" \u2014 "),QN=a("a"),Mlt=o("FlaxBigBirdForQuestionAnswering"),Elt=o(" (BigBird model)"),Clt=l(),gT=a("li"),Lce=a("strong"),ylt=o("distilbert"),wlt=o(" \u2014 "),HN=a("a"),Alt=o("FlaxDistilBertForQuestionAnswering"),xlt=o(" (DistilBERT model)"),Llt=l(),hT=a("li"),Bce=a("strong"),Blt=o("electra"),klt=o(" \u2014 "),UN=a("a"),Rlt=o("FlaxElectraForQuestionAnswering"),Slt=o(" (ELECTRA model)"),Plt=l(),uT=a("li"),kce=a("strong"),$lt=o("mbart"),Ilt=o(" \u2014 "),JN=a("a"),jlt=o("FlaxMBartForQuestionAnswering"),Nlt=o(" (mBART model)"),Dlt=l(),pT=a("li"),Rce=a("strong"),Glt=o("roberta"),Olt=o(" \u2014 "),KN=a("a"),qlt=o("FlaxRobertaForQuestionAnswering"),zlt=o(" (RoBERTa model)"),Xlt=l(),Sce=a("p"),Wlt=o("Examples:"),Vlt=l(),f(sw.$$.fragment),e3e=l(),um=a("h2"),_T=a("a"),Pce=a("span"),f(lw.$$.fragment),Qlt=l(),$ce=a("span"),Hlt=o("FlaxAutoModelForTokenClassification"),o3e=l(),bt=a("div"),f(iw.$$.fragment),Ult=l(),pm=a("p"),Jlt=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a token classification head) when created with the `),Ice=a("code"),Klt=o("from_pretrained()"),Ylt=o(` class method or the `),jce=a("code"),Zlt=o("from_config()"),eit=o(" class method."),oit=l(),dw=a("p"),tit=o("This class cannot be instantiated directly using "),Nce=a("code"),rit=o("__init__()"),ait=o(" (throws an error)."),nit=l(),mr=a("div"),f(mw.$$.fragment),sit=l(),Dce=a("p"),lit=o("Instantiates one of the model classes of the library (with a token classification head) from a configuration."),iit=l(),_m=a("p"),dit=o(`Note: Loading a model from its configuration file does `),Gce=a("strong"),mit=o("not"),fit=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Oce=a("em"),cit=o("~FlaxAutoModelForTokenClassification.from_pretrained"),git=o(`] to load the model weights.`),hit=l(),qce=a("p"),uit=o("Examples:"),pit=l(),f(fw.$$.fragment),_it=l(),Mo=a("div"),f(cw.$$.fragment),vit=l(),zce=a("p"),bit=o("Instantiate one of the model classes of the library (with a token classification head) from a pretrained model."),Tit=l(),ln=a("p"),Fit=o("The model class to instantiate is selected based on the "),Xce=a("em"),Mit=o("model_type"),Eit=o(` property of the config object (either passed as an argument or loaded from `),Wce=a("em"),Cit=o("pretrained_model_name_or_path"),yit=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Vce=a("em"),wit=o("pretrained_model_name_or_path"),Ait=o(":"),xit=l(),Tt=a("ul"),vT=a("li"),Qce=a("strong"),Lit=o("albert"),Bit=o(" \u2014 "),YN=a("a"),kit=o("FlaxAlbertForTokenClassification"),Rit=o(" (ALBERT model)"),Sit=l(),bT=a("li"),Hce=a("strong"),Pit=o("bert"),$it=o(" \u2014 "),ZN=a("a"),Iit=o("FlaxBertForTokenClassification"),jit=o(" (BERT model)"),Nit=l(),TT=a("li"),Uce=a("strong"),Dit=o("big_bird"),Git=o(" \u2014 "),eD=a("a"),Oit=o("FlaxBigBirdForTokenClassification"),qit=o(" (BigBird model)"),zit=l(),FT=a("li"),Jce=a("strong"),Xit=o("distilbert"),Wit=o(" \u2014 "),oD=a("a"),Vit=o("FlaxDistilBertForTokenClassification"),Qit=o(" (DistilBERT model)"),Hit=l(),MT=a("li"),Kce=a("strong"),Uit=o("electra"),Jit=o(" \u2014 "),tD=a("a"),Kit=o("FlaxElectraForTokenClassification"),Yit=o(" (ELECTRA model)"),Zit=l(),ET=a("li"),Yce=a("strong"),edt=o("roberta"),odt=o(" \u2014 "),rD=a("a"),tdt=o("FlaxRobertaForTokenClassification"),rdt=o(" (RoBERTa model)"),adt=l(),Zce=a("p"),ndt=o("Examples:"),sdt=l(),f(gw.$$.fragment),t3e=l(),vm=a("h2"),CT=a("a"),ege=a("span"),f(hw.$$.fragment),ldt=l(),oge=a("span"),idt=o("FlaxAutoModelForMultipleChoice"),r3e=l(),Ft=a("div"),f(uw.$$.fragment),ddt=l(),bm=a("p"),mdt=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a multiple choice head) when created with the `),tge=a("code"),fdt=o("from_pretrained()"),cdt=o(` class method or the `),rge=a("code"),gdt=o("from_config()"),hdt=o(" class method."),udt=l(),pw=a("p"),pdt=o("This class cannot be instantiated directly using "),age=a("code"),_dt=o("__init__()"),vdt=o(" (throws an error)."),bdt=l(),fr=a("div"),f(_w.$$.fragment),Tdt=l(),nge=a("p"),Fdt=o("Instantiates one of the model classes of the library (with a multiple choice head) from a configuration."),Mdt=l(),Tm=a("p"),Edt=o(`Note: Loading a model from its configuration file does `),sge=a("strong"),Cdt=o("not"),ydt=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),lge=a("em"),wdt=o("~FlaxAutoModelForMultipleChoice.from_pretrained"),Adt=o(`] to load the model weights.`),xdt=l(),ige=a("p"),Ldt=o("Examples:"),Bdt=l(),f(vw.$$.fragment),kdt=l(),Eo=a("div"),f(bw.$$.fragment),Rdt=l(),dge=a("p"),Sdt=o("Instantiate one of the model classes of the library (with a multiple choice head) from a pretrained model."),Pdt=l(),dn=a("p"),$dt=o("The model class to instantiate is selected based on the "),mge=a("em"),Idt=o("model_type"),jdt=o(` property of the config object (either passed as an argument or loaded from `),fge=a("em"),Ndt=o("pretrained_model_name_or_path"),Ddt=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),cge=a("em"),Gdt=o("pretrained_model_name_or_path"),Odt=o(":"),qdt=l(),Mt=a("ul"),yT=a("li"),gge=a("strong"),zdt=o("albert"),Xdt=o(" \u2014 "),aD=a("a"),Wdt=o("FlaxAlbertForMultipleChoice"),Vdt=o(" (ALBERT model)"),Qdt=l(),wT=a("li"),hge=a("strong"),Hdt=o("bert"),Udt=o(" \u2014 "),nD=a("a"),Jdt=o("FlaxBertForMultipleChoice"),Kdt=o(" (BERT model)"),Ydt=l(),AT=a("li"),uge=a("strong"),Zdt=o("big_bird"),emt=o(" \u2014 "),sD=a("a"),omt=o("FlaxBigBirdForMultipleChoice"),tmt=o(" (BigBird model)"),rmt=l(),xT=a("li"),pge=a("strong"),amt=o("distilbert"),nmt=o(" \u2014 "),lD=a("a"),smt=o("FlaxDistilBertForMultipleChoice"),lmt=o(" (DistilBERT model)"),imt=l(),LT=a("li"),_ge=a("strong"),dmt=o("electra"),mmt=o(" \u2014 "),iD=a("a"),fmt=o("FlaxElectraForMultipleChoice"),cmt=o(" (ELECTRA model)"),gmt=l(),BT=a("li"),vge=a("strong"),hmt=o("roberta"),umt=o(" \u2014 "),dD=a("a"),pmt=o("FlaxRobertaForMultipleChoice"),_mt=o(" (RoBERTa model)"),vmt=l(),bge=a("p"),bmt=o("Examples:"),Tmt=l(),f(Tw.$$.fragment),a3e=l(),Fm=a("h2"),kT=a("a"),Tge=a("span"),f(Fw.$$.fragment),Fmt=l(),Fge=a("span"),Mmt=o("FlaxAutoModelForNextSentencePrediction"),n3e=l(),Et=a("div"),f(Mw.$$.fragment),Emt=l(),Mm=a("p"),Cmt=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a next sentence prediction head) when created with the `),Mge=a("code"),ymt=o("from_pretrained()"),wmt=o(` class method or the `),Ege=a("code"),Amt=o("from_config()"),xmt=o(" class method."),Lmt=l(),Ew=a("p"),Bmt=o("This class cannot be instantiated directly using "),Cge=a("code"),kmt=o("__init__()"),Rmt=o(" (throws an error)."),Smt=l(),cr=a("div"),f(Cw.$$.fragment),Pmt=l(),yge=a("p"),$mt=o("Instantiates one of the model classes of the library (with a next sentence prediction head) from a configuration."),Imt=l(),Em=a("p"),jmt=o(`Note: Loading a model from its configuration file does `),wge=a("strong"),Nmt=o("not"),Dmt=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Age=a("em"),Gmt=o("~FlaxAutoModelForNextSentencePrediction.from_pretrained"),Omt=o(`] to load the model weights.`),qmt=l(),xge=a("p"),zmt=o("Examples:"),Xmt=l(),f(yw.$$.fragment),Wmt=l(),Co=a("div"),f(ww.$$.fragment),Vmt=l(),Lge=a("p"),Qmt=o("Instantiate one of the model classes of the library (with a next sentence prediction head) from a pretrained model."),Hmt=l(),mn=a("p"),Umt=o("The model class to instantiate is selected based on the "),Bge=a("em"),Jmt=o("model_type"),Kmt=o(` property of the config object (either passed as an argument or loaded from `),kge=a("em"),Ymt=o("pretrained_model_name_or_path"),Zmt=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Rge=a("em"),eft=o("pretrained_model_name_or_path"),oft=o(":"),tft=l(),Sge=a("ul"),RT=a("li"),Pge=a("strong"),rft=o("bert"),aft=o(" \u2014 "),mD=a("a"),nft=o("FlaxBertForNextSentencePrediction"),sft=o(" (BERT model)"),lft=l(),$ge=a("p"),ift=o("Examples:"),dft=l(),f(Aw.$$.fragment),s3e=l(),Cm=a("h2"),ST=a("a"),Ige=a("span"),f(xw.$$.fragment),mft=l(),jge=a("span"),fft=o("FlaxAutoModelForImageClassification"),l3e=l(),Ct=a("div"),f(Lw.$$.fragment),cft=l(),ym=a("p"),gft=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a image classification head) when created with the `),Nge=a("code"),hft=o("from_pretrained()"),uft=o(` class method or the `),Dge=a("code"),pft=o("from_config()"),_ft=o(" class method."),vft=l(),Bw=a("p"),bft=o("This class cannot be instantiated directly using "),Gge=a("code"),Tft=o("__init__()"),Fft=o(" (throws an error)."),Mft=l(),gr=a("div"),f(kw.$$.fragment),Eft=l(),Oge=a("p"),Cft=o("Instantiates one of the model classes of the library (with a image classification head) from a configuration."),yft=l(),wm=a("p"),wft=o(`Note: Loading a model from its configuration file does `),qge=a("strong"),Aft=o("not"),xft=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),zge=a("em"),Lft=o("~FlaxAutoModelForImageClassification.from_pretrained"),Bft=o(`] to load the model weights.`),kft=l(),Xge=a("p"),Rft=o("Examples:"),Sft=l(),f(Rw.$$.fragment),Pft=l(),yo=a("div"),f(Sw.$$.fragment),$ft=l(),Wge=a("p"),Ift=o("Instantiate one of the model classes of the library (with a image classification head) from a pretrained model."),jft=l(),fn=a("p"),Nft=o("The model class to instantiate is selected based on the "),Vge=a("em"),Dft=o("model_type"),Gft=o(` property of the config object (either passed as an argument or loaded from `),Qge=a("em"),Oft=o("pretrained_model_name_or_path"),qft=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Hge=a("em"),zft=o("pretrained_model_name_or_path"),Xft=o(":"),Wft=l(),Pw=a("ul"),PT=a("li"),Uge=a("strong"),Vft=o("beit"),Qft=o(" \u2014 "),fD=a("a"),Hft=o("FlaxBeitForImageClassification"),Uft=o(" (BEiT model)"),Jft=l(),$T=a("li"),Jge=a("strong"),Kft=o("vit"),Yft=o(" \u2014 "),cD=a("a"),Zft=o("FlaxViTForImageClassification"),ect=o(" (ViT model)"),oct=l(),Kge=a("p"),tct=o("Examples:"),rct=l(),f($w.$$.fragment),i3e=l(),Am=a("h2"),IT=a("a"),Yge=a("span"),f(Iw.$$.fragment),act=l(),Zge=a("span"),nct=o("FlaxAutoModelForVision2Seq"),d3e=l(),yt=a("div"),f(jw.$$.fragment),sct=l(),xm=a("p"),lct=o(`This is a generic model class that will be instantiated as one of the model classes of the library (with a vision-to-text modeling head) when created with the `),ehe=a("code"),ict=o("from_pretrained()"),dct=o(` class method or the `),ohe=a("code"),mct=o("from_config()"),fct=o(" class method."),cct=l(),Nw=a("p"),gct=o("This class cannot be instantiated directly using "),the=a("code"),hct=o("__init__()"),uct=o(" (throws an error)."),pct=l(),hr=a("div"),f(Dw.$$.fragment),_ct=l(),rhe=a("p"),vct=o("Instantiates one of the model classes of the library (with a vision-to-text modeling head) from a configuration."),bct=l(),Lm=a("p"),Tct=o(`Note: Loading a model from its configuration file does `),ahe=a("strong"),Fct=o("not"),Mct=o(` load the model weights. It only affects the model\u2019s configuration. 
Use [`),nhe=a("em"),Ect=o("~FlaxAutoModelForVision2Seq.from_pretrained"),Cct=o(`] to load the model weights.`),yct=l(),she=a("p"),wct=o("Examples:"),Act=l(),f(Gw.$$.fragment),xct=l(),wo=a("div"),f(Ow.$$.fragment),Lct=l(),lhe=a("p"),Bct=o("Instantiate one of the model classes of the library (with a vision-to-text modeling head) from a pretrained model."),kct=l(),cn=a("p"),Rct=o("The model class to instantiate is selected based on the "),ihe=a("em"),Sct=o("model_type"),Pct=o(` property of the config object (either passed as an argument or loaded from `),dhe=a("em"),$ct=o("pretrained_model_name_or_path"),Ict=o(` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),mhe=a("em"),jct=o("pretrained_model_name_or_path"),Nct=o(":"),Dct=l(),fhe=a("ul"),jT=a("li"),che=a("strong"),Gct=o("vision-encoder-decoder"),Oct=o(" \u2014 "),gD=a("a"),qct=o("FlaxVisionEncoderDecoderModel"),zct=o(" (Vision Encoder decoder model)"),Xct=l(),ghe=a("p"),Wct=o("Examples:"),Vct=l(),f(qw.$$.fragment),this.h()},l(d){const _=OVt('[data-svelte="svelte-1phssyn"]',document.head);J=n(_,"META",{name:!0,content:!0}),_.forEach(r),ye=i(d),se=n(d,"H1",{class:!0});var zw=s(se);de=n(zw,"A",{id:!0,class:!0,href:!0});var hhe=s(de);Ue=n(hhe,"SPAN",{});var uhe=s(Ue);c(ie.$$.fragment,uhe),uhe.forEach(r),hhe.forEach(r),ue=i(zw),Bo=n(zw,"SPAN",{});var Hct=s(Bo);Vl=t(Hct,"Auto Classes"),Hct.forEach(r),zw.forEach(r),km=i(d),zr=n(d,"P",{});var f3e=s(zr);Ql=t(f3e,`In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you are supplying to the `),Hl=n(f3e,"CODE",{});var Uct=s(Hl);RF=t(Uct,"from_pretrained()"),Uct.forEach(r),Rm=t(f3e,` method. AutoClasses are here to do this job for you so that you automatically retrieve the relevant model given the name/path to the pretrained weights/config/vocabulary.`),f3e.forEach(r),Fe=i(d),Ze=n(d,"P",{});var NT=s(Ze);Ul=t(NT,"Instantiating one of "),gn=n(NT,"A",{href:!0});var Jct=s(gn);SF=t(Jct,"AutoConfig"),Jct.forEach(r),hn=t(NT,", "),un=n(NT,"A",{href:!0});var Kct=s(un);PF=t(Kct,"AutoModel"),Kct.forEach(r),Jl=t(NT,`, and `),pn=n(NT,"A",{href:!0});var Yct=s(pn);$F=t(Yct,"AutoTokenizer"),Yct.forEach(r),Kl=t(NT," will directly create a class of the relevant architecture. For instance"),NT.forEach(r),Sm=i(d),c(ba.$$.fragment,d),eo=i(d),me=n(d,"P",{});var c3e=s(me);IA=t(c3e,"will create a model that is an instance of "),Yl=n(c3e,"A",{href:!0});var Zct=s(Yl);jA=t(Zct,"BertModel"),Zct.forEach(r),NA=t(c3e,"."),c3e.forEach(r),ko=i(d),Ta=n(d,"P",{});var g3e=s(Ta);DA=t(g3e,"There is one class of "),Pm=n(g3e,"CODE",{});var egt=s(Pm);GA=t(egt,"AutoModel"),egt.forEach(r),dwe=t(g3e," for each task, and for each backend (PyTorch, TensorFlow, or Flax)."),g3e.forEach(r),bEe=i(d),Zl=n(d,"H2",{class:!0});var h3e=s(Zl);$m=n(h3e,"A",{id:!0,class:!0,href:!0});var ogt=s($m);YG=n(ogt,"SPAN",{});var tgt=s(YG);c(IF.$$.fragment,tgt),tgt.forEach(r),ogt.forEach(r),mwe=i(h3e),ZG=n(h3e,"SPAN",{});var rgt=s(ZG);fwe=t(rgt,"Extending the Auto Classes"),rgt.forEach(r),h3e.forEach(r),TEe=i(d),_n=n(d,"P",{});var hD=s(_n);cwe=t(hD,`Each of the auto classes has a method to be extended with your custom classes. 
For instance, if you have defined a custom class of model `),eO=n(hD,"CODE",{});var agt=s(eO);gwe=t(agt,"NewModel"),agt.forEach(r),hwe=t(hD,", make sure you have a "),oO=n(hD,"CODE",{});var ngt=s(oO);uwe=t(ngt,"NewModelConfig"),ngt.forEach(r),pwe=t(hD,` then you can add those to the auto classes like this:`),hD.forEach(r),FEe=i(d),c(jF.$$.fragment,d),MEe=i(d),OA=n(d,"P",{});var sgt=s(OA);_we=t(sgt,"You will then be able to use the auto classes like you would usually do!"),sgt.forEach(r),EEe=i(d),c(Im.$$.fragment,d),CEe=i(d),ei=n(d,"H2",{class:!0});var u3e=s(ei);jm=n(u3e,"A",{id:!0,class:!0,href:!0});var lgt=s(jm);tO=n(lgt,"SPAN",{});var igt=s(tO);c(NF.$$.fragment,igt),igt.forEach(r),lgt.forEach(r),vwe=i(u3e),rO=n(u3e,"SPAN",{});var dgt=s(rO);bwe=t(dgt,"AutoConfig"),dgt.forEach(r),u3e.forEach(r),yEe=i(d),Ro=n(d,"DIV",{class:!0});var ms=s(Ro);c(DF.$$.fragment,ms),Twe=i(ms),GF=n(ms,"P",{});var p3e=s(GF);Fwe=t(p3e,`This is a generic configuration class that will be instantiated as one of the configuration classes of the library when created with the `),qA=n(p3e,"A",{href:!0});var mgt=s(qA);Mwe=t(mgt,"from_pretrained()"),mgt.forEach(r),Ewe=t(p3e," class method."),p3e.forEach(r),Cwe=i(ms),OF=n(ms,"P",{});var _3e=s(OF);ywe=t(_3e,"This class cannot be instantiated directly using "),aO=n(_3e,"CODE",{});var fgt=s(aO);wwe=t(fgt,"__init__()"),fgt.forEach(r),Awe=t(_3e," (throws an error)."),_3e.forEach(r),xwe=i(ms),oo=n(ms,"DIV",{class:!0});var Wr=s(oo);c(qF.$$.fragment,Wr),Lwe=i(Wr),nO=n(Wr,"P",{});var cgt=s(nO);Bwe=t(cgt,"Instantiate one of the configuration classes of the library from a pretrained model configuration."),cgt.forEach(r),kwe=i(Wr),oi=n(Wr,"P",{});var uD=s(oi);Rwe=t(uD,"The configuration class to instantiate is selected based on the "),sO=n(uD,"EM",{});var ggt=s(sO);Swe=t(ggt,"model_type"),ggt.forEach(r),Pwe=t(uD,` property of the config object that is loaded, or when it\u2019s missing, by falling back to using pattern matching on `),lO=n(uD,"EM",{});var hgt=s(lO);$we=t(hgt,"pretrained_model_name_or_path"),hgt.forEach(r),Iwe=t(uD,":"),uD.forEach(r),jwe=i(Wr),b=n(Wr,"UL",{});var T=s(b);Nm=n(T,"LI",{});var phe=s(Nm);iO=n(phe,"STRONG",{});var ugt=s(iO);Nwe=t(ugt,"albert"),ugt.forEach(r),Dwe=t(phe," \u2014 "),zA=n(phe,"A",{href:!0});var pgt=s(zA);Gwe=t(pgt,"AlbertConfig"),pgt.forEach(r),Owe=t(phe," (ALBERT model)"),phe.forEach(r),qwe=i(T),Dm=n(T,"LI",{});var _he=s(Dm);dO=n(_he,"STRONG",{});var _gt=s(dO);zwe=t(_gt,"bart"),_gt.forEach(r),Xwe=t(_he," \u2014 "),XA=n(_he,"A",{href:!0});var vgt=s(XA);Wwe=t(vgt,"BartConfig"),vgt.forEach(r),Vwe=t(_he," (BART model)"),_he.forEach(r),Qwe=i(T),Gm=n(T,"LI",{});var vhe=s(Gm);mO=n(vhe,"STRONG",{});var bgt=s(mO);Hwe=t(bgt,"beit"),bgt.forEach(r),Uwe=t(vhe," \u2014 "),WA=n(vhe,"A",{href:!0});var Tgt=s(WA);Jwe=t(Tgt,"BeitConfig"),Tgt.forEach(r),Kwe=t(vhe," (BEiT model)"),vhe.forEach(r),Ywe=i(T),Om=n(T,"LI",{});var bhe=s(Om);fO=n(bhe,"STRONG",{});var Fgt=s(fO);Zwe=t(Fgt,"bert"),Fgt.forEach(r),eAe=t(bhe," \u2014 "),VA=n(bhe,"A",{href:!0});var Mgt=s(VA);oAe=t(Mgt,"BertConfig"),Mgt.forEach(r),tAe=t(bhe," (BERT model)"),bhe.forEach(r),rAe=i(T),qm=n(T,"LI",{});var The=s(qm);cO=n(The,"STRONG",{});var Egt=s(cO);aAe=t(Egt,"bert-generation"),Egt.forEach(r),nAe=t(The," \u2014 "),QA=n(The,"A",{href:!0});var Cgt=s(QA);sAe=t(Cgt,"BertGenerationConfig"),Cgt.forEach(r),lAe=t(The," (Bert Generation model)"),The.forEach(r),iAe=i(T),zm=n(T,"LI",{});var Fhe=s(zm);gO=n(Fhe,"STRONG",{});var ygt=s(gO);dAe=t(ygt,"big_bird"),ygt.forEach(r),mAe=t(Fhe," \u2014 
"),HA=n(Fhe,"A",{href:!0});var wgt=s(HA);fAe=t(wgt,"BigBirdConfig"),wgt.forEach(r),cAe=t(Fhe," (BigBird model)"),Fhe.forEach(r),gAe=i(T),Xm=n(T,"LI",{});var Mhe=s(Xm);hO=n(Mhe,"STRONG",{});var Agt=s(hO);hAe=t(Agt,"bigbird_pegasus"),Agt.forEach(r),uAe=t(Mhe," \u2014 "),UA=n(Mhe,"A",{href:!0});var xgt=s(UA);pAe=t(xgt,"BigBirdPegasusConfig"),xgt.forEach(r),_Ae=t(Mhe," (BigBirdPegasus model)"),Mhe.forEach(r),vAe=i(T),Wm=n(T,"LI",{});var Ehe=s(Wm);uO=n(Ehe,"STRONG",{});var Lgt=s(uO);bAe=t(Lgt,"blenderbot"),Lgt.forEach(r),TAe=t(Ehe," \u2014 "),JA=n(Ehe,"A",{href:!0});var Bgt=s(JA);FAe=t(Bgt,"BlenderbotConfig"),Bgt.forEach(r),MAe=t(Ehe," (Blenderbot model)"),Ehe.forEach(r),EAe=i(T),Vm=n(T,"LI",{});var Che=s(Vm);pO=n(Che,"STRONG",{});var kgt=s(pO);CAe=t(kgt,"blenderbot-small"),kgt.forEach(r),yAe=t(Che," \u2014 "),KA=n(Che,"A",{href:!0});var Rgt=s(KA);wAe=t(Rgt,"BlenderbotSmallConfig"),Rgt.forEach(r),AAe=t(Che," (BlenderbotSmall model)"),Che.forEach(r),xAe=i(T),Qm=n(T,"LI",{});var yhe=s(Qm);_O=n(yhe,"STRONG",{});var Sgt=s(_O);LAe=t(Sgt,"camembert"),Sgt.forEach(r),BAe=t(yhe," \u2014 "),YA=n(yhe,"A",{href:!0});var Pgt=s(YA);kAe=t(Pgt,"CamembertConfig"),Pgt.forEach(r),RAe=t(yhe," (CamemBERT model)"),yhe.forEach(r),SAe=i(T),Hm=n(T,"LI",{});var whe=s(Hm);vO=n(whe,"STRONG",{});var $gt=s(vO);PAe=t($gt,"canine"),$gt.forEach(r),$Ae=t(whe," \u2014 "),ZA=n(whe,"A",{href:!0});var Igt=s(ZA);IAe=t(Igt,"CanineConfig"),Igt.forEach(r),jAe=t(whe," (Canine model)"),whe.forEach(r),NAe=i(T),Um=n(T,"LI",{});var Ahe=s(Um);bO=n(Ahe,"STRONG",{});var jgt=s(bO);DAe=t(jgt,"clip"),jgt.forEach(r),GAe=t(Ahe," \u2014 "),e7=n(Ahe,"A",{href:!0});var Ngt=s(e7);OAe=t(Ngt,"CLIPConfig"),Ngt.forEach(r),qAe=t(Ahe," (CLIP model)"),Ahe.forEach(r),zAe=i(T),Jm=n(T,"LI",{});var xhe=s(Jm);TO=n(xhe,"STRONG",{});var Dgt=s(TO);XAe=t(Dgt,"convbert"),Dgt.forEach(r),WAe=t(xhe," \u2014 "),o7=n(xhe,"A",{href:!0});var Ggt=s(o7);VAe=t(Ggt,"ConvBertConfig"),Ggt.forEach(r),QAe=t(xhe," (ConvBERT model)"),xhe.forEach(r),HAe=i(T),Km=n(T,"LI",{});var Lhe=s(Km);FO=n(Lhe,"STRONG",{});var Ogt=s(FO);UAe=t(Ogt,"ctrl"),Ogt.forEach(r),JAe=t(Lhe," \u2014 "),t7=n(Lhe,"A",{href:!0});var qgt=s(t7);KAe=t(qgt,"CTRLConfig"),qgt.forEach(r),YAe=t(Lhe," (CTRL model)"),Lhe.forEach(r),ZAe=i(T),Ym=n(T,"LI",{});var Bhe=s(Ym);MO=n(Bhe,"STRONG",{});var zgt=s(MO);e7e=t(zgt,"deberta"),zgt.forEach(r),o7e=t(Bhe," \u2014 "),r7=n(Bhe,"A",{href:!0});var Xgt=s(r7);t7e=t(Xgt,"DebertaConfig"),Xgt.forEach(r),r7e=t(Bhe," (DeBERTa model)"),Bhe.forEach(r),a7e=i(T),Zm=n(T,"LI",{});var khe=s(Zm);EO=n(khe,"STRONG",{});var Wgt=s(EO);n7e=t(Wgt,"deberta-v2"),Wgt.forEach(r),s7e=t(khe," \u2014 "),a7=n(khe,"A",{href:!0});var Vgt=s(a7);l7e=t(Vgt,"DebertaV2Config"),Vgt.forEach(r),i7e=t(khe," (DeBERTa-v2 model)"),khe.forEach(r),d7e=i(T),ef=n(T,"LI",{});var Rhe=s(ef);CO=n(Rhe,"STRONG",{});var Qgt=s(CO);m7e=t(Qgt,"deit"),Qgt.forEach(r),f7e=t(Rhe," \u2014 "),n7=n(Rhe,"A",{href:!0});var Hgt=s(n7);c7e=t(Hgt,"DeiTConfig"),Hgt.forEach(r),g7e=t(Rhe," (DeiT model)"),Rhe.forEach(r),h7e=i(T),of=n(T,"LI",{});var She=s(of);yO=n(She,"STRONG",{});var Ugt=s(yO);u7e=t(Ugt,"detr"),Ugt.forEach(r),p7e=t(She," \u2014 "),s7=n(She,"A",{href:!0});var Jgt=s(s7);_7e=t(Jgt,"DetrConfig"),Jgt.forEach(r),v7e=t(She," (DETR model)"),She.forEach(r),b7e=i(T),tf=n(T,"LI",{});var Phe=s(tf);wO=n(Phe,"STRONG",{});var Kgt=s(wO);T7e=t(Kgt,"distilbert"),Kgt.forEach(r),F7e=t(Phe," \u2014 "),l7=n(Phe,"A",{href:!0});var Ygt=s(l7);M7e=t(Ygt,"DistilBertConfig"),Ygt.forEach(r),E7e=t(Phe," (DistilBERT model)"),Phe.forEach(r),C7e=i(T),rf=n(T,"LI",{});var 
$he=s(rf);AO=n($he,"STRONG",{});var Zgt=s(AO);y7e=t(Zgt,"dpr"),Zgt.forEach(r),w7e=t($he," \u2014 "),i7=n($he,"A",{href:!0});var eht=s(i7);A7e=t(eht,"DPRConfig"),eht.forEach(r),x7e=t($he," (DPR model)"),$he.forEach(r),L7e=i(T),af=n(T,"LI",{});var Ihe=s(af);xO=n(Ihe,"STRONG",{});var oht=s(xO);B7e=t(oht,"electra"),oht.forEach(r),k7e=t(Ihe," \u2014 "),d7=n(Ihe,"A",{href:!0});var tht=s(d7);R7e=t(tht,"ElectraConfig"),tht.forEach(r),S7e=t(Ihe," (ELECTRA model)"),Ihe.forEach(r),P7e=i(T),nf=n(T,"LI",{});var jhe=s(nf);LO=n(jhe,"STRONG",{});var rht=s(LO);$7e=t(rht,"encoder-decoder"),rht.forEach(r),I7e=t(jhe," \u2014 "),m7=n(jhe,"A",{href:!0});var aht=s(m7);j7e=t(aht,"EncoderDecoderConfig"),aht.forEach(r),N7e=t(jhe," (Encoder decoder model)"),jhe.forEach(r),D7e=i(T),sf=n(T,"LI",{});var Nhe=s(sf);BO=n(Nhe,"STRONG",{});var nht=s(BO);G7e=t(nht,"flaubert"),nht.forEach(r),O7e=t(Nhe," \u2014 "),f7=n(Nhe,"A",{href:!0});var sht=s(f7);q7e=t(sht,"FlaubertConfig"),sht.forEach(r),z7e=t(Nhe," (FlauBERT model)"),Nhe.forEach(r),X7e=i(T),lf=n(T,"LI",{});var Dhe=s(lf);kO=n(Dhe,"STRONG",{});var lht=s(kO);W7e=t(lht,"fnet"),lht.forEach(r),V7e=t(Dhe," \u2014 "),c7=n(Dhe,"A",{href:!0});var iht=s(c7);Q7e=t(iht,"FNetConfig"),iht.forEach(r),H7e=t(Dhe," (FNet model)"),Dhe.forEach(r),U7e=i(T),df=n(T,"LI",{});var Ghe=s(df);RO=n(Ghe,"STRONG",{});var dht=s(RO);J7e=t(dht,"fsmt"),dht.forEach(r),K7e=t(Ghe," \u2014 "),g7=n(Ghe,"A",{href:!0});var mht=s(g7);Y7e=t(mht,"FSMTConfig"),mht.forEach(r),Z7e=t(Ghe," (FairSeq Machine-Translation model)"),Ghe.forEach(r),exe=i(T),mf=n(T,"LI",{});var Ohe=s(mf);SO=n(Ohe,"STRONG",{});var fht=s(SO);oxe=t(fht,"funnel"),fht.forEach(r),txe=t(Ohe," \u2014 "),h7=n(Ohe,"A",{href:!0});var cht=s(h7);rxe=t(cht,"FunnelConfig"),cht.forEach(r),axe=t(Ohe," (Funnel Transformer model)"),Ohe.forEach(r),nxe=i(T),ff=n(T,"LI",{});var qhe=s(ff);PO=n(qhe,"STRONG",{});var ght=s(PO);sxe=t(ght,"gpt2"),ght.forEach(r),lxe=t(qhe," \u2014 "),u7=n(qhe,"A",{href:!0});var hht=s(u7);ixe=t(hht,"GPT2Config"),hht.forEach(r),dxe=t(qhe," (OpenAI GPT-2 model)"),qhe.forEach(r),mxe=i(T),cf=n(T,"LI",{});var zhe=s(cf);$O=n(zhe,"STRONG",{});var uht=s($O);fxe=t(uht,"gpt_neo"),uht.forEach(r),cxe=t(zhe," \u2014 "),p7=n(zhe,"A",{href:!0});var pht=s(p7);gxe=t(pht,"GPTNeoConfig"),pht.forEach(r),hxe=t(zhe," (GPT Neo model)"),zhe.forEach(r),uxe=i(T),gf=n(T,"LI",{});var Xhe=s(gf);IO=n(Xhe,"STRONG",{});var _ht=s(IO);pxe=t(_ht,"gptj"),_ht.forEach(r),_xe=t(Xhe," \u2014 "),_7=n(Xhe,"A",{href:!0});var vht=s(_7);vxe=t(vht,"GPTJConfig"),vht.forEach(r),bxe=t(Xhe," (GPT-J model)"),Xhe.forEach(r),Txe=i(T),hf=n(T,"LI",{});var Whe=s(hf);jO=n(Whe,"STRONG",{});var bht=s(jO);Fxe=t(bht,"hubert"),bht.forEach(r),Mxe=t(Whe," \u2014 "),v7=n(Whe,"A",{href:!0});var Tht=s(v7);Exe=t(Tht,"HubertConfig"),Tht.forEach(r),Cxe=t(Whe," (Hubert model)"),Whe.forEach(r),yxe=i(T),uf=n(T,"LI",{});var Vhe=s(uf);NO=n(Vhe,"STRONG",{});var Fht=s(NO);wxe=t(Fht,"ibert"),Fht.forEach(r),Axe=t(Vhe," \u2014 "),b7=n(Vhe,"A",{href:!0});var Mht=s(b7);xxe=t(Mht,"IBertConfig"),Mht.forEach(r),Lxe=t(Vhe," (I-BERT model)"),Vhe.forEach(r),Bxe=i(T),pf=n(T,"LI",{});var Qhe=s(pf);DO=n(Qhe,"STRONG",{});var Eht=s(DO);kxe=t(Eht,"imagegpt"),Eht.forEach(r),Rxe=t(Qhe," \u2014 "),T7=n(Qhe,"A",{href:!0});var Cht=s(T7);Sxe=t(Cht,"ImageGPTConfig"),Cht.forEach(r),Pxe=t(Qhe," (ImageGPT model)"),Qhe.forEach(r),$xe=i(T),_f=n(T,"LI",{});var Hhe=s(_f);GO=n(Hhe,"STRONG",{});var yht=s(GO);Ixe=t(yht,"layoutlm"),yht.forEach(r),jxe=t(Hhe," \u2014 "),F7=n(Hhe,"A",{href:!0});var 
wht=s(F7);Nxe=t(wht,"LayoutLMConfig"),wht.forEach(r),Dxe=t(Hhe," (LayoutLM model)"),Hhe.forEach(r),Gxe=i(T),vf=n(T,"LI",{});var Uhe=s(vf);OO=n(Uhe,"STRONG",{});var Aht=s(OO);Oxe=t(Aht,"layoutlmv2"),Aht.forEach(r),qxe=t(Uhe," \u2014 "),M7=n(Uhe,"A",{href:!0});var xht=s(M7);zxe=t(xht,"LayoutLMv2Config"),xht.forEach(r),Xxe=t(Uhe," (LayoutLMv2 model)"),Uhe.forEach(r),Wxe=i(T),bf=n(T,"LI",{});var Jhe=s(bf);qO=n(Jhe,"STRONG",{});var Lht=s(qO);Vxe=t(Lht,"led"),Lht.forEach(r),Qxe=t(Jhe," \u2014 "),E7=n(Jhe,"A",{href:!0});var Bht=s(E7);Hxe=t(Bht,"LEDConfig"),Bht.forEach(r),Uxe=t(Jhe," (LED model)"),Jhe.forEach(r),Jxe=i(T),Tf=n(T,"LI",{});var Khe=s(Tf);zO=n(Khe,"STRONG",{});var kht=s(zO);Kxe=t(kht,"longformer"),kht.forEach(r),Yxe=t(Khe," \u2014 "),C7=n(Khe,"A",{href:!0});var Rht=s(C7);Zxe=t(Rht,"LongformerConfig"),Rht.forEach(r),e6e=t(Khe," (Longformer model)"),Khe.forEach(r),o6e=i(T),Ff=n(T,"LI",{});var Yhe=s(Ff);XO=n(Yhe,"STRONG",{});var Sht=s(XO);t6e=t(Sht,"luke"),Sht.forEach(r),r6e=t(Yhe," \u2014 "),y7=n(Yhe,"A",{href:!0});var Pht=s(y7);a6e=t(Pht,"LukeConfig"),Pht.forEach(r),n6e=t(Yhe," (LUKE model)"),Yhe.forEach(r),s6e=i(T),Mf=n(T,"LI",{});var Zhe=s(Mf);WO=n(Zhe,"STRONG",{});var $ht=s(WO);l6e=t($ht,"lxmert"),$ht.forEach(r),i6e=t(Zhe," \u2014 "),w7=n(Zhe,"A",{href:!0});var Iht=s(w7);d6e=t(Iht,"LxmertConfig"),Iht.forEach(r),m6e=t(Zhe," (LXMERT model)"),Zhe.forEach(r),f6e=i(T),Ef=n(T,"LI",{});var eue=s(Ef);VO=n(eue,"STRONG",{});var jht=s(VO);c6e=t(jht,"m2m_100"),jht.forEach(r),g6e=t(eue," \u2014 "),A7=n(eue,"A",{href:!0});var Nht=s(A7);h6e=t(Nht,"M2M100Config"),Nht.forEach(r),u6e=t(eue," (M2M100 model)"),eue.forEach(r),p6e=i(T),Cf=n(T,"LI",{});var oue=s(Cf);QO=n(oue,"STRONG",{});var Dht=s(QO);_6e=t(Dht,"marian"),Dht.forEach(r),v6e=t(oue," \u2014 "),x7=n(oue,"A",{href:!0});var Ght=s(x7);b6e=t(Ght,"MarianConfig"),Ght.forEach(r),T6e=t(oue," (Marian model)"),oue.forEach(r),F6e=i(T),yf=n(T,"LI",{});var tue=s(yf);HO=n(tue,"STRONG",{});var Oht=s(HO);M6e=t(Oht,"mbart"),Oht.forEach(r),E6e=t(tue," \u2014 "),L7=n(tue,"A",{href:!0});var qht=s(L7);C6e=t(qht,"MBartConfig"),qht.forEach(r),y6e=t(tue," (mBART model)"),tue.forEach(r),w6e=i(T),wf=n(T,"LI",{});var rue=s(wf);UO=n(rue,"STRONG",{});var zht=s(UO);A6e=t(zht,"megatron-bert"),zht.forEach(r),x6e=t(rue," \u2014 "),B7=n(rue,"A",{href:!0});var Xht=s(B7);L6e=t(Xht,"MegatronBertConfig"),Xht.forEach(r),B6e=t(rue," (MegatronBert model)"),rue.forEach(r),k6e=i(T),Af=n(T,"LI",{});var aue=s(Af);JO=n(aue,"STRONG",{});var Wht=s(JO);R6e=t(Wht,"mobilebert"),Wht.forEach(r),S6e=t(aue," \u2014 "),k7=n(aue,"A",{href:!0});var Vht=s(k7);P6e=t(Vht,"MobileBertConfig"),Vht.forEach(r),$6e=t(aue," (MobileBERT model)"),aue.forEach(r),I6e=i(T),xf=n(T,"LI",{});var nue=s(xf);KO=n(nue,"STRONG",{});var Qht=s(KO);j6e=t(Qht,"mpnet"),Qht.forEach(r),N6e=t(nue," \u2014 "),R7=n(nue,"A",{href:!0});var Hht=s(R7);D6e=t(Hht,"MPNetConfig"),Hht.forEach(r),G6e=t(nue," (MPNet model)"),nue.forEach(r),O6e=i(T),Lf=n(T,"LI",{});var sue=s(Lf);YO=n(sue,"STRONG",{});var Uht=s(YO);q6e=t(Uht,"mt5"),Uht.forEach(r),z6e=t(sue," \u2014 "),S7=n(sue,"A",{href:!0});var Jht=s(S7);X6e=t(Jht,"MT5Config"),Jht.forEach(r),W6e=t(sue," (mT5 model)"),sue.forEach(r),V6e=i(T),Bf=n(T,"LI",{});var lue=s(Bf);ZO=n(lue,"STRONG",{});var Kht=s(ZO);Q6e=t(Kht,"openai-gpt"),Kht.forEach(r),H6e=t(lue," \u2014 "),P7=n(lue,"A",{href:!0});var Yht=s(P7);U6e=t(Yht,"OpenAIGPTConfig"),Yht.forEach(r),J6e=t(lue," (OpenAI GPT model)"),lue.forEach(r),K6e=i(T),kf=n(T,"LI",{});var iue=s(kf);eq=n(iue,"STRONG",{});var 
Zht=s(eq);Y6e=t(Zht,"pegasus"),Zht.forEach(r),Z6e=t(iue," \u2014 "),$7=n(iue,"A",{href:!0});var eut=s($7);e8e=t(eut,"PegasusConfig"),eut.forEach(r),o8e=t(iue," (Pegasus model)"),iue.forEach(r),t8e=i(T),Rf=n(T,"LI",{});var due=s(Rf);oq=n(due,"STRONG",{});var out=s(oq);r8e=t(out,"perceiver"),out.forEach(r),a8e=t(due," \u2014 "),I7=n(due,"A",{href:!0});var tut=s(I7);n8e=t(tut,"PerceiverConfig"),tut.forEach(r),s8e=t(due," (Perceiver model)"),due.forEach(r),l8e=i(T),Sf=n(T,"LI",{});var mue=s(Sf);tq=n(mue,"STRONG",{});var rut=s(tq);i8e=t(rut,"prophetnet"),rut.forEach(r),d8e=t(mue," \u2014 "),j7=n(mue,"A",{href:!0});var aut=s(j7);m8e=t(aut,"ProphetNetConfig"),aut.forEach(r),f8e=t(mue," (ProphetNet model)"),mue.forEach(r),c8e=i(T),Pf=n(T,"LI",{});var fue=s(Pf);rq=n(fue,"STRONG",{});var nut=s(rq);g8e=t(nut,"qdqbert"),nut.forEach(r),h8e=t(fue," \u2014 "),N7=n(fue,"A",{href:!0});var sut=s(N7);u8e=t(sut,"QDQBertConfig"),sut.forEach(r),p8e=t(fue," (QDQBert model)"),fue.forEach(r),_8e=i(T),$f=n(T,"LI",{});var cue=s($f);aq=n(cue,"STRONG",{});var lut=s(aq);v8e=t(lut,"rag"),lut.forEach(r),b8e=t(cue," \u2014 "),D7=n(cue,"A",{href:!0});var iut=s(D7);T8e=t(iut,"RagConfig"),iut.forEach(r),F8e=t(cue," (RAG model)"),cue.forEach(r),M8e=i(T),If=n(T,"LI",{});var gue=s(If);nq=n(gue,"STRONG",{});var dut=s(nq);E8e=t(dut,"reformer"),dut.forEach(r),C8e=t(gue," \u2014 "),G7=n(gue,"A",{href:!0});var mut=s(G7);y8e=t(mut,"ReformerConfig"),mut.forEach(r),w8e=t(gue," (Reformer model)"),gue.forEach(r),A8e=i(T),jf=n(T,"LI",{});var hue=s(jf);sq=n(hue,"STRONG",{});var fut=s(sq);x8e=t(fut,"rembert"),fut.forEach(r),L8e=t(hue," \u2014 "),O7=n(hue,"A",{href:!0});var cut=s(O7);B8e=t(cut,"RemBertConfig"),cut.forEach(r),k8e=t(hue," (RemBERT model)"),hue.forEach(r),R8e=i(T),Nf=n(T,"LI",{});var uue=s(Nf);lq=n(uue,"STRONG",{});var gut=s(lq);S8e=t(gut,"retribert"),gut.forEach(r),P8e=t(uue," \u2014 "),q7=n(uue,"A",{href:!0});var hut=s(q7);$8e=t(hut,"RetriBertConfig"),hut.forEach(r),I8e=t(uue," (RetriBERT model)"),uue.forEach(r),j8e=i(T),Df=n(T,"LI",{});var pue=s(Df);iq=n(pue,"STRONG",{});var uut=s(iq);N8e=t(uut,"roberta"),uut.forEach(r),D8e=t(pue," \u2014 "),z7=n(pue,"A",{href:!0});var put=s(z7);G8e=t(put,"RobertaConfig"),put.forEach(r),O8e=t(pue," (RoBERTa model)"),pue.forEach(r),q8e=i(T),Gf=n(T,"LI",{});var _ue=s(Gf);dq=n(_ue,"STRONG",{});var _ut=s(dq);z8e=t(_ut,"roformer"),_ut.forEach(r),X8e=t(_ue," \u2014 "),X7=n(_ue,"A",{href:!0});var vut=s(X7);W8e=t(vut,"RoFormerConfig"),vut.forEach(r),V8e=t(_ue," (RoFormer model)"),_ue.forEach(r),Q8e=i(T),Of=n(T,"LI",{});var vue=s(Of);mq=n(vue,"STRONG",{});var but=s(mq);H8e=t(but,"segformer"),but.forEach(r),U8e=t(vue," \u2014 "),W7=n(vue,"A",{href:!0});var Tut=s(W7);J8e=t(Tut,"SegformerConfig"),Tut.forEach(r),K8e=t(vue," (SegFormer model)"),vue.forEach(r),Y8e=i(T),qf=n(T,"LI",{});var bue=s(qf);fq=n(bue,"STRONG",{});var Fut=s(fq);Z8e=t(Fut,"sew"),Fut.forEach(r),eLe=t(bue," \u2014 "),V7=n(bue,"A",{href:!0});var Mut=s(V7);oLe=t(Mut,"SEWConfig"),Mut.forEach(r),tLe=t(bue," (SEW model)"),bue.forEach(r),rLe=i(T),zf=n(T,"LI",{});var Tue=s(zf);cq=n(Tue,"STRONG",{});var Eut=s(cq);aLe=t(Eut,"sew-d"),Eut.forEach(r),nLe=t(Tue," \u2014 "),Q7=n(Tue,"A",{href:!0});var Cut=s(Q7);sLe=t(Cut,"SEWDConfig"),Cut.forEach(r),lLe=t(Tue," (SEW-D model)"),Tue.forEach(r),iLe=i(T),Xf=n(T,"LI",{});var Fue=s(Xf);gq=n(Fue,"STRONG",{});var yut=s(gq);dLe=t(yut,"speech-encoder-decoder"),yut.forEach(r),mLe=t(Fue," \u2014 "),H7=n(Fue,"A",{href:!0});var wut=s(H7);fLe=t(wut,"SpeechEncoderDecoderConfig"),wut.forEach(r),cLe=t(Fue," (Speech 
Encoder decoder model)"),Fue.forEach(r),gLe=i(T),Wf=n(T,"LI",{});var Mue=s(Wf);hq=n(Mue,"STRONG",{});var Aut=s(hq);hLe=t(Aut,"speech_to_text"),Aut.forEach(r),uLe=t(Mue," \u2014 "),U7=n(Mue,"A",{href:!0});var xut=s(U7);pLe=t(xut,"Speech2TextConfig"),xut.forEach(r),_Le=t(Mue," (Speech2Text model)"),Mue.forEach(r),vLe=i(T),Vf=n(T,"LI",{});var Eue=s(Vf);uq=n(Eue,"STRONG",{});var Lut=s(uq);bLe=t(Lut,"speech_to_text_2"),Lut.forEach(r),TLe=t(Eue," \u2014 "),J7=n(Eue,"A",{href:!0});var But=s(J7);FLe=t(But,"Speech2Text2Config"),But.forEach(r),MLe=t(Eue," (Speech2Text2 model)"),Eue.forEach(r),ELe=i(T),Qf=n(T,"LI",{});var Cue=s(Qf);pq=n(Cue,"STRONG",{});var kut=s(pq);CLe=t(kut,"splinter"),kut.forEach(r),yLe=t(Cue," \u2014 "),K7=n(Cue,"A",{href:!0});var Rut=s(K7);wLe=t(Rut,"SplinterConfig"),Rut.forEach(r),ALe=t(Cue," (Splinter model)"),Cue.forEach(r),xLe=i(T),Hf=n(T,"LI",{});var yue=s(Hf);_q=n(yue,"STRONG",{});var Sut=s(_q);LLe=t(Sut,"squeezebert"),Sut.forEach(r),BLe=t(yue," \u2014 "),Y7=n(yue,"A",{href:!0});var Put=s(Y7);kLe=t(Put,"SqueezeBertConfig"),Put.forEach(r),RLe=t(yue," (SqueezeBERT model)"),yue.forEach(r),SLe=i(T),Uf=n(T,"LI",{});var wue=s(Uf);vq=n(wue,"STRONG",{});var $ut=s(vq);PLe=t($ut,"t5"),$ut.forEach(r),$Le=t(wue," \u2014 "),Z7=n(wue,"A",{href:!0});var Iut=s(Z7);ILe=t(Iut,"T5Config"),Iut.forEach(r),jLe=t(wue," (T5 model)"),wue.forEach(r),NLe=i(T),Jf=n(T,"LI",{});var Aue=s(Jf);bq=n(Aue,"STRONG",{});var jut=s(bq);DLe=t(jut,"tapas"),jut.forEach(r),GLe=t(Aue," \u2014 "),ex=n(Aue,"A",{href:!0});var Nut=s(ex);OLe=t(Nut,"TapasConfig"),Nut.forEach(r),qLe=t(Aue," (TAPAS model)"),Aue.forEach(r),zLe=i(T),Kf=n(T,"LI",{});var xue=s(Kf);Tq=n(xue,"STRONG",{});var Dut=s(Tq);XLe=t(Dut,"transfo-xl"),Dut.forEach(r),WLe=t(xue," \u2014 "),ox=n(xue,"A",{href:!0});var Gut=s(ox);VLe=t(Gut,"TransfoXLConfig"),Gut.forEach(r),QLe=t(xue," (Transformer-XL model)"),xue.forEach(r),HLe=i(T),Yf=n(T,"LI",{});var Lue=s(Yf);Fq=n(Lue,"STRONG",{});var Out=s(Fq);ULe=t(Out,"trocr"),Out.forEach(r),JLe=t(Lue," \u2014 "),tx=n(Lue,"A",{href:!0});var qut=s(tx);KLe=t(qut,"TrOCRConfig"),qut.forEach(r),YLe=t(Lue," (TrOCR model)"),Lue.forEach(r),ZLe=i(T),Zf=n(T,"LI",{});var Bue=s(Zf);Mq=n(Bue,"STRONG",{});var zut=s(Mq);eBe=t(zut,"unispeech"),zut.forEach(r),oBe=t(Bue," \u2014 "),rx=n(Bue,"A",{href:!0});var Xut=s(rx);tBe=t(Xut,"UniSpeechConfig"),Xut.forEach(r),rBe=t(Bue," (UniSpeech model)"),Bue.forEach(r),aBe=i(T),ec=n(T,"LI",{});var kue=s(ec);Eq=n(kue,"STRONG",{});var Wut=s(Eq);nBe=t(Wut,"unispeech-sat"),Wut.forEach(r),sBe=t(kue," \u2014 "),ax=n(kue,"A",{href:!0});var Vut=s(ax);lBe=t(Vut,"UniSpeechSatConfig"),Vut.forEach(r),iBe=t(kue," (UniSpeechSat model)"),kue.forEach(r),dBe=i(T),oc=n(T,"LI",{});var Rue=s(oc);Cq=n(Rue,"STRONG",{});var Qut=s(Cq);mBe=t(Qut,"vision-encoder-decoder"),Qut.forEach(r),fBe=t(Rue," \u2014 "),nx=n(Rue,"A",{href:!0});var Hut=s(nx);cBe=t(Hut,"VisionEncoderDecoderConfig"),Hut.forEach(r),gBe=t(Rue," (Vision Encoder decoder model)"),Rue.forEach(r),hBe=i(T),tc=n(T,"LI",{});var Sue=s(tc);yq=n(Sue,"STRONG",{});var Uut=s(yq);uBe=t(Uut,"vision-text-dual-encoder"),Uut.forEach(r),pBe=t(Sue," \u2014 "),sx=n(Sue,"A",{href:!0});var Jut=s(sx);_Be=t(Jut,"VisionTextDualEncoderConfig"),Jut.forEach(r),vBe=t(Sue," (VisionTextDualEncoder model)"),Sue.forEach(r),bBe=i(T),rc=n(T,"LI",{});var Pue=s(rc);wq=n(Pue,"STRONG",{});var Kut=s(wq);TBe=t(Kut,"visual_bert"),Kut.forEach(r),FBe=t(Pue," \u2014 "),lx=n(Pue,"A",{href:!0});var Yut=s(lx);MBe=t(Yut,"VisualBertConfig"),Yut.forEach(r),EBe=t(Pue," (VisualBert 
model)"),Pue.forEach(r),CBe=i(T),ac=n(T,"LI",{});var $ue=s(ac);Aq=n($ue,"STRONG",{});var Zut=s(Aq);yBe=t(Zut,"vit"),Zut.forEach(r),wBe=t($ue," \u2014 "),ix=n($ue,"A",{href:!0});var ept=s(ix);ABe=t(ept,"ViTConfig"),ept.forEach(r),xBe=t($ue," (ViT model)"),$ue.forEach(r),LBe=i(T),nc=n(T,"LI",{});var Iue=s(nc);xq=n(Iue,"STRONG",{});var opt=s(xq);BBe=t(opt,"wav2vec2"),opt.forEach(r),kBe=t(Iue," \u2014 "),dx=n(Iue,"A",{href:!0});var tpt=s(dx);RBe=t(tpt,"Wav2Vec2Config"),tpt.forEach(r),SBe=t(Iue," (Wav2Vec2 model)"),Iue.forEach(r),PBe=i(T),sc=n(T,"LI",{});var jue=s(sc);Lq=n(jue,"STRONG",{});var rpt=s(Lq);$Be=t(rpt,"wavlm"),rpt.forEach(r),IBe=t(jue," \u2014 "),mx=n(jue,"A",{href:!0});var apt=s(mx);jBe=t(apt,"WavLMConfig"),apt.forEach(r),NBe=t(jue," (WavLM model)"),jue.forEach(r),DBe=i(T),lc=n(T,"LI",{});var Nue=s(lc);Bq=n(Nue,"STRONG",{});var npt=s(Bq);GBe=t(npt,"xlm"),npt.forEach(r),OBe=t(Nue," \u2014 "),fx=n(Nue,"A",{href:!0});var spt=s(fx);qBe=t(spt,"XLMConfig"),spt.forEach(r),zBe=t(Nue," (XLM model)"),Nue.forEach(r),XBe=i(T),ic=n(T,"LI",{});var Due=s(ic);kq=n(Due,"STRONG",{});var lpt=s(kq);WBe=t(lpt,"xlm-prophetnet"),lpt.forEach(r),VBe=t(Due," \u2014 "),cx=n(Due,"A",{href:!0});var ipt=s(cx);QBe=t(ipt,"XLMProphetNetConfig"),ipt.forEach(r),HBe=t(Due," (XLMProphetNet model)"),Due.forEach(r),UBe=i(T),dc=n(T,"LI",{});var Gue=s(dc);Rq=n(Gue,"STRONG",{});var dpt=s(Rq);JBe=t(dpt,"xlm-roberta"),dpt.forEach(r),KBe=t(Gue," \u2014 "),gx=n(Gue,"A",{href:!0});var mpt=s(gx);YBe=t(mpt,"XLMRobertaConfig"),mpt.forEach(r),ZBe=t(Gue," (XLM-RoBERTa model)"),Gue.forEach(r),e9e=i(T),mc=n(T,"LI",{});var Oue=s(mc);Sq=n(Oue,"STRONG",{});var fpt=s(Sq);o9e=t(fpt,"xlnet"),fpt.forEach(r),t9e=t(Oue," \u2014 "),hx=n(Oue,"A",{href:!0});var cpt=s(hx);r9e=t(cpt,"XLNetConfig"),cpt.forEach(r),a9e=t(Oue," (XLNet model)"),Oue.forEach(r),T.forEach(r),n9e=i(Wr),Pq=n(Wr,"P",{});var gpt=s(Pq);s9e=t(gpt,"Examples:"),gpt.forEach(r),l9e=i(Wr),c(zF.$$.fragment,Wr),Wr.forEach(r),i9e=i(ms),fc=n(ms,"DIV",{class:!0});var v3e=s(fc);c(XF.$$.fragment,v3e),d9e=i(v3e),$q=n(v3e,"P",{});var hpt=s($q);m9e=t(hpt,"Register a new configuration for this class."),hpt.forEach(r),v3e.forEach(r),ms.forEach(r),wEe=i(d),ti=n(d,"H2",{class:!0});var b3e=s(ti);cc=n(b3e,"A",{id:!0,class:!0,href:!0});var upt=s(cc);Iq=n(upt,"SPAN",{});var ppt=s(Iq);c(WF.$$.fragment,ppt),ppt.forEach(r),upt.forEach(r),f9e=i(b3e),jq=n(b3e,"SPAN",{});var _pt=s(jq);c9e=t(_pt,"AutoTokenizer"),_pt.forEach(r),b3e.forEach(r),AEe=i(d),So=n(d,"DIV",{class:!0});var fs=s(So);c(VF.$$.fragment,fs),g9e=i(fs),QF=n(fs,"P",{});var T3e=s(QF);h9e=t(T3e,`This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when created with the `),ux=n(T3e,"A",{href:!0});var vpt=s(ux);u9e=t(vpt,"AutoTokenizer.from_pretrained()"),vpt.forEach(r),p9e=t(T3e," class method."),T3e.forEach(r),_9e=i(fs),HF=n(fs,"P",{});var F3e=s(HF);v9e=t(F3e,"This class cannot be instantiated directly using "),Nq=n(F3e,"CODE",{});var bpt=s(Nq);b9e=t(bpt,"__init__()"),bpt.forEach(r),T9e=t(F3e," (throws an error)."),F3e.forEach(r),F9e=i(fs),to=n(fs,"DIV",{class:!0});var Vr=s(to);c(UF.$$.fragment,Vr),M9e=i(Vr),Dq=n(Vr,"P",{});var Tpt=s(Dq);E9e=t(Tpt,"Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary."),Tpt.forEach(r),C9e=i(Vr),Fa=n(Vr,"P",{});var DT=s(Fa);y9e=t(DT,"The tokenizer class to instantiate is selected based on the "),Gq=n(DT,"EM",{});var Fpt=s(Gq);w9e=t(Fpt,"model_type"),Fpt.forEach(r),A9e=t(DT,` property of the config object (either passed as 
an argument or loaded from `),Oq=n(DT,"EM",{});var Mpt=s(Oq);x9e=t(Mpt,"pretrained_model_name_or_path"),Mpt.forEach(r),L9e=t(DT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),qq=n(DT,"EM",{});var Ept=s(qq);B9e=t(Ept,"pretrained_model_name_or_path"),Ept.forEach(r),k9e=t(DT,":"),DT.forEach(r),R9e=i(Vr),E=n(Vr,"UL",{});var y=s(E);vn=n(y,"LI",{});var Xw=s(vn);zq=n(Xw,"STRONG",{});var Cpt=s(zq);S9e=t(Cpt,"albert"),Cpt.forEach(r),P9e=t(Xw," \u2014 "),px=n(Xw,"A",{href:!0});var ypt=s(px);$9e=t(ypt,"AlbertTokenizer"),ypt.forEach(r),I9e=t(Xw," or "),_x=n(Xw,"A",{href:!0});var wpt=s(_x);j9e=t(wpt,"AlbertTokenizerFast"),wpt.forEach(r),N9e=t(Xw," (ALBERT model)"),Xw.forEach(r),D9e=i(y),bn=n(y,"LI",{});var Ww=s(bn);Xq=n(Ww,"STRONG",{});var Apt=s(Xq);G9e=t(Apt,"bart"),Apt.forEach(r),O9e=t(Ww," \u2014 "),vx=n(Ww,"A",{href:!0});var xpt=s(vx);q9e=t(xpt,"BartTokenizer"),xpt.forEach(r),z9e=t(Ww," or "),bx=n(Ww,"A",{href:!0});var Lpt=s(bx);X9e=t(Lpt,"BartTokenizerFast"),Lpt.forEach(r),W9e=t(Ww," (BART model)"),Ww.forEach(r),V9e=i(y),Tn=n(y,"LI",{});var Vw=s(Tn);Wq=n(Vw,"STRONG",{});var Bpt=s(Wq);Q9e=t(Bpt,"barthez"),Bpt.forEach(r),H9e=t(Vw," \u2014 "),Tx=n(Vw,"A",{href:!0});var kpt=s(Tx);U9e=t(kpt,"BarthezTokenizer"),kpt.forEach(r),J9e=t(Vw," or "),Fx=n(Vw,"A",{href:!0});var Rpt=s(Fx);K9e=t(Rpt,"BarthezTokenizerFast"),Rpt.forEach(r),Y9e=t(Vw," (BARThez model)"),Vw.forEach(r),Z9e=i(y),gc=n(y,"LI",{});var que=s(gc);Vq=n(que,"STRONG",{});var Spt=s(Vq);eke=t(Spt,"bartpho"),Spt.forEach(r),oke=t(que," \u2014 "),Mx=n(que,"A",{href:!0});var Ppt=s(Mx);tke=t(Ppt,"BartphoTokenizer"),Ppt.forEach(r),rke=t(que," (BARTpho model)"),que.forEach(r),ake=i(y),Fn=n(y,"LI",{});var Qw=s(Fn);Qq=n(Qw,"STRONG",{});var $pt=s(Qq);nke=t($pt,"bert"),$pt.forEach(r),ske=t(Qw," \u2014 "),Ex=n(Qw,"A",{href:!0});var Ipt=s(Ex);lke=t(Ipt,"BertTokenizer"),Ipt.forEach(r),ike=t(Qw," or "),Cx=n(Qw,"A",{href:!0});var jpt=s(Cx);dke=t(jpt,"BertTokenizerFast"),jpt.forEach(r),mke=t(Qw," (BERT model)"),Qw.forEach(r),fke=i(y),hc=n(y,"LI",{});var zue=s(hc);Hq=n(zue,"STRONG",{});var Npt=s(Hq);cke=t(Npt,"bert-generation"),Npt.forEach(r),gke=t(zue," \u2014 "),yx=n(zue,"A",{href:!0});var Dpt=s(yx);hke=t(Dpt,"BertGenerationTokenizer"),Dpt.forEach(r),uke=t(zue," (Bert Generation model)"),zue.forEach(r),pke=i(y),uc=n(y,"LI",{});var Xue=s(uc);Uq=n(Xue,"STRONG",{});var Gpt=s(Uq);_ke=t(Gpt,"bert-japanese"),Gpt.forEach(r),vke=t(Xue," \u2014 "),wx=n(Xue,"A",{href:!0});var Opt=s(wx);bke=t(Opt,"BertJapaneseTokenizer"),Opt.forEach(r),Tke=t(Xue," (BertJapanese model)"),Xue.forEach(r),Fke=i(y),pc=n(y,"LI",{});var Wue=s(pc);Jq=n(Wue,"STRONG",{});var qpt=s(Jq);Mke=t(qpt,"bertweet"),qpt.forEach(r),Eke=t(Wue," \u2014 "),Ax=n(Wue,"A",{href:!0});var zpt=s(Ax);Cke=t(zpt,"BertweetTokenizer"),zpt.forEach(r),yke=t(Wue," (Bertweet model)"),Wue.forEach(r),wke=i(y),Mn=n(y,"LI",{});var Hw=s(Mn);Kq=n(Hw,"STRONG",{});var Xpt=s(Kq);Ake=t(Xpt,"big_bird"),Xpt.forEach(r),xke=t(Hw," \u2014 "),xx=n(Hw,"A",{href:!0});var Wpt=s(xx);Lke=t(Wpt,"BigBirdTokenizer"),Wpt.forEach(r),Bke=t(Hw," or "),Lx=n(Hw,"A",{href:!0});var Vpt=s(Lx);kke=t(Vpt,"BigBirdTokenizerFast"),Vpt.forEach(r),Rke=t(Hw," (BigBird model)"),Hw.forEach(r),Ske=i(y),En=n(y,"LI",{});var Uw=s(En);Yq=n(Uw,"STRONG",{});var Qpt=s(Yq);Pke=t(Qpt,"bigbird_pegasus"),Qpt.forEach(r),$ke=t(Uw," \u2014 "),Bx=n(Uw,"A",{href:!0});var Hpt=s(Bx);Ike=t(Hpt,"PegasusTokenizer"),Hpt.forEach(r),jke=t(Uw," or "),kx=n(Uw,"A",{href:!0});var Upt=s(kx);Nke=t(Upt,"PegasusTokenizerFast"),Upt.forEach(r),Dke=t(Uw," 
(BigBirdPegasus model)"),Uw.forEach(r),Gke=i(y),Cn=n(y,"LI",{});var Jw=s(Cn);Zq=n(Jw,"STRONG",{});var Jpt=s(Zq);Oke=t(Jpt,"blenderbot"),Jpt.forEach(r),qke=t(Jw," \u2014 "),Rx=n(Jw,"A",{href:!0});var Kpt=s(Rx);zke=t(Kpt,"BlenderbotTokenizer"),Kpt.forEach(r),Xke=t(Jw," or "),Sx=n(Jw,"A",{href:!0});var Ypt=s(Sx);Wke=t(Ypt,"BlenderbotTokenizerFast"),Ypt.forEach(r),Vke=t(Jw," (Blenderbot model)"),Jw.forEach(r),Qke=i(y),_c=n(y,"LI",{});var Vue=s(_c);ez=n(Vue,"STRONG",{});var Zpt=s(ez);Hke=t(Zpt,"blenderbot-small"),Zpt.forEach(r),Uke=t(Vue," \u2014 "),Px=n(Vue,"A",{href:!0});var e_t=s(Px);Jke=t(e_t,"BlenderbotSmallTokenizer"),e_t.forEach(r),Kke=t(Vue," (BlenderbotSmall model)"),Vue.forEach(r),Yke=i(y),vc=n(y,"LI",{});var Que=s(vc);oz=n(Que,"STRONG",{});var o_t=s(oz);Zke=t(o_t,"byt5"),o_t.forEach(r),eRe=t(Que," \u2014 "),$x=n(Que,"A",{href:!0});var t_t=s($x);oRe=t(t_t,"ByT5Tokenizer"),t_t.forEach(r),tRe=t(Que," (ByT5 model)"),Que.forEach(r),rRe=i(y),yn=n(y,"LI",{});var Kw=s(yn);tz=n(Kw,"STRONG",{});var r_t=s(tz);aRe=t(r_t,"camembert"),r_t.forEach(r),nRe=t(Kw," \u2014 "),Ix=n(Kw,"A",{href:!0});var a_t=s(Ix);sRe=t(a_t,"CamembertTokenizer"),a_t.forEach(r),lRe=t(Kw," or "),jx=n(Kw,"A",{href:!0});var n_t=s(jx);iRe=t(n_t,"CamembertTokenizerFast"),n_t.forEach(r),dRe=t(Kw," (CamemBERT model)"),Kw.forEach(r),mRe=i(y),bc=n(y,"LI",{});var Hue=s(bc);rz=n(Hue,"STRONG",{});var s_t=s(rz);fRe=t(s_t,"canine"),s_t.forEach(r),cRe=t(Hue," \u2014 "),Nx=n(Hue,"A",{href:!0});var l_t=s(Nx);gRe=t(l_t,"CanineTokenizer"),l_t.forEach(r),hRe=t(Hue," (Canine model)"),Hue.forEach(r),uRe=i(y),wn=n(y,"LI",{});var Yw=s(wn);az=n(Yw,"STRONG",{});var i_t=s(az);pRe=t(i_t,"clip"),i_t.forEach(r),_Re=t(Yw," \u2014 "),Dx=n(Yw,"A",{href:!0});var d_t=s(Dx);vRe=t(d_t,"CLIPTokenizer"),d_t.forEach(r),bRe=t(Yw," or "),Gx=n(Yw,"A",{href:!0});var m_t=s(Gx);TRe=t(m_t,"CLIPTokenizerFast"),m_t.forEach(r),FRe=t(Yw," (CLIP model)"),Yw.forEach(r),MRe=i(y),An=n(y,"LI",{});var Zw=s(An);nz=n(Zw,"STRONG",{});var f_t=s(nz);ERe=t(f_t,"convbert"),f_t.forEach(r),CRe=t(Zw," \u2014 "),Ox=n(Zw,"A",{href:!0});var c_t=s(Ox);yRe=t(c_t,"ConvBertTokenizer"),c_t.forEach(r),wRe=t(Zw," or "),qx=n(Zw,"A",{href:!0});var g_t=s(qx);ARe=t(g_t,"ConvBertTokenizerFast"),g_t.forEach(r),xRe=t(Zw," (ConvBERT model)"),Zw.forEach(r),LRe=i(y),xn=n(y,"LI",{});var eA=s(xn);sz=n(eA,"STRONG",{});var h_t=s(sz);BRe=t(h_t,"cpm"),h_t.forEach(r),kRe=t(eA," \u2014 "),zx=n(eA,"A",{href:!0});var u_t=s(zx);RRe=t(u_t,"CpmTokenizer"),u_t.forEach(r),SRe=t(eA," or "),lz=n(eA,"CODE",{});var p_t=s(lz);PRe=t(p_t,"CpmTokenizerFast"),p_t.forEach(r),$Re=t(eA," (CPM model)"),eA.forEach(r),IRe=i(y),Tc=n(y,"LI",{});var Uue=s(Tc);iz=n(Uue,"STRONG",{});var __t=s(iz);jRe=t(__t,"ctrl"),__t.forEach(r),NRe=t(Uue," \u2014 "),Xx=n(Uue,"A",{href:!0});var v_t=s(Xx);DRe=t(v_t,"CTRLTokenizer"),v_t.forEach(r),GRe=t(Uue," (CTRL model)"),Uue.forEach(r),ORe=i(y),Ln=n(y,"LI",{});var oA=s(Ln);dz=n(oA,"STRONG",{});var b_t=s(dz);qRe=t(b_t,"deberta"),b_t.forEach(r),zRe=t(oA," \u2014 "),Wx=n(oA,"A",{href:!0});var T_t=s(Wx);XRe=t(T_t,"DebertaTokenizer"),T_t.forEach(r),WRe=t(oA," or "),Vx=n(oA,"A",{href:!0});var F_t=s(Vx);VRe=t(F_t,"DebertaTokenizerFast"),F_t.forEach(r),QRe=t(oA," (DeBERTa model)"),oA.forEach(r),HRe=i(y),Fc=n(y,"LI",{});var Jue=s(Fc);mz=n(Jue,"STRONG",{});var M_t=s(mz);URe=t(M_t,"deberta-v2"),M_t.forEach(r),JRe=t(Jue," \u2014 "),Qx=n(Jue,"A",{href:!0});var E_t=s(Qx);KRe=t(E_t,"DebertaV2Tokenizer"),E_t.forEach(r),YRe=t(Jue," (DeBERTa-v2 model)"),Jue.forEach(r),ZRe=i(y),Bn=n(y,"LI",{});var 
tA=s(Bn);fz=n(tA,"STRONG",{});var C_t=s(fz);eSe=t(C_t,"distilbert"),C_t.forEach(r),oSe=t(tA," \u2014 "),Hx=n(tA,"A",{href:!0});var y_t=s(Hx);tSe=t(y_t,"DistilBertTokenizer"),y_t.forEach(r),rSe=t(tA," or "),Ux=n(tA,"A",{href:!0});var w_t=s(Ux);aSe=t(w_t,"DistilBertTokenizerFast"),w_t.forEach(r),nSe=t(tA," (DistilBERT model)"),tA.forEach(r),sSe=i(y),kn=n(y,"LI",{});var rA=s(kn);cz=n(rA,"STRONG",{});var A_t=s(cz);lSe=t(A_t,"dpr"),A_t.forEach(r),iSe=t(rA," \u2014 "),Jx=n(rA,"A",{href:!0});var x_t=s(Jx);dSe=t(x_t,"DPRQuestionEncoderTokenizer"),x_t.forEach(r),mSe=t(rA," or "),Kx=n(rA,"A",{href:!0});var L_t=s(Kx);fSe=t(L_t,"DPRQuestionEncoderTokenizerFast"),L_t.forEach(r),cSe=t(rA," (DPR model)"),rA.forEach(r),gSe=i(y),Rn=n(y,"LI",{});var aA=s(Rn);gz=n(aA,"STRONG",{});var B_t=s(gz);hSe=t(B_t,"electra"),B_t.forEach(r),uSe=t(aA," \u2014 "),Yx=n(aA,"A",{href:!0});var k_t=s(Yx);pSe=t(k_t,"ElectraTokenizer"),k_t.forEach(r),_Se=t(aA," or "),Zx=n(aA,"A",{href:!0});var R_t=s(Zx);vSe=t(R_t,"ElectraTokenizerFast"),R_t.forEach(r),bSe=t(aA," (ELECTRA model)"),aA.forEach(r),TSe=i(y),Mc=n(y,"LI",{});var Kue=s(Mc);hz=n(Kue,"STRONG",{});var S_t=s(hz);FSe=t(S_t,"flaubert"),S_t.forEach(r),MSe=t(Kue," \u2014 "),e6=n(Kue,"A",{href:!0});var P_t=s(e6);ESe=t(P_t,"FlaubertTokenizer"),P_t.forEach(r),CSe=t(Kue," (FlauBERT model)"),Kue.forEach(r),ySe=i(y),Sn=n(y,"LI",{});var nA=s(Sn);uz=n(nA,"STRONG",{});var $_t=s(uz);wSe=t($_t,"fnet"),$_t.forEach(r),ASe=t(nA," \u2014 "),o6=n(nA,"A",{href:!0});var I_t=s(o6);xSe=t(I_t,"FNetTokenizer"),I_t.forEach(r),LSe=t(nA," or "),t6=n(nA,"A",{href:!0});var j_t=s(t6);BSe=t(j_t,"FNetTokenizerFast"),j_t.forEach(r),kSe=t(nA," (FNet model)"),nA.forEach(r),RSe=i(y),Ec=n(y,"LI",{});var Yue=s(Ec);pz=n(Yue,"STRONG",{});var N_t=s(pz);SSe=t(N_t,"fsmt"),N_t.forEach(r),PSe=t(Yue," \u2014 "),r6=n(Yue,"A",{href:!0});var D_t=s(r6);$Se=t(D_t,"FSMTTokenizer"),D_t.forEach(r),ISe=t(Yue," (FairSeq Machine-Translation model)"),Yue.forEach(r),jSe=i(y),Pn=n(y,"LI",{});var sA=s(Pn);_z=n(sA,"STRONG",{});var G_t=s(_z);NSe=t(G_t,"funnel"),G_t.forEach(r),DSe=t(sA," \u2014 "),a6=n(sA,"A",{href:!0});var O_t=s(a6);GSe=t(O_t,"FunnelTokenizer"),O_t.forEach(r),OSe=t(sA," or "),n6=n(sA,"A",{href:!0});var q_t=s(n6);qSe=t(q_t,"FunnelTokenizerFast"),q_t.forEach(r),zSe=t(sA," (Funnel Transformer model)"),sA.forEach(r),XSe=i(y),$n=n(y,"LI",{});var lA=s($n);vz=n(lA,"STRONG",{});var z_t=s(vz);WSe=t(z_t,"gpt2"),z_t.forEach(r),VSe=t(lA," \u2014 "),s6=n(lA,"A",{href:!0});var X_t=s(s6);QSe=t(X_t,"GPT2Tokenizer"),X_t.forEach(r),HSe=t(lA," or "),l6=n(lA,"A",{href:!0});var W_t=s(l6);USe=t(W_t,"GPT2TokenizerFast"),W_t.forEach(r),JSe=t(lA," (OpenAI GPT-2 model)"),lA.forEach(r),KSe=i(y),In=n(y,"LI",{});var iA=s(In);bz=n(iA,"STRONG",{});var V_t=s(bz);YSe=t(V_t,"gpt_neo"),V_t.forEach(r),ZSe=t(iA," \u2014 "),i6=n(iA,"A",{href:!0});var Q_t=s(i6);ePe=t(Q_t,"GPT2Tokenizer"),Q_t.forEach(r),oPe=t(iA," or "),d6=n(iA,"A",{href:!0});var H_t=s(d6);tPe=t(H_t,"GPT2TokenizerFast"),H_t.forEach(r),rPe=t(iA," (GPT Neo model)"),iA.forEach(r),aPe=i(y),Cc=n(y,"LI",{});var Zue=s(Cc);Tz=n(Zue,"STRONG",{});var U_t=s(Tz);nPe=t(U_t,"hubert"),U_t.forEach(r),sPe=t(Zue," \u2014 "),m6=n(Zue,"A",{href:!0});var J_t=s(m6);lPe=t(J_t,"Wav2Vec2CTCTokenizer"),J_t.forEach(r),iPe=t(Zue," (Hubert model)"),Zue.forEach(r),dPe=i(y),jn=n(y,"LI",{});var dA=s(jn);Fz=n(dA,"STRONG",{});var K_t=s(Fz);mPe=t(K_t,"ibert"),K_t.forEach(r),fPe=t(dA," \u2014 "),f6=n(dA,"A",{href:!0});var Y_t=s(f6);cPe=t(Y_t,"RobertaTokenizer"),Y_t.forEach(r),gPe=t(dA," or "),c6=n(dA,"A",{href:!0});var 
Z_t=s(c6);hPe=t(Z_t,"RobertaTokenizerFast"),Z_t.forEach(r),uPe=t(dA," (I-BERT model)"),dA.forEach(r),pPe=i(y),Nn=n(y,"LI",{});var mA=s(Nn);Mz=n(mA,"STRONG",{});var evt=s(Mz);_Pe=t(evt,"layoutlm"),evt.forEach(r),vPe=t(mA," \u2014 "),g6=n(mA,"A",{href:!0});var ovt=s(g6);bPe=t(ovt,"LayoutLMTokenizer"),ovt.forEach(r),TPe=t(mA," or "),h6=n(mA,"A",{href:!0});var tvt=s(h6);FPe=t(tvt,"LayoutLMTokenizerFast"),tvt.forEach(r),MPe=t(mA," (LayoutLM model)"),mA.forEach(r),EPe=i(y),Dn=n(y,"LI",{});var fA=s(Dn);Ez=n(fA,"STRONG",{});var rvt=s(Ez);CPe=t(rvt,"layoutlmv2"),rvt.forEach(r),yPe=t(fA," \u2014 "),u6=n(fA,"A",{href:!0});var avt=s(u6);wPe=t(avt,"LayoutLMv2Tokenizer"),avt.forEach(r),APe=t(fA," or "),p6=n(fA,"A",{href:!0});var nvt=s(p6);xPe=t(nvt,"LayoutLMv2TokenizerFast"),nvt.forEach(r),LPe=t(fA," (LayoutLMv2 model)"),fA.forEach(r),BPe=i(y),Gn=n(y,"LI",{});var cA=s(Gn);Cz=n(cA,"STRONG",{});var svt=s(Cz);kPe=t(svt,"led"),svt.forEach(r),RPe=t(cA," \u2014 "),_6=n(cA,"A",{href:!0});var lvt=s(_6);SPe=t(lvt,"LEDTokenizer"),lvt.forEach(r),PPe=t(cA," or "),v6=n(cA,"A",{href:!0});var ivt=s(v6);$Pe=t(ivt,"LEDTokenizerFast"),ivt.forEach(r),IPe=t(cA," (LED model)"),cA.forEach(r),jPe=i(y),On=n(y,"LI",{});var gA=s(On);yz=n(gA,"STRONG",{});var dvt=s(yz);NPe=t(dvt,"longformer"),dvt.forEach(r),DPe=t(gA," \u2014 "),b6=n(gA,"A",{href:!0});var mvt=s(b6);GPe=t(mvt,"LongformerTokenizer"),mvt.forEach(r),OPe=t(gA," or "),T6=n(gA,"A",{href:!0});var fvt=s(T6);qPe=t(fvt,"LongformerTokenizerFast"),fvt.forEach(r),zPe=t(gA," (Longformer model)"),gA.forEach(r),XPe=i(y),yc=n(y,"LI",{});var epe=s(yc);wz=n(epe,"STRONG",{});var cvt=s(wz);WPe=t(cvt,"luke"),cvt.forEach(r),VPe=t(epe," \u2014 "),F6=n(epe,"A",{href:!0});var gvt=s(F6);QPe=t(gvt,"LukeTokenizer"),gvt.forEach(r),HPe=t(epe," (LUKE model)"),epe.forEach(r),UPe=i(y),qn=n(y,"LI",{});var hA=s(qn);Az=n(hA,"STRONG",{});var hvt=s(Az);JPe=t(hvt,"lxmert"),hvt.forEach(r),KPe=t(hA," \u2014 "),M6=n(hA,"A",{href:!0});var uvt=s(M6);YPe=t(uvt,"LxmertTokenizer"),uvt.forEach(r),ZPe=t(hA," or "),E6=n(hA,"A",{href:!0});var pvt=s(E6);e$e=t(pvt,"LxmertTokenizerFast"),pvt.forEach(r),o$e=t(hA," (LXMERT model)"),hA.forEach(r),t$e=i(y),wc=n(y,"LI",{});var ope=s(wc);xz=n(ope,"STRONG",{});var _vt=s(xz);r$e=t(_vt,"m2m_100"),_vt.forEach(r),a$e=t(ope," \u2014 "),C6=n(ope,"A",{href:!0});var vvt=s(C6);n$e=t(vvt,"M2M100Tokenizer"),vvt.forEach(r),s$e=t(ope," (M2M100 model)"),ope.forEach(r),l$e=i(y),Ac=n(y,"LI",{});var tpe=s(Ac);Lz=n(tpe,"STRONG",{});var bvt=s(Lz);i$e=t(bvt,"marian"),bvt.forEach(r),d$e=t(tpe," \u2014 "),y6=n(tpe,"A",{href:!0});var Tvt=s(y6);m$e=t(Tvt,"MarianTokenizer"),Tvt.forEach(r),f$e=t(tpe," (Marian model)"),tpe.forEach(r),c$e=i(y),zn=n(y,"LI",{});var uA=s(zn);Bz=n(uA,"STRONG",{});var Fvt=s(Bz);g$e=t(Fvt,"mbart"),Fvt.forEach(r),h$e=t(uA," \u2014 "),w6=n(uA,"A",{href:!0});var Mvt=s(w6);u$e=t(Mvt,"MBartTokenizer"),Mvt.forEach(r),p$e=t(uA," or "),A6=n(uA,"A",{href:!0});var Evt=s(A6);_$e=t(Evt,"MBartTokenizerFast"),Evt.forEach(r),v$e=t(uA," (mBART model)"),uA.forEach(r),b$e=i(y),Xn=n(y,"LI",{});var pA=s(Xn);kz=n(pA,"STRONG",{});var Cvt=s(kz);T$e=t(Cvt,"mbart50"),Cvt.forEach(r),F$e=t(pA," \u2014 "),x6=n(pA,"A",{href:!0});var yvt=s(x6);M$e=t(yvt,"MBart50Tokenizer"),yvt.forEach(r),E$e=t(pA," or "),L6=n(pA,"A",{href:!0});var wvt=s(L6);C$e=t(wvt,"MBart50TokenizerFast"),wvt.forEach(r),y$e=t(pA," (mBART-50 model)"),pA.forEach(r),w$e=i(y),Wn=n(y,"LI",{});var _A=s(Wn);Rz=n(_A,"STRONG",{});var Avt=s(Rz);A$e=t(Avt,"mobilebert"),Avt.forEach(r),x$e=t(_A," \u2014 "),B6=n(_A,"A",{href:!0});var 
xvt=s(B6);L$e=t(xvt,"MobileBertTokenizer"),xvt.forEach(r),B$e=t(_A," or "),k6=n(_A,"A",{href:!0});var Lvt=s(k6);k$e=t(Lvt,"MobileBertTokenizerFast"),Lvt.forEach(r),R$e=t(_A," (MobileBERT model)"),_A.forEach(r),S$e=i(y),Vn=n(y,"LI",{});var vA=s(Vn);Sz=n(vA,"STRONG",{});var Bvt=s(Sz);P$e=t(Bvt,"mpnet"),Bvt.forEach(r),$$e=t(vA," \u2014 "),R6=n(vA,"A",{href:!0});var kvt=s(R6);I$e=t(kvt,"MPNetTokenizer"),kvt.forEach(r),j$e=t(vA," or "),S6=n(vA,"A",{href:!0});var Rvt=s(S6);N$e=t(Rvt,"MPNetTokenizerFast"),Rvt.forEach(r),D$e=t(vA," (MPNet model)"),vA.forEach(r),G$e=i(y),Qn=n(y,"LI",{});var bA=s(Qn);Pz=n(bA,"STRONG",{});var Svt=s(Pz);O$e=t(Svt,"mt5"),Svt.forEach(r),q$e=t(bA," \u2014 "),P6=n(bA,"A",{href:!0});var Pvt=s(P6);z$e=t(Pvt,"MT5Tokenizer"),Pvt.forEach(r),X$e=t(bA," or "),$6=n(bA,"A",{href:!0});var $vt=s($6);W$e=t($vt,"MT5TokenizerFast"),$vt.forEach(r),V$e=t(bA," (mT5 model)"),bA.forEach(r),Q$e=i(y),Hn=n(y,"LI",{});var TA=s(Hn);$z=n(TA,"STRONG",{});var Ivt=s($z);H$e=t(Ivt,"openai-gpt"),Ivt.forEach(r),U$e=t(TA," \u2014 "),I6=n(TA,"A",{href:!0});var jvt=s(I6);J$e=t(jvt,"OpenAIGPTTokenizer"),jvt.forEach(r),K$e=t(TA," or "),j6=n(TA,"A",{href:!0});var Nvt=s(j6);Y$e=t(Nvt,"OpenAIGPTTokenizerFast"),Nvt.forEach(r),Z$e=t(TA," (OpenAI GPT model)"),TA.forEach(r),eIe=i(y),Un=n(y,"LI",{});var FA=s(Un);Iz=n(FA,"STRONG",{});var Dvt=s(Iz);oIe=t(Dvt,"pegasus"),Dvt.forEach(r),tIe=t(FA," \u2014 "),N6=n(FA,"A",{href:!0});var Gvt=s(N6);rIe=t(Gvt,"PegasusTokenizer"),Gvt.forEach(r),aIe=t(FA," or "),D6=n(FA,"A",{href:!0});var Ovt=s(D6);nIe=t(Ovt,"PegasusTokenizerFast"),Ovt.forEach(r),sIe=t(FA," (Pegasus model)"),FA.forEach(r),lIe=i(y),xc=n(y,"LI",{});var rpe=s(xc);jz=n(rpe,"STRONG",{});var qvt=s(jz);iIe=t(qvt,"perceiver"),qvt.forEach(r),dIe=t(rpe," \u2014 "),G6=n(rpe,"A",{href:!0});var zvt=s(G6);mIe=t(zvt,"PerceiverTokenizer"),zvt.forEach(r),fIe=t(rpe," (Perceiver model)"),rpe.forEach(r),cIe=i(y),Lc=n(y,"LI",{});var ape=s(Lc);Nz=n(ape,"STRONG",{});var Xvt=s(Nz);gIe=t(Xvt,"phobert"),Xvt.forEach(r),hIe=t(ape," \u2014 "),O6=n(ape,"A",{href:!0});var Wvt=s(O6);uIe=t(Wvt,"PhobertTokenizer"),Wvt.forEach(r),pIe=t(ape," (PhoBERT model)"),ape.forEach(r),_Ie=i(y),Bc=n(y,"LI",{});var npe=s(Bc);Dz=n(npe,"STRONG",{});var Vvt=s(Dz);vIe=t(Vvt,"prophetnet"),Vvt.forEach(r),bIe=t(npe," \u2014 "),q6=n(npe,"A",{href:!0});var Qvt=s(q6);TIe=t(Qvt,"ProphetNetTokenizer"),Qvt.forEach(r),FIe=t(npe," (ProphetNet model)"),npe.forEach(r),MIe=i(y),Jn=n(y,"LI",{});var MA=s(Jn);Gz=n(MA,"STRONG",{});var Hvt=s(Gz);EIe=t(Hvt,"qdqbert"),Hvt.forEach(r),CIe=t(MA," \u2014 "),z6=n(MA,"A",{href:!0});var Uvt=s(z6);yIe=t(Uvt,"BertTokenizer"),Uvt.forEach(r),wIe=t(MA," or "),X6=n(MA,"A",{href:!0});var Jvt=s(X6);AIe=t(Jvt,"BertTokenizerFast"),Jvt.forEach(r),xIe=t(MA," (QDQBert model)"),MA.forEach(r),LIe=i(y),kc=n(y,"LI",{});var spe=s(kc);Oz=n(spe,"STRONG",{});var Kvt=s(Oz);BIe=t(Kvt,"rag"),Kvt.forEach(r),kIe=t(spe," \u2014 "),W6=n(spe,"A",{href:!0});var Yvt=s(W6);RIe=t(Yvt,"RagTokenizer"),Yvt.forEach(r),SIe=t(spe," (RAG model)"),spe.forEach(r),PIe=i(y),Kn=n(y,"LI",{});var EA=s(Kn);qz=n(EA,"STRONG",{});var Zvt=s(qz);$Ie=t(Zvt,"reformer"),Zvt.forEach(r),IIe=t(EA," \u2014 "),V6=n(EA,"A",{href:!0});var e1t=s(V6);jIe=t(e1t,"ReformerTokenizer"),e1t.forEach(r),NIe=t(EA," or "),Q6=n(EA,"A",{href:!0});var o1t=s(Q6);DIe=t(o1t,"ReformerTokenizerFast"),o1t.forEach(r),GIe=t(EA," (Reformer model)"),EA.forEach(r),OIe=i(y),Yn=n(y,"LI",{});var CA=s(Yn);zz=n(CA,"STRONG",{});var t1t=s(zz);qIe=t(t1t,"rembert"),t1t.forEach(r),zIe=t(CA," \u2014 "),H6=n(CA,"A",{href:!0});var 
r1t=s(H6);XIe=t(r1t,"RemBertTokenizer"),r1t.forEach(r),WIe=t(CA," or "),U6=n(CA,"A",{href:!0});var a1t=s(U6);VIe=t(a1t,"RemBertTokenizerFast"),a1t.forEach(r),QIe=t(CA," (RemBERT model)"),CA.forEach(r),HIe=i(y),Zn=n(y,"LI",{});var yA=s(Zn);Xz=n(yA,"STRONG",{});var n1t=s(Xz);UIe=t(n1t,"retribert"),n1t.forEach(r),JIe=t(yA," \u2014 "),J6=n(yA,"A",{href:!0});var s1t=s(J6);KIe=t(s1t,"RetriBertTokenizer"),s1t.forEach(r),YIe=t(yA," or "),K6=n(yA,"A",{href:!0});var l1t=s(K6);ZIe=t(l1t,"RetriBertTokenizerFast"),l1t.forEach(r),eje=t(yA," (RetriBERT model)"),yA.forEach(r),oje=i(y),es=n(y,"LI",{});var wA=s(es);Wz=n(wA,"STRONG",{});var i1t=s(Wz);tje=t(i1t,"roberta"),i1t.forEach(r),rje=t(wA," \u2014 "),Y6=n(wA,"A",{href:!0});var d1t=s(Y6);aje=t(d1t,"RobertaTokenizer"),d1t.forEach(r),nje=t(wA," or "),Z6=n(wA,"A",{href:!0});var m1t=s(Z6);sje=t(m1t,"RobertaTokenizerFast"),m1t.forEach(r),lje=t(wA," (RoBERTa model)"),wA.forEach(r),ije=i(y),os=n(y,"LI",{});var AA=s(os);Vz=n(AA,"STRONG",{});var f1t=s(Vz);dje=t(f1t,"roformer"),f1t.forEach(r),mje=t(AA," \u2014 "),e8=n(AA,"A",{href:!0});var c1t=s(e8);fje=t(c1t,"RoFormerTokenizer"),c1t.forEach(r),cje=t(AA," or "),o8=n(AA,"A",{href:!0});var g1t=s(o8);gje=t(g1t,"RoFormerTokenizerFast"),g1t.forEach(r),hje=t(AA," (RoFormer model)"),AA.forEach(r),uje=i(y),Rc=n(y,"LI",{});var lpe=s(Rc);Qz=n(lpe,"STRONG",{});var h1t=s(Qz);pje=t(h1t,"speech_to_text"),h1t.forEach(r),_je=t(lpe," \u2014 "),t8=n(lpe,"A",{href:!0});var u1t=s(t8);vje=t(u1t,"Speech2TextTokenizer"),u1t.forEach(r),bje=t(lpe," (Speech2Text model)"),lpe.forEach(r),Tje=i(y),Sc=n(y,"LI",{});var ipe=s(Sc);Hz=n(ipe,"STRONG",{});var p1t=s(Hz);Fje=t(p1t,"speech_to_text_2"),p1t.forEach(r),Mje=t(ipe," \u2014 "),r8=n(ipe,"A",{href:!0});var _1t=s(r8);Eje=t(_1t,"Speech2Text2Tokenizer"),_1t.forEach(r),Cje=t(ipe," (Speech2Text2 model)"),ipe.forEach(r),yje=i(y),ts=n(y,"LI",{});var xA=s(ts);Uz=n(xA,"STRONG",{});var v1t=s(Uz);wje=t(v1t,"splinter"),v1t.forEach(r),Aje=t(xA," \u2014 "),a8=n(xA,"A",{href:!0});var b1t=s(a8);xje=t(b1t,"SplinterTokenizer"),b1t.forEach(r),Lje=t(xA," or "),n8=n(xA,"A",{href:!0});var T1t=s(n8);Bje=t(T1t,"SplinterTokenizerFast"),T1t.forEach(r),kje=t(xA," (Splinter model)"),xA.forEach(r),Rje=i(y),rs=n(y,"LI",{});var LA=s(rs);Jz=n(LA,"STRONG",{});var F1t=s(Jz);Sje=t(F1t,"squeezebert"),F1t.forEach(r),Pje=t(LA," \u2014 "),s8=n(LA,"A",{href:!0});var M1t=s(s8);$je=t(M1t,"SqueezeBertTokenizer"),M1t.forEach(r),Ije=t(LA," or "),l8=n(LA,"A",{href:!0});var E1t=s(l8);jje=t(E1t,"SqueezeBertTokenizerFast"),E1t.forEach(r),Nje=t(LA," (SqueezeBERT model)"),LA.forEach(r),Dje=i(y),as=n(y,"LI",{});var BA=s(as);Kz=n(BA,"STRONG",{});var C1t=s(Kz);Gje=t(C1t,"t5"),C1t.forEach(r),Oje=t(BA," \u2014 "),i8=n(BA,"A",{href:!0});var y1t=s(i8);qje=t(y1t,"T5Tokenizer"),y1t.forEach(r),zje=t(BA," or "),d8=n(BA,"A",{href:!0});var w1t=s(d8);Xje=t(w1t,"T5TokenizerFast"),w1t.forEach(r),Wje=t(BA," (T5 model)"),BA.forEach(r),Vje=i(y),Pc=n(y,"LI",{});var dpe=s(Pc);Yz=n(dpe,"STRONG",{});var A1t=s(Yz);Qje=t(A1t,"tapas"),A1t.forEach(r),Hje=t(dpe," \u2014 "),m8=n(dpe,"A",{href:!0});var x1t=s(m8);Uje=t(x1t,"TapasTokenizer"),x1t.forEach(r),Jje=t(dpe," (TAPAS model)"),dpe.forEach(r),Kje=i(y),$c=n(y,"LI",{});var mpe=s($c);Zz=n(mpe,"STRONG",{});var L1t=s(Zz);Yje=t(L1t,"transfo-xl"),L1t.forEach(r),Zje=t(mpe," \u2014 "),f8=n(mpe,"A",{href:!0});var B1t=s(f8);eNe=t(B1t,"TransfoXLTokenizer"),B1t.forEach(r),oNe=t(mpe," (Transformer-XL model)"),mpe.forEach(r),tNe=i(y),Ic=n(y,"LI",{});var fpe=s(Ic);eX=n(fpe,"STRONG",{});var 
k1t=s(eX);rNe=t(k1t,"wav2vec2"),k1t.forEach(r),aNe=t(fpe," \u2014 "),c8=n(fpe,"A",{href:!0});var R1t=s(c8);nNe=t(R1t,"Wav2Vec2CTCTokenizer"),R1t.forEach(r),sNe=t(fpe," (Wav2Vec2 model)"),fpe.forEach(r),lNe=i(y),jc=n(y,"LI",{});var cpe=s(jc);oX=n(cpe,"STRONG",{});var S1t=s(oX);iNe=t(S1t,"xlm"),S1t.forEach(r),dNe=t(cpe," \u2014 "),g8=n(cpe,"A",{href:!0});var P1t=s(g8);mNe=t(P1t,"XLMTokenizer"),P1t.forEach(r),fNe=t(cpe," (XLM model)"),cpe.forEach(r),cNe=i(y),Nc=n(y,"LI",{});var gpe=s(Nc);tX=n(gpe,"STRONG",{});var $1t=s(tX);gNe=t($1t,"xlm-prophetnet"),$1t.forEach(r),hNe=t(gpe," \u2014 "),h8=n(gpe,"A",{href:!0});var I1t=s(h8);uNe=t(I1t,"XLMProphetNetTokenizer"),I1t.forEach(r),pNe=t(gpe," (XLMProphetNet model)"),gpe.forEach(r),_Ne=i(y),ns=n(y,"LI",{});var kA=s(ns);rX=n(kA,"STRONG",{});var j1t=s(rX);vNe=t(j1t,"xlm-roberta"),j1t.forEach(r),bNe=t(kA," \u2014 "),u8=n(kA,"A",{href:!0});var N1t=s(u8);TNe=t(N1t,"XLMRobertaTokenizer"),N1t.forEach(r),FNe=t(kA," or "),p8=n(kA,"A",{href:!0});var D1t=s(p8);MNe=t(D1t,"XLMRobertaTokenizerFast"),D1t.forEach(r),ENe=t(kA," (XLM-RoBERTa model)"),kA.forEach(r),CNe=i(y),ss=n(y,"LI",{});var RA=s(ss);aX=n(RA,"STRONG",{});var G1t=s(aX);yNe=t(G1t,"xlnet"),G1t.forEach(r),wNe=t(RA," \u2014 "),_8=n(RA,"A",{href:!0});var O1t=s(_8);ANe=t(O1t,"XLNetTokenizer"),O1t.forEach(r),xNe=t(RA," or "),v8=n(RA,"A",{href:!0});var q1t=s(v8);LNe=t(q1t,"XLNetTokenizerFast"),q1t.forEach(r),BNe=t(RA," (XLNet model)"),RA.forEach(r),y.forEach(r),kNe=i(Vr),nX=n(Vr,"P",{});var z1t=s(nX);RNe=t(z1t,"Examples:"),z1t.forEach(r),SNe=i(Vr),c(JF.$$.fragment,Vr),Vr.forEach(r),PNe=i(fs),Dc=n(fs,"DIV",{class:!0});var M3e=s(Dc);c(KF.$$.fragment,M3e),$Ne=i(M3e),sX=n(M3e,"P",{});var X1t=s(sX);INe=t(X1t,"Register a new tokenizer in this mapping."),X1t.forEach(r),M3e.forEach(r),fs.forEach(r),xEe=i(d),ri=n(d,"H2",{class:!0});var E3e=s(ri);Gc=n(E3e,"A",{id:!0,class:!0,href:!0});var W1t=s(Gc);lX=n(W1t,"SPAN",{});var V1t=s(lX);c(YF.$$.fragment,V1t),V1t.forEach(r),W1t.forEach(r),jNe=i(E3e),iX=n(E3e,"SPAN",{});var Q1t=s(iX);NNe=t(Q1t,"AutoFeatureExtractor"),Q1t.forEach(r),E3e.forEach(r),LEe=i(d),Gr=n(d,"DIV",{class:!0});var GT=s(Gr);c(ZF.$$.fragment,GT),DNe=i(GT),eM=n(GT,"P",{});var C3e=s(eM);GNe=t(C3e,`This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the library when created with the `),b8=n(C3e,"A",{href:!0});var H1t=s(b8);ONe=t(H1t,"AutoFeatureExtractor.from_pretrained()"),H1t.forEach(r),qNe=t(C3e," class method."),C3e.forEach(r),zNe=i(GT),oM=n(GT,"P",{});var y3e=s(oM);XNe=t(y3e,"This class cannot be instantiated directly using "),dX=n(y3e,"CODE",{});var U1t=s(dX);WNe=t(U1t,"__init__()"),U1t.forEach(r),VNe=t(y3e," (throws an error)."),y3e.forEach(r),QNe=i(GT),we=n(GT,"DIV",{class:!0});var ur=s(we);c(tM.$$.fragment,ur),HNe=i(ur),mX=n(ur,"P",{});var J1t=s(mX);UNe=t(J1t,"Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary."),J1t.forEach(r),JNe=i(ur),Ma=n(ur,"P",{});var OT=s(Ma);KNe=t(OT,"The feature extractor class to instantiate is selected based on the "),fX=n(OT,"EM",{});var K1t=s(fX);YNe=t(K1t,"model_type"),K1t.forEach(r),ZNe=t(OT,` property of the config object (either passed as an argument or loaded from `),cX=n(OT,"EM",{});var Y1t=s(cX);eDe=t(Y1t,"pretrained_model_name_or_path"),Y1t.forEach(r),oDe=t(OT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),gX=n(OT,"EM",{});var 
Z1t=s(gX);tDe=t(Z1t,"pretrained_model_name_or_path"),Z1t.forEach(r),rDe=t(OT,":"),OT.forEach(r),aDe=i(ur),fe=n(ur,"UL",{});var _e=s(fe);Oc=n(_e,"LI",{});var hpe=s(Oc);hX=n(hpe,"STRONG",{});var e2t=s(hX);nDe=t(e2t,"beit"),e2t.forEach(r),sDe=t(hpe," \u2014 "),T8=n(hpe,"A",{href:!0});var o2t=s(T8);lDe=t(o2t,"BeitFeatureExtractor"),o2t.forEach(r),iDe=t(hpe," (BEiT model)"),hpe.forEach(r),dDe=i(_e),qc=n(_e,"LI",{});var upe=s(qc);uX=n(upe,"STRONG",{});var t2t=s(uX);mDe=t(t2t,"clip"),t2t.forEach(r),fDe=t(upe," \u2014 "),F8=n(upe,"A",{href:!0});var r2t=s(F8);cDe=t(r2t,"CLIPFeatureExtractor"),r2t.forEach(r),gDe=t(upe," (CLIP model)"),upe.forEach(r),hDe=i(_e),zc=n(_e,"LI",{});var ppe=s(zc);pX=n(ppe,"STRONG",{});var a2t=s(pX);uDe=t(a2t,"deit"),a2t.forEach(r),pDe=t(ppe," \u2014 "),M8=n(ppe,"A",{href:!0});var n2t=s(M8);_De=t(n2t,"DeiTFeatureExtractor"),n2t.forEach(r),vDe=t(ppe," (DeiT model)"),ppe.forEach(r),bDe=i(_e),Xc=n(_e,"LI",{});var _pe=s(Xc);_X=n(_pe,"STRONG",{});var s2t=s(_X);TDe=t(s2t,"detr"),s2t.forEach(r),FDe=t(_pe," \u2014 "),E8=n(_pe,"A",{href:!0});var l2t=s(E8);MDe=t(l2t,"DetrFeatureExtractor"),l2t.forEach(r),EDe=t(_pe," (DETR model)"),_pe.forEach(r),CDe=i(_e),Wc=n(_e,"LI",{});var vpe=s(Wc);vX=n(vpe,"STRONG",{});var i2t=s(vX);yDe=t(i2t,"hubert"),i2t.forEach(r),wDe=t(vpe," \u2014 "),C8=n(vpe,"A",{href:!0});var d2t=s(C8);ADe=t(d2t,"Wav2Vec2FeatureExtractor"),d2t.forEach(r),xDe=t(vpe," (Hubert model)"),vpe.forEach(r),LDe=i(_e),Vc=n(_e,"LI",{});var bpe=s(Vc);bX=n(bpe,"STRONG",{});var m2t=s(bX);BDe=t(m2t,"layoutlmv2"),m2t.forEach(r),kDe=t(bpe," \u2014 "),y8=n(bpe,"A",{href:!0});var f2t=s(y8);RDe=t(f2t,"LayoutLMv2FeatureExtractor"),f2t.forEach(r),SDe=t(bpe," (LayoutLMv2 model)"),bpe.forEach(r),PDe=i(_e),Qc=n(_e,"LI",{});var Tpe=s(Qc);TX=n(Tpe,"STRONG",{});var c2t=s(TX);$De=t(c2t,"perceiver"),c2t.forEach(r),IDe=t(Tpe," \u2014 "),w8=n(Tpe,"A",{href:!0});var g2t=s(w8);jDe=t(g2t,"PerceiverFeatureExtractor"),g2t.forEach(r),NDe=t(Tpe," (Perceiver model)"),Tpe.forEach(r),DDe=i(_e),Hc=n(_e,"LI",{});var Fpe=s(Hc);FX=n(Fpe,"STRONG",{});var h2t=s(FX);GDe=t(h2t,"speech_to_text"),h2t.forEach(r),ODe=t(Fpe," \u2014 "),A8=n(Fpe,"A",{href:!0});var u2t=s(A8);qDe=t(u2t,"Speech2TextFeatureExtractor"),u2t.forEach(r),zDe=t(Fpe," (Speech2Text model)"),Fpe.forEach(r),XDe=i(_e),Uc=n(_e,"LI",{});var Mpe=s(Uc);MX=n(Mpe,"STRONG",{});var p2t=s(MX);WDe=t(p2t,"vit"),p2t.forEach(r),VDe=t(Mpe," \u2014 "),x8=n(Mpe,"A",{href:!0});var _2t=s(x8);QDe=t(_2t,"ViTFeatureExtractor"),_2t.forEach(r),HDe=t(Mpe," (ViT model)"),Mpe.forEach(r),UDe=i(_e),Jc=n(_e,"LI",{});var Epe=s(Jc);EX=n(Epe,"STRONG",{});var v2t=s(EX);JDe=t(v2t,"wav2vec2"),v2t.forEach(r),KDe=t(Epe," \u2014 "),L8=n(Epe,"A",{href:!0});var b2t=s(L8);YDe=t(b2t,"Wav2Vec2FeatureExtractor"),b2t.forEach(r),ZDe=t(Epe," (Wav2Vec2 model)"),Epe.forEach(r),_e.forEach(r),eGe=i(ur),c(Kc.$$.fragment,ur),oGe=i(ur),CX=n(ur,"P",{});var T2t=s(CX);tGe=t(T2t,"Examples:"),T2t.forEach(r),rGe=i(ur),c(rM.$$.fragment,ur),ur.forEach(r),GT.forEach(r),BEe=i(d),ai=n(d,"H2",{class:!0});var w3e=s(ai);Yc=n(w3e,"A",{id:!0,class:!0,href:!0});var F2t=s(Yc);yX=n(F2t,"SPAN",{});var M2t=s(yX);c(aM.$$.fragment,M2t),M2t.forEach(r),F2t.forEach(r),aGe=i(w3e),wX=n(w3e,"SPAN",{});var E2t=s(wX);nGe=t(E2t,"AutoProcessor"),E2t.forEach(r),w3e.forEach(r),kEe=i(d),Or=n(d,"DIV",{class:!0});var qT=s(Or);c(nM.$$.fragment,qT),sGe=i(qT),sM=n(qT,"P",{});var A3e=s(sM);lGe=t(A3e,`This is a generic processor class that will be instantiated as one of the processor classes of the library when created with the 
`),B8=n(A3e,"A",{href:!0});var C2t=s(B8);iGe=t(C2t,"AutoProcessor.from_pretrained()"),C2t.forEach(r),dGe=t(A3e," class method."),A3e.forEach(r),mGe=i(qT),lM=n(qT,"P",{});var x3e=s(lM);fGe=t(x3e,"This class cannot be instantiated directly using "),AX=n(x3e,"CODE",{});var y2t=s(AX);cGe=t(y2t,"__init__()"),y2t.forEach(r),gGe=t(x3e," (throws an error)."),x3e.forEach(r),hGe=i(qT),Ae=n(qT,"DIV",{class:!0});var pr=s(Ae);c(iM.$$.fragment,pr),uGe=i(pr),xX=n(pr,"P",{});var w2t=s(xX);pGe=t(w2t,"Instantiate one of the processor classes of the library from a pretrained model vocabulary."),w2t.forEach(r),_Ge=i(pr),ni=n(pr,"P",{});var pD=s(ni);vGe=t(pD,"The processor class to instantiate is selected based on the "),LX=n(pD,"EM",{});var A2t=s(LX);bGe=t(A2t,"model_type"),A2t.forEach(r),TGe=t(pD,` property of the config object (either passed as an argument or loaded from `),BX=n(pD,"EM",{});var x2t=s(BX);FGe=t(x2t,"pretrained_model_name_or_path"),x2t.forEach(r),MGe=t(pD," if possible):"),pD.forEach(r),EGe=i(pr),Je=n(pr,"UL",{});var _r=s(Je);Zc=n(_r,"LI",{});var Cpe=s(Zc);kX=n(Cpe,"STRONG",{});var L2t=s(kX);CGe=t(L2t,"clip"),L2t.forEach(r),yGe=t(Cpe," \u2014 "),k8=n(Cpe,"A",{href:!0});var B2t=s(k8);wGe=t(B2t,"CLIPProcessor"),B2t.forEach(r),AGe=t(Cpe," (CLIP model)"),Cpe.forEach(r),xGe=i(_r),eg=n(_r,"LI",{});var ype=s(eg);RX=n(ype,"STRONG",{});var k2t=s(RX);LGe=t(k2t,"layoutlmv2"),k2t.forEach(r),BGe=t(ype," \u2014 "),R8=n(ype,"A",{href:!0});var R2t=s(R8);kGe=t(R2t,"LayoutLMv2Processor"),R2t.forEach(r),RGe=t(ype," (LayoutLMv2 model)"),ype.forEach(r),SGe=i(_r),og=n(_r,"LI",{});var wpe=s(og);SX=n(wpe,"STRONG",{});var S2t=s(SX);PGe=t(S2t,"speech_to_text"),S2t.forEach(r),$Ge=t(wpe," \u2014 "),S8=n(wpe,"A",{href:!0});var P2t=s(S8);IGe=t(P2t,"Speech2TextProcessor"),P2t.forEach(r),jGe=t(wpe," (Speech2Text model)"),wpe.forEach(r),NGe=i(_r),tg=n(_r,"LI",{});var Ape=s(tg);PX=n(Ape,"STRONG",{});var $2t=s(PX);DGe=t($2t,"speech_to_text_2"),$2t.forEach(r),GGe=t(Ape," \u2014 "),P8=n(Ape,"A",{href:!0});var I2t=s(P8);OGe=t(I2t,"Speech2Text2Processor"),I2t.forEach(r),qGe=t(Ape," (Speech2Text2 model)"),Ape.forEach(r),zGe=i(_r),rg=n(_r,"LI",{});var xpe=s(rg);$X=n(xpe,"STRONG",{});var j2t=s($X);XGe=t(j2t,"trocr"),j2t.forEach(r),WGe=t(xpe," \u2014 "),$8=n(xpe,"A",{href:!0});var N2t=s($8);VGe=t(N2t,"TrOCRProcessor"),N2t.forEach(r),QGe=t(xpe," (TrOCR model)"),xpe.forEach(r),HGe=i(_r),ag=n(_r,"LI",{});var Lpe=s(ag);IX=n(Lpe,"STRONG",{});var D2t=s(IX);UGe=t(D2t,"vision-text-dual-encoder"),D2t.forEach(r),JGe=t(Lpe," \u2014 "),I8=n(Lpe,"A",{href:!0});var G2t=s(I8);KGe=t(G2t,"VisionTextDualEncoderProcessor"),G2t.forEach(r),YGe=t(Lpe," (VisionTextDualEncoder model)"),Lpe.forEach(r),ZGe=i(_r),ng=n(_r,"LI",{});var Bpe=s(ng);jX=n(Bpe,"STRONG",{});var O2t=s(jX);eOe=t(O2t,"wav2vec2"),O2t.forEach(r),oOe=t(Bpe," \u2014 "),j8=n(Bpe,"A",{href:!0});var q2t=s(j8);tOe=t(q2t,"Wav2Vec2Processor"),q2t.forEach(r),rOe=t(Bpe," (Wav2Vec2 model)"),Bpe.forEach(r),_r.forEach(r),aOe=i(pr),c(sg.$$.fragment,pr),nOe=i(pr),NX=n(pr,"P",{});var z2t=s(NX);sOe=t(z2t,"Examples:"),z2t.forEach(r),lOe=i(pr),c(dM.$$.fragment,pr),pr.forEach(r),qT.forEach(r),REe=i(d),si=n(d,"H2",{class:!0});var L3e=s(si);lg=n(L3e,"A",{id:!0,class:!0,href:!0});var X2t=s(lg);DX=n(X2t,"SPAN",{});var W2t=s(DX);c(mM.$$.fragment,W2t),W2t.forEach(r),X2t.forEach(r),iOe=i(L3e),GX=n(L3e,"SPAN",{});var V2t=s(GX);dOe=t(V2t,"AutoModel"),V2t.forEach(r),L3e.forEach(r),SEe=i(d),Po=n(d,"DIV",{class:!0});var cs=s(Po);c(fM.$$.fragment,cs),mOe=i(cs),li=n(cs,"P",{});var _D=s(li);fOe=t(_D,`This is a generic model 
class that will be instantiated as one of the base model classes of the library when created with the `),OX=n(_D,"CODE",{});var Q2t=s(OX);cOe=t(Q2t,"from_pretrained()"),Q2t.forEach(r),gOe=t(_D,` class method or the `),qX=n(_D,"CODE",{});var H2t=s(qX);hOe=t(H2t,"from_config()"),H2t.forEach(r),uOe=t(_D," class method."),_D.forEach(r),pOe=i(cs),cM=n(cs,"P",{});var B3e=s(cM);_Oe=t(B3e,"This class cannot be instantiated directly using "),zX=n(B3e,"CODE",{});var U2t=s(zX);vOe=t(U2t,"__init__()"),U2t.forEach(r),bOe=t(B3e," (throws an error)."),B3e.forEach(r),TOe=i(cs),wt=n(cs,"DIV",{class:!0});var gs=s(wt);c(gM.$$.fragment,gs),FOe=i(gs),XX=n(gs,"P",{});var J2t=s(XX);MOe=t(J2t,"Instantiates one of the base model classes of the library from a configuration."),J2t.forEach(r),EOe=i(gs),ii=n(gs,"P",{});var vD=s(ii);COe=t(vD,`Note: Loading a model from its configuration file does `),WX=n(vD,"STRONG",{});var K2t=s(WX);yOe=t(K2t,"not"),K2t.forEach(r),wOe=t(vD,` load the model weights. It only affects the model\u2019s configuration. Use [`),VX=n(vD,"EM",{});var Y2t=s(VX);AOe=t(Y2t,"~AutoModel.from_pretrained"),Y2t.forEach(r),xOe=t(vD,`] to load the model weights.`),vD.forEach(r),LOe=i(gs),QX=n(gs,"P",{});var Z2t=s(QX);BOe=t(Z2t,"Examples:"),Z2t.forEach(r),kOe=i(gs),c(hM.$$.fragment,gs),gs.forEach(r),ROe=i(cs),xe=n(cs,"DIV",{class:!0});var vr=s(xe);c(uM.$$.fragment,vr),SOe=i(vr),HX=n(vr,"P",{});var ebt=s(HX);POe=t(ebt,"Instantiate one of the base model classes of the library from a pretrained model."),ebt.forEach(r),$Oe=i(vr),Ea=n(vr,"P",{});var zT=s(Ea);IOe=t(zT,"The model class to instantiate is selected based on the "),UX=n(zT,"EM",{});var obt=s(UX);jOe=t(obt,"model_type"),obt.forEach(r),NOe=t(zT,` property of the config object (either passed as an argument or loaded from `),JX=n(zT,"EM",{});var tbt=s(JX);DOe=t(tbt,"pretrained_model_name_or_path"),tbt.forEach(r),GOe=t(zT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),KX=n(zT,"EM",{});var rbt=s(KX);OOe=t(rbt,"pretrained_model_name_or_path"),rbt.forEach(r),qOe=t(zT,":"),zT.forEach(r),zOe=i(vr),F=n(vr,"UL",{});var M=s(F);ig=n(M,"LI",{});var kpe=s(ig);YX=n(kpe,"STRONG",{});var abt=s(YX);XOe=t(abt,"albert"),abt.forEach(r),WOe=t(kpe," \u2014 "),N8=n(kpe,"A",{href:!0});var nbt=s(N8);VOe=t(nbt,"AlbertModel"),nbt.forEach(r),QOe=t(kpe," (ALBERT model)"),kpe.forEach(r),HOe=i(M),dg=n(M,"LI",{});var Rpe=s(dg);ZX=n(Rpe,"STRONG",{});var sbt=s(ZX);UOe=t(sbt,"bart"),sbt.forEach(r),JOe=t(Rpe," \u2014 "),D8=n(Rpe,"A",{href:!0});var lbt=s(D8);KOe=t(lbt,"BartModel"),lbt.forEach(r),YOe=t(Rpe," (BART model)"),Rpe.forEach(r),ZOe=i(M),mg=n(M,"LI",{});var Spe=s(mg);eW=n(Spe,"STRONG",{});var ibt=s(eW);eqe=t(ibt,"beit"),ibt.forEach(r),oqe=t(Spe," \u2014 "),G8=n(Spe,"A",{href:!0});var dbt=s(G8);tqe=t(dbt,"BeitModel"),dbt.forEach(r),rqe=t(Spe," (BEiT model)"),Spe.forEach(r),aqe=i(M),fg=n(M,"LI",{});var Ppe=s(fg);oW=n(Ppe,"STRONG",{});var mbt=s(oW);nqe=t(mbt,"bert"),mbt.forEach(r),sqe=t(Ppe," \u2014 "),O8=n(Ppe,"A",{href:!0});var fbt=s(O8);lqe=t(fbt,"BertModel"),fbt.forEach(r),iqe=t(Ppe," (BERT model)"),Ppe.forEach(r),dqe=i(M),cg=n(M,"LI",{});var $pe=s(cg);tW=n($pe,"STRONG",{});var cbt=s(tW);mqe=t(cbt,"bert-generation"),cbt.forEach(r),fqe=t($pe," \u2014 "),q8=n($pe,"A",{href:!0});var gbt=s(q8);cqe=t(gbt,"BertGenerationEncoder"),gbt.forEach(r),gqe=t($pe," (Bert Generation model)"),$pe.forEach(r),hqe=i(M),gg=n(M,"LI",{});var Ipe=s(gg);rW=n(Ipe,"STRONG",{});var hbt=s(rW);uqe=t(hbt,"big_bird"),hbt.forEach(r),pqe=t(Ipe," \u2014 
"),z8=n(Ipe,"A",{href:!0});var ubt=s(z8);_qe=t(ubt,"BigBirdModel"),ubt.forEach(r),vqe=t(Ipe," (BigBird model)"),Ipe.forEach(r),bqe=i(M),hg=n(M,"LI",{});var jpe=s(hg);aW=n(jpe,"STRONG",{});var pbt=s(aW);Tqe=t(pbt,"bigbird_pegasus"),pbt.forEach(r),Fqe=t(jpe," \u2014 "),X8=n(jpe,"A",{href:!0});var _bt=s(X8);Mqe=t(_bt,"BigBirdPegasusModel"),_bt.forEach(r),Eqe=t(jpe," (BigBirdPegasus model)"),jpe.forEach(r),Cqe=i(M),ug=n(M,"LI",{});var Npe=s(ug);nW=n(Npe,"STRONG",{});var vbt=s(nW);yqe=t(vbt,"blenderbot"),vbt.forEach(r),wqe=t(Npe," \u2014 "),W8=n(Npe,"A",{href:!0});var bbt=s(W8);Aqe=t(bbt,"BlenderbotModel"),bbt.forEach(r),xqe=t(Npe," (Blenderbot model)"),Npe.forEach(r),Lqe=i(M),pg=n(M,"LI",{});var Dpe=s(pg);sW=n(Dpe,"STRONG",{});var Tbt=s(sW);Bqe=t(Tbt,"blenderbot-small"),Tbt.forEach(r),kqe=t(Dpe," \u2014 "),V8=n(Dpe,"A",{href:!0});var Fbt=s(V8);Rqe=t(Fbt,"BlenderbotSmallModel"),Fbt.forEach(r),Sqe=t(Dpe," (BlenderbotSmall model)"),Dpe.forEach(r),Pqe=i(M),_g=n(M,"LI",{});var Gpe=s(_g);lW=n(Gpe,"STRONG",{});var Mbt=s(lW);$qe=t(Mbt,"camembert"),Mbt.forEach(r),Iqe=t(Gpe," \u2014 "),Q8=n(Gpe,"A",{href:!0});var Ebt=s(Q8);jqe=t(Ebt,"CamembertModel"),Ebt.forEach(r),Nqe=t(Gpe," (CamemBERT model)"),Gpe.forEach(r),Dqe=i(M),vg=n(M,"LI",{});var Ope=s(vg);iW=n(Ope,"STRONG",{});var Cbt=s(iW);Gqe=t(Cbt,"canine"),Cbt.forEach(r),Oqe=t(Ope," \u2014 "),H8=n(Ope,"A",{href:!0});var ybt=s(H8);qqe=t(ybt,"CanineModel"),ybt.forEach(r),zqe=t(Ope," (Canine model)"),Ope.forEach(r),Xqe=i(M),bg=n(M,"LI",{});var qpe=s(bg);dW=n(qpe,"STRONG",{});var wbt=s(dW);Wqe=t(wbt,"clip"),wbt.forEach(r),Vqe=t(qpe," \u2014 "),U8=n(qpe,"A",{href:!0});var Abt=s(U8);Qqe=t(Abt,"CLIPModel"),Abt.forEach(r),Hqe=t(qpe," (CLIP model)"),qpe.forEach(r),Uqe=i(M),Tg=n(M,"LI",{});var zpe=s(Tg);mW=n(zpe,"STRONG",{});var xbt=s(mW);Jqe=t(xbt,"convbert"),xbt.forEach(r),Kqe=t(zpe," \u2014 "),J8=n(zpe,"A",{href:!0});var Lbt=s(J8);Yqe=t(Lbt,"ConvBertModel"),Lbt.forEach(r),Zqe=t(zpe," (ConvBERT model)"),zpe.forEach(r),eze=i(M),Fg=n(M,"LI",{});var Xpe=s(Fg);fW=n(Xpe,"STRONG",{});var Bbt=s(fW);oze=t(Bbt,"ctrl"),Bbt.forEach(r),tze=t(Xpe," \u2014 "),K8=n(Xpe,"A",{href:!0});var kbt=s(K8);rze=t(kbt,"CTRLModel"),kbt.forEach(r),aze=t(Xpe," (CTRL model)"),Xpe.forEach(r),nze=i(M),Mg=n(M,"LI",{});var Wpe=s(Mg);cW=n(Wpe,"STRONG",{});var Rbt=s(cW);sze=t(Rbt,"deberta"),Rbt.forEach(r),lze=t(Wpe," \u2014 "),Y8=n(Wpe,"A",{href:!0});var Sbt=s(Y8);ize=t(Sbt,"DebertaModel"),Sbt.forEach(r),dze=t(Wpe," (DeBERTa model)"),Wpe.forEach(r),mze=i(M),Eg=n(M,"LI",{});var Vpe=s(Eg);gW=n(Vpe,"STRONG",{});var Pbt=s(gW);fze=t(Pbt,"deberta-v2"),Pbt.forEach(r),cze=t(Vpe," \u2014 "),Z8=n(Vpe,"A",{href:!0});var $bt=s(Z8);gze=t($bt,"DebertaV2Model"),$bt.forEach(r),hze=t(Vpe," (DeBERTa-v2 model)"),Vpe.forEach(r),uze=i(M),Cg=n(M,"LI",{});var Qpe=s(Cg);hW=n(Qpe,"STRONG",{});var Ibt=s(hW);pze=t(Ibt,"deit"),Ibt.forEach(r),_ze=t(Qpe," \u2014 "),eL=n(Qpe,"A",{href:!0});var jbt=s(eL);vze=t(jbt,"DeiTModel"),jbt.forEach(r),bze=t(Qpe," (DeiT model)"),Qpe.forEach(r),Tze=i(M),yg=n(M,"LI",{});var Hpe=s(yg);uW=n(Hpe,"STRONG",{});var Nbt=s(uW);Fze=t(Nbt,"detr"),Nbt.forEach(r),Mze=t(Hpe," \u2014 "),oL=n(Hpe,"A",{href:!0});var Dbt=s(oL);Eze=t(Dbt,"DetrModel"),Dbt.forEach(r),Cze=t(Hpe," (DETR model)"),Hpe.forEach(r),yze=i(M),wg=n(M,"LI",{});var Upe=s(wg);pW=n(Upe,"STRONG",{});var Gbt=s(pW);wze=t(Gbt,"distilbert"),Gbt.forEach(r),Aze=t(Upe," \u2014 "),tL=n(Upe,"A",{href:!0});var Obt=s(tL);xze=t(Obt,"DistilBertModel"),Obt.forEach(r),Lze=t(Upe," (DistilBERT model)"),Upe.forEach(r),Bze=i(M),Ag=n(M,"LI",{});var 
Jpe=s(Ag);_W=n(Jpe,"STRONG",{});var qbt=s(_W);kze=t(qbt,"dpr"),qbt.forEach(r),Rze=t(Jpe," \u2014 "),rL=n(Jpe,"A",{href:!0});var zbt=s(rL);Sze=t(zbt,"DPRQuestionEncoder"),zbt.forEach(r),Pze=t(Jpe," (DPR model)"),Jpe.forEach(r),$ze=i(M),xg=n(M,"LI",{});var Kpe=s(xg);vW=n(Kpe,"STRONG",{});var Xbt=s(vW);Ize=t(Xbt,"electra"),Xbt.forEach(r),jze=t(Kpe," \u2014 "),aL=n(Kpe,"A",{href:!0});var Wbt=s(aL);Nze=t(Wbt,"ElectraModel"),Wbt.forEach(r),Dze=t(Kpe," (ELECTRA model)"),Kpe.forEach(r),Gze=i(M),Lg=n(M,"LI",{});var Ype=s(Lg);bW=n(Ype,"STRONG",{});var Vbt=s(bW);Oze=t(Vbt,"flaubert"),Vbt.forEach(r),qze=t(Ype," \u2014 "),nL=n(Ype,"A",{href:!0});var Qbt=s(nL);zze=t(Qbt,"FlaubertModel"),Qbt.forEach(r),Xze=t(Ype," (FlauBERT model)"),Ype.forEach(r),Wze=i(M),Bg=n(M,"LI",{});var Zpe=s(Bg);TW=n(Zpe,"STRONG",{});var Hbt=s(TW);Vze=t(Hbt,"fnet"),Hbt.forEach(r),Qze=t(Zpe," \u2014 "),sL=n(Zpe,"A",{href:!0});var Ubt=s(sL);Hze=t(Ubt,"FNetModel"),Ubt.forEach(r),Uze=t(Zpe," (FNet model)"),Zpe.forEach(r),Jze=i(M),kg=n(M,"LI",{});var e_e=s(kg);FW=n(e_e,"STRONG",{});var Jbt=s(FW);Kze=t(Jbt,"fsmt"),Jbt.forEach(r),Yze=t(e_e," \u2014 "),lL=n(e_e,"A",{href:!0});var Kbt=s(lL);Zze=t(Kbt,"FSMTModel"),Kbt.forEach(r),eXe=t(e_e," (FairSeq Machine-Translation model)"),e_e.forEach(r),oXe=i(M),ls=n(M,"LI",{});var SA=s(ls);MW=n(SA,"STRONG",{});var Ybt=s(MW);tXe=t(Ybt,"funnel"),Ybt.forEach(r),rXe=t(SA," \u2014 "),iL=n(SA,"A",{href:!0});var Zbt=s(iL);aXe=t(Zbt,"FunnelModel"),Zbt.forEach(r),nXe=t(SA," or "),dL=n(SA,"A",{href:!0});var e4t=s(dL);sXe=t(e4t,"FunnelBaseModel"),e4t.forEach(r),lXe=t(SA," (Funnel Transformer model)"),SA.forEach(r),iXe=i(M),Rg=n(M,"LI",{});var o_e=s(Rg);EW=n(o_e,"STRONG",{});var o4t=s(EW);dXe=t(o4t,"gpt2"),o4t.forEach(r),mXe=t(o_e," \u2014 "),mL=n(o_e,"A",{href:!0});var t4t=s(mL);fXe=t(t4t,"GPT2Model"),t4t.forEach(r),cXe=t(o_e," (OpenAI GPT-2 model)"),o_e.forEach(r),gXe=i(M),Sg=n(M,"LI",{});var t_e=s(Sg);CW=n(t_e,"STRONG",{});var r4t=s(CW);hXe=t(r4t,"gpt_neo"),r4t.forEach(r),uXe=t(t_e," \u2014 "),fL=n(t_e,"A",{href:!0});var a4t=s(fL);pXe=t(a4t,"GPTNeoModel"),a4t.forEach(r),_Xe=t(t_e," (GPT Neo model)"),t_e.forEach(r),vXe=i(M),Pg=n(M,"LI",{});var r_e=s(Pg);yW=n(r_e,"STRONG",{});var n4t=s(yW);bXe=t(n4t,"gptj"),n4t.forEach(r),TXe=t(r_e," \u2014 "),cL=n(r_e,"A",{href:!0});var s4t=s(cL);FXe=t(s4t,"GPTJModel"),s4t.forEach(r),MXe=t(r_e," (GPT-J model)"),r_e.forEach(r),EXe=i(M),$g=n(M,"LI",{});var a_e=s($g);wW=n(a_e,"STRONG",{});var l4t=s(wW);CXe=t(l4t,"hubert"),l4t.forEach(r),yXe=t(a_e," \u2014 "),gL=n(a_e,"A",{href:!0});var i4t=s(gL);wXe=t(i4t,"HubertModel"),i4t.forEach(r),AXe=t(a_e," (Hubert model)"),a_e.forEach(r),xXe=i(M),Ig=n(M,"LI",{});var n_e=s(Ig);AW=n(n_e,"STRONG",{});var d4t=s(AW);LXe=t(d4t,"ibert"),d4t.forEach(r),BXe=t(n_e," \u2014 "),hL=n(n_e,"A",{href:!0});var m4t=s(hL);kXe=t(m4t,"IBertModel"),m4t.forEach(r),RXe=t(n_e," (I-BERT model)"),n_e.forEach(r),SXe=i(M),jg=n(M,"LI",{});var s_e=s(jg);xW=n(s_e,"STRONG",{});var f4t=s(xW);PXe=t(f4t,"imagegpt"),f4t.forEach(r),$Xe=t(s_e," \u2014 "),uL=n(s_e,"A",{href:!0});var c4t=s(uL);IXe=t(c4t,"ImageGPTModel"),c4t.forEach(r),jXe=t(s_e," (ImageGPT model)"),s_e.forEach(r),NXe=i(M),Ng=n(M,"LI",{});var l_e=s(Ng);LW=n(l_e,"STRONG",{});var g4t=s(LW);DXe=t(g4t,"layoutlm"),g4t.forEach(r),GXe=t(l_e," \u2014 "),pL=n(l_e,"A",{href:!0});var h4t=s(pL);OXe=t(h4t,"LayoutLMModel"),h4t.forEach(r),qXe=t(l_e," (LayoutLM model)"),l_e.forEach(r),zXe=i(M),Dg=n(M,"LI",{});var i_e=s(Dg);BW=n(i_e,"STRONG",{});var u4t=s(BW);XXe=t(u4t,"layoutlmv2"),u4t.forEach(r),WXe=t(i_e," \u2014 
"),_L=n(i_e,"A",{href:!0});var p4t=s(_L);VXe=t(p4t,"LayoutLMv2Model"),p4t.forEach(r),QXe=t(i_e," (LayoutLMv2 model)"),i_e.forEach(r),HXe=i(M),Gg=n(M,"LI",{});var d_e=s(Gg);kW=n(d_e,"STRONG",{});var _4t=s(kW);UXe=t(_4t,"led"),_4t.forEach(r),JXe=t(d_e," \u2014 "),vL=n(d_e,"A",{href:!0});var v4t=s(vL);KXe=t(v4t,"LEDModel"),v4t.forEach(r),YXe=t(d_e," (LED model)"),d_e.forEach(r),ZXe=i(M),Og=n(M,"LI",{});var m_e=s(Og);RW=n(m_e,"STRONG",{});var b4t=s(RW);eWe=t(b4t,"longformer"),b4t.forEach(r),oWe=t(m_e," \u2014 "),bL=n(m_e,"A",{href:!0});var T4t=s(bL);tWe=t(T4t,"LongformerModel"),T4t.forEach(r),rWe=t(m_e," (Longformer model)"),m_e.forEach(r),aWe=i(M),qg=n(M,"LI",{});var f_e=s(qg);SW=n(f_e,"STRONG",{});var F4t=s(SW);nWe=t(F4t,"luke"),F4t.forEach(r),sWe=t(f_e," \u2014 "),TL=n(f_e,"A",{href:!0});var M4t=s(TL);lWe=t(M4t,"LukeModel"),M4t.forEach(r),iWe=t(f_e," (LUKE model)"),f_e.forEach(r),dWe=i(M),zg=n(M,"LI",{});var c_e=s(zg);PW=n(c_e,"STRONG",{});var E4t=s(PW);mWe=t(E4t,"lxmert"),E4t.forEach(r),fWe=t(c_e," \u2014 "),FL=n(c_e,"A",{href:!0});var C4t=s(FL);cWe=t(C4t,"LxmertModel"),C4t.forEach(r),gWe=t(c_e," (LXMERT model)"),c_e.forEach(r),hWe=i(M),Xg=n(M,"LI",{});var g_e=s(Xg);$W=n(g_e,"STRONG",{});var y4t=s($W);uWe=t(y4t,"m2m_100"),y4t.forEach(r),pWe=t(g_e," \u2014 "),ML=n(g_e,"A",{href:!0});var w4t=s(ML);_We=t(w4t,"M2M100Model"),w4t.forEach(r),vWe=t(g_e," (M2M100 model)"),g_e.forEach(r),bWe=i(M),Wg=n(M,"LI",{});var h_e=s(Wg);IW=n(h_e,"STRONG",{});var A4t=s(IW);TWe=t(A4t,"marian"),A4t.forEach(r),FWe=t(h_e," \u2014 "),EL=n(h_e,"A",{href:!0});var x4t=s(EL);MWe=t(x4t,"MarianModel"),x4t.forEach(r),EWe=t(h_e," (Marian model)"),h_e.forEach(r),CWe=i(M),Vg=n(M,"LI",{});var u_e=s(Vg);jW=n(u_e,"STRONG",{});var L4t=s(jW);yWe=t(L4t,"mbart"),L4t.forEach(r),wWe=t(u_e," \u2014 "),CL=n(u_e,"A",{href:!0});var B4t=s(CL);AWe=t(B4t,"MBartModel"),B4t.forEach(r),xWe=t(u_e," (mBART model)"),u_e.forEach(r),LWe=i(M),Qg=n(M,"LI",{});var p_e=s(Qg);NW=n(p_e,"STRONG",{});var k4t=s(NW);BWe=t(k4t,"megatron-bert"),k4t.forEach(r),kWe=t(p_e," \u2014 "),yL=n(p_e,"A",{href:!0});var R4t=s(yL);RWe=t(R4t,"MegatronBertModel"),R4t.forEach(r),SWe=t(p_e," (MegatronBert model)"),p_e.forEach(r),PWe=i(M),Hg=n(M,"LI",{});var __e=s(Hg);DW=n(__e,"STRONG",{});var S4t=s(DW);$We=t(S4t,"mobilebert"),S4t.forEach(r),IWe=t(__e," \u2014 "),wL=n(__e,"A",{href:!0});var P4t=s(wL);jWe=t(P4t,"MobileBertModel"),P4t.forEach(r),NWe=t(__e," (MobileBERT model)"),__e.forEach(r),DWe=i(M),Ug=n(M,"LI",{});var v_e=s(Ug);GW=n(v_e,"STRONG",{});var $4t=s(GW);GWe=t($4t,"mpnet"),$4t.forEach(r),OWe=t(v_e," \u2014 "),AL=n(v_e,"A",{href:!0});var I4t=s(AL);qWe=t(I4t,"MPNetModel"),I4t.forEach(r),zWe=t(v_e," (MPNet model)"),v_e.forEach(r),XWe=i(M),Jg=n(M,"LI",{});var b_e=s(Jg);OW=n(b_e,"STRONG",{});var j4t=s(OW);WWe=t(j4t,"mt5"),j4t.forEach(r),VWe=t(b_e," \u2014 "),xL=n(b_e,"A",{href:!0});var N4t=s(xL);QWe=t(N4t,"MT5Model"),N4t.forEach(r),HWe=t(b_e," (mT5 model)"),b_e.forEach(r),UWe=i(M),Kg=n(M,"LI",{});var T_e=s(Kg);qW=n(T_e,"STRONG",{});var D4t=s(qW);JWe=t(D4t,"openai-gpt"),D4t.forEach(r),KWe=t(T_e," \u2014 "),LL=n(T_e,"A",{href:!0});var G4t=s(LL);YWe=t(G4t,"OpenAIGPTModel"),G4t.forEach(r),ZWe=t(T_e," (OpenAI GPT model)"),T_e.forEach(r),eVe=i(M),Yg=n(M,"LI",{});var F_e=s(Yg);zW=n(F_e,"STRONG",{});var O4t=s(zW);oVe=t(O4t,"pegasus"),O4t.forEach(r),tVe=t(F_e," \u2014 "),BL=n(F_e,"A",{href:!0});var q4t=s(BL);rVe=t(q4t,"PegasusModel"),q4t.forEach(r),aVe=t(F_e," (Pegasus model)"),F_e.forEach(r),nVe=i(M),Zg=n(M,"LI",{});var M_e=s(Zg);XW=n(M_e,"STRONG",{});var 
z4t=s(XW);sVe=t(z4t,"perceiver"),z4t.forEach(r),lVe=t(M_e," \u2014 "),kL=n(M_e,"A",{href:!0});var X4t=s(kL);iVe=t(X4t,"PerceiverModel"),X4t.forEach(r),dVe=t(M_e," (Perceiver model)"),M_e.forEach(r),mVe=i(M),eh=n(M,"LI",{});var E_e=s(eh);WW=n(E_e,"STRONG",{});var W4t=s(WW);fVe=t(W4t,"prophetnet"),W4t.forEach(r),cVe=t(E_e," \u2014 "),RL=n(E_e,"A",{href:!0});var V4t=s(RL);gVe=t(V4t,"ProphetNetModel"),V4t.forEach(r),hVe=t(E_e," (ProphetNet model)"),E_e.forEach(r),uVe=i(M),oh=n(M,"LI",{});var C_e=s(oh);VW=n(C_e,"STRONG",{});var Q4t=s(VW);pVe=t(Q4t,"qdqbert"),Q4t.forEach(r),_Ve=t(C_e," \u2014 "),SL=n(C_e,"A",{href:!0});var H4t=s(SL);vVe=t(H4t,"QDQBertModel"),H4t.forEach(r),bVe=t(C_e," (QDQBert model)"),C_e.forEach(r),TVe=i(M),th=n(M,"LI",{});var y_e=s(th);QW=n(y_e,"STRONG",{});var U4t=s(QW);FVe=t(U4t,"reformer"),U4t.forEach(r),MVe=t(y_e," \u2014 "),PL=n(y_e,"A",{href:!0});var J4t=s(PL);EVe=t(J4t,"ReformerModel"),J4t.forEach(r),CVe=t(y_e," (Reformer model)"),y_e.forEach(r),yVe=i(M),rh=n(M,"LI",{});var w_e=s(rh);HW=n(w_e,"STRONG",{});var K4t=s(HW);wVe=t(K4t,"rembert"),K4t.forEach(r),AVe=t(w_e," \u2014 "),$L=n(w_e,"A",{href:!0});var Y4t=s($L);xVe=t(Y4t,"RemBertModel"),Y4t.forEach(r),LVe=t(w_e," (RemBERT model)"),w_e.forEach(r),BVe=i(M),ah=n(M,"LI",{});var A_e=s(ah);UW=n(A_e,"STRONG",{});var Z4t=s(UW);kVe=t(Z4t,"retribert"),Z4t.forEach(r),RVe=t(A_e," \u2014 "),IL=n(A_e,"A",{href:!0});var e5t=s(IL);SVe=t(e5t,"RetriBertModel"),e5t.forEach(r),PVe=t(A_e," (RetriBERT model)"),A_e.forEach(r),$Ve=i(M),nh=n(M,"LI",{});var x_e=s(nh);JW=n(x_e,"STRONG",{});var o5t=s(JW);IVe=t(o5t,"roberta"),o5t.forEach(r),jVe=t(x_e," \u2014 "),jL=n(x_e,"A",{href:!0});var t5t=s(jL);NVe=t(t5t,"RobertaModel"),t5t.forEach(r),DVe=t(x_e," (RoBERTa model)"),x_e.forEach(r),GVe=i(M),sh=n(M,"LI",{});var L_e=s(sh);KW=n(L_e,"STRONG",{});var r5t=s(KW);OVe=t(r5t,"roformer"),r5t.forEach(r),qVe=t(L_e," \u2014 "),NL=n(L_e,"A",{href:!0});var a5t=s(NL);zVe=t(a5t,"RoFormerModel"),a5t.forEach(r),XVe=t(L_e," (RoFormer model)"),L_e.forEach(r),WVe=i(M),lh=n(M,"LI",{});var B_e=s(lh);YW=n(B_e,"STRONG",{});var n5t=s(YW);VVe=t(n5t,"segformer"),n5t.forEach(r),QVe=t(B_e," \u2014 "),DL=n(B_e,"A",{href:!0});var s5t=s(DL);HVe=t(s5t,"SegformerModel"),s5t.forEach(r),UVe=t(B_e," (SegFormer model)"),B_e.forEach(r),JVe=i(M),ih=n(M,"LI",{});var k_e=s(ih);ZW=n(k_e,"STRONG",{});var l5t=s(ZW);KVe=t(l5t,"sew"),l5t.forEach(r),YVe=t(k_e," \u2014 "),GL=n(k_e,"A",{href:!0});var i5t=s(GL);ZVe=t(i5t,"SEWModel"),i5t.forEach(r),eQe=t(k_e," (SEW model)"),k_e.forEach(r),oQe=i(M),dh=n(M,"LI",{});var R_e=s(dh);eV=n(R_e,"STRONG",{});var d5t=s(eV);tQe=t(d5t,"sew-d"),d5t.forEach(r),rQe=t(R_e," \u2014 "),OL=n(R_e,"A",{href:!0});var m5t=s(OL);aQe=t(m5t,"SEWDModel"),m5t.forEach(r),nQe=t(R_e," (SEW-D model)"),R_e.forEach(r),sQe=i(M),mh=n(M,"LI",{});var S_e=s(mh);oV=n(S_e,"STRONG",{});var f5t=s(oV);lQe=t(f5t,"speech_to_text"),f5t.forEach(r),iQe=t(S_e," \u2014 "),qL=n(S_e,"A",{href:!0});var c5t=s(qL);dQe=t(c5t,"Speech2TextModel"),c5t.forEach(r),mQe=t(S_e," (Speech2Text model)"),S_e.forEach(r),fQe=i(M),fh=n(M,"LI",{});var P_e=s(fh);tV=n(P_e,"STRONG",{});var g5t=s(tV);cQe=t(g5t,"splinter"),g5t.forEach(r),gQe=t(P_e," \u2014 "),zL=n(P_e,"A",{href:!0});var h5t=s(zL);hQe=t(h5t,"SplinterModel"),h5t.forEach(r),uQe=t(P_e," (Splinter model)"),P_e.forEach(r),pQe=i(M),ch=n(M,"LI",{});var $_e=s(ch);rV=n($_e,"STRONG",{});var u5t=s(rV);_Qe=t(u5t,"squeezebert"),u5t.forEach(r),vQe=t($_e," \u2014 "),XL=n($_e,"A",{href:!0});var p5t=s(XL);bQe=t(p5t,"SqueezeBertModel"),p5t.forEach(r),TQe=t($_e," (SqueezeBERT 
model)"),$_e.forEach(r),FQe=i(M),gh=n(M,"LI",{});var I_e=s(gh);aV=n(I_e,"STRONG",{});var _5t=s(aV);MQe=t(_5t,"t5"),_5t.forEach(r),EQe=t(I_e," \u2014 "),WL=n(I_e,"A",{href:!0});var v5t=s(WL);CQe=t(v5t,"T5Model"),v5t.forEach(r),yQe=t(I_e," (T5 model)"),I_e.forEach(r),wQe=i(M),hh=n(M,"LI",{});var j_e=s(hh);nV=n(j_e,"STRONG",{});var b5t=s(nV);AQe=t(b5t,"tapas"),b5t.forEach(r),xQe=t(j_e," \u2014 "),VL=n(j_e,"A",{href:!0});var T5t=s(VL);LQe=t(T5t,"TapasModel"),T5t.forEach(r),BQe=t(j_e," (TAPAS model)"),j_e.forEach(r),kQe=i(M),uh=n(M,"LI",{});var N_e=s(uh);sV=n(N_e,"STRONG",{});var F5t=s(sV);RQe=t(F5t,"transfo-xl"),F5t.forEach(r),SQe=t(N_e," \u2014 "),QL=n(N_e,"A",{href:!0});var M5t=s(QL);PQe=t(M5t,"TransfoXLModel"),M5t.forEach(r),$Qe=t(N_e," (Transformer-XL model)"),N_e.forEach(r),IQe=i(M),ph=n(M,"LI",{});var D_e=s(ph);lV=n(D_e,"STRONG",{});var E5t=s(lV);jQe=t(E5t,"unispeech"),E5t.forEach(r),NQe=t(D_e," \u2014 "),HL=n(D_e,"A",{href:!0});var C5t=s(HL);DQe=t(C5t,"UniSpeechModel"),C5t.forEach(r),GQe=t(D_e," (UniSpeech model)"),D_e.forEach(r),OQe=i(M),_h=n(M,"LI",{});var G_e=s(_h);iV=n(G_e,"STRONG",{});var y5t=s(iV);qQe=t(y5t,"unispeech-sat"),y5t.forEach(r),zQe=t(G_e," \u2014 "),UL=n(G_e,"A",{href:!0});var w5t=s(UL);XQe=t(w5t,"UniSpeechSatModel"),w5t.forEach(r),WQe=t(G_e," (UniSpeechSat model)"),G_e.forEach(r),VQe=i(M),vh=n(M,"LI",{});var O_e=s(vh);dV=n(O_e,"STRONG",{});var A5t=s(dV);QQe=t(A5t,"vision-text-dual-encoder"),A5t.forEach(r),HQe=t(O_e," \u2014 "),JL=n(O_e,"A",{href:!0});var x5t=s(JL);UQe=t(x5t,"VisionTextDualEncoderModel"),x5t.forEach(r),JQe=t(O_e," (VisionTextDualEncoder model)"),O_e.forEach(r),KQe=i(M),bh=n(M,"LI",{});var q_e=s(bh);mV=n(q_e,"STRONG",{});var L5t=s(mV);YQe=t(L5t,"visual_bert"),L5t.forEach(r),ZQe=t(q_e," \u2014 "),KL=n(q_e,"A",{href:!0});var B5t=s(KL);eHe=t(B5t,"VisualBertModel"),B5t.forEach(r),oHe=t(q_e," (VisualBert model)"),q_e.forEach(r),tHe=i(M),Th=n(M,"LI",{});var z_e=s(Th);fV=n(z_e,"STRONG",{});var k5t=s(fV);rHe=t(k5t,"vit"),k5t.forEach(r),aHe=t(z_e," \u2014 "),YL=n(z_e,"A",{href:!0});var R5t=s(YL);nHe=t(R5t,"ViTModel"),R5t.forEach(r),sHe=t(z_e," (ViT model)"),z_e.forEach(r),lHe=i(M),Fh=n(M,"LI",{});var X_e=s(Fh);cV=n(X_e,"STRONG",{});var S5t=s(cV);iHe=t(S5t,"wav2vec2"),S5t.forEach(r),dHe=t(X_e," \u2014 "),ZL=n(X_e,"A",{href:!0});var P5t=s(ZL);mHe=t(P5t,"Wav2Vec2Model"),P5t.forEach(r),fHe=t(X_e," (Wav2Vec2 model)"),X_e.forEach(r),cHe=i(M),Mh=n(M,"LI",{});var W_e=s(Mh);gV=n(W_e,"STRONG",{});var $5t=s(gV);gHe=t($5t,"wavlm"),$5t.forEach(r),hHe=t(W_e," \u2014 "),eB=n(W_e,"A",{href:!0});var I5t=s(eB);uHe=t(I5t,"WavLMModel"),I5t.forEach(r),pHe=t(W_e," (WavLM model)"),W_e.forEach(r),_He=i(M),Eh=n(M,"LI",{});var V_e=s(Eh);hV=n(V_e,"STRONG",{});var j5t=s(hV);vHe=t(j5t,"xlm"),j5t.forEach(r),bHe=t(V_e," \u2014 "),oB=n(V_e,"A",{href:!0});var N5t=s(oB);THe=t(N5t,"XLMModel"),N5t.forEach(r),FHe=t(V_e," (XLM model)"),V_e.forEach(r),MHe=i(M),Ch=n(M,"LI",{});var Q_e=s(Ch);uV=n(Q_e,"STRONG",{});var D5t=s(uV);EHe=t(D5t,"xlm-prophetnet"),D5t.forEach(r),CHe=t(Q_e," \u2014 "),tB=n(Q_e,"A",{href:!0});var G5t=s(tB);yHe=t(G5t,"XLMProphetNetModel"),G5t.forEach(r),wHe=t(Q_e," (XLMProphetNet model)"),Q_e.forEach(r),AHe=i(M),yh=n(M,"LI",{});var H_e=s(yh);pV=n(H_e,"STRONG",{});var O5t=s(pV);xHe=t(O5t,"xlm-roberta"),O5t.forEach(r),LHe=t(H_e," \u2014 "),rB=n(H_e,"A",{href:!0});var q5t=s(rB);BHe=t(q5t,"XLMRobertaModel"),q5t.forEach(r),kHe=t(H_e," (XLM-RoBERTa model)"),H_e.forEach(r),RHe=i(M),wh=n(M,"LI",{});var U_e=s(wh);_V=n(U_e,"STRONG",{});var 
z5t=s(_V);SHe=t(z5t,"xlnet"),z5t.forEach(r),PHe=t(U_e," \u2014 "),aB=n(U_e,"A",{href:!0});var X5t=s(aB);$He=t(X5t,"XLNetModel"),X5t.forEach(r),IHe=t(U_e," (XLNet model)"),U_e.forEach(r),M.forEach(r),jHe=i(vr),Ah=n(vr,"P",{});var J_e=s(Ah);NHe=t(J_e,"The model is set in evaluation mode by default using "),vV=n(J_e,"EM",{});var W5t=s(vV);DHe=t(W5t,"model.eval()"),W5t.forEach(r),GHe=t(J_e,` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),bV=n(J_e,"EM",{});var V5t=s(bV);OHe=t(V5t,"model.train()"),V5t.forEach(r),J_e.forEach(r),qHe=i(vr),TV=n(vr,"P",{});var Q5t=s(TV);zHe=t(Q5t,"Examples:"),Q5t.forEach(r),XHe=i(vr),c(pM.$$.fragment,vr),vr.forEach(r),cs.forEach(r),PEe=i(d),di=n(d,"H2",{class:!0});var k3e=s(di);xh=n(k3e,"A",{id:!0,class:!0,href:!0});var H5t=s(xh);FV=n(H5t,"SPAN",{});var U5t=s(FV);c(_M.$$.fragment,U5t),U5t.forEach(r),H5t.forEach(r),WHe=i(k3e),MV=n(k3e,"SPAN",{});var J5t=s(MV);VHe=t(J5t,"AutoModelForPreTraining"),J5t.forEach(r),k3e.forEach(r),$Ee=i(d),$o=n(d,"DIV",{class:!0});var hs=s($o);c(vM.$$.fragment,hs),QHe=i(hs),mi=n(hs,"P",{});var bD=s(mi);HHe=t(bD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a pretraining head) when created with the `),EV=n(bD,"CODE",{});var K5t=s(EV);UHe=t(K5t,"from_pretrained()"),K5t.forEach(r),JHe=t(bD,` class method or the `),CV=n(bD,"CODE",{});var Y5t=s(CV);KHe=t(Y5t,"from_config()"),Y5t.forEach(r),YHe=t(bD," class method."),bD.forEach(r),ZHe=i(hs),bM=n(hs,"P",{});var R3e=s(bM);eUe=t(R3e,"This class cannot be instantiated directly using "),yV=n(R3e,"CODE",{});var Z5t=s(yV);oUe=t(Z5t,"__init__()"),Z5t.forEach(r),tUe=t(R3e," (throws an error)."),R3e.forEach(r),rUe=i(hs),At=n(hs,"DIV",{class:!0});var us=s(At);c(TM.$$.fragment,us),aUe=i(us),wV=n(us,"P",{});var e0t=s(wV);nUe=t(e0t,"Instantiates one of the model classes of the library (with a pretraining head) from a configuration."),e0t.forEach(r),sUe=i(us),fi=n(us,"P",{});var TD=s(fi);lUe=t(TD,`Note: Loading a model from its configuration file does `),AV=n(TD,"STRONG",{});var o0t=s(AV);iUe=t(o0t,"not"),o0t.forEach(r),dUe=t(TD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),xV=n(TD,"EM",{});var t0t=s(xV);mUe=t(t0t,"~AutoModelForPreTraining.from_pretrained"),t0t.forEach(r),fUe=t(TD,`] to load the model weights.`),TD.forEach(r),cUe=i(us),LV=n(us,"P",{});var r0t=s(LV);gUe=t(r0t,"Examples:"),r0t.forEach(r),hUe=i(us),c(FM.$$.fragment,us),us.forEach(r),uUe=i(hs),Le=n(hs,"DIV",{class:!0});var br=s(Le);c(MM.$$.fragment,br),pUe=i(br),BV=n(br,"P",{});var a0t=s(BV);_Ue=t(a0t,"Instantiate one of the model classes of the library (with a pretraining head) from a pretrained model."),a0t.forEach(r),vUe=i(br),Ca=n(br,"P",{});var XT=s(Ca);bUe=t(XT,"The model class to instantiate is selected based on the "),kV=n(XT,"EM",{});var n0t=s(kV);TUe=t(n0t,"model_type"),n0t.forEach(r),FUe=t(XT,` property of the config object (either passed as an argument or loaded from `),RV=n(XT,"EM",{});var s0t=s(RV);MUe=t(s0t,"pretrained_model_name_or_path"),s0t.forEach(r),EUe=t(XT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),SV=n(XT,"EM",{});var l0t=s(SV);CUe=t(l0t,"pretrained_model_name_or_path"),l0t.forEach(r),yUe=t(XT,":"),XT.forEach(r),wUe=i(br),k=n(br,"UL",{});var S=s(k);Lh=n(S,"LI",{});var K_e=s(Lh);PV=n(K_e,"STRONG",{});var i0t=s(PV);AUe=t(i0t,"albert"),i0t.forEach(r),xUe=t(K_e," \u2014 "),nB=n(K_e,"A",{href:!0});var d0t=s(nB);LUe=t(d0t,"AlbertForPreTraining"),d0t.forEach(r),BUe=t(K_e," (ALBERT model)"),K_e.forEach(r),kUe=i(S),Bh=n(S,"LI",{});var Y_e=s(Bh);$V=n(Y_e,"STRONG",{});var m0t=s($V);RUe=t(m0t,"bart"),m0t.forEach(r),SUe=t(Y_e," \u2014 "),sB=n(Y_e,"A",{href:!0});var f0t=s(sB);PUe=t(f0t,"BartForConditionalGeneration"),f0t.forEach(r),$Ue=t(Y_e," (BART model)"),Y_e.forEach(r),IUe=i(S),kh=n(S,"LI",{});var Z_e=s(kh);IV=n(Z_e,"STRONG",{});var c0t=s(IV);jUe=t(c0t,"bert"),c0t.forEach(r),NUe=t(Z_e," \u2014 "),lB=n(Z_e,"A",{href:!0});var g0t=s(lB);DUe=t(g0t,"BertForPreTraining"),g0t.forEach(r),GUe=t(Z_e," (BERT model)"),Z_e.forEach(r),OUe=i(S),Rh=n(S,"LI",{});var eve=s(Rh);jV=n(eve,"STRONG",{});var h0t=s(jV);qUe=t(h0t,"big_bird"),h0t.forEach(r),zUe=t(eve," \u2014 "),iB=n(eve,"A",{href:!0});var u0t=s(iB);XUe=t(u0t,"BigBirdForPreTraining"),u0t.forEach(r),WUe=t(eve," (BigBird model)"),eve.forEach(r),VUe=i(S),Sh=n(S,"LI",{});var ove=s(Sh);NV=n(ove,"STRONG",{});var p0t=s(NV);QUe=t(p0t,"camembert"),p0t.forEach(r),HUe=t(ove," \u2014 "),dB=n(ove,"A",{href:!0});var _0t=s(dB);UUe=t(_0t,"CamembertForMaskedLM"),_0t.forEach(r),JUe=t(ove," (CamemBERT model)"),ove.forEach(r),KUe=i(S),Ph=n(S,"LI",{});var tve=s(Ph);DV=n(tve,"STRONG",{});var v0t=s(DV);YUe=t(v0t,"ctrl"),v0t.forEach(r),ZUe=t(tve," \u2014 "),mB=n(tve,"A",{href:!0});var b0t=s(mB);eJe=t(b0t,"CTRLLMHeadModel"),b0t.forEach(r),oJe=t(tve," (CTRL model)"),tve.forEach(r),tJe=i(S),$h=n(S,"LI",{});var rve=s($h);GV=n(rve,"STRONG",{});var T0t=s(GV);rJe=t(T0t,"deberta"),T0t.forEach(r),aJe=t(rve," \u2014 "),fB=n(rve,"A",{href:!0});var F0t=s(fB);nJe=t(F0t,"DebertaForMaskedLM"),F0t.forEach(r),sJe=t(rve," (DeBERTa model)"),rve.forEach(r),lJe=i(S),Ih=n(S,"LI",{});var ave=s(Ih);OV=n(ave,"STRONG",{});var M0t=s(OV);iJe=t(M0t,"deberta-v2"),M0t.forEach(r),dJe=t(ave," \u2014 "),cB=n(ave,"A",{href:!0});var E0t=s(cB);mJe=t(E0t,"DebertaV2ForMaskedLM"),E0t.forEach(r),fJe=t(ave," (DeBERTa-v2 model)"),ave.forEach(r),cJe=i(S),jh=n(S,"LI",{});var nve=s(jh);qV=n(nve,"STRONG",{});var C0t=s(qV);gJe=t(C0t,"distilbert"),C0t.forEach(r),hJe=t(nve," \u2014 "),gB=n(nve,"A",{href:!0});var y0t=s(gB);uJe=t(y0t,"DistilBertForMaskedLM"),y0t.forEach(r),pJe=t(nve," (DistilBERT model)"),nve.forEach(r),_Je=i(S),Nh=n(S,"LI",{});var 
sve=s(Nh);zV=n(sve,"STRONG",{});var w0t=s(zV);vJe=t(w0t,"electra"),w0t.forEach(r),bJe=t(sve," \u2014 "),hB=n(sve,"A",{href:!0});var A0t=s(hB);TJe=t(A0t,"ElectraForPreTraining"),A0t.forEach(r),FJe=t(sve," (ELECTRA model)"),sve.forEach(r),MJe=i(S),Dh=n(S,"LI",{});var lve=s(Dh);XV=n(lve,"STRONG",{});var x0t=s(XV);EJe=t(x0t,"flaubert"),x0t.forEach(r),CJe=t(lve," \u2014 "),uB=n(lve,"A",{href:!0});var L0t=s(uB);yJe=t(L0t,"FlaubertWithLMHeadModel"),L0t.forEach(r),wJe=t(lve," (FlauBERT model)"),lve.forEach(r),AJe=i(S),Gh=n(S,"LI",{});var ive=s(Gh);WV=n(ive,"STRONG",{});var B0t=s(WV);xJe=t(B0t,"fnet"),B0t.forEach(r),LJe=t(ive," \u2014 "),pB=n(ive,"A",{href:!0});var k0t=s(pB);BJe=t(k0t,"FNetForPreTraining"),k0t.forEach(r),kJe=t(ive," (FNet model)"),ive.forEach(r),RJe=i(S),Oh=n(S,"LI",{});var dve=s(Oh);VV=n(dve,"STRONG",{});var R0t=s(VV);SJe=t(R0t,"fsmt"),R0t.forEach(r),PJe=t(dve," \u2014 "),_B=n(dve,"A",{href:!0});var S0t=s(_B);$Je=t(S0t,"FSMTForConditionalGeneration"),S0t.forEach(r),IJe=t(dve," (FairSeq Machine-Translation model)"),dve.forEach(r),jJe=i(S),qh=n(S,"LI",{});var mve=s(qh);QV=n(mve,"STRONG",{});var P0t=s(QV);NJe=t(P0t,"funnel"),P0t.forEach(r),DJe=t(mve," \u2014 "),vB=n(mve,"A",{href:!0});var $0t=s(vB);GJe=t($0t,"FunnelForPreTraining"),$0t.forEach(r),OJe=t(mve," (Funnel Transformer model)"),mve.forEach(r),qJe=i(S),zh=n(S,"LI",{});var fve=s(zh);HV=n(fve,"STRONG",{});var I0t=s(HV);zJe=t(I0t,"gpt2"),I0t.forEach(r),XJe=t(fve," \u2014 "),bB=n(fve,"A",{href:!0});var j0t=s(bB);WJe=t(j0t,"GPT2LMHeadModel"),j0t.forEach(r),VJe=t(fve," (OpenAI GPT-2 model)"),fve.forEach(r),QJe=i(S),Xh=n(S,"LI",{});var cve=s(Xh);UV=n(cve,"STRONG",{});var N0t=s(UV);HJe=t(N0t,"ibert"),N0t.forEach(r),UJe=t(cve," \u2014 "),TB=n(cve,"A",{href:!0});var D0t=s(TB);JJe=t(D0t,"IBertForMaskedLM"),D0t.forEach(r),KJe=t(cve," (I-BERT model)"),cve.forEach(r),YJe=i(S),Wh=n(S,"LI",{});var gve=s(Wh);JV=n(gve,"STRONG",{});var G0t=s(JV);ZJe=t(G0t,"layoutlm"),G0t.forEach(r),eKe=t(gve," \u2014 "),FB=n(gve,"A",{href:!0});var O0t=s(FB);oKe=t(O0t,"LayoutLMForMaskedLM"),O0t.forEach(r),tKe=t(gve," (LayoutLM model)"),gve.forEach(r),rKe=i(S),Vh=n(S,"LI",{});var hve=s(Vh);KV=n(hve,"STRONG",{});var q0t=s(KV);aKe=t(q0t,"longformer"),q0t.forEach(r),nKe=t(hve," \u2014 "),MB=n(hve,"A",{href:!0});var z0t=s(MB);sKe=t(z0t,"LongformerForMaskedLM"),z0t.forEach(r),lKe=t(hve," (Longformer model)"),hve.forEach(r),iKe=i(S),Qh=n(S,"LI",{});var uve=s(Qh);YV=n(uve,"STRONG",{});var X0t=s(YV);dKe=t(X0t,"lxmert"),X0t.forEach(r),mKe=t(uve," \u2014 "),EB=n(uve,"A",{href:!0});var W0t=s(EB);fKe=t(W0t,"LxmertForPreTraining"),W0t.forEach(r),cKe=t(uve," (LXMERT model)"),uve.forEach(r),gKe=i(S),Hh=n(S,"LI",{});var pve=s(Hh);ZV=n(pve,"STRONG",{});var V0t=s(ZV);hKe=t(V0t,"megatron-bert"),V0t.forEach(r),uKe=t(pve," \u2014 "),CB=n(pve,"A",{href:!0});var Q0t=s(CB);pKe=t(Q0t,"MegatronBertForPreTraining"),Q0t.forEach(r),_Ke=t(pve," (MegatronBert model)"),pve.forEach(r),vKe=i(S),Uh=n(S,"LI",{});var _ve=s(Uh);eQ=n(_ve,"STRONG",{});var H0t=s(eQ);bKe=t(H0t,"mobilebert"),H0t.forEach(r),TKe=t(_ve," \u2014 "),yB=n(_ve,"A",{href:!0});var U0t=s(yB);FKe=t(U0t,"MobileBertForPreTraining"),U0t.forEach(r),MKe=t(_ve," (MobileBERT model)"),_ve.forEach(r),EKe=i(S),Jh=n(S,"LI",{});var vve=s(Jh);oQ=n(vve,"STRONG",{});var J0t=s(oQ);CKe=t(J0t,"mpnet"),J0t.forEach(r),yKe=t(vve," \u2014 "),wB=n(vve,"A",{href:!0});var K0t=s(wB);wKe=t(K0t,"MPNetForMaskedLM"),K0t.forEach(r),AKe=t(vve," (MPNet model)"),vve.forEach(r),xKe=i(S),Kh=n(S,"LI",{});var bve=s(Kh);tQ=n(bve,"STRONG",{});var 
Y0t=s(tQ);LKe=t(Y0t,"openai-gpt"),Y0t.forEach(r),BKe=t(bve," \u2014 "),AB=n(bve,"A",{href:!0});var Z0t=s(AB);kKe=t(Z0t,"OpenAIGPTLMHeadModel"),Z0t.forEach(r),RKe=t(bve," (OpenAI GPT model)"),bve.forEach(r),SKe=i(S),Yh=n(S,"LI",{});var Tve=s(Yh);rQ=n(Tve,"STRONG",{});var eTt=s(rQ);PKe=t(eTt,"retribert"),eTt.forEach(r),$Ke=t(Tve," \u2014 "),xB=n(Tve,"A",{href:!0});var oTt=s(xB);IKe=t(oTt,"RetriBertModel"),oTt.forEach(r),jKe=t(Tve," (RetriBERT model)"),Tve.forEach(r),NKe=i(S),Zh=n(S,"LI",{});var Fve=s(Zh);aQ=n(Fve,"STRONG",{});var tTt=s(aQ);DKe=t(tTt,"roberta"),tTt.forEach(r),GKe=t(Fve," \u2014 "),LB=n(Fve,"A",{href:!0});var rTt=s(LB);OKe=t(rTt,"RobertaForMaskedLM"),rTt.forEach(r),qKe=t(Fve," (RoBERTa model)"),Fve.forEach(r),zKe=i(S),eu=n(S,"LI",{});var Mve=s(eu);nQ=n(Mve,"STRONG",{});var aTt=s(nQ);XKe=t(aTt,"squeezebert"),aTt.forEach(r),WKe=t(Mve," \u2014 "),BB=n(Mve,"A",{href:!0});var nTt=s(BB);VKe=t(nTt,"SqueezeBertForMaskedLM"),nTt.forEach(r),QKe=t(Mve," (SqueezeBERT model)"),Mve.forEach(r),HKe=i(S),ou=n(S,"LI",{});var Eve=s(ou);sQ=n(Eve,"STRONG",{});var sTt=s(sQ);UKe=t(sTt,"t5"),sTt.forEach(r),JKe=t(Eve," \u2014 "),kB=n(Eve,"A",{href:!0});var lTt=s(kB);KKe=t(lTt,"T5ForConditionalGeneration"),lTt.forEach(r),YKe=t(Eve," (T5 model)"),Eve.forEach(r),ZKe=i(S),tu=n(S,"LI",{});var Cve=s(tu);lQ=n(Cve,"STRONG",{});var iTt=s(lQ);eYe=t(iTt,"tapas"),iTt.forEach(r),oYe=t(Cve," \u2014 "),RB=n(Cve,"A",{href:!0});var dTt=s(RB);tYe=t(dTt,"TapasForMaskedLM"),dTt.forEach(r),rYe=t(Cve," (TAPAS model)"),Cve.forEach(r),aYe=i(S),ru=n(S,"LI",{});var yve=s(ru);iQ=n(yve,"STRONG",{});var mTt=s(iQ);nYe=t(mTt,"transfo-xl"),mTt.forEach(r),sYe=t(yve," \u2014 "),SB=n(yve,"A",{href:!0});var fTt=s(SB);lYe=t(fTt,"TransfoXLLMHeadModel"),fTt.forEach(r),iYe=t(yve," (Transformer-XL model)"),yve.forEach(r),dYe=i(S),au=n(S,"LI",{});var wve=s(au);dQ=n(wve,"STRONG",{});var cTt=s(dQ);mYe=t(cTt,"unispeech"),cTt.forEach(r),fYe=t(wve," \u2014 "),PB=n(wve,"A",{href:!0});var gTt=s(PB);cYe=t(gTt,"UniSpeechForPreTraining"),gTt.forEach(r),gYe=t(wve," (UniSpeech model)"),wve.forEach(r),hYe=i(S),nu=n(S,"LI",{});var Ave=s(nu);mQ=n(Ave,"STRONG",{});var hTt=s(mQ);uYe=t(hTt,"unispeech-sat"),hTt.forEach(r),pYe=t(Ave," \u2014 "),$B=n(Ave,"A",{href:!0});var uTt=s($B);_Ye=t(uTt,"UniSpeechSatForPreTraining"),uTt.forEach(r),vYe=t(Ave," (UniSpeechSat model)"),Ave.forEach(r),bYe=i(S),su=n(S,"LI",{});var xve=s(su);fQ=n(xve,"STRONG",{});var pTt=s(fQ);TYe=t(pTt,"visual_bert"),pTt.forEach(r),FYe=t(xve," \u2014 "),IB=n(xve,"A",{href:!0});var _Tt=s(IB);MYe=t(_Tt,"VisualBertForPreTraining"),_Tt.forEach(r),EYe=t(xve," (VisualBert model)"),xve.forEach(r),CYe=i(S),lu=n(S,"LI",{});var Lve=s(lu);cQ=n(Lve,"STRONG",{});var vTt=s(cQ);yYe=t(vTt,"wav2vec2"),vTt.forEach(r),wYe=t(Lve," \u2014 "),jB=n(Lve,"A",{href:!0});var bTt=s(jB);AYe=t(bTt,"Wav2Vec2ForPreTraining"),bTt.forEach(r),xYe=t(Lve," (Wav2Vec2 model)"),Lve.forEach(r),LYe=i(S),iu=n(S,"LI",{});var Bve=s(iu);gQ=n(Bve,"STRONG",{});var TTt=s(gQ);BYe=t(TTt,"xlm"),TTt.forEach(r),kYe=t(Bve," \u2014 "),NB=n(Bve,"A",{href:!0});var FTt=s(NB);RYe=t(FTt,"XLMWithLMHeadModel"),FTt.forEach(r),SYe=t(Bve," (XLM model)"),Bve.forEach(r),PYe=i(S),du=n(S,"LI",{});var kve=s(du);hQ=n(kve,"STRONG",{});var MTt=s(hQ);$Ye=t(MTt,"xlm-roberta"),MTt.forEach(r),IYe=t(kve," \u2014 "),DB=n(kve,"A",{href:!0});var ETt=s(DB);jYe=t(ETt,"XLMRobertaForMaskedLM"),ETt.forEach(r),NYe=t(kve," (XLM-RoBERTa model)"),kve.forEach(r),DYe=i(S),mu=n(S,"LI",{});var Rve=s(mu);uQ=n(Rve,"STRONG",{});var CTt=s(uQ);GYe=t(CTt,"xlnet"),CTt.forEach(r),OYe=t(Rve," 
\u2014 "),GB=n(Rve,"A",{href:!0});var yTt=s(GB);qYe=t(yTt,"XLNetLMHeadModel"),yTt.forEach(r),zYe=t(Rve," (XLNet model)"),Rve.forEach(r),S.forEach(r),XYe=i(br),fu=n(br,"P",{});var Sve=s(fu);WYe=t(Sve,"The model is set in evaluation mode by default using "),pQ=n(Sve,"EM",{});var wTt=s(pQ);VYe=t(wTt,"model.eval()"),wTt.forEach(r),QYe=t(Sve,` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),_Q=n(Sve,"EM",{});var ATt=s(_Q);HYe=t(ATt,"model.train()"),ATt.forEach(r),Sve.forEach(r),UYe=i(br),vQ=n(br,"P",{});var xTt=s(vQ);JYe=t(xTt,"Examples:"),xTt.forEach(r),KYe=i(br),c(EM.$$.fragment,br),br.forEach(r),hs.forEach(r),IEe=i(d),ci=n(d,"H2",{class:!0});var S3e=s(ci);cu=n(S3e,"A",{id:!0,class:!0,href:!0});var LTt=s(cu);bQ=n(LTt,"SPAN",{});var BTt=s(bQ);c(CM.$$.fragment,BTt),BTt.forEach(r),LTt.forEach(r),YYe=i(S3e),TQ=n(S3e,"SPAN",{});var kTt=s(TQ);ZYe=t(kTt,"AutoModelForCausalLM"),kTt.forEach(r),S3e.forEach(r),jEe=i(d),Io=n(d,"DIV",{class:!0});var ps=s(Io);c(yM.$$.fragment,ps),eZe=i(ps),gi=n(ps,"P",{});var FD=s(gi);oZe=t(FD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a causal language modeling head) when created with the `),FQ=n(FD,"CODE",{});var RTt=s(FQ);tZe=t(RTt,"from_pretrained()"),RTt.forEach(r),rZe=t(FD,` class method or the `),MQ=n(FD,"CODE",{});var STt=s(MQ);aZe=t(STt,"from_config()"),STt.forEach(r),nZe=t(FD," class method."),FD.forEach(r),sZe=i(ps),wM=n(ps,"P",{});var P3e=s(wM);lZe=t(P3e,"This class cannot be instantiated directly using "),EQ=n(P3e,"CODE",{});var PTt=s(EQ);iZe=t(PTt,"__init__()"),PTt.forEach(r),dZe=t(P3e," (throws an error)."),P3e.forEach(r),mZe=i(ps),xt=n(ps,"DIV",{class:!0});var _s=s(xt);c(AM.$$.fragment,_s),fZe=i(_s),CQ=n(_s,"P",{});var $Tt=s(CQ);cZe=t($Tt,"Instantiates one of the model classes of the library (with a causal language modeling head) from a configuration."),$Tt.forEach(r),gZe=i(_s),hi=n(_s,"P",{});var MD=s(hi);hZe=t(MD,`Note: Loading a model from its configuration file does `),yQ=n(MD,"STRONG",{});var ITt=s(yQ);uZe=t(ITt,"not"),ITt.forEach(r),pZe=t(MD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),wQ=n(MD,"EM",{});var jTt=s(wQ);_Ze=t(jTt,"~AutoModelForCausalLM.from_pretrained"),jTt.forEach(r),vZe=t(MD,`] to load the model weights.`),MD.forEach(r),bZe=i(_s),AQ=n(_s,"P",{});var NTt=s(AQ);TZe=t(NTt,"Examples:"),NTt.forEach(r),FZe=i(_s),c(xM.$$.fragment,_s),_s.forEach(r),MZe=i(ps),Be=n(ps,"DIV",{class:!0});var Tr=s(Be);c(LM.$$.fragment,Tr),EZe=i(Tr),xQ=n(Tr,"P",{});var DTt=s(xQ);CZe=t(DTt,"Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model."),DTt.forEach(r),yZe=i(Tr),ya=n(Tr,"P",{});var WT=s(ya);wZe=t(WT,"The model class to instantiate is selected based on the "),LQ=n(WT,"EM",{});var GTt=s(LQ);AZe=t(GTt,"model_type"),GTt.forEach(r),xZe=t(WT,` property of the config object (either passed as an argument or loaded from `),BQ=n(WT,"EM",{});var OTt=s(BQ);LZe=t(OTt,"pretrained_model_name_or_path"),OTt.forEach(r),BZe=t(WT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),kQ=n(WT,"EM",{});var qTt=s(kQ);kZe=t(qTt,"pretrained_model_name_or_path"),qTt.forEach(r),RZe=t(WT,":"),WT.forEach(r),SZe=i(Tr),I=n(Tr,"UL",{});var D=s(I);gu=n(D,"LI",{});var Pve=s(gu);RQ=n(Pve,"STRONG",{});var zTt=s(RQ);PZe=t(zTt,"bart"),zTt.forEach(r),$Ze=t(Pve," \u2014 "),OB=n(Pve,"A",{href:!0});var XTt=s(OB);IZe=t(XTt,"BartForCausalLM"),XTt.forEach(r),jZe=t(Pve," (BART model)"),Pve.forEach(r),NZe=i(D),hu=n(D,"LI",{});var $ve=s(hu);SQ=n($ve,"STRONG",{});var WTt=s(SQ);DZe=t(WTt,"bert"),WTt.forEach(r),GZe=t($ve," \u2014 "),qB=n($ve,"A",{href:!0});var VTt=s(qB);OZe=t(VTt,"BertLMHeadModel"),VTt.forEach(r),qZe=t($ve," (BERT model)"),$ve.forEach(r),zZe=i(D),uu=n(D,"LI",{});var Ive=s(uu);PQ=n(Ive,"STRONG",{});var QTt=s(PQ);XZe=t(QTt,"bert-generation"),QTt.forEach(r),WZe=t(Ive," \u2014 "),zB=n(Ive,"A",{href:!0});var HTt=s(zB);VZe=t(HTt,"BertGenerationDecoder"),HTt.forEach(r),QZe=t(Ive," (Bert Generation model)"),Ive.forEach(r),HZe=i(D),pu=n(D,"LI",{});var jve=s(pu);$Q=n(jve,"STRONG",{});var UTt=s($Q);UZe=t(UTt,"big_bird"),UTt.forEach(r),JZe=t(jve," \u2014 "),XB=n(jve,"A",{href:!0});var JTt=s(XB);KZe=t(JTt,"BigBirdForCausalLM"),JTt.forEach(r),YZe=t(jve," (BigBird model)"),jve.forEach(r),ZZe=i(D),_u=n(D,"LI",{});var Nve=s(_u);IQ=n(Nve,"STRONG",{});var KTt=s(IQ);eeo=t(KTt,"bigbird_pegasus"),KTt.forEach(r),oeo=t(Nve," \u2014 "),WB=n(Nve,"A",{href:!0});var YTt=s(WB);teo=t(YTt,"BigBirdPegasusForCausalLM"),YTt.forEach(r),reo=t(Nve," (BigBirdPegasus model)"),Nve.forEach(r),aeo=i(D),vu=n(D,"LI",{});var Dve=s(vu);jQ=n(Dve,"STRONG",{});var ZTt=s(jQ);neo=t(ZTt,"blenderbot"),ZTt.forEach(r),seo=t(Dve," \u2014 "),VB=n(Dve,"A",{href:!0});var eFt=s(VB);leo=t(eFt,"BlenderbotForCausalLM"),eFt.forEach(r),ieo=t(Dve," (Blenderbot model)"),Dve.forEach(r),deo=i(D),bu=n(D,"LI",{});var Gve=s(bu);NQ=n(Gve,"STRONG",{});var oFt=s(NQ);meo=t(oFt,"blenderbot-small"),oFt.forEach(r),feo=t(Gve," \u2014 "),QB=n(Gve,"A",{href:!0});var tFt=s(QB);ceo=t(tFt,"BlenderbotSmallForCausalLM"),tFt.forEach(r),geo=t(Gve," (BlenderbotSmall model)"),Gve.forEach(r),heo=i(D),Tu=n(D,"LI",{});var Ove=s(Tu);DQ=n(Ove,"STRONG",{});var rFt=s(DQ);ueo=t(rFt,"camembert"),rFt.forEach(r),peo=t(Ove," \u2014 "),HB=n(Ove,"A",{href:!0});var aFt=s(HB);_eo=t(aFt,"CamembertForCausalLM"),aFt.forEach(r),veo=t(Ove," (CamemBERT model)"),Ove.forEach(r),beo=i(D),Fu=n(D,"LI",{});var qve=s(Fu);GQ=n(qve,"STRONG",{});var nFt=s(GQ);Teo=t(nFt,"ctrl"),nFt.forEach(r),Feo=t(qve," \u2014 "),UB=n(qve,"A",{href:!0});var sFt=s(UB);Meo=t(sFt,"CTRLLMHeadModel"),sFt.forEach(r),Eeo=t(qve," (CTRL 
model)"),qve.forEach(r),Ceo=i(D),Mu=n(D,"LI",{});var zve=s(Mu);OQ=n(zve,"STRONG",{});var lFt=s(OQ);yeo=t(lFt,"gpt2"),lFt.forEach(r),weo=t(zve," \u2014 "),JB=n(zve,"A",{href:!0});var iFt=s(JB);Aeo=t(iFt,"GPT2LMHeadModel"),iFt.forEach(r),xeo=t(zve," (OpenAI GPT-2 model)"),zve.forEach(r),Leo=i(D),Eu=n(D,"LI",{});var Xve=s(Eu);qQ=n(Xve,"STRONG",{});var dFt=s(qQ);Beo=t(dFt,"gpt_neo"),dFt.forEach(r),keo=t(Xve," \u2014 "),KB=n(Xve,"A",{href:!0});var mFt=s(KB);Reo=t(mFt,"GPTNeoForCausalLM"),mFt.forEach(r),Seo=t(Xve," (GPT Neo model)"),Xve.forEach(r),Peo=i(D),Cu=n(D,"LI",{});var Wve=s(Cu);zQ=n(Wve,"STRONG",{});var fFt=s(zQ);$eo=t(fFt,"gptj"),fFt.forEach(r),Ieo=t(Wve," \u2014 "),YB=n(Wve,"A",{href:!0});var cFt=s(YB);jeo=t(cFt,"GPTJForCausalLM"),cFt.forEach(r),Neo=t(Wve," (GPT-J model)"),Wve.forEach(r),Deo=i(D),yu=n(D,"LI",{});var Vve=s(yu);XQ=n(Vve,"STRONG",{});var gFt=s(XQ);Geo=t(gFt,"marian"),gFt.forEach(r),Oeo=t(Vve," \u2014 "),ZB=n(Vve,"A",{href:!0});var hFt=s(ZB);qeo=t(hFt,"MarianForCausalLM"),hFt.forEach(r),zeo=t(Vve," (Marian model)"),Vve.forEach(r),Xeo=i(D),wu=n(D,"LI",{});var Qve=s(wu);WQ=n(Qve,"STRONG",{});var uFt=s(WQ);Weo=t(uFt,"mbart"),uFt.forEach(r),Veo=t(Qve," \u2014 "),e9=n(Qve,"A",{href:!0});var pFt=s(e9);Qeo=t(pFt,"MBartForCausalLM"),pFt.forEach(r),Heo=t(Qve," (mBART model)"),Qve.forEach(r),Ueo=i(D),Au=n(D,"LI",{});var Hve=s(Au);VQ=n(Hve,"STRONG",{});var _Ft=s(VQ);Jeo=t(_Ft,"megatron-bert"),_Ft.forEach(r),Keo=t(Hve," \u2014 "),o9=n(Hve,"A",{href:!0});var vFt=s(o9);Yeo=t(vFt,"MegatronBertForCausalLM"),vFt.forEach(r),Zeo=t(Hve," (MegatronBert model)"),Hve.forEach(r),eoo=i(D),xu=n(D,"LI",{});var Uve=s(xu);QQ=n(Uve,"STRONG",{});var bFt=s(QQ);ooo=t(bFt,"openai-gpt"),bFt.forEach(r),too=t(Uve," \u2014 "),t9=n(Uve,"A",{href:!0});var TFt=s(t9);roo=t(TFt,"OpenAIGPTLMHeadModel"),TFt.forEach(r),aoo=t(Uve," (OpenAI GPT model)"),Uve.forEach(r),noo=i(D),Lu=n(D,"LI",{});var Jve=s(Lu);HQ=n(Jve,"STRONG",{});var FFt=s(HQ);soo=t(FFt,"pegasus"),FFt.forEach(r),loo=t(Jve," \u2014 "),r9=n(Jve,"A",{href:!0});var MFt=s(r9);ioo=t(MFt,"PegasusForCausalLM"),MFt.forEach(r),doo=t(Jve," (Pegasus model)"),Jve.forEach(r),moo=i(D),Bu=n(D,"LI",{});var Kve=s(Bu);UQ=n(Kve,"STRONG",{});var EFt=s(UQ);foo=t(EFt,"prophetnet"),EFt.forEach(r),coo=t(Kve," \u2014 "),a9=n(Kve,"A",{href:!0});var CFt=s(a9);goo=t(CFt,"ProphetNetForCausalLM"),CFt.forEach(r),hoo=t(Kve," (ProphetNet model)"),Kve.forEach(r),uoo=i(D),ku=n(D,"LI",{});var Yve=s(ku);JQ=n(Yve,"STRONG",{});var yFt=s(JQ);poo=t(yFt,"qdqbert"),yFt.forEach(r),_oo=t(Yve," \u2014 "),n9=n(Yve,"A",{href:!0});var wFt=s(n9);voo=t(wFt,"QDQBertLMHeadModel"),wFt.forEach(r),boo=t(Yve," (QDQBert model)"),Yve.forEach(r),Too=i(D),Ru=n(D,"LI",{});var Zve=s(Ru);KQ=n(Zve,"STRONG",{});var AFt=s(KQ);Foo=t(AFt,"reformer"),AFt.forEach(r),Moo=t(Zve," \u2014 "),s9=n(Zve,"A",{href:!0});var xFt=s(s9);Eoo=t(xFt,"ReformerModelWithLMHead"),xFt.forEach(r),Coo=t(Zve," (Reformer model)"),Zve.forEach(r),yoo=i(D),Su=n(D,"LI",{});var e1e=s(Su);YQ=n(e1e,"STRONG",{});var LFt=s(YQ);woo=t(LFt,"rembert"),LFt.forEach(r),Aoo=t(e1e," \u2014 "),l9=n(e1e,"A",{href:!0});var BFt=s(l9);xoo=t(BFt,"RemBertForCausalLM"),BFt.forEach(r),Loo=t(e1e," (RemBERT model)"),e1e.forEach(r),Boo=i(D),Pu=n(D,"LI",{});var o1e=s(Pu);ZQ=n(o1e,"STRONG",{});var kFt=s(ZQ);koo=t(kFt,"roberta"),kFt.forEach(r),Roo=t(o1e," \u2014 "),i9=n(o1e,"A",{href:!0});var RFt=s(i9);Soo=t(RFt,"RobertaForCausalLM"),RFt.forEach(r),Poo=t(o1e," (RoBERTa model)"),o1e.forEach(r),$oo=i(D),$u=n(D,"LI",{});var t1e=s($u);eH=n(t1e,"STRONG",{});var 
SFt=s(eH);Ioo=t(SFt,"roformer"),SFt.forEach(r),joo=t(t1e," \u2014 "),d9=n(t1e,"A",{href:!0});var PFt=s(d9);Noo=t(PFt,"RoFormerForCausalLM"),PFt.forEach(r),Doo=t(t1e," (RoFormer model)"),t1e.forEach(r),Goo=i(D),Iu=n(D,"LI",{});var r1e=s(Iu);oH=n(r1e,"STRONG",{});var $Ft=s(oH);Ooo=t($Ft,"speech_to_text_2"),$Ft.forEach(r),qoo=t(r1e," \u2014 "),m9=n(r1e,"A",{href:!0});var IFt=s(m9);zoo=t(IFt,"Speech2Text2ForCausalLM"),IFt.forEach(r),Xoo=t(r1e," (Speech2Text2 model)"),r1e.forEach(r),Woo=i(D),ju=n(D,"LI",{});var a1e=s(ju);tH=n(a1e,"STRONG",{});var jFt=s(tH);Voo=t(jFt,"transfo-xl"),jFt.forEach(r),Qoo=t(a1e," \u2014 "),f9=n(a1e,"A",{href:!0});var NFt=s(f9);Hoo=t(NFt,"TransfoXLLMHeadModel"),NFt.forEach(r),Uoo=t(a1e," (Transformer-XL model)"),a1e.forEach(r),Joo=i(D),Nu=n(D,"LI",{});var n1e=s(Nu);rH=n(n1e,"STRONG",{});var DFt=s(rH);Koo=t(DFt,"trocr"),DFt.forEach(r),Yoo=t(n1e," \u2014 "),c9=n(n1e,"A",{href:!0});var GFt=s(c9);Zoo=t(GFt,"TrOCRForCausalLM"),GFt.forEach(r),eto=t(n1e," (TrOCR model)"),n1e.forEach(r),oto=i(D),Du=n(D,"LI",{});var s1e=s(Du);aH=n(s1e,"STRONG",{});var OFt=s(aH);tto=t(OFt,"xlm"),OFt.forEach(r),rto=t(s1e," \u2014 "),g9=n(s1e,"A",{href:!0});var qFt=s(g9);ato=t(qFt,"XLMWithLMHeadModel"),qFt.forEach(r),nto=t(s1e," (XLM model)"),s1e.forEach(r),sto=i(D),Gu=n(D,"LI",{});var l1e=s(Gu);nH=n(l1e,"STRONG",{});var zFt=s(nH);lto=t(zFt,"xlm-prophetnet"),zFt.forEach(r),ito=t(l1e," \u2014 "),h9=n(l1e,"A",{href:!0});var XFt=s(h9);dto=t(XFt,"XLMProphetNetForCausalLM"),XFt.forEach(r),mto=t(l1e," (XLMProphetNet model)"),l1e.forEach(r),fto=i(D),Ou=n(D,"LI",{});var i1e=s(Ou);sH=n(i1e,"STRONG",{});var WFt=s(sH);cto=t(WFt,"xlm-roberta"),WFt.forEach(r),gto=t(i1e," \u2014 "),u9=n(i1e,"A",{href:!0});var VFt=s(u9);hto=t(VFt,"XLMRobertaForCausalLM"),VFt.forEach(r),uto=t(i1e," (XLM-RoBERTa model)"),i1e.forEach(r),pto=i(D),qu=n(D,"LI",{});var d1e=s(qu);lH=n(d1e,"STRONG",{});var QFt=s(lH);_to=t(QFt,"xlnet"),QFt.forEach(r),vto=t(d1e," \u2014 "),p9=n(d1e,"A",{href:!0});var HFt=s(p9);bto=t(HFt,"XLNetLMHeadModel"),HFt.forEach(r),Tto=t(d1e," (XLNet model)"),d1e.forEach(r),D.forEach(r),Fto=i(Tr),zu=n(Tr,"P",{});var m1e=s(zu);Mto=t(m1e,"The model is set in evaluation mode by default using "),iH=n(m1e,"EM",{});var UFt=s(iH);Eto=t(UFt,"model.eval()"),UFt.forEach(r),Cto=t(m1e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),dH=n(m1e,"EM",{});var JFt=s(dH);yto=t(JFt,"model.train()"),JFt.forEach(r),m1e.forEach(r),wto=i(Tr),mH=n(Tr,"P",{});var KFt=s(mH);Ato=t(KFt,"Examples:"),KFt.forEach(r),xto=i(Tr),c(BM.$$.fragment,Tr),Tr.forEach(r),ps.forEach(r),NEe=i(d),ui=n(d,"H2",{class:!0});var $3e=s(ui);Xu=n($3e,"A",{id:!0,class:!0,href:!0});var YFt=s(Xu);fH=n(YFt,"SPAN",{});var ZFt=s(fH);c(kM.$$.fragment,ZFt),ZFt.forEach(r),YFt.forEach(r),Lto=i($3e),cH=n($3e,"SPAN",{});var eMt=s(cH);Bto=t(eMt,"AutoModelForMaskedLM"),eMt.forEach(r),$3e.forEach(r),DEe=i(d),jo=n(d,"DIV",{class:!0});var vs=s(jo);c(RM.$$.fragment,vs),kto=i(vs),pi=n(vs,"P",{});var ED=s(pi);Rto=t(ED,`This is a generic model class that will be instantiated as one of the model classes of the library (with a masked language modeling head) when created with the `),gH=n(ED,"CODE",{});var oMt=s(gH);Sto=t(oMt,"from_pretrained()"),oMt.forEach(r),Pto=t(ED,` class method or the `),hH=n(ED,"CODE",{});var tMt=s(hH);$to=t(tMt,"from_config()"),tMt.forEach(r),Ito=t(ED," class method."),ED.forEach(r),jto=i(vs),SM=n(vs,"P",{});var I3e=s(SM);Nto=t(I3e,"This class cannot be instantiated directly using "),uH=n(I3e,"CODE",{});var rMt=s(uH);Dto=t(rMt,"__init__()"),rMt.forEach(r),Gto=t(I3e," (throws an error)."),I3e.forEach(r),Oto=i(vs),Lt=n(vs,"DIV",{class:!0});var bs=s(Lt);c(PM.$$.fragment,bs),qto=i(bs),pH=n(bs,"P",{});var aMt=s(pH);zto=t(aMt,"Instantiates one of the model classes of the library (with a masked language modeling head) from a configuration."),aMt.forEach(r),Xto=i(bs),_i=n(bs,"P",{});var CD=s(_i);Wto=t(CD,`Note: Loading a model from its configuration file does `),_H=n(CD,"STRONG",{});var nMt=s(_H);Vto=t(nMt,"not"),nMt.forEach(r),Qto=t(CD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),vH=n(CD,"EM",{});var sMt=s(vH);Hto=t(sMt,"~AutoModelForMaskedLM.from_pretrained"),sMt.forEach(r),Uto=t(CD,`] to load the model weights.`),CD.forEach(r),Jto=i(bs),bH=n(bs,"P",{});var lMt=s(bH);Kto=t(lMt,"Examples:"),lMt.forEach(r),Yto=i(bs),c($M.$$.fragment,bs),bs.forEach(r),Zto=i(vs),ke=n(vs,"DIV",{class:!0});var Fr=s(ke);c(IM.$$.fragment,Fr),ero=i(Fr),TH=n(Fr,"P",{});var iMt=s(TH);oro=t(iMt,"Instantiate one of the model classes of the library (with a masked language modeling head) from a pretrained model."),iMt.forEach(r),tro=i(Fr),wa=n(Fr,"P",{});var VT=s(wa);rro=t(VT,"The model class to instantiate is selected based on the "),FH=n(VT,"EM",{});var dMt=s(FH);aro=t(dMt,"model_type"),dMt.forEach(r),nro=t(VT,` property of the config object (either passed as an argument or loaded from `),MH=n(VT,"EM",{});var mMt=s(MH);sro=t(mMt,"pretrained_model_name_or_path"),mMt.forEach(r),lro=t(VT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),EH=n(VT,"EM",{});var fMt=s(EH);iro=t(fMt,"pretrained_model_name_or_path"),fMt.forEach(r),dro=t(VT,":"),VT.forEach(r),mro=i(Fr),$=n(Fr,"UL",{});var j=s($);Wu=n(j,"LI",{});var f1e=s(Wu);CH=n(f1e,"STRONG",{});var cMt=s(CH);fro=t(cMt,"albert"),cMt.forEach(r),cro=t(f1e," \u2014 "),_9=n(f1e,"A",{href:!0});var gMt=s(_9);gro=t(gMt,"AlbertForMaskedLM"),gMt.forEach(r),hro=t(f1e," (ALBERT model)"),f1e.forEach(r),uro=i(j),Vu=n(j,"LI",{});var c1e=s(Vu);yH=n(c1e,"STRONG",{});var hMt=s(yH);pro=t(hMt,"bart"),hMt.forEach(r),_ro=t(c1e," \u2014 "),v9=n(c1e,"A",{href:!0});var uMt=s(v9);vro=t(uMt,"BartForConditionalGeneration"),uMt.forEach(r),bro=t(c1e," (BART model)"),c1e.forEach(r),Tro=i(j),Qu=n(j,"LI",{});var g1e=s(Qu);wH=n(g1e,"STRONG",{});var pMt=s(wH);Fro=t(pMt,"bert"),pMt.forEach(r),Mro=t(g1e," \u2014 "),b9=n(g1e,"A",{href:!0});var _Mt=s(b9);Ero=t(_Mt,"BertForMaskedLM"),_Mt.forEach(r),Cro=t(g1e," (BERT model)"),g1e.forEach(r),yro=i(j),Hu=n(j,"LI",{});var h1e=s(Hu);AH=n(h1e,"STRONG",{});var vMt=s(AH);wro=t(vMt,"big_bird"),vMt.forEach(r),Aro=t(h1e," \u2014 "),T9=n(h1e,"A",{href:!0});var bMt=s(T9);xro=t(bMt,"BigBirdForMaskedLM"),bMt.forEach(r),Lro=t(h1e," (BigBird model)"),h1e.forEach(r),Bro=i(j),Uu=n(j,"LI",{});var u1e=s(Uu);xH=n(u1e,"STRONG",{});var TMt=s(xH);kro=t(TMt,"camembert"),TMt.forEach(r),Rro=t(u1e," \u2014 "),F9=n(u1e,"A",{href:!0});var FMt=s(F9);Sro=t(FMt,"CamembertForMaskedLM"),FMt.forEach(r),Pro=t(u1e," (CamemBERT model)"),u1e.forEach(r),$ro=i(j),Ju=n(j,"LI",{});var p1e=s(Ju);LH=n(p1e,"STRONG",{});var MMt=s(LH);Iro=t(MMt,"convbert"),MMt.forEach(r),jro=t(p1e," \u2014 "),M9=n(p1e,"A",{href:!0});var EMt=s(M9);Nro=t(EMt,"ConvBertForMaskedLM"),EMt.forEach(r),Dro=t(p1e," (ConvBERT model)"),p1e.forEach(r),Gro=i(j),Ku=n(j,"LI",{});var _1e=s(Ku);BH=n(_1e,"STRONG",{});var CMt=s(BH);Oro=t(CMt,"deberta"),CMt.forEach(r),qro=t(_1e," \u2014 "),E9=n(_1e,"A",{href:!0});var yMt=s(E9);zro=t(yMt,"DebertaForMaskedLM"),yMt.forEach(r),Xro=t(_1e," (DeBERTa model)"),_1e.forEach(r),Wro=i(j),Yu=n(j,"LI",{});var v1e=s(Yu);kH=n(v1e,"STRONG",{});var wMt=s(kH);Vro=t(wMt,"deberta-v2"),wMt.forEach(r),Qro=t(v1e," \u2014 "),C9=n(v1e,"A",{href:!0});var AMt=s(C9);Hro=t(AMt,"DebertaV2ForMaskedLM"),AMt.forEach(r),Uro=t(v1e," (DeBERTa-v2 model)"),v1e.forEach(r),Jro=i(j),Zu=n(j,"LI",{});var b1e=s(Zu);RH=n(b1e,"STRONG",{});var xMt=s(RH);Kro=t(xMt,"distilbert"),xMt.forEach(r),Yro=t(b1e," \u2014 "),y9=n(b1e,"A",{href:!0});var LMt=s(y9);Zro=t(LMt,"DistilBertForMaskedLM"),LMt.forEach(r),eao=t(b1e," (DistilBERT model)"),b1e.forEach(r),oao=i(j),ep=n(j,"LI",{});var 
T1e=s(ep);SH=n(T1e,"STRONG",{});var BMt=s(SH);tao=t(BMt,"electra"),BMt.forEach(r),rao=t(T1e," \u2014 "),w9=n(T1e,"A",{href:!0});var kMt=s(w9);aao=t(kMt,"ElectraForMaskedLM"),kMt.forEach(r),nao=t(T1e," (ELECTRA model)"),T1e.forEach(r),sao=i(j),op=n(j,"LI",{});var F1e=s(op);PH=n(F1e,"STRONG",{});var RMt=s(PH);lao=t(RMt,"flaubert"),RMt.forEach(r),iao=t(F1e," \u2014 "),A9=n(F1e,"A",{href:!0});var SMt=s(A9);dao=t(SMt,"FlaubertWithLMHeadModel"),SMt.forEach(r),mao=t(F1e," (FlauBERT model)"),F1e.forEach(r),fao=i(j),tp=n(j,"LI",{});var M1e=s(tp);$H=n(M1e,"STRONG",{});var PMt=s($H);cao=t(PMt,"fnet"),PMt.forEach(r),gao=t(M1e," \u2014 "),x9=n(M1e,"A",{href:!0});var $Mt=s(x9);hao=t($Mt,"FNetForMaskedLM"),$Mt.forEach(r),uao=t(M1e," (FNet model)"),M1e.forEach(r),pao=i(j),rp=n(j,"LI",{});var E1e=s(rp);IH=n(E1e,"STRONG",{});var IMt=s(IH);_ao=t(IMt,"funnel"),IMt.forEach(r),vao=t(E1e," \u2014 "),L9=n(E1e,"A",{href:!0});var jMt=s(L9);bao=t(jMt,"FunnelForMaskedLM"),jMt.forEach(r),Tao=t(E1e," (Funnel Transformer model)"),E1e.forEach(r),Fao=i(j),ap=n(j,"LI",{});var C1e=s(ap);jH=n(C1e,"STRONG",{});var NMt=s(jH);Mao=t(NMt,"ibert"),NMt.forEach(r),Eao=t(C1e," \u2014 "),B9=n(C1e,"A",{href:!0});var DMt=s(B9);Cao=t(DMt,"IBertForMaskedLM"),DMt.forEach(r),yao=t(C1e," (I-BERT model)"),C1e.forEach(r),wao=i(j),np=n(j,"LI",{});var y1e=s(np);NH=n(y1e,"STRONG",{});var GMt=s(NH);Aao=t(GMt,"layoutlm"),GMt.forEach(r),xao=t(y1e," \u2014 "),k9=n(y1e,"A",{href:!0});var OMt=s(k9);Lao=t(OMt,"LayoutLMForMaskedLM"),OMt.forEach(r),Bao=t(y1e," (LayoutLM model)"),y1e.forEach(r),kao=i(j),sp=n(j,"LI",{});var w1e=s(sp);DH=n(w1e,"STRONG",{});var qMt=s(DH);Rao=t(qMt,"longformer"),qMt.forEach(r),Sao=t(w1e," \u2014 "),R9=n(w1e,"A",{href:!0});var zMt=s(R9);Pao=t(zMt,"LongformerForMaskedLM"),zMt.forEach(r),$ao=t(w1e," (Longformer model)"),w1e.forEach(r),Iao=i(j),lp=n(j,"LI",{});var A1e=s(lp);GH=n(A1e,"STRONG",{});var XMt=s(GH);jao=t(XMt,"mbart"),XMt.forEach(r),Nao=t(A1e," \u2014 "),S9=n(A1e,"A",{href:!0});var WMt=s(S9);Dao=t(WMt,"MBartForConditionalGeneration"),WMt.forEach(r),Gao=t(A1e," (mBART model)"),A1e.forEach(r),Oao=i(j),ip=n(j,"LI",{});var x1e=s(ip);OH=n(x1e,"STRONG",{});var VMt=s(OH);qao=t(VMt,"megatron-bert"),VMt.forEach(r),zao=t(x1e," \u2014 "),P9=n(x1e,"A",{href:!0});var QMt=s(P9);Xao=t(QMt,"MegatronBertForMaskedLM"),QMt.forEach(r),Wao=t(x1e," (MegatronBert model)"),x1e.forEach(r),Vao=i(j),dp=n(j,"LI",{});var L1e=s(dp);qH=n(L1e,"STRONG",{});var HMt=s(qH);Qao=t(HMt,"mobilebert"),HMt.forEach(r),Hao=t(L1e," \u2014 "),$9=n(L1e,"A",{href:!0});var UMt=s($9);Uao=t(UMt,"MobileBertForMaskedLM"),UMt.forEach(r),Jao=t(L1e," (MobileBERT model)"),L1e.forEach(r),Kao=i(j),mp=n(j,"LI",{});var B1e=s(mp);zH=n(B1e,"STRONG",{});var JMt=s(zH);Yao=t(JMt,"mpnet"),JMt.forEach(r),Zao=t(B1e," \u2014 "),I9=n(B1e,"A",{href:!0});var KMt=s(I9);eno=t(KMt,"MPNetForMaskedLM"),KMt.forEach(r),ono=t(B1e," (MPNet model)"),B1e.forEach(r),tno=i(j),fp=n(j,"LI",{});var k1e=s(fp);XH=n(k1e,"STRONG",{});var YMt=s(XH);rno=t(YMt,"perceiver"),YMt.forEach(r),ano=t(k1e," \u2014 "),j9=n(k1e,"A",{href:!0});var ZMt=s(j9);nno=t(ZMt,"PerceiverForMaskedLM"),ZMt.forEach(r),sno=t(k1e," (Perceiver model)"),k1e.forEach(r),lno=i(j),cp=n(j,"LI",{});var R1e=s(cp);WH=n(R1e,"STRONG",{});var eEt=s(WH);ino=t(eEt,"qdqbert"),eEt.forEach(r),dno=t(R1e," \u2014 "),N9=n(R1e,"A",{href:!0});var oEt=s(N9);mno=t(oEt,"QDQBertForMaskedLM"),oEt.forEach(r),fno=t(R1e," (QDQBert model)"),R1e.forEach(r),cno=i(j),gp=n(j,"LI",{});var S1e=s(gp);VH=n(S1e,"STRONG",{});var 
tEt=s(VH);gno=t(tEt,"reformer"),tEt.forEach(r),hno=t(S1e," \u2014 "),D9=n(S1e,"A",{href:!0});var rEt=s(D9);uno=t(rEt,"ReformerForMaskedLM"),rEt.forEach(r),pno=t(S1e," (Reformer model)"),S1e.forEach(r),_no=i(j),hp=n(j,"LI",{});var P1e=s(hp);QH=n(P1e,"STRONG",{});var aEt=s(QH);vno=t(aEt,"rembert"),aEt.forEach(r),bno=t(P1e," \u2014 "),G9=n(P1e,"A",{href:!0});var nEt=s(G9);Tno=t(nEt,"RemBertForMaskedLM"),nEt.forEach(r),Fno=t(P1e," (RemBERT model)"),P1e.forEach(r),Mno=i(j),up=n(j,"LI",{});var $1e=s(up);HH=n($1e,"STRONG",{});var sEt=s(HH);Eno=t(sEt,"roberta"),sEt.forEach(r),Cno=t($1e," \u2014 "),O9=n($1e,"A",{href:!0});var lEt=s(O9);yno=t(lEt,"RobertaForMaskedLM"),lEt.forEach(r),wno=t($1e," (RoBERTa model)"),$1e.forEach(r),Ano=i(j),pp=n(j,"LI",{});var I1e=s(pp);UH=n(I1e,"STRONG",{});var iEt=s(UH);xno=t(iEt,"roformer"),iEt.forEach(r),Lno=t(I1e," \u2014 "),q9=n(I1e,"A",{href:!0});var dEt=s(q9);Bno=t(dEt,"RoFormerForMaskedLM"),dEt.forEach(r),kno=t(I1e," (RoFormer model)"),I1e.forEach(r),Rno=i(j),_p=n(j,"LI",{});var j1e=s(_p);JH=n(j1e,"STRONG",{});var mEt=s(JH);Sno=t(mEt,"squeezebert"),mEt.forEach(r),Pno=t(j1e," \u2014 "),z9=n(j1e,"A",{href:!0});var fEt=s(z9);$no=t(fEt,"SqueezeBertForMaskedLM"),fEt.forEach(r),Ino=t(j1e," (SqueezeBERT model)"),j1e.forEach(r),jno=i(j),vp=n(j,"LI",{});var N1e=s(vp);KH=n(N1e,"STRONG",{});var cEt=s(KH);Nno=t(cEt,"tapas"),cEt.forEach(r),Dno=t(N1e," \u2014 "),X9=n(N1e,"A",{href:!0});var gEt=s(X9);Gno=t(gEt,"TapasForMaskedLM"),gEt.forEach(r),Ono=t(N1e," (TAPAS model)"),N1e.forEach(r),qno=i(j),bp=n(j,"LI",{});var D1e=s(bp);YH=n(D1e,"STRONG",{});var hEt=s(YH);zno=t(hEt,"wav2vec2"),hEt.forEach(r),Xno=t(D1e," \u2014 "),ZH=n(D1e,"CODE",{});var uEt=s(ZH);Wno=t(uEt,"Wav2Vec2ForMaskedLM"),uEt.forEach(r),Vno=t(D1e," (Wav2Vec2 model)"),D1e.forEach(r),Qno=i(j),Tp=n(j,"LI",{});var G1e=s(Tp);eU=n(G1e,"STRONG",{});var pEt=s(eU);Hno=t(pEt,"xlm"),pEt.forEach(r),Uno=t(G1e," \u2014 "),W9=n(G1e,"A",{href:!0});var _Et=s(W9);Jno=t(_Et,"XLMWithLMHeadModel"),_Et.forEach(r),Kno=t(G1e," (XLM model)"),G1e.forEach(r),Yno=i(j),Fp=n(j,"LI",{});var O1e=s(Fp);oU=n(O1e,"STRONG",{});var vEt=s(oU);Zno=t(vEt,"xlm-roberta"),vEt.forEach(r),eso=t(O1e," \u2014 "),V9=n(O1e,"A",{href:!0});var bEt=s(V9);oso=t(bEt,"XLMRobertaForMaskedLM"),bEt.forEach(r),tso=t(O1e," (XLM-RoBERTa model)"),O1e.forEach(r),j.forEach(r),rso=i(Fr),Mp=n(Fr,"P",{});var q1e=s(Mp);aso=t(q1e,"The model is set in evaluation mode by default using "),tU=n(q1e,"EM",{});var TEt=s(tU);nso=t(TEt,"model.eval()"),TEt.forEach(r),sso=t(q1e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),rU=n(q1e,"EM",{});var FEt=s(rU);lso=t(FEt,"model.train()"),FEt.forEach(r),q1e.forEach(r),iso=i(Fr),aU=n(Fr,"P",{});var MEt=s(aU);dso=t(MEt,"Examples:"),MEt.forEach(r),mso=i(Fr),c(jM.$$.fragment,Fr),Fr.forEach(r),vs.forEach(r),GEe=i(d),vi=n(d,"H2",{class:!0});var j3e=s(vi);Ep=n(j3e,"A",{id:!0,class:!0,href:!0});var EEt=s(Ep);nU=n(EEt,"SPAN",{});var CEt=s(nU);c(NM.$$.fragment,CEt),CEt.forEach(r),EEt.forEach(r),fso=i(j3e),sU=n(j3e,"SPAN",{});var yEt=s(sU);cso=t(yEt,"AutoModelForSeq2SeqLM"),yEt.forEach(r),j3e.forEach(r),OEe=i(d),No=n(d,"DIV",{class:!0});var Ts=s(No);c(DM.$$.fragment,Ts),gso=i(Ts),bi=n(Ts,"P",{});var yD=s(bi);hso=t(yD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence language modeling head) when created with the `),lU=n(yD,"CODE",{});var wEt=s(lU);uso=t(wEt,"from_pretrained()"),wEt.forEach(r),pso=t(yD,` class method or the `),iU=n(yD,"CODE",{});var AEt=s(iU);_so=t(AEt,"from_config()"),AEt.forEach(r),vso=t(yD," class method."),yD.forEach(r),bso=i(Ts),GM=n(Ts,"P",{});var N3e=s(GM);Tso=t(N3e,"This class cannot be instantiated directly using "),dU=n(N3e,"CODE",{});var xEt=s(dU);Fso=t(xEt,"__init__()"),xEt.forEach(r),Mso=t(N3e," (throws an error)."),N3e.forEach(r),Eso=i(Ts),Bt=n(Ts,"DIV",{class:!0});var Fs=s(Bt);c(OM.$$.fragment,Fs),Cso=i(Fs),mU=n(Fs,"P",{});var LEt=s(mU);yso=t(LEt,"Instantiates one of the model classes of the library (with a sequence-to-sequence language modeling head) from a configuration."),LEt.forEach(r),wso=i(Fs),Ti=n(Fs,"P",{});var wD=s(Ti);Aso=t(wD,`Note: Loading a model from its configuration file does `),fU=n(wD,"STRONG",{});var BEt=s(fU);xso=t(BEt,"not"),BEt.forEach(r),Lso=t(wD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),cU=n(wD,"EM",{});var kEt=s(cU);Bso=t(kEt,"~AutoModelForSeq2SeqLM.from_pretrained"),kEt.forEach(r),kso=t(wD,`] to load the model weights.`),wD.forEach(r),Rso=i(Fs),gU=n(Fs,"P",{});var REt=s(gU);Sso=t(REt,"Examples:"),REt.forEach(r),Pso=i(Fs),c(qM.$$.fragment,Fs),Fs.forEach(r),$so=i(Ts),Re=n(Ts,"DIV",{class:!0});var Mr=s(Re);c(zM.$$.fragment,Mr),Iso=i(Mr),hU=n(Mr,"P",{});var SEt=s(hU);jso=t(SEt,"Instantiate one of the model classes of the library (with a sequence-to-sequence language modeling head) from a pretrained model."),SEt.forEach(r),Nso=i(Mr),Aa=n(Mr,"P",{});var QT=s(Aa);Dso=t(QT,"The model class to instantiate is selected based on the "),uU=n(QT,"EM",{});var PEt=s(uU);Gso=t(PEt,"model_type"),PEt.forEach(r),Oso=t(QT,` property of the config object (either passed as an argument or loaded from `),pU=n(QT,"EM",{});var $Et=s(pU);qso=t($Et,"pretrained_model_name_or_path"),$Et.forEach(r),zso=t(QT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),_U=n(QT,"EM",{});var IEt=s(_U);Xso=t(IEt,"pretrained_model_name_or_path"),IEt.forEach(r),Wso=t(QT,":"),QT.forEach(r),Vso=i(Mr),ne=n(Mr,"UL",{});var le=s(ne);Cp=n(le,"LI",{});var z1e=s(Cp);vU=n(z1e,"STRONG",{});var jEt=s(vU);Qso=t(jEt,"bart"),jEt.forEach(r),Hso=t(z1e," \u2014 "),Q9=n(z1e,"A",{href:!0});var NEt=s(Q9);Uso=t(NEt,"BartForConditionalGeneration"),NEt.forEach(r),Jso=t(z1e," (BART model)"),z1e.forEach(r),Kso=i(le),yp=n(le,"LI",{});var X1e=s(yp);bU=n(X1e,"STRONG",{});var DEt=s(bU);Yso=t(DEt,"bigbird_pegasus"),DEt.forEach(r),Zso=t(X1e," \u2014 "),H9=n(X1e,"A",{href:!0});var GEt=s(H9);elo=t(GEt,"BigBirdPegasusForConditionalGeneration"),GEt.forEach(r),olo=t(X1e," (BigBirdPegasus model)"),X1e.forEach(r),tlo=i(le),wp=n(le,"LI",{});var W1e=s(wp);TU=n(W1e,"STRONG",{});var OEt=s(TU);rlo=t(OEt,"blenderbot"),OEt.forEach(r),alo=t(W1e," \u2014 "),U9=n(W1e,"A",{href:!0});var qEt=s(U9);nlo=t(qEt,"BlenderbotForConditionalGeneration"),qEt.forEach(r),slo=t(W1e," (Blenderbot model)"),W1e.forEach(r),llo=i(le),Ap=n(le,"LI",{});var V1e=s(Ap);FU=n(V1e,"STRONG",{});var zEt=s(FU);ilo=t(zEt,"blenderbot-small"),zEt.forEach(r),dlo=t(V1e," \u2014 "),J9=n(V1e,"A",{href:!0});var XEt=s(J9);mlo=t(XEt,"BlenderbotSmallForConditionalGeneration"),XEt.forEach(r),flo=t(V1e," (BlenderbotSmall model)"),V1e.forEach(r),clo=i(le),xp=n(le,"LI",{});var Q1e=s(xp);MU=n(Q1e,"STRONG",{});var WEt=s(MU);glo=t(WEt,"encoder-decoder"),WEt.forEach(r),hlo=t(Q1e," \u2014 "),K9=n(Q1e,"A",{href:!0});var VEt=s(K9);ulo=t(VEt,"EncoderDecoderModel"),VEt.forEach(r),plo=t(Q1e," (Encoder decoder model)"),Q1e.forEach(r),_lo=i(le),Lp=n(le,"LI",{});var H1e=s(Lp);EU=n(H1e,"STRONG",{});var QEt=s(EU);vlo=t(QEt,"fsmt"),QEt.forEach(r),blo=t(H1e," \u2014 "),Y9=n(H1e,"A",{href:!0});var HEt=s(Y9);Tlo=t(HEt,"FSMTForConditionalGeneration"),HEt.forEach(r),Flo=t(H1e," (FairSeq Machine-Translation model)"),H1e.forEach(r),Mlo=i(le),Bp=n(le,"LI",{});var U1e=s(Bp);CU=n(U1e,"STRONG",{});var UEt=s(CU);Elo=t(UEt,"led"),UEt.forEach(r),Clo=t(U1e," \u2014 "),Z9=n(U1e,"A",{href:!0});var JEt=s(Z9);ylo=t(JEt,"LEDForConditionalGeneration"),JEt.forEach(r),wlo=t(U1e," (LED model)"),U1e.forEach(r),Alo=i(le),kp=n(le,"LI",{});var J1e=s(kp);yU=n(J1e,"STRONG",{});var KEt=s(yU);xlo=t(KEt,"m2m_100"),KEt.forEach(r),Llo=t(J1e," \u2014 "),ek=n(J1e,"A",{href:!0});var YEt=s(ek);Blo=t(YEt,"M2M100ForConditionalGeneration"),YEt.forEach(r),klo=t(J1e," (M2M100 model)"),J1e.forEach(r),Rlo=i(le),Rp=n(le,"LI",{});var K1e=s(Rp);wU=n(K1e,"STRONG",{});var ZEt=s(wU);Slo=t(ZEt,"marian"),ZEt.forEach(r),Plo=t(K1e," 
\u2014 "),ok=n(K1e,"A",{href:!0});var eCt=s(ok);$lo=t(eCt,"MarianMTModel"),eCt.forEach(r),Ilo=t(K1e," (Marian model)"),K1e.forEach(r),jlo=i(le),Sp=n(le,"LI",{});var Y1e=s(Sp);AU=n(Y1e,"STRONG",{});var oCt=s(AU);Nlo=t(oCt,"mbart"),oCt.forEach(r),Dlo=t(Y1e," \u2014 "),tk=n(Y1e,"A",{href:!0});var tCt=s(tk);Glo=t(tCt,"MBartForConditionalGeneration"),tCt.forEach(r),Olo=t(Y1e," (mBART model)"),Y1e.forEach(r),qlo=i(le),Pp=n(le,"LI",{});var Z1e=s(Pp);xU=n(Z1e,"STRONG",{});var rCt=s(xU);zlo=t(rCt,"mt5"),rCt.forEach(r),Xlo=t(Z1e," \u2014 "),rk=n(Z1e,"A",{href:!0});var aCt=s(rk);Wlo=t(aCt,"MT5ForConditionalGeneration"),aCt.forEach(r),Vlo=t(Z1e," (mT5 model)"),Z1e.forEach(r),Qlo=i(le),$p=n(le,"LI",{});var e2e=s($p);LU=n(e2e,"STRONG",{});var nCt=s(LU);Hlo=t(nCt,"pegasus"),nCt.forEach(r),Ulo=t(e2e," \u2014 "),ak=n(e2e,"A",{href:!0});var sCt=s(ak);Jlo=t(sCt,"PegasusForConditionalGeneration"),sCt.forEach(r),Klo=t(e2e," (Pegasus model)"),e2e.forEach(r),Ylo=i(le),Ip=n(le,"LI",{});var o2e=s(Ip);BU=n(o2e,"STRONG",{});var lCt=s(BU);Zlo=t(lCt,"prophetnet"),lCt.forEach(r),eio=t(o2e," \u2014 "),nk=n(o2e,"A",{href:!0});var iCt=s(nk);oio=t(iCt,"ProphetNetForConditionalGeneration"),iCt.forEach(r),tio=t(o2e," (ProphetNet model)"),o2e.forEach(r),rio=i(le),jp=n(le,"LI",{});var t2e=s(jp);kU=n(t2e,"STRONG",{});var dCt=s(kU);aio=t(dCt,"t5"),dCt.forEach(r),nio=t(t2e," \u2014 "),sk=n(t2e,"A",{href:!0});var mCt=s(sk);sio=t(mCt,"T5ForConditionalGeneration"),mCt.forEach(r),lio=t(t2e," (T5 model)"),t2e.forEach(r),iio=i(le),Np=n(le,"LI",{});var r2e=s(Np);RU=n(r2e,"STRONG",{});var fCt=s(RU);dio=t(fCt,"xlm-prophetnet"),fCt.forEach(r),mio=t(r2e," \u2014 "),lk=n(r2e,"A",{href:!0});var cCt=s(lk);fio=t(cCt,"XLMProphetNetForConditionalGeneration"),cCt.forEach(r),cio=t(r2e," (XLMProphetNet model)"),r2e.forEach(r),le.forEach(r),gio=i(Mr),Dp=n(Mr,"P",{});var a2e=s(Dp);hio=t(a2e,"The model is set in evaluation mode by default using "),SU=n(a2e,"EM",{});var gCt=s(SU);uio=t(gCt,"model.eval()"),gCt.forEach(r),pio=t(a2e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),PU=n(a2e,"EM",{});var hCt=s(PU);_io=t(hCt,"model.train()"),hCt.forEach(r),a2e.forEach(r),vio=i(Mr),$U=n(Mr,"P",{});var uCt=s($U);bio=t(uCt,"Examples:"),uCt.forEach(r),Tio=i(Mr),c(XM.$$.fragment,Mr),Mr.forEach(r),Ts.forEach(r),qEe=i(d),Fi=n(d,"H2",{class:!0});var D3e=s(Fi);Gp=n(D3e,"A",{id:!0,class:!0,href:!0});var pCt=s(Gp);IU=n(pCt,"SPAN",{});var _Ct=s(IU);c(WM.$$.fragment,_Ct),_Ct.forEach(r),pCt.forEach(r),Fio=i(D3e),jU=n(D3e,"SPAN",{});var vCt=s(jU);Mio=t(vCt,"AutoModelForSequenceClassification"),vCt.forEach(r),D3e.forEach(r),zEe=i(d),Do=n(d,"DIV",{class:!0});var Ms=s(Do);c(VM.$$.fragment,Ms),Eio=i(Ms),Mi=n(Ms,"P",{});var AD=s(Mi);Cio=t(AD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence classification head) when created with the `),NU=n(AD,"CODE",{});var bCt=s(NU);yio=t(bCt,"from_pretrained()"),bCt.forEach(r),wio=t(AD,` class method or the `),DU=n(AD,"CODE",{});var TCt=s(DU);Aio=t(TCt,"from_config()"),TCt.forEach(r),xio=t(AD," class method."),AD.forEach(r),Lio=i(Ms),QM=n(Ms,"P",{});var G3e=s(QM);Bio=t(G3e,"This class cannot be instantiated directly using "),GU=n(G3e,"CODE",{});var FCt=s(GU);kio=t(FCt,"__init__()"),FCt.forEach(r),Rio=t(G3e," (throws an error)."),G3e.forEach(r),Sio=i(Ms),kt=n(Ms,"DIV",{class:!0});var Es=s(kt);c(HM.$$.fragment,Es),Pio=i(Es),OU=n(Es,"P",{});var MCt=s(OU);$io=t(MCt,"Instantiates one of the model classes of the library (with a sequence classification head) from a configuration."),MCt.forEach(r),Iio=i(Es),Ei=n(Es,"P",{});var xD=s(Ei);jio=t(xD,`Note: Loading a model from its configuration file does `),qU=n(xD,"STRONG",{});var ECt=s(qU);Nio=t(ECt,"not"),ECt.forEach(r),Dio=t(xD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),zU=n(xD,"EM",{});var CCt=s(zU);Gio=t(CCt,"~AutoModelForSequenceClassification.from_pretrained"),CCt.forEach(r),Oio=t(xD,`] to load the model weights.`),xD.forEach(r),qio=i(Es),XU=n(Es,"P",{});var yCt=s(XU);zio=t(yCt,"Examples:"),yCt.forEach(r),Xio=i(Es),c(UM.$$.fragment,Es),Es.forEach(r),Wio=i(Ms),Se=n(Ms,"DIV",{class:!0});var Er=s(Se);c(JM.$$.fragment,Er),Vio=i(Er),WU=n(Er,"P",{});var wCt=s(WU);Qio=t(wCt,"Instantiate one of the model classes of the library (with a sequence classification head) from a pretrained model."),wCt.forEach(r),Hio=i(Er),xa=n(Er,"P",{});var HT=s(xa);Uio=t(HT,"The model class to instantiate is selected based on the "),VU=n(HT,"EM",{});var ACt=s(VU);Jio=t(ACt,"model_type"),ACt.forEach(r),Kio=t(HT,` property of the config object (either passed as an argument or loaded from `),QU=n(HT,"EM",{});var xCt=s(QU);Yio=t(xCt,"pretrained_model_name_or_path"),xCt.forEach(r),Zio=t(HT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),HU=n(HT,"EM",{});var LCt=s(HU);edo=t(LCt,"pretrained_model_name_or_path"),LCt.forEach(r),odo=t(HT,":"),HT.forEach(r),tdo=i(Er),A=n(Er,"UL",{});var x=s(A);Op=n(x,"LI",{});var n2e=s(Op);UU=n(n2e,"STRONG",{});var BCt=s(UU);rdo=t(BCt,"albert"),BCt.forEach(r),ado=t(n2e," \u2014 "),ik=n(n2e,"A",{href:!0});var kCt=s(ik);ndo=t(kCt,"AlbertForSequenceClassification"),kCt.forEach(r),sdo=t(n2e," (ALBERT model)"),n2e.forEach(r),ldo=i(x),qp=n(x,"LI",{});var s2e=s(qp);JU=n(s2e,"STRONG",{});var RCt=s(JU);ido=t(RCt,"bart"),RCt.forEach(r),ddo=t(s2e," \u2014 "),dk=n(s2e,"A",{href:!0});var SCt=s(dk);mdo=t(SCt,"BartForSequenceClassification"),SCt.forEach(r),fdo=t(s2e," (BART model)"),s2e.forEach(r),cdo=i(x),zp=n(x,"LI",{});var l2e=s(zp);KU=n(l2e,"STRONG",{});var PCt=s(KU);gdo=t(PCt,"bert"),PCt.forEach(r),hdo=t(l2e," \u2014 "),mk=n(l2e,"A",{href:!0});var $Ct=s(mk);udo=t($Ct,"BertForSequenceClassification"),$Ct.forEach(r),pdo=t(l2e," (BERT model)"),l2e.forEach(r),_do=i(x),Xp=n(x,"LI",{});var i2e=s(Xp);YU=n(i2e,"STRONG",{});var ICt=s(YU);vdo=t(ICt,"big_bird"),ICt.forEach(r),bdo=t(i2e," \u2014 "),fk=n(i2e,"A",{href:!0});var jCt=s(fk);Tdo=t(jCt,"BigBirdForSequenceClassification"),jCt.forEach(r),Fdo=t(i2e," (BigBird model)"),i2e.forEach(r),Mdo=i(x),Wp=n(x,"LI",{});var d2e=s(Wp);ZU=n(d2e,"STRONG",{});var NCt=s(ZU);Edo=t(NCt,"bigbird_pegasus"),NCt.forEach(r),Cdo=t(d2e," \u2014 "),ck=n(d2e,"A",{href:!0});var DCt=s(ck);ydo=t(DCt,"BigBirdPegasusForSequenceClassification"),DCt.forEach(r),wdo=t(d2e," (BigBirdPegasus model)"),d2e.forEach(r),Ado=i(x),Vp=n(x,"LI",{});var m2e=s(Vp);eJ=n(m2e,"STRONG",{});var GCt=s(eJ);xdo=t(GCt,"camembert"),GCt.forEach(r),Ldo=t(m2e," \u2014 "),gk=n(m2e,"A",{href:!0});var OCt=s(gk);Bdo=t(OCt,"CamembertForSequenceClassification"),OCt.forEach(r),kdo=t(m2e," (CamemBERT model)"),m2e.forEach(r),Rdo=i(x),Qp=n(x,"LI",{});var f2e=s(Qp);oJ=n(f2e,"STRONG",{});var qCt=s(oJ);Sdo=t(qCt,"canine"),qCt.forEach(r),Pdo=t(f2e," \u2014 "),hk=n(f2e,"A",{href:!0});var zCt=s(hk);$do=t(zCt,"CanineForSequenceClassification"),zCt.forEach(r),Ido=t(f2e," (Canine model)"),f2e.forEach(r),jdo=i(x),Hp=n(x,"LI",{});var c2e=s(Hp);tJ=n(c2e,"STRONG",{});var XCt=s(tJ);Ndo=t(XCt,"convbert"),XCt.forEach(r),Ddo=t(c2e," \u2014 "),uk=n(c2e,"A",{href:!0});var WCt=s(uk);Gdo=t(WCt,"ConvBertForSequenceClassification"),WCt.forEach(r),Odo=t(c2e," (ConvBERT model)"),c2e.forEach(r),qdo=i(x),Up=n(x,"LI",{});var g2e=s(Up);rJ=n(g2e,"STRONG",{});var VCt=s(rJ);zdo=t(VCt,"ctrl"),VCt.forEach(r),Xdo=t(g2e," \u2014 "),pk=n(g2e,"A",{href:!0});var 
QCt=s(pk);Wdo=t(QCt,"CTRLForSequenceClassification"),QCt.forEach(r),Vdo=t(g2e," (CTRL model)"),g2e.forEach(r),Qdo=i(x),Jp=n(x,"LI",{});var h2e=s(Jp);aJ=n(h2e,"STRONG",{});var HCt=s(aJ);Hdo=t(HCt,"deberta"),HCt.forEach(r),Udo=t(h2e," \u2014 "),_k=n(h2e,"A",{href:!0});var UCt=s(_k);Jdo=t(UCt,"DebertaForSequenceClassification"),UCt.forEach(r),Kdo=t(h2e," (DeBERTa model)"),h2e.forEach(r),Ydo=i(x),Kp=n(x,"LI",{});var u2e=s(Kp);nJ=n(u2e,"STRONG",{});var JCt=s(nJ);Zdo=t(JCt,"deberta-v2"),JCt.forEach(r),emo=t(u2e," \u2014 "),vk=n(u2e,"A",{href:!0});var KCt=s(vk);omo=t(KCt,"DebertaV2ForSequenceClassification"),KCt.forEach(r),tmo=t(u2e," (DeBERTa-v2 model)"),u2e.forEach(r),rmo=i(x),Yp=n(x,"LI",{});var p2e=s(Yp);sJ=n(p2e,"STRONG",{});var YCt=s(sJ);amo=t(YCt,"distilbert"),YCt.forEach(r),nmo=t(p2e," \u2014 "),bk=n(p2e,"A",{href:!0});var ZCt=s(bk);smo=t(ZCt,"DistilBertForSequenceClassification"),ZCt.forEach(r),lmo=t(p2e," (DistilBERT model)"),p2e.forEach(r),imo=i(x),Zp=n(x,"LI",{});var _2e=s(Zp);lJ=n(_2e,"STRONG",{});var e3t=s(lJ);dmo=t(e3t,"electra"),e3t.forEach(r),mmo=t(_2e," \u2014 "),Tk=n(_2e,"A",{href:!0});var o3t=s(Tk);fmo=t(o3t,"ElectraForSequenceClassification"),o3t.forEach(r),cmo=t(_2e," (ELECTRA model)"),_2e.forEach(r),gmo=i(x),e_=n(x,"LI",{});var v2e=s(e_);iJ=n(v2e,"STRONG",{});var t3t=s(iJ);hmo=t(t3t,"flaubert"),t3t.forEach(r),umo=t(v2e," \u2014 "),Fk=n(v2e,"A",{href:!0});var r3t=s(Fk);pmo=t(r3t,"FlaubertForSequenceClassification"),r3t.forEach(r),_mo=t(v2e," (FlauBERT model)"),v2e.forEach(r),vmo=i(x),o_=n(x,"LI",{});var b2e=s(o_);dJ=n(b2e,"STRONG",{});var a3t=s(dJ);bmo=t(a3t,"fnet"),a3t.forEach(r),Tmo=t(b2e," \u2014 "),Mk=n(b2e,"A",{href:!0});var n3t=s(Mk);Fmo=t(n3t,"FNetForSequenceClassification"),n3t.forEach(r),Mmo=t(b2e," (FNet model)"),b2e.forEach(r),Emo=i(x),t_=n(x,"LI",{});var T2e=s(t_);mJ=n(T2e,"STRONG",{});var s3t=s(mJ);Cmo=t(s3t,"funnel"),s3t.forEach(r),ymo=t(T2e," \u2014 "),Ek=n(T2e,"A",{href:!0});var l3t=s(Ek);wmo=t(l3t,"FunnelForSequenceClassification"),l3t.forEach(r),Amo=t(T2e," (Funnel Transformer model)"),T2e.forEach(r),xmo=i(x),r_=n(x,"LI",{});var F2e=s(r_);fJ=n(F2e,"STRONG",{});var i3t=s(fJ);Lmo=t(i3t,"gpt2"),i3t.forEach(r),Bmo=t(F2e," \u2014 "),Ck=n(F2e,"A",{href:!0});var d3t=s(Ck);kmo=t(d3t,"GPT2ForSequenceClassification"),d3t.forEach(r),Rmo=t(F2e," (OpenAI GPT-2 model)"),F2e.forEach(r),Smo=i(x),a_=n(x,"LI",{});var M2e=s(a_);cJ=n(M2e,"STRONG",{});var m3t=s(cJ);Pmo=t(m3t,"gpt_neo"),m3t.forEach(r),$mo=t(M2e," \u2014 "),yk=n(M2e,"A",{href:!0});var f3t=s(yk);Imo=t(f3t,"GPTNeoForSequenceClassification"),f3t.forEach(r),jmo=t(M2e," (GPT Neo model)"),M2e.forEach(r),Nmo=i(x),n_=n(x,"LI",{});var E2e=s(n_);gJ=n(E2e,"STRONG",{});var c3t=s(gJ);Dmo=t(c3t,"gptj"),c3t.forEach(r),Gmo=t(E2e," \u2014 "),wk=n(E2e,"A",{href:!0});var g3t=s(wk);Omo=t(g3t,"GPTJForSequenceClassification"),g3t.forEach(r),qmo=t(E2e," (GPT-J model)"),E2e.forEach(r),zmo=i(x),s_=n(x,"LI",{});var C2e=s(s_);hJ=n(C2e,"STRONG",{});var h3t=s(hJ);Xmo=t(h3t,"ibert"),h3t.forEach(r),Wmo=t(C2e," \u2014 "),Ak=n(C2e,"A",{href:!0});var u3t=s(Ak);Vmo=t(u3t,"IBertForSequenceClassification"),u3t.forEach(r),Qmo=t(C2e," (I-BERT model)"),C2e.forEach(r),Hmo=i(x),l_=n(x,"LI",{});var y2e=s(l_);uJ=n(y2e,"STRONG",{});var p3t=s(uJ);Umo=t(p3t,"layoutlm"),p3t.forEach(r),Jmo=t(y2e," \u2014 "),xk=n(y2e,"A",{href:!0});var _3t=s(xk);Kmo=t(_3t,"LayoutLMForSequenceClassification"),_3t.forEach(r),Ymo=t(y2e," (LayoutLM model)"),y2e.forEach(r),Zmo=i(x),i_=n(x,"LI",{});var w2e=s(i_);pJ=n(w2e,"STRONG",{});var 
v3t=s(pJ);efo=t(v3t,"layoutlmv2"),v3t.forEach(r),ofo=t(w2e," \u2014 "),Lk=n(w2e,"A",{href:!0});var b3t=s(Lk);tfo=t(b3t,"LayoutLMv2ForSequenceClassification"),b3t.forEach(r),rfo=t(w2e," (LayoutLMv2 model)"),w2e.forEach(r),afo=i(x),d_=n(x,"LI",{});var A2e=s(d_);_J=n(A2e,"STRONG",{});var T3t=s(_J);nfo=t(T3t,"led"),T3t.forEach(r),sfo=t(A2e," \u2014 "),Bk=n(A2e,"A",{href:!0});var F3t=s(Bk);lfo=t(F3t,"LEDForSequenceClassification"),F3t.forEach(r),ifo=t(A2e," (LED model)"),A2e.forEach(r),dfo=i(x),m_=n(x,"LI",{});var x2e=s(m_);vJ=n(x2e,"STRONG",{});var M3t=s(vJ);mfo=t(M3t,"longformer"),M3t.forEach(r),ffo=t(x2e," \u2014 "),kk=n(x2e,"A",{href:!0});var E3t=s(kk);cfo=t(E3t,"LongformerForSequenceClassification"),E3t.forEach(r),gfo=t(x2e," (Longformer model)"),x2e.forEach(r),hfo=i(x),f_=n(x,"LI",{});var L2e=s(f_);bJ=n(L2e,"STRONG",{});var C3t=s(bJ);ufo=t(C3t,"mbart"),C3t.forEach(r),pfo=t(L2e," \u2014 "),Rk=n(L2e,"A",{href:!0});var y3t=s(Rk);_fo=t(y3t,"MBartForSequenceClassification"),y3t.forEach(r),vfo=t(L2e," (mBART model)"),L2e.forEach(r),bfo=i(x),c_=n(x,"LI",{});var B2e=s(c_);TJ=n(B2e,"STRONG",{});var w3t=s(TJ);Tfo=t(w3t,"megatron-bert"),w3t.forEach(r),Ffo=t(B2e," \u2014 "),Sk=n(B2e,"A",{href:!0});var A3t=s(Sk);Mfo=t(A3t,"MegatronBertForSequenceClassification"),A3t.forEach(r),Efo=t(B2e," (MegatronBert model)"),B2e.forEach(r),Cfo=i(x),g_=n(x,"LI",{});var k2e=s(g_);FJ=n(k2e,"STRONG",{});var x3t=s(FJ);yfo=t(x3t,"mobilebert"),x3t.forEach(r),wfo=t(k2e," \u2014 "),Pk=n(k2e,"A",{href:!0});var L3t=s(Pk);Afo=t(L3t,"MobileBertForSequenceClassification"),L3t.forEach(r),xfo=t(k2e," (MobileBERT model)"),k2e.forEach(r),Lfo=i(x),h_=n(x,"LI",{});var R2e=s(h_);MJ=n(R2e,"STRONG",{});var B3t=s(MJ);Bfo=t(B3t,"mpnet"),B3t.forEach(r),kfo=t(R2e," \u2014 "),$k=n(R2e,"A",{href:!0});var k3t=s($k);Rfo=t(k3t,"MPNetForSequenceClassification"),k3t.forEach(r),Sfo=t(R2e," (MPNet model)"),R2e.forEach(r),Pfo=i(x),u_=n(x,"LI",{});var S2e=s(u_);EJ=n(S2e,"STRONG",{});var R3t=s(EJ);$fo=t(R3t,"openai-gpt"),R3t.forEach(r),Ifo=t(S2e," \u2014 "),Ik=n(S2e,"A",{href:!0});var S3t=s(Ik);jfo=t(S3t,"OpenAIGPTForSequenceClassification"),S3t.forEach(r),Nfo=t(S2e," (OpenAI GPT model)"),S2e.forEach(r),Dfo=i(x),p_=n(x,"LI",{});var P2e=s(p_);CJ=n(P2e,"STRONG",{});var P3t=s(CJ);Gfo=t(P3t,"perceiver"),P3t.forEach(r),Ofo=t(P2e," \u2014 "),jk=n(P2e,"A",{href:!0});var $3t=s(jk);qfo=t($3t,"PerceiverForSequenceClassification"),$3t.forEach(r),zfo=t(P2e," (Perceiver model)"),P2e.forEach(r),Xfo=i(x),__=n(x,"LI",{});var $2e=s(__);yJ=n($2e,"STRONG",{});var I3t=s(yJ);Wfo=t(I3t,"qdqbert"),I3t.forEach(r),Vfo=t($2e," \u2014 "),Nk=n($2e,"A",{href:!0});var j3t=s(Nk);Qfo=t(j3t,"QDQBertForSequenceClassification"),j3t.forEach(r),Hfo=t($2e," (QDQBert model)"),$2e.forEach(r),Ufo=i(x),v_=n(x,"LI",{});var I2e=s(v_);wJ=n(I2e,"STRONG",{});var N3t=s(wJ);Jfo=t(N3t,"reformer"),N3t.forEach(r),Kfo=t(I2e," \u2014 "),Dk=n(I2e,"A",{href:!0});var D3t=s(Dk);Yfo=t(D3t,"ReformerForSequenceClassification"),D3t.forEach(r),Zfo=t(I2e," (Reformer model)"),I2e.forEach(r),eco=i(x),b_=n(x,"LI",{});var j2e=s(b_);AJ=n(j2e,"STRONG",{});var G3t=s(AJ);oco=t(G3t,"rembert"),G3t.forEach(r),tco=t(j2e," \u2014 "),Gk=n(j2e,"A",{href:!0});var O3t=s(Gk);rco=t(O3t,"RemBertForSequenceClassification"),O3t.forEach(r),aco=t(j2e," (RemBERT model)"),j2e.forEach(r),nco=i(x),T_=n(x,"LI",{});var N2e=s(T_);xJ=n(N2e,"STRONG",{});var q3t=s(xJ);sco=t(q3t,"roberta"),q3t.forEach(r),lco=t(N2e," \u2014 "),Ok=n(N2e,"A",{href:!0});var z3t=s(Ok);ico=t(z3t,"RobertaForSequenceClassification"),z3t.forEach(r),dco=t(N2e," (RoBERTa 
model)"),N2e.forEach(r),mco=i(x),F_=n(x,"LI",{});var D2e=s(F_);LJ=n(D2e,"STRONG",{});var X3t=s(LJ);fco=t(X3t,"roformer"),X3t.forEach(r),cco=t(D2e," \u2014 "),qk=n(D2e,"A",{href:!0});var W3t=s(qk);gco=t(W3t,"RoFormerForSequenceClassification"),W3t.forEach(r),hco=t(D2e," (RoFormer model)"),D2e.forEach(r),uco=i(x),M_=n(x,"LI",{});var G2e=s(M_);BJ=n(G2e,"STRONG",{});var V3t=s(BJ);pco=t(V3t,"squeezebert"),V3t.forEach(r),_co=t(G2e," \u2014 "),zk=n(G2e,"A",{href:!0});var Q3t=s(zk);vco=t(Q3t,"SqueezeBertForSequenceClassification"),Q3t.forEach(r),bco=t(G2e," (SqueezeBERT model)"),G2e.forEach(r),Tco=i(x),E_=n(x,"LI",{});var O2e=s(E_);kJ=n(O2e,"STRONG",{});var H3t=s(kJ);Fco=t(H3t,"tapas"),H3t.forEach(r),Mco=t(O2e," \u2014 "),Xk=n(O2e,"A",{href:!0});var U3t=s(Xk);Eco=t(U3t,"TapasForSequenceClassification"),U3t.forEach(r),Cco=t(O2e," (TAPAS model)"),O2e.forEach(r),yco=i(x),C_=n(x,"LI",{});var q2e=s(C_);RJ=n(q2e,"STRONG",{});var J3t=s(RJ);wco=t(J3t,"transfo-xl"),J3t.forEach(r),Aco=t(q2e," \u2014 "),Wk=n(q2e,"A",{href:!0});var K3t=s(Wk);xco=t(K3t,"TransfoXLForSequenceClassification"),K3t.forEach(r),Lco=t(q2e," (Transformer-XL model)"),q2e.forEach(r),Bco=i(x),y_=n(x,"LI",{});var z2e=s(y_);SJ=n(z2e,"STRONG",{});var Y3t=s(SJ);kco=t(Y3t,"xlm"),Y3t.forEach(r),Rco=t(z2e," \u2014 "),Vk=n(z2e,"A",{href:!0});var Z3t=s(Vk);Sco=t(Z3t,"XLMForSequenceClassification"),Z3t.forEach(r),Pco=t(z2e," (XLM model)"),z2e.forEach(r),$co=i(x),w_=n(x,"LI",{});var X2e=s(w_);PJ=n(X2e,"STRONG",{});var eyt=s(PJ);Ico=t(eyt,"xlm-roberta"),eyt.forEach(r),jco=t(X2e," \u2014 "),Qk=n(X2e,"A",{href:!0});var oyt=s(Qk);Nco=t(oyt,"XLMRobertaForSequenceClassification"),oyt.forEach(r),Dco=t(X2e," (XLM-RoBERTa model)"),X2e.forEach(r),Gco=i(x),A_=n(x,"LI",{});var W2e=s(A_);$J=n(W2e,"STRONG",{});var tyt=s($J);Oco=t(tyt,"xlnet"),tyt.forEach(r),qco=t(W2e," \u2014 "),Hk=n(W2e,"A",{href:!0});var ryt=s(Hk);zco=t(ryt,"XLNetForSequenceClassification"),ryt.forEach(r),Xco=t(W2e," (XLNet model)"),W2e.forEach(r),x.forEach(r),Wco=i(Er),x_=n(Er,"P",{});var V2e=s(x_);Vco=t(V2e,"The model is set in evaluation mode by default using "),IJ=n(V2e,"EM",{});var ayt=s(IJ);Qco=t(ayt,"model.eval()"),ayt.forEach(r),Hco=t(V2e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),jJ=n(V2e,"EM",{});var nyt=s(jJ);Uco=t(nyt,"model.train()"),nyt.forEach(r),V2e.forEach(r),Jco=i(Er),NJ=n(Er,"P",{});var syt=s(NJ);Kco=t(syt,"Examples:"),syt.forEach(r),Yco=i(Er),c(KM.$$.fragment,Er),Er.forEach(r),Ms.forEach(r),XEe=i(d),Ci=n(d,"H2",{class:!0});var O3e=s(Ci);L_=n(O3e,"A",{id:!0,class:!0,href:!0});var lyt=s(L_);DJ=n(lyt,"SPAN",{});var iyt=s(DJ);c(YM.$$.fragment,iyt),iyt.forEach(r),lyt.forEach(r),Zco=i(O3e),GJ=n(O3e,"SPAN",{});var dyt=s(GJ);ego=t(dyt,"AutoModelForMultipleChoice"),dyt.forEach(r),O3e.forEach(r),WEe=i(d),Go=n(d,"DIV",{class:!0});var Cs=s(Go);c(ZM.$$.fragment,Cs),ogo=i(Cs),yi=n(Cs,"P",{});var LD=s(yi);tgo=t(LD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a multiple choice head) when created with the `),OJ=n(LD,"CODE",{});var myt=s(OJ);rgo=t(myt,"from_pretrained()"),myt.forEach(r),ago=t(LD,` class method or the `),qJ=n(LD,"CODE",{});var fyt=s(qJ);ngo=t(fyt,"from_config()"),fyt.forEach(r),sgo=t(LD," class method."),LD.forEach(r),lgo=i(Cs),eE=n(Cs,"P",{});var q3e=s(eE);igo=t(q3e,"This class cannot be instantiated directly using "),zJ=n(q3e,"CODE",{});var cyt=s(zJ);dgo=t(cyt,"__init__()"),cyt.forEach(r),mgo=t(q3e," (throws an error)."),q3e.forEach(r),fgo=i(Cs),Rt=n(Cs,"DIV",{class:!0});var ys=s(Rt);c(oE.$$.fragment,ys),cgo=i(ys),XJ=n(ys,"P",{});var gyt=s(XJ);ggo=t(gyt,"Instantiates one of the model classes of the library (with a multiple choice head) from a configuration."),gyt.forEach(r),hgo=i(ys),wi=n(ys,"P",{});var BD=s(wi);ugo=t(BD,`Note: Loading a model from its configuration file does `),WJ=n(BD,"STRONG",{});var hyt=s(WJ);pgo=t(hyt,"not"),hyt.forEach(r),_go=t(BD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),VJ=n(BD,"EM",{});var uyt=s(VJ);vgo=t(uyt,"~AutoModelForMultipleChoice.from_pretrained"),uyt.forEach(r),bgo=t(BD,`] to load the model weights.`),BD.forEach(r),Tgo=i(ys),QJ=n(ys,"P",{});var pyt=s(QJ);Fgo=t(pyt,"Examples:"),pyt.forEach(r),Mgo=i(ys),c(tE.$$.fragment,ys),ys.forEach(r),Ego=i(Cs),Pe=n(Cs,"DIV",{class:!0});var Cr=s(Pe);c(rE.$$.fragment,Cr),Cgo=i(Cr),HJ=n(Cr,"P",{});var _yt=s(HJ);ygo=t(_yt,"Instantiate one of the model classes of the library (with a multiple choice head) from a pretrained model."),_yt.forEach(r),wgo=i(Cr),La=n(Cr,"P",{});var UT=s(La);Ago=t(UT,"The model class to instantiate is selected based on the "),UJ=n(UT,"EM",{});var vyt=s(UJ);xgo=t(vyt,"model_type"),vyt.forEach(r),Lgo=t(UT,` property of the config object (either passed as an argument or loaded from `),JJ=n(UT,"EM",{});var byt=s(JJ);Bgo=t(byt,"pretrained_model_name_or_path"),byt.forEach(r),kgo=t(UT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),KJ=n(UT,"EM",{});var Tyt=s(KJ);Rgo=t(Tyt,"pretrained_model_name_or_path"),Tyt.forEach(r),Sgo=t(UT,":"),UT.forEach(r),Pgo=i(Cr),q=n(Cr,"UL",{});var W=s(q);B_=n(W,"LI",{});var Q2e=s(B_);YJ=n(Q2e,"STRONG",{});var Fyt=s(YJ);$go=t(Fyt,"albert"),Fyt.forEach(r),Igo=t(Q2e," \u2014 "),Uk=n(Q2e,"A",{href:!0});var Myt=s(Uk);jgo=t(Myt,"AlbertForMultipleChoice"),Myt.forEach(r),Ngo=t(Q2e," (ALBERT model)"),Q2e.forEach(r),Dgo=i(W),k_=n(W,"LI",{});var H2e=s(k_);ZJ=n(H2e,"STRONG",{});var Eyt=s(ZJ);Ggo=t(Eyt,"bert"),Eyt.forEach(r),Ogo=t(H2e," \u2014 "),Jk=n(H2e,"A",{href:!0});var Cyt=s(Jk);qgo=t(Cyt,"BertForMultipleChoice"),Cyt.forEach(r),zgo=t(H2e," (BERT model)"),H2e.forEach(r),Xgo=i(W),R_=n(W,"LI",{});var U2e=s(R_);eK=n(U2e,"STRONG",{});var yyt=s(eK);Wgo=t(yyt,"big_bird"),yyt.forEach(r),Vgo=t(U2e," \u2014 "),Kk=n(U2e,"A",{href:!0});var wyt=s(Kk);Qgo=t(wyt,"BigBirdForMultipleChoice"),wyt.forEach(r),Hgo=t(U2e," (BigBird model)"),U2e.forEach(r),Ugo=i(W),S_=n(W,"LI",{});var J2e=s(S_);oK=n(J2e,"STRONG",{});var Ayt=s(oK);Jgo=t(Ayt,"camembert"),Ayt.forEach(r),Kgo=t(J2e," \u2014 "),Yk=n(J2e,"A",{href:!0});var xyt=s(Yk);Ygo=t(xyt,"CamembertForMultipleChoice"),xyt.forEach(r),Zgo=t(J2e," (CamemBERT model)"),J2e.forEach(r),eho=i(W),P_=n(W,"LI",{});var K2e=s(P_);tK=n(K2e,"STRONG",{});var Lyt=s(tK);oho=t(Lyt,"canine"),Lyt.forEach(r),tho=t(K2e," \u2014 "),Zk=n(K2e,"A",{href:!0});var Byt=s(Zk);rho=t(Byt,"CanineForMultipleChoice"),Byt.forEach(r),aho=t(K2e," (Canine model)"),K2e.forEach(r),nho=i(W),$_=n(W,"LI",{});var Y2e=s($_);rK=n(Y2e,"STRONG",{});var kyt=s(rK);sho=t(kyt,"convbert"),kyt.forEach(r),lho=t(Y2e," \u2014 "),eR=n(Y2e,"A",{href:!0});var Ryt=s(eR);iho=t(Ryt,"ConvBertForMultipleChoice"),Ryt.forEach(r),dho=t(Y2e," (ConvBERT model)"),Y2e.forEach(r),mho=i(W),I_=n(W,"LI",{});var Z2e=s(I_);aK=n(Z2e,"STRONG",{});var Syt=s(aK);fho=t(Syt,"distilbert"),Syt.forEach(r),cho=t(Z2e," \u2014 "),oR=n(Z2e,"A",{href:!0});var Pyt=s(oR);gho=t(Pyt,"DistilBertForMultipleChoice"),Pyt.forEach(r),hho=t(Z2e," (DistilBERT model)"),Z2e.forEach(r),uho=i(W),j_=n(W,"LI",{});var ebe=s(j_);nK=n(ebe,"STRONG",{});var $yt=s(nK);pho=t($yt,"electra"),$yt.forEach(r),_ho=t(ebe," \u2014 "),tR=n(ebe,"A",{href:!0});var Iyt=s(tR);vho=t(Iyt,"ElectraForMultipleChoice"),Iyt.forEach(r),bho=t(ebe," (ELECTRA model)"),ebe.forEach(r),Tho=i(W),N_=n(W,"LI",{});var obe=s(N_);sK=n(obe,"STRONG",{});var jyt=s(sK);Fho=t(jyt,"flaubert"),jyt.forEach(r),Mho=t(obe," \u2014 "),rR=n(obe,"A",{href:!0});var Nyt=s(rR);Eho=t(Nyt,"FlaubertForMultipleChoice"),Nyt.forEach(r),Cho=t(obe," (FlauBERT 
model)"),obe.forEach(r),yho=i(W),D_=n(W,"LI",{});var tbe=s(D_);lK=n(tbe,"STRONG",{});var Dyt=s(lK);who=t(Dyt,"fnet"),Dyt.forEach(r),Aho=t(tbe," \u2014 "),aR=n(tbe,"A",{href:!0});var Gyt=s(aR);xho=t(Gyt,"FNetForMultipleChoice"),Gyt.forEach(r),Lho=t(tbe," (FNet model)"),tbe.forEach(r),Bho=i(W),G_=n(W,"LI",{});var rbe=s(G_);iK=n(rbe,"STRONG",{});var Oyt=s(iK);kho=t(Oyt,"funnel"),Oyt.forEach(r),Rho=t(rbe," \u2014 "),nR=n(rbe,"A",{href:!0});var qyt=s(nR);Sho=t(qyt,"FunnelForMultipleChoice"),qyt.forEach(r),Pho=t(rbe," (Funnel Transformer model)"),rbe.forEach(r),$ho=i(W),O_=n(W,"LI",{});var abe=s(O_);dK=n(abe,"STRONG",{});var zyt=s(dK);Iho=t(zyt,"ibert"),zyt.forEach(r),jho=t(abe," \u2014 "),sR=n(abe,"A",{href:!0});var Xyt=s(sR);Nho=t(Xyt,"IBertForMultipleChoice"),Xyt.forEach(r),Dho=t(abe," (I-BERT model)"),abe.forEach(r),Gho=i(W),q_=n(W,"LI",{});var nbe=s(q_);mK=n(nbe,"STRONG",{});var Wyt=s(mK);Oho=t(Wyt,"longformer"),Wyt.forEach(r),qho=t(nbe," \u2014 "),lR=n(nbe,"A",{href:!0});var Vyt=s(lR);zho=t(Vyt,"LongformerForMultipleChoice"),Vyt.forEach(r),Xho=t(nbe," (Longformer model)"),nbe.forEach(r),Who=i(W),z_=n(W,"LI",{});var sbe=s(z_);fK=n(sbe,"STRONG",{});var Qyt=s(fK);Vho=t(Qyt,"megatron-bert"),Qyt.forEach(r),Qho=t(sbe," \u2014 "),iR=n(sbe,"A",{href:!0});var Hyt=s(iR);Hho=t(Hyt,"MegatronBertForMultipleChoice"),Hyt.forEach(r),Uho=t(sbe," (MegatronBert model)"),sbe.forEach(r),Jho=i(W),X_=n(W,"LI",{});var lbe=s(X_);cK=n(lbe,"STRONG",{});var Uyt=s(cK);Kho=t(Uyt,"mobilebert"),Uyt.forEach(r),Yho=t(lbe," \u2014 "),dR=n(lbe,"A",{href:!0});var Jyt=s(dR);Zho=t(Jyt,"MobileBertForMultipleChoice"),Jyt.forEach(r),euo=t(lbe," (MobileBERT model)"),lbe.forEach(r),ouo=i(W),W_=n(W,"LI",{});var ibe=s(W_);gK=n(ibe,"STRONG",{});var Kyt=s(gK);tuo=t(Kyt,"mpnet"),Kyt.forEach(r),ruo=t(ibe," \u2014 "),mR=n(ibe,"A",{href:!0});var Yyt=s(mR);auo=t(Yyt,"MPNetForMultipleChoice"),Yyt.forEach(r),nuo=t(ibe," (MPNet model)"),ibe.forEach(r),suo=i(W),V_=n(W,"LI",{});var dbe=s(V_);hK=n(dbe,"STRONG",{});var Zyt=s(hK);luo=t(Zyt,"qdqbert"),Zyt.forEach(r),iuo=t(dbe," \u2014 "),fR=n(dbe,"A",{href:!0});var ewt=s(fR);duo=t(ewt,"QDQBertForMultipleChoice"),ewt.forEach(r),muo=t(dbe," (QDQBert model)"),dbe.forEach(r),fuo=i(W),Q_=n(W,"LI",{});var mbe=s(Q_);uK=n(mbe,"STRONG",{});var owt=s(uK);cuo=t(owt,"rembert"),owt.forEach(r),guo=t(mbe," \u2014 "),cR=n(mbe,"A",{href:!0});var twt=s(cR);huo=t(twt,"RemBertForMultipleChoice"),twt.forEach(r),uuo=t(mbe," (RemBERT model)"),mbe.forEach(r),puo=i(W),H_=n(W,"LI",{});var fbe=s(H_);pK=n(fbe,"STRONG",{});var rwt=s(pK);_uo=t(rwt,"roberta"),rwt.forEach(r),vuo=t(fbe," \u2014 "),gR=n(fbe,"A",{href:!0});var awt=s(gR);buo=t(awt,"RobertaForMultipleChoice"),awt.forEach(r),Tuo=t(fbe," (RoBERTa model)"),fbe.forEach(r),Fuo=i(W),U_=n(W,"LI",{});var cbe=s(U_);_K=n(cbe,"STRONG",{});var nwt=s(_K);Muo=t(nwt,"roformer"),nwt.forEach(r),Euo=t(cbe," \u2014 "),hR=n(cbe,"A",{href:!0});var swt=s(hR);Cuo=t(swt,"RoFormerForMultipleChoice"),swt.forEach(r),yuo=t(cbe," (RoFormer model)"),cbe.forEach(r),wuo=i(W),J_=n(W,"LI",{});var gbe=s(J_);vK=n(gbe,"STRONG",{});var lwt=s(vK);Auo=t(lwt,"squeezebert"),lwt.forEach(r),xuo=t(gbe," \u2014 "),uR=n(gbe,"A",{href:!0});var iwt=s(uR);Luo=t(iwt,"SqueezeBertForMultipleChoice"),iwt.forEach(r),Buo=t(gbe," (SqueezeBERT model)"),gbe.forEach(r),kuo=i(W),K_=n(W,"LI",{});var hbe=s(K_);bK=n(hbe,"STRONG",{});var dwt=s(bK);Ruo=t(dwt,"xlm"),dwt.forEach(r),Suo=t(hbe," \u2014 "),pR=n(hbe,"A",{href:!0});var mwt=s(pR);Puo=t(mwt,"XLMForMultipleChoice"),mwt.forEach(r),$uo=t(hbe," (XLM 
model)"),hbe.forEach(r),Iuo=i(W),Y_=n(W,"LI",{});var ube=s(Y_);TK=n(ube,"STRONG",{});var fwt=s(TK);juo=t(fwt,"xlm-roberta"),fwt.forEach(r),Nuo=t(ube," \u2014 "),_R=n(ube,"A",{href:!0});var cwt=s(_R);Duo=t(cwt,"XLMRobertaForMultipleChoice"),cwt.forEach(r),Guo=t(ube," (XLM-RoBERTa model)"),ube.forEach(r),Ouo=i(W),Z_=n(W,"LI",{});var pbe=s(Z_);FK=n(pbe,"STRONG",{});var gwt=s(FK);quo=t(gwt,"xlnet"),gwt.forEach(r),zuo=t(pbe," \u2014 "),vR=n(pbe,"A",{href:!0});var hwt=s(vR);Xuo=t(hwt,"XLNetForMultipleChoice"),hwt.forEach(r),Wuo=t(pbe," (XLNet model)"),pbe.forEach(r),W.forEach(r),Vuo=i(Cr),ev=n(Cr,"P",{});var _be=s(ev);Quo=t(_be,"The model is set in evaluation mode by default using "),MK=n(_be,"EM",{});var uwt=s(MK);Huo=t(uwt,"model.eval()"),uwt.forEach(r),Uuo=t(_be,` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),EK=n(_be,"EM",{});var pwt=s(EK);Juo=t(pwt,"model.train()"),pwt.forEach(r),_be.forEach(r),Kuo=i(Cr),CK=n(Cr,"P",{});var _wt=s(CK);Yuo=t(_wt,"Examples:"),_wt.forEach(r),Zuo=i(Cr),c(aE.$$.fragment,Cr),Cr.forEach(r),Cs.forEach(r),VEe=i(d),Ai=n(d,"H2",{class:!0});var z3e=s(Ai);ov=n(z3e,"A",{id:!0,class:!0,href:!0});var vwt=s(ov);yK=n(vwt,"SPAN",{});var bwt=s(yK);c(nE.$$.fragment,bwt),bwt.forEach(r),vwt.forEach(r),epo=i(z3e),wK=n(z3e,"SPAN",{});var Twt=s(wK);opo=t(Twt,"AutoModelForNextSentencePrediction"),Twt.forEach(r),z3e.forEach(r),QEe=i(d),Oo=n(d,"DIV",{class:!0});var ws=s(Oo);c(sE.$$.fragment,ws),tpo=i(ws),xi=n(ws,"P",{});var kD=s(xi);rpo=t(kD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a next sentence prediction head) when created with the `),AK=n(kD,"CODE",{});var Fwt=s(AK);apo=t(Fwt,"from_pretrained()"),Fwt.forEach(r),npo=t(kD,` class method or the `),xK=n(kD,"CODE",{});var Mwt=s(xK);spo=t(Mwt,"from_config()"),Mwt.forEach(r),lpo=t(kD," class method."),kD.forEach(r),ipo=i(ws),lE=n(ws,"P",{});var X3e=s(lE);dpo=t(X3e,"This class cannot be instantiated directly using "),LK=n(X3e,"CODE",{});var Ewt=s(LK);mpo=t(Ewt,"__init__()"),Ewt.forEach(r),fpo=t(X3e," (throws an error)."),X3e.forEach(r),cpo=i(ws),St=n(ws,"DIV",{class:!0});var As=s(St);c(iE.$$.fragment,As),gpo=i(As),BK=n(As,"P",{});var Cwt=s(BK);hpo=t(Cwt,"Instantiates one of the model classes of the library (with a next sentence prediction head) from a configuration."),Cwt.forEach(r),upo=i(As),Li=n(As,"P",{});var RD=s(Li);ppo=t(RD,`Note: Loading a model from its configuration file does `),kK=n(RD,"STRONG",{});var ywt=s(kK);_po=t(ywt,"not"),ywt.forEach(r),vpo=t(RD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),RK=n(RD,"EM",{});var wwt=s(RK);bpo=t(wwt,"~AutoModelForNextSentencePrediction.from_pretrained"),wwt.forEach(r),Tpo=t(RD,`] to load the model weights.`),RD.forEach(r),Fpo=i(As),SK=n(As,"P",{});var Awt=s(SK);Mpo=t(Awt,"Examples:"),Awt.forEach(r),Epo=i(As),c(dE.$$.fragment,As),As.forEach(r),Cpo=i(ws),$e=n(ws,"DIV",{class:!0});var yr=s($e);c(mE.$$.fragment,yr),ypo=i(yr),PK=n(yr,"P",{});var xwt=s(PK);wpo=t(xwt,"Instantiate one of the model classes of the library (with a next sentence prediction head) from a pretrained model."),xwt.forEach(r),Apo=i(yr),Ba=n(yr,"P",{});var JT=s(Ba);xpo=t(JT,"The model class to instantiate is selected based on the "),$K=n(JT,"EM",{});var Lwt=s($K);Lpo=t(Lwt,"model_type"),Lwt.forEach(r),Bpo=t(JT,` property of the config object (either passed as an argument or loaded from `),IK=n(JT,"EM",{});var Bwt=s(IK);kpo=t(Bwt,"pretrained_model_name_or_path"),Bwt.forEach(r),Rpo=t(JT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),jK=n(JT,"EM",{});var kwt=s(jK);Spo=t(kwt,"pretrained_model_name_or_path"),kwt.forEach(r),Ppo=t(JT,":"),JT.forEach(r),$po=i(yr),qr=n(yr,"UL",{});var xs=s(qr);tv=n(xs,"LI",{});var vbe=s(tv);NK=n(vbe,"STRONG",{});var Rwt=s(NK);Ipo=t(Rwt,"bert"),Rwt.forEach(r),jpo=t(vbe," \u2014 "),bR=n(vbe,"A",{href:!0});var Swt=s(bR);Npo=t(Swt,"BertForNextSentencePrediction"),Swt.forEach(r),Dpo=t(vbe," (BERT model)"),vbe.forEach(r),Gpo=i(xs),rv=n(xs,"LI",{});var bbe=s(rv);DK=n(bbe,"STRONG",{});var Pwt=s(DK);Opo=t(Pwt,"fnet"),Pwt.forEach(r),qpo=t(bbe," \u2014 "),TR=n(bbe,"A",{href:!0});var $wt=s(TR);zpo=t($wt,"FNetForNextSentencePrediction"),$wt.forEach(r),Xpo=t(bbe," (FNet model)"),bbe.forEach(r),Wpo=i(xs),av=n(xs,"LI",{});var Tbe=s(av);GK=n(Tbe,"STRONG",{});var Iwt=s(GK);Vpo=t(Iwt,"megatron-bert"),Iwt.forEach(r),Qpo=t(Tbe," \u2014 "),FR=n(Tbe,"A",{href:!0});var jwt=s(FR);Hpo=t(jwt,"MegatronBertForNextSentencePrediction"),jwt.forEach(r),Upo=t(Tbe," (MegatronBert model)"),Tbe.forEach(r),Jpo=i(xs),nv=n(xs,"LI",{});var Fbe=s(nv);OK=n(Fbe,"STRONG",{});var Nwt=s(OK);Kpo=t(Nwt,"mobilebert"),Nwt.forEach(r),Ypo=t(Fbe," \u2014 "),MR=n(Fbe,"A",{href:!0});var Dwt=s(MR);Zpo=t(Dwt,"MobileBertForNextSentencePrediction"),Dwt.forEach(r),e_o=t(Fbe," (MobileBERT model)"),Fbe.forEach(r),o_o=i(xs),sv=n(xs,"LI",{});var Mbe=s(sv);qK=n(Mbe,"STRONG",{});var Gwt=s(qK);t_o=t(Gwt,"qdqbert"),Gwt.forEach(r),r_o=t(Mbe," \u2014 "),ER=n(Mbe,"A",{href:!0});var Owt=s(ER);a_o=t(Owt,"QDQBertForNextSentencePrediction"),Owt.forEach(r),n_o=t(Mbe," (QDQBert model)"),Mbe.forEach(r),xs.forEach(r),s_o=i(yr),lv=n(yr,"P",{});var Ebe=s(lv);l_o=t(Ebe,"The model is set in evaluation mode by default using "),zK=n(Ebe,"EM",{});var qwt=s(zK);i_o=t(qwt,"model.eval()"),qwt.forEach(r),d_o=t(Ebe,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),XK=n(Ebe,"EM",{});var zwt=s(XK);m_o=t(zwt,"model.train()"),zwt.forEach(r),Ebe.forEach(r),f_o=i(yr),WK=n(yr,"P",{});var Xwt=s(WK);c_o=t(Xwt,"Examples:"),Xwt.forEach(r),g_o=i(yr),c(fE.$$.fragment,yr),yr.forEach(r),ws.forEach(r),HEe=i(d),Bi=n(d,"H2",{class:!0});var W3e=s(Bi);iv=n(W3e,"A",{id:!0,class:!0,href:!0});var Wwt=s(iv);VK=n(Wwt,"SPAN",{});var Vwt=s(VK);c(cE.$$.fragment,Vwt),Vwt.forEach(r),Wwt.forEach(r),h_o=i(W3e),QK=n(W3e,"SPAN",{});var Qwt=s(QK);u_o=t(Qwt,"AutoModelForTokenClassification"),Qwt.forEach(r),W3e.forEach(r),UEe=i(d),qo=n(d,"DIV",{class:!0});var Ls=s(qo);c(gE.$$.fragment,Ls),p_o=i(Ls),ki=n(Ls,"P",{});var SD=s(ki);__o=t(SD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a token classification head) when created with the `),HK=n(SD,"CODE",{});var Hwt=s(HK);v_o=t(Hwt,"from_pretrained()"),Hwt.forEach(r),b_o=t(SD,` class method or the `),UK=n(SD,"CODE",{});var Uwt=s(UK);T_o=t(Uwt,"from_config()"),Uwt.forEach(r),F_o=t(SD," class method."),SD.forEach(r),M_o=i(Ls),hE=n(Ls,"P",{});var V3e=s(hE);E_o=t(V3e,"This class cannot be instantiated directly using "),JK=n(V3e,"CODE",{});var Jwt=s(JK);C_o=t(Jwt,"__init__()"),Jwt.forEach(r),y_o=t(V3e," (throws an error)."),V3e.forEach(r),w_o=i(Ls),Pt=n(Ls,"DIV",{class:!0});var Bs=s(Pt);c(uE.$$.fragment,Bs),A_o=i(Bs),KK=n(Bs,"P",{});var Kwt=s(KK);x_o=t(Kwt,"Instantiates one of the model classes of the library (with a token classification head) from a configuration."),Kwt.forEach(r),L_o=i(Bs),Ri=n(Bs,"P",{});var PD=s(Ri);B_o=t(PD,`Note: Loading a model from its configuration file does `),YK=n(PD,"STRONG",{});var Ywt=s(YK);k_o=t(Ywt,"not"),Ywt.forEach(r),R_o=t(PD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),ZK=n(PD,"EM",{});var Zwt=s(ZK);S_o=t(Zwt,"~AutoModelForTokenClassification.from_pretrained"),Zwt.forEach(r),P_o=t(PD,`] to load the model weights.`),PD.forEach(r),$_o=i(Bs),eY=n(Bs,"P",{});var eAt=s(eY);I_o=t(eAt,"Examples:"),eAt.forEach(r),j_o=i(Bs),c(pE.$$.fragment,Bs),Bs.forEach(r),N_o=i(Ls),Ie=n(Ls,"DIV",{class:!0});var wr=s(Ie);c(_E.$$.fragment,wr),D_o=i(wr),oY=n(wr,"P",{});var oAt=s(oY);G_o=t(oAt,"Instantiate one of the model classes of the library (with a token classification head) from a pretrained model."),oAt.forEach(r),O_o=i(wr),ka=n(wr,"P",{});var KT=s(ka);q_o=t(KT,"The model class to instantiate is selected based on the "),tY=n(KT,"EM",{});var tAt=s(tY);z_o=t(tAt,"model_type"),tAt.forEach(r),X_o=t(KT,` property of the config object (either passed as an argument or loaded from `),rY=n(KT,"EM",{});var rAt=s(rY);W_o=t(rAt,"pretrained_model_name_or_path"),rAt.forEach(r),V_o=t(KT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),aY=n(KT,"EM",{});var aAt=s(aY);Q_o=t(aAt,"pretrained_model_name_or_path"),aAt.forEach(r),H_o=t(KT,":"),KT.forEach(r),U_o=i(wr),N=n(wr,"UL",{});var G=s(N);dv=n(G,"LI",{});var Cbe=s(dv);nY=n(Cbe,"STRONG",{});var nAt=s(nY);J_o=t(nAt,"albert"),nAt.forEach(r),K_o=t(Cbe," \u2014 "),CR=n(Cbe,"A",{href:!0});var sAt=s(CR);Y_o=t(sAt,"AlbertForTokenClassification"),sAt.forEach(r),Z_o=t(Cbe," (ALBERT model)"),Cbe.forEach(r),evo=i(G),mv=n(G,"LI",{});var ybe=s(mv);sY=n(ybe,"STRONG",{});var lAt=s(sY);ovo=t(lAt,"bert"),lAt.forEach(r),tvo=t(ybe," \u2014 "),yR=n(ybe,"A",{href:!0});var iAt=s(yR);rvo=t(iAt,"BertForTokenClassification"),iAt.forEach(r),avo=t(ybe," (BERT model)"),ybe.forEach(r),nvo=i(G),fv=n(G,"LI",{});var wbe=s(fv);lY=n(wbe,"STRONG",{});var dAt=s(lY);svo=t(dAt,"big_bird"),dAt.forEach(r),lvo=t(wbe," \u2014 "),wR=n(wbe,"A",{href:!0});var mAt=s(wR);ivo=t(mAt,"BigBirdForTokenClassification"),mAt.forEach(r),dvo=t(wbe," (BigBird model)"),wbe.forEach(r),mvo=i(G),cv=n(G,"LI",{});var Abe=s(cv);iY=n(Abe,"STRONG",{});var fAt=s(iY);fvo=t(fAt,"camembert"),fAt.forEach(r),cvo=t(Abe," \u2014 "),AR=n(Abe,"A",{href:!0});var cAt=s(AR);gvo=t(cAt,"CamembertForTokenClassification"),cAt.forEach(r),hvo=t(Abe," (CamemBERT model)"),Abe.forEach(r),uvo=i(G),gv=n(G,"LI",{});var xbe=s(gv);dY=n(xbe,"STRONG",{});var gAt=s(dY);pvo=t(gAt,"canine"),gAt.forEach(r),_vo=t(xbe," \u2014 "),xR=n(xbe,"A",{href:!0});var hAt=s(xR);vvo=t(hAt,"CanineForTokenClassification"),hAt.forEach(r),bvo=t(xbe," (Canine model)"),xbe.forEach(r),Tvo=i(G),hv=n(G,"LI",{});var Lbe=s(hv);mY=n(Lbe,"STRONG",{});var uAt=s(mY);Fvo=t(uAt,"convbert"),uAt.forEach(r),Mvo=t(Lbe," \u2014 "),LR=n(Lbe,"A",{href:!0});var pAt=s(LR);Evo=t(pAt,"ConvBertForTokenClassification"),pAt.forEach(r),Cvo=t(Lbe," (ConvBERT model)"),Lbe.forEach(r),yvo=i(G),uv=n(G,"LI",{});var Bbe=s(uv);fY=n(Bbe,"STRONG",{});var _At=s(fY);wvo=t(_At,"deberta"),_At.forEach(r),Avo=t(Bbe," \u2014 "),BR=n(Bbe,"A",{href:!0});var vAt=s(BR);xvo=t(vAt,"DebertaForTokenClassification"),vAt.forEach(r),Lvo=t(Bbe," (DeBERTa model)"),Bbe.forEach(r),Bvo=i(G),pv=n(G,"LI",{});var kbe=s(pv);cY=n(kbe,"STRONG",{});var bAt=s(cY);kvo=t(bAt,"deberta-v2"),bAt.forEach(r),Rvo=t(kbe," \u2014 "),kR=n(kbe,"A",{href:!0});var TAt=s(kR);Svo=t(TAt,"DebertaV2ForTokenClassification"),TAt.forEach(r),Pvo=t(kbe," (DeBERTa-v2 model)"),kbe.forEach(r),$vo=i(G),_v=n(G,"LI",{});var Rbe=s(_v);gY=n(Rbe,"STRONG",{});var FAt=s(gY);Ivo=t(FAt,"distilbert"),FAt.forEach(r),jvo=t(Rbe," \u2014 "),RR=n(Rbe,"A",{href:!0});var 
MAt=s(RR);Nvo=t(MAt,"DistilBertForTokenClassification"),MAt.forEach(r),Dvo=t(Rbe," (DistilBERT model)"),Rbe.forEach(r),Gvo=i(G),vv=n(G,"LI",{});var Sbe=s(vv);hY=n(Sbe,"STRONG",{});var EAt=s(hY);Ovo=t(EAt,"electra"),EAt.forEach(r),qvo=t(Sbe," \u2014 "),SR=n(Sbe,"A",{href:!0});var CAt=s(SR);zvo=t(CAt,"ElectraForTokenClassification"),CAt.forEach(r),Xvo=t(Sbe," (ELECTRA model)"),Sbe.forEach(r),Wvo=i(G),bv=n(G,"LI",{});var Pbe=s(bv);uY=n(Pbe,"STRONG",{});var yAt=s(uY);Vvo=t(yAt,"flaubert"),yAt.forEach(r),Qvo=t(Pbe," \u2014 "),PR=n(Pbe,"A",{href:!0});var wAt=s(PR);Hvo=t(wAt,"FlaubertForTokenClassification"),wAt.forEach(r),Uvo=t(Pbe," (FlauBERT model)"),Pbe.forEach(r),Jvo=i(G),Tv=n(G,"LI",{});var $be=s(Tv);pY=n($be,"STRONG",{});var AAt=s(pY);Kvo=t(AAt,"fnet"),AAt.forEach(r),Yvo=t($be," \u2014 "),$R=n($be,"A",{href:!0});var xAt=s($R);Zvo=t(xAt,"FNetForTokenClassification"),xAt.forEach(r),e1o=t($be," (FNet model)"),$be.forEach(r),o1o=i(G),Fv=n(G,"LI",{});var Ibe=s(Fv);_Y=n(Ibe,"STRONG",{});var LAt=s(_Y);t1o=t(LAt,"funnel"),LAt.forEach(r),r1o=t(Ibe," \u2014 "),IR=n(Ibe,"A",{href:!0});var BAt=s(IR);a1o=t(BAt,"FunnelForTokenClassification"),BAt.forEach(r),n1o=t(Ibe," (Funnel Transformer model)"),Ibe.forEach(r),s1o=i(G),Mv=n(G,"LI",{});var jbe=s(Mv);vY=n(jbe,"STRONG",{});var kAt=s(vY);l1o=t(kAt,"gpt2"),kAt.forEach(r),i1o=t(jbe," \u2014 "),jR=n(jbe,"A",{href:!0});var RAt=s(jR);d1o=t(RAt,"GPT2ForTokenClassification"),RAt.forEach(r),m1o=t(jbe," (OpenAI GPT-2 model)"),jbe.forEach(r),f1o=i(G),Ev=n(G,"LI",{});var Nbe=s(Ev);bY=n(Nbe,"STRONG",{});var SAt=s(bY);c1o=t(SAt,"ibert"),SAt.forEach(r),g1o=t(Nbe," \u2014 "),NR=n(Nbe,"A",{href:!0});var PAt=s(NR);h1o=t(PAt,"IBertForTokenClassification"),PAt.forEach(r),u1o=t(Nbe," (I-BERT model)"),Nbe.forEach(r),p1o=i(G),Cv=n(G,"LI",{});var Dbe=s(Cv);TY=n(Dbe,"STRONG",{});var $At=s(TY);_1o=t($At,"layoutlm"),$At.forEach(r),v1o=t(Dbe," \u2014 "),DR=n(Dbe,"A",{href:!0});var IAt=s(DR);b1o=t(IAt,"LayoutLMForTokenClassification"),IAt.forEach(r),T1o=t(Dbe," (LayoutLM model)"),Dbe.forEach(r),F1o=i(G),yv=n(G,"LI",{});var Gbe=s(yv);FY=n(Gbe,"STRONG",{});var jAt=s(FY);M1o=t(jAt,"layoutlmv2"),jAt.forEach(r),E1o=t(Gbe," \u2014 "),GR=n(Gbe,"A",{href:!0});var NAt=s(GR);C1o=t(NAt,"LayoutLMv2ForTokenClassification"),NAt.forEach(r),y1o=t(Gbe," (LayoutLMv2 model)"),Gbe.forEach(r),w1o=i(G),wv=n(G,"LI",{});var Obe=s(wv);MY=n(Obe,"STRONG",{});var DAt=s(MY);A1o=t(DAt,"longformer"),DAt.forEach(r),x1o=t(Obe," \u2014 "),OR=n(Obe,"A",{href:!0});var GAt=s(OR);L1o=t(GAt,"LongformerForTokenClassification"),GAt.forEach(r),B1o=t(Obe," (Longformer model)"),Obe.forEach(r),k1o=i(G),Av=n(G,"LI",{});var qbe=s(Av);EY=n(qbe,"STRONG",{});var OAt=s(EY);R1o=t(OAt,"megatron-bert"),OAt.forEach(r),S1o=t(qbe," \u2014 "),qR=n(qbe,"A",{href:!0});var qAt=s(qR);P1o=t(qAt,"MegatronBertForTokenClassification"),qAt.forEach(r),$1o=t(qbe," (MegatronBert model)"),qbe.forEach(r),I1o=i(G),xv=n(G,"LI",{});var zbe=s(xv);CY=n(zbe,"STRONG",{});var zAt=s(CY);j1o=t(zAt,"mobilebert"),zAt.forEach(r),N1o=t(zbe," \u2014 "),zR=n(zbe,"A",{href:!0});var XAt=s(zR);D1o=t(XAt,"MobileBertForTokenClassification"),XAt.forEach(r),G1o=t(zbe," (MobileBERT model)"),zbe.forEach(r),O1o=i(G),Lv=n(G,"LI",{});var Xbe=s(Lv);yY=n(Xbe,"STRONG",{});var WAt=s(yY);q1o=t(WAt,"mpnet"),WAt.forEach(r),z1o=t(Xbe," \u2014 "),XR=n(Xbe,"A",{href:!0});var VAt=s(XR);X1o=t(VAt,"MPNetForTokenClassification"),VAt.forEach(r),W1o=t(Xbe," (MPNet model)"),Xbe.forEach(r),V1o=i(G),Bv=n(G,"LI",{});var Wbe=s(Bv);wY=n(Wbe,"STRONG",{});var 
QAt=s(wY);Q1o=t(QAt,"qdqbert"),QAt.forEach(r),H1o=t(Wbe," \u2014 "),WR=n(Wbe,"A",{href:!0});var HAt=s(WR);U1o=t(HAt,"QDQBertForTokenClassification"),HAt.forEach(r),J1o=t(Wbe," (QDQBert model)"),Wbe.forEach(r),K1o=i(G),kv=n(G,"LI",{});var Vbe=s(kv);AY=n(Vbe,"STRONG",{});var UAt=s(AY);Y1o=t(UAt,"rembert"),UAt.forEach(r),Z1o=t(Vbe," \u2014 "),VR=n(Vbe,"A",{href:!0});var JAt=s(VR);e2o=t(JAt,"RemBertForTokenClassification"),JAt.forEach(r),o2o=t(Vbe," (RemBERT model)"),Vbe.forEach(r),t2o=i(G),Rv=n(G,"LI",{});var Qbe=s(Rv);xY=n(Qbe,"STRONG",{});var KAt=s(xY);r2o=t(KAt,"roberta"),KAt.forEach(r),a2o=t(Qbe," \u2014 "),QR=n(Qbe,"A",{href:!0});var YAt=s(QR);n2o=t(YAt,"RobertaForTokenClassification"),YAt.forEach(r),s2o=t(Qbe," (RoBERTa model)"),Qbe.forEach(r),l2o=i(G),Sv=n(G,"LI",{});var Hbe=s(Sv);LY=n(Hbe,"STRONG",{});var ZAt=s(LY);i2o=t(ZAt,"roformer"),ZAt.forEach(r),d2o=t(Hbe," \u2014 "),HR=n(Hbe,"A",{href:!0});var e7t=s(HR);m2o=t(e7t,"RoFormerForTokenClassification"),e7t.forEach(r),f2o=t(Hbe," (RoFormer model)"),Hbe.forEach(r),c2o=i(G),Pv=n(G,"LI",{});var Ube=s(Pv);BY=n(Ube,"STRONG",{});var o7t=s(BY);g2o=t(o7t,"squeezebert"),o7t.forEach(r),h2o=t(Ube," \u2014 "),UR=n(Ube,"A",{href:!0});var t7t=s(UR);u2o=t(t7t,"SqueezeBertForTokenClassification"),t7t.forEach(r),p2o=t(Ube," (SqueezeBERT model)"),Ube.forEach(r),_2o=i(G),$v=n(G,"LI",{});var Jbe=s($v);kY=n(Jbe,"STRONG",{});var r7t=s(kY);v2o=t(r7t,"xlm"),r7t.forEach(r),b2o=t(Jbe," \u2014 "),JR=n(Jbe,"A",{href:!0});var a7t=s(JR);T2o=t(a7t,"XLMForTokenClassification"),a7t.forEach(r),F2o=t(Jbe," (XLM model)"),Jbe.forEach(r),M2o=i(G),Iv=n(G,"LI",{});var Kbe=s(Iv);RY=n(Kbe,"STRONG",{});var n7t=s(RY);E2o=t(n7t,"xlm-roberta"),n7t.forEach(r),C2o=t(Kbe," \u2014 "),KR=n(Kbe,"A",{href:!0});var s7t=s(KR);y2o=t(s7t,"XLMRobertaForTokenClassification"),s7t.forEach(r),w2o=t(Kbe," (XLM-RoBERTa model)"),Kbe.forEach(r),A2o=i(G),jv=n(G,"LI",{});var Ybe=s(jv);SY=n(Ybe,"STRONG",{});var l7t=s(SY);x2o=t(l7t,"xlnet"),l7t.forEach(r),L2o=t(Ybe," \u2014 "),YR=n(Ybe,"A",{href:!0});var i7t=s(YR);B2o=t(i7t,"XLNetForTokenClassification"),i7t.forEach(r),k2o=t(Ybe," (XLNet model)"),Ybe.forEach(r),G.forEach(r),R2o=i(wr),Nv=n(wr,"P",{});var Zbe=s(Nv);S2o=t(Zbe,"The model is set in evaluation mode by default using "),PY=n(Zbe,"EM",{});var d7t=s(PY);P2o=t(d7t,"model.eval()"),d7t.forEach(r),$2o=t(Zbe,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),$Y=n(Zbe,"EM",{});var m7t=s($Y);I2o=t(m7t,"model.train()"),m7t.forEach(r),Zbe.forEach(r),j2o=i(wr),IY=n(wr,"P",{});var f7t=s(IY);N2o=t(f7t,"Examples:"),f7t.forEach(r),D2o=i(wr),c(vE.$$.fragment,wr),wr.forEach(r),Ls.forEach(r),JEe=i(d),Si=n(d,"H2",{class:!0});var Q3e=s(Si);Dv=n(Q3e,"A",{id:!0,class:!0,href:!0});var c7t=s(Dv);jY=n(c7t,"SPAN",{});var g7t=s(jY);c(bE.$$.fragment,g7t),g7t.forEach(r),c7t.forEach(r),G2o=i(Q3e),NY=n(Q3e,"SPAN",{});var h7t=s(NY);O2o=t(h7t,"AutoModelForQuestionAnswering"),h7t.forEach(r),Q3e.forEach(r),KEe=i(d),zo=n(d,"DIV",{class:!0});var ks=s(zo);c(TE.$$.fragment,ks),q2o=i(ks),Pi=n(ks,"P",{});var $D=s(Pi);z2o=t($D,`This is a generic model class that will be instantiated as one of the model classes of the library (with a question answering head) when created with the `),DY=n($D,"CODE",{});var u7t=s(DY);X2o=t(u7t,"from_pretrained()"),u7t.forEach(r),W2o=t($D,` class method or the `),GY=n($D,"CODE",{});var p7t=s(GY);V2o=t(p7t,"from_config()"),p7t.forEach(r),Q2o=t($D," class method."),$D.forEach(r),H2o=i(ks),FE=n(ks,"P",{});var H3e=s(FE);U2o=t(H3e,"This class cannot be instantiated directly using "),OY=n(H3e,"CODE",{});var _7t=s(OY);J2o=t(_7t,"__init__()"),_7t.forEach(r),K2o=t(H3e," (throws an error)."),H3e.forEach(r),Y2o=i(ks),$t=n(ks,"DIV",{class:!0});var Rs=s($t);c(ME.$$.fragment,Rs),Z2o=i(Rs),qY=n(Rs,"P",{});var v7t=s(qY);ebo=t(v7t,"Instantiates one of the model classes of the library (with a question answering head) from a configuration."),v7t.forEach(r),obo=i(Rs),$i=n(Rs,"P",{});var ID=s($i);tbo=t(ID,`Note: Loading a model from its configuration file does `),zY=n(ID,"STRONG",{});var b7t=s(zY);rbo=t(b7t,"not"),b7t.forEach(r),abo=t(ID,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),XY=n(ID,"EM",{});var T7t=s(XY);nbo=t(T7t,"~AutoModelForQuestionAnswering.from_pretrained"),T7t.forEach(r),sbo=t(ID,`] to load the model weights.`),ID.forEach(r),lbo=i(Rs),WY=n(Rs,"P",{});var F7t=s(WY);ibo=t(F7t,"Examples:"),F7t.forEach(r),dbo=i(Rs),c(EE.$$.fragment,Rs),Rs.forEach(r),mbo=i(ks),je=n(ks,"DIV",{class:!0});var Ar=s(je);c(CE.$$.fragment,Ar),fbo=i(Ar),VY=n(Ar,"P",{});var M7t=s(VY);cbo=t(M7t,"Instantiate one of the model classes of the library (with a question answering head) from a pretrained model."),M7t.forEach(r),gbo=i(Ar),Ra=n(Ar,"P",{});var YT=s(Ra);hbo=t(YT,"The model class to instantiate is selected based on the "),QY=n(YT,"EM",{});var E7t=s(QY);ubo=t(E7t,"model_type"),E7t.forEach(r),pbo=t(YT,` property of the config object (either passed as an argument or loaded from `),HY=n(YT,"EM",{});var C7t=s(HY);_bo=t(C7t,"pretrained_model_name_or_path"),C7t.forEach(r),vbo=t(YT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),UY=n(YT,"EM",{});var y7t=s(UY);bbo=t(y7t,"pretrained_model_name_or_path"),y7t.forEach(r),Tbo=t(YT,":"),YT.forEach(r),Fbo=i(Ar),R=n(Ar,"UL",{});var P=s(R);Gv=n(P,"LI",{});var e4e=s(Gv);JY=n(e4e,"STRONG",{});var w7t=s(JY);Mbo=t(w7t,"albert"),w7t.forEach(r),Ebo=t(e4e," \u2014 "),ZR=n(e4e,"A",{href:!0});var A7t=s(ZR);Cbo=t(A7t,"AlbertForQuestionAnswering"),A7t.forEach(r),ybo=t(e4e," (ALBERT model)"),e4e.forEach(r),wbo=i(P),Ov=n(P,"LI",{});var o4e=s(Ov);KY=n(o4e,"STRONG",{});var x7t=s(KY);Abo=t(x7t,"bart"),x7t.forEach(r),xbo=t(o4e," \u2014 "),eS=n(o4e,"A",{href:!0});var L7t=s(eS);Lbo=t(L7t,"BartForQuestionAnswering"),L7t.forEach(r),Bbo=t(o4e," (BART model)"),o4e.forEach(r),kbo=i(P),qv=n(P,"LI",{});var t4e=s(qv);YY=n(t4e,"STRONG",{});var B7t=s(YY);Rbo=t(B7t,"bert"),B7t.forEach(r),Sbo=t(t4e," \u2014 "),oS=n(t4e,"A",{href:!0});var k7t=s(oS);Pbo=t(k7t,"BertForQuestionAnswering"),k7t.forEach(r),$bo=t(t4e," (BERT model)"),t4e.forEach(r),Ibo=i(P),zv=n(P,"LI",{});var r4e=s(zv);ZY=n(r4e,"STRONG",{});var R7t=s(ZY);jbo=t(R7t,"big_bird"),R7t.forEach(r),Nbo=t(r4e," \u2014 "),tS=n(r4e,"A",{href:!0});var S7t=s(tS);Dbo=t(S7t,"BigBirdForQuestionAnswering"),S7t.forEach(r),Gbo=t(r4e," (BigBird model)"),r4e.forEach(r),Obo=i(P),Xv=n(P,"LI",{});var a4e=s(Xv);eZ=n(a4e,"STRONG",{});var P7t=s(eZ);qbo=t(P7t,"bigbird_pegasus"),P7t.forEach(r),zbo=t(a4e," \u2014 "),rS=n(a4e,"A",{href:!0});var $7t=s(rS);Xbo=t($7t,"BigBirdPegasusForQuestionAnswering"),$7t.forEach(r),Wbo=t(a4e," (BigBirdPegasus model)"),a4e.forEach(r),Vbo=i(P),Wv=n(P,"LI",{});var n4e=s(Wv);oZ=n(n4e,"STRONG",{});var I7t=s(oZ);Qbo=t(I7t,"camembert"),I7t.forEach(r),Hbo=t(n4e," \u2014 "),aS=n(n4e,"A",{href:!0});var j7t=s(aS);Ubo=t(j7t,"CamembertForQuestionAnswering"),j7t.forEach(r),Jbo=t(n4e," (CamemBERT model)"),n4e.forEach(r),Kbo=i(P),Vv=n(P,"LI",{});var s4e=s(Vv);tZ=n(s4e,"STRONG",{});var N7t=s(tZ);Ybo=t(N7t,"canine"),N7t.forEach(r),Zbo=t(s4e," \u2014 "),nS=n(s4e,"A",{href:!0});var D7t=s(nS);e4o=t(D7t,"CanineForQuestionAnswering"),D7t.forEach(r),o4o=t(s4e," (Canine model)"),s4e.forEach(r),t4o=i(P),Qv=n(P,"LI",{});var l4e=s(Qv);rZ=n(l4e,"STRONG",{});var G7t=s(rZ);r4o=t(G7t,"convbert"),G7t.forEach(r),a4o=t(l4e," \u2014 "),sS=n(l4e,"A",{href:!0});var O7t=s(sS);n4o=t(O7t,"ConvBertForQuestionAnswering"),O7t.forEach(r),s4o=t(l4e," (ConvBERT model)"),l4e.forEach(r),l4o=i(P),Hv=n(P,"LI",{});var i4e=s(Hv);aZ=n(i4e,"STRONG",{});var q7t=s(aZ);i4o=t(q7t,"deberta"),q7t.forEach(r),d4o=t(i4e," \u2014 "),lS=n(i4e,"A",{href:!0});var 
z7t=s(lS);m4o=t(z7t,"DebertaForQuestionAnswering"),z7t.forEach(r),f4o=t(i4e," (DeBERTa model)"),i4e.forEach(r),c4o=i(P),Uv=n(P,"LI",{});var d4e=s(Uv);nZ=n(d4e,"STRONG",{});var X7t=s(nZ);g4o=t(X7t,"deberta-v2"),X7t.forEach(r),h4o=t(d4e," \u2014 "),iS=n(d4e,"A",{href:!0});var W7t=s(iS);u4o=t(W7t,"DebertaV2ForQuestionAnswering"),W7t.forEach(r),p4o=t(d4e," (DeBERTa-v2 model)"),d4e.forEach(r),_4o=i(P),Jv=n(P,"LI",{});var m4e=s(Jv);sZ=n(m4e,"STRONG",{});var V7t=s(sZ);v4o=t(V7t,"distilbert"),V7t.forEach(r),b4o=t(m4e," \u2014 "),dS=n(m4e,"A",{href:!0});var Q7t=s(dS);T4o=t(Q7t,"DistilBertForQuestionAnswering"),Q7t.forEach(r),F4o=t(m4e," (DistilBERT model)"),m4e.forEach(r),M4o=i(P),Kv=n(P,"LI",{});var f4e=s(Kv);lZ=n(f4e,"STRONG",{});var H7t=s(lZ);E4o=t(H7t,"electra"),H7t.forEach(r),C4o=t(f4e," \u2014 "),mS=n(f4e,"A",{href:!0});var U7t=s(mS);y4o=t(U7t,"ElectraForQuestionAnswering"),U7t.forEach(r),w4o=t(f4e," (ELECTRA model)"),f4e.forEach(r),A4o=i(P),Yv=n(P,"LI",{});var c4e=s(Yv);iZ=n(c4e,"STRONG",{});var J7t=s(iZ);x4o=t(J7t,"flaubert"),J7t.forEach(r),L4o=t(c4e," \u2014 "),fS=n(c4e,"A",{href:!0});var K7t=s(fS);B4o=t(K7t,"FlaubertForQuestionAnsweringSimple"),K7t.forEach(r),k4o=t(c4e," (FlauBERT model)"),c4e.forEach(r),R4o=i(P),Zv=n(P,"LI",{});var g4e=s(Zv);dZ=n(g4e,"STRONG",{});var Y7t=s(dZ);S4o=t(Y7t,"fnet"),Y7t.forEach(r),P4o=t(g4e," \u2014 "),cS=n(g4e,"A",{href:!0});var Z7t=s(cS);$4o=t(Z7t,"FNetForQuestionAnswering"),Z7t.forEach(r),I4o=t(g4e," (FNet model)"),g4e.forEach(r),j4o=i(P),e1=n(P,"LI",{});var h4e=s(e1);mZ=n(h4e,"STRONG",{});var ext=s(mZ);N4o=t(ext,"funnel"),ext.forEach(r),D4o=t(h4e," \u2014 "),gS=n(h4e,"A",{href:!0});var oxt=s(gS);G4o=t(oxt,"FunnelForQuestionAnswering"),oxt.forEach(r),O4o=t(h4e," (Funnel Transformer model)"),h4e.forEach(r),q4o=i(P),o1=n(P,"LI",{});var u4e=s(o1);fZ=n(u4e,"STRONG",{});var txt=s(fZ);z4o=t(txt,"gptj"),txt.forEach(r),X4o=t(u4e," \u2014 "),hS=n(u4e,"A",{href:!0});var rxt=s(hS);W4o=t(rxt,"GPTJForQuestionAnswering"),rxt.forEach(r),V4o=t(u4e," (GPT-J model)"),u4e.forEach(r),Q4o=i(P),t1=n(P,"LI",{});var p4e=s(t1);cZ=n(p4e,"STRONG",{});var axt=s(cZ);H4o=t(axt,"ibert"),axt.forEach(r),U4o=t(p4e," \u2014 "),uS=n(p4e,"A",{href:!0});var nxt=s(uS);J4o=t(nxt,"IBertForQuestionAnswering"),nxt.forEach(r),K4o=t(p4e," (I-BERT model)"),p4e.forEach(r),Y4o=i(P),r1=n(P,"LI",{});var _4e=s(r1);gZ=n(_4e,"STRONG",{});var sxt=s(gZ);Z4o=t(sxt,"layoutlmv2"),sxt.forEach(r),e5o=t(_4e," \u2014 "),pS=n(_4e,"A",{href:!0});var lxt=s(pS);o5o=t(lxt,"LayoutLMv2ForQuestionAnswering"),lxt.forEach(r),t5o=t(_4e," (LayoutLMv2 model)"),_4e.forEach(r),r5o=i(P),a1=n(P,"LI",{});var v4e=s(a1);hZ=n(v4e,"STRONG",{});var ixt=s(hZ);a5o=t(ixt,"led"),ixt.forEach(r),n5o=t(v4e," \u2014 "),_S=n(v4e,"A",{href:!0});var dxt=s(_S);s5o=t(dxt,"LEDForQuestionAnswering"),dxt.forEach(r),l5o=t(v4e," (LED model)"),v4e.forEach(r),i5o=i(P),n1=n(P,"LI",{});var b4e=s(n1);uZ=n(b4e,"STRONG",{});var mxt=s(uZ);d5o=t(mxt,"longformer"),mxt.forEach(r),m5o=t(b4e," \u2014 "),vS=n(b4e,"A",{href:!0});var fxt=s(vS);f5o=t(fxt,"LongformerForQuestionAnswering"),fxt.forEach(r),c5o=t(b4e," (Longformer model)"),b4e.forEach(r),g5o=i(P),s1=n(P,"LI",{});var T4e=s(s1);pZ=n(T4e,"STRONG",{});var cxt=s(pZ);h5o=t(cxt,"lxmert"),cxt.forEach(r),u5o=t(T4e," \u2014 "),bS=n(T4e,"A",{href:!0});var gxt=s(bS);p5o=t(gxt,"LxmertForQuestionAnswering"),gxt.forEach(r),_5o=t(T4e," (LXMERT model)"),T4e.forEach(r),v5o=i(P),l1=n(P,"LI",{});var F4e=s(l1);_Z=n(F4e,"STRONG",{});var hxt=s(_Z);b5o=t(hxt,"mbart"),hxt.forEach(r),T5o=t(F4e," \u2014 "),TS=n(F4e,"A",{href:!0});var 
uxt=s(TS);F5o=t(uxt,"MBartForQuestionAnswering"),uxt.forEach(r),M5o=t(F4e," (mBART model)"),F4e.forEach(r),E5o=i(P),i1=n(P,"LI",{});var M4e=s(i1);vZ=n(M4e,"STRONG",{});var pxt=s(vZ);C5o=t(pxt,"megatron-bert"),pxt.forEach(r),y5o=t(M4e," \u2014 "),FS=n(M4e,"A",{href:!0});var _xt=s(FS);w5o=t(_xt,"MegatronBertForQuestionAnswering"),_xt.forEach(r),A5o=t(M4e," (MegatronBert model)"),M4e.forEach(r),x5o=i(P),d1=n(P,"LI",{});var E4e=s(d1);bZ=n(E4e,"STRONG",{});var vxt=s(bZ);L5o=t(vxt,"mobilebert"),vxt.forEach(r),B5o=t(E4e," \u2014 "),MS=n(E4e,"A",{href:!0});var bxt=s(MS);k5o=t(bxt,"MobileBertForQuestionAnswering"),bxt.forEach(r),R5o=t(E4e," (MobileBERT model)"),E4e.forEach(r),S5o=i(P),m1=n(P,"LI",{});var C4e=s(m1);TZ=n(C4e,"STRONG",{});var Txt=s(TZ);P5o=t(Txt,"mpnet"),Txt.forEach(r),$5o=t(C4e," \u2014 "),ES=n(C4e,"A",{href:!0});var Fxt=s(ES);I5o=t(Fxt,"MPNetForQuestionAnswering"),Fxt.forEach(r),j5o=t(C4e," (MPNet model)"),C4e.forEach(r),N5o=i(P),f1=n(P,"LI",{});var y4e=s(f1);FZ=n(y4e,"STRONG",{});var Mxt=s(FZ);D5o=t(Mxt,"qdqbert"),Mxt.forEach(r),G5o=t(y4e," \u2014 "),CS=n(y4e,"A",{href:!0});var Ext=s(CS);O5o=t(Ext,"QDQBertForQuestionAnswering"),Ext.forEach(r),q5o=t(y4e," (QDQBert model)"),y4e.forEach(r),z5o=i(P),c1=n(P,"LI",{});var w4e=s(c1);MZ=n(w4e,"STRONG",{});var Cxt=s(MZ);X5o=t(Cxt,"reformer"),Cxt.forEach(r),W5o=t(w4e," \u2014 "),yS=n(w4e,"A",{href:!0});var yxt=s(yS);V5o=t(yxt,"ReformerForQuestionAnswering"),yxt.forEach(r),Q5o=t(w4e," (Reformer model)"),w4e.forEach(r),H5o=i(P),g1=n(P,"LI",{});var A4e=s(g1);EZ=n(A4e,"STRONG",{});var wxt=s(EZ);U5o=t(wxt,"rembert"),wxt.forEach(r),J5o=t(A4e," \u2014 "),wS=n(A4e,"A",{href:!0});var Axt=s(wS);K5o=t(Axt,"RemBertForQuestionAnswering"),Axt.forEach(r),Y5o=t(A4e," (RemBERT model)"),A4e.forEach(r),Z5o=i(P),h1=n(P,"LI",{});var x4e=s(h1);CZ=n(x4e,"STRONG",{});var xxt=s(CZ);e0o=t(xxt,"roberta"),xxt.forEach(r),o0o=t(x4e," \u2014 "),AS=n(x4e,"A",{href:!0});var Lxt=s(AS);t0o=t(Lxt,"RobertaForQuestionAnswering"),Lxt.forEach(r),r0o=t(x4e," (RoBERTa model)"),x4e.forEach(r),a0o=i(P),u1=n(P,"LI",{});var L4e=s(u1);yZ=n(L4e,"STRONG",{});var Bxt=s(yZ);n0o=t(Bxt,"roformer"),Bxt.forEach(r),s0o=t(L4e," \u2014 "),xS=n(L4e,"A",{href:!0});var kxt=s(xS);l0o=t(kxt,"RoFormerForQuestionAnswering"),kxt.forEach(r),i0o=t(L4e," (RoFormer model)"),L4e.forEach(r),d0o=i(P),p1=n(P,"LI",{});var B4e=s(p1);wZ=n(B4e,"STRONG",{});var Rxt=s(wZ);m0o=t(Rxt,"splinter"),Rxt.forEach(r),f0o=t(B4e," \u2014 "),LS=n(B4e,"A",{href:!0});var Sxt=s(LS);c0o=t(Sxt,"SplinterForQuestionAnswering"),Sxt.forEach(r),g0o=t(B4e," (Splinter model)"),B4e.forEach(r),h0o=i(P),_1=n(P,"LI",{});var k4e=s(_1);AZ=n(k4e,"STRONG",{});var Pxt=s(AZ);u0o=t(Pxt,"squeezebert"),Pxt.forEach(r),p0o=t(k4e," \u2014 "),BS=n(k4e,"A",{href:!0});var $xt=s(BS);_0o=t($xt,"SqueezeBertForQuestionAnswering"),$xt.forEach(r),v0o=t(k4e," (SqueezeBERT model)"),k4e.forEach(r),b0o=i(P),v1=n(P,"LI",{});var R4e=s(v1);xZ=n(R4e,"STRONG",{});var Ixt=s(xZ);T0o=t(Ixt,"xlm"),Ixt.forEach(r),F0o=t(R4e," \u2014 "),kS=n(R4e,"A",{href:!0});var jxt=s(kS);M0o=t(jxt,"XLMForQuestionAnsweringSimple"),jxt.forEach(r),E0o=t(R4e," (XLM model)"),R4e.forEach(r),C0o=i(P),b1=n(P,"LI",{});var S4e=s(b1);LZ=n(S4e,"STRONG",{});var Nxt=s(LZ);y0o=t(Nxt,"xlm-roberta"),Nxt.forEach(r),w0o=t(S4e," \u2014 "),RS=n(S4e,"A",{href:!0});var Dxt=s(RS);A0o=t(Dxt,"XLMRobertaForQuestionAnswering"),Dxt.forEach(r),x0o=t(S4e," (XLM-RoBERTa model)"),S4e.forEach(r),L0o=i(P),T1=n(P,"LI",{});var P4e=s(T1);BZ=n(P4e,"STRONG",{});var Gxt=s(BZ);B0o=t(Gxt,"xlnet"),Gxt.forEach(r),k0o=t(P4e," \u2014 
"),SS=n(P4e,"A",{href:!0});var Oxt=s(SS);R0o=t(Oxt,"XLNetForQuestionAnsweringSimple"),Oxt.forEach(r),S0o=t(P4e," (XLNet model)"),P4e.forEach(r),P.forEach(r),P0o=i(Ar),F1=n(Ar,"P",{});var $4e=s(F1);$0o=t($4e,"The model is set in evaluation mode by default using "),kZ=n($4e,"EM",{});var qxt=s(kZ);I0o=t(qxt,"model.eval()"),qxt.forEach(r),j0o=t($4e,` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),RZ=n($4e,"EM",{});var zxt=s(RZ);N0o=t(zxt,"model.train()"),zxt.forEach(r),$4e.forEach(r),D0o=i(Ar),SZ=n(Ar,"P",{});var Xxt=s(SZ);G0o=t(Xxt,"Examples:"),Xxt.forEach(r),O0o=i(Ar),c(yE.$$.fragment,Ar),Ar.forEach(r),ks.forEach(r),YEe=i(d),Ii=n(d,"H2",{class:!0});var U3e=s(Ii);M1=n(U3e,"A",{id:!0,class:!0,href:!0});var Wxt=s(M1);PZ=n(Wxt,"SPAN",{});var Vxt=s(PZ);c(wE.$$.fragment,Vxt),Vxt.forEach(r),Wxt.forEach(r),q0o=i(U3e),$Z=n(U3e,"SPAN",{});var Qxt=s($Z);z0o=t(Qxt,"AutoModelForTableQuestionAnswering"),Qxt.forEach(r),U3e.forEach(r),ZEe=i(d),Xo=n(d,"DIV",{class:!0});var Ss=s(Xo);c(AE.$$.fragment,Ss),X0o=i(Ss),ji=n(Ss,"P",{});var jD=s(ji);W0o=t(jD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a table question answering head) when created with the `),IZ=n(jD,"CODE",{});var Hxt=s(IZ);V0o=t(Hxt,"from_pretrained()"),Hxt.forEach(r),Q0o=t(jD,` class method or the `),jZ=n(jD,"CODE",{});var Uxt=s(jZ);H0o=t(Uxt,"from_config()"),Uxt.forEach(r),U0o=t(jD," class method."),jD.forEach(r),J0o=i(Ss),xE=n(Ss,"P",{});var J3e=s(xE);K0o=t(J3e,"This class cannot be instantiated directly using "),NZ=n(J3e,"CODE",{});var Jxt=s(NZ);Y0o=t(Jxt,"__init__()"),Jxt.forEach(r),Z0o=t(J3e," (throws an error)."),J3e.forEach(r),eTo=i(Ss),It=n(Ss,"DIV",{class:!0});var Ps=s(It);c(LE.$$.fragment,Ps),oTo=i(Ps),DZ=n(Ps,"P",{});var Kxt=s(DZ);tTo=t(Kxt,"Instantiates one of the model classes of the library (with a table question answering head) from a configuration."),Kxt.forEach(r),rTo=i(Ps),Ni=n(Ps,"P",{});var ND=s(Ni);aTo=t(ND,`Note: Loading a model from its configuration file does `),GZ=n(ND,"STRONG",{});var Yxt=s(GZ);nTo=t(Yxt,"not"),Yxt.forEach(r),sTo=t(ND,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),OZ=n(ND,"EM",{});var Zxt=s(OZ);lTo=t(Zxt,"~AutoModelForTableQuestionAnswering.from_pretrained"),Zxt.forEach(r),iTo=t(ND,`] to load the model weights.`),ND.forEach(r),dTo=i(Ps),qZ=n(Ps,"P",{});var e6t=s(qZ);mTo=t(e6t,"Examples:"),e6t.forEach(r),fTo=i(Ps),c(BE.$$.fragment,Ps),Ps.forEach(r),cTo=i(Ss),Ne=n(Ss,"DIV",{class:!0});var xr=s(Ne);c(kE.$$.fragment,xr),gTo=i(xr),zZ=n(xr,"P",{});var o6t=s(zZ);hTo=t(o6t,"Instantiate one of the model classes of the library (with a table question answering head) from a pretrained model."),o6t.forEach(r),uTo=i(xr),Sa=n(xr,"P",{});var ZT=s(Sa);pTo=t(ZT,"The model class to instantiate is selected based on the "),XZ=n(ZT,"EM",{});var t6t=s(XZ);_To=t(t6t,"model_type"),t6t.forEach(r),vTo=t(ZT,` property of the config object (either passed as an argument or loaded from `),WZ=n(ZT,"EM",{});var r6t=s(WZ);bTo=t(r6t,"pretrained_model_name_or_path"),r6t.forEach(r),TTo=t(ZT,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),VZ=n(ZT,"EM",{});var a6t=s(VZ);FTo=t(a6t,"pretrained_model_name_or_path"),a6t.forEach(r),MTo=t(ZT,":"),ZT.forEach(r),ETo=i(xr),QZ=n(xr,"UL",{});var n6t=s(QZ);E1=n(n6t,"LI",{});var I4e=s(E1);HZ=n(I4e,"STRONG",{});var s6t=s(HZ);CTo=t(s6t,"tapas"),s6t.forEach(r),yTo=t(I4e," \u2014 "),PS=n(I4e,"A",{href:!0});var l6t=s(PS);wTo=t(l6t,"TapasForQuestionAnswering"),l6t.forEach(r),ATo=t(I4e," (TAPAS model)"),I4e.forEach(r),n6t.forEach(r),xTo=i(xr),C1=n(xr,"P",{});var j4e=s(C1);LTo=t(j4e,"The model is set in evaluation mode by default using "),UZ=n(j4e,"EM",{});var i6t=s(UZ);BTo=t(i6t,"model.eval()"),i6t.forEach(r),kTo=t(j4e,` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),JZ=n(j4e,"EM",{});var d6t=s(JZ);RTo=t(d6t,"model.train()"),d6t.forEach(r),j4e.forEach(r),STo=i(xr),KZ=n(xr,"P",{});var m6t=s(KZ);PTo=t(m6t,"Examples:"),m6t.forEach(r),$To=i(xr),c(RE.$$.fragment,xr),xr.forEach(r),Ss.forEach(r),eCe=i(d),Di=n(d,"H2",{class:!0});var K3e=s(Di);y1=n(K3e,"A",{id:!0,class:!0,href:!0});var f6t=s(y1);YZ=n(f6t,"SPAN",{});var c6t=s(YZ);c(SE.$$.fragment,c6t),c6t.forEach(r),f6t.forEach(r),ITo=i(K3e),ZZ=n(K3e,"SPAN",{});var g6t=s(ZZ);jTo=t(g6t,"AutoModelForImageClassification"),g6t.forEach(r),K3e.forEach(r),oCe=i(d),Wo=n(d,"DIV",{class:!0});var $s=s(Wo);c(PE.$$.fragment,$s),NTo=i($s),Gi=n($s,"P",{});var DD=s(Gi);DTo=t(DD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a image classification head) when created with the `),eee=n(DD,"CODE",{});var h6t=s(eee);GTo=t(h6t,"from_pretrained()"),h6t.forEach(r),OTo=t(DD,` class method or the `),oee=n(DD,"CODE",{});var u6t=s(oee);qTo=t(u6t,"from_config()"),u6t.forEach(r),zTo=t(DD," class method."),DD.forEach(r),XTo=i($s),$E=n($s,"P",{});var Y3e=s($E);WTo=t(Y3e,"This class cannot be instantiated directly using "),tee=n(Y3e,"CODE",{});var p6t=s(tee);VTo=t(p6t,"__init__()"),p6t.forEach(r),QTo=t(Y3e," (throws an error)."),Y3e.forEach(r),HTo=i($s),jt=n($s,"DIV",{class:!0});var Is=s(jt);c(IE.$$.fragment,Is),UTo=i(Is),ree=n(Is,"P",{});var _6t=s(ree);JTo=t(_6t,"Instantiates one of the model classes of the library (with a image classification head) from a configuration."),_6t.forEach(r),KTo=i(Is),Oi=n(Is,"P",{});var GD=s(Oi);YTo=t(GD,`Note: Loading a model from its configuration file does `),aee=n(GD,"STRONG",{});var v6t=s(aee);ZTo=t(v6t,"not"),v6t.forEach(r),eFo=t(GD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),nee=n(GD,"EM",{});var b6t=s(nee);oFo=t(b6t,"~AutoModelForImageClassification.from_pretrained"),b6t.forEach(r),tFo=t(GD,`] to load the model weights.`),GD.forEach(r),rFo=i(Is),see=n(Is,"P",{});var T6t=s(see);aFo=t(T6t,"Examples:"),T6t.forEach(r),nFo=i(Is),c(jE.$$.fragment,Is),Is.forEach(r),sFo=i($s),De=n($s,"DIV",{class:!0});var Lr=s(De);c(NE.$$.fragment,Lr),lFo=i(Lr),lee=n(Lr,"P",{});var F6t=s(lee);iFo=t(F6t,"Instantiate one of the model classes of the library (with a image classification head) from a pretrained model."),F6t.forEach(r),dFo=i(Lr),Pa=n(Lr,"P",{});var eF=s(Pa);mFo=t(eF,"The model class to instantiate is selected based on the "),iee=n(eF,"EM",{});var M6t=s(iee);fFo=t(M6t,"model_type"),M6t.forEach(r),cFo=t(eF,` property of the config object (either passed as an argument or loaded from `),dee=n(eF,"EM",{});var E6t=s(dee);gFo=t(E6t,"pretrained_model_name_or_path"),E6t.forEach(r),hFo=t(eF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),mee=n(eF,"EM",{});var C6t=s(mee);uFo=t(C6t,"pretrained_model_name_or_path"),C6t.forEach(r),pFo=t(eF,":"),eF.forEach(r),_Fo=i(Lr),Vo=n(Lr,"UL",{});var Qr=s(Vo);w1=n(Qr,"LI",{});var N4e=s(w1);fee=n(N4e,"STRONG",{});var y6t=s(fee);vFo=t(y6t,"beit"),y6t.forEach(r),bFo=t(N4e," \u2014 "),$S=n(N4e,"A",{href:!0});var w6t=s($S);TFo=t(w6t,"BeitForImageClassification"),w6t.forEach(r),FFo=t(N4e," (BEiT model)"),N4e.forEach(r),MFo=i(Qr),is=n(Qr,"LI",{});var PA=s(is);cee=n(PA,"STRONG",{});var A6t=s(cee);EFo=t(A6t,"deit"),A6t.forEach(r),CFo=t(PA," \u2014 "),IS=n(PA,"A",{href:!0});var x6t=s(IS);yFo=t(x6t,"DeiTForImageClassification"),x6t.forEach(r),wFo=t(PA," or "),jS=n(PA,"A",{href:!0});var L6t=s(jS);AFo=t(L6t,"DeiTForImageClassificationWithTeacher"),L6t.forEach(r),xFo=t(PA," (DeiT model)"),PA.forEach(r),LFo=i(Qr),A1=n(Qr,"LI",{});var D4e=s(A1);gee=n(D4e,"STRONG",{});var B6t=s(gee);BFo=t(B6t,"imagegpt"),B6t.forEach(r),kFo=t(D4e," \u2014 "),NS=n(D4e,"A",{href:!0});var k6t=s(NS);RFo=t(k6t,"ImageGPTForImageClassification"),k6t.forEach(r),SFo=t(D4e," (ImageGPT model)"),D4e.forEach(r),PFo=i(Qr),Xr=n(Qr,"LI",{});var Bm=s(Xr);hee=n(Bm,"STRONG",{});var R6t=s(hee);$Fo=t(R6t,"perceiver"),R6t.forEach(r),IFo=t(Bm," \u2014 "),DS=n(Bm,"A",{href:!0});var S6t=s(DS);jFo=t(S6t,"PerceiverForImageClassificationLearned"),S6t.forEach(r),NFo=t(Bm," or "),GS=n(Bm,"A",{href:!0});var P6t=s(GS);DFo=t(P6t,"PerceiverForImageClassificationFourier"),P6t.forEach(r),GFo=t(Bm," or "),OS=n(Bm,"A",{href:!0});var $6t=s(OS);OFo=t($6t,"PerceiverForImageClassificationConvProcessing"),$6t.forEach(r),qFo=t(Bm," (Perceiver model)"),Bm.forEach(r),zFo=i(Qr),x1=n(Qr,"LI",{});var G4e=s(x1);uee=n(G4e,"STRONG",{});var I6t=s(uee);XFo=t(I6t,"segformer"),I6t.forEach(r),WFo=t(G4e," \u2014 "),qS=n(G4e,"A",{href:!0});var j6t=s(qS);VFo=t(j6t,"SegformerForImageClassification"),j6t.forEach(r),QFo=t(G4e," (SegFormer model)"),G4e.forEach(r),HFo=i(Qr),L1=n(Qr,"LI",{});var O4e=s(L1);pee=n(O4e,"STRONG",{});var N6t=s(pee);UFo=t(N6t,"vit"),N6t.forEach(r),JFo=t(O4e," \u2014 "),zS=n(O4e,"A",{href:!0});var D6t=s(zS);KFo=t(D6t,"ViTForImageClassification"),D6t.forEach(r),YFo=t(O4e," (ViT model)"),O4e.forEach(r),Qr.forEach(r),ZFo=i(Lr),B1=n(Lr,"P",{});var q4e=s(B1);eMo=t(q4e,"The model is set in evaluation mode by default using "),_ee=n(q4e,"EM",{});var G6t=s(_ee);oMo=t(G6t,"model.eval()"),G6t.forEach(r),tMo=t(q4e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),vee=n(q4e,"EM",{});var O6t=s(vee);rMo=t(O6t,"model.train()"),O6t.forEach(r),q4e.forEach(r),aMo=i(Lr),bee=n(Lr,"P",{});var q6t=s(bee);nMo=t(q6t,"Examples:"),q6t.forEach(r),sMo=i(Lr),c(DE.$$.fragment,Lr),Lr.forEach(r),$s.forEach(r),tCe=i(d),qi=n(d,"H2",{class:!0});var Z3e=s(qi);k1=n(Z3e,"A",{id:!0,class:!0,href:!0});var z6t=s(k1);Tee=n(z6t,"SPAN",{});var X6t=s(Tee);c(GE.$$.fragment,X6t),X6t.forEach(r),z6t.forEach(r),lMo=i(Z3e),Fee=n(Z3e,"SPAN",{});var W6t=s(Fee);iMo=t(W6t,"AutoModelForVision2Seq"),W6t.forEach(r),Z3e.forEach(r),rCe=i(d),Qo=n(d,"DIV",{class:!0});var js=s(Qo);c(OE.$$.fragment,js),dMo=i(js),zi=n(js,"P",{});var OD=s(zi);mMo=t(OD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a vision-to-text modeling head) when created with the `),Mee=n(OD,"CODE",{});var V6t=s(Mee);fMo=t(V6t,"from_pretrained()"),V6t.forEach(r),cMo=t(OD,` class method or the `),Eee=n(OD,"CODE",{});var Q6t=s(Eee);gMo=t(Q6t,"from_config()"),Q6t.forEach(r),hMo=t(OD," class method."),OD.forEach(r),uMo=i(js),qE=n(js,"P",{});var eye=s(qE);pMo=t(eye,"This class cannot be instantiated directly using "),Cee=n(eye,"CODE",{});var H6t=s(Cee);_Mo=t(H6t,"__init__()"),H6t.forEach(r),vMo=t(eye," (throws an error)."),eye.forEach(r),bMo=i(js),Nt=n(js,"DIV",{class:!0});var Ns=s(Nt);c(zE.$$.fragment,Ns),TMo=i(Ns),yee=n(Ns,"P",{});var U6t=s(yee);FMo=t(U6t,"Instantiates one of the model classes of the library (with a vision-to-text modeling head) from a configuration."),U6t.forEach(r),MMo=i(Ns),Xi=n(Ns,"P",{});var qD=s(Xi);EMo=t(qD,`Note: Loading a model from its configuration file does `),wee=n(qD,"STRONG",{});var J6t=s(wee);CMo=t(J6t,"not"),J6t.forEach(r),yMo=t(qD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Aee=n(qD,"EM",{});var K6t=s(Aee);wMo=t(K6t,"~AutoModelForVision2Seq.from_pretrained"),K6t.forEach(r),AMo=t(qD,`] to load the model weights.`),qD.forEach(r),xMo=i(Ns),xee=n(Ns,"P",{});var Y6t=s(xee);LMo=t(Y6t,"Examples:"),Y6t.forEach(r),BMo=i(Ns),c(XE.$$.fragment,Ns),Ns.forEach(r),kMo=i(js),Ge=n(js,"DIV",{class:!0});var Br=s(Ge);c(WE.$$.fragment,Br),RMo=i(Br),Lee=n(Br,"P",{});var Z6t=s(Lee);SMo=t(Z6t,"Instantiate one of the model classes of the library (with a vision-to-text modeling head) from a pretrained model."),Z6t.forEach(r),PMo=i(Br),$a=n(Br,"P",{});var oF=s($a);$Mo=t(oF,"The model class to instantiate is selected based on the "),Bee=n(oF,"EM",{});var e8t=s(Bee);IMo=t(e8t,"model_type"),e8t.forEach(r),jMo=t(oF,` property of the config object (either passed as an argument or loaded from `),kee=n(oF,"EM",{});var o8t=s(kee);NMo=t(o8t,"pretrained_model_name_or_path"),o8t.forEach(r),DMo=t(oF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ree=n(oF,"EM",{});var t8t=s(Ree);GMo=t(t8t,"pretrained_model_name_or_path"),t8t.forEach(r),OMo=t(oF,":"),oF.forEach(r),qMo=i(Br),See=n(Br,"UL",{});var r8t=s(See);R1=n(r8t,"LI",{});var z4e=s(R1);Pee=n(z4e,"STRONG",{});var a8t=s(Pee);zMo=t(a8t,"vision-encoder-decoder"),a8t.forEach(r),XMo=t(z4e," \u2014 "),XS=n(z4e,"A",{href:!0});var n8t=s(XS);WMo=t(n8t,"VisionEncoderDecoderModel"),n8t.forEach(r),VMo=t(z4e," (Vision Encoder decoder model)"),z4e.forEach(r),r8t.forEach(r),QMo=i(Br),S1=n(Br,"P",{});var X4e=s(S1);HMo=t(X4e,"The model is set in evaluation mode by default using "),$ee=n(X4e,"EM",{});var s8t=s($ee);UMo=t(s8t,"model.eval()"),s8t.forEach(r),JMo=t(X4e,` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Iee=n(X4e,"EM",{});var l8t=s(Iee);KMo=t(l8t,"model.train()"),l8t.forEach(r),X4e.forEach(r),YMo=i(Br),jee=n(Br,"P",{});var i8t=s(jee);ZMo=t(i8t,"Examples:"),i8t.forEach(r),eEo=i(Br),c(VE.$$.fragment,Br),Br.forEach(r),js.forEach(r),aCe=i(d),Wi=n(d,"H2",{class:!0});var oye=s(Wi);P1=n(oye,"A",{id:!0,class:!0,href:!0});var d8t=s(P1);Nee=n(d8t,"SPAN",{});var m8t=s(Nee);c(QE.$$.fragment,m8t),m8t.forEach(r),d8t.forEach(r),oEo=i(oye),Dee=n(oye,"SPAN",{});var f8t=s(Dee);tEo=t(f8t,"AutoModelForAudioClassification"),f8t.forEach(r),oye.forEach(r),nCe=i(d),Ho=n(d,"DIV",{class:!0});var Ds=s(Ho);c(HE.$$.fragment,Ds),rEo=i(Ds),Vi=n(Ds,"P",{});var zD=s(Vi);aEo=t(zD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a audio classification head) when created with the `),Gee=n(zD,"CODE",{});var c8t=s(Gee);nEo=t(c8t,"from_pretrained()"),c8t.forEach(r),sEo=t(zD,` class method or the `),Oee=n(zD,"CODE",{});var g8t=s(Oee);lEo=t(g8t,"from_config()"),g8t.forEach(r),iEo=t(zD," class method."),zD.forEach(r),dEo=i(Ds),UE=n(Ds,"P",{});var tye=s(UE);mEo=t(tye,"This class cannot be instantiated directly using "),qee=n(tye,"CODE",{});var h8t=s(qee);fEo=t(h8t,"__init__()"),h8t.forEach(r),cEo=t(tye," (throws an error)."),tye.forEach(r),gEo=i(Ds),Dt=n(Ds,"DIV",{class:!0});var Gs=s(Dt);c(JE.$$.fragment,Gs),hEo=i(Gs),zee=n(Gs,"P",{});var u8t=s(zee);uEo=t(u8t,"Instantiates one of the model classes of the library (with a audio classification head) from a configuration."),u8t.forEach(r),pEo=i(Gs),Qi=n(Gs,"P",{});var XD=s(Qi);_Eo=t(XD,`Note: Loading a model from its configuration file does `),Xee=n(XD,"STRONG",{});var p8t=s(Xee);vEo=t(p8t,"not"),p8t.forEach(r),bEo=t(XD,` load the model weights. 
It only affects the model\u2019s configuration. Use [`),Wee=n(XD,"EM",{});var _8t=s(Wee);TEo=t(_8t,"~AutoModelForAudioClassification.from_pretrained"),_8t.forEach(r),FEo=t(XD,`] to load the model weights.`),XD.forEach(r),MEo=i(Gs),Vee=n(Gs,"P",{});var v8t=s(Vee);EEo=t(v8t,"Examples:"),v8t.forEach(r),CEo=i(Gs),c(KE.$$.fragment,Gs),Gs.forEach(r),yEo=i(Ds),Oe=n(Ds,"DIV",{class:!0});var kr=s(Oe);c(YE.$$.fragment,kr),wEo=i(kr),Qee=n(kr,"P",{});var b8t=s(Qee);AEo=t(b8t,"Instantiate one of the model classes of the library (with a audio classification head) from a pretrained model."),b8t.forEach(r),xEo=i(kr),Ia=n(kr,"P",{});var tF=s(Ia);LEo=t(tF,"The model class to instantiate is selected based on the "),Hee=n(tF,"EM",{});var T8t=s(Hee);BEo=t(T8t,"model_type"),T8t.forEach(r),kEo=t(tF,` property of the config object (either passed as an argument or loaded from `),Uee=n(tF,"EM",{});var F8t=s(Uee);REo=t(F8t,"pretrained_model_name_or_path"),F8t.forEach(r),SEo=t(tF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Jee=n(tF,"EM",{});var M8t=s(Jee);PEo=t(M8t,"pretrained_model_name_or_path"),M8t.forEach(r),$Eo=t(tF,":"),tF.forEach(r),IEo=i(kr),Ke=n(kr,"UL",{});var Rr=s(Ke);$1=n(Rr,"LI",{});var W4e=s($1);Kee=n(W4e,"STRONG",{});var E8t=s(Kee);jEo=t(E8t,"hubert"),E8t.forEach(r),NEo=t(W4e," \u2014 "),WS=n(W4e,"A",{href:!0});var C8t=s(WS);DEo=t(C8t,"HubertForSequenceClassification"),C8t.forEach(r),GEo=t(W4e," (Hubert model)"),W4e.forEach(r),OEo=i(Rr),I1=n(Rr,"LI",{});var V4e=s(I1);Yee=n(V4e,"STRONG",{});var y8t=s(Yee);qEo=t(y8t,"sew"),y8t.forEach(r),zEo=t(V4e," \u2014 "),VS=n(V4e,"A",{href:!0});var w8t=s(VS);XEo=t(w8t,"SEWForSequenceClassification"),w8t.forEach(r),WEo=t(V4e," (SEW model)"),V4e.forEach(r),VEo=i(Rr),j1=n(Rr,"LI",{});var Q4e=s(j1);Zee=n(Q4e,"STRONG",{});var A8t=s(Zee);QEo=t(A8t,"sew-d"),A8t.forEach(r),HEo=t(Q4e," \u2014 "),QS=n(Q4e,"A",{href:!0});var x8t=s(QS);UEo=t(x8t,"SEWDForSequenceClassification"),x8t.forEach(r),JEo=t(Q4e," (SEW-D model)"),Q4e.forEach(r),KEo=i(Rr),N1=n(Rr,"LI",{});var H4e=s(N1);eoe=n(H4e,"STRONG",{});var L8t=s(eoe);YEo=t(L8t,"unispeech"),L8t.forEach(r),ZEo=t(H4e," \u2014 "),HS=n(H4e,"A",{href:!0});var B8t=s(HS);eCo=t(B8t,"UniSpeechForSequenceClassification"),B8t.forEach(r),oCo=t(H4e," (UniSpeech model)"),H4e.forEach(r),tCo=i(Rr),D1=n(Rr,"LI",{});var U4e=s(D1);ooe=n(U4e,"STRONG",{});var k8t=s(ooe);rCo=t(k8t,"unispeech-sat"),k8t.forEach(r),aCo=t(U4e," \u2014 "),US=n(U4e,"A",{href:!0});var R8t=s(US);nCo=t(R8t,"UniSpeechSatForSequenceClassification"),R8t.forEach(r),sCo=t(U4e," (UniSpeechSat model)"),U4e.forEach(r),lCo=i(Rr),G1=n(Rr,"LI",{});var J4e=s(G1);toe=n(J4e,"STRONG",{});var S8t=s(toe);iCo=t(S8t,"wav2vec2"),S8t.forEach(r),dCo=t(J4e," \u2014 "),JS=n(J4e,"A",{href:!0});var P8t=s(JS);mCo=t(P8t,"Wav2Vec2ForSequenceClassification"),P8t.forEach(r),fCo=t(J4e," (Wav2Vec2 model)"),J4e.forEach(r),cCo=i(Rr),O1=n(Rr,"LI",{});var K4e=s(O1);roe=n(K4e,"STRONG",{});var $8t=s(roe);gCo=t($8t,"wavlm"),$8t.forEach(r),hCo=t(K4e," \u2014 "),KS=n(K4e,"A",{href:!0});var I8t=s(KS);uCo=t(I8t,"WavLMForSequenceClassification"),I8t.forEach(r),pCo=t(K4e," (WavLM model)"),K4e.forEach(r),Rr.forEach(r),_Co=i(kr),q1=n(kr,"P",{});var Y4e=s(q1);vCo=t(Y4e,"The model is set in evaluation mode by default using "),aoe=n(Y4e,"EM",{});var j8t=s(aoe);bCo=t(j8t,"model.eval()"),j8t.forEach(r),TCo=t(Y4e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),noe=n(Y4e,"EM",{});var N8t=s(noe);FCo=t(N8t,"model.train()"),N8t.forEach(r),Y4e.forEach(r),MCo=i(kr),soe=n(kr,"P",{});var D8t=s(soe);ECo=t(D8t,"Examples:"),D8t.forEach(r),CCo=i(kr),c(ZE.$$.fragment,kr),kr.forEach(r),Ds.forEach(r),sCe=i(d),Hi=n(d,"H2",{class:!0});var rye=s(Hi);z1=n(rye,"A",{id:!0,class:!0,href:!0});var G8t=s(z1);loe=n(G8t,"SPAN",{});var O8t=s(loe);c(eC.$$.fragment,O8t),O8t.forEach(r),G8t.forEach(r),yCo=i(rye),ioe=n(rye,"SPAN",{});var q8t=s(ioe);wCo=t(q8t,"AutoModelForAudioFrameClassification"),q8t.forEach(r),rye.forEach(r),lCe=i(d),Uo=n(d,"DIV",{class:!0});var Os=s(Uo);c(oC.$$.fragment,Os),ACo=i(Os),Ui=n(Os,"P",{});var WD=s(Ui);xCo=t(WD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a audio frame (token) classification head) when created with the `),doe=n(WD,"CODE",{});var z8t=s(doe);LCo=t(z8t,"from_pretrained()"),z8t.forEach(r),BCo=t(WD,` class method or the `),moe=n(WD,"CODE",{});var X8t=s(moe);kCo=t(X8t,"from_config()"),X8t.forEach(r),RCo=t(WD," class method."),WD.forEach(r),SCo=i(Os),tC=n(Os,"P",{});var aye=s(tC);PCo=t(aye,"This class cannot be instantiated directly using "),foe=n(aye,"CODE",{});var W8t=s(foe);$Co=t(W8t,"__init__()"),W8t.forEach(r),ICo=t(aye," (throws an error)."),aye.forEach(r),jCo=i(Os),Gt=n(Os,"DIV",{class:!0});var qs=s(Gt);c(rC.$$.fragment,qs),NCo=i(qs),coe=n(qs,"P",{});var V8t=s(coe);DCo=t(V8t,"Instantiates one of the model classes of the library (with a audio frame (token) classification head) from a configuration."),V8t.forEach(r),GCo=i(qs),Ji=n(qs,"P",{});var VD=s(Ji);OCo=t(VD,`Note: Loading a model from its configuration file does `),goe=n(VD,"STRONG",{});var Q8t=s(goe);qCo=t(Q8t,"not"),Q8t.forEach(r),zCo=t(VD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),hoe=n(VD,"EM",{});var H8t=s(hoe);XCo=t(H8t,"~AutoModelForAudioFrameClassification.from_pretrained"),H8t.forEach(r),WCo=t(VD,`] to load the model weights.`),VD.forEach(r),VCo=i(qs),uoe=n(qs,"P",{});var U8t=s(uoe);QCo=t(U8t,"Examples:"),U8t.forEach(r),HCo=i(qs),c(aC.$$.fragment,qs),qs.forEach(r),UCo=i(Os),qe=n(Os,"DIV",{class:!0});var Sr=s(qe);c(nC.$$.fragment,Sr),JCo=i(Sr),poe=n(Sr,"P",{});var J8t=s(poe);KCo=t(J8t,"Instantiate one of the model classes of the library (with a audio frame (token) classification head) from a pretrained model."),J8t.forEach(r),YCo=i(Sr),ja=n(Sr,"P",{});var rF=s(ja);ZCo=t(rF,"The model class to instantiate is selected based on the "),_oe=n(rF,"EM",{});var K8t=s(_oe);e3o=t(K8t,"model_type"),K8t.forEach(r),o3o=t(rF,` property of the config object (either passed as an argument or loaded from `),voe=n(rF,"EM",{});var Y8t=s(voe);t3o=t(Y8t,"pretrained_model_name_or_path"),Y8t.forEach(r),r3o=t(rF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),boe=n(rF,"EM",{});var Z8t=s(boe);a3o=t(Z8t,"pretrained_model_name_or_path"),Z8t.forEach(r),n3o=t(rF,":"),rF.forEach(r),s3o=i(Sr),Ki=n(Sr,"UL",{});var QD=s(Ki);X1=n(QD,"LI",{});var Z4e=s(X1);Toe=n(Z4e,"STRONG",{});var eLt=s(Toe);l3o=t(eLt,"unispeech-sat"),eLt.forEach(r),i3o=t(Z4e," \u2014 "),YS=n(Z4e,"A",{href:!0});var oLt=s(YS);d3o=t(oLt,"UniSpeechSatForAudioFrameClassification"),oLt.forEach(r),m3o=t(Z4e," (UniSpeechSat model)"),Z4e.forEach(r),f3o=i(QD),W1=n(QD,"LI",{});var e5e=s(W1);Foe=n(e5e,"STRONG",{});var tLt=s(Foe);c3o=t(tLt,"wav2vec2"),tLt.forEach(r),g3o=t(e5e," \u2014 "),ZS=n(e5e,"A",{href:!0});var rLt=s(ZS);h3o=t(rLt,"Wav2Vec2ForAudioFrameClassification"),rLt.forEach(r),u3o=t(e5e," (Wav2Vec2 model)"),e5e.forEach(r),p3o=i(QD),V1=n(QD,"LI",{});var o5e=s(V1);Moe=n(o5e,"STRONG",{});var aLt=s(Moe);_3o=t(aLt,"wavlm"),aLt.forEach(r),v3o=t(o5e," \u2014 "),eP=n(o5e,"A",{href:!0});var nLt=s(eP);b3o=t(nLt,"WavLMForAudioFrameClassification"),nLt.forEach(r),T3o=t(o5e," (WavLM model)"),o5e.forEach(r),QD.forEach(r),F3o=i(Sr),Q1=n(Sr,"P",{});var t5e=s(Q1);M3o=t(t5e,"The model is set in evaluation mode by default using "),Eoe=n(t5e,"EM",{});var sLt=s(Eoe);E3o=t(sLt,"model.eval()"),sLt.forEach(r),C3o=t(t5e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),Coe=n(t5e,"EM",{});var lLt=s(Coe);y3o=t(lLt,"model.train()"),lLt.forEach(r),t5e.forEach(r),w3o=i(Sr),yoe=n(Sr,"P",{});var iLt=s(yoe);A3o=t(iLt,"Examples:"),iLt.forEach(r),x3o=i(Sr),c(sC.$$.fragment,Sr),Sr.forEach(r),Os.forEach(r),iCe=i(d),Yi=n(d,"H2",{class:!0});var nye=s(Yi);H1=n(nye,"A",{id:!0,class:!0,href:!0});var dLt=s(H1);woe=n(dLt,"SPAN",{});var mLt=s(woe);c(lC.$$.fragment,mLt),mLt.forEach(r),dLt.forEach(r),L3o=i(nye),Aoe=n(nye,"SPAN",{});var fLt=s(Aoe);B3o=t(fLt,"AutoModelForCTC"),fLt.forEach(r),nye.forEach(r),dCe=i(d),Jo=n(d,"DIV",{class:!0});var zs=s(Jo);c(iC.$$.fragment,zs),k3o=i(zs),Zi=n(zs,"P",{});var HD=s(Zi);R3o=t(HD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a connectionist temporal classification head) when created with the `),xoe=n(HD,"CODE",{});var cLt=s(xoe);S3o=t(cLt,"from_pretrained()"),cLt.forEach(r),P3o=t(HD,` class method or the `),Loe=n(HD,"CODE",{});var gLt=s(Loe);$3o=t(gLt,"from_config()"),gLt.forEach(r),I3o=t(HD," class method."),HD.forEach(r),j3o=i(zs),dC=n(zs,"P",{});var sye=s(dC);N3o=t(sye,"This class cannot be instantiated directly using "),Boe=n(sye,"CODE",{});var hLt=s(Boe);D3o=t(hLt,"__init__()"),hLt.forEach(r),G3o=t(sye," (throws an error)."),sye.forEach(r),O3o=i(zs),Ot=n(zs,"DIV",{class:!0});var Xs=s(Ot);c(mC.$$.fragment,Xs),q3o=i(Xs),koe=n(Xs,"P",{});var uLt=s(koe);z3o=t(uLt,"Instantiates one of the model classes of the library (with a connectionist temporal classification head) from a configuration."),uLt.forEach(r),X3o=i(Xs),ed=n(Xs,"P",{});var UD=s(ed);W3o=t(UD,`Note: Loading a model from its configuration file does `),Roe=n(UD,"STRONG",{});var pLt=s(Roe);V3o=t(pLt,"not"),pLt.forEach(r),Q3o=t(UD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Soe=n(UD,"EM",{});var _Lt=s(Soe);H3o=t(_Lt,"~AutoModelForCTC.from_pretrained"),_Lt.forEach(r),U3o=t(UD,`] to load the model weights.`),UD.forEach(r),J3o=i(Xs),Poe=n(Xs,"P",{});var vLt=s(Poe);K3o=t(vLt,"Examples:"),vLt.forEach(r),Y3o=i(Xs),c(fC.$$.fragment,Xs),Xs.forEach(r),Z3o=i(zs),ze=n(zs,"DIV",{class:!0});var Pr=s(ze);c(cC.$$.fragment,Pr),eyo=i(Pr),$oe=n(Pr,"P",{});var bLt=s($oe);oyo=t(bLt,"Instantiate one of the model classes of the library (with a connectionist temporal classification head) from a pretrained model."),bLt.forEach(r),tyo=i(Pr),Na=n(Pr,"P",{});var aF=s(Na);ryo=t(aF,"The model class to instantiate is selected based on the "),Ioe=n(aF,"EM",{});var TLt=s(Ioe);ayo=t(TLt,"model_type"),TLt.forEach(r),nyo=t(aF,` property of the config object (either passed as an argument or loaded from `),joe=n(aF,"EM",{});var FLt=s(joe);syo=t(FLt,"pretrained_model_name_or_path"),FLt.forEach(r),lyo=t(aF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Noe=n(aF,"EM",{});var MLt=s(Noe);iyo=t(MLt,"pretrained_model_name_or_path"),MLt.forEach(r),dyo=t(aF,":"),aF.forEach(r),myo=i(Pr),Ye=n(Pr,"UL",{});var $r=s(Ye);U1=n($r,"LI",{});var r5e=s(U1);Doe=n(r5e,"STRONG",{});var ELt=s(Doe);fyo=t(ELt,"hubert"),ELt.forEach(r),cyo=t(r5e," \u2014 "),oP=n(r5e,"A",{href:!0});var CLt=s(oP);gyo=t(CLt,"HubertForCTC"),CLt.forEach(r),hyo=t(r5e," (Hubert model)"),r5e.forEach(r),uyo=i($r),J1=n($r,"LI",{});var a5e=s(J1);Goe=n(a5e,"STRONG",{});var yLt=s(Goe);pyo=t(yLt,"sew"),yLt.forEach(r),_yo=t(a5e," \u2014 "),tP=n(a5e,"A",{href:!0});var wLt=s(tP);vyo=t(wLt,"SEWForCTC"),wLt.forEach(r),byo=t(a5e," (SEW model)"),a5e.forEach(r),Tyo=i($r),K1=n($r,"LI",{});var n5e=s(K1);Ooe=n(n5e,"STRONG",{});var ALt=s(Ooe);Fyo=t(ALt,"sew-d"),ALt.forEach(r),Myo=t(n5e," \u2014 "),rP=n(n5e,"A",{href:!0});var xLt=s(rP);Eyo=t(xLt,"SEWDForCTC"),xLt.forEach(r),Cyo=t(n5e," (SEW-D model)"),n5e.forEach(r),yyo=i($r),Y1=n($r,"LI",{});var s5e=s(Y1);qoe=n(s5e,"STRONG",{});var LLt=s(qoe);wyo=t(LLt,"unispeech"),LLt.forEach(r),Ayo=t(s5e," \u2014 "),aP=n(s5e,"A",{href:!0});var BLt=s(aP);xyo=t(BLt,"UniSpeechForCTC"),BLt.forEach(r),Lyo=t(s5e," (UniSpeech model)"),s5e.forEach(r),Byo=i($r),Z1=n($r,"LI",{});var l5e=s(Z1);zoe=n(l5e,"STRONG",{});var kLt=s(zoe);kyo=t(kLt,"unispeech-sat"),kLt.forEach(r),Ryo=t(l5e," \u2014 "),nP=n(l5e,"A",{href:!0});var RLt=s(nP);Syo=t(RLt,"UniSpeechSatForCTC"),RLt.forEach(r),Pyo=t(l5e," (UniSpeechSat model)"),l5e.forEach(r),$yo=i($r),e2=n($r,"LI",{});var i5e=s(e2);Xoe=n(i5e,"STRONG",{});var SLt=s(Xoe);Iyo=t(SLt,"wav2vec2"),SLt.forEach(r),jyo=t(i5e," \u2014 "),sP=n(i5e,"A",{href:!0});var PLt=s(sP);Nyo=t(PLt,"Wav2Vec2ForCTC"),PLt.forEach(r),Dyo=t(i5e," (Wav2Vec2 model)"),i5e.forEach(r),Gyo=i($r),o2=n($r,"LI",{});var d5e=s(o2);Woe=n(d5e,"STRONG",{});var $Lt=s(Woe);Oyo=t($Lt,"wavlm"),$Lt.forEach(r),qyo=t(d5e," \u2014 "),lP=n(d5e,"A",{href:!0});var ILt=s(lP);zyo=t(ILt,"WavLMForCTC"),ILt.forEach(r),Xyo=t(d5e," (WavLM model)"),d5e.forEach(r),$r.forEach(r),Wyo=i(Pr),t2=n(Pr,"P",{});var m5e=s(t2);Vyo=t(m5e,"The model is set in evaluation mode by default using "),Voe=n(m5e,"EM",{});var jLt=s(Voe);Qyo=t(jLt,"model.eval()"),jLt.forEach(r),Hyo=t(m5e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),Qoe=n(m5e,"EM",{});var NLt=s(Qoe);Uyo=t(NLt,"model.train()"),NLt.forEach(r),m5e.forEach(r),Jyo=i(Pr),Hoe=n(Pr,"P",{});var DLt=s(Hoe);Kyo=t(DLt,"Examples:"),DLt.forEach(r),Yyo=i(Pr),c(gC.$$.fragment,Pr),Pr.forEach(r),zs.forEach(r),mCe=i(d),od=n(d,"H2",{class:!0});var lye=s(od);r2=n(lye,"A",{id:!0,class:!0,href:!0});var GLt=s(r2);Uoe=n(GLt,"SPAN",{});var OLt=s(Uoe);c(hC.$$.fragment,OLt),OLt.forEach(r),GLt.forEach(r),Zyo=i(lye),Joe=n(lye,"SPAN",{});var qLt=s(Joe);ewo=t(qLt,"AutoModelForSpeechSeq2Seq"),qLt.forEach(r),lye.forEach(r),fCe=i(d),Ko=n(d,"DIV",{class:!0});var Ws=s(Ko);c(uC.$$.fragment,Ws),owo=i(Ws),td=n(Ws,"P",{});var JD=s(td);two=t(JD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence speech-to-text modeing head) when created with the `),Koe=n(JD,"CODE",{});var zLt=s(Koe);rwo=t(zLt,"from_pretrained()"),zLt.forEach(r),awo=t(JD,` class method or the `),Yoe=n(JD,"CODE",{});var XLt=s(Yoe);nwo=t(XLt,"from_config()"),XLt.forEach(r),swo=t(JD," class method."),JD.forEach(r),lwo=i(Ws),pC=n(Ws,"P",{});var iye=s(pC);iwo=t(iye,"This class cannot be instantiated directly using "),Zoe=n(iye,"CODE",{});var WLt=s(Zoe);dwo=t(WLt,"__init__()"),WLt.forEach(r),mwo=t(iye," (throws an error)."),iye.forEach(r),fwo=i(Ws),qt=n(Ws,"DIV",{class:!0});var Vs=s(qt);c(_C.$$.fragment,Vs),cwo=i(Vs),ete=n(Vs,"P",{});var VLt=s(ete);gwo=t(VLt,"Instantiates one of the model classes of the library (with a sequence-to-sequence speech-to-text modeing head) from a configuration."),VLt.forEach(r),hwo=i(Vs),rd=n(Vs,"P",{});var KD=s(rd);uwo=t(KD,`Note: Loading a model from its configuration file does `),ote=n(KD,"STRONG",{});var QLt=s(ote);pwo=t(QLt,"not"),QLt.forEach(r),_wo=t(KD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),tte=n(KD,"EM",{});var HLt=s(tte);vwo=t(HLt,"~AutoModelForSpeechSeq2Seq.from_pretrained"),HLt.forEach(r),bwo=t(KD,`] to load the model weights.`),KD.forEach(r),Two=i(Vs),rte=n(Vs,"P",{});var ULt=s(rte);Fwo=t(ULt,"Examples:"),ULt.forEach(r),Mwo=i(Vs),c(vC.$$.fragment,Vs),Vs.forEach(r),Ewo=i(Ws),Xe=n(Ws,"DIV",{class:!0});var Ir=s(Xe);c(bC.$$.fragment,Ir),Cwo=i(Ir),ate=n(Ir,"P",{});var JLt=s(ate);ywo=t(JLt,"Instantiate one of the model classes of the library (with a sequence-to-sequence speech-to-text modeing head) from a pretrained model."),JLt.forEach(r),wwo=i(Ir),Da=n(Ir,"P",{});var nF=s(Da);Awo=t(nF,"The model class to instantiate is selected based on the "),nte=n(nF,"EM",{});var KLt=s(nte);xwo=t(KLt,"model_type"),KLt.forEach(r),Lwo=t(nF,` property of the config object (either passed as an argument or loaded from `),ste=n(nF,"EM",{});var YLt=s(ste);Bwo=t(YLt,"pretrained_model_name_or_path"),YLt.forEach(r),kwo=t(nF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),lte=n(nF,"EM",{});var ZLt=s(lte);Rwo=t(ZLt,"pretrained_model_name_or_path"),ZLt.forEach(r),Swo=t(nF,":"),nF.forEach(r),Pwo=i(Ir),TC=n(Ir,"UL",{});var dye=s(TC);a2=n(dye,"LI",{});var f5e=s(a2);ite=n(f5e,"STRONG",{});var eBt=s(ite);$wo=t(eBt,"speech-encoder-decoder"),eBt.forEach(r),Iwo=t(f5e," \u2014 "),iP=n(f5e,"A",{href:!0});var oBt=s(iP);jwo=t(oBt,"SpeechEncoderDecoderModel"),oBt.forEach(r),Nwo=t(f5e," (Speech Encoder decoder model)"),f5e.forEach(r),Dwo=i(dye),n2=n(dye,"LI",{});var c5e=s(n2);dte=n(c5e,"STRONG",{});var tBt=s(dte);Gwo=t(tBt,"speech_to_text"),tBt.forEach(r),Owo=t(c5e," \u2014 "),dP=n(c5e,"A",{href:!0});var rBt=s(dP);qwo=t(rBt,"Speech2TextForConditionalGeneration"),rBt.forEach(r),zwo=t(c5e," (Speech2Text model)"),c5e.forEach(r),dye.forEach(r),Xwo=i(Ir),s2=n(Ir,"P",{});var g5e=s(s2);Wwo=t(g5e,"The model is set in evaluation mode by default using "),mte=n(g5e,"EM",{});var aBt=s(mte);Vwo=t(aBt,"model.eval()"),aBt.forEach(r),Qwo=t(g5e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),fte=n(g5e,"EM",{});var nBt=s(fte);Hwo=t(nBt,"model.train()"),nBt.forEach(r),g5e.forEach(r),Uwo=i(Ir),cte=n(Ir,"P",{});var sBt=s(cte);Jwo=t(sBt,"Examples:"),sBt.forEach(r),Kwo=i(Ir),c(FC.$$.fragment,Ir),Ir.forEach(r),Ws.forEach(r),cCe=i(d),ad=n(d,"H2",{class:!0});var mye=s(ad);l2=n(mye,"A",{id:!0,class:!0,href:!0});var lBt=s(l2);gte=n(lBt,"SPAN",{});var iBt=s(gte);c(MC.$$.fragment,iBt),iBt.forEach(r),lBt.forEach(r),Ywo=i(mye),hte=n(mye,"SPAN",{});var dBt=s(hte);Zwo=t(dBt,"AutoModelForAudioXVector"),dBt.forEach(r),mye.forEach(r),gCe=i(d),Yo=n(d,"DIV",{class:!0});var Qs=s(Yo);c(EC.$$.fragment,Qs),eAo=i(Qs),nd=n(Qs,"P",{});var YD=s(nd);oAo=t(YD,`This is a generic model class that will be instantiated as one of the model classes of the library (with a audio retrieval via x-vector head) when created with the `),ute=n(YD,"CODE",{});var mBt=s(ute);tAo=t(mBt,"from_pretrained()"),mBt.forEach(r),rAo=t(YD,` class method or the `),pte=n(YD,"CODE",{});var fBt=s(pte);aAo=t(fBt,"from_config()"),fBt.forEach(r),nAo=t(YD," class method."),YD.forEach(r),sAo=i(Qs),CC=n(Qs,"P",{});var fye=s(CC);lAo=t(fye,"This class cannot be instantiated directly using "),_te=n(fye,"CODE",{});var cBt=s(_te);iAo=t(cBt,"__init__()"),cBt.forEach(r),dAo=t(fye," (throws an error)."),fye.forEach(r),mAo=i(Qs),zt=n(Qs,"DIV",{class:!0});var Hs=s(zt);c(yC.$$.fragment,Hs),fAo=i(Hs),vte=n(Hs,"P",{});var gBt=s(vte);cAo=t(gBt,"Instantiates one of the model classes of the library (with a audio retrieval via x-vector head) from a configuration."),gBt.forEach(r),gAo=i(Hs),sd=n(Hs,"P",{});var ZD=s(sd);hAo=t(ZD,`Note: Loading a model from its configuration file does `),bte=n(ZD,"STRONG",{});var hBt=s(bte);uAo=t(hBt,"not"),hBt.forEach(r),pAo=t(ZD,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Tte=n(ZD,"EM",{});var uBt=s(Tte);_Ao=t(uBt,"~AutoModelForAudioXVector.from_pretrained"),uBt.forEach(r),vAo=t(ZD,`] to load the model weights.`),ZD.forEach(r),bAo=i(Hs),Fte=n(Hs,"P",{});var pBt=s(Fte);TAo=t(pBt,"Examples:"),pBt.forEach(r),FAo=i(Hs),c(wC.$$.fragment,Hs),Hs.forEach(r),MAo=i(Qs),We=n(Qs,"DIV",{class:!0});var jr=s(We);c(AC.$$.fragment,jr),EAo=i(jr),Mte=n(jr,"P",{});var _Bt=s(Mte);CAo=t(_Bt,"Instantiate one of the model classes of the library (with a audio retrieval via x-vector head) from a pretrained model."),_Bt.forEach(r),yAo=i(jr),Ga=n(jr,"P",{});var sF=s(Ga);wAo=t(sF,"The model class to instantiate is selected based on the "),Ete=n(sF,"EM",{});var vBt=s(Ete);AAo=t(vBt,"model_type"),vBt.forEach(r),xAo=t(sF,` property of the config object (either passed as an argument or loaded from `),Cte=n(sF,"EM",{});var bBt=s(Cte);LAo=t(bBt,"pretrained_model_name_or_path"),bBt.forEach(r),BAo=t(sF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),yte=n(sF,"EM",{});var TBt=s(yte);kAo=t(TBt,"pretrained_model_name_or_path"),TBt.forEach(r),RAo=t(sF,":"),sF.forEach(r),SAo=i(jr),ld=n(jr,"UL",{});var eG=s(ld);i2=n(eG,"LI",{});var h5e=s(i2);wte=n(h5e,"STRONG",{});var FBt=s(wte);PAo=t(FBt,"unispeech-sat"),FBt.forEach(r),$Ao=t(h5e," \u2014 "),mP=n(h5e,"A",{href:!0});var MBt=s(mP);IAo=t(MBt,"UniSpeechSatForXVector"),MBt.forEach(r),jAo=t(h5e," (UniSpeechSat model)"),h5e.forEach(r),NAo=i(eG),d2=n(eG,"LI",{});var u5e=s(d2);Ate=n(u5e,"STRONG",{});var EBt=s(Ate);DAo=t(EBt,"wav2vec2"),EBt.forEach(r),GAo=t(u5e," \u2014 "),fP=n(u5e,"A",{href:!0});var CBt=s(fP);OAo=t(CBt,"Wav2Vec2ForXVector"),CBt.forEach(r),qAo=t(u5e," (Wav2Vec2 model)"),u5e.forEach(r),zAo=i(eG),m2=n(eG,"LI",{});var p5e=s(m2);xte=n(p5e,"STRONG",{});var yBt=s(xte);XAo=t(yBt,"wavlm"),yBt.forEach(r),WAo=t(p5e," \u2014 "),cP=n(p5e,"A",{href:!0});var wBt=s(cP);VAo=t(wBt,"WavLMForXVector"),wBt.forEach(r),QAo=t(p5e," (WavLM model)"),p5e.forEach(r),eG.forEach(r),HAo=i(jr),f2=n(jr,"P",{});var _5e=s(f2);UAo=t(_5e,"The model is set in evaluation mode by default using "),Lte=n(_5e,"EM",{});var ABt=s(Lte);JAo=t(ABt,"model.eval()"),ABt.forEach(r),KAo=t(_5e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),Bte=n(_5e,"EM",{});var xBt=s(Bte);YAo=t(xBt,"model.train()"),xBt.forEach(r),_5e.forEach(r),ZAo=i(jr),kte=n(jr,"P",{});var LBt=s(kte);e7o=t(LBt,"Examples:"),LBt.forEach(r),o7o=i(jr),c(xC.$$.fragment,jr),jr.forEach(r),Qs.forEach(r),hCe=i(d),id=n(d,"H2",{class:!0});var cye=s(id);c2=n(cye,"A",{id:!0,class:!0,href:!0});var BBt=s(c2);Rte=n(BBt,"SPAN",{});var kBt=s(Rte);c(LC.$$.fragment,kBt),kBt.forEach(r),BBt.forEach(r),t7o=i(cye),Ste=n(cye,"SPAN",{});var RBt=s(Ste);r7o=t(RBt,"AutoModelForObjectDetection"),RBt.forEach(r),cye.forEach(r),uCe=i(d),Zo=n(d,"DIV",{class:!0});var Us=s(Zo);c(BC.$$.fragment,Us),a7o=i(Us),dd=n(Us,"P",{});var oG=s(dd);n7o=t(oG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a object detection head) when created with the `),Pte=n(oG,"CODE",{});var SBt=s(Pte);s7o=t(SBt,"from_pretrained()"),SBt.forEach(r),l7o=t(oG,` class method or the `),$te=n(oG,"CODE",{});var PBt=s($te);i7o=t(PBt,"from_config()"),PBt.forEach(r),d7o=t(oG," class method."),oG.forEach(r),m7o=i(Us),kC=n(Us,"P",{});var gye=s(kC);f7o=t(gye,"This class cannot be instantiated directly using "),Ite=n(gye,"CODE",{});var $Bt=s(Ite);c7o=t($Bt,"__init__()"),$Bt.forEach(r),g7o=t(gye," (throws an error)."),gye.forEach(r),h7o=i(Us),Xt=n(Us,"DIV",{class:!0});var Js=s(Xt);c(RC.$$.fragment,Js),u7o=i(Js),jte=n(Js,"P",{});var IBt=s(jte);p7o=t(IBt,"Instantiates one of the model classes of the library (with a object detection head) from a configuration."),IBt.forEach(r),_7o=i(Js),md=n(Js,"P",{});var tG=s(md);v7o=t(tG,`Note: Loading a model from its configuration file does `),Nte=n(tG,"STRONG",{});var jBt=s(Nte);b7o=t(jBt,"not"),jBt.forEach(r),T7o=t(tG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Dte=n(tG,"EM",{});var NBt=s(Dte);F7o=t(NBt,"~AutoModelForObjectDetection.from_pretrained"),NBt.forEach(r),M7o=t(tG,`] to load the model weights.`),tG.forEach(r),E7o=i(Js),Gte=n(Js,"P",{});var DBt=s(Gte);C7o=t(DBt,"Examples:"),DBt.forEach(r),y7o=i(Js),c(SC.$$.fragment,Js),Js.forEach(r),w7o=i(Us),Ve=n(Us,"DIV",{class:!0});var Nr=s(Ve);c(PC.$$.fragment,Nr),A7o=i(Nr),Ote=n(Nr,"P",{});var GBt=s(Ote);x7o=t(GBt,"Instantiate one of the model classes of the library (with a object detection head) from a pretrained model."),GBt.forEach(r),L7o=i(Nr),Oa=n(Nr,"P",{});var lF=s(Oa);B7o=t(lF,"The model class to instantiate is selected based on the "),qte=n(lF,"EM",{});var OBt=s(qte);k7o=t(OBt,"model_type"),OBt.forEach(r),R7o=t(lF,` property of the config object (either passed as an argument or loaded from `),zte=n(lF,"EM",{});var qBt=s(zte);S7o=t(qBt,"pretrained_model_name_or_path"),qBt.forEach(r),P7o=t(lF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Xte=n(lF,"EM",{});var zBt=s(Xte);$7o=t(zBt,"pretrained_model_name_or_path"),zBt.forEach(r),I7o=t(lF,":"),lF.forEach(r),j7o=i(Nr),Wte=n(Nr,"UL",{});var XBt=s(Wte);g2=n(XBt,"LI",{});var v5e=s(g2);Vte=n(v5e,"STRONG",{});var WBt=s(Vte);N7o=t(WBt,"detr"),WBt.forEach(r),D7o=t(v5e," \u2014 "),gP=n(v5e,"A",{href:!0});var VBt=s(gP);G7o=t(VBt,"DetrForObjectDetection"),VBt.forEach(r),O7o=t(v5e," (DETR model)"),v5e.forEach(r),XBt.forEach(r),q7o=i(Nr),h2=n(Nr,"P",{});var b5e=s(h2);z7o=t(b5e,"The model is set in evaluation mode by default using "),Qte=n(b5e,"EM",{});var QBt=s(Qte);X7o=t(QBt,"model.eval()"),QBt.forEach(r),W7o=t(b5e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),Hte=n(b5e,"EM",{});var HBt=s(Hte);V7o=t(HBt,"model.train()"),HBt.forEach(r),b5e.forEach(r),Q7o=i(Nr),Ute=n(Nr,"P",{});var UBt=s(Ute);H7o=t(UBt,"Examples:"),UBt.forEach(r),U7o=i(Nr),c($C.$$.fragment,Nr),Nr.forEach(r),Us.forEach(r),pCe=i(d),fd=n(d,"H2",{class:!0});var hye=s(fd);u2=n(hye,"A",{id:!0,class:!0,href:!0});var JBt=s(u2);Jte=n(JBt,"SPAN",{});var KBt=s(Jte);c(IC.$$.fragment,KBt),KBt.forEach(r),JBt.forEach(r),J7o=i(hye),Kte=n(hye,"SPAN",{});var YBt=s(Kte);K7o=t(YBt,"AutoModelForImageSegmentation"),YBt.forEach(r),hye.forEach(r),_Ce=i(d),et=n(d,"DIV",{class:!0});var Ks=s(et);c(jC.$$.fragment,Ks),Y7o=i(Ks),cd=n(Ks,"P",{});var rG=s(cd);Z7o=t(rG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a image segmentation head) when created with the `),Yte=n(rG,"CODE",{});var ZBt=s(Yte);exo=t(ZBt,"from_pretrained()"),ZBt.forEach(r),oxo=t(rG,` class method or the `),Zte=n(rG,"CODE",{});var e9t=s(Zte);txo=t(e9t,"from_config()"),e9t.forEach(r),rxo=t(rG," class method."),rG.forEach(r),axo=i(Ks),NC=n(Ks,"P",{});var uye=s(NC);nxo=t(uye,"This class cannot be instantiated directly using "),ere=n(uye,"CODE",{});var o9t=s(ere);sxo=t(o9t,"__init__()"),o9t.forEach(r),lxo=t(uye," (throws an error)."),uye.forEach(r),ixo=i(Ks),Wt=n(Ks,"DIV",{class:!0});var Ys=s(Wt);c(DC.$$.fragment,Ys),dxo=i(Ys),ore=n(Ys,"P",{});var t9t=s(ore);mxo=t(t9t,"Instantiates one of the model classes of the library (with a image segmentation head) from a configuration."),t9t.forEach(r),fxo=i(Ys),gd=n(Ys,"P",{});var aG=s(gd);cxo=t(aG,`Note: Loading a model from its configuration file does `),tre=n(aG,"STRONG",{});var r9t=s(tre);gxo=t(r9t,"not"),r9t.forEach(r),hxo=t(aG,` load the model weights. It only affects the model\u2019s configuration. Use [`),rre=n(aG,"EM",{});var a9t=s(rre);uxo=t(a9t,"~AutoModelForImageSegmentation.from_pretrained"),a9t.forEach(r),pxo=t(aG,`] to load the model weights.`),aG.forEach(r),_xo=i(Ys),are=n(Ys,"P",{});var n9t=s(are);vxo=t(n9t,"Examples:"),n9t.forEach(r),bxo=i(Ys),c(GC.$$.fragment,Ys),Ys.forEach(r),Txo=i(Ks),Qe=n(Ks,"DIV",{class:!0});var Dr=s(Qe);c(OC.$$.fragment,Dr),Fxo=i(Dr),nre=n(Dr,"P",{});var s9t=s(nre);Mxo=t(s9t,"Instantiate one of the model classes of the library (with a image segmentation head) from a pretrained model."),s9t.forEach(r),Exo=i(Dr),qa=n(Dr,"P",{});var iF=s(qa);Cxo=t(iF,"The model class to instantiate is selected based on the "),sre=n(iF,"EM",{});var l9t=s(sre);yxo=t(l9t,"model_type"),l9t.forEach(r),wxo=t(iF,` property of the config object (either passed as an argument or loaded from `),lre=n(iF,"EM",{});var i9t=s(lre);Axo=t(i9t,"pretrained_model_name_or_path"),i9t.forEach(r),xxo=t(iF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),ire=n(iF,"EM",{});var d9t=s(ire);Lxo=t(d9t,"pretrained_model_name_or_path"),d9t.forEach(r),Bxo=t(iF,":"),iF.forEach(r),kxo=i(Dr),dre=n(Dr,"UL",{});var m9t=s(dre);p2=n(m9t,"LI",{});var T5e=s(p2);mre=n(T5e,"STRONG",{});var f9t=s(mre);Rxo=t(f9t,"detr"),f9t.forEach(r),Sxo=t(T5e," \u2014 "),hP=n(T5e,"A",{href:!0});var c9t=s(hP);Pxo=t(c9t,"DetrForSegmentation"),c9t.forEach(r),$xo=t(T5e," (DETR model)"),T5e.forEach(r),m9t.forEach(r),Ixo=i(Dr),_2=n(Dr,"P",{});var F5e=s(_2);jxo=t(F5e,"The model is set in evaluation mode by default using "),fre=n(F5e,"EM",{});var g9t=s(fre);Nxo=t(g9t,"model.eval()"),g9t.forEach(r),Dxo=t(F5e,` (so for instance, dropout modules are deactivated). 
To train the model, you should first set it back in training mode with `),cre=n(F5e,"EM",{});var h9t=s(cre);Gxo=t(h9t,"model.train()"),h9t.forEach(r),F5e.forEach(r),Oxo=i(Dr),gre=n(Dr,"P",{});var u9t=s(gre);qxo=t(u9t,"Examples:"),u9t.forEach(r),zxo=i(Dr),c(qC.$$.fragment,Dr),Dr.forEach(r),Ks.forEach(r),vCe=i(d),hd=n(d,"H2",{class:!0});var pye=s(hd);v2=n(pye,"A",{id:!0,class:!0,href:!0});var p9t=s(v2);hre=n(p9t,"SPAN",{});var _9t=s(hre);c(zC.$$.fragment,_9t),_9t.forEach(r),p9t.forEach(r),Xxo=i(pye),ure=n(pye,"SPAN",{});var v9t=s(ure);Wxo=t(v9t,"TFAutoModel"),v9t.forEach(r),pye.forEach(r),bCe=i(d),ot=n(d,"DIV",{class:!0});var Zs=s(ot);c(XC.$$.fragment,Zs),Vxo=i(Zs),ud=n(Zs,"P",{});var nG=s(ud);Qxo=t(nG,`This is a generic model class that will be instantiated as one of the base model classes of the library when created with the `),pre=n(nG,"CODE",{});var b9t=s(pre);Hxo=t(b9t,"from_pretrained()"),b9t.forEach(r),Uxo=t(nG,` class method or the `),_re=n(nG,"CODE",{});var T9t=s(_re);Jxo=t(T9t,"from_config()"),T9t.forEach(r),Kxo=t(nG," class method."),nG.forEach(r),Yxo=i(Zs),WC=n(Zs,"P",{});var _ye=s(WC);Zxo=t(_ye,"This class cannot be instantiated directly using "),vre=n(_ye,"CODE",{});var F9t=s(vre);e6o=t(F9t,"__init__()"),F9t.forEach(r),o6o=t(_ye," (throws an error)."),_ye.forEach(r),t6o=i(Zs),Vt=n(Zs,"DIV",{class:!0});var el=s(Vt);c(VC.$$.fragment,el),r6o=i(el),bre=n(el,"P",{});var M9t=s(bre);a6o=t(M9t,"Instantiates one of the base model classes of the library from a configuration."),M9t.forEach(r),n6o=i(el),pd=n(el,"P",{});var sG=s(pd);s6o=t(sG,`Note: Loading a model from its configuration file does `),Tre=n(sG,"STRONG",{});var E9t=s(Tre);l6o=t(E9t,"not"),E9t.forEach(r),i6o=t(sG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Fre=n(sG,"EM",{});var C9t=s(Fre);d6o=t(C9t,"~TFAutoModel.from_pretrained"),C9t.forEach(r),m6o=t(sG,`] to load the model weights.`),sG.forEach(r),f6o=i(el),Mre=n(el,"P",{});var y9t=s(Mre);c6o=t(y9t,"Examples:"),y9t.forEach(r),g6o=i(el),c(QC.$$.fragment,el),el.forEach(r),h6o=i(Zs),ro=n(Zs,"DIV",{class:!0});var Hr=s(ro);c(HC.$$.fragment,Hr),u6o=i(Hr),Ere=n(Hr,"P",{});var w9t=s(Ere);p6o=t(w9t,"Instantiate one of the base model classes of the library from a pretrained model."),w9t.forEach(r),_6o=i(Hr),za=n(Hr,"P",{});var dF=s(za);v6o=t(dF,"The model class to instantiate is selected based on the "),Cre=n(dF,"EM",{});var A9t=s(Cre);b6o=t(A9t,"model_type"),A9t.forEach(r),T6o=t(dF,` property of the config object (either passed as an argument or loaded from `),yre=n(dF,"EM",{});var x9t=s(yre);F6o=t(x9t,"pretrained_model_name_or_path"),x9t.forEach(r),M6o=t(dF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),wre=n(dF,"EM",{});var L9t=s(wre);E6o=t(L9t,"pretrained_model_name_or_path"),L9t.forEach(r),C6o=t(dF,":"),dF.forEach(r),y6o=i(Hr),L=n(Hr,"UL",{});var B=s(L);b2=n(B,"LI",{});var M5e=s(b2);Are=n(M5e,"STRONG",{});var B9t=s(Are);w6o=t(B9t,"albert"),B9t.forEach(r),A6o=t(M5e," \u2014 "),uP=n(M5e,"A",{href:!0});var k9t=s(uP);x6o=t(k9t,"TFAlbertModel"),k9t.forEach(r),L6o=t(M5e," (ALBERT model)"),M5e.forEach(r),B6o=i(B),T2=n(B,"LI",{});var E5e=s(T2);xre=n(E5e,"STRONG",{});var R9t=s(xre);k6o=t(R9t,"bart"),R9t.forEach(r),R6o=t(E5e," \u2014 "),pP=n(E5e,"A",{href:!0});var S9t=s(pP);S6o=t(S9t,"TFBartModel"),S9t.forEach(r),P6o=t(E5e," (BART model)"),E5e.forEach(r),$6o=i(B),F2=n(B,"LI",{});var C5e=s(F2);Lre=n(C5e,"STRONG",{});var P9t=s(Lre);I6o=t(P9t,"bert"),P9t.forEach(r),j6o=t(C5e," \u2014 "),_P=n(C5e,"A",{href:!0});var 
$9t=s(_P);N6o=t($9t,"TFBertModel"),$9t.forEach(r),D6o=t(C5e," (BERT model)"),C5e.forEach(r),G6o=i(B),M2=n(B,"LI",{});var y5e=s(M2);Bre=n(y5e,"STRONG",{});var I9t=s(Bre);O6o=t(I9t,"blenderbot"),I9t.forEach(r),q6o=t(y5e," \u2014 "),vP=n(y5e,"A",{href:!0});var j9t=s(vP);z6o=t(j9t,"TFBlenderbotModel"),j9t.forEach(r),X6o=t(y5e," (Blenderbot model)"),y5e.forEach(r),W6o=i(B),E2=n(B,"LI",{});var w5e=s(E2);kre=n(w5e,"STRONG",{});var N9t=s(kre);V6o=t(N9t,"blenderbot-small"),N9t.forEach(r),Q6o=t(w5e," \u2014 "),bP=n(w5e,"A",{href:!0});var D9t=s(bP);H6o=t(D9t,"TFBlenderbotSmallModel"),D9t.forEach(r),U6o=t(w5e," (BlenderbotSmall model)"),w5e.forEach(r),J6o=i(B),C2=n(B,"LI",{});var A5e=s(C2);Rre=n(A5e,"STRONG",{});var G9t=s(Rre);K6o=t(G9t,"camembert"),G9t.forEach(r),Y6o=t(A5e," \u2014 "),TP=n(A5e,"A",{href:!0});var O9t=s(TP);Z6o=t(O9t,"TFCamembertModel"),O9t.forEach(r),e8o=t(A5e," (CamemBERT model)"),A5e.forEach(r),o8o=i(B),y2=n(B,"LI",{});var x5e=s(y2);Sre=n(x5e,"STRONG",{});var q9t=s(Sre);t8o=t(q9t,"convbert"),q9t.forEach(r),r8o=t(x5e," \u2014 "),FP=n(x5e,"A",{href:!0});var z9t=s(FP);a8o=t(z9t,"TFConvBertModel"),z9t.forEach(r),n8o=t(x5e," (ConvBERT model)"),x5e.forEach(r),s8o=i(B),w2=n(B,"LI",{});var L5e=s(w2);Pre=n(L5e,"STRONG",{});var X9t=s(Pre);l8o=t(X9t,"ctrl"),X9t.forEach(r),i8o=t(L5e," \u2014 "),MP=n(L5e,"A",{href:!0});var W9t=s(MP);d8o=t(W9t,"TFCTRLModel"),W9t.forEach(r),m8o=t(L5e," (CTRL model)"),L5e.forEach(r),f8o=i(B),A2=n(B,"LI",{});var B5e=s(A2);$re=n(B5e,"STRONG",{});var V9t=s($re);c8o=t(V9t,"deberta"),V9t.forEach(r),g8o=t(B5e," \u2014 "),EP=n(B5e,"A",{href:!0});var Q9t=s(EP);h8o=t(Q9t,"TFDebertaModel"),Q9t.forEach(r),u8o=t(B5e," (DeBERTa model)"),B5e.forEach(r),p8o=i(B),x2=n(B,"LI",{});var k5e=s(x2);Ire=n(k5e,"STRONG",{});var H9t=s(Ire);_8o=t(H9t,"deberta-v2"),H9t.forEach(r),v8o=t(k5e," \u2014 "),CP=n(k5e,"A",{href:!0});var U9t=s(CP);b8o=t(U9t,"TFDebertaV2Model"),U9t.forEach(r),T8o=t(k5e," (DeBERTa-v2 model)"),k5e.forEach(r),F8o=i(B),L2=n(B,"LI",{});var R5e=s(L2);jre=n(R5e,"STRONG",{});var J9t=s(jre);M8o=t(J9t,"distilbert"),J9t.forEach(r),E8o=t(R5e," \u2014 "),yP=n(R5e,"A",{href:!0});var K9t=s(yP);C8o=t(K9t,"TFDistilBertModel"),K9t.forEach(r),y8o=t(R5e," (DistilBERT model)"),R5e.forEach(r),w8o=i(B),B2=n(B,"LI",{});var S5e=s(B2);Nre=n(S5e,"STRONG",{});var Y9t=s(Nre);A8o=t(Y9t,"dpr"),Y9t.forEach(r),x8o=t(S5e," \u2014 "),wP=n(S5e,"A",{href:!0});var Z9t=s(wP);L8o=t(Z9t,"TFDPRQuestionEncoder"),Z9t.forEach(r),B8o=t(S5e," (DPR model)"),S5e.forEach(r),k8o=i(B),k2=n(B,"LI",{});var P5e=s(k2);Dre=n(P5e,"STRONG",{});var ekt=s(Dre);R8o=t(ekt,"electra"),ekt.forEach(r),S8o=t(P5e," \u2014 "),AP=n(P5e,"A",{href:!0});var okt=s(AP);P8o=t(okt,"TFElectraModel"),okt.forEach(r),$8o=t(P5e," (ELECTRA model)"),P5e.forEach(r),I8o=i(B),R2=n(B,"LI",{});var $5e=s(R2);Gre=n($5e,"STRONG",{});var tkt=s(Gre);j8o=t(tkt,"flaubert"),tkt.forEach(r),N8o=t($5e," \u2014 "),xP=n($5e,"A",{href:!0});var rkt=s(xP);D8o=t(rkt,"TFFlaubertModel"),rkt.forEach(r),G8o=t($5e," (FlauBERT model)"),$5e.forEach(r),O8o=i(B),ds=n(B,"LI",{});var $A=s(ds);Ore=n($A,"STRONG",{});var akt=s(Ore);q8o=t(akt,"funnel"),akt.forEach(r),z8o=t($A," \u2014 "),LP=n($A,"A",{href:!0});var nkt=s(LP);X8o=t(nkt,"TFFunnelModel"),nkt.forEach(r),W8o=t($A," or "),BP=n($A,"A",{href:!0});var skt=s(BP);V8o=t(skt,"TFFunnelBaseModel"),skt.forEach(r),Q8o=t($A," (Funnel Transformer model)"),$A.forEach(r),H8o=i(B),S2=n(B,"LI",{});var I5e=s(S2);qre=n(I5e,"STRONG",{});var lkt=s(qre);U8o=t(lkt,"gpt2"),lkt.forEach(r),J8o=t(I5e," \u2014 "),kP=n(I5e,"A",{href:!0});var 
ikt=s(kP);K8o=t(ikt,"TFGPT2Model"),ikt.forEach(r),Y8o=t(I5e," (OpenAI GPT-2 model)"),I5e.forEach(r),Z8o=i(B),P2=n(B,"LI",{});var j5e=s(P2);zre=n(j5e,"STRONG",{});var dkt=s(zre);eLo=t(dkt,"hubert"),dkt.forEach(r),oLo=t(j5e," \u2014 "),RP=n(j5e,"A",{href:!0});var mkt=s(RP);tLo=t(mkt,"TFHubertModel"),mkt.forEach(r),rLo=t(j5e," (Hubert model)"),j5e.forEach(r),aLo=i(B),$2=n(B,"LI",{});var N5e=s($2);Xre=n(N5e,"STRONG",{});var fkt=s(Xre);nLo=t(fkt,"layoutlm"),fkt.forEach(r),sLo=t(N5e," \u2014 "),SP=n(N5e,"A",{href:!0});var ckt=s(SP);lLo=t(ckt,"TFLayoutLMModel"),ckt.forEach(r),iLo=t(N5e," (LayoutLM model)"),N5e.forEach(r),dLo=i(B),I2=n(B,"LI",{});var D5e=s(I2);Wre=n(D5e,"STRONG",{});var gkt=s(Wre);mLo=t(gkt,"led"),gkt.forEach(r),fLo=t(D5e," \u2014 "),PP=n(D5e,"A",{href:!0});var hkt=s(PP);cLo=t(hkt,"TFLEDModel"),hkt.forEach(r),gLo=t(D5e," (LED model)"),D5e.forEach(r),hLo=i(B),j2=n(B,"LI",{});var G5e=s(j2);Vre=n(G5e,"STRONG",{});var ukt=s(Vre);uLo=t(ukt,"longformer"),ukt.forEach(r),pLo=t(G5e," \u2014 "),$P=n(G5e,"A",{href:!0});var pkt=s($P);_Lo=t(pkt,"TFLongformerModel"),pkt.forEach(r),vLo=t(G5e," (Longformer model)"),G5e.forEach(r),bLo=i(B),N2=n(B,"LI",{});var O5e=s(N2);Qre=n(O5e,"STRONG",{});var _kt=s(Qre);TLo=t(_kt,"lxmert"),_kt.forEach(r),FLo=t(O5e," \u2014 "),IP=n(O5e,"A",{href:!0});var vkt=s(IP);MLo=t(vkt,"TFLxmertModel"),vkt.forEach(r),ELo=t(O5e," (LXMERT model)"),O5e.forEach(r),CLo=i(B),D2=n(B,"LI",{});var q5e=s(D2);Hre=n(q5e,"STRONG",{});var bkt=s(Hre);yLo=t(bkt,"marian"),bkt.forEach(r),wLo=t(q5e," \u2014 "),jP=n(q5e,"A",{href:!0});var Tkt=s(jP);ALo=t(Tkt,"TFMarianModel"),Tkt.forEach(r),xLo=t(q5e," (Marian model)"),q5e.forEach(r),LLo=i(B),G2=n(B,"LI",{});var z5e=s(G2);Ure=n(z5e,"STRONG",{});var Fkt=s(Ure);BLo=t(Fkt,"mbart"),Fkt.forEach(r),kLo=t(z5e," \u2014 "),NP=n(z5e,"A",{href:!0});var Mkt=s(NP);RLo=t(Mkt,"TFMBartModel"),Mkt.forEach(r),SLo=t(z5e," (mBART model)"),z5e.forEach(r),PLo=i(B),O2=n(B,"LI",{});var X5e=s(O2);Jre=n(X5e,"STRONG",{});var Ekt=s(Jre);$Lo=t(Ekt,"mobilebert"),Ekt.forEach(r),ILo=t(X5e," \u2014 "),DP=n(X5e,"A",{href:!0});var Ckt=s(DP);jLo=t(Ckt,"TFMobileBertModel"),Ckt.forEach(r),NLo=t(X5e," (MobileBERT model)"),X5e.forEach(r),DLo=i(B),q2=n(B,"LI",{});var W5e=s(q2);Kre=n(W5e,"STRONG",{});var ykt=s(Kre);GLo=t(ykt,"mpnet"),ykt.forEach(r),OLo=t(W5e," \u2014 "),GP=n(W5e,"A",{href:!0});var wkt=s(GP);qLo=t(wkt,"TFMPNetModel"),wkt.forEach(r),zLo=t(W5e," (MPNet model)"),W5e.forEach(r),XLo=i(B),z2=n(B,"LI",{});var V5e=s(z2);Yre=n(V5e,"STRONG",{});var Akt=s(Yre);WLo=t(Akt,"mt5"),Akt.forEach(r),VLo=t(V5e," \u2014 "),OP=n(V5e,"A",{href:!0});var xkt=s(OP);QLo=t(xkt,"TFMT5Model"),xkt.forEach(r),HLo=t(V5e," (mT5 model)"),V5e.forEach(r),ULo=i(B),X2=n(B,"LI",{});var Q5e=s(X2);Zre=n(Q5e,"STRONG",{});var Lkt=s(Zre);JLo=t(Lkt,"openai-gpt"),Lkt.forEach(r),KLo=t(Q5e," \u2014 "),qP=n(Q5e,"A",{href:!0});var Bkt=s(qP);YLo=t(Bkt,"TFOpenAIGPTModel"),Bkt.forEach(r),ZLo=t(Q5e," (OpenAI GPT model)"),Q5e.forEach(r),eBo=i(B),W2=n(B,"LI",{});var H5e=s(W2);eae=n(H5e,"STRONG",{});var kkt=s(eae);oBo=t(kkt,"pegasus"),kkt.forEach(r),tBo=t(H5e," \u2014 "),zP=n(H5e,"A",{href:!0});var Rkt=s(zP);rBo=t(Rkt,"TFPegasusModel"),Rkt.forEach(r),aBo=t(H5e," (Pegasus model)"),H5e.forEach(r),nBo=i(B),V2=n(B,"LI",{});var U5e=s(V2);oae=n(U5e,"STRONG",{});var Skt=s(oae);sBo=t(Skt,"rembert"),Skt.forEach(r),lBo=t(U5e," \u2014 "),XP=n(U5e,"A",{href:!0});var Pkt=s(XP);iBo=t(Pkt,"TFRemBertModel"),Pkt.forEach(r),dBo=t(U5e," (RemBERT model)"),U5e.forEach(r),mBo=i(B),Q2=n(B,"LI",{});var J5e=s(Q2);tae=n(J5e,"STRONG",{});var 
$kt=s(tae);fBo=t($kt,"roberta"),$kt.forEach(r),cBo=t(J5e," \u2014 "),WP=n(J5e,"A",{href:!0});var Ikt=s(WP);gBo=t(Ikt,"TFRobertaModel"),Ikt.forEach(r),hBo=t(J5e," (RoBERTa model)"),J5e.forEach(r),uBo=i(B),H2=n(B,"LI",{});var K5e=s(H2);rae=n(K5e,"STRONG",{});var jkt=s(rae);pBo=t(jkt,"roformer"),jkt.forEach(r),_Bo=t(K5e," \u2014 "),VP=n(K5e,"A",{href:!0});var Nkt=s(VP);vBo=t(Nkt,"TFRoFormerModel"),Nkt.forEach(r),bBo=t(K5e," (RoFormer model)"),K5e.forEach(r),TBo=i(B),U2=n(B,"LI",{});var Y5e=s(U2);aae=n(Y5e,"STRONG",{});var Dkt=s(aae);FBo=t(Dkt,"t5"),Dkt.forEach(r),MBo=t(Y5e," \u2014 "),QP=n(Y5e,"A",{href:!0});var Gkt=s(QP);EBo=t(Gkt,"TFT5Model"),Gkt.forEach(r),CBo=t(Y5e," (T5 model)"),Y5e.forEach(r),yBo=i(B),J2=n(B,"LI",{});var Z5e=s(J2);nae=n(Z5e,"STRONG",{});var Okt=s(nae);wBo=t(Okt,"tapas"),Okt.forEach(r),ABo=t(Z5e," \u2014 "),HP=n(Z5e,"A",{href:!0});var qkt=s(HP);xBo=t(qkt,"TFTapasModel"),qkt.forEach(r),LBo=t(Z5e," (TAPAS model)"),Z5e.forEach(r),BBo=i(B),K2=n(B,"LI",{});var e0e=s(K2);sae=n(e0e,"STRONG",{});var zkt=s(sae);kBo=t(zkt,"transfo-xl"),zkt.forEach(r),RBo=t(e0e," \u2014 "),UP=n(e0e,"A",{href:!0});var Xkt=s(UP);SBo=t(Xkt,"TFTransfoXLModel"),Xkt.forEach(r),PBo=t(e0e," (Transformer-XL model)"),e0e.forEach(r),$Bo=i(B),Y2=n(B,"LI",{});var o0e=s(Y2);lae=n(o0e,"STRONG",{});var Wkt=s(lae);IBo=t(Wkt,"vit"),Wkt.forEach(r),jBo=t(o0e," \u2014 "),JP=n(o0e,"A",{href:!0});var Vkt=s(JP);NBo=t(Vkt,"TFViTModel"),Vkt.forEach(r),DBo=t(o0e," (ViT model)"),o0e.forEach(r),GBo=i(B),Z2=n(B,"LI",{});var t0e=s(Z2);iae=n(t0e,"STRONG",{});var Qkt=s(iae);OBo=t(Qkt,"wav2vec2"),Qkt.forEach(r),qBo=t(t0e," \u2014 "),KP=n(t0e,"A",{href:!0});var Hkt=s(KP);zBo=t(Hkt,"TFWav2Vec2Model"),Hkt.forEach(r),XBo=t(t0e," (Wav2Vec2 model)"),t0e.forEach(r),WBo=i(B),eb=n(B,"LI",{});var r0e=s(eb);dae=n(r0e,"STRONG",{});var Ukt=s(dae);VBo=t(Ukt,"xlm"),Ukt.forEach(r),QBo=t(r0e," \u2014 "),YP=n(r0e,"A",{href:!0});var Jkt=s(YP);HBo=t(Jkt,"TFXLMModel"),Jkt.forEach(r),UBo=t(r0e," (XLM model)"),r0e.forEach(r),JBo=i(B),ob=n(B,"LI",{});var a0e=s(ob);mae=n(a0e,"STRONG",{});var Kkt=s(mae);KBo=t(Kkt,"xlm-roberta"),Kkt.forEach(r),YBo=t(a0e," \u2014 "),ZP=n(a0e,"A",{href:!0});var Ykt=s(ZP);ZBo=t(Ykt,"TFXLMRobertaModel"),Ykt.forEach(r),e9o=t(a0e," (XLM-RoBERTa model)"),a0e.forEach(r),o9o=i(B),tb=n(B,"LI",{});var n0e=s(tb);fae=n(n0e,"STRONG",{});var Zkt=s(fae);t9o=t(Zkt,"xlnet"),Zkt.forEach(r),r9o=t(n0e," \u2014 "),e$=n(n0e,"A",{href:!0});var eRt=s(e$);a9o=t(eRt,"TFXLNetModel"),eRt.forEach(r),n9o=t(n0e," (XLNet model)"),n0e.forEach(r),B.forEach(r),s9o=i(Hr),cae=n(Hr,"P",{});var oRt=s(cae);l9o=t(oRt,"Examples:"),oRt.forEach(r),i9o=i(Hr),c(UC.$$.fragment,Hr),Hr.forEach(r),Zs.forEach(r),TCe=i(d),_d=n(d,"H2",{class:!0});var vye=s(_d);rb=n(vye,"A",{id:!0,class:!0,href:!0});var tRt=s(rb);gae=n(tRt,"SPAN",{});var rRt=s(gae);c(JC.$$.fragment,rRt),rRt.forEach(r),tRt.forEach(r),d9o=i(vye),hae=n(vye,"SPAN",{});var aRt=s(hae);m9o=t(aRt,"TFAutoModelForPreTraining"),aRt.forEach(r),vye.forEach(r),FCe=i(d),tt=n(d,"DIV",{class:!0});var ol=s(tt);c(KC.$$.fragment,ol),f9o=i(ol),vd=n(ol,"P",{});var lG=s(vd);c9o=t(lG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a pretraining head) when created with the `),uae=n(lG,"CODE",{});var nRt=s(uae);g9o=t(nRt,"from_pretrained()"),nRt.forEach(r),h9o=t(lG,` class method or the `),pae=n(lG,"CODE",{});var sRt=s(pae);u9o=t(sRt,"from_config()"),sRt.forEach(r),p9o=t(lG," class method."),lG.forEach(r),_9o=i(ol),YC=n(ol,"P",{});var bye=s(YC);v9o=t(bye,"This class cannot be 
instantiated directly using "),_ae=n(bye,"CODE",{});var lRt=s(_ae);b9o=t(lRt,"__init__()"),lRt.forEach(r),T9o=t(bye," (throws an error)."),bye.forEach(r),F9o=i(ol),Qt=n(ol,"DIV",{class:!0});var tl=s(Qt);c(ZC.$$.fragment,tl),M9o=i(tl),vae=n(tl,"P",{});var iRt=s(vae);E9o=t(iRt,"Instantiates one of the model classes of the library (with a pretraining head) from a configuration."),iRt.forEach(r),C9o=i(tl),bd=n(tl,"P",{});var iG=s(bd);y9o=t(iG,`Note: Loading a model from its configuration file does `),bae=n(iG,"STRONG",{});var dRt=s(bae);w9o=t(dRt,"not"),dRt.forEach(r),A9o=t(iG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Tae=n(iG,"EM",{});var mRt=s(Tae);x9o=t(mRt,"~TFAutoModelForPreTraining.from_pretrained"),mRt.forEach(r),L9o=t(iG,`] to load the model weights.`),iG.forEach(r),B9o=i(tl),Fae=n(tl,"P",{});var fRt=s(Fae);k9o=t(fRt,"Examples:"),fRt.forEach(r),R9o=i(tl),c(e3.$$.fragment,tl),tl.forEach(r),S9o=i(ol),ao=n(ol,"DIV",{class:!0});var Ur=s(ao);c(o3.$$.fragment,Ur),P9o=i(Ur),Mae=n(Ur,"P",{});var cRt=s(Mae);$9o=t(cRt,"Instantiate one of the model classes of the library (with a pretraining head) from a pretrained model."),cRt.forEach(r),I9o=i(Ur),Xa=n(Ur,"P",{});var mF=s(Xa);j9o=t(mF,"The model class to instantiate is selected based on the "),Eae=n(mF,"EM",{});var gRt=s(Eae);N9o=t(gRt,"model_type"),gRt.forEach(r),D9o=t(mF,` property of the config object (either passed as an argument or loaded from `),Cae=n(mF,"EM",{});var hRt=s(Cae);G9o=t(hRt,"pretrained_model_name_or_path"),hRt.forEach(r),O9o=t(mF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),yae=n(mF,"EM",{});var uRt=s(yae);q9o=t(uRt,"pretrained_model_name_or_path"),uRt.forEach(r),z9o=t(mF,":"),mF.forEach(r),X9o=i(Ur),V=n(Ur,"UL",{});var H=s(V);ab=n(H,"LI",{});var s0e=s(ab);wae=n(s0e,"STRONG",{});var pRt=s(wae);W9o=t(pRt,"albert"),pRt.forEach(r),V9o=t(s0e," \u2014 "),o$=n(s0e,"A",{href:!0});var _Rt=s(o$);Q9o=t(_Rt,"TFAlbertForPreTraining"),_Rt.forEach(r),H9o=t(s0e," (ALBERT model)"),s0e.forEach(r),U9o=i(H),nb=n(H,"LI",{});var l0e=s(nb);Aae=n(l0e,"STRONG",{});var vRt=s(Aae);J9o=t(vRt,"bart"),vRt.forEach(r),K9o=t(l0e," \u2014 "),t$=n(l0e,"A",{href:!0});var bRt=s(t$);Y9o=t(bRt,"TFBartForConditionalGeneration"),bRt.forEach(r),Z9o=t(l0e," (BART model)"),l0e.forEach(r),eko=i(H),sb=n(H,"LI",{});var i0e=s(sb);xae=n(i0e,"STRONG",{});var TRt=s(xae);oko=t(TRt,"bert"),TRt.forEach(r),tko=t(i0e," \u2014 "),r$=n(i0e,"A",{href:!0});var FRt=s(r$);rko=t(FRt,"TFBertForPreTraining"),FRt.forEach(r),ako=t(i0e," (BERT model)"),i0e.forEach(r),nko=i(H),lb=n(H,"LI",{});var d0e=s(lb);Lae=n(d0e,"STRONG",{});var MRt=s(Lae);sko=t(MRt,"camembert"),MRt.forEach(r),lko=t(d0e," \u2014 "),a$=n(d0e,"A",{href:!0});var ERt=s(a$);iko=t(ERt,"TFCamembertForMaskedLM"),ERt.forEach(r),dko=t(d0e," (CamemBERT model)"),d0e.forEach(r),mko=i(H),ib=n(H,"LI",{});var m0e=s(ib);Bae=n(m0e,"STRONG",{});var CRt=s(Bae);fko=t(CRt,"ctrl"),CRt.forEach(r),cko=t(m0e," \u2014 "),n$=n(m0e,"A",{href:!0});var yRt=s(n$);gko=t(yRt,"TFCTRLLMHeadModel"),yRt.forEach(r),hko=t(m0e," (CTRL model)"),m0e.forEach(r),uko=i(H),db=n(H,"LI",{});var f0e=s(db);kae=n(f0e,"STRONG",{});var wRt=s(kae);pko=t(wRt,"distilbert"),wRt.forEach(r),_ko=t(f0e," \u2014 "),s$=n(f0e,"A",{href:!0});var ARt=s(s$);vko=t(ARt,"TFDistilBertForMaskedLM"),ARt.forEach(r),bko=t(f0e," (DistilBERT model)"),f0e.forEach(r),Tko=i(H),mb=n(H,"LI",{});var c0e=s(mb);Rae=n(c0e,"STRONG",{});var xRt=s(Rae);Fko=t(xRt,"electra"),xRt.forEach(r),Mko=t(c0e," \u2014 
"),l$=n(c0e,"A",{href:!0});var LRt=s(l$);Eko=t(LRt,"TFElectraForPreTraining"),LRt.forEach(r),Cko=t(c0e," (ELECTRA model)"),c0e.forEach(r),yko=i(H),fb=n(H,"LI",{});var g0e=s(fb);Sae=n(g0e,"STRONG",{});var BRt=s(Sae);wko=t(BRt,"flaubert"),BRt.forEach(r),Ako=t(g0e," \u2014 "),i$=n(g0e,"A",{href:!0});var kRt=s(i$);xko=t(kRt,"TFFlaubertWithLMHeadModel"),kRt.forEach(r),Lko=t(g0e," (FlauBERT model)"),g0e.forEach(r),Bko=i(H),cb=n(H,"LI",{});var h0e=s(cb);Pae=n(h0e,"STRONG",{});var RRt=s(Pae);kko=t(RRt,"funnel"),RRt.forEach(r),Rko=t(h0e," \u2014 "),d$=n(h0e,"A",{href:!0});var SRt=s(d$);Sko=t(SRt,"TFFunnelForPreTraining"),SRt.forEach(r),Pko=t(h0e," (Funnel Transformer model)"),h0e.forEach(r),$ko=i(H),gb=n(H,"LI",{});var u0e=s(gb);$ae=n(u0e,"STRONG",{});var PRt=s($ae);Iko=t(PRt,"gpt2"),PRt.forEach(r),jko=t(u0e," \u2014 "),m$=n(u0e,"A",{href:!0});var $Rt=s(m$);Nko=t($Rt,"TFGPT2LMHeadModel"),$Rt.forEach(r),Dko=t(u0e," (OpenAI GPT-2 model)"),u0e.forEach(r),Gko=i(H),hb=n(H,"LI",{});var p0e=s(hb);Iae=n(p0e,"STRONG",{});var IRt=s(Iae);Oko=t(IRt,"layoutlm"),IRt.forEach(r),qko=t(p0e," \u2014 "),f$=n(p0e,"A",{href:!0});var jRt=s(f$);zko=t(jRt,"TFLayoutLMForMaskedLM"),jRt.forEach(r),Xko=t(p0e," (LayoutLM model)"),p0e.forEach(r),Wko=i(H),ub=n(H,"LI",{});var _0e=s(ub);jae=n(_0e,"STRONG",{});var NRt=s(jae);Vko=t(NRt,"lxmert"),NRt.forEach(r),Qko=t(_0e," \u2014 "),c$=n(_0e,"A",{href:!0});var DRt=s(c$);Hko=t(DRt,"TFLxmertForPreTraining"),DRt.forEach(r),Uko=t(_0e," (LXMERT model)"),_0e.forEach(r),Jko=i(H),pb=n(H,"LI",{});var v0e=s(pb);Nae=n(v0e,"STRONG",{});var GRt=s(Nae);Kko=t(GRt,"mobilebert"),GRt.forEach(r),Yko=t(v0e," \u2014 "),g$=n(v0e,"A",{href:!0});var ORt=s(g$);Zko=t(ORt,"TFMobileBertForPreTraining"),ORt.forEach(r),eRo=t(v0e," (MobileBERT model)"),v0e.forEach(r),oRo=i(H),_b=n(H,"LI",{});var b0e=s(_b);Dae=n(b0e,"STRONG",{});var qRt=s(Dae);tRo=t(qRt,"mpnet"),qRt.forEach(r),rRo=t(b0e," \u2014 "),h$=n(b0e,"A",{href:!0});var zRt=s(h$);aRo=t(zRt,"TFMPNetForMaskedLM"),zRt.forEach(r),nRo=t(b0e," (MPNet model)"),b0e.forEach(r),sRo=i(H),vb=n(H,"LI",{});var T0e=s(vb);Gae=n(T0e,"STRONG",{});var XRt=s(Gae);lRo=t(XRt,"openai-gpt"),XRt.forEach(r),iRo=t(T0e," \u2014 "),u$=n(T0e,"A",{href:!0});var WRt=s(u$);dRo=t(WRt,"TFOpenAIGPTLMHeadModel"),WRt.forEach(r),mRo=t(T0e," (OpenAI GPT model)"),T0e.forEach(r),fRo=i(H),bb=n(H,"LI",{});var F0e=s(bb);Oae=n(F0e,"STRONG",{});var VRt=s(Oae);cRo=t(VRt,"roberta"),VRt.forEach(r),gRo=t(F0e," \u2014 "),p$=n(F0e,"A",{href:!0});var QRt=s(p$);hRo=t(QRt,"TFRobertaForMaskedLM"),QRt.forEach(r),uRo=t(F0e," (RoBERTa model)"),F0e.forEach(r),pRo=i(H),Tb=n(H,"LI",{});var M0e=s(Tb);qae=n(M0e,"STRONG",{});var HRt=s(qae);_Ro=t(HRt,"t5"),HRt.forEach(r),vRo=t(M0e," \u2014 "),_$=n(M0e,"A",{href:!0});var URt=s(_$);bRo=t(URt,"TFT5ForConditionalGeneration"),URt.forEach(r),TRo=t(M0e," (T5 model)"),M0e.forEach(r),FRo=i(H),Fb=n(H,"LI",{});var E0e=s(Fb);zae=n(E0e,"STRONG",{});var JRt=s(zae);MRo=t(JRt,"tapas"),JRt.forEach(r),ERo=t(E0e," \u2014 "),v$=n(E0e,"A",{href:!0});var KRt=s(v$);CRo=t(KRt,"TFTapasForMaskedLM"),KRt.forEach(r),yRo=t(E0e," (TAPAS model)"),E0e.forEach(r),wRo=i(H),Mb=n(H,"LI",{});var C0e=s(Mb);Xae=n(C0e,"STRONG",{});var YRt=s(Xae);ARo=t(YRt,"transfo-xl"),YRt.forEach(r),xRo=t(C0e," \u2014 "),b$=n(C0e,"A",{href:!0});var ZRt=s(b$);LRo=t(ZRt,"TFTransfoXLLMHeadModel"),ZRt.forEach(r),BRo=t(C0e," (Transformer-XL model)"),C0e.forEach(r),kRo=i(H),Eb=n(H,"LI",{});var y0e=s(Eb);Wae=n(y0e,"STRONG",{});var eSt=s(Wae);RRo=t(eSt,"xlm"),eSt.forEach(r),SRo=t(y0e," \u2014 "),T$=n(y0e,"A",{href:!0});var 
oSt=s(T$);PRo=t(oSt,"TFXLMWithLMHeadModel"),oSt.forEach(r),$Ro=t(y0e," (XLM model)"),y0e.forEach(r),IRo=i(H),Cb=n(H,"LI",{});var w0e=s(Cb);Vae=n(w0e,"STRONG",{});var tSt=s(Vae);jRo=t(tSt,"xlm-roberta"),tSt.forEach(r),NRo=t(w0e," \u2014 "),F$=n(w0e,"A",{href:!0});var rSt=s(F$);DRo=t(rSt,"TFXLMRobertaForMaskedLM"),rSt.forEach(r),GRo=t(w0e," (XLM-RoBERTa model)"),w0e.forEach(r),ORo=i(H),yb=n(H,"LI",{});var A0e=s(yb);Qae=n(A0e,"STRONG",{});var aSt=s(Qae);qRo=t(aSt,"xlnet"),aSt.forEach(r),zRo=t(A0e," \u2014 "),M$=n(A0e,"A",{href:!0});var nSt=s(M$);XRo=t(nSt,"TFXLNetLMHeadModel"),nSt.forEach(r),WRo=t(A0e," (XLNet model)"),A0e.forEach(r),H.forEach(r),VRo=i(Ur),Hae=n(Ur,"P",{});var sSt=s(Hae);QRo=t(sSt,"Examples:"),sSt.forEach(r),HRo=i(Ur),c(t3.$$.fragment,Ur),Ur.forEach(r),ol.forEach(r),MCe=i(d),Td=n(d,"H2",{class:!0});var Tye=s(Td);wb=n(Tye,"A",{id:!0,class:!0,href:!0});var lSt=s(wb);Uae=n(lSt,"SPAN",{});var iSt=s(Uae);c(r3.$$.fragment,iSt),iSt.forEach(r),lSt.forEach(r),URo=i(Tye),Jae=n(Tye,"SPAN",{});var dSt=s(Jae);JRo=t(dSt,"TFAutoModelForCausalLM"),dSt.forEach(r),Tye.forEach(r),ECe=i(d),rt=n(d,"DIV",{class:!0});var rl=s(rt);c(a3.$$.fragment,rl),KRo=i(rl),Fd=n(rl,"P",{});var dG=s(Fd);YRo=t(dG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a causal language modeling head) when created with the `),Kae=n(dG,"CODE",{});var mSt=s(Kae);ZRo=t(mSt,"from_pretrained()"),mSt.forEach(r),eSo=t(dG,` class method or the `),Yae=n(dG,"CODE",{});var fSt=s(Yae);oSo=t(fSt,"from_config()"),fSt.forEach(r),tSo=t(dG," class method."),dG.forEach(r),rSo=i(rl),n3=n(rl,"P",{});var Fye=s(n3);aSo=t(Fye,"This class cannot be instantiated directly using "),Zae=n(Fye,"CODE",{});var cSt=s(Zae);nSo=t(cSt,"__init__()"),cSt.forEach(r),sSo=t(Fye," (throws an error)."),Fye.forEach(r),lSo=i(rl),Ht=n(rl,"DIV",{class:!0});var al=s(Ht);c(s3.$$.fragment,al),iSo=i(al),ene=n(al,"P",{});var gSt=s(ene);dSo=t(gSt,"Instantiates one of the model classes of the library (with a causal language modeling head) from a configuration."),gSt.forEach(r),mSo=i(al),Md=n(al,"P",{});var mG=s(Md);fSo=t(mG,`Note: Loading a model from its configuration file does `),one=n(mG,"STRONG",{});var hSt=s(one);cSo=t(hSt,"not"),hSt.forEach(r),gSo=t(mG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),tne=n(mG,"EM",{});var uSt=s(tne);hSo=t(uSt,"~TFAutoModelForCausalLM.from_pretrained"),uSt.forEach(r),uSo=t(mG,`] to load the model weights.`),mG.forEach(r),pSo=i(al),rne=n(al,"P",{});var pSt=s(rne);_So=t(pSt,"Examples:"),pSt.forEach(r),vSo=i(al),c(l3.$$.fragment,al),al.forEach(r),bSo=i(rl),no=n(rl,"DIV",{class:!0});var Jr=s(no);c(i3.$$.fragment,Jr),TSo=i(Jr),ane=n(Jr,"P",{});var _St=s(ane);FSo=t(_St,"Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model."),_St.forEach(r),MSo=i(Jr),Wa=n(Jr,"P",{});var fF=s(Wa);ESo=t(fF,"The model class to instantiate is selected based on the "),nne=n(fF,"EM",{});var vSt=s(nne);CSo=t(vSt,"model_type"),vSt.forEach(r),ySo=t(fF,` property of the config object (either passed as an argument or loaded from `),sne=n(fF,"EM",{});var bSt=s(sne);wSo=t(bSt,"pretrained_model_name_or_path"),bSt.forEach(r),ASo=t(fF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),lne=n(fF,"EM",{});var TSt=s(lne);xSo=t(TSt,"pretrained_model_name_or_path"),TSt.forEach(r),LSo=t(fF,":"),fF.forEach(r),BSo=i(Jr),ce=n(Jr,"UL",{});var ve=s(ce);Ab=n(ve,"LI",{});var x0e=s(Ab);ine=n(x0e,"STRONG",{});var FSt=s(ine);kSo=t(FSt,"bert"),FSt.forEach(r),RSo=t(x0e," \u2014 "),E$=n(x0e,"A",{href:!0});var MSt=s(E$);SSo=t(MSt,"TFBertLMHeadModel"),MSt.forEach(r),PSo=t(x0e," (BERT model)"),x0e.forEach(r),$So=i(ve),xb=n(ve,"LI",{});var L0e=s(xb);dne=n(L0e,"STRONG",{});var ESt=s(dne);ISo=t(ESt,"ctrl"),ESt.forEach(r),jSo=t(L0e," \u2014 "),C$=n(L0e,"A",{href:!0});var CSt=s(C$);NSo=t(CSt,"TFCTRLLMHeadModel"),CSt.forEach(r),DSo=t(L0e," (CTRL model)"),L0e.forEach(r),GSo=i(ve),Lb=n(ve,"LI",{});var B0e=s(Lb);mne=n(B0e,"STRONG",{});var ySt=s(mne);OSo=t(ySt,"gpt2"),ySt.forEach(r),qSo=t(B0e," \u2014 "),y$=n(B0e,"A",{href:!0});var wSt=s(y$);zSo=t(wSt,"TFGPT2LMHeadModel"),wSt.forEach(r),XSo=t(B0e," (OpenAI GPT-2 model)"),B0e.forEach(r),WSo=i(ve),Bb=n(ve,"LI",{});var k0e=s(Bb);fne=n(k0e,"STRONG",{});var ASt=s(fne);VSo=t(ASt,"openai-gpt"),ASt.forEach(r),QSo=t(k0e," \u2014 "),w$=n(k0e,"A",{href:!0});var xSt=s(w$);HSo=t(xSt,"TFOpenAIGPTLMHeadModel"),xSt.forEach(r),USo=t(k0e," (OpenAI GPT model)"),k0e.forEach(r),JSo=i(ve),kb=n(ve,"LI",{});var R0e=s(kb);cne=n(R0e,"STRONG",{});var LSt=s(cne);KSo=t(LSt,"rembert"),LSt.forEach(r),YSo=t(R0e," \u2014 "),A$=n(R0e,"A",{href:!0});var BSt=s(A$);ZSo=t(BSt,"TFRemBertForCausalLM"),BSt.forEach(r),ePo=t(R0e," (RemBERT model)"),R0e.forEach(r),oPo=i(ve),Rb=n(ve,"LI",{});var S0e=s(Rb);gne=n(S0e,"STRONG",{});var kSt=s(gne);tPo=t(kSt,"roberta"),kSt.forEach(r),rPo=t(S0e," \u2014 "),x$=n(S0e,"A",{href:!0});var RSt=s(x$);aPo=t(RSt,"TFRobertaForCausalLM"),RSt.forEach(r),nPo=t(S0e," (RoBERTa model)"),S0e.forEach(r),sPo=i(ve),Sb=n(ve,"LI",{});var P0e=s(Sb);hne=n(P0e,"STRONG",{});var SSt=s(hne);lPo=t(SSt,"roformer"),SSt.forEach(r),iPo=t(P0e," \u2014 "),L$=n(P0e,"A",{href:!0});var PSt=s(L$);dPo=t(PSt,"TFRoFormerForCausalLM"),PSt.forEach(r),mPo=t(P0e," (RoFormer model)"),P0e.forEach(r),fPo=i(ve),Pb=n(ve,"LI",{});var $0e=s(Pb);une=n($0e,"STRONG",{});var $St=s(une);cPo=t($St,"transfo-xl"),$St.forEach(r),gPo=t($0e," \u2014 "),B$=n($0e,"A",{href:!0});var ISt=s(B$);hPo=t(ISt,"TFTransfoXLLMHeadModel"),ISt.forEach(r),uPo=t($0e," (Transformer-XL model)"),$0e.forEach(r),pPo=i(ve),$b=n(ve,"LI",{});var I0e=s($b);pne=n(I0e,"STRONG",{});var jSt=s(pne);_Po=t(jSt,"xlm"),jSt.forEach(r),vPo=t(I0e," \u2014 "),k$=n(I0e,"A",{href:!0});var NSt=s(k$);bPo=t(NSt,"TFXLMWithLMHeadModel"),NSt.forEach(r),TPo=t(I0e," (XLM 
model)"),I0e.forEach(r),FPo=i(ve),Ib=n(ve,"LI",{});var j0e=s(Ib);_ne=n(j0e,"STRONG",{});var DSt=s(_ne);MPo=t(DSt,"xlnet"),DSt.forEach(r),EPo=t(j0e," \u2014 "),R$=n(j0e,"A",{href:!0});var GSt=s(R$);CPo=t(GSt,"TFXLNetLMHeadModel"),GSt.forEach(r),yPo=t(j0e," (XLNet model)"),j0e.forEach(r),ve.forEach(r),wPo=i(Jr),vne=n(Jr,"P",{});var OSt=s(vne);APo=t(OSt,"Examples:"),OSt.forEach(r),xPo=i(Jr),c(d3.$$.fragment,Jr),Jr.forEach(r),rl.forEach(r),CCe=i(d),Ed=n(d,"H2",{class:!0});var Mye=s(Ed);jb=n(Mye,"A",{id:!0,class:!0,href:!0});var qSt=s(jb);bne=n(qSt,"SPAN",{});var zSt=s(bne);c(m3.$$.fragment,zSt),zSt.forEach(r),qSt.forEach(r),LPo=i(Mye),Tne=n(Mye,"SPAN",{});var XSt=s(Tne);BPo=t(XSt,"TFAutoModelForImageClassification"),XSt.forEach(r),Mye.forEach(r),yCe=i(d),at=n(d,"DIV",{class:!0});var nl=s(at);c(f3.$$.fragment,nl),kPo=i(nl),Cd=n(nl,"P",{});var fG=s(Cd);RPo=t(fG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a image classification head) when created with the `),Fne=n(fG,"CODE",{});var WSt=s(Fne);SPo=t(WSt,"from_pretrained()"),WSt.forEach(r),PPo=t(fG,` class method or the `),Mne=n(fG,"CODE",{});var VSt=s(Mne);$Po=t(VSt,"from_config()"),VSt.forEach(r),IPo=t(fG," class method."),fG.forEach(r),jPo=i(nl),c3=n(nl,"P",{});var Eye=s(c3);NPo=t(Eye,"This class cannot be instantiated directly using "),Ene=n(Eye,"CODE",{});var QSt=s(Ene);DPo=t(QSt,"__init__()"),QSt.forEach(r),GPo=t(Eye," (throws an error)."),Eye.forEach(r),OPo=i(nl),Ut=n(nl,"DIV",{class:!0});var sl=s(Ut);c(g3.$$.fragment,sl),qPo=i(sl),Cne=n(sl,"P",{});var HSt=s(Cne);zPo=t(HSt,"Instantiates one of the model classes of the library (with a image classification head) from a configuration."),HSt.forEach(r),XPo=i(sl),yd=n(sl,"P",{});var cG=s(yd);WPo=t(cG,`Note: Loading a model from its configuration file does `),yne=n(cG,"STRONG",{});var USt=s(yne);VPo=t(USt,"not"),USt.forEach(r),QPo=t(cG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),wne=n(cG,"EM",{});var JSt=s(wne);HPo=t(JSt,"~TFAutoModelForImageClassification.from_pretrained"),JSt.forEach(r),UPo=t(cG,`] to load the model weights.`),cG.forEach(r),JPo=i(sl),Ane=n(sl,"P",{});var KSt=s(Ane);KPo=t(KSt,"Examples:"),KSt.forEach(r),YPo=i(sl),c(h3.$$.fragment,sl),sl.forEach(r),ZPo=i(nl),so=n(nl,"DIV",{class:!0});var Kr=s(so);c(u3.$$.fragment,Kr),e$o=i(Kr),xne=n(Kr,"P",{});var YSt=s(xne);o$o=t(YSt,"Instantiate one of the model classes of the library (with a image classification head) from a pretrained model."),YSt.forEach(r),t$o=i(Kr),Va=n(Kr,"P",{});var cF=s(Va);r$o=t(cF,"The model class to instantiate is selected based on the "),Lne=n(cF,"EM",{});var ZSt=s(Lne);a$o=t(ZSt,"model_type"),ZSt.forEach(r),n$o=t(cF,` property of the config object (either passed as an argument or loaded from `),Bne=n(cF,"EM",{});var ePt=s(Bne);s$o=t(ePt,"pretrained_model_name_or_path"),ePt.forEach(r),l$o=t(cF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),kne=n(cF,"EM",{});var oPt=s(kne);i$o=t(oPt,"pretrained_model_name_or_path"),oPt.forEach(r),d$o=t(cF,":"),cF.forEach(r),m$o=i(Kr),Rne=n(Kr,"UL",{});var tPt=s(Rne);Nb=n(tPt,"LI",{});var N0e=s(Nb);Sne=n(N0e,"STRONG",{});var rPt=s(Sne);f$o=t(rPt,"vit"),rPt.forEach(r),c$o=t(N0e," \u2014 "),S$=n(N0e,"A",{href:!0});var aPt=s(S$);g$o=t(aPt,"TFViTForImageClassification"),aPt.forEach(r),h$o=t(N0e," (ViT model)"),N0e.forEach(r),tPt.forEach(r),u$o=i(Kr),Pne=n(Kr,"P",{});var nPt=s(Pne);p$o=t(nPt,"Examples:"),nPt.forEach(r),_$o=i(Kr),c(p3.$$.fragment,Kr),Kr.forEach(r),nl.forEach(r),wCe=i(d),wd=n(d,"H2",{class:!0});var Cye=s(wd);Db=n(Cye,"A",{id:!0,class:!0,href:!0});var sPt=s(Db);$ne=n(sPt,"SPAN",{});var lPt=s($ne);c(_3.$$.fragment,lPt),lPt.forEach(r),sPt.forEach(r),v$o=i(Cye),Ine=n(Cye,"SPAN",{});var iPt=s(Ine);b$o=t(iPt,"TFAutoModelForMaskedLM"),iPt.forEach(r),Cye.forEach(r),ACe=i(d),nt=n(d,"DIV",{class:!0});var ll=s(nt);c(v3.$$.fragment,ll),T$o=i(ll),Ad=n(ll,"P",{});var gG=s(Ad);F$o=t(gG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a masked language modeling head) when created with the `),jne=n(gG,"CODE",{});var dPt=s(jne);M$o=t(dPt,"from_pretrained()"),dPt.forEach(r),E$o=t(gG,` class method or the `),Nne=n(gG,"CODE",{});var mPt=s(Nne);C$o=t(mPt,"from_config()"),mPt.forEach(r),y$o=t(gG," class method."),gG.forEach(r),w$o=i(ll),b3=n(ll,"P",{});var yye=s(b3);A$o=t(yye,"This class cannot be instantiated directly using "),Dne=n(yye,"CODE",{});var fPt=s(Dne);x$o=t(fPt,"__init__()"),fPt.forEach(r),L$o=t(yye," (throws an error)."),yye.forEach(r),B$o=i(ll),Jt=n(ll,"DIV",{class:!0});var il=s(Jt);c(T3.$$.fragment,il),k$o=i(il),Gne=n(il,"P",{});var cPt=s(Gne);R$o=t(cPt,"Instantiates one of the model classes of the library (with a masked language modeling head) from a configuration."),cPt.forEach(r),S$o=i(il),xd=n(il,"P",{});var hG=s(xd);P$o=t(hG,`Note: Loading a model from its configuration file does `),One=n(hG,"STRONG",{});var gPt=s(One);$$o=t(gPt,"not"),gPt.forEach(r),I$o=t(hG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),qne=n(hG,"EM",{});var hPt=s(qne);j$o=t(hPt,"~TFAutoModelForMaskedLM.from_pretrained"),hPt.forEach(r),N$o=t(hG,`] to load the model weights.`),hG.forEach(r),D$o=i(il),zne=n(il,"P",{});var uPt=s(zne);G$o=t(uPt,"Examples:"),uPt.forEach(r),O$o=i(il),c(F3.$$.fragment,il),il.forEach(r),q$o=i(ll),lo=n(ll,"DIV",{class:!0});var Yr=s(lo);c(M3.$$.fragment,Yr),z$o=i(Yr),Xne=n(Yr,"P",{});var pPt=s(Xne);X$o=t(pPt,"Instantiate one of the model classes of the library (with a masked language modeling head) from a pretrained model."),pPt.forEach(r),W$o=i(Yr),Qa=n(Yr,"P",{});var gF=s(Qa);V$o=t(gF,"The model class to instantiate is selected based on the "),Wne=n(gF,"EM",{});var _Pt=s(Wne);Q$o=t(_Pt,"model_type"),_Pt.forEach(r),H$o=t(gF,` property of the config object (either passed as an argument or loaded from `),Vne=n(gF,"EM",{});var vPt=s(Vne);U$o=t(vPt,"pretrained_model_name_or_path"),vPt.forEach(r),J$o=t(gF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Qne=n(gF,"EM",{});var bPt=s(Qne);K$o=t(bPt,"pretrained_model_name_or_path"),bPt.forEach(r),Y$o=t(gF,":"),gF.forEach(r),Z$o=i(Yr),K=n(Yr,"UL",{});var ee=s(K);Gb=n(ee,"LI",{});var D0e=s(Gb);Hne=n(D0e,"STRONG",{});var TPt=s(Hne);eIo=t(TPt,"albert"),TPt.forEach(r),oIo=t(D0e," \u2014 "),P$=n(D0e,"A",{href:!0});var FPt=s(P$);tIo=t(FPt,"TFAlbertForMaskedLM"),FPt.forEach(r),rIo=t(D0e," (ALBERT model)"),D0e.forEach(r),aIo=i(ee),Ob=n(ee,"LI",{});var G0e=s(Ob);Une=n(G0e,"STRONG",{});var MPt=s(Une);nIo=t(MPt,"bert"),MPt.forEach(r),sIo=t(G0e," \u2014 "),$$=n(G0e,"A",{href:!0});var EPt=s($$);lIo=t(EPt,"TFBertForMaskedLM"),EPt.forEach(r),iIo=t(G0e," (BERT model)"),G0e.forEach(r),dIo=i(ee),qb=n(ee,"LI",{});var O0e=s(qb);Jne=n(O0e,"STRONG",{});var CPt=s(Jne);mIo=t(CPt,"camembert"),CPt.forEach(r),fIo=t(O0e," \u2014 "),I$=n(O0e,"A",{href:!0});var yPt=s(I$);cIo=t(yPt,"TFCamembertForMaskedLM"),yPt.forEach(r),gIo=t(O0e," (CamemBERT model)"),O0e.forEach(r),hIo=i(ee),zb=n(ee,"LI",{});var q0e=s(zb);Kne=n(q0e,"STRONG",{});var wPt=s(Kne);uIo=t(wPt,"convbert"),wPt.forEach(r),pIo=t(q0e," \u2014 "),j$=n(q0e,"A",{href:!0});var APt=s(j$);_Io=t(APt,"TFConvBertForMaskedLM"),APt.forEach(r),vIo=t(q0e," (ConvBERT model)"),q0e.forEach(r),bIo=i(ee),Xb=n(ee,"LI",{});var z0e=s(Xb);Yne=n(z0e,"STRONG",{});var xPt=s(Yne);TIo=t(xPt,"deberta"),xPt.forEach(r),FIo=t(z0e," \u2014 "),N$=n(z0e,"A",{href:!0});var LPt=s(N$);MIo=t(LPt,"TFDebertaForMaskedLM"),LPt.forEach(r),EIo=t(z0e," (DeBERTa model)"),z0e.forEach(r),CIo=i(ee),Wb=n(ee,"LI",{});var X0e=s(Wb);Zne=n(X0e,"STRONG",{});var BPt=s(Zne);yIo=t(BPt,"deberta-v2"),BPt.forEach(r),wIo=t(X0e," \u2014 "),D$=n(X0e,"A",{href:!0});var kPt=s(D$);AIo=t(kPt,"TFDebertaV2ForMaskedLM"),kPt.forEach(r),xIo=t(X0e," (DeBERTa-v2 model)"),X0e.forEach(r),LIo=i(ee),Vb=n(ee,"LI",{});var W0e=s(Vb);ese=n(W0e,"STRONG",{});var RPt=s(ese);BIo=t(RPt,"distilbert"),RPt.forEach(r),kIo=t(W0e," \u2014 "),G$=n(W0e,"A",{href:!0});var SPt=s(G$);RIo=t(SPt,"TFDistilBertForMaskedLM"),SPt.forEach(r),SIo=t(W0e," (DistilBERT model)"),W0e.forEach(r),PIo=i(ee),Qb=n(ee,"LI",{});var V0e=s(Qb);ose=n(V0e,"STRONG",{});var PPt=s(ose);$Io=t(PPt,"electra"),PPt.forEach(r),IIo=t(V0e," \u2014 "),O$=n(V0e,"A",{href:!0});var $Pt=s(O$);jIo=t($Pt,"TFElectraForMaskedLM"),$Pt.forEach(r),NIo=t(V0e," (ELECTRA model)"),V0e.forEach(r),DIo=i(ee),Hb=n(ee,"LI",{});var Q0e=s(Hb);tse=n(Q0e,"STRONG",{});var IPt=s(tse);GIo=t(IPt,"flaubert"),IPt.forEach(r),OIo=t(Q0e," \u2014 "),q$=n(Q0e,"A",{href:!0});var 
jPt=s(q$);qIo=t(jPt,"TFFlaubertWithLMHeadModel"),jPt.forEach(r),zIo=t(Q0e," (FlauBERT model)"),Q0e.forEach(r),XIo=i(ee),Ub=n(ee,"LI",{});var H0e=s(Ub);rse=n(H0e,"STRONG",{});var NPt=s(rse);WIo=t(NPt,"funnel"),NPt.forEach(r),VIo=t(H0e," \u2014 "),z$=n(H0e,"A",{href:!0});var DPt=s(z$);QIo=t(DPt,"TFFunnelForMaskedLM"),DPt.forEach(r),HIo=t(H0e," (Funnel Transformer model)"),H0e.forEach(r),UIo=i(ee),Jb=n(ee,"LI",{});var U0e=s(Jb);ase=n(U0e,"STRONG",{});var GPt=s(ase);JIo=t(GPt,"layoutlm"),GPt.forEach(r),KIo=t(U0e," \u2014 "),X$=n(U0e,"A",{href:!0});var OPt=s(X$);YIo=t(OPt,"TFLayoutLMForMaskedLM"),OPt.forEach(r),ZIo=t(U0e," (LayoutLM model)"),U0e.forEach(r),ejo=i(ee),Kb=n(ee,"LI",{});var J0e=s(Kb);nse=n(J0e,"STRONG",{});var qPt=s(nse);ojo=t(qPt,"longformer"),qPt.forEach(r),tjo=t(J0e," \u2014 "),W$=n(J0e,"A",{href:!0});var zPt=s(W$);rjo=t(zPt,"TFLongformerForMaskedLM"),zPt.forEach(r),ajo=t(J0e," (Longformer model)"),J0e.forEach(r),njo=i(ee),Yb=n(ee,"LI",{});var K0e=s(Yb);sse=n(K0e,"STRONG",{});var XPt=s(sse);sjo=t(XPt,"mobilebert"),XPt.forEach(r),ljo=t(K0e," \u2014 "),V$=n(K0e,"A",{href:!0});var WPt=s(V$);ijo=t(WPt,"TFMobileBertForMaskedLM"),WPt.forEach(r),djo=t(K0e," (MobileBERT model)"),K0e.forEach(r),mjo=i(ee),Zb=n(ee,"LI",{});var Y0e=s(Zb);lse=n(Y0e,"STRONG",{});var VPt=s(lse);fjo=t(VPt,"mpnet"),VPt.forEach(r),cjo=t(Y0e," \u2014 "),Q$=n(Y0e,"A",{href:!0});var QPt=s(Q$);gjo=t(QPt,"TFMPNetForMaskedLM"),QPt.forEach(r),hjo=t(Y0e," (MPNet model)"),Y0e.forEach(r),ujo=i(ee),e4=n(ee,"LI",{});var Z0e=s(e4);ise=n(Z0e,"STRONG",{});var HPt=s(ise);pjo=t(HPt,"rembert"),HPt.forEach(r),_jo=t(Z0e," \u2014 "),H$=n(Z0e,"A",{href:!0});var UPt=s(H$);vjo=t(UPt,"TFRemBertForMaskedLM"),UPt.forEach(r),bjo=t(Z0e," (RemBERT model)"),Z0e.forEach(r),Tjo=i(ee),o4=n(ee,"LI",{});var eTe=s(o4);dse=n(eTe,"STRONG",{});var JPt=s(dse);Fjo=t(JPt,"roberta"),JPt.forEach(r),Mjo=t(eTe," \u2014 "),U$=n(eTe,"A",{href:!0});var KPt=s(U$);Ejo=t(KPt,"TFRobertaForMaskedLM"),KPt.forEach(r),Cjo=t(eTe," (RoBERTa model)"),eTe.forEach(r),yjo=i(ee),t4=n(ee,"LI",{});var oTe=s(t4);mse=n(oTe,"STRONG",{});var YPt=s(mse);wjo=t(YPt,"roformer"),YPt.forEach(r),Ajo=t(oTe," \u2014 "),J$=n(oTe,"A",{href:!0});var ZPt=s(J$);xjo=t(ZPt,"TFRoFormerForMaskedLM"),ZPt.forEach(r),Ljo=t(oTe," (RoFormer model)"),oTe.forEach(r),Bjo=i(ee),r4=n(ee,"LI",{});var tTe=s(r4);fse=n(tTe,"STRONG",{});var e$t=s(fse);kjo=t(e$t,"tapas"),e$t.forEach(r),Rjo=t(tTe," \u2014 "),K$=n(tTe,"A",{href:!0});var o$t=s(K$);Sjo=t(o$t,"TFTapasForMaskedLM"),o$t.forEach(r),Pjo=t(tTe," (TAPAS model)"),tTe.forEach(r),$jo=i(ee),a4=n(ee,"LI",{});var rTe=s(a4);cse=n(rTe,"STRONG",{});var t$t=s(cse);Ijo=t(t$t,"xlm"),t$t.forEach(r),jjo=t(rTe," \u2014 "),Y$=n(rTe,"A",{href:!0});var r$t=s(Y$);Njo=t(r$t,"TFXLMWithLMHeadModel"),r$t.forEach(r),Djo=t(rTe," (XLM model)"),rTe.forEach(r),Gjo=i(ee),n4=n(ee,"LI",{});var aTe=s(n4);gse=n(aTe,"STRONG",{});var a$t=s(gse);Ojo=t(a$t,"xlm-roberta"),a$t.forEach(r),qjo=t(aTe," \u2014 "),Z$=n(aTe,"A",{href:!0});var n$t=s(Z$);zjo=t(n$t,"TFXLMRobertaForMaskedLM"),n$t.forEach(r),Xjo=t(aTe," (XLM-RoBERTa model)"),aTe.forEach(r),ee.forEach(r),Wjo=i(Yr),hse=n(Yr,"P",{});var s$t=s(hse);Vjo=t(s$t,"Examples:"),s$t.forEach(r),Qjo=i(Yr),c(E3.$$.fragment,Yr),Yr.forEach(r),ll.forEach(r),xCe=i(d),Ld=n(d,"H2",{class:!0});var wye=s(Ld);s4=n(wye,"A",{id:!0,class:!0,href:!0});var l$t=s(s4);use=n(l$t,"SPAN",{});var i$t=s(use);c(C3.$$.fragment,i$t),i$t.forEach(r),l$t.forEach(r),Hjo=i(wye),pse=n(wye,"SPAN",{});var 
d$t=s(pse);Ujo=t(d$t,"TFAutoModelForSeq2SeqLM"),d$t.forEach(r),wye.forEach(r),LCe=i(d),st=n(d,"DIV",{class:!0});var dl=s(st);c(y3.$$.fragment,dl),Jjo=i(dl),Bd=n(dl,"P",{});var uG=s(Bd);Kjo=t(uG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence language modeling head) when created with the `),_se=n(uG,"CODE",{});var m$t=s(_se);Yjo=t(m$t,"from_pretrained()"),m$t.forEach(r),Zjo=t(uG,` class method or the `),vse=n(uG,"CODE",{});var f$t=s(vse);eNo=t(f$t,"from_config()"),f$t.forEach(r),oNo=t(uG," class method."),uG.forEach(r),tNo=i(dl),w3=n(dl,"P",{});var Aye=s(w3);rNo=t(Aye,"This class cannot be instantiated directly using "),bse=n(Aye,"CODE",{});var c$t=s(bse);aNo=t(c$t,"__init__()"),c$t.forEach(r),nNo=t(Aye," (throws an error)."),Aye.forEach(r),sNo=i(dl),Kt=n(dl,"DIV",{class:!0});var ml=s(Kt);c(A3.$$.fragment,ml),lNo=i(ml),Tse=n(ml,"P",{});var g$t=s(Tse);iNo=t(g$t,"Instantiates one of the model classes of the library (with a sequence-to-sequence language modeling head) from a configuration."),g$t.forEach(r),dNo=i(ml),kd=n(ml,"P",{});var pG=s(kd);mNo=t(pG,`Note: Loading a model from its configuration file does `),Fse=n(pG,"STRONG",{});var h$t=s(Fse);fNo=t(h$t,"not"),h$t.forEach(r),cNo=t(pG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Mse=n(pG,"EM",{});var u$t=s(Mse);gNo=t(u$t,"~TFAutoModelForSeq2SeqLM.from_pretrained"),u$t.forEach(r),hNo=t(pG,`] to load the model weights.`),pG.forEach(r),uNo=i(ml),Ese=n(ml,"P",{});var p$t=s(Ese);pNo=t(p$t,"Examples:"),p$t.forEach(r),_No=i(ml),c(x3.$$.fragment,ml),ml.forEach(r),vNo=i(dl),io=n(dl,"DIV",{class:!0});var Zr=s(io);c(L3.$$.fragment,Zr),bNo=i(Zr),Cse=n(Zr,"P",{});var _$t=s(Cse);TNo=t(_$t,"Instantiate one of the model classes of the library (with a sequence-to-sequence language modeling head) from a pretrained model."),_$t.forEach(r),FNo=i(Zr),Ha=n(Zr,"P",{});var hF=s(Ha);MNo=t(hF,"The model class to instantiate is selected based on the "),yse=n(hF,"EM",{});var v$t=s(yse);ENo=t(v$t,"model_type"),v$t.forEach(r),CNo=t(hF,` property of the config object (either passed as an argument or loaded from `),wse=n(hF,"EM",{});var b$t=s(wse);yNo=t(b$t,"pretrained_model_name_or_path"),b$t.forEach(r),wNo=t(hF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ase=n(hF,"EM",{});var T$t=s(Ase);ANo=t(T$t,"pretrained_model_name_or_path"),T$t.forEach(r),xNo=t(hF,":"),hF.forEach(r),LNo=i(Zr),ge=n(Zr,"UL",{});var be=s(ge);l4=n(be,"LI",{});var nTe=s(l4);xse=n(nTe,"STRONG",{});var F$t=s(xse);BNo=t(F$t,"bart"),F$t.forEach(r),kNo=t(nTe," \u2014 "),eI=n(nTe,"A",{href:!0});var M$t=s(eI);RNo=t(M$t,"TFBartForConditionalGeneration"),M$t.forEach(r),SNo=t(nTe," (BART model)"),nTe.forEach(r),PNo=i(be),i4=n(be,"LI",{});var sTe=s(i4);Lse=n(sTe,"STRONG",{});var E$t=s(Lse);$No=t(E$t,"blenderbot"),E$t.forEach(r),INo=t(sTe," \u2014 "),oI=n(sTe,"A",{href:!0});var C$t=s(oI);jNo=t(C$t,"TFBlenderbotForConditionalGeneration"),C$t.forEach(r),NNo=t(sTe," (Blenderbot model)"),sTe.forEach(r),DNo=i(be),d4=n(be,"LI",{});var lTe=s(d4);Bse=n(lTe,"STRONG",{});var y$t=s(Bse);GNo=t(y$t,"blenderbot-small"),y$t.forEach(r),ONo=t(lTe," \u2014 "),tI=n(lTe,"A",{href:!0});var w$t=s(tI);qNo=t(w$t,"TFBlenderbotSmallForConditionalGeneration"),w$t.forEach(r),zNo=t(lTe," (BlenderbotSmall model)"),lTe.forEach(r),XNo=i(be),m4=n(be,"LI",{});var iTe=s(m4);kse=n(iTe,"STRONG",{});var A$t=s(kse);WNo=t(A$t,"encoder-decoder"),A$t.forEach(r),VNo=t(iTe," \u2014 
"),rI=n(iTe,"A",{href:!0});var x$t=s(rI);QNo=t(x$t,"TFEncoderDecoderModel"),x$t.forEach(r),HNo=t(iTe," (Encoder decoder model)"),iTe.forEach(r),UNo=i(be),f4=n(be,"LI",{});var dTe=s(f4);Rse=n(dTe,"STRONG",{});var L$t=s(Rse);JNo=t(L$t,"led"),L$t.forEach(r),KNo=t(dTe," \u2014 "),aI=n(dTe,"A",{href:!0});var B$t=s(aI);YNo=t(B$t,"TFLEDForConditionalGeneration"),B$t.forEach(r),ZNo=t(dTe," (LED model)"),dTe.forEach(r),eDo=i(be),c4=n(be,"LI",{});var mTe=s(c4);Sse=n(mTe,"STRONG",{});var k$t=s(Sse);oDo=t(k$t,"marian"),k$t.forEach(r),tDo=t(mTe," \u2014 "),nI=n(mTe,"A",{href:!0});var R$t=s(nI);rDo=t(R$t,"TFMarianMTModel"),R$t.forEach(r),aDo=t(mTe," (Marian model)"),mTe.forEach(r),nDo=i(be),g4=n(be,"LI",{});var fTe=s(g4);Pse=n(fTe,"STRONG",{});var S$t=s(Pse);sDo=t(S$t,"mbart"),S$t.forEach(r),lDo=t(fTe," \u2014 "),sI=n(fTe,"A",{href:!0});var P$t=s(sI);iDo=t(P$t,"TFMBartForConditionalGeneration"),P$t.forEach(r),dDo=t(fTe," (mBART model)"),fTe.forEach(r),mDo=i(be),h4=n(be,"LI",{});var cTe=s(h4);$se=n(cTe,"STRONG",{});var $$t=s($se);fDo=t($$t,"mt5"),$$t.forEach(r),cDo=t(cTe," \u2014 "),lI=n(cTe,"A",{href:!0});var I$t=s(lI);gDo=t(I$t,"TFMT5ForConditionalGeneration"),I$t.forEach(r),hDo=t(cTe," (mT5 model)"),cTe.forEach(r),uDo=i(be),u4=n(be,"LI",{});var gTe=s(u4);Ise=n(gTe,"STRONG",{});var j$t=s(Ise);pDo=t(j$t,"pegasus"),j$t.forEach(r),_Do=t(gTe," \u2014 "),iI=n(gTe,"A",{href:!0});var N$t=s(iI);vDo=t(N$t,"TFPegasusForConditionalGeneration"),N$t.forEach(r),bDo=t(gTe," (Pegasus model)"),gTe.forEach(r),TDo=i(be),p4=n(be,"LI",{});var hTe=s(p4);jse=n(hTe,"STRONG",{});var D$t=s(jse);FDo=t(D$t,"t5"),D$t.forEach(r),MDo=t(hTe," \u2014 "),dI=n(hTe,"A",{href:!0});var G$t=s(dI);EDo=t(G$t,"TFT5ForConditionalGeneration"),G$t.forEach(r),CDo=t(hTe," (T5 model)"),hTe.forEach(r),be.forEach(r),yDo=i(Zr),Nse=n(Zr,"P",{});var O$t=s(Nse);wDo=t(O$t,"Examples:"),O$t.forEach(r),ADo=i(Zr),c(B3.$$.fragment,Zr),Zr.forEach(r),dl.forEach(r),BCe=i(d),Rd=n(d,"H2",{class:!0});var xye=s(Rd);_4=n(xye,"A",{id:!0,class:!0,href:!0});var q$t=s(_4);Dse=n(q$t,"SPAN",{});var z$t=s(Dse);c(k3.$$.fragment,z$t),z$t.forEach(r),q$t.forEach(r),xDo=i(xye),Gse=n(xye,"SPAN",{});var X$t=s(Gse);LDo=t(X$t,"TFAutoModelForSequenceClassification"),X$t.forEach(r),xye.forEach(r),kCe=i(d),lt=n(d,"DIV",{class:!0});var fl=s(lt);c(R3.$$.fragment,fl),BDo=i(fl),Sd=n(fl,"P",{});var _G=s(Sd);kDo=t(_G,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence classification head) when created with the `),Ose=n(_G,"CODE",{});var W$t=s(Ose);RDo=t(W$t,"from_pretrained()"),W$t.forEach(r),SDo=t(_G,` class method or the `),qse=n(_G,"CODE",{});var V$t=s(qse);PDo=t(V$t,"from_config()"),V$t.forEach(r),$Do=t(_G," class method."),_G.forEach(r),IDo=i(fl),S3=n(fl,"P",{});var Lye=s(S3);jDo=t(Lye,"This class cannot be instantiated directly using "),zse=n(Lye,"CODE",{});var Q$t=s(zse);NDo=t(Q$t,"__init__()"),Q$t.forEach(r),DDo=t(Lye," (throws an error)."),Lye.forEach(r),GDo=i(fl),Yt=n(fl,"DIV",{class:!0});var cl=s(Yt);c(P3.$$.fragment,cl),ODo=i(cl),Xse=n(cl,"P",{});var H$t=s(Xse);qDo=t(H$t,"Instantiates one of the model classes of the library (with a sequence classification head) from a configuration."),H$t.forEach(r),zDo=i(cl),Pd=n(cl,"P",{});var vG=s(Pd);XDo=t(vG,`Note: Loading a model from its configuration file does `),Wse=n(vG,"STRONG",{});var U$t=s(Wse);WDo=t(U$t,"not"),U$t.forEach(r),VDo=t(vG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Vse=n(vG,"EM",{});var J$t=s(Vse);QDo=t(J$t,"~TFAutoModelForSequenceClassification.from_pretrained"),J$t.forEach(r),HDo=t(vG,`] to load the model weights.`),vG.forEach(r),UDo=i(cl),Qse=n(cl,"P",{});var K$t=s(Qse);JDo=t(K$t,"Examples:"),K$t.forEach(r),KDo=i(cl),c($3.$$.fragment,cl),cl.forEach(r),YDo=i(fl),mo=n(fl,"DIV",{class:!0});var ea=s(mo);c(I3.$$.fragment,ea),ZDo=i(ea),Hse=n(ea,"P",{});var Y$t=s(Hse);eGo=t(Y$t,"Instantiate one of the model classes of the library (with a sequence classification head) from a pretrained model."),Y$t.forEach(r),oGo=i(ea),Ua=n(ea,"P",{});var uF=s(Ua);tGo=t(uF,"The model class to instantiate is selected based on the "),Use=n(uF,"EM",{});var Z$t=s(Use);rGo=t(Z$t,"model_type"),Z$t.forEach(r),aGo=t(uF,` property of the config object (either passed as an argument or loaded from `),Jse=n(uF,"EM",{});var eIt=s(Jse);nGo=t(eIt,"pretrained_model_name_or_path"),eIt.forEach(r),sGo=t(uF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Kse=n(uF,"EM",{});var oIt=s(Kse);lGo=t(oIt,"pretrained_model_name_or_path"),oIt.forEach(r),iGo=t(uF,":"),uF.forEach(r),dGo=i(ea),O=n(ea,"UL",{});var z=s(O);v4=n(z,"LI",{});var uTe=s(v4);Yse=n(uTe,"STRONG",{});var tIt=s(Yse);mGo=t(tIt,"albert"),tIt.forEach(r),fGo=t(uTe," \u2014 "),mI=n(uTe,"A",{href:!0});var rIt=s(mI);cGo=t(rIt,"TFAlbertForSequenceClassification"),rIt.forEach(r),gGo=t(uTe," (ALBERT model)"),uTe.forEach(r),hGo=i(z),b4=n(z,"LI",{});var pTe=s(b4);Zse=n(pTe,"STRONG",{});var aIt=s(Zse);uGo=t(aIt,"bert"),aIt.forEach(r),pGo=t(pTe," \u2014 "),fI=n(pTe,"A",{href:!0});var nIt=s(fI);_Go=t(nIt,"TFBertForSequenceClassification"),nIt.forEach(r),vGo=t(pTe," (BERT model)"),pTe.forEach(r),bGo=i(z),T4=n(z,"LI",{});var _Te=s(T4);ele=n(_Te,"STRONG",{});var sIt=s(ele);TGo=t(sIt,"camembert"),sIt.forEach(r),FGo=t(_Te," \u2014 "),cI=n(_Te,"A",{href:!0});var lIt=s(cI);MGo=t(lIt,"TFCamembertForSequenceClassification"),lIt.forEach(r),EGo=t(_Te," (CamemBERT model)"),_Te.forEach(r),CGo=i(z),F4=n(z,"LI",{});var vTe=s(F4);ole=n(vTe,"STRONG",{});var iIt=s(ole);yGo=t(iIt,"convbert"),iIt.forEach(r),wGo=t(vTe," \u2014 "),gI=n(vTe,"A",{href:!0});var dIt=s(gI);AGo=t(dIt,"TFConvBertForSequenceClassification"),dIt.forEach(r),xGo=t(vTe," (ConvBERT model)"),vTe.forEach(r),LGo=i(z),M4=n(z,"LI",{});var bTe=s(M4);tle=n(bTe,"STRONG",{});var mIt=s(tle);BGo=t(mIt,"ctrl"),mIt.forEach(r),kGo=t(bTe," \u2014 "),hI=n(bTe,"A",{href:!0});var fIt=s(hI);RGo=t(fIt,"TFCTRLForSequenceClassification"),fIt.forEach(r),SGo=t(bTe," (CTRL model)"),bTe.forEach(r),PGo=i(z),E4=n(z,"LI",{});var TTe=s(E4);rle=n(TTe,"STRONG",{});var cIt=s(rle);$Go=t(cIt,"deberta"),cIt.forEach(r),IGo=t(TTe," \u2014 "),uI=n(TTe,"A",{href:!0});var gIt=s(uI);jGo=t(gIt,"TFDebertaForSequenceClassification"),gIt.forEach(r),NGo=t(TTe," (DeBERTa model)"),TTe.forEach(r),DGo=i(z),C4=n(z,"LI",{});var FTe=s(C4);ale=n(FTe,"STRONG",{});var hIt=s(ale);GGo=t(hIt,"deberta-v2"),hIt.forEach(r),OGo=t(FTe," \u2014 "),pI=n(FTe,"A",{href:!0});var uIt=s(pI);qGo=t(uIt,"TFDebertaV2ForSequenceClassification"),uIt.forEach(r),zGo=t(FTe," (DeBERTa-v2 model)"),FTe.forEach(r),XGo=i(z),y4=n(z,"LI",{});var MTe=s(y4);nle=n(MTe,"STRONG",{});var pIt=s(nle);WGo=t(pIt,"distilbert"),pIt.forEach(r),VGo=t(MTe," \u2014 "),_I=n(MTe,"A",{href:!0});var _It=s(_I);QGo=t(_It,"TFDistilBertForSequenceClassification"),_It.forEach(r),HGo=t(MTe," (DistilBERT model)"),MTe.forEach(r),UGo=i(z),w4=n(z,"LI",{});var ETe=s(w4);sle=n(ETe,"STRONG",{});var vIt=s(sle);JGo=t(vIt,"electra"),vIt.forEach(r),KGo=t(ETe," \u2014 
"),vI=n(ETe,"A",{href:!0});var bIt=s(vI);YGo=t(bIt,"TFElectraForSequenceClassification"),bIt.forEach(r),ZGo=t(ETe," (ELECTRA model)"),ETe.forEach(r),eOo=i(z),A4=n(z,"LI",{});var CTe=s(A4);lle=n(CTe,"STRONG",{});var TIt=s(lle);oOo=t(TIt,"flaubert"),TIt.forEach(r),tOo=t(CTe," \u2014 "),bI=n(CTe,"A",{href:!0});var FIt=s(bI);rOo=t(FIt,"TFFlaubertForSequenceClassification"),FIt.forEach(r),aOo=t(CTe," (FlauBERT model)"),CTe.forEach(r),nOo=i(z),x4=n(z,"LI",{});var yTe=s(x4);ile=n(yTe,"STRONG",{});var MIt=s(ile);sOo=t(MIt,"funnel"),MIt.forEach(r),lOo=t(yTe," \u2014 "),TI=n(yTe,"A",{href:!0});var EIt=s(TI);iOo=t(EIt,"TFFunnelForSequenceClassification"),EIt.forEach(r),dOo=t(yTe," (Funnel Transformer model)"),yTe.forEach(r),mOo=i(z),L4=n(z,"LI",{});var wTe=s(L4);dle=n(wTe,"STRONG",{});var CIt=s(dle);fOo=t(CIt,"gpt2"),CIt.forEach(r),cOo=t(wTe," \u2014 "),FI=n(wTe,"A",{href:!0});var yIt=s(FI);gOo=t(yIt,"TFGPT2ForSequenceClassification"),yIt.forEach(r),hOo=t(wTe," (OpenAI GPT-2 model)"),wTe.forEach(r),uOo=i(z),B4=n(z,"LI",{});var ATe=s(B4);mle=n(ATe,"STRONG",{});var wIt=s(mle);pOo=t(wIt,"layoutlm"),wIt.forEach(r),_Oo=t(ATe," \u2014 "),MI=n(ATe,"A",{href:!0});var AIt=s(MI);vOo=t(AIt,"TFLayoutLMForSequenceClassification"),AIt.forEach(r),bOo=t(ATe," (LayoutLM model)"),ATe.forEach(r),TOo=i(z),k4=n(z,"LI",{});var xTe=s(k4);fle=n(xTe,"STRONG",{});var xIt=s(fle);FOo=t(xIt,"longformer"),xIt.forEach(r),MOo=t(xTe," \u2014 "),EI=n(xTe,"A",{href:!0});var LIt=s(EI);EOo=t(LIt,"TFLongformerForSequenceClassification"),LIt.forEach(r),COo=t(xTe," (Longformer model)"),xTe.forEach(r),yOo=i(z),R4=n(z,"LI",{});var LTe=s(R4);cle=n(LTe,"STRONG",{});var BIt=s(cle);wOo=t(BIt,"mobilebert"),BIt.forEach(r),AOo=t(LTe," \u2014 "),CI=n(LTe,"A",{href:!0});var kIt=s(CI);xOo=t(kIt,"TFMobileBertForSequenceClassification"),kIt.forEach(r),LOo=t(LTe," (MobileBERT model)"),LTe.forEach(r),BOo=i(z),S4=n(z,"LI",{});var BTe=s(S4);gle=n(BTe,"STRONG",{});var RIt=s(gle);kOo=t(RIt,"mpnet"),RIt.forEach(r),ROo=t(BTe," \u2014 "),yI=n(BTe,"A",{href:!0});var SIt=s(yI);SOo=t(SIt,"TFMPNetForSequenceClassification"),SIt.forEach(r),POo=t(BTe," (MPNet model)"),BTe.forEach(r),$Oo=i(z),P4=n(z,"LI",{});var kTe=s(P4);hle=n(kTe,"STRONG",{});var PIt=s(hle);IOo=t(PIt,"openai-gpt"),PIt.forEach(r),jOo=t(kTe," \u2014 "),wI=n(kTe,"A",{href:!0});var $It=s(wI);NOo=t($It,"TFOpenAIGPTForSequenceClassification"),$It.forEach(r),DOo=t(kTe," (OpenAI GPT model)"),kTe.forEach(r),GOo=i(z),$4=n(z,"LI",{});var RTe=s($4);ule=n(RTe,"STRONG",{});var IIt=s(ule);OOo=t(IIt,"rembert"),IIt.forEach(r),qOo=t(RTe," \u2014 "),AI=n(RTe,"A",{href:!0});var jIt=s(AI);zOo=t(jIt,"TFRemBertForSequenceClassification"),jIt.forEach(r),XOo=t(RTe," (RemBERT model)"),RTe.forEach(r),WOo=i(z),I4=n(z,"LI",{});var STe=s(I4);ple=n(STe,"STRONG",{});var NIt=s(ple);VOo=t(NIt,"roberta"),NIt.forEach(r),QOo=t(STe," \u2014 "),xI=n(STe,"A",{href:!0});var DIt=s(xI);HOo=t(DIt,"TFRobertaForSequenceClassification"),DIt.forEach(r),UOo=t(STe," (RoBERTa model)"),STe.forEach(r),JOo=i(z),j4=n(z,"LI",{});var PTe=s(j4);_le=n(PTe,"STRONG",{});var GIt=s(_le);KOo=t(GIt,"roformer"),GIt.forEach(r),YOo=t(PTe," \u2014 "),LI=n(PTe,"A",{href:!0});var OIt=s(LI);ZOo=t(OIt,"TFRoFormerForSequenceClassification"),OIt.forEach(r),eqo=t(PTe," (RoFormer model)"),PTe.forEach(r),oqo=i(z),N4=n(z,"LI",{});var $Te=s(N4);vle=n($Te,"STRONG",{});var qIt=s(vle);tqo=t(qIt,"tapas"),qIt.forEach(r),rqo=t($Te," \u2014 "),BI=n($Te,"A",{href:!0});var zIt=s(BI);aqo=t(zIt,"TFTapasForSequenceClassification"),zIt.forEach(r),nqo=t($Te," (TAPAS 
model)"),$Te.forEach(r),sqo=i(z),D4=n(z,"LI",{});var ITe=s(D4);ble=n(ITe,"STRONG",{});var XIt=s(ble);lqo=t(XIt,"transfo-xl"),XIt.forEach(r),iqo=t(ITe," \u2014 "),kI=n(ITe,"A",{href:!0});var WIt=s(kI);dqo=t(WIt,"TFTransfoXLForSequenceClassification"),WIt.forEach(r),mqo=t(ITe," (Transformer-XL model)"),ITe.forEach(r),fqo=i(z),G4=n(z,"LI",{});var jTe=s(G4);Tle=n(jTe,"STRONG",{});var VIt=s(Tle);cqo=t(VIt,"xlm"),VIt.forEach(r),gqo=t(jTe," \u2014 "),RI=n(jTe,"A",{href:!0});var QIt=s(RI);hqo=t(QIt,"TFXLMForSequenceClassification"),QIt.forEach(r),uqo=t(jTe," (XLM model)"),jTe.forEach(r),pqo=i(z),O4=n(z,"LI",{});var NTe=s(O4);Fle=n(NTe,"STRONG",{});var HIt=s(Fle);_qo=t(HIt,"xlm-roberta"),HIt.forEach(r),vqo=t(NTe," \u2014 "),SI=n(NTe,"A",{href:!0});var UIt=s(SI);bqo=t(UIt,"TFXLMRobertaForSequenceClassification"),UIt.forEach(r),Tqo=t(NTe," (XLM-RoBERTa model)"),NTe.forEach(r),Fqo=i(z),q4=n(z,"LI",{});var DTe=s(q4);Mle=n(DTe,"STRONG",{});var JIt=s(Mle);Mqo=t(JIt,"xlnet"),JIt.forEach(r),Eqo=t(DTe," \u2014 "),PI=n(DTe,"A",{href:!0});var KIt=s(PI);Cqo=t(KIt,"TFXLNetForSequenceClassification"),KIt.forEach(r),yqo=t(DTe," (XLNet model)"),DTe.forEach(r),z.forEach(r),wqo=i(ea),Ele=n(ea,"P",{});var YIt=s(Ele);Aqo=t(YIt,"Examples:"),YIt.forEach(r),xqo=i(ea),c(j3.$$.fragment,ea),ea.forEach(r),fl.forEach(r),RCe=i(d),$d=n(d,"H2",{class:!0});var Bye=s($d);z4=n(Bye,"A",{id:!0,class:!0,href:!0});var ZIt=s(z4);Cle=n(ZIt,"SPAN",{});var ejt=s(Cle);c(N3.$$.fragment,ejt),ejt.forEach(r),ZIt.forEach(r),Lqo=i(Bye),yle=n(Bye,"SPAN",{});var ojt=s(yle);Bqo=t(ojt,"TFAutoModelForMultipleChoice"),ojt.forEach(r),Bye.forEach(r),SCe=i(d),it=n(d,"DIV",{class:!0});var gl=s(it);c(D3.$$.fragment,gl),kqo=i(gl),Id=n(gl,"P",{});var bG=s(Id);Rqo=t(bG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a multiple choice head) when created with the `),wle=n(bG,"CODE",{});var tjt=s(wle);Sqo=t(tjt,"from_pretrained()"),tjt.forEach(r),Pqo=t(bG,` class method or the `),Ale=n(bG,"CODE",{});var rjt=s(Ale);$qo=t(rjt,"from_config()"),rjt.forEach(r),Iqo=t(bG," class method."),bG.forEach(r),jqo=i(gl),G3=n(gl,"P",{});var kye=s(G3);Nqo=t(kye,"This class cannot be instantiated directly using "),xle=n(kye,"CODE",{});var ajt=s(xle);Dqo=t(ajt,"__init__()"),ajt.forEach(r),Gqo=t(kye," (throws an error)."),kye.forEach(r),Oqo=i(gl),Zt=n(gl,"DIV",{class:!0});var hl=s(Zt);c(O3.$$.fragment,hl),qqo=i(hl),Lle=n(hl,"P",{});var njt=s(Lle);zqo=t(njt,"Instantiates one of the model classes of the library (with a multiple choice head) from a configuration."),njt.forEach(r),Xqo=i(hl),jd=n(hl,"P",{});var TG=s(jd);Wqo=t(TG,`Note: Loading a model from its configuration file does `),Ble=n(TG,"STRONG",{});var sjt=s(Ble);Vqo=t(sjt,"not"),sjt.forEach(r),Qqo=t(TG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),kle=n(TG,"EM",{});var ljt=s(kle);Hqo=t(ljt,"~TFAutoModelForMultipleChoice.from_pretrained"),ljt.forEach(r),Uqo=t(TG,`] to load the model weights.`),TG.forEach(r),Jqo=i(hl),Rle=n(hl,"P",{});var ijt=s(Rle);Kqo=t(ijt,"Examples:"),ijt.forEach(r),Yqo=i(hl),c(q3.$$.fragment,hl),hl.forEach(r),Zqo=i(gl),fo=n(gl,"DIV",{class:!0});var oa=s(fo);c(z3.$$.fragment,oa),ezo=i(oa),Sle=n(oa,"P",{});var djt=s(Sle);ozo=t(djt,"Instantiate one of the model classes of the library (with a multiple choice head) from a pretrained model."),djt.forEach(r),tzo=i(oa),Ja=n(oa,"P",{});var pF=s(Ja);rzo=t(pF,"The model class to instantiate is selected based on the "),Ple=n(pF,"EM",{});var mjt=s(Ple);azo=t(mjt,"model_type"),mjt.forEach(r),nzo=t(pF,` property of the config object (either passed as an argument or loaded from `),$le=n(pF,"EM",{});var fjt=s($le);szo=t(fjt,"pretrained_model_name_or_path"),fjt.forEach(r),lzo=t(pF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ile=n(pF,"EM",{});var cjt=s(Ile);izo=t(cjt,"pretrained_model_name_or_path"),cjt.forEach(r),dzo=t(pF,":"),pF.forEach(r),mzo=i(oa),re=n(oa,"UL",{});var ae=s(re);X4=n(ae,"LI",{});var GTe=s(X4);jle=n(GTe,"STRONG",{});var gjt=s(jle);fzo=t(gjt,"albert"),gjt.forEach(r),czo=t(GTe," \u2014 "),$I=n(GTe,"A",{href:!0});var hjt=s($I);gzo=t(hjt,"TFAlbertForMultipleChoice"),hjt.forEach(r),hzo=t(GTe," (ALBERT model)"),GTe.forEach(r),uzo=i(ae),W4=n(ae,"LI",{});var OTe=s(W4);Nle=n(OTe,"STRONG",{});var ujt=s(Nle);pzo=t(ujt,"bert"),ujt.forEach(r),_zo=t(OTe," \u2014 "),II=n(OTe,"A",{href:!0});var pjt=s(II);vzo=t(pjt,"TFBertForMultipleChoice"),pjt.forEach(r),bzo=t(OTe," (BERT model)"),OTe.forEach(r),Tzo=i(ae),V4=n(ae,"LI",{});var qTe=s(V4);Dle=n(qTe,"STRONG",{});var _jt=s(Dle);Fzo=t(_jt,"camembert"),_jt.forEach(r),Mzo=t(qTe," \u2014 "),jI=n(qTe,"A",{href:!0});var vjt=s(jI);Ezo=t(vjt,"TFCamembertForMultipleChoice"),vjt.forEach(r),Czo=t(qTe," (CamemBERT model)"),qTe.forEach(r),yzo=i(ae),Q4=n(ae,"LI",{});var zTe=s(Q4);Gle=n(zTe,"STRONG",{});var bjt=s(Gle);wzo=t(bjt,"convbert"),bjt.forEach(r),Azo=t(zTe," \u2014 "),NI=n(zTe,"A",{href:!0});var Tjt=s(NI);xzo=t(Tjt,"TFConvBertForMultipleChoice"),Tjt.forEach(r),Lzo=t(zTe," (ConvBERT model)"),zTe.forEach(r),Bzo=i(ae),H4=n(ae,"LI",{});var XTe=s(H4);Ole=n(XTe,"STRONG",{});var Fjt=s(Ole);kzo=t(Fjt,"distilbert"),Fjt.forEach(r),Rzo=t(XTe," \u2014 "),DI=n(XTe,"A",{href:!0});var Mjt=s(DI);Szo=t(Mjt,"TFDistilBertForMultipleChoice"),Mjt.forEach(r),Pzo=t(XTe," (DistilBERT model)"),XTe.forEach(r),$zo=i(ae),U4=n(ae,"LI",{});var WTe=s(U4);qle=n(WTe,"STRONG",{});var Ejt=s(qle);Izo=t(Ejt,"electra"),Ejt.forEach(r),jzo=t(WTe," \u2014 "),GI=n(WTe,"A",{href:!0});var Cjt=s(GI);Nzo=t(Cjt,"TFElectraForMultipleChoice"),Cjt.forEach(r),Dzo=t(WTe," (ELECTRA model)"),WTe.forEach(r),Gzo=i(ae),J4=n(ae,"LI",{});var VTe=s(J4);zle=n(VTe,"STRONG",{});var yjt=s(zle);Ozo=t(yjt,"flaubert"),yjt.forEach(r),qzo=t(VTe," \u2014 "),OI=n(VTe,"A",{href:!0});var wjt=s(OI);zzo=t(wjt,"TFFlaubertForMultipleChoice"),wjt.forEach(r),Xzo=t(VTe," (FlauBERT model)"),VTe.forEach(r),Wzo=i(ae),K4=n(ae,"LI",{});var QTe=s(K4);Xle=n(QTe,"STRONG",{});var Ajt=s(Xle);Vzo=t(Ajt,"funnel"),Ajt.forEach(r),Qzo=t(QTe," \u2014 "),qI=n(QTe,"A",{href:!0});var xjt=s(qI);Hzo=t(xjt,"TFFunnelForMultipleChoice"),xjt.forEach(r),Uzo=t(QTe," (Funnel Transformer model)"),QTe.forEach(r),Jzo=i(ae),Y4=n(ae,"LI",{});var HTe=s(Y4);Wle=n(HTe,"STRONG",{});var Ljt=s(Wle);Kzo=t(Ljt,"longformer"),Ljt.forEach(r),Yzo=t(HTe," \u2014 "),zI=n(HTe,"A",{href:!0});var 
Bjt=s(zI);Zzo=t(Bjt,"TFLongformerForMultipleChoice"),Bjt.forEach(r),eXo=t(HTe," (Longformer model)"),HTe.forEach(r),oXo=i(ae),Z4=n(ae,"LI",{});var UTe=s(Z4);Vle=n(UTe,"STRONG",{});var kjt=s(Vle);tXo=t(kjt,"mobilebert"),kjt.forEach(r),rXo=t(UTe," \u2014 "),XI=n(UTe,"A",{href:!0});var Rjt=s(XI);aXo=t(Rjt,"TFMobileBertForMultipleChoice"),Rjt.forEach(r),nXo=t(UTe," (MobileBERT model)"),UTe.forEach(r),sXo=i(ae),e5=n(ae,"LI",{});var JTe=s(e5);Qle=n(JTe,"STRONG",{});var Sjt=s(Qle);lXo=t(Sjt,"mpnet"),Sjt.forEach(r),iXo=t(JTe," \u2014 "),WI=n(JTe,"A",{href:!0});var Pjt=s(WI);dXo=t(Pjt,"TFMPNetForMultipleChoice"),Pjt.forEach(r),mXo=t(JTe," (MPNet model)"),JTe.forEach(r),fXo=i(ae),o5=n(ae,"LI",{});var KTe=s(o5);Hle=n(KTe,"STRONG",{});var $jt=s(Hle);cXo=t($jt,"rembert"),$jt.forEach(r),gXo=t(KTe," \u2014 "),VI=n(KTe,"A",{href:!0});var Ijt=s(VI);hXo=t(Ijt,"TFRemBertForMultipleChoice"),Ijt.forEach(r),uXo=t(KTe," (RemBERT model)"),KTe.forEach(r),pXo=i(ae),t5=n(ae,"LI",{});var YTe=s(t5);Ule=n(YTe,"STRONG",{});var jjt=s(Ule);_Xo=t(jjt,"roberta"),jjt.forEach(r),vXo=t(YTe," \u2014 "),QI=n(YTe,"A",{href:!0});var Njt=s(QI);bXo=t(Njt,"TFRobertaForMultipleChoice"),Njt.forEach(r),TXo=t(YTe," (RoBERTa model)"),YTe.forEach(r),FXo=i(ae),r5=n(ae,"LI",{});var ZTe=s(r5);Jle=n(ZTe,"STRONG",{});var Djt=s(Jle);MXo=t(Djt,"roformer"),Djt.forEach(r),EXo=t(ZTe," \u2014 "),HI=n(ZTe,"A",{href:!0});var Gjt=s(HI);CXo=t(Gjt,"TFRoFormerForMultipleChoice"),Gjt.forEach(r),yXo=t(ZTe," (RoFormer model)"),ZTe.forEach(r),wXo=i(ae),a5=n(ae,"LI",{});var eFe=s(a5);Kle=n(eFe,"STRONG",{});var Ojt=s(Kle);AXo=t(Ojt,"xlm"),Ojt.forEach(r),xXo=t(eFe," \u2014 "),UI=n(eFe,"A",{href:!0});var qjt=s(UI);LXo=t(qjt,"TFXLMForMultipleChoice"),qjt.forEach(r),BXo=t(eFe," (XLM model)"),eFe.forEach(r),kXo=i(ae),n5=n(ae,"LI",{});var oFe=s(n5);Yle=n(oFe,"STRONG",{});var zjt=s(Yle);RXo=t(zjt,"xlm-roberta"),zjt.forEach(r),SXo=t(oFe," \u2014 "),JI=n(oFe,"A",{href:!0});var Xjt=s(JI);PXo=t(Xjt,"TFXLMRobertaForMultipleChoice"),Xjt.forEach(r),$Xo=t(oFe," (XLM-RoBERTa model)"),oFe.forEach(r),IXo=i(ae),s5=n(ae,"LI",{});var tFe=s(s5);Zle=n(tFe,"STRONG",{});var Wjt=s(Zle);jXo=t(Wjt,"xlnet"),Wjt.forEach(r),NXo=t(tFe," \u2014 "),KI=n(tFe,"A",{href:!0});var Vjt=s(KI);DXo=t(Vjt,"TFXLNetForMultipleChoice"),Vjt.forEach(r),GXo=t(tFe," (XLNet model)"),tFe.forEach(r),ae.forEach(r),OXo=i(oa),eie=n(oa,"P",{});var Qjt=s(eie);qXo=t(Qjt,"Examples:"),Qjt.forEach(r),zXo=i(oa),c(X3.$$.fragment,oa),oa.forEach(r),gl.forEach(r),PCe=i(d),Nd=n(d,"H2",{class:!0});var Rye=s(Nd);l5=n(Rye,"A",{id:!0,class:!0,href:!0});var Hjt=s(l5);oie=n(Hjt,"SPAN",{});var Ujt=s(oie);c(W3.$$.fragment,Ujt),Ujt.forEach(r),Hjt.forEach(r),XXo=i(Rye),tie=n(Rye,"SPAN",{});var Jjt=s(tie);WXo=t(Jjt,"TFAutoModelForTableQuestionAnswering"),Jjt.forEach(r),Rye.forEach(r),$Ce=i(d),dt=n(d,"DIV",{class:!0});var ul=s(dt);c(V3.$$.fragment,ul),VXo=i(ul),Dd=n(ul,"P",{});var FG=s(Dd);QXo=t(FG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a table question answering head) when created with the `),rie=n(FG,"CODE",{});var Kjt=s(rie);HXo=t(Kjt,"from_pretrained()"),Kjt.forEach(r),UXo=t(FG,` class method or the `),aie=n(FG,"CODE",{});var Yjt=s(aie);JXo=t(Yjt,"from_config()"),Yjt.forEach(r),KXo=t(FG," class method."),FG.forEach(r),YXo=i(ul),Q3=n(ul,"P",{});var Sye=s(Q3);ZXo=t(Sye,"This class cannot be instantiated directly using "),nie=n(Sye,"CODE",{});var Zjt=s(nie);eWo=t(Zjt,"__init__()"),Zjt.forEach(r),oWo=t(Sye," (throws an 
error)."),Sye.forEach(r),tWo=i(ul),er=n(ul,"DIV",{class:!0});var pl=s(er);c(H3.$$.fragment,pl),rWo=i(pl),sie=n(pl,"P",{});var eNt=s(sie);aWo=t(eNt,"Instantiates one of the model classes of the library (with a table question answering head) from a configuration."),eNt.forEach(r),nWo=i(pl),Gd=n(pl,"P",{});var MG=s(Gd);sWo=t(MG,`Note: Loading a model from its configuration file does `),lie=n(MG,"STRONG",{});var oNt=s(lie);lWo=t(oNt,"not"),oNt.forEach(r),iWo=t(MG,` load the model weights. It only affects the model\u2019s configuration. Use [`),iie=n(MG,"EM",{});var tNt=s(iie);dWo=t(tNt,"~TFAutoModelForTableQuestionAnswering.from_pretrained"),tNt.forEach(r),mWo=t(MG,`] to load the model weights.`),MG.forEach(r),fWo=i(pl),die=n(pl,"P",{});var rNt=s(die);cWo=t(rNt,"Examples:"),rNt.forEach(r),gWo=i(pl),c(U3.$$.fragment,pl),pl.forEach(r),hWo=i(ul),co=n(ul,"DIV",{class:!0});var ta=s(co);c(J3.$$.fragment,ta),uWo=i(ta),mie=n(ta,"P",{});var aNt=s(mie);pWo=t(aNt,"Instantiate one of the model classes of the library (with a table question answering head) from a pretrained model."),aNt.forEach(r),_Wo=i(ta),Ka=n(ta,"P",{});var _F=s(Ka);vWo=t(_F,"The model class to instantiate is selected based on the "),fie=n(_F,"EM",{});var nNt=s(fie);bWo=t(nNt,"model_type"),nNt.forEach(r),TWo=t(_F,` property of the config object (either passed as an argument or loaded from `),cie=n(_F,"EM",{});var sNt=s(cie);FWo=t(sNt,"pretrained_model_name_or_path"),sNt.forEach(r),MWo=t(_F,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),gie=n(_F,"EM",{});var lNt=s(gie);EWo=t(lNt,"pretrained_model_name_or_path"),lNt.forEach(r),CWo=t(_F,":"),_F.forEach(r),yWo=i(ta),hie=n(ta,"UL",{});var iNt=s(hie);i5=n(iNt,"LI",{});var rFe=s(i5);uie=n(rFe,"STRONG",{});var dNt=s(uie);wWo=t(dNt,"tapas"),dNt.forEach(r),AWo=t(rFe," \u2014 "),YI=n(rFe,"A",{href:!0});var mNt=s(YI);xWo=t(mNt,"TFTapasForQuestionAnswering"),mNt.forEach(r),LWo=t(rFe," (TAPAS model)"),rFe.forEach(r),iNt.forEach(r),BWo=i(ta),pie=n(ta,"P",{});var fNt=s(pie);kWo=t(fNt,"Examples:"),fNt.forEach(r),RWo=i(ta),c(K3.$$.fragment,ta),ta.forEach(r),ul.forEach(r),ICe=i(d),Od=n(d,"H2",{class:!0});var Pye=s(Od);d5=n(Pye,"A",{id:!0,class:!0,href:!0});var cNt=s(d5);_ie=n(cNt,"SPAN",{});var gNt=s(_ie);c(Y3.$$.fragment,gNt),gNt.forEach(r),cNt.forEach(r),SWo=i(Pye),vie=n(Pye,"SPAN",{});var hNt=s(vie);PWo=t(hNt,"TFAutoModelForTokenClassification"),hNt.forEach(r),Pye.forEach(r),jCe=i(d),mt=n(d,"DIV",{class:!0});var _l=s(mt);c(Z3.$$.fragment,_l),$Wo=i(_l),qd=n(_l,"P",{});var EG=s(qd);IWo=t(EG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a token classification head) when created with the `),bie=n(EG,"CODE",{});var uNt=s(bie);jWo=t(uNt,"from_pretrained()"),uNt.forEach(r),NWo=t(EG,` class method or the `),Tie=n(EG,"CODE",{});var pNt=s(Tie);DWo=t(pNt,"from_config()"),pNt.forEach(r),GWo=t(EG," class method."),EG.forEach(r),OWo=i(_l),ey=n(_l,"P",{});var $ye=s(ey);qWo=t($ye,"This class cannot be instantiated directly using "),Fie=n($ye,"CODE",{});var _Nt=s(Fie);zWo=t(_Nt,"__init__()"),_Nt.forEach(r),XWo=t($ye," (throws an error)."),$ye.forEach(r),WWo=i(_l),or=n(_l,"DIV",{class:!0});var vl=s(or);c(oy.$$.fragment,vl),VWo=i(vl),Mie=n(vl,"P",{});var vNt=s(Mie);QWo=t(vNt,"Instantiates one of the model classes of the library (with a token classification head) from a configuration."),vNt.forEach(r),HWo=i(vl),zd=n(vl,"P",{});var CG=s(zd);UWo=t(CG,`Note: Loading a model from its configuration file does 
`),Eie=n(CG,"STRONG",{});var bNt=s(Eie);JWo=t(bNt,"not"),bNt.forEach(r),KWo=t(CG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Cie=n(CG,"EM",{});var TNt=s(Cie);YWo=t(TNt,"~TFAutoModelForTokenClassification.from_pretrained"),TNt.forEach(r),ZWo=t(CG,`] to load the model weights.`),CG.forEach(r),eVo=i(vl),yie=n(vl,"P",{});var FNt=s(yie);oVo=t(FNt,"Examples:"),FNt.forEach(r),tVo=i(vl),c(ty.$$.fragment,vl),vl.forEach(r),rVo=i(_l),go=n(_l,"DIV",{class:!0});var ra=s(go);c(ry.$$.fragment,ra),aVo=i(ra),wie=n(ra,"P",{});var MNt=s(wie);nVo=t(MNt,"Instantiate one of the model classes of the library (with a token classification head) from a pretrained model."),MNt.forEach(r),sVo=i(ra),Ya=n(ra,"P",{});var vF=s(Ya);lVo=t(vF,"The model class to instantiate is selected based on the "),Aie=n(vF,"EM",{});var ENt=s(Aie);iVo=t(ENt,"model_type"),ENt.forEach(r),dVo=t(vF,` property of the config object (either passed as an argument or loaded from `),xie=n(vF,"EM",{});var CNt=s(xie);mVo=t(CNt,"pretrained_model_name_or_path"),CNt.forEach(r),fVo=t(vF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Lie=n(vF,"EM",{});var yNt=s(Lie);cVo=t(yNt,"pretrained_model_name_or_path"),yNt.forEach(r),gVo=t(vF,":"),vF.forEach(r),hVo=i(ra),Y=n(ra,"UL",{});var oe=s(Y);m5=n(oe,"LI",{});var aFe=s(m5);Bie=n(aFe,"STRONG",{});var wNt=s(Bie);uVo=t(wNt,"albert"),wNt.forEach(r),pVo=t(aFe," \u2014 "),ZI=n(aFe,"A",{href:!0});var ANt=s(ZI);_Vo=t(ANt,"TFAlbertForTokenClassification"),ANt.forEach(r),vVo=t(aFe," (ALBERT model)"),aFe.forEach(r),bVo=i(oe),f5=n(oe,"LI",{});var nFe=s(f5);kie=n(nFe,"STRONG",{});var xNt=s(kie);TVo=t(xNt,"bert"),xNt.forEach(r),FVo=t(nFe," \u2014 "),ej=n(nFe,"A",{href:!0});var LNt=s(ej);MVo=t(LNt,"TFBertForTokenClassification"),LNt.forEach(r),EVo=t(nFe," (BERT model)"),nFe.forEach(r),CVo=i(oe),c5=n(oe,"LI",{});var sFe=s(c5);Rie=n(sFe,"STRONG",{});var BNt=s(Rie);yVo=t(BNt,"camembert"),BNt.forEach(r),wVo=t(sFe," \u2014 "),oj=n(sFe,"A",{href:!0});var kNt=s(oj);AVo=t(kNt,"TFCamembertForTokenClassification"),kNt.forEach(r),xVo=t(sFe," (CamemBERT model)"),sFe.forEach(r),LVo=i(oe),g5=n(oe,"LI",{});var lFe=s(g5);Sie=n(lFe,"STRONG",{});var RNt=s(Sie);BVo=t(RNt,"convbert"),RNt.forEach(r),kVo=t(lFe," \u2014 "),tj=n(lFe,"A",{href:!0});var SNt=s(tj);RVo=t(SNt,"TFConvBertForTokenClassification"),SNt.forEach(r),SVo=t(lFe," (ConvBERT model)"),lFe.forEach(r),PVo=i(oe),h5=n(oe,"LI",{});var iFe=s(h5);Pie=n(iFe,"STRONG",{});var PNt=s(Pie);$Vo=t(PNt,"deberta"),PNt.forEach(r),IVo=t(iFe," \u2014 "),rj=n(iFe,"A",{href:!0});var $Nt=s(rj);jVo=t($Nt,"TFDebertaForTokenClassification"),$Nt.forEach(r),NVo=t(iFe," (DeBERTa model)"),iFe.forEach(r),DVo=i(oe),u5=n(oe,"LI",{});var dFe=s(u5);$ie=n(dFe,"STRONG",{});var INt=s($ie);GVo=t(INt,"deberta-v2"),INt.forEach(r),OVo=t(dFe," \u2014 "),aj=n(dFe,"A",{href:!0});var jNt=s(aj);qVo=t(jNt,"TFDebertaV2ForTokenClassification"),jNt.forEach(r),zVo=t(dFe," (DeBERTa-v2 model)"),dFe.forEach(r),XVo=i(oe),p5=n(oe,"LI",{});var mFe=s(p5);Iie=n(mFe,"STRONG",{});var NNt=s(Iie);WVo=t(NNt,"distilbert"),NNt.forEach(r),VVo=t(mFe," \u2014 "),nj=n(mFe,"A",{href:!0});var DNt=s(nj);QVo=t(DNt,"TFDistilBertForTokenClassification"),DNt.forEach(r),HVo=t(mFe," (DistilBERT model)"),mFe.forEach(r),UVo=i(oe),_5=n(oe,"LI",{});var fFe=s(_5);jie=n(fFe,"STRONG",{});var GNt=s(jie);JVo=t(GNt,"electra"),GNt.forEach(r),KVo=t(fFe," \u2014 "),sj=n(fFe,"A",{href:!0});var ONt=s(sj);YVo=t(ONt,"TFElectraForTokenClassification"),ONt.forEach(r),ZVo=t(fFe," (ELECTRA 
model)"),fFe.forEach(r),eQo=i(oe),v5=n(oe,"LI",{});var cFe=s(v5);Nie=n(cFe,"STRONG",{});var qNt=s(Nie);oQo=t(qNt,"flaubert"),qNt.forEach(r),tQo=t(cFe," \u2014 "),lj=n(cFe,"A",{href:!0});var zNt=s(lj);rQo=t(zNt,"TFFlaubertForTokenClassification"),zNt.forEach(r),aQo=t(cFe," (FlauBERT model)"),cFe.forEach(r),nQo=i(oe),b5=n(oe,"LI",{});var gFe=s(b5);Die=n(gFe,"STRONG",{});var XNt=s(Die);sQo=t(XNt,"funnel"),XNt.forEach(r),lQo=t(gFe," \u2014 "),ij=n(gFe,"A",{href:!0});var WNt=s(ij);iQo=t(WNt,"TFFunnelForTokenClassification"),WNt.forEach(r),dQo=t(gFe," (Funnel Transformer model)"),gFe.forEach(r),mQo=i(oe),T5=n(oe,"LI",{});var hFe=s(T5);Gie=n(hFe,"STRONG",{});var VNt=s(Gie);fQo=t(VNt,"layoutlm"),VNt.forEach(r),cQo=t(hFe," \u2014 "),dj=n(hFe,"A",{href:!0});var QNt=s(dj);gQo=t(QNt,"TFLayoutLMForTokenClassification"),QNt.forEach(r),hQo=t(hFe," (LayoutLM model)"),hFe.forEach(r),uQo=i(oe),F5=n(oe,"LI",{});var uFe=s(F5);Oie=n(uFe,"STRONG",{});var HNt=s(Oie);pQo=t(HNt,"longformer"),HNt.forEach(r),_Qo=t(uFe," \u2014 "),mj=n(uFe,"A",{href:!0});var UNt=s(mj);vQo=t(UNt,"TFLongformerForTokenClassification"),UNt.forEach(r),bQo=t(uFe," (Longformer model)"),uFe.forEach(r),TQo=i(oe),M5=n(oe,"LI",{});var pFe=s(M5);qie=n(pFe,"STRONG",{});var JNt=s(qie);FQo=t(JNt,"mobilebert"),JNt.forEach(r),MQo=t(pFe," \u2014 "),fj=n(pFe,"A",{href:!0});var KNt=s(fj);EQo=t(KNt,"TFMobileBertForTokenClassification"),KNt.forEach(r),CQo=t(pFe," (MobileBERT model)"),pFe.forEach(r),yQo=i(oe),E5=n(oe,"LI",{});var _Fe=s(E5);zie=n(_Fe,"STRONG",{});var YNt=s(zie);wQo=t(YNt,"mpnet"),YNt.forEach(r),AQo=t(_Fe," \u2014 "),cj=n(_Fe,"A",{href:!0});var ZNt=s(cj);xQo=t(ZNt,"TFMPNetForTokenClassification"),ZNt.forEach(r),LQo=t(_Fe," (MPNet model)"),_Fe.forEach(r),BQo=i(oe),C5=n(oe,"LI",{});var vFe=s(C5);Xie=n(vFe,"STRONG",{});var eDt=s(Xie);kQo=t(eDt,"rembert"),eDt.forEach(r),RQo=t(vFe," \u2014 "),gj=n(vFe,"A",{href:!0});var oDt=s(gj);SQo=t(oDt,"TFRemBertForTokenClassification"),oDt.forEach(r),PQo=t(vFe," (RemBERT model)"),vFe.forEach(r),$Qo=i(oe),y5=n(oe,"LI",{});var bFe=s(y5);Wie=n(bFe,"STRONG",{});var tDt=s(Wie);IQo=t(tDt,"roberta"),tDt.forEach(r),jQo=t(bFe," \u2014 "),hj=n(bFe,"A",{href:!0});var rDt=s(hj);NQo=t(rDt,"TFRobertaForTokenClassification"),rDt.forEach(r),DQo=t(bFe," (RoBERTa model)"),bFe.forEach(r),GQo=i(oe),w5=n(oe,"LI",{});var TFe=s(w5);Vie=n(TFe,"STRONG",{});var aDt=s(Vie);OQo=t(aDt,"roformer"),aDt.forEach(r),qQo=t(TFe," \u2014 "),uj=n(TFe,"A",{href:!0});var nDt=s(uj);zQo=t(nDt,"TFRoFormerForTokenClassification"),nDt.forEach(r),XQo=t(TFe," (RoFormer model)"),TFe.forEach(r),WQo=i(oe),A5=n(oe,"LI",{});var FFe=s(A5);Qie=n(FFe,"STRONG",{});var sDt=s(Qie);VQo=t(sDt,"xlm"),sDt.forEach(r),QQo=t(FFe," \u2014 "),pj=n(FFe,"A",{href:!0});var lDt=s(pj);HQo=t(lDt,"TFXLMForTokenClassification"),lDt.forEach(r),UQo=t(FFe," (XLM model)"),FFe.forEach(r),JQo=i(oe),x5=n(oe,"LI",{});var MFe=s(x5);Hie=n(MFe,"STRONG",{});var iDt=s(Hie);KQo=t(iDt,"xlm-roberta"),iDt.forEach(r),YQo=t(MFe," \u2014 "),_j=n(MFe,"A",{href:!0});var dDt=s(_j);ZQo=t(dDt,"TFXLMRobertaForTokenClassification"),dDt.forEach(r),eHo=t(MFe," (XLM-RoBERTa model)"),MFe.forEach(r),oHo=i(oe),L5=n(oe,"LI",{});var EFe=s(L5);Uie=n(EFe,"STRONG",{});var mDt=s(Uie);tHo=t(mDt,"xlnet"),mDt.forEach(r),rHo=t(EFe," \u2014 "),vj=n(EFe,"A",{href:!0});var fDt=s(vj);aHo=t(fDt,"TFXLNetForTokenClassification"),fDt.forEach(r),nHo=t(EFe," (XLNet model)"),EFe.forEach(r),oe.forEach(r),sHo=i(ra),Jie=n(ra,"P",{});var 
cDt=s(Jie);lHo=t(cDt,"Examples:"),cDt.forEach(r),iHo=i(ra),c(ay.$$.fragment,ra),ra.forEach(r),_l.forEach(r),NCe=i(d),Xd=n(d,"H2",{class:!0});var Iye=s(Xd);B5=n(Iye,"A",{id:!0,class:!0,href:!0});var gDt=s(B5);Kie=n(gDt,"SPAN",{});var hDt=s(Kie);c(ny.$$.fragment,hDt),hDt.forEach(r),gDt.forEach(r),dHo=i(Iye),Yie=n(Iye,"SPAN",{});var uDt=s(Yie);mHo=t(uDt,"TFAutoModelForQuestionAnswering"),uDt.forEach(r),Iye.forEach(r),DCe=i(d),ft=n(d,"DIV",{class:!0});var bl=s(ft);c(sy.$$.fragment,bl),fHo=i(bl),Wd=n(bl,"P",{});var yG=s(Wd);cHo=t(yG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a question answering head) when created with the `),Zie=n(yG,"CODE",{});var pDt=s(Zie);gHo=t(pDt,"from_pretrained()"),pDt.forEach(r),hHo=t(yG,` class method or the `),ede=n(yG,"CODE",{});var _Dt=s(ede);uHo=t(_Dt,"from_config()"),_Dt.forEach(r),pHo=t(yG," class method."),yG.forEach(r),_Ho=i(bl),ly=n(bl,"P",{});var jye=s(ly);vHo=t(jye,"This class cannot be instantiated directly using "),ode=n(jye,"CODE",{});var vDt=s(ode);bHo=t(vDt,"__init__()"),vDt.forEach(r),THo=t(jye," (throws an error)."),jye.forEach(r),FHo=i(bl),tr=n(bl,"DIV",{class:!0});var Tl=s(tr);c(iy.$$.fragment,Tl),MHo=i(Tl),tde=n(Tl,"P",{});var bDt=s(tde);EHo=t(bDt,"Instantiates one of the model classes of the library (with a question answering head) from a configuration."),bDt.forEach(r),CHo=i(Tl),Vd=n(Tl,"P",{});var wG=s(Vd);yHo=t(wG,`Note: Loading a model from its configuration file does `),rde=n(wG,"STRONG",{});var TDt=s(rde);wHo=t(TDt,"not"),TDt.forEach(r),AHo=t(wG,` load the model weights. It only affects the model\u2019s configuration. Use [`),ade=n(wG,"EM",{});var FDt=s(ade);xHo=t(FDt,"~TFAutoModelForQuestionAnswering.from_pretrained"),FDt.forEach(r),LHo=t(wG,`] to load the model weights.`),wG.forEach(r),BHo=i(Tl),nde=n(Tl,"P",{});var MDt=s(nde);kHo=t(MDt,"Examples:"),MDt.forEach(r),RHo=i(Tl),c(dy.$$.fragment,Tl),Tl.forEach(r),SHo=i(bl),ho=n(bl,"DIV",{class:!0});var aa=s(ho);c(my.$$.fragment,aa),PHo=i(aa),sde=n(aa,"P",{});var EDt=s(sde);$Ho=t(EDt,"Instantiate one of the model classes of the library (with a question answering head) from a pretrained model."),EDt.forEach(r),IHo=i(aa),Za=n(aa,"P",{});var bF=s(Za);jHo=t(bF,"The model class to instantiate is selected based on the "),lde=n(bF,"EM",{});var CDt=s(lde);NHo=t(CDt,"model_type"),CDt.forEach(r),DHo=t(bF,` property of the config object (either passed as an argument or loaded from `),ide=n(bF,"EM",{});var yDt=s(ide);GHo=t(yDt,"pretrained_model_name_or_path"),yDt.forEach(r),OHo=t(bF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),dde=n(bF,"EM",{});var wDt=s(dde);qHo=t(wDt,"pretrained_model_name_or_path"),wDt.forEach(r),zHo=t(bF,":"),bF.forEach(r),XHo=i(aa),Z=n(aa,"UL",{});var te=s(Z);k5=n(te,"LI",{});var CFe=s(k5);mde=n(CFe,"STRONG",{});var ADt=s(mde);WHo=t(ADt,"albert"),ADt.forEach(r),VHo=t(CFe," \u2014 "),bj=n(CFe,"A",{href:!0});var xDt=s(bj);QHo=t(xDt,"TFAlbertForQuestionAnswering"),xDt.forEach(r),HHo=t(CFe," (ALBERT model)"),CFe.forEach(r),UHo=i(te),R5=n(te,"LI",{});var yFe=s(R5);fde=n(yFe,"STRONG",{});var LDt=s(fde);JHo=t(LDt,"bert"),LDt.forEach(r),KHo=t(yFe," \u2014 "),Tj=n(yFe,"A",{href:!0});var BDt=s(Tj);YHo=t(BDt,"TFBertForQuestionAnswering"),BDt.forEach(r),ZHo=t(yFe," (BERT model)"),yFe.forEach(r),eUo=i(te),S5=n(te,"LI",{});var wFe=s(S5);cde=n(wFe,"STRONG",{});var kDt=s(cde);oUo=t(kDt,"camembert"),kDt.forEach(r),tUo=t(wFe," \u2014 "),Fj=n(wFe,"A",{href:!0});var 
RDt=s(Fj);rUo=t(RDt,"TFCamembertForQuestionAnswering"),RDt.forEach(r),aUo=t(wFe," (CamemBERT model)"),wFe.forEach(r),nUo=i(te),P5=n(te,"LI",{});var AFe=s(P5);gde=n(AFe,"STRONG",{});var SDt=s(gde);sUo=t(SDt,"convbert"),SDt.forEach(r),lUo=t(AFe," \u2014 "),Mj=n(AFe,"A",{href:!0});var PDt=s(Mj);iUo=t(PDt,"TFConvBertForQuestionAnswering"),PDt.forEach(r),dUo=t(AFe," (ConvBERT model)"),AFe.forEach(r),mUo=i(te),$5=n(te,"LI",{});var xFe=s($5);hde=n(xFe,"STRONG",{});var $Dt=s(hde);fUo=t($Dt,"deberta"),$Dt.forEach(r),cUo=t(xFe," \u2014 "),Ej=n(xFe,"A",{href:!0});var IDt=s(Ej);gUo=t(IDt,"TFDebertaForQuestionAnswering"),IDt.forEach(r),hUo=t(xFe," (DeBERTa model)"),xFe.forEach(r),uUo=i(te),I5=n(te,"LI",{});var LFe=s(I5);ude=n(LFe,"STRONG",{});var jDt=s(ude);pUo=t(jDt,"deberta-v2"),jDt.forEach(r),_Uo=t(LFe," \u2014 "),Cj=n(LFe,"A",{href:!0});var NDt=s(Cj);vUo=t(NDt,"TFDebertaV2ForQuestionAnswering"),NDt.forEach(r),bUo=t(LFe," (DeBERTa-v2 model)"),LFe.forEach(r),TUo=i(te),j5=n(te,"LI",{});var BFe=s(j5);pde=n(BFe,"STRONG",{});var DDt=s(pde);FUo=t(DDt,"distilbert"),DDt.forEach(r),MUo=t(BFe," \u2014 "),yj=n(BFe,"A",{href:!0});var GDt=s(yj);EUo=t(GDt,"TFDistilBertForQuestionAnswering"),GDt.forEach(r),CUo=t(BFe," (DistilBERT model)"),BFe.forEach(r),yUo=i(te),N5=n(te,"LI",{});var kFe=s(N5);_de=n(kFe,"STRONG",{});var ODt=s(_de);wUo=t(ODt,"electra"),ODt.forEach(r),AUo=t(kFe," \u2014 "),wj=n(kFe,"A",{href:!0});var qDt=s(wj);xUo=t(qDt,"TFElectraForQuestionAnswering"),qDt.forEach(r),LUo=t(kFe," (ELECTRA model)"),kFe.forEach(r),BUo=i(te),D5=n(te,"LI",{});var RFe=s(D5);vde=n(RFe,"STRONG",{});var zDt=s(vde);kUo=t(zDt,"flaubert"),zDt.forEach(r),RUo=t(RFe," \u2014 "),Aj=n(RFe,"A",{href:!0});var XDt=s(Aj);SUo=t(XDt,"TFFlaubertForQuestionAnsweringSimple"),XDt.forEach(r),PUo=t(RFe," (FlauBERT model)"),RFe.forEach(r),$Uo=i(te),G5=n(te,"LI",{});var SFe=s(G5);bde=n(SFe,"STRONG",{});var WDt=s(bde);IUo=t(WDt,"funnel"),WDt.forEach(r),jUo=t(SFe," \u2014 "),xj=n(SFe,"A",{href:!0});var VDt=s(xj);NUo=t(VDt,"TFFunnelForQuestionAnswering"),VDt.forEach(r),DUo=t(SFe," (Funnel Transformer model)"),SFe.forEach(r),GUo=i(te),O5=n(te,"LI",{});var PFe=s(O5);Tde=n(PFe,"STRONG",{});var QDt=s(Tde);OUo=t(QDt,"longformer"),QDt.forEach(r),qUo=t(PFe," \u2014 "),Lj=n(PFe,"A",{href:!0});var HDt=s(Lj);zUo=t(HDt,"TFLongformerForQuestionAnswering"),HDt.forEach(r),XUo=t(PFe," (Longformer model)"),PFe.forEach(r),WUo=i(te),q5=n(te,"LI",{});var $Fe=s(q5);Fde=n($Fe,"STRONG",{});var UDt=s(Fde);VUo=t(UDt,"mobilebert"),UDt.forEach(r),QUo=t($Fe," \u2014 "),Bj=n($Fe,"A",{href:!0});var JDt=s(Bj);HUo=t(JDt,"TFMobileBertForQuestionAnswering"),JDt.forEach(r),UUo=t($Fe," (MobileBERT model)"),$Fe.forEach(r),JUo=i(te),z5=n(te,"LI",{});var IFe=s(z5);Mde=n(IFe,"STRONG",{});var KDt=s(Mde);KUo=t(KDt,"mpnet"),KDt.forEach(r),YUo=t(IFe," \u2014 "),kj=n(IFe,"A",{href:!0});var YDt=s(kj);ZUo=t(YDt,"TFMPNetForQuestionAnswering"),YDt.forEach(r),eJo=t(IFe," (MPNet model)"),IFe.forEach(r),oJo=i(te),X5=n(te,"LI",{});var jFe=s(X5);Ede=n(jFe,"STRONG",{});var ZDt=s(Ede);tJo=t(ZDt,"rembert"),ZDt.forEach(r),rJo=t(jFe," \u2014 "),Rj=n(jFe,"A",{href:!0});var eGt=s(Rj);aJo=t(eGt,"TFRemBertForQuestionAnswering"),eGt.forEach(r),nJo=t(jFe," (RemBERT model)"),jFe.forEach(r),sJo=i(te),W5=n(te,"LI",{});var NFe=s(W5);Cde=n(NFe,"STRONG",{});var oGt=s(Cde);lJo=t(oGt,"roberta"),oGt.forEach(r),iJo=t(NFe," \u2014 "),Sj=n(NFe,"A",{href:!0});var tGt=s(Sj);dJo=t(tGt,"TFRobertaForQuestionAnswering"),tGt.forEach(r),mJo=t(NFe," (RoBERTa model)"),NFe.forEach(r),fJo=i(te),V5=n(te,"LI",{});var 
DFe=s(V5);yde=n(DFe,"STRONG",{});var rGt=s(yde);cJo=t(rGt,"roformer"),rGt.forEach(r),gJo=t(DFe," \u2014 "),Pj=n(DFe,"A",{href:!0});var aGt=s(Pj);hJo=t(aGt,"TFRoFormerForQuestionAnswering"),aGt.forEach(r),uJo=t(DFe," (RoFormer model)"),DFe.forEach(r),pJo=i(te),Q5=n(te,"LI",{});var GFe=s(Q5);wde=n(GFe,"STRONG",{});var nGt=s(wde);_Jo=t(nGt,"xlm"),nGt.forEach(r),vJo=t(GFe," \u2014 "),$j=n(GFe,"A",{href:!0});var sGt=s($j);bJo=t(sGt,"TFXLMForQuestionAnsweringSimple"),sGt.forEach(r),TJo=t(GFe," (XLM model)"),GFe.forEach(r),FJo=i(te),H5=n(te,"LI",{});var OFe=s(H5);Ade=n(OFe,"STRONG",{});var lGt=s(Ade);MJo=t(lGt,"xlm-roberta"),lGt.forEach(r),EJo=t(OFe," \u2014 "),Ij=n(OFe,"A",{href:!0});var iGt=s(Ij);CJo=t(iGt,"TFXLMRobertaForQuestionAnswering"),iGt.forEach(r),yJo=t(OFe," (XLM-RoBERTa model)"),OFe.forEach(r),wJo=i(te),U5=n(te,"LI",{});var qFe=s(U5);xde=n(qFe,"STRONG",{});var dGt=s(xde);AJo=t(dGt,"xlnet"),dGt.forEach(r),xJo=t(qFe," \u2014 "),jj=n(qFe,"A",{href:!0});var mGt=s(jj);LJo=t(mGt,"TFXLNetForQuestionAnsweringSimple"),mGt.forEach(r),BJo=t(qFe," (XLNet model)"),qFe.forEach(r),te.forEach(r),kJo=i(aa),Lde=n(aa,"P",{});var fGt=s(Lde);RJo=t(fGt,"Examples:"),fGt.forEach(r),SJo=i(aa),c(fy.$$.fragment,aa),aa.forEach(r),bl.forEach(r),GCe=i(d),Qd=n(d,"H2",{class:!0});var Nye=s(Qd);J5=n(Nye,"A",{id:!0,class:!0,href:!0});var cGt=s(J5);Bde=n(cGt,"SPAN",{});var gGt=s(Bde);c(cy.$$.fragment,gGt),gGt.forEach(r),cGt.forEach(r),PJo=i(Nye),kde=n(Nye,"SPAN",{});var hGt=s(kde);$Jo=t(hGt,"FlaxAutoModel"),hGt.forEach(r),Nye.forEach(r),OCe=i(d),ct=n(d,"DIV",{class:!0});var Fl=s(ct);c(gy.$$.fragment,Fl),IJo=i(Fl),Hd=n(Fl,"P",{});var AG=s(Hd);jJo=t(AG,`This is a generic model class that will be instantiated as one of the base model classes of the library when created with the `),Rde=n(AG,"CODE",{});var uGt=s(Rde);NJo=t(uGt,"from_pretrained()"),uGt.forEach(r),DJo=t(AG,` class method or the `),Sde=n(AG,"CODE",{});var pGt=s(Sde);GJo=t(pGt,"from_config()"),pGt.forEach(r),OJo=t(AG," class method."),AG.forEach(r),qJo=i(Fl),hy=n(Fl,"P",{});var Dye=s(hy);zJo=t(Dye,"This class cannot be instantiated directly using "),Pde=n(Dye,"CODE",{});var _Gt=s(Pde);XJo=t(_Gt,"__init__()"),_Gt.forEach(r),WJo=t(Dye," (throws an error)."),Dye.forEach(r),VJo=i(Fl),rr=n(Fl,"DIV",{class:!0});var Ml=s(rr);c(uy.$$.fragment,Ml),QJo=i(Ml),$de=n(Ml,"P",{});var vGt=s($de);HJo=t(vGt,"Instantiates one of the base model classes of the library from a configuration."),vGt.forEach(r),UJo=i(Ml),Ud=n(Ml,"P",{});var xG=s(Ud);JJo=t(xG,`Note: Loading a model from its configuration file does `),Ide=n(xG,"STRONG",{});var bGt=s(Ide);KJo=t(bGt,"not"),bGt.forEach(r),YJo=t(xG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),jde=n(xG,"EM",{});var TGt=s(jde);ZJo=t(TGt,"~FlaxAutoModel.from_pretrained"),TGt.forEach(r),eKo=t(xG,`] to load the model weights.`),xG.forEach(r),oKo=i(Ml),Nde=n(Ml,"P",{});var FGt=s(Nde);tKo=t(FGt,"Examples:"),FGt.forEach(r),rKo=i(Ml),c(py.$$.fragment,Ml),Ml.forEach(r),aKo=i(Fl),uo=n(Fl,"DIV",{class:!0});var na=s(uo);c(_y.$$.fragment,na),nKo=i(na),Dde=n(na,"P",{});var MGt=s(Dde);sKo=t(MGt,"Instantiate one of the base model classes of the library from a pretrained model."),MGt.forEach(r),lKo=i(na),en=n(na,"P",{});var TF=s(en);iKo=t(TF,"The model class to instantiate is selected based on the "),Gde=n(TF,"EM",{});var EGt=s(Gde);dKo=t(EGt,"model_type"),EGt.forEach(r),mKo=t(TF,` property of the config object (either passed as an argument or loaded from `),Ode=n(TF,"EM",{});var CGt=s(Ode);fKo=t(CGt,"pretrained_model_name_or_path"),CGt.forEach(r),cKo=t(TF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),qde=n(TF,"EM",{});var yGt=s(qde);gKo=t(yGt,"pretrained_model_name_or_path"),yGt.forEach(r),hKo=t(TF,":"),TF.forEach(r),uKo=i(na),Q=n(na,"UL",{});var U=s(Q);K5=n(U,"LI",{});var zFe=s(K5);zde=n(zFe,"STRONG",{});var wGt=s(zde);pKo=t(wGt,"albert"),wGt.forEach(r),_Ko=t(zFe," \u2014 "),Nj=n(zFe,"A",{href:!0});var AGt=s(Nj);vKo=t(AGt,"FlaxAlbertModel"),AGt.forEach(r),bKo=t(zFe," (ALBERT model)"),zFe.forEach(r),TKo=i(U),Y5=n(U,"LI",{});var XFe=s(Y5);Xde=n(XFe,"STRONG",{});var xGt=s(Xde);FKo=t(xGt,"bart"),xGt.forEach(r),MKo=t(XFe," \u2014 "),Dj=n(XFe,"A",{href:!0});var LGt=s(Dj);EKo=t(LGt,"FlaxBartModel"),LGt.forEach(r),CKo=t(XFe," (BART model)"),XFe.forEach(r),yKo=i(U),Z5=n(U,"LI",{});var WFe=s(Z5);Wde=n(WFe,"STRONG",{});var BGt=s(Wde);wKo=t(BGt,"beit"),BGt.forEach(r),AKo=t(WFe," \u2014 "),Gj=n(WFe,"A",{href:!0});var kGt=s(Gj);xKo=t(kGt,"FlaxBeitModel"),kGt.forEach(r),LKo=t(WFe," (BEiT model)"),WFe.forEach(r),BKo=i(U),e0=n(U,"LI",{});var VFe=s(e0);Vde=n(VFe,"STRONG",{});var RGt=s(Vde);kKo=t(RGt,"bert"),RGt.forEach(r),RKo=t(VFe," \u2014 "),Oj=n(VFe,"A",{href:!0});var SGt=s(Oj);SKo=t(SGt,"FlaxBertModel"),SGt.forEach(r),PKo=t(VFe," (BERT model)"),VFe.forEach(r),$Ko=i(U),o0=n(U,"LI",{});var QFe=s(o0);Qde=n(QFe,"STRONG",{});var PGt=s(Qde);IKo=t(PGt,"big_bird"),PGt.forEach(r),jKo=t(QFe," \u2014 "),qj=n(QFe,"A",{href:!0});var $Gt=s(qj);NKo=t($Gt,"FlaxBigBirdModel"),$Gt.forEach(r),DKo=t(QFe," (BigBird model)"),QFe.forEach(r),GKo=i(U),t0=n(U,"LI",{});var HFe=s(t0);Hde=n(HFe,"STRONG",{});var IGt=s(Hde);OKo=t(IGt,"blenderbot"),IGt.forEach(r),qKo=t(HFe," \u2014 "),zj=n(HFe,"A",{href:!0});var jGt=s(zj);zKo=t(jGt,"FlaxBlenderbotModel"),jGt.forEach(r),XKo=t(HFe," (Blenderbot model)"),HFe.forEach(r),WKo=i(U),r0=n(U,"LI",{});var UFe=s(r0);Ude=n(UFe,"STRONG",{});var NGt=s(Ude);VKo=t(NGt,"blenderbot-small"),NGt.forEach(r),QKo=t(UFe," \u2014 "),Xj=n(UFe,"A",{href:!0});var DGt=s(Xj);HKo=t(DGt,"FlaxBlenderbotSmallModel"),DGt.forEach(r),UKo=t(UFe," (BlenderbotSmall model)"),UFe.forEach(r),JKo=i(U),a0=n(U,"LI",{});var JFe=s(a0);Jde=n(JFe,"STRONG",{});var GGt=s(Jde);KKo=t(GGt,"clip"),GGt.forEach(r),YKo=t(JFe," \u2014 "),Wj=n(JFe,"A",{href:!0});var OGt=s(Wj);ZKo=t(OGt,"FlaxCLIPModel"),OGt.forEach(r),eYo=t(JFe," (CLIP model)"),JFe.forEach(r),oYo=i(U),n0=n(U,"LI",{});var KFe=s(n0);Kde=n(KFe,"STRONG",{});var qGt=s(Kde);tYo=t(qGt,"distilbert"),qGt.forEach(r),rYo=t(KFe," \u2014 "),Vj=n(KFe,"A",{href:!0});var zGt=s(Vj);aYo=t(zGt,"FlaxDistilBertModel"),zGt.forEach(r),nYo=t(KFe," (DistilBERT model)"),KFe.forEach(r),sYo=i(U),s0=n(U,"LI",{});var YFe=s(s0);Yde=n(YFe,"STRONG",{});var 
XGt=s(Yde);lYo=t(XGt,"electra"),XGt.forEach(r),iYo=t(YFe," \u2014 "),Qj=n(YFe,"A",{href:!0});var WGt=s(Qj);dYo=t(WGt,"FlaxElectraModel"),WGt.forEach(r),mYo=t(YFe," (ELECTRA model)"),YFe.forEach(r),fYo=i(U),l0=n(U,"LI",{});var ZFe=s(l0);Zde=n(ZFe,"STRONG",{});var VGt=s(Zde);cYo=t(VGt,"gpt2"),VGt.forEach(r),gYo=t(ZFe," \u2014 "),Hj=n(ZFe,"A",{href:!0});var QGt=s(Hj);hYo=t(QGt,"FlaxGPT2Model"),QGt.forEach(r),uYo=t(ZFe," (OpenAI GPT-2 model)"),ZFe.forEach(r),pYo=i(U),i0=n(U,"LI",{});var eMe=s(i0);eme=n(eMe,"STRONG",{});var HGt=s(eme);_Yo=t(HGt,"gpt_neo"),HGt.forEach(r),vYo=t(eMe," \u2014 "),Uj=n(eMe,"A",{href:!0});var UGt=s(Uj);bYo=t(UGt,"FlaxGPTNeoModel"),UGt.forEach(r),TYo=t(eMe," (GPT Neo model)"),eMe.forEach(r),FYo=i(U),d0=n(U,"LI",{});var oMe=s(d0);ome=n(oMe,"STRONG",{});var JGt=s(ome);MYo=t(JGt,"gptj"),JGt.forEach(r),EYo=t(oMe," \u2014 "),Jj=n(oMe,"A",{href:!0});var KGt=s(Jj);CYo=t(KGt,"FlaxGPTJModel"),KGt.forEach(r),yYo=t(oMe," (GPT-J model)"),oMe.forEach(r),wYo=i(U),m0=n(U,"LI",{});var tMe=s(m0);tme=n(tMe,"STRONG",{});var YGt=s(tme);AYo=t(YGt,"marian"),YGt.forEach(r),xYo=t(tMe," \u2014 "),Kj=n(tMe,"A",{href:!0});var ZGt=s(Kj);LYo=t(ZGt,"FlaxMarianModel"),ZGt.forEach(r),BYo=t(tMe," (Marian model)"),tMe.forEach(r),kYo=i(U),f0=n(U,"LI",{});var rMe=s(f0);rme=n(rMe,"STRONG",{});var eOt=s(rme);RYo=t(eOt,"mbart"),eOt.forEach(r),SYo=t(rMe," \u2014 "),Yj=n(rMe,"A",{href:!0});var oOt=s(Yj);PYo=t(oOt,"FlaxMBartModel"),oOt.forEach(r),$Yo=t(rMe," (mBART model)"),rMe.forEach(r),IYo=i(U),c0=n(U,"LI",{});var aMe=s(c0);ame=n(aMe,"STRONG",{});var tOt=s(ame);jYo=t(tOt,"mt5"),tOt.forEach(r),NYo=t(aMe," \u2014 "),Zj=n(aMe,"A",{href:!0});var rOt=s(Zj);DYo=t(rOt,"FlaxMT5Model"),rOt.forEach(r),GYo=t(aMe," (mT5 model)"),aMe.forEach(r),OYo=i(U),g0=n(U,"LI",{});var nMe=s(g0);nme=n(nMe,"STRONG",{});var aOt=s(nme);qYo=t(aOt,"pegasus"),aOt.forEach(r),zYo=t(nMe," \u2014 "),eN=n(nMe,"A",{href:!0});var nOt=s(eN);XYo=t(nOt,"FlaxPegasusModel"),nOt.forEach(r),WYo=t(nMe," (Pegasus model)"),nMe.forEach(r),VYo=i(U),h0=n(U,"LI",{});var sMe=s(h0);sme=n(sMe,"STRONG",{});var sOt=s(sme);QYo=t(sOt,"roberta"),sOt.forEach(r),HYo=t(sMe," \u2014 "),oN=n(sMe,"A",{href:!0});var lOt=s(oN);UYo=t(lOt,"FlaxRobertaModel"),lOt.forEach(r),JYo=t(sMe," (RoBERTa model)"),sMe.forEach(r),KYo=i(U),u0=n(U,"LI",{});var lMe=s(u0);lme=n(lMe,"STRONG",{});var iOt=s(lme);YYo=t(iOt,"t5"),iOt.forEach(r),ZYo=t(lMe," \u2014 "),tN=n(lMe,"A",{href:!0});var dOt=s(tN);eZo=t(dOt,"FlaxT5Model"),dOt.forEach(r),oZo=t(lMe," (T5 model)"),lMe.forEach(r),tZo=i(U),p0=n(U,"LI",{});var iMe=s(p0);ime=n(iMe,"STRONG",{});var mOt=s(ime);rZo=t(mOt,"vision-text-dual-encoder"),mOt.forEach(r),aZo=t(iMe," \u2014 "),rN=n(iMe,"A",{href:!0});var fOt=s(rN);nZo=t(fOt,"FlaxVisionTextDualEncoderModel"),fOt.forEach(r),sZo=t(iMe," (VisionTextDualEncoder model)"),iMe.forEach(r),lZo=i(U),_0=n(U,"LI",{});var dMe=s(_0);dme=n(dMe,"STRONG",{});var cOt=s(dme);iZo=t(cOt,"vit"),cOt.forEach(r),dZo=t(dMe," \u2014 "),aN=n(dMe,"A",{href:!0});var gOt=s(aN);mZo=t(gOt,"FlaxViTModel"),gOt.forEach(r),fZo=t(dMe," (ViT model)"),dMe.forEach(r),cZo=i(U),v0=n(U,"LI",{});var mMe=s(v0);mme=n(mMe,"STRONG",{});var hOt=s(mme);gZo=t(hOt,"wav2vec2"),hOt.forEach(r),hZo=t(mMe," \u2014 "),nN=n(mMe,"A",{href:!0});var uOt=s(nN);uZo=t(uOt,"FlaxWav2Vec2Model"),uOt.forEach(r),pZo=t(mMe," (Wav2Vec2 model)"),mMe.forEach(r),U.forEach(r),_Zo=i(na),fme=n(na,"P",{});var pOt=s(fme);vZo=t(pOt,"Examples:"),pOt.forEach(r),bZo=i(na),c(vy.$$.fragment,na),na.forEach(r),Fl.forEach(r),qCe=i(d),Jd=n(d,"H2",{class:!0});var 
Gye=s(Jd);b0=n(Gye,"A",{id:!0,class:!0,href:!0});var _Ot=s(b0);cme=n(_Ot,"SPAN",{});var vOt=s(cme);c(by.$$.fragment,vOt),vOt.forEach(r),_Ot.forEach(r),TZo=i(Gye),gme=n(Gye,"SPAN",{});var bOt=s(gme);FZo=t(bOt,"FlaxAutoModelForCausalLM"),bOt.forEach(r),Gye.forEach(r),zCe=i(d),gt=n(d,"DIV",{class:!0});var El=s(gt);c(Ty.$$.fragment,El),MZo=i(El),Kd=n(El,"P",{});var LG=s(Kd);EZo=t(LG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a causal language modeling head) when created with the `),hme=n(LG,"CODE",{});var TOt=s(hme);CZo=t(TOt,"from_pretrained()"),TOt.forEach(r),yZo=t(LG,` class method or the `),ume=n(LG,"CODE",{});var FOt=s(ume);wZo=t(FOt,"from_config()"),FOt.forEach(r),AZo=t(LG," class method."),LG.forEach(r),xZo=i(El),Fy=n(El,"P",{});var Oye=s(Fy);LZo=t(Oye,"This class cannot be instantiated directly using "),pme=n(Oye,"CODE",{});var MOt=s(pme);BZo=t(MOt,"__init__()"),MOt.forEach(r),kZo=t(Oye," (throws an error)."),Oye.forEach(r),RZo=i(El),ar=n(El,"DIV",{class:!0});var Cl=s(ar);c(My.$$.fragment,Cl),SZo=i(Cl),_me=n(Cl,"P",{});var EOt=s(_me);PZo=t(EOt,"Instantiates one of the model classes of the library (with a causal language modeling head) from a configuration."),EOt.forEach(r),$Zo=i(Cl),Yd=n(Cl,"P",{});var BG=s(Yd);IZo=t(BG,`Note: Loading a model from its configuration file does `),vme=n(BG,"STRONG",{});var COt=s(vme);jZo=t(COt,"not"),COt.forEach(r),NZo=t(BG,` load the model weights. It only affects the model\u2019s configuration. Use [`),bme=n(BG,"EM",{});var yOt=s(bme);DZo=t(yOt,"~FlaxAutoModelForCausalLM.from_pretrained"),yOt.forEach(r),GZo=t(BG,`] to load the model weights.`),BG.forEach(r),OZo=i(Cl),Tme=n(Cl,"P",{});var wOt=s(Tme);qZo=t(wOt,"Examples:"),wOt.forEach(r),zZo=i(Cl),c(Ey.$$.fragment,Cl),Cl.forEach(r),XZo=i(El),po=n(El,"DIV",{class:!0});var sa=s(po);c(Cy.$$.fragment,sa),WZo=i(sa),Fme=n(sa,"P",{});var AOt=s(Fme);VZo=t(AOt,"Instantiate one of the model classes of the library (with a causal language modeling head) from a pretrained model."),AOt.forEach(r),QZo=i(sa),on=n(sa,"P",{});var FF=s(on);HZo=t(FF,"The model class to instantiate is selected based on the "),Mme=n(FF,"EM",{});var xOt=s(Mme);UZo=t(xOt,"model_type"),xOt.forEach(r),JZo=t(FF,` property of the config object (either passed as an argument or loaded from `),Eme=n(FF,"EM",{});var LOt=s(Eme);KZo=t(LOt,"pretrained_model_name_or_path"),LOt.forEach(r),YZo=t(FF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Cme=n(FF,"EM",{});var BOt=s(Cme);ZZo=t(BOt,"pretrained_model_name_or_path"),BOt.forEach(r),eet=t(FF,":"),FF.forEach(r),oet=i(sa),Zd=n(sa,"UL",{});var kG=s(Zd);T0=n(kG,"LI",{});var fMe=s(T0);yme=n(fMe,"STRONG",{});var kOt=s(yme);tet=t(kOt,"gpt2"),kOt.forEach(r),ret=t(fMe," \u2014 "),sN=n(fMe,"A",{href:!0});var ROt=s(sN);aet=t(ROt,"FlaxGPT2LMHeadModel"),ROt.forEach(r),net=t(fMe," (OpenAI GPT-2 model)"),fMe.forEach(r),set=i(kG),F0=n(kG,"LI",{});var cMe=s(F0);wme=n(cMe,"STRONG",{});var SOt=s(wme);iet=t(SOt,"gpt_neo"),SOt.forEach(r),det=t(cMe," \u2014 "),lN=n(cMe,"A",{href:!0});var POt=s(lN);met=t(POt,"FlaxGPTNeoForCausalLM"),POt.forEach(r),fet=t(cMe," (GPT Neo model)"),cMe.forEach(r),cet=i(kG),M0=n(kG,"LI",{});var gMe=s(M0);Ame=n(gMe,"STRONG",{});var $Ot=s(Ame);get=t($Ot,"gptj"),$Ot.forEach(r),het=t(gMe," \u2014 "),iN=n(gMe,"A",{href:!0});var IOt=s(iN);uet=t(IOt,"FlaxGPTJForCausalLM"),IOt.forEach(r),pet=t(gMe," (GPT-J model)"),gMe.forEach(r),kG.forEach(r),_et=i(sa),xme=n(sa,"P",{});var 
jOt=s(xme);vet=t(jOt,"Examples:"),jOt.forEach(r),bet=i(sa),c(yy.$$.fragment,sa),sa.forEach(r),El.forEach(r),XCe=i(d),em=n(d,"H2",{class:!0});var qye=s(em);E0=n(qye,"A",{id:!0,class:!0,href:!0});var NOt=s(E0);Lme=n(NOt,"SPAN",{});var DOt=s(Lme);c(wy.$$.fragment,DOt),DOt.forEach(r),NOt.forEach(r),Tet=i(qye),Bme=n(qye,"SPAN",{});var GOt=s(Bme);Fet=t(GOt,"FlaxAutoModelForPreTraining"),GOt.forEach(r),qye.forEach(r),WCe=i(d),ht=n(d,"DIV",{class:!0});var yl=s(ht);c(Ay.$$.fragment,yl),Met=i(yl),om=n(yl,"P",{});var RG=s(om);Eet=t(RG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a pretraining head) when created with the `),kme=n(RG,"CODE",{});var OOt=s(kme);Cet=t(OOt,"from_pretrained()"),OOt.forEach(r),yet=t(RG,` class method or the `),Rme=n(RG,"CODE",{});var qOt=s(Rme);wet=t(qOt,"from_config()"),qOt.forEach(r),Aet=t(RG," class method."),RG.forEach(r),xet=i(yl),xy=n(yl,"P",{});var zye=s(xy);Let=t(zye,"This class cannot be instantiated directly using "),Sme=n(zye,"CODE",{});var zOt=s(Sme);Bet=t(zOt,"__init__()"),zOt.forEach(r),ket=t(zye," (throws an error)."),zye.forEach(r),Ret=i(yl),nr=n(yl,"DIV",{class:!0});var wl=s(nr);c(Ly.$$.fragment,wl),Set=i(wl),Pme=n(wl,"P",{});var XOt=s(Pme);Pet=t(XOt,"Instantiates one of the model classes of the library (with a pretraining head) from a configuration."),XOt.forEach(r),$et=i(wl),tm=n(wl,"P",{});var SG=s(tm);Iet=t(SG,`Note: Loading a model from its configuration file does `),$me=n(SG,"STRONG",{});var WOt=s($me);jet=t(WOt,"not"),WOt.forEach(r),Net=t(SG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Ime=n(SG,"EM",{});var VOt=s(Ime);Det=t(VOt,"~FlaxAutoModelForPreTraining.from_pretrained"),VOt.forEach(r),Get=t(SG,`] to load the model weights.`),SG.forEach(r),Oet=i(wl),jme=n(wl,"P",{});var QOt=s(jme);qet=t(QOt,"Examples:"),QOt.forEach(r),zet=i(wl),c(By.$$.fragment,wl),wl.forEach(r),Xet=i(yl),_o=n(yl,"DIV",{class:!0});var la=s(_o);c(ky.$$.fragment,la),Wet=i(la),Nme=n(la,"P",{});var HOt=s(Nme);Vet=t(HOt,"Instantiate one of the model classes of the library (with a pretraining head) from a pretrained model."),HOt.forEach(r),Qet=i(la),tn=n(la,"P",{});var MF=s(tn);Het=t(MF,"The model class to instantiate is selected based on the "),Dme=n(MF,"EM",{});var UOt=s(Dme);Uet=t(UOt,"model_type"),UOt.forEach(r),Jet=t(MF,` property of the config object (either passed as an argument or loaded from `),Gme=n(MF,"EM",{});var JOt=s(Gme);Ket=t(JOt,"pretrained_model_name_or_path"),JOt.forEach(r),Yet=t(MF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Ome=n(MF,"EM",{});var KOt=s(Ome);Zet=t(KOt,"pretrained_model_name_or_path"),KOt.forEach(r),eot=t(MF,":"),MF.forEach(r),oot=i(la),he=n(la,"UL",{});var Te=s(he);C0=n(Te,"LI",{});var hMe=s(C0);qme=n(hMe,"STRONG",{});var YOt=s(qme);tot=t(YOt,"albert"),YOt.forEach(r),rot=t(hMe," \u2014 "),dN=n(hMe,"A",{href:!0});var ZOt=s(dN);aot=t(ZOt,"FlaxAlbertForPreTraining"),ZOt.forEach(r),not=t(hMe," (ALBERT model)"),hMe.forEach(r),sot=i(Te),y0=n(Te,"LI",{});var uMe=s(y0);zme=n(uMe,"STRONG",{});var eqt=s(zme);lot=t(eqt,"bart"),eqt.forEach(r),iot=t(uMe," \u2014 "),mN=n(uMe,"A",{href:!0});var oqt=s(mN);dot=t(oqt,"FlaxBartForConditionalGeneration"),oqt.forEach(r),mot=t(uMe," (BART model)"),uMe.forEach(r),fot=i(Te),w0=n(Te,"LI",{});var pMe=s(w0);Xme=n(pMe,"STRONG",{});var tqt=s(Xme);cot=t(tqt,"bert"),tqt.forEach(r),got=t(pMe," \u2014 "),fN=n(pMe,"A",{href:!0});var 
rqt=s(fN);hot=t(rqt,"FlaxBertForPreTraining"),rqt.forEach(r),uot=t(pMe," (BERT model)"),pMe.forEach(r),pot=i(Te),A0=n(Te,"LI",{});var _Me=s(A0);Wme=n(_Me,"STRONG",{});var aqt=s(Wme);_ot=t(aqt,"big_bird"),aqt.forEach(r),vot=t(_Me," \u2014 "),cN=n(_Me,"A",{href:!0});var nqt=s(cN);bot=t(nqt,"FlaxBigBirdForPreTraining"),nqt.forEach(r),Tot=t(_Me," (BigBird model)"),_Me.forEach(r),Fot=i(Te),x0=n(Te,"LI",{});var vMe=s(x0);Vme=n(vMe,"STRONG",{});var sqt=s(Vme);Mot=t(sqt,"electra"),sqt.forEach(r),Eot=t(vMe," \u2014 "),gN=n(vMe,"A",{href:!0});var lqt=s(gN);Cot=t(lqt,"FlaxElectraForPreTraining"),lqt.forEach(r),yot=t(vMe," (ELECTRA model)"),vMe.forEach(r),wot=i(Te),L0=n(Te,"LI",{});var bMe=s(L0);Qme=n(bMe,"STRONG",{});var iqt=s(Qme);Aot=t(iqt,"mbart"),iqt.forEach(r),xot=t(bMe," \u2014 "),hN=n(bMe,"A",{href:!0});var dqt=s(hN);Lot=t(dqt,"FlaxMBartForConditionalGeneration"),dqt.forEach(r),Bot=t(bMe," (mBART model)"),bMe.forEach(r),kot=i(Te),B0=n(Te,"LI",{});var TMe=s(B0);Hme=n(TMe,"STRONG",{});var mqt=s(Hme);Rot=t(mqt,"mt5"),mqt.forEach(r),Sot=t(TMe," \u2014 "),uN=n(TMe,"A",{href:!0});var fqt=s(uN);Pot=t(fqt,"FlaxMT5ForConditionalGeneration"),fqt.forEach(r),$ot=t(TMe," (mT5 model)"),TMe.forEach(r),Iot=i(Te),k0=n(Te,"LI",{});var FMe=s(k0);Ume=n(FMe,"STRONG",{});var cqt=s(Ume);jot=t(cqt,"roberta"),cqt.forEach(r),Not=t(FMe," \u2014 "),pN=n(FMe,"A",{href:!0});var gqt=s(pN);Dot=t(gqt,"FlaxRobertaForMaskedLM"),gqt.forEach(r),Got=t(FMe," (RoBERTa model)"),FMe.forEach(r),Oot=i(Te),R0=n(Te,"LI",{});var MMe=s(R0);Jme=n(MMe,"STRONG",{});var hqt=s(Jme);qot=t(hqt,"t5"),hqt.forEach(r),zot=t(MMe," \u2014 "),_N=n(MMe,"A",{href:!0});var uqt=s(_N);Xot=t(uqt,"FlaxT5ForConditionalGeneration"),uqt.forEach(r),Wot=t(MMe," (T5 model)"),MMe.forEach(r),Vot=i(Te),S0=n(Te,"LI",{});var EMe=s(S0);Kme=n(EMe,"STRONG",{});var pqt=s(Kme);Qot=t(pqt,"wav2vec2"),pqt.forEach(r),Hot=t(EMe," \u2014 "),vN=n(EMe,"A",{href:!0});var _qt=s(vN);Uot=t(_qt,"FlaxWav2Vec2ForPreTraining"),_qt.forEach(r),Jot=t(EMe," (Wav2Vec2 model)"),EMe.forEach(r),Te.forEach(r),Kot=i(la),Yme=n(la,"P",{});var vqt=s(Yme);Yot=t(vqt,"Examples:"),vqt.forEach(r),Zot=i(la),c(Ry.$$.fragment,la),la.forEach(r),yl.forEach(r),VCe=i(d),rm=n(d,"H2",{class:!0});var Xye=s(rm);P0=n(Xye,"A",{id:!0,class:!0,href:!0});var bqt=s(P0);Zme=n(bqt,"SPAN",{});var Tqt=s(Zme);c(Sy.$$.fragment,Tqt),Tqt.forEach(r),bqt.forEach(r),ett=i(Xye),efe=n(Xye,"SPAN",{});var Fqt=s(efe);ott=t(Fqt,"FlaxAutoModelForMaskedLM"),Fqt.forEach(r),Xye.forEach(r),QCe=i(d),ut=n(d,"DIV",{class:!0});var Al=s(ut);c(Py.$$.fragment,Al),ttt=i(Al),am=n(Al,"P",{});var PG=s(am);rtt=t(PG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a masked language modeling head) when created with the `),ofe=n(PG,"CODE",{});var Mqt=s(ofe);att=t(Mqt,"from_pretrained()"),Mqt.forEach(r),ntt=t(PG,` class method or the `),tfe=n(PG,"CODE",{});var Eqt=s(tfe);stt=t(Eqt,"from_config()"),Eqt.forEach(r),ltt=t(PG," class method."),PG.forEach(r),itt=i(Al),$y=n(Al,"P",{});var Wye=s($y);dtt=t(Wye,"This class cannot be instantiated directly using "),rfe=n(Wye,"CODE",{});var Cqt=s(rfe);mtt=t(Cqt,"__init__()"),Cqt.forEach(r),ftt=t(Wye," (throws an error)."),Wye.forEach(r),ctt=i(Al),sr=n(Al,"DIV",{class:!0});var xl=s(sr);c(Iy.$$.fragment,xl),gtt=i(xl),afe=n(xl,"P",{});var yqt=s(afe);htt=t(yqt,"Instantiates one of the model classes of the library (with a masked language modeling head) from a configuration."),yqt.forEach(r),utt=i(xl),nm=n(xl,"P",{});var $G=s(nm);ptt=t($G,`Note: Loading a model from its 
configuration file does `),nfe=n($G,"STRONG",{});var wqt=s(nfe);_tt=t(wqt,"not"),wqt.forEach(r),vtt=t($G,` load the model weights. It only affects the model\u2019s configuration. Use [`),sfe=n($G,"EM",{});var Aqt=s(sfe);btt=t(Aqt,"~FlaxAutoModelForMaskedLM.from_pretrained"),Aqt.forEach(r),Ttt=t($G,`] to load the model weights.`),$G.forEach(r),Ftt=i(xl),lfe=n(xl,"P",{});var xqt=s(lfe);Mtt=t(xqt,"Examples:"),xqt.forEach(r),Ett=i(xl),c(jy.$$.fragment,xl),xl.forEach(r),Ctt=i(Al),vo=n(Al,"DIV",{class:!0});var ia=s(vo);c(Ny.$$.fragment,ia),ytt=i(ia),ife=n(ia,"P",{});var Lqt=s(ife);wtt=t(Lqt,"Instantiate one of the model classes of the library (with a masked language modeling head) from a pretrained model."),Lqt.forEach(r),Att=i(ia),rn=n(ia,"P",{});var EF=s(rn);xtt=t(EF,"The model class to instantiate is selected based on the "),dfe=n(EF,"EM",{});var Bqt=s(dfe);Ltt=t(Bqt,"model_type"),Bqt.forEach(r),Btt=t(EF,` property of the config object (either passed as an argument or loaded from `),mfe=n(EF,"EM",{});var kqt=s(mfe);ktt=t(kqt,"pretrained_model_name_or_path"),kqt.forEach(r),Rtt=t(EF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),ffe=n(EF,"EM",{});var Rqt=s(ffe);Stt=t(Rqt,"pretrained_model_name_or_path"),Rqt.forEach(r),Ptt=t(EF,":"),EF.forEach(r),$tt=i(ia),Me=n(ia,"UL",{});var Ao=s(Me);$0=n(Ao,"LI",{});var CMe=s($0);cfe=n(CMe,"STRONG",{});var Sqt=s(cfe);Itt=t(Sqt,"albert"),Sqt.forEach(r),jtt=t(CMe," \u2014 "),bN=n(CMe,"A",{href:!0});var Pqt=s(bN);Ntt=t(Pqt,"FlaxAlbertForMaskedLM"),Pqt.forEach(r),Dtt=t(CMe," (ALBERT model)"),CMe.forEach(r),Gtt=i(Ao),I0=n(Ao,"LI",{});var yMe=s(I0);gfe=n(yMe,"STRONG",{});var $qt=s(gfe);Ott=t($qt,"bart"),$qt.forEach(r),qtt=t(yMe," \u2014 "),TN=n(yMe,"A",{href:!0});var Iqt=s(TN);ztt=t(Iqt,"FlaxBartForConditionalGeneration"),Iqt.forEach(r),Xtt=t(yMe," (BART model)"),yMe.forEach(r),Wtt=i(Ao),j0=n(Ao,"LI",{});var wMe=s(j0);hfe=n(wMe,"STRONG",{});var jqt=s(hfe);Vtt=t(jqt,"bert"),jqt.forEach(r),Qtt=t(wMe," \u2014 "),FN=n(wMe,"A",{href:!0});var Nqt=s(FN);Htt=t(Nqt,"FlaxBertForMaskedLM"),Nqt.forEach(r),Utt=t(wMe," (BERT model)"),wMe.forEach(r),Jtt=i(Ao),N0=n(Ao,"LI",{});var AMe=s(N0);ufe=n(AMe,"STRONG",{});var Dqt=s(ufe);Ktt=t(Dqt,"big_bird"),Dqt.forEach(r),Ytt=t(AMe," \u2014 "),MN=n(AMe,"A",{href:!0});var Gqt=s(MN);Ztt=t(Gqt,"FlaxBigBirdForMaskedLM"),Gqt.forEach(r),ert=t(AMe," (BigBird model)"),AMe.forEach(r),ort=i(Ao),D0=n(Ao,"LI",{});var xMe=s(D0);pfe=n(xMe,"STRONG",{});var Oqt=s(pfe);trt=t(Oqt,"distilbert"),Oqt.forEach(r),rrt=t(xMe," \u2014 "),EN=n(xMe,"A",{href:!0});var qqt=s(EN);art=t(qqt,"FlaxDistilBertForMaskedLM"),qqt.forEach(r),nrt=t(xMe," (DistilBERT model)"),xMe.forEach(r),srt=i(Ao),G0=n(Ao,"LI",{});var LMe=s(G0);_fe=n(LMe,"STRONG",{});var zqt=s(_fe);lrt=t(zqt,"electra"),zqt.forEach(r),irt=t(LMe," \u2014 "),CN=n(LMe,"A",{href:!0});var Xqt=s(CN);drt=t(Xqt,"FlaxElectraForMaskedLM"),Xqt.forEach(r),mrt=t(LMe," (ELECTRA model)"),LMe.forEach(r),frt=i(Ao),O0=n(Ao,"LI",{});var BMe=s(O0);vfe=n(BMe,"STRONG",{});var Wqt=s(vfe);crt=t(Wqt,"mbart"),Wqt.forEach(r),grt=t(BMe," \u2014 "),yN=n(BMe,"A",{href:!0});var Vqt=s(yN);hrt=t(Vqt,"FlaxMBartForConditionalGeneration"),Vqt.forEach(r),urt=t(BMe," (mBART model)"),BMe.forEach(r),prt=i(Ao),q0=n(Ao,"LI",{});var kMe=s(q0);bfe=n(kMe,"STRONG",{});var Qqt=s(bfe);_rt=t(Qqt,"roberta"),Qqt.forEach(r),vrt=t(kMe," \u2014 "),wN=n(kMe,"A",{href:!0});var Hqt=s(wN);brt=t(Hqt,"FlaxRobertaForMaskedLM"),Hqt.forEach(r),Trt=t(kMe," (RoBERTa 
model)"),kMe.forEach(r),Ao.forEach(r),Frt=i(ia),Tfe=n(ia,"P",{});var Uqt=s(Tfe);Mrt=t(Uqt,"Examples:"),Uqt.forEach(r),Ert=i(ia),c(Dy.$$.fragment,ia),ia.forEach(r),Al.forEach(r),HCe=i(d),sm=n(d,"H2",{class:!0});var Vye=s(sm);z0=n(Vye,"A",{id:!0,class:!0,href:!0});var Jqt=s(z0);Ffe=n(Jqt,"SPAN",{});var Kqt=s(Ffe);c(Gy.$$.fragment,Kqt),Kqt.forEach(r),Jqt.forEach(r),Crt=i(Vye),Mfe=n(Vye,"SPAN",{});var Yqt=s(Mfe);yrt=t(Yqt,"FlaxAutoModelForSeq2SeqLM"),Yqt.forEach(r),Vye.forEach(r),UCe=i(d),pt=n(d,"DIV",{class:!0});var Ll=s(pt);c(Oy.$$.fragment,Ll),wrt=i(Ll),lm=n(Ll,"P",{});var IG=s(lm);Art=t(IG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence-to-sequence language modeling head) when created with the `),Efe=n(IG,"CODE",{});var Zqt=s(Efe);xrt=t(Zqt,"from_pretrained()"),Zqt.forEach(r),Lrt=t(IG,` class method or the `),Cfe=n(IG,"CODE",{});var ezt=s(Cfe);Brt=t(ezt,"from_config()"),ezt.forEach(r),krt=t(IG," class method."),IG.forEach(r),Rrt=i(Ll),qy=n(Ll,"P",{});var Qye=s(qy);Srt=t(Qye,"This class cannot be instantiated directly using "),yfe=n(Qye,"CODE",{});var ozt=s(yfe);Prt=t(ozt,"__init__()"),ozt.forEach(r),$rt=t(Qye," (throws an error)."),Qye.forEach(r),Irt=i(Ll),lr=n(Ll,"DIV",{class:!0});var Bl=s(lr);c(zy.$$.fragment,Bl),jrt=i(Bl),wfe=n(Bl,"P",{});var tzt=s(wfe);Nrt=t(tzt,"Instantiates one of the model classes of the library (with a sequence-to-sequence language modeling head) from a configuration."),tzt.forEach(r),Drt=i(Bl),im=n(Bl,"P",{});var jG=s(im);Grt=t(jG,`Note: Loading a model from its configuration file does `),Afe=n(jG,"STRONG",{});var rzt=s(Afe);Ort=t(rzt,"not"),rzt.forEach(r),qrt=t(jG,` load the model weights. It only affects the model\u2019s configuration. Use [`),xfe=n(jG,"EM",{});var azt=s(xfe);zrt=t(azt,"~FlaxAutoModelForSeq2SeqLM.from_pretrained"),azt.forEach(r),Xrt=t(jG,`] to load the model weights.`),jG.forEach(r),Wrt=i(Bl),Lfe=n(Bl,"P",{});var nzt=s(Lfe);Vrt=t(nzt,"Examples:"),nzt.forEach(r),Qrt=i(Bl),c(Xy.$$.fragment,Bl),Bl.forEach(r),Hrt=i(Ll),bo=n(Ll,"DIV",{class:!0});var da=s(bo);c(Wy.$$.fragment,da),Urt=i(da),Bfe=n(da,"P",{});var szt=s(Bfe);Jrt=t(szt,"Instantiate one of the model classes of the library (with a sequence-to-sequence language modeling head) from a pretrained model."),szt.forEach(r),Krt=i(da),an=n(da,"P",{});var CF=s(an);Yrt=t(CF,"The model class to instantiate is selected based on the "),kfe=n(CF,"EM",{});var lzt=s(kfe);Zrt=t(lzt,"model_type"),lzt.forEach(r),eat=t(CF,` property of the config object (either passed as an argument or loaded from `),Rfe=n(CF,"EM",{});var izt=s(Rfe);oat=t(izt,"pretrained_model_name_or_path"),izt.forEach(r),tat=t(CF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Sfe=n(CF,"EM",{});var dzt=s(Sfe);rat=t(dzt,"pretrained_model_name_or_path"),dzt.forEach(r),aat=t(CF,":"),CF.forEach(r),nat=i(da),pe=n(da,"UL",{});var He=s(pe);X0=n(He,"LI",{});var RMe=s(X0);Pfe=n(RMe,"STRONG",{});var mzt=s(Pfe);sat=t(mzt,"bart"),mzt.forEach(r),lat=t(RMe," \u2014 "),AN=n(RMe,"A",{href:!0});var fzt=s(AN);iat=t(fzt,"FlaxBartForConditionalGeneration"),fzt.forEach(r),dat=t(RMe," (BART model)"),RMe.forEach(r),mat=i(He),W0=n(He,"LI",{});var SMe=s(W0);$fe=n(SMe,"STRONG",{});var czt=s($fe);fat=t(czt,"blenderbot"),czt.forEach(r),cat=t(SMe," \u2014 "),xN=n(SMe,"A",{href:!0});var gzt=s(xN);gat=t(gzt,"FlaxBlenderbotForConditionalGeneration"),gzt.forEach(r),hat=t(SMe," (Blenderbot model)"),SMe.forEach(r),uat=i(He),V0=n(He,"LI",{});var 
PMe=s(V0);Ife=n(PMe,"STRONG",{});var hzt=s(Ife);pat=t(hzt,"blenderbot-small"),hzt.forEach(r),_at=t(PMe," \u2014 "),LN=n(PMe,"A",{href:!0});var uzt=s(LN);vat=t(uzt,"FlaxBlenderbotSmallForConditionalGeneration"),uzt.forEach(r),bat=t(PMe," (BlenderbotSmall model)"),PMe.forEach(r),Tat=i(He),Q0=n(He,"LI",{});var $Me=s(Q0);jfe=n($Me,"STRONG",{});var pzt=s(jfe);Fat=t(pzt,"encoder-decoder"),pzt.forEach(r),Mat=t($Me," \u2014 "),BN=n($Me,"A",{href:!0});var _zt=s(BN);Eat=t(_zt,"FlaxEncoderDecoderModel"),_zt.forEach(r),Cat=t($Me," (Encoder decoder model)"),$Me.forEach(r),yat=i(He),H0=n(He,"LI",{});var IMe=s(H0);Nfe=n(IMe,"STRONG",{});var vzt=s(Nfe);wat=t(vzt,"marian"),vzt.forEach(r),Aat=t(IMe," \u2014 "),kN=n(IMe,"A",{href:!0});var bzt=s(kN);xat=t(bzt,"FlaxMarianMTModel"),bzt.forEach(r),Lat=t(IMe," (Marian model)"),IMe.forEach(r),Bat=i(He),U0=n(He,"LI",{});var jMe=s(U0);Dfe=n(jMe,"STRONG",{});var Tzt=s(Dfe);kat=t(Tzt,"mbart"),Tzt.forEach(r),Rat=t(jMe," \u2014 "),RN=n(jMe,"A",{href:!0});var Fzt=s(RN);Sat=t(Fzt,"FlaxMBartForConditionalGeneration"),Fzt.forEach(r),Pat=t(jMe," (mBART model)"),jMe.forEach(r),$at=i(He),J0=n(He,"LI",{});var NMe=s(J0);Gfe=n(NMe,"STRONG",{});var Mzt=s(Gfe);Iat=t(Mzt,"mt5"),Mzt.forEach(r),jat=t(NMe," \u2014 "),SN=n(NMe,"A",{href:!0});var Ezt=s(SN);Nat=t(Ezt,"FlaxMT5ForConditionalGeneration"),Ezt.forEach(r),Dat=t(NMe," (mT5 model)"),NMe.forEach(r),Gat=i(He),K0=n(He,"LI",{});var DMe=s(K0);Ofe=n(DMe,"STRONG",{});var Czt=s(Ofe);Oat=t(Czt,"pegasus"),Czt.forEach(r),qat=t(DMe," \u2014 "),PN=n(DMe,"A",{href:!0});var yzt=s(PN);zat=t(yzt,"FlaxPegasusForConditionalGeneration"),yzt.forEach(r),Xat=t(DMe," (Pegasus model)"),DMe.forEach(r),Wat=i(He),Y0=n(He,"LI",{});var GMe=s(Y0);qfe=n(GMe,"STRONG",{});var wzt=s(qfe);Vat=t(wzt,"t5"),wzt.forEach(r),Qat=t(GMe," \u2014 "),$N=n(GMe,"A",{href:!0});var Azt=s($N);Hat=t(Azt,"FlaxT5ForConditionalGeneration"),Azt.forEach(r),Uat=t(GMe," (T5 model)"),GMe.forEach(r),He.forEach(r),Jat=i(da),zfe=n(da,"P",{});var xzt=s(zfe);Kat=t(xzt,"Examples:"),xzt.forEach(r),Yat=i(da),c(Vy.$$.fragment,da),da.forEach(r),Ll.forEach(r),JCe=i(d),dm=n(d,"H2",{class:!0});var Hye=s(dm);Z0=n(Hye,"A",{id:!0,class:!0,href:!0});var Lzt=s(Z0);Xfe=n(Lzt,"SPAN",{});var Bzt=s(Xfe);c(Qy.$$.fragment,Bzt),Bzt.forEach(r),Lzt.forEach(r),Zat=i(Hye),Wfe=n(Hye,"SPAN",{});var kzt=s(Wfe);ent=t(kzt,"FlaxAutoModelForSequenceClassification"),kzt.forEach(r),Hye.forEach(r),KCe=i(d),_t=n(d,"DIV",{class:!0});var kl=s(_t);c(Hy.$$.fragment,kl),ont=i(kl),mm=n(kl,"P",{});var NG=s(mm);tnt=t(NG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a sequence classification head) when created with the `),Vfe=n(NG,"CODE",{});var Rzt=s(Vfe);rnt=t(Rzt,"from_pretrained()"),Rzt.forEach(r),ant=t(NG,` class method or the `),Qfe=n(NG,"CODE",{});var Szt=s(Qfe);nnt=t(Szt,"from_config()"),Szt.forEach(r),snt=t(NG," class method."),NG.forEach(r),lnt=i(kl),Uy=n(kl,"P",{});var Uye=s(Uy);int=t(Uye,"This class cannot be instantiated directly using "),Hfe=n(Uye,"CODE",{});var Pzt=s(Hfe);dnt=t(Pzt,"__init__()"),Pzt.forEach(r),mnt=t(Uye," (throws an error)."),Uye.forEach(r),fnt=i(kl),ir=n(kl,"DIV",{class:!0});var Rl=s(ir);c(Jy.$$.fragment,Rl),cnt=i(Rl),Ufe=n(Rl,"P",{});var $zt=s(Ufe);gnt=t($zt,"Instantiates one of the model classes of the library (with a sequence classification head) from a configuration."),$zt.forEach(r),hnt=i(Rl),fm=n(Rl,"P",{});var DG=s(fm);unt=t(DG,`Note: Loading a model from its configuration file does `),Jfe=n(DG,"STRONG",{});var 
Izt=s(Jfe);pnt=t(Izt,"not"),Izt.forEach(r),_nt=t(DG,` load the model weights. It only affects the model\u2019s configuration. Use [`),Kfe=n(DG,"EM",{});var jzt=s(Kfe);vnt=t(jzt,"~FlaxAutoModelForSequenceClassification.from_pretrained"),jzt.forEach(r),bnt=t(DG,`] to load the model weights.`),DG.forEach(r),Tnt=i(Rl),Yfe=n(Rl,"P",{});var Nzt=s(Yfe);Fnt=t(Nzt,"Examples:"),Nzt.forEach(r),Mnt=i(Rl),c(Ky.$$.fragment,Rl),Rl.forEach(r),Ent=i(kl),To=n(kl,"DIV",{class:!0});var ma=s(To);c(Yy.$$.fragment,ma),Cnt=i(ma),Zfe=n(ma,"P",{});var Dzt=s(Zfe);ynt=t(Dzt,"Instantiate one of the model classes of the library (with a sequence classification head) from a pretrained model."),Dzt.forEach(r),wnt=i(ma),nn=n(ma,"P",{});var yF=s(nn);Ant=t(yF,"The model class to instantiate is selected based on the "),ece=n(yF,"EM",{});var Gzt=s(ece);xnt=t(Gzt,"model_type"),Gzt.forEach(r),Lnt=t(yF,` property of the config object (either passed as an argument or loaded from `),oce=n(yF,"EM",{});var Ozt=s(oce);Bnt=t(Ozt,"pretrained_model_name_or_path"),Ozt.forEach(r),knt=t(yF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),tce=n(yF,"EM",{});var qzt=s(tce);Rnt=t(qzt,"pretrained_model_name_or_path"),qzt.forEach(r),Snt=t(yF,":"),yF.forEach(r),Pnt=i(ma),Ee=n(ma,"UL",{});var xo=s(Ee);eT=n(xo,"LI",{});var OMe=s(eT);rce=n(OMe,"STRONG",{});var zzt=s(rce);$nt=t(zzt,"albert"),zzt.forEach(r),Int=t(OMe," \u2014 "),IN=n(OMe,"A",{href:!0});var Xzt=s(IN);jnt=t(Xzt,"FlaxAlbertForSequenceClassification"),Xzt.forEach(r),Nnt=t(OMe," (ALBERT model)"),OMe.forEach(r),Dnt=i(xo),oT=n(xo,"LI",{});var qMe=s(oT);ace=n(qMe,"STRONG",{});var Wzt=s(ace);Gnt=t(Wzt,"bart"),Wzt.forEach(r),Ont=t(qMe," \u2014 "),jN=n(qMe,"A",{href:!0});var Vzt=s(jN);qnt=t(Vzt,"FlaxBartForSequenceClassification"),Vzt.forEach(r),znt=t(qMe," (BART model)"),qMe.forEach(r),Xnt=i(xo),tT=n(xo,"LI",{});var zMe=s(tT);nce=n(zMe,"STRONG",{});var Qzt=s(nce);Wnt=t(Qzt,"bert"),Qzt.forEach(r),Vnt=t(zMe," \u2014 "),NN=n(zMe,"A",{href:!0});var Hzt=s(NN);Qnt=t(Hzt,"FlaxBertForSequenceClassification"),Hzt.forEach(r),Hnt=t(zMe," (BERT model)"),zMe.forEach(r),Unt=i(xo),rT=n(xo,"LI",{});var XMe=s(rT);sce=n(XMe,"STRONG",{});var Uzt=s(sce);Jnt=t(Uzt,"big_bird"),Uzt.forEach(r),Knt=t(XMe," \u2014 "),DN=n(XMe,"A",{href:!0});var Jzt=s(DN);Ynt=t(Jzt,"FlaxBigBirdForSequenceClassification"),Jzt.forEach(r),Znt=t(XMe," (BigBird model)"),XMe.forEach(r),est=i(xo),aT=n(xo,"LI",{});var WMe=s(aT);lce=n(WMe,"STRONG",{});var Kzt=s(lce);ost=t(Kzt,"distilbert"),Kzt.forEach(r),tst=t(WMe," \u2014 "),GN=n(WMe,"A",{href:!0});var Yzt=s(GN);rst=t(Yzt,"FlaxDistilBertForSequenceClassification"),Yzt.forEach(r),ast=t(WMe," (DistilBERT model)"),WMe.forEach(r),nst=i(xo),nT=n(xo,"LI",{});var VMe=s(nT);ice=n(VMe,"STRONG",{});var Zzt=s(ice);sst=t(Zzt,"electra"),Zzt.forEach(r),lst=t(VMe," \u2014 "),ON=n(VMe,"A",{href:!0});var eXt=s(ON);ist=t(eXt,"FlaxElectraForSequenceClassification"),eXt.forEach(r),dst=t(VMe," (ELECTRA model)"),VMe.forEach(r),mst=i(xo),sT=n(xo,"LI",{});var QMe=s(sT);dce=n(QMe,"STRONG",{});var oXt=s(dce);fst=t(oXt,"mbart"),oXt.forEach(r),cst=t(QMe," \u2014 "),qN=n(QMe,"A",{href:!0});var tXt=s(qN);gst=t(tXt,"FlaxMBartForSequenceClassification"),tXt.forEach(r),hst=t(QMe," (mBART model)"),QMe.forEach(r),ust=i(xo),lT=n(xo,"LI",{});var HMe=s(lT);mce=n(HMe,"STRONG",{});var rXt=s(mce);pst=t(rXt,"roberta"),rXt.forEach(r),_st=t(HMe," \u2014 "),zN=n(HMe,"A",{href:!0});var aXt=s(zN);vst=t(aXt,"FlaxRobertaForSequenceClassification"),aXt.forEach(r),bst=t(HMe," (RoBERTa 
model)"),HMe.forEach(r),xo.forEach(r),Tst=i(ma),fce=n(ma,"P",{});var nXt=s(fce);Fst=t(nXt,"Examples:"),nXt.forEach(r),Mst=i(ma),c(Zy.$$.fragment,ma),ma.forEach(r),kl.forEach(r),YCe=i(d),cm=n(d,"H2",{class:!0});var Jye=s(cm);iT=n(Jye,"A",{id:!0,class:!0,href:!0});var sXt=s(iT);cce=n(sXt,"SPAN",{});var lXt=s(cce);c(ew.$$.fragment,lXt),lXt.forEach(r),sXt.forEach(r),Est=i(Jye),gce=n(Jye,"SPAN",{});var iXt=s(gce);Cst=t(iXt,"FlaxAutoModelForQuestionAnswering"),iXt.forEach(r),Jye.forEach(r),ZCe=i(d),vt=n(d,"DIV",{class:!0});var Sl=s(vt);c(ow.$$.fragment,Sl),yst=i(Sl),gm=n(Sl,"P",{});var GG=s(gm);wst=t(GG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a question answering head) when created with the `),hce=n(GG,"CODE",{});var dXt=s(hce);Ast=t(dXt,"from_pretrained()"),dXt.forEach(r),xst=t(GG,` class method or the `),uce=n(GG,"CODE",{});var mXt=s(uce);Lst=t(mXt,"from_config()"),mXt.forEach(r),Bst=t(GG," class method."),GG.forEach(r),kst=i(Sl),tw=n(Sl,"P",{});var Kye=s(tw);Rst=t(Kye,"This class cannot be instantiated directly using "),pce=n(Kye,"CODE",{});var fXt=s(pce);Sst=t(fXt,"__init__()"),fXt.forEach(r),Pst=t(Kye," (throws an error)."),Kye.forEach(r),$st=i(Sl),dr=n(Sl,"DIV",{class:!0});var Pl=s(dr);c(rw.$$.fragment,Pl),Ist=i(Pl),_ce=n(Pl,"P",{});var cXt=s(_ce);jst=t(cXt,"Instantiates one of the model classes of the library (with a question answering head) from a configuration."),cXt.forEach(r),Nst=i(Pl),hm=n(Pl,"P",{});var OG=s(hm);Dst=t(OG,`Note: Loading a model from its configuration file does `),vce=n(OG,"STRONG",{});var gXt=s(vce);Gst=t(gXt,"not"),gXt.forEach(r),Ost=t(OG,` load the model weights. It only affects the model\u2019s configuration. Use [`),bce=n(OG,"EM",{});var hXt=s(bce);qst=t(hXt,"~FlaxAutoModelForQuestionAnswering.from_pretrained"),hXt.forEach(r),zst=t(OG,`] to load the model weights.`),OG.forEach(r),Xst=i(Pl),Tce=n(Pl,"P",{});var uXt=s(Tce);Wst=t(uXt,"Examples:"),uXt.forEach(r),Vst=i(Pl),c(aw.$$.fragment,Pl),Pl.forEach(r),Qst=i(Sl),Fo=n(Sl,"DIV",{class:!0});var fa=s(Fo);c(nw.$$.fragment,fa),Hst=i(fa),Fce=n(fa,"P",{});var pXt=s(Fce);Ust=t(pXt,"Instantiate one of the model classes of the library (with a question answering head) from a pretrained model."),pXt.forEach(r),Jst=i(fa),sn=n(fa,"P",{});var wF=s(sn);Kst=t(wF,"The model class to instantiate is selected based on the "),Mce=n(wF,"EM",{});var _Xt=s(Mce);Yst=t(_Xt,"model_type"),_Xt.forEach(r),Zst=t(wF,` property of the config object (either passed as an argument or loaded from `),Ece=n(wF,"EM",{});var vXt=s(Ece);elt=t(vXt,"pretrained_model_name_or_path"),vXt.forEach(r),olt=t(wF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Cce=n(wF,"EM",{});var bXt=s(Cce);tlt=t(bXt,"pretrained_model_name_or_path"),bXt.forEach(r),rlt=t(wF,":"),wF.forEach(r),alt=i(fa),Ce=n(fa,"UL",{});var Lo=s(Ce);dT=n(Lo,"LI",{});var UMe=s(dT);yce=n(UMe,"STRONG",{});var TXt=s(yce);nlt=t(TXt,"albert"),TXt.forEach(r),slt=t(UMe," \u2014 "),XN=n(UMe,"A",{href:!0});var FXt=s(XN);llt=t(FXt,"FlaxAlbertForQuestionAnswering"),FXt.forEach(r),ilt=t(UMe," (ALBERT model)"),UMe.forEach(r),dlt=i(Lo),mT=n(Lo,"LI",{});var JMe=s(mT);wce=n(JMe,"STRONG",{});var MXt=s(wce);mlt=t(MXt,"bart"),MXt.forEach(r),flt=t(JMe," \u2014 "),WN=n(JMe,"A",{href:!0});var EXt=s(WN);clt=t(EXt,"FlaxBartForQuestionAnswering"),EXt.forEach(r),glt=t(JMe," (BART model)"),JMe.forEach(r),hlt=i(Lo),fT=n(Lo,"LI",{});var KMe=s(fT);Ace=n(KMe,"STRONG",{});var 
CXt=s(Ace);ult=t(CXt,"bert"),CXt.forEach(r),plt=t(KMe," \u2014 "),VN=n(KMe,"A",{href:!0});var yXt=s(VN);_lt=t(yXt,"FlaxBertForQuestionAnswering"),yXt.forEach(r),vlt=t(KMe," (BERT model)"),KMe.forEach(r),blt=i(Lo),cT=n(Lo,"LI",{});var YMe=s(cT);xce=n(YMe,"STRONG",{});var wXt=s(xce);Tlt=t(wXt,"big_bird"),wXt.forEach(r),Flt=t(YMe," \u2014 "),QN=n(YMe,"A",{href:!0});var AXt=s(QN);Mlt=t(AXt,"FlaxBigBirdForQuestionAnswering"),AXt.forEach(r),Elt=t(YMe," (BigBird model)"),YMe.forEach(r),Clt=i(Lo),gT=n(Lo,"LI",{});var ZMe=s(gT);Lce=n(ZMe,"STRONG",{});var xXt=s(Lce);ylt=t(xXt,"distilbert"),xXt.forEach(r),wlt=t(ZMe," \u2014 "),HN=n(ZMe,"A",{href:!0});var LXt=s(HN);Alt=t(LXt,"FlaxDistilBertForQuestionAnswering"),LXt.forEach(r),xlt=t(ZMe," (DistilBERT model)"),ZMe.forEach(r),Llt=i(Lo),hT=n(Lo,"LI",{});var eEe=s(hT);Bce=n(eEe,"STRONG",{});var BXt=s(Bce);Blt=t(BXt,"electra"),BXt.forEach(r),klt=t(eEe," \u2014 "),UN=n(eEe,"A",{href:!0});var kXt=s(UN);Rlt=t(kXt,"FlaxElectraForQuestionAnswering"),kXt.forEach(r),Slt=t(eEe," (ELECTRA model)"),eEe.forEach(r),Plt=i(Lo),uT=n(Lo,"LI",{});var oEe=s(uT);kce=n(oEe,"STRONG",{});var RXt=s(kce);$lt=t(RXt,"mbart"),RXt.forEach(r),Ilt=t(oEe," \u2014 "),JN=n(oEe,"A",{href:!0});var SXt=s(JN);jlt=t(SXt,"FlaxMBartForQuestionAnswering"),SXt.forEach(r),Nlt=t(oEe," (mBART model)"),oEe.forEach(r),Dlt=i(Lo),pT=n(Lo,"LI",{});var tEe=s(pT);Rce=n(tEe,"STRONG",{});var PXt=s(Rce);Glt=t(PXt,"roberta"),PXt.forEach(r),Olt=t(tEe," \u2014 "),KN=n(tEe,"A",{href:!0});var $Xt=s(KN);qlt=t($Xt,"FlaxRobertaForQuestionAnswering"),$Xt.forEach(r),zlt=t(tEe," (RoBERTa model)"),tEe.forEach(r),Lo.forEach(r),Xlt=i(fa),Sce=n(fa,"P",{});var IXt=s(Sce);Wlt=t(IXt,"Examples:"),IXt.forEach(r),Vlt=i(fa),c(sw.$$.fragment,fa),fa.forEach(r),Sl.forEach(r),e3e=i(d),um=n(d,"H2",{class:!0});var Yye=s(um);_T=n(Yye,"A",{id:!0,class:!0,href:!0});var jXt=s(_T);Pce=n(jXt,"SPAN",{});var NXt=s(Pce);c(lw.$$.fragment,NXt),NXt.forEach(r),jXt.forEach(r),Qlt=i(Yye),$ce=n(Yye,"SPAN",{});var DXt=s($ce);Hlt=t(DXt,"FlaxAutoModelForTokenClassification"),DXt.forEach(r),Yye.forEach(r),o3e=i(d),bt=n(d,"DIV",{class:!0});var $l=s(bt);c(iw.$$.fragment,$l),Ult=i($l),pm=n($l,"P",{});var qG=s(pm);Jlt=t(qG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a token classification head) when created with the `),Ice=n(qG,"CODE",{});var GXt=s(Ice);Klt=t(GXt,"from_pretrained()"),GXt.forEach(r),Ylt=t(qG,` class method or the `),jce=n(qG,"CODE",{});var OXt=s(jce);Zlt=t(OXt,"from_config()"),OXt.forEach(r),eit=t(qG," class method."),qG.forEach(r),oit=i($l),dw=n($l,"P",{});var Zye=s(dw);tit=t(Zye,"This class cannot be instantiated directly using "),Nce=n(Zye,"CODE",{});var qXt=s(Nce);rit=t(qXt,"__init__()"),qXt.forEach(r),ait=t(Zye," (throws an error)."),Zye.forEach(r),nit=i($l),mr=n($l,"DIV",{class:!0});var Il=s(mr);c(mw.$$.fragment,Il),sit=i(Il),Dce=n(Il,"P",{});var zXt=s(Dce);lit=t(zXt,"Instantiates one of the model classes of the library (with a token classification head) from a configuration."),zXt.forEach(r),iit=i(Il),_m=n(Il,"P",{});var zG=s(_m);dit=t(zG,`Note: Loading a model from its configuration file does `),Gce=n(zG,"STRONG",{});var XXt=s(Gce);mit=t(XXt,"not"),XXt.forEach(r),fit=t(zG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Oce=n(zG,"EM",{});var WXt=s(Oce);cit=t(WXt,"~FlaxAutoModelForTokenClassification.from_pretrained"),WXt.forEach(r),git=t(zG,`] to load the model weights.`),zG.forEach(r),hit=i(Il),qce=n(Il,"P",{});var VXt=s(qce);uit=t(VXt,"Examples:"),VXt.forEach(r),pit=i(Il),c(fw.$$.fragment,Il),Il.forEach(r),_it=i($l),Mo=n($l,"DIV",{class:!0});var ca=s(Mo);c(cw.$$.fragment,ca),vit=i(ca),zce=n(ca,"P",{});var QXt=s(zce);bit=t(QXt,"Instantiate one of the model classes of the library (with a token classification head) from a pretrained model."),QXt.forEach(r),Tit=i(ca),ln=n(ca,"P",{});var AF=s(ln);Fit=t(AF,"The model class to instantiate is selected based on the "),Xce=n(AF,"EM",{});var HXt=s(Xce);Mit=t(HXt,"model_type"),HXt.forEach(r),Eit=t(AF,` property of the config object (either passed as an argument or loaded from `),Wce=n(AF,"EM",{});var UXt=s(Wce);Cit=t(UXt,"pretrained_model_name_or_path"),UXt.forEach(r),yit=t(AF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Vce=n(AF,"EM",{});var JXt=s(Vce);wit=t(JXt,"pretrained_model_name_or_path"),JXt.forEach(r),Ait=t(AF,":"),AF.forEach(r),xit=i(ca),Tt=n(ca,"UL",{});var ga=s(Tt);vT=n(ga,"LI",{});var rEe=s(vT);Qce=n(rEe,"STRONG",{});var KXt=s(Qce);Lit=t(KXt,"albert"),KXt.forEach(r),Bit=t(rEe," \u2014 "),YN=n(rEe,"A",{href:!0});var YXt=s(YN);kit=t(YXt,"FlaxAlbertForTokenClassification"),YXt.forEach(r),Rit=t(rEe," (ALBERT model)"),rEe.forEach(r),Sit=i(ga),bT=n(ga,"LI",{});var aEe=s(bT);Hce=n(aEe,"STRONG",{});var ZXt=s(Hce);Pit=t(ZXt,"bert"),ZXt.forEach(r),$it=t(aEe," \u2014 "),ZN=n(aEe,"A",{href:!0});var eWt=s(ZN);Iit=t(eWt,"FlaxBertForTokenClassification"),eWt.forEach(r),jit=t(aEe," (BERT model)"),aEe.forEach(r),Nit=i(ga),TT=n(ga,"LI",{});var nEe=s(TT);Uce=n(nEe,"STRONG",{});var oWt=s(Uce);Dit=t(oWt,"big_bird"),oWt.forEach(r),Git=t(nEe," \u2014 "),eD=n(nEe,"A",{href:!0});var tWt=s(eD);Oit=t(tWt,"FlaxBigBirdForTokenClassification"),tWt.forEach(r),qit=t(nEe," (BigBird model)"),nEe.forEach(r),zit=i(ga),FT=n(ga,"LI",{});var sEe=s(FT);Jce=n(sEe,"STRONG",{});var rWt=s(Jce);Xit=t(rWt,"distilbert"),rWt.forEach(r),Wit=t(sEe," \u2014 "),oD=n(sEe,"A",{href:!0});var aWt=s(oD);Vit=t(aWt,"FlaxDistilBertForTokenClassification"),aWt.forEach(r),Qit=t(sEe," (DistilBERT model)"),sEe.forEach(r),Hit=i(ga),MT=n(ga,"LI",{});var lEe=s(MT);Kce=n(lEe,"STRONG",{});var nWt=s(Kce);Uit=t(nWt,"electra"),nWt.forEach(r),Jit=t(lEe," \u2014 "),tD=n(lEe,"A",{href:!0});var sWt=s(tD);Kit=t(sWt,"FlaxElectraForTokenClassification"),sWt.forEach(r),Yit=t(lEe," (ELECTRA model)"),lEe.forEach(r),Zit=i(ga),ET=n(ga,"LI",{});var iEe=s(ET);Yce=n(iEe,"STRONG",{});var lWt=s(Yce);edt=t(lWt,"roberta"),lWt.forEach(r),odt=t(iEe," \u2014 "),rD=n(iEe,"A",{href:!0});var iWt=s(rD);tdt=t(iWt,"FlaxRobertaForTokenClassification"),iWt.forEach(r),rdt=t(iEe," (RoBERTa model)"),iEe.forEach(r),ga.forEach(r),adt=i(ca),Zce=n(ca,"P",{});var dWt=s(Zce);ndt=t(dWt,"Examples:"),dWt.forEach(r),sdt=i(ca),c(gw.$$.fragment,ca),ca.forEach(r),$l.forEach(r),t3e=i(d),vm=n(d,"H2",{class:!0});var ewe=s(vm);CT=n(ewe,"A",{id:!0,class:!0,href:!0});var mWt=s(CT);ege=n(mWt,"SPAN",{});var fWt=s(ege);c(hw.$$.fragment,fWt),fWt.forEach(r),mWt.forEach(r),ldt=i(ewe),oge=n(ewe,"SPAN",{});var cWt=s(oge);idt=t(cWt,"FlaxAutoModelForMultipleChoice"),cWt.forEach(r),ewe.forEach(r),r3e=i(d),Ft=n(d,"DIV",{class:!0});var jl=s(Ft);c(uw.$$.fragment,jl),ddt=i(jl),bm=n(jl,"P",{});var XG=s(bm);mdt=t(XG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a multiple choice 
head) when created with the `),tge=n(XG,"CODE",{});var gWt=s(tge);fdt=t(gWt,"from_pretrained()"),gWt.forEach(r),cdt=t(XG,` class method or the `),rge=n(XG,"CODE",{});var hWt=s(rge);gdt=t(hWt,"from_config()"),hWt.forEach(r),hdt=t(XG," class method."),XG.forEach(r),udt=i(jl),pw=n(jl,"P",{});var owe=s(pw);pdt=t(owe,"This class cannot be instantiated directly using "),age=n(owe,"CODE",{});var uWt=s(age);_dt=t(uWt,"__init__()"),uWt.forEach(r),vdt=t(owe," (throws an error)."),owe.forEach(r),bdt=i(jl),fr=n(jl,"DIV",{class:!0});var Nl=s(fr);c(_w.$$.fragment,Nl),Tdt=i(Nl),nge=n(Nl,"P",{});var pWt=s(nge);Fdt=t(pWt,"Instantiates one of the model classes of the library (with a multiple choice head) from a configuration."),pWt.forEach(r),Mdt=i(Nl),Tm=n(Nl,"P",{});var WG=s(Tm);Edt=t(WG,`Note: Loading a model from its configuration file does `),sge=n(WG,"STRONG",{});var _Wt=s(sge);Cdt=t(_Wt,"not"),_Wt.forEach(r),ydt=t(WG,` load the model weights. It only affects the model\u2019s configuration. Use [`),lge=n(WG,"EM",{});var vWt=s(lge);wdt=t(vWt,"~FlaxAutoModelForMultipleChoice.from_pretrained"),vWt.forEach(r),Adt=t(WG,`] to load the model weights.`),WG.forEach(r),xdt=i(Nl),ige=n(Nl,"P",{});var bWt=s(ige);Ldt=t(bWt,"Examples:"),bWt.forEach(r),Bdt=i(Nl),c(vw.$$.fragment,Nl),Nl.forEach(r),kdt=i(jl),Eo=n(jl,"DIV",{class:!0});var ha=s(Eo);c(bw.$$.fragment,ha),Rdt=i(ha),dge=n(ha,"P",{});var TWt=s(dge);Sdt=t(TWt,"Instantiate one of the model classes of the library (with a multiple choice head) from a pretrained model."),TWt.forEach(r),Pdt=i(ha),dn=n(ha,"P",{});var xF=s(dn);$dt=t(xF,"The model class to instantiate is selected based on the "),mge=n(xF,"EM",{});var FWt=s(mge);Idt=t(FWt,"model_type"),FWt.forEach(r),jdt=t(xF,` property of the config object (either passed as an argument or loaded from `),fge=n(xF,"EM",{});var MWt=s(fge);Ndt=t(MWt,"pretrained_model_name_or_path"),MWt.forEach(r),Ddt=t(xF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),cge=n(xF,"EM",{});var EWt=s(cge);Gdt=t(EWt,"pretrained_model_name_or_path"),EWt.forEach(r),Odt=t(xF,":"),xF.forEach(r),qdt=i(ha),Mt=n(ha,"UL",{});var ua=s(Mt);yT=n(ua,"LI",{});var dEe=s(yT);gge=n(dEe,"STRONG",{});var CWt=s(gge);zdt=t(CWt,"albert"),CWt.forEach(r),Xdt=t(dEe," \u2014 "),aD=n(dEe,"A",{href:!0});var yWt=s(aD);Wdt=t(yWt,"FlaxAlbertForMultipleChoice"),yWt.forEach(r),Vdt=t(dEe," (ALBERT model)"),dEe.forEach(r),Qdt=i(ua),wT=n(ua,"LI",{});var mEe=s(wT);hge=n(mEe,"STRONG",{});var wWt=s(hge);Hdt=t(wWt,"bert"),wWt.forEach(r),Udt=t(mEe," \u2014 "),nD=n(mEe,"A",{href:!0});var AWt=s(nD);Jdt=t(AWt,"FlaxBertForMultipleChoice"),AWt.forEach(r),Kdt=t(mEe," (BERT model)"),mEe.forEach(r),Ydt=i(ua),AT=n(ua,"LI",{});var fEe=s(AT);uge=n(fEe,"STRONG",{});var xWt=s(uge);Zdt=t(xWt,"big_bird"),xWt.forEach(r),emt=t(fEe," \u2014 "),sD=n(fEe,"A",{href:!0});var LWt=s(sD);omt=t(LWt,"FlaxBigBirdForMultipleChoice"),LWt.forEach(r),tmt=t(fEe," (BigBird model)"),fEe.forEach(r),rmt=i(ua),xT=n(ua,"LI",{});var cEe=s(xT);pge=n(cEe,"STRONG",{});var BWt=s(pge);amt=t(BWt,"distilbert"),BWt.forEach(r),nmt=t(cEe," \u2014 "),lD=n(cEe,"A",{href:!0});var kWt=s(lD);smt=t(kWt,"FlaxDistilBertForMultipleChoice"),kWt.forEach(r),lmt=t(cEe," (DistilBERT model)"),cEe.forEach(r),imt=i(ua),LT=n(ua,"LI",{});var gEe=s(LT);_ge=n(gEe,"STRONG",{});var RWt=s(_ge);dmt=t(RWt,"electra"),RWt.forEach(r),mmt=t(gEe," \u2014 "),iD=n(gEe,"A",{href:!0});var SWt=s(iD);fmt=t(SWt,"FlaxElectraForMultipleChoice"),SWt.forEach(r),cmt=t(gEe," (ELECTRA 
model)"),gEe.forEach(r),gmt=i(ua),BT=n(ua,"LI",{});var hEe=s(BT);vge=n(hEe,"STRONG",{});var PWt=s(vge);hmt=t(PWt,"roberta"),PWt.forEach(r),umt=t(hEe," \u2014 "),dD=n(hEe,"A",{href:!0});var $Wt=s(dD);pmt=t($Wt,"FlaxRobertaForMultipleChoice"),$Wt.forEach(r),_mt=t(hEe," (RoBERTa model)"),hEe.forEach(r),ua.forEach(r),vmt=i(ha),bge=n(ha,"P",{});var IWt=s(bge);bmt=t(IWt,"Examples:"),IWt.forEach(r),Tmt=i(ha),c(Tw.$$.fragment,ha),ha.forEach(r),jl.forEach(r),a3e=i(d),Fm=n(d,"H2",{class:!0});var twe=s(Fm);kT=n(twe,"A",{id:!0,class:!0,href:!0});var jWt=s(kT);Tge=n(jWt,"SPAN",{});var NWt=s(Tge);c(Fw.$$.fragment,NWt),NWt.forEach(r),jWt.forEach(r),Fmt=i(twe),Fge=n(twe,"SPAN",{});var DWt=s(Fge);Mmt=t(DWt,"FlaxAutoModelForNextSentencePrediction"),DWt.forEach(r),twe.forEach(r),n3e=i(d),Et=n(d,"DIV",{class:!0});var Dl=s(Et);c(Mw.$$.fragment,Dl),Emt=i(Dl),Mm=n(Dl,"P",{});var VG=s(Mm);Cmt=t(VG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a next sentence prediction head) when created with the `),Mge=n(VG,"CODE",{});var GWt=s(Mge);ymt=t(GWt,"from_pretrained()"),GWt.forEach(r),wmt=t(VG,` class method or the `),Ege=n(VG,"CODE",{});var OWt=s(Ege);Amt=t(OWt,"from_config()"),OWt.forEach(r),xmt=t(VG," class method."),VG.forEach(r),Lmt=i(Dl),Ew=n(Dl,"P",{});var rwe=s(Ew);Bmt=t(rwe,"This class cannot be instantiated directly using "),Cge=n(rwe,"CODE",{});var qWt=s(Cge);kmt=t(qWt,"__init__()"),qWt.forEach(r),Rmt=t(rwe," (throws an error)."),rwe.forEach(r),Smt=i(Dl),cr=n(Dl,"DIV",{class:!0});var Gl=s(cr);c(Cw.$$.fragment,Gl),Pmt=i(Gl),yge=n(Gl,"P",{});var zWt=s(yge);$mt=t(zWt,"Instantiates one of the model classes of the library (with a next sentence prediction head) from a configuration."),zWt.forEach(r),Imt=i(Gl),Em=n(Gl,"P",{});var QG=s(Em);jmt=t(QG,`Note: Loading a model from its configuration file does `),wge=n(QG,"STRONG",{});var XWt=s(wge);Nmt=t(XWt,"not"),XWt.forEach(r),Dmt=t(QG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),Age=n(QG,"EM",{});var WWt=s(Age);Gmt=t(WWt,"~FlaxAutoModelForNextSentencePrediction.from_pretrained"),WWt.forEach(r),Omt=t(QG,`] to load the model weights.`),QG.forEach(r),qmt=i(Gl),xge=n(Gl,"P",{});var VWt=s(xge);zmt=t(VWt,"Examples:"),VWt.forEach(r),Xmt=i(Gl),c(yw.$$.fragment,Gl),Gl.forEach(r),Wmt=i(Dl),Co=n(Dl,"DIV",{class:!0});var pa=s(Co);c(ww.$$.fragment,pa),Vmt=i(pa),Lge=n(pa,"P",{});var QWt=s(Lge);Qmt=t(QWt,"Instantiate one of the model classes of the library (with a next sentence prediction head) from a pretrained model."),QWt.forEach(r),Hmt=i(pa),mn=n(pa,"P",{});var LF=s(mn);Umt=t(LF,"The model class to instantiate is selected based on the "),Bge=n(LF,"EM",{});var HWt=s(Bge);Jmt=t(HWt,"model_type"),HWt.forEach(r),Kmt=t(LF,` property of the config object (either passed as an argument or loaded from `),kge=n(LF,"EM",{});var UWt=s(kge);Ymt=t(UWt,"pretrained_model_name_or_path"),UWt.forEach(r),Zmt=t(LF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Rge=n(LF,"EM",{});var JWt=s(Rge);eft=t(JWt,"pretrained_model_name_or_path"),JWt.forEach(r),oft=t(LF,":"),LF.forEach(r),tft=i(pa),Sge=n(pa,"UL",{});var KWt=s(Sge);RT=n(KWt,"LI",{});var uEe=s(RT);Pge=n(uEe,"STRONG",{});var YWt=s(Pge);rft=t(YWt,"bert"),YWt.forEach(r),aft=t(uEe," \u2014 "),mD=n(uEe,"A",{href:!0});var ZWt=s(mD);nft=t(ZWt,"FlaxBertForNextSentencePrediction"),ZWt.forEach(r),sft=t(uEe," (BERT model)"),uEe.forEach(r),KWt.forEach(r),lft=i(pa),$ge=n(pa,"P",{});var eVt=s($ge);ift=t(eVt,"Examples:"),eVt.forEach(r),dft=i(pa),c(Aw.$$.fragment,pa),pa.forEach(r),Dl.forEach(r),s3e=i(d),Cm=n(d,"H2",{class:!0});var awe=s(Cm);ST=n(awe,"A",{id:!0,class:!0,href:!0});var oVt=s(ST);Ige=n(oVt,"SPAN",{});var tVt=s(Ige);c(xw.$$.fragment,tVt),tVt.forEach(r),oVt.forEach(r),mft=i(awe),jge=n(awe,"SPAN",{});var rVt=s(jge);fft=t(rVt,"FlaxAutoModelForImageClassification"),rVt.forEach(r),awe.forEach(r),l3e=i(d),Ct=n(d,"DIV",{class:!0});var Ol=s(Ct);c(Lw.$$.fragment,Ol),cft=i(Ol),ym=n(Ol,"P",{});var HG=s(ym);gft=t(HG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a image classification head) when created with the `),Nge=n(HG,"CODE",{});var aVt=s(Nge);hft=t(aVt,"from_pretrained()"),aVt.forEach(r),uft=t(HG,` class method or the `),Dge=n(HG,"CODE",{});var nVt=s(Dge);pft=t(nVt,"from_config()"),nVt.forEach(r),_ft=t(HG," class method."),HG.forEach(r),vft=i(Ol),Bw=n(Ol,"P",{});var nwe=s(Bw);bft=t(nwe,"This class cannot be instantiated directly using "),Gge=n(nwe,"CODE",{});var sVt=s(Gge);Tft=t(sVt,"__init__()"),sVt.forEach(r),Fft=t(nwe," (throws an error)."),nwe.forEach(r),Mft=i(Ol),gr=n(Ol,"DIV",{class:!0});var ql=s(gr);c(kw.$$.fragment,ql),Eft=i(ql),Oge=n(ql,"P",{});var lVt=s(Oge);Cft=t(lVt,"Instantiates one of the model classes of the library (with a image classification head) from a configuration."),lVt.forEach(r),yft=i(ql),wm=n(ql,"P",{});var UG=s(wm);wft=t(UG,`Note: Loading a model from its configuration file does `),qge=n(UG,"STRONG",{});var iVt=s(qge);Aft=t(iVt,"not"),iVt.forEach(r),xft=t(UG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),zge=n(UG,"EM",{});var dVt=s(zge);Lft=t(dVt,"~FlaxAutoModelForImageClassification.from_pretrained"),dVt.forEach(r),Bft=t(UG,`] to load the model weights.`),UG.forEach(r),kft=i(ql),Xge=n(ql,"P",{});var mVt=s(Xge);Rft=t(mVt,"Examples:"),mVt.forEach(r),Sft=i(ql),c(Rw.$$.fragment,ql),ql.forEach(r),Pft=i(Ol),yo=n(Ol,"DIV",{class:!0});var _a=s(yo);c(Sw.$$.fragment,_a),$ft=i(_a),Wge=n(_a,"P",{});var fVt=s(Wge);Ift=t(fVt,"Instantiate one of the model classes of the library (with a image classification head) from a pretrained model."),fVt.forEach(r),jft=i(_a),fn=n(_a,"P",{});var BF=s(fn);Nft=t(BF,"The model class to instantiate is selected based on the "),Vge=n(BF,"EM",{});var cVt=s(Vge);Dft=t(cVt,"model_type"),cVt.forEach(r),Gft=t(BF,` property of the config object (either passed as an argument or loaded from `),Qge=n(BF,"EM",{});var gVt=s(Qge);Oft=t(gVt,"pretrained_model_name_or_path"),gVt.forEach(r),qft=t(BF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),Hge=n(BF,"EM",{});var hVt=s(Hge);zft=t(hVt,"pretrained_model_name_or_path"),hVt.forEach(r),Xft=t(BF,":"),BF.forEach(r),Wft=i(_a),Pw=n(_a,"UL",{});var swe=s(Pw);PT=n(swe,"LI",{});var pEe=s(PT);Uge=n(pEe,"STRONG",{});var uVt=s(Uge);Vft=t(uVt,"beit"),uVt.forEach(r),Qft=t(pEe," \u2014 "),fD=n(pEe,"A",{href:!0});var pVt=s(fD);Hft=t(pVt,"FlaxBeitForImageClassification"),pVt.forEach(r),Uft=t(pEe," (BEiT model)"),pEe.forEach(r),Jft=i(swe),$T=n(swe,"LI",{});var _Ee=s($T);Jge=n(_Ee,"STRONG",{});var _Vt=s(Jge);Kft=t(_Vt,"vit"),_Vt.forEach(r),Yft=t(_Ee," \u2014 "),cD=n(_Ee,"A",{href:!0});var vVt=s(cD);Zft=t(vVt,"FlaxViTForImageClassification"),vVt.forEach(r),ect=t(_Ee," (ViT model)"),_Ee.forEach(r),swe.forEach(r),oct=i(_a),Kge=n(_a,"P",{});var bVt=s(Kge);tct=t(bVt,"Examples:"),bVt.forEach(r),rct=i(_a),c($w.$$.fragment,_a),_a.forEach(r),Ol.forEach(r),i3e=i(d),Am=n(d,"H2",{class:!0});var lwe=s(Am);IT=n(lwe,"A",{id:!0,class:!0,href:!0});var TVt=s(IT);Yge=n(TVt,"SPAN",{});var FVt=s(Yge);c(Iw.$$.fragment,FVt),FVt.forEach(r),TVt.forEach(r),act=i(lwe),Zge=n(lwe,"SPAN",{});var MVt=s(Zge);nct=t(MVt,"FlaxAutoModelForVision2Seq"),MVt.forEach(r),lwe.forEach(r),d3e=i(d),yt=n(d,"DIV",{class:!0});var zl=s(yt);c(jw.$$.fragment,zl),sct=i(zl),xm=n(zl,"P",{});var JG=s(xm);lct=t(JG,`This is a generic model class that will be instantiated as one of the model classes of the library (with a vision-to-text modeling head) when created with the `),ehe=n(JG,"CODE",{});var EVt=s(ehe);ict=t(EVt,"from_pretrained()"),EVt.forEach(r),dct=t(JG,` class method or the `),ohe=n(JG,"CODE",{});var CVt=s(ohe);mct=t(CVt,"from_config()"),CVt.forEach(r),fct=t(JG," class method."),JG.forEach(r),cct=i(zl),Nw=n(zl,"P",{});var iwe=s(Nw);gct=t(iwe,"This class cannot be instantiated directly using "),the=n(iwe,"CODE",{});var yVt=s(the);hct=t(yVt,"__init__()"),yVt.forEach(r),uct=t(iwe," (throws an error)."),iwe.forEach(r),pct=i(zl),hr=n(zl,"DIV",{class:!0});var Xl=s(hr);c(Dw.$$.fragment,Xl),_ct=i(Xl),rhe=n(Xl,"P",{});var wVt=s(rhe);vct=t(wVt,"Instantiates one of the model classes of the library (with a vision-to-text modeling head) from a configuration."),wVt.forEach(r),bct=i(Xl),Lm=n(Xl,"P",{});var KG=s(Lm);Tct=t(KG,`Note: Loading a model from its configuration file does `),ahe=n(KG,"STRONG",{});var AVt=s(ahe);Fct=t(AVt,"not"),AVt.forEach(r),Mct=t(KG,` load the model weights. It only affects the model\u2019s configuration. 
Use [`),nhe=n(KG,"EM",{});var xVt=s(nhe);Ect=t(xVt,"~FlaxAutoModelForVision2Seq.from_pretrained"),xVt.forEach(r),Cct=t(KG,`] to load the model weights.`),KG.forEach(r),yct=i(Xl),she=n(Xl,"P",{});var LVt=s(she);wct=t(LVt,"Examples:"),LVt.forEach(r),Act=i(Xl),c(Gw.$$.fragment,Xl),Xl.forEach(r),xct=i(zl),wo=n(zl,"DIV",{class:!0});var va=s(wo);c(Ow.$$.fragment,va),Lct=i(va),lhe=n(va,"P",{});var BVt=s(lhe);Bct=t(BVt,"Instantiate one of the model classes of the library (with a vision-to-text modeling head) from a pretrained model."),BVt.forEach(r),kct=i(va),cn=n(va,"P",{});var kF=s(cn);Rct=t(kF,"The model class to instantiate is selected based on the "),ihe=n(kF,"EM",{});var kVt=s(ihe);Sct=t(kVt,"model_type"),kVt.forEach(r),Pct=t(kF,` property of the config object (either passed as an argument or loaded from `),dhe=n(kF,"EM",{});var RVt=s(dhe);$ct=t(RVt,"pretrained_model_name_or_path"),RVt.forEach(r),Ict=t(kF,` if possible), or when it\u2019s missing, by falling back to using pattern matching on `),mhe=n(kF,"EM",{});var SVt=s(mhe);jct=t(SVt,"pretrained_model_name_or_path"),SVt.forEach(r),Nct=t(kF,":"),kF.forEach(r),Dct=i(va),fhe=n(va,"UL",{});var PVt=s(fhe);jT=n(PVt,"LI",{});var vEe=s(jT);che=n(vEe,"STRONG",{});var $Vt=s(che);Gct=t($Vt,"vision-encoder-decoder"),$Vt.forEach(r),Oct=t(vEe," \u2014 "),gD=n(vEe,"A",{href:!0});var IVt=s(gD);qct=t(IVt,"FlaxVisionEncoderDecoderModel"),IVt.forEach(r),zct=t(vEe," (Vision Encoder decoder model)"),vEe.forEach(r),PVt.forEach(r),Xct=i(va),ghe=n(va,"P",{});var jVt=s(ghe);Wct=t(jVt,"Examples:"),jVt.forEach(r),Vct=i(va),c(qw.$$.fragment,va),va.forEach(r),zl.forEach(r),this.h()},h(){m(J,"name","hf:doc:metadata"),m(J,"content",JSON.stringify(VVt)),m(de,"id","auto-classes"),m(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(de,"href","#auto-classes"),m(se,"class","relative group"),m(gn,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoConfig"),m(un,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoModel"),m(pn,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoTokenizer"),m(Yl,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel"),m($m,"id","extending-the-auto-classes"),m($m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m($m,"href","#extending-the-auto-classes"),m(Zl,"class","relative group"),m(jm,"id","transformers.AutoConfig"),m(jm,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(jm,"href","#transformers.AutoConfig"),m(ei,"class","relative 
group"),m(qA,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoConfig.from_pretrained"),m(zA,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertConfig"),m(XA,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig"),m(WA,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig"),m(VA,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig"),m(QA,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig"),m(HA,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig"),m(UA,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig"),m(JA,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig"),m(KA,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig"),m(YA,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig"),m(ZA,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig"),m(e7,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig"),m(o7,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig"),m(t7,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig"),m(r7,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig"),m(a7,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Config"),m(n7,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig"),m(s7,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig"),m(l7,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertConfig"),m(i7,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRConfig"),m(d7,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraConfig"),m(m7,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig"),m(f7,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig"),m(c7,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig"),m(g7,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTConfig"),m(h7,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig"),m(u7,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config"),m(p7,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig"),m(_7,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJConfig"),m(v7,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig"),m(b7,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig"),m(T7,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig"),m(F7,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig"),m(M7,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config"),m(E7,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig"),m(C7,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerConfig"),m(y7,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig"),m(w7,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig"),m(A7,"href","/docs/transforme
rs/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Config"),m(x7,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig"),m(L7,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig"),m(B7,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig"),m(k7,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig"),m(R7,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig"),m(S7,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Config"),m(P7,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig"),m($7,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig"),m(I7,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig"),m(j7,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig"),m(N7,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig"),m(D7,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig"),m(G7,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig"),m(O7,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig"),m(q7,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertConfig"),m(z7,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig"),m(X7,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig"),m(W7,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig"),m(V7,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig"),m(Q7,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig"),m(H7,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig"),m(U7,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextConfig"),m(J7,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Config"),m(K7,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig"),m(Y7,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig"),m(Z7,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config"),m(ex,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig"),m(ox,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig"),m(tx,"href","/docs/transformers/v4.15.0/en/model_doc/trocr#transformers.TrOCRConfig"),m(rx,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig"),m(ax,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig"),m(nx,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig"),m(sx,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig"),m(lx,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig"),m(ix,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig"),m(dx,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config"),m(mx,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig"),m(fx,"href","/docs/transformers/v4.15.0/en/mode
l_doc/xlm#transformers.XLMConfig"),m(cx,"href","/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetConfig"),m(gx,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig"),m(hx,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig"),m(oo,"class","docstring"),m(fc,"class","docstring"),m(Ro,"class","docstring"),m(cc,"id","transformers.AutoTokenizer"),m(cc,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(cc,"href","#transformers.AutoTokenizer"),m(ti,"class","relative group"),m(ux,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoTokenizer.from_pretrained"),m(px,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer"),m(_x,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizerFast"),m(vx,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),m(bx,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizerFast"),m(Tx,"href","/docs/transformers/v4.15.0/en/model_doc/barthez#transformers.BarthezTokenizer"),m(Fx,"href","/docs/transformers/v4.15.0/en/model_doc/barthez#transformers.BarthezTokenizerFast"),m(Mx,"href","/docs/transformers/v4.15.0/en/model_doc/bartpho#transformers.BartphoTokenizer"),m(Ex,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),m(Cx,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),m(yx,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationTokenizer"),m(wx,"href","/docs/transformers/v4.15.0/en/model_doc/bert_japanese#transformers.BertJapaneseTokenizer"),m(Ax,"href","/docs/transformers/v4.15.0/en/model_doc/bertweet#transformers.BertweetTokenizer"),m(xx,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer"),m(Lx,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizerFast"),m(Bx,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer"),m(kx,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizerFast"),m(Rx,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer"),m(Sx,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizerFast"),m(Px,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer"),m($x,"href","/docs/transformers/v4.15.0/en/model_doc/byt5#transformers.ByT5Tokenizer"),m(Ix,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertTokenizer"),m(jx,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertTokenizerFast"),m(Nx,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer"),m(Dx,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer"),m(Gx,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizerFast"),m(Ox,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer"),m(qx,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizerFast"),m(zx,"href","/docs/transformers/v4.15.0/en/model_doc/cpm#transformers.CpmTokenizer"),m(Xx,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer"),m(Wx,"href","/docs
/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer"),m(Vx,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizerFast"),m(Qx,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Tokenizer"),m(Hx,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertTokenizer"),m(Ux,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertTokenizerFast"),m(Jx,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoderTokenizer"),m(Kx,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoderTokenizerFast"),m(Yx,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizer"),m(Zx,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraTokenizerFast"),m(e6,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertTokenizer"),m(o6,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer"),m(t6,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizerFast"),m(r6,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTTokenizer"),m(a6,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer"),m(n6,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizerFast"),m(s6,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer"),m(l6,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2TokenizerFast"),m(i6,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer"),m(d6,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2TokenizerFast"),m(m6,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),m(f6,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),m(c6,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),m(g6,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer"),m(h6,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizerFast"),m(u6,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer"),m(p6,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast"),m(_6,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer"),m(v6,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizerFast"),m(b6,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizer"),m(T6,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerTokenizerFast"),m(F6,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer"),m(M6,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer"),m(E6,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizerFast"),m(C6,"href","/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100Tokenizer"),m(y6,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer"),m(w6,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer"),m(A6,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizerFast"),m(x6,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBart50Tokenizer"
),m(L6,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBart50TokenizerFast"),m(B6,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer"),m(k6,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizerFast"),m(R6,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer"),m(S6,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizerFast"),m(P6,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer"),m($6,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5TokenizerFast"),m(I6,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer"),m(j6,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizerFast"),m(N6,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer"),m(D6,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizerFast"),m(G6,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverTokenizer"),m(O6,"href","/docs/transformers/v4.15.0/en/model_doc/phobert#transformers.PhobertTokenizer"),m(q6,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer"),m(z6,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),m(X6,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),m(W6,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenizer"),m(V6,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizer"),m(Q6,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizerFast"),m(H6,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer"),m(U6,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizerFast"),m(J6,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertTokenizer"),m(K6,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertTokenizerFast"),m(Y6,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),m(Z6,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),m(e8,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer"),m(o8,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizerFast"),m(t8,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer"),m(r8,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),m(a8,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterTokenizer"),m(n8,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterTokenizerFast"),m(s8,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer"),m(l8,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizerFast"),m(i8,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer"),m(d8,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5TokenizerFast"),m(m8,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),m(f8,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLTokenizer"),m(c8,"href","/docs/transformers/v4.
15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),m(g8,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer"),m(h8,"href","/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetTokenizer"),m(u8,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizer"),m(p8,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizerFast"),m(_8,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),m(v8,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizerFast"),m(to,"class","docstring"),m(Dc,"class","docstring"),m(So,"class","docstring"),m(Gc,"id","transformers.AutoFeatureExtractor"),m(Gc,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Gc,"href","#transformers.AutoFeatureExtractor"),m(ri,"class","relative group"),m(b8,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoFeatureExtractor.from_pretrained"),m(T8,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor"),m(F8,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor"),m(M8,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor"),m(E8,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor"),m(C8,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),m(y8,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),m(w8,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverFeatureExtractor"),m(A8,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextFeatureExtractor"),m(x8,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor"),m(L8,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),m(we,"class","docstring"),m(Gr,"class","docstring"),m(Yc,"id","transformers.AutoProcessor"),m(Yc,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Yc,"href","#transformers.AutoProcessor"),m(ai,"class","relative group"),m(B8,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoProcessor.from_pretrained"),m(k8,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor"),m(R8,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor"),m(S8,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextProcessor"),m(P8,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor"),m($8,"href","/docs/transformers/v4.15.0/en/model_doc/trocr#transformers.TrOCRProcessor"),m(I8,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderProcessor"),m(j8,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),m(Ae,"class","docstring"),m(Or,"class","docstring"),m(lg,"id","transformers.AutoModel"),m(lg,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(lg,"href","#transformers.AutoModel"),m(si,"class","relative group"),m(wt,"class","docstring"),m(N8,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertModel"),m(D8,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel"),m(G8,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitModel"),m(O8,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel"),m(q8,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationEncoder"),m(z8,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdModel"),m(X8,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusModel"),m(W8,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotModel"),m(V8,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallModel"),m(Q8,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertModel"),m(H8,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineModel"),m(U8,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),m(J8,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertModel"),m(K8,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLModel"),m(Y8,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel"),m(Z8,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2Model"),m(eL,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTModel"),m(oL,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrModel"),m(tL,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertModel"),m(rL,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),m(aL,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraModel"),m(nL,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertModel"),m(sL,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetModel"),m(lL,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTModel"),m(iL,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel"),m(dL,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelBaseModel"),m(mL,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Model"),m(fL,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel"),m(cL,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJModel"),m(gL,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertModel"),m(hL,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertModel"),m(uL,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTModel"),m(pL,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel"),m(_L,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model"),m(vL,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDModel"),m(bL,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel"),m(TL,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel"),m(FL,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertModel"),m(ML,"href","/docs/transformers/v4.15.0/en/model_doc/m2
m_100#transformers.M2M100Model"),m(EL,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianModel"),m(CL,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartModel"),m(yL,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertModel"),m(wL,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertModel"),m(AL,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetModel"),m(xL,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5Model"),m(LL,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTModel"),m(BL,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusModel"),m(kL,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),m(RL,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetModel"),m(SL,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertModel"),m(PL,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModel"),m($L,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel"),m(IL,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertModel"),m(jL,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel"),m(NL,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerModel"),m(DL,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerModel"),m(GL,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWModel"),m(OL,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDModel"),m(qL,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextModel"),m(zL,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterModel"),m(XL,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertModel"),m(WL,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model"),m(VL,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasModel"),m(QL,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLModel"),m(HL,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechModel"),m(UL,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatModel"),m(JL,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderModel"),m(KL,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel"),m(YL,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTModel"),m(ZL,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Model"),m(eB,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMModel"),m(oB,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMModel"),m(tB,"href","/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetModel"),m(rB,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaModel"),m(aB,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetModel"),m(xe,"class","docstring"),m(Po,"class","docstring"),m(xh,"id","transformers.AutoModelForPreTraining"),m(xh,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),m(xh,"href","#transformers.AutoModelForPreTraining"),m(di,"class","relative group"),m(At,"class","docstring"),m(nB,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForPreTraining"),m(sB,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),m(lB,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForPreTraining"),m(iB,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForPreTraining"),m(dB,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForMaskedLM"),m(mB,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLLMHeadModel"),m(fB,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForMaskedLM"),m(cB,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForMaskedLM"),m(gB,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForMaskedLM"),m(hB,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForPreTraining"),m(uB,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertWithLMHeadModel"),m(pB,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForPreTraining"),m(_B,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTForConditionalGeneration"),m(vB,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForPreTraining"),m(bB,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2LMHeadModel"),m(TB,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMaskedLM"),m(FB,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForMaskedLM"),m(MB,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMaskedLM"),m(EB,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForPreTraining"),m(CB,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForPreTraining"),m(yB,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForPreTraining"),m(wB,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMaskedLM"),m(AB,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTLMHeadModel"),m(xB,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertModel"),m(LB,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM"),m(BB,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMaskedLM"),m(kB,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),m(RB,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForMaskedLM"),m(SB,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLLMHeadModel"),m(PB,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForPreTraining"),m($B,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForPreTraining"),m(IB,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForPreTraining"),m(jB,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForPreTraining"),m(NB,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel"),m(DB,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#
transformers.XLMRobertaForMaskedLM"),m(GB,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetLMHeadModel"),m(Le,"class","docstring"),m($o,"class","docstring"),m(cu,"id","transformers.AutoModelForCausalLM"),m(cu,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(cu,"href","#transformers.AutoModelForCausalLM"),m(ci,"class","relative group"),m(xt,"class","docstring"),m(OB,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForCausalLM"),m(qB,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertLMHeadModel"),m(zB,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationDecoder"),m(XB,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForCausalLM"),m(WB,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForCausalLM"),m(VB,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotForCausalLM"),m(QB,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallForCausalLM"),m(HB,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForCausalLM"),m(UB,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLLMHeadModel"),m(JB,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2LMHeadModel"),m(KB,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForCausalLM"),m(YB,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForCausalLM"),m(ZB,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianForCausalLM"),m(e9,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForCausalLM"),m(o9,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForCausalLM"),m(t9,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTLMHeadModel"),m(r9,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusForCausalLM"),m(a9,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForCausalLM"),m(n9,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertLMHeadModel"),m(s9,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModelWithLMHead"),m(l9,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForCausalLM"),m(i9,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForCausalLM"),m(d9,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForCausalLM"),m(m9,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2ForCausalLM"),m(f9,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLLMHeadModel"),m(c9,"href","/docs/transformers/v4.15.0/en/model_doc/trocr#transformers.TrOCRForCausalLM"),m(g9,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel"),m(h9,"href","/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetForCausalLM"),m(u9,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForCausalLM"),m(p9,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetLMHeadModel"),m(Be,"class","docstring"),m(Io,"class","docstring"),m(Xu,"id","transformers.AutoModelForMaskedLM"),m(Xu,"class","header-link 
block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Xu,"href","#transformers.AutoModelForMaskedLM"),m(ui,"class","relative group"),m(Lt,"class","docstring"),m(_9,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForMaskedLM"),m(v9,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),m(b9,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForMaskedLM"),m(T9,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForMaskedLM"),m(F9,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForMaskedLM"),m(M9,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForMaskedLM"),m(E9,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForMaskedLM"),m(C9,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForMaskedLM"),m(y9,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForMaskedLM"),m(w9,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMaskedLM"),m(A9,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertWithLMHeadModel"),m(x9,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForMaskedLM"),m(L9,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMaskedLM"),m(B9,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMaskedLM"),m(k9,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForMaskedLM"),m(R9,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMaskedLM"),m(S9,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForConditionalGeneration"),m(P9,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForMaskedLM"),m($9,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForMaskedLM"),m(I9,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMaskedLM"),m(j9,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForMaskedLM"),m(N9,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForMaskedLM"),m(D9,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForMaskedLM"),m(G9,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForMaskedLM"),m(O9,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM"),m(q9,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForMaskedLM"),m(z9,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMaskedLM"),m(X9,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForMaskedLM"),m(W9,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel"),m(V9,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForMaskedLM"),m(ke,"class","docstring"),m(jo,"class","docstring"),m(Ep,"id","transformers.AutoModelForSeq2SeqLM"),m(Ep,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ep,"href","#transformers.AutoModelForSeq2SeqLM"),m(vi,"class","relative 
group"),m(Bt,"class","docstring"),m(Q9,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),m(H9,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForConditionalGeneration"),m(U9,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotForConditionalGeneration"),m(J9,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallForConditionalGeneration"),m(K9,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),m(Y9,"href","/docs/transformers/v4.15.0/en/model_doc/fsmt#transformers.FSMTForConditionalGeneration"),m(Z9,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForConditionalGeneration"),m(ek,"href","/docs/transformers/v4.15.0/en/model_doc/m2m_100#transformers.M2M100ForConditionalGeneration"),m(ok,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianMTModel"),m(tk,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForConditionalGeneration"),m(rk,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.MT5ForConditionalGeneration"),m(ak,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusForConditionalGeneration"),m(nk,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration"),m(sk,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),m(lk,"href","/docs/transformers/v4.15.0/en/model_doc/xlmprophetnet#transformers.XLMProphetNetForConditionalGeneration"),m(Re,"class","docstring"),m(No,"class","docstring"),m(Gp,"id","transformers.AutoModelForSequenceClassification"),m(Gp,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Gp,"href","#transformers.AutoModelForSequenceClassification"),m(Fi,"class","relative 
group"),m(kt,"class","docstring"),m(ik,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForSequenceClassification"),m(dk,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForSequenceClassification"),m(mk,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForSequenceClassification"),m(fk,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForSequenceClassification"),m(ck,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForSequenceClassification"),m(gk,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForSequenceClassification"),m(hk,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForSequenceClassification"),m(uk,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForSequenceClassification"),m(pk,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLForSequenceClassification"),m(_k,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForSequenceClassification"),m(vk,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForSequenceClassification"),m(bk,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForSequenceClassification"),m(Tk,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForSequenceClassification"),m(Fk,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForSequenceClassification"),m(Mk,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForSequenceClassification"),m(Ek,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForSequenceClassification"),m(Ck,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForSequenceClassification"),m(yk,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForSequenceClassification"),m(wk,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForSequenceClassification"),m(Ak,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForSequenceClassification"),m(xk,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForSequenceClassification"),m(Lk,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForSequenceClassification"),m(Bk,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForSequenceClassification"),m(kk,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForSequenceClassification"),m(Rk,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForSequenceClassification"),m(Sk,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForSequenceClassification"),m(Pk,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForSequenceClassification"),m($k,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForSequenceClassification"),m(Ik,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTForSequenceClassification"),m(jk,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForSequenceClassification"),m(Nk,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForSequenceClassification"),m(Dk,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForSequenceClassification"),m(Gk,"href","/docs/transfor
mers/v4.15.0/en/model_doc/rembert#transformers.RemBertForSequenceClassification"),m(Ok,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForSequenceClassification"),m(qk,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForSequenceClassification"),m(zk,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForSequenceClassification"),m(Xk,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForSequenceClassification"),m(Wk,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLForSequenceClassification"),m(Vk,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForSequenceClassification"),m(Qk,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForSequenceClassification"),m(Hk,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForSequenceClassification"),m(Se,"class","docstring"),m(Do,"class","docstring"),m(L_,"id","transformers.AutoModelForMultipleChoice"),m(L_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(L_,"href","#transformers.AutoModelForMultipleChoice"),m(Ci,"class","relative group"),m(Rt,"class","docstring"),m(Uk,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForMultipleChoice"),m(Jk,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForMultipleChoice"),m(Kk,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForMultipleChoice"),m(Yk,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForMultipleChoice"),m(Zk,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForMultipleChoice"),m(eR,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForMultipleChoice"),m(oR,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForMultipleChoice"),m(tR,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForMultipleChoice"),m(rR,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForMultipleChoice"),m(aR,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForMultipleChoice"),m(nR,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMultipleChoice"),m(sR,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMultipleChoice"),m(lR,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForMultipleChoice"),m(iR,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForMultipleChoice"),m(dR,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForMultipleChoice"),m(mR,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMultipleChoice"),m(fR,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForMultipleChoice"),m(cR,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForMultipleChoice"),m(gR,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMultipleChoice"),m(hR,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForMultipleChoice"),m(uR,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMultipleChoice"),m(pR,"href","/docs/transformers/v4.15.0/en/model_doc/
xlm#transformers.XLMForMultipleChoice"),m(_R,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForMultipleChoice"),m(vR,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForMultipleChoice"),m(Pe,"class","docstring"),m(Go,"class","docstring"),m(ov,"id","transformers.AutoModelForNextSentencePrediction"),m(ov,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ov,"href","#transformers.AutoModelForNextSentencePrediction"),m(Ai,"class","relative group"),m(St,"class","docstring"),m(bR,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForNextSentencePrediction"),m(TR,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForNextSentencePrediction"),m(FR,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForNextSentencePrediction"),m(MR,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForNextSentencePrediction"),m(ER,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForNextSentencePrediction"),m($e,"class","docstring"),m(Oo,"class","docstring"),m(iv,"id","transformers.AutoModelForTokenClassification"),m(iv,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(iv,"href","#transformers.AutoModelForTokenClassification"),m(Bi,"class","relative group"),m(Pt,"class","docstring"),m(CR,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForTokenClassification"),m(yR,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForTokenClassification"),m(wR,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForTokenClassification"),m(AR,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForTokenClassification"),m(xR,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForTokenClassification"),m(LR,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForTokenClassification"),m(BR,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForTokenClassification"),m(kR,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForTokenClassification"),m(RR,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForTokenClassification"),m(SR,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForTokenClassification"),m(PR,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForTokenClassification"),m($R,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForTokenClassification"),m(IR,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForTokenClassification"),m(jR,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForTokenClassification"),m(NR,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForTokenClassification"),m(DR,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForTokenClassification"),m(GR,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForTokenClassification"),m(OR,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForTokenClassification"),m(qR,"href","/docs/transfo
rmers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForTokenClassification"),m(zR,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForTokenClassification"),m(XR,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForTokenClassification"),m(WR,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForTokenClassification"),m(VR,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForTokenClassification"),m(QR,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForTokenClassification"),m(HR,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForTokenClassification"),m(UR,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForTokenClassification"),m(JR,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForTokenClassification"),m(KR,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForTokenClassification"),m(YR,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForTokenClassification"),m(Ie,"class","docstring"),m(qo,"class","docstring"),m(Dv,"id","transformers.AutoModelForQuestionAnswering"),m(Dv,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Dv,"href","#transformers.AutoModelForQuestionAnswering"),m(Si,"class","relative group"),m($t,"class","docstring"),m(ZR,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertForQuestionAnswering"),m(eS,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForQuestionAnswering"),m(oS,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForQuestionAnswering"),m(tS,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForQuestionAnswering"),m(rS,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForQuestionAnswering"),m(aS,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertForQuestionAnswering"),m(nS,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForQuestionAnswering"),m(sS,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForQuestionAnswering"),m(lS,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForQuestionAnswering"),m(iS,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.DebertaV2ForQuestionAnswering"),m(dS,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.DistilBertForQuestionAnswering"),m(mS,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.ElectraForQuestionAnswering"),m(fS,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertForQuestionAnsweringSimple"),m(cS,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForQuestionAnswering"),m(gS,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForQuestionAnswering"),m(hS,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.GPTJForQuestionAnswering"),m(uS,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForQuestionAnswering"),m(pS,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForQuestionAnswering"),m(_S,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForQuestionAnswering"),m(vS,"href","/
docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerForQuestionAnswering"),m(bS,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForQuestionAnswering"),m(TS,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForQuestionAnswering"),m(FS,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForQuestionAnswering"),m(MS,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForQuestionAnswering"),m(ES,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForQuestionAnswering"),m(CS,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForQuestionAnswering"),m(yS,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForQuestionAnswering"),m(wS,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForQuestionAnswering"),m(AS,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForQuestionAnswering"),m(xS,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForQuestionAnswering"),m(LS,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterForQuestionAnswering"),m(BS,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForQuestionAnswering"),m(kS,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnsweringSimple"),m(RS,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaForQuestionAnswering"),m(SS,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForQuestionAnsweringSimple"),m(je,"class","docstring"),m(zo,"class","docstring"),m(M1,"id","transformers.AutoModelForTableQuestionAnswering"),m(M1,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(M1,"href","#transformers.AutoModelForTableQuestionAnswering"),m(Ii,"class","relative group"),m(It,"class","docstring"),m(PS,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),m(Ne,"class","docstring"),m(Xo,"class","docstring"),m(y1,"id","transformers.AutoModelForImageClassification"),m(y1,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(y1,"href","#transformers.AutoModelForImageClassification"),m(Di,"class","relative 
group"),m(jt,"class","docstring"),m($S,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitForImageClassification"),m(IS,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassification"),m(jS,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassificationWithTeacher"),m(NS,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification"),m(DS,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned"),m(GS,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationFourier"),m(OS,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationConvProcessing"),m(qS,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForImageClassification"),m(zS,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTForImageClassification"),m(De,"class","docstring"),m(Wo,"class","docstring"),m(k1,"id","transformers.AutoModelForVision2Seq"),m(k1,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(k1,"href","#transformers.AutoModelForVision2Seq"),m(qi,"class","relative group"),m(Nt,"class","docstring"),m(XS,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel"),m(Ge,"class","docstring"),m(Qo,"class","docstring"),m(P1,"id","transformers.AutoModelForAudioClassification"),m(P1,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(P1,"href","#transformers.AutoModelForAudioClassification"),m(Wi,"class","relative group"),m(Dt,"class","docstring"),m(WS,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForSequenceClassification"),m(VS,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForSequenceClassification"),m(QS,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForSequenceClassification"),m(HS,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForSequenceClassification"),m(US,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForSequenceClassification"),m(JS,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification"),m(KS,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForSequenceClassification"),m(Oe,"class","docstring"),m(Ho,"class","docstring"),m(z1,"id","transformers.AutoModelForAudioFrameClassification"),m(z1,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(z1,"href","#transformers.AutoModelForAudioFrameClassification"),m(Hi,"class","relative 
group"),m(Gt,"class","docstring"),m(YS,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForAudioFrameClassification"),m(ZS,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForAudioFrameClassification"),m(eP,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForAudioFrameClassification"),m(qe,"class","docstring"),m(Uo,"class","docstring"),m(H1,"id","transformers.AutoModelForCTC"),m(H1,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(H1,"href","#transformers.AutoModelForCTC"),m(Yi,"class","relative group"),m(Ot,"class","docstring"),m(oP,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForCTC"),m(tP,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForCTC"),m(rP,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForCTC"),m(aP,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForCTC"),m(nP,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForCTC"),m(sP,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC"),m(lP,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForCTC"),m(ze,"class","docstring"),m(Jo,"class","docstring"),m(r2,"id","transformers.AutoModelForSpeechSeq2Seq"),m(r2,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(r2,"href","#transformers.AutoModelForSpeechSeq2Seq"),m(od,"class","relative group"),m(qt,"class","docstring"),m(iP,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),m(dP,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextForConditionalGeneration"),m(Xe,"class","docstring"),m(Ko,"class","docstring"),m(l2,"id","transformers.AutoModelForAudioXVector"),m(l2,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(l2,"href","#transformers.AutoModelForAudioXVector"),m(ad,"class","relative group"),m(zt,"class","docstring"),m(mP,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForXVector"),m(fP,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForXVector"),m(cP,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForXVector"),m(We,"class","docstring"),m(Yo,"class","docstring"),m(c2,"id","transformers.AutoModelForObjectDetection"),m(c2,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(c2,"href","#transformers.AutoModelForObjectDetection"),m(id,"class","relative group"),m(Xt,"class","docstring"),m(gP,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),m(Ve,"class","docstring"),m(Zo,"class","docstring"),m(u2,"id","transformers.AutoModelForImageSegmentation"),m(u2,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(u2,"href","#transformers.AutoModelForImageSegmentation"),m(fd,"class","relative group"),m(Wt,"class","docstring"),m(hP,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),m(Qe,"class","docstring"),m(et,"class","docstring"),m(v2,"id","transformers.TFAutoModel"),m(v2,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(v2,"href","#transformers.TFAutoModel"),m(hd,"class","relative group"),m(Vt,"class","docstring"),m(uP,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertModel"),m(pP,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartModel"),m(_P,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel"),m(vP,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotModel"),m(bP,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallModel"),m(TP,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertModel"),m(FP,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertModel"),m(MP,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLModel"),m(EP,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel"),m(CP,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2Model"),m(yP,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertModel"),m(wP,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),m(AP,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraModel"),m(xP,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertModel"),m(LP,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelModel"),m(BP,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelBaseModel"),m(kP,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2Model"),m(RP,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.TFHubertModel"),m(SP,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMModel"),m(PP,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDModel"),m($P,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerModel"),m(IP,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertModel"),m(jP,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianModel"),m(NP,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartModel"),m(DP,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertModel"),m(GP,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetModel"),m(OP,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.TFMT5Model"),m(qP,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTModel"),m(zP,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusModel"),m(XP,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertModel"),m(WP,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaModel"),m(VP,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerModel"),m(QP,"href","/docs/transformers/v4.15.0/
en/model_doc/t5#transformers.TFT5Model"),m(HP,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasModel"),m(UP,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLModel"),m(JP,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.TFViTModel"),m(KP,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.TFWav2Vec2Model"),m(YP,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMModel"),m(ZP,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaModel"),m(e$,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetModel"),m(ro,"class","docstring"),m(ot,"class","docstring"),m(rb,"id","transformers.TFAutoModelForPreTraining"),m(rb,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(rb,"href","#transformers.TFAutoModelForPreTraining"),m(_d,"class","relative group"),m(Qt,"class","docstring"),m(o$,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForPreTraining"),m(t$,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),m(r$,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForPreTraining"),m(a$,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForMaskedLM"),m(n$,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLLMHeadModel"),m(s$,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForMaskedLM"),m(l$,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForPreTraining"),m(i$,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertWithLMHeadModel"),m(d$,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForPreTraining"),m(m$,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2LMHeadModel"),m(f$,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForMaskedLM"),m(c$,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertForPreTraining"),m(g$,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForPreTraining"),m(h$,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMaskedLM"),m(u$,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTLMHeadModel"),m(p$,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM"),m(_$,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5ForConditionalGeneration"),m(v$,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForMaskedLM"),m(b$,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLLMHeadModel"),m(T$,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel"),m(F$,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForMaskedLM"),m(M$,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetLMHeadModel"),m(ao,"class","docstring"),m(tt,"class","docstring"),m(wb,"id","transformers.TFAutoModelForCausalLM"),m(wb,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(wb,"href","#transformers.TFAutoModelForCausalLM"),m(Td,"class","relative group"),m(Ht,"class","docstring"),m(E$,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertLMHeadModel"),m(C$,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLLMHeadModel"),m(y$,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2LMHeadModel"),m(w$,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTLMHeadModel"),m(A$,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForCausalLM"),m(x$,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForCausalLM"),m(L$,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForCausalLM"),m(B$,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLLMHeadModel"),m(k$,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel"),m(R$,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetLMHeadModel"),m(no,"class","docstring"),m(rt,"class","docstring"),m(jb,"id","transformers.TFAutoModelForImageClassification"),m(jb,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(jb,"href","#transformers.TFAutoModelForImageClassification"),m(Ed,"class","relative group"),m(Ut,"class","docstring"),m(S$,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.TFViTForImageClassification"),m(so,"class","docstring"),m(at,"class","docstring"),m(Db,"id","transformers.TFAutoModelForMaskedLM"),m(Db,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Db,"href","#transformers.TFAutoModelForMaskedLM"),m(wd,"class","relative 
group"),m(Jt,"class","docstring"),m(P$,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForMaskedLM"),m($$,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForMaskedLM"),m(I$,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForMaskedLM"),m(j$,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForMaskedLM"),m(N$,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForMaskedLM"),m(D$,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForMaskedLM"),m(G$,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForMaskedLM"),m(O$,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForMaskedLM"),m(q$,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertWithLMHeadModel"),m(z$,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForMaskedLM"),m(X$,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForMaskedLM"),m(W$,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForMaskedLM"),m(V$,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForMaskedLM"),m(Q$,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMaskedLM"),m(H$,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForMaskedLM"),m(U$,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM"),m(J$,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForMaskedLM"),m(K$,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForMaskedLM"),m(Y$,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel"),m(Z$,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForMaskedLM"),m(lo,"class","docstring"),m(nt,"class","docstring"),m(s4,"id","transformers.TFAutoModelForSeq2SeqLM"),m(s4,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(s4,"href","#transformers.TFAutoModelForSeq2SeqLM"),m(Ld,"class","relative 
group"),m(Kt,"class","docstring"),m(eI,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),m(oI,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotForConditionalGeneration"),m(tI,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallForConditionalGeneration"),m(rI,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.TFEncoderDecoderModel"),m(aI,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDForConditionalGeneration"),m(nI,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianMTModel"),m(sI,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartForConditionalGeneration"),m(lI,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.TFMT5ForConditionalGeneration"),m(iI,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusForConditionalGeneration"),m(dI,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5ForConditionalGeneration"),m(io,"class","docstring"),m(st,"class","docstring"),m(_4,"id","transformers.TFAutoModelForSequenceClassification"),m(_4,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(_4,"href","#transformers.TFAutoModelForSequenceClassification"),m(Rd,"class","relative group"),m(Yt,"class","docstring"),m(mI,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForSequenceClassification"),m(fI,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForSequenceClassification"),m(cI,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForSequenceClassification"),m(gI,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForSequenceClassification"),m(hI,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLForSequenceClassification"),m(uI,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForSequenceClassification"),m(pI,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForSequenceClassification"),m(_I,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForSequenceClassification"),m(vI,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForSequenceClassification"),m(bI,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForSequenceClassification"),m(TI,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForSequenceClassification"),m(FI,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2ForSequenceClassification"),m(MI,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForSequenceClassification"),m(EI,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForSequenceClassification"),m(CI,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForSequenceClassification"),m(yI,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForSequenceClassification"),m(wI,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTForSequenceClassification"),m(AI,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForSequenceClassification"),m(xI,"href","/docs/transformers/v4.15
.0/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification"),m(LI,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForSequenceClassification"),m(BI,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForSequenceClassification"),m(kI,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLForSequenceClassification"),m(RI,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForSequenceClassification"),m(SI,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForSequenceClassification"),m(PI,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForSequenceClassification"),m(mo,"class","docstring"),m(lt,"class","docstring"),m(z4,"id","transformers.TFAutoModelForMultipleChoice"),m(z4,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(z4,"href","#transformers.TFAutoModelForMultipleChoice"),m($d,"class","relative group"),m(Zt,"class","docstring"),m($I,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForMultipleChoice"),m(II,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForMultipleChoice"),m(jI,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForMultipleChoice"),m(NI,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForMultipleChoice"),m(DI,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForMultipleChoice"),m(GI,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForMultipleChoice"),m(OI,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForMultipleChoice"),m(qI,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForMultipleChoice"),m(zI,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForMultipleChoice"),m(XI,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForMultipleChoice"),m(WI,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMultipleChoice"),m(VI,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForMultipleChoice"),m(QI,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice"),m(HI,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForMultipleChoice"),m(UI,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForMultipleChoice"),m(JI,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForMultipleChoice"),m(KI,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForMultipleChoice"),m(fo,"class","docstring"),m(it,"class","docstring"),m(l5,"id","transformers.TFAutoModelForTableQuestionAnswering"),m(l5,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(l5,"href","#transformers.TFAutoModelForTableQuestionAnswering"),m(Nd,"class","relative 
group"),m(er,"class","docstring"),m(YI,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForQuestionAnswering"),m(co,"class","docstring"),m(dt,"class","docstring"),m(d5,"id","transformers.TFAutoModelForTokenClassification"),m(d5,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(d5,"href","#transformers.TFAutoModelForTokenClassification"),m(Od,"class","relative group"),m(or,"class","docstring"),m(ZI,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForTokenClassification"),m(ej,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForTokenClassification"),m(oj,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForTokenClassification"),m(tj,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForTokenClassification"),m(rj,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForTokenClassification"),m(aj,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForTokenClassification"),m(nj,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForTokenClassification"),m(sj,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForTokenClassification"),m(lj,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForTokenClassification"),m(ij,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForTokenClassification"),m(dj,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForTokenClassification"),m(mj,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForTokenClassification"),m(fj,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForTokenClassification"),m(cj,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForTokenClassification"),m(gj,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForTokenClassification"),m(hj,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForTokenClassification"),m(uj,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForTokenClassification"),m(pj,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForTokenClassification"),m(_j,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForTokenClassification"),m(vj,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForTokenClassification"),m(go,"class","docstring"),m(mt,"class","docstring"),m(B5,"id","transformers.TFAutoModelForQuestionAnswering"),m(B5,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(B5,"href","#transformers.TFAutoModelForQuestionAnswering"),m(Xd,"class","relative 
group"),m(tr,"class","docstring"),m(bj,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.TFAlbertForQuestionAnswering"),m(Tj,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForQuestionAnswering"),m(Fj,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.TFCamembertForQuestionAnswering"),m(Mj,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForQuestionAnswering"),m(Ej,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForQuestionAnswering"),m(Cj,"href","/docs/transformers/v4.15.0/en/model_doc/deberta_v2#transformers.TFDebertaV2ForQuestionAnswering"),m(yj,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.TFDistilBertForQuestionAnswering"),m(wj,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.TFElectraForQuestionAnswering"),m(Aj,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertForQuestionAnsweringSimple"),m(xj,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForQuestionAnswering"),m(Lj,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.TFLongformerForQuestionAnswering"),m(Bj,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForQuestionAnswering"),m(kj,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForQuestionAnswering"),m(Rj,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForQuestionAnswering"),m(Sj,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForQuestionAnswering"),m(Pj,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForQuestionAnswering"),m($j,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForQuestionAnsweringSimple"),m(Ij,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.TFXLMRobertaForQuestionAnswering"),m(jj,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForQuestionAnsweringSimple"),m(ho,"class","docstring"),m(ft,"class","docstring"),m(J5,"id","transformers.FlaxAutoModel"),m(J5,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(J5,"href","#transformers.FlaxAutoModel"),m(Qd,"class","relative 
group"),m(rr,"class","docstring"),m(Nj,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertModel"),m(Dj,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartModel"),m(Gj,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.FlaxBeitModel"),m(Oj,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertModel"),m(qj,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdModel"),m(zj,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.FlaxBlenderbotModel"),m(Xj,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.FlaxBlenderbotSmallModel"),m(Wj,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPModel"),m(Vj,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertModel"),m(Qj,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraModel"),m(Hj,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.FlaxGPT2Model"),m(Uj,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.FlaxGPTNeoModel"),m(Jj,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.FlaxGPTJModel"),m(Kj,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.FlaxMarianModel"),m(Yj,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartModel"),m(Zj,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.FlaxMT5Model"),m(eN,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.FlaxPegasusModel"),m(oN,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaModel"),m(tN,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5Model"),m(rN,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.FlaxVisionTextDualEncoderModel"),m(aN,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.FlaxViTModel"),m(nN,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.FlaxWav2Vec2Model"),m(uo,"class","docstring"),m(ct,"class","docstring"),m(b0,"id","transformers.FlaxAutoModelForCausalLM"),m(b0,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(b0,"href","#transformers.FlaxAutoModelForCausalLM"),m(Jd,"class","relative group"),m(ar,"class","docstring"),m(sN,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.FlaxGPT2LMHeadModel"),m(lN,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.FlaxGPTNeoForCausalLM"),m(iN,"href","/docs/transformers/v4.15.0/en/model_doc/gptj#transformers.FlaxGPTJForCausalLM"),m(po,"class","docstring"),m(gt,"class","docstring"),m(E0,"id","transformers.FlaxAutoModelForPreTraining"),m(E0,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(E0,"href","#transformers.FlaxAutoModelForPreTraining"),m(em,"class","relative 
group"),m(nr,"class","docstring"),m(dN,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForPreTraining"),m(mN,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForConditionalGeneration"),m(fN,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForPreTraining"),m(cN,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForPreTraining"),m(gN,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForPreTraining"),m(hN,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForConditionalGeneration"),m(uN,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.FlaxMT5ForConditionalGeneration"),m(pN,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForMaskedLM"),m(_N,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5ForConditionalGeneration"),m(vN,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.FlaxWav2Vec2ForPreTraining"),m(_o,"class","docstring"),m(ht,"class","docstring"),m(P0,"id","transformers.FlaxAutoModelForMaskedLM"),m(P0,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(P0,"href","#transformers.FlaxAutoModelForMaskedLM"),m(rm,"class","relative group"),m(sr,"class","docstring"),m(bN,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForMaskedLM"),m(TN,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForConditionalGeneration"),m(FN,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForMaskedLM"),m(MN,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForMaskedLM"),m(EN,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForMaskedLM"),m(CN,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForMaskedLM"),m(yN,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForConditionalGeneration"),m(wN,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForMaskedLM"),m(vo,"class","docstring"),m(ut,"class","docstring"),m(z0,"id","transformers.FlaxAutoModelForSeq2SeqLM"),m(z0,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(z0,"href","#transformers.FlaxAutoModelForSeq2SeqLM"),m(sm,"class","relative 
group"),m(lr,"class","docstring"),m(AN,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForConditionalGeneration"),m(xN,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.FlaxBlenderbotForConditionalGeneration"),m(LN,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.FlaxBlenderbotSmallForConditionalGeneration"),m(BN,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.FlaxEncoderDecoderModel"),m(kN,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.FlaxMarianMTModel"),m(RN,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForConditionalGeneration"),m(SN,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.FlaxMT5ForConditionalGeneration"),m(PN,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.FlaxPegasusForConditionalGeneration"),m($N,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.FlaxT5ForConditionalGeneration"),m(bo,"class","docstring"),m(pt,"class","docstring"),m(Z0,"id","transformers.FlaxAutoModelForSequenceClassification"),m(Z0,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Z0,"href","#transformers.FlaxAutoModelForSequenceClassification"),m(dm,"class","relative group"),m(ir,"class","docstring"),m(IN,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForSequenceClassification"),m(jN,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForSequenceClassification"),m(NN,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForSequenceClassification"),m(DN,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForSequenceClassification"),m(GN,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForSequenceClassification"),m(ON,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForSequenceClassification"),m(qN,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForSequenceClassification"),m(zN,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForSequenceClassification"),m(To,"class","docstring"),m(_t,"class","docstring"),m(iT,"id","transformers.FlaxAutoModelForQuestionAnswering"),m(iT,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(iT,"href","#transformers.FlaxAutoModelForQuestionAnswering"),m(cm,"class","relative 
group"),m(dr,"class","docstring"),m(XN,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForQuestionAnswering"),m(WN,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.FlaxBartForQuestionAnswering"),m(VN,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForQuestionAnswering"),m(QN,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForQuestionAnswering"),m(HN,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForQuestionAnswering"),m(UN,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForQuestionAnswering"),m(JN,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.FlaxMBartForQuestionAnswering"),m(KN,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForQuestionAnswering"),m(Fo,"class","docstring"),m(vt,"class","docstring"),m(_T,"id","transformers.FlaxAutoModelForTokenClassification"),m(_T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(_T,"href","#transformers.FlaxAutoModelForTokenClassification"),m(um,"class","relative group"),m(mr,"class","docstring"),m(YN,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForTokenClassification"),m(ZN,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForTokenClassification"),m(eD,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForTokenClassification"),m(oD,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForTokenClassification"),m(tD,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForTokenClassification"),m(rD,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForTokenClassification"),m(Mo,"class","docstring"),m(bt,"class","docstring"),m(CT,"id","transformers.FlaxAutoModelForMultipleChoice"),m(CT,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(CT,"href","#transformers.FlaxAutoModelForMultipleChoice"),m(vm,"class","relative group"),m(fr,"class","docstring"),m(aD,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.FlaxAlbertForMultipleChoice"),m(nD,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForMultipleChoice"),m(sD,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForMultipleChoice"),m(lD,"href","/docs/transformers/v4.15.0/en/model_doc/distilbert#transformers.FlaxDistilBertForMultipleChoice"),m(iD,"href","/docs/transformers/v4.15.0/en/model_doc/electra#transformers.FlaxElectraForMultipleChoice"),m(dD,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.FlaxRobertaForMultipleChoice"),m(Eo,"class","docstring"),m(Ft,"class","docstring"),m(kT,"id","transformers.FlaxAutoModelForNextSentencePrediction"),m(kT,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(kT,"href","#transformers.FlaxAutoModelForNextSentencePrediction"),m(Fm,"class","relative 
group"),m(cr,"class","docstring"),m(mD,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.FlaxBertForNextSentencePrediction"),m(Co,"class","docstring"),m(Et,"class","docstring"),m(ST,"id","transformers.FlaxAutoModelForImageClassification"),m(ST,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ST,"href","#transformers.FlaxAutoModelForImageClassification"),m(Cm,"class","relative group"),m(gr,"class","docstring"),m(fD,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.FlaxBeitForImageClassification"),m(cD,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.FlaxViTForImageClassification"),m(yo,"class","docstring"),m(Ct,"class","docstring"),m(IT,"id","transformers.FlaxAutoModelForVision2Seq"),m(IT,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(IT,"href","#transformers.FlaxAutoModelForVision2Seq"),m(Am,"class","relative group"),m(hr,"class","docstring"),m(gD,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel"),m(wo,"class","docstring"),m(yt,"class","docstring")},m(d,_){e(document.head,J),v(d,ye,_),v(d,se,_),e(se,de),e(de,Ue),g(ie,Ue,null),e(se,ue),e(se,Bo),e(Bo,Vl),v(d,km,_),v(d,zr,_),e(zr,Ql),e(zr,Hl),e(Hl,RF),e(zr,Rm),v(d,Fe,_),v(d,Ze,_),e(Ze,Ul),e(Ze,gn),e(gn,SF),e(Ze,hn),e(Ze,un),e(un,PF),e(Ze,Jl),e(Ze,pn),e(pn,$F),e(Ze,Kl),v(d,Sm,_),g(ba,d,_),v(d,eo,_),v(d,me,_),e(me,IA),e(me,Yl),e(Yl,jA),e(me,NA),v(d,ko,_),v(d,Ta,_),e(Ta,DA),e(Ta,Pm),e(Pm,GA),e(Ta,dwe),v(d,bEe,_),v(d,Zl,_),e(Zl,$m),e($m,YG),g(IF,YG,null),e(Zl,mwe),e(Zl,ZG),e(ZG,fwe),v(d,TEe,_),v(d,_n,_),e(_n,cwe),e(_n,eO),e(eO,gwe),e(_n,hwe),e(_n,oO),e(oO,uwe),e(_n,pwe),v(d,FEe,_),g(jF,d,_),v(d,MEe,_),v(d,OA,_),e(OA,_we),v(d,EEe,_),g(Im,d,_),v(d,CEe,_),v(d,ei,_),e(ei,jm),e(jm,tO),g(NF,tO,null),e(ei,vwe),e(ei,rO),e(rO,bwe),v(d,yEe,_),v(d,Ro,_),g(DF,Ro,null),e(Ro,Twe),e(Ro,GF),e(GF,Fwe),e(GF,qA),e(qA,Mwe),e(GF,Ewe),e(Ro,Cwe),e(Ro,OF),e(OF,ywe),e(OF,aO),e(aO,wwe),e(OF,Awe),e(Ro,xwe),e(Ro,oo),g(qF,oo,null),e(oo,Lwe),e(oo,nO),e(nO,Bwe),e(oo,kwe),e(oo,oi),e(oi,Rwe),e(oi,sO),e(sO,Swe),e(oi,Pwe),e(oi,lO),e(lO,$we),e(oi,Iwe),e(oo,jwe),e(oo,b),e(b,Nm),e(Nm,iO),e(iO,Nwe),e(Nm,Dwe),e(Nm,zA),e(zA,Gwe),e(Nm,Owe),e(b,qwe),e(b,Dm),e(Dm,dO),e(dO,zwe),e(Dm,Xwe),e(Dm,XA),e(XA,Wwe),e(Dm,Vwe),e(b,Qwe),e(b,Gm),e(Gm,mO),e(mO,Hwe),e(Gm,Uwe),e(Gm,WA),e(WA,Jwe),e(Gm,Kwe),e(b,Ywe),e(b,Om),e(Om,fO),e(fO,Zwe),e(Om,eAe),e(Om,VA),e(VA,oAe),e(Om,tAe),e(b,rAe),e(b,qm),e(qm,cO),e(cO,aAe),e(qm,nAe),e(qm,QA),e(QA,sAe),e(qm,lAe),e(b,iAe),e(b,zm),e(zm,gO),e(gO,dAe),e(zm,mAe),e(zm,HA),e(HA,fAe),e(zm,cAe),e(b,gAe),e(b,Xm),e(Xm,hO),e(hO,hAe),e(Xm,uAe),e(Xm,UA),e(UA,pAe),e(Xm,_Ae),e(b,vAe),e(b,Wm),e(Wm,uO),e(uO,bAe),e(Wm,TAe),e(Wm,JA),e(JA,FAe),e(Wm,MAe),e(b,EAe),e(b,Vm),e(Vm,pO),e(pO,CAe),e(Vm,yAe),e(Vm,KA),e(KA,wAe),e(Vm,AAe),e(b,xAe),e(b,Qm),e(Qm,_O),e(_O,LAe),e(Qm,BAe),e(Qm,YA),e(YA,kAe),e(Qm,RAe),e(b,SAe),e(b,Hm),e(Hm,vO),e(vO,PAe),e(Hm,$Ae),e(Hm,ZA),e(ZA,IAe),e(Hm,jAe),e(b,NAe),e(b,Um),e(Um,bO),e(bO,DAe),e(Um,GAe),e(Um,e7),e(e7,OAe),e(Um,qAe),e(b,zAe),e(b,Jm),e(Jm,TO),e(TO,XAe),e(Jm,WAe),e(Jm,o7),e(o7,VAe),e(Jm,QAe),e(b,HAe),e(b,Km),e(Km,FO),e(FO,UAe),e(Km,JAe),e(Km,t7),e(t7,KAe),e(Km,YAe),e(b,ZAe),e(b,Ym),e(Ym,MO),e(MO,e7e),e(Ym,o7e),e(Ym,r7),e(r7,t7e),e(Ym,r7e),e(b,a7e),e(b,Zm),e(Zm,EO),e(EO,n7e),e(Zm,s7e),e(Zm,a7),e(a
7,l7e),e(Zm,i7e),e(b,d7e),e(b,ef),e(ef,CO),e(CO,m7e),e(ef,f7e),e(ef,n7),e(n7,c7e),e(ef,g7e),e(b,h7e),e(b,of),e(of,yO),e(yO,u7e),e(of,p7e),e(of,s7),e(s7,_7e),e(of,v7e),e(b,b7e),e(b,tf),e(tf,wO),e(wO,T7e),e(tf,F7e),e(tf,l7),e(l7,M7e),e(tf,E7e),e(b,C7e),e(b,rf),e(rf,AO),e(AO,y7e),e(rf,w7e),e(rf,i7),e(i7,A7e),e(rf,x7e),e(b,L7e),e(b,af),e(af,xO),e(xO,B7e),e(af,k7e),e(af,d7),e(d7,R7e),e(af,S7e),e(b,P7e),e(b,nf),e(nf,LO),e(LO,$7e),e(nf,I7e),e(nf,m7),e(m7,j7e),e(nf,N7e),e(b,D7e),e(b,sf),e(sf,BO),e(BO,G7e),e(sf,O7e),e(sf,f7),e(f7,q7e),e(sf,z7e),e(b,X7e),e(b,lf),e(lf,kO),e(kO,W7e),e(lf,V7e),e(lf,c7),e(c7,Q7e),e(lf,H7e),e(b,U7e),e(b,df),e(df,RO),e(RO,J7e),e(df,K7e),e(df,g7),e(g7,Y7e),e(df,Z7e),e(b,exe),e(b,mf),e(mf,SO),e(SO,oxe),e(mf,txe),e(mf,h7),e(h7,rxe),e(mf,axe),e(b,nxe),e(b,ff),e(ff,PO),e(PO,sxe),e(ff,lxe),e(ff,u7),e(u7,ixe),e(ff,dxe),e(b,mxe),e(b,cf),e(cf,$O),e($O,fxe),e(cf,cxe),e(cf,p7),e(p7,gxe),e(cf,hxe),e(b,uxe),e(b,gf),e(gf,IO),e(IO,pxe),e(gf,_xe),e(gf,_7),e(_7,vxe),e(gf,bxe),e(b,Txe),e(b,hf),e(hf,jO),e(jO,Fxe),e(hf,Mxe),e(hf,v7),e(v7,Exe),e(hf,Cxe),e(b,yxe),e(b,uf),e(uf,NO),e(NO,wxe),e(uf,Axe),e(uf,b7),e(b7,xxe),e(uf,Lxe),e(b,Bxe),e(b,pf),e(pf,DO),e(DO,kxe),e(pf,Rxe),e(pf,T7),e(T7,Sxe),e(pf,Pxe),e(b,$xe),e(b,_f),e(_f,GO),e(GO,Ixe),e(_f,jxe),e(_f,F7),e(F7,Nxe),e(_f,Dxe),e(b,Gxe),e(b,vf),e(vf,OO),e(OO,Oxe),e(vf,qxe),e(vf,M7),e(M7,zxe),e(vf,Xxe),e(b,Wxe),e(b,bf),e(bf,qO),e(qO,Vxe),e(bf,Qxe),e(bf,E7),e(E7,Hxe),e(bf,Uxe),e(b,Jxe),e(b,Tf),e(Tf,zO),e(zO,Kxe),e(Tf,Yxe),e(Tf,C7),e(C7,Zxe),e(Tf,e6e),e(b,o6e),e(b,Ff),e(Ff,XO),e(XO,t6e),e(Ff,r6e),e(Ff,y7),e(y7,a6e),e(Ff,n6e),e(b,s6e),e(b,Mf),e(Mf,WO),e(WO,l6e),e(Mf,i6e),e(Mf,w7),e(w7,d6e),e(Mf,m6e),e(b,f6e),e(b,Ef),e(Ef,VO),e(VO,c6e),e(Ef,g6e),e(Ef,A7),e(A7,h6e),e(Ef,u6e),e(b,p6e),e(b,Cf),e(Cf,QO),e(QO,_6e),e(Cf,v6e),e(Cf,x7),e(x7,b6e),e(Cf,T6e),e(b,F6e),e(b,yf),e(yf,HO),e(HO,M6e),e(yf,E6e),e(yf,L7),e(L7,C6e),e(yf,y6e),e(b,w6e),e(b,wf),e(wf,UO),e(UO,A6e),e(wf,x6e),e(wf,B7),e(B7,L6e),e(wf,B6e),e(b,k6e),e(b,Af),e(Af,JO),e(JO,R6e),e(Af,S6e),e(Af,k7),e(k7,P6e),e(Af,$6e),e(b,I6e),e(b,xf),e(xf,KO),e(KO,j6e),e(xf,N6e),e(xf,R7),e(R7,D6e),e(xf,G6e),e(b,O6e),e(b,Lf),e(Lf,YO),e(YO,q6e),e(Lf,z6e),e(Lf,S7),e(S7,X6e),e(Lf,W6e),e(b,V6e),e(b,Bf),e(Bf,ZO),e(ZO,Q6e),e(Bf,H6e),e(Bf,P7),e(P7,U6e),e(Bf,J6e),e(b,K6e),e(b,kf),e(kf,eq),e(eq,Y6e),e(kf,Z6e),e(kf,$7),e($7,e8e),e(kf,o8e),e(b,t8e),e(b,Rf),e(Rf,oq),e(oq,r8e),e(Rf,a8e),e(Rf,I7),e(I7,n8e),e(Rf,s8e),e(b,l8e),e(b,Sf),e(Sf,tq),e(tq,i8e),e(Sf,d8e),e(Sf,j7),e(j7,m8e),e(Sf,f8e),e(b,c8e),e(b,Pf),e(Pf,rq),e(rq,g8e),e(Pf,h8e),e(Pf,N7),e(N7,u8e),e(Pf,p8e),e(b,_8e),e(b,$f),e($f,aq),e(aq,v8e),e($f,b8e),e($f,D7),e(D7,T8e),e($f,F8e),e(b,M8e),e(b,If),e(If,nq),e(nq,E8e),e(If,C8e),e(If,G7),e(G7,y8e),e(If,w8e),e(b,A8e),e(b,jf),e(jf,sq),e(sq,x8e),e(jf,L8e),e(jf,O7),e(O7,B8e),e(jf,k8e),e(b,R8e),e(b,Nf),e(Nf,lq),e(lq,S8e),e(Nf,P8e),e(Nf,q7),e(q7,$8e),e(Nf,I8e),e(b,j8e),e(b,Df),e(Df,iq),e(iq,N8e),e(Df,D8e),e(Df,z7),e(z7,G8e),e(Df,O8e),e(b,q8e),e(b,Gf),e(Gf,dq),e(dq,z8e),e(Gf,X8e),e(Gf,X7),e(X7,W8e),e(Gf,V8e),e(b,Q8e),e(b,Of),e(Of,mq),e(mq,H8e),e(Of,U8e),e(Of,W7),e(W7,J8e),e(Of,K8e),e(b,Y8e),e(b,qf),e(qf,fq),e(fq,Z8e),e(qf,eLe),e(qf,V7),e(V7,oLe),e(qf,tLe),e(b,rLe),e(b,zf),e(zf,cq),e(cq,aLe),e(zf,nLe),e(zf,Q7),e(Q7,sLe),e(zf,lLe),e(b,iLe),e(b,Xf),e(Xf,gq),e(gq,dLe),e(Xf,mLe),e(Xf,H7),e(H7,fLe),e(Xf,cLe),e(b,gLe),e(b,Wf),e(Wf,hq),e(hq,hLe),e(Wf,uLe),e(Wf,U7),e(U7,pLe),e(Wf,_Le),e(b,vLe),e(b,Vf),e(Vf,uq),e(uq,bLe),e(Vf,TLe),e(Vf,J7),e(J7,FLe),e(Vf,MLe),e(b,ELe),e(b,Qf),e(Qf,pq),e(pq,CLe),e(Qf,yLe),e(Qf,K7),e(K7,wLe),e(Qf,ALe),e(b,xLe),e(b,
Hf),e(Hf,_q),e(_q,LLe),e(Hf,BLe),e(Hf,Y7),e(Y7,kLe),e(Hf,RLe),e(b,SLe),e(b,Uf),e(Uf,vq),e(vq,PLe),e(Uf,$Le),e(Uf,Z7),e(Z7,ILe),e(Uf,jLe),e(b,NLe),e(b,Jf),e(Jf,bq),e(bq,DLe),e(Jf,GLe),e(Jf,ex),e(ex,OLe),e(Jf,qLe),e(b,zLe),e(b,Kf),e(Kf,Tq),e(Tq,XLe),e(Kf,WLe),e(Kf,ox),e(ox,VLe),e(Kf,QLe),e(b,HLe),e(b,Yf),e(Yf,Fq),e(Fq,ULe),e(Yf,JLe),e(Yf,tx),e(tx,KLe),e(Yf,YLe),e(b,ZLe),e(b,Zf),e(Zf,Mq),e(Mq,eBe),e(Zf,oBe),e(Zf,rx),e(rx,tBe),e(Zf,rBe),e(b,aBe),e(b,ec),e(ec,Eq),e(Eq,nBe),e(ec,sBe),e(ec,ax),e(ax,lBe),e(ec,iBe),e(b,dBe),e(b,oc),e(oc,Cq),e(Cq,mBe),e(oc,fBe),e(oc,nx),e(nx,cBe),e(oc,gBe),e(b,hBe),e(b,tc),e(tc,yq),e(yq,uBe),e(tc,pBe),e(tc,sx),e(sx,_Be),e(tc,vBe),e(b,bBe),e(b,rc),e(rc,wq),e(wq,TBe),e(rc,FBe),e(rc,lx),e(lx,MBe),e(rc,EBe),e(b,CBe),e(b,ac),e(ac,Aq),e(Aq,yBe),e(ac,wBe),e(ac,ix),e(ix,ABe),e(ac,xBe),e(b,LBe),e(b,nc),e(nc,xq),e(xq,BBe),e(nc,kBe),e(nc,dx),e(dx,RBe),e(nc,SBe),e(b,PBe),e(b,sc),e(sc,Lq),e(Lq,$Be),e(sc,IBe),e(sc,mx),e(mx,jBe),e(sc,NBe),e(b,DBe),e(b,lc),e(lc,Bq),e(Bq,GBe),e(lc,OBe),e(lc,fx),e(fx,qBe),e(lc,zBe),e(b,XBe),e(b,ic),e(ic,kq),e(kq,WBe),e(ic,VBe),e(ic,cx),e(cx,QBe),e(ic,HBe),e(b,UBe),e(b,dc),e(dc,Rq),e(Rq,JBe),e(dc,KBe),e(dc,gx),e(gx,YBe),e(dc,ZBe),e(b,e9e),e(b,mc),e(mc,Sq),e(Sq,o9e),e(mc,t9e),e(mc,hx),e(hx,r9e),e(mc,a9e),e(oo,n9e),e(oo,Pq),e(Pq,s9e),e(oo,l9e),g(zF,oo,null),e(Ro,i9e),e(Ro,fc),g(XF,fc,null),e(fc,d9e),e(fc,$q),e($q,m9e),v(d,wEe,_),v(d,ti,_),e(ti,cc),e(cc,Iq),g(WF,Iq,null),e(ti,f9e),e(ti,jq),e(jq,c9e),v(d,AEe,_),v(d,So,_),g(VF,So,null),e(So,g9e),e(So,QF),e(QF,h9e),e(QF,ux),e(ux,u9e),e(QF,p9e),e(So,_9e),e(So,HF),e(HF,v9e),e(HF,Nq),e(Nq,b9e),e(HF,T9e),e(So,F9e),e(So,to),g(UF,to,null),e(to,M9e),e(to,Dq),e(Dq,E9e),e(to,C9e),e(to,Fa),e(Fa,y9e),e(Fa,Gq),e(Gq,w9e),e(Fa,A9e),e(Fa,Oq),e(Oq,x9e),e(Fa,L9e),e(Fa,qq),e(qq,B9e),e(Fa,k9e),e(to,R9e),e(to,E),e(E,vn),e(vn,zq),e(zq,S9e),e(vn,P9e),e(vn,px),e(px,$9e),e(vn,I9e),e(vn,_x),e(_x,j9e),e(vn,N9e),e(E,D9e),e(E,bn),e(bn,Xq),e(Xq,G9e),e(bn,O9e),e(bn,vx),e(vx,q9e),e(bn,z9e),e(bn,bx),e(bx,X9e),e(bn,W9e),e(E,V9e),e(E,Tn),e(Tn,Wq),e(Wq,Q9e),e(Tn,H9e),e(Tn,Tx),e(Tx,U9e),e(Tn,J9e),e(Tn,Fx),e(Fx,K9e),e(Tn,Y9e),e(E,Z9e),e(E,gc),e(gc,Vq),e(Vq,eke),e(gc,oke),e(gc,Mx),e(Mx,tke),e(gc,rke),e(E,ake),e(E,Fn),e(Fn,Qq),e(Qq,nke),e(Fn,ske),e(Fn,Ex),e(Ex,lke),e(Fn,ike),e(Fn,Cx),e(Cx,dke),e(Fn,mke),e(E,fke),e(E,hc),e(hc,Hq),e(Hq,cke),e(hc,gke),e(hc,yx),e(yx,hke),e(hc,uke),e(E,pke),e(E,uc),e(uc,Uq),e(Uq,_ke),e(uc,vke),e(uc,wx),e(wx,bke),e(uc,Tke),e(E,Fke),e(E,pc),e(pc,Jq),e(Jq,Mke),e(pc,Eke),e(pc,Ax),e(Ax,Cke),e(pc,yke),e(E,wke),e(E,Mn),e(Mn,Kq),e(Kq,Ake),e(Mn,xke),e(Mn,xx),e(xx,Lke),e(Mn,Bke),e(Mn,Lx),e(Lx,kke),e(Mn,Rke),e(E,Ske),e(E,En),e(En,Yq),e(Yq,Pke),e(En,$ke),e(En,Bx),e(Bx,Ike),e(En,jke),e(En,kx),e(kx,Nke),e(En,Dke),e(E,Gke),e(E,Cn),e(Cn,Zq),e(Zq,Oke),e(Cn,qke),e(Cn,Rx),e(Rx,zke),e(Cn,Xke),e(Cn,Sx),e(Sx,Wke),e(Cn,Vke),e(E,Qke),e(E,_c),e(_c,ez),e(ez,Hke),e(_c,Uke),e(_c,Px),e(Px,Jke),e(_c,Kke),e(E,Yke),e(E,vc),e(vc,oz),e(oz,Zke),e(vc,eRe),e(vc,$x),e($x,oRe),e(vc,tRe),e(E,rRe),e(E,yn),e(yn,tz),e(tz,aRe),e(yn,nRe),e(yn,Ix),e(Ix,sRe),e(yn,lRe),e(yn,jx),e(jx,iRe),e(yn,dRe),e(E,mRe),e(E,bc),e(bc,rz),e(rz,fRe),e(bc,cRe),e(bc,Nx),e(Nx,gRe),e(bc,hRe),e(E,uRe),e(E,wn),e(wn,az),e(az,pRe),e(wn,_Re),e(wn,Dx),e(Dx,vRe),e(wn,bRe),e(wn,Gx),e(Gx,TRe),e(wn,FRe),e(E,MRe),e(E,An),e(An,nz),e(nz,ERe),e(An,CRe),e(An,Ox),e(Ox,yRe),e(An,wRe),e(An,qx),e(qx,ARe),e(An,xRe),e(E,LRe),e(E,xn),e(xn,sz),e(sz,BRe),e(xn,kRe),e(xn,zx),e(zx,RRe),e(xn,SRe),e(xn,lz),e(lz,PRe),e(xn,$Re),e(E,IRe),e(E,Tc),e(Tc,iz),e(iz,jRe),e(Tc,NRe),e(Tc,Xx),e(Xx,DRe),e(Tc,GRe),e(E,ORe),e(E,L
n),e(Ln,dz),e(dz,qRe),e(Ln,zRe),e(Ln,Wx),e(Wx,XRe),e(Ln,WRe),e(Ln,Vx),e(Vx,VRe),e(Ln,QRe),e(E,HRe),e(E,Fc),e(Fc,mz),e(mz,URe),e(Fc,JRe),e(Fc,Qx),e(Qx,KRe),e(Fc,YRe),e(E,ZRe),e(E,Bn),e(Bn,fz),e(fz,eSe),e(Bn,oSe),e(Bn,Hx),e(Hx,tSe),e(Bn,rSe),e(Bn,Ux),e(Ux,aSe),e(Bn,nSe),e(E,sSe),e(E,kn),e(kn,cz),e(cz,lSe),e(kn,iSe),e(kn,Jx),e(Jx,dSe),e(kn,mSe),e(kn,Kx),e(Kx,fSe),e(kn,cSe),e(E,gSe),e(E,Rn),e(Rn,gz),e(gz,hSe),e(Rn,uSe),e(Rn,Yx),e(Yx,pSe),e(Rn,_Se),e(Rn,Zx),e(Zx,vSe),e(Rn,bSe),e(E,TSe),e(E,Mc),e(Mc,hz),e(hz,FSe),e(Mc,MSe),e(Mc,e6),e(e6,ESe),e(Mc,CSe),e(E,ySe),e(E,Sn),e(Sn,uz),e(uz,wSe),e(Sn,ASe),e(Sn,o6),e(o6,xSe),e(Sn,LSe),e(Sn,t6),e(t6,BSe),e(Sn,kSe),e(E,RSe),e(E,Ec),e(Ec,pz),e(pz,SSe),e(Ec,PSe),e(Ec,r6),e(r6,$Se),e(Ec,ISe),e(E,jSe),e(E,Pn),e(Pn,_z),e(_z,NSe),e(Pn,DSe),e(Pn,a6),e(a6,GSe),e(Pn,OSe),e(Pn,n6),e(n6,qSe),e(Pn,zSe),e(E,XSe),e(E,$n),e($n,vz),e(vz,WSe),e($n,VSe),e($n,s6),e(s6,QSe),e($n,HSe),e($n,l6),e(l6,USe),e($n,JSe),e(E,KSe),e(E,In),e(In,bz),e(bz,YSe),e(In,ZSe),e(In,i6),e(i6,ePe),e(In,oPe),e(In,d6),e(d6,tPe),e(In,rPe),e(E,aPe),e(E,Cc),e(Cc,Tz),e(Tz,nPe),e(Cc,sPe),e(Cc,m6),e(m6,lPe),e(Cc,iPe),e(E,dPe),e(E,jn),e(jn,Fz),e(Fz,mPe),e(jn,fPe),e(jn,f6),e(f6,cPe),e(jn,gPe),e(jn,c6),e(c6,hPe),e(jn,uPe),e(E,pPe),e(E,Nn),e(Nn,Mz),e(Mz,_Pe),e(Nn,vPe),e(Nn,g6),e(g6,bPe),e(Nn,TPe),e(Nn,h6),e(h6,FPe),e(Nn,MPe),e(E,EPe),e(E,Dn),e(Dn,Ez),e(Ez,CPe),e(Dn,yPe),e(Dn,u6),e(u6,wPe),e(Dn,APe),e(Dn,p6),e(p6,xPe),e(Dn,LPe),e(E,BPe),e(E,Gn),e(Gn,Cz),e(Cz,kPe),e(Gn,RPe),e(Gn,_6),e(_6,SPe),e(Gn,PPe),e(Gn,v6),e(v6,$Pe),e(Gn,IPe),e(E,jPe),e(E,On),e(On,yz),e(yz,NPe),e(On,DPe),e(On,b6),e(b6,GPe),e(On,OPe),e(On,T6),e(T6,qPe),e(On,zPe),e(E,XPe),e(E,yc),e(yc,wz),e(wz,WPe),e(yc,VPe),e(yc,F6),e(F6,QPe),e(yc,HPe),e(E,UPe),e(E,qn),e(qn,Az),e(Az,JPe),e(qn,KPe),e(qn,M6),e(M6,YPe),e(qn,ZPe),e(qn,E6),e(E6,e$e),e(qn,o$e),e(E,t$e),e(E,wc),e(wc,xz),e(xz,r$e),e(wc,a$e),e(wc,C6),e(C6,n$e),e(wc,s$e),e(E,l$e),e(E,Ac),e(Ac,Lz),e(Lz,i$e),e(Ac,d$e),e(Ac,y6),e(y6,m$e),e(Ac,f$e),e(E,c$e),e(E,zn),e(zn,Bz),e(Bz,g$e),e(zn,h$e),e(zn,w6),e(w6,u$e),e(zn,p$e),e(zn,A6),e(A6,_$e),e(zn,v$e),e(E,b$e),e(E,Xn),e(Xn,kz),e(kz,T$e),e(Xn,F$e),e(Xn,x6),e(x6,M$e),e(Xn,E$e),e(Xn,L6),e(L6,C$e),e(Xn,y$e),e(E,w$e),e(E,Wn),e(Wn,Rz),e(Rz,A$e),e(Wn,x$e),e(Wn,B6),e(B6,L$e),e(Wn,B$e),e(Wn,k6),e(k6,k$e),e(Wn,R$e),e(E,S$e),e(E,Vn),e(Vn,Sz),e(Sz,P$e),e(Vn,$$e),e(Vn,R6),e(R6,I$e),e(Vn,j$e),e(Vn,S6),e(S6,N$e),e(Vn,D$e),e(E,G$e),e(E,Qn),e(Qn,Pz),e(Pz,O$e),e(Qn,q$e),e(Qn,P6),e(P6,z$e),e(Qn,X$e),e(Qn,$6),e($6,W$e),e(Qn,V$e),e(E,Q$e),e(E,Hn),e(Hn,$z),e($z,H$e),e(Hn,U$e),e(Hn,I6),e(I6,J$e),e(Hn,K$e),e(Hn,j6),e(j6,Y$e),e(Hn,Z$e),e(E,eIe),e(E,Un),e(Un,Iz),e(Iz,oIe),e(Un,tIe),e(Un,N6),e(N6,rIe),e(Un,aIe),e(Un,D6),e(D6,nIe),e(Un,sIe),e(E,lIe),e(E,xc),e(xc,jz),e(jz,iIe),e(xc,dIe),e(xc,G6),e(G6,mIe),e(xc,fIe),e(E,cIe),e(E,Lc),e(Lc,Nz),e(Nz,gIe),e(Lc,hIe),e(Lc,O6),e(O6,uIe),e(Lc,pIe),e(E,_Ie),e(E,Bc),e(Bc,Dz),e(Dz,vIe),e(Bc,bIe),e(Bc,q6),e(q6,TIe),e(Bc,FIe),e(E,MIe),e(E,Jn),e(Jn,Gz),e(Gz,EIe),e(Jn,CIe),e(Jn,z6),e(z6,yIe),e(Jn,wIe),e(Jn,X6),e(X6,AIe),e(Jn,xIe),e(E,LIe),e(E,kc),e(kc,Oz),e(Oz,BIe),e(kc,kIe),e(kc,W6),e(W6,RIe),e(kc,SIe),e(E,PIe),e(E,Kn),e(Kn,qz),e(qz,$Ie),e(Kn,IIe),e(Kn,V6),e(V6,jIe),e(Kn,NIe),e(Kn,Q6),e(Q6,DIe),e(Kn,GIe),e(E,OIe),e(E,Yn),e(Yn,zz),e(zz,qIe),e(Yn,zIe),e(Yn,H6),e(H6,XIe),e(Yn,WIe),e(Yn,U6),e(U6,VIe),e(Yn,QIe),e(E,HIe),e(E,Zn),e(Zn,Xz),e(Xz,UIe),e(Zn,JIe),e(Zn,J6),e(J6,KIe),e(Zn,YIe),e(Zn,K6),e(K6,ZIe),e(Zn,eje),e(E,oje),e(E,es),e(es,Wz),e(Wz,tje),e(es,rje),e(es,Y6),e(Y6,aje),e(es,nje),e(es,Z6),e(Z6,sje),e(es,lje),e(E,ije),e(E,os),e(os,Vz),e(Vz,dje),e(os
,mje),e(os,e8),e(e8,fje),e(os,cje),e(os,o8),e(o8,gje),e(os,hje),e(E,uje),e(E,Rc),e(Rc,Qz),e(Qz,pje),e(Rc,_je),e(Rc,t8),e(t8,vje),e(Rc,bje),e(E,Tje),e(E,Sc),e(Sc,Hz),e(Hz,Fje),e(Sc,Mje),e(Sc,r8),e(r8,Eje),e(Sc,Cje),e(E,yje),e(E,ts),e(ts,Uz),e(Uz,wje),e(ts,Aje),e(ts,a8),e(a8,xje),e(ts,Lje),e(ts,n8),e(n8,Bje),e(ts,kje),e(E,Rje),e(E,rs),e(rs,Jz),e(Jz,Sje),e(rs,Pje),e(rs,s8),e(s8,$je),e(rs,Ije),e(rs,l8),e(l8,jje),e(rs,Nje),e(E,Dje),e(E,as),e(as,Kz),e(Kz,Gje),e(as,Oje),e(as,i8),e(i8,qje),e(as,zje),e(as,d8),e(d8,Xje),e(as,Wje),e(E,Vje),e(E,Pc),e(Pc,Yz),e(Yz,Qje),e(Pc,Hje),e(Pc,m8),e(m8,Uje),e(Pc,Jje),e(E,Kje),e(E,$c),e($c,Zz),e(Zz,Yje),e($c,Zje),e($c,f8),e(f8,eNe),e($c,oNe),e(E,tNe),e(E,Ic),e(Ic,eX),e(eX,rNe),e(Ic,aNe),e(Ic,c8),e(c8,nNe),e(Ic,sNe),e(E,lNe),e(E,jc),e(jc,oX),e(oX,iNe),e(jc,dNe),e(jc,g8),e(g8,mNe),e(jc,fNe),e(E,cNe),e(E,Nc),e(Nc,tX),e(tX,gNe),e(Nc,hNe),e(Nc,h8),e(h8,uNe),e(Nc,pNe),e(E,_Ne),e(E,ns),e(ns,rX),e(rX,vNe),e(ns,bNe),e(ns,u8),e(u8,TNe),e(ns,FNe),e(ns,p8),e(p8,MNe),e(ns,ENe),e(E,CNe),e(E,ss),e(ss,aX),e(aX,yNe),e(ss,wNe),e(ss,_8),e(_8,ANe),e(ss,xNe),e(ss,v8),e(v8,LNe),e(ss,BNe),e(to,kNe),e(to,nX),e(nX,RNe),e(to,SNe),g(JF,to,null),e(So,PNe),e(So,Dc),g(KF,Dc,null),e(Dc,$Ne),e(Dc,sX),e(sX,INe),v(d,xEe,_),v(d,ri,_),e(ri,Gc),e(Gc,lX),g(YF,lX,null),e(ri,jNe),e(ri,iX),e(iX,NNe),v(d,LEe,_),v(d,Gr,_),g(ZF,Gr,null),e(Gr,DNe),e(Gr,eM),e(eM,GNe),e(eM,b8),e(b8,ONe),e(eM,qNe),e(Gr,zNe),e(Gr,oM),e(oM,XNe),e(oM,dX),e(dX,WNe),e(oM,VNe),e(Gr,QNe),e(Gr,we),g(tM,we,null),e(we,HNe),e(we,mX),e(mX,UNe),e(we,JNe),e(we,Ma),e(Ma,KNe),e(Ma,fX),e(fX,YNe),e(Ma,ZNe),e(Ma,cX),e(cX,eDe),e(Ma,oDe),e(Ma,gX),e(gX,tDe),e(Ma,rDe),e(we,aDe),e(we,fe),e(fe,Oc),e(Oc,hX),e(hX,nDe),e(Oc,sDe),e(Oc,T8),e(T8,lDe),e(Oc,iDe),e(fe,dDe),e(fe,qc),e(qc,uX),e(uX,mDe),e(qc,fDe),e(qc,F8),e(F8,cDe),e(qc,gDe),e(fe,hDe),e(fe,zc),e(zc,pX),e(pX,uDe),e(zc,pDe),e(zc,M8),e(M8,_De),e(zc,vDe),e(fe,bDe),e(fe,Xc),e(Xc,_X),e(_X,TDe),e(Xc,FDe),e(Xc,E8),e(E8,MDe),e(Xc,EDe),e(fe,CDe),e(fe,Wc),e(Wc,vX),e(vX,yDe),e(Wc,wDe),e(Wc,C8),e(C8,ADe),e(Wc,xDe),e(fe,LDe),e(fe,Vc),e(Vc,bX),e(bX,BDe),e(Vc,kDe),e(Vc,y8),e(y8,RDe),e(Vc,SDe),e(fe,PDe),e(fe,Qc),e(Qc,TX),e(TX,$De),e(Qc,IDe),e(Qc,w8),e(w8,jDe),e(Qc,NDe),e(fe,DDe),e(fe,Hc),e(Hc,FX),e(FX,GDe),e(Hc,ODe),e(Hc,A8),e(A8,qDe),e(Hc,zDe),e(fe,XDe),e(fe,Uc),e(Uc,MX),e(MX,WDe),e(Uc,VDe),e(Uc,x8),e(x8,QDe),e(Uc,HDe),e(fe,UDe),e(fe,Jc),e(Jc,EX),e(EX,JDe),e(Jc,KDe),e(Jc,L8),e(L8,YDe),e(Jc,ZDe),e(we,eGe),g(Kc,we,null),e(we,oGe),e(we,CX),e(CX,tGe),e(we,rGe),g(rM,we,null),v(d,BEe,_),v(d,ai,_),e(ai,Yc),e(Yc,yX),g(aM,yX,null),e(ai,aGe),e(ai,wX),e(wX,nGe),v(d,kEe,_),v(d,Or,_),g(nM,Or,null),e(Or,sGe),e(Or,sM),e(sM,lGe),e(sM,B8),e(B8,iGe),e(sM,dGe),e(Or,mGe),e(Or,lM),e(lM,fGe),e(lM,AX),e(AX,cGe),e(lM,gGe),e(Or,hGe),e(Or,Ae),g(iM,Ae,null),e(Ae,uGe),e(Ae,xX),e(xX,pGe),e(Ae,_Ge),e(Ae,ni),e(ni,vGe),e(ni,LX),e(LX,bGe),e(ni,TGe),e(ni,BX),e(BX,FGe),e(ni,MGe),e(Ae,EGe),e(Ae,Je),e(Je,Zc),e(Zc,kX),e(kX,CGe),e(Zc,yGe),e(Zc,k8),e(k8,wGe),e(Zc,AGe),e(Je,xGe),e(Je,eg),e(eg,RX),e(RX,LGe),e(eg,BGe),e(eg,R8),e(R8,kGe),e(eg,RGe),e(Je,SGe),e(Je,og),e(og,SX),e(SX,PGe),e(og,$Ge),e(og,S8),e(S8,IGe),e(og,jGe),e(Je,NGe),e(Je,tg),e(tg,PX),e(PX,DGe),e(tg,GGe),e(tg,P8),e(P8,OGe),e(tg,qGe),e(Je,zGe),e(Je,rg),e(rg,$X),e($X,XGe),e(rg,WGe),e(rg,$8),e($8,VGe),e(rg,QGe),e(Je,HGe),e(Je,ag),e(ag,IX),e(IX,UGe),e(ag,JGe),e(ag,I8),e(I8,KGe),e(ag,YGe),e(Je,ZGe),e(Je,ng),e(ng,jX),e(jX,eOe),e(ng,oOe),e(ng,j8),e(j8,tOe),e(ng,rOe),e(Ae,aOe),g(sg,Ae,null),e(Ae,nOe),e(Ae,NX),e(NX,sOe),e(Ae,lOe),g(dM,Ae,null),v(d,REe,_),v(d,si,_),e(si,lg),e(lg,DX),g(mM,DX,null),e(si
,iOe),e(si,GX),e(GX,dOe),v(d,SEe,_),v(d,Po,_),g(fM,Po,null),e(Po,mOe),e(Po,li),e(li,fOe),e(li,OX),e(OX,cOe),e(li,gOe),e(li,qX),e(qX,hOe),e(li,uOe),e(Po,pOe),e(Po,cM),e(cM,_Oe),e(cM,zX),e(zX,vOe),e(cM,bOe),e(Po,TOe),e(Po,wt),g(gM,wt,null),e(wt,FOe),e(wt,XX),e(XX,MOe),e(wt,EOe),e(wt,ii),e(ii,COe),e(ii,WX),e(WX,yOe),e(ii,wOe),e(ii,VX),e(VX,AOe),e(ii,xOe),e(wt,LOe),e(wt,QX),e(QX,BOe),e(wt,kOe),g(hM,wt,null),e(Po,ROe),e(Po,xe),g(uM,xe,null),e(xe,SOe),e(xe,HX),e(HX,POe),e(xe,$Oe),e(xe,Ea),e(Ea,IOe),e(Ea,UX),e(UX,jOe),e(Ea,NOe),e(Ea,JX),e(JX,DOe),e(Ea,GOe),e(Ea,KX),e(KX,OOe),e(Ea,qOe),e(xe,zOe),e(xe,F),e(F,ig),e(ig,YX),e(YX,XOe),e(ig,WOe),e(ig,N8),e(N8,VOe),e(ig,QOe),e(F,HOe),e(F,dg),e(dg,ZX),e(ZX,UOe),e(dg,JOe),e(dg,D8),e(D8,KOe),e(dg,YOe),e(F,ZOe),e(F,mg),e(mg,eW),e(eW,eqe),e(mg,oqe),e(mg,G8),e(G8,tqe),e(mg,rqe),e(F,aqe),e(F,fg),e(fg,oW),e(oW,nqe),e(fg,sqe),e(fg,O8),e(O8,lqe),e(fg,iqe),e(F,dqe),e(F,cg),e(cg,tW),e(tW,mqe),e(cg,fqe),e(cg,q8),e(q8,cqe),e(cg,gqe),e(F,hqe),e(F,gg),e(gg,rW),e(rW,uqe),e(gg,pqe),e(gg,z8),e(z8,_qe),e(gg,vqe),e(F,bqe),e(F,hg),e(hg,aW),e(aW,Tqe),e(hg,Fqe),e(hg,X8),e(X8,Mqe),e(hg,Eqe),e(F,Cqe),e(F,ug),e(ug,nW),e(nW,yqe),e(ug,wqe),e(ug,W8),e(W8,Aqe),e(ug,xqe),e(F,Lqe),e(F,pg),e(pg,sW),e(sW,Bqe),e(pg,kqe),e(pg,V8),e(V8,Rqe),e(pg,Sqe),e(F,Pqe),e(F,_g),e(_g,lW),e(lW,$qe),e(_g,Iqe),e(_g,Q8),e(Q8,jqe),e(_g,Nqe),e(F,Dqe),e(F,vg),e(vg,iW),e(iW,Gqe),e(vg,Oqe),e(vg,H8),e(H8,qqe),e(vg,zqe),e(F,Xqe),e(F,bg),e(bg,dW),e(dW,Wqe),e(bg,Vqe),e(bg,U8),e(U8,Qqe),e(bg,Hqe),e(F,Uqe),e(F,Tg),e(Tg,mW),e(mW,Jqe),e(Tg,Kqe),e(Tg,J8),e(J8,Yqe),e(Tg,Zqe),e(F,eze),e(F,Fg),e(Fg,fW),e(fW,oze),e(Fg,tze),e(Fg,K8),e(K8,rze),e(Fg,aze),e(F,nze),e(F,Mg),e(Mg,cW),e(cW,sze),e(Mg,lze),e(Mg,Y8),e(Y8,ize),e(Mg,dze),e(F,mze),e(F,Eg),e(Eg,gW),e(gW,fze),e(Eg,cze),e(Eg,Z8),e(Z8,gze),e(Eg,hze),e(F,uze),e(F,Cg),e(Cg,hW),e(hW,pze),e(Cg,_ze),e(Cg,eL),e(eL,vze),e(Cg,bze),e(F,Tze),e(F,yg),e(yg,uW),e(uW,Fze),e(yg,Mze),e(yg,oL),e(oL,Eze),e(yg,Cze),e(F,yze),e(F,wg),e(wg,pW),e(pW,wze),e(wg,Aze),e(wg,tL),e(tL,xze),e(wg,Lze),e(F,Bze),e(F,Ag),e(Ag,_W),e(_W,kze),e(Ag,Rze),e(Ag,rL),e(rL,Sze),e(Ag,Pze),e(F,$ze),e(F,xg),e(xg,vW),e(vW,Ize),e(xg,jze),e(xg,aL),e(aL,Nze),e(xg,Dze),e(F,Gze),e(F,Lg),e(Lg,bW),e(bW,Oze),e(Lg,qze),e(Lg,nL),e(nL,zze),e(Lg,Xze),e(F,Wze),e(F,Bg),e(Bg,TW),e(TW,Vze),e(Bg,Qze),e(Bg,sL),e(sL,Hze),e(Bg,Uze),e(F,Jze),e(F,kg),e(kg,FW),e(FW,Kze),e(kg,Yze),e(kg,lL),e(lL,Zze),e(kg,eXe),e(F,oXe),e(F,ls),e(ls,MW),e(MW,tXe),e(ls,rXe),e(ls,iL),e(iL,aXe),e(ls,nXe),e(ls,dL),e(dL,sXe),e(ls,lXe),e(F,iXe),e(F,Rg),e(Rg,EW),e(EW,dXe),e(Rg,mXe),e(Rg,mL),e(mL,fXe),e(Rg,cXe),e(F,gXe),e(F,Sg),e(Sg,CW),e(CW,hXe),e(Sg,uXe),e(Sg,fL),e(fL,pXe),e(Sg,_Xe),e(F,vXe),e(F,Pg),e(Pg,yW),e(yW,bXe),e(Pg,TXe),e(Pg,cL),e(cL,FXe),e(Pg,MXe),e(F,EXe),e(F,$g),e($g,wW),e(wW,CXe),e($g,yXe),e($g,gL),e(gL,wXe),e($g,AXe),e(F,xXe),e(F,Ig),e(Ig,AW),e(AW,LXe),e(Ig,BXe),e(Ig,hL),e(hL,kXe),e(Ig,RXe),e(F,SXe),e(F,jg),e(jg,xW),e(xW,PXe),e(jg,$Xe),e(jg,uL),e(uL,IXe),e(jg,jXe),e(F,NXe),e(F,Ng),e(Ng,LW),e(LW,DXe),e(Ng,GXe),e(Ng,pL),e(pL,OXe),e(Ng,qXe),e(F,zXe),e(F,Dg),e(Dg,BW),e(BW,XXe),e(Dg,WXe),e(Dg,_L),e(_L,VXe),e(Dg,QXe),e(F,HXe),e(F,Gg),e(Gg,kW),e(kW,UXe),e(Gg,JXe),e(Gg,vL),e(vL,KXe),e(Gg,YXe),e(F,ZXe),e(F,Og),e(Og,RW),e(RW,eWe),e(Og,oWe),e(Og,bL),e(bL,tWe),e(Og,rWe),e(F,aWe),e(F,qg),e(qg,SW),e(SW,nWe),e(qg,sWe),e(qg,TL),e(TL,lWe),e(qg,iWe),e(F,dWe),e(F,zg),e(zg,PW),e(PW,mWe),e(zg,fWe),e(zg,FL),e(FL,cWe),e(zg,gWe),e(F,hWe),e(F,Xg),e(Xg,$W),e($W,uWe),e(Xg,pWe),e(Xg,ML),e(ML,_We),e(Xg,vWe),e(F,bWe),e(F,Wg),e(Wg,IW),e(IW,TWe),e(Wg,FWe),e(Wg,EL),e(EL,MWe),e(Wg,EWe),e(F,CWe
),e(F,Vg),e(Vg,jW),e(jW,yWe),e(Vg,wWe),e(Vg,CL),e(CL,AWe),e(Vg,xWe),e(F,LWe),e(F,Qg),e(Qg,NW),e(NW,BWe),e(Qg,kWe),e(Qg,yL),e(yL,RWe),e(Qg,SWe),e(F,PWe),e(F,Hg),e(Hg,DW),e(DW,$We),e(Hg,IWe),e(Hg,wL),e(wL,jWe),e(Hg,NWe),e(F,DWe),e(F,Ug),e(Ug,GW),e(GW,GWe),e(Ug,OWe),e(Ug,AL),e(AL,qWe),e(Ug,zWe),e(F,XWe),e(F,Jg),e(Jg,OW),e(OW,WWe),e(Jg,VWe),e(Jg,xL),e(xL,QWe),e(Jg,HWe),e(F,UWe),e(F,Kg),e(Kg,qW),e(qW,JWe),e(Kg,KWe),e(Kg,LL),e(LL,YWe),e(Kg,ZWe),e(F,eVe),e(F,Yg),e(Yg,zW),e(zW,oVe),e(Yg,tVe),e(Yg,BL),e(BL,rVe),e(Yg,aVe),e(F,nVe),e(F,Zg),e(Zg,XW),e(XW,sVe),e(Zg,lVe),e(Zg,kL),e(kL,iVe),e(Zg,dVe),e(F,mVe),e(F,eh),e(eh,WW),e(WW,fVe),e(eh,cVe),e(eh,RL),e(RL,gVe),e(eh,hVe),e(F,uVe),e(F,oh),e(oh,VW),e(VW,pVe),e(oh,_Ve),e(oh,SL),e(SL,vVe),e(oh,bVe),e(F,TVe),e(F,th),e(th,QW),e(QW,FVe),e(th,MVe),e(th,PL),e(PL,EVe),e(th,CVe),e(F,yVe),e(F,rh),e(rh,HW),e(HW,wVe),e(rh,AVe),e(rh,$L),e($L,xVe),e(rh,LVe),e(F,BVe),e(F,ah),e(ah,UW),e(UW,kVe),e(ah,RVe),e(ah,IL),e(IL,SVe),e(ah,PVe),e(F,$Ve),e(F,nh),e(nh,JW),e(JW,IVe),e(nh,jVe),e(nh,jL),e(jL,NVe),e(nh,DVe),e(F,GVe),e(F,sh),e(sh,KW),e(KW,OVe),e(sh,qVe),e(sh,NL),e(NL,zVe),e(sh,XVe),e(F,WVe),e(F,lh),e(lh,YW),e(YW,VVe),e(lh,QVe),e(lh,DL),e(DL,HVe),e(lh,UVe),e(F,JVe),e(F,ih),e(ih,ZW),e(ZW,KVe),e(ih,YVe),e(ih,GL),e(GL,ZVe),e(ih,eQe),e(F,oQe),e(F,dh),e(dh,eV),e(eV,tQe),e(dh,rQe),e(dh,OL),e(OL,aQe),e(dh,nQe),e(F,sQe),e(F,mh),e(mh,oV),e(oV,lQe),e(mh,iQe),e(mh,qL),e(qL,dQe),e(mh,mQe),e(F,fQe),e(F,fh),e(fh,tV),e(tV,cQe),e(fh,gQe),e(fh,zL),e(zL,hQe),e(fh,uQe),e(F,pQe),e(F,ch),e(ch,rV),e(rV,_Qe),e(ch,vQe),e(ch,XL),e(XL,bQe),e(ch,TQe),e(F,FQe),e(F,gh),e(gh,aV),e(aV,MQe),e(gh,EQe),e(gh,WL),e(WL,CQe),e(gh,yQe),e(F,wQe),e(F,hh),e(hh,nV),e(nV,AQe),e(hh,xQe),e(hh,VL),e(VL,LQe),e(hh,BQe),e(F,kQe),e(F,uh),e(uh,sV),e(sV,RQe),e(uh,SQe),e(uh,QL),e(QL,PQe),e(uh,$Qe),e(F,IQe),e(F,ph),e(ph,lV),e(lV,jQe),e(ph,NQe),e(ph,HL),e(HL,DQe),e(ph,GQe),e(F,OQe),e(F,_h),e(_h,iV),e(iV,qQe),e(_h,zQe),e(_h,UL),e(UL,XQe),e(_h,WQe),e(F,VQe),e(F,vh),e(vh,dV),e(dV,QQe),e(vh,HQe),e(vh,JL),e(JL,UQe),e(vh,JQe),e(F,KQe),e(F,bh),e(bh,mV),e(mV,YQe),e(bh,ZQe),e(bh,KL),e(KL,eHe),e(bh,oHe),e(F,tHe),e(F,Th),e(Th,fV),e(fV,rHe),e(Th,aHe),e(Th,YL),e(YL,nHe),e(Th,sHe),e(F,lHe),e(F,Fh),e(Fh,cV),e(cV,iHe),e(Fh,dHe),e(Fh,ZL),e(ZL,mHe),e(Fh,fHe),e(F,cHe),e(F,Mh),e(Mh,gV),e(gV,gHe),e(Mh,hHe),e(Mh,eB),e(eB,uHe),e(Mh,pHe),e(F,_He),e(F,Eh),e(Eh,hV),e(hV,vHe),e(Eh,bHe),e(Eh,oB),e(oB,THe),e(Eh,FHe),e(F,MHe),e(F,Ch),e(Ch,uV),e(uV,EHe),e(Ch,CHe),e(Ch,tB),e(tB,yHe),e(Ch,wHe),e(F,AHe),e(F,yh),e(yh,pV),e(pV,xHe),e(yh,LHe),e(yh,rB),e(rB,BHe),e(yh,kHe),e(F,RHe),e(F,wh),e(wh,_V),e(_V,SHe),e(wh,PHe),e(wh,aB),e(aB,$He),e(wh,IHe),e(xe,jHe),e(xe,Ah),e(Ah,NHe),e(Ah,vV),e(vV,DHe),e(Ah,GHe),e(Ah,bV),e(bV,OHe),e(xe,qHe),e(xe,TV),e(TV,zHe),e(xe,XHe),g(pM,xe,null),v(d,PEe,_),v(d,di,_),e(di,xh),e(xh,FV),g(_M,FV,null),e(di,WHe),e(di,MV),e(MV,VHe),v(d,$Ee,_),v(d,$o,_),g(vM,$o,null),e($o,QHe),e($o,mi),e(mi,HHe),e(mi,EV),e(EV,UHe),e(mi,JHe),e(mi,CV),e(CV,KHe),e(mi,YHe),e($o,ZHe),e($o,bM),e(bM,eUe),e(bM,yV),e(yV,oUe),e(bM,tUe),e($o,rUe),e($o,At),g(TM,At,null),e(At,aUe),e(At,wV),e(wV,nUe),e(At,sUe),e(At,fi),e(fi,lUe),e(fi,AV),e(AV,iUe),e(fi,dUe),e(fi,xV),e(xV,mUe),e(fi,fUe),e(At,cUe),e(At,LV),e(LV,gUe),e(At,hUe),g(FM,At,null),e($o,uUe),e($o,Le),g(MM,Le,null),e(Le,pUe),e(Le,BV),e(BV,_Ue),e(Le,vUe),e(Le,Ca),e(Ca,bUe),e(Ca,kV),e(kV,TUe),e(Ca,FUe),e(Ca,RV),e(RV,MUe),e(Ca,EUe),e(Ca,SV),e(SV,CUe),e(Ca,yUe),e(Le,wUe),e(Le,k),e(k,Lh),e(Lh,PV),e(PV,AUe),e(Lh,xUe),e(Lh,nB),e(nB,LUe),e(Lh,BUe),e(k,kUe),e(k,Bh),e(Bh,$V),e($V,RUe),e(Bh,SUe),e(Bh,sB),e(sB,PUe),e(Bh,$Ue),e(k,IU
e),e(k,kh),e(kh,IV),e(IV,jUe),e(kh,NUe),e(kh,lB),e(lB,DUe),e(kh,GUe),e(k,OUe),e(k,Rh),e(Rh,jV),e(jV,qUe),e(Rh,zUe),e(Rh,iB),e(iB,XUe),e(Rh,WUe),e(k,VUe),e(k,Sh),e(Sh,NV),e(NV,QUe),e(Sh,HUe),e(Sh,dB),e(dB,UUe),e(Sh,JUe),e(k,KUe),e(k,Ph),e(Ph,DV),e(DV,YUe),e(Ph,ZUe),e(Ph,mB),e(mB,eJe),e(Ph,oJe),e(k,tJe),e(k,$h),e($h,GV),e(GV,rJe),e($h,aJe),e($h,fB),e(fB,nJe),e($h,sJe),e(k,lJe),e(k,Ih),e(Ih,OV),e(OV,iJe),e(Ih,dJe),e(Ih,cB),e(cB,mJe),e(Ih,fJe),e(k,cJe),e(k,jh),e(jh,qV),e(qV,gJe),e(jh,hJe),e(jh,gB),e(gB,uJe),e(jh,pJe),e(k,_Je),e(k,Nh),e(Nh,zV),e(zV,vJe),e(Nh,bJe),e(Nh,hB),e(hB,TJe),e(Nh,FJe),e(k,MJe),e(k,Dh),e(Dh,XV),e(XV,EJe),e(Dh,CJe),e(Dh,uB),e(uB,yJe),e(Dh,wJe),e(k,AJe),e(k,Gh),e(Gh,WV),e(WV,xJe),e(Gh,LJe),e(Gh,pB),e(pB,BJe),e(Gh,kJe),e(k,RJe),e(k,Oh),e(Oh,VV),e(VV,SJe),e(Oh,PJe),e(Oh,_B),e(_B,$Je),e(Oh,IJe),e(k,jJe),e(k,qh),e(qh,QV),e(QV,NJe),e(qh,DJe),e(qh,vB),e(vB,GJe),e(qh,OJe),e(k,qJe),e(k,zh),e(zh,HV),e(HV,zJe),e(zh,XJe),e(zh,bB),e(bB,WJe),e(zh,VJe),e(k,QJe),e(k,Xh),e(Xh,UV),e(UV,HJe),e(Xh,UJe),e(Xh,TB),e(TB,JJe),e(Xh,KJe),e(k,YJe),e(k,Wh),e(Wh,JV),e(JV,ZJe),e(Wh,eKe),e(Wh,FB),e(FB,oKe),e(Wh,tKe),e(k,rKe),e(k,Vh),e(Vh,KV),e(KV,aKe),e(Vh,nKe),e(Vh,MB),e(MB,sKe),e(Vh,lKe),e(k,iKe),e(k,Qh),e(Qh,YV),e(YV,dKe),e(Qh,mKe),e(Qh,EB),e(EB,fKe),e(Qh,cKe),e(k,gKe),e(k,Hh),e(Hh,ZV),e(ZV,hKe),e(Hh,uKe),e(Hh,CB),e(CB,pKe),e(Hh,_Ke),e(k,vKe),e(k,Uh),e(Uh,eQ),e(eQ,bKe),e(Uh,TKe),e(Uh,yB),e(yB,FKe),e(Uh,MKe),e(k,EKe),e(k,Jh),e(Jh,oQ),e(oQ,CKe),e(Jh,yKe),e(Jh,wB),e(wB,wKe),e(Jh,AKe),e(k,xKe),e(k,Kh),e(Kh,tQ),e(tQ,LKe),e(Kh,BKe),e(Kh,AB),e(AB,kKe),e(Kh,RKe),e(k,SKe),e(k,Yh),e(Yh,rQ),e(rQ,PKe),e(Yh,$Ke),e(Yh,xB),e(xB,IKe),e(Yh,jKe),e(k,NKe),e(k,Zh),e(Zh,aQ),e(aQ,DKe),e(Zh,GKe),e(Zh,LB),e(LB,OKe),e(Zh,qKe),e(k,zKe),e(k,eu),e(eu,nQ),e(nQ,XKe),e(eu,WKe),e(eu,BB),e(BB,VKe),e(eu,QKe),e(k,HKe),e(k,ou),e(ou,sQ),e(sQ,UKe),e(ou,JKe),e(ou,kB),e(kB,KKe),e(ou,YKe),e(k,ZKe),e(k,tu),e(tu,lQ),e(lQ,eYe),e(tu,oYe),e(tu,RB),e(RB,tYe),e(tu,rYe),e(k,aYe),e(k,ru),e(ru,iQ),e(iQ,nYe),e(ru,sYe),e(ru,SB),e(SB,lYe),e(ru,iYe),e(k,dYe),e(k,au),e(au,dQ),e(dQ,mYe),e(au,fYe),e(au,PB),e(PB,cYe),e(au,gYe),e(k,hYe),e(k,nu),e(nu,mQ),e(mQ,uYe),e(nu,pYe),e(nu,$B),e($B,_Ye),e(nu,vYe),e(k,bYe),e(k,su),e(su,fQ),e(fQ,TYe),e(su,FYe),e(su,IB),e(IB,MYe),e(su,EYe),e(k,CYe),e(k,lu),e(lu,cQ),e(cQ,yYe),e(lu,wYe),e(lu,jB),e(jB,AYe),e(lu,xYe),e(k,LYe),e(k,iu),e(iu,gQ),e(gQ,BYe),e(iu,kYe),e(iu,NB),e(NB,RYe),e(iu,SYe),e(k,PYe),e(k,du),e(du,hQ),e(hQ,$Ye),e(du,IYe),e(du,DB),e(DB,jYe),e(du,NYe),e(k,DYe),e(k,mu),e(mu,uQ),e(uQ,GYe),e(mu,OYe),e(mu,GB),e(GB,qYe),e(mu,zYe),e(Le,XYe),e(Le,fu),e(fu,WYe),e(fu,pQ),e(pQ,VYe),e(fu,QYe),e(fu,_Q),e(_Q,HYe),e(Le,UYe),e(Le,vQ),e(vQ,JYe),e(Le,KYe),g(EM,Le,null),v(d,IEe,_),v(d,ci,_),e(ci,cu),e(cu,bQ),g(CM,bQ,null),e(ci,YYe),e(ci,TQ),e(TQ,ZYe),v(d,jEe,_),v(d,Io,_),g(yM,Io,null),e(Io,eZe),e(Io,gi),e(gi,oZe),e(gi,FQ),e(FQ,tZe),e(gi,rZe),e(gi,MQ),e(MQ,aZe),e(gi,nZe),e(Io,sZe),e(Io,wM),e(wM,lZe),e(wM,EQ),e(EQ,iZe),e(wM,dZe),e(Io,mZe),e(Io,xt),g(AM,xt,null),e(xt,fZe),e(xt,CQ),e(CQ,cZe),e(xt,gZe),e(xt,hi),e(hi,hZe),e(hi,yQ),e(yQ,uZe),e(hi,pZe),e(hi,wQ),e(wQ,_Ze),e(hi,vZe),e(xt,bZe),e(xt,AQ),e(AQ,TZe),e(xt,FZe),g(xM,xt,null),e(Io,MZe),e(Io,Be),g(LM,Be,null),e(Be,EZe),e(Be,xQ),e(xQ,CZe),e(Be,yZe),e(Be,ya),e(ya,wZe),e(ya,LQ),e(LQ,AZe),e(ya,xZe),e(ya,BQ),e(BQ,LZe),e(ya,BZe),e(ya,kQ),e(kQ,kZe),e(ya,RZe),e(Be,SZe),e(Be,I),e(I,gu),e(gu,RQ),e(RQ,PZe),e(gu,$Ze),e(gu,OB),e(OB,IZe),e(gu,jZe),e(I,NZe),e(I,hu),e(hu,SQ),e(SQ,DZe),e(hu,GZe),e(hu,qB),e(qB,OZe),e(hu,qZe),e(I,zZe),e(I,uu),e(uu,PQ),e(PQ,XZe),e(uu,WZe),e(uu,zB),e(zB,VZe),e(uu,QZe),e(I,H
Ze),e(I,pu),e(pu,$Q),e($Q,UZe),e(pu,JZe),e(pu,XB),e(XB,KZe),e(pu,YZe),e(I,ZZe),e(I,_u),e(_u,IQ),e(IQ,eeo),e(_u,oeo),e(_u,WB),e(WB,teo),e(_u,reo),e(I,aeo),e(I,vu),e(vu,jQ),e(jQ,neo),e(vu,seo),e(vu,VB),e(VB,leo),e(vu,ieo),e(I,deo),e(I,bu),e(bu,NQ),e(NQ,meo),e(bu,feo),e(bu,QB),e(QB,ceo),e(bu,geo),e(I,heo),e(I,Tu),e(Tu,DQ),e(DQ,ueo),e(Tu,peo),e(Tu,HB),e(HB,_eo),e(Tu,veo),e(I,beo),e(I,Fu),e(Fu,GQ),e(GQ,Teo),e(Fu,Feo),e(Fu,UB),e(UB,Meo),e(Fu,Eeo),e(I,Ceo),e(I,Mu),e(Mu,OQ),e(OQ,yeo),e(Mu,weo),e(Mu,JB),e(JB,Aeo),e(Mu,xeo),e(I,Leo),e(I,Eu),e(Eu,qQ),e(qQ,Beo),e(Eu,keo),e(Eu,KB),e(KB,Reo),e(Eu,Seo),e(I,Peo),e(I,Cu),e(Cu,zQ),e(zQ,$eo),e(Cu,Ieo),e(Cu,YB),e(YB,jeo),e(Cu,Neo),e(I,Deo),e(I,yu),e(yu,XQ),e(XQ,Geo),e(yu,Oeo),e(yu,ZB),e(ZB,qeo),e(yu,zeo),e(I,Xeo),e(I,wu),e(wu,WQ),e(WQ,Weo),e(wu,Veo),e(wu,e9),e(e9,Qeo),e(wu,Heo),e(I,Ueo),e(I,Au),e(Au,VQ),e(VQ,Jeo),e(Au,Keo),e(Au,o9),e(o9,Yeo),e(Au,Zeo),e(I,eoo),e(I,xu),e(xu,QQ),e(QQ,ooo),e(xu,too),e(xu,t9),e(t9,roo),e(xu,aoo),e(I,noo),e(I,Lu),e(Lu,HQ),e(HQ,soo),e(Lu,loo),e(Lu,r9),e(r9,ioo),e(Lu,doo),e(I,moo),e(I,Bu),e(Bu,UQ),e(UQ,foo),e(Bu,coo),e(Bu,a9),e(a9,goo),e(Bu,hoo),e(I,uoo),e(I,ku),e(ku,JQ),e(JQ,poo),e(ku,_oo),e(ku,n9),e(n9,voo),e(ku,boo),e(I,Too),e(I,Ru),e(Ru,KQ),e(KQ,Foo),e(Ru,Moo),e(Ru,s9),e(s9,Eoo),e(Ru,Coo),e(I,yoo),e(I,Su),e(Su,YQ),e(YQ,woo),e(Su,Aoo),e(Su,l9),e(l9,xoo),e(Su,Loo),e(I,Boo),e(I,Pu),e(Pu,ZQ),e(ZQ,koo),e(Pu,Roo),e(Pu,i9),e(i9,Soo),e(Pu,Poo),e(I,$oo),e(I,$u),e($u,eH),e(eH,Ioo),e($u,joo),e($u,d9),e(d9,Noo),e($u,Doo),e(I,Goo),e(I,Iu),e(Iu,oH),e(oH,Ooo),e(Iu,qoo),e(Iu,m9),e(m9,zoo),e(Iu,Xoo),e(I,Woo),e(I,ju),e(ju,tH),e(tH,Voo),e(ju,Qoo),e(ju,f9),e(f9,Hoo),e(ju,Uoo),e(I,Joo),e(I,Nu),e(Nu,rH),e(rH,Koo),e(Nu,Yoo),e(Nu,c9),e(c9,Zoo),e(Nu,eto),e(I,oto),e(I,Du),e(Du,aH),e(aH,tto),e(Du,rto),e(Du,g9),e(g9,ato),e(Du,nto),e(I,sto),e(I,Gu),e(Gu,nH),e(nH,lto),e(Gu,ito),e(Gu,h9),e(h9,dto),e(Gu,mto),e(I,fto),e(I,Ou),e(Ou,sH),e(sH,cto),e(Ou,gto),e(Ou,u9),e(u9,hto),e(Ou,uto),e(I,pto),e(I,qu),e(qu,lH),e(lH,_to),e(qu,vto),e(qu,p9),e(p9,bto),e(qu,Tto),e(Be,Fto),e(Be,zu),e(zu,Mto),e(zu,iH),e(iH,Eto),e(zu,Cto),e(zu,dH),e(dH,yto),e(Be,wto),e(Be,mH),e(mH,Ato),e(Be,xto),g(BM,Be,null),v(d,NEe,_),v(d,ui,_),e(ui,Xu),e(Xu,fH),g(kM,fH,null),e(ui,Lto),e(ui,cH),e(cH,Bto),v(d,DEe,_),v(d,jo,_),g(RM,jo,null),e(jo,kto),e(jo,pi),e(pi,Rto),e(pi,gH),e(gH,Sto),e(pi,Pto),e(pi,hH),e(hH,$to),e(pi,Ito),e(jo,jto),e(jo,SM),e(SM,Nto),e(SM,uH),e(uH,Dto),e(SM,Gto),e(jo,Oto),e(jo,Lt),g(PM,Lt,null),e(Lt,qto),e(Lt,pH),e(pH,zto),e(Lt,Xto),e(Lt,_i),e(_i,Wto),e(_i,_H),e(_H,Vto),e(_i,Qto),e(_i,vH),e(vH,Hto),e(_i,Uto),e(Lt,Jto),e(Lt,bH),e(bH,Kto),e(Lt,Yto),g($M,Lt,null),e(jo,Zto),e(jo,ke),g(IM,ke,null),e(ke,ero),e(ke,TH),e(TH,oro),e(ke,tro),e(ke,wa),e(wa,rro),e(wa,FH),e(FH,aro),e(wa,nro),e(wa,MH),e(MH,sro),e(wa,lro),e(wa,EH),e(EH,iro),e(wa,dro),e(ke,mro),e(ke,$),e($,Wu),e(Wu,CH),e(CH,fro),e(Wu,cro),e(Wu,_9),e(_9,gro),e(Wu,hro),e($,uro),e($,Vu),e(Vu,yH),e(yH,pro),e(Vu,_ro),e(Vu,v9),e(v9,vro),e(Vu,bro),e($,Tro),e($,Qu),e(Qu,wH),e(wH,Fro),e(Qu,Mro),e(Qu,b9),e(b9,Ero),e(Qu,Cro),e($,yro),e($,Hu),e(Hu,AH),e(AH,wro),e(Hu,Aro),e(Hu,T9),e(T9,xro),e(Hu,Lro),e($,Bro),e($,Uu),e(Uu,xH),e(xH,kro),e(Uu,Rro),e(Uu,F9),e(F9,Sro),e(Uu,Pro),e($,$ro),e($,Ju),e(Ju,LH),e(LH,Iro),e(Ju,jro),e(Ju,M9),e(M9,Nro),e(Ju,Dro),e($,Gro),e($,Ku),e(Ku,BH),e(BH,Oro),e(Ku,qro),e(Ku,E9),e(E9,zro),e(Ku,Xro),e($,Wro),e($,Yu),e(Yu,kH),e(kH,Vro),e(Yu,Qro),e(Yu,C9),e(C9,Hro),e(Yu,Uro),e($,Jro),e($,Zu),e(Zu,RH),e(RH,Kro),e(Zu,Yro),e(Zu,y9),e(y9,Zro),e(Zu,eao),e($,oao),e($,ep),e(ep,SH),e(SH,tao),e(ep,rao),e(ep,w9),e(w9,aao),e(ep,nao),e($,
sao),e($,op),e(op,PH),e(PH,lao),e(op,iao),e(op,A9),e(A9,dao),e(op,mao),e($,fao),e($,tp),e(tp,$H),e($H,cao),e(tp,gao),e(tp,x9),e(x9,hao),e(tp,uao),e($,pao),e($,rp),e(rp,IH),e(IH,_ao),e(rp,vao),e(rp,L9),e(L9,bao),e(rp,Tao),e($,Fao),e($,ap),e(ap,jH),e(jH,Mao),e(ap,Eao),e(ap,B9),e(B9,Cao),e(ap,yao),e($,wao),e($,np),e(np,NH),e(NH,Aao),e(np,xao),e(np,k9),e(k9,Lao),e(np,Bao),e($,kao),e($,sp),e(sp,DH),e(DH,Rao),e(sp,Sao),e(sp,R9),e(R9,Pao),e(sp,$ao),e($,Iao),e($,lp),e(lp,GH),e(GH,jao),e(lp,Nao),e(lp,S9),e(S9,Dao),e(lp,Gao),e($,Oao),e($,ip),e(ip,OH),e(OH,qao),e(ip,zao),e(ip,P9),e(P9,Xao),e(ip,Wao),e($,Vao),e($,dp),e(dp,qH),e(qH,Qao),e(dp,Hao),e(dp,$9),e($9,Uao),e(dp,Jao),e($,Kao),e($,mp),e(mp,zH),e(zH,Yao),e(mp,Zao),e(mp,I9),e(I9,eno),e(mp,ono),e($,tno),e($,fp),e(fp,XH),e(XH,rno),e(fp,ano),e(fp,j9),e(j9,nno),e(fp,sno),e($,lno),e($,cp),e(cp,WH),e(WH,ino),e(cp,dno),e(cp,N9),e(N9,mno),e(cp,fno),e($,cno),e($,gp),e(gp,VH),e(VH,gno),e(gp,hno),e(gp,D9),e(D9,uno),e(gp,pno),e($,_no),e($,hp),e(hp,QH),e(QH,vno),e(hp,bno),e(hp,G9),e(G9,Tno),e(hp,Fno),e($,Mno),e($,up),e(up,HH),e(HH,Eno),e(up,Cno),e(up,O9),e(O9,yno),e(up,wno),e($,Ano),e($,pp),e(pp,UH),e(UH,xno),e(pp,Lno),e(pp,q9),e(q9,Bno),e(pp,kno),e($,Rno),e($,_p),e(_p,JH),e(JH,Sno),e(_p,Pno),e(_p,z9),e(z9,$no),e(_p,Ino),e($,jno),e($,vp),e(vp,KH),e(KH,Nno),e(vp,Dno),e(vp,X9),e(X9,Gno),e(vp,Ono),e($,qno),e($,bp),e(bp,YH),e(YH,zno),e(bp,Xno),e(bp,ZH),e(ZH,Wno),e(bp,Vno),e($,Qno),e($,Tp),e(Tp,eU),e(eU,Hno),e(Tp,Uno),e(Tp,W9),e(W9,Jno),e(Tp,Kno),e($,Yno),e($,Fp),e(Fp,oU),e(oU,Zno),e(Fp,eso),e(Fp,V9),e(V9,oso),e(Fp,tso),e(ke,rso),e(ke,Mp),e(Mp,aso),e(Mp,tU),e(tU,nso),e(Mp,sso),e(Mp,rU),e(rU,lso),e(ke,iso),e(ke,aU),e(aU,dso),e(ke,mso),g(jM,ke,null),v(d,GEe,_),v(d,vi,_),e(vi,Ep),e(Ep,nU),g(NM,nU,null),e(vi,fso),e(vi,sU),e(sU,cso),v(d,OEe,_),v(d,No,_),g(DM,No,null),e(No,gso),e(No,bi),e(bi,hso),e(bi,lU),e(lU,uso),e(bi,pso),e(bi,iU),e(iU,_so),e(bi,vso),e(No,bso),e(No,GM),e(GM,Tso),e(GM,dU),e(dU,Fso),e(GM,Mso),e(No,Eso),e(No,Bt),g(OM,Bt,null),e(Bt,Cso),e(Bt,mU),e(mU,yso),e(Bt,wso),e(Bt,Ti),e(Ti,Aso),e(Ti,fU),e(fU,xso),e(Ti,Lso),e(Ti,cU),e(cU,Bso),e(Ti,kso),e(Bt,Rso),e(Bt,gU),e(gU,Sso),e(Bt,Pso),g(qM,Bt,null),e(No,$so),e(No,Re),g(zM,Re,null),e(Re,Iso),e(Re,hU),e(hU,jso),e(Re,Nso),e(Re,Aa),e(Aa,Dso),e(Aa,uU),e(uU,Gso),e(Aa,Oso),e(Aa,pU),e(pU,qso),e(Aa,zso),e(Aa,_U),e(_U,Xso),e(Aa,Wso),e(Re,Vso),e(Re,ne),e(ne,Cp),e(Cp,vU),e(vU,Qso),e(Cp,Hso),e(Cp,Q9),e(Q9,Uso),e(Cp,Jso),e(ne,Kso),e(ne,yp),e(yp,bU),e(bU,Yso),e(yp,Zso),e(yp,H9),e(H9,elo),e(yp,olo),e(ne,tlo),e(ne,wp),e(wp,TU),e(TU,rlo),e(wp,alo),e(wp,U9),e(U9,nlo),e(wp,slo),e(ne,llo),e(ne,Ap),e(Ap,FU),e(FU,ilo),e(Ap,dlo),e(Ap,J9),e(J9,mlo),e(Ap,flo),e(ne,clo),e(ne,xp),e(xp,MU),e(MU,glo),e(xp,hlo),e(xp,K9),e(K9,ulo),e(xp,plo),e(ne,_lo),e(ne,Lp),e(Lp,EU),e(EU,vlo),e(Lp,blo),e(Lp,Y9),e(Y9,Tlo),e(Lp,Flo),e(ne,Mlo),e(ne,Bp),e(Bp,CU),e(CU,Elo),e(Bp,Clo),e(Bp,Z9),e(Z9,ylo),e(Bp,wlo),e(ne,Alo),e(ne,kp),e(kp,yU),e(yU,xlo),e(kp,Llo),e(kp,ek),e(ek,Blo),e(kp,klo),e(ne,Rlo),e(ne,Rp),e(Rp,wU),e(wU,Slo),e(Rp,Plo),e(Rp,ok),e(ok,$lo),e(Rp,Ilo),e(ne,jlo),e(ne,Sp),e(Sp,AU),e(AU,Nlo),e(Sp,Dlo),e(Sp,tk),e(tk,Glo),e(Sp,Olo),e(ne,qlo),e(ne,Pp),e(Pp,xU),e(xU,zlo),e(Pp,Xlo),e(Pp,rk),e(rk,Wlo),e(Pp,Vlo),e(ne,Qlo),e(ne,$p),e($p,LU),e(LU,Hlo),e($p,Ulo),e($p,ak),e(ak,Jlo),e($p,Klo),e(ne,Ylo),e(ne,Ip),e(Ip,BU),e(BU,Zlo),e(Ip,eio),e(Ip,nk),e(nk,oio),e(Ip,tio),e(ne,rio),e(ne,jp),e(jp,kU),e(kU,aio),e(jp,nio),e(jp,sk),e(sk,sio),e(jp,lio),e(ne,iio),e(ne,Np),e(Np,RU),e(RU,dio),e(Np,mio),e(Np,lk),e(lk,fio),e(Np,cio),e(Re,gio),e(Re,Dp),e(Dp,hio),e(Dp,SU),e(SU,uio),
e(Dp,pio),e(Dp,PU),e(PU,_io),e(Re,vio),e(Re,$U),e($U,bio),e(Re,Tio),g(XM,Re,null),v(d,qEe,_),v(d,Fi,_),e(Fi,Gp),e(Gp,IU),g(WM,IU,null),e(Fi,Fio),e(Fi,jU),e(jU,Mio),v(d,zEe,_),v(d,Do,_),g(VM,Do,null),e(Do,Eio),e(Do,Mi),e(Mi,Cio),e(Mi,NU),e(NU,yio),e(Mi,wio),e(Mi,DU),e(DU,Aio),e(Mi,xio),e(Do,Lio),e(Do,QM),e(QM,Bio),e(QM,GU),e(GU,kio),e(QM,Rio),e(Do,Sio),e(Do,kt),g(HM,kt,null),e(kt,Pio),e(kt,OU),e(OU,$io),e(kt,Iio),e(kt,Ei),e(Ei,jio),e(Ei,qU),e(qU,Nio),e(Ei,Dio),e(Ei,zU),e(zU,Gio),e(Ei,Oio),e(kt,qio),e(kt,XU),e(XU,zio),e(kt,Xio),g(UM,kt,null),e(Do,Wio),e(Do,Se),g(JM,Se,null),e(Se,Vio),e(Se,WU),e(WU,Qio),e(Se,Hio),e(Se,xa),e(xa,Uio),e(xa,VU),e(VU,Jio),e(xa,Kio),e(xa,QU),e(QU,Yio),e(xa,Zio),e(xa,HU),e(HU,edo),e(xa,odo),e(Se,tdo),e(Se,A),e(A,Op),e(Op,UU),e(UU,rdo),e(Op,ado),e(Op,ik),e(ik,ndo),e(Op,sdo),e(A,ldo),e(A,qp),e(qp,JU),e(JU,ido),e(qp,ddo),e(qp,dk),e(dk,mdo),e(qp,fdo),e(A,cdo),e(A,zp),e(zp,KU),e(KU,gdo),e(zp,hdo),e(zp,mk),e(mk,udo),e(zp,pdo),e(A,_do),e(A,Xp),e(Xp,YU),e(YU,vdo),e(Xp,bdo),e(Xp,fk),e(fk,Tdo),e(Xp,Fdo),e(A,Mdo),e(A,Wp),e(Wp,ZU),e(ZU,Edo),e(Wp,Cdo),e(Wp,ck),e(ck,ydo),e(Wp,wdo),e(A,Ado),e(A,Vp),e(Vp,eJ),e(eJ,xdo),e(Vp,Ldo),e(Vp,gk),e(gk,Bdo),e(Vp,kdo),e(A,Rdo),e(A,Qp),e(Qp,oJ),e(oJ,Sdo),e(Qp,Pdo),e(Qp,hk),e(hk,$do),e(Qp,Ido),e(A,jdo),e(A,Hp),e(Hp,tJ),e(tJ,Ndo),e(Hp,Ddo),e(Hp,uk),e(uk,Gdo),e(Hp,Odo),e(A,qdo),e(A,Up),e(Up,rJ),e(rJ,zdo),e(Up,Xdo),e(Up,pk),e(pk,Wdo),e(Up,Vdo),e(A,Qdo),e(A,Jp),e(Jp,aJ),e(aJ,Hdo),e(Jp,Udo),e(Jp,_k),e(_k,Jdo),e(Jp,Kdo),e(A,Ydo),e(A,Kp),e(Kp,nJ),e(nJ,Zdo),e(Kp,emo),e(Kp,vk),e(vk,omo),e(Kp,tmo),e(A,rmo),e(A,Yp),e(Yp,sJ),e(sJ,amo),e(Yp,nmo),e(Yp,bk),e(bk,smo),e(Yp,lmo),e(A,imo),e(A,Zp),e(Zp,lJ),e(lJ,dmo),e(Zp,mmo),e(Zp,Tk),e(Tk,fmo),e(Zp,cmo),e(A,gmo),e(A,e_),e(e_,iJ),e(iJ,hmo),e(e_,umo),e(e_,Fk),e(Fk,pmo),e(e_,_mo),e(A,vmo),e(A,o_),e(o_,dJ),e(dJ,bmo),e(o_,Tmo),e(o_,Mk),e(Mk,Fmo),e(o_,Mmo),e(A,Emo),e(A,t_),e(t_,mJ),e(mJ,Cmo),e(t_,ymo),e(t_,Ek),e(Ek,wmo),e(t_,Amo),e(A,xmo),e(A,r_),e(r_,fJ),e(fJ,Lmo),e(r_,Bmo),e(r_,Ck),e(Ck,kmo),e(r_,Rmo),e(A,Smo),e(A,a_),e(a_,cJ),e(cJ,Pmo),e(a_,$mo),e(a_,yk),e(yk,Imo),e(a_,jmo),e(A,Nmo),e(A,n_),e(n_,gJ),e(gJ,Dmo),e(n_,Gmo),e(n_,wk),e(wk,Omo),e(n_,qmo),e(A,zmo),e(A,s_),e(s_,hJ),e(hJ,Xmo),e(s_,Wmo),e(s_,Ak),e(Ak,Vmo),e(s_,Qmo),e(A,Hmo),e(A,l_),e(l_,uJ),e(uJ,Umo),e(l_,Jmo),e(l_,xk),e(xk,Kmo),e(l_,Ymo),e(A,Zmo),e(A,i_),e(i_,pJ),e(pJ,efo),e(i_,ofo),e(i_,Lk),e(Lk,tfo),e(i_,rfo),e(A,afo),e(A,d_),e(d_,_J),e(_J,nfo),e(d_,sfo),e(d_,Bk),e(Bk,lfo),e(d_,ifo),e(A,dfo),e(A,m_),e(m_,vJ),e(vJ,mfo),e(m_,ffo),e(m_,kk),e(kk,cfo),e(m_,gfo),e(A,hfo),e(A,f_),e(f_,bJ),e(bJ,ufo),e(f_,pfo),e(f_,Rk),e(Rk,_fo),e(f_,vfo),e(A,bfo),e(A,c_),e(c_,TJ),e(TJ,Tfo),e(c_,Ffo),e(c_,Sk),e(Sk,Mfo),e(c_,Efo),e(A,Cfo),e(A,g_),e(g_,FJ),e(FJ,yfo),e(g_,wfo),e(g_,Pk),e(Pk,Afo),e(g_,xfo),e(A,Lfo),e(A,h_),e(h_,MJ),e(MJ,Bfo),e(h_,kfo),e(h_,$k),e($k,Rfo),e(h_,Sfo),e(A,Pfo),e(A,u_),e(u_,EJ),e(EJ,$fo),e(u_,Ifo),e(u_,Ik),e(Ik,jfo),e(u_,Nfo),e(A,Dfo),e(A,p_),e(p_,CJ),e(CJ,Gfo),e(p_,Ofo),e(p_,jk),e(jk,qfo),e(p_,zfo),e(A,Xfo),e(A,__),e(__,yJ),e(yJ,Wfo),e(__,Vfo),e(__,Nk),e(Nk,Qfo),e(__,Hfo),e(A,Ufo),e(A,v_),e(v_,wJ),e(wJ,Jfo),e(v_,Kfo),e(v_,Dk),e(Dk,Yfo),e(v_,Zfo),e(A,eco),e(A,b_),e(b_,AJ),e(AJ,oco),e(b_,tco),e(b_,Gk),e(Gk,rco),e(b_,aco),e(A,nco),e(A,T_),e(T_,xJ),e(xJ,sco),e(T_,lco),e(T_,Ok),e(Ok,ico),e(T_,dco),e(A,mco),e(A,F_),e(F_,LJ),e(LJ,fco),e(F_,cco),e(F_,qk),e(qk,gco),e(F_,hco),e(A,uco),e(A,M_),e(M_,BJ),e(BJ,pco),e(M_,_co),e(M_,zk),e(zk,vco),e(M_,bco),e(A,Tco),e(A,E_),e(E_,kJ),e(kJ,Fco),e(E_,Mco),e(E_,Xk),e(Xk,Eco),e(E_,Cco),e(A,yco),e(A,C_),e(C_,RJ),e(RJ,wco),e(C_,Aco),e
(C_,Wk),e(Wk,xco),e(C_,Lco),e(A,Bco),e(A,y_),e(y_,SJ),e(SJ,kco),e(y_,Rco),e(y_,Vk),e(Vk,Sco),e(y_,Pco),e(A,$co),e(A,w_),e(w_,PJ),e(PJ,Ico),e(w_,jco),e(w_,Qk),e(Qk,Nco),e(w_,Dco),e(A,Gco),e(A,A_),e(A_,$J),e($J,Oco),e(A_,qco),e(A_,Hk),e(Hk,zco),e(A_,Xco),e(Se,Wco),e(Se,x_),e(x_,Vco),e(x_,IJ),e(IJ,Qco),e(x_,Hco),e(x_,jJ),e(jJ,Uco),e(Se,Jco),e(Se,NJ),e(NJ,Kco),e(Se,Yco),g(KM,Se,null),v(d,XEe,_),v(d,Ci,_),e(Ci,L_),e(L_,DJ),g(YM,DJ,null),e(Ci,Zco),e(Ci,GJ),e(GJ,ego),v(d,WEe,_),v(d,Go,_),g(ZM,Go,null),e(Go,ogo),e(Go,yi),e(yi,tgo),e(yi,OJ),e(OJ,rgo),e(yi,ago),e(yi,qJ),e(qJ,ngo),e(yi,sgo),e(Go,lgo),e(Go,eE),e(eE,igo),e(eE,zJ),e(zJ,dgo),e(eE,mgo),e(Go,fgo),e(Go,Rt),g(oE,Rt,null),e(Rt,cgo),e(Rt,XJ),e(XJ,ggo),e(Rt,hgo),e(Rt,wi),e(wi,ugo),e(wi,WJ),e(WJ,pgo),e(wi,_go),e(wi,VJ),e(VJ,vgo),e(wi,bgo),e(Rt,Tgo),e(Rt,QJ),e(QJ,Fgo),e(Rt,Mgo),g(tE,Rt,null),e(Go,Ego),e(Go,Pe),g(rE,Pe,null),e(Pe,Cgo),e(Pe,HJ),e(HJ,ygo),e(Pe,wgo),e(Pe,La),e(La,Ago),e(La,UJ),e(UJ,xgo),e(La,Lgo),e(La,JJ),e(JJ,Bgo),e(La,kgo),e(La,KJ),e(KJ,Rgo),e(La,Sgo),e(Pe,Pgo),e(Pe,q),e(q,B_),e(B_,YJ),e(YJ,$go),e(B_,Igo),e(B_,Uk),e(Uk,jgo),e(B_,Ngo),e(q,Dgo),e(q,k_),e(k_,ZJ),e(ZJ,Ggo),e(k_,Ogo),e(k_,Jk),e(Jk,qgo),e(k_,zgo),e(q,Xgo),e(q,R_),e(R_,eK),e(eK,Wgo),e(R_,Vgo),e(R_,Kk),e(Kk,Qgo),e(R_,Hgo),e(q,Ugo),e(q,S_),e(S_,oK),e(oK,Jgo),e(S_,Kgo),e(S_,Yk),e(Yk,Ygo),e(S_,Zgo),e(q,eho),e(q,P_),e(P_,tK),e(tK,oho),e(P_,tho),e(P_,Zk),e(Zk,rho),e(P_,aho),e(q,nho),e(q,$_),e($_,rK),e(rK,sho),e($_,lho),e($_,eR),e(eR,iho),e($_,dho),e(q,mho),e(q,I_),e(I_,aK),e(aK,fho),e(I_,cho),e(I_,oR),e(oR,gho),e(I_,hho),e(q,uho),e(q,j_),e(j_,nK),e(nK,pho),e(j_,_ho),e(j_,tR),e(tR,vho),e(j_,bho),e(q,Tho),e(q,N_),e(N_,sK),e(sK,Fho),e(N_,Mho),e(N_,rR),e(rR,Eho),e(N_,Cho),e(q,yho),e(q,D_),e(D_,lK),e(lK,who),e(D_,Aho),e(D_,aR),e(aR,xho),e(D_,Lho),e(q,Bho),e(q,G_),e(G_,iK),e(iK,kho),e(G_,Rho),e(G_,nR),e(nR,Sho),e(G_,Pho),e(q,$ho),e(q,O_),e(O_,dK),e(dK,Iho),e(O_,jho),e(O_,sR),e(sR,Nho),e(O_,Dho),e(q,Gho),e(q,q_),e(q_,mK),e(mK,Oho),e(q_,qho),e(q_,lR),e(lR,zho),e(q_,Xho),e(q,Who),e(q,z_),e(z_,fK),e(fK,Vho),e(z_,Qho),e(z_,iR),e(iR,Hho),e(z_,Uho),e(q,Jho),e(q,X_),e(X_,cK),e(cK,Kho),e(X_,Yho),e(X_,dR),e(dR,Zho),e(X_,euo),e(q,ouo),e(q,W_),e(W_,gK),e(gK,tuo),e(W_,ruo),e(W_,mR),e(mR,auo),e(W_,nuo),e(q,suo),e(q,V_),e(V_,hK),e(hK,luo),e(V_,iuo),e(V_,fR),e(fR,duo),e(V_,muo),e(q,fuo),e(q,Q_),e(Q_,uK),e(uK,cuo),e(Q_,guo),e(Q_,cR),e(cR,huo),e(Q_,uuo),e(q,puo),e(q,H_),e(H_,pK),e(pK,_uo),e(H_,vuo),e(H_,gR),e(gR,buo),e(H_,Tuo),e(q,Fuo),e(q,U_),e(U_,_K),e(_K,Muo),e(U_,Euo),e(U_,hR),e(hR,Cuo),e(U_,yuo),e(q,wuo),e(q,J_),e(J_,vK),e(vK,Auo),e(J_,xuo),e(J_,uR),e(uR,Luo),e(J_,Buo),e(q,kuo),e(q,K_),e(K_,bK),e(bK,Ruo),e(K_,Suo),e(K_,pR),e(pR,Puo),e(K_,$uo),e(q,Iuo),e(q,Y_),e(Y_,TK),e(TK,juo),e(Y_,Nuo),e(Y_,_R),e(_R,Duo),e(Y_,Guo),e(q,Ouo),e(q,Z_),e(Z_,FK),e(FK,quo),e(Z_,zuo),e(Z_,vR),e(vR,Xuo),e(Z_,Wuo),e(Pe,Vuo),e(Pe,ev),e(ev,Quo),e(ev,MK),e(MK,Huo),e(ev,Uuo),e(ev,EK),e(EK,Juo),e(Pe,Kuo),e(Pe,CK),e(CK,Yuo),e(Pe,Zuo),g(aE,Pe,null),v(d,VEe,_),v(d,Ai,_),e(Ai,ov),e(ov,yK),g(nE,yK,null),e(Ai,epo),e(Ai,wK),e(wK,opo),v(d,QEe,_),v(d,Oo,_),g(sE,Oo,null),e(Oo,tpo),e(Oo,xi),e(xi,rpo),e(xi,AK),e(AK,apo),e(xi,npo),e(xi,xK),e(xK,spo),e(xi,lpo),e(Oo,ipo),e(Oo,lE),e(lE,dpo),e(lE,LK),e(LK,mpo),e(lE,fpo),e(Oo,cpo),e(Oo,St),g(iE,St,null),e(St,gpo),e(St,BK),e(BK,hpo),e(St,upo),e(St,Li),e(Li,ppo),e(Li,kK),e(kK,_po),e(Li,vpo),e(Li,RK),e(RK,bpo),e(Li,Tpo),e(St,Fpo),e(St,SK),e(SK,Mpo),e(St,Epo),g(dE,St,null),e(Oo,Cpo),e(Oo,$e),g(mE,$e,null),e($e,ypo),e($e,PK),e(PK,wpo),e($e,Apo),e($e,Ba),e(Ba,xpo),e(Ba,$K),e($K,Lpo),e(Ba,Bpo),e(Ba,IK
),e(IK,kpo),e(Ba,Rpo),e(Ba,jK),e(jK,Spo),e(Ba,Ppo),e($e,$po),e($e,qr),e(qr,tv),e(tv,NK),e(NK,Ipo),e(tv,jpo),e(tv,bR),e(bR,Npo),e(tv,Dpo),e(qr,Gpo),e(qr,rv),e(rv,DK),e(DK,Opo),e(rv,qpo),e(rv,TR),e(TR,zpo),e(rv,Xpo),e(qr,Wpo),e(qr,av),e(av,GK),e(GK,Vpo),e(av,Qpo),e(av,FR),e(FR,Hpo),e(av,Upo),e(qr,Jpo),e(qr,nv),e(nv,OK),e(OK,Kpo),e(nv,Ypo),e(nv,MR),e(MR,Zpo),e(nv,e_o),e(qr,o_o),e(qr,sv),e(sv,qK),e(qK,t_o),e(sv,r_o),e(sv,ER),e(ER,a_o),e(sv,n_o),e($e,s_o),e($e,lv),e(lv,l_o),e(lv,zK),e(zK,i_o),e(lv,d_o),e(lv,XK),e(XK,m_o),e($e,f_o),e($e,WK),e(WK,c_o),e($e,g_o),g(fE,$e,null),v(d,HEe,_),v(d,Bi,_),e(Bi,iv),e(iv,VK),g(cE,VK,null),e(Bi,h_o),e(Bi,QK),e(QK,u_o),v(d,UEe,_),v(d,qo,_),g(gE,qo,null),e(qo,p_o),e(qo,ki),e(ki,__o),e(ki,HK),e(HK,v_o),e(ki,b_o),e(ki,UK),e(UK,T_o),e(ki,F_o),e(qo,M_o),e(qo,hE),e(hE,E_o),e(hE,JK),e(JK,C_o),e(hE,y_o),e(qo,w_o),e(qo,Pt),g(uE,Pt,null),e(Pt,A_o),e(Pt,KK),e(KK,x_o),e(Pt,L_o),e(Pt,Ri),e(Ri,B_o),e(Ri,YK),e(YK,k_o),e(Ri,R_o),e(Ri,ZK),e(ZK,S_o),e(Ri,P_o),e(Pt,$_o),e(Pt,eY),e(eY,I_o),e(Pt,j_o),g(pE,Pt,null),e(qo,N_o),e(qo,Ie),g(_E,Ie,null),e(Ie,D_o),e(Ie,oY),e(oY,G_o),e(Ie,O_o),e(Ie,ka),e(ka,q_o),e(ka,tY),e(tY,z_o),e(ka,X_o),e(ka,rY),e(rY,W_o),e(ka,V_o),e(ka,aY),e(aY,Q_o),e(ka,H_o),e(Ie,U_o),e(Ie,N),e(N,dv),e(dv,nY),e(nY,J_o),e(dv,K_o),e(dv,CR),e(CR,Y_o),e(dv,Z_o),e(N,evo),e(N,mv),e(mv,sY),e(sY,ovo),e(mv,tvo),e(mv,yR),e(yR,rvo),e(mv,avo),e(N,nvo),e(N,fv),e(fv,lY),e(lY,svo),e(fv,lvo),e(fv,wR),e(wR,ivo),e(fv,dvo),e(N,mvo),e(N,cv),e(cv,iY),e(iY,fvo),e(cv,cvo),e(cv,AR),e(AR,gvo),e(cv,hvo),e(N,uvo),e(N,gv),e(gv,dY),e(dY,pvo),e(gv,_vo),e(gv,xR),e(xR,vvo),e(gv,bvo),e(N,Tvo),e(N,hv),e(hv,mY),e(mY,Fvo),e(hv,Mvo),e(hv,LR),e(LR,Evo),e(hv,Cvo),e(N,yvo),e(N,uv),e(uv,fY),e(fY,wvo),e(uv,Avo),e(uv,BR),e(BR,xvo),e(uv,Lvo),e(N,Bvo),e(N,pv),e(pv,cY),e(cY,kvo),e(pv,Rvo),e(pv,kR),e(kR,Svo),e(pv,Pvo),e(N,$vo),e(N,_v),e(_v,gY),e(gY,Ivo),e(_v,jvo),e(_v,RR),e(RR,Nvo),e(_v,Dvo),e(N,Gvo),e(N,vv),e(vv,hY),e(hY,Ovo),e(vv,qvo),e(vv,SR),e(SR,zvo),e(vv,Xvo),e(N,Wvo),e(N,bv),e(bv,uY),e(uY,Vvo),e(bv,Qvo),e(bv,PR),e(PR,Hvo),e(bv,Uvo),e(N,Jvo),e(N,Tv),e(Tv,pY),e(pY,Kvo),e(Tv,Yvo),e(Tv,$R),e($R,Zvo),e(Tv,e1o),e(N,o1o),e(N,Fv),e(Fv,_Y),e(_Y,t1o),e(Fv,r1o),e(Fv,IR),e(IR,a1o),e(Fv,n1o),e(N,s1o),e(N,Mv),e(Mv,vY),e(vY,l1o),e(Mv,i1o),e(Mv,jR),e(jR,d1o),e(Mv,m1o),e(N,f1o),e(N,Ev),e(Ev,bY),e(bY,c1o),e(Ev,g1o),e(Ev,NR),e(NR,h1o),e(Ev,u1o),e(N,p1o),e(N,Cv),e(Cv,TY),e(TY,_1o),e(Cv,v1o),e(Cv,DR),e(DR,b1o),e(Cv,T1o),e(N,F1o),e(N,yv),e(yv,FY),e(FY,M1o),e(yv,E1o),e(yv,GR),e(GR,C1o),e(yv,y1o),e(N,w1o),e(N,wv),e(wv,MY),e(MY,A1o),e(wv,x1o),e(wv,OR),e(OR,L1o),e(wv,B1o),e(N,k1o),e(N,Av),e(Av,EY),e(EY,R1o),e(Av,S1o),e(Av,qR),e(qR,P1o),e(Av,$1o),e(N,I1o),e(N,xv),e(xv,CY),e(CY,j1o),e(xv,N1o),e(xv,zR),e(zR,D1o),e(xv,G1o),e(N,O1o),e(N,Lv),e(Lv,yY),e(yY,q1o),e(Lv,z1o),e(Lv,XR),e(XR,X1o),e(Lv,W1o),e(N,V1o),e(N,Bv),e(Bv,wY),e(wY,Q1o),e(Bv,H1o),e(Bv,WR),e(WR,U1o),e(Bv,J1o),e(N,K1o),e(N,kv),e(kv,AY),e(AY,Y1o),e(kv,Z1o),e(kv,VR),e(VR,e2o),e(kv,o2o),e(N,t2o),e(N,Rv),e(Rv,xY),e(xY,r2o),e(Rv,a2o),e(Rv,QR),e(QR,n2o),e(Rv,s2o),e(N,l2o),e(N,Sv),e(Sv,LY),e(LY,i2o),e(Sv,d2o),e(Sv,HR),e(HR,m2o),e(Sv,f2o),e(N,c2o),e(N,Pv),e(Pv,BY),e(BY,g2o),e(Pv,h2o),e(Pv,UR),e(UR,u2o),e(Pv,p2o),e(N,_2o),e(N,$v),e($v,kY),e(kY,v2o),e($v,b2o),e($v,JR),e(JR,T2o),e($v,F2o),e(N,M2o),e(N,Iv),e(Iv,RY),e(RY,E2o),e(Iv,C2o),e(Iv,KR),e(KR,y2o),e(Iv,w2o),e(N,A2o),e(N,jv),e(jv,SY),e(SY,x2o),e(jv,L2o),e(jv,YR),e(YR,B2o),e(jv,k2o),e(Ie,R2o),e(Ie,Nv),e(Nv,S2o),e(Nv,PY),e(PY,P2o),e(Nv,$2o),e(Nv,$Y),e($Y,I2o),e(Ie,j2o),e(Ie,IY),e(IY,N2o),e(Ie,D2o),g(vE,Ie,null),v(d,JEe,_),v(d,Si,_),e(S
i,Dv),e(Dv,jY),g(bE,jY,null),e(Si,G2o),e(Si,NY),e(NY,O2o),v(d,KEe,_),v(d,zo,_),g(TE,zo,null),e(zo,q2o),e(zo,Pi),e(Pi,z2o),e(Pi,DY),e(DY,X2o),e(Pi,W2o),e(Pi,GY),e(GY,V2o),e(Pi,Q2o),e(zo,H2o),e(zo,FE),e(FE,U2o),e(FE,OY),e(OY,J2o),e(FE,K2o),e(zo,Y2o),e(zo,$t),g(ME,$t,null),e($t,Z2o),e($t,qY),e(qY,ebo),e($t,obo),e($t,$i),e($i,tbo),e($i,zY),e(zY,rbo),e($i,abo),e($i,XY),e(XY,nbo),e($i,sbo),e($t,lbo),e($t,WY),e(WY,ibo),e($t,dbo),g(EE,$t,null),e(zo,mbo),e(zo,je),g(CE,je,null),e(je,fbo),e(je,VY),e(VY,cbo),e(je,gbo),e(je,Ra),e(Ra,hbo),e(Ra,QY),e(QY,ubo),e(Ra,pbo),e(Ra,HY),e(HY,_bo),e(Ra,vbo),e(Ra,UY),e(UY,bbo),e(Ra,Tbo),e(je,Fbo),e(je,R),e(R,Gv),e(Gv,JY),e(JY,Mbo),e(Gv,Ebo),e(Gv,ZR),e(ZR,Cbo),e(Gv,ybo),e(R,wbo),e(R,Ov),e(Ov,KY),e(KY,Abo),e(Ov,xbo),e(Ov,eS),e(eS,Lbo),e(Ov,Bbo),e(R,kbo),e(R,qv),e(qv,YY),e(YY,Rbo),e(qv,Sbo),e(qv,oS),e(oS,Pbo),e(qv,$bo),e(R,Ibo),e(R,zv),e(zv,ZY),e(ZY,jbo),e(zv,Nbo),e(zv,tS),e(tS,Dbo),e(zv,Gbo),e(R,Obo),e(R,Xv),e(Xv,eZ),e(eZ,qbo),e(Xv,zbo),e(Xv,rS),e(rS,Xbo),e(Xv,Wbo),e(R,Vbo),e(R,Wv),e(Wv,oZ),e(oZ,Qbo),e(Wv,Hbo),e(Wv,aS),e(aS,Ubo),e(Wv,Jbo),e(R,Kbo),e(R,Vv),e(Vv,tZ),e(tZ,Ybo),e(Vv,Zbo),e(Vv,nS),e(nS,e4o),e(Vv,o4o),e(R,t4o),e(R,Qv),e(Qv,rZ),e(rZ,r4o),e(Qv,a4o),e(Qv,sS),e(sS,n4o),e(Qv,s4o),e(R,l4o),e(R,Hv),e(Hv,aZ),e(aZ,i4o),e(Hv,d4o),e(Hv,lS),e(lS,m4o),e(Hv,f4o),e(R,c4o),e(R,Uv),e(Uv,nZ),e(nZ,g4o),e(Uv,h4o),e(Uv,iS),e(iS,u4o),e(Uv,p4o),e(R,_4o),e(R,Jv),e(Jv,sZ),e(sZ,v4o),e(Jv,b4o),e(Jv,dS),e(dS,T4o),e(Jv,F4o),e(R,M4o),e(R,Kv),e(Kv,lZ),e(lZ,E4o),e(Kv,C4o),e(Kv,mS),e(mS,y4o),e(Kv,w4o),e(R,A4o),e(R,Yv),e(Yv,iZ),e(iZ,x4o),e(Yv,L4o),e(Yv,fS),e(fS,B4o),e(Yv,k4o),e(R,R4o),e(R,Zv),e(Zv,dZ),e(dZ,S4o),e(Zv,P4o),e(Zv,cS),e(cS,$4o),e(Zv,I4o),e(R,j4o),e(R,e1),e(e1,mZ),e(mZ,N4o),e(e1,D4o),e(e1,gS),e(gS,G4o),e(e1,O4o),e(R,q4o),e(R,o1),e(o1,fZ),e(fZ,z4o),e(o1,X4o),e(o1,hS),e(hS,W4o),e(o1,V4o),e(R,Q4o),e(R,t1),e(t1,cZ),e(cZ,H4o),e(t1,U4o),e(t1,uS),e(uS,J4o),e(t1,K4o),e(R,Y4o),e(R,r1),e(r1,gZ),e(gZ,Z4o),e(r1,e5o),e(r1,pS),e(pS,o5o),e(r1,t5o),e(R,r5o),e(R,a1),e(a1,hZ),e(hZ,a5o),e(a1,n5o),e(a1,_S),e(_S,s5o),e(a1,l5o),e(R,i5o),e(R,n1),e(n1,uZ),e(uZ,d5o),e(n1,m5o),e(n1,vS),e(vS,f5o),e(n1,c5o),e(R,g5o),e(R,s1),e(s1,pZ),e(pZ,h5o),e(s1,u5o),e(s1,bS),e(bS,p5o),e(s1,_5o),e(R,v5o),e(R,l1),e(l1,_Z),e(_Z,b5o),e(l1,T5o),e(l1,TS),e(TS,F5o),e(l1,M5o),e(R,E5o),e(R,i1),e(i1,vZ),e(vZ,C5o),e(i1,y5o),e(i1,FS),e(FS,w5o),e(i1,A5o),e(R,x5o),e(R,d1),e(d1,bZ),e(bZ,L5o),e(d1,B5o),e(d1,MS),e(MS,k5o),e(d1,R5o),e(R,S5o),e(R,m1),e(m1,TZ),e(TZ,P5o),e(m1,$5o),e(m1,ES),e(ES,I5o),e(m1,j5o),e(R,N5o),e(R,f1),e(f1,FZ),e(FZ,D5o),e(f1,G5o),e(f1,CS),e(CS,O5o),e(f1,q5o),e(R,z5o),e(R,c1),e(c1,MZ),e(MZ,X5o),e(c1,W5o),e(c1,yS),e(yS,V5o),e(c1,Q5o),e(R,H5o),e(R,g1),e(g1,EZ),e(EZ,U5o),e(g1,J5o),e(g1,wS),e(wS,K5o),e(g1,Y5o),e(R,Z5o),e(R,h1),e(h1,CZ),e(CZ,e0o),e(h1,o0o),e(h1,AS),e(AS,t0o),e(h1,r0o),e(R,a0o),e(R,u1),e(u1,yZ),e(yZ,n0o),e(u1,s0o),e(u1,xS),e(xS,l0o),e(u1,i0o),e(R,d0o),e(R,p1),e(p1,wZ),e(wZ,m0o),e(p1,f0o),e(p1,LS),e(LS,c0o),e(p1,g0o),e(R,h0o),e(R,_1),e(_1,AZ),e(AZ,u0o),e(_1,p0o),e(_1,BS),e(BS,_0o),e(_1,v0o),e(R,b0o),e(R,v1),e(v1,xZ),e(xZ,T0o),e(v1,F0o),e(v1,kS),e(kS,M0o),e(v1,E0o),e(R,C0o),e(R,b1),e(b1,LZ),e(LZ,y0o),e(b1,w0o),e(b1,RS),e(RS,A0o),e(b1,x0o),e(R,L0o),e(R,T1),e(T1,BZ),e(BZ,B0o),e(T1,k0o),e(T1,SS),e(SS,R0o),e(T1,S0o),e(je,P0o),e(je,F1),e(F1,$0o),e(F1,kZ),e(kZ,I0o),e(F1,j0o),e(F1,RZ),e(RZ,N0o),e(je,D0o),e(je,SZ),e(SZ,G0o),e(je,O0o),g(yE,je,null),v(d,YEe,_),v(d,Ii,_),e(Ii,M1),e(M1,PZ),g(wE,PZ,null),e(Ii,q0o),e(Ii,$Z),e($Z,z0o),v(d,ZEe,_),v(d,Xo,_),g(AE,Xo,null),e(Xo,X0o),e(Xo,ji),e(ji,W0o),e(ji,IZ),e(IZ,V0o),e(ji,Q0o
),e(ji,jZ),e(jZ,H0o),e(ji,U0o),e(Xo,J0o),e(Xo,xE),e(xE,K0o),e(xE,NZ),e(NZ,Y0o),e(xE,Z0o),e(Xo,eTo),e(Xo,It),g(LE,It,null),e(It,oTo),e(It,DZ),e(DZ,tTo),e(It,rTo),e(It,Ni),e(Ni,aTo),e(Ni,GZ),e(GZ,nTo),e(Ni,sTo),e(Ni,OZ),e(OZ,lTo),e(Ni,iTo),e(It,dTo),e(It,qZ),e(qZ,mTo),e(It,fTo),g(BE,It,null),e(Xo,cTo),e(Xo,Ne),g(kE,Ne,null),e(Ne,gTo),e(Ne,zZ),e(zZ,hTo),e(Ne,uTo),e(Ne,Sa),e(Sa,pTo),e(Sa,XZ),e(XZ,_To),e(Sa,vTo),e(Sa,WZ),e(WZ,bTo),e(Sa,TTo),e(Sa,VZ),e(VZ,FTo),e(Sa,MTo),e(Ne,ETo),e(Ne,QZ),e(QZ,E1),e(E1,HZ),e(HZ,CTo),e(E1,yTo),e(E1,PS),e(PS,wTo),e(E1,ATo),e(Ne,xTo),e(Ne,C1),e(C1,LTo),e(C1,UZ),e(UZ,BTo),e(C1,kTo),e(C1,JZ),e(JZ,RTo),e(Ne,STo),e(Ne,KZ),e(KZ,PTo),e(Ne,$To),g(RE,Ne,null),v(d,eCe,_),v(d,Di,_),e(Di,y1),e(y1,YZ),g(SE,YZ,null),e(Di,ITo),e(Di,ZZ),e(ZZ,jTo),v(d,oCe,_),v(d,Wo,_),g(PE,Wo,null),e(Wo,NTo),e(Wo,Gi),e(Gi,DTo),e(Gi,eee),e(eee,GTo),e(Gi,OTo),e(Gi,oee),e(oee,qTo),e(Gi,zTo),e(Wo,XTo),e(Wo,$E),e($E,WTo),e($E,tee),e(tee,VTo),e($E,QTo),e(Wo,HTo),e(Wo,jt),g(IE,jt,null),e(jt,UTo),e(jt,ree),e(ree,JTo),e(jt,KTo),e(jt,Oi),e(Oi,YTo),e(Oi,aee),e(aee,ZTo),e(Oi,eFo),e(Oi,nee),e(nee,oFo),e(Oi,tFo),e(jt,rFo),e(jt,see),e(see,aFo),e(jt,nFo),g(jE,jt,null),e(Wo,sFo),e(Wo,De),g(NE,De,null),e(De,lFo),e(De,lee),e(lee,iFo),e(De,dFo),e(De,Pa),e(Pa,mFo),e(Pa,iee),e(iee,fFo),e(Pa,cFo),e(Pa,dee),e(dee,gFo),e(Pa,hFo),e(Pa,mee),e(mee,uFo),e(Pa,pFo),e(De,_Fo),e(De,Vo),e(Vo,w1),e(w1,fee),e(fee,vFo),e(w1,bFo),e(w1,$S),e($S,TFo),e(w1,FFo),e(Vo,MFo),e(Vo,is),e(is,cee),e(cee,EFo),e(is,CFo),e(is,IS),e(IS,yFo),e(is,wFo),e(is,jS),e(jS,AFo),e(is,xFo),e(Vo,LFo),e(Vo,A1),e(A1,gee),e(gee,BFo),e(A1,kFo),e(A1,NS),e(NS,RFo),e(A1,SFo),e(Vo,PFo),e(Vo,Xr),e(Xr,hee),e(hee,$Fo),e(Xr,IFo),e(Xr,DS),e(DS,jFo),e(Xr,NFo),e(Xr,GS),e(GS,DFo),e(Xr,GFo),e(Xr,OS),e(OS,OFo),e(Xr,qFo),e(Vo,zFo),e(Vo,x1),e(x1,uee),e(uee,XFo),e(x1,WFo),e(x1,qS),e(qS,VFo),e(x1,QFo),e(Vo,HFo),e(Vo,L1),e(L1,pee),e(pee,UFo),e(L1,JFo),e(L1,zS),e(zS,KFo),e(L1,YFo),e(De,ZFo),e(De,B1),e(B1,eMo),e(B1,_ee),e(_ee,oMo),e(B1,tMo),e(B1,vee),e(vee,rMo),e(De,aMo),e(De,bee),e(bee,nMo),e(De,sMo),g(DE,De,null),v(d,tCe,_),v(d,qi,_),e(qi,k1),e(k1,Tee),g(GE,Tee,null),e(qi,lMo),e(qi,Fee),e(Fee,iMo),v(d,rCe,_),v(d,Qo,_),g(OE,Qo,null),e(Qo,dMo),e(Qo,zi),e(zi,mMo),e(zi,Mee),e(Mee,fMo),e(zi,cMo),e(zi,Eee),e(Eee,gMo),e(zi,hMo),e(Qo,uMo),e(Qo,qE),e(qE,pMo),e(qE,Cee),e(Cee,_Mo),e(qE,vMo),e(Qo,bMo),e(Qo,Nt),g(zE,Nt,null),e(Nt,TMo),e(Nt,yee),e(yee,FMo),e(Nt,MMo),e(Nt,Xi),e(Xi,EMo),e(Xi,wee),e(wee,CMo),e(Xi,yMo),e(Xi,Aee),e(Aee,wMo),e(Xi,AMo),e(Nt,xMo),e(Nt,xee),e(xee,LMo),e(Nt,BMo),g(XE,Nt,null),e(Qo,kMo),e(Qo,Ge),g(WE,Ge,null),e(Ge,RMo),e(Ge,Lee),e(Lee,SMo),e(Ge,PMo),e(Ge,$a),e($a,$Mo),e($a,Bee),e(Bee,IMo),e($a,jMo),e($a,kee),e(kee,NMo),e($a,DMo),e($a,Ree),e(Ree,GMo),e($a,OMo),e(Ge,qMo),e(Ge,See),e(See,R1),e(R1,Pee),e(Pee,zMo),e(R1,XMo),e(R1,XS),e(XS,WMo),e(R1,VMo),e(Ge,QMo),e(Ge,S1),e(S1,HMo),e(S1,$ee),e($ee,UMo),e(S1,JMo),e(S1,Iee),e(Iee,KMo),e(Ge,YMo),e(Ge,jee),e(jee,ZMo),e(Ge,eEo),g(VE,Ge,null),v(d,aCe,_),v(d,Wi,_),e(Wi,P1),e(P1,Nee),g(QE,Nee,null),e(Wi,oEo),e(Wi,Dee),e(Dee,tEo),v(d,nCe,_),v(d,Ho,_),g(HE,Ho,null),e(Ho,rEo),e(Ho,Vi),e(Vi,aEo),e(Vi,Gee),e(Gee,nEo),e(Vi,sEo),e(Vi,Oee),e(Oee,lEo),e(Vi,iEo),e(Ho,dEo),e(Ho,UE),e(UE,mEo),e(UE,qee),e(qee,fEo),e(UE,cEo),e(Ho,gEo),e(Ho,Dt),g(JE,Dt,null),e(Dt,hEo),e(Dt,zee),e(zee,uEo),e(Dt,pEo),e(Dt,Qi),e(Qi,_Eo),e(Qi,Xee),e(Xee,vEo),e(Qi,bEo),e(Qi,Wee),e(Wee,TEo),e(Qi,FEo),e(Dt,MEo),e(Dt,Vee),e(Vee,EEo),e(Dt,CEo),g(KE,Dt,null),e(Ho,yEo),e(Ho,Oe),g(YE,Oe,null),e(Oe,wEo),e(Oe,Qee),e(Qee,AEo),e(Oe,xEo),e(Oe,Ia),e(Ia,LEo),e(Ia,Hee),e(Hee,BEo),e(Ia,kEo),
e(Ia,Uee),e(Uee,REo),e(Ia,SEo),e(Ia,Jee),e(Jee,PEo),e(Ia,$Eo),e(Oe,IEo),e(Oe,Ke),e(Ke,$1),e($1,Kee),e(Kee,jEo),e($1,NEo),e($1,WS),e(WS,DEo),e($1,GEo),e(Ke,OEo),e(Ke,I1),e(I1,Yee),e(Yee,qEo),e(I1,zEo),e(I1,VS),e(VS,XEo),e(I1,WEo),e(Ke,VEo),e(Ke,j1),e(j1,Zee),e(Zee,QEo),e(j1,HEo),e(j1,QS),e(QS,UEo),e(j1,JEo),e(Ke,KEo),e(Ke,N1),e(N1,eoe),e(eoe,YEo),e(N1,ZEo),e(N1,HS),e(HS,eCo),e(N1,oCo),e(Ke,tCo),e(Ke,D1),e(D1,ooe),e(ooe,rCo),e(D1,aCo),e(D1,US),e(US,nCo),e(D1,sCo),e(Ke,lCo),e(Ke,G1),e(G1,toe),e(toe,iCo),e(G1,dCo),e(G1,JS),e(JS,mCo),e(G1,fCo),e(Ke,cCo),e(Ke,O1),e(O1,roe),e(roe,gCo),e(O1,hCo),e(O1,KS),e(KS,uCo),e(O1,pCo),e(Oe,_Co),e(Oe,q1),e(q1,vCo),e(q1,aoe),e(aoe,bCo),e(q1,TCo),e(q1,noe),e(noe,FCo),e(Oe,MCo),e(Oe,soe),e(soe,ECo),e(Oe,CCo),g(ZE,Oe,null),v(d,sCe,_),v(d,Hi,_),e(Hi,z1),e(z1,loe),g(eC,loe,null),e(Hi,yCo),e(Hi,ioe),e(ioe,wCo),v(d,lCe,_),v(d,Uo,_),g(oC,Uo,null),e(Uo,ACo),e(Uo,Ui),e(Ui,xCo),e(Ui,doe),e(doe,LCo),e(Ui,BCo),e(Ui,moe),e(moe,kCo),e(Ui,RCo),e(Uo,SCo),e(Uo,tC),e(tC,PCo),e(tC,foe),e(foe,$Co),e(tC,ICo),e(Uo,jCo),e(Uo,Gt),g(rC,Gt,null),e(Gt,NCo),e(Gt,coe),e(coe,DCo),e(Gt,GCo),e(Gt,Ji),e(Ji,OCo),e(Ji,goe),e(goe,qCo),e(Ji,zCo),e(Ji,hoe),e(hoe,XCo),e(Ji,WCo),e(Gt,VCo),e(Gt,uoe),e(uoe,QCo),e(Gt,HCo),g(aC,Gt,null),e(Uo,UCo),e(Uo,qe),g(nC,qe,null),e(qe,JCo),e(qe,poe),e(poe,KCo),e(qe,YCo),e(qe,ja),e(ja,ZCo),e(ja,_oe),e(_oe,e3o),e(ja,o3o),e(ja,voe),e(voe,t3o),e(ja,r3o),e(ja,boe),e(boe,a3o),e(ja,n3o),e(qe,s3o),e(qe,Ki),e(Ki,X1),e(X1,Toe),e(Toe,l3o),e(X1,i3o),e(X1,YS),e(YS,d3o),e(X1,m3o),e(Ki,f3o),e(Ki,W1),e(W1,Foe),e(Foe,c3o),e(W1,g3o),e(W1,ZS),e(ZS,h3o),e(W1,u3o),e(Ki,p3o),e(Ki,V1),e(V1,Moe),e(Moe,_3o),e(V1,v3o),e(V1,eP),e(eP,b3o),e(V1,T3o),e(qe,F3o),e(qe,Q1),e(Q1,M3o),e(Q1,Eoe),e(Eoe,E3o),e(Q1,C3o),e(Q1,Coe),e(Coe,y3o),e(qe,w3o),e(qe,yoe),e(yoe,A3o),e(qe,x3o),g(sC,qe,null),v(d,iCe,_),v(d,Yi,_),e(Yi,H1),e(H1,woe),g(lC,woe,null),e(Yi,L3o),e(Yi,Aoe),e(Aoe,B3o),v(d,dCe,_),v(d,Jo,_),g(iC,Jo,null),e(Jo,k3o),e(Jo,Zi),e(Zi,R3o),e(Zi,xoe),e(xoe,S3o),e(Zi,P3o),e(Zi,Loe),e(Loe,$3o),e(Zi,I3o),e(Jo,j3o),e(Jo,dC),e(dC,N3o),e(dC,Boe),e(Boe,D3o),e(dC,G3o),e(Jo,O3o),e(Jo,Ot),g(mC,Ot,null),e(Ot,q3o),e(Ot,koe),e(koe,z3o),e(Ot,X3o),e(Ot,ed),e(ed,W3o),e(ed,Roe),e(Roe,V3o),e(ed,Q3o),e(ed,Soe),e(Soe,H3o),e(ed,U3o),e(Ot,J3o),e(Ot,Poe),e(Poe,K3o),e(Ot,Y3o),g(fC,Ot,null),e(Jo,Z3o),e(Jo,ze),g(cC,ze,null),e(ze,eyo),e(ze,$oe),e($oe,oyo),e(ze,tyo),e(ze,Na),e(Na,ryo),e(Na,Ioe),e(Ioe,ayo),e(Na,nyo),e(Na,joe),e(joe,syo),e(Na,lyo),e(Na,Noe),e(Noe,iyo),e(Na,dyo),e(ze,myo),e(ze,Ye),e(Ye,U1),e(U1,Doe),e(Doe,fyo),e(U1,cyo),e(U1,oP),e(oP,gyo),e(U1,hyo),e(Ye,uyo),e(Ye,J1),e(J1,Goe),e(Goe,pyo),e(J1,_yo),e(J1,tP),e(tP,vyo),e(J1,byo),e(Ye,Tyo),e(Ye,K1),e(K1,Ooe),e(Ooe,Fyo),e(K1,Myo),e(K1,rP),e(rP,Eyo),e(K1,Cyo),e(Ye,yyo),e(Ye,Y1),e(Y1,qoe),e(qoe,wyo),e(Y1,Ayo),e(Y1,aP),e(aP,xyo),e(Y1,Lyo),e(Ye,Byo),e(Ye,Z1),e(Z1,zoe),e(zoe,kyo),e(Z1,Ryo),e(Z1,nP),e(nP,Syo),e(Z1,Pyo),e(Ye,$yo),e(Ye,e2),e(e2,Xoe),e(Xoe,Iyo),e(e2,jyo),e(e2,sP),e(sP,Nyo),e(e2,Dyo),e(Ye,Gyo),e(Ye,o2),e(o2,Woe),e(Woe,Oyo),e(o2,qyo),e(o2,lP),e(lP,zyo),e(o2,Xyo),e(ze,Wyo),e(ze,t2),e(t2,Vyo),e(t2,Voe),e(Voe,Qyo),e(t2,Hyo),e(t2,Qoe),e(Qoe,Uyo),e(ze,Jyo),e(ze,Hoe),e(Hoe,Kyo),e(ze,Yyo),g(gC,ze,null),v(d,mCe,_),v(d,od,_),e(od,r2),e(r2,Uoe),g(hC,Uoe,null),e(od,Zyo),e(od,Joe),e(Joe,ewo),v(d,fCe,_),v(d,Ko,_),g(uC,Ko,null),e(Ko,owo),e(Ko,td),e(td,two),e(td,Koe),e(Koe,rwo),e(td,awo),e(td,Yoe),e(Yoe,nwo),e(td,swo),e(Ko,lwo),e(Ko,pC),e(pC,iwo),e(pC,Zoe),e(Zoe,dwo),e(pC,mwo),e(Ko,fwo),e(Ko,qt),g(_C,qt,null),e(qt,cwo),e(qt,ete),e(ete,gwo),e(qt,hwo),e(qt,rd),e(rd,uwo),e(rd,ote),e(ot
e,pwo),e(rd,_wo),e(rd,tte),e(tte,vwo),e(rd,bwo),e(qt,Two),e(qt,rte),e(rte,Fwo),e(qt,Mwo),g(vC,qt,null),e(Ko,Ewo),e(Ko,Xe),g(bC,Xe,null),e(Xe,Cwo),e(Xe,ate),e(ate,ywo),e(Xe,wwo),e(Xe,Da),e(Da,Awo),e(Da,nte),e(nte,xwo),e(Da,Lwo),e(Da,ste),e(ste,Bwo),e(Da,kwo),e(Da,lte),e(lte,Rwo),e(Da,Swo),e(Xe,Pwo),e(Xe,TC),e(TC,a2),e(a2,ite),e(ite,$wo),e(a2,Iwo),e(a2,iP),e(iP,jwo),e(a2,Nwo),e(TC,Dwo),e(TC,n2),e(n2,dte),e(dte,Gwo),e(n2,Owo),e(n2,dP),e(dP,qwo),e(n2,zwo),e(Xe,Xwo),e(Xe,s2),e(s2,Wwo),e(s2,mte),e(mte,Vwo),e(s2,Qwo),e(s2,fte),e(fte,Hwo),e(Xe,Uwo),e(Xe,cte),e(cte,Jwo),e(Xe,Kwo),g(FC,Xe,null),v(d,cCe,_),v(d,ad,_),e(ad,l2),e(l2,gte),g(MC,gte,null),e(ad,Ywo),e(ad,hte),e(hte,Zwo),v(d,gCe,_),v(d,Yo,_),g(EC,Yo,null),e(Yo,eAo),e(Yo,nd),e(nd,oAo),e(nd,ute),e(ute,tAo),e(nd,rAo),e(nd,pte),e(pte,aAo),e(nd,nAo),e(Yo,sAo),e(Yo,CC),e(CC,lAo),e(CC,_te),e(_te,iAo),e(CC,dAo),e(Yo,mAo),e(Yo,zt),g(yC,zt,null),e(zt,fAo),e(zt,vte),e(vte,cAo),e(zt,gAo),e(zt,sd),e(sd,hAo),e(sd,bte),e(bte,uAo),e(sd,pAo),e(sd,Tte),e(Tte,_Ao),e(sd,vAo),e(zt,bAo),e(zt,Fte),e(Fte,TAo),e(zt,FAo),g(wC,zt,null),e(Yo,MAo),e(Yo,We),g(AC,We,null),e(We,EAo),e(We,Mte),e(Mte,CAo),e(We,yAo),e(We,Ga),e(Ga,wAo),e(Ga,Ete),e(Ete,AAo),e(Ga,xAo),e(Ga,Cte),e(Cte,LAo),e(Ga,BAo),e(Ga,yte),e(yte,kAo),e(Ga,RAo),e(We,SAo),e(We,ld),e(ld,i2),e(i2,wte),e(wte,PAo),e(i2,$Ao),e(i2,mP),e(mP,IAo),e(i2,jAo),e(ld,NAo),e(ld,d2),e(d2,Ate),e(Ate,DAo),e(d2,GAo),e(d2,fP),e(fP,OAo),e(d2,qAo),e(ld,zAo),e(ld,m2),e(m2,xte),e(xte,XAo),e(m2,WAo),e(m2,cP),e(cP,VAo),e(m2,QAo),e(We,HAo),e(We,f2),e(f2,UAo),e(f2,Lte),e(Lte,JAo),e(f2,KAo),e(f2,Bte),e(Bte,YAo),e(We,ZAo),e(We,kte),e(kte,e7o),e(We,o7o),g(xC,We,null),v(d,hCe,_),v(d,id,_),e(id,c2),e(c2,Rte),g(LC,Rte,null),e(id,t7o),e(id,Ste),e(Ste,r7o),v(d,uCe,_),v(d,Zo,_),g(BC,Zo,null),e(Zo,a7o),e(Zo,dd),e(dd,n7o),e(dd,Pte),e(Pte,s7o),e(dd,l7o),e(dd,$te),e($te,i7o),e(dd,d7o),e(Zo,m7o),e(Zo,kC),e(kC,f7o),e(kC,Ite),e(Ite,c7o),e(kC,g7o),e(Zo,h7o),e(Zo,Xt),g(RC,Xt,null),e(Xt,u7o),e(Xt,jte),e(jte,p7o),e(Xt,_7o),e(Xt,md),e(md,v7o),e(md,Nte),e(Nte,b7o),e(md,T7o),e(md,Dte),e(Dte,F7o),e(md,M7o),e(Xt,E7o),e(Xt,Gte),e(Gte,C7o),e(Xt,y7o),g(SC,Xt,null),e(Zo,w7o),e(Zo,Ve),g(PC,Ve,null),e(Ve,A7o),e(Ve,Ote),e(Ote,x7o),e(Ve,L7o),e(Ve,Oa),e(Oa,B7o),e(Oa,qte),e(qte,k7o),e(Oa,R7o),e(Oa,zte),e(zte,S7o),e(Oa,P7o),e(Oa,Xte),e(Xte,$7o),e(Oa,I7o),e(Ve,j7o),e(Ve,Wte),e(Wte,g2),e(g2,Vte),e(Vte,N7o),e(g2,D7o),e(g2,gP),e(gP,G7o),e(g2,O7o),e(Ve,q7o),e(Ve,h2),e(h2,z7o),e(h2,Qte),e(Qte,X7o),e(h2,W7o),e(h2,Hte),e(Hte,V7o),e(Ve,Q7o),e(Ve,Ute),e(Ute,H7o),e(Ve,U7o),g($C,Ve,null),v(d,pCe,_),v(d,fd,_),e(fd,u2),e(u2,Jte),g(IC,Jte,null),e(fd,J7o),e(fd,Kte),e(Kte,K7o),v(d,_Ce,_),v(d,et,_),g(jC,et,null),e(et,Y7o),e(et,cd),e(cd,Z7o),e(cd,Yte),e(Yte,exo),e(cd,oxo),e(cd,Zte),e(Zte,txo),e(cd,rxo),e(et,axo),e(et,NC),e(NC,nxo),e(NC,ere),e(ere,sxo),e(NC,lxo),e(et,ixo),e(et,Wt),g(DC,Wt,null),e(Wt,dxo),e(Wt,ore),e(ore,mxo),e(Wt,fxo),e(Wt,gd),e(gd,cxo),e(gd,tre),e(tre,gxo),e(gd,hxo),e(gd,rre),e(rre,uxo),e(gd,pxo),e(Wt,_xo),e(Wt,are),e(are,vxo),e(Wt,bxo),g(GC,Wt,null),e(et,Txo),e(et,Qe),g(OC,Qe,null),e(Qe,Fxo),e(Qe,nre),e(nre,Mxo),e(Qe,Exo),e(Qe,qa),e(qa,Cxo),e(qa,sre),e(sre,yxo),e(qa,wxo),e(qa,lre),e(lre,Axo),e(qa,xxo),e(qa,ire),e(ire,Lxo),e(qa,Bxo),e(Qe,kxo),e(Qe,dre),e(dre,p2),e(p2,mre),e(mre,Rxo),e(p2,Sxo),e(p2,hP),e(hP,Pxo),e(p2,$xo),e(Qe,Ixo),e(Qe,_2),e(_2,jxo),e(_2,fre),e(fre,Nxo),e(_2,Dxo),e(_2,cre),e(cre,Gxo),e(Qe,Oxo),e(Qe,gre),e(gre,qxo),e(Qe,zxo),g(qC,Qe,null),v(d,vCe,_),v(d,hd,_),e(hd,v2),e(v2,hre),g(zC,hre,null),e(hd,Xxo),e(hd,ure),e(ure,Wxo),v(d,bCe,_),v(d,ot,_),g(XC,ot,null),e(ot
,Vxo),e(ot,ud),e(ud,Qxo),e(ud,pre),e(pre,Hxo),e(ud,Uxo),e(ud,_re),e(_re,Jxo),e(ud,Kxo),e(ot,Yxo),e(ot,WC),e(WC,Zxo),e(WC,vre),e(vre,e6o),e(WC,o6o),e(ot,t6o),e(ot,Vt),g(VC,Vt,null),e(Vt,r6o),e(Vt,bre),e(bre,a6o),e(Vt,n6o),e(Vt,pd),e(pd,s6o),e(pd,Tre),e(Tre,l6o),e(pd,i6o),e(pd,Fre),e(Fre,d6o),e(pd,m6o),e(Vt,f6o),e(Vt,Mre),e(Mre,c6o),e(Vt,g6o),g(QC,Vt,null),e(ot,h6o),e(ot,ro),g(HC,ro,null),e(ro,u6o),e(ro,Ere),e(Ere,p6o),e(ro,_6o),e(ro,za),e(za,v6o),e(za,Cre),e(Cre,b6o),e(za,T6o),e(za,yre),e(yre,F6o),e(za,M6o),e(za,wre),e(wre,E6o),e(za,C6o),e(ro,y6o),e(ro,L),e(L,b2),e(b2,Are),e(Are,w6o),e(b2,A6o),e(b2,uP),e(uP,x6o),e(b2,L6o),e(L,B6o),e(L,T2),e(T2,xre),e(xre,k6o),e(T2,R6o),e(T2,pP),e(pP,S6o),e(T2,P6o),e(L,$6o),e(L,F2),e(F2,Lre),e(Lre,I6o),e(F2,j6o),e(F2,_P),e(_P,N6o),e(F2,D6o),e(L,G6o),e(L,M2),e(M2,Bre),e(Bre,O6o),e(M2,q6o),e(M2,vP),e(vP,z6o),e(M2,X6o),e(L,W6o),e(L,E2),e(E2,kre),e(kre,V6o),e(E2,Q6o),e(E2,bP),e(bP,H6o),e(E2,U6o),e(L,J6o),e(L,C2),e(C2,Rre),e(Rre,K6o),e(C2,Y6o),e(C2,TP),e(TP,Z6o),e(C2,e8o),e(L,o8o),e(L,y2),e(y2,Sre),e(Sre,t8o),e(y2,r8o),e(y2,FP),e(FP,a8o),e(y2,n8o),e(L,s8o),e(L,w2),e(w2,Pre),e(Pre,l8o),e(w2,i8o),e(w2,MP),e(MP,d8o),e(w2,m8o),e(L,f8o),e(L,A2),e(A2,$re),e($re,c8o),e(A2,g8o),e(A2,EP),e(EP,h8o),e(A2,u8o),e(L,p8o),e(L,x2),e(x2,Ire),e(Ire,_8o),e(x2,v8o),e(x2,CP),e(CP,b8o),e(x2,T8o),e(L,F8o),e(L,L2),e(L2,jre),e(jre,M8o),e(L2,E8o),e(L2,yP),e(yP,C8o),e(L2,y8o),e(L,w8o),e(L,B2),e(B2,Nre),e(Nre,A8o),e(B2,x8o),e(B2,wP),e(wP,L8o),e(B2,B8o),e(L,k8o),e(L,k2),e(k2,Dre),e(Dre,R8o),e(k2,S8o),e(k2,AP),e(AP,P8o),e(k2,$8o),e(L,I8o),e(L,R2),e(R2,Gre),e(Gre,j8o),e(R2,N8o),e(R2,xP),e(xP,D8o),e(R2,G8o),e(L,O8o),e(L,ds),e(ds,Ore),e(Ore,q8o),e(ds,z8o),e(ds,LP),e(LP,X8o),e(ds,W8o),e(ds,BP),e(BP,V8o),e(ds,Q8o),e(L,H8o),e(L,S2),e(S2,qre),e(qre,U8o),e(S2,J8o),e(S2,kP),e(kP,K8o),e(S2,Y8o),e(L,Z8o),e(L,P2),e(P2,zre),e(zre,eLo),e(P2,oLo),e(P2,RP),e(RP,tLo),e(P2,rLo),e(L,aLo),e(L,$2),e($2,Xre),e(Xre,nLo),e($2,sLo),e($2,SP),e(SP,lLo),e($2,iLo),e(L,dLo),e(L,I2),e(I2,Wre),e(Wre,mLo),e(I2,fLo),e(I2,PP),e(PP,cLo),e(I2,gLo),e(L,hLo),e(L,j2),e(j2,Vre),e(Vre,uLo),e(j2,pLo),e(j2,$P),e($P,_Lo),e(j2,vLo),e(L,bLo),e(L,N2),e(N2,Qre),e(Qre,TLo),e(N2,FLo),e(N2,IP),e(IP,MLo),e(N2,ELo),e(L,CLo),e(L,D2),e(D2,Hre),e(Hre,yLo),e(D2,wLo),e(D2,jP),e(jP,ALo),e(D2,xLo),e(L,LLo),e(L,G2),e(G2,Ure),e(Ure,BLo),e(G2,kLo),e(G2,NP),e(NP,RLo),e(G2,SLo),e(L,PLo),e(L,O2),e(O2,Jre),e(Jre,$Lo),e(O2,ILo),e(O2,DP),e(DP,jLo),e(O2,NLo),e(L,DLo),e(L,q2),e(q2,Kre),e(Kre,GLo),e(q2,OLo),e(q2,GP),e(GP,qLo),e(q2,zLo),e(L,XLo),e(L,z2),e(z2,Yre),e(Yre,WLo),e(z2,VLo),e(z2,OP),e(OP,QLo),e(z2,HLo),e(L,ULo),e(L,X2),e(X2,Zre),e(Zre,JLo),e(X2,KLo),e(X2,qP),e(qP,YLo),e(X2,ZLo),e(L,eBo),e(L,W2),e(W2,eae),e(eae,oBo),e(W2,tBo),e(W2,zP),e(zP,rBo),e(W2,aBo),e(L,nBo),e(L,V2),e(V2,oae),e(oae,sBo),e(V2,lBo),e(V2,XP),e(XP,iBo),e(V2,dBo),e(L,mBo),e(L,Q2),e(Q2,tae),e(tae,fBo),e(Q2,cBo),e(Q2,WP),e(WP,gBo),e(Q2,hBo),e(L,uBo),e(L,H2),e(H2,rae),e(rae,pBo),e(H2,_Bo),e(H2,VP),e(VP,vBo),e(H2,bBo),e(L,TBo),e(L,U2),e(U2,aae),e(aae,FBo),e(U2,MBo),e(U2,QP),e(QP,EBo),e(U2,CBo),e(L,yBo),e(L,J2),e(J2,nae),e(nae,wBo),e(J2,ABo),e(J2,HP),e(HP,xBo),e(J2,LBo),e(L,BBo),e(L,K2),e(K2,sae),e(sae,kBo),e(K2,RBo),e(K2,UP),e(UP,SBo),e(K2,PBo),e(L,$Bo),e(L,Y2),e(Y2,lae),e(lae,IBo),e(Y2,jBo),e(Y2,JP),e(JP,NBo),e(Y2,DBo),e(L,GBo),e(L,Z2),e(Z2,iae),e(iae,OBo),e(Z2,qBo),e(Z2,KP),e(KP,zBo),e(Z2,XBo),e(L,WBo),e(L,eb),e(eb,dae),e(dae,VBo),e(eb,QBo),e(eb,YP),e(YP,HBo),e(eb,UBo),e(L,JBo),e(L,ob),e(ob,mae),e(mae,KBo),e(ob,YBo),e(ob,ZP),e(ZP,ZBo),e(ob,e9o),e(L,o9o),e(L,tb),e(tb,fae),e(fae,t9o),e(tb,r9o),
e(tb,e$),e(e$,a9o),e(tb,n9o),e(ro,s9o),e(ro,cae),e(cae,l9o),e(ro,i9o),g(UC,ro,null),v(d,TCe,_),v(d,_d,_),e(_d,rb),e(rb,gae),g(JC,gae,null),e(_d,d9o),e(_d,hae),e(hae,m9o),v(d,FCe,_),v(d,tt,_),g(KC,tt,null),e(tt,f9o),e(tt,vd),e(vd,c9o),e(vd,uae),e(uae,g9o),e(vd,h9o),e(vd,pae),e(pae,u9o),e(vd,p9o),e(tt,_9o),e(tt,YC),e(YC,v9o),e(YC,_ae),e(_ae,b9o),e(YC,T9o),e(tt,F9o),e(tt,Qt),g(ZC,Qt,null),e(Qt,M9o),e(Qt,vae),e(vae,E9o),e(Qt,C9o),e(Qt,bd),e(bd,y9o),e(bd,bae),e(bae,w9o),e(bd,A9o),e(bd,Tae),e(Tae,x9o),e(bd,L9o),e(Qt,B9o),e(Qt,Fae),e(Fae,k9o),e(Qt,R9o),g(e3,Qt,null),e(tt,S9o),e(tt,ao),g(o3,ao,null),e(ao,P9o),e(ao,Mae),e(Mae,$9o),e(ao,I9o),e(ao,Xa),e(Xa,j9o),e(Xa,Eae),e(Eae,N9o),e(Xa,D9o),e(Xa,Cae),e(Cae,G9o),e(Xa,O9o),e(Xa,yae),e(yae,q9o),e(Xa,z9o),e(ao,X9o),e(ao,V),e(V,ab),e(ab,wae),e(wae,W9o),e(ab,V9o),e(ab,o$),e(o$,Q9o),e(ab,H9o),e(V,U9o),e(V,nb),e(nb,Aae),e(Aae,J9o),e(nb,K9o),e(nb,t$),e(t$,Y9o),e(nb,Z9o),e(V,eko),e(V,sb),e(sb,xae),e(xae,oko),e(sb,tko),e(sb,r$),e(r$,rko),e(sb,ako),e(V,nko),e(V,lb),e(lb,Lae),e(Lae,sko),e(lb,lko),e(lb,a$),e(a$,iko),e(lb,dko),e(V,mko),e(V,ib),e(ib,Bae),e(Bae,fko),e(ib,cko),e(ib,n$),e(n$,gko),e(ib,hko),e(V,uko),e(V,db),e(db,kae),e(kae,pko),e(db,_ko),e(db,s$),e(s$,vko),e(db,bko),e(V,Tko),e(V,mb),e(mb,Rae),e(Rae,Fko),e(mb,Mko),e(mb,l$),e(l$,Eko),e(mb,Cko),e(V,yko),e(V,fb),e(fb,Sae),e(Sae,wko),e(fb,Ako),e(fb,i$),e(i$,xko),e(fb,Lko),e(V,Bko),e(V,cb),e(cb,Pae),e(Pae,kko),e(cb,Rko),e(cb,d$),e(d$,Sko),e(cb,Pko),e(V,$ko),e(V,gb),e(gb,$ae),e($ae,Iko),e(gb,jko),e(gb,m$),e(m$,Nko),e(gb,Dko),e(V,Gko),e(V,hb),e(hb,Iae),e(Iae,Oko),e(hb,qko),e(hb,f$),e(f$,zko),e(hb,Xko),e(V,Wko),e(V,ub),e(ub,jae),e(jae,Vko),e(ub,Qko),e(ub,c$),e(c$,Hko),e(ub,Uko),e(V,Jko),e(V,pb),e(pb,Nae),e(Nae,Kko),e(pb,Yko),e(pb,g$),e(g$,Zko),e(pb,eRo),e(V,oRo),e(V,_b),e(_b,Dae),e(Dae,tRo),e(_b,rRo),e(_b,h$),e(h$,aRo),e(_b,nRo),e(V,sRo),e(V,vb),e(vb,Gae),e(Gae,lRo),e(vb,iRo),e(vb,u$),e(u$,dRo),e(vb,mRo),e(V,fRo),e(V,bb),e(bb,Oae),e(Oae,cRo),e(bb,gRo),e(bb,p$),e(p$,hRo),e(bb,uRo),e(V,pRo),e(V,Tb),e(Tb,qae),e(qae,_Ro),e(Tb,vRo),e(Tb,_$),e(_$,bRo),e(Tb,TRo),e(V,FRo),e(V,Fb),e(Fb,zae),e(zae,MRo),e(Fb,ERo),e(Fb,v$),e(v$,CRo),e(Fb,yRo),e(V,wRo),e(V,Mb),e(Mb,Xae),e(Xae,ARo),e(Mb,xRo),e(Mb,b$),e(b$,LRo),e(Mb,BRo),e(V,kRo),e(V,Eb),e(Eb,Wae),e(Wae,RRo),e(Eb,SRo),e(Eb,T$),e(T$,PRo),e(Eb,$Ro),e(V,IRo),e(V,Cb),e(Cb,Vae),e(Vae,jRo),e(Cb,NRo),e(Cb,F$),e(F$,DRo),e(Cb,GRo),e(V,ORo),e(V,yb),e(yb,Qae),e(Qae,qRo),e(yb,zRo),e(yb,M$),e(M$,XRo),e(yb,WRo),e(ao,VRo),e(ao,Hae),e(Hae,QRo),e(ao,HRo),g(t3,ao,null),v(d,MCe,_),v(d,Td,_),e(Td,wb),e(wb,Uae),g(r3,Uae,null),e(Td,URo),e(Td,Jae),e(Jae,JRo),v(d,ECe,_),v(d,rt,_),g(a3,rt,null),e(rt,KRo),e(rt,Fd),e(Fd,YRo),e(Fd,Kae),e(Kae,ZRo),e(Fd,eSo),e(Fd,Yae),e(Yae,oSo),e(Fd,tSo),e(rt,rSo),e(rt,n3),e(n3,aSo),e(n3,Zae),e(Zae,nSo),e(n3,sSo),e(rt,lSo),e(rt,Ht),g(s3,Ht,null),e(Ht,iSo),e(Ht,ene),e(ene,dSo),e(Ht,mSo),e(Ht,Md),e(Md,fSo),e(Md,one),e(one,cSo),e(Md,gSo),e(Md,tne),e(tne,hSo),e(Md,uSo),e(Ht,pSo),e(Ht,rne),e(rne,_So),e(Ht,vSo),g(l3,Ht,null),e(rt,bSo),e(rt,no),g(i3,no,null),e(no,TSo),e(no,ane),e(ane,FSo),e(no,MSo),e(no,Wa),e(Wa,ESo),e(Wa,nne),e(nne,CSo),e(Wa,ySo),e(Wa,sne),e(sne,wSo),e(Wa,ASo),e(Wa,lne),e(lne,xSo),e(Wa,LSo),e(no,BSo),e(no,ce),e(ce,Ab),e(Ab,ine),e(ine,kSo),e(Ab,RSo),e(Ab,E$),e(E$,SSo),e(Ab,PSo),e(ce,$So),e(ce,xb),e(xb,dne),e(dne,ISo),e(xb,jSo),e(xb,C$),e(C$,NSo),e(xb,DSo),e(ce,GSo),e(ce,Lb),e(Lb,mne),e(mne,OSo),e(Lb,qSo),e(Lb,y$),e(y$,zSo),e(Lb,XSo),e(ce,WSo),e(ce,Bb),e(Bb,fne),e(fne,VSo),e(Bb,QSo),e(Bb,w$),e(w$,HSo),e(Bb,USo),e(ce,JSo),e(ce,kb),e(kb,cne),e(cne,KSo),e(kb,YSo),e(
kb,A$),e(A$,ZSo),e(kb,ePo),e(ce,oPo),e(ce,Rb),e(Rb,gne),e(gne,tPo),e(Rb,rPo),e(Rb,x$),e(x$,aPo),e(Rb,nPo),e(ce,sPo),e(ce,Sb),e(Sb,hne),e(hne,lPo),e(Sb,iPo),e(Sb,L$),e(L$,dPo),e(Sb,mPo),e(ce,fPo),e(ce,Pb),e(Pb,une),e(une,cPo),e(Pb,gPo),e(Pb,B$),e(B$,hPo),e(Pb,uPo),e(ce,pPo),e(ce,$b),e($b,pne),e(pne,_Po),e($b,vPo),e($b,k$),e(k$,bPo),e($b,TPo),e(ce,FPo),e(ce,Ib),e(Ib,_ne),e(_ne,MPo),e(Ib,EPo),e(Ib,R$),e(R$,CPo),e(Ib,yPo),e(no,wPo),e(no,vne),e(vne,APo),e(no,xPo),g(d3,no,null),v(d,CCe,_),v(d,Ed,_),e(Ed,jb),e(jb,bne),g(m3,bne,null),e(Ed,LPo),e(Ed,Tne),e(Tne,BPo),v(d,yCe,_),v(d,at,_),g(f3,at,null),e(at,kPo),e(at,Cd),e(Cd,RPo),e(Cd,Fne),e(Fne,SPo),e(Cd,PPo),e(Cd,Mne),e(Mne,$Po),e(Cd,IPo),e(at,jPo),e(at,c3),e(c3,NPo),e(c3,Ene),e(Ene,DPo),e(c3,GPo),e(at,OPo),e(at,Ut),g(g3,Ut,null),e(Ut,qPo),e(Ut,Cne),e(Cne,zPo),e(Ut,XPo),e(Ut,yd),e(yd,WPo),e(yd,yne),e(yne,VPo),e(yd,QPo),e(yd,wne),e(wne,HPo),e(yd,UPo),e(Ut,JPo),e(Ut,Ane),e(Ane,KPo),e(Ut,YPo),g(h3,Ut,null),e(at,ZPo),e(at,so),g(u3,so,null),e(so,e$o),e(so,xne),e(xne,o$o),e(so,t$o),e(so,Va),e(Va,r$o),e(Va,Lne),e(Lne,a$o),e(Va,n$o),e(Va,Bne),e(Bne,s$o),e(Va,l$o),e(Va,kne),e(kne,i$o),e(Va,d$o),e(so,m$o),e(so,Rne),e(Rne,Nb),e(Nb,Sne),e(Sne,f$o),e(Nb,c$o),e(Nb,S$),e(S$,g$o),e(Nb,h$o),e(so,u$o),e(so,Pne),e(Pne,p$o),e(so,_$o),g(p3,so,null),v(d,wCe,_),v(d,wd,_),e(wd,Db),e(Db,$ne),g(_3,$ne,null),e(wd,v$o),e(wd,Ine),e(Ine,b$o),v(d,ACe,_),v(d,nt,_),g(v3,nt,null),e(nt,T$o),e(nt,Ad),e(Ad,F$o),e(Ad,jne),e(jne,M$o),e(Ad,E$o),e(Ad,Nne),e(Nne,C$o),e(Ad,y$o),e(nt,w$o),e(nt,b3),e(b3,A$o),e(b3,Dne),e(Dne,x$o),e(b3,L$o),e(nt,B$o),e(nt,Jt),g(T3,Jt,null),e(Jt,k$o),e(Jt,Gne),e(Gne,R$o),e(Jt,S$o),e(Jt,xd),e(xd,P$o),e(xd,One),e(One,$$o),e(xd,I$o),e(xd,qne),e(qne,j$o),e(xd,N$o),e(Jt,D$o),e(Jt,zne),e(zne,G$o),e(Jt,O$o),g(F3,Jt,null),e(nt,q$o),e(nt,lo),g(M3,lo,null),e(lo,z$o),e(lo,Xne),e(Xne,X$o),e(lo,W$o),e(lo,Qa),e(Qa,V$o),e(Qa,Wne),e(Wne,Q$o),e(Qa,H$o),e(Qa,Vne),e(Vne,U$o),e(Qa,J$o),e(Qa,Qne),e(Qne,K$o),e(Qa,Y$o),e(lo,Z$o),e(lo,K),e(K,Gb),e(Gb,Hne),e(Hne,eIo),e(Gb,oIo),e(Gb,P$),e(P$,tIo),e(Gb,rIo),e(K,aIo),e(K,Ob),e(Ob,Une),e(Une,nIo),e(Ob,sIo),e(Ob,$$),e($$,lIo),e(Ob,iIo),e(K,dIo),e(K,qb),e(qb,Jne),e(Jne,mIo),e(qb,fIo),e(qb,I$),e(I$,cIo),e(qb,gIo),e(K,hIo),e(K,zb),e(zb,Kne),e(Kne,uIo),e(zb,pIo),e(zb,j$),e(j$,_Io),e(zb,vIo),e(K,bIo),e(K,Xb),e(Xb,Yne),e(Yne,TIo),e(Xb,FIo),e(Xb,N$),e(N$,MIo),e(Xb,EIo),e(K,CIo),e(K,Wb),e(Wb,Zne),e(Zne,yIo),e(Wb,wIo),e(Wb,D$),e(D$,AIo),e(Wb,xIo),e(K,LIo),e(K,Vb),e(Vb,ese),e(ese,BIo),e(Vb,kIo),e(Vb,G$),e(G$,RIo),e(Vb,SIo),e(K,PIo),e(K,Qb),e(Qb,ose),e(ose,$Io),e(Qb,IIo),e(Qb,O$),e(O$,jIo),e(Qb,NIo),e(K,DIo),e(K,Hb),e(Hb,tse),e(tse,GIo),e(Hb,OIo),e(Hb,q$),e(q$,qIo),e(Hb,zIo),e(K,XIo),e(K,Ub),e(Ub,rse),e(rse,WIo),e(Ub,VIo),e(Ub,z$),e(z$,QIo),e(Ub,HIo),e(K,UIo),e(K,Jb),e(Jb,ase),e(ase,JIo),e(Jb,KIo),e(Jb,X$),e(X$,YIo),e(Jb,ZIo),e(K,ejo),e(K,Kb),e(Kb,nse),e(nse,ojo),e(Kb,tjo),e(Kb,W$),e(W$,rjo),e(Kb,ajo),e(K,njo),e(K,Yb),e(Yb,sse),e(sse,sjo),e(Yb,ljo),e(Yb,V$),e(V$,ijo),e(Yb,djo),e(K,mjo),e(K,Zb),e(Zb,lse),e(lse,fjo),e(Zb,cjo),e(Zb,Q$),e(Q$,gjo),e(Zb,hjo),e(K,ujo),e(K,e4),e(e4,ise),e(ise,pjo),e(e4,_jo),e(e4,H$),e(H$,vjo),e(e4,bjo),e(K,Tjo),e(K,o4),e(o4,dse),e(dse,Fjo),e(o4,Mjo),e(o4,U$),e(U$,Ejo),e(o4,Cjo),e(K,yjo),e(K,t4),e(t4,mse),e(mse,wjo),e(t4,Ajo),e(t4,J$),e(J$,xjo),e(t4,Ljo),e(K,Bjo),e(K,r4),e(r4,fse),e(fse,kjo),e(r4,Rjo),e(r4,K$),e(K$,Sjo),e(r4,Pjo),e(K,$jo),e(K,a4),e(a4,cse),e(cse,Ijo),e(a4,jjo),e(a4,Y$),e(Y$,Njo),e(a4,Djo),e(K,Gjo),e(K,n4),e(n4,gse),e(gse,Ojo),e(n4,qjo),e(n4,Z$),e(Z$,zjo),e(n4,Xjo),e(lo,Wjo),e(lo,hse),e(hse,Vjo),e(lo,Qjo),g(E3,lo
,null),v(d,xCe,_),v(d,Ld,_),e(Ld,s4),e(s4,use),g(C3,use,null),e(Ld,Hjo),e(Ld,pse),e(pse,Ujo),v(d,LCe,_),v(d,st,_),g(y3,st,null),e(st,Jjo),e(st,Bd),e(Bd,Kjo),e(Bd,_se),e(_se,Yjo),e(Bd,Zjo),e(Bd,vse),e(vse,eNo),e(Bd,oNo),e(st,tNo),e(st,w3),e(w3,rNo),e(w3,bse),e(bse,aNo),e(w3,nNo),e(st,sNo),e(st,Kt),g(A3,Kt,null),e(Kt,lNo),e(Kt,Tse),e(Tse,iNo),e(Kt,dNo),e(Kt,kd),e(kd,mNo),e(kd,Fse),e(Fse,fNo),e(kd,cNo),e(kd,Mse),e(Mse,gNo),e(kd,hNo),e(Kt,uNo),e(Kt,Ese),e(Ese,pNo),e(Kt,_No),g(x3,Kt,null),e(st,vNo),e(st,io),g(L3,io,null),e(io,bNo),e(io,Cse),e(Cse,TNo),e(io,FNo),e(io,Ha),e(Ha,MNo),e(Ha,yse),e(yse,ENo),e(Ha,CNo),e(Ha,wse),e(wse,yNo),e(Ha,wNo),e(Ha,Ase),e(Ase,ANo),e(Ha,xNo),e(io,LNo),e(io,ge),e(ge,l4),e(l4,xse),e(xse,BNo),e(l4,kNo),e(l4,eI),e(eI,RNo),e(l4,SNo),e(ge,PNo),e(ge,i4),e(i4,Lse),e(Lse,$No),e(i4,INo),e(i4,oI),e(oI,jNo),e(i4,NNo),e(ge,DNo),e(ge,d4),e(d4,Bse),e(Bse,GNo),e(d4,ONo),e(d4,tI),e(tI,qNo),e(d4,zNo),e(ge,XNo),e(ge,m4),e(m4,kse),e(kse,WNo),e(m4,VNo),e(m4,rI),e(rI,QNo),e(m4,HNo),e(ge,UNo),e(ge,f4),e(f4,Rse),e(Rse,JNo),e(f4,KNo),e(f4,aI),e(aI,YNo),e(f4,ZNo),e(ge,eDo),e(ge,c4),e(c4,Sse),e(Sse,oDo),e(c4,tDo),e(c4,nI),e(nI,rDo),e(c4,aDo),e(ge,nDo),e(ge,g4),e(g4,Pse),e(Pse,sDo),e(g4,lDo),e(g4,sI),e(sI,iDo),e(g4,dDo),e(ge,mDo),e(ge,h4),e(h4,$se),e($se,fDo),e(h4,cDo),e(h4,lI),e(lI,gDo),e(h4,hDo),e(ge,uDo),e(ge,u4),e(u4,Ise),e(Ise,pDo),e(u4,_Do),e(u4,iI),e(iI,vDo),e(u4,bDo),e(ge,TDo),e(ge,p4),e(p4,jse),e(jse,FDo),e(p4,MDo),e(p4,dI),e(dI,EDo),e(p4,CDo),e(io,yDo),e(io,Nse),e(Nse,wDo),e(io,ADo),g(B3,io,null),v(d,BCe,_),v(d,Rd,_),e(Rd,_4),e(_4,Dse),g(k3,Dse,null),e(Rd,xDo),e(Rd,Gse),e(Gse,LDo),v(d,kCe,_),v(d,lt,_),g(R3,lt,null),e(lt,BDo),e(lt,Sd),e(Sd,kDo),e(Sd,Ose),e(Ose,RDo),e(Sd,SDo),e(Sd,qse),e(qse,PDo),e(Sd,$Do),e(lt,IDo),e(lt,S3),e(S3,jDo),e(S3,zse),e(zse,NDo),e(S3,DDo),e(lt,GDo),e(lt,Yt),g(P3,Yt,null),e(Yt,ODo),e(Yt,Xse),e(Xse,qDo),e(Yt,zDo),e(Yt,Pd),e(Pd,XDo),e(Pd,Wse),e(Wse,WDo),e(Pd,VDo),e(Pd,Vse),e(Vse,QDo),e(Pd,HDo),e(Yt,UDo),e(Yt,Qse),e(Qse,JDo),e(Yt,KDo),g($3,Yt,null),e(lt,YDo),e(lt,mo),g(I3,mo,null),e(mo,ZDo),e(mo,Hse),e(Hse,eGo),e(mo,oGo),e(mo,Ua),e(Ua,tGo),e(Ua,Use),e(Use,rGo),e(Ua,aGo),e(Ua,Jse),e(Jse,nGo),e(Ua,sGo),e(Ua,Kse),e(Kse,lGo),e(Ua,iGo),e(mo,dGo),e(mo,O),e(O,v4),e(v4,Yse),e(Yse,mGo),e(v4,fGo),e(v4,mI),e(mI,cGo),e(v4,gGo),e(O,hGo),e(O,b4),e(b4,Zse),e(Zse,uGo),e(b4,pGo),e(b4,fI),e(fI,_Go),e(b4,vGo),e(O,bGo),e(O,T4),e(T4,ele),e(ele,TGo),e(T4,FGo),e(T4,cI),e(cI,MGo),e(T4,EGo),e(O,CGo),e(O,F4),e(F4,ole),e(ole,yGo),e(F4,wGo),e(F4,gI),e(gI,AGo),e(F4,xGo),e(O,LGo),e(O,M4),e(M4,tle),e(tle,BGo),e(M4,kGo),e(M4,hI),e(hI,RGo),e(M4,SGo),e(O,PGo),e(O,E4),e(E4,rle),e(rle,$Go),e(E4,IGo),e(E4,uI),e(uI,jGo),e(E4,NGo),e(O,DGo),e(O,C4),e(C4,ale),e(ale,GGo),e(C4,OGo),e(C4,pI),e(pI,qGo),e(C4,zGo),e(O,XGo),e(O,y4),e(y4,nle),e(nle,WGo),e(y4,VGo),e(y4,_I),e(_I,QGo),e(y4,HGo),e(O,UGo),e(O,w4),e(w4,sle),e(sle,JGo),e(w4,KGo),e(w4,vI),e(vI,YGo),e(w4,ZGo),e(O,eOo),e(O,A4),e(A4,lle),e(lle,oOo),e(A4,tOo),e(A4,bI),e(bI,rOo),e(A4,aOo),e(O,nOo),e(O,x4),e(x4,ile),e(ile,sOo),e(x4,lOo),e(x4,TI),e(TI,iOo),e(x4,dOo),e(O,mOo),e(O,L4),e(L4,dle),e(dle,fOo),e(L4,cOo),e(L4,FI),e(FI,gOo),e(L4,hOo),e(O,uOo),e(O,B4),e(B4,mle),e(mle,pOo),e(B4,_Oo),e(B4,MI),e(MI,vOo),e(B4,bOo),e(O,TOo),e(O,k4),e(k4,fle),e(fle,FOo),e(k4,MOo),e(k4,EI),e(EI,EOo),e(k4,COo),e(O,yOo),e(O,R4),e(R4,cle),e(cle,wOo),e(R4,AOo),e(R4,CI),e(CI,xOo),e(R4,LOo),e(O,BOo),e(O,S4),e(S4,gle),e(gle,kOo),e(S4,ROo),e(S4,yI),e(yI,SOo),e(S4,POo),e(O,$Oo),e(O,P4),e(P4,hle),e(hle,IOo),e(P4,jOo),e(P4,wI),e(wI,NOo),e(P4,DOo),e(O,GOo),e(O,$4),e($4,ule),e(ule,OOo),e(
$4,qOo),e($4,AI),e(AI,zOo),e($4,XOo),e(O,WOo),e(O,I4),e(I4,ple),e(ple,VOo),e(I4,QOo),e(I4,xI),e(xI,HOo),e(I4,UOo),e(O,JOo),e(O,j4),e(j4,_le),e(_le,KOo),e(j4,YOo),e(j4,LI),e(LI,ZOo),e(j4,eqo),e(O,oqo),e(O,N4),e(N4,vle),e(vle,tqo),e(N4,rqo),e(N4,BI),e(BI,aqo),e(N4,nqo),e(O,sqo),e(O,D4),e(D4,ble),e(ble,lqo),e(D4,iqo),e(D4,kI),e(kI,dqo),e(D4,mqo),e(O,fqo),e(O,G4),e(G4,Tle),e(Tle,cqo),e(G4,gqo),e(G4,RI),e(RI,hqo),e(G4,uqo),e(O,pqo),e(O,O4),e(O4,Fle),e(Fle,_qo),e(O4,vqo),e(O4,SI),e(SI,bqo),e(O4,Tqo),e(O,Fqo),e(O,q4),e(q4,Mle),e(Mle,Mqo),e(q4,Eqo),e(q4,PI),e(PI,Cqo),e(q4,yqo),e(mo,wqo),e(mo,Ele),e(Ele,Aqo),e(mo,xqo),g(j3,mo,null),v(d,RCe,_),v(d,$d,_),e($d,z4),e(z4,Cle),g(N3,Cle,null),e($d,Lqo),e($d,yle),e(yle,Bqo),v(d,SCe,_),v(d,it,_),g(D3,it,null),e(it,kqo),e(it,Id),e(Id,Rqo),e(Id,wle),e(wle,Sqo),e(Id,Pqo),e(Id,Ale),e(Ale,$qo),e(Id,Iqo),e(it,jqo),e(it,G3),e(G3,Nqo),e(G3,xle),e(xle,Dqo),e(G3,Gqo),e(it,Oqo),e(it,Zt),g(O3,Zt,null),e(Zt,qqo),e(Zt,Lle),e(Lle,zqo),e(Zt,Xqo),e(Zt,jd),e(jd,Wqo),e(jd,Ble),e(Ble,Vqo),e(jd,Qqo),e(jd,kle),e(kle,Hqo),e(jd,Uqo),e(Zt,Jqo),e(Zt,Rle),e(Rle,Kqo),e(Zt,Yqo),g(q3,Zt,null),e(it,Zqo),e(it,fo),g(z3,fo,null),e(fo,ezo),e(fo,Sle),e(Sle,ozo),e(fo,tzo),e(fo,Ja),e(Ja,rzo),e(Ja,Ple),e(Ple,azo),e(Ja,nzo),e(Ja,$le),e($le,szo),e(Ja,lzo),e(Ja,Ile),e(Ile,izo),e(Ja,dzo),e(fo,mzo),e(fo,re),e(re,X4),e(X4,jle),e(jle,fzo),e(X4,czo),e(X4,$I),e($I,gzo),e(X4,hzo),e(re,uzo),e(re,W4),e(W4,Nle),e(Nle,pzo),e(W4,_zo),e(W4,II),e(II,vzo),e(W4,bzo),e(re,Tzo),e(re,V4),e(V4,Dle),e(Dle,Fzo),e(V4,Mzo),e(V4,jI),e(jI,Ezo),e(V4,Czo),e(re,yzo),e(re,Q4),e(Q4,Gle),e(Gle,wzo),e(Q4,Azo),e(Q4,NI),e(NI,xzo),e(Q4,Lzo),e(re,Bzo),e(re,H4),e(H4,Ole),e(Ole,kzo),e(H4,Rzo),e(H4,DI),e(DI,Szo),e(H4,Pzo),e(re,$zo),e(re,U4),e(U4,qle),e(qle,Izo),e(U4,jzo),e(U4,GI),e(GI,Nzo),e(U4,Dzo),e(re,Gzo),e(re,J4),e(J4,zle),e(zle,Ozo),e(J4,qzo),e(J4,OI),e(OI,zzo),e(J4,Xzo),e(re,Wzo),e(re,K4),e(K4,Xle),e(Xle,Vzo),e(K4,Qzo),e(K4,qI),e(qI,Hzo),e(K4,Uzo),e(re,Jzo),e(re,Y4),e(Y4,Wle),e(Wle,Kzo),e(Y4,Yzo),e(Y4,zI),e(zI,Zzo),e(Y4,eXo),e(re,oXo),e(re,Z4),e(Z4,Vle),e(Vle,tXo),e(Z4,rXo),e(Z4,XI),e(XI,aXo),e(Z4,nXo),e(re,sXo),e(re,e5),e(e5,Qle),e(Qle,lXo),e(e5,iXo),e(e5,WI),e(WI,dXo),e(e5,mXo),e(re,fXo),e(re,o5),e(o5,Hle),e(Hle,cXo),e(o5,gXo),e(o5,VI),e(VI,hXo),e(o5,uXo),e(re,pXo),e(re,t5),e(t5,Ule),e(Ule,_Xo),e(t5,vXo),e(t5,QI),e(QI,bXo),e(t5,TXo),e(re,FXo),e(re,r5),e(r5,Jle),e(Jle,MXo),e(r5,EXo),e(r5,HI),e(HI,CXo),e(r5,yXo),e(re,wXo),e(re,a5),e(a5,Kle),e(Kle,AXo),e(a5,xXo),e(a5,UI),e(UI,LXo),e(a5,BXo),e(re,kXo),e(re,n5),e(n5,Yle),e(Yle,RXo),e(n5,SXo),e(n5,JI),e(JI,PXo),e(n5,$Xo),e(re,IXo),e(re,s5),e(s5,Zle),e(Zle,jXo),e(s5,NXo),e(s5,KI),e(KI,DXo),e(s5,GXo),e(fo,OXo),e(fo,eie),e(eie,qXo),e(fo,zXo),g(X3,fo,null),v(d,PCe,_),v(d,Nd,_),e(Nd,l5),e(l5,oie),g(W3,oie,null),e(Nd,XXo),e(Nd,tie),e(tie,WXo),v(d,$Ce,_),v(d,dt,_),g(V3,dt,null),e(dt,VXo),e(dt,Dd),e(Dd,QXo),e(Dd,rie),e(rie,HXo),e(Dd,UXo),e(Dd,aie),e(aie,JXo),e(Dd,KXo),e(dt,YXo),e(dt,Q3),e(Q3,ZXo),e(Q3,nie),e(nie,eWo),e(Q3,oWo),e(dt,tWo),e(dt,er),g(H3,er,null),e(er,rWo),e(er,sie),e(sie,aWo),e(er,nWo),e(er,Gd),e(Gd,sWo),e(Gd,lie),e(lie,lWo),e(Gd,iWo),e(Gd,iie),e(iie,dWo),e(Gd,mWo),e(er,fWo),e(er,die),e(die,cWo),e(er,gWo),g(U3,er,null),e(dt,hWo),e(dt,co),g(J3,co,null),e(co,uWo),e(co,mie),e(mie,pWo),e(co,_Wo),e(co,Ka),e(Ka,vWo),e(Ka,fie),e(fie,bWo),e(Ka,TWo),e(Ka,cie),e(cie,FWo),e(Ka,MWo),e(Ka,gie),e(gie,EWo),e(Ka,CWo),e(co,yWo),e(co,hie),e(hie,i5),e(i5,uie),e(uie,wWo),e(i5,AWo),e(i5,YI),e(YI,xWo),e(i5,LWo),e(co,BWo),e(co,pie),e(pie,kWo),e(co,RWo),g(K3,co,null),v(d,ICe,_),v(d,Od,_),e(Od,d5),e(d5,_
ie),g(Y3,_ie,null),e(Od,SWo),e(Od,vie),e(vie,PWo),v(d,jCe,_),v(d,mt,_),g(Z3,mt,null),e(mt,$Wo),e(mt,qd),e(qd,IWo),e(qd,bie),e(bie,jWo),e(qd,NWo),e(qd,Tie),e(Tie,DWo),e(qd,GWo),e(mt,OWo),e(mt,ey),e(ey,qWo),e(ey,Fie),e(Fie,zWo),e(ey,XWo),e(mt,WWo),e(mt,or),g(oy,or,null),e(or,VWo),e(or,Mie),e(Mie,QWo),e(or,HWo),e(or,zd),e(zd,UWo),e(zd,Eie),e(Eie,JWo),e(zd,KWo),e(zd,Cie),e(Cie,YWo),e(zd,ZWo),e(or,eVo),e(or,yie),e(yie,oVo),e(or,tVo),g(ty,or,null),e(mt,rVo),e(mt,go),g(ry,go,null),e(go,aVo),e(go,wie),e(wie,nVo),e(go,sVo),e(go,Ya),e(Ya,lVo),e(Ya,Aie),e(Aie,iVo),e(Ya,dVo),e(Ya,xie),e(xie,mVo),e(Ya,fVo),e(Ya,Lie),e(Lie,cVo),e(Ya,gVo),e(go,hVo),e(go,Y),e(Y,m5),e(m5,Bie),e(Bie,uVo),e(m5,pVo),e(m5,ZI),e(ZI,_Vo),e(m5,vVo),e(Y,bVo),e(Y,f5),e(f5,kie),e(kie,TVo),e(f5,FVo),e(f5,ej),e(ej,MVo),e(f5,EVo),e(Y,CVo),e(Y,c5),e(c5,Rie),e(Rie,yVo),e(c5,wVo),e(c5,oj),e(oj,AVo),e(c5,xVo),e(Y,LVo),e(Y,g5),e(g5,Sie),e(Sie,BVo),e(g5,kVo),e(g5,tj),e(tj,RVo),e(g5,SVo),e(Y,PVo),e(Y,h5),e(h5,Pie),e(Pie,$Vo),e(h5,IVo),e(h5,rj),e(rj,jVo),e(h5,NVo),e(Y,DVo),e(Y,u5),e(u5,$ie),e($ie,GVo),e(u5,OVo),e(u5,aj),e(aj,qVo),e(u5,zVo),e(Y,XVo),e(Y,p5),e(p5,Iie),e(Iie,WVo),e(p5,VVo),e(p5,nj),e(nj,QVo),e(p5,HVo),e(Y,UVo),e(Y,_5),e(_5,jie),e(jie,JVo),e(_5,KVo),e(_5,sj),e(sj,YVo),e(_5,ZVo),e(Y,eQo),e(Y,v5),e(v5,Nie),e(Nie,oQo),e(v5,tQo),e(v5,lj),e(lj,rQo),e(v5,aQo),e(Y,nQo),e(Y,b5),e(b5,Die),e(Die,sQo),e(b5,lQo),e(b5,ij),e(ij,iQo),e(b5,dQo),e(Y,mQo),e(Y,T5),e(T5,Gie),e(Gie,fQo),e(T5,cQo),e(T5,dj),e(dj,gQo),e(T5,hQo),e(Y,uQo),e(Y,F5),e(F5,Oie),e(Oie,pQo),e(F5,_Qo),e(F5,mj),e(mj,vQo),e(F5,bQo),e(Y,TQo),e(Y,M5),e(M5,qie),e(qie,FQo),e(M5,MQo),e(M5,fj),e(fj,EQo),e(M5,CQo),e(Y,yQo),e(Y,E5),e(E5,zie),e(zie,wQo),e(E5,AQo),e(E5,cj),e(cj,xQo),e(E5,LQo),e(Y,BQo),e(Y,C5),e(C5,Xie),e(Xie,kQo),e(C5,RQo),e(C5,gj),e(gj,SQo),e(C5,PQo),e(Y,$Qo),e(Y,y5),e(y5,Wie),e(Wie,IQo),e(y5,jQo),e(y5,hj),e(hj,NQo),e(y5,DQo),e(Y,GQo),e(Y,w5),e(w5,Vie),e(Vie,OQo),e(w5,qQo),e(w5,uj),e(uj,zQo),e(w5,XQo),e(Y,WQo),e(Y,A5),e(A5,Qie),e(Qie,VQo),e(A5,QQo),e(A5,pj),e(pj,HQo),e(A5,UQo),e(Y,JQo),e(Y,x5),e(x5,Hie),e(Hie,KQo),e(x5,YQo),e(x5,_j),e(_j,ZQo),e(x5,eHo),e(Y,oHo),e(Y,L5),e(L5,Uie),e(Uie,tHo),e(L5,rHo),e(L5,vj),e(vj,aHo),e(L5,nHo),e(go,sHo),e(go,Jie),e(Jie,lHo),e(go,iHo),g(ay,go,null),v(d,NCe,_),v(d,Xd,_),e(Xd,B5),e(B5,Kie),g(ny,Kie,null),e(Xd,dHo),e(Xd,Yie),e(Yie,mHo),v(d,DCe,_),v(d,ft,_),g(sy,ft,null),e(ft,fHo),e(ft,Wd),e(Wd,cHo),e(Wd,Zie),e(Zie,gHo),e(Wd,hHo),e(Wd,ede),e(ede,uHo),e(Wd,pHo),e(ft,_Ho),e(ft,ly),e(ly,vHo),e(ly,ode),e(ode,bHo),e(ly,THo),e(ft,FHo),e(ft,tr),g(iy,tr,null),e(tr,MHo),e(tr,tde),e(tde,EHo),e(tr,CHo),e(tr,Vd),e(Vd,yHo),e(Vd,rde),e(rde,wHo),e(Vd,AHo),e(Vd,ade),e(ade,xHo),e(Vd,LHo),e(tr,BHo),e(tr,nde),e(nde,kHo),e(tr,RHo),g(dy,tr,null),e(ft,SHo),e(ft,ho),g(my,ho,null),e(ho,PHo),e(ho,sde),e(sde,$Ho),e(ho,IHo),e(ho,Za),e(Za,jHo),e(Za,lde),e(lde,NHo),e(Za,DHo),e(Za,ide),e(ide,GHo),e(Za,OHo),e(Za,dde),e(dde,qHo),e(Za,zHo),e(ho,XHo),e(ho,Z),e(Z,k5),e(k5,mde),e(mde,WHo),e(k5,VHo),e(k5,bj),e(bj,QHo),e(k5,HHo),e(Z,UHo),e(Z,R5),e(R5,fde),e(fde,JHo),e(R5,KHo),e(R5,Tj),e(Tj,YHo),e(R5,ZHo),e(Z,eUo),e(Z,S5),e(S5,cde),e(cde,oUo),e(S5,tUo),e(S5,Fj),e(Fj,rUo),e(S5,aUo),e(Z,nUo),e(Z,P5),e(P5,gde),e(gde,sUo),e(P5,lUo),e(P5,Mj),e(Mj,iUo),e(P5,dUo),e(Z,mUo),e(Z,$5),e($5,hde),e(hde,fUo),e($5,cUo),e($5,Ej),e(Ej,gUo),e($5,hUo),e(Z,uUo),e(Z,I5),e(I5,ude),e(ude,pUo),e(I5,_Uo),e(I5,Cj),e(Cj,vUo),e(I5,bUo),e(Z,TUo),e(Z,j5),e(j5,pde),e(pde,FUo),e(j5,MUo),e(j5,yj),e(yj,EUo),e(j5,CUo),e(Z,yUo),e(Z,N5),e(N5,_de),e(_de,wUo),e(N5,AUo),e(N5,wj),e(wj,xUo),e(N5,LUo),e(Z,BUo),e(Z,D5),e(D5,vde)
,e(vde,kUo),e(D5,RUo),e(D5,Aj),e(Aj,SUo),e(D5,PUo),e(Z,$Uo),e(Z,G5),e(G5,bde),e(bde,IUo),e(G5,jUo),e(G5,xj),e(xj,NUo),e(G5,DUo),e(Z,GUo),e(Z,O5),e(O5,Tde),e(Tde,OUo),e(O5,qUo),e(O5,Lj),e(Lj,zUo),e(O5,XUo),e(Z,WUo),e(Z,q5),e(q5,Fde),e(Fde,VUo),e(q5,QUo),e(q5,Bj),e(Bj,HUo),e(q5,UUo),e(Z,JUo),e(Z,z5),e(z5,Mde),e(Mde,KUo),e(z5,YUo),e(z5,kj),e(kj,ZUo),e(z5,eJo),e(Z,oJo),e(Z,X5),e(X5,Ede),e(Ede,tJo),e(X5,rJo),e(X5,Rj),e(Rj,aJo),e(X5,nJo),e(Z,sJo),e(Z,W5),e(W5,Cde),e(Cde,lJo),e(W5,iJo),e(W5,Sj),e(Sj,dJo),e(W5,mJo),e(Z,fJo),e(Z,V5),e(V5,yde),e(yde,cJo),e(V5,gJo),e(V5,Pj),e(Pj,hJo),e(V5,uJo),e(Z,pJo),e(Z,Q5),e(Q5,wde),e(wde,_Jo),e(Q5,vJo),e(Q5,$j),e($j,bJo),e(Q5,TJo),e(Z,FJo),e(Z,H5),e(H5,Ade),e(Ade,MJo),e(H5,EJo),e(H5,Ij),e(Ij,CJo),e(H5,yJo),e(Z,wJo),e(Z,U5),e(U5,xde),e(xde,AJo),e(U5,xJo),e(U5,jj),e(jj,LJo),e(U5,BJo),e(ho,kJo),e(ho,Lde),e(Lde,RJo),e(ho,SJo),g(fy,ho,null),v(d,GCe,_),v(d,Qd,_),e(Qd,J5),e(J5,Bde),g(cy,Bde,null),e(Qd,PJo),e(Qd,kde),e(kde,$Jo),v(d,OCe,_),v(d,ct,_),g(gy,ct,null),e(ct,IJo),e(ct,Hd),e(Hd,jJo),e(Hd,Rde),e(Rde,NJo),e(Hd,DJo),e(Hd,Sde),e(Sde,GJo),e(Hd,OJo),e(ct,qJo),e(ct,hy),e(hy,zJo),e(hy,Pde),e(Pde,XJo),e(hy,WJo),e(ct,VJo),e(ct,rr),g(uy,rr,null),e(rr,QJo),e(rr,$de),e($de,HJo),e(rr,UJo),e(rr,Ud),e(Ud,JJo),e(Ud,Ide),e(Ide,KJo),e(Ud,YJo),e(Ud,jde),e(jde,ZJo),e(Ud,eKo),e(rr,oKo),e(rr,Nde),e(Nde,tKo),e(rr,rKo),g(py,rr,null),e(ct,aKo),e(ct,uo),g(_y,uo,null),e(uo,nKo),e(uo,Dde),e(Dde,sKo),e(uo,lKo),e(uo,en),e(en,iKo),e(en,Gde),e(Gde,dKo),e(en,mKo),e(en,Ode),e(Ode,fKo),e(en,cKo),e(en,qde),e(qde,gKo),e(en,hKo),e(uo,uKo),e(uo,Q),e(Q,K5),e(K5,zde),e(zde,pKo),e(K5,_Ko),e(K5,Nj),e(Nj,vKo),e(K5,bKo),e(Q,TKo),e(Q,Y5),e(Y5,Xde),e(Xde,FKo),e(Y5,MKo),e(Y5,Dj),e(Dj,EKo),e(Y5,CKo),e(Q,yKo),e(Q,Z5),e(Z5,Wde),e(Wde,wKo),e(Z5,AKo),e(Z5,Gj),e(Gj,xKo),e(Z5,LKo),e(Q,BKo),e(Q,e0),e(e0,Vde),e(Vde,kKo),e(e0,RKo),e(e0,Oj),e(Oj,SKo),e(e0,PKo),e(Q,$Ko),e(Q,o0),e(o0,Qde),e(Qde,IKo),e(o0,jKo),e(o0,qj),e(qj,NKo),e(o0,DKo),e(Q,GKo),e(Q,t0),e(t0,Hde),e(Hde,OKo),e(t0,qKo),e(t0,zj),e(zj,zKo),e(t0,XKo),e(Q,WKo),e(Q,r0),e(r0,Ude),e(Ude,VKo),e(r0,QKo),e(r0,Xj),e(Xj,HKo),e(r0,UKo),e(Q,JKo),e(Q,a0),e(a0,Jde),e(Jde,KKo),e(a0,YKo),e(a0,Wj),e(Wj,ZKo),e(a0,eYo),e(Q,oYo),e(Q,n0),e(n0,Kde),e(Kde,tYo),e(n0,rYo),e(n0,Vj),e(Vj,aYo),e(n0,nYo),e(Q,sYo),e(Q,s0),e(s0,Yde),e(Yde,lYo),e(s0,iYo),e(s0,Qj),e(Qj,dYo),e(s0,mYo),e(Q,fYo),e(Q,l0),e(l0,Zde),e(Zde,cYo),e(l0,gYo),e(l0,Hj),e(Hj,hYo),e(l0,uYo),e(Q,pYo),e(Q,i0),e(i0,eme),e(eme,_Yo),e(i0,vYo),e(i0,Uj),e(Uj,bYo),e(i0,TYo),e(Q,FYo),e(Q,d0),e(d0,ome),e(ome,MYo),e(d0,EYo),e(d0,Jj),e(Jj,CYo),e(d0,yYo),e(Q,wYo),e(Q,m0),e(m0,tme),e(tme,AYo),e(m0,xYo),e(m0,Kj),e(Kj,LYo),e(m0,BYo),e(Q,kYo),e(Q,f0),e(f0,rme),e(rme,RYo),e(f0,SYo),e(f0,Yj),e(Yj,PYo),e(f0,$Yo),e(Q,IYo),e(Q,c0),e(c0,ame),e(ame,jYo),e(c0,NYo),e(c0,Zj),e(Zj,DYo),e(c0,GYo),e(Q,OYo),e(Q,g0),e(g0,nme),e(nme,qYo),e(g0,zYo),e(g0,eN),e(eN,XYo),e(g0,WYo),e(Q,VYo),e(Q,h0),e(h0,sme),e(sme,QYo),e(h0,HYo),e(h0,oN),e(oN,UYo),e(h0,JYo),e(Q,KYo),e(Q,u0),e(u0,lme),e(lme,YYo),e(u0,ZYo),e(u0,tN),e(tN,eZo),e(u0,oZo),e(Q,tZo),e(Q,p0),e(p0,ime),e(ime,rZo),e(p0,aZo),e(p0,rN),e(rN,nZo),e(p0,sZo),e(Q,lZo),e(Q,_0),e(_0,dme),e(dme,iZo),e(_0,dZo),e(_0,aN),e(aN,mZo),e(_0,fZo),e(Q,cZo),e(Q,v0),e(v0,mme),e(mme,gZo),e(v0,hZo),e(v0,nN),e(nN,uZo),e(v0,pZo),e(uo,_Zo),e(uo,fme),e(fme,vZo),e(uo,bZo),g(vy,uo,null),v(d,qCe,_),v(d,Jd,_),e(Jd,b0),e(b0,cme),g(by,cme,null),e(Jd,TZo),e(Jd,gme),e(gme,FZo),v(d,zCe,_),v(d,gt,_),g(Ty,gt,null),e(gt,MZo),e(gt,Kd),e(Kd,EZo),e(Kd,hme),e(hme,CZo),e(Kd,yZo),e(Kd,ume),e(ume,wZo),e(Kd,AZo),e(gt,xZo),e(gt,Fy),e(Fy,LZo),e(Fy,pme),e(
pme,BZo),e(Fy,kZo),e(gt,RZo),e(gt,ar),g(My,ar,null),e(ar,SZo),e(ar,_me),e(_me,PZo),e(ar,$Zo),e(ar,Yd),e(Yd,IZo),e(Yd,vme),e(vme,jZo),e(Yd,NZo),e(Yd,bme),e(bme,DZo),e(Yd,GZo),e(ar,OZo),e(ar,Tme),e(Tme,qZo),e(ar,zZo),g(Ey,ar,null),e(gt,XZo),e(gt,po),g(Cy,po,null),e(po,WZo),e(po,Fme),e(Fme,VZo),e(po,QZo),e(po,on),e(on,HZo),e(on,Mme),e(Mme,UZo),e(on,JZo),e(on,Eme),e(Eme,KZo),e(on,YZo),e(on,Cme),e(Cme,ZZo),e(on,eet),e(po,oet),e(po,Zd),e(Zd,T0),e(T0,yme),e(yme,tet),e(T0,ret),e(T0,sN),e(sN,aet),e(T0,net),e(Zd,set),e(Zd,F0),e(F0,wme),e(wme,iet),e(F0,det),e(F0,lN),e(lN,met),e(F0,fet),e(Zd,cet),e(Zd,M0),e(M0,Ame),e(Ame,get),e(M0,het),e(M0,iN),e(iN,uet),e(M0,pet),e(po,_et),e(po,xme),e(xme,vet),e(po,bet),g(yy,po,null),v(d,XCe,_),v(d,em,_),e(em,E0),e(E0,Lme),g(wy,Lme,null),e(em,Tet),e(em,Bme),e(Bme,Fet),v(d,WCe,_),v(d,ht,_),g(Ay,ht,null),e(ht,Met),e(ht,om),e(om,Eet),e(om,kme),e(kme,Cet),e(om,yet),e(om,Rme),e(Rme,wet),e(om,Aet),e(ht,xet),e(ht,xy),e(xy,Let),e(xy,Sme),e(Sme,Bet),e(xy,ket),e(ht,Ret),e(ht,nr),g(Ly,nr,null),e(nr,Set),e(nr,Pme),e(Pme,Pet),e(nr,$et),e(nr,tm),e(tm,Iet),e(tm,$me),e($me,jet),e(tm,Net),e(tm,Ime),e(Ime,Det),e(tm,Get),e(nr,Oet),e(nr,jme),e(jme,qet),e(nr,zet),g(By,nr,null),e(ht,Xet),e(ht,_o),g(ky,_o,null),e(_o,Wet),e(_o,Nme),e(Nme,Vet),e(_o,Qet),e(_o,tn),e(tn,Het),e(tn,Dme),e(Dme,Uet),e(tn,Jet),e(tn,Gme),e(Gme,Ket),e(tn,Yet),e(tn,Ome),e(Ome,Zet),e(tn,eot),e(_o,oot),e(_o,he),e(he,C0),e(C0,qme),e(qme,tot),e(C0,rot),e(C0,dN),e(dN,aot),e(C0,not),e(he,sot),e(he,y0),e(y0,zme),e(zme,lot),e(y0,iot),e(y0,mN),e(mN,dot),e(y0,mot),e(he,fot),e(he,w0),e(w0,Xme),e(Xme,cot),e(w0,got),e(w0,fN),e(fN,hot),e(w0,uot),e(he,pot),e(he,A0),e(A0,Wme),e(Wme,_ot),e(A0,vot),e(A0,cN),e(cN,bot),e(A0,Tot),e(he,Fot),e(he,x0),e(x0,Vme),e(Vme,Mot),e(x0,Eot),e(x0,gN),e(gN,Cot),e(x0,yot),e(he,wot),e(he,L0),e(L0,Qme),e(Qme,Aot),e(L0,xot),e(L0,hN),e(hN,Lot),e(L0,Bot),e(he,kot),e(he,B0),e(B0,Hme),e(Hme,Rot),e(B0,Sot),e(B0,uN),e(uN,Pot),e(B0,$ot),e(he,Iot),e(he,k0),e(k0,Ume),e(Ume,jot),e(k0,Not),e(k0,pN),e(pN,Dot),e(k0,Got),e(he,Oot),e(he,R0),e(R0,Jme),e(Jme,qot),e(R0,zot),e(R0,_N),e(_N,Xot),e(R0,Wot),e(he,Vot),e(he,S0),e(S0,Kme),e(Kme,Qot),e(S0,Hot),e(S0,vN),e(vN,Uot),e(S0,Jot),e(_o,Kot),e(_o,Yme),e(Yme,Yot),e(_o,Zot),g(Ry,_o,null),v(d,VCe,_),v(d,rm,_),e(rm,P0),e(P0,Zme),g(Sy,Zme,null),e(rm,ett),e(rm,efe),e(efe,ott),v(d,QCe,_),v(d,ut,_),g(Py,ut,null),e(ut,ttt),e(ut,am),e(am,rtt),e(am,ofe),e(ofe,att),e(am,ntt),e(am,tfe),e(tfe,stt),e(am,ltt),e(ut,itt),e(ut,$y),e($y,dtt),e($y,rfe),e(rfe,mtt),e($y,ftt),e(ut,ctt),e(ut,sr),g(Iy,sr,null),e(sr,gtt),e(sr,afe),e(afe,htt),e(sr,utt),e(sr,nm),e(nm,ptt),e(nm,nfe),e(nfe,_tt),e(nm,vtt),e(nm,sfe),e(sfe,btt),e(nm,Ttt),e(sr,Ftt),e(sr,lfe),e(lfe,Mtt),e(sr,Ett),g(jy,sr,null),e(ut,Ctt),e(ut,vo),g(Ny,vo,null),e(vo,ytt),e(vo,ife),e(ife,wtt),e(vo,Att),e(vo,rn),e(rn,xtt),e(rn,dfe),e(dfe,Ltt),e(rn,Btt),e(rn,mfe),e(mfe,ktt),e(rn,Rtt),e(rn,ffe),e(ffe,Stt),e(rn,Ptt),e(vo,$tt),e(vo,Me),e(Me,$0),e($0,cfe),e(cfe,Itt),e($0,jtt),e($0,bN),e(bN,Ntt),e($0,Dtt),e(Me,Gtt),e(Me,I0),e(I0,gfe),e(gfe,Ott),e(I0,qtt),e(I0,TN),e(TN,ztt),e(I0,Xtt),e(Me,Wtt),e(Me,j0),e(j0,hfe),e(hfe,Vtt),e(j0,Qtt),e(j0,FN),e(FN,Htt),e(j0,Utt),e(Me,Jtt),e(Me,N0),e(N0,ufe),e(ufe,Ktt),e(N0,Ytt),e(N0,MN),e(MN,Ztt),e(N0,ert),e(Me,ort),e(Me,D0),e(D0,pfe),e(pfe,trt),e(D0,rrt),e(D0,EN),e(EN,art),e(D0,nrt),e(Me,srt),e(Me,G0),e(G0,_fe),e(_fe,lrt),e(G0,irt),e(G0,CN),e(CN,drt),e(G0,mrt),e(Me,frt),e(Me,O0),e(O0,vfe),e(vfe,crt),e(O0,grt),e(O0,yN),e(yN,hrt),e(O0,urt),e(Me,prt),e(Me,q0),e(q0,bfe),e(bfe,_rt),e(q0,vrt),e(q0,wN),e(wN,brt),e(q0,Trt),e(vo,Frt
),e(vo,Tfe),e(Tfe,Mrt),e(vo,Ert),g(Dy,vo,null),v(d,HCe,_),v(d,sm,_),e(sm,z0),e(z0,Ffe),g(Gy,Ffe,null),e(sm,Crt),e(sm,Mfe),e(Mfe,yrt),v(d,UCe,_),v(d,pt,_),g(Oy,pt,null),e(pt,wrt),e(pt,lm),e(lm,Art),e(lm,Efe),e(Efe,xrt),e(lm,Lrt),e(lm,Cfe),e(Cfe,Brt),e(lm,krt),e(pt,Rrt),e(pt,qy),e(qy,Srt),e(qy,yfe),e(yfe,Prt),e(qy,$rt),e(pt,Irt),e(pt,lr),g(zy,lr,null),e(lr,jrt),e(lr,wfe),e(wfe,Nrt),e(lr,Drt),e(lr,im),e(im,Grt),e(im,Afe),e(Afe,Ort),e(im,qrt),e(im,xfe),e(xfe,zrt),e(im,Xrt),e(lr,Wrt),e(lr,Lfe),e(Lfe,Vrt),e(lr,Qrt),g(Xy,lr,null),e(pt,Hrt),e(pt,bo),g(Wy,bo,null),e(bo,Urt),e(bo,Bfe),e(Bfe,Jrt),e(bo,Krt),e(bo,an),e(an,Yrt),e(an,kfe),e(kfe,Zrt),e(an,eat),e(an,Rfe),e(Rfe,oat),e(an,tat),e(an,Sfe),e(Sfe,rat),e(an,aat),e(bo,nat),e(bo,pe),e(pe,X0),e(X0,Pfe),e(Pfe,sat),e(X0,lat),e(X0,AN),e(AN,iat),e(X0,dat),e(pe,mat),e(pe,W0),e(W0,$fe),e($fe,fat),e(W0,cat),e(W0,xN),e(xN,gat),e(W0,hat),e(pe,uat),e(pe,V0),e(V0,Ife),e(Ife,pat),e(V0,_at),e(V0,LN),e(LN,vat),e(V0,bat),e(pe,Tat),e(pe,Q0),e(Q0,jfe),e(jfe,Fat),e(Q0,Mat),e(Q0,BN),e(BN,Eat),e(Q0,Cat),e(pe,yat),e(pe,H0),e(H0,Nfe),e(Nfe,wat),e(H0,Aat),e(H0,kN),e(kN,xat),e(H0,Lat),e(pe,Bat),e(pe,U0),e(U0,Dfe),e(Dfe,kat),e(U0,Rat),e(U0,RN),e(RN,Sat),e(U0,Pat),e(pe,$at),e(pe,J0),e(J0,Gfe),e(Gfe,Iat),e(J0,jat),e(J0,SN),e(SN,Nat),e(J0,Dat),e(pe,Gat),e(pe,K0),e(K0,Ofe),e(Ofe,Oat),e(K0,qat),e(K0,PN),e(PN,zat),e(K0,Xat),e(pe,Wat),e(pe,Y0),e(Y0,qfe),e(qfe,Vat),e(Y0,Qat),e(Y0,$N),e($N,Hat),e(Y0,Uat),e(bo,Jat),e(bo,zfe),e(zfe,Kat),e(bo,Yat),g(Vy,bo,null),v(d,JCe,_),v(d,dm,_),e(dm,Z0),e(Z0,Xfe),g(Qy,Xfe,null),e(dm,Zat),e(dm,Wfe),e(Wfe,ent),v(d,KCe,_),v(d,_t,_),g(Hy,_t,null),e(_t,ont),e(_t,mm),e(mm,tnt),e(mm,Vfe),e(Vfe,rnt),e(mm,ant),e(mm,Qfe),e(Qfe,nnt),e(mm,snt),e(_t,lnt),e(_t,Uy),e(Uy,int),e(Uy,Hfe),e(Hfe,dnt),e(Uy,mnt),e(_t,fnt),e(_t,ir),g(Jy,ir,null),e(ir,cnt),e(ir,Ufe),e(Ufe,gnt),e(ir,hnt),e(ir,fm),e(fm,unt),e(fm,Jfe),e(Jfe,pnt),e(fm,_nt),e(fm,Kfe),e(Kfe,vnt),e(fm,bnt),e(ir,Tnt),e(ir,Yfe),e(Yfe,Fnt),e(ir,Mnt),g(Ky,ir,null),e(_t,Ent),e(_t,To),g(Yy,To,null),e(To,Cnt),e(To,Zfe),e(Zfe,ynt),e(To,wnt),e(To,nn),e(nn,Ant),e(nn,ece),e(ece,xnt),e(nn,Lnt),e(nn,oce),e(oce,Bnt),e(nn,knt),e(nn,tce),e(tce,Rnt),e(nn,Snt),e(To,Pnt),e(To,Ee),e(Ee,eT),e(eT,rce),e(rce,$nt),e(eT,Int),e(eT,IN),e(IN,jnt),e(eT,Nnt),e(Ee,Dnt),e(Ee,oT),e(oT,ace),e(ace,Gnt),e(oT,Ont),e(oT,jN),e(jN,qnt),e(oT,znt),e(Ee,Xnt),e(Ee,tT),e(tT,nce),e(nce,Wnt),e(tT,Vnt),e(tT,NN),e(NN,Qnt),e(tT,Hnt),e(Ee,Unt),e(Ee,rT),e(rT,sce),e(sce,Jnt),e(rT,Knt),e(rT,DN),e(DN,Ynt),e(rT,Znt),e(Ee,est),e(Ee,aT),e(aT,lce),e(lce,ost),e(aT,tst),e(aT,GN),e(GN,rst),e(aT,ast),e(Ee,nst),e(Ee,nT),e(nT,ice),e(ice,sst),e(nT,lst),e(nT,ON),e(ON,ist),e(nT,dst),e(Ee,mst),e(Ee,sT),e(sT,dce),e(dce,fst),e(sT,cst),e(sT,qN),e(qN,gst),e(sT,hst),e(Ee,ust),e(Ee,lT),e(lT,mce),e(mce,pst),e(lT,_st),e(lT,zN),e(zN,vst),e(lT,bst),e(To,Tst),e(To,fce),e(fce,Fst),e(To,Mst),g(Zy,To,null),v(d,YCe,_),v(d,cm,_),e(cm,iT),e(iT,cce),g(ew,cce,null),e(cm,Est),e(cm,gce),e(gce,Cst),v(d,ZCe,_),v(d,vt,_),g(ow,vt,null),e(vt,yst),e(vt,gm),e(gm,wst),e(gm,hce),e(hce,Ast),e(gm,xst),e(gm,uce),e(uce,Lst),e(gm,Bst),e(vt,kst),e(vt,tw),e(tw,Rst),e(tw,pce),e(pce,Sst),e(tw,Pst),e(vt,$st),e(vt,dr),g(rw,dr,null),e(dr,Ist),e(dr,_ce),e(_ce,jst),e(dr,Nst),e(dr,hm),e(hm,Dst),e(hm,vce),e(vce,Gst),e(hm,Ost),e(hm,bce),e(bce,qst),e(hm,zst),e(dr,Xst),e(dr,Tce),e(Tce,Wst),e(dr,Vst),g(aw,dr,null),e(vt,Qst),e(vt,Fo),g(nw,Fo,null),e(Fo,Hst),e(Fo,Fce),e(Fce,Ust),e(Fo,Jst),e(Fo,sn),e(sn,Kst),e(sn,Mce),e(Mce,Yst),e(sn,Zst),e(sn,Ece),e(Ece,elt),e(sn,olt),e(sn,Cce),e(Cce,tlt),e(sn,rlt),e(Fo,alt),e(Fo,Ce),e(Ce,dT),e(dT,
yce),e(yce,nlt),e(dT,slt),e(dT,XN),e(XN,llt),e(dT,ilt),e(Ce,dlt),e(Ce,mT),e(mT,wce),e(wce,mlt),e(mT,flt),e(mT,WN),e(WN,clt),e(mT,glt),e(Ce,hlt),e(Ce,fT),e(fT,Ace),e(Ace,ult),e(fT,plt),e(fT,VN),e(VN,_lt),e(fT,vlt),e(Ce,blt),e(Ce,cT),e(cT,xce),e(xce,Tlt),e(cT,Flt),e(cT,QN),e(QN,Mlt),e(cT,Elt),e(Ce,Clt),e(Ce,gT),e(gT,Lce),e(Lce,ylt),e(gT,wlt),e(gT,HN),e(HN,Alt),e(gT,xlt),e(Ce,Llt),e(Ce,hT),e(hT,Bce),e(Bce,Blt),e(hT,klt),e(hT,UN),e(UN,Rlt),e(hT,Slt),e(Ce,Plt),e(Ce,uT),e(uT,kce),e(kce,$lt),e(uT,Ilt),e(uT,JN),e(JN,jlt),e(uT,Nlt),e(Ce,Dlt),e(Ce,pT),e(pT,Rce),e(Rce,Glt),e(pT,Olt),e(pT,KN),e(KN,qlt),e(pT,zlt),e(Fo,Xlt),e(Fo,Sce),e(Sce,Wlt),e(Fo,Vlt),g(sw,Fo,null),v(d,e3e,_),v(d,um,_),e(um,_T),e(_T,Pce),g(lw,Pce,null),e(um,Qlt),e(um,$ce),e($ce,Hlt),v(d,o3e,_),v(d,bt,_),g(iw,bt,null),e(bt,Ult),e(bt,pm),e(pm,Jlt),e(pm,Ice),e(Ice,Klt),e(pm,Ylt),e(pm,jce),e(jce,Zlt),e(pm,eit),e(bt,oit),e(bt,dw),e(dw,tit),e(dw,Nce),e(Nce,rit),e(dw,ait),e(bt,nit),e(bt,mr),g(mw,mr,null),e(mr,sit),e(mr,Dce),e(Dce,lit),e(mr,iit),e(mr,_m),e(_m,dit),e(_m,Gce),e(Gce,mit),e(_m,fit),e(_m,Oce),e(Oce,cit),e(_m,git),e(mr,hit),e(mr,qce),e(qce,uit),e(mr,pit),g(fw,mr,null),e(bt,_it),e(bt,Mo),g(cw,Mo,null),e(Mo,vit),e(Mo,zce),e(zce,bit),e(Mo,Tit),e(Mo,ln),e(ln,Fit),e(ln,Xce),e(Xce,Mit),e(ln,Eit),e(ln,Wce),e(Wce,Cit),e(ln,yit),e(ln,Vce),e(Vce,wit),e(ln,Ait),e(Mo,xit),e(Mo,Tt),e(Tt,vT),e(vT,Qce),e(Qce,Lit),e(vT,Bit),e(vT,YN),e(YN,kit),e(vT,Rit),e(Tt,Sit),e(Tt,bT),e(bT,Hce),e(Hce,Pit),e(bT,$it),e(bT,ZN),e(ZN,Iit),e(bT,jit),e(Tt,Nit),e(Tt,TT),e(TT,Uce),e(Uce,Dit),e(TT,Git),e(TT,eD),e(eD,Oit),e(TT,qit),e(Tt,zit),e(Tt,FT),e(FT,Jce),e(Jce,Xit),e(FT,Wit),e(FT,oD),e(oD,Vit),e(FT,Qit),e(Tt,Hit),e(Tt,MT),e(MT,Kce),e(Kce,Uit),e(MT,Jit),e(MT,tD),e(tD,Kit),e(MT,Yit),e(Tt,Zit),e(Tt,ET),e(ET,Yce),e(Yce,edt),e(ET,odt),e(ET,rD),e(rD,tdt),e(ET,rdt),e(Mo,adt),e(Mo,Zce),e(Zce,ndt),e(Mo,sdt),g(gw,Mo,null),v(d,t3e,_),v(d,vm,_),e(vm,CT),e(CT,ege),g(hw,ege,null),e(vm,ldt),e(vm,oge),e(oge,idt),v(d,r3e,_),v(d,Ft,_),g(uw,Ft,null),e(Ft,ddt),e(Ft,bm),e(bm,mdt),e(bm,tge),e(tge,fdt),e(bm,cdt),e(bm,rge),e(rge,gdt),e(bm,hdt),e(Ft,udt),e(Ft,pw),e(pw,pdt),e(pw,age),e(age,_dt),e(pw,vdt),e(Ft,bdt),e(Ft,fr),g(_w,fr,null),e(fr,Tdt),e(fr,nge),e(nge,Fdt),e(fr,Mdt),e(fr,Tm),e(Tm,Edt),e(Tm,sge),e(sge,Cdt),e(Tm,ydt),e(Tm,lge),e(lge,wdt),e(Tm,Adt),e(fr,xdt),e(fr,ige),e(ige,Ldt),e(fr,Bdt),g(vw,fr,null),e(Ft,kdt),e(Ft,Eo),g(bw,Eo,null),e(Eo,Rdt),e(Eo,dge),e(dge,Sdt),e(Eo,Pdt),e(Eo,dn),e(dn,$dt),e(dn,mge),e(mge,Idt),e(dn,jdt),e(dn,fge),e(fge,Ndt),e(dn,Ddt),e(dn,cge),e(cge,Gdt),e(dn,Odt),e(Eo,qdt),e(Eo,Mt),e(Mt,yT),e(yT,gge),e(gge,zdt),e(yT,Xdt),e(yT,aD),e(aD,Wdt),e(yT,Vdt),e(Mt,Qdt),e(Mt,wT),e(wT,hge),e(hge,Hdt),e(wT,Udt),e(wT,nD),e(nD,Jdt),e(wT,Kdt),e(Mt,Ydt),e(Mt,AT),e(AT,uge),e(uge,Zdt),e(AT,emt),e(AT,sD),e(sD,omt),e(AT,tmt),e(Mt,rmt),e(Mt,xT),e(xT,pge),e(pge,amt),e(xT,nmt),e(xT,lD),e(lD,smt),e(xT,lmt),e(Mt,imt),e(Mt,LT),e(LT,_ge),e(_ge,dmt),e(LT,mmt),e(LT,iD),e(iD,fmt),e(LT,cmt),e(Mt,gmt),e(Mt,BT),e(BT,vge),e(vge,hmt),e(BT,umt),e(BT,dD),e(dD,pmt),e(BT,_mt),e(Eo,vmt),e(Eo,bge),e(bge,bmt),e(Eo,Tmt),g(Tw,Eo,null),v(d,a3e,_),v(d,Fm,_),e(Fm,kT),e(kT,Tge),g(Fw,Tge,null),e(Fm,Fmt),e(Fm,Fge),e(Fge,Mmt),v(d,n3e,_),v(d,Et,_),g(Mw,Et,null),e(Et,Emt),e(Et,Mm),e(Mm,Cmt),e(Mm,Mge),e(Mge,ymt),e(Mm,wmt),e(Mm,Ege),e(Ege,Amt),e(Mm,xmt),e(Et,Lmt),e(Et,Ew),e(Ew,Bmt),e(Ew,Cge),e(Cge,kmt),e(Ew,Rmt),e(Et,Smt),e(Et,cr),g(Cw,cr,null),e(cr,Pmt),e(cr,yge),e(yge,$mt),e(cr,Imt),e(cr,Em),e(Em,jmt),e(Em,wge),e(wge,Nmt),e(Em,Dmt),e(Em,Age),e(Age,Gmt),e(Em,Omt),e(cr,qmt),e(cr,xge),e(xge,zmt),e(cr,Xmt),g(yw,cr,null
),e(Et,Wmt),e(Et,Co),g(ww,Co,null),e(Co,Vmt),e(Co,Lge),e(Lge,Qmt),e(Co,Hmt),e(Co,mn),e(mn,Umt),e(mn,Bge),e(Bge,Jmt),e(mn,Kmt),e(mn,kge),e(kge,Ymt),e(mn,Zmt),e(mn,Rge),e(Rge,eft),e(mn,oft),e(Co,tft),e(Co,Sge),e(Sge,RT),e(RT,Pge),e(Pge,rft),e(RT,aft),e(RT,mD),e(mD,nft),e(RT,sft),e(Co,lft),e(Co,$ge),e($ge,ift),e(Co,dft),g(Aw,Co,null),v(d,s3e,_),v(d,Cm,_),e(Cm,ST),e(ST,Ige),g(xw,Ige,null),e(Cm,mft),e(Cm,jge),e(jge,fft),v(d,l3e,_),v(d,Ct,_),g(Lw,Ct,null),e(Ct,cft),e(Ct,ym),e(ym,gft),e(ym,Nge),e(Nge,hft),e(ym,uft),e(ym,Dge),e(Dge,pft),e(ym,_ft),e(Ct,vft),e(Ct,Bw),e(Bw,bft),e(Bw,Gge),e(Gge,Tft),e(Bw,Fft),e(Ct,Mft),e(Ct,gr),g(kw,gr,null),e(gr,Eft),e(gr,Oge),e(Oge,Cft),e(gr,yft),e(gr,wm),e(wm,wft),e(wm,qge),e(qge,Aft),e(wm,xft),e(wm,zge),e(zge,Lft),e(wm,Bft),e(gr,kft),e(gr,Xge),e(Xge,Rft),e(gr,Sft),g(Rw,gr,null),e(Ct,Pft),e(Ct,yo),g(Sw,yo,null),e(yo,$ft),e(yo,Wge),e(Wge,Ift),e(yo,jft),e(yo,fn),e(fn,Nft),e(fn,Vge),e(Vge,Dft),e(fn,Gft),e(fn,Qge),e(Qge,Oft),e(fn,qft),e(fn,Hge),e(Hge,zft),e(fn,Xft),e(yo,Wft),e(yo,Pw),e(Pw,PT),e(PT,Uge),e(Uge,Vft),e(PT,Qft),e(PT,fD),e(fD,Hft),e(PT,Uft),e(Pw,Jft),e(Pw,$T),e($T,Jge),e(Jge,Kft),e($T,Yft),e($T,cD),e(cD,Zft),e($T,ect),e(yo,oct),e(yo,Kge),e(Kge,tct),e(yo,rct),g($w,yo,null),v(d,i3e,_),v(d,Am,_),e(Am,IT),e(IT,Yge),g(Iw,Yge,null),e(Am,act),e(Am,Zge),e(Zge,nct),v(d,d3e,_),v(d,yt,_),g(jw,yt,null),e(yt,sct),e(yt,xm),e(xm,lct),e(xm,ehe),e(ehe,ict),e(xm,dct),e(xm,ohe),e(ohe,mct),e(xm,fct),e(yt,cct),e(yt,Nw),e(Nw,gct),e(Nw,the),e(the,hct),e(Nw,uct),e(yt,pct),e(yt,hr),g(Dw,hr,null),e(hr,_ct),e(hr,rhe),e(rhe,vct),e(hr,bct),e(hr,Lm),e(Lm,Tct),e(Lm,ahe),e(ahe,Fct),e(Lm,Mct),e(Lm,nhe),e(nhe,Ect),e(Lm,Cct),e(hr,yct),e(hr,she),e(she,wct),e(hr,Act),g(Gw,hr,null),e(yt,xct),e(yt,wo),g(Ow,wo,null),e(wo,Lct),e(wo,lhe),e(lhe,Bct),e(wo,kct),e(wo,cn),e(cn,Rct),e(cn,ihe),e(ihe,Sct),e(cn,Pct),e(cn,dhe),e(dhe,$ct),e(cn,Ict),e(cn,mhe),e(mhe,jct),e(cn,Nct),e(wo,Dct),e(wo,fhe),e(fhe,jT),e(jT,che),e(che,Gct),e(jT,Oct),e(jT,gD),e(gD,qct),e(jT,zct),e(wo,Xct),e(wo,ghe),e(ghe,Wct),e(wo,Vct),g(qw,wo,null),m3e=!0},p(d,[_]){const zw={};_&2&&(zw.$$scope={dirty:_,ctx:d}),Im.$set(zw);const hhe={};_&2&&(hhe.$$scope={dirty:_,ctx:d}),Kc.$set(hhe);const 
uhe={};_&2&&(uhe.$$scope={dirty:_,ctx:d}),sg.$set(uhe)},i(d){m3e||(h(ie.$$.fragment,d),h(ba.$$.fragment,d),h(IF.$$.fragment,d),h(jF.$$.fragment,d),h(Im.$$.fragment,d),h(NF.$$.fragment,d),h(DF.$$.fragment,d),h(qF.$$.fragment,d),h(zF.$$.fragment,d),h(XF.$$.fragment,d),h(WF.$$.fragment,d),h(VF.$$.fragment,d),h(UF.$$.fragment,d),h(JF.$$.fragment,d),h(KF.$$.fragment,d),h(YF.$$.fragment,d),h(ZF.$$.fragment,d),h(tM.$$.fragment,d),h(Kc.$$.fragment,d),h(rM.$$.fragment,d),h(aM.$$.fragment,d),h(nM.$$.fragment,d),h(iM.$$.fragment,d),h(sg.$$.fragment,d),h(dM.$$.fragment,d),h(mM.$$.fragment,d),h(fM.$$.fragment,d),h(gM.$$.fragment,d),h(hM.$$.fragment,d),h(uM.$$.fragment,d),h(pM.$$.fragment,d),h(_M.$$.fragment,d),h(vM.$$.fragment,d),h(TM.$$.fragment,d),h(FM.$$.fragment,d),h(MM.$$.fragment,d),h(EM.$$.fragment,d),h(CM.$$.fragment,d),h(yM.$$.fragment,d),h(AM.$$.fragment,d),h(xM.$$.fragment,d),h(LM.$$.fragment,d),h(BM.$$.fragment,d),h(kM.$$.fragment,d),h(RM.$$.fragment,d),h(PM.$$.fragment,d),h($M.$$.fragment,d),h(IM.$$.fragment,d),h(jM.$$.fragment,d),h(NM.$$.fragment,d),h(DM.$$.fragment,d),h(OM.$$.fragment,d),h(qM.$$.fragment,d),h(zM.$$.fragment,d),h(XM.$$.fragment,d),h(WM.$$.fragment,d),h(VM.$$.fragment,d),h(HM.$$.fragment,d),h(UM.$$.fragment,d),h(JM.$$.fragment,d),h(KM.$$.fragment,d),h(YM.$$.fragment,d),h(ZM.$$.fragment,d),h(oE.$$.fragment,d),h(tE.$$.fragment,d),h(rE.$$.fragment,d),h(aE.$$.fragment,d),h(nE.$$.fragment,d),h(sE.$$.fragment,d),h(iE.$$.fragment,d),h(dE.$$.fragment,d),h(mE.$$.fragment,d),h(fE.$$.fragment,d),h(cE.$$.fragment,d),h(gE.$$.fragment,d),h(uE.$$.fragment,d),h(pE.$$.fragment,d),h(_E.$$.fragment,d),h(vE.$$.fragment,d),h(bE.$$.fragment,d),h(TE.$$.fragment,d),h(ME.$$.fragment,d),h(EE.$$.fragment,d),h(CE.$$.fragment,d),h(yE.$$.fragment,d),h(wE.$$.fragment,d),h(AE.$$.fragment,d),h(LE.$$.fragment,d),h(BE.$$.fragment,d),h(kE.$$.fragment,d),h(RE.$$.fragment,d),h(SE.$$.fragment,d),h(PE.$$.fragment,d),h(IE.$$.fragment,d),h(jE.$$.fragment,d),h(NE.$$.fragment,d),h(DE.$$.fragment,d),h(GE.$$.fragment,d),h(OE.$$.fragment,d),h(zE.$$.fragment,d),h(XE.$$.fragment,d),h(WE.$$.fragment,d),h(VE.$$.fragment,d),h(QE.$$.fragment,d),h(HE.$$.fragment,d),h(JE.$$.fragment,d),h(KE.$$.fragment,d),h(YE.$$.fragment,d),h(ZE.$$.fragment,d),h(eC.$$.fragment,d),h(oC.$$.fragment,d),h(rC.$$.fragment,d),h(aC.$$.fragment,d),h(nC.$$.fragment,d),h(sC.$$.fragment,d),h(lC.$$.fragment,d),h(iC.$$.fragment,d),h(mC.$$.fragment,d),h(fC.$$.fragment,d),h(cC.$$.fragment,d),h(gC.$$.fragment,d),h(hC.$$.fragment,d),h(uC.$$.fragment,d),h(_C.$$.fragment,d),h(vC.$$.fragment,d),h(bC.$$.fragment,d),h(FC.$$.fragment,d),h(MC.$$.fragment,d),h(EC.$$.fragment,d),h(yC.$$.fragment,d),h(wC.$$.fragment,d),h(AC.$$.fragment,d),h(xC.$$.fragment,d),h(LC.$$.fragment,d),h(BC.$$.fragment,d),h(RC.$$.fragment,d),h(SC.$$.fragment,d),h(PC.$$.fragment,d),h($C.$$.fragment,d),h(IC.$$.fragment,d),h(jC.$$.fragment,d),h(DC.$$.fragment,d),h(GC.$$.fragment,d),h(OC.$$.fragment,d),h(qC.$$.fragment,d),h(zC.$$.fragment,d),h(XC.$$.fragment,d),h(VC.$$.fragment,d),h(QC.$$.fragment,d),h(HC.$$.fragment,d),h(UC.$$.fragment,d),h(JC.$$.fragment,d),h(KC.$$.fragment,d),h(ZC.$$.fragment,d),h(e3.$$.fragment,d),h(o3.$$.fragment,d),h(t3.$$.fragment,d),h(r3.$$.fragment,d),h(a3.$$.fragment,d),h(s3.$$.fragment,d),h(l3.$$.fragment,d),h(i3.$$.fragment,d),h(d3.$$.fragment,d),h(m3.$$.fragment,d),h(f3.$$.fragment,d),h(g3.$$.fragment,d),h(h3.$$.fragment,d),h(u3.$$.fragment,d),h(p3.$$.fragment,d),h(_3.$$.fragment,d),h(v3.$$.fragment,d),h(T3.$$.fragment,d),h(F3.$$.fragment,d),h(M3.$$.fragment,d),h(E3.$$.
fragment,d),h(C3.$$.fragment,d),h(y3.$$.fragment,d),h(A3.$$.fragment,d),h(x3.$$.fragment,d),h(L3.$$.fragment,d),h(B3.$$.fragment,d),h(k3.$$.fragment,d),h(R3.$$.fragment,d),h(P3.$$.fragment,d),h($3.$$.fragment,d),h(I3.$$.fragment,d),h(j3.$$.fragment,d),h(N3.$$.fragment,d),h(D3.$$.fragment,d),h(O3.$$.fragment,d),h(q3.$$.fragment,d),h(z3.$$.fragment,d),h(X3.$$.fragment,d),h(W3.$$.fragment,d),h(V3.$$.fragment,d),h(H3.$$.fragment,d),h(U3.$$.fragment,d),h(J3.$$.fragment,d),h(K3.$$.fragment,d),h(Y3.$$.fragment,d),h(Z3.$$.fragment,d),h(oy.$$.fragment,d),h(ty.$$.fragment,d),h(ry.$$.fragment,d),h(ay.$$.fragment,d),h(ny.$$.fragment,d),h(sy.$$.fragment,d),h(iy.$$.fragment,d),h(dy.$$.fragment,d),h(my.$$.fragment,d),h(fy.$$.fragment,d),h(cy.$$.fragment,d),h(gy.$$.fragment,d),h(uy.$$.fragment,d),h(py.$$.fragment,d),h(_y.$$.fragment,d),h(vy.$$.fragment,d),h(by.$$.fragment,d),h(Ty.$$.fragment,d),h(My.$$.fragment,d),h(Ey.$$.fragment,d),h(Cy.$$.fragment,d),h(yy.$$.fragment,d),h(wy.$$.fragment,d),h(Ay.$$.fragment,d),h(Ly.$$.fragment,d),h(By.$$.fragment,d),h(ky.$$.fragment,d),h(Ry.$$.fragment,d),h(Sy.$$.fragment,d),h(Py.$$.fragment,d),h(Iy.$$.fragment,d),h(jy.$$.fragment,d),h(Ny.$$.fragment,d),h(Dy.$$.fragment,d),h(Gy.$$.fragment,d),h(Oy.$$.fragment,d),h(zy.$$.fragment,d),h(Xy.$$.fragment,d),h(Wy.$$.fragment,d),h(Vy.$$.fragment,d),h(Qy.$$.fragment,d),h(Hy.$$.fragment,d),h(Jy.$$.fragment,d),h(Ky.$$.fragment,d),h(Yy.$$.fragment,d),h(Zy.$$.fragment,d),h(ew.$$.fragment,d),h(ow.$$.fragment,d),h(rw.$$.fragment,d),h(aw.$$.fragment,d),h(nw.$$.fragment,d),h(sw.$$.fragment,d),h(lw.$$.fragment,d),h(iw.$$.fragment,d),h(mw.$$.fragment,d),h(fw.$$.fragment,d),h(cw.$$.fragment,d),h(gw.$$.fragment,d),h(hw.$$.fragment,d),h(uw.$$.fragment,d),h(_w.$$.fragment,d),h(vw.$$.fragment,d),h(bw.$$.fragment,d),h(Tw.$$.fragment,d),h(Fw.$$.fragment,d),h(Mw.$$.fragment,d),h(Cw.$$.fragment,d),h(yw.$$.fragment,d),h(ww.$$.fragment,d),h(Aw.$$.fragment,d),h(xw.$$.fragment,d),h(Lw.$$.fragment,d),h(kw.$$.fragment,d),h(Rw.$$.fragment,d),h(Sw.$$.fragment,d),h($w.$$.fragment,d),h(Iw.$$.fragment,d),h(jw.$$.fragment,d),h(Dw.$$.fragment,d),h(Gw.$$.fragment,d),h(Ow.$$.fragment,d),h(qw.$$.fragment,d),m3e=!0)},o(d){u(ie.$$.fragment,d),u(ba.$$.fragment,d),u(IF.$$.fragment,d),u(jF.$$.fragment,d),u(Im.$$.fragment,d),u(NF.$$.fragment,d),u(DF.$$.fragment,d),u(qF.$$.fragment,d),u(zF.$$.fragment,d),u(XF.$$.fragment,d),u(WF.$$.fragment,d),u(VF.$$.fragment,d),u(UF.$$.fragment,d),u(JF.$$.fragment,d),u(KF.$$.fragment,d),u(YF.$$.fragment,d),u(ZF.$$.fragment,d),u(tM.$$.fragment,d),u(Kc.$$.fragment,d),u(rM.$$.fragment,d),u(aM.$$.fragment,d),u(nM.$$.fragment,d),u(iM.$$.fragment,d),u(sg.$$.fragment,d),u(dM.$$.fragment,d),u(mM.$$.fragment,d),u(fM.$$.fragment,d),u(gM.$$.fragment,d),u(hM.$$.fragment,d),u(uM.$$.fragment,d),u(pM.$$.fragment,d),u(_M.$$.fragment,d),u(vM.$$.fragment,d),u(TM.$$.fragment,d),u(FM.$$.fragment,d),u(MM.$$.fragment,d),u(EM.$$.fragment,d),u(CM.$$.fragment,d),u(yM.$$.fragment,d),u(AM.$$.fragment,d),u(xM.$$.fragment,d),u(LM.$$.fragment,d),u(BM.$$.fragment,d),u(kM.$$.fragment,d),u(RM.$$.fragment,d),u(PM.$$.fragment,d),u($M.$$.fragment,d),u(IM.$$.fragment,d),u(jM.$$.fragment,d),u(NM.$$.fragment,d),u(DM.$$.fragment,d),u(OM.$$.fragment,d),u(qM.$$.fragment,d),u(zM.$$.fragment,d),u(XM.$$.fragment,d),u(WM.$$.fragment,d),u(VM.$$.fragment,d),u(HM.$$.fragment,d),u(UM.$$.fragment,d),u(JM.$$.fragment,d),u(KM.$$.fragment,d),u(YM.$$.fragment,d),u(ZM.$$.fragment,d),u(oE.$$.fragment,d),u(tE.$$.fragment,d),u(rE.$$.fragment,d),u(aE.$$.fragment,d),u(nE.$$.fragment,d),u(sE.$$.f
ragment,d),u(iE.$$.fragment,d),u(dE.$$.fragment,d),u(mE.$$.fragment,d),u(fE.$$.fragment,d),u(cE.$$.fragment,d),u(gE.$$.fragment,d),u(uE.$$.fragment,d),u(pE.$$.fragment,d),u(_E.$$.fragment,d),u(vE.$$.fragment,d),u(bE.$$.fragment,d),u(TE.$$.fragment,d),u(ME.$$.fragment,d),u(EE.$$.fragment,d),u(CE.$$.fragment,d),u(yE.$$.fragment,d),u(wE.$$.fragment,d),u(AE.$$.fragment,d),u(LE.$$.fragment,d),u(BE.$$.fragment,d),u(kE.$$.fragment,d),u(RE.$$.fragment,d),u(SE.$$.fragment,d),u(PE.$$.fragment,d),u(IE.$$.fragment,d),u(jE.$$.fragment,d),u(NE.$$.fragment,d),u(DE.$$.fragment,d),u(GE.$$.fragment,d),u(OE.$$.fragment,d),u(zE.$$.fragment,d),u(XE.$$.fragment,d),u(WE.$$.fragment,d),u(VE.$$.fragment,d),u(QE.$$.fragment,d),u(HE.$$.fragment,d),u(JE.$$.fragment,d),u(KE.$$.fragment,d),u(YE.$$.fragment,d),u(ZE.$$.fragment,d),u(eC.$$.fragment,d),u(oC.$$.fragment,d),u(rC.$$.fragment,d),u(aC.$$.fragment,d),u(nC.$$.fragment,d),u(sC.$$.fragment,d),u(lC.$$.fragment,d),u(iC.$$.fragment,d),u(mC.$$.fragment,d),u(fC.$$.fragment,d),u(cC.$$.fragment,d),u(gC.$$.fragment,d),u(hC.$$.fragment,d),u(uC.$$.fragment,d),u(_C.$$.fragment,d),u(vC.$$.fragment,d),u(bC.$$.fragment,d),u(FC.$$.fragment,d),u(MC.$$.fragment,d),u(EC.$$.fragment,d),u(yC.$$.fragment,d),u(wC.$$.fragment,d),u(AC.$$.fragment,d),u(xC.$$.fragment,d),u(LC.$$.fragment,d),u(BC.$$.fragment,d),u(RC.$$.fragment,d),u(SC.$$.fragment,d),u(PC.$$.fragment,d),u($C.$$.fragment,d),u(IC.$$.fragment,d),u(jC.$$.fragment,d),u(DC.$$.fragment,d),u(GC.$$.fragment,d),u(OC.$$.fragment,d),u(qC.$$.fragment,d),u(zC.$$.fragment,d),u(XC.$$.fragment,d),u(VC.$$.fragment,d),u(QC.$$.fragment,d),u(HC.$$.fragment,d),u(UC.$$.fragment,d),u(JC.$$.fragment,d),u(KC.$$.fragment,d),u(ZC.$$.fragment,d),u(e3.$$.fragment,d),u(o3.$$.fragment,d),u(t3.$$.fragment,d),u(r3.$$.fragment,d),u(a3.$$.fragment,d),u(s3.$$.fragment,d),u(l3.$$.fragment,d),u(i3.$$.fragment,d),u(d3.$$.fragment,d),u(m3.$$.fragment,d),u(f3.$$.fragment,d),u(g3.$$.fragment,d),u(h3.$$.fragment,d),u(u3.$$.fragment,d),u(p3.$$.fragment,d),u(_3.$$.fragment,d),u(v3.$$.fragment,d),u(T3.$$.fragment,d),u(F3.$$.fragment,d),u(M3.$$.fragment,d),u(E3.$$.fragment,d),u(C3.$$.fragment,d),u(y3.$$.fragment,d),u(A3.$$.fragment,d),u(x3.$$.fragment,d),u(L3.$$.fragment,d),u(B3.$$.fragment,d),u(k3.$$.fragment,d),u(R3.$$.fragment,d),u(P3.$$.fragment,d),u($3.$$.fragment,d),u(I3.$$.fragment,d),u(j3.$$.fragment,d),u(N3.$$.fragment,d),u(D3.$$.fragment,d),u(O3.$$.fragment,d),u(q3.$$.fragment,d),u(z3.$$.fragment,d),u(X3.$$.fragment,d),u(W3.$$.fragment,d),u(V3.$$.fragment,d),u(H3.$$.fragment,d),u(U3.$$.fragment,d),u(J3.$$.fragment,d),u(K3.$$.fragment,d),u(Y3.$$.fragment,d),u(Z3.$$.fragment,d),u(oy.$$.fragment,d),u(ty.$$.fragment,d),u(ry.$$.fragment,d),u(ay.$$.fragment,d),u(ny.$$.fragment,d),u(sy.$$.fragment,d),u(iy.$$.fragment,d),u(dy.$$.fragment,d),u(my.$$.fragment,d),u(fy.$$.fragment,d),u(cy.$$.fragment,d),u(gy.$$.fragment,d),u(uy.$$.fragment,d),u(py.$$.fragment,d),u(_y.$$.fragment,d),u(vy.$$.fragment,d),u(by.$$.fragment,d),u(Ty.$$.fragment,d),u(My.$$.fragment,d),u(Ey.$$.fragment,d),u(Cy.$$.fragment,d),u(yy.$$.fragment,d),u(wy.$$.fragment,d),u(Ay.$$.fragment,d),u(Ly.$$.fragment,d),u(By.$$.fragment,d),u(ky.$$.fragment,d),u(Ry.$$.fragment,d),u(Sy.$$.fragment,d),u(Py.$$.fragment,d),u(Iy.$$.fragment,d),u(jy.$$.fragment,d),u(Ny.$$.fragment,d),u(Dy.$$.fragment,d),u(Gy.$$.fragment,d),u(Oy.$$.fragment,d),u(zy.$$.fragment,d),u(Xy.$$.fragment,d),u(Wy.$$.fragment,d),u(Vy.$$.fragment,d),u(Qy.$$.fragment,d),u(Hy.$$.fragment,d),u(Jy.$$.fragment,d),u(Ky.$$.fragment,d),u(Yy.$$.fragment,d),u(Zy
.$$.fragment,d),u(ew.$$.fragment,d),u(ow.$$.fragment,d),u(rw.$$.fragment,d),u(aw.$$.fragment,d),u(nw.$$.fragment,d),u(sw.$$.fragment,d),u(lw.$$.fragment,d),u(iw.$$.fragment,d),u(mw.$$.fragment,d),u(fw.$$.fragment,d),u(cw.$$.fragment,d),u(gw.$$.fragment,d),u(hw.$$.fragment,d),u(uw.$$.fragment,d),u(_w.$$.fragment,d),u(vw.$$.fragment,d),u(bw.$$.fragment,d),u(Tw.$$.fragment,d),u(Fw.$$.fragment,d),u(Mw.$$.fragment,d),u(Cw.$$.fragment,d),u(yw.$$.fragment,d),u(ww.$$.fragment,d),u(Aw.$$.fragment,d),u(xw.$$.fragment,d),u(Lw.$$.fragment,d),u(kw.$$.fragment,d),u(Rw.$$.fragment,d),u(Sw.$$.fragment,d),u($w.$$.fragment,d),u(Iw.$$.fragment,d),u(jw.$$.fragment,d),u(Dw.$$.fragment,d),u(Gw.$$.fragment,d),u(Ow.$$.fragment,d),u(qw.$$.fragment,d),m3e=!1},d(d){r(J),d&&r(ye),d&&r(se),p(ie),d&&r(km),d&&r(zr),d&&r(Fe),d&&r(Ze),d&&r(Sm),p(ba,d),d&&r(eo),d&&r(me),d&&r(ko),d&&r(Ta),d&&r(bEe),d&&r(Zl),p(IF),d&&r(TEe),d&&r(_n),d&&r(FEe),p(jF,d),d&&r(MEe),d&&r(OA),d&&r(EEe),p(Im,d),d&&r(CEe),d&&r(ei),p(NF),d&&r(yEe),d&&r(Ro),p(DF),p(qF),p(zF),p(XF),d&&r(wEe),d&&r(ti),p(WF),d&&r(AEe),d&&r(So),p(VF),p(UF),p(JF),p(KF),d&&r(xEe),d&&r(ri),p(YF),d&&r(LEe),d&&r(Gr),p(ZF),p(tM),p(Kc),p(rM),d&&r(BEe),d&&r(ai),p(aM),d&&r(kEe),d&&r(Or),p(nM),p(iM),p(sg),p(dM),d&&r(REe),d&&r(si),p(mM),d&&r(SEe),d&&r(Po),p(fM),p(gM),p(hM),p(uM),p(pM),d&&r(PEe),d&&r(di),p(_M),d&&r($Ee),d&&r($o),p(vM),p(TM),p(FM),p(MM),p(EM),d&&r(IEe),d&&r(ci),p(CM),d&&r(jEe),d&&r(Io),p(yM),p(AM),p(xM),p(LM),p(BM),d&&r(NEe),d&&r(ui),p(kM),d&&r(DEe),d&&r(jo),p(RM),p(PM),p($M),p(IM),p(jM),d&&r(GEe),d&&r(vi),p(NM),d&&r(OEe),d&&r(No),p(DM),p(OM),p(qM),p(zM),p(XM),d&&r(qEe),d&&r(Fi),p(WM),d&&r(zEe),d&&r(Do),p(VM),p(HM),p(UM),p(JM),p(KM),d&&r(XEe),d&&r(Ci),p(YM),d&&r(WEe),d&&r(Go),p(ZM),p(oE),p(tE),p(rE),p(aE),d&&r(VEe),d&&r(Ai),p(nE),d&&r(QEe),d&&r(Oo),p(sE),p(iE),p(dE),p(mE),p(fE),d&&r(HEe),d&&r(Bi),p(cE),d&&r(UEe),d&&r(qo),p(gE),p(uE),p(pE),p(_E),p(vE),d&&r(JEe),d&&r(Si),p(bE),d&&r(KEe),d&&r(zo),p(TE),p(ME),p(EE),p(CE),p(yE),d&&r(YEe),d&&r(Ii),p(wE),d&&r(ZEe),d&&r(Xo),p(AE),p(LE),p(BE),p(kE),p(RE),d&&r(eCe),d&&r(Di),p(SE),d&&r(oCe),d&&r(Wo),p(PE),p(IE),p(jE),p(NE),p(DE),d&&r(tCe),d&&r(qi),p(GE),d&&r(rCe),d&&r(Qo),p(OE),p(zE),p(XE),p(WE),p(VE),d&&r(aCe),d&&r(Wi),p(QE),d&&r(nCe),d&&r(Ho),p(HE),p(JE),p(KE),p(YE),p(ZE),d&&r(sCe),d&&r(Hi),p(eC),d&&r(lCe),d&&r(Uo),p(oC),p(rC),p(aC),p(nC),p(sC),d&&r(iCe),d&&r(Yi),p(lC),d&&r(dCe),d&&r(Jo),p(iC),p(mC),p(fC),p(cC),p(gC),d&&r(mCe),d&&r(od),p(hC),d&&r(fCe),d&&r(Ko),p(uC),p(_C),p(vC),p(bC),p(FC),d&&r(cCe),d&&r(ad),p(MC),d&&r(gCe),d&&r(Yo),p(EC),p(yC),p(wC),p(AC),p(xC),d&&r(hCe),d&&r(id),p(LC),d&&r(uCe),d&&r(Zo),p(BC),p(RC),p(SC),p(PC),p($C),d&&r(pCe),d&&r(fd),p(IC),d&&r(_Ce),d&&r(et),p(jC),p(DC),p(GC),p(OC),p(qC),d&&r(vCe),d&&r(hd),p(zC),d&&r(bCe),d&&r(ot),p(XC),p(VC),p(QC),p(HC),p(UC),d&&r(TCe),d&&r(_d),p(JC),d&&r(FCe),d&&r(tt),p(KC),p(ZC),p(e3),p(o3),p(t3),d&&r(MCe),d&&r(Td),p(r3),d&&r(ECe),d&&r(rt),p(a3),p(s3),p(l3),p(i3),p(d3),d&&r(CCe),d&&r(Ed),p(m3),d&&r(yCe),d&&r(at),p(f3),p(g3),p(h3),p(u3),p(p3),d&&r(wCe),d&&r(wd),p(_3),d&&r(ACe),d&&r(nt),p(v3),p(T3),p(F3),p(M3),p(E3),d&&r(xCe),d&&r(Ld),p(C3),d&&r(LCe),d&&r(st),p(y3),p(A3),p(x3),p(L3),p(B3),d&&r(BCe),d&&r(Rd),p(k3),d&&r(kCe),d&&r(lt),p(R3),p(P3),p($3),p(I3),p(j3),d&&r(RCe),d&&r($d),p(N3),d&&r(SCe),d&&r(it),p(D3),p(O3),p(q3),p(z3),p(X3),d&&r(PCe),d&&r(Nd),p(W3),d&&r($Ce),d&&r(dt),p(V3),p(H3),p(U3),p(J3),p(K3),d&&r(ICe),d&&r(Od),p(Y3),d&&r(jCe),d&&r(mt),p(Z3),p(oy),p(ty),p(ry),p(ay),d&&r(NCe),d&&r(Xd),p(ny),d&&r(DCe),d&&r(ft),p(sy),p(iy),p(dy),p(my),p(fy),d&&r(GCe),d&&r(Qd),p(cy
),d&&r(OCe),d&&r(ct),p(gy),p(uy),p(py),p(_y),p(vy),d&&r(qCe),d&&r(Jd),p(by),d&&r(zCe),d&&r(gt),p(Ty),p(My),p(Ey),p(Cy),p(yy),d&&r(XCe),d&&r(em),p(wy),d&&r(WCe),d&&r(ht),p(Ay),p(Ly),p(By),p(ky),p(Ry),d&&r(VCe),d&&r(rm),p(Sy),d&&r(QCe),d&&r(ut),p(Py),p(Iy),p(jy),p(Ny),p(Dy),d&&r(HCe),d&&r(sm),p(Gy),d&&r(UCe),d&&r(pt),p(Oy),p(zy),p(Xy),p(Wy),p(Vy),d&&r(JCe),d&&r(dm),p(Qy),d&&r(KCe),d&&r(_t),p(Hy),p(Jy),p(Ky),p(Yy),p(Zy),d&&r(YCe),d&&r(cm),p(ew),d&&r(ZCe),d&&r(vt),p(ow),p(rw),p(aw),p(nw),p(sw),d&&r(e3e),d&&r(um),p(lw),d&&r(o3e),d&&r(bt),p(iw),p(mw),p(fw),p(cw),p(gw),d&&r(t3e),d&&r(vm),p(hw),d&&r(r3e),d&&r(Ft),p(uw),p(_w),p(vw),p(bw),p(Tw),d&&r(a3e),d&&r(Fm),p(Fw),d&&r(n3e),d&&r(Et),p(Mw),p(Cw),p(yw),p(ww),p(Aw),d&&r(s3e),d&&r(Cm),p(xw),d&&r(l3e),d&&r(Ct),p(Lw),p(kw),p(Rw),p(Sw),p($w),d&&r(i3e),d&&r(Am),p(Iw),d&&r(d3e),d&&r(yt),p(jw),p(Dw),p(Gw),p(Ow),p(qw)}}}const VVt={local:"auto-classes",sections:[{local:"extending-the-auto-classes",title:"Extending the Auto Classes"},{local:"transformers.AutoConfig",title:"AutoConfig"},{local:"transformers.AutoTokenizer",title:"AutoTokenizer"},{local:"transformers.AutoFeatureExtractor",title:"AutoFeatureExtractor"},{local:"transformers.AutoProcessor",title:"AutoProcessor"},{local:"transformers.AutoModel",title:"AutoModel"},{local:"transformers.AutoModelForPreTraining",title:"AutoModelForPreTraining"},{local:"transformers.AutoModelForCausalLM",title:"AutoModelForCausalLM"},{local:"transformers.AutoModelForMaskedLM",title:"AutoModelForMaskedLM"},{local:"transformers.AutoModelForSeq2SeqLM",title:"AutoModelForSeq2SeqLM"},{local:"transformers.AutoModelForSequenceClassification",title:"AutoModelForSequenceClassification"},{local:"transformers.AutoModelForMultipleChoice",title:"AutoModelForMultipleChoice"},{local:"transformers.AutoModelForNextSentencePrediction",title:"AutoModelForNextSentencePrediction"},{local:"transformers.AutoModelForTokenClassification",title:"AutoModelForTokenClassification"},{local:"transformers.AutoModelForQuestionAnswering",title:"AutoModelForQuestionAnswering"},{local:"transformers.AutoModelForTableQuestionAnswering",title:"AutoModelForTableQuestionAnswering"},{local:"transformers.AutoModelForImageClassification",title:"AutoModelForImageClassification"},{local:"transformers.AutoModelForVision2Seq",title:"AutoModelForVision2Seq"},{local:"transformers.AutoModelForAudioClassification",title:"AutoModelForAudioClassification"},{local:"transformers.AutoModelForAudioFrameClassification",title:"AutoModelForAudioFrameClassification"},{local:"transformers.AutoModelForCTC",title:"AutoModelForCTC"},{local:"transformers.AutoModelForSpeechSeq2Seq",title:"AutoModelForSpeechSeq2Seq"},{local:"transformers.AutoModelForAudioXVector",title:"AutoModelForAudioXVector"},{local:"transformers.AutoModelForObjectDetection",title:"AutoModelForObjectDetection"},{local:"transformers.AutoModelForImageSegmentation",title:"AutoModelForImageSegmentation"},{local:"transformers.TFAutoModel",title:"TFAutoModel"},{local:"transformers.TFAutoModelForPreTraining",title:"TFAutoModelForPreTraining"},{local:"transformers.TFAutoModelForCausalLM",title:"TFAutoModelForCausalLM"},{local:"transformers.TFAutoModelForImageClassification",title:"TFAutoModelForImageClassification"},{local:"transformers.TFAutoModelForMaskedLM",title:"TFAutoModelForMaskedLM"},{local:"transformers.TFAutoModelForSeq2SeqLM",title:"TFAutoModelForSeq2SeqLM"},{local:"transformers.TFAutoModelForSequenceClassification",title:"TFAutoModelForSequenceClassification"},{local:"transformers.TFAutoModelForMultipleChoice",ti
tle:"TFAutoModelForMultipleChoice"},{local:"transformers.TFAutoModelForTableQuestionAnswering",title:"TFAutoModelForTableQuestionAnswering"},{local:"transformers.TFAutoModelForTokenClassification",title:"TFAutoModelForTokenClassification"},{local:"transformers.TFAutoModelForQuestionAnswering",title:"TFAutoModelForQuestionAnswering"},{local:"transformers.FlaxAutoModel",title:"FlaxAutoModel"},{local:"transformers.FlaxAutoModelForCausalLM",title:"FlaxAutoModelForCausalLM"},{local:"transformers.FlaxAutoModelForPreTraining",title:"FlaxAutoModelForPreTraining"},{local:"transformers.FlaxAutoModelForMaskedLM",title:"FlaxAutoModelForMaskedLM"},{local:"transformers.FlaxAutoModelForSeq2SeqLM",title:"FlaxAutoModelForSeq2SeqLM"},{local:"transformers.FlaxAutoModelForSequenceClassification",title:"FlaxAutoModelForSequenceClassification"},{local:"transformers.FlaxAutoModelForQuestionAnswering",title:"FlaxAutoModelForQuestionAnswering"},{local:"transformers.FlaxAutoModelForTokenClassification",title:"FlaxAutoModelForTokenClassification"},{local:"transformers.FlaxAutoModelForMultipleChoice",title:"FlaxAutoModelForMultipleChoice"},{local:"transformers.FlaxAutoModelForNextSentencePrediction",title:"FlaxAutoModelForNextSentencePrediction"},{local:"transformers.FlaxAutoModelForImageClassification",title:"FlaxAutoModelForImageClassification"},{local:"transformers.FlaxAutoModelForVision2Seq",title:"FlaxAutoModelForVision2Seq"}],title:"Auto Classes"};function QVt(Wl,J,ye){let{fw:se}=J;return Wl.$$set=de=>{"fw"in de&&ye(0,se=de.fw)},[se]}class eQt extends NVt{constructor(J){super();DVt(this,J,QVt,WVt,GVt,{fw:0})}}export{eQt as default,VVt as metadata};
9,910
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/barthez.mdx-281be95f.js
import{S as ls,i as cs,s as ds,e as o,k as c,w as g,t as a,L as hs,c as s,d as n,m as d,a as r,x as k,h as i,b as l,J as e,g as m,y as _,K as ms,q as v,o as b,B as z}from"../../chunks/vendor-b1433968.js";import{D as K}from"../../chunks/Docstring-ff504c58.js";import{I as at}from"../../chunks/IconCopyLink-7029626d.js";function ps(Ft){let B,ve,T,w,Se,X,It,Ce,St,it,L,I,Ne,j,Ct,Oe,Nt,lt,S,Ot,G,Ut,Mt,ct,be,Vt,dt,ze,Ue,Wt,ht,E,Ht,Q,Jt,Kt,Y,Xt,jt,mt,P,C,Me,Z,Gt,Ve,Qt,pt,Te,ee,Yt,te,Zt,en,ft,R,N,We,ne,tn,He,nn,ut,p,oe,on,q,sn,we,rn,an,qe,ln,cn,se,dn,hn,mn,re,pn,ye,fn,un,gn,D,kn,Je,_n,vn,Ke,bn,zn,Tn,$,ae,wn,Xe,qn,yn,ie,Be,Bn,je,En,$n,Ee,An,Ge,xn,Ln,O,le,Pn,Qe,Rn,Dn,U,ce,Fn,Ye,In,Sn,M,de,Cn,he,Nn,Ze,On,Un,gt,F,V,et,me,Mn,tt,Vn,kt,u,pe,Wn,y,Hn,$e,Jn,Kn,Ae,Xn,jn,fe,Gn,Qn,Yn,ue,Zn,xe,eo,to,no,A,ge,oo,nt,so,ro,ke,Le,ao,ot,io,lo,Pe,co,st,ho,mo,W,_e,po,rt,fo,_t;return X=new at({}),j=new at({}),Z=new at({}),ne=new at({}),oe=new K({props:{name:"class transformers.BarthezTokenizer",anchor:"transformers.BarthezTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez.py#L49",parametersDescription:[{anchor:"transformers.BarthezTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.BarthezTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.BarthezTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.BarthezTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BarthezTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BarthezTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BarthezTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BarthezTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.BarthezTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.BarthezTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),ae=new K({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BarthezTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez.py#L161",parametersDescription:[{anchor:"transformers.BarthezTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BarthezTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),le=new K({props:{name:"convert_tokens_to_string",anchor:"transformers.BarthezTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez.py#L277"}}),ce=new K({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BarthezTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez.py#L214",parametersDescription:[{anchor:"transformers.BarthezTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BarthezTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),de=new K({props:{name:"get_special_tokens_mask",anchor:"transformers.BarthezTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez.py#L187",parametersDescription:[{anchor:"transformers.BarthezTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.BarthezTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BarthezTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),me=new at({}),pe=new K({props:{name:"class transformers.BarthezTokenizerFast",anchor:"transformers.BarthezTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez_fast.py#L59",parametersDescription:[{anchor:"transformers.BarthezTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.BarthezTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.BarthezTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.BarthezTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BarthezTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BarthezTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BarthezTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BarthezTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.BarthezTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),ge=new K({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BarthezTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez_fast.py#L148",parametersDescription:[{anchor:"transformers.BarthezTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BarthezTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),_e=new K({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BarthezTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/barthez/tokenization_barthez_fast.py#L174",parametersDescription:[{anchor:"transformers.BarthezTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) 
&#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BarthezTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){B=o("meta"),ve=c(),T=o("h1"),w=o("a"),Se=o("span"),g(X.$$.fragment),It=c(),Ce=o("span"),St=a("BARThez"),it=c(),L=o("h2"),I=o("a"),Ne=o("span"),g(j.$$.fragment),Ct=c(),Oe=o("span"),Nt=a("Overview"),lt=c(),S=o("p"),Ot=a("The BARThez model was proposed in "),G=o("a"),Ut=a("BARThez: a Skilled Pretrained French Sequence-to-Sequence Model"),Mt=a(` by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct, 2020.`),ct=c(),be=o("p"),Vt=a("The abstract of the paper:"),dt=c(),ze=o("p"),Ue=o("em"),Wt=a(`Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing (NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language understanding tasks. While there are some notable exceptions, most of the available models and research have been conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language (to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research that we adapted to suit BART\u2019s perturbation schemes. Unlike already existing BERT-based French language models such as CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already pretrained multilingual BART on BARThez\u2019s corpus, and we show that the resulting model, which we call mBARTHez, provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.`),ht=c(),E=o("p"),Ht=a("This model was contributed by "),Q=o("a"),Jt=a("moussakam"),Kt=a(". The Authors\u2019 code can be found "),Y=o("a"),Xt=a("here"),jt=a("."),mt=c(),P=o("h3"),C=o("a"),Me=o("span"),g(Z.$$.fragment),Gt=c(),Ve=o("span"),Qt=a("Examples"),pt=c(),Te=o("ul"),ee=o("li"),Yt=a(`BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check: `),te=o("a"),Zt=a("examples/pytorch/summarization/"),en=a("."),ft=c(),R=o("h2"),N=o("a"),We=o("span"),g(ne.$$.fragment),tn=c(),He=o("span"),nn=a("BarthezTokenizer"),ut=c(),p=o("div"),g(oe.$$.fragment),on=c(),q=o("p"),sn=a("Adapted from "),we=o("a"),rn=a("CamembertTokenizer"),an=a(" and "),qe=o("a"),ln=a("BartTokenizer"),cn=a(`. Construct a BARThez tokenizer. Based on `),se=o("a"),dn=a("SentencePiece"),hn=a("."),mn=c(),re=o("p"),pn=a("This tokenizer inherits from "),ye=o("a"),fn=a("PreTrainedTokenizer"),un=a(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),gn=c(),D=o("p"),kn=a(`Attributes: sp_model (`),Je=o("code"),_n=a("SentencePieceProcessor"),vn=a(`): The `),Ke=o("em"),bn=a("SentencePiece"),zn=a(" processor that is used for every conversion (string, tokens and IDs)."),Tn=c(),$=o("div"),g(ae.$$.fragment),wn=c(),Xe=o("p"),qn=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BARThez sequence has the following format:`),yn=c(),ie=o("ul"),Be=o("li"),Bn=a("single sequence: "),je=o("code"),En=a("<s> X </s>"),$n=c(),Ee=o("li"),An=a("pair of sequences: "),Ge=o("code"),xn=a("<s> A </s></s> B </s>"),Ln=c(),O=o("div"),g(le.$$.fragment),Pn=c(),Qe=o("p"),Rn=a("Converts a sequence of tokens (strings for sub-words) in a single string."),Dn=c(),U=o("div"),g(ce.$$.fragment),Fn=c(),Ye=o("p"),In=a("Create a mask from the two sequences passed to be used in a sequence-pair classification task."),Sn=c(),M=o("div"),g(de.$$.fragment),Cn=c(),he=o("p"),Nn=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Ze=o("code"),On=a("prepare_for_model"),Un=a(" method."),gt=c(),F=o("h2"),V=o("a"),et=o("span"),g(me.$$.fragment),Mn=c(),tt=o("span"),Vn=a("BarthezTokenizerFast"),kt=c(),u=o("div"),g(pe.$$.fragment),Wn=c(),y=o("p"),Hn=a("Adapted from "),$e=o("a"),Jn=a("CamembertTokenizer"),Kn=a(" and "),Ae=o("a"),Xn=a("BartTokenizer"),jn=a(`. Construct a \u201Cfast\u201D BARThez tokenizer. Based on `),fe=o("a"),Gn=a("SentencePiece"),Qn=a("."),Yn=c(),ue=o("p"),Zn=a("This tokenizer inherits from "),xe=o("a"),eo=a("PreTrainedTokenizerFast"),to=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),no=c(),A=o("div"),g(ge.$$.fragment),oo=c(),nt=o("p"),so=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BARThez sequence has the following format:`),ro=c(),ke=o("ul"),Le=o("li"),ao=a("single sequence: "),ot=o("code"),io=a("<s> X </s>"),lo=c(),Pe=o("li"),co=a("pair of sequences: "),st=o("code"),ho=a("<s> A </s></s> B </s>"),mo=c(),W=o("div"),g(_e.$$.fragment),po=c(),rt=o("p"),fo=a("Create a mask from the two sequences passed to be used in a sequence-pair classification task."),this.h()},l(t){const h=hs('[data-svelte="svelte-1phssyn"]',document.head);B=s(h,"META",{name:!0,content:!0}),h.forEach(n),ve=d(t),T=s(t,"H1",{class:!0});var vt=r(T);w=s(vt,"A",{id:!0,class:!0,href:!0});var vo=r(w);Se=s(vo,"SPAN",{});var bo=r(Se);k(X.$$.fragment,bo),bo.forEach(n),vo.forEach(n),It=d(vt),Ce=s(vt,"SPAN",{});var zo=r(Ce);St=i(zo,"BARThez"),zo.forEach(n),vt.forEach(n),it=d(t),L=s(t,"H2",{class:!0});var bt=r(L);I=s(bt,"A",{id:!0,class:!0,href:!0});var To=r(I);Ne=s(To,"SPAN",{});var wo=r(Ne);k(j.$$.fragment,wo),wo.forEach(n),To.forEach(n),Ct=d(bt),Oe=s(bt,"SPAN",{});var qo=r(Oe);Nt=i(qo,"Overview"),qo.forEach(n),bt.forEach(n),lt=d(t),S=s(t,"P",{});var zt=r(S);Ot=i(zt,"The BARThez model was proposed in "),G=s(zt,"A",{href:!0,rel:!0});var yo=r(G);Ut=i(yo,"BARThez: a Skilled Pretrained French Sequence-to-Sequence Model"),yo.forEach(n),Mt=i(zt,` by Moussa Kamal Eddine, Antoine J.-P. 
Tixier, Michalis Vazirgiannis on 23 Oct, 2020.`),zt.forEach(n),ct=d(t),be=s(t,"P",{});var Bo=r(be);Vt=i(Bo,"The abstract of the paper:"),Bo.forEach(n),dt=d(t),ze=s(t,"P",{});var Eo=r(ze);Ue=s(Eo,"EM",{});var $o=r(Ue);Wt=i($o,`Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing (NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language understanding tasks. While there are some notable exceptions, most of the available models and research have been conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language (to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research that we adapted to suit BART\u2019s perturbation schemes. Unlike already existing BERT-based French language models such as CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already pretrained multilingual BART on BARThez\u2019s corpus, and we show that the resulting model, which we call mBARTHez, provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.`),$o.forEach(n),Eo.forEach(n),ht=d(t),E=s(t,"P",{});var Re=r(E);Ht=i(Re,"This model was contributed by "),Q=s(Re,"A",{href:!0,rel:!0});var Ao=r(Q);Jt=i(Ao,"moussakam"),Ao.forEach(n),Kt=i(Re,". The Authors\u2019 code can be found "),Y=s(Re,"A",{href:!0,rel:!0});var xo=r(Y);Xt=i(xo,"here"),xo.forEach(n),jt=i(Re,"."),Re.forEach(n),mt=d(t),P=s(t,"H3",{class:!0});var Tt=r(P);C=s(Tt,"A",{id:!0,class:!0,href:!0});var Lo=r(C);Me=s(Lo,"SPAN",{});var Po=r(Me);k(Z.$$.fragment,Po),Po.forEach(n),Lo.forEach(n),Gt=d(Tt),Ve=s(Tt,"SPAN",{});var Ro=r(Ve);Qt=i(Ro,"Examples"),Ro.forEach(n),Tt.forEach(n),pt=d(t),Te=s(t,"UL",{});var Do=r(Te);ee=s(Do,"LI",{});var wt=r(ee);Yt=i(wt,`BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check: `),te=s(wt,"A",{href:!0,rel:!0});var Fo=r(te);Zt=i(Fo,"examples/pytorch/summarization/"),Fo.forEach(n),en=i(wt,"."),wt.forEach(n),Do.forEach(n),ft=d(t),R=s(t,"H2",{class:!0});var qt=r(R);N=s(qt,"A",{id:!0,class:!0,href:!0});var Io=r(N);We=s(Io,"SPAN",{});var So=r(We);k(ne.$$.fragment,So),So.forEach(n),Io.forEach(n),tn=d(qt),He=s(qt,"SPAN",{});var Co=r(He);nn=i(Co,"BarthezTokenizer"),Co.forEach(n),qt.forEach(n),ut=d(t),p=s(t,"DIV",{class:!0});var f=r(p);k(oe.$$.fragment,f),on=d(f),q=s(f,"P",{});var H=r(q);sn=i(H,"Adapted from "),we=s(H,"A",{href:!0});var No=r(we);rn=i(No,"CamembertTokenizer"),No.forEach(n),an=i(H," and "),qe=s(H,"A",{href:!0});var Oo=r(qe);ln=i(Oo,"BartTokenizer"),Oo.forEach(n),cn=i(H,`. Construct a BARThez tokenizer. Based on `),se=s(H,"A",{href:!0,rel:!0});var Uo=r(se);dn=i(Uo,"SentencePiece"),Uo.forEach(n),hn=i(H,"."),H.forEach(n),mn=d(f),re=s(f,"P",{});var yt=r(re);pn=i(yt,"This tokenizer inherits from "),ye=s(yt,"A",{href:!0});var Mo=r(ye);fn=i(Mo,"PreTrainedTokenizer"),Mo.forEach(n),un=i(yt,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),yt.forEach(n),gn=d(f),D=s(f,"P",{});var De=r(D);kn=i(De,`Attributes: sp_model (`),Je=s(De,"CODE",{});var Vo=r(Je);_n=i(Vo,"SentencePieceProcessor"),Vo.forEach(n),vn=i(De,`): The `),Ke=s(De,"EM",{});var Wo=r(Ke);bn=i(Wo,"SentencePiece"),Wo.forEach(n),zn=i(De," processor that is used for every conversion (string, tokens and IDs)."),De.forEach(n),Tn=d(f),$=s(f,"DIV",{class:!0});var Fe=r($);k(ae.$$.fragment,Fe),wn=d(Fe),Xe=s(Fe,"P",{});var Ho=r(Xe);qn=i(Ho,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BARThez sequence has the following format:`),Ho.forEach(n),yn=d(Fe),ie=s(Fe,"UL",{});var Bt=r(ie);Be=s(Bt,"LI",{});var uo=r(Be);Bn=i(uo,"single sequence: "),je=s(uo,"CODE",{});var Jo=r(je);En=i(Jo,"<s> X </s>"),Jo.forEach(n),uo.forEach(n),$n=d(Bt),Ee=s(Bt,"LI",{});var go=r(Ee);An=i(go,"pair of sequences: "),Ge=s(go,"CODE",{});var Ko=r(Ge);xn=i(Ko,"<s> A </s></s> B </s>"),Ko.forEach(n),go.forEach(n),Bt.forEach(n),Fe.forEach(n),Ln=d(f),O=s(f,"DIV",{class:!0});var Et=r(O);k(le.$$.fragment,Et),Pn=d(Et),Qe=s(Et,"P",{});var Xo=r(Qe);Rn=i(Xo,"Converts a sequence of tokens (strings for sub-words) in a single string."),Xo.forEach(n),Et.forEach(n),Dn=d(f),U=s(f,"DIV",{class:!0});var $t=r(U);k(ce.$$.fragment,$t),Fn=d($t),Ye=s($t,"P",{});var jo=r(Ye);In=i(jo,"Create a mask from the two sequences passed to be used in a sequence-pair classification task."),jo.forEach(n),$t.forEach(n),Sn=d(f),M=s(f,"DIV",{class:!0});var At=r(M);k(de.$$.fragment,At),Cn=d(At),he=s(At,"P",{});var xt=r(he);Nn=i(xt,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Ze=s(xt,"CODE",{});var Go=r(Ze);On=i(Go,"prepare_for_model"),Go.forEach(n),Un=i(xt," method."),xt.forEach(n),At.forEach(n),f.forEach(n),gt=d(t),F=s(t,"H2",{class:!0});var Lt=r(F);V=s(Lt,"A",{id:!0,class:!0,href:!0});var Qo=r(V);et=s(Qo,"SPAN",{});var Yo=r(et);k(me.$$.fragment,Yo),Yo.forEach(n),Qo.forEach(n),Mn=d(Lt),tt=s(Lt,"SPAN",{});var Zo=r(tt);Vn=i(Zo,"BarthezTokenizerFast"),Zo.forEach(n),Lt.forEach(n),kt=d(t),u=s(t,"DIV",{class:!0});var x=r(u);k(pe.$$.fragment,x),Wn=d(x),y=s(x,"P",{});var J=r(y);Hn=i(J,"Adapted from "),$e=s(J,"A",{href:!0});var es=r($e);Jn=i(es,"CamembertTokenizer"),es.forEach(n),Kn=i(J," and "),Ae=s(J,"A",{href:!0});var ts=r(Ae);Xn=i(ts,"BartTokenizer"),ts.forEach(n),jn=i(J,`. Construct a \u201Cfast\u201D BARThez tokenizer. Based on `),fe=s(J,"A",{href:!0,rel:!0});var ns=r(fe);Gn=i(ns,"SentencePiece"),ns.forEach(n),Qn=i(J,"."),J.forEach(n),Yn=d(x),ue=s(x,"P",{});var Pt=r(ue);Zn=i(Pt,"This tokenizer inherits from "),xe=s(Pt,"A",{href:!0});var os=r(xe);eo=i(os,"PreTrainedTokenizerFast"),os.forEach(n),to=i(Pt,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Pt.forEach(n),no=d(x),A=s(x,"DIV",{class:!0});var Ie=r(A);k(ge.$$.fragment,Ie),oo=d(Ie),nt=s(Ie,"P",{});var ss=r(nt);so=i(ss,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A BARThez sequence has the following format:`),ss.forEach(n),ro=d(Ie),ke=s(Ie,"UL",{});var Rt=r(ke);Le=s(Rt,"LI",{});var ko=r(Le);ao=i(ko,"single sequence: "),ot=s(ko,"CODE",{});var rs=r(ot);io=i(rs,"<s> X </s>"),rs.forEach(n),ko.forEach(n),lo=d(Rt),Pe=s(Rt,"LI",{});var _o=r(Pe);co=i(_o,"pair of sequences: "),st=s(_o,"CODE",{});var as=r(st);ho=i(as,"<s> A </s></s> B </s>"),as.forEach(n),_o.forEach(n),Rt.forEach(n),Ie.forEach(n),mo=d(x),W=s(x,"DIV",{class:!0});var Dt=r(W);k(_e.$$.fragment,Dt),po=d(Dt),rt=s(Dt,"P",{});var is=r(rt);fo=i(is,"Create a mask from the two sequences passed to be used in a sequence-pair classification task."),is.forEach(n),Dt.forEach(n),x.forEach(n),this.h()},h(){l(B,"name","hf:doc:metadata"),l(B,"content",JSON.stringify(fs)),l(w,"id","barthez"),l(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(w,"href","#barthez"),l(T,"class","relative group"),l(I,"id","overview"),l(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(I,"href","#overview"),l(L,"class","relative group"),l(G,"href","https://arxiv.org/abs/2010.12321"),l(G,"rel","nofollow"),l(Q,"href","https://huggingface.co/moussakam"),l(Q,"rel","nofollow"),l(Y,"href","https://github.com/moussaKam/BARThez"),l(Y,"rel","nofollow"),l(C,"id","examples"),l(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(C,"href","#examples"),l(P,"class","relative group"),l(te,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization/README.md"),l(te,"rel","nofollow"),l(N,"id","transformers.BarthezTokenizer"),l(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(N,"href","#transformers.BarthezTokenizer"),l(R,"class","relative group"),l(we,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertTokenizer"),l(qe,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),l(se,"href","https://github.com/google/sentencepiece"),l(se,"rel","nofollow"),l(ye,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l($,"class","docstring"),l(O,"class","docstring"),l(U,"class","docstring"),l(M,"class","docstring"),l(p,"class","docstring"),l(V,"id","transformers.BarthezTokenizerFast"),l(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(V,"href","#transformers.BarthezTokenizerFast"),l(F,"class","relative 
group"),l($e,"href","/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertTokenizer"),l(Ae,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),l(fe,"href","https://github.com/google/sentencepiece"),l(fe,"rel","nofollow"),l(xe,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(A,"class","docstring"),l(W,"class","docstring"),l(u,"class","docstring")},m(t,h){e(document.head,B),m(t,ve,h),m(t,T,h),e(T,w),e(w,Se),_(X,Se,null),e(T,It),e(T,Ce),e(Ce,St),m(t,it,h),m(t,L,h),e(L,I),e(I,Ne),_(j,Ne,null),e(L,Ct),e(L,Oe),e(Oe,Nt),m(t,lt,h),m(t,S,h),e(S,Ot),e(S,G),e(G,Ut),e(S,Mt),m(t,ct,h),m(t,be,h),e(be,Vt),m(t,dt,h),m(t,ze,h),e(ze,Ue),e(Ue,Wt),m(t,ht,h),m(t,E,h),e(E,Ht),e(E,Q),e(Q,Jt),e(E,Kt),e(E,Y),e(Y,Xt),e(E,jt),m(t,mt,h),m(t,P,h),e(P,C),e(C,Me),_(Z,Me,null),e(P,Gt),e(P,Ve),e(Ve,Qt),m(t,pt,h),m(t,Te,h),e(Te,ee),e(ee,Yt),e(ee,te),e(te,Zt),e(ee,en),m(t,ft,h),m(t,R,h),e(R,N),e(N,We),_(ne,We,null),e(R,tn),e(R,He),e(He,nn),m(t,ut,h),m(t,p,h),_(oe,p,null),e(p,on),e(p,q),e(q,sn),e(q,we),e(we,rn),e(q,an),e(q,qe),e(qe,ln),e(q,cn),e(q,se),e(se,dn),e(q,hn),e(p,mn),e(p,re),e(re,pn),e(re,ye),e(ye,fn),e(re,un),e(p,gn),e(p,D),e(D,kn),e(D,Je),e(Je,_n),e(D,vn),e(D,Ke),e(Ke,bn),e(D,zn),e(p,Tn),e(p,$),_(ae,$,null),e($,wn),e($,Xe),e(Xe,qn),e($,yn),e($,ie),e(ie,Be),e(Be,Bn),e(Be,je),e(je,En),e(ie,$n),e(ie,Ee),e(Ee,An),e(Ee,Ge),e(Ge,xn),e(p,Ln),e(p,O),_(le,O,null),e(O,Pn),e(O,Qe),e(Qe,Rn),e(p,Dn),e(p,U),_(ce,U,null),e(U,Fn),e(U,Ye),e(Ye,In),e(p,Sn),e(p,M),_(de,M,null),e(M,Cn),e(M,he),e(he,Nn),e(he,Ze),e(Ze,On),e(he,Un),m(t,gt,h),m(t,F,h),e(F,V),e(V,et),_(me,et,null),e(F,Mn),e(F,tt),e(tt,Vn),m(t,kt,h),m(t,u,h),_(pe,u,null),e(u,Wn),e(u,y),e(y,Hn),e(y,$e),e($e,Jn),e(y,Kn),e(y,Ae),e(Ae,Xn),e(y,jn),e(y,fe),e(fe,Gn),e(y,Qn),e(u,Yn),e(u,ue),e(ue,Zn),e(ue,xe),e(xe,eo),e(ue,to),e(u,no),e(u,A),_(ge,A,null),e(A,oo),e(A,nt),e(nt,so),e(A,ro),e(A,ke),e(ke,Le),e(Le,ao),e(Le,ot),e(ot,io),e(ke,lo),e(ke,Pe),e(Pe,co),e(Pe,st),e(st,ho),e(u,mo),e(u,W),_(_e,W,null),e(W,po),e(W,rt),e(rt,fo),_t=!0},p:ms,i(t){_t||(v(X.$$.fragment,t),v(j.$$.fragment,t),v(Z.$$.fragment,t),v(ne.$$.fragment,t),v(oe.$$.fragment,t),v(ae.$$.fragment,t),v(le.$$.fragment,t),v(ce.$$.fragment,t),v(de.$$.fragment,t),v(me.$$.fragment,t),v(pe.$$.fragment,t),v(ge.$$.fragment,t),v(_e.$$.fragment,t),_t=!0)},o(t){b(X.$$.fragment,t),b(j.$$.fragment,t),b(Z.$$.fragment,t),b(ne.$$.fragment,t),b(oe.$$.fragment,t),b(ae.$$.fragment,t),b(le.$$.fragment,t),b(ce.$$.fragment,t),b(de.$$.fragment,t),b(me.$$.fragment,t),b(pe.$$.fragment,t),b(ge.$$.fragment,t),b(_e.$$.fragment,t),_t=!1},d(t){n(B),t&&n(ve),t&&n(T),z(X),t&&n(it),t&&n(L),z(j),t&&n(lt),t&&n(S),t&&n(ct),t&&n(be),t&&n(dt),t&&n(ze),t&&n(ht),t&&n(E),t&&n(mt),t&&n(P),z(Z),t&&n(pt),t&&n(Te),t&&n(ft),t&&n(R),z(ne),t&&n(ut),t&&n(p),z(oe),z(ae),z(le),z(ce),z(de),t&&n(gt),t&&n(F),z(me),t&&n(kt),t&&n(u),z(pe),z(ge),z(_e)}}}const fs={local:"barthez",sections:[{local:"overview",sections:[{local:"examples",title:"Examples"}],title:"Overview"},{local:"transformers.BarthezTokenizer",title:"BarthezTokenizer"},{local:"transformers.BarthezTokenizerFast",title:"BarthezTokenizerFast"}],title:"BARThez"};function us(Ft,B,ve){let{fw:T}=B;return Ft.$$set=w=>{"fw"in w&&ve(0,T=w.fw)},[T]}class vs extends ls{constructor(B){super();cs(this,B,us,ps,ds,{fw:0})}}export{vs as default,fs as metadata};
9,911
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/sew_d.mdx-34927389.js
import{S as vs,i as ws,s as bs,e as n,k as d,w as v,t as r,L as ys,c as a,d as o,m as c,a as s,x as w,h as i,b as l,J as e,g as f,y as b,q as y,o as E,B as S}from"../../chunks/vendor-b1433968.js";import{T as Ea}from"../../chunks/Tip-c3840994.js";import{D as Ue}from"../../chunks/Docstring-ff504c58.js";import{C as uo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as pt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Es(K){let m,W,h,_,D;return{c(){m=n("p"),W=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),_=r("Module"),D=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){m=a(u,"P",{});var g=s(m);W=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=a(g,"CODE",{});var $=s(h);_=i($,"Module"),$.forEach(o),D=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){f(u,m,g),e(m,W),e(m,h),e(h,_),e(m,D)},d(u){u&&o(m)}}}function Ss(K){let m,W,h,_,D;return{c(){m=n("p"),W=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),_=r("Module"),D=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){m=a(u,"P",{});var g=s(m);W=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=a(g,"CODE",{});var $=s(h);_=i($,"Module"),$.forEach(o),D=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){f(u,m,g),e(m,W),e(m,h),e(h,_),e(m,D)},d(u){u&&o(m)}}}function Ws(K){let m,W,h,_,D;return{c(){m=n("p"),W=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),_=r("Module"),D=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){m=a(u,"P",{});var g=s(m);W=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=a(g,"CODE",{});var $=s(h);_=i($,"Module"),$.forEach(o),D=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){f(u,m,g),e(m,W),e(m,h),e(h,_),e(m,D)},d(u){u&&o(m)}}}function Ds(K){let m,W,h,_,D,u,g,$,go,Mt,L,Z,mt,ce,_o,ft,vo,zt,ee,wo,pe,bo,yo,At,Re,Eo,Vt,Je,ht,So,Nt,Be,Wo,Ot,te,ut,Do,Co,me,ko,Qe,To,xo,Kt,oe,$o,fe,qo,jo,Lt,I,ne,gt,he,Po,_t,Fo,It,k,ue,Mo,H,zo,Ye,Ao,Vo,ge,No,Oo,Ko,U,Lo,Ge,Io,Ho,Xe,Uo,Ro,Jo,vt,Bo,Qo,_e,Ht,R,ae,wt,ve,Yo,bt,Go,Ut,T,we,Xo,be,Zo,ye,en,tn,on,Ee,nn,Ze,an,sn,rn,Se,ln,We,dn,cn,pn,q,De,mn,J,fn,et,hn,un,yt,gn,_n,vn,se,wn,Et,bn,yn,Ce,Rt,B,re,St,ke,En,Wt,Sn,Jt,x,Te,Wn,Q,Dn,Dt,Cn,kn,xe,Tn,xn,$n,$e,qn,tt,jn,Pn,Fn,qe,Mn,je,zn,An,Vn,j,Pe,Nn,Y,On,ot,Kn,Ln,Ct,In,Hn,Un,ie,Rn,kt,Jn,Bn,Fe,Bt,G,le,Tt,Me,Qn,xt,Yn,Qt,C,ze,Gn,$t,Xn,Zn,Ae,ea,Ve,ta,oa,na,Ne,aa,nt,sa,ra,ia,Oe,la,Ke,da,ca,pa,P,Le,ma,X,fa,at,ha,ua,qt,ga,_a,va,de,wa,jt,ba,ya,Ie,Yt;return u=new pt({}),ce=new pt({}),he=new pt({}),ue=new Ue({props:{name:"class 
transformers.SEWDConfig",anchor:"transformers.SEWDConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"squeeze_factor",val:" = 2"},{name:"max_position_embeddings",val:" = 512"},{name:"position_buckets",val:" = 256"},{name:"share_att_key",val:" = True"},{name:"relative_attention",val:" = True"},{name:"position_biased_input",val:" = False"},{name:"pos_att_type",val:" = ('p2c', 'c2p')"},{name:"norm_rel_ebd",val:" = 'layer_norm'"},{name:"hidden_act",val:" = 'gelu_python'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-07"},{name:"feature_layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)"},{name:"conv_kernel",val:" = (10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"ctc_loss_reduction",val:" = 'mean'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/configuration_sew_d.py#L29",parametersDescription:[{anchor:"transformers.SEWDConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the SEW-D model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>SEWD</code>.`,name:"vocab_size"},{anchor:"transformers.SEWDConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.SEWDConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.SEWDConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SEWDConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.SEWDConfig.squeeze_factor",description:`<strong>squeeze_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Sequence length downsampling factor after the encoder and upsampling factor after the transformer.`,name:"squeeze_factor"},{anchor:"transformers.SEWDConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.SEWDConfig.position_buckets",description:`<strong>position_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The maximum size of relative position embeddings.`,name:"position_buckets"},{anchor:"transformers.SEWDConfig.share_att_key",description:`<strong>share_att_key</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to share attention key with c2p and p2c.`,name:"share_att_key"},{anchor:"transformers.SEWDConfig.relative_attention",description:`<strong>relative_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use relative position encoding.`,name:"relative_attention"},{anchor:"transformers.SEWDConfig.position_biased_input",description:`<strong>position_biased_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to add absolute position embedding to content embedding.`,name:"position_biased_input"},{anchor:"transformers.SEWDConfig.pos_att_type",description:`<strong>pos_att_type</strong> (<code>Tuple[str]</code>, <em>optional</em>, defaults to <code>(&quot;p2c&quot;, &quot;c2p&quot;)</code>) &#x2014; The type of relative position attention, it can be a combination of <code>(&quot;p2c&quot;, &quot;c2p&quot;, &quot;p2p&quot;)</code>, e.g. 
<code>(&quot;p2c&quot;)</code>, <code>(&quot;p2c&quot;, &quot;c2p&quot;)</code>, <code>(&quot;p2c&quot;, &quot;c2p&quot;, &apos;p2p&quot;)</code>.`,name:"pos_att_type"},{anchor:"transformers.SEWDConfig.norm_rel_ebd",description:`<strong>norm_rel_ebd</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;layer_norm&quot;</code>) &#x2014; Whether to use layer norm in relative embedding (<code>&quot;layer_norm&quot;</code> if yes)`,name:"norm_rel_ebd"},{anchor:"transformers.SEWDConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_python&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code>, <code>&quot;gelu_python&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SEWDConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.SEWDConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.SEWDConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForCTC">SEWDForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.SEWDConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SEWDConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon used by the layer normalization layers in the transformer encoder.`,name:"layer_norm_eps"},{anchor:"transformers.SEWDConfig.feature_layer_norm_eps",description:`<strong>feature_layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon used by the layer normalization after the feature extractor.`,name:"feature_layer_norm_eps"},{anchor:"transformers.SEWDConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature extractor. 
One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.SEWDConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.SEWDConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.SEWDConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.SEWDConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.SEWDConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.SEWDConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.SEWDConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.SEWDConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.SEWDConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. 
For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.SEWDConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.SEWDConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.SEWDConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.SEWDConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.SEWDConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.SEWDConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. 
Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.SEWDConfig.diversity_loss_weight",description:`<strong>diversity_loss_weight</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The weight of the codebook diversity loss component.`,name:"diversity_loss_weight"},{anchor:"transformers.SEWDConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForCTC">SEWDForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.SEWDConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForCTC">SEWDForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.SEWDConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification">Wav2Vec2ForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.SEWDConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"}]}}),_e=new uo({props:{code:`from transformers import SEWDModel, SEWDConfig # Initializing a SEW-D asapp/sew-d-tiny-100k style configuration configuration = SEWDConfig() # Initializing a model from the asapp/sew-d-tiny-100k style configuration model = SEWDModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SEWDModel, SEWDConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SEW-D asapp/sew-d-tiny-100k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SEWDConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the asapp/sew-d-tiny-100k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWDModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),ve=new pt({}),we=new Ue({props:{name:"class transformers.SEWDModel",anchor:"transformers.SEWDModel",parameters:[{name:"config",val:": 
SEWDConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/modeling_sew_d.py#L1328",parametersDescription:[{anchor:"transformers.SEWDModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig">SEWDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),De=new Ue({props:{name:"forward",anchor:"transformers.SEWDModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/modeling_sew_d.py#L1394",parametersDescription:[{anchor:"transformers.SEWDModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SEWDModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SEWDModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SEWDModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SEWDModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig" >SEWDConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),se=new Ea({props:{$$slots:{default:[Es]},$$scope:{ctx:K}}}),Ce=new uo({props:{code:`from transformers import Wav2Vec2Processor, SEWDModel from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('asapp/sew-d-tiny-100k') model = SEWDModel.from_pretrained('asapp/sew-d-tiny-100k') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, SEWDModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span 
class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-d-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWDModel.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-d-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ke=new pt({}),Te=new Ue({props:{name:"class transformers.SEWDForCTC",anchor:"transformers.SEWDForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/modeling_sew_d.py#L1456",parametersDescription:[{anchor:"transformers.SEWDForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig">SEWDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Pe=new Ue({props:{name:"forward",anchor:"transformers.SEWDForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/modeling_sew_d.py#L1482",parametersDescription:[{anchor:"transformers.SEWDForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SEWDForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SEWDForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SEWDForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SEWDForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SEWDForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig" >SEWDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ie=new Ea({props:{$$slots:{default:[Ss]},$$scope:{ctx:K}}}),Fe=new uo({props:{code:`from transformers import Wav2Vec2Processor, SEWDForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('asapp/sew-d-tiny-100k') model = SEWDForCTC.from_pretrained('asapp/sew-d-tiny-100k') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, SEWDForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span 
class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-d-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWDForCTC.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-d-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),Me=new pt({}),ze=new Ue({props:{name:"class transformers.SEWDForSequenceClassification",anchor:"transformers.SEWDForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/modeling_sew_d.py#L1568",parametersDescription:[{anchor:"transformers.SEWDForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig">SEWDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Le=new Ue({props:{name:"forward",anchor:"transformers.SEWDForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew_d/modeling_sew_d.py#L1597",parametersDescription:[{anchor:"transformers.SEWDForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SEWDForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SEWDForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SEWDForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SEWDForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SEWDForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDConfig" >SEWDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),de=new Ea({props:{$$slots:{default:[Ws]},$$scope:{ctx:K}}}),Ie=new uo({props:{code:`from transformers import Wav2Vec2FeatureExtractor, SEWDForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('asapp/sew-d-tiny-100k') model = SEWDForSequenceClassification.from_pretrained('asapp/sew-d-tiny-100k') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. 
"down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, SEWDForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-d-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWDForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-d-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. &quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),{c(){m=n("meta"),W=d(),h=n("h1"),_=n("a"),D=n("span"),v(u.$$.fragment),g=d(),$=n("span"),go=r("SEW-D"),Mt=d(),L=n("h2"),Z=n("a"),mt=n("span"),v(ce.$$.fragment),_o=d(),ft=n("span"),vo=r("Overview"),zt=d(),ee=n("p"),wo=r("SEW-D (Squeezed and Efficient Wav2Vec with Disentangled attention) was proposed in "),pe=n("a"),bo=r(`Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition`),yo=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),At=d(),Re=n("p"),Eo=r("The abstract from the paper is the following:"),Vt=d(),Je=n("p"),ht=n("em"),So=r(`This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. 
Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.`),Nt=d(),Be=n("p"),Wo=r("Tips:"),Ot=d(),te=n("ul"),ut=n("li"),Do=r("SEW-D is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),Co=d(),me=n("li"),ko=r(`SEWDForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Qe=n("a"),To=r("Wav2Vec2CTCTokenizer"),xo=r("."),Kt=d(),oe=n("p"),$o=r("This model was contributed by "),fe=n("a"),qo=r("anton-l"),jo=r("."),Lt=d(),I=n("h2"),ne=n("a"),gt=n("span"),v(he.$$.fragment),Po=d(),_t=n("span"),Fo=r("SEWDConfig"),It=d(),k=n("div"),v(ue.$$.fragment),Mo=d(),H=n("p"),zo=r("This is the configuration class to store the configuration of a "),Ye=n("a"),Ao=r("SEWDModel"),Vo=r(`. It is used to instantiate a SEW-D model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW-D `),ge=n("a"),No=r("asapp/sew-d-tiny-100k"),Oo=r(" architecture."),Ko=d(),U=n("p"),Lo=r("Configuration objects inherit from "),Ge=n("a"),Io=r("PretrainedConfig"),Ho=r(` and can be used to control the model outputs. Read the documentation from `),Xe=n("a"),Uo=r("PretrainedConfig"),Ro=r(" for more information."),Jo=d(),vt=n("p"),Bo=r("Example:"),Qo=d(),v(_e.$$.fragment),Ht=d(),R=n("h2"),ae=n("a"),wt=n("span"),v(ve.$$.fragment),Yo=d(),bt=n("span"),Go=r("SEWDModel"),Ut=d(),T=n("div"),v(we.$$.fragment),Xo=d(),be=n("p"),Zo=r(`The bare SEW-D Model transformer outputting raw hidden-states without any specific head on top. SEW-D was proposed in `),ye=n("a"),en=r("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),tn=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),on=d(),Ee=n("p"),nn=r("This model inherits from "),Ze=n("a"),an=r("PreTrainedModel"),sn=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),rn=d(),Se=n("p"),ln=r("This model is a PyTorch "),We=n("a"),dn=r("torch.nn.Module"),cn=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pn=d(),q=n("div"),v(De.$$.fragment),mn=d(),J=n("p"),fn=r("The "),et=n("a"),hn=r("SEWDModel"),un=r(" forward method, overrides the "),yt=n("code"),gn=r("__call__"),_n=r(" special method."),vn=d(),v(se.$$.fragment),wn=d(),Et=n("p"),bn=r("Example:"),yn=d(),v(Ce.$$.fragment),Rt=d(),B=n("h2"),re=n("a"),St=n("span"),v(ke.$$.fragment),En=d(),Wt=n("span"),Sn=r("SEWDForCTC"),Jt=d(),x=n("div"),v(Te.$$.fragment),Wn=d(),Q=n("p"),Dn=r("SEW-D Model with a "),Dt=n("code"),Cn=r("language modeling"),kn=r(` head on top for Connectionist Temporal Classification (CTC). SEW-D was proposed in `),xe=n("a"),Tn=r("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),xn=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi.`),$n=d(),$e=n("p"),qn=r("This model inherits from "),tt=n("a"),jn=r("PreTrainedModel"),Pn=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Fn=d(),qe=n("p"),Mn=r("This model is a PyTorch "),je=n("a"),zn=r("torch.nn.Module"),An=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vn=d(),j=n("div"),v(Pe.$$.fragment),Nn=d(),Y=n("p"),On=r("The "),ot=n("a"),Kn=r("SEWDForCTC"),Ln=r(" forward method, overrides the "),Ct=n("code"),In=r("__call__"),Hn=r(" special method."),Un=d(),v(ie.$$.fragment),Rn=d(),kt=n("p"),Jn=r("Example:"),Bn=d(),v(Fe.$$.fragment),Bt=d(),G=n("h2"),le=n("a"),Tt=n("span"),v(Me.$$.fragment),Qn=d(),xt=n("span"),Yn=r("SEWDForSequenceClassification"),Qt=d(),C=n("div"),v(ze.$$.fragment),Gn=d(),$t=n("p"),Xn=r(`SEWD Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),Zn=d(),Ae=n("p"),ea=r("SEW-D was proposed in "),Ve=n("a"),ta=r("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),oa=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),na=d(),Ne=n("p"),aa=r("This model inherits from "),nt=n("a"),sa=r("PreTrainedModel"),ra=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),ia=d(),Oe=n("p"),la=r("This model is a PyTorch "),Ke=n("a"),da=r("torch.nn.Module"),ca=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pa=d(),P=n("div"),v(Le.$$.fragment),ma=d(),X=n("p"),fa=r("The "),at=n("a"),ha=r("SEWDForSequenceClassification"),ua=r(" forward method, overrides the "),qt=n("code"),ga=r("__call__"),_a=r(" special method."),va=d(),v(de.$$.fragment),wa=d(),jt=n("p"),ba=r("Example:"),ya=d(),v(Ie.$$.fragment),this.h()},l(t){const p=ys('[data-svelte="svelte-1phssyn"]',document.head);m=a(p,"META",{name:!0,content:!0}),p.forEach(o),W=c(t),h=a(t,"H1",{class:!0});var He=s(h);_=a(He,"A",{id:!0,class:!0,href:!0});var Pt=s(_);D=a(Pt,"SPAN",{});var Ft=s(D);w(u.$$.fragment,Ft),Ft.forEach(o),Pt.forEach(o),g=c(He),$=a(He,"SPAN",{});var Sa=s($);go=i(Sa,"SEW-D"),Sa.forEach(o),He.forEach(o),Mt=c(t),L=a(t,"H2",{class:!0});var Gt=s(L);Z=a(Gt,"A",{id:!0,class:!0,href:!0});var Wa=s(Z);mt=a(Wa,"SPAN",{});var Da=s(mt);w(ce.$$.fragment,Da),Da.forEach(o),Wa.forEach(o),_o=c(Gt),ft=a(Gt,"SPAN",{});var Ca=s(ft);vo=i(Ca,"Overview"),Ca.forEach(o),Gt.forEach(o),zt=c(t),ee=a(t,"P",{});var Xt=s(ee);wo=i(Xt,"SEW-D (Squeezed and Efficient Wav2Vec with Disentangled attention) was proposed in "),pe=a(Xt,"A",{href:!0,rel:!0});var ka=s(pe);bo=i(ka,`Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition`),ka.forEach(o),yo=i(Xt,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),Xt.forEach(o),At=c(t),Re=a(t,"P",{});var Ta=s(Re);Eo=i(Ta,"The abstract from the paper is the following:"),Ta.forEach(o),Vt=c(t),Je=a(t,"P",{});var xa=s(Je);ht=a(xa,"EM",{});var $a=s(ht);So=i($a,`This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. 
Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.`),$a.forEach(o),xa.forEach(o),Nt=c(t),Be=a(t,"P",{});var qa=s(Be);Wo=i(qa,"Tips:"),qa.forEach(o),Ot=c(t),te=a(t,"UL",{});var Zt=s(te);ut=a(Zt,"LI",{});var ja=s(ut);Do=i(ja,"SEW-D is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ja.forEach(o),Co=c(Zt),me=a(Zt,"LI",{});var eo=s(me);ko=i(eo,`SEWDForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Qe=a(eo,"A",{href:!0});var Pa=s(Qe);To=i(Pa,"Wav2Vec2CTCTokenizer"),Pa.forEach(o),xo=i(eo,"."),eo.forEach(o),Zt.forEach(o),Kt=c(t),oe=a(t,"P",{});var to=s(oe);$o=i(to,"This model was contributed by "),fe=a(to,"A",{href:!0,rel:!0});var Fa=s(fe);qo=i(Fa,"anton-l"),Fa.forEach(o),jo=i(to,"."),to.forEach(o),Lt=c(t),I=a(t,"H2",{class:!0});var oo=s(I);ne=a(oo,"A",{id:!0,class:!0,href:!0});var Ma=s(ne);gt=a(Ma,"SPAN",{});var za=s(gt);w(he.$$.fragment,za),za.forEach(o),Ma.forEach(o),Po=c(oo),_t=a(oo,"SPAN",{});var Aa=s(_t);Fo=i(Aa,"SEWDConfig"),Aa.forEach(o),oo.forEach(o),It=c(t),k=a(t,"DIV",{class:!0});var M=s(k);w(ue.$$.fragment,M),Mo=c(M),H=a(M,"P",{});var st=s(H);zo=i(st,"This is the configuration class to store the configuration of a "),Ye=a(st,"A",{href:!0});var Va=s(Ye);Ao=i(Va,"SEWDModel"),Va.forEach(o),Vo=i(st,`. It is used to instantiate a SEW-D model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW-D `),ge=a(st,"A",{href:!0,rel:!0});var Na=s(ge);No=i(Na,"asapp/sew-d-tiny-100k"),Na.forEach(o),Oo=i(st," architecture."),st.forEach(o),Ko=c(M),U=a(M,"P",{});var rt=s(U);Lo=i(rt,"Configuration objects inherit from "),Ge=a(rt,"A",{href:!0});var Oa=s(Ge);Io=i(Oa,"PretrainedConfig"),Oa.forEach(o),Ho=i(rt,` and can be used to control the model outputs. Read the documentation from `),Xe=a(rt,"A",{href:!0});var Ka=s(Xe);Uo=i(Ka,"PretrainedConfig"),Ka.forEach(o),Ro=i(rt," for more information."),rt.forEach(o),Jo=c(M),vt=a(M,"P",{});var La=s(vt);Bo=i(La,"Example:"),La.forEach(o),Qo=c(M),w(_e.$$.fragment,M),M.forEach(o),Ht=c(t),R=a(t,"H2",{class:!0});var no=s(R);ae=a(no,"A",{id:!0,class:!0,href:!0});var Ia=s(ae);wt=a(Ia,"SPAN",{});var Ha=s(wt);w(ve.$$.fragment,Ha),Ha.forEach(o),Ia.forEach(o),Yo=c(no),bt=a(no,"SPAN",{});var Ua=s(bt);Go=i(Ua,"SEWDModel"),Ua.forEach(o),no.forEach(o),Ut=c(t),T=a(t,"DIV",{class:!0});var z=s(T);w(we.$$.fragment,z),Xo=c(z),be=a(z,"P",{});var ao=s(be);Zo=i(ao,`The bare SEW-D Model transformer outputting raw hidden-states without any specific head on top. SEW-D was proposed in `),ye=a(ao,"A",{href:!0,rel:!0});var Ra=s(ye);en=i(Ra,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),Ra.forEach(o),tn=i(ao,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi.`),ao.forEach(o),on=c(z),Ee=a(z,"P",{});var so=s(Ee);nn=i(so,"This model inherits from "),Ze=a(so,"A",{href:!0});var Ja=s(Ze);an=i(Ja,"PreTrainedModel"),Ja.forEach(o),sn=i(so,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),so.forEach(o),rn=c(z),Se=a(z,"P",{});var ro=s(Se);ln=i(ro,"This model is a PyTorch "),We=a(ro,"A",{href:!0,rel:!0});var Ba=s(We);dn=i(Ba,"torch.nn.Module"),Ba.forEach(o),cn=i(ro,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ro.forEach(o),pn=c(z),q=a(z,"DIV",{class:!0});var A=s(q);w(De.$$.fragment,A),mn=c(A),J=a(A,"P",{});var it=s(J);fn=i(it,"The "),et=a(it,"A",{href:!0});var Qa=s(et);hn=i(Qa,"SEWDModel"),Qa.forEach(o),un=i(it," forward method, overrides the "),yt=a(it,"CODE",{});var Ya=s(yt);gn=i(Ya,"__call__"),Ya.forEach(o),_n=i(it," special method."),it.forEach(o),vn=c(A),w(se.$$.fragment,A),wn=c(A),Et=a(A,"P",{});var Ga=s(Et);bn=i(Ga,"Example:"),Ga.forEach(o),yn=c(A),w(Ce.$$.fragment,A),A.forEach(o),z.forEach(o),Rt=c(t),B=a(t,"H2",{class:!0});var io=s(B);re=a(io,"A",{id:!0,class:!0,href:!0});var Xa=s(re);St=a(Xa,"SPAN",{});var Za=s(St);w(ke.$$.fragment,Za),Za.forEach(o),Xa.forEach(o),En=c(io),Wt=a(io,"SPAN",{});var es=s(Wt);Sn=i(es,"SEWDForCTC"),es.forEach(o),io.forEach(o),Jt=c(t),x=a(t,"DIV",{class:!0});var V=s(x);w(Te.$$.fragment,V),Wn=c(V),Q=a(V,"P",{});var lt=s(Q);Dn=i(lt,"SEW-D Model with a "),Dt=a(lt,"CODE",{});var ts=s(Dt);Cn=i(ts,"language modeling"),ts.forEach(o),kn=i(lt,` head on top for Connectionist Temporal Classification (CTC). SEW-D was proposed in `),xe=a(lt,"A",{href:!0,rel:!0});var os=s(xe);Tn=i(os,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),os.forEach(o),xn=i(lt,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),lt.forEach(o),$n=c(V),$e=a(V,"P",{});var lo=s($e);qn=i(lo,"This model inherits from "),tt=a(lo,"A",{href:!0});var ns=s(tt);jn=i(ns,"PreTrainedModel"),ns.forEach(o),Pn=i(lo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),lo.forEach(o),Fn=c(V),qe=a(V,"P",{});var co=s(qe);Mn=i(co,"This model is a PyTorch "),je=a(co,"A",{href:!0,rel:!0});var as=s(je);zn=i(as,"torch.nn.Module"),as.forEach(o),An=i(co,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),co.forEach(o),Vn=c(V),j=a(V,"DIV",{class:!0});var N=s(j);w(Pe.$$.fragment,N),Nn=c(N),Y=a(N,"P",{});var dt=s(Y);On=i(dt,"The "),ot=a(dt,"A",{href:!0});var ss=s(ot);Kn=i(ss,"SEWDForCTC"),ss.forEach(o),Ln=i(dt," forward method, overrides the "),Ct=a(dt,"CODE",{});var rs=s(Ct);In=i(rs,"__call__"),rs.forEach(o),Hn=i(dt," special method."),dt.forEach(o),Un=c(N),w(ie.$$.fragment,N),Rn=c(N),kt=a(N,"P",{});var is=s(kt);Jn=i(is,"Example:"),is.forEach(o),Bn=c(N),w(Fe.$$.fragment,N),N.forEach(o),V.forEach(o),Bt=c(t),G=a(t,"H2",{class:!0});var po=s(G);le=a(po,"A",{id:!0,class:!0,href:!0});var ls=s(le);Tt=a(ls,"SPAN",{});var ds=s(Tt);w(Me.$$.fragment,ds),ds.forEach(o),ls.forEach(o),Qn=c(po),xt=a(po,"SPAN",{});var cs=s(xt);Yn=i(cs,"SEWDForSequenceClassification"),cs.forEach(o),po.forEach(o),Qt=c(t),C=a(t,"DIV",{class:!0});var F=s(C);w(ze.$$.fragment,F),Gn=c(F),$t=a(F,"P",{});var ps=s($t);Xn=i(ps,`SEWD Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),ps.forEach(o),Zn=c(F),Ae=a(F,"P",{});var mo=s(Ae);ea=i(mo,"SEW-D was proposed in "),Ve=a(mo,"A",{href:!0,rel:!0});var ms=s(Ve);ta=i(ms,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),ms.forEach(o),oa=i(mo,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),mo.forEach(o),na=c(F),Ne=a(F,"P",{});var fo=s(Ne);aa=i(fo,"This model inherits from "),nt=a(fo,"A",{href:!0});var fs=s(nt);sa=i(fs,"PreTrainedModel"),fs.forEach(o),ra=i(fo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),fo.forEach(o),ia=c(F),Oe=a(F,"P",{});var ho=s(Oe);la=i(ho,"This model is a PyTorch "),Ke=a(ho,"A",{href:!0,rel:!0});var hs=s(Ke);da=i(hs,"torch.nn.Module"),hs.forEach(o),ca=i(ho,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ho.forEach(o),pa=c(F),P=a(F,"DIV",{class:!0});var O=s(P);w(Le.$$.fragment,O),ma=c(O),X=a(O,"P",{});var ct=s(X);fa=i(ct,"The "),at=a(ct,"A",{href:!0});var us=s(at);ha=i(us,"SEWDForSequenceClassification"),us.forEach(o),ua=i(ct," forward method, overrides the "),qt=a(ct,"CODE",{});var gs=s(qt);ga=i(gs,"__call__"),gs.forEach(o),_a=i(ct," special method."),ct.forEach(o),va=c(O),w(de.$$.fragment,O),wa=c(O),jt=a(O,"P",{});var _s=s(jt);ba=i(_s,"Example:"),_s.forEach(o),ya=c(O),w(Ie.$$.fragment,O),O.forEach(o),F.forEach(o),this.h()},h(){l(m,"name","hf:doc:metadata"),l(m,"content",JSON.stringify(Cs)),l(_,"id","sewd"),l(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_,"href","#sewd"),l(h,"class","relative group"),l(Z,"id","overview"),l(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Z,"href","#overview"),l(L,"class","relative group"),l(pe,"href","https://arxiv.org/abs/2109.06870"),l(pe,"rel","nofollow"),l(Qe,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),l(fe,"href","https://huggingface.co/anton-l"),l(fe,"rel","nofollow"),l(ne,"id","transformers.SEWDConfig"),l(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ne,"href","#transformers.SEWDConfig"),l(I,"class","relative group"),l(Ye,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDModel"),l(ge,"href","https://huggingface.co/asapp/sew-d-tiny-100k"),l(ge,"rel","nofollow"),l(Ge,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Xe,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(k,"class","docstring"),l(ae,"id","transformers.SEWDModel"),l(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ae,"href","#transformers.SEWDModel"),l(R,"class","relative group"),l(ye,"href","https://arxiv.org/abs/2109.06870"),l(ye,"rel","nofollow"),l(Ze,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(We,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(We,"rel","nofollow"),l(et,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDModel"),l(q,"class","docstring"),l(T,"class","docstring"),l(re,"id","transformers.SEWDForCTC"),l(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(re,"href","#transformers.SEWDForCTC"),l(B,"class","relative 
group"),l(xe,"href","https://arxiv.org/abs/2109.06870"),l(xe,"rel","nofollow"),l(tt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(je,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(je,"rel","nofollow"),l(ot,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForCTC"),l(j,"class","docstring"),l(x,"class","docstring"),l(le,"id","transformers.SEWDForSequenceClassification"),l(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(le,"href","#transformers.SEWDForSequenceClassification"),l(G,"class","relative group"),l(Ve,"href","https://arxiv.org/abs/2109.06870"),l(Ve,"rel","nofollow"),l(nt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ke,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ke,"rel","nofollow"),l(at,"href","/docs/transformers/v4.15.0/en/model_doc/sew_d#transformers.SEWDForSequenceClassification"),l(P,"class","docstring"),l(C,"class","docstring")},m(t,p){e(document.head,m),f(t,W,p),f(t,h,p),e(h,_),e(_,D),b(u,D,null),e(h,g),e(h,$),e($,go),f(t,Mt,p),f(t,L,p),e(L,Z),e(Z,mt),b(ce,mt,null),e(L,_o),e(L,ft),e(ft,vo),f(t,zt,p),f(t,ee,p),e(ee,wo),e(ee,pe),e(pe,bo),e(ee,yo),f(t,At,p),f(t,Re,p),e(Re,Eo),f(t,Vt,p),f(t,Je,p),e(Je,ht),e(ht,So),f(t,Nt,p),f(t,Be,p),e(Be,Wo),f(t,Ot,p),f(t,te,p),e(te,ut),e(ut,Do),e(te,Co),e(te,me),e(me,ko),e(me,Qe),e(Qe,To),e(me,xo),f(t,Kt,p),f(t,oe,p),e(oe,$o),e(oe,fe),e(fe,qo),e(oe,jo),f(t,Lt,p),f(t,I,p),e(I,ne),e(ne,gt),b(he,gt,null),e(I,Po),e(I,_t),e(_t,Fo),f(t,It,p),f(t,k,p),b(ue,k,null),e(k,Mo),e(k,H),e(H,zo),e(H,Ye),e(Ye,Ao),e(H,Vo),e(H,ge),e(ge,No),e(H,Oo),e(k,Ko),e(k,U),e(U,Lo),e(U,Ge),e(Ge,Io),e(U,Ho),e(U,Xe),e(Xe,Uo),e(U,Ro),e(k,Jo),e(k,vt),e(vt,Bo),e(k,Qo),b(_e,k,null),f(t,Ht,p),f(t,R,p),e(R,ae),e(ae,wt),b(ve,wt,null),e(R,Yo),e(R,bt),e(bt,Go),f(t,Ut,p),f(t,T,p),b(we,T,null),e(T,Xo),e(T,be),e(be,Zo),e(be,ye),e(ye,en),e(be,tn),e(T,on),e(T,Ee),e(Ee,nn),e(Ee,Ze),e(Ze,an),e(Ee,sn),e(T,rn),e(T,Se),e(Se,ln),e(Se,We),e(We,dn),e(Se,cn),e(T,pn),e(T,q),b(De,q,null),e(q,mn),e(q,J),e(J,fn),e(J,et),e(et,hn),e(J,un),e(J,yt),e(yt,gn),e(J,_n),e(q,vn),b(se,q,null),e(q,wn),e(q,Et),e(Et,bn),e(q,yn),b(Ce,q,null),f(t,Rt,p),f(t,B,p),e(B,re),e(re,St),b(ke,St,null),e(B,En),e(B,Wt),e(Wt,Sn),f(t,Jt,p),f(t,x,p),b(Te,x,null),e(x,Wn),e(x,Q),e(Q,Dn),e(Q,Dt),e(Dt,Cn),e(Q,kn),e(Q,xe),e(xe,Tn),e(Q,xn),e(x,$n),e(x,$e),e($e,qn),e($e,tt),e(tt,jn),e($e,Pn),e(x,Fn),e(x,qe),e(qe,Mn),e(qe,je),e(je,zn),e(qe,An),e(x,Vn),e(x,j),b(Pe,j,null),e(j,Nn),e(j,Y),e(Y,On),e(Y,ot),e(ot,Kn),e(Y,Ln),e(Y,Ct),e(Ct,In),e(Y,Hn),e(j,Un),b(ie,j,null),e(j,Rn),e(j,kt),e(kt,Jn),e(j,Bn),b(Fe,j,null),f(t,Bt,p),f(t,G,p),e(G,le),e(le,Tt),b(Me,Tt,null),e(G,Qn),e(G,xt),e(xt,Yn),f(t,Qt,p),f(t,C,p),b(ze,C,null),e(C,Gn),e(C,$t),e($t,Xn),e(C,Zn),e(C,Ae),e(Ae,ea),e(Ae,Ve),e(Ve,ta),e(Ae,oa),e(C,na),e(C,Ne),e(Ne,aa),e(Ne,nt),e(nt,sa),e(Ne,ra),e(C,ia),e(C,Oe),e(Oe,la),e(Oe,Ke),e(Ke,da),e(Oe,ca),e(C,pa),e(C,P),b(Le,P,null),e(P,ma),e(P,X),e(X,fa),e(X,at),e(at,ha),e(X,ua),e(X,qt),e(qt,ga),e(X,_a),e(P,va),b(de,P,null),e(P,wa),e(P,jt),e(jt,ba),e(P,ya),b(Ie,P,null),Yt=!0},p(t,[p]){const He={};p&2&&(He.$$scope={dirty:p,ctx:t}),se.$set(He);const Pt={};p&2&&(Pt.$$scope={dirty:p,ctx:t}),ie.$set(Pt);const 
Ft={};p&2&&(Ft.$$scope={dirty:p,ctx:t}),de.$set(Ft)},i(t){Yt||(y(u.$$.fragment,t),y(ce.$$.fragment,t),y(he.$$.fragment,t),y(ue.$$.fragment,t),y(_e.$$.fragment,t),y(ve.$$.fragment,t),y(we.$$.fragment,t),y(De.$$.fragment,t),y(se.$$.fragment,t),y(Ce.$$.fragment,t),y(ke.$$.fragment,t),y(Te.$$.fragment,t),y(Pe.$$.fragment,t),y(ie.$$.fragment,t),y(Fe.$$.fragment,t),y(Me.$$.fragment,t),y(ze.$$.fragment,t),y(Le.$$.fragment,t),y(de.$$.fragment,t),y(Ie.$$.fragment,t),Yt=!0)},o(t){E(u.$$.fragment,t),E(ce.$$.fragment,t),E(he.$$.fragment,t),E(ue.$$.fragment,t),E(_e.$$.fragment,t),E(ve.$$.fragment,t),E(we.$$.fragment,t),E(De.$$.fragment,t),E(se.$$.fragment,t),E(Ce.$$.fragment,t),E(ke.$$.fragment,t),E(Te.$$.fragment,t),E(Pe.$$.fragment,t),E(ie.$$.fragment,t),E(Fe.$$.fragment,t),E(Me.$$.fragment,t),E(ze.$$.fragment,t),E(Le.$$.fragment,t),E(de.$$.fragment,t),E(Ie.$$.fragment,t),Yt=!1},d(t){o(m),t&&o(W),t&&o(h),S(u),t&&o(Mt),t&&o(L),S(ce),t&&o(zt),t&&o(ee),t&&o(At),t&&o(Re),t&&o(Vt),t&&o(Je),t&&o(Nt),t&&o(Be),t&&o(Ot),t&&o(te),t&&o(Kt),t&&o(oe),t&&o(Lt),t&&o(I),S(he),t&&o(It),t&&o(k),S(ue),S(_e),t&&o(Ht),t&&o(R),S(ve),t&&o(Ut),t&&o(T),S(we),S(De),S(se),S(Ce),t&&o(Rt),t&&o(B),S(ke),t&&o(Jt),t&&o(x),S(Te),S(Pe),S(ie),S(Fe),t&&o(Bt),t&&o(G),S(Me),t&&o(Qt),t&&o(C),S(ze),S(Le),S(de),S(Ie)}}}const Cs={local:"sewd",sections:[{local:"overview",title:"Overview"},{local:"transformers.SEWDConfig",title:"SEWDConfig"},{local:"transformers.SEWDModel",title:"SEWDModel"},{local:"transformers.SEWDForCTC",title:"SEWDForCTC"},{local:"transformers.SEWDForSequenceClassification",title:"SEWDForSequenceClassification"}],title:"SEW-D"};function ks(K,m,W){let{fw:h}=m;return K.$$set=_=>{"fw"in _&&W(0,h=_.fw)},[h]}class Fs extends vs{constructor(m){super();ws(this,m,ks,Ds,bs,{fw:0})}}export{Fs as default,Cs as metadata};
9,912
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/squeezebert.mdx-b51eb0a9.js
import{S as Ef,i as Mf,s as Ff,e as r,k as l,w as f,t as s,L as jf,c as a,d as n,m as d,a as i,x as m,h as o,b as c,J as e,g as p,y as g,q as _,o as z,B as b}from"../../chunks/vendor-b1433968.js";import{T as Ss}from"../../chunks/Tip-c3840994.js";import{D as W}from"../../chunks/Docstring-ff504c58.js";import{C as D}from"../../chunks/CodeBlock-a320dbd7.js";import{I as te}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Cf(H){let h,T,q,w,y;return{c(){h=r("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),w=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=a(k,"P",{});var v=i(h);T=o(v,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(v,"CODE",{});var j=i(q);w=o(j,"Module"),j.forEach(n),y=o(v,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),v.forEach(n)},m(k,v){p(k,h,v),e(h,T),e(h,q),e(q,w),e(h,y)},d(k){k&&n(h)}}}function Pf(H){let h,T,q,w,y;return{c(){h=r("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),w=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=a(k,"P",{});var v=i(h);T=o(v,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(v,"CODE",{});var j=i(q);w=o(j,"Module"),j.forEach(n),y=o(v,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),v.forEach(n)},m(k,v){p(k,h,v),e(h,T),e(h,q),e(q,w),e(h,y)},d(k){k&&n(h)}}}function xf(H){let h,T,q,w,y;return{c(){h=r("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),w=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=a(k,"P",{});var v=i(h);T=o(v,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(v,"CODE",{});var j=i(q);w=o(j,"Module"),j.forEach(n),y=o(v,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),v.forEach(n)},m(k,v){p(k,h,v),e(h,T),e(h,q),e(q,w),e(h,y)},d(k){k&&n(h)}}}function Af(H){let h,T,q,w,y;return{c(){h=r("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),w=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=a(k,"P",{});var v=i(h);T=o(v,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(v,"CODE",{});var j=i(q);w=o(j,"Module"),j.forEach(n),y=o(v,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),v.forEach(n)},m(k,v){p(k,h,v),e(h,T),e(h,q),e(q,w),e(h,y)},d(k){k&&n(h)}}}function Lf(H){let 
h,T,q,w,y;return{c(){h=r("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),w=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=a(k,"P",{});var v=i(h);T=o(v,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(v,"CODE",{});var j=i(q);w=o(j,"Module"),j.forEach(n),y=o(v,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),v.forEach(n)},m(k,v){p(k,h,v),e(h,T),e(h,q),e(q,w),e(h,y)},d(k){k&&n(h)}}}function Nf(H){let h,T,q,w,y;return{c(){h=r("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),w=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=a(k,"P",{});var v=i(h);T=o(v,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(v,"CODE",{});var j=i(q);w=o(j,"Module"),j.forEach(n),y=o(v,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),v.forEach(n)},m(k,v){p(k,h,v),e(h,T),e(h,q),e(q,w),e(h,y)},d(k){k&&n(h)}}}function If(H){let h,T,q,w,y,k,v,j,ma,er,ce,Ee,Bs,Xe,ga,$s,_a,tr,ne,za,Ye,ba,qa,Ze,va,ka,nr,Wn,wa,sr,On,Es,Ta,or,Kn,ya,rr,se,Ms,Sa,Ba,Fs,$a,Ea,et,Ma,js,Fa,ja,ar,Me,Ca,tt,Pa,xa,ir,ue,Fe,Cs,nt,Aa,Ps,La,lr,O,st,Na,ot,Ia,Hn,Da,Ra,Wa,he,Oa,Qn,Ka,Ha,Vn,Qa,Va,Ua,xs,Ga,Ja,rt,Xa,As,Ya,dr,pe,je,Ls,at,Za,Ns,ei,cr,I,it,ti,Is,ni,si,Ce,Un,oi,ri,Gn,ai,ii,li,lt,di,Jn,ci,ui,hi,oe,dt,pi,Ds,fi,mi,ct,Xn,gi,Rs,_i,zi,Yn,bi,Ws,qi,vi,Pe,ut,ki,ht,wi,Os,Ti,yi,Si,Z,pt,Bi,Ks,$i,Ei,ft,Mi,fe,Fi,Hs,ji,Ci,Qs,Pi,xi,Ai,Vs,ur,me,xe,Us,mt,Li,Gs,Ni,hr,Y,gt,Ii,_t,Di,Js,Ri,Wi,Oi,Ae,Zn,Ki,Hi,es,Qi,Vi,Ui,zt,Gi,ts,Ji,Xi,pr,ge,Le,Xs,bt,Yi,Ys,Zi,fr,S,qt,el,Zs,tl,nl,vt,sl,kt,ol,rl,al,wt,il,ns,ll,dl,cl,Tt,ul,yt,hl,pl,fl,St,ml,eo,gl,_l,zl,to,bl,ql,Bt,vl,no,kl,wl,$t,Tl,Q,Et,yl,_e,Sl,ss,Bl,$l,so,El,Ml,Fl,Ne,jl,oo,Cl,Pl,Mt,mr,ze,Ie,ro,Ft,xl,ao,Al,gr,B,jt,Ll,Ct,Nl,io,Il,Dl,Rl,Pt,Wl,xt,Ol,Kl,Hl,At,Ql,os,Vl,Ul,Gl,Lt,Jl,Nt,Xl,Yl,Zl,It,ed,lo,td,nd,sd,co,od,rd,Dt,ad,uo,id,ld,Rt,dd,V,Wt,cd,be,ud,rs,hd,pd,ho,fd,md,gd,De,_d,po,zd,bd,Ot,_r,qe,Re,fo,Kt,qd,mo,vd,zr,$,Ht,kd,go,wd,Td,Qt,yd,Vt,Sd,Bd,$d,Ut,Ed,as,Md,Fd,jd,Gt,Cd,Jt,Pd,xd,Ad,Xt,Ld,_o,Nd,Id,Dd,zo,Rd,Wd,Yt,Od,bo,Kd,Hd,Zt,Qd,R,en,Vd,ve,Ud,is,Gd,Jd,qo,Xd,Yd,Zd,We,ec,vo,tc,nc,tn,sc,ko,oc,rc,nn,br,ke,Oe,wo,sn,ac,To,ic,qr,E,on,lc,yo,dc,cc,rn,uc,an,hc,pc,fc,ln,mc,ls,gc,_c,zc,dn,bc,cn,qc,vc,kc,un,wc,So,Tc,yc,Sc,Bo,Bc,$c,hn,Ec,$o,Mc,Fc,pn,jc,U,fn,Cc,we,Pc,ds,xc,Ac,Eo,Lc,Nc,Ic,Ke,Dc,Mo,Rc,Wc,mn,vr,Te,He,Fo,gn,Oc,jo,Kc,kr,M,_n,Hc,Co,Qc,Vc,zn,Uc,bn,Gc,Jc,Xc,qn,Yc,cs,Zc,eu,tu,vn,nu,kn,su,ou,ru,wn,au,Po,iu,lu,du,xo,cu,uu,Tn,hu,Ao,pu,fu,yn,mu,G,Sn,gu,ye,_u,us,zu,bu,Lo,qu,vu,ku,Qe,wu,No,Tu,yu,Bn,wr,Se,Ve,Io,$n,Su,Do,Bu,Tr,F,En,$u,Be,Eu,Ro,Mu,Fu,Wo,ju,Cu,Pu,Mn,xu,Fn,Au,Lu,Nu,jn,Iu,hs,Du,Ru,Wu,Cn,Ou,Pn,Ku,Hu,Qu,xn,Vu,Oo,Uu,Gu,Ju,Ko,Xu,Yu,An,Zu,Ho,eh,th,Ln,nh,J,Nn,sh,$e,oh,ps,rh,ah,Qo,ih,lh,dh,Ue,ch,Vo,uh,hh,In,yr;return k=new te({}),Xe=new te({}),nt=new te({}),st=new W({props:{name:"class transformers.SqueezeBertConfig",anchor:"transformers.SqueezeBertConfig",parameters:[{name:"vocab_size",val:" = 
30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"embedding_size",val:" = 768"},{name:"q_groups",val:" = 4"},{name:"k_groups",val:" = 4"},{name:"v_groups",val:" = 4"},{name:"post_attention_groups",val:" = 1"},{name:"intermediate_groups",val:" = 4"},{name:"output_groups",val:" = 4"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/configuration_squeezebert.py#L30",parametersDescription:[{anchor:"transformers.SqueezeBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertModel">SqueezeBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.SqueezeBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.SqueezeBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.SqueezeBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SqueezeBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.SqueezeBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SqueezeBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.SqueezeBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.SqueezeBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.SqueezeBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel">BertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel">TFBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.SqueezeBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SqueezeBertConfig.layer_norm_eps",description:"<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014;",name:"layer_norm_eps"},{anchor:"transformers.SqueezeBertConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The ID of the token in the word embedding to use as padding.`,name:"pad_token_id"},{anchor:"transformers.SqueezeBertConfig.embedding_size",description:`<strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; The dimension of the word embedding vectors.`,name:"embedding_size"},{anchor:"transformers.SqueezeBertConfig.q_groups",description:`<strong>q_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in Q layer.`,name:"q_groups"},{anchor:"transformers.SqueezeBertConfig.k_groups",description:`<strong>k_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in K layer.`,name:"k_groups"},{anchor:"transformers.SqueezeBertConfig.v_groups",description:`<strong>v_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in V layer.`,name:"v_groups"},{anchor:"transformers.SqueezeBertConfig.post_attention_groups",description:`<strong>post_attention_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of groups in the first feed forward network layer.`,name:"post_attention_groups"},{anchor:"transformers.SqueezeBertConfig.intermediate_groups",description:`<strong>intermediate_groups</strong> 
(<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in the second feed forward network layer.`,name:"intermediate_groups"},{anchor:"transformers.SqueezeBertConfig.output_groups",description:`<strong>output_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in the third feed forward network layer.`,name:"output_groups"}]}}),rt=new D({props:{code:`from transformers import SqueezeBertModel, SqueezeBertConfig # Initializing a SqueezeBERT configuration configuration = SqueezeBertConfig() # Initializing a model from the configuration above model = SqueezeBertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertModel, SqueezeBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SqueezeBERT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SqueezeBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration above</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),at=new te({}),it=new W({props:{name:"class transformers.SqueezeBertTokenizer",anchor:"transformers.SqueezeBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/tokenization_squeezebert.py#L47"}}),dt=new W({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L247",parametersDescription:[{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ut=new W({props:{name:"get_special_tokens_mask",anchor:"transformers.BertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L272",parametersDescription:[{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),pt=new W({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L300",parametersDescription:[{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),ft=new D({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),mt=new te({}),gt=new W({props:{name:"class transformers.SqueezeBertTokenizerFast",anchor:"transformers.SqueezeBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py#L53"}}),bt=new te({}),qt=new W({props:{name:"class 
transformers.SqueezeBertModel",anchor:"transformers.SqueezeBertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L549",parametersDescription:[{anchor:"transformers.SqueezeBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bt=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm,`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-title class_">hierarchy</span>: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`}}),$t=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.,`,highlighted:`Input data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-built_in">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-literal">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. The final output of the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>.`}}),Et=new W({props:{name:"forward",anchor:"transformers.SqueezeBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L574",parametersDescription:[{anchor:"transformers.SqueezeBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new Ss({props:{$$slots:{default:[Cf]},$$scope:{ctx:H}}}),Mt=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertModel import torch tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertModel.from_pretrained('squeezebert/squeezebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertModel.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ft=new te({}),jt=new W({props:{name:"class transformers.SqueezeBertForMaskedLM",anchor:"transformers.SqueezeBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L649",parametersDescription:[{anchor:"transformers.SqueezeBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Dt=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm,`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-title class_">hierarchy</span>: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`}}),Rt=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.,`,highlighted:`Input data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-built_in">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-literal">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. The final output of the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>.`}}),Wt=new W({props:{name:"forward",anchor:"transformers.SqueezeBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L668",parametersDescription:[{anchor:"transformers.SqueezeBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),De=new 
Ss({props:{$$slots:{default:[Pf]},$$scope:{ctx:H}}}),Ot=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForMaskedLM import torch tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertForMaskedLM.from_pretrained('squeezebert/squeezebert-uncased') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Kt=new te({}),Ht=new W({props:{name:"class transformers.SqueezeBertForSequenceClassification",anchor:"transformers.SqueezeBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L734",parametersDescription:[{anchor:"transformers.SqueezeBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Yt=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm,`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-title class_">hierarchy</span>: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`}}),Zt=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. 
The final output of the encoder is in [batch, sequence_length, hidden_size] format.,`,highlighted:`Input data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-built_in">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-literal">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. The final output of the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>.`}}),en=new W({props:{name:"forward",anchor:"transformers.SqueezeBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L747",parametersDescription:[{anchor:"transformers.SqueezeBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),We=new Ss({props:{$$slots:{default:[xf]},$$scope:{ctx:H}}}),tn=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForSequenceClassification import torch tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
SqueezeBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),nn=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForSequenceClassification import torch tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-uncased', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sn=new te({}),on=new W({props:{name:"class transformers.SqueezeBertForMultipleChoice",anchor:"transformers.SqueezeBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L833",parametersDescription:[{anchor:"transformers.SqueezeBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),hn=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm,`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-title class_">hierarchy</span>: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`}}),pn=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.,`,highlighted:`Input data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-built_in">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-literal">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. The final output of the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>.`}}),fn=new W({props:{name:"forward",anchor:"transformers.SqueezeBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L844",parametersDescription:[{anchor:"transformers.SqueezeBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <em>num_choices</em> is the size of the second dimension of the input tensors. (see <em>input_ids</em> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ke=new Ss({props:{$$slots:{default:[Af]},$$scope:{ctx:H}}}),mn=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForMultipleChoice import torch tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertForMultipleChoice.from_pretrained('squeezebert/squeezebert-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),gn=new te({}),_n=new W({props:{name:"class transformers.SqueezeBertForTokenClassification",anchor:"transformers.SqueezeBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L926",parametersDescription:[{anchor:"transformers.SqueezeBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tn=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm,`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-title class_">hierarchy</span>: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`}}),yn=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.,`,highlighted:`Input data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-built_in">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-literal">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. The final output of the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>.`}}),Sn=new W({props:{name:"forward",anchor:"transformers.SqueezeBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L938",parametersDescription:[{anchor:"transformers.SqueezeBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qe=new Ss({props:{$$slots:{default:[Lf]},$$scope:{ctx:H}}}),Bn=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForTokenClassification import torch tokenizer = 
SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertForTokenClassification.from_pretrained('squeezebert/squeezebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$n=new te({}),En=new W({props:{name:"class transformers.SqueezeBertForQuestionAnswering",anchor:"transformers.SqueezeBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L1014",parametersDescription:[{anchor:"transformers.SqueezeBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),An=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm,`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-title class_">hierarchy</span>: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`}}),Ln=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. 
The final output of the encoder is in [batch, sequence_length, hidden_size] format.,`,highlighted:`Input data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-built_in">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-literal">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>. The final output of the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-built_in">format</span>.`}}),Nn=new W({props:{name:"forward",anchor:"transformers.SqueezeBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/squeezebert/modeling_squeezebert.py#L1025",parametersDescription:[{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ue=new Ss({props:{$$slots:{default:[Nf]},$$scope:{ctx:H}}}),In=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForQuestionAnswering import torch tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-uncased') model = SqueezeBertForQuestionAnswering.from_pretrained('squeezebert/squeezebert-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions 
= torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;squeezebert/squeezebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){h=r("meta"),T=l(),q=r("h1"),w=r("a"),y=r("span"),f(k.$$.fragment),v=l(),j=r("span"),ma=s("SqueezeBERT"),er=l(),ce=r("h2"),Ee=r("a"),Bs=r("span"),f(Xe.$$.fragment),ga=l(),$s=r("span"),_a=s("Overview"),tr=l(),ne=r("p"),za=s("The SqueezeBERT model was proposed in "),Ye=r("a"),ba=s("SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),qa=s(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, Kurt W. Keutzer. It\u2019s a bidirectional transformer similar to the BERT model. The key difference between the BERT architecture and the SqueezeBERT architecture is that SqueezeBERT uses `),Ze=r("a"),va=s("grouped convolutions"),ka=s(` instead of fully-connected layers for the Q, K, V and FFN layers.`),nr=l(),Wn=r("p"),wa=s("The abstract from the paper is the following:"),sr=l(),On=r("p"),Es=r("em"),Ta=s(`Humans read and write hundreds of billions of messages every day. Further, due to the availability of large datasets, large computing systems, and better neural network models, natural language processing (NLP) technology has made significant strides in understanding, proofreading, and organizing these messages. Thus, there is a significant opportunity to deploy NLP in myriad applications to help web users, social networks, and businesses. In particular, we consider smartphones and other mobile devices as crucial platforms for deploying NLP models at scale. However, today\u2019s highly-accurate NLP neural network models such as BERT and RoBERTa are extremely computationally expensive, with BERT-base taking 1.7 seconds to classify a text snippet on a Pixel 3 smartphone. 
In this work, we observe that methods such as grouped convolutions have yielded significant speedups for computer vision networks, but many of these techniques have not been adopted by NLP neural network designers. We demonstrate how to replace several operations in self-attention layers with grouped convolutions, and we use this technique in a novel network architecture called SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. The SqueezeBERT code will be released.`),or=l(),Kn=r("p"),ya=s("Tips:"),rr=l(),se=r("ul"),Ms=r("li"),Sa=s(`SqueezeBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),Ba=l(),Fs=r("li"),$a=s(`SqueezeBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.`),Ea=l(),et=r("li"),Ma=s(`For best results when finetuning on sequence classification tasks, it is recommended to start with the `),js=r("em"),Fa=s("squeezebert/squeezebert-mnli-headless"),ja=s(" checkpoint."),ar=l(),Me=r("p"),Ca=s("This model was contributed by "),tt=r("a"),Pa=s("forresti"),xa=s("."),ir=l(),ue=r("h2"),Fe=r("a"),Cs=r("span"),f(nt.$$.fragment),Aa=l(),Ps=r("span"),La=s("SqueezeBertConfig"),lr=l(),O=r("div"),f(st.$$.fragment),Na=l(),ot=r("p"),Ia=s("This is the configuration class to store the configuration of a "),Hn=r("a"),Da=s("SqueezeBertModel"),Ra=s(`. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture.`),Wa=l(),he=r("p"),Oa=s("Configuration objects inherit from "),Qn=r("a"),Ka=s("PretrainedConfig"),Ha=s(` and can be used to control the model outputs. Read the documentation from `),Vn=r("a"),Qa=s("PretrainedConfig"),Va=s(" for more information."),Ua=l(),xs=r("p"),Ga=s("Examples:"),Ja=l(),f(rt.$$.fragment),Xa=l(),As=r("p"),Ya=s(`Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints.`),dr=l(),pe=r("h2"),je=r("a"),Ls=r("span"),f(at.$$.fragment),Za=l(),Ns=r("span"),ei=s("SqueezeBertTokenizer"),cr=l(),I=r("div"),f(it.$$.fragment),ti=l(),Is=r("p"),ni=s("Constructs a SqueezeBert tokenizer."),si=l(),Ce=r("p"),Un=r("a"),oi=s("SqueezeBertTokenizer"),ri=s(" is identical to "),Gn=r("a"),ai=s("BertTokenizer"),ii=s(` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),li=l(),lt=r("p"),di=s("Refer to superclass "),Jn=r("a"),ci=s("BertTokenizer"),ui=s(` for usage examples and documentation concerning parameters.`),hi=l(),oe=r("div"),f(dt.$$.fragment),pi=l(),Ds=r("p"),fi=s(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),mi=l(),ct=r("ul"),Xn=r("li"),gi=s("single sequence: "),Rs=r("code"),_i=s("[CLS] X [SEP]"),zi=l(),Yn=r("li"),bi=s("pair of sequences: "),Ws=r("code"),qi=s("[CLS] A [SEP] B [SEP]"),vi=l(),Pe=r("div"),f(ut.$$.fragment),ki=l(),ht=r("p"),wi=s(`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Os=r("code"),Ti=s("prepare_for_model"),yi=s(" method."),Si=l(),Z=r("div"),f(pt.$$.fragment),Bi=l(),Ks=r("p"),$i=s(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:`),Ei=l(),f(ft.$$.fragment),Mi=l(),fe=r("p"),Fi=s("If "),Hs=r("code"),ji=s("token_ids_1"),Ci=s(" is "),Qs=r("code"),Pi=s("None"),xi=s(", this method only returns the first portion of the mask (0s)."),Ai=l(),Vs=r("div"),ur=l(),me=r("h2"),xe=r("a"),Us=r("span"),f(mt.$$.fragment),Li=l(),Gs=r("span"),Ni=s("SqueezeBertTokenizerFast"),hr=l(),Y=r("div"),f(gt.$$.fragment),Ii=l(),_t=r("p"),Di=s("Constructs a \u201CFast\u201D SqueezeBert tokenizer (backed by HuggingFace\u2019s "),Js=r("em"),Ri=s("tokenizers"),Wi=s(" library)."),Oi=l(),Ae=r("p"),Zn=r("a"),Ki=s("SqueezeBertTokenizerFast"),Hi=s(" is identical to "),es=r("a"),Qi=s("BertTokenizerFast"),Vi=s(` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Ui=l(),zt=r("p"),Gi=s("Refer to superclass "),ts=r("a"),Ji=s("BertTokenizerFast"),Xi=s(` for usage examples and documentation concerning parameters.`),pr=l(),ge=r("h2"),Le=r("a"),Xs=r("span"),f(bt.$$.fragment),Yi=l(),Ys=r("span"),Zi=s("SqueezeBertModel"),fr=l(),S=r("div"),f(qt.$$.fragment),el=l(),Zs=r("p"),tl=s("The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top."),nl=l(),vt=r("p"),sl=s("The SqueezeBERT model was proposed in "),kt=r("a"),ol=s(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),rl=s(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),al=l(),wt=r("p"),il=s("This model inherits from "),ns=r("a"),ll=s("PreTrainedModel"),dl=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cl=l(),Tt=r("p"),ul=s("This model is also a PyTorch "),yt=r("a"),hl=s("torch.nn.Module"),pl=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fl=l(),St=r("p"),ml=s(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),eo=r("em"),gl=s("squeezebert/squeezebert-mnli-headless"),_l=s(" checkpoint as a starting point."),zl=l(),to=r("p"),bl=s("Hierarchy:"),ql=l(),f(Bt.$$.fragment),vl=l(),no=r("p"),kl=s("Data layouts:"),wl=l(),f($t.$$.fragment),Tl=l(),Q=r("div"),f(Et.$$.fragment),yl=l(),_e=r("p"),Sl=s("The "),ss=r("a"),Bl=s("SqueezeBertModel"),$l=s(" forward method, overrides the "),so=r("code"),El=s("__call__"),Ml=s(" special method."),Fl=l(),f(Ne.$$.fragment),jl=l(),oo=r("p"),Cl=s("Example:"),Pl=l(),f(Mt.$$.fragment),mr=l(),ze=r("h2"),Ie=r("a"),ro=r("span"),f(Ft.$$.fragment),xl=l(),ao=r("span"),Al=s("SqueezeBertForMaskedLM"),gr=l(),B=r("div"),f(jt.$$.fragment),Ll=l(),Ct=r("p"),Nl=s("SqueezeBERT Model with a "),io=r("code"),Il=s("language modeling"),Dl=s(" head on top."),Rl=l(),Pt=r("p"),Wl=s("The SqueezeBERT model was proposed in "),xt=r("a"),Ol=s(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Kl=s(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Hl=l(),At=r("p"),Ql=s("This model inherits from "),os=r("a"),Vl=s("PreTrainedModel"),Ul=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gl=l(),Lt=r("p"),Jl=s("This model is also a PyTorch "),Nt=r("a"),Xl=s("torch.nn.Module"),Yl=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zl=l(),It=r("p"),ed=s(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),lo=r("em"),td=s("squeezebert/squeezebert-mnli-headless"),nd=s(" checkpoint as a starting point."),sd=l(),co=r("p"),od=s("Hierarchy:"),rd=l(),f(Dt.$$.fragment),ad=l(),uo=r("p"),id=s("Data layouts:"),ld=l(),f(Rt.$$.fragment),dd=l(),V=r("div"),f(Wt.$$.fragment),cd=l(),be=r("p"),ud=s("The "),rs=r("a"),hd=s("SqueezeBertForMaskedLM"),pd=s(" forward method, overrides the "),ho=r("code"),fd=s("__call__"),md=s(" special method."),gd=l(),f(De.$$.fragment),_d=l(),po=r("p"),zd=s("Example:"),bd=l(),f(Ot.$$.fragment),_r=l(),qe=r("h2"),Re=r("a"),fo=r("span"),f(Kt.$$.fragment),qd=l(),mo=r("span"),vd=s("SqueezeBertForSequenceClassification"),zr=l(),$=r("div"),f(Ht.$$.fragment),kd=l(),go=r("p"),wd=s(`SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Td=l(),Qt=r("p"),yd=s("The SqueezeBERT model was proposed in "),Vt=r("a"),Sd=s(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Bd=s(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),$d=l(),Ut=r("p"),Ed=s("This model inherits from "),as=r("a"),Md=s("PreTrainedModel"),Fd=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jd=l(),Gt=r("p"),Cd=s("This model is also a PyTorch "),Jt=r("a"),Pd=s("torch.nn.Module"),xd=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ad=l(),Xt=r("p"),Ld=s(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),_o=r("em"),Nd=s("squeezebert/squeezebert-mnli-headless"),Id=s(" checkpoint as a starting point."),Dd=l(),zo=r("p"),Rd=s("Hierarchy:"),Wd=l(),f(Yt.$$.fragment),Od=l(),bo=r("p"),Kd=s("Data layouts:"),Hd=l(),f(Zt.$$.fragment),Qd=l(),R=r("div"),f(en.$$.fragment),Vd=l(),ve=r("p"),Ud=s("The "),is=r("a"),Gd=s("SqueezeBertForSequenceClassification"),Jd=s(" forward method, overrides the "),qo=r("code"),Xd=s("__call__"),Yd=s(" special method."),Zd=l(),f(We.$$.fragment),ec=l(),vo=r("p"),tc=s("Example of single-label classification:"),nc=l(),f(tn.$$.fragment),sc=l(),ko=r("p"),oc=s("Example of multi-label classification:"),rc=l(),f(nn.$$.fragment),br=l(),ke=r("h2"),Oe=r("a"),wo=r("span"),f(sn.$$.fragment),ac=l(),To=r("span"),ic=s("SqueezeBertForMultipleChoice"),qr=l(),E=r("div"),f(on.$$.fragment),lc=l(),yo=r("p"),dc=s(`SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),cc=l(),rn=r("p"),uc=s("The SqueezeBERT model was proposed in "),an=r("a"),hc=s(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),pc=s(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. 
Keutzer`),fc=l(),ln=r("p"),mc=s("This model inherits from "),ls=r("a"),gc=s("PreTrainedModel"),_c=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zc=l(),dn=r("p"),bc=s("This model is also a PyTorch "),cn=r("a"),qc=s("torch.nn.Module"),vc=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kc=l(),un=r("p"),wc=s(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),So=r("em"),Tc=s("squeezebert/squeezebert-mnli-headless"),yc=s(" checkpoint as a starting point."),Sc=l(),Bo=r("p"),Bc=s("Hierarchy:"),$c=l(),f(hn.$$.fragment),Ec=l(),$o=r("p"),Mc=s("Data layouts:"),Fc=l(),f(pn.$$.fragment),jc=l(),U=r("div"),f(fn.$$.fragment),Cc=l(),we=r("p"),Pc=s("The "),ds=r("a"),xc=s("SqueezeBertForMultipleChoice"),Ac=s(" forward method, overrides the "),Eo=r("code"),Lc=s("__call__"),Nc=s(" special method."),Ic=l(),f(Ke.$$.fragment),Dc=l(),Mo=r("p"),Rc=s("Example:"),Wc=l(),f(mn.$$.fragment),vr=l(),Te=r("h2"),He=r("a"),Fo=r("span"),f(gn.$$.fragment),Oc=l(),jo=r("span"),Kc=s("SqueezeBertForTokenClassification"),kr=l(),M=r("div"),f(_n.$$.fragment),Hc=l(),Co=r("p"),Qc=s(`SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Vc=l(),zn=r("p"),Uc=s("The SqueezeBERT model was proposed in "),bn=r("a"),Gc=s(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Jc=s(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Xc=l(),qn=r("p"),Yc=s("This model inherits from "),cs=r("a"),Zc=s("PreTrainedModel"),eu=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tu=l(),vn=r("p"),nu=s("This model is also a PyTorch "),kn=r("a"),su=s("torch.nn.Module"),ou=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ru=l(),wn=r("p"),au=s(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Po=r("em"),iu=s("squeezebert/squeezebert-mnli-headless"),lu=s(" checkpoint as a starting point."),du=l(),xo=r("p"),cu=s("Hierarchy:"),uu=l(),f(Tn.$$.fragment),hu=l(),Ao=r("p"),pu=s("Data layouts:"),fu=l(),f(yn.$$.fragment),mu=l(),G=r("div"),f(Sn.$$.fragment),gu=l(),ye=r("p"),_u=s("The "),us=r("a"),zu=s("SqueezeBertForTokenClassification"),bu=s(" forward method, overrides the "),Lo=r("code"),qu=s("__call__"),vu=s(" special method."),ku=l(),f(Qe.$$.fragment),wu=l(),No=r("p"),Tu=s("Example:"),yu=l(),f(Bn.$$.fragment),wr=l(),Se=r("h2"),Ve=r("a"),Io=r("span"),f($n.$$.fragment),Su=l(),Do=r("span"),Bu=s("SqueezeBertForQuestionAnswering"),Tr=l(),F=r("div"),f(En.$$.fragment),$u=l(),Be=r("p"),Eu=s(`SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ro=r("code"),Mu=s("span start logits"),Fu=s(" and "),Wo=r("code"),ju=s("span end logits"),Cu=s(")."),Pu=l(),Mn=r("p"),xu=s("The SqueezeBERT model was proposed in "),Fn=r("a"),Au=s(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Lu=s(` by Forrest N. 
Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Nu=l(),jn=r("p"),Iu=s("This model inherits from "),hs=r("a"),Du=s("PreTrainedModel"),Ru=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wu=l(),Cn=r("p"),Ou=s("This model is also a PyTorch "),Pn=r("a"),Ku=s("torch.nn.Module"),Hu=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qu=l(),xn=r("p"),Vu=s(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Oo=r("em"),Uu=s("squeezebert/squeezebert-mnli-headless"),Gu=s(" checkpoint as a starting point."),Ju=l(),Ko=r("p"),Xu=s("Hierarchy:"),Yu=l(),f(An.$$.fragment),Zu=l(),Ho=r("p"),eh=s("Data layouts:"),th=l(),f(Ln.$$.fragment),nh=l(),J=r("div"),f(Nn.$$.fragment),sh=l(),$e=r("p"),oh=s("The "),ps=r("a"),rh=s("SqueezeBertForQuestionAnswering"),ah=s(" forward method, overrides the "),Qo=r("code"),ih=s("__call__"),lh=s(" special method."),dh=l(),f(Ue.$$.fragment),ch=l(),Vo=r("p"),uh=s("Example:"),hh=l(),f(In.$$.fragment),this.h()},l(t){const u=jf('[data-svelte="svelte-1phssyn"]',document.head);h=a(u,"META",{name:!0,content:!0}),u.forEach(n),T=d(t),q=a(t,"H1",{class:!0});var Dn=i(q);w=a(Dn,"A",{id:!0,class:!0,href:!0});var Uo=i(w);y=a(Uo,"SPAN",{});var Go=i(y);m(k.$$.fragment,Go),Go.forEach(n),Uo.forEach(n),v=d(Dn),j=a(Dn,"SPAN",{});var Jo=i(j);ma=o(Jo,"SqueezeBERT"),Jo.forEach(n),Dn.forEach(n),er=d(t),ce=a(t,"H2",{class:!0});var Rn=i(ce);Ee=a(Rn,"A",{id:!0,class:!0,href:!0});var Xo=i(Ee);Bs=a(Xo,"SPAN",{});var mh=i(Bs);m(Xe.$$.fragment,mh),mh.forEach(n),Xo.forEach(n),ga=d(Rn),$s=a(Rn,"SPAN",{});var gh=i($s);_a=o(gh,"Overview"),gh.forEach(n),Rn.forEach(n),tr=d(t),ne=a(t,"P",{});var fs=i(ne);za=o(fs,"The SqueezeBERT model was proposed in "),Ye=a(fs,"A",{href:!0,rel:!0});var _h=i(Ye);ba=o(_h,"SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),_h.forEach(n),qa=o(fs,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, Kurt W. Keutzer. It\u2019s a bidirectional transformer similar to the BERT model. The key difference between the BERT architecture and the SqueezeBERT architecture is that SqueezeBERT uses `),Ze=a(fs,"A",{href:!0,rel:!0});var zh=i(Ze);va=o(zh,"grouped convolutions"),zh.forEach(n),ka=o(fs,` instead of fully-connected layers for the Q, K, V and FFN layers.`),fs.forEach(n),nr=d(t),Wn=a(t,"P",{});var bh=i(Wn);wa=o(bh,"The abstract from the paper is the following:"),bh.forEach(n),sr=d(t),On=a(t,"P",{});var qh=i(On);Es=a(qh,"EM",{});var vh=i(Es);Ta=o(vh,`Humans read and write hundreds of billions of messages every day. Further, due to the availability of large datasets, large computing systems, and better neural network models, natural language processing (NLP) technology has made significant strides in understanding, proofreading, and organizing these messages. Thus, there is a significant opportunity to deploy NLP in myriad applications to help web users, social networks, and businesses. In particular, we consider smartphones and other mobile devices as crucial platforms for deploying NLP models at scale. However, today\u2019s highly-accurate NLP neural network models such as BERT and RoBERTa are extremely computationally expensive, with BERT-base taking 1.7 seconds to classify a text snippet on a Pixel 3 smartphone. 
In this work, we observe that methods such as grouped convolutions have yielded significant speedups for computer vision networks, but many of these techniques have not been adopted by NLP neural network designers. We demonstrate how to replace several operations in self-attention layers with grouped convolutions, and we use this technique in a novel network architecture called SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. The SqueezeBERT code will be released.`),vh.forEach(n),qh.forEach(n),or=d(t),Kn=a(t,"P",{});var kh=i(Kn);ya=o(kh,"Tips:"),kh.forEach(n),rr=d(t),se=a(t,"UL",{});var ms=i(se);Ms=a(ms,"LI",{});var wh=i(Ms);Sa=o(wh,`SqueezeBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),wh.forEach(n),Ba=d(ms),Fs=a(ms,"LI",{});var Th=i(Fs);$a=o(Th,`SqueezeBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.`),Th.forEach(n),Ea=d(ms),et=a(ms,"LI",{});var Sr=i(et);Ma=o(Sr,`For best results when finetuning on sequence classification tasks, it is recommended to start with the `),js=a(Sr,"EM",{});var yh=i(js);Fa=o(yh,"squeezebert/squeezebert-mnli-headless"),yh.forEach(n),ja=o(Sr," checkpoint."),Sr.forEach(n),ms.forEach(n),ar=d(t),Me=a(t,"P",{});var Br=i(Me);Ca=o(Br,"This model was contributed by "),tt=a(Br,"A",{href:!0,rel:!0});var Sh=i(tt);Pa=o(Sh,"forresti"),Sh.forEach(n),xa=o(Br,"."),Br.forEach(n),ir=d(t),ue=a(t,"H2",{class:!0});var $r=i(ue);Fe=a($r,"A",{id:!0,class:!0,href:!0});var Bh=i(Fe);Cs=a(Bh,"SPAN",{});var $h=i(Cs);m(nt.$$.fragment,$h),$h.forEach(n),Bh.forEach(n),Aa=d($r),Ps=a($r,"SPAN",{});var Eh=i(Ps);La=o(Eh,"SqueezeBertConfig"),Eh.forEach(n),$r.forEach(n),lr=d(t),O=a(t,"DIV",{class:!0});var ee=i(O);m(st.$$.fragment,ee),Na=d(ee),ot=a(ee,"P",{});var Er=i(ot);Ia=o(Er,"This is the configuration class to store the configuration of a "),Hn=a(Er,"A",{href:!0});var Mh=i(Hn);Da=o(Mh,"SqueezeBertModel"),Mh.forEach(n),Ra=o(Er,`. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture.`),Er.forEach(n),Wa=d(ee),he=a(ee,"P",{});var gs=i(he);Oa=o(gs,"Configuration objects inherit from "),Qn=a(gs,"A",{href:!0});var Fh=i(Qn);Ka=o(Fh,"PretrainedConfig"),Fh.forEach(n),Ha=o(gs,` and can be used to control the model outputs. 
Read the documentation from `),Vn=a(gs,"A",{href:!0});var jh=i(Vn);Qa=o(jh,"PretrainedConfig"),jh.forEach(n),Va=o(gs," for more information."),gs.forEach(n),Ua=d(ee),xs=a(ee,"P",{});var Ch=i(xs);Ga=o(Ch,"Examples:"),Ch.forEach(n),Ja=d(ee),m(rt.$$.fragment,ee),Xa=d(ee),As=a(ee,"P",{});var Ph=i(As);Ya=o(Ph,`Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints.`),Ph.forEach(n),ee.forEach(n),dr=d(t),pe=a(t,"H2",{class:!0});var Mr=i(pe);je=a(Mr,"A",{id:!0,class:!0,href:!0});var xh=i(je);Ls=a(xh,"SPAN",{});var Ah=i(Ls);m(at.$$.fragment,Ah),Ah.forEach(n),xh.forEach(n),Za=d(Mr),Ns=a(Mr,"SPAN",{});var Lh=i(Ns);ei=o(Lh,"SqueezeBertTokenizer"),Lh.forEach(n),Mr.forEach(n),cr=d(t),I=a(t,"DIV",{class:!0});var K=i(I);m(it.$$.fragment,K),ti=d(K),Is=a(K,"P",{});var Nh=i(Is);ni=o(Nh,"Constructs a SqueezeBert tokenizer."),Nh.forEach(n),si=d(K),Ce=a(K,"P",{});var Yo=i(Ce);Un=a(Yo,"A",{href:!0});var Ih=i(Un);oi=o(Ih,"SqueezeBertTokenizer"),Ih.forEach(n),ri=o(Yo," is identical to "),Gn=a(Yo,"A",{href:!0});var Dh=i(Gn);ai=o(Dh,"BertTokenizer"),Dh.forEach(n),ii=o(Yo,` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Yo.forEach(n),li=d(K),lt=a(K,"P",{});var Fr=i(lt);di=o(Fr,"Refer to superclass "),Jn=a(Fr,"A",{href:!0});var Rh=i(Jn);ci=o(Rh,"BertTokenizer"),Rh.forEach(n),ui=o(Fr,` for usage examples and documentation concerning parameters.`),Fr.forEach(n),hi=d(K),oe=a(K,"DIV",{class:!0});var _s=i(oe);m(dt.$$.fragment,_s),pi=d(_s),Ds=a(_s,"P",{});var Wh=i(Ds);fi=o(Wh,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),Wh.forEach(n),mi=d(_s),ct=a(_s,"UL",{});var jr=i(ct);Xn=a(jr,"LI",{});var ph=i(Xn);gi=o(ph,"single sequence: "),Rs=a(ph,"CODE",{});var Oh=i(Rs);_i=o(Oh,"[CLS] X [SEP]"),Oh.forEach(n),ph.forEach(n),zi=d(jr),Yn=a(jr,"LI",{});var fh=i(Yn);bi=o(fh,"pair of sequences: "),Ws=a(fh,"CODE",{});var Kh=i(Ws);qi=o(Kh,"[CLS] A [SEP] B [SEP]"),Kh.forEach(n),fh.forEach(n),jr.forEach(n),_s.forEach(n),vi=d(K),Pe=a(K,"DIV",{class:!0});var Cr=i(Pe);m(ut.$$.fragment,Cr),ki=d(Cr),ht=a(Cr,"P",{});var Pr=i(ht);wi=o(Pr,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Os=a(Pr,"CODE",{});var Hh=i(Os);Ti=o(Hh,"prepare_for_model"),Hh.forEach(n),yi=o(Pr," method."),Pr.forEach(n),Cr.forEach(n),Si=d(K),Z=a(K,"DIV",{class:!0});var Ge=i(Z);m(pt.$$.fragment,Ge),Bi=d(Ge),Ks=a(Ge,"P",{});var Qh=i(Ks);$i=o(Qh,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A BERT sequence pair mask has the following format:`),Qh.forEach(n),Ei=d(Ge),m(ft.$$.fragment,Ge),Mi=d(Ge),fe=a(Ge,"P",{});var zs=i(fe);Fi=o(zs,"If "),Hs=a(zs,"CODE",{});var Vh=i(Hs);ji=o(Vh,"token_ids_1"),Vh.forEach(n),Ci=o(zs," is "),Qs=a(zs,"CODE",{});var Uh=i(Qs);Pi=o(Uh,"None"),Uh.forEach(n),xi=o(zs,", this method only returns the first portion of the mask (0s)."),zs.forEach(n),Ge.forEach(n),Ai=d(K),Vs=a(K,"DIV",{class:!0}),i(Vs).forEach(n),K.forEach(n),ur=d(t),me=a(t,"H2",{class:!0});var xr=i(me);xe=a(xr,"A",{id:!0,class:!0,href:!0});var Gh=i(xe);Us=a(Gh,"SPAN",{});var Jh=i(Us);m(mt.$$.fragment,Jh),Jh.forEach(n),Gh.forEach(n),Li=d(xr),Gs=a(xr,"SPAN",{});var Xh=i(Gs);Ni=o(Xh,"SqueezeBertTokenizerFast"),Xh.forEach(n),xr.forEach(n),hr=d(t),Y=a(t,"DIV",{class:!0});var Je=i(Y);m(gt.$$.fragment,Je),Ii=d(Je),_t=a(Je,"P",{});var Ar=i(_t);Di=o(Ar,"Constructs a \u201CFast\u201D SqueezeBert tokenizer (backed by HuggingFace\u2019s "),Js=a(Ar,"EM",{});var Yh=i(Js);Ri=o(Yh,"tokenizers"),Yh.forEach(n),Wi=o(Ar," library)."),Ar.forEach(n),Oi=d(Je),Ae=a(Je,"P",{});var Zo=i(Ae);Zn=a(Zo,"A",{href:!0});var Zh=i(Zn);Ki=o(Zh,"SqueezeBertTokenizerFast"),Zh.forEach(n),Hi=o(Zo," is identical to "),es=a(Zo,"A",{href:!0});var ep=i(es);Qi=o(ep,"BertTokenizerFast"),ep.forEach(n),Vi=o(Zo,` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Zo.forEach(n),Ui=d(Je),zt=a(Je,"P",{});var Lr=i(zt);Gi=o(Lr,"Refer to superclass "),ts=a(Lr,"A",{href:!0});var tp=i(ts);Ji=o(tp,"BertTokenizerFast"),tp.forEach(n),Xi=o(Lr,` for usage examples and documentation concerning parameters.`),Lr.forEach(n),Je.forEach(n),pr=d(t),ge=a(t,"H2",{class:!0});var Nr=i(ge);Le=a(Nr,"A",{id:!0,class:!0,href:!0});var np=i(Le);Xs=a(np,"SPAN",{});var sp=i(Xs);m(bt.$$.fragment,sp),sp.forEach(n),np.forEach(n),Yi=d(Nr),Ys=a(Nr,"SPAN",{});var op=i(Ys);Zi=o(op,"SqueezeBertModel"),op.forEach(n),Nr.forEach(n),fr=d(t),S=a(t,"DIV",{class:!0});var C=i(S);m(qt.$$.fragment,C),el=d(C),Zs=a(C,"P",{});var rp=i(Zs);tl=o(rp,"The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top."),rp.forEach(n),nl=d(C),vt=a(C,"P",{});var Ir=i(vt);sl=o(Ir,"The SqueezeBERT model was proposed in "),kt=a(Ir,"A",{href:!0,rel:!0});var ap=i(kt);ol=o(ap,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),ap.forEach(n),rl=o(Ir,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Ir.forEach(n),al=d(C),wt=a(C,"P",{});var Dr=i(wt);il=o(Dr,"This model inherits from "),ns=a(Dr,"A",{href:!0});var ip=i(ns);ll=o(ip,"PreTrainedModel"),ip.forEach(n),dl=o(Dr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Dr.forEach(n),cl=d(C),Tt=a(C,"P",{});var Rr=i(Tt);ul=o(Rr,"This model is also a PyTorch "),yt=a(Rr,"A",{href:!0,rel:!0});var lp=i(yt);hl=o(lp,"torch.nn.Module"),lp.forEach(n),pl=o(Rr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rr.forEach(n),fl=d(C),St=a(C,"P",{});var Wr=i(St);ml=o(Wr,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),eo=a(Wr,"EM",{});var dp=i(eo);gl=o(dp,"squeezebert/squeezebert-mnli-headless"),dp.forEach(n),_l=o(Wr," checkpoint as a starting point."),Wr.forEach(n),zl=d(C),to=a(C,"P",{});var cp=i(to);bl=o(cp,"Hierarchy:"),cp.forEach(n),ql=d(C),m(Bt.$$.fragment,C),vl=d(C),no=a(C,"P",{});var up=i(no);kl=o(up,"Data layouts:"),up.forEach(n),wl=d(C),m($t.$$.fragment,C),Tl=d(C),Q=a(C,"DIV",{class:!0});var re=i(Q);m(Et.$$.fragment,re),yl=d(re),_e=a(re,"P",{});var bs=i(_e);Sl=o(bs,"The "),ss=a(bs,"A",{href:!0});var hp=i(ss);Bl=o(hp,"SqueezeBertModel"),hp.forEach(n),$l=o(bs," forward method, overrides the "),so=a(bs,"CODE",{});var pp=i(so);El=o(pp,"__call__"),pp.forEach(n),Ml=o(bs," special method."),bs.forEach(n),Fl=d(re),m(Ne.$$.fragment,re),jl=d(re),oo=a(re,"P",{});var fp=i(oo);Cl=o(fp,"Example:"),fp.forEach(n),Pl=d(re),m(Mt.$$.fragment,re),re.forEach(n),C.forEach(n),mr=d(t),ze=a(t,"H2",{class:!0});var Or=i(ze);Ie=a(Or,"A",{id:!0,class:!0,href:!0});var mp=i(Ie);ro=a(mp,"SPAN",{});var gp=i(ro);m(Ft.$$.fragment,gp),gp.forEach(n),mp.forEach(n),xl=d(Or),ao=a(Or,"SPAN",{});var _p=i(ao);Al=o(_p,"SqueezeBertForMaskedLM"),_p.forEach(n),Or.forEach(n),gr=d(t),B=a(t,"DIV",{class:!0});var P=i(B);m(jt.$$.fragment,P),Ll=d(P),Ct=a(P,"P",{});var Kr=i(Ct);Nl=o(Kr,"SqueezeBERT Model with a "),io=a(Kr,"CODE",{});var zp=i(io);Il=o(zp,"language modeling"),zp.forEach(n),Dl=o(Kr," head on top."),Kr.forEach(n),Rl=d(P),Pt=a(P,"P",{});var Hr=i(Pt);Wl=o(Hr,"The SqueezeBERT model was proposed in "),xt=a(Hr,"A",{href:!0,rel:!0});var bp=i(xt);Ol=o(bp,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),bp.forEach(n),Kl=o(Hr,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Hr.forEach(n),Hl=d(P),At=a(P,"P",{});var Qr=i(At);Ql=o(Qr,"This model inherits from "),os=a(Qr,"A",{href:!0});var qp=i(os);Vl=o(qp,"PreTrainedModel"),qp.forEach(n),Ul=o(Qr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qr.forEach(n),Gl=d(P),Lt=a(P,"P",{});var Vr=i(Lt);Jl=o(Vr,"This model is also a PyTorch "),Nt=a(Vr,"A",{href:!0,rel:!0});var vp=i(Nt);Xl=o(vp,"torch.nn.Module"),vp.forEach(n),Yl=o(Vr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vr.forEach(n),Zl=d(P),It=a(P,"P",{});var Ur=i(It);ed=o(Ur,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),lo=a(Ur,"EM",{});var kp=i(lo);td=o(kp,"squeezebert/squeezebert-mnli-headless"),kp.forEach(n),nd=o(Ur," checkpoint as a starting point."),Ur.forEach(n),sd=d(P),co=a(P,"P",{});var wp=i(co);od=o(wp,"Hierarchy:"),wp.forEach(n),rd=d(P),m(Dt.$$.fragment,P),ad=d(P),uo=a(P,"P",{});var Tp=i(uo);id=o(Tp,"Data layouts:"),Tp.forEach(n),ld=d(P),m(Rt.$$.fragment,P),dd=d(P),V=a(P,"DIV",{class:!0});var ae=i(V);m(Wt.$$.fragment,ae),cd=d(ae),be=a(ae,"P",{});var qs=i(be);ud=o(qs,"The "),rs=a(qs,"A",{href:!0});var yp=i(rs);hd=o(yp,"SqueezeBertForMaskedLM"),yp.forEach(n),pd=o(qs," forward method, overrides the "),ho=a(qs,"CODE",{});var Sp=i(ho);fd=o(Sp,"__call__"),Sp.forEach(n),md=o(qs," special method."),qs.forEach(n),gd=d(ae),m(De.$$.fragment,ae),_d=d(ae),po=a(ae,"P",{});var Bp=i(po);zd=o(Bp,"Example:"),Bp.forEach(n),bd=d(ae),m(Ot.$$.fragment,ae),ae.forEach(n),P.forEach(n),_r=d(t),qe=a(t,"H2",{class:!0});var Gr=i(qe);Re=a(Gr,"A",{id:!0,class:!0,href:!0});var $p=i(Re);fo=a($p,"SPAN",{});var Ep=i(fo);m(Kt.$$.fragment,Ep),Ep.forEach(n),$p.forEach(n),qd=d(Gr),mo=a(Gr,"SPAN",{});var Mp=i(mo);vd=o(Mp,"SqueezeBertForSequenceClassification"),Mp.forEach(n),Gr.forEach(n),zr=d(t),$=a(t,"DIV",{class:!0});var x=i($);m(Ht.$$.fragment,x),kd=d(x),go=a(x,"P",{});var Fp=i(go);wd=o(Fp,`SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Fp.forEach(n),Td=d(x),Qt=a(x,"P",{});var Jr=i(Qt);yd=o(Jr,"The SqueezeBERT model was proposed in "),Vt=a(Jr,"A",{href:!0,rel:!0});var jp=i(Vt);Sd=o(jp,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),jp.forEach(n),Bd=o(Jr,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Jr.forEach(n),$d=d(x),Ut=a(x,"P",{});var Xr=i(Ut);Ed=o(Xr,"This model inherits from "),as=a(Xr,"A",{href:!0});var Cp=i(as);Md=o(Cp,"PreTrainedModel"),Cp.forEach(n),Fd=o(Xr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xr.forEach(n),jd=d(x),Gt=a(x,"P",{});var Yr=i(Gt);Cd=o(Yr,"This model is also a PyTorch "),Jt=a(Yr,"A",{href:!0,rel:!0});var Pp=i(Jt);Pd=o(Pp,"torch.nn.Module"),Pp.forEach(n),xd=o(Yr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yr.forEach(n),Ad=d(x),Xt=a(x,"P",{});var Zr=i(Xt);Ld=o(Zr,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),_o=a(Zr,"EM",{});var xp=i(_o);Nd=o(xp,"squeezebert/squeezebert-mnli-headless"),xp.forEach(n),Id=o(Zr," checkpoint as a starting point."),Zr.forEach(n),Dd=d(x),zo=a(x,"P",{});var Ap=i(zo);Rd=o(Ap,"Hierarchy:"),Ap.forEach(n),Wd=d(x),m(Yt.$$.fragment,x),Od=d(x),bo=a(x,"P",{});var Lp=i(bo);Kd=o(Lp,"Data layouts:"),Lp.forEach(n),Hd=d(x),m(Zt.$$.fragment,x),Qd=d(x),R=a(x,"DIV",{class:!0});var X=i(R);m(en.$$.fragment,X),Vd=d(X),ve=a(X,"P",{});var vs=i(ve);Ud=o(vs,"The "),is=a(vs,"A",{href:!0});var Np=i(is);Gd=o(Np,"SqueezeBertForSequenceClassification"),Np.forEach(n),Jd=o(vs," forward method, overrides the "),qo=a(vs,"CODE",{});var Ip=i(qo);Xd=o(Ip,"__call__"),Ip.forEach(n),Yd=o(vs," special method."),vs.forEach(n),Zd=d(X),m(We.$$.fragment,X),ec=d(X),vo=a(X,"P",{});var Dp=i(vo);tc=o(Dp,"Example of single-label classification:"),Dp.forEach(n),nc=d(X),m(tn.$$.fragment,X),sc=d(X),ko=a(X,"P",{});var Rp=i(ko);oc=o(Rp,"Example of multi-label classification:"),Rp.forEach(n),rc=d(X),m(nn.$$.fragment,X),X.forEach(n),x.forEach(n),br=d(t),ke=a(t,"H2",{class:!0});var ea=i(ke);Oe=a(ea,"A",{id:!0,class:!0,href:!0});var Wp=i(Oe);wo=a(Wp,"SPAN",{});var Op=i(wo);m(sn.$$.fragment,Op),Op.forEach(n),Wp.forEach(n),ac=d(ea),To=a(ea,"SPAN",{});var Kp=i(To);ic=o(Kp,"SqueezeBertForMultipleChoice"),Kp.forEach(n),ea.forEach(n),qr=d(t),E=a(t,"DIV",{class:!0});var A=i(E);m(on.$$.fragment,A),lc=d(A),yo=a(A,"P",{});var Hp=i(yo);dc=o(Hp,`SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Hp.forEach(n),cc=d(A),rn=a(A,"P",{});var ta=i(rn);uc=o(ta,"The SqueezeBERT model was proposed in "),an=a(ta,"A",{href:!0,rel:!0});var Qp=i(an);hc=o(Qp,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Qp.forEach(n),pc=o(ta,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),ta.forEach(n),fc=d(A),ln=a(A,"P",{});var na=i(ln);mc=o(na,"This model inherits from "),ls=a(na,"A",{href:!0});var Vp=i(ls);gc=o(Vp,"PreTrainedModel"),Vp.forEach(n),_c=o(na,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),na.forEach(n),zc=d(A),dn=a(A,"P",{});var sa=i(dn);bc=o(sa,"This model is also a PyTorch "),cn=a(sa,"A",{href:!0,rel:!0});var Up=i(cn);qc=o(Up,"torch.nn.Module"),Up.forEach(n),vc=o(sa,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sa.forEach(n),kc=d(A),un=a(A,"P",{});var oa=i(un);wc=o(oa,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),So=a(oa,"EM",{});var Gp=i(So);Tc=o(Gp,"squeezebert/squeezebert-mnli-headless"),Gp.forEach(n),yc=o(oa," checkpoint as a starting point."),oa.forEach(n),Sc=d(A),Bo=a(A,"P",{});var Jp=i(Bo);Bc=o(Jp,"Hierarchy:"),Jp.forEach(n),$c=d(A),m(hn.$$.fragment,A),Ec=d(A),$o=a(A,"P",{});var Xp=i($o);Mc=o(Xp,"Data layouts:"),Xp.forEach(n),Fc=d(A),m(pn.$$.fragment,A),jc=d(A),U=a(A,"DIV",{class:!0});var ie=i(U);m(fn.$$.fragment,ie),Cc=d(ie),we=a(ie,"P",{});var ks=i(we);Pc=o(ks,"The "),ds=a(ks,"A",{href:!0});var Yp=i(ds);xc=o(Yp,"SqueezeBertForMultipleChoice"),Yp.forEach(n),Ac=o(ks," forward method, overrides the "),Eo=a(ks,"CODE",{});var Zp=i(Eo);Lc=o(Zp,"__call__"),Zp.forEach(n),Nc=o(ks," special method."),ks.forEach(n),Ic=d(ie),m(Ke.$$.fragment,ie),Dc=d(ie),Mo=a(ie,"P",{});var ef=i(Mo);Rc=o(ef,"Example:"),ef.forEach(n),Wc=d(ie),m(mn.$$.fragment,ie),ie.forEach(n),A.forEach(n),vr=d(t),Te=a(t,"H2",{class:!0});var ra=i(Te);He=a(ra,"A",{id:!0,class:!0,href:!0});var tf=i(He);Fo=a(tf,"SPAN",{});var nf=i(Fo);m(gn.$$.fragment,nf),nf.forEach(n),tf.forEach(n),Oc=d(ra),jo=a(ra,"SPAN",{});var sf=i(jo);Kc=o(sf,"SqueezeBertForTokenClassification"),sf.forEach(n),ra.forEach(n),kr=d(t),M=a(t,"DIV",{class:!0});var L=i(M);m(_n.$$.fragment,L),Hc=d(L),Co=a(L,"P",{});var of=i(Co);Qc=o(of,`SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),of.forEach(n),Vc=d(L),zn=a(L,"P",{});var aa=i(zn);Uc=o(aa,"The SqueezeBERT model was proposed in "),bn=a(aa,"A",{href:!0,rel:!0});var rf=i(bn);Gc=o(rf,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),rf.forEach(n),Jc=o(aa,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),aa.forEach(n),Xc=d(L),qn=a(L,"P",{});var ia=i(qn);Yc=o(ia,"This model inherits from "),cs=a(ia,"A",{href:!0});var af=i(cs);Zc=o(af,"PreTrainedModel"),af.forEach(n),eu=o(ia,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ia.forEach(n),tu=d(L),vn=a(L,"P",{});var la=i(vn);nu=o(la,"This model is also a PyTorch "),kn=a(la,"A",{href:!0,rel:!0});var lf=i(kn);su=o(lf,"torch.nn.Module"),lf.forEach(n),ou=o(la,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),la.forEach(n),ru=d(L),wn=a(L,"P",{});var da=i(wn);au=o(da,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Po=a(da,"EM",{});var df=i(Po);iu=o(df,"squeezebert/squeezebert-mnli-headless"),df.forEach(n),lu=o(da," checkpoint as a starting point."),da.forEach(n),du=d(L),xo=a(L,"P",{});var cf=i(xo);cu=o(cf,"Hierarchy:"),cf.forEach(n),uu=d(L),m(Tn.$$.fragment,L),hu=d(L),Ao=a(L,"P",{});var uf=i(Ao);pu=o(uf,"Data layouts:"),uf.forEach(n),fu=d(L),m(yn.$$.fragment,L),mu=d(L),G=a(L,"DIV",{class:!0});var le=i(G);m(Sn.$$.fragment,le),gu=d(le),ye=a(le,"P",{});var ws=i(ye);_u=o(ws,"The "),us=a(ws,"A",{href:!0});var hf=i(us);zu=o(hf,"SqueezeBertForTokenClassification"),hf.forEach(n),bu=o(ws," forward method, overrides the "),Lo=a(ws,"CODE",{});var pf=i(Lo);qu=o(pf,"__call__"),pf.forEach(n),vu=o(ws," special method."),ws.forEach(n),ku=d(le),m(Qe.$$.fragment,le),wu=d(le),No=a(le,"P",{});var ff=i(No);Tu=o(ff,"Example:"),ff.forEach(n),yu=d(le),m(Bn.$$.fragment,le),le.forEach(n),L.forEach(n),wr=d(t),Se=a(t,"H2",{class:!0});var ca=i(Se);Ve=a(ca,"A",{id:!0,class:!0,href:!0});var mf=i(Ve);Io=a(mf,"SPAN",{});var gf=i(Io);m($n.$$.fragment,gf),gf.forEach(n),mf.forEach(n),Su=d(ca),Do=a(ca,"SPAN",{});var _f=i(Do);Bu=o(_f,"SqueezeBertForQuestionAnswering"),_f.forEach(n),ca.forEach(n),Tr=d(t),F=a(t,"DIV",{class:!0});var N=i(F);m(En.$$.fragment,N),$u=d(N),Be=a(N,"P",{});var Ts=i(Be);Eu=o(Ts,`SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ro=a(Ts,"CODE",{});var zf=i(Ro);Mu=o(zf,"span start logits"),zf.forEach(n),Fu=o(Ts," and "),Wo=a(Ts,"CODE",{});var bf=i(Wo);ju=o(bf,"span end logits"),bf.forEach(n),Cu=o(Ts,")."),Ts.forEach(n),Pu=d(N),Mn=a(N,"P",{});var ua=i(Mn);xu=o(ua,"The SqueezeBERT model was proposed in "),Fn=a(ua,"A",{href:!0,rel:!0});var qf=i(Fn);Au=o(qf,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),qf.forEach(n),Lu=o(ua,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),ua.forEach(n),Nu=d(N),jn=a(N,"P",{});var ha=i(jn);Iu=o(ha,"This model inherits from "),hs=a(ha,"A",{href:!0});var vf=i(hs);Du=o(vf,"PreTrainedModel"),vf.forEach(n),Ru=o(ha,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ha.forEach(n),Wu=d(N),Cn=a(N,"P",{});var pa=i(Cn);Ou=o(pa,"This model is also a PyTorch "),Pn=a(pa,"A",{href:!0,rel:!0});var kf=i(Pn);Ku=o(kf,"torch.nn.Module"),kf.forEach(n),Hu=o(pa,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pa.forEach(n),Qu=d(N),xn=a(N,"P",{});var fa=i(xn);Vu=o(fa,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Oo=a(fa,"EM",{});var wf=i(Oo);Uu=o(wf,"squeezebert/squeezebert-mnli-headless"),wf.forEach(n),Gu=o(fa," checkpoint as a starting point."),fa.forEach(n),Ju=d(N),Ko=a(N,"P",{});var Tf=i(Ko);Xu=o(Tf,"Hierarchy:"),Tf.forEach(n),Yu=d(N),m(An.$$.fragment,N),Zu=d(N),Ho=a(N,"P",{});var yf=i(Ho);eh=o(yf,"Data layouts:"),yf.forEach(n),th=d(N),m(Ln.$$.fragment,N),nh=d(N),J=a(N,"DIV",{class:!0});var de=i(J);m(Nn.$$.fragment,de),sh=d(de),$e=a(de,"P",{});var ys=i($e);oh=o(ys,"The "),ps=a(ys,"A",{href:!0});var Sf=i(ps);rh=o(Sf,"SqueezeBertForQuestionAnswering"),Sf.forEach(n),ah=o(ys," forward method, overrides the "),Qo=a(ys,"CODE",{});var Bf=i(Qo);ih=o(Bf,"__call__"),Bf.forEach(n),lh=o(ys," special method."),ys.forEach(n),dh=d(de),m(Ue.$$.fragment,de),ch=d(de),Vo=a(de,"P",{});var $f=i(Vo);uh=o($f,"Example:"),$f.forEach(n),hh=d(de),m(In.$$.fragment,de),de.forEach(n),N.forEach(n),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(Df)),c(w,"id","squeezebert"),c(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(w,"href","#squeezebert"),c(q,"class","relative group"),c(Ee,"id","overview"),c(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ee,"href","#overview"),c(ce,"class","relative group"),c(Ye,"href","https://arxiv.org/abs/2006.11316"),c(Ye,"rel","nofollow"),c(Ze,"href","https://blog.yani.io/filter-group-tutorial"),c(Ze,"rel","nofollow"),c(tt,"href","https://huggingface.co/forresti"),c(tt,"rel","nofollow"),c(Fe,"id","transformers.SqueezeBertConfig"),c(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fe,"href","#transformers.SqueezeBertConfig"),c(ue,"class","relative group"),c(Hn,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertModel"),c(Qn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Vn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(O,"class","docstring"),c(je,"id","transformers.SqueezeBertTokenizer"),c(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(je,"href","#transformers.SqueezeBertTokenizer"),c(pe,"class","relative group"),c(Un,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer"),c(Gn,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(Jn,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(oe,"class","docstring"),c(Pe,"class","docstring"),c(Z,"class","docstring"),c(Vs,"class","docstring"),c(I,"class","docstring"),c(xe,"id","transformers.SqueezeBertTokenizerFast"),c(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(xe,"href","#transformers.SqueezeBertTokenizerFast"),c(me,"class","relative group"),c(Zn,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertTokenizerFast"),c(es,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(ts,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(Y,"class","docstring"),c(Le,"id","transformers.SqueezeBertModel"),c(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Le,"href","#transformers.SqueezeBertModel"),c(ge,"class","relative group"),c(kt,"href","https://arxiv.org/abs/2006.11316"),c(kt,"rel","nofollow"),c(ns,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(yt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(yt,"rel","nofollow"),c(ss,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertModel"),c(Q,"class","docstring"),c(S,"class","docstring"),c(Ie,"id","transformers.SqueezeBertForMaskedLM"),c(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ie,"href","#transformers.SqueezeBertForMaskedLM"),c(ze,"class","relative group"),c(xt,"href","https://arxiv.org/abs/2006.11316"),c(xt,"rel","nofollow"),c(os,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Nt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Nt,"rel","nofollow"),c(rs,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMaskedLM"),c(V,"class","docstring"),c(B,"class","docstring"),c(Re,"id","transformers.SqueezeBertForSequenceClassification"),c(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Re,"href","#transformers.SqueezeBertForSequenceClassification"),c(qe,"class","relative group"),c(Vt,"href","https://arxiv.org/abs/2006.11316"),c(Vt,"rel","nofollow"),c(as,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Jt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Jt,"rel","nofollow"),c(is,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForSequenceClassification"),c(R,"class","docstring"),c($,"class","docstring"),c(Oe,"id","transformers.SqueezeBertForMultipleChoice"),c(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oe,"href","#transformers.SqueezeBertForMultipleChoice"),c(ke,"class","relative group"),c(an,"href","https://arxiv.org/abs/2006.11316"),c(an,"rel","nofollow"),c(ls,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(cn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(cn,"rel","nofollow"),c(ds,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForMultipleChoice"),c(U,"class","docstring"),c(E,"class","docstring"),c(He,"id","transformers.SqueezeBertForTokenClassification"),c(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(He,"href","#transformers.SqueezeBertForTokenClassification"),c(Te,"class","relative group"),c(bn,"href","https://arxiv.org/abs/2006.11316"),c(bn,"rel","nofollow"),c(cs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(kn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(kn,"rel","nofollow"),c(us,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForTokenClassification"),c(G,"class","docstring"),c(M,"class","docstring"),c(Ve,"id","transformers.SqueezeBertForQuestionAnswering"),c(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ve,"href","#transformers.SqueezeBertForQuestionAnswering"),c(Se,"class","relative group"),c(Fn,"href","https://arxiv.org/abs/2006.11316"),c(Fn,"rel","nofollow"),c(hs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Pn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Pn,"rel","nofollow"),c(ps,"href","/docs/transformers/v4.15.0/en/model_doc/squeezebert#transformers.SqueezeBertForQuestionAnswering"),c(J,"class","docstring"),c(F,"class","docstring")},m(t,u){e(document.head,h),p(t,T,u),p(t,q,u),e(q,w),e(w,y),g(k,y,null),e(q,v),e(q,j),e(j,ma),p(t,er,u),p(t,ce,u),e(ce,Ee),e(Ee,Bs),g(Xe,Bs,null),e(ce,ga),e(ce,$s),e($s,_a),p(t,tr,u),p(t,ne,u),e(ne,za),e(ne,Ye),e(Ye,ba),e(ne,qa),e(ne,Ze),e(Ze,va),e(ne,ka),p(t,nr,u),p(t,Wn,u),e(Wn,wa),p(t,sr,u),p(t,On,u),e(On,Es),e(Es,Ta),p(t,or,u),p(t,Kn,u),e(Kn,ya),p(t,rr,u),p(t,se,u),e(se,Ms),e(Ms,Sa),e(se,Ba),e(se,Fs),e(Fs,$a),e(se,Ea),e(se,et),e(et,Ma),e(et,js),e(js,Fa),e(et,ja),p(t,ar,u),p(t,Me,u),e(Me,Ca),e(Me,tt),e(tt,Pa),e(Me,xa),p(t,ir,u),p(t,ue,u),e(ue,Fe),e(Fe,Cs),g(nt,Cs,null),e(ue,Aa),e(ue,Ps),e(Ps,La),p(t,lr,u),p(t,O,u),g(st,O,null),e(O,Na),e(O,ot),e(ot,Ia),e(ot,Hn),e(Hn,Da),e(ot,Ra),e(O,Wa),e(O,he),e(he,Oa),e(he,Qn),e(Qn,Ka),e(he,Ha),e(he,Vn),e(Vn,Qa),e(he,Va),e(O,Ua),e(O,xs),e(xs,Ga),e(O,Ja),g(rt,O,null),e(O,Xa),e(O,As),e(As,Ya),p(t,dr,u),p(t,pe,u),e(pe,je),e(je,Ls),g(at,Ls,null),e(pe,Za),e(pe,Ns),e(Ns,ei),p(t,cr,u),p(t,I,u),g(it,I,null),e(I,ti),e(I,Is),e(Is,ni),e(I,si),e(I,Ce),e(Ce,Un),e(Un,oi),e(Ce,ri),e(Ce,Gn),e(Gn,ai),e(Ce,ii),e(I,li),e(I,lt),e(lt,di),e(lt,Jn),e(Jn,ci),e(lt,ui),e(I,hi),e(I,oe),g(dt,oe,null),e(oe,pi),e(oe,Ds),e(Ds,fi),e(oe,mi),e(oe,ct),e(ct,Xn),e(Xn,gi),e(Xn,Rs),e(Rs,_i),e(ct,zi),e(ct,Yn),e(Yn,bi),e(Yn,Ws),e(Ws,qi),e(I,vi),e(I,Pe),g(ut,Pe,null),e(Pe,ki),e(Pe,ht),e(ht,wi),e(ht,Os),e(Os,Ti),e(ht,yi),e(I,Si),e(I,Z),g(pt,Z,null),e(Z,Bi),e(Z,Ks),e(Ks,$i),e(Z,Ei),g(ft,Z,null),e(Z,Mi),e(Z,fe),e(fe,Fi),e(fe,Hs),e(Hs,ji),e(fe,Ci),e(fe,Qs),e(Qs,Pi),e(fe,xi),e(I,Ai),e(I,Vs),p(t,ur,u),p(t,me,u),e(me,xe),e(xe,Us),g(mt,Us,null),e(me,Li),e(me,Gs),e(Gs,Ni),p(t,hr,u),p(t,Y,u),g(gt,Y,null),e(Y,Ii),e(Y,_t),e(_t,Di),e(_t,Js),e(Js,Ri),e(_t,Wi),e(Y,Oi),e(Y,Ae),e(Ae,Zn),e(Zn,Ki),e(Ae,Hi),e(Ae,es),e(es,Qi),e(Ae,Vi),e(Y,Ui),e(Y,zt),e(zt,Gi),e(zt,ts),e(ts,Ji),e(zt,Xi),p(t,pr,u),p(t,ge,u),e(ge,Le),e(Le,Xs),g(bt,Xs,null),e(ge,Yi),e(ge,Ys),e(Ys,Zi),p(t,fr,u),p(t,S,u),g(qt,S,null),e(S,el),e(S,Zs),e(Zs,tl),e(S,nl),e(S,vt),e(vt,sl),e(vt,kt),e(kt,ol),e(vt,rl),e(S,al),e(S,wt),e(wt,il),e(wt,ns),e(ns,ll),e(wt,dl),e(S,cl),e(S,Tt),e(Tt,ul),e(Tt,yt),e(yt,hl),e(Tt,pl),e(S,fl),e(S,St),e(St,ml),e(St,eo),e(eo,gl),e(St,_l),e(S,zl),e(S,to),e(to,bl),e(S,ql),g(Bt,S,null),e(S,vl),e(S,no),e(no,kl),e(S,wl),g($t,S,null),e(S
,Tl),e(S,Q),g(Et,Q,null),e(Q,yl),e(Q,_e),e(_e,Sl),e(_e,ss),e(ss,Bl),e(_e,$l),e(_e,so),e(so,El),e(_e,Ml),e(Q,Fl),g(Ne,Q,null),e(Q,jl),e(Q,oo),e(oo,Cl),e(Q,Pl),g(Mt,Q,null),p(t,mr,u),p(t,ze,u),e(ze,Ie),e(Ie,ro),g(Ft,ro,null),e(ze,xl),e(ze,ao),e(ao,Al),p(t,gr,u),p(t,B,u),g(jt,B,null),e(B,Ll),e(B,Ct),e(Ct,Nl),e(Ct,io),e(io,Il),e(Ct,Dl),e(B,Rl),e(B,Pt),e(Pt,Wl),e(Pt,xt),e(xt,Ol),e(Pt,Kl),e(B,Hl),e(B,At),e(At,Ql),e(At,os),e(os,Vl),e(At,Ul),e(B,Gl),e(B,Lt),e(Lt,Jl),e(Lt,Nt),e(Nt,Xl),e(Lt,Yl),e(B,Zl),e(B,It),e(It,ed),e(It,lo),e(lo,td),e(It,nd),e(B,sd),e(B,co),e(co,od),e(B,rd),g(Dt,B,null),e(B,ad),e(B,uo),e(uo,id),e(B,ld),g(Rt,B,null),e(B,dd),e(B,V),g(Wt,V,null),e(V,cd),e(V,be),e(be,ud),e(be,rs),e(rs,hd),e(be,pd),e(be,ho),e(ho,fd),e(be,md),e(V,gd),g(De,V,null),e(V,_d),e(V,po),e(po,zd),e(V,bd),g(Ot,V,null),p(t,_r,u),p(t,qe,u),e(qe,Re),e(Re,fo),g(Kt,fo,null),e(qe,qd),e(qe,mo),e(mo,vd),p(t,zr,u),p(t,$,u),g(Ht,$,null),e($,kd),e($,go),e(go,wd),e($,Td),e($,Qt),e(Qt,yd),e(Qt,Vt),e(Vt,Sd),e(Qt,Bd),e($,$d),e($,Ut),e(Ut,Ed),e(Ut,as),e(as,Md),e(Ut,Fd),e($,jd),e($,Gt),e(Gt,Cd),e(Gt,Jt),e(Jt,Pd),e(Gt,xd),e($,Ad),e($,Xt),e(Xt,Ld),e(Xt,_o),e(_o,Nd),e(Xt,Id),e($,Dd),e($,zo),e(zo,Rd),e($,Wd),g(Yt,$,null),e($,Od),e($,bo),e(bo,Kd),e($,Hd),g(Zt,$,null),e($,Qd),e($,R),g(en,R,null),e(R,Vd),e(R,ve),e(ve,Ud),e(ve,is),e(is,Gd),e(ve,Jd),e(ve,qo),e(qo,Xd),e(ve,Yd),e(R,Zd),g(We,R,null),e(R,ec),e(R,vo),e(vo,tc),e(R,nc),g(tn,R,null),e(R,sc),e(R,ko),e(ko,oc),e(R,rc),g(nn,R,null),p(t,br,u),p(t,ke,u),e(ke,Oe),e(Oe,wo),g(sn,wo,null),e(ke,ac),e(ke,To),e(To,ic),p(t,qr,u),p(t,E,u),g(on,E,null),e(E,lc),e(E,yo),e(yo,dc),e(E,cc),e(E,rn),e(rn,uc),e(rn,an),e(an,hc),e(rn,pc),e(E,fc),e(E,ln),e(ln,mc),e(ln,ls),e(ls,gc),e(ln,_c),e(E,zc),e(E,dn),e(dn,bc),e(dn,cn),e(cn,qc),e(dn,vc),e(E,kc),e(E,un),e(un,wc),e(un,So),e(So,Tc),e(un,yc),e(E,Sc),e(E,Bo),e(Bo,Bc),e(E,$c),g(hn,E,null),e(E,Ec),e(E,$o),e($o,Mc),e(E,Fc),g(pn,E,null),e(E,jc),e(E,U),g(fn,U,null),e(U,Cc),e(U,we),e(we,Pc),e(we,ds),e(ds,xc),e(we,Ac),e(we,Eo),e(Eo,Lc),e(we,Nc),e(U,Ic),g(Ke,U,null),e(U,Dc),e(U,Mo),e(Mo,Rc),e(U,Wc),g(mn,U,null),p(t,vr,u),p(t,Te,u),e(Te,He),e(He,Fo),g(gn,Fo,null),e(Te,Oc),e(Te,jo),e(jo,Kc),p(t,kr,u),p(t,M,u),g(_n,M,null),e(M,Hc),e(M,Co),e(Co,Qc),e(M,Vc),e(M,zn),e(zn,Uc),e(zn,bn),e(bn,Gc),e(zn,Jc),e(M,Xc),e(M,qn),e(qn,Yc),e(qn,cs),e(cs,Zc),e(qn,eu),e(M,tu),e(M,vn),e(vn,nu),e(vn,kn),e(kn,su),e(vn,ou),e(M,ru),e(M,wn),e(wn,au),e(wn,Po),e(Po,iu),e(wn,lu),e(M,du),e(M,xo),e(xo,cu),e(M,uu),g(Tn,M,null),e(M,hu),e(M,Ao),e(Ao,pu),e(M,fu),g(yn,M,null),e(M,mu),e(M,G),g(Sn,G,null),e(G,gu),e(G,ye),e(ye,_u),e(ye,us),e(us,zu),e(ye,bu),e(ye,Lo),e(Lo,qu),e(ye,vu),e(G,ku),g(Qe,G,null),e(G,wu),e(G,No),e(No,Tu),e(G,yu),g(Bn,G,null),p(t,wr,u),p(t,Se,u),e(Se,Ve),e(Ve,Io),g($n,Io,null),e(Se,Su),e(Se,Do),e(Do,Bu),p(t,Tr,u),p(t,F,u),g(En,F,null),e(F,$u),e(F,Be),e(Be,Eu),e(Be,Ro),e(Ro,Mu),e(Be,Fu),e(Be,Wo),e(Wo,ju),e(Be,Cu),e(F,Pu),e(F,Mn),e(Mn,xu),e(Mn,Fn),e(Fn,Au),e(Mn,Lu),e(F,Nu),e(F,jn),e(jn,Iu),e(jn,hs),e(hs,Du),e(jn,Ru),e(F,Wu),e(F,Cn),e(Cn,Ou),e(Cn,Pn),e(Pn,Ku),e(Cn,Hu),e(F,Qu),e(F,xn),e(xn,Vu),e(xn,Oo),e(Oo,Uu),e(xn,Gu),e(F,Ju),e(F,Ko),e(Ko,Xu),e(F,Yu),g(An,F,null),e(F,Zu),e(F,Ho),e(Ho,eh),e(F,th),g(Ln,F,null),e(F,nh),e(F,J),g(Nn,J,null),e(J,sh),e(J,$e),e($e,oh),e($e,ps),e(ps,rh),e($e,ah),e($e,Qo),e(Qo,ih),e($e,lh),e(J,dh),g(Ue,J,null),e(J,ch),e(J,Vo),e(Vo,uh),e(J,hh),g(In,J,null),yr=!0},p(t,[u]){const Dn={};u&2&&(Dn.$$scope={dirty:u,ctx:t}),Ne.$set(Dn);const Uo={};u&2&&(Uo.$$scope={dirty:u,ctx:t}),De.$set(Uo);const Go={};u&2&&(Go.$$scope={dirty:u,ctx:t}),We.$set(Go);const 
Jo={};u&2&&(Jo.$$scope={dirty:u,ctx:t}),Ke.$set(Jo);const Rn={};u&2&&(Rn.$$scope={dirty:u,ctx:t}),Qe.$set(Rn);const Xo={};u&2&&(Xo.$$scope={dirty:u,ctx:t}),Ue.$set(Xo)},i(t){yr||(_(k.$$.fragment,t),_(Xe.$$.fragment,t),_(nt.$$.fragment,t),_(st.$$.fragment,t),_(rt.$$.fragment,t),_(at.$$.fragment,t),_(it.$$.fragment,t),_(dt.$$.fragment,t),_(ut.$$.fragment,t),_(pt.$$.fragment,t),_(ft.$$.fragment,t),_(mt.$$.fragment,t),_(gt.$$.fragment,t),_(bt.$$.fragment,t),_(qt.$$.fragment,t),_(Bt.$$.fragment,t),_($t.$$.fragment,t),_(Et.$$.fragment,t),_(Ne.$$.fragment,t),_(Mt.$$.fragment,t),_(Ft.$$.fragment,t),_(jt.$$.fragment,t),_(Dt.$$.fragment,t),_(Rt.$$.fragment,t),_(Wt.$$.fragment,t),_(De.$$.fragment,t),_(Ot.$$.fragment,t),_(Kt.$$.fragment,t),_(Ht.$$.fragment,t),_(Yt.$$.fragment,t),_(Zt.$$.fragment,t),_(en.$$.fragment,t),_(We.$$.fragment,t),_(tn.$$.fragment,t),_(nn.$$.fragment,t),_(sn.$$.fragment,t),_(on.$$.fragment,t),_(hn.$$.fragment,t),_(pn.$$.fragment,t),_(fn.$$.fragment,t),_(Ke.$$.fragment,t),_(mn.$$.fragment,t),_(gn.$$.fragment,t),_(_n.$$.fragment,t),_(Tn.$$.fragment,t),_(yn.$$.fragment,t),_(Sn.$$.fragment,t),_(Qe.$$.fragment,t),_(Bn.$$.fragment,t),_($n.$$.fragment,t),_(En.$$.fragment,t),_(An.$$.fragment,t),_(Ln.$$.fragment,t),_(Nn.$$.fragment,t),_(Ue.$$.fragment,t),_(In.$$.fragment,t),yr=!0)},o(t){z(k.$$.fragment,t),z(Xe.$$.fragment,t),z(nt.$$.fragment,t),z(st.$$.fragment,t),z(rt.$$.fragment,t),z(at.$$.fragment,t),z(it.$$.fragment,t),z(dt.$$.fragment,t),z(ut.$$.fragment,t),z(pt.$$.fragment,t),z(ft.$$.fragment,t),z(mt.$$.fragment,t),z(gt.$$.fragment,t),z(bt.$$.fragment,t),z(qt.$$.fragment,t),z(Bt.$$.fragment,t),z($t.$$.fragment,t),z(Et.$$.fragment,t),z(Ne.$$.fragment,t),z(Mt.$$.fragment,t),z(Ft.$$.fragment,t),z(jt.$$.fragment,t),z(Dt.$$.fragment,t),z(Rt.$$.fragment,t),z(Wt.$$.fragment,t),z(De.$$.fragment,t),z(Ot.$$.fragment,t),z(Kt.$$.fragment,t),z(Ht.$$.fragment,t),z(Yt.$$.fragment,t),z(Zt.$$.fragment,t),z(en.$$.fragment,t),z(We.$$.fragment,t),z(tn.$$.fragment,t),z(nn.$$.fragment,t),z(sn.$$.fragment,t),z(on.$$.fragment,t),z(hn.$$.fragment,t),z(pn.$$.fragment,t),z(fn.$$.fragment,t),z(Ke.$$.fragment,t),z(mn.$$.fragment,t),z(gn.$$.fragment,t),z(_n.$$.fragment,t),z(Tn.$$.fragment,t),z(yn.$$.fragment,t),z(Sn.$$.fragment,t),z(Qe.$$.fragment,t),z(Bn.$$.fragment,t),z($n.$$.fragment,t),z(En.$$.fragment,t),z(An.$$.fragment,t),z(Ln.$$.fragment,t),z(Nn.$$.fragment,t),z(Ue.$$.fragment,t),z(In.$$.fragment,t),yr=!1},d(t){n(h),t&&n(T),t&&n(q),b(k),t&&n(er),t&&n(ce),b(Xe),t&&n(tr),t&&n(ne),t&&n(nr),t&&n(Wn),t&&n(sr),t&&n(On),t&&n(or),t&&n(Kn),t&&n(rr),t&&n(se),t&&n(ar),t&&n(Me),t&&n(ir),t&&n(ue),b(nt),t&&n(lr),t&&n(O),b(st),b(rt),t&&n(dr),t&&n(pe),b(at),t&&n(cr),t&&n(I),b(it),b(dt),b(ut),b(pt),b(ft),t&&n(ur),t&&n(me),b(mt),t&&n(hr),t&&n(Y),b(gt),t&&n(pr),t&&n(ge),b(bt),t&&n(fr),t&&n(S),b(qt),b(Bt),b($t),b(Et),b(Ne),b(Mt),t&&n(mr),t&&n(ze),b(Ft),t&&n(gr),t&&n(B),b(jt),b(Dt),b(Rt),b(Wt),b(De),b(Ot),t&&n(_r),t&&n(qe),b(Kt),t&&n(zr),t&&n($),b(Ht),b(Yt),b(Zt),b(en),b(We),b(tn),b(nn),t&&n(br),t&&n(ke),b(sn),t&&n(qr),t&&n(E),b(on),b(hn),b(pn),b(fn),b(Ke),b(mn),t&&n(vr),t&&n(Te),b(gn),t&&n(kr),t&&n(M),b(_n),b(Tn),b(yn),b(Sn),b(Qe),b(Bn),t&&n(wr),t&&n(Se),b($n),t&&n(Tr),t&&n(F),b(En),b(An),b(Ln),b(Nn),b(Ue),b(In)}}}const 
Df={local:"squeezebert",sections:[{local:"overview",title:"Overview"},{local:"transformers.SqueezeBertConfig",title:"SqueezeBertConfig"},{local:"transformers.SqueezeBertTokenizer",title:"SqueezeBertTokenizer"},{local:"transformers.SqueezeBertTokenizerFast",title:"SqueezeBertTokenizerFast"},{local:"transformers.SqueezeBertModel",title:"SqueezeBertModel"},{local:"transformers.SqueezeBertForMaskedLM",title:"SqueezeBertForMaskedLM"},{local:"transformers.SqueezeBertForSequenceClassification",title:"SqueezeBertForSequenceClassification"},{local:"transformers.SqueezeBertForMultipleChoice",title:"SqueezeBertForMultipleChoice"},{local:"transformers.SqueezeBertForTokenClassification",title:"SqueezeBertForTokenClassification"},{local:"transformers.SqueezeBertForQuestionAnswering",title:"SqueezeBertForQuestionAnswering"}],title:"SqueezeBERT"};function Rf(H,h,T){let{fw:q}=h;return H.$$set=w=>{"fw"in w&&T(0,q=w.fw)},[q]}class Uf extends Ef{constructor(h){super();Mf(this,h,Rf,If,Ff,{fw:0})}}export{Uf as default,Df as metadata};
9,913
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/megatron_bert.mdx-63921c48.js
import{S as Vm,i as Jm,s as Km,e as s,k as d,w as _,t as n,L as Xm,c as a,d as o,m as l,a as i,x as v,h as r,b as c,J as e,g as m,y as b,q as k,o as M,B as w}from"../../chunks/vendor-b1433968.js";import{T as He}from"../../chunks/Tip-c3840994.js";import{D as P}from"../../chunks/Docstring-ff504c58.js";import{C as E}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Z}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ym(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function Zm(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function eu(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function tu(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function ou(z){let 
p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function nu(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function ru(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function su(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function au(z){let p,y,u,T,$;return{c(){p=s("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=a(g,"P",{});var f=i(p);y=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),u=a(f,"CODE",{});var B=i(u);T=r(B,"Module"),B.forEach(o),$=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,p,f),e(p,y),e(p,u),e(u,T),e(p,$)},d(g){g&&o(p)}}}function iu(z){let p,y,u,T,$,g,f,B,Ma,ts,ke,Ue,Dn,mt,wa,Wn,Ta,os,Ge,ya,ut,$a,Ba,ns,Ko,za,rs,Xo,Rn,Fa,ss,Yo,Pa,as,Qe,xa,ft,Ea,Ca,is,ee,qa,gt,ja,Aa,_t,Na,Sa,ds,Zo,La,ls,en,Ia,cs,vt,hs,bt,ps,tn,Oa,ms,kt,us,Mt,fs,on,Da,gs,te,Wa,Hn,Ra,Ha,Un,Ua,Ga,_s,wt,vs,Tt,bs,yt,ks,oe,Qa,$t,Va,Ja,Bt,Ka,Xa,Ms,Me,Ve,Gn,zt,Ya,Qn,Za,ws,q,Ft,ei,we,ti,nn,oi,ni,Pt,ri,si,ai,Te,ii,rn,di,li,sn,ci,hi,pi,Vn,mi,ui,xt,Ts,ye,Je,Jn,Et,fi,Kn,gi,ys,F,Ct,_i,Xn,vi,bi,qt,ki,an,Mi,wi,Ti,jt,yi,At,$i,Bi,zi,Nt,Fi,St,Pi,xi,Ei,C,Ci,Yn,qi,ji,Zn,Ai,Ni,er,Si,Li,tr,Ii,Oi,or,Di,Wi,nr,Ri,Hi,Ui,W,Lt,Gi,$e,Qi,dn,Vi,Ji,rr,Ki,Xi,Yi,Ke,Zi,sr,ed,td,It,$s,Be,Xe,ar,Ot,od,ir,nd,Bs,j,Dt,rd,Wt,sd,dr,ad,id,dd,Rt,ld,ln,cd,hd,pd,Ht,md,Ut,ud,fd,gd,R,Gt,_d,ze,vd,cn,bd,kd,lr,Md,wd,Td,Ye,yd,cr,$d,Bd,Qt,zs,Fe,Ze,hr,Vt,zd,pr,Fd,Fs,A,Jt,Pd,Kt,xd,mr,Ed,Cd,qd,Xt,jd,hn,Ad,Nd,Sd,Yt,Ld,Zt,Id,Od,Dd,H,eo,Wd,Pe,Rd,pn,Hd,Ud,ur,Gd,Qd,Vd,et,Jd,fr,Kd,Xd,to,Ps,xe,tt,gr,oo,Yd,_r,Zd,xs,N,no,el,ro,tl,vr,ol,nl,rl,so,sl,mn,al,il,dl,ao,ll,io,cl,hl,pl,U,lo,ml,Ee,ul,un,fl,gl,br,_l,vl,bl,ot,kl,kr,Ml,wl,co,Es,Ce,nt,Mr,ho,Tl,wr,yl,Cs,S,po,$l,qe,Bl,Tr,zl,Fl,yr,Pl,xl,El,mo,Cl,fn,ql,jl,Al,uo,Nl,fo,Sl,Ll,Il,G,go,Ol,je,Dl,gn,Wl,Rl,$r,Hl,Ul,Gl,rt,Ql,Br,Vl,Jl,_o,qs,Ae,st,zr,vo,Kl,Fr,Xl,js,L,bo,Yl,Pr,Zl,ec,ko,tc,_n,oc,nc,rc,Mo,sc,wo,ac,ic,dc,x,To,lc,Ne,cc,vn,hc,pc,xr,mc,uc,fc,at,gc,Er,_c,vc,yo,bc,Cr,kc,Mc,$o,As,Se,it,qr,Bo,wc,jr,Tc,Ns,I,zo,yc,Ar,$c,Bc,Fo,zc,bn,Fc,Pc,xc,Po,Ec,xo,Cc,qc,jc,Q,Eo,Ac,Le,Nc,kn,Sc,Lc,Nr,Ic,Oc,Dc,dt,Wc,Sr,Rc,Hc,Co,Ss,Ie,lt,Lr,qo,Uc,Ir,Gc,Ls,O,jo,Qc,Or,Vc,Jc,Ao,Kc,Mn,Xc,Yc,Zc,No,eh,So,th,oh,nh,V,Lo,rh,Oe,sh,wn,ah,ih,Dr,dh,lh,ch,ct,hh,Wr,ph,mh,Io,Is,De,ht,Rr,Oo,uh,Hr,fh,Os,D,Do,gh,We,_h,Ur,vh,bh,Gr,kh,Mh,wh,Wo,Th,Tn,yh,$h,Bh,Ro,zh,Ho,Fh,Ph,xh,J,Uo,Eh,Re,Ch,yn,qh,jh,Qr,Ah,Nh,Sh,pt,Lh,Vr,Ih,Oh,Go,Ds;return g=new Z({}),mt=new Z({}),vt=new E({props:{code:",",highlighted:""}}),bt=new E({props:{code:`wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip -O megatron_bert_345m_v0_1_uncased.zip,`,highlighted:`wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip -O megatron_bert_345m_v0_1_uncased.zip`}}),kt=new E({props:{code:",",highlighted:""}}),Mt=new E({props:{code:`wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O megatron_bert_345m_v0_1_cased.zip,`,highlighted:`wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O megatron_bert_345m_v0_1_cased.zip`}}),wt=new E({props:{code:",",highlighted:""}}),Tt=new E({props:{code:"python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_uncased.zip,",highlighted:'python3 <span class="hljs-variable">$PATH_TO_TRANSFORMERS</span>/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_uncased.zip'}}),yt=new E({props:{code:"python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip,",highlighted:'python3 <span class="hljs-variable">$PATH_TO_TRANSFORMERS</span>/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip'}}),zt=new 
Z({}),Ft=new P({props:{name:"class transformers.MegatronBertConfig",anchor:"transformers.MegatronBertConfig",parameters:[{name:"vocab_size",val:" = 29056"},{name:"hidden_size",val:" = 1024"},{name:"num_hidden_layers",val:" = 24"},{name:"num_attention_heads",val:" = 16"},{name:"intermediate_size",val:" = 4096"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"use_cache",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/configuration_megatron_bert.py#L28",parametersDescription:[{anchor:"transformers.MegatronBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 29056) &#x2014; Vocabulary size of the MEGATRON_BERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertModel">MegatronBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.MegatronBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.MegatronBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.MegatronBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.MegatronBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.MegatronBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.MegatronBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.MegatronBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.MegatronBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.MegatronBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertModel">MegatronBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.MegatronBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.MegatronBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.MegatronBertConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.MegatronBertConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"}]}}),xt=new E({props:{code:`from transformers import MegatronBertModel, MegatronBertConfig # Initializing a MEGATRON_BERT bert-base-uncased style configuration configuration = MegatronBertConfig() # Initializing a model from the bert-base-uncased style configuration model = MegatronBertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MegatronBertModel, MegatronBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a MEGATRON_BERT bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MegatronBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Et=new Z({}),Ct=new P({props:{name:"class transformers.MegatronBertModel",anchor:"transformers.MegatronBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L835",parametersDescription:[{anchor:"transformers.MegatronBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Lt=new P({props:{name:"forward",anchor:"transformers.MegatronBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L875",parametersDescription:[{anchor:"transformers.MegatronBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.MegatronBertModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.MegatronBertModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MegatronBertModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ke=new He({props:{$$slots:{default:[Ym]},$$scope:{ctx:z}}}),It=new E({props:{code:`from transformers import BertTokenizer, MegatronBertModel import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertModel.from_pretrained('nvidia/megatron-bert-cased-345m') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertModel <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertModel.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ot=new Z({}),Dt=new P({props:{name:"class transformers.MegatronBertForMaskedLM",anchor:"transformers.MegatronBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1262",parametersDescription:[{anchor:"transformers.MegatronBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gt=new P({props:{name:"forward",anchor:"transformers.MegatronBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1288",parametersDescription:[{anchor:"transformers.MegatronBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ye=new He({props:{$$slots:{default:[Zm]},$$scope:{ctx:z}}}),Qt=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForMaskedLM import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForMaskedLM.from_pretrained('nvidia/megatron-bert-cased-345m') inputs = tokenizer("The 
capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Vt=new Z({}),Jt=new P({props:{name:"class transformers.MegatronBertForCausalLM",anchor:"transformers.MegatronBertForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1116",parametersDescription:[{anchor:"transformers.MegatronBertForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),eo=new P({props:{name:"forward",anchor:"transformers.MegatronBertForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1139",parametersDescription:[{anchor:"transformers.MegatronBertForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.MegatronBertForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.MegatronBertForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels n <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.MegatronBertForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MegatronBertForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),et=new He({props:{$$slots:{default:[eu]},$$scope:{ctx:z}}}),to=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForCausalLM, MegatronBertConfig import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForCausalLM.from_pretrained('nvidia/megatron-bert-cased-345m', is_decoder=True) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForCausalLM, MegatronBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForCausalLM.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>, is_decoder=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),oo=new Z({}),no=new P({props:{name:"class 
transformers.MegatronBertForNextSentencePrediction",anchor:"transformers.MegatronBertForNextSentencePrediction",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1370",parametersDescription:[{anchor:"transformers.MegatronBertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lo=new P({props:{name:"forward",anchor:"transformers.MegatronBertForNextSentencePrediction.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1383",parametersDescription:[{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForNextSentencePrediction.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring). 
Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) \u2014 Next sequence prediction (classification) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ot=new He({props:{$$slots:{default:[tu]},$$scope:{ctx:z}}}),co=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForNextSentencePrediction import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForNextSentencePrediction.from_pretrained('nvidia/megatron-bert-cased-345m') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
encoding = tokenizer(prompt, next_sentence, return_tensors='pt') outputs = model(**encoding, labels=torch.LongTensor([1])) logits = outputs.logits assert logits[0, 0] < logits[0, 1] # next sentence was random,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=torch.LongTensor([<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># next sentence was random</span>`}}),ho=new Z({}),po=new P({props:{name:"class transformers.MegatronBertForPreTraining",anchor:"transformers.MegatronBertForPreTraining",parameters:[{name:"config",val:""},{name:"add_binary_head",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1013",parametersDescription:[{anchor:"transformers.MegatronBertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),go=new P({props:{name:"forward",anchor:"transformers.MegatronBertForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"next_sentence_label",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1029",parametersDescription:[{anchor:"transformers.MegatronBertForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.MegatronBertForPreTraining.forward.next_sentence_label",description:`<strong>next_sentence_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"next_sentence_label"},{anchor:"transformers.MegatronBertForPreTraining.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.megatron_bert.modeling_megatron_bert.MegatronBertForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</p> </li> <li> <p><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.megatron_bert.modeling_megatron_bert.MegatronBertForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),rt=new He({props:{$$slots:{default:[ou]},$$scope:{ctx:z}}}),_o=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForPreTraining import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForPreTraining.from_pretrained('nvidia/megatron-bert-cased-345m') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),vo=new Z({}),bo=new P({props:{name:"class transformers.MegatronBertForSequenceClassification",anchor:"transformers.MegatronBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1476",parametersDescription:[{anchor:"transformers.MegatronBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),To=new P({props:{name:"forward",anchor:"transformers.MegatronBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1488",parametersDescription:[{anchor:"transformers.MegatronBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),at=new He({props:{$$slots:{default:[nu]},$$scope:{ctx:z}}}),yo=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForSequenceClassification.from_pretrained('nvidia/megatron-bert-cased-345m') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$o=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForSequenceClassification.from_pretrained('nvidia/megatron-bert-cased-345m', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) 
<span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Bo=new Z({}),zo=new P({props:{name:"class transformers.MegatronBertForMultipleChoice",anchor:"transformers.MegatronBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1573",parametersDescription:[{anchor:"transformers.MegatronBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Eo=new P({props:{name:"forward",anchor:"transformers.MegatronBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1584",parametersDescription:[{anchor:"transformers.MegatronBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),dt=new He({props:{$$slots:{default:[ru]},$$scope:{ctx:z}}}),Co=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForMultipleChoice import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForMultipleChoice.from_pretrained('nvidia/megatron-bert-cased-345m') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qo=new Z({}),jo=new P({props:{name:"class transformers.MegatronBertForTokenClassification",anchor:"transformers.MegatronBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1666",parametersDescription:[{anchor:"transformers.MegatronBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Lo=new P({props:{name:"forward",anchor:"transformers.MegatronBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1681",parametersDescription:[{anchor:"transformers.MegatronBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ct=new He({props:{$$slots:{default:[su]},$$scope:{ctx:z}}}),Io=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForTokenClassification import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForTokenClassification.from_pretrained('nvidia/megatron-bert-cased-345m') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Oo=new Z({}),Do=new P({props:{name:"class transformers.MegatronBertForQuestionAnswering",anchor:"transformers.MegatronBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1757",parametersDescription:[{anchor:"transformers.MegatronBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig">MegatronBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Uo=new P({props:{name:"forward",anchor:"transformers.MegatronBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/megatron_bert/modeling_megatron_bert.py#L1771",parametersDescription:[{anchor:"transformers.MegatronBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.MegatronBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertConfig" >MegatronBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the 
attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),pt=new He({props:{$$slots:{default:[au]},$$scope:{ctx:z}}}),Go=new E({props:{code:`from transformers import BertTokenizer, MegatronBertForQuestionAnswering import torch tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m') model = MegatronBertForQuestionAnswering.from_pretrained('nvidia/megatron-bert-cased-345m') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, MegatronBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MegatronBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;nvidia/megatron-bert-cased-345m&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){p=s("meta"),y=d(),u=s("h1"),T=s("a"),$=s("span"),_(g.$$.fragment),f=d(),B=s("span"),Ma=n("MegatronBERT"),ts=d(),ke=s("h2"),Ue=s("a"),Dn=s("span"),_(mt.$$.fragment),wa=d(),Wn=s("span"),Ta=n("Overview"),os=d(),Ge=s("p"),ya=n("The MegatronBERT model was proposed in "),ut=s("a"),$a=n(`Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism`),Ba=n(` by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.`),ns=d(),Ko=s("p"),za=n("The abstract from the paper is the following:"),rs=d(),Xo=s("p"),Rn=s("em"),Fa=n(`Recent work in language modeling demonstrates that training large transformer models advances the state of the art in Natural Language Processing applications. However, very large models can be quite difficult to train due to memory constraints. 
In this work, we present our techniques for training very large transformer models and implement a simple, efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain 15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9 billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).`),ss=d(),Yo=s("p"),Pa=n("Tips:"),as=d(),Qe=s("p"),xa=n("We have provided pretrained "),ft=s("a"),Ea=n("BERT-345M"),Ca=n(` checkpoints for use to evaluate or finetuning downstream tasks.`),is=d(),ee=s("p"),qa=n("To access these checkpoints, first "),gt=s("a"),ja=n("sign up"),Aa=n(` for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. Further documentation for downloading models can be found in the `),_t=s("a"),Na=n("NGC documentation"),Sa=n("."),ds=d(),Zo=s("p"),La=n("Alternatively, you can directly download the checkpoints using:"),ls=d(),en=s("p"),Ia=n("BERT-345M-uncased:"),cs=d(),_(vt.$$.fragment),hs=d(),_(bt.$$.fragment),ps=d(),tn=s("p"),Oa=n("BERT-345M-cased:"),ms=d(),_(kt.$$.fragment),us=d(),_(Mt.$$.fragment),fs=d(),on=s("p"),Da=n(`Once you have obtained the checkpoints from NVIDIA GPU Cloud (NGC), you have to convert them to a format that will easily be loaded by Hugging Face Transformers and our port of the BERT code.`),gs=d(),te=s("p"),Wa=n("The following commands allow you to do the conversion. We assume that the folder "),Hn=s("code"),Ra=n("models/megatron_bert"),Ha=n(` contains `),Un=s("code"),Ua=n("megatron_bert_345m_v0_1_{cased, uncased}.zip"),Ga=n(" and that the commands are run from inside that folder:"),_s=d(),_(wt.$$.fragment),vs=d(),_(Tt.$$.fragment),bs=d(),_(yt.$$.fragment),ks=d(),oe=s("p"),Qa=n("This model was contributed by "),$t=s("a"),Va=n("jdemouth"),Ja=n(". The original code can be found "),Bt=s("a"),Ka=n("here"),Xa=n(`. That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, it contains a hybrid model parallel approach using \u201Ctensor parallel\u201D and \u201Cpipeline parallel\u201D techniques.`),Ms=d(),Me=s("h2"),Ve=s("a"),Gn=s("span"),_(zt.$$.fragment),Ya=d(),Qn=s("span"),Za=n("MegatronBertConfig"),ws=d(),q=s("div"),_(Ft.$$.fragment),ei=d(),we=s("p"),ti=n("This is the configuration class to store the configuration of a "),nn=s("a"),oi=n("MegatronBertModel"),ni=n(`. It is used to instantiate a MEGATRON_BERT model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the MEGATRON_BERT `),Pt=s("a"),ri=n("megatron-bert-uncased-345m"),si=n(" architecture."),ai=d(),Te=s("p"),ii=n("Configuration objects inherit from "),rn=s("a"),di=n("PretrainedConfig"),li=n(` and can be used to control the model outputs. Read the documentation from `),sn=s("a"),ci=n("PretrainedConfig"),hi=n(" for more information."),pi=d(),Vn=s("p"),mi=n("Examples:"),ui=d(),_(xt.$$.fragment),Ts=d(),ye=s("h2"),Je=s("a"),Jn=s("span"),_(Et.$$.fragment),fi=d(),Kn=s("span"),gi=n("MegatronBertModel"),ys=d(),F=s("div"),_(Ct.$$.fragment),_i=d(),Xn=s("p"),vi=n("The bare MegatronBert Model transformer outputting raw hidden-states without any specific head on top."),bi=d(),qt=s("p"),ki=n("This model inherits from "),an=s("a"),Mi=n("PreTrainedModel"),wi=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ti=d(),jt=s("p"),yi=n("This model is also a PyTorch "),At=s("a"),$i=n("torch.nn.Module"),Bi=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zi=d(),Nt=s("p"),Fi=n(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),St=s("a"),Pi=n(`Attention is all you need`),xi=n(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Ei=d(),C=s("p"),Ci=n("To behave as an decoder the model needs to be initialized with the "),Yn=s("code"),qi=n("is_decoder"),ji=n(` argument of the configuration set to `),Zn=s("code"),Ai=n("True"),Ni=n(". To be used in a Seq2Seq model, the model needs to initialized with both "),er=s("code"),Si=n("is_decoder"),Li=n(` argument and `),tr=s("code"),Ii=n("add_cross_attention"),Oi=n(" set to "),or=s("code"),Di=n("True"),Wi=n("; an "),nr=s("code"),Ri=n("encoder_hidden_states"),Hi=n(` is then expected as an input to the forward pass.`),Ui=d(),W=s("div"),_(Lt.$$.fragment),Gi=d(),$e=s("p"),Qi=n("The "),dn=s("a"),Vi=n("MegatronBertModel"),Ji=n(" forward method, overrides the "),rr=s("code"),Ki=n("__call__"),Xi=n(" special method."),Yi=d(),_(Ke.$$.fragment),Zi=d(),sr=s("p"),ed=n("Example:"),td=d(),_(It.$$.fragment),$s=d(),Be=s("h2"),Xe=s("a"),ar=s("span"),_(Ot.$$.fragment),od=d(),ir=s("span"),nd=n("MegatronBertForMaskedLM"),Bs=d(),j=s("div"),_(Dt.$$.fragment),rd=d(),Wt=s("p"),sd=n("MegatronBert Model with a "),dr=s("code"),ad=n("language modeling"),id=n(" head on top."),dd=d(),Rt=s("p"),ld=n("This model inherits from "),ln=s("a"),cd=n("PreTrainedModel"),hd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pd=d(),Ht=s("p"),md=n("This model is also a PyTorch "),Ut=s("a"),ud=n("torch.nn.Module"),fd=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gd=d(),R=s("div"),_(Gt.$$.fragment),_d=d(),ze=s("p"),vd=n("The "),cn=s("a"),bd=n("MegatronBertForMaskedLM"),kd=n(" forward method, overrides the "),lr=s("code"),Md=n("__call__"),wd=n(" special method."),Td=d(),_(Ye.$$.fragment),yd=d(),cr=s("p"),$d=n("Example:"),Bd=d(),_(Qt.$$.fragment),zs=d(),Fe=s("h2"),Ze=s("a"),hr=s("span"),_(Vt.$$.fragment),zd=d(),pr=s("span"),Fd=n("MegatronBertForCausalLM"),Fs=d(),A=s("div"),_(Jt.$$.fragment),Pd=d(),Kt=s("p"),xd=n("MegatronBert Model with a "),mr=s("code"),Ed=n("language modeling"),Cd=n(" head on top for CLM fine-tuning."),qd=d(),Xt=s("p"),jd=n("This model inherits from "),hn=s("a"),Ad=n("PreTrainedModel"),Nd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sd=d(),Yt=s("p"),Ld=n("This model is also a PyTorch "),Zt=s("a"),Id=n("torch.nn.Module"),Od=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dd=d(),H=s("div"),_(eo.$$.fragment),Wd=d(),Pe=s("p"),Rd=n("The "),pn=s("a"),Hd=n("MegatronBertForCausalLM"),Ud=n(" forward method, overrides the "),ur=s("code"),Gd=n("__call__"),Qd=n(" special method."),Vd=d(),_(et.$$.fragment),Jd=d(),fr=s("p"),Kd=n("Example:"),Xd=d(),_(to.$$.fragment),Ps=d(),xe=s("h2"),tt=s("a"),gr=s("span"),_(oo.$$.fragment),Yd=d(),_r=s("span"),Zd=n("MegatronBertForNextSentencePrediction"),xs=d(),N=s("div"),_(no.$$.fragment),el=d(),ro=s("p"),tl=n("MegatronBert Model with a "),vr=s("code"),ol=n("next sentence prediction (classification)"),nl=n(" head on top."),rl=d(),so=s("p"),sl=n("This model inherits from "),mn=s("a"),al=n("PreTrainedModel"),il=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dl=d(),ao=s("p"),ll=n("This model is also a PyTorch "),io=s("a"),cl=n("torch.nn.Module"),hl=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pl=d(),U=s("div"),_(lo.$$.fragment),ml=d(),Ee=s("p"),ul=n("The "),un=s("a"),fl=n("MegatronBertForNextSentencePrediction"),gl=n(" forward method, overrides the "),br=s("code"),_l=n("__call__"),vl=n(" special method."),bl=d(),_(ot.$$.fragment),kl=d(),kr=s("p"),Ml=n("Example:"),wl=d(),_(co.$$.fragment),Es=d(),Ce=s("h2"),nt=s("a"),Mr=s("span"),_(ho.$$.fragment),Tl=d(),wr=s("span"),yl=n("MegatronBertForPreTraining"),Cs=d(),S=s("div"),_(po.$$.fragment),$l=d(),qe=s("p"),Bl=n("MegatronBert Model with two heads on top as done during the pretraining: a "),Tr=s("code"),zl=n("masked language modeling"),Fl=n(` head and a `),yr=s("code"),Pl=n("next sentence prediction (classification)"),xl=n(" head."),El=d(),mo=s("p"),Cl=n("This model inherits from "),fn=s("a"),ql=n("PreTrainedModel"),jl=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Al=d(),uo=s("p"),Nl=n("This model is also a PyTorch "),fo=s("a"),Sl=n("torch.nn.Module"),Ll=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Il=d(),G=s("div"),_(go.$$.fragment),Ol=d(),je=s("p"),Dl=n("The "),gn=s("a"),Wl=n("MegatronBertForPreTraining"),Rl=n(" forward method, overrides the "),$r=s("code"),Hl=n("__call__"),Ul=n(" special method."),Gl=d(),_(rt.$$.fragment),Ql=d(),Br=s("p"),Vl=n("Example:"),Jl=d(),_(_o.$$.fragment),qs=d(),Ae=s("h2"),st=s("a"),zr=s("span"),_(vo.$$.fragment),Kl=d(),Fr=s("span"),Xl=n("MegatronBertForSequenceClassification"),js=d(),L=s("div"),_(bo.$$.fragment),Yl=d(),Pr=s("p"),Zl=n(`MegatronBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),ec=d(),ko=s("p"),tc=n("This model inherits from "),_n=s("a"),oc=n("PreTrainedModel"),nc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rc=d(),Mo=s("p"),sc=n("This model is also a PyTorch "),wo=s("a"),ac=n("torch.nn.Module"),ic=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dc=d(),x=s("div"),_(To.$$.fragment),lc=d(),Ne=s("p"),cc=n("The "),vn=s("a"),hc=n("MegatronBertForSequenceClassification"),pc=n(" forward method, overrides the "),xr=s("code"),mc=n("__call__"),uc=n(" special method."),fc=d(),_(at.$$.fragment),gc=d(),Er=s("p"),_c=n("Example of single-label classification:"),vc=d(),_(yo.$$.fragment),bc=d(),Cr=s("p"),kc=n("Example of multi-label classification:"),Mc=d(),_($o.$$.fragment),As=d(),Se=s("h2"),it=s("a"),qr=s("span"),_(Bo.$$.fragment),wc=d(),jr=s("span"),Tc=n("MegatronBertForMultipleChoice"),Ns=d(),I=s("div"),_(zo.$$.fragment),yc=d(),Ar=s("p"),$c=n(`MegatronBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Bc=d(),Fo=s("p"),zc=n("This model inherits from "),bn=s("a"),Fc=n("PreTrainedModel"),Pc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xc=d(),Po=s("p"),Ec=n("This model is also a PyTorch "),xo=s("a"),Cc=n("torch.nn.Module"),qc=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jc=d(),Q=s("div"),_(Eo.$$.fragment),Ac=d(),Le=s("p"),Nc=n("The "),kn=s("a"),Sc=n("MegatronBertForMultipleChoice"),Lc=n(" forward method, overrides the "),Nr=s("code"),Ic=n("__call__"),Oc=n(" special method."),Dc=d(),_(dt.$$.fragment),Wc=d(),Sr=s("p"),Rc=n("Example:"),Hc=d(),_(Co.$$.fragment),Ss=d(),Ie=s("h2"),lt=s("a"),Lr=s("span"),_(qo.$$.fragment),Uc=d(),Ir=s("span"),Gc=n("MegatronBertForTokenClassification"),Ls=d(),O=s("div"),_(jo.$$.fragment),Qc=d(),Or=s("p"),Vc=n(`MegatronBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Jc=d(),Ao=s("p"),Kc=n("This model inherits from "),Mn=s("a"),Xc=n("PreTrainedModel"),Yc=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zc=d(),No=s("p"),eh=n("This model is also a PyTorch "),So=s("a"),th=n("torch.nn.Module"),oh=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nh=d(),V=s("div"),_(Lo.$$.fragment),rh=d(),Oe=s("p"),sh=n("The "),wn=s("a"),ah=n("MegatronBertForTokenClassification"),ih=n(" forward method, overrides the "),Dr=s("code"),dh=n("__call__"),lh=n(" special method."),ch=d(),_(ct.$$.fragment),hh=d(),Wr=s("p"),ph=n("Example:"),mh=d(),_(Io.$$.fragment),Is=d(),De=s("h2"),ht=s("a"),Rr=s("span"),_(Oo.$$.fragment),uh=d(),Hr=s("span"),fh=n("MegatronBertForQuestionAnswering"),Os=d(),D=s("div"),_(Do.$$.fragment),gh=d(),We=s("p"),_h=n(`MegatronBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ur=s("code"),vh=n("span start logits"),bh=n(" and "),Gr=s("code"),kh=n("span end logits"),Mh=n(")."),wh=d(),Wo=s("p"),Th=n("This model inherits from "),Tn=s("a"),yh=n("PreTrainedModel"),$h=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bh=d(),Ro=s("p"),zh=n("This model is also a PyTorch "),Ho=s("a"),Fh=n("torch.nn.Module"),Ph=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xh=d(),J=s("div"),_(Uo.$$.fragment),Eh=d(),Re=s("p"),Ch=n("The "),yn=s("a"),qh=n("MegatronBertForQuestionAnswering"),jh=n(" forward method, overrides the "),Qr=s("code"),Ah=n("__call__"),Nh=n(" special method."),Sh=d(),_(pt.$$.fragment),Lh=d(),Vr=s("p"),Ih=n("Example:"),Oh=d(),_(Go.$$.fragment),this.h()},l(t){const h=Xm('[data-svelte="svelte-1phssyn"]',document.head);p=a(h,"META",{name:!0,content:!0}),h.forEach(o),y=l(t),u=a(t,"H1",{class:!0});var Qo=i(u);T=a(Qo,"A",{id:!0,class:!0,href:!0});var Jr=i(T);$=a(Jr,"SPAN",{});var Kr=i($);v(g.$$.fragment,Kr),Kr.forEach(o),Jr.forEach(o),f=l(Qo),B=a(Qo,"SPAN",{});var Xr=i(B);Ma=r(Xr,"MegatronBERT"),Xr.forEach(o),Qo.forEach(o),ts=l(t),ke=a(t,"H2",{class:!0});var Vo=i(ke);Ue=a(Vo,"A",{id:!0,class:!0,href:!0});var Yr=i(Ue);Dn=a(Yr,"SPAN",{});var Zr=i(Dn);v(mt.$$.fragment,Zr),Zr.forEach(o),Yr.forEach(o),wa=l(Vo),Wn=a(Vo,"SPAN",{});var es=i(Wn);Ta=r(es,"Overview"),es.forEach(o),Vo.forEach(o),os=l(t),Ge=a(t,"P",{});var Jo=i(Ge);ya=r(Jo,"The MegatronBERT model was proposed in "),ut=a(Jo,"A",{href:!0,rel:!0});var Dh=i(ut);$a=r(Dh,`Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism`),Dh.forEach(o),Ba=r(Jo,` by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.`),Jo.forEach(o),ns=l(t),Ko=a(t,"P",{});var Wh=i(Ko);za=r(Wh,"The abstract from the paper is the following:"),Wh.forEach(o),rs=l(t),Xo=a(t,"P",{});var Rh=i(Xo);Rn=a(Rh,"EM",{});var Hh=i(Rn);Fa=r(Hh,`Recent work in language modeling demonstrates that training large transformer models advances the state of the art in Natural Language Processing applications. However, very large models can be quite difficult to train due to memory constraints. 
In this work, we present our techniques for training very large transformer models and implement a simple, efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain 15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9 billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).`),Hh.forEach(o),Rh.forEach(o),ss=l(t),Yo=a(t,"P",{});var Uh=i(Yo);Pa=r(Uh,"Tips:"),Uh.forEach(o),as=l(t),Qe=a(t,"P",{});var Ws=i(Qe);xa=r(Ws,"We have provided pretrained "),ft=a(Ws,"A",{href:!0,rel:!0});var Gh=i(ft);Ea=r(Gh,"BERT-345M"),Gh.forEach(o),Ca=r(Ws,` checkpoints for use to evaluate or finetuning downstream tasks.`),Ws.forEach(o),is=l(t),ee=a(t,"P",{});var $n=i(ee);qa=r($n,"To access these checkpoints, first "),gt=a($n,"A",{href:!0,rel:!0});var Qh=i(gt);ja=r(Qh,"sign up"),Qh.forEach(o),Aa=r($n,` for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. Further documentation for downloading models can be found in the `),_t=a($n,"A",{href:!0,rel:!0});var Vh=i(_t);Na=r(Vh,"NGC documentation"),Vh.forEach(o),Sa=r($n,"."),$n.forEach(o),ds=l(t),Zo=a(t,"P",{});var Jh=i(Zo);La=r(Jh,"Alternatively, you can directly download the checkpoints using:"),Jh.forEach(o),ls=l(t),en=a(t,"P",{});var Kh=i(en);Ia=r(Kh,"BERT-345M-uncased:"),Kh.forEach(o),cs=l(t),v(vt.$$.fragment,t),hs=l(t),v(bt.$$.fragment,t),ps=l(t),tn=a(t,"P",{});var Xh=i(tn);Oa=r(Xh,"BERT-345M-cased:"),Xh.forEach(o),ms=l(t),v(kt.$$.fragment,t),us=l(t),v(Mt.$$.fragment,t),fs=l(t),on=a(t,"P",{});var Yh=i(on);Da=r(Yh,`Once you have obtained the checkpoints from NVIDIA GPU Cloud (NGC), you have to convert them to a format that will easily be loaded by Hugging Face Transformers and our port of the BERT code.`),Yh.forEach(o),gs=l(t),te=a(t,"P",{});var Bn=i(te);Wa=r(Bn,"The following commands allow you to do the conversion. We assume that the folder "),Hn=a(Bn,"CODE",{});var Zh=i(Hn);Ra=r(Zh,"models/megatron_bert"),Zh.forEach(o),Ha=r(Bn,` contains `),Un=a(Bn,"CODE",{});var ep=i(Un);Ua=r(ep,"megatron_bert_345m_v0_1_{cased, uncased}.zip"),ep.forEach(o),Ga=r(Bn," and that the commands are run from inside that folder:"),Bn.forEach(o),_s=l(t),v(wt.$$.fragment,t),vs=l(t),v(Tt.$$.fragment,t),bs=l(t),v(yt.$$.fragment,t),ks=l(t),oe=a(t,"P",{});var zn=i(oe);Qa=r(zn,"This model was contributed by "),$t=a(zn,"A",{href:!0,rel:!0});var tp=i($t);Va=r(tp,"jdemouth"),tp.forEach(o),Ja=r(zn,". 
The original code can be found "),Bt=a(zn,"A",{href:!0,rel:!0});var op=i(Bt);Ka=r(op,"here"),op.forEach(o),Xa=r(zn,`. That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, it contains a hybrid model parallel approach using \u201Ctensor parallel\u201D and \u201Cpipeline parallel\u201D techniques.`),zn.forEach(o),Ms=l(t),Me=a(t,"H2",{class:!0});var Rs=i(Me);Ve=a(Rs,"A",{id:!0,class:!0,href:!0});var np=i(Ve);Gn=a(np,"SPAN",{});var rp=i(Gn);v(zt.$$.fragment,rp),rp.forEach(o),np.forEach(o),Ya=l(Rs),Qn=a(Rs,"SPAN",{});var sp=i(Qn);Za=r(sp,"MegatronBertConfig"),sp.forEach(o),Rs.forEach(o),ws=l(t),q=a(t,"DIV",{class:!0});var ne=i(q);v(Ft.$$.fragment,ne),ei=l(ne),we=a(ne,"P",{});var Fn=i(we);ti=r(Fn,"This is the configuration class to store the configuration of a "),nn=a(Fn,"A",{href:!0});var ap=i(nn);oi=r(ap,"MegatronBertModel"),ap.forEach(o),ni=r(Fn,`. It is used to instantiate a MEGATRON_BERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MEGATRON_BERT `),Pt=a(Fn,"A",{href:!0,rel:!0});var ip=i(Pt);ri=r(ip,"megatron-bert-uncased-345m"),ip.forEach(o),si=r(Fn," architecture."),Fn.forEach(o),ai=l(ne),Te=a(ne,"P",{});var Pn=i(Te);ii=r(Pn,"Configuration objects inherit from "),rn=a(Pn,"A",{href:!0});var dp=i(rn);di=r(dp,"PretrainedConfig"),dp.forEach(o),li=r(Pn,` and can be used to control the model outputs. Read the documentation from `),sn=a(Pn,"A",{href:!0});var lp=i(sn);ci=r(lp,"PretrainedConfig"),lp.forEach(o),hi=r(Pn," for more information."),Pn.forEach(o),pi=l(ne),Vn=a(ne,"P",{});var cp=i(Vn);mi=r(cp,"Examples:"),cp.forEach(o),ui=l(ne),v(xt.$$.fragment,ne),ne.forEach(o),Ts=l(t),ye=a(t,"H2",{class:!0});var Hs=i(ye);Je=a(Hs,"A",{id:!0,class:!0,href:!0});var hp=i(Je);Jn=a(hp,"SPAN",{});var pp=i(Jn);v(Et.$$.fragment,pp),pp.forEach(o),hp.forEach(o),fi=l(Hs),Kn=a(Hs,"SPAN",{});var mp=i(Kn);gi=r(mp,"MegatronBertModel"),mp.forEach(o),Hs.forEach(o),ys=l(t),F=a(t,"DIV",{class:!0});var K=i(F);v(Ct.$$.fragment,K),_i=l(K),Xn=a(K,"P",{});var up=i(Xn);vi=r(up,"The bare MegatronBert Model transformer outputting raw hidden-states without any specific head on top."),up.forEach(o),bi=l(K),qt=a(K,"P",{});var Us=i(qt);ki=r(Us,"This model inherits from "),an=a(Us,"A",{href:!0});var fp=i(an);Mi=r(fp,"PreTrainedModel"),fp.forEach(o),wi=r(Us,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Us.forEach(o),Ti=l(K),jt=a(K,"P",{});var Gs=i(jt);yi=r(Gs,"This model is also a PyTorch "),At=a(Gs,"A",{href:!0,rel:!0});var gp=i(At);$i=r(gp,"torch.nn.Module"),gp.forEach(o),Bi=r(Gs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gs.forEach(o),zi=l(K),Nt=a(K,"P",{});var Qs=i(Nt);Fi=r(Qs,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),St=a(Qs,"A",{href:!0,rel:!0});var _p=i(St);Pi=r(_p,`Attention is all you need`),_p.forEach(o),xi=r(Qs,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Qs.forEach(o),Ei=l(K),C=a(K,"P",{});var X=i(C);Ci=r(X,"To behave as an decoder the model needs to be initialized with the "),Yn=a(X,"CODE",{});var vp=i(Yn);qi=r(vp,"is_decoder"),vp.forEach(o),ji=r(X,` argument of the configuration set to `),Zn=a(X,"CODE",{});var bp=i(Zn);Ai=r(bp,"True"),bp.forEach(o),Ni=r(X,". To be used in a Seq2Seq model, the model needs to initialized with both "),er=a(X,"CODE",{});var kp=i(er);Si=r(kp,"is_decoder"),kp.forEach(o),Li=r(X,` argument and `),tr=a(X,"CODE",{});var Mp=i(tr);Ii=r(Mp,"add_cross_attention"),Mp.forEach(o),Oi=r(X," set to "),or=a(X,"CODE",{});var wp=i(or);Di=r(wp,"True"),wp.forEach(o),Wi=r(X,"; an "),nr=a(X,"CODE",{});var Tp=i(nr);Ri=r(Tp,"encoder_hidden_states"),Tp.forEach(o),Hi=r(X,` is then expected as an input to the forward pass.`),X.forEach(o),Ui=l(K),W=a(K,"DIV",{class:!0});var re=i(W);v(Lt.$$.fragment,re),Gi=l(re),$e=a(re,"P",{});var xn=i($e);Qi=r(xn,"The "),dn=a(xn,"A",{href:!0});var yp=i(dn);Vi=r(yp,"MegatronBertModel"),yp.forEach(o),Ji=r(xn," forward method, overrides the "),rr=a(xn,"CODE",{});var $p=i(rr);Ki=r($p,"__call__"),$p.forEach(o),Xi=r(xn," special method."),xn.forEach(o),Yi=l(re),v(Ke.$$.fragment,re),Zi=l(re),sr=a(re,"P",{});var Bp=i(sr);ed=r(Bp,"Example:"),Bp.forEach(o),td=l(re),v(It.$$.fragment,re),re.forEach(o),K.forEach(o),$s=l(t),Be=a(t,"H2",{class:!0});var Vs=i(Be);Xe=a(Vs,"A",{id:!0,class:!0,href:!0});var zp=i(Xe);ar=a(zp,"SPAN",{});var Fp=i(ar);v(Ot.$$.fragment,Fp),Fp.forEach(o),zp.forEach(o),od=l(Vs),ir=a(Vs,"SPAN",{});var Pp=i(ir);nd=r(Pp,"MegatronBertForMaskedLM"),Pp.forEach(o),Vs.forEach(o),Bs=l(t),j=a(t,"DIV",{class:!0});var se=i(j);v(Dt.$$.fragment,se),rd=l(se),Wt=a(se,"P",{});var Js=i(Wt);sd=r(Js,"MegatronBert Model with a "),dr=a(Js,"CODE",{});var xp=i(dr);ad=r(xp,"language modeling"),xp.forEach(o),id=r(Js," head on top."),Js.forEach(o),dd=l(se),Rt=a(se,"P",{});var Ks=i(Rt);ld=r(Ks,"This model inherits from "),ln=a(Ks,"A",{href:!0});var Ep=i(ln);cd=r(Ep,"PreTrainedModel"),Ep.forEach(o),hd=r(Ks,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ks.forEach(o),pd=l(se),Ht=a(se,"P",{});var Xs=i(Ht);md=r(Xs,"This model is also a PyTorch "),Ut=a(Xs,"A",{href:!0,rel:!0});var Cp=i(Ut);ud=r(Cp,"torch.nn.Module"),Cp.forEach(o),fd=r(Xs,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xs.forEach(o),gd=l(se),R=a(se,"DIV",{class:!0});var ae=i(R);v(Gt.$$.fragment,ae),_d=l(ae),ze=a(ae,"P",{});var En=i(ze);vd=r(En,"The "),cn=a(En,"A",{href:!0});var qp=i(cn);bd=r(qp,"MegatronBertForMaskedLM"),qp.forEach(o),kd=r(En," forward method, overrides the "),lr=a(En,"CODE",{});var jp=i(lr);Md=r(jp,"__call__"),jp.forEach(o),wd=r(En," special method."),En.forEach(o),Td=l(ae),v(Ye.$$.fragment,ae),yd=l(ae),cr=a(ae,"P",{});var Ap=i(cr);$d=r(Ap,"Example:"),Ap.forEach(o),Bd=l(ae),v(Qt.$$.fragment,ae),ae.forEach(o),se.forEach(o),zs=l(t),Fe=a(t,"H2",{class:!0});var Ys=i(Fe);Ze=a(Ys,"A",{id:!0,class:!0,href:!0});var Np=i(Ze);hr=a(Np,"SPAN",{});var Sp=i(hr);v(Vt.$$.fragment,Sp),Sp.forEach(o),Np.forEach(o),zd=l(Ys),pr=a(Ys,"SPAN",{});var Lp=i(pr);Fd=r(Lp,"MegatronBertForCausalLM"),Lp.forEach(o),Ys.forEach(o),Fs=l(t),A=a(t,"DIV",{class:!0});var ie=i(A);v(Jt.$$.fragment,ie),Pd=l(ie),Kt=a(ie,"P",{});var Zs=i(Kt);xd=r(Zs,"MegatronBert Model with a "),mr=a(Zs,"CODE",{});var Ip=i(mr);Ed=r(Ip,"language modeling"),Ip.forEach(o),Cd=r(Zs," head on top for CLM fine-tuning."),Zs.forEach(o),qd=l(ie),Xt=a(ie,"P",{});var ea=i(Xt);jd=r(ea,"This model inherits from "),hn=a(ea,"A",{href:!0});var Op=i(hn);Ad=r(Op,"PreTrainedModel"),Op.forEach(o),Nd=r(ea,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ea.forEach(o),Sd=l(ie),Yt=a(ie,"P",{});var ta=i(Yt);Ld=r(ta,"This model is also a PyTorch "),Zt=a(ta,"A",{href:!0,rel:!0});var Dp=i(Zt);Id=r(Dp,"torch.nn.Module"),Dp.forEach(o),Od=r(ta,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ta.forEach(o),Dd=l(ie),H=a(ie,"DIV",{class:!0});var de=i(H);v(eo.$$.fragment,de),Wd=l(de),Pe=a(de,"P",{});var Cn=i(Pe);Rd=r(Cn,"The "),pn=a(Cn,"A",{href:!0});var Wp=i(pn);Hd=r(Wp,"MegatronBertForCausalLM"),Wp.forEach(o),Ud=r(Cn," forward method, overrides the "),ur=a(Cn,"CODE",{});var Rp=i(ur);Gd=r(Rp,"__call__"),Rp.forEach(o),Qd=r(Cn," special method."),Cn.forEach(o),Vd=l(de),v(et.$$.fragment,de),Jd=l(de),fr=a(de,"P",{});var Hp=i(fr);Kd=r(Hp,"Example:"),Hp.forEach(o),Xd=l(de),v(to.$$.fragment,de),de.forEach(o),ie.forEach(o),Ps=l(t),xe=a(t,"H2",{class:!0});var oa=i(xe);tt=a(oa,"A",{id:!0,class:!0,href:!0});var Up=i(tt);gr=a(Up,"SPAN",{});var Gp=i(gr);v(oo.$$.fragment,Gp),Gp.forEach(o),Up.forEach(o),Yd=l(oa),_r=a(oa,"SPAN",{});var Qp=i(_r);Zd=r(Qp,"MegatronBertForNextSentencePrediction"),Qp.forEach(o),oa.forEach(o),xs=l(t),N=a(t,"DIV",{class:!0});var le=i(N);v(no.$$.fragment,le),el=l(le),ro=a(le,"P",{});var na=i(ro);tl=r(na,"MegatronBert Model with a "),vr=a(na,"CODE",{});var Vp=i(vr);ol=r(Vp,"next sentence prediction (classification)"),Vp.forEach(o),nl=r(na," head on top."),na.forEach(o),rl=l(le),so=a(le,"P",{});var ra=i(so);sl=r(ra,"This model inherits from "),mn=a(ra,"A",{href:!0});var Jp=i(mn);al=r(Jp,"PreTrainedModel"),Jp.forEach(o),il=r(ra,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ra.forEach(o),dl=l(le),ao=a(le,"P",{});var sa=i(ao);ll=r(sa,"This model is also a PyTorch "),io=a(sa,"A",{href:!0,rel:!0});var Kp=i(io);cl=r(Kp,"torch.nn.Module"),Kp.forEach(o),hl=r(sa,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sa.forEach(o),pl=l(le),U=a(le,"DIV",{class:!0});var ce=i(U);v(lo.$$.fragment,ce),ml=l(ce),Ee=a(ce,"P",{});var qn=i(Ee);ul=r(qn,"The "),un=a(qn,"A",{href:!0});var Xp=i(un);fl=r(Xp,"MegatronBertForNextSentencePrediction"),Xp.forEach(o),gl=r(qn," forward method, overrides the "),br=a(qn,"CODE",{});var Yp=i(br);_l=r(Yp,"__call__"),Yp.forEach(o),vl=r(qn," special method."),qn.forEach(o),bl=l(ce),v(ot.$$.fragment,ce),kl=l(ce),kr=a(ce,"P",{});var Zp=i(kr);Ml=r(Zp,"Example:"),Zp.forEach(o),wl=l(ce),v(co.$$.fragment,ce),ce.forEach(o),le.forEach(o),Es=l(t),Ce=a(t,"H2",{class:!0});var aa=i(Ce);nt=a(aa,"A",{id:!0,class:!0,href:!0});var em=i(nt);Mr=a(em,"SPAN",{});var tm=i(Mr);v(ho.$$.fragment,tm),tm.forEach(o),em.forEach(o),Tl=l(aa),wr=a(aa,"SPAN",{});var om=i(wr);yl=r(om,"MegatronBertForPreTraining"),om.forEach(o),aa.forEach(o),Cs=l(t),S=a(t,"DIV",{class:!0});var he=i(S);v(po.$$.fragment,he),$l=l(he),qe=a(he,"P",{});var jn=i(qe);Bl=r(jn,"MegatronBert Model with two heads on top as done during the pretraining: a "),Tr=a(jn,"CODE",{});var nm=i(Tr);zl=r(nm,"masked language modeling"),nm.forEach(o),Fl=r(jn,` head and a `),yr=a(jn,"CODE",{});var rm=i(yr);Pl=r(rm,"next sentence prediction (classification)"),rm.forEach(o),xl=r(jn," head."),jn.forEach(o),El=l(he),mo=a(he,"P",{});var ia=i(mo);Cl=r(ia,"This model inherits from "),fn=a(ia,"A",{href:!0});var sm=i(fn);ql=r(sm,"PreTrainedModel"),sm.forEach(o),jl=r(ia,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ia.forEach(o),Al=l(he),uo=a(he,"P",{});var da=i(uo);Nl=r(da,"This model is also a PyTorch "),fo=a(da,"A",{href:!0,rel:!0});var am=i(fo);Sl=r(am,"torch.nn.Module"),am.forEach(o),Ll=r(da,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),da.forEach(o),Il=l(he),G=a(he,"DIV",{class:!0});var pe=i(G);v(go.$$.fragment,pe),Ol=l(pe),je=a(pe,"P",{});var An=i(je);Dl=r(An,"The "),gn=a(An,"A",{href:!0});var im=i(gn);Wl=r(im,"MegatronBertForPreTraining"),im.forEach(o),Rl=r(An," forward method, overrides the "),$r=a(An,"CODE",{});var dm=i($r);Hl=r(dm,"__call__"),dm.forEach(o),Ul=r(An," special method."),An.forEach(o),Gl=l(pe),v(rt.$$.fragment,pe),Ql=l(pe),Br=a(pe,"P",{});var lm=i(Br);Vl=r(lm,"Example:"),lm.forEach(o),Jl=l(pe),v(_o.$$.fragment,pe),pe.forEach(o),he.forEach(o),qs=l(t),Ae=a(t,"H2",{class:!0});var la=i(Ae);st=a(la,"A",{id:!0,class:!0,href:!0});var cm=i(st);zr=a(cm,"SPAN",{});var hm=i(zr);v(vo.$$.fragment,hm),hm.forEach(o),cm.forEach(o),Kl=l(la),Fr=a(la,"SPAN",{});var pm=i(Fr);Xl=r(pm,"MegatronBertForSequenceClassification"),pm.forEach(o),la.forEach(o),js=l(t),L=a(t,"DIV",{class:!0});var me=i(L);v(bo.$$.fragment,me),Yl=l(me),Pr=a(me,"P",{});var mm=i(Pr);Zl=r(mm,`MegatronBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),mm.forEach(o),ec=l(me),ko=a(me,"P",{});var ca=i(ko);tc=r(ca,"This model inherits from "),_n=a(ca,"A",{href:!0});var um=i(_n);oc=r(um,"PreTrainedModel"),um.forEach(o),nc=r(ca,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ca.forEach(o),rc=l(me),Mo=a(me,"P",{});var ha=i(Mo);sc=r(ha,"This model is also a PyTorch "),wo=a(ha,"A",{href:!0,rel:!0});var fm=i(wo);ac=r(fm,"torch.nn.Module"),fm.forEach(o),ic=r(ha,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ha.forEach(o),dc=l(me),x=a(me,"DIV",{class:!0});var Y=i(x);v(To.$$.fragment,Y),lc=l(Y),Ne=a(Y,"P",{});var Nn=i(Ne);cc=r(Nn,"The "),vn=a(Nn,"A",{href:!0});var gm=i(vn);hc=r(gm,"MegatronBertForSequenceClassification"),gm.forEach(o),pc=r(Nn," forward method, overrides the "),xr=a(Nn,"CODE",{});var _m=i(xr);mc=r(_m,"__call__"),_m.forEach(o),uc=r(Nn," special method."),Nn.forEach(o),fc=l(Y),v(at.$$.fragment,Y),gc=l(Y),Er=a(Y,"P",{});var vm=i(Er);_c=r(vm,"Example of single-label classification:"),vm.forEach(o),vc=l(Y),v(yo.$$.fragment,Y),bc=l(Y),Cr=a(Y,"P",{});var bm=i(Cr);kc=r(bm,"Example of multi-label classification:"),bm.forEach(o),Mc=l(Y),v($o.$$.fragment,Y),Y.forEach(o),me.forEach(o),As=l(t),Se=a(t,"H2",{class:!0});var pa=i(Se);it=a(pa,"A",{id:!0,class:!0,href:!0});var km=i(it);qr=a(km,"SPAN",{});var Mm=i(qr);v(Bo.$$.fragment,Mm),Mm.forEach(o),km.forEach(o),wc=l(pa),jr=a(pa,"SPAN",{});var wm=i(jr);Tc=r(wm,"MegatronBertForMultipleChoice"),wm.forEach(o),pa.forEach(o),Ns=l(t),I=a(t,"DIV",{class:!0});var ue=i(I);v(zo.$$.fragment,ue),yc=l(ue),Ar=a(ue,"P",{});var Tm=i(Ar);$c=r(Tm,`MegatronBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Tm.forEach(o),Bc=l(ue),Fo=a(ue,"P",{});var ma=i(Fo);zc=r(ma,"This model inherits from "),bn=a(ma,"A",{href:!0});var ym=i(bn);Fc=r(ym,"PreTrainedModel"),ym.forEach(o),Pc=r(ma,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ma.forEach(o),xc=l(ue),Po=a(ue,"P",{});var ua=i(Po);Ec=r(ua,"This model is also a PyTorch "),xo=a(ua,"A",{href:!0,rel:!0});var $m=i(xo);Cc=r($m,"torch.nn.Module"),$m.forEach(o),qc=r(ua,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ua.forEach(o),jc=l(ue),Q=a(ue,"DIV",{class:!0});var fe=i(Q);v(Eo.$$.fragment,fe),Ac=l(fe),Le=a(fe,"P",{});var Sn=i(Le);Nc=r(Sn,"The "),kn=a(Sn,"A",{href:!0});var Bm=i(kn);Sc=r(Bm,"MegatronBertForMultipleChoice"),Bm.forEach(o),Lc=r(Sn," forward method, overrides the "),Nr=a(Sn,"CODE",{});var zm=i(Nr);Ic=r(zm,"__call__"),zm.forEach(o),Oc=r(Sn," special method."),Sn.forEach(o),Dc=l(fe),v(dt.$$.fragment,fe),Wc=l(fe),Sr=a(fe,"P",{});var Fm=i(Sr);Rc=r(Fm,"Example:"),Fm.forEach(o),Hc=l(fe),v(Co.$$.fragment,fe),fe.forEach(o),ue.forEach(o),Ss=l(t),Ie=a(t,"H2",{class:!0});var fa=i(Ie);lt=a(fa,"A",{id:!0,class:!0,href:!0});var Pm=i(lt);Lr=a(Pm,"SPAN",{});var xm=i(Lr);v(qo.$$.fragment,xm),xm.forEach(o),Pm.forEach(o),Uc=l(fa),Ir=a(fa,"SPAN",{});var Em=i(Ir);Gc=r(Em,"MegatronBertForTokenClassification"),Em.forEach(o),fa.forEach(o),Ls=l(t),O=a(t,"DIV",{class:!0});var ge=i(O);v(jo.$$.fragment,ge),Qc=l(ge),Or=a(ge,"P",{});var Cm=i(Or);Vc=r(Cm,`MegatronBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),Cm.forEach(o),Jc=l(ge),Ao=a(ge,"P",{});var ga=i(Ao);Kc=r(ga,"This model inherits from "),Mn=a(ga,"A",{href:!0});var qm=i(Mn);Xc=r(qm,"PreTrainedModel"),qm.forEach(o),Yc=r(ga,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ga.forEach(o),Zc=l(ge),No=a(ge,"P",{});var _a=i(No);eh=r(_a,"This model is also a PyTorch "),So=a(_a,"A",{href:!0,rel:!0});var jm=i(So);th=r(jm,"torch.nn.Module"),jm.forEach(o),oh=r(_a,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_a.forEach(o),nh=l(ge),V=a(ge,"DIV",{class:!0});var _e=i(V);v(Lo.$$.fragment,_e),rh=l(_e),Oe=a(_e,"P",{});var Ln=i(Oe);sh=r(Ln,"The "),wn=a(Ln,"A",{href:!0});var Am=i(wn);ah=r(Am,"MegatronBertForTokenClassification"),Am.forEach(o),ih=r(Ln," forward method, overrides the "),Dr=a(Ln,"CODE",{});var Nm=i(Dr);dh=r(Nm,"__call__"),Nm.forEach(o),lh=r(Ln," special method."),Ln.forEach(o),ch=l(_e),v(ct.$$.fragment,_e),hh=l(_e),Wr=a(_e,"P",{});var Sm=i(Wr);ph=r(Sm,"Example:"),Sm.forEach(o),mh=l(_e),v(Io.$$.fragment,_e),_e.forEach(o),ge.forEach(o),Is=l(t),De=a(t,"H2",{class:!0});var va=i(De);ht=a(va,"A",{id:!0,class:!0,href:!0});var Lm=i(ht);Rr=a(Lm,"SPAN",{});var Im=i(Rr);v(Oo.$$.fragment,Im),Im.forEach(o),Lm.forEach(o),uh=l(va),Hr=a(va,"SPAN",{});var Om=i(Hr);fh=r(Om,"MegatronBertForQuestionAnswering"),Om.forEach(o),va.forEach(o),Os=l(t),D=a(t,"DIV",{class:!0});var ve=i(D);v(Do.$$.fragment,ve),gh=l(ve),We=a(ve,"P",{});var In=i(We);_h=r(In,`MegatronBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ur=a(In,"CODE",{});var Dm=i(Ur);vh=r(Dm,"span start logits"),Dm.forEach(o),bh=r(In," and "),Gr=a(In,"CODE",{});var Wm=i(Gr);kh=r(Wm,"span end logits"),Wm.forEach(o),Mh=r(In,")."),In.forEach(o),wh=l(ve),Wo=a(ve,"P",{});var ba=i(Wo);Th=r(ba,"This model inherits from "),Tn=a(ba,"A",{href:!0});var Rm=i(Tn);yh=r(Rm,"PreTrainedModel"),Rm.forEach(o),$h=r(ba,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ba.forEach(o),Bh=l(ve),Ro=a(ve,"P",{});var ka=i(Ro);zh=r(ka,"This model is also a PyTorch "),Ho=a(ka,"A",{href:!0,rel:!0});var Hm=i(Ho);Fh=r(Hm,"torch.nn.Module"),Hm.forEach(o),Ph=r(ka,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ka.forEach(o),xh=l(ve),J=a(ve,"DIV",{class:!0});var be=i(J);v(Uo.$$.fragment,be),Eh=l(be),Re=a(be,"P",{});var On=i(Re);Ch=r(On,"The "),yn=a(On,"A",{href:!0});var Um=i(yn);qh=r(Um,"MegatronBertForQuestionAnswering"),Um.forEach(o),jh=r(On," forward method, overrides the "),Qr=a(On,"CODE",{});var Gm=i(Qr);Ah=r(Gm,"__call__"),Gm.forEach(o),Nh=r(On," special method."),On.forEach(o),Sh=l(be),v(pt.$$.fragment,be),Lh=l(be),Vr=a(be,"P",{});var Qm=i(Vr);Ih=r(Qm,"Example:"),Qm.forEach(o),Oh=l(be),v(Go.$$.fragment,be),be.forEach(o),ve.forEach(o),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(du)),c(T,"id","megatronbert"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#megatronbert"),c(u,"class","relative group"),c(Ue,"id","transformers.MegatronBertConfig"),c(Ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ue,"href","#transformers.MegatronBertConfig"),c(ke,"class","relative group"),c(ut,"href","https://arxiv.org/abs/1909.08053"),c(ut,"rel","nofollow"),c(ft,"href","https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m"),c(ft,"rel","nofollow"),c(gt,"href","https://ngc.nvidia.com/signup"),c(gt,"rel","nofollow"),c(_t,"href","https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1"),c(_t,"rel","nofollow"),c($t,"href","https://huggingface.co/jdemouth"),c($t,"rel","nofollow"),c(Bt,"href","https://github.com/NVIDIA/Megatron-LM"),c(Bt,"rel","nofollow"),c(Ve,"id","megatronbertconfig"),c(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ve,"href","#megatronbertconfig"),c(Me,"class","relative group"),c(nn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertModel"),c(Pt,"href","https://huggingface.co/nvidia/megatron-bert-uncased-345m"),c(Pt,"rel","nofollow"),c(rn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(sn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(q,"class","docstring"),c(Je,"id","megatronbertmodel"),c(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Je,"href","#megatronbertmodel"),c(ye,"class","relative group"),c(an,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(At,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(At,"rel","nofollow"),c(St,"href","https://arxiv.org/abs/1706.03762"),c(St,"rel","nofollow"),c(dn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertModel"),c(W,"class","docstring"),c(F,"class","docstring"),c(Xe,"id","megatronbertformaskedlm"),c(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xe,"href","#megatronbertformaskedlm"),c(Be,"class","relative 
group"),c(ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ut,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ut,"rel","nofollow"),c(cn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForMaskedLM"),c(R,"class","docstring"),c(j,"class","docstring"),c(Ze,"id","megatronbertforcausallm"),c(Ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ze,"href","#megatronbertforcausallm"),c(Fe,"class","relative group"),c(hn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Zt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Zt,"rel","nofollow"),c(pn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForCausalLM"),c(H,"class","docstring"),c(A,"class","docstring"),c(tt,"id","megatronbertfornextsentenceprediction"),c(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(tt,"href","#megatronbertfornextsentenceprediction"),c(xe,"class","relative group"),c(mn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(io,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(io,"rel","nofollow"),c(un,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForNextSentencePrediction"),c(U,"class","docstring"),c(N,"class","docstring"),c(nt,"id","megatronbertforpretraining"),c(nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(nt,"href","#megatronbertforpretraining"),c(Ce,"class","relative group"),c(fn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(fo,"rel","nofollow"),c(gn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForPreTraining"),c(G,"class","docstring"),c(S,"class","docstring"),c(st,"id","megatronbertforsequenceclassification"),c(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(st,"href","#megatronbertforsequenceclassification"),c(Ae,"class","relative group"),c(_n,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(wo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(wo,"rel","nofollow"),c(vn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForSequenceClassification"),c(x,"class","docstring"),c(L,"class","docstring"),c(it,"id","megatronbertformultiplechoice"),c(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(it,"href","#megatronbertformultiplechoice"),c(Se,"class","relative 
group"),c(bn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(xo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(xo,"rel","nofollow"),c(kn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForMultipleChoice"),c(Q,"class","docstring"),c(I,"class","docstring"),c(lt,"id","megatronbertfortokenclassification"),c(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(lt,"href","#megatronbertfortokenclassification"),c(Ie,"class","relative group"),c(Mn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(So,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(So,"rel","nofollow"),c(wn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForTokenClassification"),c(V,"class","docstring"),c(O,"class","docstring"),c(ht,"id","megatronbertforquestionanswering"),c(ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ht,"href","#megatronbertforquestionanswering"),c(De,"class","relative group"),c(Tn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ho,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ho,"rel","nofollow"),c(yn,"href","/docs/transformers/v4.15.0/en/model_doc/megatron_bert#transformers.MegatronBertForQuestionAnswering"),c(J,"class","docstring"),c(D,"class","docstring")},m(t,h){e(document.head,p),m(t,y,h),m(t,u,h),e(u,T),e(T,$),b(g,$,null),e(u,f),e(u,B),e(B,Ma),m(t,ts,h),m(t,ke,h),e(ke,Ue),e(Ue,Dn),b(mt,Dn,null),e(ke,wa),e(ke,Wn),e(Wn,Ta),m(t,os,h),m(t,Ge,h),e(Ge,ya),e(Ge,ut),e(ut,$a),e(Ge,Ba),m(t,ns,h),m(t,Ko,h),e(Ko,za),m(t,rs,h),m(t,Xo,h),e(Xo,Rn),e(Rn,Fa),m(t,ss,h),m(t,Yo,h),e(Yo,Pa),m(t,as,h),m(t,Qe,h),e(Qe,xa),e(Qe,ft),e(ft,Ea),e(Qe,Ca),m(t,is,h),m(t,ee,h),e(ee,qa),e(ee,gt),e(gt,ja),e(ee,Aa),e(ee,_t),e(_t,Na),e(ee,Sa),m(t,ds,h),m(t,Zo,h),e(Zo,La),m(t,ls,h),m(t,en,h),e(en,Ia),m(t,cs,h),b(vt,t,h),m(t,hs,h),b(bt,t,h),m(t,ps,h),m(t,tn,h),e(tn,Oa),m(t,ms,h),b(kt,t,h),m(t,us,h),b(Mt,t,h),m(t,fs,h),m(t,on,h),e(on,Da),m(t,gs,h),m(t,te,h),e(te,Wa),e(te,Hn),e(Hn,Ra),e(te,Ha),e(te,Un),e(Un,Ua),e(te,Ga),m(t,_s,h),b(wt,t,h),m(t,vs,h),b(Tt,t,h),m(t,bs,h),b(yt,t,h),m(t,ks,h),m(t,oe,h),e(oe,Qa),e(oe,$t),e($t,Va),e(oe,Ja),e(oe,Bt),e(Bt,Ka),e(oe,Xa),m(t,Ms,h),m(t,Me,h),e(Me,Ve),e(Ve,Gn),b(zt,Gn,null),e(Me,Ya),e(Me,Qn),e(Qn,Za),m(t,ws,h),m(t,q,h),b(Ft,q,null),e(q,ei),e(q,we),e(we,ti),e(we,nn),e(nn,oi),e(we,ni),e(we,Pt),e(Pt,ri),e(we,si),e(q,ai),e(q,Te),e(Te,ii),e(Te,rn),e(rn,di),e(Te,li),e(Te,sn),e(sn,ci),e(Te,hi),e(q,pi),e(q,Vn),e(Vn,mi),e(q,ui),b(xt,q,null),m(t,Ts,h),m(t,ye,h),e(ye,Je),e(Je,Jn),b(Et,Jn,null),e(ye,fi),e(ye,Kn),e(Kn,gi),m(t,ys,h),m(t,F,h),b(Ct,F,null),e(F,_i),e(F,Xn),e(Xn,vi),e(F,bi),e(F,qt),e(qt,ki),e(qt,an),e(an,Mi),e(qt,wi),e(F,Ti),e(F,jt),e(jt,yi),e(jt,At),e(At,$i),e(jt,Bi),e(F,zi),e(F,Nt),e(Nt,Fi),e(Nt,St),e(St,Pi),e(Nt,xi),e(F,Ei),e(F,C),e(C,Ci),e(C,Yn),e(Yn,qi),e(C,ji),e(C,Zn),e(Zn,Ai),e(C,Ni),e(C,er),e(er,Si),e(C,Li),e(C,tr),e(tr,Ii),e(C,Oi),e(C,or),e(or,Di),e(C,Wi),e(C,nr),e(nr,Ri),e(C,Hi),e(F,Ui),e(F,W),b(Lt,W,null),e(W,Gi),e(W,$e),e($e,Qi),e($e,dn),e(dn,Vi),e($e,Ji),e($e,rr),e(rr,Ki),e($e,Xi),e(W,Yi),b(Ke,W,null),e(W,Zi),e(W,sr),e(sr,ed),e(W,td),b(It,W,null),m(t,$s,h),m(t,Be,h),e(Be,Xe),e(Xe,ar)
,b(Ot,ar,null),e(Be,od),e(Be,ir),e(ir,nd),m(t,Bs,h),m(t,j,h),b(Dt,j,null),e(j,rd),e(j,Wt),e(Wt,sd),e(Wt,dr),e(dr,ad),e(Wt,id),e(j,dd),e(j,Rt),e(Rt,ld),e(Rt,ln),e(ln,cd),e(Rt,hd),e(j,pd),e(j,Ht),e(Ht,md),e(Ht,Ut),e(Ut,ud),e(Ht,fd),e(j,gd),e(j,R),b(Gt,R,null),e(R,_d),e(R,ze),e(ze,vd),e(ze,cn),e(cn,bd),e(ze,kd),e(ze,lr),e(lr,Md),e(ze,wd),e(R,Td),b(Ye,R,null),e(R,yd),e(R,cr),e(cr,$d),e(R,Bd),b(Qt,R,null),m(t,zs,h),m(t,Fe,h),e(Fe,Ze),e(Ze,hr),b(Vt,hr,null),e(Fe,zd),e(Fe,pr),e(pr,Fd),m(t,Fs,h),m(t,A,h),b(Jt,A,null),e(A,Pd),e(A,Kt),e(Kt,xd),e(Kt,mr),e(mr,Ed),e(Kt,Cd),e(A,qd),e(A,Xt),e(Xt,jd),e(Xt,hn),e(hn,Ad),e(Xt,Nd),e(A,Sd),e(A,Yt),e(Yt,Ld),e(Yt,Zt),e(Zt,Id),e(Yt,Od),e(A,Dd),e(A,H),b(eo,H,null),e(H,Wd),e(H,Pe),e(Pe,Rd),e(Pe,pn),e(pn,Hd),e(Pe,Ud),e(Pe,ur),e(ur,Gd),e(Pe,Qd),e(H,Vd),b(et,H,null),e(H,Jd),e(H,fr),e(fr,Kd),e(H,Xd),b(to,H,null),m(t,Ps,h),m(t,xe,h),e(xe,tt),e(tt,gr),b(oo,gr,null),e(xe,Yd),e(xe,_r),e(_r,Zd),m(t,xs,h),m(t,N,h),b(no,N,null),e(N,el),e(N,ro),e(ro,tl),e(ro,vr),e(vr,ol),e(ro,nl),e(N,rl),e(N,so),e(so,sl),e(so,mn),e(mn,al),e(so,il),e(N,dl),e(N,ao),e(ao,ll),e(ao,io),e(io,cl),e(ao,hl),e(N,pl),e(N,U),b(lo,U,null),e(U,ml),e(U,Ee),e(Ee,ul),e(Ee,un),e(un,fl),e(Ee,gl),e(Ee,br),e(br,_l),e(Ee,vl),e(U,bl),b(ot,U,null),e(U,kl),e(U,kr),e(kr,Ml),e(U,wl),b(co,U,null),m(t,Es,h),m(t,Ce,h),e(Ce,nt),e(nt,Mr),b(ho,Mr,null),e(Ce,Tl),e(Ce,wr),e(wr,yl),m(t,Cs,h),m(t,S,h),b(po,S,null),e(S,$l),e(S,qe),e(qe,Bl),e(qe,Tr),e(Tr,zl),e(qe,Fl),e(qe,yr),e(yr,Pl),e(qe,xl),e(S,El),e(S,mo),e(mo,Cl),e(mo,fn),e(fn,ql),e(mo,jl),e(S,Al),e(S,uo),e(uo,Nl),e(uo,fo),e(fo,Sl),e(uo,Ll),e(S,Il),e(S,G),b(go,G,null),e(G,Ol),e(G,je),e(je,Dl),e(je,gn),e(gn,Wl),e(je,Rl),e(je,$r),e($r,Hl),e(je,Ul),e(G,Gl),b(rt,G,null),e(G,Ql),e(G,Br),e(Br,Vl),e(G,Jl),b(_o,G,null),m(t,qs,h),m(t,Ae,h),e(Ae,st),e(st,zr),b(vo,zr,null),e(Ae,Kl),e(Ae,Fr),e(Fr,Xl),m(t,js,h),m(t,L,h),b(bo,L,null),e(L,Yl),e(L,Pr),e(Pr,Zl),e(L,ec),e(L,ko),e(ko,tc),e(ko,_n),e(_n,oc),e(ko,nc),e(L,rc),e(L,Mo),e(Mo,sc),e(Mo,wo),e(wo,ac),e(Mo,ic),e(L,dc),e(L,x),b(To,x,null),e(x,lc),e(x,Ne),e(Ne,cc),e(Ne,vn),e(vn,hc),e(Ne,pc),e(Ne,xr),e(xr,mc),e(Ne,uc),e(x,fc),b(at,x,null),e(x,gc),e(x,Er),e(Er,_c),e(x,vc),b(yo,x,null),e(x,bc),e(x,Cr),e(Cr,kc),e(x,Mc),b($o,x,null),m(t,As,h),m(t,Se,h),e(Se,it),e(it,qr),b(Bo,qr,null),e(Se,wc),e(Se,jr),e(jr,Tc),m(t,Ns,h),m(t,I,h),b(zo,I,null),e(I,yc),e(I,Ar),e(Ar,$c),e(I,Bc),e(I,Fo),e(Fo,zc),e(Fo,bn),e(bn,Fc),e(Fo,Pc),e(I,xc),e(I,Po),e(Po,Ec),e(Po,xo),e(xo,Cc),e(Po,qc),e(I,jc),e(I,Q),b(Eo,Q,null),e(Q,Ac),e(Q,Le),e(Le,Nc),e(Le,kn),e(kn,Sc),e(Le,Lc),e(Le,Nr),e(Nr,Ic),e(Le,Oc),e(Q,Dc),b(dt,Q,null),e(Q,Wc),e(Q,Sr),e(Sr,Rc),e(Q,Hc),b(Co,Q,null),m(t,Ss,h),m(t,Ie,h),e(Ie,lt),e(lt,Lr),b(qo,Lr,null),e(Ie,Uc),e(Ie,Ir),e(Ir,Gc),m(t,Ls,h),m(t,O,h),b(jo,O,null),e(O,Qc),e(O,Or),e(Or,Vc),e(O,Jc),e(O,Ao),e(Ao,Kc),e(Ao,Mn),e(Mn,Xc),e(Ao,Yc),e(O,Zc),e(O,No),e(No,eh),e(No,So),e(So,th),e(No,oh),e(O,nh),e(O,V),b(Lo,V,null),e(V,rh),e(V,Oe),e(Oe,sh),e(Oe,wn),e(wn,ah),e(Oe,ih),e(Oe,Dr),e(Dr,dh),e(Oe,lh),e(V,ch),b(ct,V,null),e(V,hh),e(V,Wr),e(Wr,ph),e(V,mh),b(Io,V,null),m(t,Is,h),m(t,De,h),e(De,ht),e(ht,Rr),b(Oo,Rr,null),e(De,uh),e(De,Hr),e(Hr,fh),m(t,Os,h),m(t,D,h),b(Do,D,null),e(D,gh),e(D,We),e(We,_h),e(We,Ur),e(Ur,vh),e(We,bh),e(We,Gr),e(Gr,kh),e(We,Mh),e(D,wh),e(D,Wo),e(Wo,Th),e(Wo,Tn),e(Tn,yh),e(Wo,$h),e(D,Bh),e(D,Ro),e(Ro,zh),e(Ro,Ho),e(Ho,Fh),e(Ro,Ph),e(D,xh),e(D,J),b(Uo,J,null),e(J,Eh),e(J,Re),e(Re,Ch),e(Re,yn),e(yn,qh),e(Re,jh),e(Re,Qr),e(Qr,Ah),e(Re,Nh),e(J,Sh),b(pt,J,null),e(J,Lh),e(J,Vr),e(Vr,Ih),e(J,Oh),b(Go,J,null),Ds=!0},p(t,[h]){const 
Qo={};h&2&&(Qo.$$scope={dirty:h,ctx:t}),Ke.$set(Qo);const Jr={};h&2&&(Jr.$$scope={dirty:h,ctx:t}),Ye.$set(Jr);const Kr={};h&2&&(Kr.$$scope={dirty:h,ctx:t}),et.$set(Kr);const Xr={};h&2&&(Xr.$$scope={dirty:h,ctx:t}),ot.$set(Xr);const Vo={};h&2&&(Vo.$$scope={dirty:h,ctx:t}),rt.$set(Vo);const Yr={};h&2&&(Yr.$$scope={dirty:h,ctx:t}),at.$set(Yr);const Zr={};h&2&&(Zr.$$scope={dirty:h,ctx:t}),dt.$set(Zr);const es={};h&2&&(es.$$scope={dirty:h,ctx:t}),ct.$set(es);const Jo={};h&2&&(Jo.$$scope={dirty:h,ctx:t}),pt.$set(Jo)},i(t){Ds||(k(g.$$.fragment,t),k(mt.$$.fragment,t),k(vt.$$.fragment,t),k(bt.$$.fragment,t),k(kt.$$.fragment,t),k(Mt.$$.fragment,t),k(wt.$$.fragment,t),k(Tt.$$.fragment,t),k(yt.$$.fragment,t),k(zt.$$.fragment,t),k(Ft.$$.fragment,t),k(xt.$$.fragment,t),k(Et.$$.fragment,t),k(Ct.$$.fragment,t),k(Lt.$$.fragment,t),k(Ke.$$.fragment,t),k(It.$$.fragment,t),k(Ot.$$.fragment,t),k(Dt.$$.fragment,t),k(Gt.$$.fragment,t),k(Ye.$$.fragment,t),k(Qt.$$.fragment,t),k(Vt.$$.fragment,t),k(Jt.$$.fragment,t),k(eo.$$.fragment,t),k(et.$$.fragment,t),k(to.$$.fragment,t),k(oo.$$.fragment,t),k(no.$$.fragment,t),k(lo.$$.fragment,t),k(ot.$$.fragment,t),k(co.$$.fragment,t),k(ho.$$.fragment,t),k(po.$$.fragment,t),k(go.$$.fragment,t),k(rt.$$.fragment,t),k(_o.$$.fragment,t),k(vo.$$.fragment,t),k(bo.$$.fragment,t),k(To.$$.fragment,t),k(at.$$.fragment,t),k(yo.$$.fragment,t),k($o.$$.fragment,t),k(Bo.$$.fragment,t),k(zo.$$.fragment,t),k(Eo.$$.fragment,t),k(dt.$$.fragment,t),k(Co.$$.fragment,t),k(qo.$$.fragment,t),k(jo.$$.fragment,t),k(Lo.$$.fragment,t),k(ct.$$.fragment,t),k(Io.$$.fragment,t),k(Oo.$$.fragment,t),k(Do.$$.fragment,t),k(Uo.$$.fragment,t),k(pt.$$.fragment,t),k(Go.$$.fragment,t),Ds=!0)},o(t){M(g.$$.fragment,t),M(mt.$$.fragment,t),M(vt.$$.fragment,t),M(bt.$$.fragment,t),M(kt.$$.fragment,t),M(Mt.$$.fragment,t),M(wt.$$.fragment,t),M(Tt.$$.fragment,t),M(yt.$$.fragment,t),M(zt.$$.fragment,t),M(Ft.$$.fragment,t),M(xt.$$.fragment,t),M(Et.$$.fragment,t),M(Ct.$$.fragment,t),M(Lt.$$.fragment,t),M(Ke.$$.fragment,t),M(It.$$.fragment,t),M(Ot.$$.fragment,t),M(Dt.$$.fragment,t),M(Gt.$$.fragment,t),M(Ye.$$.fragment,t),M(Qt.$$.fragment,t),M(Vt.$$.fragment,t),M(Jt.$$.fragment,t),M(eo.$$.fragment,t),M(et.$$.fragment,t),M(to.$$.fragment,t),M(oo.$$.fragment,t),M(no.$$.fragment,t),M(lo.$$.fragment,t),M(ot.$$.fragment,t),M(co.$$.fragment,t),M(ho.$$.fragment,t),M(po.$$.fragment,t),M(go.$$.fragment,t),M(rt.$$.fragment,t),M(_o.$$.fragment,t),M(vo.$$.fragment,t),M(bo.$$.fragment,t),M(To.$$.fragment,t),M(at.$$.fragment,t),M(yo.$$.fragment,t),M($o.$$.fragment,t),M(Bo.$$.fragment,t),M(zo.$$.fragment,t),M(Eo.$$.fragment,t),M(dt.$$.fragment,t),M(Co.$$.fragment,t),M(qo.$$.fragment,t),M(jo.$$.fragment,t),M(Lo.$$.fragment,t),M(ct.$$.fragment,t),M(Io.$$.fragment,t),M(Oo.$$.fragment,t),M(Do.$$.fragment,t),M(Uo.$$.fragment,t),M(pt.$$.fragment,t),M(Go.$$.fragment,t),Ds=!1},d(t){o(p),t&&o(y),t&&o(u),w(g),t&&o(ts),t&&o(ke),w(mt),t&&o(os),t&&o(Ge),t&&o(ns),t&&o(Ko),t&&o(rs),t&&o(Xo),t&&o(ss),t&&o(Yo),t&&o(as),t&&o(Qe),t&&o(is),t&&o(ee),t&&o(ds),t&&o(Zo),t&&o(ls),t&&o(en),t&&o(cs),w(vt,t),t&&o(hs),w(bt,t),t&&o(ps),t&&o(tn),t&&o(ms),w(kt,t),t&&o(us),w(Mt,t),t&&o(fs),t&&o(on),t&&o(gs),t&&o(te),t&&o(_s),w(wt,t),t&&o(vs),w(Tt,t),t&&o(bs),w(yt,t),t&&o(ks),t&&o(oe),t&&o(Ms),t&&o(Me),w(zt),t&&o(ws),t&&o(q),w(Ft),w(xt),t&&o(Ts),t&&o(ye),w(Et),t&&o(ys),t&&o(F),w(Ct),w(Lt),w(Ke),w(It),t&&o($s),t&&o(Be),w(Ot),t&&o(Bs),t&&o(j),w(Dt),w(Gt),w(Ye),w(Qt),t&&o(zs),t&&o(Fe),w(Vt),t&&o(Fs),t&&o(A),w(Jt),w(eo),w(et),w(to),t&&o(Ps),t&&o(xe),w(oo),t&&o(xs),t&&o(N),w(no),w(lo)
,w(ot),w(co),t&&o(Es),t&&o(Ce),w(ho),t&&o(Cs),t&&o(S),w(po),w(go),w(rt),w(_o),t&&o(qs),t&&o(Ae),w(vo),t&&o(js),t&&o(L),w(bo),w(To),w(at),w(yo),w($o),t&&o(As),t&&o(Se),w(Bo),t&&o(Ns),t&&o(I),w(zo),w(Eo),w(dt),w(Co),t&&o(Ss),t&&o(Ie),w(qo),t&&o(Ls),t&&o(O),w(jo),w(Lo),w(ct),w(Io),t&&o(Is),t&&o(De),w(Oo),t&&o(Os),t&&o(D),w(Do),w(Uo),w(pt),w(Go)}}}const du={local:"megatronbert",sections:[{local:"transformers.MegatronBertConfig",title:"Overview"},{local:"megatronbertconfig",title:"MegatronBertConfig"},{local:"megatronbertmodel",title:"MegatronBertModel"},{local:"megatronbertformaskedlm",title:"MegatronBertForMaskedLM"},{local:"megatronbertforcausallm",title:"MegatronBertForCausalLM"},{local:"megatronbertfornextsentenceprediction",title:"MegatronBertForNextSentencePrediction"},{local:"megatronbertforpretraining",title:"MegatronBertForPreTraining"},{local:"megatronbertforsequenceclassification",title:"MegatronBertForSequenceClassification"},{local:"megatronbertformultiplechoice",title:"MegatronBertForMultipleChoice"},{local:"megatronbertfortokenclassification",title:"MegatronBertForTokenClassification"},{local:"megatronbertforquestionanswering",title:"MegatronBertForQuestionAnswering"}],title:"MegatronBERT"};function lu(z,p,y){let{fw:u}=p;return z.$$set=T=>{"fw"in T&&y(0,u=T.fw)},[u]}class gu extends Vm{constructor(p){super();Jm(this,p,lu,iu,Km,{fw:0})}}export{gu as default,du as metadata};
9,914
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/led.mdx-2d525ebc.js
import{S as Z_,i as Y_,s as eg,e as n,k as i,w as m,t as r,L as tg,c as s,d as t,m as l,a,x as _,h as d,b as c,J as e,g as h,y as g,q as v,o as T,B as k}from"../../chunks/vendor-b1433968.js";import{T as ko}from"../../chunks/Tip-c3840994.js";import{D as S}from"../../chunks/Docstring-ff504c58.js";import{C as bo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Y}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function og(B){let p,L,b,w,q;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=n("code"),w=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(E){p=s(E,"P",{});var y=a(p);L=d(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(y,"CODE",{});var D=a(b);w=d(D,"Module"),D.forEach(t),q=d(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(E,y){h(E,p,y),e(p,L),e(p,b),e(b,w),e(p,q)},d(E){E&&t(p)}}}function ng(B){let p,L,b,w,q;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=n("code"),w=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(E){p=s(E,"P",{});var y=a(p);L=d(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(y,"CODE",{});var D=a(b);w=d(D,"Module"),D.forEach(t),q=d(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(E,y){h(E,p,y),e(p,L),e(p,b),e(b,w),e(p,q)},d(E){E&&t(p)}}}function sg(B){let p,L,b,w,q;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=n("code"),w=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(E){p=s(E,"P",{});var y=a(p);L=d(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(y,"CODE",{});var D=a(b);w=d(D,"Module"),D.forEach(t),q=d(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(E,y){h(E,p,y),e(p,L),e(p,b),e(b,w),e(p,q)},d(E){E&&t(p)}}}function ag(B){let p,L,b,w,q;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=n("code"),w=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(E){p=s(E,"P",{});var y=a(p);L=d(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(y,"CODE",{});var D=a(b);w=d(D,"Module"),D.forEach(t),q=d(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(E,y){h(E,p,y),e(p,L),e(p,b),e(b,w),e(p,q)},d(E){E&&t(p)}}}function rg(B){let 
p,L,b,w,q,E,y,D,Ae,_e,z,ee,W,ne,Ne,K,Ie,Me,Q,V,se,Le,C,A,ge,ae,Se,ve,X,je,Ce,O,Be,te,$,x,H,Ge,Qe,j,He,re,De;return{c(){p=n("p"),L=r("TF 2.0 models accepts two formats as inputs:"),b=i(),w=n("ul"),q=n("li"),E=r("having all inputs as keyword arguments (like PyTorch models), or"),y=i(),D=n("li"),Ae=r("having all inputs as a list, tuple or dict in the first positional arguments."),_e=i(),z=n("p"),ee=r("This second option is useful when using "),W=n("code"),ne=r("tf.keras.Model.fit"),Ne=r(` method which currently requires having all the tensors in the first argument of the model call function: `),K=n("code"),Ie=r("model(inputs)"),Me=r("."),Q=i(),V=n("p"),se=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Le=i(),C=n("ul"),A=n("li"),ge=r("a single Tensor with "),ae=n("code"),Se=r("input_ids"),ve=r(" only and nothing else: "),X=n("code"),je=r("model(input_ids)"),Ce=i(),O=n("li"),Be=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),te=n("code"),$=r("model([input_ids, attention_mask])"),x=r(" or "),H=n("code"),Ge=r("model([input_ids, attention_mask, token_type_ids])"),Qe=i(),j=n("li"),He=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),re=n("code"),De=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(f){p=s(f,"P",{});var F=a(p);L=d(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),b=l(f),w=s(f,"UL",{});var de=a(w);q=s(de,"LI",{});var nt=a(q);E=d(nt,"having all inputs as keyword arguments (like PyTorch models), or"),nt.forEach(t),y=l(de),D=s(de,"LI",{});var st=a(D);Ae=d(st,"having all inputs as a list, tuple or dict in the first positional arguments."),st.forEach(t),de.forEach(t),_e=l(f),z=s(f,"P",{});var G=a(z);ee=d(G,"This second option is useful when using "),W=s(G,"CODE",{});var at=a(W);ne=d(at,"tf.keras.Model.fit"),at.forEach(t),Ne=d(G,` method which currently requires having all the tensors in the first argument of the model call function: `),K=s(G,"CODE",{});var rt=a(K);Ie=d(rt,"model(inputs)"),rt.forEach(t),Me=d(G,"."),G.forEach(t),Q=l(f),V=s(f,"P",{});var Oe=a(V);se=d(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),Le=l(f),C=s(f,"UL",{});var J=a(C);A=s(J,"LI",{});var ie=a(A);ge=d(ie,"a single Tensor with "),ae=s(ie,"CODE",{});var Pe=a(ae);Se=d(Pe,"input_ids"),Pe.forEach(t),ve=d(ie," only and nothing else: "),X=s(ie,"CODE",{});var dt=a(X);je=d(dt,"model(input_ids)"),dt.forEach(t),ie.forEach(t),Ce=l(J),O=s(J,"LI",{});var le=a(O);Be=d(le,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),te=s(le,"CODE",{});var it=a(te);$=d(it,"model([input_ids, attention_mask])"),it.forEach(t),x=d(le," or "),H=s(le,"CODE",{});var ce=a(H);Ge=d(ce,"model([input_ids, attention_mask, token_type_ids])"),ce.forEach(t),le.forEach(t),Qe=l(J),j=s(J,"LI",{});var Ue=a(j);He=d(Ue,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),re=s(Ue,"CODE",{});var Re=a(re);De=d(Re,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Re.forEach(t),Ue.forEach(t),J.forEach(t)},m(f,F){h(f,p,F),e(p,L),h(f,b,F),h(f,w,F),e(w,q),e(q,E),e(w,y),e(w,D),e(D,Ae),h(f,_e,F),h(f,z,F),e(z,ee),e(z,W),e(W,ne),e(z,Ne),e(z,K),e(K,Ie),e(z,Me),h(f,Q,F),h(f,V,F),e(V,se),h(f,Le,F),h(f,C,F),e(C,A),e(A,ge),e(A,ae),e(ae,Se),e(A,ve),e(A,X),e(X,je),e(C,Ce),e(C,O),e(O,Be),e(O,te),e(te,$),e(O,x),e(O,H),e(H,Ge),e(C,Qe),e(C,j),e(j,He),e(j,re),e(re,De)},d(f){f&&t(p),f&&t(b),f&&t(w),f&&t(_e),f&&t(z),f&&t(Q),f&&t(V),f&&t(Le),f&&t(C)}}}function dg(B){let p,L,b,w,q;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=n("code"),w=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(E){p=s(E,"P",{});var y=a(p);L=d(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(y,"CODE",{});var D=a(b);w=d(D,"Module"),D.forEach(t),q=d(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(E,y){h(E,p,y),e(p,L),e(p,b),e(b,w),e(p,q)},d(E){E&&t(p)}}}function ig(B){let p,L,b,w,q,E,y,D,Ae,_e,z,ee,W,ne,Ne,K,Ie,Me,Q,V,se,Le,C,A,ge,ae,Se,ve,X,je,Ce,O,Be,te,$,x,H,Ge,Qe,j,He,re,De;return{c(){p=n("p"),L=r("TF 2.0 models accepts two formats as inputs:"),b=i(),w=n("ul"),q=n("li"),E=r("having all inputs as keyword arguments (like PyTorch models), or"),y=i(),D=n("li"),Ae=r("having all inputs as a list, tuple or dict in the first positional arguments."),_e=i(),z=n("p"),ee=r("This second option is useful when using "),W=n("code"),ne=r("tf.keras.Model.fit"),Ne=r(` method which currently requires having all the tensors in the first argument of the model call function: `),K=n("code"),Ie=r("model(inputs)"),Me=r("."),Q=i(),V=n("p"),se=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Le=i(),C=n("ul"),A=n("li"),ge=r("a single Tensor with "),ae=n("code"),Se=r("input_ids"),ve=r(" only and nothing else: "),X=n("code"),je=r("model(input_ids)"),Ce=i(),O=n("li"),Be=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),te=n("code"),$=r("model([input_ids, attention_mask])"),x=r(" or "),H=n("code"),Ge=r("model([input_ids, attention_mask, token_type_ids])"),Qe=i(),j=n("li"),He=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),re=n("code"),De=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(f){p=s(f,"P",{});var F=a(p);L=d(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),b=l(f),w=s(f,"UL",{});var de=a(w);q=s(de,"LI",{});var nt=a(q);E=d(nt,"having all inputs as keyword arguments (like PyTorch models), or"),nt.forEach(t),y=l(de),D=s(de,"LI",{});var st=a(D);Ae=d(st,"having all inputs as a list, tuple or dict in the first positional arguments."),st.forEach(t),de.forEach(t),_e=l(f),z=s(f,"P",{});var G=a(z);ee=d(G,"This second option is useful when using "),W=s(G,"CODE",{});var at=a(W);ne=d(at,"tf.keras.Model.fit"),at.forEach(t),Ne=d(G,` method which currently requires having all the tensors in the first argument of the model call function: `),K=s(G,"CODE",{});var rt=a(K);Ie=d(rt,"model(inputs)"),rt.forEach(t),Me=d(G,"."),G.forEach(t),Q=l(f),V=s(f,"P",{});var Oe=a(V);se=d(Oe,`If you choose this 
second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),Le=l(f),C=s(f,"UL",{});var J=a(C);A=s(J,"LI",{});var ie=a(A);ge=d(ie,"a single Tensor with "),ae=s(ie,"CODE",{});var Pe=a(ae);Se=d(Pe,"input_ids"),Pe.forEach(t),ve=d(ie," only and nothing else: "),X=s(ie,"CODE",{});var dt=a(X);je=d(dt,"model(input_ids)"),dt.forEach(t),ie.forEach(t),Ce=l(J),O=s(J,"LI",{});var le=a(O);Be=d(le,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),te=s(le,"CODE",{});var it=a(te);$=d(it,"model([input_ids, attention_mask])"),it.forEach(t),x=d(le," or "),H=s(le,"CODE",{});var ce=a(H);Ge=d(ce,"model([input_ids, attention_mask, token_type_ids])"),ce.forEach(t),le.forEach(t),Qe=l(J),j=s(J,"LI",{});var Ue=a(j);He=d(Ue,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),re=s(Ue,"CODE",{});var Re=a(re);De=d(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),Ue.forEach(t),J.forEach(t)},m(f,F){h(f,p,F),e(p,L),h(f,b,F),h(f,w,F),e(w,q),e(q,E),e(w,y),e(w,D),e(D,Ae),h(f,_e,F),h(f,z,F),e(z,ee),e(z,W),e(W,ne),e(z,Ne),e(z,K),e(K,Ie),e(z,Me),h(f,Q,F),h(f,V,F),e(V,se),h(f,Le,F),h(f,C,F),e(C,A),e(A,ge),e(A,ae),e(ae,Se),e(A,ve),e(A,X),e(X,je),e(C,Ce),e(C,O),e(O,Be),e(O,te),e(te,$),e(O,x),e(O,H),e(H,Ge),e(C,Qe),e(C,j),e(j,He),e(j,re),e(re,De)},d(f){f&&t(p),f&&t(b),f&&t(w),f&&t(_e),f&&t(z),f&&t(Q),f&&t(V),f&&t(Le),f&&t(C)}}}function lg(B){let p,L,b,w,q;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=n("code"),w=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(E){p=s(E,"P",{});var y=a(p);L=d(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(y,"CODE",{});var D=a(b);w=d(D,"Module"),D.forEach(t),q=d(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(E,y){h(E,p,y),e(p,L),e(p,b),e(b,w),e(p,q)},d(E){E&&t(p)}}}function cg(B){let 
p,L,b,w,q,E,y,D,Ae,_e,z,ee,W,ne,Ne,K,Ie,Me,Q,V,se,Le,C,A,ge,ae,Se,ve,X,je,Ce,O,Be,te,$,x,H,Ge,Qe,j,He,re,De,f,F,de,nt,st,G,at,rt,Oe,J,ie,Pe,dt,le,it,ce,Ue,Re,mi,_i,As,gi,vi,Ti,ze,ki,Ns,bi,wi,Is,yi,Ei,es,qi,Li,js,Di,zi,Fi,Z,$i,Bs,xi,Mi,Gs,Si,Ci,ts,Oi,Pi,Qs,Ai,Ni,Hs,Ii,ji,Us,Bi,Gi,Qi,lt,Hi,Rs,Ui,Ri,Ws,Wi,Ki,Vi,wo,Xi,yo,Ji,Zi,Yi,Eo,el,qo,tl,ol,Qr,At,nl,Lo,sl,al,Hr,ct,Nt,Ks,Do,rl,Vs,dl,Ur,P,zo,il,ht,ll,os,cl,hl,Fo,ul,pl,fl,ut,ml,ns,_l,gl,ss,vl,Tl,kl,Xs,bl,wl,$o,yl,Js,Zs,Ys,ea,El,ql,ta,oa,xo,It,jt,na,Mo,Ll,sa,Dl,zl,aa,Fl,$l,ra,da,So,Bt,Gt,ia,Co,xl,la,Ml,Sl,ca,Cl,Ol,ha,ua,Oo,Qt,Ht,pa,Po,Pl,fa,Al,Nl,ma,Il,Rr,pt,Ut,_a,Ao,jl,ga,Bl,Wr,I,No,Gl,va,Ql,Hl,Rt,as,Ul,Rl,rs,Wl,Kl,Vl,Io,Xl,ds,Jl,Zl,Yl,We,jo,ec,Ta,tc,oc,Bo,is,nc,ka,sc,ac,ls,rc,ba,dc,ic,Wt,Go,lc,Qo,cc,wa,hc,uc,pc,Kt,Ho,fc,ya,mc,_c,Ea,Kr,ft,Vt,qa,Uo,gc,La,vc,Vr,Fe,Ro,Tc,Wo,kc,Da,bc,wc,yc,Xt,cs,Ec,qc,hs,Lc,Dc,zc,Ko,Fc,us,$c,xc,Xr,mt,Jt,za,Vo,Mc,Fa,Sc,Jr,_t,Xo,Cc,$a,Oc,Zr,gt,Jo,Pc,xa,Ac,Yr,vt,Zo,Nc,Ma,Ic,ed,Tt,Yo,jc,Sa,Bc,td,kt,en,Gc,Ca,Qc,od,bt,tn,Hc,Oa,Uc,nd,wt,on,Rc,Pa,Wc,sd,yt,nn,Kc,Aa,Vc,ad,Et,Zt,Na,sn,Xc,Ia,Jc,rd,$e,an,Zc,rn,Yc,ps,eh,th,oh,dn,nh,ln,sh,ah,rh,Te,cn,dh,qt,ih,fs,lh,ch,ja,hh,uh,ph,Yt,fh,Ba,mh,_h,hn,dd,Lt,eo,Ga,un,gh,Qa,vh,id,xe,pn,Th,fn,kh,ms,bh,wh,yh,mn,Eh,_n,qh,Lh,Dh,M,gn,zh,Dt,Fh,_s,$h,xh,Ha,Mh,Sh,Ch,to,Oh,Ua,Ph,Ah,vn,Nh,Ra,Ih,jh,Wa,Ka,Va,Xa,Bh,Gh,Ja,Za,Ya,er,Qh,Hh,tr,or,nr,sr,Uh,Rh,ar,rr,Tn,oo,no,dr,kn,Wh,ir,Kh,Vh,lr,Xh,Jh,cr,hr,bn,so,ao,ur,wn,Zh,pr,Yh,eu,fr,tu,ld,zt,ro,mr,yn,ou,_r,nu,cd,he,En,su,gr,au,ru,qn,du,gs,iu,lu,cu,Ln,hu,Dn,uu,pu,fu,U,zn,mu,Ft,_u,vs,gu,vu,vr,Tu,ku,bu,io,wu,Tr,yu,Eu,Fn,qu,kr,Lu,Du,$n,hd,$t,lo,br,xn,zu,wr,Fu,ud,ue,Mn,$u,xt,xu,yr,Mu,Su,Er,Cu,Ou,Pu,Sn,Au,Ts,Nu,Iu,ju,Cn,Bu,On,Gu,Qu,Hu,ke,Pn,Uu,Mt,Ru,ks,Wu,Ku,qr,Vu,Xu,Ju,co,Zu,Lr,Yu,ep,An,pd,St,ho,Dr,Nn,tp,zr,op,fd,pe,In,np,jn,sp,bs,ap,rp,dp,Bn,ip,Gn,lp,cp,hp,uo,up,be,Qn,pp,Ct,fp,ws,mp,_p,Fr,gp,vp,Tp,po,kp,$r,bp,wp,Hn,md,Ot,fo,xr,Un,yp,Mr,Ep,_d,fe,Rn,qp,Wn,Lp,ys,Dp,zp,Fp,Kn,$p,Vn,xp,Mp,Sp,mo,Cp,we,Xn,Op,Pt,Pp,Es,Ap,Np,Sr,Ip,jp,Bp,_o,Gp,Cr,Qp,Hp,Jn,gd;return E=new Y({}),ne=new Y({}),Do=new Y({}),zo=new S({props:{name:"class transformers.LEDConfig",anchor:"transformers.LEDConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"max_encoder_position_embeddings",val:" = 16384"},{name:"max_decoder_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"classifier_dropout",val:" = 0.0"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"attention_window",val:": typing.Union[typing.List[int], int] = 512"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/configuration_led.py#L31",parametersDescription:[{anchor:"transformers.LEDConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the LED model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDModel">LEDModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDModel">TFLEDModel</a>.`,name:"vocab_size"},{anchor:"transformers.LEDConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.LEDConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.LEDConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.LEDConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.LEDConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.LEDConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.LEDConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.LEDConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.LEDConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.LEDConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.LEDConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.LEDConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.LEDConfig.max_encoder_position_embeddings",description:`<strong>max_encoder_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The maximum sequence length that the encoder might ever be used with.`,name:"max_encoder_position_embeddings"},{anchor:"transformers.LEDConfig.max_decoder_position_embeddings",description:`<strong>max_decoder_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The maximum sequence length that the decoder might ever be used with.`,name:"max_decoder_position_embeddings"},{anchor:"transformers.LEDConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.LEDConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"}]}}),$o=new bo({props:{code:",",highlighted:""}}),Mo=new Y({}),Co=new Y({}),Po=new Y({}),Ao=new Y({}),No=new S({props:{name:"class transformers.LEDTokenizer",anchor:"transformers.LEDTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/tokenization_led.py#L39"}}),jo=new S({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RobertaTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/tokenization_roberta.py#L181",parametersDescription:[{anchor:"transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Go=new S({props:{name:"get_special_tokens_mask",anchor:"transformers.RobertaTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/tokenization_roberta.py#L206",parametersDescription:[{anchor:"transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.RobertaTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a 
sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ho=new S({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.RobertaTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/tokenization_roberta.py#L233",parametersDescription:[{anchor:"transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Uo=new Y({}),Ro=new S({props:{name:"class transformers.LEDTokenizerFast",anchor:"transformers.LEDTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/tokenization_led_fast.py#L40"}}),Vo=new Y({}),Xo=new S({props:{name:"class transformers.models.led.modeling_led.LEDEncoderBaseModelOutput",anchor:"transformers.models.led.modeling_led.LEDEncoderBaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L1113",parametersDescription:[{anchor:"transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),Jo=new S({props:{name:"class transformers.models.led.modeling_led.LEDSeq2SeqModelOutput",anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L1154",parametersDescription:[{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions 
weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_global_attentions",description:`<strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"encoder_global_attentions"}]}}),Zo=new S({props:{name:"class transformers.models.led.modeling_led.LEDSeq2SeqLMOutput",anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L1216",parametersDescription:[{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple 
of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_global_attentions",description:`<strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"encoder_global_attentions"}]}}),Yo=new S({props:{name:"class transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput",anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L1278",parametersDescription:[{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_global_attentions",description:`<strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.`,name:"encoder_global_attentions"}]}}),en=new S({props:{name:"class transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput",anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_logits",val:": FloatTensor = None"},{name:"end_logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_global_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L1340",parametersDescription:[{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"},{anchor:"transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_global_attentions",description:`<strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of 
<code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"encoder_global_attentions"}]}}),tn=new S({props:{name:"class transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput",anchor:"transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L1310",parametersDescription:[{anchor:"transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. 
If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.`,name:"attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.global_attentions",description:`<strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"global_attentions"}]}}),on=new S({props:{name:"class transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput",anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L1351",parametersDescription:[{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see 
<code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted 
average in the self-attention heads.`,name:"encoder_attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_global_attentions",description:`<strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"encoder_global_attentions"}]}}),nn=new S({props:{name:"class transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput",anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_global_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L1414",parametersDescription:[{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential 
decoding.`,name:"past_key_values"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention 
heads.`,name:"encoder_attentions"},{anchor:"transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_global_attentions",description:`<strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.`,name:"encoder_global_attentions"}]}}),sn=new Y({}),an=new S({props:{name:"class transformers.LEDModel",anchor:"transformers.LEDModel",parameters:[{name:"config",val:": LEDConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2120",parametersDescription:[{anchor:"transformers.LEDModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),cn=new S({props:{name:"forward",anchor:"transformers.LEDModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2147",parametersDescription:[{anchor:"transformers.LEDModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LEDModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LEDModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.LEDModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.LEDModel.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. 
Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LEDModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LEDModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LEDModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.LEDModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.LEDModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. 
inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.LEDModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.LEDModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.LEDModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LEDModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LEDModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after 
the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Yt=new ko({props:{$$slots:{default:[og]},$$scope:{ctx:B}}}),hn=new bo({props:{code:`from transformers import LEDTokenizer, LEDModel import torch tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384') model = LEDModel.from_pretrained('allenai/led-base-16384') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDModel.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),un=new Y({}),pn=new S({props:{name:"class transformers.LEDForConditionalGeneration",anchor:"transformers.LEDForConditionalGeneration",parameters:[{name:"config",val:": LEDConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2243",parametersDescription:[{anchor:"transformers.LEDForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the 
model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gn=new S({props:{name:"forward",anchor:"transformers.LEDForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2287",parametersDescription:[{anchor:"transformers.LEDForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LEDForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LEDForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.LEDForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.LEDForConditionalGeneration.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LEDForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LEDForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LEDForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.LEDForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.LEDForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.LEDForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.LEDForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.LEDForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LEDForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LEDForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LEDForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),to=new ko({props:{$$slots:{default:[ng]},$$scope:{ctx:B}}}),vn=new bo({props:{code:`from transformers import LEDTokenizer, LEDForConditionalGeneration tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384') TXT = "My friends are <mask> but they eat too many carbs." model = LEDForConditionalGeneration.from_pretrained('allenai/led-base-16384') input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] prediction = model.generate(input_ids)[0] print(tokenizer.decode(prediction, skip_special_tokens=True)),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;My friends are &lt;mask&gt; but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer([TXT], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>prediction = model.generate(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(prediction, skip_special_tokens=<span class="hljs-literal">True</span>))`}}),kn=new Y({}),wn=new Y({}),yn=new Y({}),En=new S({props:{name:"class transformers.LEDForSequenceClassification",anchor:"transformers.LEDForSequenceClassification",parameters:[{name:"config",val:": 
LEDConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2429",parametersDescription:[{anchor:"transformers.LEDForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zn=new S({props:{name:"forward",anchor:"transformers.LEDForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2442",parametersDescription:[{anchor:"transformers.LEDForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LEDForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LEDForSequenceClassification.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.LEDForSequenceClassification.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.LEDForSequenceClassification.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LEDForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LEDForSequenceClassification.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LEDForSequenceClassification.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.LEDForSequenceClassification.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.LEDForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.LEDForSequenceClassification.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.LEDForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.LEDForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LEDForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LEDForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LEDForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),io=new ko({props:{$$slots:{default:[sg]},$$scope:{ctx:B}}}),Fn=new bo({props:{code:`from transformers import LEDTokenizer, LEDForSequenceClassification import torch tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384') model = LEDForSequenceClassification.from_pretrained('allenai/led-base-16384') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$n=new bo({props:{code:`from transformers import LEDTokenizer, LEDForSequenceClassification import torch tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384') model = LEDForSequenceClassification.from_pretrained('allenai/led-base-16384', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = 
torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),xn=new Y({}),Mn=new S({props:{name:"class transformers.LEDForQuestionAnswering",anchor:"transformers.LEDForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2556",parametersDescription:[{anchor:"transformers.LEDForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Pn=new S({props:{name:"forward",anchor:"transformers.LEDForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"global_attention_mask",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_led.py#L2568",parametersDescription:[{anchor:"transformers.LEDForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LEDForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LEDForQuestionAnswering.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.LEDForQuestionAnswering.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.LEDForQuestionAnswering.forward.global_attention_mask",description:`<strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</s></p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul>`,name:"global_attention_mask"},{anchor:"transformers.LEDForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LEDForQuestionAnswering.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LEDForQuestionAnswering.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.LEDForQuestionAnswering.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.LEDForQuestionAnswering.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.LEDForQuestionAnswering.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.LEDForQuestionAnswering.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.LEDForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LEDForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LEDForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LEDForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.LEDForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the 
cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),co=new ko({props:{$$slots:{default:[ag]},$$scope:{ctx:B}}}),An=new bo({props:{code:`from transformers import LEDTokenizer, LEDForQuestionAnswering import torch tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384') model = LEDForQuestionAnswering.from_pretrained('allenai/led-base-16384') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, 
end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Nn=new Y({}),In=new S({props:{name:"class transformers.TFLEDModel",anchor:"transformers.TFLEDModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L2210",parametersDescription:[{anchor:"transformers.TFLEDModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),uo=new ko({props:{$$slots:{default:[rg]},$$scope:{ctx:B}}}),Qn=new S({props:{name:"call",anchor:"transformers.TFLEDModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput, NoneType] = None"},{name:"global_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L2222",parametersDescription:[{anchor:"transformers.TFLEDModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLEDModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLEDModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFLEDModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFLEDModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLEDModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFLEDModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFLEDModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFLEDModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFLEDModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLEDModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLEDModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLEDModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 
sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),po=new ko({props:{$$slots:{default:[dg]},$$scope:{ctx:B}}}),Hn=new bo({props:{code:`from transformers import LEDTokenizer, TFLEDModel import tensorflow as tf tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384') model = TFLEDModel.from_pretrained('allenai/led-base-16384') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, TFLEDModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLEDModel.from_pretrained(<span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Un=new Y({}),Rn=new S({props:{name:"class 
transformers.TFLEDForConditionalGeneration",anchor:"transformers.TFLEDForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L2315",parametersDescription:[{anchor:"transformers.TFLEDForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mo=new ko({props:{$$slots:{default:[ig]},$$scope:{ctx:B}}}),Xn=new S({props:{name:"call",anchor:"transformers.TFLEDForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput] = None"},{name:"global_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/led/modeling_tf_led.py#L2348",parametersDescription:[{anchor:"transformers.TFLEDForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLEDForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLEDForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFLEDForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFLEDForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLEDForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFLEDForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFLEDForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFLEDForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFLEDForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLEDForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLEDForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLEDForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),_o=new ko({props:{$$slots:{default:[lg]},$$scope:{ctx:B}}}),Jn=new bo({props:{code:`from transformers import LEDTokenizer, TFLEDForConditionalGeneration import tensorflow as tf mname = 'allenai/led-base-16384' tokenizer = LEDTokenizer.from_pretrained(mname) TXT = "My friends are <mask> but they eat too many carbs." 
model = TFLEDForConditionalGeneration.from_pretrained(mname) batch = tokenizer([TXT], return_tensors='tf') logits = model(inputs=batch.input_ids).logits probs = tf.nn.softmax(logits[0]) # probs[5] is associated with the mask token,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, TFLEDForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&#x27;allenai/led-base-16384&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;My friends are &lt;mask&gt; but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLEDForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer([TXT], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(inputs=batch.input_ids).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probs = tf.nn.softmax(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># probs[5] is associated with the mask token</span>`}}),{c(){p=n("meta"),L=i(),b=n("h1"),w=n("a"),q=n("span"),m(E.$$.fragment),y=i(),D=n("span"),Ae=r("LED"),_e=i(),z=n("h2"),ee=n("a"),W=n("span"),m(ne.$$.fragment),Ne=i(),K=n("span"),Ie=r("Overview"),Me=i(),Q=n("p"),V=r("The LED model was proposed in "),se=n("a"),Le=r("Longformer: The Long-Document Transformer"),C=r(` by Iz Beltagy, Matthew E. Peters, Arman Cohan.`),A=i(),ge=n("p"),ae=r("The abstract from the paper is the following:"),Se=i(),ve=n("p"),X=n("em"),je=r(`Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer\u2019s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), a Longformer variant for supporting long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization dataset.`),Ce=i(),O=n("p"),Be=r("Tips:"),te=i(),$=n("ul"),x=n("li"),H=n("a"),Ge=r("LEDForConditionalGeneration"),Qe=r(` is an extension of `),j=n("a"),He=r("BartForConditionalGeneration"),re=r(" exchanging the traditional "),De=n("em"),f=r("self-attention"),F=r(` layer with `),de=n("em"),nt=r("Longformer"),st=r("\u2019s "),G=n("em"),at=r("chunked self-attention"),rt=r(" layer. 
"),Oe=n("a"),J=r("LEDTokenizer"),ie=r(` is an alias of `),Pe=n("a"),dt=r("BartTokenizer"),le=r("."),it=i(),ce=n("li"),Ue=r("LED works very well on long-range "),Re=n("em"),mi=r("sequence-to-sequence"),_i=r(" tasks where the "),As=n("code"),gi=r("input_ids"),vi=r(` largely exceed a length of 1024 tokens.`),Ti=i(),ze=n("li"),ki=r("LED pads the "),Ns=n("code"),bi=r("input_ids"),wi=r(" to be a multiple of "),Is=n("code"),yi=r("config.attention_window"),Ei=r(` if required. Therefore a small speed-up is gained, when `),es=n("a"),qi=r("LEDTokenizer"),Li=r(" is used with the "),js=n("code"),Di=r("pad_to_multiple_of"),zi=r(" argument."),Fi=i(),Z=n("li"),$i=r("LED makes use of "),Bs=n("em"),xi=r("global attention"),Mi=r(" by means of the "),Gs=n("code"),Si=r("global_attention_mask"),Ci=r(` (see `),ts=n("a"),Oi=r("LongformerModel"),Pi=r("). For summarization, it is advised to put "),Qs=n("em"),Ai=r("global attention"),Ni=r(` only on the first `),Hs=n("code"),Ii=r("<s>"),ji=r(" token. For question answering, it is advised to put "),Us=n("em"),Bi=r("global attention"),Gi=r(" on all tokens of the question."),Qi=i(),lt=n("li"),Hi=r("To fine-tune LED on all 16384, it is necessary to enable "),Rs=n("em"),Ui=r("gradient checkpointing"),Ri=r(` by executing `),Ws=n("code"),Wi=r("model.gradient_checkpointing_enable()"),Ki=r("."),Vi=i(),wo=n("li"),Xi=r("A notebook showing how to evaluate LED, can be accessed "),yo=n("a"),Ji=r("here"),Zi=r("."),Yi=i(),Eo=n("li"),el=r("A notebook showing how to fine-tune LED, can be accessed "),qo=n("a"),tl=r("here"),ol=r("."),Qr=i(),At=n("p"),nl=r("This model was contributed by "),Lo=n("a"),sl=r("patrickvonplaten"),al=r("."),Hr=i(),ct=n("h2"),Nt=n("a"),Ks=n("span"),m(Do.$$.fragment),rl=i(),Vs=n("span"),dl=r("LEDConfig"),Ur=i(),P=n("div"),m(zo.$$.fragment),il=i(),ht=n("p"),ll=r("This is the configuration class to store the configuration of a "),os=n("a"),cl=r("LEDModel"),hl=r(`. It is used to instantiate an LED model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LED `),Fo=n("a"),ul=r("allenai/led-base-16384"),pl=r(" architecture."),fl=i(),ut=n("p"),ml=r("Configuration objects inherit from "),ns=n("a"),_l=r("PretrainedConfig"),gl=r(` and can be used to control the model outputs. 
Read the documentation from `),ss=n("a"),vl=r("PretrainedConfig"),Tl=r(" for more information."),kl=i(),Xs=n("p"),bl=r("Example:"),wl=i(),m($o.$$.fragment),yl=i(),Js=n("blockquote"),Zs=n("blockquote"),Ys=n("blockquote"),ea=n("p"),El=r("from transformers import LEDModel, LEDConfig"),ql=i(),ta=n("blockquote"),oa=n("blockquote"),xo=n("blockquote"),It=n("h1"),jt=n("a"),na=n("span"),m(Mo.$$.fragment),Ll=i(),sa=n("span"),Dl=r("Initializing a LED allenai/led-base-16384 style configuration"),zl=i(),aa=n("p"),Fl=r("configuration = LEDConfig()"),$l=i(),ra=n("blockquote"),da=n("blockquote"),So=n("blockquote"),Bt=n("h1"),Gt=n("a"),ia=n("span"),m(Co.$$.fragment),xl=i(),la=n("span"),Ml=r("Initializing a model from the allenai/led-base-16384 style configuration"),Sl=i(),ca=n("p"),Cl=r("model = LEDModel(configuration)"),Ol=i(),ha=n("blockquote"),ua=n("blockquote"),Oo=n("blockquote"),Qt=n("h1"),Ht=n("a"),pa=n("span"),m(Po.$$.fragment),Pl=i(),fa=n("span"),Al=r("Accessing the model configuration"),Nl=i(),ma=n("p"),Il=r("configuration = model.config"),Rr=i(),pt=n("h2"),Ut=n("a"),_a=n("span"),m(Ao.$$.fragment),jl=i(),ga=n("span"),Bl=r("LEDTokenizer"),Wr=i(),I=n("div"),m(No.$$.fragment),Gl=i(),va=n("p"),Ql=r("Construct a LED tokenizer."),Hl=i(),Rt=n("p"),as=n("a"),Ul=r("LEDTokenizer"),Rl=r(" is identical to "),rs=n("a"),Wl=r("BartTokenizer"),Kl=r(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Vl=i(),Io=n("p"),Xl=r("Refer to superclass "),ds=n("a"),Jl=r("BartTokenizer"),Zl=r(` for usage examples and documentation concerning parameters.`),Yl=i(),We=n("div"),m(jo.$$.fragment),ec=i(),Ta=n("p"),tc=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format:`),oc=i(),Bo=n("ul"),is=n("li"),nc=r("single sequence: "),ka=n("code"),sc=r("<s> X </s>"),ac=i(),ls=n("li"),rc=r("pair of sequences: "),ba=n("code"),dc=r("<s> A </s></s> B </s>"),ic=i(),Wt=n("div"),m(Go.$$.fragment),lc=i(),Qo=n("p"),cc=r(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),wa=n("code"),hc=r("prepare_for_model"),uc=r(" method."),pc=i(),Kt=n("div"),m(Ho.$$.fragment),fc=i(),ya=n("p"),mc=r(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),_c=i(),Ea=n("div"),Kr=i(),ft=n("h2"),Vt=n("a"),qa=n("span"),m(Uo.$$.fragment),gc=i(),La=n("span"),vc=r("LEDTokenizerFast"),Vr=i(),Fe=n("div"),m(Ro.$$.fragment),Tc=i(),Wo=n("p"),kc=r("Construct a \u201Cfast\u201D LED tokenizer (backed by HuggingFace\u2019s "),Da=n("em"),bc=r("tokenizers"),wc=r(" library)."),yc=i(),Xt=n("p"),cs=n("a"),Ec=r("LEDTokenizerFast"),qc=r(" is identical to "),hs=n("a"),Lc=r("BartTokenizerFast"),Dc=r(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),zc=i(),Ko=n("p"),Fc=r("Refer to superclass "),us=n("a"),$c=r("BartTokenizerFast"),xc=r(` for usage examples and documentation concerning parameters.`),Xr=i(),mt=n("h2"),Jt=n("a"),za=n("span"),m(Vo.$$.fragment),Mc=i(),Fa=n("span"),Sc=r("LED specific outputs"),Jr=i(),_t=n("div"),m(Xo.$$.fragment),Cc=i(),$a=n("p"),Oc=r("Base class for LEDEncoder\u2019s outputs, with potential hidden states, local and global attentions."),Zr=i(),gt=n("div"),m(Jo.$$.fragment),Pc=i(),xa=n("p"),Ac=r(`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Yr=i(),vt=n("div"),m(Zo.$$.fragment),Nc=i(),Ma=n("p"),Ic=r("Base class for sequence-to-sequence language models outputs."),ed=i(),Tt=n("div"),m(Yo.$$.fragment),jc=i(),Sa=n("p"),Bc=r("Base class for outputs of sequence-to-sequence sentence classification models."),td=i(),kt=n("div"),m(en.$$.fragment),Gc=i(),Ca=n("p"),Qc=r("Base class for outputs of sequence-to-sequence question answering models."),od=i(),bt=n("div"),m(tn.$$.fragment),Hc=i(),Oa=n("p"),Uc=r("Base class for Longformer\u2019s outputs, with potential hidden states, local and global attentions."),nd=i(),wt=n("div"),m(on.$$.fragment),Rc=i(),Pa=n("p"),Wc=r(`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),sd=i(),yt=n("div"),m(nn.$$.fragment),Kc=i(),Aa=n("p"),Vc=r("Base class for sequence-to-sequence language models outputs."),ad=i(),Et=n("h2"),Zt=n("a"),Na=n("span"),m(sn.$$.fragment),Xc=i(),Ia=n("span"),Jc=r("LEDModel"),rd=i(),$e=n("div"),m(an.$$.fragment),Zc=i(),rn=n("p"),Yc=r(`The bare LED Model outputting raw hidden-states without any specific head on top. This model inherits from `),ps=n("a"),eh=r("PreTrainedModel"),th=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oh=i(),dn=n("p"),nh=r("This model is also a PyTorch "),ln=n("a"),sh=r("torch.nn.Module"),ah=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),rh=i(),Te=n("div"),m(cn.$$.fragment),dh=i(),qt=n("p"),ih=r("The "),fs=n("a"),lh=r("LEDModel"),ch=r(" forward method, overrides the "),ja=n("code"),hh=r("__call__"),uh=r(" special method."),ph=i(),m(Yt.$$.fragment),fh=i(),Ba=n("p"),mh=r("Example:"),_h=i(),m(hn.$$.fragment),dd=i(),Lt=n("h2"),eo=n("a"),Ga=n("span"),m(un.$$.fragment),gh=i(),Qa=n("span"),vh=r("LEDForConditionalGeneration"),id=i(),xe=n("div"),m(pn.$$.fragment),Th=i(),fn=n("p"),kh=r(`The LED Model with a language modeling head. Can be used for summarization. This model inherits from `),ms=n("a"),bh=r("PreTrainedModel"),wh=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yh=i(),mn=n("p"),Eh=r("This model is also a PyTorch "),_n=n("a"),qh=r("torch.nn.Module"),Lh=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dh=i(),M=n("div"),m(gn.$$.fragment),zh=i(),Dt=n("p"),Fh=r("The "),_s=n("a"),$h=r("LEDForConditionalGeneration"),xh=r(" forward method, overrides the "),Ha=n("code"),Mh=r("__call__"),Sh=r(" special method."),Ch=i(),m(to.$$.fragment),Oh=i(),Ua=n("p"),Ph=r("Conditional generation example:"),Ah=i(),m(vn.$$.fragment),Nh=i(),Ra=n("p"),Ih=r("Summarization example::"),jh=i(),Wa=n("blockquote"),Ka=n("blockquote"),Va=n("blockquote"),Xa=n("p"),Bh=r(`import torch from transformers import LEDTokenizer, LEDForConditionalGeneration`),Gh=i(),Ja=n("blockquote"),Za=n("blockquote"),Ya=n("blockquote"),er=n("p"),Qh=r(`model = LEDForConditionalGeneration.from_pretrained(\u2018allenai/led-large-16384-arxiv\u2019) tokenizer = LEDTokenizer.from_pretrained(\u2018allenai/led-large-16384-arxiv\u2019)`),Hh=i(),tr=n("blockquote"),or=n("blockquote"),nr=n("blockquote"),sr=n("p"),Uh=r(`ARTICLE_TO_SUMMARIZE = '''Transformers (Vaswani et al., 2017) have achieved state-of-the-art \u2026 results in a wide range of natural language tasks including generative \u2026 language modeling (Dai et al., 2019; Radford et al., 2019) and discriminative \u2026 language understanding (Devlin et al., 2019). This success is partly due to \u2026 the self-attention component which enables the network to capture contextual \u2026 information from the entire sequence. While powerful, the memory and computational \u2026 requirements of self-attention grow quadratically with sequence length, making \u2026 it infeasible (or very expensive) to process long sequences. \u2026 \u2026 To address this limitation, we present Longformer, a modified Transformer \u2026 architecture with a self-attention operation that scales linearly with the \u2026 sequence length, making it versatile for processing long documents (Fig 1). This \u2026 is an advantage for natural language tasks such as long document classification, \u2026 question answering (QA), and coreference resolution, where existing approaches \u2026 partition or shorten the long context into smaller sequences that fall within the \u2026 typical 512 token limit of BERT-style pretrained models. Such partitioning could \u2026 potentially result in loss of important cross-partition information, and to \u2026 mitigate this problem, existing methods often rely on complex architectures to \u2026 address such interactions. On the other hand, our proposed Longformer is able to \u2026 build contextual representations of the entire context using multiple layers of \u2026 attention, reducing the need for task-specific architectures.''' inputs = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors=\u2018pt\u2019)`),Rh=i(),ar=n("blockquote"),rr=n("blockquote"),Tn=n("blockquote"),oo=n("h1"),no=n("a"),dr=n("span"),m(kn.$$.fragment),Wh=i(),ir=n("span"),Kh=r("Global attention on the first token (cf. Beltagy et al. 
2020)"),Vh=i(),lr=n("p"),Xh=r(`global_attention_mask = torch.zeros_like(inputs) global_attention_mask[:, 0] = 1`),Jh=i(),cr=n("blockquote"),hr=n("blockquote"),bn=n("blockquote"),so=n("h1"),ao=n("a"),ur=n("span"),m(wn.$$.fragment),Zh=i(),pr=n("span"),Yh=r("Generate Summary"),eu=i(),fr=n("p"),tu=r(`summary_ids = model.generate(inputs, global_attention_mask=global_attention_mask, \u2026 num_beams=3, max_length=32, early_stopping=True) print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))`),ld=i(),zt=n("h2"),ro=n("a"),mr=n("span"),m(yn.$$.fragment),ou=i(),_r=n("span"),nu=r("LEDForSequenceClassification"),cd=i(),he=n("div"),m(En.$$.fragment),su=i(),gr=n("p"),au=r(`LED model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),ru=i(),qn=n("p"),du=r("This model inherits from "),gs=n("a"),iu=r("PreTrainedModel"),lu=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cu=i(),Ln=n("p"),hu=r("This model is also a PyTorch "),Dn=n("a"),uu=r("torch.nn.Module"),pu=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fu=i(),U=n("div"),m(zn.$$.fragment),mu=i(),Ft=n("p"),_u=r("The "),vs=n("a"),gu=r("LEDForSequenceClassification"),vu=r(" forward method, overrides the "),vr=n("code"),Tu=r("__call__"),ku=r(" special method."),bu=i(),m(io.$$.fragment),wu=i(),Tr=n("p"),yu=r("Example of single-label classification:"),Eu=i(),m(Fn.$$.fragment),qu=i(),kr=n("p"),Lu=r("Example of multi-label classification:"),Du=i(),m($n.$$.fragment),hd=i(),$t=n("h2"),lo=n("a"),br=n("span"),m(xn.$$.fragment),zu=i(),wr=n("span"),Fu=r("LEDForQuestionAnswering"),ud=i(),ue=n("div"),m(Mn.$$.fragment),$u=i(),xt=n("p"),xu=r(`LED Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),yr=n("code"),Mu=r("span start logits"),Su=r(" and "),Er=n("code"),Cu=r("span end logits"),Ou=r(")."),Pu=i(),Sn=n("p"),Au=r("This model inherits from "),Ts=n("a"),Nu=r("PreTrainedModel"),Iu=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ju=i(),Cn=n("p"),Bu=r("This model is also a PyTorch "),On=n("a"),Gu=r("torch.nn.Module"),Qu=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hu=i(),ke=n("div"),m(Pn.$$.fragment),Uu=i(),Mt=n("p"),Ru=r("The "),ks=n("a"),Wu=r("LEDForQuestionAnswering"),Ku=r(" forward method, overrides the "),qr=n("code"),Vu=r("__call__"),Xu=r(" special method."),Ju=i(),m(co.$$.fragment),Zu=i(),Lr=n("p"),Yu=r("Example:"),ep=i(),m(An.$$.fragment),pd=i(),St=n("h2"),ho=n("a"),Dr=n("span"),m(Nn.$$.fragment),tp=i(),zr=n("span"),op=r("TFLEDModel"),fd=i(),pe=n("div"),m(In.$$.fragment),np=i(),jn=n("p"),sp=r(`The bare LED Model outputting raw hidden-states without any specific head on top. This model inherits from `),bs=n("a"),ap=r("TFPreTrainedModel"),rp=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dp=i(),Bn=n("p"),ip=r("This model is also a "),Gn=n("a"),lp=r("tf.keras.Model"),cp=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hp=i(),m(uo.$$.fragment),up=i(),be=n("div"),m(Qn.$$.fragment),pp=i(),Ct=n("p"),fp=r("The "),ws=n("a"),mp=r("TFLEDModel"),_p=r(" forward method, overrides the "),Fr=n("code"),gp=r("__call__"),vp=r(" special method."),Tp=i(),m(po.$$.fragment),kp=i(),$r=n("p"),bp=r("Example:"),wp=i(),m(Hn.$$.fragment),md=i(),Ot=n("h2"),fo=n("a"),xr=n("span"),m(Un.$$.fragment),yp=i(),Mr=n("span"),Ep=r("TFLEDForConditionalGeneration"),_d=i(),fe=n("div"),m(Rn.$$.fragment),qp=i(),Wn=n("p"),Lp=r(`The LED Model with a language modeling head. Can be used for summarization. This model inherits from `),ys=n("a"),Dp=r("TFPreTrainedModel"),zp=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fp=i(),Kn=n("p"),$p=r("This model is also a "),Vn=n("a"),xp=r("tf.keras.Model"),Mp=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sp=i(),m(mo.$$.fragment),Cp=i(),we=n("div"),m(Xn.$$.fragment),Op=i(),Pt=n("p"),Pp=r("The "),Es=n("a"),Ap=r("TFLEDForConditionalGeneration"),Np=r(" forward method, overrides the "),Sr=n("code"),Ip=r("__call__"),jp=r(" special method."),Bp=i(),m(_o.$$.fragment),Gp=i(),Cr=n("p"),Qp=r("Examples:"),Hp=i(),m(Jn.$$.fragment),this.h()},l(o){const u=tg('[data-svelte="svelte-1phssyn"]',document.head);p=s(u,"META",{name:!0,content:!0}),u.forEach(t),L=l(o),b=s(o,"H1",{class:!0});var Zn=a(b);w=s(Zn,"A",{id:!0,class:!0,href:!0});var Or=a(w);q=s(Or,"SPAN",{});var Pr=a(q);_(E.$$.fragment,Pr),Pr.forEach(t),Or.forEach(t),y=l(Zn),D=s(Zn,"SPAN",{});var Ar=a(D);Ae=d(Ar,"LED"),Ar.forEach(t),Zn.forEach(t),_e=l(o),z=s(o,"H2",{class:!0});var Yn=a(z);ee=s(Yn,"A",{id:!0,class:!0,href:!0});var Nr=a(ee);W=s(Nr,"SPAN",{});var Ir=a(W);_(ne.$$.fragment,Ir),Ir.forEach(t),Nr.forEach(t),Ne=l(Yn),K=s(Yn,"SPAN",{});var jr=a(K);Ie=d(jr,"Overview"),jr.forEach(t),Yn.forEach(t),Me=l(o),Q=s(o,"P",{});var vd=a(Q);V=d(vd,"The LED model was proposed in "),se=s(vd,"A",{href:!0,rel:!0});var Wp=a(se);Le=d(Wp,"Longformer: The Long-Document Transformer"),Wp.forEach(t),C=d(vd,` by Iz Beltagy, Matthew E. Peters, Arman Cohan.`),vd.forEach(t),A=l(o),ge=s(o,"P",{});var Kp=a(ge);ae=d(Kp,"The abstract from the paper is the following:"),Kp.forEach(t),Se=l(o),ve=s(o,"P",{});var Vp=a(ve);X=s(Vp,"EM",{});var Xp=a(X);je=d(Xp,`Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer\u2019s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. 
In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), a Longformer variant for supporting long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization dataset.`),Xp.forEach(t),Vp.forEach(t),Ce=l(o),O=s(o,"P",{});var Jp=a(O);Be=d(Jp,"Tips:"),Jp.forEach(t),te=l(o),$=s(o,"UL",{});var ye=a($);x=s(ye,"LI",{});var me=a(x);H=s(me,"A",{href:!0});var Zp=a(H);Ge=d(Zp,"LEDForConditionalGeneration"),Zp.forEach(t),Qe=d(me,` is an extension of `),j=s(me,"A",{href:!0});var Yp=a(j);He=d(Yp,"BartForConditionalGeneration"),Yp.forEach(t),re=d(me," exchanging the traditional "),De=s(me,"EM",{});var ef=a(De);f=d(ef,"self-attention"),ef.forEach(t),F=d(me,` layer with `),de=s(me,"EM",{});var tf=a(de);nt=d(tf,"Longformer"),tf.forEach(t),st=d(me,"\u2019s "),G=s(me,"EM",{});var of=a(G);at=d(of,"chunked self-attention"),of.forEach(t),rt=d(me," layer. "),Oe=s(me,"A",{href:!0});var nf=a(Oe);J=d(nf,"LEDTokenizer"),nf.forEach(t),ie=d(me,` is an alias of `),Pe=s(me,"A",{href:!0});var sf=a(Pe);dt=d(sf,"BartTokenizer"),sf.forEach(t),le=d(me,"."),me.forEach(t),it=l(ye),ce=s(ye,"LI",{});var qs=a(ce);Ue=d(qs,"LED works very well on long-range "),Re=s(qs,"EM",{});var af=a(Re);mi=d(af,"sequence-to-sequence"),af.forEach(t),_i=d(qs," tasks where the "),As=s(qs,"CODE",{});var rf=a(As);gi=d(rf,"input_ids"),rf.forEach(t),vi=d(qs,` largely exceed a length of 1024 tokens.`),qs.forEach(t),Ti=l(ye),ze=s(ye,"LI",{});var Ke=a(ze);ki=d(Ke,"LED pads the "),Ns=s(Ke,"CODE",{});var df=a(Ns);bi=d(df,"input_ids"),df.forEach(t),wi=d(Ke," to be a multiple of "),Is=s(Ke,"CODE",{});var lf=a(Is);yi=d(lf,"config.attention_window"),lf.forEach(t),Ei=d(Ke,` if required. Therefore a small speed-up is gained, when `),es=s(Ke,"A",{href:!0});var cf=a(es);qi=d(cf,"LEDTokenizer"),cf.forEach(t),Li=d(Ke," is used with the "),js=s(Ke,"CODE",{});var hf=a(js);Di=d(hf,"pad_to_multiple_of"),hf.forEach(t),zi=d(Ke," argument."),Ke.forEach(t),Fi=l(ye),Z=s(ye,"LI",{});var Ee=a(Z);$i=d(Ee,"LED makes use of "),Bs=s(Ee,"EM",{});var uf=a(Bs);xi=d(uf,"global attention"),uf.forEach(t),Mi=d(Ee," by means of the "),Gs=s(Ee,"CODE",{});var pf=a(Gs);Si=d(pf,"global_attention_mask"),pf.forEach(t),Ci=d(Ee,` (see `),ts=s(Ee,"A",{href:!0});var ff=a(ts);Oi=d(ff,"LongformerModel"),ff.forEach(t),Pi=d(Ee,"). For summarization, it is advised to put "),Qs=s(Ee,"EM",{});var mf=a(Qs);Ai=d(mf,"global attention"),mf.forEach(t),Ni=d(Ee,` only on the first `),Hs=s(Ee,"CODE",{});var _f=a(Hs);Ii=d(_f,"<s>"),_f.forEach(t),ji=d(Ee," token. 
For question answering, it is advised to put "),Us=s(Ee,"EM",{});var gf=a(Us);Bi=d(gf,"global attention"),gf.forEach(t),Gi=d(Ee," on all tokens of the question."),Ee.forEach(t),Qi=l(ye),lt=s(ye,"LI",{});var Ls=a(lt);Hi=d(Ls,"To fine-tune LED on all 16384, it is necessary to enable "),Rs=s(Ls,"EM",{});var vf=a(Rs);Ui=d(vf,"gradient checkpointing"),vf.forEach(t),Ri=d(Ls,` by executing `),Ws=s(Ls,"CODE",{});var Tf=a(Ws);Wi=d(Tf,"model.gradient_checkpointing_enable()"),Tf.forEach(t),Ki=d(Ls,"."),Ls.forEach(t),Vi=l(ye),wo=s(ye,"LI",{});var Td=a(wo);Xi=d(Td,"A notebook showing how to evaluate LED, can be accessed "),yo=s(Td,"A",{href:!0,rel:!0});var kf=a(yo);Ji=d(kf,"here"),kf.forEach(t),Zi=d(Td,"."),Td.forEach(t),Yi=l(ye),Eo=s(ye,"LI",{});var kd=a(Eo);el=d(kd,"A notebook showing how to fine-tune LED, can be accessed "),qo=s(kd,"A",{href:!0,rel:!0});var bf=a(qo);tl=d(bf,"here"),bf.forEach(t),ol=d(kd,"."),kd.forEach(t),ye.forEach(t),Qr=l(o),At=s(o,"P",{});var bd=a(At);nl=d(bd,"This model was contributed by "),Lo=s(bd,"A",{href:!0,rel:!0});var wf=a(Lo);sl=d(wf,"patrickvonplaten"),wf.forEach(t),al=d(bd,"."),bd.forEach(t),Hr=l(o),ct=s(o,"H2",{class:!0});var wd=a(ct);Nt=s(wd,"A",{id:!0,class:!0,href:!0});var yf=a(Nt);Ks=s(yf,"SPAN",{});var Ef=a(Ks);_(Do.$$.fragment,Ef),Ef.forEach(t),yf.forEach(t),rl=l(wd),Vs=s(wd,"SPAN",{});var qf=a(Vs);dl=d(qf,"LEDConfig"),qf.forEach(t),wd.forEach(t),Ur=l(o),P=s(o,"DIV",{class:!0});var R=a(P);_(zo.$$.fragment,R),il=l(R),ht=s(R,"P",{});var Ds=a(ht);ll=d(Ds,"This is the configuration class to store the configuration of a "),os=s(Ds,"A",{href:!0});var Lf=a(os);cl=d(Lf,"LEDModel"),Lf.forEach(t),hl=d(Ds,`. It is used to instantiate an LED model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LED `),Fo=s(Ds,"A",{href:!0,rel:!0});var Df=a(Fo);ul=d(Df,"allenai/led-base-16384"),Df.forEach(t),pl=d(Ds," architecture."),Ds.forEach(t),fl=l(R),ut=s(R,"P",{});var zs=a(ut);ml=d(zs,"Configuration objects inherit from "),ns=s(zs,"A",{href:!0});var zf=a(ns);_l=d(zf,"PretrainedConfig"),zf.forEach(t),gl=d(zs,` and can be used to control the model outputs. 
Read the documentation from `),ss=s(zs,"A",{href:!0});var Ff=a(ss);vl=d(Ff,"PretrainedConfig"),Ff.forEach(t),Tl=d(zs," for more information."),zs.forEach(t),kl=l(R),Xs=s(R,"P",{});var $f=a(Xs);bl=d($f,"Example:"),$f.forEach(t),wl=l(R),_($o.$$.fragment,R),yl=l(R),Js=s(R,"BLOCKQUOTE",{});var xf=a(Js);Zs=s(xf,"BLOCKQUOTE",{});var Mf=a(Zs);Ys=s(Mf,"BLOCKQUOTE",{});var Sf=a(Ys);ea=s(Sf,"P",{});var Cf=a(ea);El=d(Cf,"from transformers import LEDModel, LEDConfig"),Cf.forEach(t),Sf.forEach(t),Mf.forEach(t),xf.forEach(t),ql=l(R),ta=s(R,"BLOCKQUOTE",{});var Of=a(ta);oa=s(Of,"BLOCKQUOTE",{});var Pf=a(oa);xo=s(Pf,"BLOCKQUOTE",{});var yd=a(xo);It=s(yd,"H1",{class:!0});var Ed=a(It);jt=s(Ed,"A",{id:!0,class:!0,href:!0});var Af=a(jt);na=s(Af,"SPAN",{});var Nf=a(na);_(Mo.$$.fragment,Nf),Nf.forEach(t),Af.forEach(t),Ll=l(Ed),sa=s(Ed,"SPAN",{});var If=a(sa);Dl=d(If,"Initializing a LED allenai/led-base-16384 style configuration"),If.forEach(t),Ed.forEach(t),zl=l(yd),aa=s(yd,"P",{});var jf=a(aa);Fl=d(jf,"configuration = LEDConfig()"),jf.forEach(t),yd.forEach(t),Pf.forEach(t),Of.forEach(t),$l=l(R),ra=s(R,"BLOCKQUOTE",{});var Bf=a(ra);da=s(Bf,"BLOCKQUOTE",{});var Gf=a(da);So=s(Gf,"BLOCKQUOTE",{});var qd=a(So);Bt=s(qd,"H1",{class:!0});var Ld=a(Bt);Gt=s(Ld,"A",{id:!0,class:!0,href:!0});var Qf=a(Gt);ia=s(Qf,"SPAN",{});var Hf=a(ia);_(Co.$$.fragment,Hf),Hf.forEach(t),Qf.forEach(t),xl=l(Ld),la=s(Ld,"SPAN",{});var Uf=a(la);Ml=d(Uf,"Initializing a model from the allenai/led-base-16384 style configuration"),Uf.forEach(t),Ld.forEach(t),Sl=l(qd),ca=s(qd,"P",{});var Rf=a(ca);Cl=d(Rf,"model = LEDModel(configuration)"),Rf.forEach(t),qd.forEach(t),Gf.forEach(t),Bf.forEach(t),Ol=l(R),ha=s(R,"BLOCKQUOTE",{});var Wf=a(ha);ua=s(Wf,"BLOCKQUOTE",{});var Kf=a(ua);Oo=s(Kf,"BLOCKQUOTE",{});var Dd=a(Oo);Qt=s(Dd,"H1",{class:!0});var zd=a(Qt);Ht=s(zd,"A",{id:!0,class:!0,href:!0});var Vf=a(Ht);pa=s(Vf,"SPAN",{});var Xf=a(pa);_(Po.$$.fragment,Xf),Xf.forEach(t),Vf.forEach(t),Pl=l(zd),fa=s(zd,"SPAN",{});var Jf=a(fa);Al=d(Jf,"Accessing the model configuration"),Jf.forEach(t),zd.forEach(t),Nl=l(Dd),ma=s(Dd,"P",{});var Zf=a(ma);Il=d(Zf,"configuration = model.config"),Zf.forEach(t),Dd.forEach(t),Kf.forEach(t),Wf.forEach(t),R.forEach(t),Rr=l(o),pt=s(o,"H2",{class:!0});var Fd=a(pt);Ut=s(Fd,"A",{id:!0,class:!0,href:!0});var Yf=a(Ut);_a=s(Yf,"SPAN",{});var em=a(_a);_(Ao.$$.fragment,em),em.forEach(t),Yf.forEach(t),jl=l(Fd),ga=s(Fd,"SPAN",{});var tm=a(ga);Bl=d(tm,"LEDTokenizer"),tm.forEach(t),Fd.forEach(t),Wr=l(o),I=s(o,"DIV",{class:!0});var oe=a(I);_(No.$$.fragment,oe),Gl=l(oe),va=s(oe,"P",{});var om=a(va);Ql=d(om,"Construct a LED tokenizer."),om.forEach(t),Hl=l(oe),Rt=s(oe,"P",{});var Br=a(Rt);as=s(Br,"A",{href:!0});var nm=a(as);Ul=d(nm,"LEDTokenizer"),nm.forEach(t),Rl=d(Br," is identical to "),rs=s(Br,"A",{href:!0});var sm=a(rs);Wl=d(sm,"BartTokenizer"),sm.forEach(t),Kl=d(Br,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Br.forEach(t),Vl=l(oe),Io=s(oe,"P",{});var $d=a(Io);Xl=d($d,"Refer to superclass "),ds=s($d,"A",{href:!0});var am=a(ds);Jl=d(am,"BartTokenizer"),am.forEach(t),Zl=d($d,` for usage examples and documentation concerning parameters.`),$d.forEach(t),Yl=l(oe),We=s(oe,"DIV",{class:!0});var Fs=a(We);_(jo.$$.fragment,Fs),ec=l(Fs),Ta=s(Fs,"P",{});var rm=a(Ta);tc=d(rm,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A RoBERTa sequence has the following format:`),rm.forEach(t),oc=l(Fs),Bo=s(Fs,"UL",{});var xd=a(Bo);is=s(xd,"LI",{});var Up=a(is);nc=d(Up,"single sequence: "),ka=s(Up,"CODE",{});var dm=a(ka);sc=d(dm,"<s> X </s>"),dm.forEach(t),Up.forEach(t),ac=l(xd),ls=s(xd,"LI",{});var Rp=a(ls);rc=d(Rp,"pair of sequences: "),ba=s(Rp,"CODE",{});var im=a(ba);dc=d(im,"<s> A </s></s> B </s>"),im.forEach(t),Rp.forEach(t),xd.forEach(t),Fs.forEach(t),ic=l(oe),Wt=s(oe,"DIV",{class:!0});var Md=a(Wt);_(Go.$$.fragment,Md),lc=l(Md),Qo=s(Md,"P",{});var Sd=a(Qo);cc=d(Sd,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),wa=s(Sd,"CODE",{});var lm=a(wa);hc=d(lm,"prepare_for_model"),lm.forEach(t),uc=d(Sd," method."),Sd.forEach(t),Md.forEach(t),pc=l(oe),Kt=s(oe,"DIV",{class:!0});var Cd=a(Kt);_(Ho.$$.fragment,Cd),fc=l(Cd),ya=s(Cd,"P",{});var cm=a(ya);mc=d(cm,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),cm.forEach(t),Cd.forEach(t),_c=l(oe),Ea=s(oe,"DIV",{class:!0}),a(Ea).forEach(t),oe.forEach(t),Kr=l(o),ft=s(o,"H2",{class:!0});var Od=a(ft);Vt=s(Od,"A",{id:!0,class:!0,href:!0});var hm=a(Vt);qa=s(hm,"SPAN",{});var um=a(qa);_(Uo.$$.fragment,um),um.forEach(t),hm.forEach(t),gc=l(Od),La=s(Od,"SPAN",{});var pm=a(La);vc=d(pm,"LEDTokenizerFast"),pm.forEach(t),Od.forEach(t),Vr=l(o),Fe=s(o,"DIV",{class:!0});var go=a(Fe);_(Ro.$$.fragment,go),Tc=l(go),Wo=s(go,"P",{});var Pd=a(Wo);kc=d(Pd,"Construct a \u201Cfast\u201D LED tokenizer (backed by HuggingFace\u2019s "),Da=s(Pd,"EM",{});var fm=a(Da);bc=d(fm,"tokenizers"),fm.forEach(t),wc=d(Pd," library)."),Pd.forEach(t),yc=l(go),Xt=s(go,"P",{});var Gr=a(Xt);cs=s(Gr,"A",{href:!0});var mm=a(cs);Ec=d(mm,"LEDTokenizerFast"),mm.forEach(t),qc=d(Gr," is identical to "),hs=s(Gr,"A",{href:!0});var _m=a(hs);Lc=d(_m,"BartTokenizerFast"),_m.forEach(t),Dc=d(Gr,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Gr.forEach(t),zc=l(go),Ko=s(go,"P",{});var Ad=a(Ko);Fc=d(Ad,"Refer to superclass "),us=s(Ad,"A",{href:!0});var gm=a(us);$c=d(gm,"BartTokenizerFast"),gm.forEach(t),xc=d(Ad,` for usage examples and documentation concerning parameters.`),Ad.forEach(t),go.forEach(t),Xr=l(o),mt=s(o,"H2",{class:!0});var Nd=a(mt);Jt=s(Nd,"A",{id:!0,class:!0,href:!0});var vm=a(Jt);za=s(vm,"SPAN",{});var Tm=a(za);_(Vo.$$.fragment,Tm),Tm.forEach(t),vm.forEach(t),Mc=l(Nd),Fa=s(Nd,"SPAN",{});var km=a(Fa);Sc=d(km,"LED specific outputs"),km.forEach(t),Nd.forEach(t),Jr=l(o),_t=s(o,"DIV",{class:!0});var Id=a(_t);_(Xo.$$.fragment,Id),Cc=l(Id),$a=s(Id,"P",{});var bm=a($a);Oc=d(bm,"Base class for LEDEncoder\u2019s outputs, with potential hidden states, local and global attentions."),bm.forEach(t),Id.forEach(t),Zr=l(o),gt=s(o,"DIV",{class:!0});var jd=a(gt);_(Jo.$$.fragment,jd),Pc=l(jd),xa=s(jd,"P",{});var wm=a(xa);Ac=d(wm,`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),wm.forEach(t),jd.forEach(t),Yr=l(o),vt=s(o,"DIV",{class:!0});var Bd=a(vt);_(Zo.$$.fragment,Bd),Nc=l(Bd),Ma=s(Bd,"P",{});var ym=a(Ma);Ic=d(ym,"Base class for sequence-to-sequence language models outputs."),ym.forEach(t),Bd.forEach(t),ed=l(o),Tt=s(o,"DIV",{class:!0});var Gd=a(Tt);_(Yo.$$.fragment,Gd),jc=l(Gd),Sa=s(Gd,"P",{});var Em=a(Sa);Bc=d(Em,"Base class for outputs of sequence-to-sequence sentence classification 
models."),Em.forEach(t),Gd.forEach(t),td=l(o),kt=s(o,"DIV",{class:!0});var Qd=a(kt);_(en.$$.fragment,Qd),Gc=l(Qd),Ca=s(Qd,"P",{});var qm=a(Ca);Qc=d(qm,"Base class for outputs of sequence-to-sequence question answering models."),qm.forEach(t),Qd.forEach(t),od=l(o),bt=s(o,"DIV",{class:!0});var Hd=a(bt);_(tn.$$.fragment,Hd),Hc=l(Hd),Oa=s(Hd,"P",{});var Lm=a(Oa);Uc=d(Lm,"Base class for Longformer\u2019s outputs, with potential hidden states, local and global attentions."),Lm.forEach(t),Hd.forEach(t),nd=l(o),wt=s(o,"DIV",{class:!0});var Ud=a(wt);_(on.$$.fragment,Ud),Rc=l(Ud),Pa=s(Ud,"P",{});var Dm=a(Pa);Wc=d(Dm,`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Dm.forEach(t),Ud.forEach(t),sd=l(o),yt=s(o,"DIV",{class:!0});var Rd=a(yt);_(nn.$$.fragment,Rd),Kc=l(Rd),Aa=s(Rd,"P",{});var zm=a(Aa);Vc=d(zm,"Base class for sequence-to-sequence language models outputs."),zm.forEach(t),Rd.forEach(t),ad=l(o),Et=s(o,"H2",{class:!0});var Wd=a(Et);Zt=s(Wd,"A",{id:!0,class:!0,href:!0});var Fm=a(Zt);Na=s(Fm,"SPAN",{});var $m=a(Na);_(sn.$$.fragment,$m),$m.forEach(t),Fm.forEach(t),Xc=l(Wd),Ia=s(Wd,"SPAN",{});var xm=a(Ia);Jc=d(xm,"LEDModel"),xm.forEach(t),Wd.forEach(t),rd=l(o),$e=s(o,"DIV",{class:!0});var vo=a($e);_(an.$$.fragment,vo),Zc=l(vo),rn=s(vo,"P",{});var Kd=a(rn);Yc=d(Kd,`The bare LED Model outputting raw hidden-states without any specific head on top. This model inherits from `),ps=s(Kd,"A",{href:!0});var Mm=a(ps);eh=d(Mm,"PreTrainedModel"),Mm.forEach(t),th=d(Kd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kd.forEach(t),oh=l(vo),dn=s(vo,"P",{});var Vd=a(dn);nh=d(Vd,"This model is also a PyTorch "),ln=s(Vd,"A",{href:!0,rel:!0});var Sm=a(ln);sh=d(Sm,"torch.nn.Module"),Sm.forEach(t),ah=d(Vd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vd.forEach(t),rh=l(vo),Te=s(vo,"DIV",{class:!0});var Ve=a(Te);_(cn.$$.fragment,Ve),dh=l(Ve),qt=s(Ve,"P",{});var $s=a(qt);ih=d($s,"The "),fs=s($s,"A",{href:!0});var Cm=a(fs);lh=d(Cm,"LEDModel"),Cm.forEach(t),ch=d($s," forward method, overrides the "),ja=s($s,"CODE",{});var Om=a(ja);hh=d(Om,"__call__"),Om.forEach(t),uh=d($s," special method."),$s.forEach(t),ph=l(Ve),_(Yt.$$.fragment,Ve),fh=l(Ve),Ba=s(Ve,"P",{});var Pm=a(Ba);mh=d(Pm,"Example:"),Pm.forEach(t),_h=l(Ve),_(hn.$$.fragment,Ve),Ve.forEach(t),vo.forEach(t),dd=l(o),Lt=s(o,"H2",{class:!0});var Xd=a(Lt);eo=s(Xd,"A",{id:!0,class:!0,href:!0});var Am=a(eo);Ga=s(Am,"SPAN",{});var Nm=a(Ga);_(un.$$.fragment,Nm),Nm.forEach(t),Am.forEach(t),gh=l(Xd),Qa=s(Xd,"SPAN",{});var Im=a(Qa);vh=d(Im,"LEDForConditionalGeneration"),Im.forEach(t),Xd.forEach(t),id=l(o),xe=s(o,"DIV",{class:!0});var To=a(xe);_(pn.$$.fragment,To),Th=l(To),fn=s(To,"P",{});var Jd=a(fn);kh=d(Jd,`The LED Model with a language modeling head. Can be used for summarization. This model inherits from `),ms=s(Jd,"A",{href:!0});var jm=a(ms);bh=d(jm,"PreTrainedModel"),jm.forEach(t),wh=d(Jd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jd.forEach(t),yh=l(To),mn=s(To,"P",{});var Zd=a(mn);Eh=d(Zd,"This model is also a PyTorch "),_n=s(Zd,"A",{href:!0,rel:!0});var Bm=a(_n);qh=d(Bm,"torch.nn.Module"),Bm.forEach(t),Lh=d(Zd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zd.forEach(t),Dh=l(To),M=s(To,"DIV",{class:!0});var N=a(M);_(gn.$$.fragment,N),zh=l(N),Dt=s(N,"P",{});var xs=a(Dt);Fh=d(xs,"The "),_s=s(xs,"A",{href:!0});var Gm=a(_s);$h=d(Gm,"LEDForConditionalGeneration"),Gm.forEach(t),xh=d(xs," forward method, overrides the "),Ha=s(xs,"CODE",{});var Qm=a(Ha);Mh=d(Qm,"__call__"),Qm.forEach(t),Sh=d(xs," special method."),xs.forEach(t),Ch=l(N),_(to.$$.fragment,N),Oh=l(N),Ua=s(N,"P",{});var Hm=a(Ua);Ph=d(Hm,"Conditional generation example:"),Hm.forEach(t),Ah=l(N),_(vn.$$.fragment,N),Nh=l(N),Ra=s(N,"P",{});var Um=a(Ra);Ih=d(Um,"Summarization example::"),Um.forEach(t),jh=l(N),Wa=s(N,"BLOCKQUOTE",{});var Rm=a(Wa);Ka=s(Rm,"BLOCKQUOTE",{});var Wm=a(Ka);Va=s(Wm,"BLOCKQUOTE",{});var Km=a(Va);Xa=s(Km,"P",{});var Vm=a(Xa);Bh=d(Vm,`import torch from transformers import LEDTokenizer, LEDForConditionalGeneration`),Vm.forEach(t),Km.forEach(t),Wm.forEach(t),Rm.forEach(t),Gh=l(N),Ja=s(N,"BLOCKQUOTE",{});var Xm=a(Ja);Za=s(Xm,"BLOCKQUOTE",{});var Jm=a(Za);Ya=s(Jm,"BLOCKQUOTE",{});var Zm=a(Ya);er=s(Zm,"P",{});var Ym=a(er);Qh=d(Ym,`model = LEDForConditionalGeneration.from_pretrained(\u2018allenai/led-large-16384-arxiv\u2019) tokenizer = LEDTokenizer.from_pretrained(\u2018allenai/led-large-16384-arxiv\u2019)`),Ym.forEach(t),Zm.forEach(t),Jm.forEach(t),Xm.forEach(t),Hh=l(N),tr=s(N,"BLOCKQUOTE",{});var e_=a(tr);or=s(e_,"BLOCKQUOTE",{});var t_=a(or);nr=s(t_,"BLOCKQUOTE",{});var o_=a(nr);sr=s(o_,"P",{});var n_=a(sr);Uh=d(n_,`ARTICLE_TO_SUMMARIZE = '''Transformers (Vaswani et al., 2017) have achieved state-of-the-art \u2026 results in a wide range of natural language tasks including generative \u2026 language modeling (Dai et al., 2019; Radford et al., 2019) and discriminative \u2026 language understanding (Devlin et al., 2019). This success is partly due to \u2026 the self-attention component which enables the network to capture contextual \u2026 information from the entire sequence. While powerful, the memory and computational \u2026 requirements of self-attention grow quadratically with sequence length, making \u2026 it infeasible (or very expensive) to process long sequences. \u2026 \u2026 To address this limitation, we present Longformer, a modified Transformer \u2026 architecture with a self-attention operation that scales linearly with the \u2026 sequence length, making it versatile for processing long documents (Fig 1). This \u2026 is an advantage for natural language tasks such as long document classification, \u2026 question answering (QA), and coreference resolution, where existing approaches \u2026 partition or shorten the long context into smaller sequences that fall within the \u2026 typical 512 token limit of BERT-style pretrained models. Such partitioning could \u2026 potentially result in loss of important cross-partition information, and to \u2026 mitigate this problem, existing methods often rely on complex architectures to \u2026 address such interactions. 
On the other hand, our proposed Longformer is able to \u2026 build contextual representations of the entire context using multiple layers of \u2026 attention, reducing the need for task-specific architectures.''' inputs = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors=\u2018pt\u2019)`),n_.forEach(t),o_.forEach(t),t_.forEach(t),e_.forEach(t),Rh=l(N),ar=s(N,"BLOCKQUOTE",{});var s_=a(ar);rr=s(s_,"BLOCKQUOTE",{});var a_=a(rr);Tn=s(a_,"BLOCKQUOTE",{});var Yd=a(Tn);oo=s(Yd,"H1",{class:!0});var ei=a(oo);no=s(ei,"A",{id:!0,class:!0,href:!0});var r_=a(no);dr=s(r_,"SPAN",{});var d_=a(dr);_(kn.$$.fragment,d_),d_.forEach(t),r_.forEach(t),Wh=l(ei),ir=s(ei,"SPAN",{});var i_=a(ir);Kh=d(i_,"Global attention on the first token (cf. Beltagy et al. 2020)"),i_.forEach(t),ei.forEach(t),Vh=l(Yd),lr=s(Yd,"P",{});var l_=a(lr);Xh=d(l_,`global_attention_mask = torch.zeros_like(inputs) global_attention_mask[:, 0] = 1`),l_.forEach(t),Yd.forEach(t),a_.forEach(t),s_.forEach(t),Jh=l(N),cr=s(N,"BLOCKQUOTE",{});var c_=a(cr);hr=s(c_,"BLOCKQUOTE",{});var h_=a(hr);bn=s(h_,"BLOCKQUOTE",{});var ti=a(bn);so=s(ti,"H1",{class:!0});var oi=a(so);ao=s(oi,"A",{id:!0,class:!0,href:!0});var u_=a(ao);ur=s(u_,"SPAN",{});var p_=a(ur);_(wn.$$.fragment,p_),p_.forEach(t),u_.forEach(t),Zh=l(oi),pr=s(oi,"SPAN",{});var f_=a(pr);Yh=d(f_,"Generate Summary"),f_.forEach(t),oi.forEach(t),eu=l(ti),fr=s(ti,"P",{});var m_=a(fr);tu=d(m_,`summary_ids = model.generate(inputs, global_attention_mask=global_attention_mask, \u2026 num_beams=3, max_length=32, early_stopping=True) print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))`),m_.forEach(t),ti.forEach(t),h_.forEach(t),c_.forEach(t),N.forEach(t),To.forEach(t),ld=l(o),zt=s(o,"H2",{class:!0});var ni=a(zt);ro=s(ni,"A",{id:!0,class:!0,href:!0});var __=a(ro);mr=s(__,"SPAN",{});var g_=a(mr);_(yn.$$.fragment,g_),g_.forEach(t),__.forEach(t),ou=l(ni),_r=s(ni,"SPAN",{});var v_=a(_r);nu=d(v_,"LEDForSequenceClassification"),v_.forEach(t),ni.forEach(t),cd=l(o),he=s(o,"DIV",{class:!0});var Xe=a(he);_(En.$$.fragment,Xe),su=l(Xe),gr=s(Xe,"P",{});var T_=a(gr);au=d(T_,`LED model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),T_.forEach(t),ru=l(Xe),qn=s(Xe,"P",{});var si=a(qn);du=d(si,"This model inherits from "),gs=s(si,"A",{href:!0});var k_=a(gs);iu=d(k_,"PreTrainedModel"),k_.forEach(t),lu=d(si,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),si.forEach(t),cu=l(Xe),Ln=s(Xe,"P",{});var ai=a(Ln);hu=d(ai,"This model is also a PyTorch "),Dn=s(ai,"A",{href:!0,rel:!0});var b_=a(Dn);uu=d(b_,"torch.nn.Module"),b_.forEach(t),pu=d(ai,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ai.forEach(t),fu=l(Xe),U=s(Xe,"DIV",{class:!0});var qe=a(U);_(zn.$$.fragment,qe),mu=l(qe),Ft=s(qe,"P",{});var Ms=a(Ft);_u=d(Ms,"The "),vs=s(Ms,"A",{href:!0});var w_=a(vs);gu=d(w_,"LEDForSequenceClassification"),w_.forEach(t),vu=d(Ms," forward method, overrides the "),vr=s(Ms,"CODE",{});var y_=a(vr);Tu=d(y_,"__call__"),y_.forEach(t),ku=d(Ms," special method."),Ms.forEach(t),bu=l(qe),_(io.$$.fragment,qe),wu=l(qe),Tr=s(qe,"P",{});var E_=a(Tr);yu=d(E_,"Example of single-label classification:"),E_.forEach(t),Eu=l(qe),_(Fn.$$.fragment,qe),qu=l(qe),kr=s(qe,"P",{});var q_=a(kr);Lu=d(q_,"Example of multi-label classification:"),q_.forEach(t),Du=l(qe),_($n.$$.fragment,qe),qe.forEach(t),Xe.forEach(t),hd=l(o),$t=s(o,"H2",{class:!0});var ri=a($t);lo=s(ri,"A",{id:!0,class:!0,href:!0});var L_=a(lo);br=s(L_,"SPAN",{});var D_=a(br);_(xn.$$.fragment,D_),D_.forEach(t),L_.forEach(t),zu=l(ri),wr=s(ri,"SPAN",{});var z_=a(wr);Fu=d(z_,"LEDForQuestionAnswering"),z_.forEach(t),ri.forEach(t),ud=l(o),ue=s(o,"DIV",{class:!0});var Je=a(ue);_(Mn.$$.fragment,Je),$u=l(Je),xt=s(Je,"P",{});var Ss=a(xt);xu=d(Ss,`LED Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),yr=s(Ss,"CODE",{});var F_=a(yr);Mu=d(F_,"span start logits"),F_.forEach(t),Su=d(Ss," and "),Er=s(Ss,"CODE",{});var $_=a(Er);Cu=d($_,"span end logits"),$_.forEach(t),Ou=d(Ss,")."),Ss.forEach(t),Pu=l(Je),Sn=s(Je,"P",{});var di=a(Sn);Au=d(di,"This model inherits from "),Ts=s(di,"A",{href:!0});var x_=a(Ts);Nu=d(x_,"PreTrainedModel"),x_.forEach(t),Iu=d(di,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),di.forEach(t),ju=l(Je),Cn=s(Je,"P",{});var ii=a(Cn);Bu=d(ii,"This model is also a PyTorch "),On=s(ii,"A",{href:!0,rel:!0});var M_=a(On);Gu=d(M_,"torch.nn.Module"),M_.forEach(t),Qu=d(ii,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ii.forEach(t),Hu=l(Je),ke=s(Je,"DIV",{class:!0});var Ze=a(ke);_(Pn.$$.fragment,Ze),Uu=l(Ze),Mt=s(Ze,"P",{});var Cs=a(Mt);Ru=d(Cs,"The "),ks=s(Cs,"A",{href:!0});var S_=a(ks);Wu=d(S_,"LEDForQuestionAnswering"),S_.forEach(t),Ku=d(Cs," forward method, overrides the "),qr=s(Cs,"CODE",{});var C_=a(qr);Vu=d(C_,"__call__"),C_.forEach(t),Xu=d(Cs," special method."),Cs.forEach(t),Ju=l(Ze),_(co.$$.fragment,Ze),Zu=l(Ze),Lr=s(Ze,"P",{});var O_=a(Lr);Yu=d(O_,"Example:"),O_.forEach(t),ep=l(Ze),_(An.$$.fragment,Ze),Ze.forEach(t),Je.forEach(t),pd=l(o),St=s(o,"H2",{class:!0});var li=a(St);ho=s(li,"A",{id:!0,class:!0,href:!0});var P_=a(ho);Dr=s(P_,"SPAN",{});var A_=a(Dr);_(Nn.$$.fragment,A_),A_.forEach(t),P_.forEach(t),tp=l(li),zr=s(li,"SPAN",{});var N_=a(zr);op=d(N_,"TFLEDModel"),N_.forEach(t),li.forEach(t),fd=l(o),pe=s(o,"DIV",{class:!0});var Ye=a(pe);_(In.$$.fragment,Ye),np=l(Ye),jn=s(Ye,"P",{});var ci=a(jn);sp=d(ci,`The bare LED Model outputting raw hidden-states without any specific head on top. This model inherits from `),bs=s(ci,"A",{href:!0});var I_=a(bs);ap=d(I_,"TFPreTrainedModel"),I_.forEach(t),rp=d(ci,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ci.forEach(t),dp=l(Ye),Bn=s(Ye,"P",{});var hi=a(Bn);ip=d(hi,"This model is also a "),Gn=s(hi,"A",{href:!0,rel:!0});var j_=a(Gn);lp=d(j_,"tf.keras.Model"),j_.forEach(t),cp=d(hi,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hi.forEach(t),hp=l(Ye),_(uo.$$.fragment,Ye),up=l(Ye),be=s(Ye,"DIV",{class:!0});var et=a(be);_(Qn.$$.fragment,et),pp=l(et),Ct=s(et,"P",{});var Os=a(Ct);fp=d(Os,"The "),ws=s(Os,"A",{href:!0});var B_=a(ws);mp=d(B_,"TFLEDModel"),B_.forEach(t),_p=d(Os," forward method, overrides the "),Fr=s(Os,"CODE",{});var G_=a(Fr);gp=d(G_,"__call__"),G_.forEach(t),vp=d(Os," special method."),Os.forEach(t),Tp=l(et),_(po.$$.fragment,et),kp=l(et),$r=s(et,"P",{});var Q_=a($r);bp=d(Q_,"Example:"),Q_.forEach(t),wp=l(et),_(Hn.$$.fragment,et),et.forEach(t),Ye.forEach(t),md=l(o),Ot=s(o,"H2",{class:!0});var ui=a(Ot);fo=s(ui,"A",{id:!0,class:!0,href:!0});var H_=a(fo);xr=s(H_,"SPAN",{});var U_=a(xr);_(Un.$$.fragment,U_),U_.forEach(t),H_.forEach(t),yp=l(ui),Mr=s(ui,"SPAN",{});var R_=a(Mr);Ep=d(R_,"TFLEDForConditionalGeneration"),R_.forEach(t),ui.forEach(t),_d=l(o),fe=s(o,"DIV",{class:!0});var tt=a(fe);_(Rn.$$.fragment,tt),qp=l(tt),Wn=s(tt,"P",{});var pi=a(Wn);Lp=d(pi,`The LED Model with a language modeling head. Can be used for summarization. This model inherits from `),ys=s(pi,"A",{href:!0});var W_=a(ys);Dp=d(W_,"TFPreTrainedModel"),W_.forEach(t),zp=d(pi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pi.forEach(t),Fp=l(tt),Kn=s(tt,"P",{});var fi=a(Kn);$p=d(fi,"This model is also a "),Vn=s(fi,"A",{href:!0,rel:!0});var K_=a(Vn);xp=d(K_,"tf.keras.Model"),K_.forEach(t),Mp=d(fi,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fi.forEach(t),Sp=l(tt),_(mo.$$.fragment,tt),Cp=l(tt),we=s(tt,"DIV",{class:!0});var ot=a(we);_(Xn.$$.fragment,ot),Op=l(ot),Pt=s(ot,"P",{});var Ps=a(Pt);Pp=d(Ps,"The "),Es=s(Ps,"A",{href:!0});var V_=a(Es);Ap=d(V_,"TFLEDForConditionalGeneration"),V_.forEach(t),Np=d(Ps," forward method, overrides the "),Sr=s(Ps,"CODE",{});var X_=a(Sr);Ip=d(X_,"__call__"),X_.forEach(t),jp=d(Ps," special method."),Ps.forEach(t),Bp=l(ot),_(_o.$$.fragment,ot),Gp=l(ot),Cr=s(ot,"P",{});var J_=a(Cr);Qp=d(J_,"Examples:"),J_.forEach(t),Hp=l(ot),_(Jn.$$.fragment,ot),ot.forEach(t),tt.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(hg)),c(w,"id","led"),c(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(w,"href","#led"),c(b,"class","relative group"),c(ee,"id","overview"),c(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ee,"href","#overview"),c(z,"class","relative group"),c(se,"href","https://arxiv.org/abs/2004.05150"),c(se,"rel","nofollow"),c(H,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForConditionalGeneration"),c(j,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(Oe,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer"),c(Pe,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),c(es,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer"),c(ts,"href","/docs/transformers/v4.15.0/en/model_doc/longformer#transformers.LongformerModel"),c(yo,"href","https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing"),c(yo,"rel","nofollow"),c(qo,"href","https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing"),c(qo,"rel","nofollow"),c(Lo,"href","https://huggingface.co/patrickvonplaten"),c(Lo,"rel","nofollow"),c(Nt,"id","transformers.LEDConfig"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#transformers.LEDConfig"),c(ct,"class","relative group"),c(os,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDModel"),c(Fo,"href","https://huggingface.co/allenai/led-base-16384"),c(Fo,"rel","nofollow"),c(ns,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(ss,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(jt,"id","initializing-a-led-allenai/led-base-16384-style-configuration"),c(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jt,"href","#initializing-a-led-allenai/led-base-16384-style-configuration"),c(It,"class","relative group"),c(Gt,"id","initializing-a-model-from-the-allenai/led-base-16384-style-configuration"),c(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Gt,"href","#initializing-a-model-from-the-allenai/led-base-16384-style-configuration"),c(Bt,"class","relative group"),c(Ht,"id","accessing-the-model-configuration"),c(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ht,"href","#accessing-the-model-configuration"),c(Qt,"class","relative group"),c(P,"class","docstring"),c(Ut,"id","transformers.LEDTokenizer"),c(Ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ut,"href","#transformers.LEDTokenizer"),c(pt,"class","relative group"),c(as,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizer"),c(rs,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),c(ds,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),c(We,"class","docstring"),c(Wt,"class","docstring"),c(Kt,"class","docstring"),c(Ea,"class","docstring"),c(I,"class","docstring"),c(Vt,"id","transformers.LEDTokenizerFast"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.LEDTokenizerFast"),c(ft,"class","relative group"),c(cs,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDTokenizerFast"),c(hs,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizerFast"),c(us,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizerFast"),c(Fe,"class","docstring"),c(Jt,"id","transformers.models.led.modeling_led.LEDEncoderBaseModelOutput"),c(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput"),c(mt,"class","relative group"),c(_t,"class","docstring"),c(gt,"class","docstring"),c(vt,"class","docstring"),c(Tt,"class","docstring"),c(kt,"class","docstring"),c(bt,"class","docstring"),c(wt,"class","docstring"),c(yt,"class","docstring"),c(Zt,"id","transformers.LEDModel"),c(Zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Zt,"href","#transformers.LEDModel"),c(Et,"class","relative group"),c(ps,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ln,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ln,"rel","nofollow"),c(fs,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDModel"),c(Te,"class","docstring"),c($e,"class","docstring"),c(eo,"id","transformers.LEDForConditionalGeneration"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.LEDForConditionalGeneration"),c(Lt,"class","relative 
group"),c(ms,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(_n,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(_n,"rel","nofollow"),c(_s,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForConditionalGeneration"),c(no,"id","global-attention-on-the-first-token-(cf.-beltagy-et-al.-2020)"),c(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(no,"href","#global-attention-on-the-first-token-(cf.-beltagy-et-al.-2020)"),c(oo,"class","relative group"),c(ao,"id","generate-summary"),c(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ao,"href","#generate-summary"),c(so,"class","relative group"),c(M,"class","docstring"),c(xe,"class","docstring"),c(ro,"id","transformers.LEDForSequenceClassification"),c(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ro,"href","#transformers.LEDForSequenceClassification"),c(zt,"class","relative group"),c(gs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Dn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Dn,"rel","nofollow"),c(vs,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForSequenceClassification"),c(U,"class","docstring"),c(he,"class","docstring"),c(lo,"id","transformers.LEDForQuestionAnswering"),c(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(lo,"href","#transformers.LEDForQuestionAnswering"),c($t,"class","relative group"),c(Ts,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(On,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(On,"rel","nofollow"),c(ks,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.LEDForQuestionAnswering"),c(ke,"class","docstring"),c(ue,"class","docstring"),c(ho,"id","transformers.TFLEDModel"),c(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ho,"href","#transformers.TFLEDModel"),c(St,"class","relative group"),c(bs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Gn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Gn,"rel","nofollow"),c(ws,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDModel"),c(be,"class","docstring"),c(pe,"class","docstring"),c(fo,"id","transformers.TFLEDForConditionalGeneration"),c(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fo,"href","#transformers.TFLEDForConditionalGeneration"),c(Ot,"class","relative 
group"),c(ys,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Vn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Vn,"rel","nofollow"),c(Es,"href","/docs/transformers/v4.15.0/en/model_doc/led#transformers.TFLEDForConditionalGeneration"),c(we,"class","docstring"),c(fe,"class","docstring")},m(o,u){e(document.head,p),h(o,L,u),h(o,b,u),e(b,w),e(w,q),g(E,q,null),e(b,y),e(b,D),e(D,Ae),h(o,_e,u),h(o,z,u),e(z,ee),e(ee,W),g(ne,W,null),e(z,Ne),e(z,K),e(K,Ie),h(o,Me,u),h(o,Q,u),e(Q,V),e(Q,se),e(se,Le),e(Q,C),h(o,A,u),h(o,ge,u),e(ge,ae),h(o,Se,u),h(o,ve,u),e(ve,X),e(X,je),h(o,Ce,u),h(o,O,u),e(O,Be),h(o,te,u),h(o,$,u),e($,x),e(x,H),e(H,Ge),e(x,Qe),e(x,j),e(j,He),e(x,re),e(x,De),e(De,f),e(x,F),e(x,de),e(de,nt),e(x,st),e(x,G),e(G,at),e(x,rt),e(x,Oe),e(Oe,J),e(x,ie),e(x,Pe),e(Pe,dt),e(x,le),e($,it),e($,ce),e(ce,Ue),e(ce,Re),e(Re,mi),e(ce,_i),e(ce,As),e(As,gi),e(ce,vi),e($,Ti),e($,ze),e(ze,ki),e(ze,Ns),e(Ns,bi),e(ze,wi),e(ze,Is),e(Is,yi),e(ze,Ei),e(ze,es),e(es,qi),e(ze,Li),e(ze,js),e(js,Di),e(ze,zi),e($,Fi),e($,Z),e(Z,$i),e(Z,Bs),e(Bs,xi),e(Z,Mi),e(Z,Gs),e(Gs,Si),e(Z,Ci),e(Z,ts),e(ts,Oi),e(Z,Pi),e(Z,Qs),e(Qs,Ai),e(Z,Ni),e(Z,Hs),e(Hs,Ii),e(Z,ji),e(Z,Us),e(Us,Bi),e(Z,Gi),e($,Qi),e($,lt),e(lt,Hi),e(lt,Rs),e(Rs,Ui),e(lt,Ri),e(lt,Ws),e(Ws,Wi),e(lt,Ki),e($,Vi),e($,wo),e(wo,Xi),e(wo,yo),e(yo,Ji),e(wo,Zi),e($,Yi),e($,Eo),e(Eo,el),e(Eo,qo),e(qo,tl),e(Eo,ol),h(o,Qr,u),h(o,At,u),e(At,nl),e(At,Lo),e(Lo,sl),e(At,al),h(o,Hr,u),h(o,ct,u),e(ct,Nt),e(Nt,Ks),g(Do,Ks,null),e(ct,rl),e(ct,Vs),e(Vs,dl),h(o,Ur,u),h(o,P,u),g(zo,P,null),e(P,il),e(P,ht),e(ht,ll),e(ht,os),e(os,cl),e(ht,hl),e(ht,Fo),e(Fo,ul),e(ht,pl),e(P,fl),e(P,ut),e(ut,ml),e(ut,ns),e(ns,_l),e(ut,gl),e(ut,ss),e(ss,vl),e(ut,Tl),e(P,kl),e(P,Xs),e(Xs,bl),e(P,wl),g($o,P,null),e(P,yl),e(P,Js),e(Js,Zs),e(Zs,Ys),e(Ys,ea),e(ea,El),e(P,ql),e(P,ta),e(ta,oa),e(oa,xo),e(xo,It),e(It,jt),e(jt,na),g(Mo,na,null),e(It,Ll),e(It,sa),e(sa,Dl),e(xo,zl),e(xo,aa),e(aa,Fl),e(P,$l),e(P,ra),e(ra,da),e(da,So),e(So,Bt),e(Bt,Gt),e(Gt,ia),g(Co,ia,null),e(Bt,xl),e(Bt,la),e(la,Ml),e(So,Sl),e(So,ca),e(ca,Cl),e(P,Ol),e(P,ha),e(ha,ua),e(ua,Oo),e(Oo,Qt),e(Qt,Ht),e(Ht,pa),g(Po,pa,null),e(Qt,Pl),e(Qt,fa),e(fa,Al),e(Oo,Nl),e(Oo,ma),e(ma,Il),h(o,Rr,u),h(o,pt,u),e(pt,Ut),e(Ut,_a),g(Ao,_a,null),e(pt,jl),e(pt,ga),e(ga,Bl),h(o,Wr,u),h(o,I,u),g(No,I,null),e(I,Gl),e(I,va),e(va,Ql),e(I,Hl),e(I,Rt),e(Rt,as),e(as,Ul),e(Rt,Rl),e(Rt,rs),e(rs,Wl),e(Rt,Kl),e(I,Vl),e(I,Io),e(Io,Xl),e(Io,ds),e(ds,Jl),e(Io,Zl),e(I,Yl),e(I,We),g(jo,We,null),e(We,ec),e(We,Ta),e(Ta,tc),e(We,oc),e(We,Bo),e(Bo,is),e(is,nc),e(is,ka),e(ka,sc),e(Bo,ac),e(Bo,ls),e(ls,rc),e(ls,ba),e(ba,dc),e(I,ic),e(I,Wt),g(Go,Wt,null),e(Wt,lc),e(Wt,Qo),e(Qo,cc),e(Qo,wa),e(wa,hc),e(Qo,uc),e(I,pc),e(I,Kt),g(Ho,Kt,null),e(Kt,fc),e(Kt,ya),e(ya,mc),e(I,_c),e(I,Ea),h(o,Kr,u),h(o,ft,u),e(ft,Vt),e(Vt,qa),g(Uo,qa,null),e(ft,gc),e(ft,La),e(La,vc),h(o,Vr,u),h(o,Fe,u),g(Ro,Fe,null),e(Fe,Tc),e(Fe,Wo),e(Wo,kc),e(Wo,Da),e(Da,bc),e(Wo,wc),e(Fe,yc),e(Fe,Xt),e(Xt,cs),e(cs,Ec),e(Xt,qc),e(Xt,hs),e(hs,Lc),e(Xt,Dc),e(Fe,zc),e(Fe,Ko),e(Ko,Fc),e(Ko,us),e(us,$c),e(Ko,xc),h(o,Xr,u),h(o,mt,u),e(mt,Jt),e(Jt,za),g(Vo,za,null),e(mt,Mc),e(mt,Fa),e(Fa,Sc),h(o,Jr,u),h(o,_t,u),g(Xo,_t,null),e(_t,Cc),e(_t,$a),e($a,Oc),h(o,Zr,u),h(o,gt,u),g(Jo,gt,null),e(gt,Pc),e(gt,xa),e(xa,Ac),h(o,Yr,u),h(o,vt,u),g(Zo,vt,null),e(vt,Nc),e(vt,Ma),e(Ma,Ic),h(o,ed,u),h(o,Tt,u),g(Yo,Tt,null),e(Tt,jc),e(Tt,Sa),e(Sa,Bc),h(o,td,u),h(o,kt,u),g(en,kt,null),e(kt,Gc),e(kt,Ca),e(Ca,Qc),h(o,od,u),h(o,bt,u),g(tn,bt,null),e(bt,Hc),e(bt,Oa),e(Oa,Uc),h(o,nd,u),h(o,wt,u
),g(on,wt,null),e(wt,Rc),e(wt,Pa),e(Pa,Wc),h(o,sd,u),h(o,yt,u),g(nn,yt,null),e(yt,Kc),e(yt,Aa),e(Aa,Vc),h(o,ad,u),h(o,Et,u),e(Et,Zt),e(Zt,Na),g(sn,Na,null),e(Et,Xc),e(Et,Ia),e(Ia,Jc),h(o,rd,u),h(o,$e,u),g(an,$e,null),e($e,Zc),e($e,rn),e(rn,Yc),e(rn,ps),e(ps,eh),e(rn,th),e($e,oh),e($e,dn),e(dn,nh),e(dn,ln),e(ln,sh),e(dn,ah),e($e,rh),e($e,Te),g(cn,Te,null),e(Te,dh),e(Te,qt),e(qt,ih),e(qt,fs),e(fs,lh),e(qt,ch),e(qt,ja),e(ja,hh),e(qt,uh),e(Te,ph),g(Yt,Te,null),e(Te,fh),e(Te,Ba),e(Ba,mh),e(Te,_h),g(hn,Te,null),h(o,dd,u),h(o,Lt,u),e(Lt,eo),e(eo,Ga),g(un,Ga,null),e(Lt,gh),e(Lt,Qa),e(Qa,vh),h(o,id,u),h(o,xe,u),g(pn,xe,null),e(xe,Th),e(xe,fn),e(fn,kh),e(fn,ms),e(ms,bh),e(fn,wh),e(xe,yh),e(xe,mn),e(mn,Eh),e(mn,_n),e(_n,qh),e(mn,Lh),e(xe,Dh),e(xe,M),g(gn,M,null),e(M,zh),e(M,Dt),e(Dt,Fh),e(Dt,_s),e(_s,$h),e(Dt,xh),e(Dt,Ha),e(Ha,Mh),e(Dt,Sh),e(M,Ch),g(to,M,null),e(M,Oh),e(M,Ua),e(Ua,Ph),e(M,Ah),g(vn,M,null),e(M,Nh),e(M,Ra),e(Ra,Ih),e(M,jh),e(M,Wa),e(Wa,Ka),e(Ka,Va),e(Va,Xa),e(Xa,Bh),e(M,Gh),e(M,Ja),e(Ja,Za),e(Za,Ya),e(Ya,er),e(er,Qh),e(M,Hh),e(M,tr),e(tr,or),e(or,nr),e(nr,sr),e(sr,Uh),e(M,Rh),e(M,ar),e(ar,rr),e(rr,Tn),e(Tn,oo),e(oo,no),e(no,dr),g(kn,dr,null),e(oo,Wh),e(oo,ir),e(ir,Kh),e(Tn,Vh),e(Tn,lr),e(lr,Xh),e(M,Jh),e(M,cr),e(cr,hr),e(hr,bn),e(bn,so),e(so,ao),e(ao,ur),g(wn,ur,null),e(so,Zh),e(so,pr),e(pr,Yh),e(bn,eu),e(bn,fr),e(fr,tu),h(o,ld,u),h(o,zt,u),e(zt,ro),e(ro,mr),g(yn,mr,null),e(zt,ou),e(zt,_r),e(_r,nu),h(o,cd,u),h(o,he,u),g(En,he,null),e(he,su),e(he,gr),e(gr,au),e(he,ru),e(he,qn),e(qn,du),e(qn,gs),e(gs,iu),e(qn,lu),e(he,cu),e(he,Ln),e(Ln,hu),e(Ln,Dn),e(Dn,uu),e(Ln,pu),e(he,fu),e(he,U),g(zn,U,null),e(U,mu),e(U,Ft),e(Ft,_u),e(Ft,vs),e(vs,gu),e(Ft,vu),e(Ft,vr),e(vr,Tu),e(Ft,ku),e(U,bu),g(io,U,null),e(U,wu),e(U,Tr),e(Tr,yu),e(U,Eu),g(Fn,U,null),e(U,qu),e(U,kr),e(kr,Lu),e(U,Du),g($n,U,null),h(o,hd,u),h(o,$t,u),e($t,lo),e(lo,br),g(xn,br,null),e($t,zu),e($t,wr),e(wr,Fu),h(o,ud,u),h(o,ue,u),g(Mn,ue,null),e(ue,$u),e(ue,xt),e(xt,xu),e(xt,yr),e(yr,Mu),e(xt,Su),e(xt,Er),e(Er,Cu),e(xt,Ou),e(ue,Pu),e(ue,Sn),e(Sn,Au),e(Sn,Ts),e(Ts,Nu),e(Sn,Iu),e(ue,ju),e(ue,Cn),e(Cn,Bu),e(Cn,On),e(On,Gu),e(Cn,Qu),e(ue,Hu),e(ue,ke),g(Pn,ke,null),e(ke,Uu),e(ke,Mt),e(Mt,Ru),e(Mt,ks),e(ks,Wu),e(Mt,Ku),e(Mt,qr),e(qr,Vu),e(Mt,Xu),e(ke,Ju),g(co,ke,null),e(ke,Zu),e(ke,Lr),e(Lr,Yu),e(ke,ep),g(An,ke,null),h(o,pd,u),h(o,St,u),e(St,ho),e(ho,Dr),g(Nn,Dr,null),e(St,tp),e(St,zr),e(zr,op),h(o,fd,u),h(o,pe,u),g(In,pe,null),e(pe,np),e(pe,jn),e(jn,sp),e(jn,bs),e(bs,ap),e(jn,rp),e(pe,dp),e(pe,Bn),e(Bn,ip),e(Bn,Gn),e(Gn,lp),e(Bn,cp),e(pe,hp),g(uo,pe,null),e(pe,up),e(pe,be),g(Qn,be,null),e(be,pp),e(be,Ct),e(Ct,fp),e(Ct,ws),e(ws,mp),e(Ct,_p),e(Ct,Fr),e(Fr,gp),e(Ct,vp),e(be,Tp),g(po,be,null),e(be,kp),e(be,$r),e($r,bp),e(be,wp),g(Hn,be,null),h(o,md,u),h(o,Ot,u),e(Ot,fo),e(fo,xr),g(Un,xr,null),e(Ot,yp),e(Ot,Mr),e(Mr,Ep),h(o,_d,u),h(o,fe,u),g(Rn,fe,null),e(fe,qp),e(fe,Wn),e(Wn,Lp),e(Wn,ys),e(ys,Dp),e(Wn,zp),e(fe,Fp),e(fe,Kn),e(Kn,$p),e(Kn,Vn),e(Vn,xp),e(Kn,Mp),e(fe,Sp),g(mo,fe,null),e(fe,Cp),e(fe,we),g(Xn,we,null),e(we,Op),e(we,Pt),e(Pt,Pp),e(Pt,Es),e(Es,Ap),e(Pt,Np),e(Pt,Sr),e(Sr,Ip),e(Pt,jp),e(we,Bp),g(_o,we,null),e(we,Gp),e(we,Cr),e(Cr,Qp),e(we,Hp),g(Jn,we,null),gd=!0},p(o,[u]){const Zn={};u&2&&(Zn.$$scope={dirty:u,ctx:o}),Yt.$set(Zn);const Or={};u&2&&(Or.$$scope={dirty:u,ctx:o}),to.$set(Or);const Pr={};u&2&&(Pr.$$scope={dirty:u,ctx:o}),io.$set(Pr);const Ar={};u&2&&(Ar.$$scope={dirty:u,ctx:o}),co.$set(Ar);const Yn={};u&2&&(Yn.$$scope={dirty:u,ctx:o}),uo.$set(Yn);const Nr={};u&2&&(Nr.$$scope={dirty:u,ctx:o}),po.$set(Nr);const 
Ir={};u&2&&(Ir.$$scope={dirty:u,ctx:o}),mo.$set(Ir);const jr={};u&2&&(jr.$$scope={dirty:u,ctx:o}),_o.$set(jr)},i(o){gd||(v(E.$$.fragment,o),v(ne.$$.fragment,o),v(Do.$$.fragment,o),v(zo.$$.fragment,o),v($o.$$.fragment,o),v(Mo.$$.fragment,o),v(Co.$$.fragment,o),v(Po.$$.fragment,o),v(Ao.$$.fragment,o),v(No.$$.fragment,o),v(jo.$$.fragment,o),v(Go.$$.fragment,o),v(Ho.$$.fragment,o),v(Uo.$$.fragment,o),v(Ro.$$.fragment,o),v(Vo.$$.fragment,o),v(Xo.$$.fragment,o),v(Jo.$$.fragment,o),v(Zo.$$.fragment,o),v(Yo.$$.fragment,o),v(en.$$.fragment,o),v(tn.$$.fragment,o),v(on.$$.fragment,o),v(nn.$$.fragment,o),v(sn.$$.fragment,o),v(an.$$.fragment,o),v(cn.$$.fragment,o),v(Yt.$$.fragment,o),v(hn.$$.fragment,o),v(un.$$.fragment,o),v(pn.$$.fragment,o),v(gn.$$.fragment,o),v(to.$$.fragment,o),v(vn.$$.fragment,o),v(kn.$$.fragment,o),v(wn.$$.fragment,o),v(yn.$$.fragment,o),v(En.$$.fragment,o),v(zn.$$.fragment,o),v(io.$$.fragment,o),v(Fn.$$.fragment,o),v($n.$$.fragment,o),v(xn.$$.fragment,o),v(Mn.$$.fragment,o),v(Pn.$$.fragment,o),v(co.$$.fragment,o),v(An.$$.fragment,o),v(Nn.$$.fragment,o),v(In.$$.fragment,o),v(uo.$$.fragment,o),v(Qn.$$.fragment,o),v(po.$$.fragment,o),v(Hn.$$.fragment,o),v(Un.$$.fragment,o),v(Rn.$$.fragment,o),v(mo.$$.fragment,o),v(Xn.$$.fragment,o),v(_o.$$.fragment,o),v(Jn.$$.fragment,o),gd=!0)},o(o){T(E.$$.fragment,o),T(ne.$$.fragment,o),T(Do.$$.fragment,o),T(zo.$$.fragment,o),T($o.$$.fragment,o),T(Mo.$$.fragment,o),T(Co.$$.fragment,o),T(Po.$$.fragment,o),T(Ao.$$.fragment,o),T(No.$$.fragment,o),T(jo.$$.fragment,o),T(Go.$$.fragment,o),T(Ho.$$.fragment,o),T(Uo.$$.fragment,o),T(Ro.$$.fragment,o),T(Vo.$$.fragment,o),T(Xo.$$.fragment,o),T(Jo.$$.fragment,o),T(Zo.$$.fragment,o),T(Yo.$$.fragment,o),T(en.$$.fragment,o),T(tn.$$.fragment,o),T(on.$$.fragment,o),T(nn.$$.fragment,o),T(sn.$$.fragment,o),T(an.$$.fragment,o),T(cn.$$.fragment,o),T(Yt.$$.fragment,o),T(hn.$$.fragment,o),T(un.$$.fragment,o),T(pn.$$.fragment,o),T(gn.$$.fragment,o),T(to.$$.fragment,o),T(vn.$$.fragment,o),T(kn.$$.fragment,o),T(wn.$$.fragment,o),T(yn.$$.fragment,o),T(En.$$.fragment,o),T(zn.$$.fragment,o),T(io.$$.fragment,o),T(Fn.$$.fragment,o),T($n.$$.fragment,o),T(xn.$$.fragment,o),T(Mn.$$.fragment,o),T(Pn.$$.fragment,o),T(co.$$.fragment,o),T(An.$$.fragment,o),T(Nn.$$.fragment,o),T(In.$$.fragment,o),T(uo.$$.fragment,o),T(Qn.$$.fragment,o),T(po.$$.fragment,o),T(Hn.$$.fragment,o),T(Un.$$.fragment,o),T(Rn.$$.fragment,o),T(mo.$$.fragment,o),T(Xn.$$.fragment,o),T(_o.$$.fragment,o),T(Jn.$$.fragment,o),gd=!1},d(o){t(p),o&&t(L),o&&t(b),k(E),o&&t(_e),o&&t(z),k(ne),o&&t(Me),o&&t(Q),o&&t(A),o&&t(ge),o&&t(Se),o&&t(ve),o&&t(Ce),o&&t(O),o&&t(te),o&&t($),o&&t(Qr),o&&t(At),o&&t(Hr),o&&t(ct),k(Do),o&&t(Ur),o&&t(P),k(zo),k($o),k(Mo),k(Co),k(Po),o&&t(Rr),o&&t(pt),k(Ao),o&&t(Wr),o&&t(I),k(No),k(jo),k(Go),k(Ho),o&&t(Kr),o&&t(ft),k(Uo),o&&t(Vr),o&&t(Fe),k(Ro),o&&t(Xr),o&&t(mt),k(Vo),o&&t(Jr),o&&t(_t),k(Xo),o&&t(Zr),o&&t(gt),k(Jo),o&&t(Yr),o&&t(vt),k(Zo),o&&t(ed),o&&t(Tt),k(Yo),o&&t(td),o&&t(kt),k(en),o&&t(od),o&&t(bt),k(tn),o&&t(nd),o&&t(wt),k(on),o&&t(sd),o&&t(yt),k(nn),o&&t(ad),o&&t(Et),k(sn),o&&t(rd),o&&t($e),k(an),k(cn),k(Yt),k(hn),o&&t(dd),o&&t(Lt),k(un),o&&t(id),o&&t(xe),k(pn),k(gn),k(to),k(vn),k(kn),k(wn),o&&t(ld),o&&t(zt),k(yn),o&&t(cd),o&&t(he),k(En),k(zn),k(io),k(Fn),k($n),o&&t(hd),o&&t($t),k(xn),o&&t(ud),o&&t(ue),k(Mn),k(Pn),k(co),k(An),o&&t(pd),o&&t(St),k(Nn),o&&t(fd),o&&t(pe),k(In),k(uo),k(Qn),k(po),k(Hn),o&&t(md),o&&t(Ot),k(Un),o&&t(_d),o&&t(fe),k(Rn),k(mo),k(Xn),k(_o),k(Jn)}}}const 
hg={local:"led",sections:[{local:"overview",title:"Overview"},{local:"transformers.LEDConfig",title:"LEDConfig"},{local:"transformers.LEDTokenizer",title:"LEDTokenizer"},{local:"transformers.LEDTokenizerFast",title:"LEDTokenizerFast"},{local:"transformers.models.led.modeling_led.LEDEncoderBaseModelOutput",title:"LED specific outputs"},{local:"transformers.LEDModel",title:"LEDModel"},{local:"transformers.LEDForConditionalGeneration",title:"LEDForConditionalGeneration"},{local:"transformers.LEDForSequenceClassification",title:"LEDForSequenceClassification"},{local:"transformers.LEDForQuestionAnswering",title:"LEDForQuestionAnswering"},{local:"transformers.TFLEDModel",title:"TFLEDModel"},{local:"transformers.TFLEDForConditionalGeneration",title:"TFLEDForConditionalGeneration"}],title:"LED"};function ug(B,p,L){let{fw:b}=p;return B.$$set=w=>{"fw"in w&&L(0,b=w.fw)},[b]}class Tg extends Z_{constructor(p){super();Y_(this,p,ug,cg,eg,{fw:0})}}export{Tg as default,hg as metadata};
9,915
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/speech_to_text_2.mdx-d18a1df7.js
import{S as _i,i as gi,s as vi,e as a,k as l,w as m,t as r,L as Ti,c as n,d as o,m as d,a as c,x as u,h as s,b as i,J as e,g as p,y as _,q as g,o as v,B as T}from"../../chunks/vendor-b1433968.js";import{T as ui}from"../../chunks/Tip-c3840994.js";import{D as W}from"../../chunks/Docstring-ff504c58.js";import{C as Ar}from"../../chunks/CodeBlock-a320dbd7.js";import{I as gt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function xi($e){let f,A,x,S,F,w,B,D;return{c(){f=a("p"),A=r(`This class method is simply calling AutoFeatureExtractor\u2019s `),x=a("code"),S=r("from_pretrained"),F=r(` and Speech2Text2Tokenizer\u2019s `),w=a("code"),B=r("from_pretrained"),D=r(`. Please refer to the docstrings of the methods above for more information.`)},l(z){f=n(z,"P",{});var b=c(f);A=s(b,`This class method is simply calling AutoFeatureExtractor\u2019s `),x=n(b,"CODE",{});var C=c(x);S=s(C,"from_pretrained"),C.forEach(o),F=s(b,` and Speech2Text2Tokenizer\u2019s `),w=n(b,"CODE",{});var j=c(w);B=s(j,"from_pretrained"),j.forEach(o),D=s(b,`. Please refer to the docstrings of the methods above for more information.`),b.forEach(o)},m(z,b){p(z,f,b),e(f,A),e(f,x),e(x,S),e(f,F),e(f,w),e(w,B),e(f,D)},d(z){z&&o(f)}}}function ki($e){let f,A,x,S,F,w,B,D;return{c(){f=a("p"),A=r("This class method is simply calling "),x=a("code"),S=r("save_pretrained"),F=r(` and `),w=a("code"),B=r("save_pretrained"),D=r(`. Please refer to the docstrings of the methods above for more information.`)},l(z){f=n(z,"P",{});var b=c(f);A=s(b,"This class method is simply calling "),x=n(b,"CODE",{});var C=c(x);S=s(C,"save_pretrained"),C.forEach(o),F=s(b,` and `),w=n(b,"CODE",{});var j=c(w);B=s(j,"save_pretrained"),j.forEach(o),D=s(b,`. Please refer to the docstrings of the methods above for more information.`),b.forEach(o)},m(z,b){p(z,f,b),e(f,A),e(f,x),e(x,S),e(f,F),e(f,w),e(w,B),e(f,D)},d(z){z&&o(f)}}}function bi($e){let f,A,x,S,F,w,B,D,z,b,C,j,so,Ee,jr,ao,Mr,Wo,H,Lr,vt,Fr,Dr,Pe,Ir,Wr,Vo,y,Vr,no,Nr,Br,co,Or,Ur,Tt,Rr,Hr,xt,Jr,Gr,kt,Kr,Qr,io,Xr,Yr,No,ie,Zr,ze,es,ts,Bo,le,os,Ce,rs,ss,Oo,bt,as,Uo,J,qe,ns,Ae,cs,is,ls,je,ds,St,hs,ps,fs,Me,ms,wt,us,Le,_s,gs,Ro,ee,de,lo,Fe,vs,ho,Ts,Ho,G,xs,yt,ks,bs,$t,Ss,ws,Jo,P,ys,Et,$s,Es,Pt,Ps,zs,zt,Cs,qs,Ct,As,js,qt,Ms,Ls,Go,At,po,Fs,Ko,De,Qo,jt,Ie,fo,Ds,Is,mo,Ws,Xo,We,Yo,he,Vs,Ve,Ns,Bs,Zo,te,pe,uo,Ne,Os,_o,Us,er,q,Be,Rs,oe,Hs,Mt,Js,Gs,Oe,Ks,Qs,Xs,re,Ys,Lt,Zs,ea,Ft,ta,oa,ra,go,sa,aa,Ue,tr,se,fe,vo,Re,na,To,ca,or,E,He,ia,xo,la,da,Je,ha,Dt,pa,fa,ma,me,Ge,ua,ko,_a,ga,K,Ke,va,bo,Ta,xa,Qe,ka,So,ba,Sa,wa,wo,rr,ae,ue,yo,Xe,ya,$o,$a,sr,k,Ye,Ea,Eo,Pa,za,M,It,Ca,qa,Wt,Aa,ja,Vt,Ma,La,Ze,Po,Fa,Da,Ia,Nt,Wa,Va,Na,_e,et,Ba,O,Oa,zo,Ua,Ra,Bt,Ha,Ja,tt,Co,Ga,Ka,Qa,Xa,Q,ot,Ya,rt,Za,Ot,en,tn,on,ge,rn,X,st,sn,ne,an,qo,nn,cn,Ut,ln,dn,hn,ve,pn,Te,at,fn,nt,mn,Rt,un,_n,gn,xe,ct,vn,it,Tn,Ht,xn,kn,bn,ke,lt,Sn,Ao,wn,ar,ce,be,jo,dt,yn,Mo,$n,nr,I,ht,En,U,Pn,Jt,zn,Cn,Lo,qn,An,Fo,jn,Mn,Ln,pt,Fn,ft,Dn,In,Wn,Y,mt,Vn,Do,Nn,Bn,ut,cr;return w=new gt({}),Ee=new gt({}),Fe=new gt({}),De=new Ar({props:{code:`import torch from transformers import Speech2Text2Processor, SpeechEncoderDecoderModel from datasets import load_dataset import soundfile as sf model = SpeechEncoderDecoderModel.from_pretrained("facebook/s2t-wav2vec2-large-en-de") processor = Speech2Text2Processor.from_pretrained("facebook/s2t-wav2vec2-large-en-de") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = 
ds.map(map_to_array) inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt") generated_ids = model.generate(input_ids=inputs["input_values"], attention_mask=inputs["attention_mask"]) transcription = processor.batch_decode(generated_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> torch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2Text2Processor, SpeechEncoderDecoderModel</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">processor = Speech2Text2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>):</span> <span class="hljs-meta">...</span> <span class="language-python"> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>])</span> <span class="hljs-meta">...</span> <span class="language-python"> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech</span> <span class="hljs-meta">...</span> <span class="language-python"> <span class="hljs-keyword">return</span> batch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">ds = ds.<span class="hljs-built_in">map</span>(map_to_array)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">inputs = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">generated_ids = model.generate(input_ids=inputs[<span class="hljs-string">&quot;input_values&quot;</span>], attention_mask=inputs[<span class="hljs-string">&quot;attention_mask&quot;</span>])</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">transcription = processor.batch_decode(generated_ids)</span>`}}),We=new Ar({props:{code:`from datasets import load_dataset from transformers import pipeline librispeech_en = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") asr = pipeline("automatic-speech-recognition", model="facebook/s2t-wav2vec2-large-en-de", feature_extractor="facebook/s2t-wav2vec2-large-en-de") translation_de 
= asr(librispeech_en[0]["file"]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">librispeech_en = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">asr = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>, feature_extractor=<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">translation_de = asr(librispeech_en[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;file&quot;</span>])</span>`}}),Ne=new gt({}),Be=new W({props:{name:"class transformers.Speech2Text2Config",anchor:"transformers.Speech2Text2Config",parameters:[{name:"vocab_size",val:" = 10000"},{name:"decoder_layers",val:" = 6"},{name:"decoder_ffn_dim",val:" = 2048"},{name:"decoder_attention_heads",val:" = 4"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"activation_function",val:" = 'relu'"},{name:"d_model",val:" = 256"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"max_source_positions",val:" = 6000"},{name:"max_target_positions",val:" = 1024"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py#L29",parametersDescription:[{anchor:"transformers.Speech2Text2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Speech2Text model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextModel">Speech2TextModel</a>`,name:"vocab_size"},{anchor:"transformers.Speech2Text2Config.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.Speech2Text2Config.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.Speech2Text2Config.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.Speech2Text2Config.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.Speech2Text2Config.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.Speech2Text2Config.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, and pooler.`,name:"dropout"},{anchor:"transformers.Speech2Text2Config.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.Speech2Text2Config.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.Speech2Text2Config.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.Speech2Text2Config.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. <a href="https://arxiv.org/abs/1909.11556%3E%60" rel="nofollow">https://arxiv.org/abs/1909.11556&gt;\`</a>__ for more details. decoder_layerdrop: (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.Speech2Text2Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.Speech2Text2Config.max_source_positions",description:`<strong>max_source_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 6000) &#x2014; The maximum sequence length of log-mel filter-bank features that this model might ever be used with. max_target_positions &#x2014; (<code>int</code>, <em>optional</em>, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_source_positions"}]}}),Ue=new Ar({props:{code:`from transformers import Speech2Text2ForCausalLM, Speech2Text2Config # Initializing a Speech2Text2 s2t_transformer_s style configuration configuration = Speech2Text2Config() # Initializing a model from the s2t_transformer_s style configuration model = Speech2Text2ForCausalLM(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2Text2ForCausalLM, Speech2Text2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Speech2Text2 s2t_transformer_s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Speech2Text2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the s2t_transformer_s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Speech2Text2ForCausalLM(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Re=new gt({}),He=new W({props:{name:"class transformers.Speech2Text2Tokenizer",anchor:"transformers.Speech2Text2Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"do_lower_case",val:" = False"},{name:"merges_file",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/tokenization_speech_to_text_2.py#L67",parametersDescription:[{anchor:"transformers.Speech2Text2Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.Speech2Text2Tokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.Speech2Text2Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.Speech2Text2Tokenizer.unk_token",description:`<strong>unk_token</strong> 
(<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.Speech2Text2Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.</p> <p>**kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>`,name:"pad_token"}]}}),Ge=new W({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerBase.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3178",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Ke=new W({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerBase.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3211",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerBase.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),Xe=new gt({}),Ye=new W({props:{name:"class transformers.Speech2Text2Processor",anchor:"transformers.Speech2Text2Processor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L25",parametersDescription:[{anchor:"transformers.Speech2Text2Processor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>AutoFeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Speech2Text2Processor.tokenizer",description:`<strong>tokenizer</strong> (<code>Speech2Text2Tokenizer</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer">Speech2Text2Tokenizer</a>. The tokenizer is a required input.`,name:"tokenizer"}]}}),et=new W({props:{name:"__call__",anchor:"transformers.Speech2Text2Processor.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L114"}}),ot=new W({props:{name:"from_pretrained",anchor:"transformers.Speech2Text2Processor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L79",parametersDescription:[{anchor:"transformers.Speech2Text2Processor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <code>save_pretrained</code> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <code>PreTrainedFeatureExtractor</code> and <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a></li> </ul>`,name:"pretrained_model_name_or_path"}]}}),ge=new ui({props:{$$slots:{default:[xi]},$$scope:{ctx:$e}}}),st=new W({props:{name:"save_pretrained",anchor:"transformers.Speech2Text2Processor.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L56",parametersDescription:[{anchor:"transformers.Speech2Text2Processor.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"}]}}),ve=new ui({props:{$$slots:{default:[ki]},$$scope:{ctx:$e}}}),at=new W({props:{name:"batch_decode",anchor:"transformers.Speech2Text2Processor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L124"}}),ct=new W({props:{name:"decode",anchor:"transformers.Speech2Text2Processor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L132"}}),lt=new W({props:{name:"as_target_processor",anchor:"transformers.Speech2Text2Processor.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L140"}}),dt=new gt({}),ht=new W({props:{name:"class transformers.Speech2Text2ForCausalLM",anchor:"transformers.Speech2Text2ForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py#L742",parametersDescription:[{anchor:"transformers.Speech2Text2ForCausalLM.config",description:`<strong>config</strong> ([<em>Speech2Text2Config</em>]) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [<em>~PreTrainedModel.from_pretrained</em>] method to load the model weights.`,name:"config"}]}}),mt=new W({props:{name:"forward",anchor:"transformers.Speech2Text2ForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py#L773",parametersDescription:[{anchor:"transformers.Speech2Text2ForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer">Speech2Text2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Config" >Speech2Text2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of 
<code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ut=new Ar({props:{code:`from transformers import SpeechEncoderDecoderModel, Speech2Text2ForCausalLM, Wav2Vec2Model, Speech2Text2Config, Wav2Vec2Config encoder = Wav2Vec2Model(Wav2Vec2Config()) decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SpeechEncoderDecoderModel, Speech2Text2ForCausalLM, Wav2Vec2Model, Speech2Text2Config, Wav2Vec2Config <span class="hljs-meta">&gt;&gt;&gt; </span>encoder = Wav2Vec2Model(Wav2Vec2Config()) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) <span class="hljs-comment"># init speech2text model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder)`}}),{c(){f=a("meta"),A=l(),x=a("h1"),S=a("a"),F=a("span"),m(w.$$.fragment),B=l(),D=a("span"),z=r("Speech2Text2"),b=l(),C=a("h2"),j=a("a"),so=a("span"),m(Ee.$$.fragment),jr=l(),ao=a("span"),Mr=r("Overview"),Wo=l(),H=a("p"),Lr=r("The Speech2Text2 model is used together with "),vt=a("a"),Fr=r("Wav2Vec2"),Dr=r(` for Speech Translation models proposed in `),Pe=a("a"),Ir=r("Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),Wr=r(` by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.`),Vo=l(),y=a("p"),Vr=r("Speech2Text2 is a "),no=a("em"),Nr=r("decoder-only"),Br=r(" transformer model that can be used with any speech "),co=a("em"),Or=r("encoder-only"),Ur=r(`, such as `),Tt=a("a"),Rr=r("Wav2Vec2"),Hr=r(" or "),xt=a("a"),Jr=r("HuBERT"),Gr=r(` for Speech-to-Text tasks. Please refer to the `),kt=a("a"),Kr=r("SpeechEncoderDecoder"),Qr=r(" class on how to combine Speech2Text2 with any speech "),io=a("em"),Xr=r("encoder-only"),Yr=r(` model.`),No=l(),ie=a("p"),Zr=r("This model was contributed by "),ze=a("a"),es=r("Patrick von Platen"),ts=r("."),Bo=l(),le=a("p"),os=r("The original code can be found "),Ce=a("a"),rs=r("here"),ss=r("."),Oo=l(),bt=a("p"),as=r("Tips:"),Uo=l(),J=a("ul"),qe=a("li"),ns=r(`Speech2Text2 achieves state-of-the-art results on the CoVoST Speech Translation dataset. 
For more information, see the `),Ae=a("a"),cs=r("official models"),is=r(" ."),ls=l(),je=a("li"),ds=r("Speech2Text2 is always used within the "),St=a("a"),hs=r("SpeechEncoderDecoder"),ps=r(" framework."),fs=l(),Me=a("li"),ms=r("Speech2Text2\u2019s tokenizer is based on "),wt=a("em"),us=r("fastBPE <"),Le=a("a"),_s=r("https://github.com/glample/fastBPE>"),gs=r("."),Ro=l(),ee=a("h2"),de=a("a"),lo=a("span"),m(Fe.$$.fragment),vs=l(),ho=a("span"),Ts=r("Inference"),Ho=l(),G=a("p"),xs=r("Speech2Text2\u2019s "),yt=a("a"),ks=r("SpeechEncoderDecoderModel"),bs=r(` model accepts raw waveform input values from speech and makes use of `),$t=a("a"),Ss=r("generate()"),ws=r(` to translate the input speech autoregressively to the target language.`),Jo=l(),P=a("p"),ys=r("The "),Et=a("a"),$s=r("Wav2Vec2FeatureExtractor"),Es=r(` class is responsible for preprocessing the input speech and `),Pt=a("a"),Ps=r("Speech2Text2Tokenizer"),zs=r(` decodes the generated target tokens to the target string. The `),zt=a("a"),Cs=r("Speech2Text2Processor"),qs=r(" wraps "),Ct=a("a"),As=r("Wav2Vec2FeatureExtractor"),js=r(` and `),qt=a("a"),Ms=r("Speech2Text2Tokenizer"),Ls=r(` into a single instance to both extract the input features and decode the predicted token ids.`),Go=l(),At=a("ul"),po=a("li"),Fs=r("Step-by-step Speech Translation"),Ko=l(),m(De.$$.fragment),Qo=l(),jt=a("ul"),Ie=a("li"),fo=a("p"),Ds=r("Speech Translation via Pipelines"),Is=l(),mo=a("p"),Ws=r("The automatic speech recognition pipeline can also be used to translate speech in just a couple lines of code"),Xo=l(),m(We.$$.fragment),Yo=l(),he=a("p"),Vs=r("See "),Ve=a("a"),Ns=r("model hub"),Bs=r(" to look for Speech2Text2 checkpoints."),Zo=l(),te=a("h2"),pe=a("a"),uo=a("span"),m(Ne.$$.fragment),Os=l(),_o=a("span"),Us=r("Speech2Text2Config"),er=l(),q=a("div"),m(Be.$$.fragment),Rs=l(),oe=a("p"),Hs=r("This is the configuration class to store the configuration of a "),Mt=a("a"),Js=r("Speech2Text2ForCausalLM"),Gs=r(`. It is used to instantiate an Speech2Text2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 `),Oe=a("a"),Ks=r("facebook/s2t-small-librispeech-asr"),Qs=r(" architecture."),Xs=l(),re=a("p"),Ys=r("Configuration objects inherit from "),Lt=a("a"),Zs=r("PretrainedConfig"),ea=r(` and can be used to control the model outputs. Read the documentation from `),Ft=a("a"),ta=r("PretrainedConfig"),oa=r(" for more information."),ra=l(),go=a("p"),sa=r("Example:"),aa=l(),m(Ue.$$.fragment),tr=l(),se=a("h2"),fe=a("a"),vo=a("span"),m(Re.$$.fragment),na=l(),To=a("span"),ca=r("Speech2TextTokenizer"),or=l(),E=a("div"),m(He.$$.fragment),ia=l(),xo=a("p"),la=r("Constructs a Speech2Text2Tokenizer."),da=l(),Je=a("p"),ha=r("This tokenizer inherits from "),Dt=a("a"),pa=r("PreTrainedTokenizer"),fa=r(` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),ma=l(),me=a("div"),m(Ge.$$.fragment),ua=l(),ko=a("p"),_a=r("Convert a list of lists of token ids into a list of strings by calling decode."),ga=l(),K=a("div"),m(Ke.$$.fragment),va=l(),bo=a("p"),Ta=r(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),xa=l(),Qe=a("p"),ka=r("Similar to doing "),So=a("code"),ba=r("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Sa=r("."),wa=l(),wo=a("div"),rr=l(),ae=a("h2"),ue=a("a"),yo=a("span"),m(Xe.$$.fragment),ya=l(),$o=a("span"),$a=r("Speech2Text2Processor"),sr=l(),k=a("div"),m(Ye.$$.fragment),Ea=l(),Eo=a("p"),Pa=r(`Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor.`),za=l(),M=a("p"),It=a("a"),Ca=r("Speech2Text2Processor"),qa=r(` offers all the functionalities of `),Wt=a("a"),Aa=r("AutoFeatureExtractor"),ja=r(" and "),Vt=a("a"),Ma=r("Speech2Text2Tokenizer"),La=r(`. See the `),Ze=a("a"),Po=a("strong"),Fa=r("call"),Da=r("()"),Ia=r(" and "),Nt=a("a"),Wa=r("decode()"),Va=r(` for more information.`),Na=l(),_e=a("div"),m(et.$$.fragment),Ba=l(),O=a("p"),Oa=r(`When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor\u2019s `),zo=a("code"),Ua=r("__call__()"),Ra=r(` and returns its output. If used in the context `),Bt=a("a"),Ha=r("as_target_processor()"),Ja=r(` this method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),tt=a("a"),Co=a("strong"),Ga=r("call"),Ka=r("()"),Qa=r(`. Please refer to the doctsring of the above two methods for more information.`),Xa=l(),Q=a("div"),m(ot.$$.fragment),Ya=l(),rt=a("p"),Za=r("Instantiate a "),Ot=a("a"),en=r("Speech2Text2Processor"),tn=r(" from a pretrained Speech2Text2 processor."),on=l(),m(ge.$$.fragment),rn=l(),X=a("div"),m(st.$$.fragment),sn=l(),ne=a("p"),an=r(`Save a Speech2Text2 feature extractor object and Speech2Text2 tokenizer object to the directory `),qo=a("code"),nn=r("save_directory"),cn=r(`, so that it can be re-loaded using the `),Ut=a("a"),ln=r("from_pretrained()"),dn=r(" class method."),hn=l(),m(ve.$$.fragment),pn=l(),Te=a("div"),m(at.$$.fragment),fn=l(),nt=a("p"),mn=r(`This method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),Rt=a("a"),un=r("batch_decode()"),_n=r(`. Please refer to the docstring of this method for more information.`),gn=l(),xe=a("div"),m(ct.$$.fragment),vn=l(),it=a("p"),Tn=r(`This method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),Ht=a("a"),xn=r("decode()"),kn=r(`. Please refer to the docstring of this method for more information.`),bn=l(),ke=a("div"),m(lt.$$.fragment),Sn=l(),Ao=a("p"),wn=r(`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text2.`),ar=l(),ce=a("h2"),be=a("a"),jo=a("span"),m(dt.$$.fragment),yn=l(),Mo=a("span"),$n=r("Speech2Text2ForCausalLM"),nr=l(),I=a("div"),m(ht.$$.fragment),En=l(),U=a("p"),Pn=r("The Speech2Text2 Decoder with a language modeling head. Can be used as the decoder part of "),Jt=a("a"),zn=r("EncoderDecoderModel"),Cn=r(" and "),Lo=a("code"),qn=r("SpeechEncoderDecoder"),An=r(`. This model inherits from [`),Fo=a("em"),jn=r("PreTrainedModel"),Mn=r(`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ln=l(),pt=a("p"),Fn=r("This model is also a PyTorch "),ft=a("a"),Dn=r("torch.nn.Module"),In=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wn=l(),Y=a("div"),m(mt.$$.fragment),Vn=l(),Do=a("p"),Nn=r("Example:"),Bn=l(),m(ut.$$.fragment),this.h()},l(t){const h=Ti('[data-svelte="svelte-1phssyn"]',document.head);f=n(h,"META",{name:!0,content:!0}),h.forEach(o),A=d(t),x=n(t,"H1",{class:!0});var _t=c(x);S=n(_t,"A",{id:!0,class:!0,href:!0});var Io=c(S);F=n(Io,"SPAN",{});var Hn=c(F);u(w.$$.fragment,Hn),Hn.forEach(o),Io.forEach(o),B=d(_t),D=n(_t,"SPAN",{});var Jn=c(D);z=s(Jn,"Speech2Text2"),Jn.forEach(o),_t.forEach(o),b=d(t),C=n(t,"H2",{class:!0});var ir=c(C);j=n(ir,"A",{id:!0,class:!0,href:!0});var Gn=c(j);so=n(Gn,"SPAN",{});var Kn=c(so);u(Ee.$$.fragment,Kn),Kn.forEach(o),Gn.forEach(o),jr=d(ir),ao=n(ir,"SPAN",{});var Qn=c(ao);Mr=s(Qn,"Overview"),Qn.forEach(o),ir.forEach(o),Wo=d(t),H=n(t,"P",{});var Gt=c(H);Lr=s(Gt,"The Speech2Text2 model is used together with "),vt=n(Gt,"A",{href:!0});var Xn=c(vt);Fr=s(Xn,"Wav2Vec2"),Xn.forEach(o),Dr=s(Gt,` for Speech Translation models proposed in `),Pe=n(Gt,"A",{href:!0,rel:!0});var Yn=c(Pe);Ir=s(Yn,"Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),Yn.forEach(o),Wr=s(Gt,` by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.`),Gt.forEach(o),Vo=d(t),y=n(t,"P",{});var L=c(y);Vr=s(L,"Speech2Text2 is a "),no=n(L,"EM",{});var Zn=c(no);Nr=s(Zn,"decoder-only"),Zn.forEach(o),Br=s(L," transformer model that can be used with any speech "),co=n(L,"EM",{});var ec=c(co);Or=s(ec,"encoder-only"),ec.forEach(o),Ur=s(L,`, such as `),Tt=n(L,"A",{href:!0});var tc=c(Tt);Rr=s(tc,"Wav2Vec2"),tc.forEach(o),Hr=s(L," or "),xt=n(L,"A",{href:!0});var oc=c(xt);Jr=s(oc,"HuBERT"),oc.forEach(o),Gr=s(L,` for Speech-to-Text tasks. Please refer to the `),kt=n(L,"A",{href:!0});var rc=c(kt);Kr=s(rc,"SpeechEncoderDecoder"),rc.forEach(o),Qr=s(L," class on how to combine Speech2Text2 with any speech "),io=n(L,"EM",{});var sc=c(io);Xr=s(sc,"encoder-only"),sc.forEach(o),Yr=s(L,` model.`),L.forEach(o),No=d(t),ie=n(t,"P",{});var lr=c(ie);Zr=s(lr,"This model was contributed by "),ze=n(lr,"A",{href:!0,rel:!0});var ac=c(ze);es=s(ac,"Patrick von Platen"),ac.forEach(o),ts=s(lr,"."),lr.forEach(o),Bo=d(t),le=n(t,"P",{});var dr=c(le);os=s(dr,"The original code can be found "),Ce=n(dr,"A",{href:!0,rel:!0});var nc=c(Ce);rs=s(nc,"here"),nc.forEach(o),ss=s(dr,"."),dr.forEach(o),Oo=d(t),bt=n(t,"P",{});var cc=c(bt);as=s(cc,"Tips:"),cc.forEach(o),Uo=d(t),J=n(t,"UL",{});var Kt=c(J);qe=n(Kt,"LI",{});var hr=c(qe);ns=s(hr,`Speech2Text2 achieves state-of-the-art results on the CoVoST Speech Translation dataset. 
For more information, see the `),Ae=n(hr,"A",{href:!0,rel:!0});var ic=c(Ae);cs=s(ic,"official models"),ic.forEach(o),is=s(hr," ."),hr.forEach(o),ls=d(Kt),je=n(Kt,"LI",{});var pr=c(je);ds=s(pr,"Speech2Text2 is always used within the "),St=n(pr,"A",{href:!0});var lc=c(St);hs=s(lc,"SpeechEncoderDecoder"),lc.forEach(o),ps=s(pr," framework."),pr.forEach(o),fs=d(Kt),Me=n(Kt,"LI",{});var fr=c(Me);ms=s(fr,"Speech2Text2\u2019s tokenizer is based on "),wt=n(fr,"EM",{});var On=c(wt);us=s(On,"fastBPE <"),Le=n(On,"A",{href:!0,rel:!0});var dc=c(Le);_s=s(dc,"https://github.com/glample/fastBPE>"),dc.forEach(o),On.forEach(o),gs=s(fr,"."),fr.forEach(o),Kt.forEach(o),Ro=d(t),ee=n(t,"H2",{class:!0});var mr=c(ee);de=n(mr,"A",{id:!0,class:!0,href:!0});var hc=c(de);lo=n(hc,"SPAN",{});var pc=c(lo);u(Fe.$$.fragment,pc),pc.forEach(o),hc.forEach(o),vs=d(mr),ho=n(mr,"SPAN",{});var fc=c(ho);Ts=s(fc,"Inference"),fc.forEach(o),mr.forEach(o),Ho=d(t),G=n(t,"P",{});var Qt=c(G);xs=s(Qt,"Speech2Text2\u2019s "),yt=n(Qt,"A",{href:!0});var mc=c(yt);ks=s(mc,"SpeechEncoderDecoderModel"),mc.forEach(o),bs=s(Qt,` model accepts raw waveform input values from speech and makes use of `),$t=n(Qt,"A",{href:!0});var uc=c($t);Ss=s(uc,"generate()"),uc.forEach(o),ws=s(Qt,` to translate the input speech autoregressively to the target language.`),Qt.forEach(o),Jo=d(t),P=n(t,"P",{});var V=c(P);ys=s(V,"The "),Et=n(V,"A",{href:!0});var _c=c(Et);$s=s(_c,"Wav2Vec2FeatureExtractor"),_c.forEach(o),Es=s(V,` class is responsible for preprocessing the input speech and `),Pt=n(V,"A",{href:!0});var gc=c(Pt);Ps=s(gc,"Speech2Text2Tokenizer"),gc.forEach(o),zs=s(V,` decodes the generated target tokens to the target string. The `),zt=n(V,"A",{href:!0});var vc=c(zt);Cs=s(vc,"Speech2Text2Processor"),vc.forEach(o),qs=s(V," wraps "),Ct=n(V,"A",{href:!0});var Tc=c(Ct);As=s(Tc,"Wav2Vec2FeatureExtractor"),Tc.forEach(o),js=s(V,` and `),qt=n(V,"A",{href:!0});var xc=c(qt);Ms=s(xc,"Speech2Text2Tokenizer"),xc.forEach(o),Ls=s(V,` into a single instance to both extract the input features and decode the predicted token ids.`),V.forEach(o),Go=d(t),At=n(t,"UL",{});var kc=c(At);po=n(kc,"LI",{});var bc=c(po);Fs=s(bc,"Step-by-step Speech Translation"),bc.forEach(o),kc.forEach(o),Ko=d(t),u(De.$$.fragment,t),Qo=d(t),jt=n(t,"UL",{});var Sc=c(jt);Ie=n(Sc,"LI",{});var ur=c(Ie);fo=n(ur,"P",{});var wc=c(fo);Ds=s(wc,"Speech Translation via Pipelines"),wc.forEach(o),Is=d(ur),mo=n(ur,"P",{});var yc=c(mo);Ws=s(yc,"The automatic speech recognition pipeline can also be used to translate speech in just a couple lines of code"),yc.forEach(o),ur.forEach(o),Sc.forEach(o),Xo=d(t),u(We.$$.fragment,t),Yo=d(t),he=n(t,"P",{});var _r=c(he);Vs=s(_r,"See "),Ve=n(_r,"A",{href:!0,rel:!0});var $c=c(Ve);Ns=s($c,"model hub"),$c.forEach(o),Bs=s(_r," to look for Speech2Text2 checkpoints."),_r.forEach(o),Zo=d(t),te=n(t,"H2",{class:!0});var gr=c(te);pe=n(gr,"A",{id:!0,class:!0,href:!0});var Ec=c(pe);uo=n(Ec,"SPAN",{});var Pc=c(uo);u(Ne.$$.fragment,Pc),Pc.forEach(o),Ec.forEach(o),Os=d(gr),_o=n(gr,"SPAN",{});var zc=c(_o);Us=s(zc,"Speech2Text2Config"),zc.forEach(o),gr.forEach(o),er=d(t),q=n(t,"DIV",{class:!0});var Z=c(q);u(Be.$$.fragment,Z),Rs=d(Z),oe=n(Z,"P",{});var Xt=c(oe);Hs=s(Xt,"This is the configuration class to store the configuration of a "),Mt=n(Xt,"A",{href:!0});var Cc=c(Mt);Js=s(Cc,"Speech2Text2ForCausalLM"),Cc.forEach(o),Gs=s(Xt,`. It is used to instantiate an Speech2Text2 model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 `),Oe=n(Xt,"A",{href:!0,rel:!0});var qc=c(Oe);Ks=s(qc,"facebook/s2t-small-librispeech-asr"),qc.forEach(o),Qs=s(Xt," architecture."),Xt.forEach(o),Xs=d(Z),re=n(Z,"P",{});var Yt=c(re);Ys=s(Yt,"Configuration objects inherit from "),Lt=n(Yt,"A",{href:!0});var Ac=c(Lt);Zs=s(Ac,"PretrainedConfig"),Ac.forEach(o),ea=s(Yt,` and can be used to control the model outputs. Read the documentation from `),Ft=n(Yt,"A",{href:!0});var jc=c(Ft);ta=s(jc,"PretrainedConfig"),jc.forEach(o),oa=s(Yt," for more information."),Yt.forEach(o),ra=d(Z),go=n(Z,"P",{});var Mc=c(go);sa=s(Mc,"Example:"),Mc.forEach(o),aa=d(Z),u(Ue.$$.fragment,Z),Z.forEach(o),tr=d(t),se=n(t,"H2",{class:!0});var vr=c(se);fe=n(vr,"A",{id:!0,class:!0,href:!0});var Lc=c(fe);vo=n(Lc,"SPAN",{});var Fc=c(vo);u(Re.$$.fragment,Fc),Fc.forEach(o),Lc.forEach(o),na=d(vr),To=n(vr,"SPAN",{});var Dc=c(To);ca=s(Dc,"Speech2TextTokenizer"),Dc.forEach(o),vr.forEach(o),or=d(t),E=n(t,"DIV",{class:!0});var N=c(E);u(He.$$.fragment,N),ia=d(N),xo=n(N,"P",{});var Ic=c(xo);la=s(Ic,"Constructs a Speech2Text2Tokenizer."),Ic.forEach(o),da=d(N),Je=n(N,"P",{});var Tr=c(Je);ha=s(Tr,"This tokenizer inherits from "),Dt=n(Tr,"A",{href:!0});var Wc=c(Dt);pa=s(Wc,"PreTrainedTokenizer"),Wc.forEach(o),fa=s(Tr,` which contains some of the main methods. Users should refer to the superclass for more information regarding such methods.`),Tr.forEach(o),ma=d(N),me=n(N,"DIV",{class:!0});var xr=c(me);u(Ge.$$.fragment,xr),ua=d(xr),ko=n(xr,"P",{});var Vc=c(ko);_a=s(Vc,"Convert a list of lists of token ids into a list of strings by calling decode."),Vc.forEach(o),xr.forEach(o),ga=d(N),K=n(N,"DIV",{class:!0});var Zt=c(K);u(Ke.$$.fragment,Zt),va=d(Zt),bo=n(Zt,"P",{});var Nc=c(bo);Ta=s(Nc,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Nc.forEach(o),xa=d(Zt),Qe=n(Zt,"P",{});var kr=c(Qe);ka=s(kr,"Similar to doing "),So=n(kr,"CODE",{});var Bc=c(So);ba=s(Bc,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Bc.forEach(o),Sa=s(kr,"."),kr.forEach(o),Zt.forEach(o),wa=d(N),wo=n(N,"DIV",{class:!0}),c(wo).forEach(o),N.forEach(o),rr=d(t),ae=n(t,"H2",{class:!0});var br=c(ae);ue=n(br,"A",{id:!0,class:!0,href:!0});var Oc=c(ue);yo=n(Oc,"SPAN",{});var Uc=c(yo);u(Xe.$$.fragment,Uc),Uc.forEach(o),Oc.forEach(o),ya=d(br),$o=n(br,"SPAN",{});var Rc=c($o);$a=s(Rc,"Speech2Text2Processor"),Rc.forEach(o),br.forEach(o),sr=d(t),k=n(t,"DIV",{class:!0});var $=c(k);u(Ye.$$.fragment,$),Ea=d($),Eo=n($,"P",{});var Hc=c(Eo);Pa=s(Hc,`Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor.`),Hc.forEach(o),za=d($),M=n($,"P",{});var R=c(M);It=n(R,"A",{href:!0});var Jc=c(It);Ca=s(Jc,"Speech2Text2Processor"),Jc.forEach(o),qa=s(R,` offers all the functionalities of `),Wt=n(R,"A",{href:!0});var Gc=c(Wt);Aa=s(Gc,"AutoFeatureExtractor"),Gc.forEach(o),ja=s(R," and "),Vt=n(R,"A",{href:!0});var Kc=c(Vt);Ma=s(Kc,"Speech2Text2Tokenizer"),Kc.forEach(o),La=s(R,`. 
See the `),Ze=n(R,"A",{href:!0});var Un=c(Ze);Po=n(Un,"STRONG",{});var Qc=c(Po);Fa=s(Qc,"call"),Qc.forEach(o),Da=s(Un,"()"),Un.forEach(o),Ia=s(R," and "),Nt=n(R,"A",{href:!0});var Xc=c(Nt);Wa=s(Xc,"decode()"),Xc.forEach(o),Va=s(R,` for more information.`),R.forEach(o),Na=d($),_e=n($,"DIV",{class:!0});var Sr=c(_e);u(et.$$.fragment,Sr),Ba=d(Sr),O=n(Sr,"P",{});var Se=c(O);Oa=s(Se,`When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor\u2019s `),zo=n(Se,"CODE",{});var Yc=c(zo);Ua=s(Yc,"__call__()"),Yc.forEach(o),Ra=s(Se,` and returns its output. If used in the context `),Bt=n(Se,"A",{href:!0});var Zc=c(Bt);Ha=s(Zc,"as_target_processor()"),Zc.forEach(o),Ja=s(Se,` this method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),tt=n(Se,"A",{href:!0});var Rn=c(tt);Co=n(Rn,"STRONG",{});var ei=c(Co);Ga=s(ei,"call"),ei.forEach(o),Ka=s(Rn,"()"),Rn.forEach(o),Qa=s(Se,`. Please refer to the doctsring of the above two methods for more information.`),Se.forEach(o),Sr.forEach(o),Xa=d($),Q=n($,"DIV",{class:!0});var eo=c(Q);u(ot.$$.fragment,eo),Ya=d(eo),rt=n(eo,"P",{});var wr=c(rt);Za=s(wr,"Instantiate a "),Ot=n(wr,"A",{href:!0});var ti=c(Ot);en=s(ti,"Speech2Text2Processor"),ti.forEach(o),tn=s(wr," from a pretrained Speech2Text2 processor."),wr.forEach(o),on=d(eo),u(ge.$$.fragment,eo),eo.forEach(o),rn=d($),X=n($,"DIV",{class:!0});var to=c(X);u(st.$$.fragment,to),sn=d(to),ne=n(to,"P",{});var oo=c(ne);an=s(oo,`Save a Speech2Text2 feature extractor object and Speech2Text2 tokenizer object to the directory `),qo=n(oo,"CODE",{});var oi=c(qo);nn=s(oi,"save_directory"),oi.forEach(o),cn=s(oo,`, so that it can be re-loaded using the `),Ut=n(oo,"A",{href:!0});var ri=c(Ut);ln=s(ri,"from_pretrained()"),ri.forEach(o),dn=s(oo," class method."),oo.forEach(o),hn=d(to),u(ve.$$.fragment,to),to.forEach(o),pn=d($),Te=n($,"DIV",{class:!0});var yr=c(Te);u(at.$$.fragment,yr),fn=d(yr),nt=n(yr,"P",{});var $r=c(nt);mn=s($r,`This method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),Rt=n($r,"A",{href:!0});var si=c(Rt);un=s(si,"batch_decode()"),si.forEach(o),_n=s($r,`. Please refer to the docstring of this method for more information.`),$r.forEach(o),yr.forEach(o),gn=d($),xe=n($,"DIV",{class:!0});var Er=c(xe);u(ct.$$.fragment,Er),vn=d(Er),it=n(Er,"P",{});var Pr=c(it);Tn=s(Pr,`This method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),Ht=n(Pr,"A",{href:!0});var ai=c(Ht);xn=s(ai,"decode()"),ai.forEach(o),kn=s(Pr,`. Please refer to the docstring of this method for more information.`),Pr.forEach(o),Er.forEach(o),bn=d($),ke=n($,"DIV",{class:!0});var zr=c(ke);u(lt.$$.fragment,zr),Sn=d(zr),Ao=n(zr,"P",{});var ni=c(Ao);wn=s(ni,`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text2.`),ni.forEach(o),zr.forEach(o),$.forEach(o),ar=d(t),ce=n(t,"H2",{class:!0});var Cr=c(ce);be=n(Cr,"A",{id:!0,class:!0,href:!0});var ci=c(be);jo=n(ci,"SPAN",{});var ii=c(jo);u(dt.$$.fragment,ii),ii.forEach(o),ci.forEach(o),yn=d(Cr),Mo=n(Cr,"SPAN",{});var li=c(Mo);$n=s(li,"Speech2Text2ForCausalLM"),li.forEach(o),Cr.forEach(o),nr=d(t),I=n(t,"DIV",{class:!0});var we=c(I);u(ht.$$.fragment,we),En=d(we),U=n(we,"P",{});var ye=c(U);Pn=s(ye,"The Speech2Text2 Decoder with a language modeling head. Can be used as the decoder part of "),Jt=n(ye,"A",{href:!0});var di=c(Jt);zn=s(di,"EncoderDecoderModel"),di.forEach(o),Cn=s(ye," and "),Lo=n(ye,"CODE",{});var hi=c(Lo);qn=s(hi,"SpeechEncoderDecoder"),hi.forEach(o),An=s(ye,`. 
This model inherits from [`),Fo=n(ye,"EM",{});var pi=c(Fo);jn=s(pi,"PreTrainedModel"),pi.forEach(o),Mn=s(ye,`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ye.forEach(o),Ln=d(we),pt=n(we,"P",{});var qr=c(pt);Fn=s(qr,"This model is also a PyTorch "),ft=n(qr,"A",{href:!0,rel:!0});var fi=c(ft);Dn=s(fi,"torch.nn.Module"),fi.forEach(o),In=s(qr,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qr.forEach(o),Wn=d(we),Y=n(we,"DIV",{class:!0});var ro=c(Y);u(mt.$$.fragment,ro),Vn=d(ro),Do=n(ro,"P",{});var mi=c(Do);Nn=s(mi,"Example:"),mi.forEach(o),Bn=d(ro),u(ut.$$.fragment,ro),ro.forEach(o),we.forEach(o),this.h()},h(){i(f,"name","hf:doc:metadata"),i(f,"content",JSON.stringify(Si)),i(S,"id","speech2text2"),i(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(S,"href","#speech2text2"),i(x,"class","relative group"),i(j,"id","overview"),i(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(j,"href","#overview"),i(C,"class","relative group"),i(vt,"href","/docs/transformers/v4.15.0/en/wav2vec2"),i(Pe,"href","https://arxiv.org/abs/2104.06678"),i(Pe,"rel","nofollow"),i(Tt,"href","/docs/transformers/v4.15.0/en/wav2vec2"),i(xt,"href","/docs/transformers/v4.15.0/en/hubert"),i(kt,"href","/docs/transformers/v4.15.0/en/speechencoderdecoder"),i(ze,"href","https://huggingface.co/patrickvonplaten"),i(ze,"rel","nofollow"),i(Ce,"href","https://github.com/pytorch/fairseq/blob/1f7ef9ed1e1061f8c7f88f8b94c7186834398690/fairseq/models/wav2vec/wav2vec2_asr.py#L266"),i(Ce,"rel","nofollow"),i(Ae,"href","https://huggingface.co/models?other=speech2text2"),i(Ae,"rel","nofollow"),i(St,"href","/docs/transformers/v4.15.0/en/speechencoderdecoder"),i(Le,"href","https://github.com/glample/fastBPE%3E"),i(Le,"rel","nofollow"),i(de,"id","inference"),i(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(de,"href","#inference"),i(ee,"class","relative group"),i(yt,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),i($t,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),i(Et,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),i(Pt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),i(zt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor"),i(Ct,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),i(qt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),i(Ve,"href","https://huggingface.co/models?filter=speech2text2"),i(Ve,"rel","nofollow"),i(pe,"id","transformers.Speech2Text2Config"),i(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),i(pe,"href","#transformers.Speech2Text2Config"),i(te,"class","relative group"),i(Mt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2ForCausalLM"),i(Oe,"href","https://huggingface.co/facebook/s2t-small-librispeech-asr"),i(Oe,"rel","nofollow"),i(Lt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(Ft,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(q,"class","docstring"),i(fe,"id","transformers.Speech2Text2Tokenizer"),i(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(fe,"href","#transformers.Speech2Text2Tokenizer"),i(se,"class","relative group"),i(Dt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),i(me,"class","docstring"),i(K,"class","docstring"),i(wo,"class","docstring"),i(E,"class","docstring"),i(ue,"id","transformers.Speech2Text2Processor"),i(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(ue,"href","#transformers.Speech2Text2Processor"),i(ae,"class","relative group"),i(It,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor"),i(Wt,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoFeatureExtractor"),i(Vt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),i(Ze,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor.__call__"),i(Nt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor.decode"),i(Bt,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor.as_target_processor"),i(tt,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),i(_e,"class","docstring"),i(Ot,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor"),i(Q,"class","docstring"),i(Ut,"href","/docs/transformers/v4.15.0/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor.from_pretrained"),i(X,"class","docstring"),i(Rt,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),i(Te,"class","docstring"),i(Ht,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),i(xe,"class","docstring"),i(ke,"class","docstring"),i(k,"class","docstring"),i(be,"id","transformers.Speech2Text2ForCausalLM"),i(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(be,"href","#transformers.Speech2Text2ForCausalLM"),i(ce,"class","relative 
group"),i(Jt,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),i(ft,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(ft,"rel","nofollow"),i(Y,"class","docstring"),i(I,"class","docstring")},m(t,h){e(document.head,f),p(t,A,h),p(t,x,h),e(x,S),e(S,F),_(w,F,null),e(x,B),e(x,D),e(D,z),p(t,b,h),p(t,C,h),e(C,j),e(j,so),_(Ee,so,null),e(C,jr),e(C,ao),e(ao,Mr),p(t,Wo,h),p(t,H,h),e(H,Lr),e(H,vt),e(vt,Fr),e(H,Dr),e(H,Pe),e(Pe,Ir),e(H,Wr),p(t,Vo,h),p(t,y,h),e(y,Vr),e(y,no),e(no,Nr),e(y,Br),e(y,co),e(co,Or),e(y,Ur),e(y,Tt),e(Tt,Rr),e(y,Hr),e(y,xt),e(xt,Jr),e(y,Gr),e(y,kt),e(kt,Kr),e(y,Qr),e(y,io),e(io,Xr),e(y,Yr),p(t,No,h),p(t,ie,h),e(ie,Zr),e(ie,ze),e(ze,es),e(ie,ts),p(t,Bo,h),p(t,le,h),e(le,os),e(le,Ce),e(Ce,rs),e(le,ss),p(t,Oo,h),p(t,bt,h),e(bt,as),p(t,Uo,h),p(t,J,h),e(J,qe),e(qe,ns),e(qe,Ae),e(Ae,cs),e(qe,is),e(J,ls),e(J,je),e(je,ds),e(je,St),e(St,hs),e(je,ps),e(J,fs),e(J,Me),e(Me,ms),e(Me,wt),e(wt,us),e(wt,Le),e(Le,_s),e(Me,gs),p(t,Ro,h),p(t,ee,h),e(ee,de),e(de,lo),_(Fe,lo,null),e(ee,vs),e(ee,ho),e(ho,Ts),p(t,Ho,h),p(t,G,h),e(G,xs),e(G,yt),e(yt,ks),e(G,bs),e(G,$t),e($t,Ss),e(G,ws),p(t,Jo,h),p(t,P,h),e(P,ys),e(P,Et),e(Et,$s),e(P,Es),e(P,Pt),e(Pt,Ps),e(P,zs),e(P,zt),e(zt,Cs),e(P,qs),e(P,Ct),e(Ct,As),e(P,js),e(P,qt),e(qt,Ms),e(P,Ls),p(t,Go,h),p(t,At,h),e(At,po),e(po,Fs),p(t,Ko,h),_(De,t,h),p(t,Qo,h),p(t,jt,h),e(jt,Ie),e(Ie,fo),e(fo,Ds),e(Ie,Is),e(Ie,mo),e(mo,Ws),p(t,Xo,h),_(We,t,h),p(t,Yo,h),p(t,he,h),e(he,Vs),e(he,Ve),e(Ve,Ns),e(he,Bs),p(t,Zo,h),p(t,te,h),e(te,pe),e(pe,uo),_(Ne,uo,null),e(te,Os),e(te,_o),e(_o,Us),p(t,er,h),p(t,q,h),_(Be,q,null),e(q,Rs),e(q,oe),e(oe,Hs),e(oe,Mt),e(Mt,Js),e(oe,Gs),e(oe,Oe),e(Oe,Ks),e(oe,Qs),e(q,Xs),e(q,re),e(re,Ys),e(re,Lt),e(Lt,Zs),e(re,ea),e(re,Ft),e(Ft,ta),e(re,oa),e(q,ra),e(q,go),e(go,sa),e(q,aa),_(Ue,q,null),p(t,tr,h),p(t,se,h),e(se,fe),e(fe,vo),_(Re,vo,null),e(se,na),e(se,To),e(To,ca),p(t,or,h),p(t,E,h),_(He,E,null),e(E,ia),e(E,xo),e(xo,la),e(E,da),e(E,Je),e(Je,ha),e(Je,Dt),e(Dt,pa),e(Je,fa),e(E,ma),e(E,me),_(Ge,me,null),e(me,ua),e(me,ko),e(ko,_a),e(E,ga),e(E,K),_(Ke,K,null),e(K,va),e(K,bo),e(bo,Ta),e(K,xa),e(K,Qe),e(Qe,ka),e(Qe,So),e(So,ba),e(Qe,Sa),e(E,wa),e(E,wo),p(t,rr,h),p(t,ae,h),e(ae,ue),e(ue,yo),_(Xe,yo,null),e(ae,ya),e(ae,$o),e($o,$a),p(t,sr,h),p(t,k,h),_(Ye,k,null),e(k,Ea),e(k,Eo),e(Eo,Pa),e(k,za),e(k,M),e(M,It),e(It,Ca),e(M,qa),e(M,Wt),e(Wt,Aa),e(M,ja),e(M,Vt),e(Vt,Ma),e(M,La),e(M,Ze),e(Ze,Po),e(Po,Fa),e(Ze,Da),e(M,Ia),e(M,Nt),e(Nt,Wa),e(M,Va),e(k,Na),e(k,_e),_(et,_e,null),e(_e,Ba),e(_e,O),e(O,Oa),e(O,zo),e(zo,Ua),e(O,Ra),e(O,Bt),e(Bt,Ha),e(O,Ja),e(O,tt),e(tt,Co),e(Co,Ga),e(tt,Ka),e(O,Qa),e(k,Xa),e(k,Q),_(ot,Q,null),e(Q,Ya),e(Q,rt),e(rt,Za),e(rt,Ot),e(Ot,en),e(rt,tn),e(Q,on),_(ge,Q,null),e(k,rn),e(k,X),_(st,X,null),e(X,sn),e(X,ne),e(ne,an),e(ne,qo),e(qo,nn),e(ne,cn),e(ne,Ut),e(Ut,ln),e(ne,dn),e(X,hn),_(ve,X,null),e(k,pn),e(k,Te),_(at,Te,null),e(Te,fn),e(Te,nt),e(nt,mn),e(nt,Rt),e(Rt,un),e(nt,_n),e(k,gn),e(k,xe),_(ct,xe,null),e(xe,vn),e(xe,it),e(it,Tn),e(it,Ht),e(Ht,xn),e(it,kn),e(k,bn),e(k,ke),_(lt,ke,null),e(ke,Sn),e(ke,Ao),e(Ao,wn),p(t,ar,h),p(t,ce,h),e(ce,be),e(be,jo),_(dt,jo,null),e(ce,yn),e(ce,Mo),e(Mo,$n),p(t,nr,h),p(t,I,h),_(ht,I,null),e(I,En),e(I,U),e(U,Pn),e(U,Jt),e(Jt,zn),e(U,Cn),e(U,Lo),e(Lo,qn),e(U,An),e(U,Fo),e(Fo,jn),e(U,Mn),e(I,Ln),e(I,pt),e(pt,Fn),e(pt,ft),e(ft,Dn),e(pt,In),e(I,Wn),e(I,Y),_(mt,Y,null),e(Y,Vn),e(Y,Do),e(Do,Nn),e(Y,Bn),_(ut,Y,null),cr=!0},p(t,[h]){const _t={};h&2&&(_t.$$scope={dirty:h,ctx:t}),ge.$set(_t);const 
Io={};h&2&&(Io.$$scope={dirty:h,ctx:t}),ve.$set(Io)},i(t){cr||(g(w.$$.fragment,t),g(Ee.$$.fragment,t),g(Fe.$$.fragment,t),g(De.$$.fragment,t),g(We.$$.fragment,t),g(Ne.$$.fragment,t),g(Be.$$.fragment,t),g(Ue.$$.fragment,t),g(Re.$$.fragment,t),g(He.$$.fragment,t),g(Ge.$$.fragment,t),g(Ke.$$.fragment,t),g(Xe.$$.fragment,t),g(Ye.$$.fragment,t),g(et.$$.fragment,t),g(ot.$$.fragment,t),g(ge.$$.fragment,t),g(st.$$.fragment,t),g(ve.$$.fragment,t),g(at.$$.fragment,t),g(ct.$$.fragment,t),g(lt.$$.fragment,t),g(dt.$$.fragment,t),g(ht.$$.fragment,t),g(mt.$$.fragment,t),g(ut.$$.fragment,t),cr=!0)},o(t){v(w.$$.fragment,t),v(Ee.$$.fragment,t),v(Fe.$$.fragment,t),v(De.$$.fragment,t),v(We.$$.fragment,t),v(Ne.$$.fragment,t),v(Be.$$.fragment,t),v(Ue.$$.fragment,t),v(Re.$$.fragment,t),v(He.$$.fragment,t),v(Ge.$$.fragment,t),v(Ke.$$.fragment,t),v(Xe.$$.fragment,t),v(Ye.$$.fragment,t),v(et.$$.fragment,t),v(ot.$$.fragment,t),v(ge.$$.fragment,t),v(st.$$.fragment,t),v(ve.$$.fragment,t),v(at.$$.fragment,t),v(ct.$$.fragment,t),v(lt.$$.fragment,t),v(dt.$$.fragment,t),v(ht.$$.fragment,t),v(mt.$$.fragment,t),v(ut.$$.fragment,t),cr=!1},d(t){o(f),t&&o(A),t&&o(x),T(w),t&&o(b),t&&o(C),T(Ee),t&&o(Wo),t&&o(H),t&&o(Vo),t&&o(y),t&&o(No),t&&o(ie),t&&o(Bo),t&&o(le),t&&o(Oo),t&&o(bt),t&&o(Uo),t&&o(J),t&&o(Ro),t&&o(ee),T(Fe),t&&o(Ho),t&&o(G),t&&o(Jo),t&&o(P),t&&o(Go),t&&o(At),t&&o(Ko),T(De,t),t&&o(Qo),t&&o(jt),t&&o(Xo),T(We,t),t&&o(Yo),t&&o(he),t&&o(Zo),t&&o(te),T(Ne),t&&o(er),t&&o(q),T(Be),T(Ue),t&&o(tr),t&&o(se),T(Re),t&&o(or),t&&o(E),T(He),T(Ge),T(Ke),t&&o(rr),t&&o(ae),T(Xe),t&&o(sr),t&&o(k),T(Ye),T(et),T(ot),T(ge),T(st),T(ve),T(at),T(ct),T(lt),t&&o(ar),t&&o(ce),T(dt),t&&o(nr),t&&o(I),T(ht),T(mt),T(ut)}}}const Si={local:"speech2text2",sections:[{local:"overview",title:"Overview"},{local:"inference",title:"Inference"},{local:"transformers.Speech2Text2Config",title:"Speech2Text2Config"},{local:"transformers.Speech2Text2Tokenizer",title:"Speech2TextTokenizer"},{local:"transformers.Speech2Text2Processor",title:"Speech2Text2Processor"},{local:"transformers.Speech2Text2ForCausalLM",title:"Speech2Text2ForCausalLM"}],title:"Speech2Text2"};function wi($e,f,A){let{fw:x}=f;return $e.$$set=S=>{"fw"in S&&A(0,x=S.fw)},[x]}class qi extends _i{constructor(f){super();gi(this,f,wi,bi,vi,{fw:0})}}export{qi as default,Si as metadata};
9,916
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/wav2vec2_phoneme.mdx-0418471f.js
import{S as Wt,i as Et,s as Ct,e as n,k as l,w as X,t as s,L as $t,c as r,d as t,m as c,a,x as j,h as i,b as p,J as o,g as h,y as J,K as Vt,q as Q,o as Z,B as K}from"../../chunks/vendor-b1433968.js";import{D as Je}from"../../chunks/Docstring-ff504c58.js";import{I as Go}from"../../chunks/IconCopyLink-7029626d.js";function Lt(Qe){let v,G,_,g,ae,$,Ze,se,Ke,ye,b,y,ie,V,Ge,le,Ye,ze,z,eo,L,oo,to,Pe,Y,no,xe,ee,ce,ro,qe,oe,ao,We,u,de,so,io,pe,lo,co,B,po,te,ho,mo,uo,he,fo,_o,me,go,Ee,P,vo,I,ko,To,Ce,F,bo,A,wo,$e,x,yo,N,zo,Po,Ve,q,xo,ue,qo,Wo,Le,w,W,fe,D,Eo,_e,Co,Be,m,S,$o,ge,Vo,Lo,R,Bo,ne,Io,Fo,Ao,E,U,No,ve,Do,So,C,O,Ro,ke,Uo,Oo,k,M,Mo,Te,Ho,Xo,H,jo,be,Jo,Qo,Zo,we,Ie;return $=new Go({}),V=new Go({}),D=new Go({}),S=new Je({props:{name:"class transformers.Wav2Vec2PhonemeCTCTokenizer",anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"phone_delimiter_token",val:" = ' '"},{name:"word_delimiter_token",val:" = None"},{name:"do_phonemize",val:" = True"},{name:"phonemizer_lang",val:" = 'en-us'"},{name:"phonemizer_backend",val:" = 'espeak'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py#L50",parametersDescription:[{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.do_phonemize",description:`<strong>do_phonemize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the tokenizer should phonetize the input or not. 
Only if a sequence of phonemes is passed to the tokenizer, <code>do_phonemize</code> should be set to <code>False</code>.`,name:"do_phonemize"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.phonemizer_lang",description:`<strong>phonemizer_lang</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;en-us&quot;</code>) &#x2014; The language of the phoneme set to which the tokenizer should phonetize the input text to.`,name:"phonemizer_lang"},{anchor:"transformers.Wav2Vec2PhonemeCTCTokenizer.phonemizer_backend",description:`<strong>phonemizer_backend</strong> (<code>str</code>, <em>optional</em>. defaults to <code>&quot;espeak&quot;</code>) &#x2014; The backend phonetization library that shall be used by the phonemizer library. Defaults to <code>espeak-ng</code>. See the <a href="https://github.com/bootphon/phonemizer#readme" rel="nofollow">phonemizer package</a>. for more information.</p> <p>**kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>`,name:"phonemizer_backend"}]}}),U=new Je({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2334",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),O=new Je({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerBase.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3178",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),M=new Je({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerBase.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3211",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerBase.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),{c(){v=n("meta"),G=l(),_=n("h1"),g=n("a"),ae=n("span"),X($.$$.fragment),Ze=l(),se=n("span"),Ke=s("Wav2Vec2Phoneme"),ye=l(),b=n("h2"),y=n("a"),ie=n("span"),X(V.$$.fragment),Ge=l(),le=n("span"),Ye=s("Overview"),ze=l(),z=n("p"),eo=s("The Wav2Vec2Phoneme model was proposed in "),L=n("a"),oo=s(`Simple and Effective Zero-shot Cross-lingual Phoneme Recognition (Xu et al., 2021`),to=s(" by Qiantong Xu, Alexei Baevski, Michael Auli."),Pe=l(),Y=n("p"),no=s("The abstract from the paper is the following:"),xe=l(),ee=n("p"),ce=n("em"),ro=s(`Recent progress in self-training, self-supervised pretraining and unsupervised learning enabled well performing speech recognition systems without any labeled data. 
However, in many cases there is labeled data available for related languages which is not utilized by these methods. This paper extends previous work on zero-shot cross-lingual transfer learning by fine-tuning a multilingually pretrained wav2vec 2.0 model to transcribe unseen languages. This is done by mapping phonemes of the training languages to the target language using articulatory features. Experiments show that this simple method significantly outperforms prior work which introduced task-specific architectures and used only part of a monolingually pretrained model.`),qe=l(),oe=n("p"),ao=s("Tips:"),We=l(),u=n("ul"),de=n("li"),so=s("Wav2Vec2Phoneme uses the exact same architecture as Wav2Vec2"),io=l(),pe=n("li"),lo=s("Wav2Vec2Phoneme is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),co=l(),B=n("li"),po=s(`Wav2Vec2Phoneme model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),te=n("a"),ho=s("Wav2Vec2PhonemeCTCTokenizer"),mo=s("."),uo=l(),he=n("li"),fo=s(`Wav2Vec2Phoneme can be fine-tuned on multiple language at once and decode unseen languages in a single forward pass to a sequence of phonemes`),_o=l(),me=n("li"),go=s(`By default the model outputs a sequence of phonemes. In order to transform the phonemes to a sequence of words one should make use of a dictionary and language model.`),Ee=l(),P=n("p"),vo=s("Relevant checkpoints can be found under "),I=n("a"),ko=s("https://huggingface.co/models?other=phoneme-recognition"),To=s("."),Ce=l(),F=n("p"),bo=s("This model was contributed by "),A=n("a"),wo=s("patrickvonplaten"),$e=l(),x=n("p"),yo=s("The original code can be found "),N=n("a"),zo=s("here"),Po=s("."),Ve=l(),q=n("p"),xo=s("Wav2Vec2Phoneme\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),ue=n("code"),qo=s("Wav2Vec2"),Wo=s("\u2019s documentation page except for the tokenizer."),Le=l(),w=n("h2"),W=n("a"),fe=n("span"),X(D.$$.fragment),Eo=l(),_e=n("span"),Co=s("Wav2Vec2PhonemeCTCTokenizer"),Be=l(),m=n("div"),X(S.$$.fragment),$o=l(),ge=n("p"),Vo=s("Constructs a Wav2Vec2PhonemeCTC tokenizer."),Lo=l(),R=n("p"),Bo=s("This tokenizer inherits from "),ne=n("a"),Io=s("PreTrainedTokenizer"),Fo=s(` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),Ao=l(),E=n("div"),X(U.$$.fragment),No=l(),ve=n("p"),Do=s(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),So=l(),C=n("div"),X(O.$$.fragment),Ro=l(),ke=n("p"),Uo=s("Convert a list of lists of token ids into a list of strings by calling decode."),Oo=l(),k=n("div"),X(M.$$.fragment),Mo=l(),Te=n("p"),Ho=s(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Xo=l(),H=n("p"),jo=s("Similar to doing "),be=n("code"),Jo=s("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Qo=s("."),Zo=l(),we=n("div"),this.h()},l(e){const d=$t('[data-svelte="svelte-1phssyn"]',document.head);v=r(d,"META",{name:!0,content:!0}),d.forEach(t),G=c(e),_=r(e,"H1",{class:!0});var Fe=a(_);g=r(Fe,"A",{id:!0,class:!0,href:!0});var Yo=a(g);ae=r(Yo,"SPAN",{});var et=a(ae);j($.$$.fragment,et),et.forEach(t),Yo.forEach(t),Ze=c(Fe),se=r(Fe,"SPAN",{});var ot=a(se);Ke=i(ot,"Wav2Vec2Phoneme"),ot.forEach(t),Fe.forEach(t),ye=c(e),b=r(e,"H2",{class:!0});var Ae=a(b);y=r(Ae,"A",{id:!0,class:!0,href:!0});var tt=a(y);ie=r(tt,"SPAN",{});var nt=a(ie);j(V.$$.fragment,nt),nt.forEach(t),tt.forEach(t),Ge=c(Ae),le=r(Ae,"SPAN",{});var rt=a(le);Ye=i(rt,"Overview"),rt.forEach(t),Ae.forEach(t),ze=c(e),z=r(e,"P",{});var Ne=a(z);eo=i(Ne,"The Wav2Vec2Phoneme model was proposed in "),L=r(Ne,"A",{href:!0,rel:!0});var at=a(L);oo=i(at,`Simple and Effective Zero-shot Cross-lingual Phoneme Recognition (Xu et al., 2021`),at.forEach(t),to=i(Ne," by Qiantong Xu, Alexei Baevski, Michael Auli."),Ne.forEach(t),Pe=c(e),Y=r(e,"P",{});var st=a(Y);no=i(st,"The abstract from the paper is the following:"),st.forEach(t),xe=c(e),ee=r(e,"P",{});var it=a(ee);ce=r(it,"EM",{});var lt=a(ce);ro=i(lt,`Recent progress in self-training, self-supervised pretraining and unsupervised learning enabled well performing speech recognition systems without any labeled data. However, in many cases there is labeled data available for related languages which is not utilized by these methods. This paper extends previous work on zero-shot cross-lingual transfer learning by fine-tuning a multilingually pretrained wav2vec 2.0 model to transcribe unseen languages. This is done by mapping phonemes of the training languages to the target language using articulatory features. 
Experiments show that this simple method significantly outperforms prior work which introduced task-specific architectures and used only part of a monolingually pretrained model.`),lt.forEach(t),it.forEach(t),qe=c(e),oe=r(e,"P",{});var ct=a(oe);ao=i(ct,"Tips:"),ct.forEach(t),We=c(e),u=r(e,"UL",{});var T=a(u);de=r(T,"LI",{});var dt=a(de);so=i(dt,"Wav2Vec2Phoneme uses the exact same architecture as Wav2Vec2"),dt.forEach(t),io=c(T),pe=r(T,"LI",{});var pt=a(pe);lo=i(pt,"Wav2Vec2Phoneme is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),pt.forEach(t),co=c(T),B=r(T,"LI",{});var De=a(B);po=i(De,`Wav2Vec2Phoneme model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),te=r(De,"A",{href:!0});var ht=a(te);ho=i(ht,"Wav2Vec2PhonemeCTCTokenizer"),ht.forEach(t),mo=i(De,"."),De.forEach(t),uo=c(T),he=r(T,"LI",{});var mt=a(he);fo=i(mt,`Wav2Vec2Phoneme can be fine-tuned on multiple language at once and decode unseen languages in a single forward pass to a sequence of phonemes`),mt.forEach(t),_o=c(T),me=r(T,"LI",{});var ut=a(me);go=i(ut,`By default the model outputs a sequence of phonemes. In order to transform the phonemes to a sequence of words one should make use of a dictionary and language model.`),ut.forEach(t),T.forEach(t),Ee=c(e),P=r(e,"P",{});var Se=a(P);vo=i(Se,"Relevant checkpoints can be found under "),I=r(Se,"A",{href:!0,rel:!0});var ft=a(I);ko=i(ft,"https://huggingface.co/models?other=phoneme-recognition"),ft.forEach(t),To=i(Se,"."),Se.forEach(t),Ce=c(e),F=r(e,"P",{});var Ko=a(F);bo=i(Ko,"This model was contributed by "),A=r(Ko,"A",{href:!0,rel:!0});var _t=a(A);wo=i(_t,"patrickvonplaten"),_t.forEach(t),Ko.forEach(t),$e=c(e),x=r(e,"P",{});var Re=a(x);yo=i(Re,"The original code can be found "),N=r(Re,"A",{href:!0,rel:!0});var gt=a(N);zo=i(gt,"here"),gt.forEach(t),Po=i(Re,"."),Re.forEach(t),Ve=c(e),q=r(e,"P",{});var Ue=a(q);xo=i(Ue,"Wav2Vec2Phoneme\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),ue=r(Ue,"CODE",{});var vt=a(ue);qo=i(vt,"Wav2Vec2"),vt.forEach(t),Wo=i(Ue,"\u2019s documentation page except for the tokenizer."),Ue.forEach(t),Le=c(e),w=r(e,"H2",{class:!0});var Oe=a(w);W=r(Oe,"A",{id:!0,class:!0,href:!0});var kt=a(W);fe=r(kt,"SPAN",{});var Tt=a(fe);j(D.$$.fragment,Tt),Tt.forEach(t),kt.forEach(t),Eo=c(Oe),_e=r(Oe,"SPAN",{});var bt=a(_e);Co=i(bt,"Wav2Vec2PhonemeCTCTokenizer"),bt.forEach(t),Oe.forEach(t),Be=c(e),m=r(e,"DIV",{class:!0});var f=a(m);j(S.$$.fragment,f),$o=c(f),ge=r(f,"P",{});var wt=a(ge);Vo=i(wt,"Constructs a Wav2Vec2PhonemeCTC tokenizer."),wt.forEach(t),Lo=c(f),R=r(f,"P",{});var Me=a(R);Bo=i(Me,"This tokenizer inherits from "),ne=r(Me,"A",{href:!0});var yt=a(ne);Io=i(yt,"PreTrainedTokenizer"),yt.forEach(t),Fo=i(Me,` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),Me.forEach(t),Ao=c(f),E=r(f,"DIV",{class:!0});var He=a(E);j(U.$$.fragment,He),No=c(He),ve=r(He,"P",{});var zt=a(ve);Do=i(zt,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),zt.forEach(t),He.forEach(t),So=c(f),C=r(f,"DIV",{class:!0});var Xe=a(C);j(O.$$.fragment,Xe),Ro=c(Xe),ke=r(Xe,"P",{});var Pt=a(ke);Uo=i(Pt,"Convert a list of lists of token ids into a list of strings by calling decode."),Pt.forEach(t),Xe.forEach(t),Oo=c(f),k=r(f,"DIV",{class:!0});var re=a(k);j(M.$$.fragment,re),Mo=c(re),Te=r(re,"P",{});var xt=a(Te);Ho=i(xt,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),xt.forEach(t),Xo=c(re),H=r(re,"P",{});var je=a(H);jo=i(je,"Similar to doing "),be=r(je,"CODE",{});var qt=a(be);Jo=i(qt,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),qt.forEach(t),Qo=i(je,"."),je.forEach(t),re.forEach(t),Zo=c(f),we=r(f,"DIV",{class:!0}),a(we).forEach(t),f.forEach(t),this.h()},h(){p(v,"name","hf:doc:metadata"),p(v,"content",JSON.stringify(Bt)),p(g,"id","wav2vec2phoneme"),p(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(g,"href","#wav2vec2phoneme"),p(_,"class","relative group"),p(y,"id","overview"),p(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(y,"href","#overview"),p(b,"class","relative group"),p(L,"href","https://arxiv.org/abs/2109.11680"),p(L,"rel","nofollow"),p(te,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2_phoneme#transformers.Wav2Vec2PhonemeCTCTokenizer"),p(I,"href","https://huggingface.co/models?other=phoneme-recognition"),p(I,"rel","nofollow"),p(A,"href","https://huggingface.co/patrickvonplaten"),p(A,"rel","nofollow"),p(N,"href","https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec"),p(N,"rel","nofollow"),p(W,"id","transformers.Wav2Vec2PhonemeCTCTokenizer"),p(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(W,"href","#transformers.Wav2Vec2PhonemeCTCTokenizer"),p(w,"class","relative 
group"),p(ne,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(E,"class","docstring"),p(C,"class","docstring"),p(k,"class","docstring"),p(we,"class","docstring"),p(m,"class","docstring")},m(e,d){o(document.head,v),h(e,G,d),h(e,_,d),o(_,g),o(g,ae),J($,ae,null),o(_,Ze),o(_,se),o(se,Ke),h(e,ye,d),h(e,b,d),o(b,y),o(y,ie),J(V,ie,null),o(b,Ge),o(b,le),o(le,Ye),h(e,ze,d),h(e,z,d),o(z,eo),o(z,L),o(L,oo),o(z,to),h(e,Pe,d),h(e,Y,d),o(Y,no),h(e,xe,d),h(e,ee,d),o(ee,ce),o(ce,ro),h(e,qe,d),h(e,oe,d),o(oe,ao),h(e,We,d),h(e,u,d),o(u,de),o(de,so),o(u,io),o(u,pe),o(pe,lo),o(u,co),o(u,B),o(B,po),o(B,te),o(te,ho),o(B,mo),o(u,uo),o(u,he),o(he,fo),o(u,_o),o(u,me),o(me,go),h(e,Ee,d),h(e,P,d),o(P,vo),o(P,I),o(I,ko),o(P,To),h(e,Ce,d),h(e,F,d),o(F,bo),o(F,A),o(A,wo),h(e,$e,d),h(e,x,d),o(x,yo),o(x,N),o(N,zo),o(x,Po),h(e,Ve,d),h(e,q,d),o(q,xo),o(q,ue),o(ue,qo),o(q,Wo),h(e,Le,d),h(e,w,d),o(w,W),o(W,fe),J(D,fe,null),o(w,Eo),o(w,_e),o(_e,Co),h(e,Be,d),h(e,m,d),J(S,m,null),o(m,$o),o(m,ge),o(ge,Vo),o(m,Lo),o(m,R),o(R,Bo),o(R,ne),o(ne,Io),o(R,Fo),o(m,Ao),o(m,E),J(U,E,null),o(E,No),o(E,ve),o(ve,Do),o(m,So),o(m,C),J(O,C,null),o(C,Ro),o(C,ke),o(ke,Uo),o(m,Oo),o(m,k),J(M,k,null),o(k,Mo),o(k,Te),o(Te,Ho),o(k,Xo),o(k,H),o(H,jo),o(H,be),o(be,Jo),o(H,Qo),o(m,Zo),o(m,we),Ie=!0},p:Vt,i(e){Ie||(Q($.$$.fragment,e),Q(V.$$.fragment,e),Q(D.$$.fragment,e),Q(S.$$.fragment,e),Q(U.$$.fragment,e),Q(O.$$.fragment,e),Q(M.$$.fragment,e),Ie=!0)},o(e){Z($.$$.fragment,e),Z(V.$$.fragment,e),Z(D.$$.fragment,e),Z(S.$$.fragment,e),Z(U.$$.fragment,e),Z(O.$$.fragment,e),Z(M.$$.fragment,e),Ie=!1},d(e){t(v),e&&t(G),e&&t(_),K($),e&&t(ye),e&&t(b),K(V),e&&t(ze),e&&t(z),e&&t(Pe),e&&t(Y),e&&t(xe),e&&t(ee),e&&t(qe),e&&t(oe),e&&t(We),e&&t(u),e&&t(Ee),e&&t(P),e&&t(Ce),e&&t(F),e&&t($e),e&&t(x),e&&t(Ve),e&&t(q),e&&t(Le),e&&t(w),K(D),e&&t(Be),e&&t(m),K(S),K(U),K(O),K(M)}}}const Bt={local:"wav2vec2phoneme",sections:[{local:"overview",title:"Overview"},{local:"transformers.Wav2Vec2PhonemeCTCTokenizer",title:"Wav2Vec2PhonemeCTCTokenizer"}],title:"Wav2Vec2Phoneme"};function It(Qe,v,G){let{fw:_}=v;return Qe.$$set=g=>{"fw"in g&&G(0,_=g.fw)},[_]}class Dt extends Wt{constructor(v){super();Et(this,v,It,Lt,Ct,{fw:0})}}export{Dt as default,Bt as metadata};
9917
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/luke.mdx-f25f18a2.js
import{S as tp,i as np,s as op,e as a,k as l,w as _,t as o,L as sp,c as i,d as t,m as c,a as r,x as k,h as s,b as d,J as e,g as u,y,q as v,o as b,B as T}from"../../chunks/vendor-b1433968.js";import{T as Ko}from"../../chunks/Tip-c3840994.js";import{D as K}from"../../chunks/Docstring-ff504c58.js";import{C as In}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ap(O){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=i(g,"P",{});var f=r(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=i(f,"CODE",{});var z=r(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function ip(O){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=i(g,"P",{});var f=r(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=i(f,"CODE",{});var z=r(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function rp(O){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=i(g,"P",{});var f=r(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=i(f,"CODE",{});var z=r(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function dp(O){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=i(g,"P",{});var f=r(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=i(f,"CODE",{});var z=r(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function lp(O){let 
h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=i(g,"P",{});var f=r(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=i(f,"CODE",{});var z=r(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function cp(O){let h,L,m,w,E,g,f,z,Ds,Ro,le,xe,Sn,He,Bs,Nn,Ws,Ho,$e,Os,Ve,Us,Ks,Vo,Jt,Rs,Yo,Qt,Dn,Hs,Jo,Xt,Vs,Qo,R,Bn,Ye,Ys,Gt,Js,Qs,Xs,Wn,q,Gs,On,Zs,ea,Un,ta,na,Kn,oa,sa,Rn,aa,ia,Zt,ra,da,la,Je,H,en,ca,pa,Hn,ha,ua,Vn,ma,fa,Yn,ga,_a,ka,Qe,tn,Jn,ya,va,ba,nn,Qn,Ta,wa,La,U,Xn,Ea,za,ce,qe,on,xa,$a,Xe,qa,Ca,Ma,Ce,sn,Fa,Pa,Ge,ja,Aa,Ia,an,rn,Sa,Na,Da,S,dn,Ba,Wa,Gn,Oa,Ua,Zn,Ka,Ra,eo,Ha,Va,to,Ya,Ja,Qa,pe,Xa,ln,Ga,Za,Ze,ei,ti,ni,et,oi,tt,si,ai,Xo,cn,ii,Go,nt,Zo,V,ri,ot,di,li,st,ci,pi,at,hi,ui,es,he,Me,no,it,mi,oo,fi,ts,C,rt,gi,dt,_i,pn,ki,yi,vi,ue,bi,hn,Ti,wi,un,Li,Ei,zi,so,xi,$i,lt,ns,me,Fe,ao,ct,qi,io,Ci,os,M,pt,Mi,ro,Fi,Pi,x,ji,mn,Ai,Ii,fn,Si,Ni,gn,Di,Bi,lo,Wi,Oi,co,Ui,Ki,po,Ri,Hi,ho,Vi,Yi,Ji,Pe,ht,Qi,uo,Xi,Gi,mo,ss,fe,je,fo,ut,Zi,go,er,as,F,mt,tr,_o,nr,or,ft,sr,_n,ar,ir,rr,gt,dr,_t,lr,cr,pr,N,kt,hr,ge,ur,kn,mr,fr,ko,gr,_r,kr,Ae,yr,yo,vr,br,yt,is,_e,Ie,vo,vt,Tr,bo,wr,rs,P,bt,Lr,To,Er,zr,Tt,xr,yn,$r,qr,Cr,wt,Mr,Lt,Fr,Pr,jr,Q,Et,Ar,ke,Ir,vn,Sr,Nr,wo,Dr,Br,Wr,Se,ds,ye,Ne,Lo,zt,Or,Eo,Ur,ls,j,xt,Kr,zo,Rr,Hr,$t,Vr,bn,Yr,Jr,Qr,qt,Xr,Ct,Gr,Zr,ed,D,Mt,td,ve,nd,Tn,od,sd,xo,ad,id,rd,De,dd,$o,ld,cd,Ft,cs,be,Be,qo,Pt,pd,Co,hd,ps,A,jt,ud,Mo,md,fd,At,gd,wn,_d,kd,yd,It,vd,St,bd,Td,wd,B,Nt,Ld,Te,Ed,Ln,zd,xd,Fo,$d,qd,Cd,We,Md,Po,Fd,Pd,Dt,hs,we,Oe,jo,Bt,jd,Ao,Ad,us,I,Wt,Id,Io,Sd,Nd,Ot,Dd,En,Bd,Wd,Od,Ut,Ud,Kt,Kd,Rd,Hd,W,Rt,Vd,Le,Yd,zn,Jd,Qd,So,Xd,Gd,Zd,Ue,el,No,tl,nl,Ht,ms;return g=new ze({}),He=new ze({}),nt=new In({props:{code:`from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification model = LukeModel.from_pretrained("studio-ousia/luke-base") tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") text = "Beyonc\xE9 lives in Los Angeles." 
entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyonc\xE9" inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**inputs) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state entities = ["Beyonc\xE9", "Los Angeles"] # Wikipedia entity titles corresponding to the entity mentions "Beyonc\xE9" and "Los Angeles" entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**inputs) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = int(logits[0].argmax()) print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeModel, LukeForEntityPairClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeModel.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-comment"># Example 1: Computing the contextualized entity representation corresponding to the entity mention &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>)] <span class="hljs-comment"># character-based entity span corresponding to &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state <span class="hljs-comment"># Example 2: Inputting Wikipedia entities to obtain enriched contextualized representations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entities = [<span class="hljs-string">&quot;Beyonc\xE9&quot;</span>, <span class="hljs-string">&quot;Los Angeles&quot;</span>] <span class="hljs-comment"># Wikipedia entity titles corresponding to the entity mentions &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>)] <span 
class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state <span class="hljs-comment"># Example 3: Classifying the relationship between two entities using LukeForEntityPairClassification head model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntityPairClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>)] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = <span class="hljs-built_in">int</span>(logits[<span class="hljs-number">0</span>].argmax()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),it=new ze({}),rt=new K({props:{name:"class transformers.LukeConfig",anchor:"transformers.LukeConfig",parameters:[{name:"vocab_size",val:" = 50267"},{name:"entity_vocab_size",val:" = 500000"},{name:"hidden_size",val:" = 768"},{name:"entity_emb_size",val:" = 256"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_entity_aware_attention",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/configuration_luke.py#L29",parametersDescription:[{anchor:"transformers.LukeConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LUKE model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel">LukeModel</a>.`,name:"vocab_size"},{anchor:"transformers.LukeConfig.entity_vocab_size",description:`<strong>entity_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 500000) &#x2014; Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented by the <code>entity_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel">LukeModel</a>.`,name:"entity_vocab_size"},{anchor:"transformers.LukeConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.LukeConfig.entity_emb_size",description:`<strong>entity_emb_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The number of dimensions of the entity embedding.`,name:"entity_emb_size"},{anchor:"transformers.LukeConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.LukeConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.LukeConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.LukeConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.LukeConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.LukeConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.LukeConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.LukeConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel">LukeModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.LukeConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.LukeConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.LukeConfig.use_entity_aware_attention",description:`<strong>use_entity_aware_attention</strong> (<code>bool</code>, defaults to <code>True</code>) &#x2014; Whether or not the model should use the entity-aware self-attention mechanism proposed in <a href="https://arxiv.org/abs/2010.01057" rel="nofollow">LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention (Yamada et al.)</a>.`,name:"use_entity_aware_attention"}]}}),lt=new In({props:{code:`from transformers import LukeConfig, LukeModel # Initializing a LUKE configuration configuration = LukeConfig() # Initializing a model from the configuration model = LukeModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeConfig, LukeModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a LUKE configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = LukeConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),ct=new ze({}),pt=new K({props:{name:"class transformers.LukeTokenizer",anchor:"transformers.LukeTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"entity_vocab_file",val:""},{name:"task",val:" = None"},{name:"max_entity_length",val:" = 32"},{name:"max_mention_length",val:" = 30"},{name:"entity_token_1",val:" = '<ent>'"},{name:"entity_token_2",val:" = '<ent2>'"},{name:"entity_unk_token",val:" = '[UNK]'"},{name:"entity_pad_token",val:" = '[PAD]'"},{name:"entity_mask_token",val:" = '[MASK]'"},{name:"entity_mask2_token",val:" = '[MASK2]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/tokenization_luke.py#L154",parametersDescription:[{anchor:"transformers.LukeTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.LukeTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges 
file.`,name:"merges_file"},{anchor:"transformers.LukeTokenizer.entity_vocab_file",description:`<strong>entity_vocab_file</strong> (<code>str</code>) &#x2014; Path to the entity vocabulary file.`,name:"entity_vocab_file"},{anchor:"transformers.LukeTokenizer.task",description:`<strong>task</strong> (<code>str</code>, <em>optional</em>) &#x2014; Task for which you want to prepare sequences. One of <code>&quot;entity_classification&quot;</code>, <code>&quot;entity_pair_classification&quot;</code>, or <code>&quot;entity_span_classification&quot;</code>. If you specify this argument, the entity sequence is automatically created based on the given entity span(s).`,name:"task"},{anchor:"transformers.LukeTokenizer.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.LukeTokenizer.max_mention_length",description:`<strong>max_mention_length</strong> (<code>int</code>, <em>optional</em>, defaults to 30) &#x2014; The maximum number of tokens inside an entity span.`,name:"max_mention_length"},{anchor:"transformers.LukeTokenizer.entity_token_1",description:`<strong>entity_token_1</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. This token is only used when <code>task</code> is set to <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_1"},{anchor:"transformers.LukeTokenizer.entity_token_2",description:`<strong>entity_token_2</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent2&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. 
This token is only used when <code>task</code> is set to <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_2"}]}}),ht=new K({props:{name:"__call__",anchor:"transformers.LukeTokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"entity_spans",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entity_spans_pair",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entities",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"entities_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_entity_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": typing.Optional[bool] = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/tokenization_luke.py#L264",parametersDescription:[{anchor:"transformers.LukeTokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text"},{anchor:"transformers.LukeTokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text_pair"},{anchor:"transformers.LukeTokenizer.__call__.entity_spans",description:`<strong>entity_spans</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code> as the <code>task</code> argument in the constructor, the length of each sequence must be 1 or 2, respectively. 
If you specify <code>entities</code>, the length of each sequence must be equal to the length of each sequence of <code>entities</code>.`,name:"entity_spans"},{anchor:"transformers.LukeTokenizer.__call__.entity_spans_pair",description:`<strong>entity_spans_pair</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the <code>task</code> argument in the constructor, this argument is ignored. If you specify <code>entities_pair</code>, the length of each sequence must be equal to the length of each sequence of <code>entities_pair</code>.`,name:"entity_spans_pair"},{anchor:"transformers.LukeTokenizer.__call__.entities",description:`<strong>entities</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans</code>. If you specify <code>entity_spans</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities"},{anchor:"transformers.LukeTokenizer.__call__.entities_pair",description:`<strong>entities_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans_pair</code>. If you specify <code>entity_spans_pair</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities_pair"},{anchor:"transformers.LukeTokenizer.__call__.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.LukeTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LukeTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LukeTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LukeTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LukeTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LukeTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.LukeTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LukeTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.LukeTokenizer.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.LukeTokenizer.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.LukeTokenizer.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.LukeTokenizer.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.LukeTokenizer.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.LukeTokenizer.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.LukeTokenizer.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_ids</strong> \u2014 List of entity ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>entity_position_ids</strong> \u2014 List of entity positions in the input sequence to be fed to a model.</p> </li> <li> <p><strong>entity_token_type_ids</strong> \u2014 List of entity token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Centity_token_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>entity_attention_mask</strong> \u2014 List of indices specifying which entities should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Centity_attention_mask\u201D</em> is in 
<code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_start_positions</strong> \u2014 List of the start positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>entity_end_positions</strong> \u2014 List of the end positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),ut=new ze({}),mt=new K({props:{name:"class transformers.LukeModel",anchor:"transformers.LukeModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L873",parametersDescription:[{anchor:"transformers.LukeModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kt=new K({props:{name:"forward",anchor:"transformers.LukeModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"entity_ids",val:" = None"},{name:"entity_attention_mask",val:" = None"},{name:"entity_token_type_ids",val:" = None"},{name:"entity_position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L905",parametersDescription:[{anchor:"transformers.LukeModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeModel.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeModel.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeModel.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeModel.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.BaseLukeModelOutputWithPooling</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>entity_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length, hidden_size)</code>) \u2014 Sequence of entity hidden-states at the output of the last layer of the model.</li> <li><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.BaseLukeModelOutputWithPooling</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new Ko({props:{$$slots:{default:[ap]},$$scope:{ctx:O}}}),yt=new In({props:{code:`from transformers import LukeTokenizer, LukeModel tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") model = LukeModel.from_pretrained("studio-ousia/luke-base") text = "Beyonc\xE9 lives in Los Angeles." entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyonc\xE9" encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**encoding) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state text = "Beyonc\xE9 lives in Los Angeles." entities = ["Beyonc\xE9", "Los Angeles"] # Wikipedia entity titles corresponding to the entity mentions "Beyonc\xE9" and "Los Angeles" entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" encoding = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**encoding) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeModel.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-comment"># Compute the contextualized entity representation corresponding to the entity mention &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>)] <span class="hljs-comment"># character-based entity span corresponding to &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state <span class="hljs-comment"># Input Wikipedia entities to obtain enriched contextualized representations of word tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entities = [<span class="hljs-string">&quot;Beyonc\xE9&quot;</span>, <span class="hljs-string">&quot;Los Angeles&quot;</span>] <span class="hljs-comment"># Wikipedia entity titles corresponding to the entity mentions &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>)] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state`}}),vt=new ze({}),bt=new K({props:{name:"class transformers.LukeForMaskedLM",anchor:"transformers.LukeForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1118",parametersDescription:[{anchor:"transformers.LukeForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Et=new K({props:{name:"forward",anchor:"transformers.LukeForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"entity_ids",val:" = None"},{name:"entity_attention_mask",val:" = None"},{name:"entity_token_type_ids",val:" = None"},{name:"entity_position_ids",val:" = None"},{name:"labels",val:" = None"},{name:"entity_labels",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1154",parametersDescription:[{anchor:"transformers.LukeForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForMaskedLM.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForMaskedLM.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForMaskedLM.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForMaskedLM.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.LukeForMaskedLM.forward.entity_labels",description:`<strong>entity_labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"entity_labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.LukeMaskedLMOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 The sum of masked language modeling (MLM) loss and entity prediction loss.</p> </li> <li> <p><strong>mlm_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>mep_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked entity prediction (MEP) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>entity_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. 
Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.LukeMaskedLMOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Se=new Ko({props:{$$slots:{default:[ip]},$$scope:{ctx:O}}}),zt=new ze({}),xt=new K({props:{name:"class transformers.LukeForEntityClassification",anchor:"transformers.LukeForEntityClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1252",parametersDescription:[{anchor:"transformers.LukeForEntityClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mt=new K({props:{name:"forward",anchor:"transformers.LukeForEntityClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"entity_ids",val:" = None"},{name:"entity_attention_mask",val:" = None"},{name:"entity_token_type_ids",val:" = None"},{name:"entity_position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1265",parametersDescription:[{anchor:"transformers.LukeForEntityClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForEntityClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForEntityClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForEntityClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForEntityClassification.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForEntityClassification.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForEntityClassification.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForEntityClassification.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForEntityClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForEntityClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForEntityClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForEntityClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForEntityClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForEntityClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code> or <code>(batch_size, num_labels)</code>, <em>optional</em>) &#x2014; Labels for computing the classification loss. If the shape is <code>(batch_size,)</code>, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in <code>[0, ..., config.num_labels - 1]</code>. If the shape is <code>(batch_size, num_labels)</code>, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain <code>[0, 1]</code>, where 0 and 1 indicate false and true, respectively.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.EntityClassificationOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.EntityClassificationOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),De=new Ko({props:{$$slots:{default:[rp]},$$scope:{ctx:O}}}),Ft=new In({props:{code:`from transformers import LukeTokenizer, LukeForEntityClassification tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") text = "Beyonc\xE9 lives in Los Angeles." entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyonc\xE9" inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeForEntityClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-open-entity&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntityClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-open-entity&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>)] <span class="hljs-comment"># character-based entity span corresponding to &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx]) Predicted <span class="hljs-keyword">class</span>: person`}}),Pt=new ze({}),jt=new K({props:{name:"class transformers.LukeForEntityPairClassification",anchor:"transformers.LukeForEntityPairClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1367",parametersDescription:[{anchor:"transformers.LukeForEntityPairClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Nt=new K({props:{name:"forward",anchor:"transformers.LukeForEntityPairClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"entity_ids",val:" = None"},{name:"entity_attention_mask",val:" = None"},{name:"entity_token_type_ids",val:" = None"},{name:"entity_position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1380",parametersDescription:[{anchor:"transformers.LukeForEntityPairClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForEntityPairClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForEntityPairClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForEntityPairClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForEntityPairClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForEntityPairClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForEntityPairClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code> or <code>(batch_size, num_labels)</code>, <em>optional</em>) &#x2014; Labels for computing the classification loss. If the shape is <code>(batch_size,)</code>, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in <code>[0, ..., config.num_labels - 1]</code>. If the shape is <code>(batch_size, num_labels)</code>, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain <code>[0, 1]</code>, where 0 and 1 indicate false and true, respectively.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.EntityPairClassificationOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.EntityPairClassificationOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),We=new Ko({props:{$$slots:{default:[dp]},$$scope:{ctx:O}}}),Dt=new In({props:{code:`from transformers import LukeTokenizer, LukeForEntityPairClassification tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") text = "Beyonc\xE9 lives in Los Angeles." entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeForEntityPairClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntityPairClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>)] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted 
class:&quot;</span>, model.config.id2label[predicted_class_idx]) Predicted <span class="hljs-keyword">class</span>: per:cities_of_residence`}}),Bt=new ze({}),Wt=new K({props:{name:"class transformers.LukeForEntitySpanClassification",anchor:"transformers.LukeForEntitySpanClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1484",parametersDescription:[{anchor:"transformers.LukeForEntitySpanClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Rt=new K({props:{name:"forward",anchor:"transformers.LukeForEntitySpanClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"entity_ids",val:" = None"},{name:"entity_attention_mask",val:" = None"},{name:"entity_token_type_ids",val:" = None"},{name:"entity_position_ids",val:" = None"},{name:"entity_start_positions",val:" = None"},{name:"entity_end_positions",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/luke/modeling_luke.py#L1497",parametersDescription:[{anchor:"transformers.LukeForEntitySpanClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForEntitySpanClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForEntitySpanClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForEntitySpanClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForEntitySpanClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForEntitySpanClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_start_positions",description:`<strong>entity_start_positions</strong> (<code>torch.LongTensor</code>) &#x2014; The start positions of entities in the word token sequence.`,name:"entity_start_positions"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_end_positions",description:`<strong>entity_end_positions</strong> (<code>torch.LongTensor</code>) &#x2014; The end positions of entities in the word token sequence.`,name:"entity_end_positions"},{anchor:"transformers.LukeForEntitySpanClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code> or <code>(batch_size, entity_length, num_labels)</code>, <em>optional</em>) &#x2014; Labels for computing the classification loss. If the shape is <code>(batch_size, entity_length)</code>, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in <code>[0, ..., config.num_labels - 1]</code>. If the shape is <code>(batch_size, entity_length, num_labels)</code>, the binary cross entropy loss is used for the multi-label classification. 
In this case, labels should only contain <code>[0, 1]</code>, where 0 and 1 indicate false and true, respectively.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.EntitySpanClassificationOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.EntitySpanClassificationOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ue=new Ko({props:{$$slots:{default:[lp]},$$scope:{ctx:O}}}),Ht=new In({props:{code:`from transformers import LukeTokenizer, LukeForEntitySpanClassification tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") text = "Beyonc\xE9 lives in Los Angeles" word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens entity_spans = [] for i, start_pos in enumerate(word_start_positions): for end_pos in word_end_positions[i:]: entity_spans.append((start_pos, end_pos)) inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_indices = logits.argmax(-1).squeeze().tolist() for span, predicted_class_idx in zip(entity_spans, predicted_class_indices): if predicted_class_idx != 0: print(text[span[0]:span[1]], model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeForEntitySpanClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-conll-2003&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntitySpanClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-conll-2003&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles&quot;</span> <span class="hljs-comment"># List all possible entity spans in the text</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_start_positions = [<span class="hljs-number">0</span>, <span class="hljs-number">8</span>, <span class="hljs-number">14</span>, <span class="hljs-number">17</span>, <span class="hljs-number">21</span>] <span class="hljs-comment"># character-based start positions of word tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_end_positions = [<span class="hljs-number">7</span>, <span class="hljs-number">13</span>, <span class="hljs-number">16</span>, <span class="hljs-number">20</span>, <span class="hljs-number">28</span>] <span class="hljs-comment"># character-based end positions of word tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, start_pos <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(word_start_positions): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> end_pos <span class="hljs-keyword">in</span> word_end_positions[i:]: <span class="hljs-meta">... 
</span> entity_spans.append((start_pos, end_pos)) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_indices = logits.argmax(-<span class="hljs-number">1</span>).squeeze().tolist() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> span, predicted_class_idx <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(entity_spans, predicted_class_indices): <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> predicted_class_idx != <span class="hljs-number">0</span>: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(text[span[<span class="hljs-number">0</span>]:span[<span class="hljs-number">1</span>]], model.config.id2label[predicted_class_idx]) Beyonc\xE9 PER Los Angeles LOC`}}),{c(){h=a("meta"),L=l(),m=a("h1"),w=a("a"),E=a("span"),_(g.$$.fragment),f=l(),z=a("span"),Ds=o("LUKE"),Ro=l(),le=a("h2"),xe=a("a"),Sn=a("span"),_(He.$$.fragment),Bs=l(),Nn=a("span"),Ws=o("Overview"),Ho=l(),$e=a("p"),Os=o("The LUKE model was proposed in "),Ve=a("a"),Us=o("LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),Ks=o(` by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto. It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive and cloze-style question answering, entity typing, and relation classification.`),Vo=l(),Jt=a("p"),Rs=o("The abstract from the paper is the following:"),Yo=l(),Qt=a("p"),Dn=a("em"),Hs=o(`Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. 
In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering).`),Jo=l(),Xt=a("p"),Vs=o("Tips:"),Qo=l(),R=a("ul"),Bn=a("li"),Ye=a("p"),Ys=o("This implementation is the same as "),Gt=a("a"),Js=o("RobertaModel"),Qs=o(` with the addition of entity embeddings as well as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.`),Xs=l(),Wn=a("li"),q=a("p"),Gs=o("LUKE treats entities as input tokens; therefore, it takes "),On=a("code"),Zs=o("entity_ids"),ea=o(", "),Un=a("code"),ta=o("entity_attention_mask"),na=o(`, `),Kn=a("code"),oa=o("entity_token_type_ids"),sa=o(" and "),Rn=a("code"),aa=o("entity_position_ids"),ia=o(` as extra input. You can obtain those using `),Zt=a("a"),ra=o("LukeTokenizer"),da=o("."),la=l(),Je=a("li"),H=a("p"),en=a("a"),ca=o("LukeTokenizer"),pa=o(" takes "),Hn=a("code"),ha=o("entities"),ua=o(" and "),Vn=a("code"),ma=o("entity_spans"),fa=o(` (character-based start and end positions of the entities in the input text) as extra input. `),Yn=a("code"),ga=o("entities"),_a=o(` typically consist of [MASK] entities or Wikipedia entities. The brief description when inputting these entities are as follows:`),ka=l(),Qe=a("ul"),tn=a("li"),Jn=a("em"),ya=o("Inputting [MASK] entities to compute entity representations"),va=o(`: The [MASK] entity is used to mask entities to be predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by gathering the information about the entity from the input text. Therefore, the [MASK] entity can be used to address downstream tasks requiring the information of entities in text such as entity typing, relation classification, and named entity recognition.`),ba=l(),nn=a("li"),Qn=a("em"),Ta=o("Inputting Wikipedia entities to compute knowledge-enhanced token representations"),wa=o(`: LUKE learns rich information (or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as question answering.`),La=l(),U=a("li"),Xn=a("p"),Ea=o("There are three head models for the former use case:"),za=l(),ce=a("ul"),qe=a("li"),on=a("a"),xa=o("LukeForEntityClassification"),$a=o(`, for tasks to classify a single entity in an input text such as entity typing, e.g. the `),Xe=a("a"),qa=o("Open Entity dataset"),Ca=o(`. This model places a linear head on top of the output entity representation.`),Ma=l(),Ce=a("li"),sn=a("a"),Fa=o("LukeForEntityPairClassification"),Pa=o(`, for tasks to classify the relationship between two entities such as relation classification, e.g. the `),Ge=a("a"),ja=o("TACRED dataset"),Aa=o(`. This model places a linear head on top of the concatenated output representation of the pair of given entities.`),Ia=l(),an=a("li"),rn=a("a"),Sa=o("LukeForEntitySpanClassification"),Na=o(`, for tasks to classify the sequence of entity spans, such as named entity recognition (NER). This model places a linear head on top of the output entity representations. 
You can address NER using this model by inputting all possible entity spans in the text to the model.`),Da=l(),S=a("p"),dn=a("a"),Ba=o("LukeTokenizer"),Wa=o(" has a "),Gn=a("code"),Oa=o("task"),Ua=o(` argument, which enables you to easily create an input to these head models by specifying `),Zn=a("code"),Ka=o('task="entity_classification"'),Ra=o(", "),eo=a("code"),Ha=o('task="entity_pair_classification"'),Va=o(`, or `),to=a("code"),Ya=o('task="entity_span_classification"'),Ja=o(". Please refer to the example code of each head models."),Qa=l(),pe=a("p"),Xa=o("A demo notebook on how to fine-tune "),ln=a("a"),Ga=o("LukeForEntityPairClassification"),Za=o(` for relation classification can be found `),Ze=a("a"),ei=o("here"),ti=o("."),ni=l(),et=a("p"),oi=o(`There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with the HuggingFace implementation of LUKE. They can be found `),tt=a("a"),si=o("here"),ai=o("."),Xo=l(),cn=a("p"),ii=o("Example:"),Go=l(),_(nt.$$.fragment),Zo=l(),V=a("p"),ri=o("This model was contributed by "),ot=a("a"),di=o("ikuyamada"),li=o(" and "),st=a("a"),ci=o("nielsr"),pi=o(". The original code can be found "),at=a("a"),hi=o("here"),ui=o("."),es=l(),he=a("h2"),Me=a("a"),no=a("span"),_(it.$$.fragment),mi=l(),oo=a("span"),fi=o("LukeConfig"),ts=l(),C=a("div"),_(rt.$$.fragment),gi=l(),dt=a("p"),_i=o("This is the configuration class to store the configuration of a "),pn=a("a"),ki=o("LukeModel"),yi=o(`. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture.`),vi=l(),ue=a("p"),bi=o("Configuration objects inherit from "),hn=a("a"),Ti=o("PretrainedConfig"),wi=o(` and can be used to control the model outputs. Read the documentation from `),un=a("a"),Li=o("PretrainedConfig"),Ei=o(" for more information."),zi=l(),so=a("p"),xi=o("Examples:"),$i=l(),_(lt.$$.fragment),ns=l(),me=a("h2"),Fe=a("a"),ao=a("span"),_(ct.$$.fragment),qi=l(),io=a("span"),Ci=o("LukeTokenizer"),os=l(),M=a("div"),_(pt.$$.fragment),Mi=l(),ro=a("p"),Fi=o("Construct a LUKE tokenizer."),Pi=l(),x=a("p"),ji=o("This tokenizer inherits from "),mn=a("a"),Ai=o("RobertaTokenizer"),Ii=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Compared to `),fn=a("a"),Si=o("RobertaTokenizer"),Ni=o(", "),gn=a("a"),Di=o("LukeTokenizer"),Bi=o(` also creates entity sequences, namely `),lo=a("code"),Wi=o("entity_ids"),Oi=o(", "),co=a("code"),Ui=o("entity_attention_mask"),Ki=o(", "),po=a("code"),Ri=o("entity_token_type_ids"),Hi=o(", and "),ho=a("code"),Vi=o("entity_position_ids"),Yi=o(` to be used by the LUKE model.`),Ji=l(),Pe=a("div"),_(ht.$$.fragment),Qi=l(),uo=a("p"),Xi=o(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),Gi=l(),mo=a("div"),ss=l(),fe=a("h2"),je=a("a"),fo=a("span"),_(ut.$$.fragment),Zi=l(),go=a("span"),er=o("LukeModel"),as=l(),F=a("div"),_(mt.$$.fragment),tr=l(),_o=a("p"),nr=o("The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any specific head on top."),or=l(),ft=a("p"),sr=o("This model inherits from "),_n=a("a"),ar=o("PreTrainedModel"),ir=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rr=l(),gt=a("p"),dr=o("This model is also a PyTorch "),_t=a("a"),lr=o("torch.nn.Module"),cr=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pr=l(),N=a("div"),_(kt.$$.fragment),hr=l(),ge=a("p"),ur=o("The "),kn=a("a"),mr=o("LukeModel"),fr=o(" forward method, overrides the "),ko=a("code"),gr=o("__call__"),_r=o(" special method."),kr=l(),_(Ae.$$.fragment),yr=l(),yo=a("p"),vr=o("Examples:"),br=l(),_(yt.$$.fragment),is=l(),_e=a("h2"),Ie=a("a"),vo=a("span"),_(vt.$$.fragment),Tr=l(),bo=a("span"),wr=o("LukeForMaskedLM"),rs=l(),P=a("div"),_(bt.$$.fragment),Lr=l(),To=a("p"),Er=o(`The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction.`),zr=l(),Tt=a("p"),xr=o("This model inherits from "),yn=a("a"),$r=o("PreTrainedModel"),qr=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cr=l(),wt=a("p"),Mr=o("This model is also a PyTorch "),Lt=a("a"),Fr=o("torch.nn.Module"),Pr=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jr=l(),Q=a("div"),_(Et.$$.fragment),Ar=l(),ke=a("p"),Ir=o("The "),vn=a("a"),Sr=o("LukeForMaskedLM"),Nr=o(" forward method, overrides the "),wo=a("code"),Dr=o("__call__"),Br=o(" special method."),Wr=l(),_(Se.$$.fragment),ds=l(),ye=a("h2"),Ne=a("a"),Lo=a("span"),_(zt.$$.fragment),Or=l(),Eo=a("span"),Ur=o("LukeForEntityClassification"),ls=l(),j=a("div"),_(xt.$$.fragment),Kr=l(),zo=a("p"),Rr=o(`The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity.`),Hr=l(),$t=a("p"),Vr=o("This model inherits from "),bn=a("a"),Yr=o("PreTrainedModel"),Jr=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qr=l(),qt=a("p"),Xr=o("This model is also a PyTorch "),Ct=a("a"),Gr=o("torch.nn.Module"),Zr=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ed=l(),D=a("div"),_(Mt.$$.fragment),td=l(),ve=a("p"),nd=o("The "),Tn=a("a"),od=o("LukeForEntityClassification"),sd=o(" forward method, overrides the "),xo=a("code"),ad=o("__call__"),id=o(" special method."),rd=l(),_(De.$$.fragment),dd=l(),$o=a("p"),ld=o("Examples:"),cd=l(),_(Ft.$$.fragment),cs=l(),be=a("h2"),Be=a("a"),qo=a("span"),_(Pt.$$.fragment),pd=l(),Co=a("span"),hd=o("LukeForEntityPairClassification"),ps=l(),A=a("div"),_(jt.$$.fragment),ud=l(),Mo=a("p"),md=o(`The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED.`),fd=l(),At=a("p"),gd=o("This model inherits from "),wn=a("a"),_d=o("PreTrainedModel"),kd=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yd=l(),It=a("p"),vd=o("This model is also a PyTorch "),St=a("a"),bd=o("torch.nn.Module"),Td=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wd=l(),B=a("div"),_(Nt.$$.fragment),Ld=l(),Te=a("p"),Ed=o("The "),Ln=a("a"),zd=o("LukeForEntityPairClassification"),xd=o(" forward method, overrides the "),Fo=a("code"),$d=o("__call__"),qd=o(" special method."),Cd=l(),_(We.$$.fragment),Md=l(),Po=a("p"),Fd=o("Examples:"),Pd=l(),_(Dt.$$.fragment),hs=l(),we=a("h2"),Oe=a("a"),jo=a("span"),_(Bt.$$.fragment),jd=l(),Ao=a("span"),Ad=o("LukeForEntitySpanClassification"),us=l(),I=a("div"),_(Wt.$$.fragment),Id=l(),Io=a("p"),Sd=o(`The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition.`),Nd=l(),Ot=a("p"),Dd=o("This model inherits from "),En=a("a"),Bd=o("PreTrainedModel"),Wd=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Od=l(),Ut=a("p"),Ud=o("This model is also a PyTorch "),Kt=a("a"),Kd=o("torch.nn.Module"),Rd=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hd=l(),W=a("div"),_(Rt.$$.fragment),Vd=l(),Le=a("p"),Yd=o("The "),zn=a("a"),Jd=o("LukeForEntitySpanClassification"),Qd=o(" forward method, overrides the "),So=a("code"),Xd=o("__call__"),Gd=o(" special method."),Zd=l(),_(Ue.$$.fragment),el=l(),No=a("p"),tl=o("Examples:"),nl=l(),_(Ht.$$.fragment),this.h()},l(n){const p=sp('[data-svelte="svelte-1phssyn"]',document.head);h=i(p,"META",{name:!0,content:!0}),p.forEach(t),L=c(n),m=i(n,"H1",{class:!0});var Vt=r(m);w=i(Vt,"A",{id:!0,class:!0,href:!0});var Do=r(w);E=i(Do,"SPAN",{});var Bo=r(E);k(g.$$.fragment,Bo),Bo.forEach(t),Do.forEach(t),f=c(Vt),z=i(Vt,"SPAN",{});var Wo=r(z);Ds=s(Wo,"LUKE"),Wo.forEach(t),Vt.forEach(t),Ro=c(n),le=i(n,"H2",{class:!0});var Yt=r(le);xe=i(Yt,"A",{id:!0,class:!0,href:!0});var il=r(xe);Sn=i(il,"SPAN",{});var rl=r(Sn);k(He.$$.fragment,rl),rl.forEach(t),il.forEach(t),Bs=c(Yt),Nn=i(Yt,"SPAN",{});var dl=r(Nn);Ws=s(dl,"Overview"),dl.forEach(t),Yt.forEach(t),Ho=c(n),$e=i(n,"P",{});var fs=r($e);Os=s(fs,"The LUKE model was proposed in "),Ve=i(fs,"A",{href:!0,rel:!0});var ll=r(Ve);Us=s(ll,"LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),ll.forEach(t),Ks=s(fs,` by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto. It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive and cloze-style question answering, entity typing, and relation classification.`),fs.forEach(t),Vo=c(n),Jt=i(n,"P",{});var cl=r(Jt);Rs=s(cl,"The abstract from the paper is the following:"),cl.forEach(t),Yo=c(n),Qt=i(n,"P",{});var pl=r(Qt);Dn=i(pl,"EM",{});var hl=r(Dn);Hs=s(hl,`Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. 
The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering).`),hl.forEach(t),pl.forEach(t),Jo=c(n),Xt=i(n,"P",{});var ul=r(Xt);Vs=s(ul,"Tips:"),ul.forEach(t),Qo=c(n),R=i(n,"UL",{});var Ke=r(R);Bn=i(Ke,"LI",{});var ml=r(Bn);Ye=i(ml,"P",{});var gs=r(Ye);Ys=s(gs,"This implementation is the same as "),Gt=i(gs,"A",{href:!0});var fl=r(Gt);Js=s(fl,"RobertaModel"),fl.forEach(t),Qs=s(gs,` with the addition of entity embeddings as well as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.`),gs.forEach(t),ml.forEach(t),Xs=c(Ke),Wn=i(Ke,"LI",{});var gl=r(Wn);q=i(gl,"P",{});var Y=r(q);Gs=s(Y,"LUKE treats entities as input tokens; therefore, it takes "),On=i(Y,"CODE",{});var _l=r(On);Zs=s(_l,"entity_ids"),_l.forEach(t),ea=s(Y,", "),Un=i(Y,"CODE",{});var kl=r(Un);ta=s(kl,"entity_attention_mask"),kl.forEach(t),na=s(Y,`, `),Kn=i(Y,"CODE",{});var yl=r(Kn);oa=s(yl,"entity_token_type_ids"),yl.forEach(t),sa=s(Y," and "),Rn=i(Y,"CODE",{});var vl=r(Rn);aa=s(vl,"entity_position_ids"),vl.forEach(t),ia=s(Y,` as extra input. You can obtain those using `),Zt=i(Y,"A",{href:!0});var bl=r(Zt);ra=s(bl,"LukeTokenizer"),bl.forEach(t),da=s(Y,"."),Y.forEach(t),gl.forEach(t),la=c(Ke),Je=i(Ke,"LI",{});var _s=r(Je);H=i(_s,"P",{});var Ee=r(H);en=i(Ee,"A",{href:!0});var Tl=r(en);ca=s(Tl,"LukeTokenizer"),Tl.forEach(t),pa=s(Ee," takes "),Hn=i(Ee,"CODE",{});var wl=r(Hn);ha=s(wl,"entities"),wl.forEach(t),ua=s(Ee," and "),Vn=i(Ee,"CODE",{});var Ll=r(Vn);ma=s(Ll,"entity_spans"),Ll.forEach(t),fa=s(Ee,` (character-based start and end positions of the entities in the input text) as extra input. `),Yn=i(Ee,"CODE",{});var El=r(Yn);ga=s(El,"entities"),El.forEach(t),_a=s(Ee,` typically consist of [MASK] entities or Wikipedia entities. The brief description when inputting these entities are as follows:`),Ee.forEach(t),ka=c(_s),Qe=i(_s,"UL",{});var ks=r(Qe);tn=i(ks,"LI",{});var ol=r(tn);Jn=i(ol,"EM",{});var zl=r(Jn);ya=s(zl,"Inputting [MASK] entities to compute entity representations"),zl.forEach(t),va=s(ol,`: The [MASK] entity is used to mask entities to be predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by gathering the information about the entity from the input text. 
Therefore, the [MASK] entity can be used to address downstream tasks requiring the information of entities in text such as entity typing, relation classification, and named entity recognition.`),ol.forEach(t),ba=c(ks),nn=i(ks,"LI",{});var sl=r(nn);Qn=i(sl,"EM",{});var xl=r(Qn);Ta=s(xl,"Inputting Wikipedia entities to compute knowledge-enhanced token representations"),xl.forEach(t),wa=s(sl,`: LUKE learns rich information (or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as question answering.`),sl.forEach(t),ks.forEach(t),_s.forEach(t),La=c(Ke),U=i(Ke,"LI",{});var X=r(U);Xn=i(X,"P",{});var $l=r(Xn);Ea=s($l,"There are three head models for the former use case:"),$l.forEach(t),za=c(X),ce=i(X,"UL",{});var xn=r(ce);qe=i(xn,"LI",{});var Oo=r(qe);on=i(Oo,"A",{href:!0});var ql=r(on);xa=s(ql,"LukeForEntityClassification"),ql.forEach(t),$a=s(Oo,`, for tasks to classify a single entity in an input text such as entity typing, e.g. the `),Xe=i(Oo,"A",{href:!0,rel:!0});var Cl=r(Xe);qa=s(Cl,"Open Entity dataset"),Cl.forEach(t),Ca=s(Oo,`. This model places a linear head on top of the output entity representation.`),Oo.forEach(t),Ma=c(xn),Ce=i(xn,"LI",{});var Uo=r(Ce);sn=i(Uo,"A",{href:!0});var Ml=r(sn);Fa=s(Ml,"LukeForEntityPairClassification"),Ml.forEach(t),Pa=s(Uo,`, for tasks to classify the relationship between two entities such as relation classification, e.g. the `),Ge=i(Uo,"A",{href:!0,rel:!0});var Fl=r(Ge);ja=s(Fl,"TACRED dataset"),Fl.forEach(t),Aa=s(Uo,`. This model places a linear head on top of the concatenated output representation of the pair of given entities.`),Uo.forEach(t),Ia=c(xn),an=i(xn,"LI",{});var al=r(an);rn=i(al,"A",{href:!0});var Pl=r(rn);Sa=s(Pl,"LukeForEntitySpanClassification"),Pl.forEach(t),Na=s(al,`, for tasks to classify the sequence of entity spans, such as named entity recognition (NER). This model places a linear head on top of the output entity representations. You can address NER using this model by inputting all possible entity spans in the text to the model.`),al.forEach(t),xn.forEach(t),Da=c(X),S=i(X,"P",{});var J=r(S);dn=i(J,"A",{href:!0});var jl=r(dn);Ba=s(jl,"LukeTokenizer"),jl.forEach(t),Wa=s(J," has a "),Gn=i(J,"CODE",{});var Al=r(Gn);Oa=s(Al,"task"),Al.forEach(t),Ua=s(J,` argument, which enables you to easily create an input to these head models by specifying `),Zn=i(J,"CODE",{});var Il=r(Zn);Ka=s(Il,'task="entity_classification"'),Il.forEach(t),Ra=s(J,", "),eo=i(J,"CODE",{});var Sl=r(eo);Ha=s(Sl,'task="entity_pair_classification"'),Sl.forEach(t),Va=s(J,`, or `),to=i(J,"CODE",{});var Nl=r(to);Ya=s(Nl,'task="entity_span_classification"'),Nl.forEach(t),Ja=s(J,". Please refer to the example code of each head models."),J.forEach(t),Qa=c(X),pe=i(X,"P",{});var $n=r(pe);Xa=s($n,"A demo notebook on how to fine-tune "),ln=i($n,"A",{href:!0});var Dl=r(ln);Ga=s(Dl,"LukeForEntityPairClassification"),Dl.forEach(t),Za=s($n,` for relation classification can be found `),Ze=i($n,"A",{href:!0,rel:!0});var Bl=r(Ze);ei=s(Bl,"here"),Bl.forEach(t),ti=s($n,"."),$n.forEach(t),ni=c(X),et=i(X,"P",{});var ys=r(et);oi=s(ys,`There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with the HuggingFace implementation of LUKE. 
They can be found `),tt=i(ys,"A",{href:!0,rel:!0});var Wl=r(tt);si=s(Wl,"here"),Wl.forEach(t),ai=s(ys,"."),ys.forEach(t),X.forEach(t),Ke.forEach(t),Xo=c(n),cn=i(n,"P",{});var Ol=r(cn);ii=s(Ol,"Example:"),Ol.forEach(t),Go=c(n),k(nt.$$.fragment,n),Zo=c(n),V=i(n,"P",{});var Re=r(V);ri=s(Re,"This model was contributed by "),ot=i(Re,"A",{href:!0,rel:!0});var Ul=r(ot);di=s(Ul,"ikuyamada"),Ul.forEach(t),li=s(Re," and "),st=i(Re,"A",{href:!0,rel:!0});var Kl=r(st);ci=s(Kl,"nielsr"),Kl.forEach(t),pi=s(Re,". The original code can be found "),at=i(Re,"A",{href:!0,rel:!0});var Rl=r(at);hi=s(Rl,"here"),Rl.forEach(t),ui=s(Re,"."),Re.forEach(t),es=c(n),he=i(n,"H2",{class:!0});var vs=r(he);Me=i(vs,"A",{id:!0,class:!0,href:!0});var Hl=r(Me);no=i(Hl,"SPAN",{});var Vl=r(no);k(it.$$.fragment,Vl),Vl.forEach(t),Hl.forEach(t),mi=c(vs),oo=i(vs,"SPAN",{});var Yl=r(oo);fi=s(Yl,"LukeConfig"),Yl.forEach(t),vs.forEach(t),ts=c(n),C=i(n,"DIV",{class:!0});var G=r(C);k(rt.$$.fragment,G),gi=c(G),dt=i(G,"P",{});var bs=r(dt);_i=s(bs,"This is the configuration class to store the configuration of a "),pn=i(bs,"A",{href:!0});var Jl=r(pn);ki=s(Jl,"LukeModel"),Jl.forEach(t),yi=s(bs,`. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture.`),bs.forEach(t),vi=c(G),ue=i(G,"P",{});var qn=r(ue);bi=s(qn,"Configuration objects inherit from "),hn=i(qn,"A",{href:!0});var Ql=r(hn);Ti=s(Ql,"PretrainedConfig"),Ql.forEach(t),wi=s(qn,` and can be used to control the model outputs. Read the documentation from `),un=i(qn,"A",{href:!0});var Xl=r(un);Li=s(Xl,"PretrainedConfig"),Xl.forEach(t),Ei=s(qn," for more information."),qn.forEach(t),zi=c(G),so=i(G,"P",{});var Gl=r(so);xi=s(Gl,"Examples:"),Gl.forEach(t),$i=c(G),k(lt.$$.fragment,G),G.forEach(t),ns=c(n),me=i(n,"H2",{class:!0});var Ts=r(me);Fe=i(Ts,"A",{id:!0,class:!0,href:!0});var Zl=r(Fe);ao=i(Zl,"SPAN",{});var ec=r(ao);k(ct.$$.fragment,ec),ec.forEach(t),Zl.forEach(t),qi=c(Ts),io=i(Ts,"SPAN",{});var tc=r(io);Ci=s(tc,"LukeTokenizer"),tc.forEach(t),Ts.forEach(t),os=c(n),M=i(n,"DIV",{class:!0});var Z=r(M);k(pt.$$.fragment,Z),Mi=c(Z),ro=i(Z,"P",{});var nc=r(ro);Fi=s(nc,"Construct a LUKE tokenizer."),nc.forEach(t),Pi=c(Z),x=i(Z,"P",{});var $=r(x);ji=s($,"This tokenizer inherits from "),mn=i($,"A",{href:!0});var oc=r(mn);Ai=s(oc,"RobertaTokenizer"),oc.forEach(t),Ii=s($,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. 
Compared to `),fn=i($,"A",{href:!0});var sc=r(fn);Si=s(sc,"RobertaTokenizer"),sc.forEach(t),Ni=s($,", "),gn=i($,"A",{href:!0});var ac=r(gn);Di=s(ac,"LukeTokenizer"),ac.forEach(t),Bi=s($,` also creates entity sequences, namely `),lo=i($,"CODE",{});var ic=r(lo);Wi=s(ic,"entity_ids"),ic.forEach(t),Oi=s($,", "),co=i($,"CODE",{});var rc=r(co);Ui=s(rc,"entity_attention_mask"),rc.forEach(t),Ki=s($,", "),po=i($,"CODE",{});var dc=r(po);Ri=s(dc,"entity_token_type_ids"),dc.forEach(t),Hi=s($,", and "),ho=i($,"CODE",{});var lc=r(ho);Vi=s(lc,"entity_position_ids"),lc.forEach(t),Yi=s($,` to be used by the LUKE model.`),$.forEach(t),Ji=c(Z),Pe=i(Z,"DIV",{class:!0});var ws=r(Pe);k(ht.$$.fragment,ws),Qi=c(ws),uo=i(ws,"P",{});var cc=r(uo);Xi=s(cc,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),cc.forEach(t),ws.forEach(t),Gi=c(Z),mo=i(Z,"DIV",{class:!0}),r(mo).forEach(t),Z.forEach(t),ss=c(n),fe=i(n,"H2",{class:!0});var Ls=r(fe);je=i(Ls,"A",{id:!0,class:!0,href:!0});var pc=r(je);fo=i(pc,"SPAN",{});var hc=r(fo);k(ut.$$.fragment,hc),hc.forEach(t),pc.forEach(t),Zi=c(Ls),go=i(Ls,"SPAN",{});var uc=r(go);er=s(uc,"LukeModel"),uc.forEach(t),Ls.forEach(t),as=c(n),F=i(n,"DIV",{class:!0});var ee=r(F);k(mt.$$.fragment,ee),tr=c(ee),_o=i(ee,"P",{});var mc=r(_o);nr=s(mc,"The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any specific head on top."),mc.forEach(t),or=c(ee),ft=i(ee,"P",{});var Es=r(ft);sr=s(Es,"This model inherits from "),_n=i(Es,"A",{href:!0});var fc=r(_n);ar=s(fc,"PreTrainedModel"),fc.forEach(t),ir=s(Es,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Es.forEach(t),rr=c(ee),gt=i(ee,"P",{});var zs=r(gt);dr=s(zs,"This model is also a PyTorch "),_t=i(zs,"A",{href:!0,rel:!0});var gc=r(_t);lr=s(gc,"torch.nn.Module"),gc.forEach(t),cr=s(zs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zs.forEach(t),pr=c(ee),N=i(ee,"DIV",{class:!0});var te=r(N);k(kt.$$.fragment,te),hr=c(te),ge=i(te,"P",{});var Cn=r(ge);ur=s(Cn,"The "),kn=i(Cn,"A",{href:!0});var _c=r(kn);mr=s(_c,"LukeModel"),_c.forEach(t),fr=s(Cn," forward method, overrides the "),ko=i(Cn,"CODE",{});var kc=r(ko);gr=s(kc,"__call__"),kc.forEach(t),_r=s(Cn," special method."),Cn.forEach(t),kr=c(te),k(Ae.$$.fragment,te),yr=c(te),yo=i(te,"P",{});var yc=r(yo);vr=s(yc,"Examples:"),yc.forEach(t),br=c(te),k(yt.$$.fragment,te),te.forEach(t),ee.forEach(t),is=c(n),_e=i(n,"H2",{class:!0});var xs=r(_e);Ie=i(xs,"A",{id:!0,class:!0,href:!0});var vc=r(Ie);vo=i(vc,"SPAN",{});var bc=r(vo);k(vt.$$.fragment,bc),bc.forEach(t),vc.forEach(t),Tr=c(xs),bo=i(xs,"SPAN",{});var Tc=r(bo);wr=s(Tc,"LukeForMaskedLM"),Tc.forEach(t),xs.forEach(t),rs=c(n),P=i(n,"DIV",{class:!0});var ne=r(P);k(bt.$$.fragment,ne),Lr=c(ne),To=i(ne,"P",{});var wc=r(To);Er=s(wc,`The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction.`),wc.forEach(t),zr=c(ne),Tt=i(ne,"P",{});var $s=r(Tt);xr=s($s,"This model inherits from "),yn=i($s,"A",{href:!0});var Lc=r(yn);$r=s(Lc,"PreTrainedModel"),Lc.forEach(t),qr=s($s,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$s.forEach(t),Cr=c(ne),wt=i(ne,"P",{});var qs=r(wt);Mr=s(qs,"This model is also a PyTorch "),Lt=i(qs,"A",{href:!0,rel:!0});var Ec=r(Lt);Fr=s(Ec,"torch.nn.Module"),Ec.forEach(t),Pr=s(qs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qs.forEach(t),jr=c(ne),Q=i(ne,"DIV",{class:!0});var Mn=r(Q);k(Et.$$.fragment,Mn),Ar=c(Mn),ke=i(Mn,"P",{});var Fn=r(ke);Ir=s(Fn,"The "),vn=i(Fn,"A",{href:!0});var zc=r(vn);Sr=s(zc,"LukeForMaskedLM"),zc.forEach(t),Nr=s(Fn," forward method, overrides the "),wo=i(Fn,"CODE",{});var xc=r(wo);Dr=s(xc,"__call__"),xc.forEach(t),Br=s(Fn," special method."),Fn.forEach(t),Wr=c(Mn),k(Se.$$.fragment,Mn),Mn.forEach(t),ne.forEach(t),ds=c(n),ye=i(n,"H2",{class:!0});var Cs=r(ye);Ne=i(Cs,"A",{id:!0,class:!0,href:!0});var $c=r(Ne);Lo=i($c,"SPAN",{});var qc=r(Lo);k(zt.$$.fragment,qc),qc.forEach(t),$c.forEach(t),Or=c(Cs),Eo=i(Cs,"SPAN",{});var Cc=r(Eo);Ur=s(Cc,"LukeForEntityClassification"),Cc.forEach(t),Cs.forEach(t),ls=c(n),j=i(n,"DIV",{class:!0});var oe=r(j);k(xt.$$.fragment,oe),Kr=c(oe),zo=i(oe,"P",{});var Mc=r(zo);Rr=s(Mc,`The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity.`),Mc.forEach(t),Hr=c(oe),$t=i(oe,"P",{});var Ms=r($t);Vr=s(Ms,"This model inherits from "),bn=i(Ms,"A",{href:!0});var Fc=r(bn);Yr=s(Fc,"PreTrainedModel"),Fc.forEach(t),Jr=s(Ms,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ms.forEach(t),Qr=c(oe),qt=i(oe,"P",{});var Fs=r(qt);Xr=s(Fs,"This model is also a PyTorch "),Ct=i(Fs,"A",{href:!0,rel:!0});var Pc=r(Ct);Gr=s(Pc,"torch.nn.Module"),Pc.forEach(t),Zr=s(Fs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fs.forEach(t),ed=c(oe),D=i(oe,"DIV",{class:!0});var se=r(D);k(Mt.$$.fragment,se),td=c(se),ve=i(se,"P",{});var Pn=r(ve);nd=s(Pn,"The "),Tn=i(Pn,"A",{href:!0});var jc=r(Tn);od=s(jc,"LukeForEntityClassification"),jc.forEach(t),sd=s(Pn," forward method, overrides the "),xo=i(Pn,"CODE",{});var Ac=r(xo);ad=s(Ac,"__call__"),Ac.forEach(t),id=s(Pn," special method."),Pn.forEach(t),rd=c(se),k(De.$$.fragment,se),dd=c(se),$o=i(se,"P",{});var Ic=r($o);ld=s(Ic,"Examples:"),Ic.forEach(t),cd=c(se),k(Ft.$$.fragment,se),se.forEach(t),oe.forEach(t),cs=c(n),be=i(n,"H2",{class:!0});var Ps=r(be);Be=i(Ps,"A",{id:!0,class:!0,href:!0});var Sc=r(Be);qo=i(Sc,"SPAN",{});var Nc=r(qo);k(Pt.$$.fragment,Nc),Nc.forEach(t),Sc.forEach(t),pd=c(Ps),Co=i(Ps,"SPAN",{});var Dc=r(Co);hd=s(Dc,"LukeForEntityPairClassification"),Dc.forEach(t),Ps.forEach(t),ps=c(n),A=i(n,"DIV",{class:!0});var ae=r(A);k(jt.$$.fragment,ae),ud=c(ae),Mo=i(ae,"P",{});var Bc=r(Mo);md=s(Bc,`The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED.`),Bc.forEach(t),fd=c(ae),At=i(ae,"P",{});var js=r(At);gd=s(js,"This model inherits from "),wn=i(js,"A",{href:!0});var Wc=r(wn);_d=s(Wc,"PreTrainedModel"),Wc.forEach(t),kd=s(js,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),js.forEach(t),yd=c(ae),It=i(ae,"P",{});var As=r(It);vd=s(As,"This model is also a PyTorch "),St=i(As,"A",{href:!0,rel:!0});var Oc=r(St);bd=s(Oc,"torch.nn.Module"),Oc.forEach(t),Td=s(As,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),As.forEach(t),wd=c(ae),B=i(ae,"DIV",{class:!0});var ie=r(B);k(Nt.$$.fragment,ie),Ld=c(ie),Te=i(ie,"P",{});var jn=r(Te);Ed=s(jn,"The "),Ln=i(jn,"A",{href:!0});var Uc=r(Ln);zd=s(Uc,"LukeForEntityPairClassification"),Uc.forEach(t),xd=s(jn," forward method, overrides the "),Fo=i(jn,"CODE",{});var Kc=r(Fo);$d=s(Kc,"__call__"),Kc.forEach(t),qd=s(jn," special method."),jn.forEach(t),Cd=c(ie),k(We.$$.fragment,ie),Md=c(ie),Po=i(ie,"P",{});var Rc=r(Po);Fd=s(Rc,"Examples:"),Rc.forEach(t),Pd=c(ie),k(Dt.$$.fragment,ie),ie.forEach(t),ae.forEach(t),hs=c(n),we=i(n,"H2",{class:!0});var Is=r(we);Oe=i(Is,"A",{id:!0,class:!0,href:!0});var Hc=r(Oe);jo=i(Hc,"SPAN",{});var Vc=r(jo);k(Bt.$$.fragment,Vc),Vc.forEach(t),Hc.forEach(t),jd=c(Is),Ao=i(Is,"SPAN",{});var Yc=r(Ao);Ad=s(Yc,"LukeForEntitySpanClassification"),Yc.forEach(t),Is.forEach(t),us=c(n),I=i(n,"DIV",{class:!0});var re=r(I);k(Wt.$$.fragment,re),Id=c(re),Io=i(re,"P",{});var Jc=r(Io);Sd=s(Jc,`The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition.`),Jc.forEach(t),Nd=c(re),Ot=i(re,"P",{});var Ss=r(Ot);Dd=s(Ss,"This model inherits from "),En=i(Ss,"A",{href:!0});var Qc=r(En);Bd=s(Qc,"PreTrainedModel"),Qc.forEach(t),Wd=s(Ss,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ss.forEach(t),Od=c(re),Ut=i(re,"P",{});var Ns=r(Ut);Ud=s(Ns,"This model is also a PyTorch "),Kt=i(Ns,"A",{href:!0,rel:!0});var Xc=r(Kt);Kd=s(Xc,"torch.nn.Module"),Xc.forEach(t),Rd=s(Ns,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ns.forEach(t),Hd=c(re),W=i(re,"DIV",{class:!0});var de=r(W);k(Rt.$$.fragment,de),Vd=c(de),Le=i(de,"P",{});var An=r(Le);Yd=s(An,"The "),zn=i(An,"A",{href:!0});var Gc=r(zn);Jd=s(Gc,"LukeForEntitySpanClassification"),Gc.forEach(t),Qd=s(An," forward method, overrides the "),So=i(An,"CODE",{});var Zc=r(So);Xd=s(Zc,"__call__"),Zc.forEach(t),Gd=s(An," special method."),An.forEach(t),Zd=c(de),k(Ue.$$.fragment,de),el=c(de),No=i(de,"P",{});var ep=r(No);tl=s(ep,"Examples:"),ep.forEach(t),nl=c(de),k(Ht.$$.fragment,de),de.forEach(t),re.forEach(t),this.h()},h(){d(h,"name","hf:doc:metadata"),d(h,"content",JSON.stringify(pp)),d(w,"id","luke"),d(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(w,"href","#luke"),d(m,"class","relative group"),d(xe,"id","overview"),d(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(xe,"href","#overview"),d(le,"class","relative group"),d(Ve,"href","https://arxiv.org/abs/2010.01057"),d(Ve,"rel","nofollow"),d(Gt,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel"),d(Zt,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer"),d(en,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer"),d(on,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntityClassification"),d(Xe,"href","https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html"),d(Xe,"rel","nofollow"),d(sn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntityPairClassification"),d(Ge,"href","https://nlp.stanford.edu/projects/tacred/"),d(Ge,"rel","nofollow"),d(rn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntitySpanClassification"),d(dn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer"),d(ln,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntityPairClassification"),d(Ze,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LUKE"),d(Ze,"rel","nofollow"),d(tt,"href","https://github.com/studio-ousia/luke/tree/master/notebooks"),d(tt,"rel","nofollow"),d(ot,"href","https://huggingface.co/ikuyamada"),d(ot,"rel","nofollow"),d(st,"href","https://huggingface.co/nielsr"),d(st,"rel","nofollow"),d(at,"href","https://github.com/studio-ousia/luke"),d(at,"rel","nofollow"),d(Me,"id","transformers.LukeConfig"),d(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Me,"href","#transformers.LukeConfig"),d(he,"class","relative group"),d(pn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel"),d(hn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(un,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(C,"class","docstring"),d(Fe,"id","transformers.LukeTokenizer"),d(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(Fe,"href","#transformers.LukeTokenizer"),d(me,"class","relative group"),d(mn,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),d(fn,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),d(gn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer"),d(Pe,"class","docstring"),d(mo,"class","docstring"),d(M,"class","docstring"),d(je,"id","transformers.LukeModel"),d(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(je,"href","#transformers.LukeModel"),d(fe,"class","relative group"),d(_n,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(_t,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(_t,"rel","nofollow"),d(kn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeModel"),d(N,"class","docstring"),d(F,"class","docstring"),d(Ie,"id","transformers.LukeForMaskedLM"),d(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ie,"href","#transformers.LukeForMaskedLM"),d(_e,"class","relative group"),d(yn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(Lt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Lt,"rel","nofollow"),d(vn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForMaskedLM"),d(Q,"class","docstring"),d(P,"class","docstring"),d(Ne,"id","transformers.LukeForEntityClassification"),d(Ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ne,"href","#transformers.LukeForEntityClassification"),d(ye,"class","relative group"),d(bn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(Ct,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Ct,"rel","nofollow"),d(Tn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntityClassification"),d(D,"class","docstring"),d(j,"class","docstring"),d(Be,"id","transformers.LukeForEntityPairClassification"),d(Be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Be,"href","#transformers.LukeForEntityPairClassification"),d(be,"class","relative group"),d(wn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(St,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(St,"rel","nofollow"),d(Ln,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntityPairClassification"),d(B,"class","docstring"),d(A,"class","docstring"),d(Oe,"id","transformers.LukeForEntitySpanClassification"),d(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Oe,"href","#transformers.LukeForEntitySpanClassification"),d(we,"class","relative 
group"),d(En,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(Kt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Kt,"rel","nofollow"),d(zn,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeForEntitySpanClassification"),d(W,"class","docstring"),d(I,"class","docstring")},m(n,p){e(document.head,h),u(n,L,p),u(n,m,p),e(m,w),e(w,E),y(g,E,null),e(m,f),e(m,z),e(z,Ds),u(n,Ro,p),u(n,le,p),e(le,xe),e(xe,Sn),y(He,Sn,null),e(le,Bs),e(le,Nn),e(Nn,Ws),u(n,Ho,p),u(n,$e,p),e($e,Os),e($e,Ve),e(Ve,Us),e($e,Ks),u(n,Vo,p),u(n,Jt,p),e(Jt,Rs),u(n,Yo,p),u(n,Qt,p),e(Qt,Dn),e(Dn,Hs),u(n,Jo,p),u(n,Xt,p),e(Xt,Vs),u(n,Qo,p),u(n,R,p),e(R,Bn),e(Bn,Ye),e(Ye,Ys),e(Ye,Gt),e(Gt,Js),e(Ye,Qs),e(R,Xs),e(R,Wn),e(Wn,q),e(q,Gs),e(q,On),e(On,Zs),e(q,ea),e(q,Un),e(Un,ta),e(q,na),e(q,Kn),e(Kn,oa),e(q,sa),e(q,Rn),e(Rn,aa),e(q,ia),e(q,Zt),e(Zt,ra),e(q,da),e(R,la),e(R,Je),e(Je,H),e(H,en),e(en,ca),e(H,pa),e(H,Hn),e(Hn,ha),e(H,ua),e(H,Vn),e(Vn,ma),e(H,fa),e(H,Yn),e(Yn,ga),e(H,_a),e(Je,ka),e(Je,Qe),e(Qe,tn),e(tn,Jn),e(Jn,ya),e(tn,va),e(Qe,ba),e(Qe,nn),e(nn,Qn),e(Qn,Ta),e(nn,wa),e(R,La),e(R,U),e(U,Xn),e(Xn,Ea),e(U,za),e(U,ce),e(ce,qe),e(qe,on),e(on,xa),e(qe,$a),e(qe,Xe),e(Xe,qa),e(qe,Ca),e(ce,Ma),e(ce,Ce),e(Ce,sn),e(sn,Fa),e(Ce,Pa),e(Ce,Ge),e(Ge,ja),e(Ce,Aa),e(ce,Ia),e(ce,an),e(an,rn),e(rn,Sa),e(an,Na),e(U,Da),e(U,S),e(S,dn),e(dn,Ba),e(S,Wa),e(S,Gn),e(Gn,Oa),e(S,Ua),e(S,Zn),e(Zn,Ka),e(S,Ra),e(S,eo),e(eo,Ha),e(S,Va),e(S,to),e(to,Ya),e(S,Ja),e(U,Qa),e(U,pe),e(pe,Xa),e(pe,ln),e(ln,Ga),e(pe,Za),e(pe,Ze),e(Ze,ei),e(pe,ti),e(U,ni),e(U,et),e(et,oi),e(et,tt),e(tt,si),e(et,ai),u(n,Xo,p),u(n,cn,p),e(cn,ii),u(n,Go,p),y(nt,n,p),u(n,Zo,p),u(n,V,p),e(V,ri),e(V,ot),e(ot,di),e(V,li),e(V,st),e(st,ci),e(V,pi),e(V,at),e(at,hi),e(V,ui),u(n,es,p),u(n,he,p),e(he,Me),e(Me,no),y(it,no,null),e(he,mi),e(he,oo),e(oo,fi),u(n,ts,p),u(n,C,p),y(rt,C,null),e(C,gi),e(C,dt),e(dt,_i),e(dt,pn),e(pn,ki),e(dt,yi),e(C,vi),e(C,ue),e(ue,bi),e(ue,hn),e(hn,Ti),e(ue,wi),e(ue,un),e(un,Li),e(ue,Ei),e(C,zi),e(C,so),e(so,xi),e(C,$i),y(lt,C,null),u(n,ns,p),u(n,me,p),e(me,Fe),e(Fe,ao),y(ct,ao,null),e(me,qi),e(me,io),e(io,Ci),u(n,os,p),u(n,M,p),y(pt,M,null),e(M,Mi),e(M,ro),e(ro,Fi),e(M,Pi),e(M,x),e(x,ji),e(x,mn),e(mn,Ai),e(x,Ii),e(x,fn),e(fn,Si),e(x,Ni),e(x,gn),e(gn,Di),e(x,Bi),e(x,lo),e(lo,Wi),e(x,Oi),e(x,co),e(co,Ui),e(x,Ki),e(x,po),e(po,Ri),e(x,Hi),e(x,ho),e(ho,Vi),e(x,Yi),e(M,Ji),e(M,Pe),y(ht,Pe,null),e(Pe,Qi),e(Pe,uo),e(uo,Xi),e(M,Gi),e(M,mo),u(n,ss,p),u(n,fe,p),e(fe,je),e(je,fo),y(ut,fo,null),e(fe,Zi),e(fe,go),e(go,er),u(n,as,p),u(n,F,p),y(mt,F,null),e(F,tr),e(F,_o),e(_o,nr),e(F,or),e(F,ft),e(ft,sr),e(ft,_n),e(_n,ar),e(ft,ir),e(F,rr),e(F,gt),e(gt,dr),e(gt,_t),e(_t,lr),e(gt,cr),e(F,pr),e(F,N),y(kt,N,null),e(N,hr),e(N,ge),e(ge,ur),e(ge,kn),e(kn,mr),e(ge,fr),e(ge,ko),e(ko,gr),e(ge,_r),e(N,kr),y(Ae,N,null),e(N,yr),e(N,yo),e(yo,vr),e(N,br),y(yt,N,null),u(n,is,p),u(n,_e,p),e(_e,Ie),e(Ie,vo),y(vt,vo,null),e(_e,Tr),e(_e,bo),e(bo,wr),u(n,rs,p),u(n,P,p),y(bt,P,null),e(P,Lr),e(P,To),e(To,Er),e(P,zr),e(P,Tt),e(Tt,xr),e(Tt,yn),e(yn,$r),e(Tt,qr),e(P,Cr),e(P,wt),e(wt,Mr),e(wt,Lt),e(Lt,Fr),e(wt,Pr),e(P,jr),e(P,Q),y(Et,Q,null),e(Q,Ar),e(Q,ke),e(ke,Ir),e(ke,vn),e(vn,Sr),e(ke,Nr),e(ke,wo),e(wo,Dr),e(ke,Br),e(Q,Wr),y(Se,Q,null),u(n,ds,p),u(n,ye,p),e(ye,Ne),e(Ne,Lo),y(zt,Lo,null),e(ye,Or),e(ye,Eo),e(Eo,Ur),u(n,ls,p),u(n,j,p),y(xt,j,null),e(j,Kr),e(j,zo),e(zo,Rr),e(j,Hr),e(j,$t),e($t,Vr),e($t,bn),e(bn,Yr),e($t,Jr),e(j,Qr),e(j,qt),e(qt,Xr),e(qt,Ct),e(Ct,Gr),e(qt,Zr),e(j,ed),e(j,D),y(Mt,D,null),e(D,td),e(D,ve),e(ve,nd),e(ve,Tn),
e(Tn,od),e(ve,sd),e(ve,xo),e(xo,ad),e(ve,id),e(D,rd),y(De,D,null),e(D,dd),e(D,$o),e($o,ld),e(D,cd),y(Ft,D,null),u(n,cs,p),u(n,be,p),e(be,Be),e(Be,qo),y(Pt,qo,null),e(be,pd),e(be,Co),e(Co,hd),u(n,ps,p),u(n,A,p),y(jt,A,null),e(A,ud),e(A,Mo),e(Mo,md),e(A,fd),e(A,At),e(At,gd),e(At,wn),e(wn,_d),e(At,kd),e(A,yd),e(A,It),e(It,vd),e(It,St),e(St,bd),e(It,Td),e(A,wd),e(A,B),y(Nt,B,null),e(B,Ld),e(B,Te),e(Te,Ed),e(Te,Ln),e(Ln,zd),e(Te,xd),e(Te,Fo),e(Fo,$d),e(Te,qd),e(B,Cd),y(We,B,null),e(B,Md),e(B,Po),e(Po,Fd),e(B,Pd),y(Dt,B,null),u(n,hs,p),u(n,we,p),e(we,Oe),e(Oe,jo),y(Bt,jo,null),e(we,jd),e(we,Ao),e(Ao,Ad),u(n,us,p),u(n,I,p),y(Wt,I,null),e(I,Id),e(I,Io),e(Io,Sd),e(I,Nd),e(I,Ot),e(Ot,Dd),e(Ot,En),e(En,Bd),e(Ot,Wd),e(I,Od),e(I,Ut),e(Ut,Ud),e(Ut,Kt),e(Kt,Kd),e(Ut,Rd),e(I,Hd),e(I,W),y(Rt,W,null),e(W,Vd),e(W,Le),e(Le,Yd),e(Le,zn),e(zn,Jd),e(Le,Qd),e(Le,So),e(So,Xd),e(Le,Gd),e(W,Zd),y(Ue,W,null),e(W,el),e(W,No),e(No,tl),e(W,nl),y(Ht,W,null),ms=!0},p(n,[p]){const Vt={};p&2&&(Vt.$$scope={dirty:p,ctx:n}),Ae.$set(Vt);const Do={};p&2&&(Do.$$scope={dirty:p,ctx:n}),Se.$set(Do);const Bo={};p&2&&(Bo.$$scope={dirty:p,ctx:n}),De.$set(Bo);const Wo={};p&2&&(Wo.$$scope={dirty:p,ctx:n}),We.$set(Wo);const Yt={};p&2&&(Yt.$$scope={dirty:p,ctx:n}),Ue.$set(Yt)},i(n){ms||(v(g.$$.fragment,n),v(He.$$.fragment,n),v(nt.$$.fragment,n),v(it.$$.fragment,n),v(rt.$$.fragment,n),v(lt.$$.fragment,n),v(ct.$$.fragment,n),v(pt.$$.fragment,n),v(ht.$$.fragment,n),v(ut.$$.fragment,n),v(mt.$$.fragment,n),v(kt.$$.fragment,n),v(Ae.$$.fragment,n),v(yt.$$.fragment,n),v(vt.$$.fragment,n),v(bt.$$.fragment,n),v(Et.$$.fragment,n),v(Se.$$.fragment,n),v(zt.$$.fragment,n),v(xt.$$.fragment,n),v(Mt.$$.fragment,n),v(De.$$.fragment,n),v(Ft.$$.fragment,n),v(Pt.$$.fragment,n),v(jt.$$.fragment,n),v(Nt.$$.fragment,n),v(We.$$.fragment,n),v(Dt.$$.fragment,n),v(Bt.$$.fragment,n),v(Wt.$$.fragment,n),v(Rt.$$.fragment,n),v(Ue.$$.fragment,n),v(Ht.$$.fragment,n),ms=!0)},o(n){b(g.$$.fragment,n),b(He.$$.fragment,n),b(nt.$$.fragment,n),b(it.$$.fragment,n),b(rt.$$.fragment,n),b(lt.$$.fragment,n),b(ct.$$.fragment,n),b(pt.$$.fragment,n),b(ht.$$.fragment,n),b(ut.$$.fragment,n),b(mt.$$.fragment,n),b(kt.$$.fragment,n),b(Ae.$$.fragment,n),b(yt.$$.fragment,n),b(vt.$$.fragment,n),b(bt.$$.fragment,n),b(Et.$$.fragment,n),b(Se.$$.fragment,n),b(zt.$$.fragment,n),b(xt.$$.fragment,n),b(Mt.$$.fragment,n),b(De.$$.fragment,n),b(Ft.$$.fragment,n),b(Pt.$$.fragment,n),b(jt.$$.fragment,n),b(Nt.$$.fragment,n),b(We.$$.fragment,n),b(Dt.$$.fragment,n),b(Bt.$$.fragment,n),b(Wt.$$.fragment,n),b(Rt.$$.fragment,n),b(Ue.$$.fragment,n),b(Ht.$$.fragment,n),ms=!1},d(n){t(h),n&&t(L),n&&t(m),T(g),n&&t(Ro),n&&t(le),T(He),n&&t(Ho),n&&t($e),n&&t(Vo),n&&t(Jt),n&&t(Yo),n&&t(Qt),n&&t(Jo),n&&t(Xt),n&&t(Qo),n&&t(R),n&&t(Xo),n&&t(cn),n&&t(Go),T(nt,n),n&&t(Zo),n&&t(V),n&&t(es),n&&t(he),T(it),n&&t(ts),n&&t(C),T(rt),T(lt),n&&t(ns),n&&t(me),T(ct),n&&t(os),n&&t(M),T(pt),T(ht),n&&t(ss),n&&t(fe),T(ut),n&&t(as),n&&t(F),T(mt),T(kt),T(Ae),T(yt),n&&t(is),n&&t(_e),T(vt),n&&t(rs),n&&t(P),T(bt),T(Et),T(Se),n&&t(ds),n&&t(ye),T(zt),n&&t(ls),n&&t(j),T(xt),T(Mt),T(De),T(Ft),n&&t(cs),n&&t(be),T(Pt),n&&t(ps),n&&t(A),T(jt),T(Nt),T(We),T(Dt),n&&t(hs),n&&t(we),T(Bt),n&&t(us),n&&t(I),T(Wt),T(Rt),T(Ue),T(Ht)}}}const 
pp={local:"luke",sections:[{local:"overview",title:"Overview"},{local:"transformers.LukeConfig",title:"LukeConfig"},{local:"transformers.LukeTokenizer",title:"LukeTokenizer"},{local:"transformers.LukeModel",title:"LukeModel"},{local:"transformers.LukeForMaskedLM",title:"LukeForMaskedLM"},{local:"transformers.LukeForEntityClassification",title:"LukeForEntityClassification"},{local:"transformers.LukeForEntityPairClassification",title:"LukeForEntityPairClassification"},{local:"transformers.LukeForEntitySpanClassification",title:"LukeForEntitySpanClassification"}],title:"LUKE"};function hp(O,h,L){let{fw:m}=h;return O.$$set=w=>{"fw"in w&&L(0,m=w.fw)},[m]}class yp extends tp{constructor(h){super();np(this,h,hp,cp,op,{fw:0})}}export{yp as default,pp as metadata};
9918
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/blenderbot.mdx-e8805f53.js
import{S as Um,i as Rm,s as Hm,e as n,k as i,w as _,t as s,L as Km,c as r,d as t,m as l,a,x as g,h as d,b as c,J as e,g as p,y as b,q as v,o as k,B as y}from"../../chunks/vendor-b1433968.js";import{T as po}from"../../chunks/Tip-c3840994.js";import{D as A}from"../../chunks/Docstring-ff504c58.js";import{C as pt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as he}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Vm(D){let u,z,m,T,B;return{c(){u=n("p"),z=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=s("Module"),B=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=r(x,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(w,"CODE",{});var F=a(m);T=d(F,"Module"),F.forEach(t),B=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,z),e(u,m),e(m,T),e(u,B)},d(x){x&&t(u)}}}function Qm(D){let u,z,m,T,B;return{c(){u=n("p"),z=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=s("Module"),B=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=r(x,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(w,"CODE",{});var F=a(m);T=d(F,"Module"),F.forEach(t),B=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,z),e(u,m),e(m,T),e(u,B)},d(x){x&&t(u)}}}function Jm(D){let u,z,m,T,B,x,w,F,Ce,ue,E,ke,oe,Pe,ne,re,je,Fe,G,j,ye,R,M,C,Ne,Q,J,Oe,W,Ae,Ie,N,fe,ae,$e,me,H,Se,Ee,q,Le,U,De;return{c(){u=n("p"),z=s("TF 2.0 models accepts two formats as inputs:"),m=i(),T=n("ul"),B=n("li"),x=s("having all inputs as keyword arguments (like PyTorch models), or"),w=i(),F=n("li"),Ce=s("having all inputs as a list, tuple or dict in the first positional arguments."),ue=i(),E=n("p"),ke=s("This second option is useful when using "),oe=n("code"),Pe=s("tf.keras.Model.fit"),ne=s(` method which currently requires having all the tensors in the first argument of the model call function: `),re=n("code"),je=s("model(inputs)"),Fe=s("."),G=i(),j=n("p"),ye=s(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),R=i(),M=n("ul"),C=n("li"),Ne=s("a single Tensor with "),Q=n("code"),J=s("input_ids"),Oe=s(" only and nothing else: "),W=n("code"),Ae=s("model(input_ids)"),Ie=i(),N=n("li"),fe=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=n("code"),$e=s("model([input_ids, attention_mask])"),me=s(" or "),H=n("code"),Se=s("model([input_ids, attention_mask, token_type_ids])"),Ee=i(),q=n("li"),Le=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=n("code"),De=s('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(f){u=r(f,"P",{});var $=a(u);z=d($,"TF 2.0 models accepts two formats as 
inputs:"),$.forEach(t),m=l(f),T=r(f,"UL",{});var Te=a(T);B=r(Te,"LI",{});var ht=a(B);x=d(ht,"having all inputs as keyword arguments (like PyTorch models), or"),ht.forEach(t),w=l(Te),F=r(Te,"LI",{});var Ve=a(F);Ce=d(Ve,"having all inputs as a list, tuple or dict in the first positional arguments."),Ve.forEach(t),Te.forEach(t),ue=l(f),E=r(f,"P",{});var P=a(E);ke=d(P,"This second option is useful when using "),oe=r(P,"CODE",{});var _e=a(oe);Pe=d(_e,"tf.keras.Model.fit"),_e.forEach(t),ne=d(P,` method which currently requires having all the tensors in the first argument of the model call function: `),re=r(P,"CODE",{});var Ge=a(re);je=d(Ge,"model(inputs)"),Ge.forEach(t),Fe=d(P,"."),P.forEach(t),G=l(f),j=r(f,"P",{});var we=a(j);ye=d(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),R=l(f),M=r(f,"UL",{});var K=a(M);C=r(K,"LI",{});var V=a(C);Ne=d(V,"a single Tensor with "),Q=r(V,"CODE",{});var ut=a(Q);J=d(ut,"input_ids"),ut.forEach(t),Oe=d(V," only and nothing else: "),W=r(V,"CODE",{});var Qe=a(W);Ae=d(Qe,"model(input_ids)"),Qe.forEach(t),V.forEach(t),Ie=l(K),N=r(K,"LI",{});var O=a(N);fe=d(O,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=r(O,"CODE",{});var xe=a(ae);$e=d(xe,"model([input_ids, attention_mask])"),xe.forEach(t),me=d(O," or "),H=r(O,"CODE",{});var ft=a(H);Se=d(ft,"model([input_ids, attention_mask, token_type_ids])"),ft.forEach(t),O.forEach(t),Ee=l(K),q=r(K,"LI",{});var se=a(q);Le=d(se,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(se,"CODE",{});var mt=a(U);De=d(mt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),mt.forEach(t),se.forEach(t),K.forEach(t)},m(f,$){p(f,u,$),e(u,z),p(f,m,$),p(f,T,$),e(T,B),e(B,x),e(T,w),e(T,F),e(F,Ce),p(f,ue,$),p(f,E,$),e(E,ke),e(E,oe),e(oe,Pe),e(E,ne),e(E,re),e(re,je),e(E,Fe),p(f,G,$),p(f,j,$),e(j,ye),p(f,R,$),p(f,M,$),e(M,C),e(C,Ne),e(C,Q),e(Q,J),e(C,Oe),e(C,W),e(W,Ae),e(M,Ie),e(M,N),e(N,fe),e(N,ae),e(ae,$e),e(N,me),e(N,H),e(H,Se),e(M,Ee),e(M,q),e(q,Le),e(q,U),e(U,De)},d(f){f&&t(u),f&&t(m),f&&t(T),f&&t(ue),f&&t(E),f&&t(G),f&&t(j),f&&t(R),f&&t(M)}}}function Xm(D){let u,z,m,T,B;return{c(){u=n("p"),z=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=s("Module"),B=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=r(x,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(w,"CODE",{});var F=a(m);T=d(F,"Module"),F.forEach(t),B=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,z),e(u,m),e(m,T),e(u,B)},d(x){x&&t(u)}}}function Ym(D){let u,z,m,T,B,x,w,F,Ce,ue,E,ke,oe,Pe,ne,re,je,Fe,G,j,ye,R,M,C,Ne,Q,J,Oe,W,Ae,Ie,N,fe,ae,$e,me,H,Se,Ee,q,Le,U,De;return{c(){u=n("p"),z=s("TF 2.0 models accepts two formats as inputs:"),m=i(),T=n("ul"),B=n("li"),x=s("having all inputs as keyword arguments (like PyTorch models), or"),w=i(),F=n("li"),Ce=s("having all inputs as a list, tuple or dict in the first positional arguments."),ue=i(),E=n("p"),ke=s("This second option is useful when using "),oe=n("code"),Pe=s("tf.keras.Model.fit"),ne=s(` method 
which currently requires having all the tensors in the first argument of the model call function: `),re=n("code"),je=s("model(inputs)"),Fe=s("."),G=i(),j=n("p"),ye=s(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),R=i(),M=n("ul"),C=n("li"),Ne=s("a single Tensor with "),Q=n("code"),J=s("input_ids"),Oe=s(" only and nothing else: "),W=n("code"),Ae=s("model(input_ids)"),Ie=i(),N=n("li"),fe=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=n("code"),$e=s("model([input_ids, attention_mask])"),me=s(" or "),H=n("code"),Se=s("model([input_ids, attention_mask, token_type_ids])"),Ee=i(),q=n("li"),Le=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=n("code"),De=s('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(f){u=r(f,"P",{});var $=a(u);z=d($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),m=l(f),T=r(f,"UL",{});var Te=a(T);B=r(Te,"LI",{});var ht=a(B);x=d(ht,"having all inputs as keyword arguments (like PyTorch models), or"),ht.forEach(t),w=l(Te),F=r(Te,"LI",{});var Ve=a(F);Ce=d(Ve,"having all inputs as a list, tuple or dict in the first positional arguments."),Ve.forEach(t),Te.forEach(t),ue=l(f),E=r(f,"P",{});var P=a(E);ke=d(P,"This second option is useful when using "),oe=r(P,"CODE",{});var _e=a(oe);Pe=d(_e,"tf.keras.Model.fit"),_e.forEach(t),ne=d(P,` method which currently requires having all the tensors in the first argument of the model call function: `),re=r(P,"CODE",{});var Ge=a(re);je=d(Ge,"model(inputs)"),Ge.forEach(t),Fe=d(P,"."),P.forEach(t),G=l(f),j=r(f,"P",{});var we=a(j);ye=d(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),R=l(f),M=r(f,"UL",{});var K=a(M);C=r(K,"LI",{});var V=a(C);Ne=d(V,"a single Tensor with "),Q=r(V,"CODE",{});var ut=a(Q);J=d(ut,"input_ids"),ut.forEach(t),Oe=d(V," only and nothing else: "),W=r(V,"CODE",{});var Qe=a(W);Ae=d(Qe,"model(input_ids)"),Qe.forEach(t),V.forEach(t),Ie=l(K),N=r(K,"LI",{});var O=a(N);fe=d(O,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=r(O,"CODE",{});var xe=a(ae);$e=d(xe,"model([input_ids, attention_mask])"),xe.forEach(t),me=d(O," or "),H=r(O,"CODE",{});var ft=a(H);Se=d(ft,"model([input_ids, attention_mask, token_type_ids])"),ft.forEach(t),O.forEach(t),Ee=l(K),q=r(K,"LI",{});var se=a(q);Le=d(se,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(se,"CODE",{});var mt=a(U);De=d(mt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),mt.forEach(t),se.forEach(t),K.forEach(t)},m(f,$){p(f,u,$),e(u,z),p(f,m,$),p(f,T,$),e(T,B),e(B,x),e(T,w),e(T,F),e(F,Ce),p(f,ue,$),p(f,E,$),e(E,ke),e(E,oe),e(oe,Pe),e(E,ne),e(E,re),e(re,je),e(E,Fe),p(f,G,$),p(f,j,$),e(j,ye),p(f,R,$),p(f,M,$),e(M,C),e(C,Ne),e(C,Q),e(Q,J),e(C,Oe),e(C,W),e(W,Ae),e(M,Ie),e(M,N),e(N,fe),e(N,ae),e(ae,$e),e(N,me),e(N,H),e(H,Se),e(M,Ee),e(M,q),e(q,Le),e(q,U),e(U,De)},d(f){f&&t(u),f&&t(m),f&&t(T),f&&t(ue),f&&t(E),f&&t(G),f&&t(j),f&&t(R),f&&t(M)}}}function Zm(D){let u,z,m,T,B;return{c(){u=n("p"),z=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=s("Module"),B=s(` instance afterwards instead of this since the former takes care of running the pre 
and post processing steps while the latter silently ignores them.`)},l(x){u=r(x,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(w,"CODE",{});var F=a(m);T=d(F,"Module"),F.forEach(t),B=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,z),e(u,m),e(m,T),e(u,B)},d(x){x&&t(u)}}}function e_(D){let u,z,m,T,B;return{c(){u=n("p"),z=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=s("Module"),B=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=r(x,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(w,"CODE",{});var F=a(m);T=d(F,"Module"),F.forEach(t),B=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,z),e(u,m),e(m,T),e(u,B)},d(x){x&&t(u)}}}function t_(D){let u,z,m,T,B;return{c(){u=n("p"),z=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=s("Module"),B=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=r(x,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(w,"CODE",{});var F=a(m);T=d(F,"Module"),F.forEach(t),B=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,z),e(u,m),e(m,T),e(u,B)},d(x){x&&t(u)}}}function o_(D){let 
u,z,m,T,B,x,w,F,Ce,ue,E,ke,oe,Pe,ne,re,je,Fe,G,j,ye,R,M,C,Ne,Q,J,Oe,W,Ae,Ie,N,fe,ae,$e,me,H,Se,Ee,q,Le,U,De,f,$,Te,ht,Ve,P,_e,Ge,we,K,V,ut,Qe,O,xe,ft,se,mt,Nd,Od,ho,Ad,uo,Id,Sd,Ld,We,Dd,$r,Gd,Wd,Er,Ud,Rd,Wn,Hd,Kd,Es,_t,Nt,Mr,fo,Vd,qr,Qd,Ms,Un,Jd,qs,mo,Cs,gt,Ot,Cr,_o,Xd,Pr,Yd,Ps,de,go,Zd,bt,ei,Rn,ti,oi,bo,ni,ri,ai,vt,si,Hn,di,ii,Kn,li,ci,pi,jr,hi,ui,vo,js,kt,At,Nr,ko,fi,Or,mi,Ns,ie,yo,_i,Ar,gi,bi,It,Ir,vi,ki,Vn,yi,Ti,wi,To,xi,Qn,Bi,zi,Fi,Je,wo,$i,Sr,Ei,Mi,Lr,Jn,qi,Dr,Ci,Os,yt,St,Gr,xo,Pi,Wr,ji,As,le,Bo,Ni,zo,Oi,Ur,Ai,Ii,Si,Lt,Rr,Li,Di,Xn,Gi,Wi,Ui,Fo,Ri,Yn,Hi,Ki,Vi,Xe,$o,Qi,Hr,Ji,Xi,Kr,Zn,Yi,Vr,Zi,Is,Tt,Dt,Qr,Eo,el,Jr,tl,Ss,Ue,ol,Xr,nl,rl,Yr,al,sl,Zr,dl,Ls,Be,Mo,il,qo,ll,er,cl,pl,hl,Co,ul,Po,fl,ml,_l,ge,jo,gl,wt,bl,tr,vl,kl,ea,yl,Tl,wl,Gt,xl,ta,Bl,zl,No,Ds,xt,Wt,oa,Oo,Fl,na,$l,Gs,Re,El,or,Ml,ql,ra,Cl,Pl,aa,jl,Ws,ze,Ao,Nl,Io,Ol,nr,Al,Il,Sl,So,Ll,Lo,Dl,Gl,Wl,X,Do,Ul,Bt,Rl,rr,Hl,Kl,sa,Vl,Ql,Jl,Ut,Xl,da,Yl,Zl,ia,la,ca,pa,ec,tc,ha,ua,fa,Go,oc,ma,nc,rc,Us,zt,Rt,_a,Wo,ac,ga,sc,Rs,Uo,Ye,Ro,dc,ba,ic,lc,Ho,Hs,Ft,Ht,va,Ko,cc,ka,pc,Ks,ce,Vo,hc,Qo,uc,ar,fc,mc,_c,Jo,gc,Xo,bc,vc,kc,Kt,yc,be,Yo,Tc,$t,wc,sr,xc,Bc,ya,zc,Fc,$c,Vt,Ec,Ta,Mc,qc,Zo,Vs,Et,Qt,wa,en,Cc,xa,Pc,Qs,pe,tn,jc,on,Nc,dr,Oc,Ac,Ic,nn,Sc,rn,Lc,Dc,Gc,Jt,Wc,Y,an,Uc,Mt,Rc,ir,Hc,Kc,Ba,Vc,Qc,Jc,Xt,Xc,za,Yc,Zc,Fa,$a,Ea,Ma,ep,tp,qa,Ca,Pa,sn,op,ja,np,rp,Js,qt,Yt,Na,dn,ap,Oa,sp,Xs,I,ln,dp,cn,ip,lr,lp,cp,pp,pn,hp,hn,up,fp,mp,Aa,_p,gp,He,Ia,un,bp,vp,Sa,fn,kp,yp,La,mn,Tp,wp,Da,_n,xp,Bp,ve,gn,zp,Ct,Fp,Ga,$p,Ep,Wa,Mp,qp,Cp,Zt,Pp,Ua,jp,Np,bn,Op,Ze,vn,Ap,Ra,Ip,Sp,kn,Lp,et,yn,Dp,Ha,Gp,Wp,Tn,Ys,Pt,eo,Ka,wn,Up,Va,Rp,Zs,S,xn,Hp,Bn,Kp,cr,Vp,Qp,Jp,zn,Xp,Fn,Yp,Zp,eh,Qa,th,oh,Ke,Ja,$n,nh,rh,Xa,En,ah,sh,Ya,Mn,dh,ih,Za,qn,lh,ch,L,Cn,ph,jt,hh,es,uh,fh,ts,mh,_h,gh,to,bh,os,vh,kh,ns,rs,as,ss,yh,Th,ds,is,ls,cs,wh,xh,ps,hs,us,fs,Bh,zh,ms,_s,Pn,oo,no,gs,jn,Fh,bs,$h,Eh,vs,Mh,qh,tt,Nn,Ch,ks,Ph,jh,On,Nh,ot,An,Oh,ys,Ah,Ih,In,ed;return x=new he({}),R=new he({}),we=new he({}),fo=new he({}),mo=new pt({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration mname = 'facebook/blenderbot-400M-distill' model = BlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = "My friends are cool but they eat too many carbs." inputs = tokenizer([UTTERANCE], return_tensors='pt') reply_ids = model.generate(**inputs) print(tokenizer.batch_decode(reply_ids)),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>UTTERANCE = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([UTTERANCE], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>reply_ids = model.generate(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.batch_decode(reply_ids)) [<span class="hljs-string">&quot;&lt;s&gt; That&#x27;s unfortunate. 
Are they trying to lose weight or are they just trying to be healthier?&lt;/s&gt;&quot;</span>]`}}),_o=new he({}),go=new A({props:{name:"class transformers.BlenderbotConfig",anchor:"transformers.BlenderbotConfig",parameters:[{name:"vocab_size",val:" = 8008"},{name:"max_position_embeddings",val:" = 128"},{name:"encoder_layers",val:" = 2"},{name:"encoder_ffn_dim",val:" = 10240"},{name:"encoder_attention_heads",val:" = 32"},{name:"decoder_layers",val:" = 24"},{name:"decoder_ffn_dim",val:" = 10240"},{name:"decoder_attention_heads",val:" = 32"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 2560"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 1"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"encoder_no_repeat_ngram_size",val:" = 3"},{name:"forced_eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/configuration_blenderbot.py#L29",parametersDescription:[{anchor:"transformers.BlenderbotConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotModel">BlenderbotModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotModel">TFBlenderbotModel</a>.`,name:"vocab_size"},{anchor:"transformers.BlenderbotConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.BlenderbotConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.BlenderbotConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.BlenderbotConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.BlenderbotConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.BlenderbotConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in 
decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.BlenderbotConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.BlenderbotConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.BlenderbotConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.BlenderbotConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.BlenderbotConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.BlenderbotConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.BlenderbotConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.BlenderbotConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.BlenderbotConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.BlenderbotConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.BlenderbotConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),vo=new pt({props:{code:`from transformers import BlenderbotModel, BlenderbotConfig # Initializing a Blenderbot facebook/blenderbot-3B style configuration configuration = BlenderbotConfig() # Initializing a model from the facebook/blenderbot-3B style configuration model = BlenderbotModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotModel, BlenderbotConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Blenderbot facebook/blenderbot-3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BlenderbotConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/blenderbot-3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),ko=new he({}),yo=new A({props:{name:"class transformers.BlenderbotTokenizer",anchor:"transformers.BlenderbotTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/tokenization_blenderbot.py#L46"}}),wo=new A({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BlenderbotTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/tokenization_blenderbot.py#L61",parametersDescription:[{anchor:"transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be 
added`,name:"token_ids_0"},{anchor:"transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Will be ignored`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),xo=new he({}),Bo=new A({props:{name:"class transformers.BlenderbotTokenizerFast",anchor:"transformers.BlenderbotTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py#L47"}}),$o=new A({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py#L63",parametersDescription:[{anchor:"transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Will be ignored`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Eo=new he({}),Mo=new A({props:{name:"class transformers.BlenderbotModel",anchor:"transformers.BlenderbotModel",parameters:[{name:"config",val:": BlenderbotConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_blenderbot.py#L1070",parametersDescription:[{anchor:"transformers.BlenderbotModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),jo=new A({props:{name:"forward",anchor:"transformers.BlenderbotModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_blenderbot.py#L1108",parametersDescription:[{anchor:"transformers.BlenderbotModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.BlenderbotModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.BlenderbotModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BlenderbotModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BlenderbotModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BlenderbotModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BlenderbotModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BlenderbotModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s 
cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Gt=new po({props:{$$slots:{default:[Vm]},$$scope:{ctx:D}}}),No=new pt({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotModel model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill") tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; 
</span>last_hidden_states = outputs.last_hidden_state`}}),Oo=new he({}),Ao=new A({props:{name:"class transformers.BlenderbotForConditionalGeneration",anchor:"transformers.BlenderbotForConditionalGeneration",parameters:[{name:"config",val:": BlenderbotConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_blenderbot.py#L1204",parametersDescription:[{anchor:"transformers.BlenderbotForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Do=new A({props:{name:"forward",anchor:"transformers.BlenderbotForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_blenderbot.py#L1261",parametersDescription:[{anchor:"transformers.BlenderbotForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ut=new po({props:{$$slots:{default:[Qm]},$$scope:{ctx:D}}}),Wo=new he({}),Ro=new A({props:{name:"forward",anchor:"transformers.BlenderbotForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_blenderbot.py#L1424",parametersDescription:[{anchor:"transformers.BlenderbotForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BlenderbotForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BlenderbotForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.BlenderbotForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.BlenderbotForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ho=new pt({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotForCausalLM tokenizer = BlenderbotTokenizer.from_pretrained('facebook/bart-large') model = BlenderbotForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForCausalLM.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ko=new he({}),Vo=new A({props:{name:"class transformers.TFBlenderbotModel",anchor:"transformers.TFBlenderbotModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1180",parametersDescription:[{anchor:"transformers.TFBlenderbotModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new po({props:{$$slots:{default:[Jm]},$$scope:{ctx:D}}}),Yo=new A({props:{name:"call",anchor:"transformers.TFBlenderbotModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1205",parametersDescription:[{anchor:"transformers.TFBlenderbotModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBlenderbotModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBlenderbotModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFBlenderbotModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBlenderbotModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBlenderbotModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBlenderbotModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBlenderbotModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBlenderbotModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBlenderbotModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBlenderbotModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBlenderbotModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBlenderbotModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBlenderbotModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Vt=new po({props:{$$slots:{default:[Xm]},$$scope:{ctx:D}}}),Zo=new pt({props:{code:`from transformers import BlenderbotTokenizer, TFBlenderbotModel import tensorflow as tf tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill') model = TFBlenderbotModel.from_pretrained('facebook/blenderbot-400M-distill') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, TFBlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = 
BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBlenderbotModel.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),en=new he({}),tn=new A({props:{name:"class transformers.TFBlenderbotForConditionalGeneration",anchor:"transformers.TFBlenderbotForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1300",parametersDescription:[{anchor:"transformers.TFBlenderbotForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jt=new po({props:{$$slots:{default:[Ym]},$$scope:{ctx:D}}}),an=new A({props:{name:"call",anchor:"transformers.TFBlenderbotForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1346",parametersDescription:[{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted 
average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Xt=new po({props:{$$slots:{default:[Zm]},$$scope:{ctx:D}}}),dn=new he({}),ln=new A({props:{name:"class transformers.FlaxBlenderbotModel",anchor:"transformers.FlaxBlenderbotModel",parameters:[{name:"config",val:": BlenderbotConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1206",parametersDescription:[{anchor:"transformers.FlaxBlenderbotModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gn=new A({props:{name:"__call__",anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1141",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention 
layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Zt=new po({props:{$$slots:{default:[e_]},$$scope:{ctx:D}}}),bn=new pt({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotModel tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill') model = FlaxBlenderbotModel.from_pretrained('facebook/blenderbot-400M-distill') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotModel.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),vn=new A({props:{name:"encode",anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = 
None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L963",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),kn=new pt({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-400M-distill') tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),yn=new A({props:{name:"decode",anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1026",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) 
of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Tn=new pt({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-400M-distill') tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),wn=new he({}),xn=new A({props:{name:"class transformers.FlaxBlenderbotForConditionalGeneration",anchor:"transformers.FlaxBlenderbotForConditionalGeneration",parameters:[{name:"config",val:": BlenderbotConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1293",parametersDescription:[{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Cn=new A({props:{name:"__call__",anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1141",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> 
<p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),to=new po({props:{$$slots:{default:[t_]},$$scope:{ctx:D}}}),jn=new he({}),Nn=new A({props:{name:"encode",anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L963",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new pt({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-400M-distill') tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),An=new A({props:{name:"decode",anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1297",parametersDescription:[{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when 
<code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),In=new pt({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-400M-distill') tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot-400M-distill&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){u=n("meta"),z=i(),m=n("h1"),T=n("a"),B=n("span"),_(x.$$.fragment),w=i(),F=n("span"),Ce=s("Blenderbot"),ue=i(),E=n("p"),ke=n("strong"),oe=s("DISCLAIMER:"),Pe=s(" If you see something strange, file a "),ne=n("a"),re=s("Github Issue"),je=s(" ."),Fe=i(),G=n("h2"),j=n("a"),ye=n("span"),_(R.$$.fragment),M=i(),C=n("span"),Ne=s("Overview"),Q=i(),J=n("p"),Oe=s("The Blender chatbot model was proposed in "),W=n("a"),Ae=s("Recipes for building an open-domain chatbot"),Ie=s(` Stephen Roller, 
Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.`),N=i(),fe=n("p"),ae=s("The abstract of the paper is the following:"),$e=i(),me=n("p"),H=n("em"),Se=s(`Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.`),Ee=i(),q=n("p"),Le=s("This model was contributed by "),U=n("a"),De=s("sshleifer"),f=s(". The authors\u2019 code can be found "),$=n("a"),Te=s("here"),ht=s(" ."),Ve=i(),P=n("h2"),_e=n("a"),Ge=n("span"),_(we.$$.fragment),K=i(),V=n("span"),ut=s("Implementation Notes"),Qe=i(),O=n("ul"),xe=n("li"),ft=s("Blenderbot uses a standard "),se=n("a"),mt=s("seq2seq model transformer"),Nd=s(" based architecture."),Od=i(),ho=n("li"),Ad=s("Available checkpoints can be found in the "),uo=n("a"),Id=s("model hub"),Sd=s("."),Ld=i(),We=n("li"),Dd=s("This is the "),$r=n("em"),Gd=s("default"),Wd=s(` Blenderbot model class. However, some smaller checkpoints, such as `),Er=n("code"),Ud=s("facebook/blenderbot_small_90M"),Rd=s(`, have a different architecture and consequently should be used with `),Wn=n("a"),Hd=s("BlenderbotSmall"),Kd=s("."),Es=i(),_t=n("h2"),Nt=n("a"),Mr=n("span"),_(fo.$$.fragment),Vd=i(),qr=n("span"),Qd=s("Usage"),Ms=i(),Un=n("p"),Jd=s("Here is an example of model usage:"),qs=i(),_(mo.$$.fragment),Cs=i(),gt=n("h2"),Ot=n("a"),Cr=n("span"),_(_o.$$.fragment),Xd=i(),Pr=n("span"),Yd=s("BlenderbotConfig"),Ps=i(),de=n("div"),_(go.$$.fragment),Zd=i(),bt=n("p"),ei=s("This is the configuration class to store the configuration of a "),Rn=n("a"),ti=s("BlenderbotModel"),oi=s(`. It is used to instantiate an Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot `),bo=n("a"),ni=s("facebook/blenderbot-3B"),ri=s(" architecture."),ai=i(),vt=n("p"),si=s("Configuration objects inherit from "),Hn=n("a"),di=s("PretrainedConfig"),ii=s(` and can be used to control the model outputs. 
Read the documentation from `),Kn=n("a"),li=s("PretrainedConfig"),ci=s(" for more information."),pi=i(),jr=n("p"),hi=s("Example:"),ui=i(),_(vo.$$.fragment),js=i(),kt=n("h2"),At=n("a"),Nr=n("span"),_(ko.$$.fragment),fi=i(),Or=n("span"),mi=s("BlenderbotTokenizer"),Ns=i(),ie=n("div"),_(yo.$$.fragment),_i=i(),Ar=n("p"),gi=s("Construct a Blenderbot tokenizer."),bi=i(),It=n("p"),Ir=n("code"),vi=s("Blenderbot"),ki=s(" is nearly identical to "),Vn=n("a"),yi=s("RobertaTokenizer"),Ti=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),wi=i(),To=n("p"),xi=s("Refer to superclass "),Qn=n("a"),Bi=s("RobertaTokenizer"),zi=s(` for usage examples and documentation concerning parameters.`),Fi=i(),Je=n("div"),_(wo.$$.fragment),$i=i(),Sr=n("p"),Ei=s(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:`),Mi=i(),Lr=n("ul"),Jn=n("li"),qi=s("single sequence: "),Dr=n("code"),Ci=s("X </s>"),Os=i(),yt=n("h2"),St=n("a"),Gr=n("span"),_(xo.$$.fragment),Pi=i(),Wr=n("span"),ji=s("BlenderbotTokenizerFast"),As=i(),le=n("div"),_(Bo.$$.fragment),Ni=i(),zo=n("p"),Oi=s("Construct a \u201Cfast\u201D Blenderbot tokenizer (backed by HuggingFace\u2019s "),Ur=n("em"),Ai=s("tokenizers"),Ii=s(" library)."),Si=i(),Lt=n("p"),Rr=n("code"),Li=s("BlenderbotFast"),Di=s(" is nearly identical to "),Xn=n("a"),Gi=s("RobertaTokenizerFast"),Wi=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),Ui=i(),Fo=n("p"),Ri=s("Refer to superclass "),Yn=n("a"),Hi=s("RobertaTokenizerFast"),Ki=s(` for usage examples and documentation concerning parameters.`),Vi=i(),Xe=n("div"),_($o.$$.fragment),Qi=i(),Hr=n("p"),Ji=s(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:`),Xi=i(),Kr=n("ul"),Zn=n("li"),Yi=s("single sequence: "),Vr=n("code"),Zi=s("X </s>"),Is=i(),Tt=n("h2"),Dt=n("a"),Qr=n("span"),_(Eo.$$.fragment),el=i(),Jr=n("span"),tl=s("BlenderbotModel"),Ss=i(),Ue=n("p"),ol=s("See "),Xr=n("code"),nl=s("transformers.BartModel"),rl=s(" for arguments to "),Yr=n("em"),al=s("forward"),sl=s(" and "),Zr=n("em"),dl=s("generate"),Ls=i(),Be=n("div"),_(Mo.$$.fragment),il=i(),qo=n("p"),ll=s(`The bare Blenderbot Model outputting raw hidden-states without any specific head on top. This model inherits from `),er=n("a"),cl=s("PreTrainedModel"),pl=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hl=i(),Co=n("p"),ul=s("This model is also a PyTorch "),Po=n("a"),fl=s("torch.nn.Module"),ml=s(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_l=i(),ge=n("div"),_(jo.$$.fragment),gl=i(),wt=n("p"),bl=s("The "),tr=n("a"),vl=s("BlenderbotModel"),kl=s(" forward method, overrides the "),ea=n("code"),yl=s("__call__"),Tl=s(" special method."),wl=i(),_(Gt.$$.fragment),xl=i(),ta=n("p"),Bl=s("Example:"),zl=i(),_(No.$$.fragment),Ds=i(),xt=n("h2"),Wt=n("a"),oa=n("span"),_(Oo.$$.fragment),Fl=i(),na=n("span"),$l=s("BlenderbotForConditionalGeneration"),Gs=i(),Re=n("p"),El=s("See "),or=n("a"),Ml=s("BartForConditionalGeneration"),ql=s(" for arguments to "),ra=n("em"),Cl=s("forward"),Pl=s(" and "),aa=n("em"),jl=s("generate"),Ws=i(),ze=n("div"),_(Ao.$$.fragment),Nl=i(),Io=n("p"),Ol=s(`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),nr=n("a"),Al=s("PreTrainedModel"),Il=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sl=i(),So=n("p"),Ll=s("This model is also a PyTorch "),Lo=n("a"),Dl=s("torch.nn.Module"),Gl=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wl=i(),X=n("div"),_(Do.$$.fragment),Ul=i(),Bt=n("p"),Rl=s("The "),rr=n("a"),Hl=s("BlenderbotForConditionalGeneration"),Kl=s(" forward method, overrides the "),sa=n("code"),Vl=s("__call__"),Ql=s(" special method."),Jl=i(),_(Ut.$$.fragment),Xl=i(),da=n("p"),Yl=s("Conversation example::"),Zl=i(),ia=n("blockquote"),la=n("blockquote"),ca=n("blockquote"),pa=n("p"),ec=s(`from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration mname = \u2018facebook/blenderbot-400M-distill\u2019 model = BlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018pt\u2019) reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])`),tc=i(),ha=n("blockquote"),ua=n("blockquote"),fa=n("blockquote"),Go=n("p"),oc=s(`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> `),ma=n("s"),nc=s(`That\u2019s unfortunate. \u201D \u2026 \u201CAre they trying to lose weight or are they just trying to be healthier?`),rc=s(` \u201D \u2026 \u201D<s> I\u2019m not sure.\u201D \u2026 ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018pt\u2019) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),Us=i(),zt=n("h2"),Rt=n("a"),_a=n("span"),_(Wo.$$.fragment),ac=i(),ga=n("span"),sc=s("BlenderbotForCausalLM"),Rs=i(),Uo=n("div"),Ye=n("div"),_(Ro.$$.fragment),dc=i(),ba=n("p"),ic=s("Example:"),lc=i(),_(Ho.$$.fragment),Hs=i(),Ft=n("h2"),Ht=n("a"),va=n("span"),_(Ko.$$.fragment),cc=i(),ka=n("span"),pc=s("TFBlenderbotModel"),Ks=i(),ce=n("div"),_(Vo.$$.fragment),hc=i(),Qo=n("p"),uc=s(`The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top. This model inherits from `),ar=n("a"),fc=s("TFPreTrainedModel"),mc=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_c=i(),Jo=n("p"),gc=s("This model is also a "),Xo=n("a"),bc=s("tf.keras.Model"),vc=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),kc=i(),_(Kt.$$.fragment),yc=i(),be=n("div"),_(Yo.$$.fragment),Tc=i(),$t=n("p"),wc=s("The "),sr=n("a"),xc=s("TFBlenderbotModel"),Bc=s(" forward method, overrides the "),ya=n("code"),zc=s("__call__"),Fc=s(" special method."),$c=i(),_(Vt.$$.fragment),Ec=i(),Ta=n("p"),Mc=s("Example:"),qc=i(),_(Zo.$$.fragment),Vs=i(),Et=n("h2"),Qt=n("a"),wa=n("span"),_(en.$$.fragment),Cc=i(),xa=n("span"),Pc=s("TFBlenderbotForConditionalGeneration"),Qs=i(),pe=n("div"),_(tn.$$.fragment),jc=i(),on=n("p"),Nc=s(`The BLENDERBOT Model with a language modeling head. Can be used for summarization. This model inherits from `),dr=n("a"),Oc=s("TFPreTrainedModel"),Ac=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ic=i(),nn=n("p"),Sc=s("This model is also a "),rn=n("a"),Lc=s("tf.keras.Model"),Dc=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Gc=i(),_(Jt.$$.fragment),Wc=i(),Y=n("div"),_(an.$$.fragment),Uc=i(),Mt=n("p"),Rc=s("The "),ir=n("a"),Hc=s("TFBlenderbotForConditionalGeneration"),Kc=s(" forward method, overrides the "),Ba=n("code"),Vc=s("__call__"),Qc=s(" special method."),Jc=i(),_(Xt.$$.fragment),Xc=i(),za=n("p"),Yc=s("Conversation example::"),Zc=i(),Fa=n("blockquote"),$a=n("blockquote"),Ea=n("blockquote"),Ma=n("p"),ep=s(`from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration mname = \u2018facebook/blenderbot-400M-distill\u2019 model = TFBlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018tf\u2019) reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])`),tp=i(),qa=n("blockquote"),Ca=n("blockquote"),Pa=n("blockquote"),sn=n("p"),op=s(`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> `),ja=n("s"),np=s(`That\u2019s unfortunate. \u201D \u2026 \u201CAre they trying to lose weight or are they just trying to be healthier?`),rp=s(` \u201D \u2026 \u201D<s> I\u2019m not sure.\u201D \u2026 ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018tf\u2019) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),Js=i(),qt=n("h2"),Yt=n("a"),Na=n("span"),_(dn.$$.fragment),ap=i(),Oa=n("span"),sp=s("FlaxBlenderbotModel"),Xs=i(),I=n("div"),_(ln.$$.fragment),dp=i(),cn=n("p"),ip=s(`The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),lr=n("a"),lp=s("FlaxPreTrainedModel"),cp=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pp=i(),pn=n("p"),hp=s("This model is also a Flax Linen "),hn=n("a"),up=s("flax.nn.Module"),fp=s(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),mp=i(),Aa=n("p"),_p=s("Finally, this model supports inherent JAX features such as:"),gp=i(),He=n("ul"),Ia=n("li"),un=n("a"),bp=s("Just-In-Time (JIT) compilation"),vp=i(),Sa=n("li"),fn=n("a"),kp=s("Automatic Differentiation"),yp=i(),La=n("li"),mn=n("a"),Tp=s("Vectorization"),wp=i(),Da=n("li"),_n=n("a"),xp=s("Parallelization"),Bp=i(),ve=n("div"),_(gn.$$.fragment),zp=i(),Ct=n("p"),Fp=s("The "),Ga=n("code"),$p=s("FlaxBlenderbotPreTrainedModel"),Ep=s(" forward method, overrides the "),Wa=n("code"),Mp=s("__call__"),qp=s(" special method."),Cp=i(),_(Zt.$$.fragment),Pp=i(),Ua=n("p"),jp=s("Example:"),Np=i(),_(bn.$$.fragment),Op=i(),Ze=n("div"),_(vn.$$.fragment),Ap=i(),Ra=n("p"),Ip=s("Example:"),Sp=i(),_(kn.$$.fragment),Lp=i(),et=n("div"),_(yn.$$.fragment),Dp=i(),Ha=n("p"),Gp=s("Example:"),Wp=i(),_(Tn.$$.fragment),Ys=i(),Pt=n("h2"),eo=n("a"),Ka=n("span"),_(wn.$$.fragment),Up=i(),Va=n("span"),Rp=s("FlaxBlenderbotForConditionalGeneration"),Zs=i(),S=n("div"),_(xn.$$.fragment),Hp=i(),Bn=n("p"),Kp=s(`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),cr=n("a"),Vp=s("FlaxPreTrainedModel"),Qp=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jp=i(),zn=n("p"),Xp=s("This model is also a Flax Linen "),Fn=n("a"),Yp=s("flax.nn.Module"),Zp=s(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),eh=i(),Qa=n("p"),th=s("Finally, this model supports inherent JAX features such as:"),oh=i(),Ke=n("ul"),Ja=n("li"),$n=n("a"),nh=s("Just-In-Time (JIT) compilation"),rh=i(),Xa=n("li"),En=n("a"),ah=s("Automatic Differentiation"),sh=i(),Ya=n("li"),Mn=n("a"),dh=s("Vectorization"),ih=i(),Za=n("li"),qn=n("a"),lh=s("Parallelization"),ch=i(),L=n("div"),_(Cn.$$.fragment),ph=i(),jt=n("p"),hh=s("The "),es=n("code"),uh=s("FlaxBlenderbotPreTrainedModel"),fh=s(" forward method, overrides the "),ts=n("code"),mh=s("__call__"),_h=s(" special method."),gh=i(),_(to.$$.fragment),bh=i(),os=n("p"),vh=s("Conversation example::"),kh=i(),ns=n("blockquote"),rs=n("blockquote"),as=n("blockquote"),ss=n("p"),yh=s("from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration, BlenderbotConfig"),Th=i(),ds=n("blockquote"),is=n("blockquote"),ls=n("blockquote"),cs=n("p"),wh=s(`model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019) tokenizer = BlenderbotTokenizer.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019)`),xh=i(),ps=n("blockquote"),hs=n("blockquote"),us=n("blockquote"),fs=n("p"),Bh=s(`UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors=\u2018np\u2019)`),zh=i(),ms=n("blockquote"),_s=n("blockquote"),Pn=n("blockquote"),oo=n("h1"),no=n("a"),gs=n("span"),_(jn.$$.fragment),Fh=i(),bs=n("span"),$h=s("Generate Reply"),Eh=i(),vs=n("p"),Mh=s(`reply_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True).sequences print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])`),qh=i(),tt=n("div"),_(Nn.$$.fragment),Ch=i(),ks=n("p"),Ph=s("Example:"),jh=i(),_(On.$$.fragment),Nh=i(),ot=n("div"),_(An.$$.fragment),Oh=i(),ys=n("p"),Ah=s("Example:"),Ih=i(),_(In.$$.fragment),this.h()},l(o){const h=Km('[data-svelte="svelte-1phssyn"]',document.head);u=r(h,"META",{name:!0,content:!0}),h.forEach(t),z=l(o),m=r(o,"H1",{class:!0});var Sn=a(m);T=r(Sn,"A",{id:!0,class:!0,href:!0});var Ts=a(T);B=r(Ts,"SPAN",{});var ws=a(B);g(x.$$.fragment,ws),ws.forEach(t),Ts.forEach(t),w=l(Sn),F=r(Sn,"SPAN",{});var xs=a(F);Ce=d(xs,"Blenderbot"),xs.forEach(t),Sn.forEach(t),ue=l(o),E=r(o,"P",{});var ro=a(E);ke=r(ro,"STRONG",{});var Bs=a(ke);oe=d(Bs,"DISCLAIMER:"),Bs.forEach(t),Pe=d(ro," If you see something strange, file a "),ne=r(ro,"A",{href:!0,rel:!0});var zs=a(ne);re=d(zs,"Github Issue"),zs.forEach(t),je=d(ro," ."),ro.forEach(t),Fe=l(o),G=r(o,"H2",{class:!0});var Ln=a(G);j=r(Ln,"A",{id:!0,class:!0,href:!0});var Dh=a(j);ye=r(Dh,"SPAN",{});var Gh=a(ye);g(R.$$.fragment,Gh),Gh.forEach(t),Dh.forEach(t),M=l(Ln),C=r(Ln,"SPAN",{});var Wh=a(C);Ne=d(Wh,"Overview"),Wh.forEach(t),Ln.forEach(t),Q=l(o),J=r(o,"P",{});var td=a(J);Oe=d(td,"The Blender chatbot model was proposed in "),W=r(td,"A",{href:!0,rel:!0});var Uh=a(W);Ae=d(Uh,"Recipes for building an open-domain chatbot"),Uh.forEach(t),Ie=d(td,` Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. 
Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.`),td.forEach(t),N=l(o),fe=r(o,"P",{});var Rh=a(fe);ae=d(Rh,"The abstract of the paper is the following:"),Rh.forEach(t),$e=l(o),me=r(o,"P",{});var Hh=a(me);H=r(Hh,"EM",{});var Kh=a(H);Se=d(Kh,`Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.`),Kh.forEach(t),Hh.forEach(t),Ee=l(o),q=r(o,"P",{});var pr=a(q);Le=d(pr,"This model was contributed by "),U=r(pr,"A",{href:!0,rel:!0});var Vh=a(U);De=d(Vh,"sshleifer"),Vh.forEach(t),f=d(pr,". The authors\u2019 code can be found "),$=r(pr,"A",{href:!0,rel:!0});var Qh=a($);Te=d(Qh,"here"),Qh.forEach(t),ht=d(pr," ."),pr.forEach(t),Ve=l(o),P=r(o,"H2",{class:!0});var od=a(P);_e=r(od,"A",{id:!0,class:!0,href:!0});var Jh=a(_e);Ge=r(Jh,"SPAN",{});var Xh=a(Ge);g(we.$$.fragment,Xh),Xh.forEach(t),Jh.forEach(t),K=l(od),V=r(od,"SPAN",{});var Yh=a(V);ut=d(Yh,"Implementation Notes"),Yh.forEach(t),od.forEach(t),Qe=l(o),O=r(o,"UL",{});var hr=a(O);xe=r(hr,"LI",{});var nd=a(xe);ft=d(nd,"Blenderbot uses a standard "),se=r(nd,"A",{href:!0,rel:!0});var Zh=a(se);mt=d(Zh,"seq2seq model transformer"),Zh.forEach(t),Nd=d(nd," based architecture."),nd.forEach(t),Od=l(hr),ho=r(hr,"LI",{});var rd=a(ho);Ad=d(rd,"Available checkpoints can be found in the "),uo=r(rd,"A",{href:!0,rel:!0});var eu=a(uo);Id=d(eu,"model hub"),eu.forEach(t),Sd=d(rd,"."),rd.forEach(t),Ld=l(hr),We=r(hr,"LI",{});var ao=a(We);Dd=d(ao,"This is the "),$r=r(ao,"EM",{});var tu=a($r);Gd=d(tu,"default"),tu.forEach(t),Wd=d(ao,` Blenderbot model class. 
However, some smaller checkpoints, such as `),Er=r(ao,"CODE",{});var ou=a(Er);Ud=d(ou,"facebook/blenderbot_small_90M"),ou.forEach(t),Rd=d(ao,`, have a different architecture and consequently should be used with `),Wn=r(ao,"A",{href:!0});var nu=a(Wn);Hd=d(nu,"BlenderbotSmall"),nu.forEach(t),Kd=d(ao,"."),ao.forEach(t),hr.forEach(t),Es=l(o),_t=r(o,"H2",{class:!0});var ad=a(_t);Nt=r(ad,"A",{id:!0,class:!0,href:!0});var ru=a(Nt);Mr=r(ru,"SPAN",{});var au=a(Mr);g(fo.$$.fragment,au),au.forEach(t),ru.forEach(t),Vd=l(ad),qr=r(ad,"SPAN",{});var su=a(qr);Qd=d(su,"Usage"),su.forEach(t),ad.forEach(t),Ms=l(o),Un=r(o,"P",{});var du=a(Un);Jd=d(du,"Here is an example of model usage:"),du.forEach(t),qs=l(o),g(mo.$$.fragment,o),Cs=l(o),gt=r(o,"H2",{class:!0});var sd=a(gt);Ot=r(sd,"A",{id:!0,class:!0,href:!0});var iu=a(Ot);Cr=r(iu,"SPAN",{});var lu=a(Cr);g(_o.$$.fragment,lu),lu.forEach(t),iu.forEach(t),Xd=l(sd),Pr=r(sd,"SPAN",{});var cu=a(Pr);Yd=d(cu,"BlenderbotConfig"),cu.forEach(t),sd.forEach(t),Ps=l(o),de=r(o,"DIV",{class:!0});var nt=a(de);g(go.$$.fragment,nt),Zd=l(nt),bt=r(nt,"P",{});var ur=a(bt);ei=d(ur,"This is the configuration class to store the configuration of a "),Rn=r(ur,"A",{href:!0});var pu=a(Rn);ti=d(pu,"BlenderbotModel"),pu.forEach(t),oi=d(ur,`. It is used to instantiate an Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot `),bo=r(ur,"A",{href:!0,rel:!0});var hu=a(bo);ni=d(hu,"facebook/blenderbot-3B"),hu.forEach(t),ri=d(ur," architecture."),ur.forEach(t),ai=l(nt),vt=r(nt,"P",{});var fr=a(vt);si=d(fr,"Configuration objects inherit from "),Hn=r(fr,"A",{href:!0});var uu=a(Hn);di=d(uu,"PretrainedConfig"),uu.forEach(t),ii=d(fr,` and can be used to control the model outputs. Read the documentation from `),Kn=r(fr,"A",{href:!0});var fu=a(Kn);li=d(fu,"PretrainedConfig"),fu.forEach(t),ci=d(fr," for more information."),fr.forEach(t),pi=l(nt),jr=r(nt,"P",{});var mu=a(jr);hi=d(mu,"Example:"),mu.forEach(t),ui=l(nt),g(vo.$$.fragment,nt),nt.forEach(t),js=l(o),kt=r(o,"H2",{class:!0});var dd=a(kt);At=r(dd,"A",{id:!0,class:!0,href:!0});var _u=a(At);Nr=r(_u,"SPAN",{});var gu=a(Nr);g(ko.$$.fragment,gu),gu.forEach(t),_u.forEach(t),fi=l(dd),Or=r(dd,"SPAN",{});var bu=a(Or);mi=d(bu,"BlenderbotTokenizer"),bu.forEach(t),dd.forEach(t),Ns=l(o),ie=r(o,"DIV",{class:!0});var rt=a(ie);g(yo.$$.fragment,rt),_i=l(rt),Ar=r(rt,"P",{});var vu=a(Ar);gi=d(vu,"Construct a Blenderbot tokenizer."),vu.forEach(t),bi=l(rt),It=r(rt,"P",{});var Fs=a(It);Ir=r(Fs,"CODE",{});var ku=a(Ir);vi=d(ku,"Blenderbot"),ku.forEach(t),ki=d(Fs," is nearly identical to "),Vn=r(Fs,"A",{href:!0});var yu=a(Vn);yi=d(yu,"RobertaTokenizer"),yu.forEach(t),Ti=d(Fs,` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),Fs.forEach(t),wi=l(rt),To=r(rt,"P",{});var id=a(To);xi=d(id,"Refer to superclass "),Qn=r(id,"A",{href:!0});var Tu=a(Qn);Bi=d(Tu,"RobertaTokenizer"),Tu.forEach(t),zi=d(id,` for usage examples and documentation concerning parameters.`),id.forEach(t),Fi=l(rt),Je=r(rt,"DIV",{class:!0});var mr=a(Je);g(wo.$$.fragment,mr),$i=l(mr),Sr=r(mr,"P",{});var wu=a(Sr);Ei=d(wu,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A Blenderbot sequence has the following format:`),wu.forEach(t),Mi=l(mr),Lr=r(mr,"UL",{});var xu=a(Lr);Jn=r(xu,"LI",{});var Sh=a(Jn);qi=d(Sh,"single sequence: "),Dr=r(Sh,"CODE",{});var Bu=a(Dr);Ci=d(Bu,"X </s>"),Bu.forEach(t),Sh.forEach(t),xu.forEach(t),mr.forEach(t),rt.forEach(t),Os=l(o),yt=r(o,"H2",{class:!0});var ld=a(yt);St=r(ld,"A",{id:!0,class:!0,href:!0});var zu=a(St);Gr=r(zu,"SPAN",{});var Fu=a(Gr);g(xo.$$.fragment,Fu),Fu.forEach(t),zu.forEach(t),Pi=l(ld),Wr=r(ld,"SPAN",{});var $u=a(Wr);ji=d($u,"BlenderbotTokenizerFast"),$u.forEach(t),ld.forEach(t),As=l(o),le=r(o,"DIV",{class:!0});var at=a(le);g(Bo.$$.fragment,at),Ni=l(at),zo=r(at,"P",{});var cd=a(zo);Oi=d(cd,"Construct a \u201Cfast\u201D Blenderbot tokenizer (backed by HuggingFace\u2019s "),Ur=r(cd,"EM",{});var Eu=a(Ur);Ai=d(Eu,"tokenizers"),Eu.forEach(t),Ii=d(cd," library)."),cd.forEach(t),Si=l(at),Lt=r(at,"P",{});var $s=a(Lt);Rr=r($s,"CODE",{});var Mu=a(Rr);Li=d(Mu,"BlenderbotFast"),Mu.forEach(t),Di=d($s," is nearly identical to "),Xn=r($s,"A",{href:!0});var qu=a(Xn);Gi=d(qu,"RobertaTokenizerFast"),qu.forEach(t),Wi=d($s,` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),$s.forEach(t),Ui=l(at),Fo=r(at,"P",{});var pd=a(Fo);Ri=d(pd,"Refer to superclass "),Yn=r(pd,"A",{href:!0});var Cu=a(Yn);Hi=d(Cu,"RobertaTokenizerFast"),Cu.forEach(t),Ki=d(pd,` for usage examples and documentation concerning parameters.`),pd.forEach(t),Vi=l(at),Xe=r(at,"DIV",{class:!0});var _r=a(Xe);g($o.$$.fragment,_r),Qi=l(_r),Hr=r(_r,"P",{});var Pu=a(Hr);Ji=d(Pu,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:`),Pu.forEach(t),Xi=l(_r),Kr=r(_r,"UL",{});var ju=a(Kr);Zn=r(ju,"LI",{});var Lh=a(Zn);Yi=d(Lh,"single sequence: "),Vr=r(Lh,"CODE",{});var Nu=a(Vr);Zi=d(Nu,"X </s>"),Nu.forEach(t),Lh.forEach(t),ju.forEach(t),_r.forEach(t),at.forEach(t),Is=l(o),Tt=r(o,"H2",{class:!0});var hd=a(Tt);Dt=r(hd,"A",{id:!0,class:!0,href:!0});var Ou=a(Dt);Qr=r(Ou,"SPAN",{});var Au=a(Qr);g(Eo.$$.fragment,Au),Au.forEach(t),Ou.forEach(t),el=l(hd),Jr=r(hd,"SPAN",{});var Iu=a(Jr);tl=d(Iu,"BlenderbotModel"),Iu.forEach(t),hd.forEach(t),Ss=l(o),Ue=r(o,"P",{});var Dn=a(Ue);ol=d(Dn,"See "),Xr=r(Dn,"CODE",{});var Su=a(Xr);nl=d(Su,"transformers.BartModel"),Su.forEach(t),rl=d(Dn," for arguments to "),Yr=r(Dn,"EM",{});var Lu=a(Yr);al=d(Lu,"forward"),Lu.forEach(t),sl=d(Dn," and "),Zr=r(Dn,"EM",{});var Du=a(Zr);dl=d(Du,"generate"),Du.forEach(t),Dn.forEach(t),Ls=l(o),Be=r(o,"DIV",{class:!0});var so=a(Be);g(Mo.$$.fragment,so),il=l(so),qo=r(so,"P",{});var ud=a(qo);ll=d(ud,`The bare Blenderbot Model outputting raw hidden-states without any specific head on top. This model inherits from `),er=r(ud,"A",{href:!0});var Gu=a(er);cl=d(Gu,"PreTrainedModel"),Gu.forEach(t),pl=d(ud,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ud.forEach(t),hl=l(so),Co=r(so,"P",{});var fd=a(Co);ul=d(fd,"This model is also a PyTorch "),Po=r(fd,"A",{href:!0,rel:!0});var Wu=a(Po);fl=d(Wu,"torch.nn.Module"),Wu.forEach(t),ml=d(fd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fd.forEach(t),_l=l(so),ge=r(so,"DIV",{class:!0});var st=a(ge);g(jo.$$.fragment,st),gl=l(st),wt=r(st,"P",{});var gr=a(wt);bl=d(gr,"The "),tr=r(gr,"A",{href:!0});var Uu=a(tr);vl=d(Uu,"BlenderbotModel"),Uu.forEach(t),kl=d(gr," forward method, overrides the "),ea=r(gr,"CODE",{});var Ru=a(ea);yl=d(Ru,"__call__"),Ru.forEach(t),Tl=d(gr," special method."),gr.forEach(t),wl=l(st),g(Gt.$$.fragment,st),xl=l(st),ta=r(st,"P",{});var Hu=a(ta);Bl=d(Hu,"Example:"),Hu.forEach(t),zl=l(st),g(No.$$.fragment,st),st.forEach(t),so.forEach(t),Ds=l(o),xt=r(o,"H2",{class:!0});var md=a(xt);Wt=r(md,"A",{id:!0,class:!0,href:!0});var Ku=a(Wt);oa=r(Ku,"SPAN",{});var Vu=a(oa);g(Oo.$$.fragment,Vu),Vu.forEach(t),Ku.forEach(t),Fl=l(md),na=r(md,"SPAN",{});var Qu=a(na);$l=d(Qu,"BlenderbotForConditionalGeneration"),Qu.forEach(t),md.forEach(t),Gs=l(o),Re=r(o,"P",{});var Gn=a(Re);El=d(Gn,"See "),or=r(Gn,"A",{href:!0});var Ju=a(or);Ml=d(Ju,"BartForConditionalGeneration"),Ju.forEach(t),ql=d(Gn," for arguments to "),ra=r(Gn,"EM",{});var Xu=a(ra);Cl=d(Xu,"forward"),Xu.forEach(t),Pl=d(Gn," and "),aa=r(Gn,"EM",{});var Yu=a(aa);jl=d(Yu,"generate"),Yu.forEach(t),Gn.forEach(t),Ws=l(o),ze=r(o,"DIV",{class:!0});var io=a(ze);g(Ao.$$.fragment,io),Nl=l(io),Io=r(io,"P",{});var _d=a(Io);Ol=d(_d,`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),nr=r(_d,"A",{href:!0});var Zu=a(nr);Al=d(Zu,"PreTrainedModel"),Zu.forEach(t),Il=d(_d,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_d.forEach(t),Sl=l(io),So=r(io,"P",{});var gd=a(So);Ll=d(gd,"This model is also a PyTorch "),Lo=r(gd,"A",{href:!0,rel:!0});var ef=a(Lo);Dl=d(ef,"torch.nn.Module"),ef.forEach(t),Gl=d(gd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gd.forEach(t),Wl=l(io),X=r(io,"DIV",{class:!0});var Me=a(X);g(Do.$$.fragment,Me),Ul=l(Me),Bt=r(Me,"P",{});var br=a(Bt);Rl=d(br,"The "),rr=r(br,"A",{href:!0});var tf=a(rr);Hl=d(tf,"BlenderbotForConditionalGeneration"),tf.forEach(t),Kl=d(br," forward method, overrides the "),sa=r(br,"CODE",{});var of=a(sa);Vl=d(of,"__call__"),of.forEach(t),Ql=d(br," special method."),br.forEach(t),Jl=l(Me),g(Ut.$$.fragment,Me),Xl=l(Me),da=r(Me,"P",{});var nf=a(da);Yl=d(nf,"Conversation example::"),nf.forEach(t),Zl=l(Me),ia=r(Me,"BLOCKQUOTE",{});var rf=a(ia);la=r(rf,"BLOCKQUOTE",{});var af=a(la);ca=r(af,"BLOCKQUOTE",{});var sf=a(ca);pa=r(sf,"P",{});var df=a(pa);ec=d(df,`from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration mname = \u2018facebook/blenderbot-400M-distill\u2019 model = BlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018pt\u2019) reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])`),df.forEach(t),sf.forEach(t),af.forEach(t),rf.forEach(t),tc=l(Me),ha=r(Me,"BLOCKQUOTE",{});var lf=a(ha);ua=r(lf,"BLOCKQUOTE",{});var cf=a(ua);fa=r(cf,"BLOCKQUOTE",{});var pf=a(fa);Go=r(pf,"P",{});var bd=a(Go);oc=d(bd,`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> `),ma=r(bd,"S",{});var hf=a(ma);nc=d(hf,`That\u2019s unfortunate. \u201D \u2026 \u201CAre they trying to lose weight or are they just trying to be healthier?`),hf.forEach(t),rc=d(bd,` \u201D \u2026 \u201D<s> I\u2019m not sure.\u201D \u2026 ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018pt\u2019) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),bd.forEach(t),pf.forEach(t),cf.forEach(t),lf.forEach(t),Me.forEach(t),io.forEach(t),Us=l(o),zt=r(o,"H2",{class:!0});var vd=a(zt);Rt=r(vd,"A",{id:!0,class:!0,href:!0});var uf=a(Rt);_a=r(uf,"SPAN",{});var ff=a(_a);g(Wo.$$.fragment,ff),ff.forEach(t),uf.forEach(t),ac=l(vd),ga=r(vd,"SPAN",{});var mf=a(ga);sc=d(mf,"BlenderbotForCausalLM"),mf.forEach(t),vd.forEach(t),Rs=l(o),Uo=r(o,"DIV",{class:!0});var _f=a(Uo);Ye=r(_f,"DIV",{class:!0});var vr=a(Ye);g(Ro.$$.fragment,vr),dc=l(vr),ba=r(vr,"P",{});var gf=a(ba);ic=d(gf,"Example:"),gf.forEach(t),lc=l(vr),g(Ho.$$.fragment,vr),vr.forEach(t),_f.forEach(t),Hs=l(o),Ft=r(o,"H2",{class:!0});var kd=a(Ft);Ht=r(kd,"A",{id:!0,class:!0,href:!0});var bf=a(Ht);va=r(bf,"SPAN",{});var vf=a(va);g(Ko.$$.fragment,vf),vf.forEach(t),bf.forEach(t),cc=l(kd),ka=r(kd,"SPAN",{});var kf=a(ka);pc=d(kf,"TFBlenderbotModel"),kf.forEach(t),kd.forEach(t),Ks=l(o),ce=r(o,"DIV",{class:!0});var dt=a(ce);g(Vo.$$.fragment,dt),hc=l(dt),Qo=r(dt,"P",{});var yd=a(Qo);uc=d(yd,`The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top. This model inherits from `),ar=r(yd,"A",{href:!0});var yf=a(ar);fc=d(yf,"TFPreTrainedModel"),yf.forEach(t),mc=d(yd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yd.forEach(t),_c=l(dt),Jo=r(dt,"P",{});var Td=a(Jo);gc=d(Td,"This model is also a "),Xo=r(Td,"A",{href:!0,rel:!0});var Tf=a(Xo);bc=d(Tf,"tf.keras.Model"),Tf.forEach(t),vc=d(Td,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Td.forEach(t),kc=l(dt),g(Kt.$$.fragment,dt),yc=l(dt),be=r(dt,"DIV",{class:!0});var it=a(be);g(Yo.$$.fragment,it),Tc=l(it),$t=r(it,"P",{});var kr=a($t);wc=d(kr,"The "),sr=r(kr,"A",{href:!0});var wf=a(sr);xc=d(wf,"TFBlenderbotModel"),wf.forEach(t),Bc=d(kr," forward method, overrides the "),ya=r(kr,"CODE",{});var xf=a(ya);zc=d(xf,"__call__"),xf.forEach(t),Fc=d(kr," special method."),kr.forEach(t),$c=l(it),g(Vt.$$.fragment,it),Ec=l(it),Ta=r(it,"P",{});var Bf=a(Ta);Mc=d(Bf,"Example:"),Bf.forEach(t),qc=l(it),g(Zo.$$.fragment,it),it.forEach(t),dt.forEach(t),Vs=l(o),Et=r(o,"H2",{class:!0});var wd=a(Et);Qt=r(wd,"A",{id:!0,class:!0,href:!0});var zf=a(Qt);wa=r(zf,"SPAN",{});var Ff=a(wa);g(en.$$.fragment,Ff),Ff.forEach(t),zf.forEach(t),Cc=l(wd),xa=r(wd,"SPAN",{});var $f=a(xa);Pc=d($f,"TFBlenderbotForConditionalGeneration"),$f.forEach(t),wd.forEach(t),Qs=l(o),pe=r(o,"DIV",{class:!0});var lt=a(pe);g(tn.$$.fragment,lt),jc=l(lt),on=r(lt,"P",{});var xd=a(on);Nc=d(xd,`The BLENDERBOT Model with a language modeling head. Can be used for summarization. This model inherits from `),dr=r(xd,"A",{href:!0});var Ef=a(dr);Oc=d(Ef,"TFPreTrainedModel"),Ef.forEach(t),Ac=d(xd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xd.forEach(t),Ic=l(lt),nn=r(lt,"P",{});var Bd=a(nn);Sc=d(Bd,"This model is also a "),rn=r(Bd,"A",{href:!0,rel:!0});var Mf=a(rn);Lc=d(Mf,"tf.keras.Model"),Mf.forEach(t),Dc=d(Bd,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Bd.forEach(t),Gc=l(lt),g(Jt.$$.fragment,lt),Wc=l(lt),Y=r(lt,"DIV",{class:!0});var qe=a(Y);g(an.$$.fragment,qe),Uc=l(qe),Mt=r(qe,"P",{});var yr=a(Mt);Rc=d(yr,"The "),ir=r(yr,"A",{href:!0});var qf=a(ir);Hc=d(qf,"TFBlenderbotForConditionalGeneration"),qf.forEach(t),Kc=d(yr," forward method, overrides the "),Ba=r(yr,"CODE",{});var Cf=a(Ba);Vc=d(Cf,"__call__"),Cf.forEach(t),Qc=d(yr," special method."),yr.forEach(t),Jc=l(qe),g(Xt.$$.fragment,qe),Xc=l(qe),za=r(qe,"P",{});var Pf=a(za);Yc=d(Pf,"Conversation example::"),Pf.forEach(t),Zc=l(qe),Fa=r(qe,"BLOCKQUOTE",{});var jf=a(Fa);$a=r(jf,"BLOCKQUOTE",{});var Nf=a($a);Ea=r(Nf,"BLOCKQUOTE",{});var Of=a(Ea);Ma=r(Of,"P",{});var Af=a(Ma);ep=d(Af,`from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration mname = \u2018facebook/blenderbot-400M-distill\u2019 model = TFBlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018tf\u2019) reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])`),Af.forEach(t),Of.forEach(t),Nf.forEach(t),jf.forEach(t),tp=l(qe),qa=r(qe,"BLOCKQUOTE",{});var If=a(qa);Ca=r(If,"BLOCKQUOTE",{});var Sf=a(Ca);Pa=r(Sf,"BLOCKQUOTE",{});var Lf=a(Pa);sn=r(Lf,"P",{});var zd=a(sn);op=d(zd,`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> `),ja=r(zd,"S",{});var Df=a(ja);np=d(Df,`That\u2019s unfortunate. \u201D \u2026 \u201CAre they trying to lose weight or are they just trying to be healthier?`),Df.forEach(t),rp=d(zd,` \u201D \u2026 \u201D<s> I\u2019m not sure.\u201D \u2026 ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018tf\u2019) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),zd.forEach(t),Lf.forEach(t),Sf.forEach(t),If.forEach(t),qe.forEach(t),lt.forEach(t),Js=l(o),qt=r(o,"H2",{class:!0});var Fd=a(qt);Yt=r(Fd,"A",{id:!0,class:!0,href:!0});var Gf=a(Yt);Na=r(Gf,"SPAN",{});var Wf=a(Na);g(dn.$$.fragment,Wf),Wf.forEach(t),Gf.forEach(t),ap=l(Fd),Oa=r(Fd,"SPAN",{});var Uf=a(Oa);sp=d(Uf,"FlaxBlenderbotModel"),Uf.forEach(t),Fd.forEach(t),Xs=l(o),I=r(o,"DIV",{class:!0});var Z=a(I);g(ln.$$.fragment,Z),dp=l(Z),cn=r(Z,"P",{});var $d=a(cn);ip=d($d,`The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),lr=r($d,"A",{href:!0});var Rf=a(lr);lp=d(Rf,"FlaxPreTrainedModel"),Rf.forEach(t),cp=d($d,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$d.forEach(t),pp=l(Z),pn=r(Z,"P",{});var Ed=a(pn);hp=d(Ed,"This model is also a Flax Linen "),hn=r(Ed,"A",{href:!0,rel:!0});var Hf=a(hn);up=d(Hf,"flax.nn.Module"),Hf.forEach(t),fp=d(Ed,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ed.forEach(t),mp=l(Z),Aa=r(Z,"P",{});var Kf=a(Aa);_p=d(Kf,"Finally, this model supports inherent JAX features such as:"),Kf.forEach(t),gp=l(Z),He=r(Z,"UL",{});var lo=a(He);Ia=r(lo,"LI",{});var Vf=a(Ia);un=r(Vf,"A",{href:!0,rel:!0});var Qf=a(un);bp=d(Qf,"Just-In-Time (JIT) compilation"),Qf.forEach(t),Vf.forEach(t),vp=l(lo),Sa=r(lo,"LI",{});var Jf=a(Sa);fn=r(Jf,"A",{href:!0,rel:!0});var Xf=a(fn);kp=d(Xf,"Automatic Differentiation"),Xf.forEach(t),Jf.forEach(t),yp=l(lo),La=r(lo,"LI",{});var Yf=a(La);mn=r(Yf,"A",{href:!0,rel:!0});var Zf=a(mn);Tp=d(Zf,"Vectorization"),Zf.forEach(t),Yf.forEach(t),wp=l(lo),Da=r(lo,"LI",{});var em=a(Da);_n=r(em,"A",{href:!0,rel:!0});var tm=a(_n);xp=d(tm,"Parallelization"),tm.forEach(t),em.forEach(t),lo.forEach(t),Bp=l(Z),ve=r(Z,"DIV",{class:!0});var ct=a(ve);g(gn.$$.fragment,ct),zp=l(ct),Ct=r(ct,"P",{});var Tr=a(Ct);Fp=d(Tr,"The "),Ga=r(Tr,"CODE",{});var om=a(Ga);$p=d(om,"FlaxBlenderbotPreTrainedModel"),om.forEach(t),Ep=d(Tr," forward method, overrides the "),Wa=r(Tr,"CODE",{});var nm=a(Wa);Mp=d(nm,"__call__"),nm.forEach(t),qp=d(Tr," special method."),Tr.forEach(t),Cp=l(ct),g(Zt.$$.fragment,ct),Pp=l(ct),Ua=r(ct,"P",{});var rm=a(Ua);jp=d(rm,"Example:"),rm.forEach(t),Np=l(ct),g(bn.$$.fragment,ct),ct.forEach(t),Op=l(Z),Ze=r(Z,"DIV",{class:!0});var wr=a(Ze);g(vn.$$.fragment,wr),Ap=l(wr),Ra=r(wr,"P",{});var am=a(Ra);Ip=d(am,"Example:"),am.forEach(t),Sp=l(wr),g(kn.$$.fragment,wr),wr.forEach(t),Lp=l(Z),et=r(Z,"DIV",{class:!0});var xr=a(et);g(yn.$$.fragment,xr),Dp=l(xr),Ha=r(xr,"P",{});var sm=a(Ha);Gp=d(sm,"Example:"),sm.forEach(t),Wp=l(xr),g(Tn.$$.fragment,xr),xr.forEach(t),Z.forEach(t),Ys=l(o),Pt=r(o,"H2",{class:!0});var Md=a(Pt);eo=r(Md,"A",{id:!0,class:!0,href:!0});var dm=a(eo);Ka=r(dm,"SPAN",{});var im=a(Ka);g(wn.$$.fragment,im),im.forEach(t),dm.forEach(t),Up=l(Md),Va=r(Md,"SPAN",{});var lm=a(Va);Rp=d(lm,"FlaxBlenderbotForConditionalGeneration"),lm.forEach(t),Md.forEach(t),Zs=l(o),S=r(o,"DIV",{class:!0});var ee=a(S);g(xn.$$.fragment,ee),Hp=l(ee),Bn=r(ee,"P",{});var qd=a(Bn);Kp=d(qd,`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),cr=r(qd,"A",{href:!0});var cm=a(cr);Vp=d(cm,"FlaxPreTrainedModel"),cm.forEach(t),Qp=d(qd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qd.forEach(t),Jp=l(ee),zn=r(ee,"P",{});var Cd=a(zn);Xp=d(Cd,"This model is also a Flax Linen "),Fn=r(Cd,"A",{href:!0,rel:!0});var pm=a(Fn);Yp=d(pm,"flax.nn.Module"),pm.forEach(t),Zp=d(Cd,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Cd.forEach(t),eh=l(ee),Qa=r(ee,"P",{});var hm=a(Qa);th=d(hm,"Finally, this model supports inherent JAX features such as:"),hm.forEach(t),oh=l(ee),Ke=r(ee,"UL",{});var co=a(Ke);Ja=r(co,"LI",{});var um=a(Ja);$n=r(um,"A",{href:!0,rel:!0});var fm=a($n);nh=d(fm,"Just-In-Time (JIT) compilation"),fm.forEach(t),um.forEach(t),rh=l(co),Xa=r(co,"LI",{});var mm=a(Xa);En=r(mm,"A",{href:!0,rel:!0});var _m=a(En);ah=d(_m,"Automatic Differentiation"),_m.forEach(t),mm.forEach(t),sh=l(co),Ya=r(co,"LI",{});var gm=a(Ya);Mn=r(gm,"A",{href:!0,rel:!0});var bm=a(Mn);dh=d(bm,"Vectorization"),bm.forEach(t),gm.forEach(t),ih=l(co),Za=r(co,"LI",{});var vm=a(Za);qn=r(vm,"A",{href:!0,rel:!0});var km=a(qn);lh=d(km,"Parallelization"),km.forEach(t),vm.forEach(t),co.forEach(t),ch=l(ee),L=r(ee,"DIV",{class:!0});var te=a(L);g(Cn.$$.fragment,te),ph=l(te),jt=r(te,"P",{});var Br=a(jt);hh=d(Br,"The "),es=r(Br,"CODE",{});var ym=a(es);uh=d(ym,"FlaxBlenderbotPreTrainedModel"),ym.forEach(t),fh=d(Br," forward method, overrides the "),ts=r(Br,"CODE",{});var Tm=a(ts);mh=d(Tm,"__call__"),Tm.forEach(t),_h=d(Br," special method."),Br.forEach(t),gh=l(te),g(to.$$.fragment,te),bh=l(te),os=r(te,"P",{});var wm=a(os);vh=d(wm,"Conversation example::"),wm.forEach(t),kh=l(te),ns=r(te,"BLOCKQUOTE",{});var xm=a(ns);rs=r(xm,"BLOCKQUOTE",{});var Bm=a(rs);as=r(Bm,"BLOCKQUOTE",{});var zm=a(as);ss=r(zm,"P",{});var Fm=a(ss);yh=d(Fm,"from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration, BlenderbotConfig"),Fm.forEach(t),zm.forEach(t),Bm.forEach(t),xm.forEach(t),Th=l(te),ds=r(te,"BLOCKQUOTE",{});var $m=a(ds);is=r($m,"BLOCKQUOTE",{});var Em=a(is);ls=r(Em,"BLOCKQUOTE",{});var Mm=a(ls);cs=r(Mm,"P",{});var qm=a(cs);wh=d(qm,`model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019) tokenizer = BlenderbotTokenizer.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019)`),qm.forEach(t),Mm.forEach(t),Em.forEach(t),$m.forEach(t),xh=l(te),ps=r(te,"BLOCKQUOTE",{});var Cm=a(ps);hs=r(Cm,"BLOCKQUOTE",{});var Pm=a(hs);us=r(Pm,"BLOCKQUOTE",{});var jm=a(us);fs=r(jm,"P",{});var Nm=a(fs);Bh=d(Nm,`UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors=\u2018np\u2019)`),Nm.forEach(t),jm.forEach(t),Pm.forEach(t),Cm.forEach(t),zh=l(te),ms=r(te,"BLOCKQUOTE",{});var Om=a(ms);_s=r(Om,"BLOCKQUOTE",{});var Am=a(_s);Pn=r(Am,"BLOCKQUOTE",{});var Pd=a(Pn);oo=r(Pd,"H1",{class:!0});var jd=a(oo);no=r(jd,"A",{id:!0,class:!0,href:!0});var Im=a(no);gs=r(Im,"SPAN",{});var Sm=a(gs);g(jn.$$.fragment,Sm),Sm.forEach(t),Im.forEach(t),Fh=l(jd),bs=r(jd,"SPAN",{});var Lm=a(bs);$h=d(Lm,"Generate Reply"),Lm.forEach(t),jd.forEach(t),Eh=l(Pd),vs=r(Pd,"P",{});var Dm=a(vs);Mh=d(Dm,`reply_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True).sequences print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])`),Dm.forEach(t),Pd.forEach(t),Am.forEach(t),Om.forEach(t),te.forEach(t),qh=l(ee),tt=r(ee,"DIV",{class:!0});var zr=a(tt);g(Nn.$$.fragment,zr),Ch=l(zr),ks=r(zr,"P",{});var Gm=a(ks);Ph=d(Gm,"Example:"),Gm.forEach(t),jh=l(zr),g(On.$$.fragment,zr),zr.forEach(t),Nh=l(ee),ot=r(ee,"DIV",{class:!0});var Fr=a(ot);g(An.$$.fragment,Fr),Oh=l(Fr),ys=r(Fr,"P",{});var 
Wm=a(ys);Ah=d(Wm,"Example:"),Wm.forEach(t),Ih=l(Fr),g(In.$$.fragment,Fr),Fr.forEach(t),ee.forEach(t),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(n_)),c(T,"id","blenderbot"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#blenderbot"),c(m,"class","relative group"),c(ne,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),c(ne,"rel","nofollow"),c(j,"id","overview"),c(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(j,"href","#overview"),c(G,"class","relative group"),c(W,"href","https://arxiv.org/pdf/2004.13637.pdf"),c(W,"rel","nofollow"),c(U,"href","https://huggingface.co/sshleifer"),c(U,"rel","nofollow"),c($,"href","https://github.com/facebookresearch/ParlAI"),c($,"rel","nofollow"),c(_e,"id","implementation-notes"),c(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_e,"href","#implementation-notes"),c(P,"class","relative group"),c(se,"href","https://arxiv.org/pdf/1706.03762.pdf"),c(se,"rel","nofollow"),c(uo,"href","https://huggingface.co/models?search=blenderbot"),c(uo,"rel","nofollow"),c(Wn,"href","blenderbot_small"),c(Nt,"id","usage"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#usage"),c(_t,"class","relative group"),c(Ot,"id","transformers.BlenderbotConfig"),c(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ot,"href","#transformers.BlenderbotConfig"),c(gt,"class","relative group"),c(Rn,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotModel"),c(bo,"href","https://huggingface.co/facebook/blenderbot-3B"),c(bo,"rel","nofollow"),c(Hn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Kn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(de,"class","docstring"),c(At,"id","transformers.BlenderbotTokenizer"),c(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(At,"href","#transformers.BlenderbotTokenizer"),c(kt,"class","relative group"),c(Vn,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Qn,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Je,"class","docstring"),c(ie,"class","docstring"),c(St,"id","transformers.BlenderbotTokenizerFast"),c(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(St,"href","#transformers.BlenderbotTokenizerFast"),c(yt,"class","relative 
group"),c(Xn,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(Yn,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(Xe,"class","docstring"),c(le,"class","docstring"),c(Dt,"id","transformers.BlenderbotModel"),c(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dt,"href","#transformers.BlenderbotModel"),c(Tt,"class","relative group"),c(er,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Po,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Po,"rel","nofollow"),c(tr,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotModel"),c(ge,"class","docstring"),c(Be,"class","docstring"),c(Wt,"id","transformers.BlenderbotForConditionalGeneration"),c(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wt,"href","#transformers.BlenderbotForConditionalGeneration"),c(xt,"class","relative group"),c(or,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(nr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Lo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Lo,"rel","nofollow"),c(rr,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotForConditionalGeneration"),c(X,"class","docstring"),c(ze,"class","docstring"),c(Rt,"id","transformers.BlenderbotForCausalLM"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.BlenderbotForCausalLM"),c(zt,"class","relative group"),c(Ye,"class","docstring"),c(Uo,"class","docstring"),c(Ht,"id","transformers.TFBlenderbotModel"),c(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ht,"href","#transformers.TFBlenderbotModel"),c(Ft,"class","relative group"),c(ar,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Xo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Xo,"rel","nofollow"),c(sr,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotModel"),c(be,"class","docstring"),c(ce,"class","docstring"),c(Qt,"id","transformers.TFBlenderbotForConditionalGeneration"),c(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qt,"href","#transformers.TFBlenderbotForConditionalGeneration"),c(Et,"class","relative group"),c(dr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(rn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(rn,"rel","nofollow"),c(ir,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.TFBlenderbotForConditionalGeneration"),c(Y,"class","docstring"),c(pe,"class","docstring"),c(Yt,"id","transformers.FlaxBlenderbotModel"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#transformers.FlaxBlenderbotModel"),c(qt,"class","relative group"),c(lr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(hn,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(hn,"rel","nofollow"),c(un,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(un,"rel","nofollow"),c(fn,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(fn,"rel","nofollow"),c(mn,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(mn,"rel","nofollow"),c(_n,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(_n,"rel","nofollow"),c(ve,"class","docstring"),c(Ze,"class","docstring"),c(et,"class","docstring"),c(I,"class","docstring"),c(eo,"id","transformers.FlaxBlenderbotForConditionalGeneration"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.FlaxBlenderbotForConditionalGeneration"),c(Pt,"class","relative group"),c(cr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Fn,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Fn,"rel","nofollow"),c($n,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c($n,"rel","nofollow"),c(En,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(En,"rel","nofollow"),c(Mn,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Mn,"rel","nofollow"),c(qn,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(qn,"rel","nofollow"),c(no,"id","generate-reply"),c(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(no,"href","#generate-reply"),c(oo,"class","relative 
group"),c(L,"class","docstring"),c(tt,"class","docstring"),c(ot,"class","docstring"),c(S,"class","docstring")},m(o,h){e(document.head,u),p(o,z,h),p(o,m,h),e(m,T),e(T,B),b(x,B,null),e(m,w),e(m,F),e(F,Ce),p(o,ue,h),p(o,E,h),e(E,ke),e(ke,oe),e(E,Pe),e(E,ne),e(ne,re),e(E,je),p(o,Fe,h),p(o,G,h),e(G,j),e(j,ye),b(R,ye,null),e(G,M),e(G,C),e(C,Ne),p(o,Q,h),p(o,J,h),e(J,Oe),e(J,W),e(W,Ae),e(J,Ie),p(o,N,h),p(o,fe,h),e(fe,ae),p(o,$e,h),p(o,me,h),e(me,H),e(H,Se),p(o,Ee,h),p(o,q,h),e(q,Le),e(q,U),e(U,De),e(q,f),e(q,$),e($,Te),e(q,ht),p(o,Ve,h),p(o,P,h),e(P,_e),e(_e,Ge),b(we,Ge,null),e(P,K),e(P,V),e(V,ut),p(o,Qe,h),p(o,O,h),e(O,xe),e(xe,ft),e(xe,se),e(se,mt),e(xe,Nd),e(O,Od),e(O,ho),e(ho,Ad),e(ho,uo),e(uo,Id),e(ho,Sd),e(O,Ld),e(O,We),e(We,Dd),e(We,$r),e($r,Gd),e(We,Wd),e(We,Er),e(Er,Ud),e(We,Rd),e(We,Wn),e(Wn,Hd),e(We,Kd),p(o,Es,h),p(o,_t,h),e(_t,Nt),e(Nt,Mr),b(fo,Mr,null),e(_t,Vd),e(_t,qr),e(qr,Qd),p(o,Ms,h),p(o,Un,h),e(Un,Jd),p(o,qs,h),b(mo,o,h),p(o,Cs,h),p(o,gt,h),e(gt,Ot),e(Ot,Cr),b(_o,Cr,null),e(gt,Xd),e(gt,Pr),e(Pr,Yd),p(o,Ps,h),p(o,de,h),b(go,de,null),e(de,Zd),e(de,bt),e(bt,ei),e(bt,Rn),e(Rn,ti),e(bt,oi),e(bt,bo),e(bo,ni),e(bt,ri),e(de,ai),e(de,vt),e(vt,si),e(vt,Hn),e(Hn,di),e(vt,ii),e(vt,Kn),e(Kn,li),e(vt,ci),e(de,pi),e(de,jr),e(jr,hi),e(de,ui),b(vo,de,null),p(o,js,h),p(o,kt,h),e(kt,At),e(At,Nr),b(ko,Nr,null),e(kt,fi),e(kt,Or),e(Or,mi),p(o,Ns,h),p(o,ie,h),b(yo,ie,null),e(ie,_i),e(ie,Ar),e(Ar,gi),e(ie,bi),e(ie,It),e(It,Ir),e(Ir,vi),e(It,ki),e(It,Vn),e(Vn,yi),e(It,Ti),e(ie,wi),e(ie,To),e(To,xi),e(To,Qn),e(Qn,Bi),e(To,zi),e(ie,Fi),e(ie,Je),b(wo,Je,null),e(Je,$i),e(Je,Sr),e(Sr,Ei),e(Je,Mi),e(Je,Lr),e(Lr,Jn),e(Jn,qi),e(Jn,Dr),e(Dr,Ci),p(o,Os,h),p(o,yt,h),e(yt,St),e(St,Gr),b(xo,Gr,null),e(yt,Pi),e(yt,Wr),e(Wr,ji),p(o,As,h),p(o,le,h),b(Bo,le,null),e(le,Ni),e(le,zo),e(zo,Oi),e(zo,Ur),e(Ur,Ai),e(zo,Ii),e(le,Si),e(le,Lt),e(Lt,Rr),e(Rr,Li),e(Lt,Di),e(Lt,Xn),e(Xn,Gi),e(Lt,Wi),e(le,Ui),e(le,Fo),e(Fo,Ri),e(Fo,Yn),e(Yn,Hi),e(Fo,Ki),e(le,Vi),e(le,Xe),b($o,Xe,null),e(Xe,Qi),e(Xe,Hr),e(Hr,Ji),e(Xe,Xi),e(Xe,Kr),e(Kr,Zn),e(Zn,Yi),e(Zn,Vr),e(Vr,Zi),p(o,Is,h),p(o,Tt,h),e(Tt,Dt),e(Dt,Qr),b(Eo,Qr,null),e(Tt,el),e(Tt,Jr),e(Jr,tl),p(o,Ss,h),p(o,Ue,h),e(Ue,ol),e(Ue,Xr),e(Xr,nl),e(Ue,rl),e(Ue,Yr),e(Yr,al),e(Ue,sl),e(Ue,Zr),e(Zr,dl),p(o,Ls,h),p(o,Be,h),b(Mo,Be,null),e(Be,il),e(Be,qo),e(qo,ll),e(qo,er),e(er,cl),e(qo,pl),e(Be,hl),e(Be,Co),e(Co,ul),e(Co,Po),e(Po,fl),e(Co,ml),e(Be,_l),e(Be,ge),b(jo,ge,null),e(ge,gl),e(ge,wt),e(wt,bl),e(wt,tr),e(tr,vl),e(wt,kl),e(wt,ea),e(ea,yl),e(wt,Tl),e(ge,wl),b(Gt,ge,null),e(ge,xl),e(ge,ta),e(ta,Bl),e(ge,zl),b(No,ge,null),p(o,Ds,h),p(o,xt,h),e(xt,Wt),e(Wt,oa),b(Oo,oa,null),e(xt,Fl),e(xt,na),e(na,$l),p(o,Gs,h),p(o,Re,h),e(Re,El),e(Re,or),e(or,Ml),e(Re,ql),e(Re,ra),e(ra,Cl),e(Re,Pl),e(Re,aa),e(aa,jl),p(o,Ws,h),p(o,ze,h),b(Ao,ze,null),e(ze,Nl),e(ze,Io),e(Io,Ol),e(Io,nr),e(nr,Al),e(Io,Il),e(ze,Sl),e(ze,So),e(So,Ll),e(So,Lo),e(Lo,Dl),e(So,Gl),e(ze,Wl),e(ze,X),b(Do,X,null),e(X,Ul),e(X,Bt),e(Bt,Rl),e(Bt,rr),e(rr,Hl),e(Bt,Kl),e(Bt,sa),e(sa,Vl),e(Bt,Ql),e(X,Jl),b(Ut,X,null),e(X,Xl),e(X,da),e(da,Yl),e(X,Zl),e(X,ia),e(ia,la),e(la,ca),e(ca,pa),e(pa,ec),e(X,tc),e(X,ha),e(ha,ua),e(ua,fa),e(fa,Go),e(Go,oc),e(Go,ma),e(ma,nc),e(Go,rc),p(o,Us,h),p(o,zt,h),e(zt,Rt),e(Rt,_a),b(Wo,_a,null),e(zt,ac),e(zt,ga),e(ga,sc),p(o,Rs,h),p(o,Uo,h),e(Uo,Ye),b(Ro,Ye,null),e(Ye,dc),e(Ye,ba),e(ba,ic),e(Ye,lc),b(Ho,Ye,null),p(o,Hs,h),p(o,Ft,h),e(Ft,Ht),e(Ht,va),b(Ko,va,null),e(Ft,cc),e(Ft,ka),e(ka,pc),p(o,Ks,h),p(o,ce,h),b(Vo,ce,null),e(ce,hc),e(ce,Qo),e(Qo,uc),e(Qo,ar),e(ar,fc),e(Qo,mc),e(ce,_c),e(ce,Jo),e(Jo,gc),e(Jo,Xo),e(Xo,bc
),e(Jo,vc),e(ce,kc),b(Kt,ce,null),e(ce,yc),e(ce,be),b(Yo,be,null),e(be,Tc),e(be,$t),e($t,wc),e($t,sr),e(sr,xc),e($t,Bc),e($t,ya),e(ya,zc),e($t,Fc),e(be,$c),b(Vt,be,null),e(be,Ec),e(be,Ta),e(Ta,Mc),e(be,qc),b(Zo,be,null),p(o,Vs,h),p(o,Et,h),e(Et,Qt),e(Qt,wa),b(en,wa,null),e(Et,Cc),e(Et,xa),e(xa,Pc),p(o,Qs,h),p(o,pe,h),b(tn,pe,null),e(pe,jc),e(pe,on),e(on,Nc),e(on,dr),e(dr,Oc),e(on,Ac),e(pe,Ic),e(pe,nn),e(nn,Sc),e(nn,rn),e(rn,Lc),e(nn,Dc),e(pe,Gc),b(Jt,pe,null),e(pe,Wc),e(pe,Y),b(an,Y,null),e(Y,Uc),e(Y,Mt),e(Mt,Rc),e(Mt,ir),e(ir,Hc),e(Mt,Kc),e(Mt,Ba),e(Ba,Vc),e(Mt,Qc),e(Y,Jc),b(Xt,Y,null),e(Y,Xc),e(Y,za),e(za,Yc),e(Y,Zc),e(Y,Fa),e(Fa,$a),e($a,Ea),e(Ea,Ma),e(Ma,ep),e(Y,tp),e(Y,qa),e(qa,Ca),e(Ca,Pa),e(Pa,sn),e(sn,op),e(sn,ja),e(ja,np),e(sn,rp),p(o,Js,h),p(o,qt,h),e(qt,Yt),e(Yt,Na),b(dn,Na,null),e(qt,ap),e(qt,Oa),e(Oa,sp),p(o,Xs,h),p(o,I,h),b(ln,I,null),e(I,dp),e(I,cn),e(cn,ip),e(cn,lr),e(lr,lp),e(cn,cp),e(I,pp),e(I,pn),e(pn,hp),e(pn,hn),e(hn,up),e(pn,fp),e(I,mp),e(I,Aa),e(Aa,_p),e(I,gp),e(I,He),e(He,Ia),e(Ia,un),e(un,bp),e(He,vp),e(He,Sa),e(Sa,fn),e(fn,kp),e(He,yp),e(He,La),e(La,mn),e(mn,Tp),e(He,wp),e(He,Da),e(Da,_n),e(_n,xp),e(I,Bp),e(I,ve),b(gn,ve,null),e(ve,zp),e(ve,Ct),e(Ct,Fp),e(Ct,Ga),e(Ga,$p),e(Ct,Ep),e(Ct,Wa),e(Wa,Mp),e(Ct,qp),e(ve,Cp),b(Zt,ve,null),e(ve,Pp),e(ve,Ua),e(Ua,jp),e(ve,Np),b(bn,ve,null),e(I,Op),e(I,Ze),b(vn,Ze,null),e(Ze,Ap),e(Ze,Ra),e(Ra,Ip),e(Ze,Sp),b(kn,Ze,null),e(I,Lp),e(I,et),b(yn,et,null),e(et,Dp),e(et,Ha),e(Ha,Gp),e(et,Wp),b(Tn,et,null),p(o,Ys,h),p(o,Pt,h),e(Pt,eo),e(eo,Ka),b(wn,Ka,null),e(Pt,Up),e(Pt,Va),e(Va,Rp),p(o,Zs,h),p(o,S,h),b(xn,S,null),e(S,Hp),e(S,Bn),e(Bn,Kp),e(Bn,cr),e(cr,Vp),e(Bn,Qp),e(S,Jp),e(S,zn),e(zn,Xp),e(zn,Fn),e(Fn,Yp),e(zn,Zp),e(S,eh),e(S,Qa),e(Qa,th),e(S,oh),e(S,Ke),e(Ke,Ja),e(Ja,$n),e($n,nh),e(Ke,rh),e(Ke,Xa),e(Xa,En),e(En,ah),e(Ke,sh),e(Ke,Ya),e(Ya,Mn),e(Mn,dh),e(Ke,ih),e(Ke,Za),e(Za,qn),e(qn,lh),e(S,ch),e(S,L),b(Cn,L,null),e(L,ph),e(L,jt),e(jt,hh),e(jt,es),e(es,uh),e(jt,fh),e(jt,ts),e(ts,mh),e(jt,_h),e(L,gh),b(to,L,null),e(L,bh),e(L,os),e(os,vh),e(L,kh),e(L,ns),e(ns,rs),e(rs,as),e(as,ss),e(ss,yh),e(L,Th),e(L,ds),e(ds,is),e(is,ls),e(ls,cs),e(cs,wh),e(L,xh),e(L,ps),e(ps,hs),e(hs,us),e(us,fs),e(fs,Bh),e(L,zh),e(L,ms),e(ms,_s),e(_s,Pn),e(Pn,oo),e(oo,no),e(no,gs),b(jn,gs,null),e(oo,Fh),e(oo,bs),e(bs,$h),e(Pn,Eh),e(Pn,vs),e(vs,Mh),e(S,qh),e(S,tt),b(Nn,tt,null),e(tt,Ch),e(tt,ks),e(ks,Ph),e(tt,jh),b(On,tt,null),e(S,Nh),e(S,ot),b(An,ot,null),e(ot,Oh),e(ot,ys),e(ys,Ah),e(ot,Ih),b(In,ot,null),ed=!0},p(o,[h]){const Sn={};h&2&&(Sn.$$scope={dirty:h,ctx:o}),Gt.$set(Sn);const Ts={};h&2&&(Ts.$$scope={dirty:h,ctx:o}),Ut.$set(Ts);const ws={};h&2&&(ws.$$scope={dirty:h,ctx:o}),Kt.$set(ws);const xs={};h&2&&(xs.$$scope={dirty:h,ctx:o}),Vt.$set(xs);const ro={};h&2&&(ro.$$scope={dirty:h,ctx:o}),Jt.$set(ro);const Bs={};h&2&&(Bs.$$scope={dirty:h,ctx:o}),Xt.$set(Bs);const zs={};h&2&&(zs.$$scope={dirty:h,ctx:o}),Zt.$set(zs);const 
Ln={};h&2&&(Ln.$$scope={dirty:h,ctx:o}),to.$set(Ln)},i(o){ed||(v(x.$$.fragment,o),v(R.$$.fragment,o),v(we.$$.fragment,o),v(fo.$$.fragment,o),v(mo.$$.fragment,o),v(_o.$$.fragment,o),v(go.$$.fragment,o),v(vo.$$.fragment,o),v(ko.$$.fragment,o),v(yo.$$.fragment,o),v(wo.$$.fragment,o),v(xo.$$.fragment,o),v(Bo.$$.fragment,o),v($o.$$.fragment,o),v(Eo.$$.fragment,o),v(Mo.$$.fragment,o),v(jo.$$.fragment,o),v(Gt.$$.fragment,o),v(No.$$.fragment,o),v(Oo.$$.fragment,o),v(Ao.$$.fragment,o),v(Do.$$.fragment,o),v(Ut.$$.fragment,o),v(Wo.$$.fragment,o),v(Ro.$$.fragment,o),v(Ho.$$.fragment,o),v(Ko.$$.fragment,o),v(Vo.$$.fragment,o),v(Kt.$$.fragment,o),v(Yo.$$.fragment,o),v(Vt.$$.fragment,o),v(Zo.$$.fragment,o),v(en.$$.fragment,o),v(tn.$$.fragment,o),v(Jt.$$.fragment,o),v(an.$$.fragment,o),v(Xt.$$.fragment,o),v(dn.$$.fragment,o),v(ln.$$.fragment,o),v(gn.$$.fragment,o),v(Zt.$$.fragment,o),v(bn.$$.fragment,o),v(vn.$$.fragment,o),v(kn.$$.fragment,o),v(yn.$$.fragment,o),v(Tn.$$.fragment,o),v(wn.$$.fragment,o),v(xn.$$.fragment,o),v(Cn.$$.fragment,o),v(to.$$.fragment,o),v(jn.$$.fragment,o),v(Nn.$$.fragment,o),v(On.$$.fragment,o),v(An.$$.fragment,o),v(In.$$.fragment,o),ed=!0)},o(o){k(x.$$.fragment,o),k(R.$$.fragment,o),k(we.$$.fragment,o),k(fo.$$.fragment,o),k(mo.$$.fragment,o),k(_o.$$.fragment,o),k(go.$$.fragment,o),k(vo.$$.fragment,o),k(ko.$$.fragment,o),k(yo.$$.fragment,o),k(wo.$$.fragment,o),k(xo.$$.fragment,o),k(Bo.$$.fragment,o),k($o.$$.fragment,o),k(Eo.$$.fragment,o),k(Mo.$$.fragment,o),k(jo.$$.fragment,o),k(Gt.$$.fragment,o),k(No.$$.fragment,o),k(Oo.$$.fragment,o),k(Ao.$$.fragment,o),k(Do.$$.fragment,o),k(Ut.$$.fragment,o),k(Wo.$$.fragment,o),k(Ro.$$.fragment,o),k(Ho.$$.fragment,o),k(Ko.$$.fragment,o),k(Vo.$$.fragment,o),k(Kt.$$.fragment,o),k(Yo.$$.fragment,o),k(Vt.$$.fragment,o),k(Zo.$$.fragment,o),k(en.$$.fragment,o),k(tn.$$.fragment,o),k(Jt.$$.fragment,o),k(an.$$.fragment,o),k(Xt.$$.fragment,o),k(dn.$$.fragment,o),k(ln.$$.fragment,o),k(gn.$$.fragment,o),k(Zt.$$.fragment,o),k(bn.$$.fragment,o),k(vn.$$.fragment,o),k(kn.$$.fragment,o),k(yn.$$.fragment,o),k(Tn.$$.fragment,o),k(wn.$$.fragment,o),k(xn.$$.fragment,o),k(Cn.$$.fragment,o),k(to.$$.fragment,o),k(jn.$$.fragment,o),k(Nn.$$.fragment,o),k(On.$$.fragment,o),k(An.$$.fragment,o),k(In.$$.fragment,o),ed=!1},d(o){t(u),o&&t(z),o&&t(m),y(x),o&&t(ue),o&&t(E),o&&t(Fe),o&&t(G),y(R),o&&t(Q),o&&t(J),o&&t(N),o&&t(fe),o&&t($e),o&&t(me),o&&t(Ee),o&&t(q),o&&t(Ve),o&&t(P),y(we),o&&t(Qe),o&&t(O),o&&t(Es),o&&t(_t),y(fo),o&&t(Ms),o&&t(Un),o&&t(qs),y(mo,o),o&&t(Cs),o&&t(gt),y(_o),o&&t(Ps),o&&t(de),y(go),y(vo),o&&t(js),o&&t(kt),y(ko),o&&t(Ns),o&&t(ie),y(yo),y(wo),o&&t(Os),o&&t(yt),y(xo),o&&t(As),o&&t(le),y(Bo),y($o),o&&t(Is),o&&t(Tt),y(Eo),o&&t(Ss),o&&t(Ue),o&&t(Ls),o&&t(Be),y(Mo),y(jo),y(Gt),y(No),o&&t(Ds),o&&t(xt),y(Oo),o&&t(Gs),o&&t(Re),o&&t(Ws),o&&t(ze),y(Ao),y(Do),y(Ut),o&&t(Us),o&&t(zt),y(Wo),o&&t(Rs),o&&t(Uo),y(Ro),y(Ho),o&&t(Hs),o&&t(Ft),y(Ko),o&&t(Ks),o&&t(ce),y(Vo),y(Kt),y(Yo),y(Vt),y(Zo),o&&t(Vs),o&&t(Et),y(en),o&&t(Qs),o&&t(pe),y(tn),y(Jt),y(an),y(Xt),o&&t(Js),o&&t(qt),y(dn),o&&t(Xs),o&&t(I),y(ln),y(gn),y(Zt),y(bn),y(vn),y(kn),y(yn),y(Tn),o&&t(Ys),o&&t(Pt),y(wn),o&&t(Zs),o&&t(S),y(xn),y(Cn),y(to),y(jn),y(Nn),y(On),y(An),y(In)}}}const n_={local:"blenderbot",sections:[{local:"overview",title:"Overview"},{local:"implementation-notes",title:"Implementation 
Notes"},{local:"usage",title:"Usage"},{local:"transformers.BlenderbotConfig",title:"BlenderbotConfig"},{local:"transformers.BlenderbotTokenizer",title:"BlenderbotTokenizer"},{local:"transformers.BlenderbotTokenizerFast",title:"BlenderbotTokenizerFast"},{local:"transformers.BlenderbotModel",title:"BlenderbotModel"},{local:"transformers.BlenderbotForConditionalGeneration",title:"BlenderbotForConditionalGeneration"},{local:"transformers.BlenderbotForCausalLM",title:"BlenderbotForCausalLM"},{local:"transformers.TFBlenderbotModel",title:"TFBlenderbotModel"},{local:"transformers.TFBlenderbotForConditionalGeneration",title:"TFBlenderbotForConditionalGeneration"},{local:"transformers.FlaxBlenderbotModel",title:"FlaxBlenderbotModel"},{local:"transformers.FlaxBlenderbotForConditionalGeneration",title:"FlaxBlenderbotForConditionalGeneration"}],title:"Blenderbot"};function r_(D,u,z){let{fw:m}=u;return D.$$set=T=>{"fw"in T&&z(0,m=T.fw)},[m]}class p_ extends Um{constructor(u){super();Rm(this,u,r_,o_,Hm,{fw:0})}}export{p_ as default,n_ as metadata};
9,919
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/xlsr_wav2vec2.mdx-5768aafd.js
import{S as Fe,i as He,s as Je,e as r,k as f,w as ze,t as i,L as Ke,c as o,d as a,m as v,a as s,x as Be,h as l,b as h,J as t,g as c,y as Me,K as De,q as Ne,o as Ie,B as Ue}from"../../chunks/vendor-b1433968.js";import{I as je}from"../../chunks/IconCopyLink-7029626d.js";function Ge(ae){let d,W,p,u,X,L,te,$,re,B,m,w,O,E,oe,P,se,M,g,ne,S,ie,le,N,k,ce,I,T,q,he,U,C,pe,j,b,z,fe,ve,A,ue,V,de,me,F,_,we,x,ge,be,H,y,_e,R,ye,Le,J;return L=new je({}),E=new je({}),{c(){d=r("meta"),W=f(),p=r("h1"),u=r("a"),X=r("span"),ze(L.$$.fragment),te=f(),$=r("span"),re=i("XLSR-Wav2Vec2"),B=f(),m=r("h2"),w=r("a"),O=r("span"),ze(E.$$.fragment),oe=f(),P=r("span"),se=i("Overview"),M=f(),g=r("p"),ne=i("The XLSR-Wav2Vec2 model was proposed in "),S=r("a"),ie=i("Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),le=i(` by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.`),N=f(),k=r("p"),ce=i("The abstract from the paper is the following:"),I=f(),T=r("p"),q=r("em"),he=i(`This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. 
We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.`),U=f(),C=r("p"),pe=i("Tips:"),j=f(),b=r("ul"),z=r("li"),fe=i("XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ve=f(),A=r("li"),ue=i(`XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),V=r("a"),de=i("Wav2Vec2CTCTokenizer"),me=i("."),F=f(),_=r("p"),we=i("XLSR-Wav2Vec2\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),x=r("a"),ge=i("Wav2Vec2\u2019s documentation page"),be=i("."),H=f(),y=r("p"),_e=i("The original code can be found "),R=r("a"),ye=i("here"),Le=i("."),this.h()},l(e){const n=Ke('[data-svelte="svelte-1phssyn"]',document.head);d=o(n,"META",{name:!0,content:!0}),n.forEach(a),W=v(e),p=o(e,"H1",{class:!0});var K=s(p);u=o(K,"A",{id:!0,class:!0,href:!0});var Ee=s(u);X=o(Ee,"SPAN",{});var Se=s(X);Be(L.$$.fragment,Se),Se.forEach(a),Ee.forEach(a),te=v(K),$=o(K,"SPAN",{});var Ae=s($);re=l(Ae,"XLSR-Wav2Vec2"),Ae.forEach(a),K.forEach(a),B=v(e),m=o(e,"H2",{class:!0});var D=s(m);w=o(D,"A",{id:!0,class:!0,href:!0});var Re=s(w);O=o(Re,"SPAN",{});var We=s(O);Be(E.$$.fragment,We),We.forEach(a),Re.forEach(a),oe=v(D),P=o(D,"SPAN",{});var ke=s(P);se=l(ke,"Overview"),ke.forEach(a),D.forEach(a),M=v(e),g=o(e,"P",{});var G=s(g);ne=l(G,"The XLSR-Wav2Vec2 model was proposed in "),S=o(G,"A",{href:!0,rel:!0});var Te=s(S);ie=l(Te,"Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),Te.forEach(a),le=l(G,` by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.`),G.forEach(a),N=v(e),k=o(e,"P",{});var Ce=s(k);ce=l(Ce,"The abstract from the paper is the following:"),Ce.forEach(a),I=v(e),T=o(e,"P",{});var Ve=s(T);q=o(Ve,"EM",{});var xe=s(q);he=l(xe,`This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. 
We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.`),xe.forEach(a),Ve.forEach(a),U=v(e),C=o(e,"P",{});var Xe=s(C);pe=l(Xe,"Tips:"),Xe.forEach(a),j=v(e),b=o(e,"UL",{});var Q=s(b);z=o(Q,"LI",{});var $e=s(z);fe=l($e,"XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),$e.forEach(a),ve=v(Q),A=o(Q,"LI",{});var Y=s(A);ue=l(Y,`XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),V=o(Y,"A",{href:!0});var Oe=s(V);de=l(Oe,"Wav2Vec2CTCTokenizer"),Oe.forEach(a),me=l(Y,"."),Y.forEach(a),Q.forEach(a),F=v(e),_=o(e,"P",{});var Z=s(_);we=l(Z,"XLSR-Wav2Vec2\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),x=o(Z,"A",{href:!0});var Pe=s(x);ge=l(Pe,"Wav2Vec2\u2019s documentation page"),Pe.forEach(a),be=l(Z,"."),Z.forEach(a),H=v(e),y=o(e,"P",{});var ee=s(y);_e=l(ee,"The original code can be found "),R=o(ee,"A",{href:!0,rel:!0});var qe=s(R);ye=l(qe,"here"),qe.forEach(a),Le=l(ee,"."),ee.forEach(a),this.h()},h(){h(d,"name","hf:doc:metadata"),h(d,"content",JSON.stringify(Qe)),h(u,"id","xlsrwav2vec2"),h(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(u,"href","#xlsrwav2vec2"),h(p,"class","relative group"),h(w,"id","overview"),h(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(w,"href","#overview"),h(m,"class","relative group"),h(S,"href","https://arxiv.org/abs/2006.13979"),h(S,"rel","nofollow"),h(V,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),h(x,"href","/docs/transformers/v4.15.0/en/wav2vec2"),h(R,"href","https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec"),h(R,"rel","nofollow")},m(e,n){t(document.head,d),c(e,W,n),c(e,p,n),t(p,u),t(u,X),Me(L,X,null),t(p,te),t(p,$),t($,re),c(e,B,n),c(e,m,n),t(m,w),t(w,O),Me(E,O,null),t(m,oe),t(m,P),t(P,se),c(e,M,n),c(e,g,n),t(g,ne),t(g,S),t(S,ie),t(g,le),c(e,N,n),c(e,k,n),t(k,ce),c(e,I,n),c(e,T,n),t(T,q),t(q,he),c(e,U,n),c(e,C,n),t(C,pe),c(e,j,n),c(e,b,n),t(b,z),t(z,fe),t(b,ve),t(b,A),t(A,ue),t(A,V),t(V,de),t(A,me),c(e,F,n),c(e,_,n),t(_,we),t(_,x),t(x,ge),t(_,be),c(e,H,n),c(e,y,n),t(y,_e),t(y,R),t(R,ye),t(y,Le),J=!0},p:De,i(e){J||(Ne(L.$$.fragment,e),Ne(E.$$.fragment,e),J=!0)},o(e){Ie(L.$$.fragment,e),Ie(E.$$.fragment,e),J=!1},d(e){a(d),e&&a(W),e&&a(p),Ue(L),e&&a(B),e&&a(m),Ue(E),e&&a(M),e&&a(g),e&&a(N),e&&a(k),e&&a(I),e&&a(T),e&&a(U),e&&a(C),e&&a(j),e&&a(b),e&&a(F),e&&a(_),e&&a(H),e&&a(y)}}}const Qe={local:"xlsrwav2vec2",sections:[{local:"overview",title:"Overview"}],title:"XLSR-Wav2Vec2"};function Ye(ae,d,W){let{fw:p}=d;return ae.$$set=u=>{"fw"in u&&W(0,p=u.fw)},[p]}class aa extends Fe{constructor(d){super();He(this,d,Ye,Ge,Je,{fw:0})}}export{aa as default,Qe as metadata};
9,920
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/hubert.mdx-1b332f39.js
import{S as Oi,i as Wi,s as Ii,e as a,k as l,w as k,t as s,L as Li,c as r,d as t,m as d,a as i,x as C,h as n,b as c,J as e,g as h,y as H,q as $,o as E,B as F}from"../../chunks/vendor-b1433968.js";import{T as ho}from"../../chunks/Tip-c3840994.js";import{D as ze}from"../../chunks/Docstring-ff504c58.js";import{C as zo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as yt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ni(A){let p,y,f,g,v;return{c(){p=a("p"),y=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=s("Module"),v=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var _=i(p);y=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var T=i(f);g=n(T,"Module"),T.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){h(b,p,_),e(p,y),e(p,f),e(f,g),e(p,v)},d(b){b&&t(p)}}}function Bi(A){let p,y,f,g,v;return{c(){p=a("p"),y=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=s("Module"),v=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var _=i(p);y=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var T=i(f);g=n(T,"Module"),T.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){h(b,p,_),e(p,y),e(p,f),e(f,g),e(p,v)},d(b){b&&t(p)}}}function Ri(A){let p,y,f,g,v;return{c(){p=a("p"),y=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=s("Module"),v=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var _=i(p);y=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var T=i(f);g=n(T,"Module"),T.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){h(b,p,_),e(p,y),e(p,f),e(f,g),e(p,v)},d(b){b&&t(p)}}}function Vi(A){let p,y,f,g,v,b,_,T,Ee,ie,j,K,O,X,Fe,W,je,we,D,I,G,_e,x,q,le,Q,Te,de,L,xe,ke,M,Me,Y,J,be,Z,qe,ee,N,ce,te,Pe;return{c(){p=a("p"),y=s("TF 2.0 models accepts two formats as inputs:"),f=l(),g=a("ul"),v=a("li"),b=s("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),T=a("li"),Ee=s("having all inputs as a list, tuple or dict in the first positional arguments."),ie=l(),j=a("p"),K=s("This second option is useful when using "),O=a("code"),X=s("tf.keras.Model.fit"),Fe=s(` method which currently requires having all the tensors in the first argument of the model call function: `),W=a("code"),je=s("model(inputs)"),we=s("."),D=l(),I=a("p"),G=s(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in 
the first positional argument :`),_e=l(),x=a("ul"),q=a("li"),le=s("a single Tensor with "),Q=a("code"),Te=s("input_values"),de=s(" only and nothing else: "),L=a("code"),xe=s("model(inputs_ids)"),ke=l(),M=a("li"),Me=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Y=a("code"),J=s("model([input_values, attention_mask])"),be=s(" or "),Z=a("code"),qe=s("model([input_values, attention_mask, token_type_ids])"),ee=l(),N=a("li"),ce=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),te=a("code"),Pe=s('model({"input_values": input_values, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var w=i(p);y=n(w,"TF 2.0 models accepts two formats as inputs:"),w.forEach(t),f=d(u),g=r(u,"UL",{});var ve=i(g);v=r(ve,"LI",{});var ye=i(v);b=n(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(ve),T=r(ve,"LI",{});var Ue=i(T);Ee=n(Ue,"having all inputs as a list, tuple or dict in the first positional arguments."),Ue.forEach(t),ve.forEach(t),ie=d(u),j=r(u,"P",{});var B=i(j);K=n(B,"This second option is useful when using "),O=r(B,"CODE",{});var De=i(O);X=n(De,"tf.keras.Model.fit"),De.forEach(t),Fe=n(B,` method which currently requires having all the tensors in the first argument of the model call function: `),W=r(B,"CODE",{});var oe=i(W);je=n(oe,"model(inputs)"),oe.forEach(t),we=n(B,"."),B.forEach(t),D=d(u),I=r(u,"P",{});var pe=i(I);G=n(pe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),pe.forEach(t),_e=d(u),x=r(u,"UL",{});var S=i(x);q=r(S,"LI",{});var z=i(q);le=n(z,"a single Tensor with "),Q=r(z,"CODE",{});var Ke=i(Q);Te=n(Ke,"input_values"),Ke.forEach(t),de=n(z," only and nothing else: "),L=r(z,"CODE",{});var Se=i(L);xe=n(Se,"model(inputs_ids)"),Se.forEach(t),z.forEach(t),ke=d(S),M=r(S,"LI",{});var se=i(M);Me=n(se,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Y=r(se,"CODE",{});var Ae=i(Y);J=n(Ae,"model([input_values, attention_mask])"),Ae.forEach(t),be=n(se," or "),Z=r(se,"CODE",{});var P=i(Z);qe=n(P,"model([input_values, attention_mask, token_type_ids])"),P.forEach(t),se.forEach(t),ee=d(S),N=r(S,"LI",{});var ne=i(N);ce=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),te=r(ne,"CODE",{});var Ye=i(te);Pe=n(Ye,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),Ye.forEach(t),ne.forEach(t),S.forEach(t)},m(u,w){h(u,p,w),e(p,y),h(u,f,w),h(u,g,w),e(g,v),e(v,b),e(g,_),e(g,T),e(T,Ee),h(u,ie,w),h(u,j,w),e(j,K),e(j,O),e(O,X),e(j,Fe),e(j,W),e(W,je),e(j,we),h(u,D,w),h(u,I,w),e(I,G),h(u,_e,w),h(u,x,w),e(x,q),e(q,le),e(q,Q),e(Q,Te),e(q,de),e(q,L),e(L,xe),e(x,ke),e(x,M),e(M,Me),e(M,Y),e(Y,J),e(M,be),e(M,Z),e(Z,qe),e(x,ee),e(x,N),e(N,ce),e(N,te),e(te,Pe)},d(u){u&&t(p),u&&t(f),u&&t(g),u&&t(ie),u&&t(j),u&&t(D),u&&t(I),u&&t(_e),u&&t(x)}}}function Ui(A){let p,y,f,g,v;return{c(){p=a("p"),y=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=s("Module"),v=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var _=i(p);y=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var 
T=i(f);g=n(T,"Module"),T.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){h(b,p,_),e(p,y),e(p,f),e(f,g),e(p,v)},d(b){b&&t(p)}}}function Ki(A){let p,y,f,g,v,b,_,T,Ee,ie,j,K,O,X,Fe,W,je,we,D,I,G,_e,x,q,le,Q,Te,de,L,xe,ke,M,Me,Y,J,be,Z,qe,ee,N,ce,te,Pe;return{c(){p=a("p"),y=s("TF 2.0 models accepts two formats as inputs:"),f=l(),g=a("ul"),v=a("li"),b=s("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),T=a("li"),Ee=s("having all inputs as a list, tuple or dict in the first positional arguments."),ie=l(),j=a("p"),K=s("This second option is useful when using "),O=a("code"),X=s("tf.keras.Model.fit"),Fe=s(` method which currently requires having all the tensors in the first argument of the model call function: `),W=a("code"),je=s("model(inputs)"),we=s("."),D=l(),I=a("p"),G=s(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e=l(),x=a("ul"),q=a("li"),le=s("a single Tensor with "),Q=a("code"),Te=s("input_values"),de=s(" only and nothing else: "),L=a("code"),xe=s("model(inputs_ids)"),ke=l(),M=a("li"),Me=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Y=a("code"),J=s("model([input_values, attention_mask])"),be=s(" or "),Z=a("code"),qe=s("model([input_values, attention_mask, token_type_ids])"),ee=l(),N=a("li"),ce=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),te=a("code"),Pe=s('model({"input_values": input_values, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var w=i(p);y=n(w,"TF 2.0 models accepts two formats as inputs:"),w.forEach(t),f=d(u),g=r(u,"UL",{});var ve=i(g);v=r(ve,"LI",{});var ye=i(v);b=n(ye,"having all inputs as keyword arguments (like PyTorch models), or"),ye.forEach(t),_=d(ve),T=r(ve,"LI",{});var Ue=i(T);Ee=n(Ue,"having all inputs as a list, tuple or dict in the first positional arguments."),Ue.forEach(t),ve.forEach(t),ie=d(u),j=r(u,"P",{});var B=i(j);K=n(B,"This second option is useful when using "),O=r(B,"CODE",{});var De=i(O);X=n(De,"tf.keras.Model.fit"),De.forEach(t),Fe=n(B,` method which currently requires having all the tensors in the first argument of the model call function: `),W=r(B,"CODE",{});var oe=i(W);je=n(oe,"model(inputs)"),oe.forEach(t),we=n(B,"."),B.forEach(t),D=d(u),I=r(u,"P",{});var pe=i(I);G=n(pe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),pe.forEach(t),_e=d(u),x=r(u,"UL",{});var S=i(x);q=r(S,"LI",{});var z=i(q);le=n(z,"a single Tensor with "),Q=r(z,"CODE",{});var Ke=i(Q);Te=n(Ke,"input_values"),Ke.forEach(t),de=n(z," only and nothing else: "),L=r(z,"CODE",{});var Se=i(L);xe=n(Se,"model(inputs_ids)"),Se.forEach(t),z.forEach(t),ke=d(S),M=r(S,"LI",{});var se=i(M);Me=n(se,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Y=r(se,"CODE",{});var Ae=i(Y);J=n(Ae,"model([input_values, attention_mask])"),Ae.forEach(t),be=n(se," or "),Z=r(se,"CODE",{});var P=i(Z);qe=n(P,"model([input_values, attention_mask, token_type_ids])"),P.forEach(t),se.forEach(t),ee=d(S),N=r(S,"LI",{});var ne=i(N);ce=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),te=r(ne,"CODE",{});var 
Ye=i(te);Pe=n(Ye,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),Ye.forEach(t),ne.forEach(t),S.forEach(t)},m(u,w){h(u,p,w),e(p,y),h(u,f,w),h(u,g,w),e(g,v),e(v,b),e(g,_),e(g,T),e(T,Ee),h(u,ie,w),h(u,j,w),e(j,K),e(j,O),e(O,X),e(j,Fe),e(j,W),e(W,je),e(j,we),h(u,D,w),h(u,I,w),e(I,G),h(u,_e,w),h(u,x,w),e(x,q),e(q,le),e(q,Q),e(Q,Te),e(q,de),e(q,L),e(L,xe),e(x,ke),e(x,M),e(M,Me),e(M,Y),e(Y,J),e(M,be),e(M,Z),e(Z,qe),e(x,ee),e(x,N),e(N,ce),e(N,te),e(te,Pe)},d(u){u&&t(p),u&&t(f),u&&t(g),u&&t(ie),u&&t(j),u&&t(D),u&&t(I),u&&t(_e),u&&t(x)}}}function Yi(A){let p,y,f,g,v;return{c(){p=a("p"),y=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=s("Module"),v=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var _=i(p);y=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var T=i(f);g=n(T,"Module"),T.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){h(b,p,_),e(p,y),e(p,f),e(f,g),e(p,v)},d(b){b&&t(p)}}}function Ji(A){let p,y,f,g,v,b,_,T,Ee,ie,j,K,O,X,Fe,W,je,we,D,I,G,_e,x,q,le,Q,Te,de,L,xe,ke,M,Me,Y,J,be,Z,qe,ee,N,ce,te,Pe,u,w,ve,ye,Ue,B,De,oe,pe,S,z,Ke,Se,se,Ae,P,ne,Ye,Je,Us,mo,Ks,Ys,wt,Js,Xs,Gs,Xe,Qs,fo,Zs,en,go,tn,on,sn,Do,nn,an,Tt,us,Ge,lt,Ao,kt,rn,Oo,ln,hs,ae,Ct,dn,Ht,cn,$t,pn,un,hn,Et,mn,_o,fn,gn,_n,Ft,bn,jt,vn,yn,wn,ue,xt,Tn,Qe,kn,bo,Cn,Hn,Wo,$n,En,Fn,dt,jn,Io,xn,Mn,Mt,ms,Ze,ct,Lo,qt,qn,No,Pn,fs,re,Pt,Sn,et,zn,Bo,Dn,An,St,On,Wn,In,zt,Ln,vo,Nn,Bn,Rn,Dt,Vn,At,Un,Kn,Yn,he,Ot,Jn,tt,Xn,yo,Gn,Qn,Ro,Zn,ea,ta,pt,oa,Vo,sa,na,Wt,gs,ot,ut,Uo,It,aa,Ko,ra,_s,R,Lt,ia,Yo,la,da,Nt,ca,Bt,pa,ua,ha,Rt,ma,wo,fa,ga,_a,Vt,ba,Ut,va,ya,wa,me,Kt,Ta,st,ka,To,Ca,Ha,Jo,$a,Ea,Fa,ht,ja,Xo,xa,Ma,Yt,bs,nt,mt,Go,Jt,qa,Qo,Pa,vs,V,Xt,Sa,Zo,za,Da,Gt,Aa,ko,Oa,Wa,Ia,Qt,La,Zt,Na,Ba,Ra,ft,Va,fe,eo,Ua,at,Ka,Co,Ya,Ja,es,Xa,Ga,Qa,gt,Za,ts,er,tr,to,ys,rt,_t,os,oo,or,ss,sr,ws,U,so,nr,no,ar,ns,rr,ir,lr,ao,dr,Ho,cr,pr,ur,ro,hr,io,mr,fr,gr,bt,_r,ge,lo,br,it,vr,$o,yr,wr,as,Tr,kr,Cr,vt,Hr,rs,$r,Er,co,Ts;return b=new yt({}),X=new yt({}),z=new yt({}),ne=new ze({props:{name:"class transformers.HubertConfig",anchor:"transformers.HubertConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_layer_norm",val:" = True"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 
0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"ctc_loss_reduction",val:" = 'sum'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/configuration_hubert.py#L29",parametersDescription:[{anchor:"transformers.HubertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertModel">HubertModel</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertModel">HubertModel</a>.`,name:"vocab_size"},{anchor:"transformers.HubertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.HubertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.HubertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.HubertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.HubertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.HubertConfig.hidden_dropout(float,",description:`<strong>hidden_dropout(<code>float</code>,</strong> <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout(float,"},{anchor:"transformers.HubertConfig.attention_dropout(float,",description:`<strong>attention_dropout(<code>float</code>,</strong> <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout(float,"},{anchor:"transformers.HubertConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.HubertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.HubertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.HubertConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature extractor. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.HubertConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.HubertConfig.feat_proj_layer_norm",description:`<strong>feat_proj_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply LayerNorm to the output of the feature extractor.`,name:"feat_proj_layer_norm"},{anchor:"transformers.HubertConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.HubertConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.HubertConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.HubertConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.HubertConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.HubertConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.HubertConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.HubertConfig.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether do apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.HubertConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.HubertConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.HubertConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.HubertConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.HubertConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.HubertConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.HubertConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.HubertConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForCTC">HubertForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.HubertConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForCTC">HubertForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.HubertConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. 
Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForSequenceClassification">HubertForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.HubertConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"}]}}),Tt=new zo({props:{code:`from transformers import HubertModel, HubertConfig # Initializing a Hubert facebook/hubert-base-ls960 style configuration configuration = HubertConfig() # Initializing a model from the facebook/hubert-base-ls960 style configuration model = HubertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> HubertModel, HubertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Hubert facebook/hubert-base-ls960 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = HubertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/hubert-base-ls960 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),kt=new yt({}),Ct=new ze({props:{name:"class transformers.HubertModel",anchor:"transformers.HubertModel",parameters:[{name:"config",val:": HubertConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_hubert.py#L901",parametersDescription:[{anchor:"transformers.HubertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xt=new ze({props:{name:"forward",anchor:"transformers.HubertModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_hubert.py#L965",parametersDescription:[{anchor:"transformers.HubertModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.HubertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/hubert-base-ls960" rel="nofollow">hubert-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.HubertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.HubertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.HubertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),dt=new ho({props:{$$slots:{default:[Ni]},$$scope:{ctx:A}}}),Mt=new zo({props:{code:`from transformers import Wav2Vec2Processor, HubertModel from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft") model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, HubertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span 
class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-large-ls960-ft&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertModel.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-large-ls960-ft&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),qt=new yt({}),Pt=new ze({props:{name:"class transformers.HubertForCTC",anchor:"transformers.HubertForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_hubert.py#L1042",parametersDescription:[{anchor:"transformers.HubertForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ot=new ze({props:{name:"forward",anchor:"transformers.HubertForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_hubert.py#L1068",parametersDescription:[{anchor:"transformers.HubertForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.HubertForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/hubert-base-ls960" rel="nofollow">hubert-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.HubertForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.HubertForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.HubertForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.HubertForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),pt=new ho({props:{$$slots:{default:[Bi]},$$scope:{ctx:A}}}),Wt=new zo({props:{code:`from transformers import Wav2Vec2Processor, HubertForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('facebook/hubert-large-ls960-ft') model = HubertForCTC.from_pretrained('facebook/hubert-large-ls960-ft') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, HubertForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;facebook/hubert-large-ls960-ft&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertForCTC.from_pretrained(<span class="hljs-string">&#x27;facebook/hubert-large-ls960-ft&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),It=new yt({}),Lt=new ze({props:{name:"class transformers.HubertForSequenceClassification",anchor:"transformers.HubertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_hubert.py#L1154",parametersDescription:[{anchor:"transformers.HubertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new ze({props:{name:"forward",anchor:"transformers.HubertForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_hubert.py#L1183",parametersDescription:[{anchor:"transformers.HubertForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.HubertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/hubert-base-ls960" rel="nofollow">hubert-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.HubertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.HubertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.HubertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.HubertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ht=new 
ho({props:{$$slots:{default:[Ri]},$$scope:{ctx:A}}}),Yt=new zo({props:{code:`from transformers import Wav2Vec2FeatureExtractor, HubertForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('superb/hubert-base-superb-ks') model = HubertForSequenceClassification.from_pretrained('superb/hubert-base-superb-ks') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, HubertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;superb/hubert-base-superb-ks&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;superb/hubert-base-superb-ks&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),Jt=new yt({}),Xt=new ze({props:{name:"class transformers.TFHubertModel",anchor:"transformers.TFHubertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_tf_hubert.py#L1383",parametersDescription:[{anchor:"transformers.TFHubertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ft=new ho({props:{$$slots:{default:[Vi]},$$scope:{ctx:A}}}),eo=new ze({props:{name:"call",anchor:"transformers.TFHubertModel.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_tf_hubert.py#L1389",parametersDescription:[{anchor:"transformers.TFHubertModel.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFHubertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFHubertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFHubertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFHubertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFHubertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFHubertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFHubertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFHubertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFHubertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),gt=new ho({props:{$$slots:{default:[Ui]},$$scope:{ctx:A}}}),to=new zo({props:{code:`from transformers import Wav2Vec2Processor, TFHubertModel from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h") model = TFHubertModel.from_pretrained("facebook/hubert-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFHubertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span 
class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFHubertModel.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),oo=new yt({}),so=new ze({props:{name:"class transformers.TFHubertForCTC",anchor:"transformers.TFHubertForCTC",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_tf_hubert.py#L1479",parametersDescription:[{anchor:"transformers.TFHubertForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bt=new ho({props:{$$slots:{default:[Ki]},$$scope:{ctx:A}}}),lo=new ze({props:{name:"call",anchor:"transformers.TFHubertForCTC.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/hubert/modeling_tf_hubert.py#L1494",parametersDescription:[{anchor:"transformers.TFHubertForCTC.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFHubertForCTC.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFHubertForCTC.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFHubertForCTC.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFHubertForCTC.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFHubertForCTC.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFHubertForCTC.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFHubertForCTC.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFHubertForCTC.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFHubertForCTC.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFHubertForCTC.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_values</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),vt=new ho({props:{$$slots:{default:[Yi]},$$scope:{ctx:A}}}),co=new zo({props:{code:`import tensorflow as tf from transformers import Wav2Vec2Processor, TFHubertForCTC from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h") model = 
TFHubertForCTC.from_pretrained("facebook/hubert-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 logits = model(input_values).logits >>> predicted_ids = tf.argmax(logits, axis=-1) transcription = processor.decode(predicted_ids[0]) # compute loss target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" # wrap processor as target processor to encode labels with processor.as_target_processor(): labels = processor(transcription, return_tensors="tf").input_values loss = model(input_values, labels=labels).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFHubertForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFHubertForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits &gt;&gt;&gt; predicted_ids = tf.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.decode(predicted_ids[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_transcription = <span class="hljs-string">&quot;A MAN SAID TO THE UNIVERSE SIR I EXIST&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># wrap processor as target processor to encode labels</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> labels = processor(transcription, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, labels=labels).loss`}}),{c(){p=a("meta"),y=l(),f=a("h1"),g=a("a"),v=a("span"),k(b.$$.fragment),_=l(),T=a("span"),Ee=s("Hubert"),ie=l(),j=a("h2"),K=a("a"),O=a("span"),k(X.$$.fragment),Fe=l(),W=a("span"),je=s("Overview"),we=l(),D=a("p"),I=s("Hubert was proposed in "),G=a("a"),_e=s("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),x=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),q=l(),le=a("p"),Q=s("The abstract from the paper is the following:"),Te=l(),de=a("p"),L=a("em"),xe=s(`Self-supervised approaches for speech representation learning are challenged by three unique problems: (1) there are multiple sound units in each input utterance, (2) there is no lexicon of input sound units during the pre-training phase, and (3) sound units have variable lengths with no explicit segmentation. To deal with these three problems, we propose the Hidden-Unit BERT (HuBERT) approach for self-supervised speech representation learning, which utilizes an offline clustering step to provide aligned target labels for a BERT-like prediction loss. A key ingredient of our approach is applying the prediction loss over the masked regions only, which forces the model to learn a combined acoustic and language model over the continuous inputs. HuBERT relies primarily on the consistency of the unsupervised clustering step rather than the intrinsic quality of the assigned cluster labels. Starting with a simple k-means teacher of 100 clusters, and using two iterations of clustering, the HuBERT model either matches or improves upon the state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-light (60,000h) benchmarks with 10min, 1h, 10h, 100h, and 960h fine-tuning subsets. 
Using a 1B parameter model, HuBERT shows up to 19% and 13% relative WER reduction on the more challenging dev-other and test-other evaluation subsets.`),ke=l(),M=a("p"),Me=s("Tips:"),Y=l(),J=a("ul"),be=a("li"),Z=s("Hubert is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),qe=l(),ee=a("li"),N=s(`Hubert model was fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),ce=a("a"),te=s("Wav2Vec2CTCTokenizer"),Pe=s("."),u=l(),w=a("p"),ve=s("This model was contributed by "),ye=a("a"),Ue=s("patrickvonplaten"),B=s("."),De=l(),oe=a("h2"),pe=a("a"),S=a("span"),k(z.$$.fragment),Ke=l(),Se=a("span"),se=s("HubertConfig"),Ae=l(),P=a("div"),k(ne.$$.fragment),Ye=l(),Je=a("p"),Us=s("This is the configuration class to store the configuration of a "),mo=a("a"),Ks=s("HubertModel"),Ys=s(`. It is used to instantiate an Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Hubert `),wt=a("a"),Js=s("facebook/hubert-base-ls960"),Xs=s(" architecture."),Gs=l(),Xe=a("p"),Qs=s("Configuration objects inherit from "),fo=a("a"),Zs=s("PretrainedConfig"),en=s(` and can be used to control the model outputs. Read the documentation from `),go=a("a"),tn=s("PretrainedConfig"),on=s(" for more information."),sn=l(),Do=a("p"),nn=s("Example:"),an=l(),k(Tt.$$.fragment),us=l(),Ge=a("h2"),lt=a("a"),Ao=a("span"),k(kt.$$.fragment),rn=l(),Oo=a("span"),ln=s("HubertModel"),hs=l(),ae=a("div"),k(Ct.$$.fragment),dn=l(),Ht=a("p"),cn=s(`The bare Hubert Model transformer outputting raw hidden-states without any specific head on top. Hubert was proposed in `),$t=a("a"),pn=s("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),un=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),hn=l(),Et=a("p"),mn=s("This model inherits from "),_o=a("a"),fn=s("PreTrainedModel"),gn=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),_n=l(),Ft=a("p"),bn=s("This model is a PyTorch "),jt=a("a"),vn=s("torch.nn.Module"),yn=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wn=l(),ue=a("div"),k(xt.$$.fragment),Tn=l(),Qe=a("p"),kn=s("The "),bo=a("a"),Cn=s("HubertModel"),Hn=s(" forward method, overrides the "),Wo=a("code"),$n=s("__call__"),En=s(" special method."),Fn=l(),k(dt.$$.fragment),jn=l(),Io=a("p"),xn=s("Example:"),Mn=l(),k(Mt.$$.fragment),ms=l(),Ze=a("h2"),ct=a("a"),Lo=a("span"),k(qt.$$.fragment),qn=l(),No=a("span"),Pn=s("HubertForCTC"),fs=l(),re=a("div"),k(Pt.$$.fragment),Sn=l(),et=a("p"),zn=s("Hubert Model with a "),Bo=a("code"),Dn=s("language modeling"),An=s(` head on top for Connectionist Temporal Classification (CTC). Hubert was proposed in `),St=a("a"),On=s("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),Wn=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),In=l(),zt=a("p"),Ln=s("This model inherits from "),vo=a("a"),Nn=s("PreTrainedModel"),Bn=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Rn=l(),Dt=a("p"),Vn=s("This model is a PyTorch "),At=a("a"),Un=s("torch.nn.Module"),Kn=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yn=l(),he=a("div"),k(Ot.$$.fragment),Jn=l(),tt=a("p"),Xn=s("The "),yo=a("a"),Gn=s("HubertForCTC"),Qn=s(" forward method, overrides the "),Ro=a("code"),Zn=s("__call__"),ea=s(" special method."),ta=l(),k(pt.$$.fragment),oa=l(),Vo=a("p"),sa=s("Example:"),na=l(),k(Wt.$$.fragment),gs=l(),ot=a("h2"),ut=a("a"),Uo=a("span"),k(It.$$.fragment),aa=l(),Ko=a("span"),ra=s("HubertForSequenceClassification"),_s=l(),R=a("div"),k(Lt.$$.fragment),ia=l(),Yo=a("p"),la=s(`Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),da=l(),Nt=a("p"),ca=s("Hubert was proposed in "),Bt=a("a"),pa=s("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),ua=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),ha=l(),Rt=a("p"),ma=s("This model inherits from "),wo=a("a"),fa=s("PreTrainedModel"),ga=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),_a=l(),Vt=a("p"),ba=s("This model is a PyTorch "),Ut=a("a"),va=s("torch.nn.Module"),ya=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wa=l(),me=a("div"),k(Kt.$$.fragment),Ta=l(),st=a("p"),ka=s("The "),To=a("a"),Ca=s("HubertForSequenceClassification"),Ha=s(" forward method, overrides the "),Jo=a("code"),$a=s("__call__"),Ea=s(" special method."),Fa=l(),k(ht.$$.fragment),ja=l(),Xo=a("p"),xa=s("Example:"),Ma=l(),k(Yt.$$.fragment),bs=l(),nt=a("h2"),mt=a("a"),Go=a("span"),k(Jt.$$.fragment),qa=l(),Qo=a("span"),Pa=s("TFHubertModel"),vs=l(),V=a("div"),k(Xt.$$.fragment),Sa=l(),Zo=a("p"),za=s("The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top."),Da=l(),Gt=a("p"),Aa=s("This model inherits from "),ko=a("a"),Oa=s("TFPreTrainedModel"),Wa=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ia=l(),Qt=a("p"),La=s("This model is also a "),Zt=a("a"),Na=s("tf.keras.Model"),Ba=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ra=l(),k(ft.$$.fragment),Va=l(),fe=a("div"),k(eo.$$.fragment),Ua=l(),at=a("p"),Ka=s("The "),Co=a("a"),Ya=s("TFHubertModel"),Ja=s(" forward method, overrides the "),es=a("code"),Xa=s("__call__"),Ga=s(" special method."),Qa=l(),k(gt.$$.fragment),Za=l(),ts=a("p"),er=s("Example:"),tr=l(),k(to.$$.fragment),ys=l(),rt=a("h2"),_t=a("a"),os=a("span"),k(oo.$$.fragment),or=l(),ss=a("span"),sr=s("TFHubertForCTC"),ws=l(),U=a("div"),k(so.$$.fragment),nr=l(),no=a("p"),ar=s("TFHubert Model with a "),ns=a("code"),rr=s("language modeling"),ir=s(" head on top for Connectionist Temporal Classification (CTC)."),lr=l(),ao=a("p"),dr=s("This model inherits from "),Ho=a("a"),cr=s("TFPreTrainedModel"),pr=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ur=l(),ro=a("p"),hr=s("This model is also a "),io=a("a"),mr=s("tf.keras.Model"),fr=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),gr=l(),k(bt.$$.fragment),_r=l(),ge=a("div"),k(lo.$$.fragment),br=l(),it=a("p"),vr=s("The "),$o=a("a"),yr=s("TFHubertForCTC"),wr=s(" forward method, overrides the "),as=a("code"),Tr=s("__call__"),kr=s(" special method."),Cr=l(),k(vt.$$.fragment),Hr=l(),rs=a("p"),$r=s("Example:"),Er=l(),k(co.$$.fragment),this.h()},l(o){const m=Li('[data-svelte="svelte-1phssyn"]',document.head);p=r(m,"META",{name:!0,content:!0}),m.forEach(t),y=d(o),f=r(o,"H1",{class:!0});var po=i(f);g=r(po,"A",{id:!0,class:!0,href:!0});var is=i(g);v=r(is,"SPAN",{});var ls=i(v);C(b.$$.fragment,ls),ls.forEach(t),is.forEach(t),_=d(po),T=r(po,"SPAN",{});var ds=i(T);Ee=n(ds,"Hubert"),ds.forEach(t),po.forEach(t),ie=d(o),j=r(o,"H2",{class:!0});var uo=i(j);K=r(uo,"A",{id:!0,class:!0,href:!0});var cs=i(K);O=r(cs,"SPAN",{});var ps=i(O);C(X.$$.fragment,ps),ps.forEach(t),cs.forEach(t),Fe=d(uo),W=r(uo,"SPAN",{});var Fr=i(W);je=n(Fr,"Overview"),Fr.forEach(t),uo.forEach(t),we=d(o),D=r(o,"P",{});var ks=i(D);I=n(ks,"Hubert was proposed in "),G=r(ks,"A",{href:!0,rel:!0});var jr=i(G);_e=n(jr,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),jr.forEach(t),x=n(ks,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),ks.forEach(t),q=d(o),le=r(o,"P",{});var xr=i(le);Q=n(xr,"The abstract from the paper is the following:"),xr.forEach(t),Te=d(o),de=r(o,"P",{});var Mr=i(de);L=r(Mr,"EM",{});var qr=i(L);xe=n(qr,`Self-supervised approaches for speech representation learning are challenged by three unique problems: (1) there are multiple sound units in each input utterance, (2) there is no lexicon of input sound units during the pre-training phase, and (3) sound units have variable lengths with no explicit segmentation. To deal with these three problems, we propose the Hidden-Unit BERT (HuBERT) approach for self-supervised speech representation learning, which utilizes an offline clustering step to provide aligned target labels for a BERT-like prediction loss. A key ingredient of our approach is applying the prediction loss over the masked regions only, which forces the model to learn a combined acoustic and language model over the continuous inputs. HuBERT relies primarily on the consistency of the unsupervised clustering step rather than the intrinsic quality of the assigned cluster labels. Starting with a simple k-means teacher of 100 clusters, and using two iterations of clustering, the HuBERT model either matches or improves upon the state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-light (60,000h) benchmarks with 10min, 1h, 10h, 100h, and 960h fine-tuning subsets. 
Using a 1B parameter model, HuBERT shows up to 19% and 13% relative WER reduction on the more challenging dev-other and test-other evaluation subsets.`),qr.forEach(t),Mr.forEach(t),ke=d(o),M=r(o,"P",{});var Pr=i(M);Me=n(Pr,"Tips:"),Pr.forEach(t),Y=d(o),J=r(o,"UL",{});var Cs=i(J);be=r(Cs,"LI",{});var Sr=i(be);Z=n(Sr,"Hubert is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),Sr.forEach(t),qe=d(Cs),ee=r(Cs,"LI",{});var Hs=i(ee);N=n(Hs,`Hubert model was fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),ce=r(Hs,"A",{href:!0});var zr=i(ce);te=n(zr,"Wav2Vec2CTCTokenizer"),zr.forEach(t),Pe=n(Hs,"."),Hs.forEach(t),Cs.forEach(t),u=d(o),w=r(o,"P",{});var $s=i(w);ve=n($s,"This model was contributed by "),ye=r($s,"A",{href:!0,rel:!0});var Dr=i(ye);Ue=n(Dr,"patrickvonplaten"),Dr.forEach(t),B=n($s,"."),$s.forEach(t),De=d(o),oe=r(o,"H2",{class:!0});var Es=i(oe);pe=r(Es,"A",{id:!0,class:!0,href:!0});var Ar=i(pe);S=r(Ar,"SPAN",{});var Or=i(S);C(z.$$.fragment,Or),Or.forEach(t),Ar.forEach(t),Ke=d(Es),Se=r(Es,"SPAN",{});var Wr=i(Se);se=n(Wr,"HubertConfig"),Wr.forEach(t),Es.forEach(t),Ae=d(o),P=r(o,"DIV",{class:!0});var Oe=i(P);C(ne.$$.fragment,Oe),Ye=d(Oe),Je=r(Oe,"P",{});var Eo=i(Je);Us=n(Eo,"This is the configuration class to store the configuration of a "),mo=r(Eo,"A",{href:!0});var Ir=i(mo);Ks=n(Ir,"HubertModel"),Ir.forEach(t),Ys=n(Eo,`. It is used to instantiate an Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Hubert `),wt=r(Eo,"A",{href:!0,rel:!0});var Lr=i(wt);Js=n(Lr,"facebook/hubert-base-ls960"),Lr.forEach(t),Xs=n(Eo," architecture."),Eo.forEach(t),Gs=d(Oe),Xe=r(Oe,"P",{});var Fo=i(Xe);Qs=n(Fo,"Configuration objects inherit from "),fo=r(Fo,"A",{href:!0});var Nr=i(fo);Zs=n(Nr,"PretrainedConfig"),Nr.forEach(t),en=n(Fo,` and can be used to control the model outputs. Read the documentation from `),go=r(Fo,"A",{href:!0});var Br=i(go);tn=n(Br,"PretrainedConfig"),Br.forEach(t),on=n(Fo," for more information."),Fo.forEach(t),sn=d(Oe),Do=r(Oe,"P",{});var Rr=i(Do);nn=n(Rr,"Example:"),Rr.forEach(t),an=d(Oe),C(Tt.$$.fragment,Oe),Oe.forEach(t),us=d(o),Ge=r(o,"H2",{class:!0});var Fs=i(Ge);lt=r(Fs,"A",{id:!0,class:!0,href:!0});var Vr=i(lt);Ao=r(Vr,"SPAN",{});var Ur=i(Ao);C(kt.$$.fragment,Ur),Ur.forEach(t),Vr.forEach(t),rn=d(Fs),Oo=r(Fs,"SPAN",{});var Kr=i(Oo);ln=n(Kr,"HubertModel"),Kr.forEach(t),Fs.forEach(t),hs=d(o),ae=r(o,"DIV",{class:!0});var We=i(ae);C(Ct.$$.fragment,We),dn=d(We),Ht=r(We,"P",{});var js=i(Ht);cn=n(js,`The bare Hubert Model transformer outputting raw hidden-states without any specific head on top. Hubert was proposed in `),$t=r(js,"A",{href:!0,rel:!0});var Yr=i($t);pn=n(Yr,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),Yr.forEach(t),un=n(js,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),js.forEach(t),hn=d(We),Et=r(We,"P",{});var xs=i(Et);mn=n(xs,"This model inherits from "),_o=r(xs,"A",{href:!0});var Jr=i(_o);fn=n(Jr,"PreTrainedModel"),Jr.forEach(t),gn=n(xs,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),xs.forEach(t),_n=d(We),Ft=r(We,"P",{});var Ms=i(Ft);bn=n(Ms,"This model is a PyTorch "),jt=r(Ms,"A",{href:!0,rel:!0});var Xr=i(jt);vn=n(Xr,"torch.nn.Module"),Xr.forEach(t),yn=n(Ms,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ms.forEach(t),wn=d(We),ue=r(We,"DIV",{class:!0});var Ie=i(ue);C(xt.$$.fragment,Ie),Tn=d(Ie),Qe=r(Ie,"P",{});var jo=i(Qe);kn=n(jo,"The "),bo=r(jo,"A",{href:!0});var Gr=i(bo);Cn=n(Gr,"HubertModel"),Gr.forEach(t),Hn=n(jo," forward method, overrides the "),Wo=r(jo,"CODE",{});var Qr=i(Wo);$n=n(Qr,"__call__"),Qr.forEach(t),En=n(jo," special method."),jo.forEach(t),Fn=d(Ie),C(dt.$$.fragment,Ie),jn=d(Ie),Io=r(Ie,"P",{});var Zr=i(Io);xn=n(Zr,"Example:"),Zr.forEach(t),Mn=d(Ie),C(Mt.$$.fragment,Ie),Ie.forEach(t),We.forEach(t),ms=d(o),Ze=r(o,"H2",{class:!0});var qs=i(Ze);ct=r(qs,"A",{id:!0,class:!0,href:!0});var ei=i(ct);Lo=r(ei,"SPAN",{});var ti=i(Lo);C(qt.$$.fragment,ti),ti.forEach(t),ei.forEach(t),qn=d(qs),No=r(qs,"SPAN",{});var oi=i(No);Pn=n(oi,"HubertForCTC"),oi.forEach(t),qs.forEach(t),fs=d(o),re=r(o,"DIV",{class:!0});var Le=i(re);C(Pt.$$.fragment,Le),Sn=d(Le),et=r(Le,"P",{});var xo=i(et);zn=n(xo,"Hubert Model with a "),Bo=r(xo,"CODE",{});var si=i(Bo);Dn=n(si,"language modeling"),si.forEach(t),An=n(xo,` head on top for Connectionist Temporal Classification (CTC). Hubert was proposed in `),St=r(xo,"A",{href:!0,rel:!0});var ni=i(St);On=n(ni,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),ni.forEach(t),Wn=n(xo,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),xo.forEach(t),In=d(Le),zt=r(Le,"P",{});var Ps=i(zt);Ln=n(Ps,"This model inherits from "),vo=r(Ps,"A",{href:!0});var ai=i(vo);Nn=n(ai,"PreTrainedModel"),ai.forEach(t),Bn=n(Ps,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ps.forEach(t),Rn=d(Le),Dt=r(Le,"P",{});var Ss=i(Dt);Vn=n(Ss,"This model is a PyTorch "),At=r(Ss,"A",{href:!0,rel:!0});var ri=i(At);Un=n(ri,"torch.nn.Module"),ri.forEach(t),Kn=n(Ss,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ss.forEach(t),Yn=d(Le),he=r(Le,"DIV",{class:!0});var Ne=i(he);C(Ot.$$.fragment,Ne),Jn=d(Ne),tt=r(Ne,"P",{});var Mo=i(tt);Xn=n(Mo,"The "),yo=r(Mo,"A",{href:!0});var ii=i(yo);Gn=n(ii,"HubertForCTC"),ii.forEach(t),Qn=n(Mo," forward method, overrides the "),Ro=r(Mo,"CODE",{});var li=i(Ro);Zn=n(li,"__call__"),li.forEach(t),ea=n(Mo," special method."),Mo.forEach(t),ta=d(Ne),C(pt.$$.fragment,Ne),oa=d(Ne),Vo=r(Ne,"P",{});var di=i(Vo);sa=n(di,"Example:"),di.forEach(t),na=d(Ne),C(Wt.$$.fragment,Ne),Ne.forEach(t),Le.forEach(t),gs=d(o),ot=r(o,"H2",{class:!0});var zs=i(ot);ut=r(zs,"A",{id:!0,class:!0,href:!0});var ci=i(ut);Uo=r(ci,"SPAN",{});var pi=i(Uo);C(It.$$.fragment,pi),pi.forEach(t),ci.forEach(t),aa=d(zs),Ko=r(zs,"SPAN",{});var ui=i(Ko);ra=n(ui,"HubertForSequenceClassification"),ui.forEach(t),zs.forEach(t),_s=d(o),R=r(o,"DIV",{class:!0});var Ce=i(R);C(Lt.$$.fragment,Ce),ia=d(Ce),Yo=r(Ce,"P",{});var hi=i(Yo);la=n(hi,`Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),hi.forEach(t),da=d(Ce),Nt=r(Ce,"P",{});var Ds=i(Nt);ca=n(Ds,"Hubert was proposed in "),Bt=r(Ds,"A",{href:!0,rel:!0});var mi=i(Bt);pa=n(mi,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),mi.forEach(t),ua=n(Ds,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),Ds.forEach(t),ha=d(Ce),Rt=r(Ce,"P",{});var As=i(Rt);ma=n(As,"This model inherits from "),wo=r(As,"A",{href:!0});var fi=i(wo);fa=n(fi,"PreTrainedModel"),fi.forEach(t),ga=n(As,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),As.forEach(t),_a=d(Ce),Vt=r(Ce,"P",{});var Os=i(Vt);ba=n(Os,"This model is a PyTorch "),Ut=r(Os,"A",{href:!0,rel:!0});var gi=i(Ut);va=n(gi,"torch.nn.Module"),gi.forEach(t),ya=n(Os,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Os.forEach(t),wa=d(Ce),me=r(Ce,"DIV",{class:!0});var Be=i(me);C(Kt.$$.fragment,Be),Ta=d(Be),st=r(Be,"P",{});var qo=i(st);ka=n(qo,"The "),To=r(qo,"A",{href:!0});var _i=i(To);Ca=n(_i,"HubertForSequenceClassification"),_i.forEach(t),Ha=n(qo," forward method, overrides the "),Jo=r(qo,"CODE",{});var bi=i(Jo);$a=n(bi,"__call__"),bi.forEach(t),Ea=n(qo," special method."),qo.forEach(t),Fa=d(Be),C(ht.$$.fragment,Be),ja=d(Be),Xo=r(Be,"P",{});var vi=i(Xo);xa=n(vi,"Example:"),vi.forEach(t),Ma=d(Be),C(Yt.$$.fragment,Be),Be.forEach(t),Ce.forEach(t),bs=d(o),nt=r(o,"H2",{class:!0});var Ws=i(nt);mt=r(Ws,"A",{id:!0,class:!0,href:!0});var yi=i(mt);Go=r(yi,"SPAN",{});var wi=i(Go);C(Jt.$$.fragment,wi),wi.forEach(t),yi.forEach(t),qa=d(Ws),Qo=r(Ws,"SPAN",{});var Ti=i(Qo);Pa=n(Ti,"TFHubertModel"),Ti.forEach(t),Ws.forEach(t),vs=d(o),V=r(o,"DIV",{class:!0});var He=i(V);C(Xt.$$.fragment,He),Sa=d(He),Zo=r(He,"P",{});var ki=i(Zo);za=n(ki,"The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top."),ki.forEach(t),Da=d(He),Gt=r(He,"P",{});var Is=i(Gt);Aa=n(Is,"This model inherits from "),ko=r(Is,"A",{href:!0});var Ci=i(ko);Oa=n(Ci,"TFPreTrainedModel"),Ci.forEach(t),Wa=n(Is,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Is.forEach(t),Ia=d(He),Qt=r(He,"P",{});var Ls=i(Qt);La=n(Ls,"This model is also a "),Zt=r(Ls,"A",{href:!0,rel:!0});var Hi=i(Zt);Na=n(Hi,"tf.keras.Model"),Hi.forEach(t),Ba=n(Ls,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ls.forEach(t),Ra=d(He),C(ft.$$.fragment,He),Va=d(He),fe=r(He,"DIV",{class:!0});var Re=i(fe);C(eo.$$.fragment,Re),Ua=d(Re),at=r(Re,"P",{});var Po=i(at);Ka=n(Po,"The "),Co=r(Po,"A",{href:!0});var $i=i(Co);Ya=n($i,"TFHubertModel"),$i.forEach(t),Ja=n(Po," forward method, overrides the "),es=r(Po,"CODE",{});var Ei=i(es);Xa=n(Ei,"__call__"),Ei.forEach(t),Ga=n(Po," special method."),Po.forEach(t),Qa=d(Re),C(gt.$$.fragment,Re),Za=d(Re),ts=r(Re,"P",{});var Fi=i(ts);er=n(Fi,"Example:"),Fi.forEach(t),tr=d(Re),C(to.$$.fragment,Re),Re.forEach(t),He.forEach(t),ys=d(o),rt=r(o,"H2",{class:!0});var Ns=i(rt);_t=r(Ns,"A",{id:!0,class:!0,href:!0});var ji=i(_t);os=r(ji,"SPAN",{});var xi=i(os);C(oo.$$.fragment,xi),xi.forEach(t),ji.forEach(t),or=d(Ns),ss=r(Ns,"SPAN",{});var Mi=i(ss);sr=n(Mi,"TFHubertForCTC"),Mi.forEach(t),Ns.forEach(t),ws=d(o),U=r(o,"DIV",{class:!0});var $e=i(U);C(so.$$.fragment,$e),nr=d($e),no=r($e,"P",{});var Bs=i(no);ar=n(Bs,"TFHubert Model with a "),ns=r(Bs,"CODE",{});var qi=i(ns);rr=n(qi,"language modeling"),qi.forEach(t),ir=n(Bs," head on top for Connectionist Temporal Classification (CTC)."),Bs.forEach(t),lr=d($e),ao=r($e,"P",{});var Rs=i(ao);dr=n(Rs,"This model inherits from "),Ho=r(Rs,"A",{href:!0});var Pi=i(Ho);cr=n(Pi,"TFPreTrainedModel"),Pi.forEach(t),pr=n(Rs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rs.forEach(t),ur=d($e),ro=r($e,"P",{});var Vs=i(ro);hr=n(Vs,"This model is also a "),io=r(Vs,"A",{href:!0,rel:!0});var Si=i(io);mr=n(Si,"tf.keras.Model"),Si.forEach(t),fr=n(Vs,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vs.forEach(t),gr=d($e),C(bt.$$.fragment,$e),_r=d($e),ge=r($e,"DIV",{class:!0});var Ve=i(ge);C(lo.$$.fragment,Ve),br=d(Ve),it=r(Ve,"P",{});var So=i(it);vr=n(So,"The "),$o=r(So,"A",{href:!0});var zi=i($o);yr=n(zi,"TFHubertForCTC"),zi.forEach(t),wr=n(So," forward method, overrides the "),as=r(So,"CODE",{});var Di=i(as);Tr=n(Di,"__call__"),Di.forEach(t),kr=n(So," special method."),So.forEach(t),Cr=d(Ve),C(vt.$$.fragment,Ve),Hr=d(Ve),rs=r(Ve,"P",{});var Ai=i(rs);$r=n(Ai,"Example:"),Ai.forEach(t),Er=d(Ve),C(co.$$.fragment,Ve),Ve.forEach(t),$e.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(Xi)),c(g,"id","hubert"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#hubert"),c(f,"class","relative group"),c(K,"id","overview"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#overview"),c(j,"class","relative group"),c(G,"href","https://arxiv.org/abs/2106.07447"),c(G,"rel","nofollow"),c(ce,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),c(ye,"href","https://huggingface.co/patrickvonplaten"),c(ye,"rel","nofollow"),c(pe,"id","transformers.HubertConfig"),c(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(pe,"href","#transformers.HubertConfig"),c(oe,"class","relative group"),c(mo,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertModel"),c(wt,"href","https://huggingface.co/facebook/hubert-base-ls960"),c(wt,"rel","nofollow"),c(fo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(go,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(P,"class","docstring"),c(lt,"id","transformers.HubertModel"),c(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(lt,"href","#transformers.HubertModel"),c(Ge,"class","relative group"),c($t,"href","https://arxiv.org/abs/2106.07447"),c($t,"rel","nofollow"),c(_o,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(jt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(jt,"rel","nofollow"),c(bo,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertModel"),c(ue,"class","docstring"),c(ae,"class","docstring"),c(ct,"id","transformers.HubertForCTC"),c(ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ct,"href","#transformers.HubertForCTC"),c(Ze,"class","relative 
group"),c(St,"href","https://arxiv.org/abs/2106.07447"),c(St,"rel","nofollow"),c(vo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(At,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(At,"rel","nofollow"),c(yo,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForCTC"),c(he,"class","docstring"),c(re,"class","docstring"),c(ut,"id","transformers.HubertForSequenceClassification"),c(ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ut,"href","#transformers.HubertForSequenceClassification"),c(ot,"class","relative group"),c(Bt,"href","https://arxiv.org/abs/2106.07447"),c(Bt,"rel","nofollow"),c(wo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ut,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ut,"rel","nofollow"),c(To,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.HubertForSequenceClassification"),c(me,"class","docstring"),c(R,"class","docstring"),c(mt,"id","transformers.TFHubertModel"),c(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(mt,"href","#transformers.TFHubertModel"),c(nt,"class","relative group"),c(ko,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Zt,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Zt,"rel","nofollow"),c(Co,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.TFHubertModel"),c(fe,"class","docstring"),c(V,"class","docstring"),c(_t,"id","transformers.TFHubertForCTC"),c(_t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_t,"href","#transformers.TFHubertForCTC"),c(rt,"class","relative 
group"),c(Ho,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(io,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(io,"rel","nofollow"),c($o,"href","/docs/transformers/v4.15.0/en/model_doc/hubert#transformers.TFHubertForCTC"),c(ge,"class","docstring"),c(U,"class","docstring")},m(o,m){e(document.head,p),h(o,y,m),h(o,f,m),e(f,g),e(g,v),H(b,v,null),e(f,_),e(f,T),e(T,Ee),h(o,ie,m),h(o,j,m),e(j,K),e(K,O),H(X,O,null),e(j,Fe),e(j,W),e(W,je),h(o,we,m),h(o,D,m),e(D,I),e(D,G),e(G,_e),e(D,x),h(o,q,m),h(o,le,m),e(le,Q),h(o,Te,m),h(o,de,m),e(de,L),e(L,xe),h(o,ke,m),h(o,M,m),e(M,Me),h(o,Y,m),h(o,J,m),e(J,be),e(be,Z),e(J,qe),e(J,ee),e(ee,N),e(ee,ce),e(ce,te),e(ee,Pe),h(o,u,m),h(o,w,m),e(w,ve),e(w,ye),e(ye,Ue),e(w,B),h(o,De,m),h(o,oe,m),e(oe,pe),e(pe,S),H(z,S,null),e(oe,Ke),e(oe,Se),e(Se,se),h(o,Ae,m),h(o,P,m),H(ne,P,null),e(P,Ye),e(P,Je),e(Je,Us),e(Je,mo),e(mo,Ks),e(Je,Ys),e(Je,wt),e(wt,Js),e(Je,Xs),e(P,Gs),e(P,Xe),e(Xe,Qs),e(Xe,fo),e(fo,Zs),e(Xe,en),e(Xe,go),e(go,tn),e(Xe,on),e(P,sn),e(P,Do),e(Do,nn),e(P,an),H(Tt,P,null),h(o,us,m),h(o,Ge,m),e(Ge,lt),e(lt,Ao),H(kt,Ao,null),e(Ge,rn),e(Ge,Oo),e(Oo,ln),h(o,hs,m),h(o,ae,m),H(Ct,ae,null),e(ae,dn),e(ae,Ht),e(Ht,cn),e(Ht,$t),e($t,pn),e(Ht,un),e(ae,hn),e(ae,Et),e(Et,mn),e(Et,_o),e(_o,fn),e(Et,gn),e(ae,_n),e(ae,Ft),e(Ft,bn),e(Ft,jt),e(jt,vn),e(Ft,yn),e(ae,wn),e(ae,ue),H(xt,ue,null),e(ue,Tn),e(ue,Qe),e(Qe,kn),e(Qe,bo),e(bo,Cn),e(Qe,Hn),e(Qe,Wo),e(Wo,$n),e(Qe,En),e(ue,Fn),H(dt,ue,null),e(ue,jn),e(ue,Io),e(Io,xn),e(ue,Mn),H(Mt,ue,null),h(o,ms,m),h(o,Ze,m),e(Ze,ct),e(ct,Lo),H(qt,Lo,null),e(Ze,qn),e(Ze,No),e(No,Pn),h(o,fs,m),h(o,re,m),H(Pt,re,null),e(re,Sn),e(re,et),e(et,zn),e(et,Bo),e(Bo,Dn),e(et,An),e(et,St),e(St,On),e(et,Wn),e(re,In),e(re,zt),e(zt,Ln),e(zt,vo),e(vo,Nn),e(zt,Bn),e(re,Rn),e(re,Dt),e(Dt,Vn),e(Dt,At),e(At,Un),e(Dt,Kn),e(re,Yn),e(re,he),H(Ot,he,null),e(he,Jn),e(he,tt),e(tt,Xn),e(tt,yo),e(yo,Gn),e(tt,Qn),e(tt,Ro),e(Ro,Zn),e(tt,ea),e(he,ta),H(pt,he,null),e(he,oa),e(he,Vo),e(Vo,sa),e(he,na),H(Wt,he,null),h(o,gs,m),h(o,ot,m),e(ot,ut),e(ut,Uo),H(It,Uo,null),e(ot,aa),e(ot,Ko),e(Ko,ra),h(o,_s,m),h(o,R,m),H(Lt,R,null),e(R,ia),e(R,Yo),e(Yo,la),e(R,da),e(R,Nt),e(Nt,ca),e(Nt,Bt),e(Bt,pa),e(Nt,ua),e(R,ha),e(R,Rt),e(Rt,ma),e(Rt,wo),e(wo,fa),e(Rt,ga),e(R,_a),e(R,Vt),e(Vt,ba),e(Vt,Ut),e(Ut,va),e(Vt,ya),e(R,wa),e(R,me),H(Kt,me,null),e(me,Ta),e(me,st),e(st,ka),e(st,To),e(To,Ca),e(st,Ha),e(st,Jo),e(Jo,$a),e(st,Ea),e(me,Fa),H(ht,me,null),e(me,ja),e(me,Xo),e(Xo,xa),e(me,Ma),H(Yt,me,null),h(o,bs,m),h(o,nt,m),e(nt,mt),e(mt,Go),H(Jt,Go,null),e(nt,qa),e(nt,Qo),e(Qo,Pa),h(o,vs,m),h(o,V,m),H(Xt,V,null),e(V,Sa),e(V,Zo),e(Zo,za),e(V,Da),e(V,Gt),e(Gt,Aa),e(Gt,ko),e(ko,Oa),e(Gt,Wa),e(V,Ia),e(V,Qt),e(Qt,La),e(Qt,Zt),e(Zt,Na),e(Qt,Ba),e(V,Ra),H(ft,V,null),e(V,Va),e(V,fe),H(eo,fe,null),e(fe,Ua),e(fe,at),e(at,Ka),e(at,Co),e(Co,Ya),e(at,Ja),e(at,es),e(es,Xa),e(at,Ga),e(fe,Qa),H(gt,fe,null),e(fe,Za),e(fe,ts),e(ts,er),e(fe,tr),H(to,fe,null),h(o,ys,m),h(o,rt,m),e(rt,_t),e(_t,os),H(oo,os,null),e(rt,or),e(rt,ss),e(ss,sr),h(o,ws,m),h(o,U,m),H(so,U,null),e(U,nr),e(U,no),e(no,ar),e(no,ns),e(ns,rr),e(no,ir),e(U,lr),e(U,ao),e(ao,dr),e(ao,Ho),e(Ho,cr),e(ao,pr),e(U,ur),e(U,ro),e(ro,hr),e(ro,io),e(io,mr),e(ro,fr),e(U,gr),H(bt,U,null),e(U,_r),e(U,ge),H(lo,ge,null),e(ge,br),e(ge,it),e(it,vr),e(it,$o),e($o,yr),e(it,wr),e(it,as),e(as,Tr),e(it,kr),e(ge,Cr),H(vt,ge,null),e(ge,Hr),e(ge,rs),e(rs,$r),e(ge,Er),H(co,ge,null),Ts=!0},p(o,[m]){const po={};m&2&&(po.$$scope={dirty:m,ctx:o}),dt.$set(po);const 
is={};m&2&&(is.$$scope={dirty:m,ctx:o}),pt.$set(is);const ls={};m&2&&(ls.$$scope={dirty:m,ctx:o}),ht.$set(ls);const ds={};m&2&&(ds.$$scope={dirty:m,ctx:o}),ft.$set(ds);const uo={};m&2&&(uo.$$scope={dirty:m,ctx:o}),gt.$set(uo);const cs={};m&2&&(cs.$$scope={dirty:m,ctx:o}),bt.$set(cs);const ps={};m&2&&(ps.$$scope={dirty:m,ctx:o}),vt.$set(ps)},i(o){Ts||($(b.$$.fragment,o),$(X.$$.fragment,o),$(z.$$.fragment,o),$(ne.$$.fragment,o),$(Tt.$$.fragment,o),$(kt.$$.fragment,o),$(Ct.$$.fragment,o),$(xt.$$.fragment,o),$(dt.$$.fragment,o),$(Mt.$$.fragment,o),$(qt.$$.fragment,o),$(Pt.$$.fragment,o),$(Ot.$$.fragment,o),$(pt.$$.fragment,o),$(Wt.$$.fragment,o),$(It.$$.fragment,o),$(Lt.$$.fragment,o),$(Kt.$$.fragment,o),$(ht.$$.fragment,o),$(Yt.$$.fragment,o),$(Jt.$$.fragment,o),$(Xt.$$.fragment,o),$(ft.$$.fragment,o),$(eo.$$.fragment,o),$(gt.$$.fragment,o),$(to.$$.fragment,o),$(oo.$$.fragment,o),$(so.$$.fragment,o),$(bt.$$.fragment,o),$(lo.$$.fragment,o),$(vt.$$.fragment,o),$(co.$$.fragment,o),Ts=!0)},o(o){E(b.$$.fragment,o),E(X.$$.fragment,o),E(z.$$.fragment,o),E(ne.$$.fragment,o),E(Tt.$$.fragment,o),E(kt.$$.fragment,o),E(Ct.$$.fragment,o),E(xt.$$.fragment,o),E(dt.$$.fragment,o),E(Mt.$$.fragment,o),E(qt.$$.fragment,o),E(Pt.$$.fragment,o),E(Ot.$$.fragment,o),E(pt.$$.fragment,o),E(Wt.$$.fragment,o),E(It.$$.fragment,o),E(Lt.$$.fragment,o),E(Kt.$$.fragment,o),E(ht.$$.fragment,o),E(Yt.$$.fragment,o),E(Jt.$$.fragment,o),E(Xt.$$.fragment,o),E(ft.$$.fragment,o),E(eo.$$.fragment,o),E(gt.$$.fragment,o),E(to.$$.fragment,o),E(oo.$$.fragment,o),E(so.$$.fragment,o),E(bt.$$.fragment,o),E(lo.$$.fragment,o),E(vt.$$.fragment,o),E(co.$$.fragment,o),Ts=!1},d(o){t(p),o&&t(y),o&&t(f),F(b),o&&t(ie),o&&t(j),F(X),o&&t(we),o&&t(D),o&&t(q),o&&t(le),o&&t(Te),o&&t(de),o&&t(ke),o&&t(M),o&&t(Y),o&&t(J),o&&t(u),o&&t(w),o&&t(De),o&&t(oe),F(z),o&&t(Ae),o&&t(P),F(ne),F(Tt),o&&t(us),o&&t(Ge),F(kt),o&&t(hs),o&&t(ae),F(Ct),F(xt),F(dt),F(Mt),o&&t(ms),o&&t(Ze),F(qt),o&&t(fs),o&&t(re),F(Pt),F(Ot),F(pt),F(Wt),o&&t(gs),o&&t(ot),F(It),o&&t(_s),o&&t(R),F(Lt),F(Kt),F(ht),F(Yt),o&&t(bs),o&&t(nt),F(Jt),o&&t(vs),o&&t(V),F(Xt),F(ft),F(eo),F(gt),F(to),o&&t(ys),o&&t(rt),F(oo),o&&t(ws),o&&t(U),F(so),F(bt),F(lo),F(vt),F(co)}}}const Xi={local:"hubert",sections:[{local:"overview",title:"Overview"},{local:"transformers.HubertConfig",title:"HubertConfig"},{local:"transformers.HubertModel",title:"HubertModel"},{local:"transformers.HubertForCTC",title:"HubertForCTC"},{local:"transformers.HubertForSequenceClassification",title:"HubertForSequenceClassification"},{local:"transformers.TFHubertModel",title:"TFHubertModel"},{local:"transformers.TFHubertForCTC",title:"TFHubertForCTC"}],title:"Hubert"};function Gi(A,p,y){let{fw:f}=p;return A.$$set=g=>{"fw"in g&&y(0,f=g.fw)},[f]}class nl extends Oi{constructor(p){super();Wi(this,p,Gi,Ji,Ii,{fw:0})}}export{nl as default,Xi as metadata};
9921
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/wav2vec2.mdx-4e717138.js
import{S as ak,i as sk,s as nk,e as a,k as c,w as _,t as r,L as rk,c as s,d as t,m as d,a as n,x as v,h as i,b as l,J as e,g as m,y as w,q as b,o as y,B as k}from"../../chunks/vendor-b1433968.js";import{T as oe}from"../../chunks/Tip-c3840994.js";import{D as F}from"../../chunks/Docstring-ff504c58.js";import{C as at}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Y}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ik(C){let p,x,f,T,W,u,g,$;return{c(){p=a("p"),x=r(`This class method is simply calling Wav2Vec2FeatureExtractor\u2019s `),f=a("a"),T=r("from_pretrained()"),W=r(` and PreTrainedTokenizer\u2019s `),u=a("code"),g=r("from_pretrained"),$=r(`. Please refer to the docstrings of the methods above for more information.`),this.h()},l(q){p=s(q,"P",{});var P=n(p);x=i(P,`This class method is simply calling Wav2Vec2FeatureExtractor\u2019s `),f=s(P,"A",{href:!0});var j=n(f);T=i(j,"from_pretrained()"),j.forEach(t),W=i(P,` and PreTrainedTokenizer\u2019s `),u=s(P,"CODE",{});var A=n(u);g=i(A,"from_pretrained"),A.forEach(t),$=i(P,`. Please refer to the docstrings of the methods above for more information.`),P.forEach(t),this.h()},h(){l(f,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.feature_extraction_utils.FeatureExtractionMixin.from_pretrained")},m(q,P){m(q,p,P),e(p,x),e(p,f),e(f,T),e(p,W),e(p,u),e(u,g),e(p,$)},d(q){q&&t(p)}}}function lk(C){let p,x,f,T,W,u,g,$;return{c(){p=a("p"),x=r(`This class method is simply calling `),f=a("a"),T=r("save_pretrained()"),W=r(` and `),u=a("code"),g=r("save_pretrained"),$=r(`. Please refer to the docstrings of the methods above for more information.`),this.h()},l(q){p=s(q,"P",{});var P=n(p);x=i(P,`This class method is simply calling `),f=s(P,"A",{href:!0});var j=n(f);T=i(j,"save_pretrained()"),j.forEach(t),W=i(P,` and `),u=s(P,"CODE",{});var A=n(u);g=i(A,"save_pretrained"),A.forEach(t),$=i(P,`. 
Please refer to the docstrings of the methods above for more information.`),P.forEach(t),this.h()},h(){l(f,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained")},m(q,P){m(q,p,P),e(p,x),e(p,f),e(f,T),e(p,W),e(p,u),e(u,g),e(p,$)},d(q){q&&t(p)}}}function ck(C){let p,x,f,T,W,u,g,$,q,P,j,A,D,N;return{c(){p=a("p"),x=r(`This class method is simply calling Wav2Vec2FeatureExtractor\u2019s `),f=a("a"),T=r("from_pretrained()"),W=r(`, Wav2Vec2CTCTokenizer\u2019s `),u=a("code"),g=r("from_pretrained"),$=r(`, and `),q=a("code"),P=r("pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub"),j=r("."),A=c(),D=a("p"),N=r("Please refer to the docstrings of the methods above for more information."),this.h()},l(M){p=s(M,"P",{});var E=n(p);x=i(E,`This class method is simply calling Wav2Vec2FeatureExtractor\u2019s `),f=s(E,"A",{href:!0});var ae=n(f);T=i(ae,"from_pretrained()"),ae.forEach(t),W=i(E,`, Wav2Vec2CTCTokenizer\u2019s `),u=s(E,"CODE",{});var ee=n(u);g=i(ee,"from_pretrained"),ee.forEach(t),$=i(E,`, and `),q=s(E,"CODE",{});var S=n(q);P=i(S,"pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub"),S.forEach(t),j=i(E,"."),E.forEach(t),A=d(M),D=s(M,"P",{});var B=n(D);N=i(B,"Please refer to the docstrings of the methods above for more information."),B.forEach(t),this.h()},h(){l(f,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.feature_extraction_utils.FeatureExtractionMixin.from_pretrained")},m(M,E){m(M,p,E),e(p,x),e(p,f),e(f,T),e(p,W),e(p,u),e(u,g),e(p,$),e(p,q),e(q,P),e(p,j),m(M,A,E),m(M,D,E),e(D,N)},d(M){M&&t(p),M&&t(A),M&&t(D)}}}function dk(C){let p,x,f,T,W,u,g,$,q,P,j,A,D,N;return{c(){p=a("p"),x=r(`This class method is simply calling `),f=a("code"),T=r("save_pretrained,"),W=c(),u=a("code"),g=r("save_pretrained"),$=r(` and pyctcdecode\u2019s `),q=a("code"),P=r("pyctcdecode.BeamSearchDecoderCTC.save_to_dir"),j=r("."),A=c(),D=a("p"),N=r("Please refer to the docstrings of the methods above for more information.")},l(M){p=s(M,"P",{});var E=n(p);x=i(E,`This class method is simply calling `),f=s(E,"CODE",{});var ae=n(f);T=i(ae,"save_pretrained,"),ae.forEach(t),W=d(E),u=s(E,"CODE",{});var ee=n(u);g=i(ee,"save_pretrained"),ee.forEach(t),$=i(E,` and pyctcdecode\u2019s `),q=s(E,"CODE",{});var S=n(q);P=i(S,"pyctcdecode.BeamSearchDecoderCTC.save_to_dir"),S.forEach(t),j=i(E,"."),E.forEach(t),A=d(M),D=s(M,"P",{});var B=n(D);N=i(B,"Please refer to the docstrings of the methods above for more information."),B.forEach(t)},m(M,E){m(M,p,E),e(p,x),e(p,f),e(f,T),e(p,W),e(p,u),e(u,g),e(p,$),e(p,q),e(q,P),e(p,j),m(M,A,E),m(M,D,E),e(D,N)},d(M){M&&t(p),M&&t(A),M&&t(D)}}}function pk(C){let p,x;return{c(){p=a("p"),x=r("This function makes use of Python\u2019s multiprocessing.")},l(f){p=s(f,"P",{});var T=n(p);x=i(T,"This function makes use of Python\u2019s multiprocessing."),T.forEach(t)},m(f,T){m(f,p,T),e(p,x)},d(f){f&&t(p)}}}function hk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes 
care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function mk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function fk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function uk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function gk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function _k(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the 
latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function vk(C){let p,x,f,T,W,u,g,$,q,P,j,A,D,N,M,E,ae,ee,S,B,fe,He,O,H,$e,ue,Ze,Ve,se,st,Ke,I,nt,he,me,Re,ge,rt,_e,ne,Fe,ve,it;return{c(){p=a("p"),x=r("TF 2.0 models accepts two formats as inputs:"),f=c(),T=a("ul"),W=a("li"),u=r("having all inputs as keyword arguments (like PyTorch models), or"),g=c(),$=a("li"),q=r("having all inputs as a list, tuple or dict in the first positional arguments."),P=c(),j=a("p"),A=r("This second option is useful when using "),D=a("code"),N=r("tf.keras.Model.fit"),M=r(` method which currently requires having all the tensors in the first argument of the model call function: `),E=a("code"),ae=r("model(inputs)"),ee=r("."),S=c(),B=a("p"),fe=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),He=c(),O=a("ul"),H=a("li"),$e=r("a single Tensor with "),ue=a("code"),Ze=r("input_values"),Ve=r(" only and nothing else: "),se=a("code"),st=r("model(inputs_ids)"),Ke=c(),I=a("li"),nt=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),he=a("code"),me=r("model([input_values, attention_mask])"),Re=r(" or "),ge=a("code"),rt=r("model([input_values, attention_mask, token_type_ids])"),_e=c(),ne=a("li"),Fe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ve=a("code"),it=r('model({"input_values": input_values, "token_type_ids": token_type_ids})')},l(V){p=s(V,"P",{});var z=n(p);x=i(z,"TF 2.0 models accepts two formats as inputs:"),z.forEach(t),f=d(V),T=s(V,"UL",{});var Xe=n(T);W=s(Xe,"LI",{});var Je=n(W);u=i(Je,"having all inputs as keyword arguments (like PyTorch models), or"),Je.forEach(t),g=d(Xe),$=s(Xe,"LI",{});var Nt=n($);q=i(Nt,"having all inputs as a list, tuple or dict in the first positional arguments."),Nt.forEach(t),Xe.forEach(t),P=d(V),j=s(V,"P",{});var re=n(j);A=i(re,"This second option is useful when using "),D=s(re,"CODE",{});var wt=n(D);N=i(wt,"tf.keras.Model.fit"),wt.forEach(t),M=i(re,` method which currently requires having all the tensors in the first argument of the model call function: `),E=s(re,"CODE",{});var we=n(E);ae=i(we,"model(inputs)"),we.forEach(t),ee=i(re,"."),re.forEach(t),S=d(V),B=s(V,"P",{});var je=n(B);fe=i(je,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),je.forEach(t),He=d(V),O=s(V,"UL",{});var J=n(O);H=s(J,"LI",{});var G=n(H);$e=i(G,"a single Tensor with "),ue=s(G,"CODE",{});var Bt=n(ue);Ze=i(Bt,"input_values"),Bt.forEach(t),Ve=i(G," only and nothing else: "),se=s(G,"CODE",{});var lt=n(se);st=i(lt,"model(inputs_ids)"),lt.forEach(t),G.forEach(t),Ke=d(J),I=s(J,"LI",{});var be=n(I);nt=i(be,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),he=s(be,"CODE",{});var bt=n(he);me=i(bt,"model([input_values, attention_mask])"),bt.forEach(t),Re=i(be," or "),ge=s(be,"CODE",{});var R=n(ge);rt=i(R,"model([input_values, attention_mask, 
token_type_ids])"),R.forEach(t),be.forEach(t),_e=d(J),ne=s(J,"LI",{});var ye=n(ne);Fe=i(ye,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ve=s(ye,"CODE",{});var Ut=n(ve);it=i(Ut,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),Ut.forEach(t),ye.forEach(t),J.forEach(t)},m(V,z){m(V,p,z),e(p,x),m(V,f,z),m(V,T,z),e(T,W),e(W,u),e(T,g),e(T,$),e($,q),m(V,P,z),m(V,j,z),e(j,A),e(j,D),e(D,N),e(j,M),e(j,E),e(E,ae),e(j,ee),m(V,S,z),m(V,B,z),e(B,fe),m(V,He,z),m(V,O,z),e(O,H),e(H,$e),e(H,ue),e(ue,Ze),e(H,Ve),e(H,se),e(se,st),e(O,Ke),e(O,I),e(I,nt),e(I,he),e(he,me),e(I,Re),e(I,ge),e(ge,rt),e(O,_e),e(O,ne),e(ne,Fe),e(ne,ve),e(ve,it)},d(V){V&&t(p),V&&t(f),V&&t(T),V&&t(P),V&&t(j),V&&t(S),V&&t(B),V&&t(He),V&&t(O)}}}function wk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function bk(C){let p,x,f,T,W,u,g,$,q,P,j,A,D,N,M,E,ae,ee,S,B,fe,He,O,H,$e,ue,Ze,Ve,se,st,Ke,I,nt,he,me,Re,ge,rt,_e,ne,Fe,ve,it;return{c(){p=a("p"),x=r("TF 2.0 models accepts two formats as inputs:"),f=c(),T=a("ul"),W=a("li"),u=r("having all inputs as keyword arguments (like PyTorch models), or"),g=c(),$=a("li"),q=r("having all inputs as a list, tuple or dict in the first positional arguments."),P=c(),j=a("p"),A=r("This second option is useful when using "),D=a("code"),N=r("tf.keras.Model.fit"),M=r(` method which currently requires having all the tensors in the first argument of the model call function: `),E=a("code"),ae=r("model(inputs)"),ee=r("."),S=c(),B=a("p"),fe=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),He=c(),O=a("ul"),H=a("li"),$e=r("a single Tensor with "),ue=a("code"),Ze=r("input_values"),Ve=r(" only and nothing else: "),se=a("code"),st=r("model(inputs_ids)"),Ke=c(),I=a("li"),nt=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),he=a("code"),me=r("model([input_values, attention_mask])"),Re=r(" or "),ge=a("code"),rt=r("model([input_values, attention_mask, token_type_ids])"),_e=c(),ne=a("li"),Fe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ve=a("code"),it=r('model({"input_values": input_values, "token_type_ids": token_type_ids})')},l(V){p=s(V,"P",{});var z=n(p);x=i(z,"TF 2.0 models accepts two formats as inputs:"),z.forEach(t),f=d(V),T=s(V,"UL",{});var Xe=n(T);W=s(Xe,"LI",{});var Je=n(W);u=i(Je,"having all inputs as keyword arguments (like PyTorch models), or"),Je.forEach(t),g=d(Xe),$=s(Xe,"LI",{});var Nt=n($);q=i(Nt,"having all inputs as a list, tuple or dict in the first positional arguments."),Nt.forEach(t),Xe.forEach(t),P=d(V),j=s(V,"P",{});var re=n(j);A=i(re,"This second option is useful when using "),D=s(re,"CODE",{});var 
wt=n(D);N=i(wt,"tf.keras.Model.fit"),wt.forEach(t),M=i(re,` method which currently requires having all the tensors in the first argument of the model call function: `),E=s(re,"CODE",{});var we=n(E);ae=i(we,"model(inputs)"),we.forEach(t),ee=i(re,"."),re.forEach(t),S=d(V),B=s(V,"P",{});var je=n(B);fe=i(je,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),je.forEach(t),He=d(V),O=s(V,"UL",{});var J=n(O);H=s(J,"LI",{});var G=n(H);$e=i(G,"a single Tensor with "),ue=s(G,"CODE",{});var Bt=n(ue);Ze=i(Bt,"input_values"),Bt.forEach(t),Ve=i(G," only and nothing else: "),se=s(G,"CODE",{});var lt=n(se);st=i(lt,"model(inputs_ids)"),lt.forEach(t),G.forEach(t),Ke=d(J),I=s(J,"LI",{});var be=n(I);nt=i(be,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),he=s(be,"CODE",{});var bt=n(he);me=i(bt,"model([input_values, attention_mask])"),bt.forEach(t),Re=i(be," or "),ge=s(be,"CODE",{});var R=n(ge);rt=i(R,"model([input_values, attention_mask, token_type_ids])"),R.forEach(t),be.forEach(t),_e=d(J),ne=s(J,"LI",{});var ye=n(ne);Fe=i(ye,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ve=s(ye,"CODE",{});var Ut=n(ve);it=i(Ut,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),Ut.forEach(t),ye.forEach(t),J.forEach(t)},m(V,z){m(V,p,z),e(p,x),m(V,f,z),m(V,T,z),e(T,W),e(W,u),e(T,g),e(T,$),e($,q),m(V,P,z),m(V,j,z),e(j,A),e(j,D),e(D,N),e(j,M),e(j,E),e(E,ae),e(j,ee),m(V,S,z),m(V,B,z),e(B,fe),m(V,He,z),m(V,O,z),e(O,H),e(H,$e),e(H,ue),e(ue,Ze),e(H,Ve),e(H,se),e(se,st),e(O,Ke),e(O,I),e(I,nt),e(I,he),e(he,me),e(I,Re),e(I,ge),e(ge,rt),e(O,_e),e(O,ne),e(ne,Fe),e(ne,ve),e(ve,it)},d(V){V&&t(p),V&&t(f),V&&t(T),V&&t(P),V&&t(j),V&&t(S),V&&t(B),V&&t(He),V&&t(O)}}}function yk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function kk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function Tk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call 
the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function xk(C){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=n(p);x=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var $=n(f);T=i($,"Module"),$.forEach(t),W=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(u,g){m(u,p,g),e(p,x),e(p,f),e(f,T),e(p,W)},d(u){u&&t(p)}}}function Wk(C){let p,x,f,T,W,u,g,$,q,P,j,A,D,N,M,E,ae,ee,S,B,fe,He,O,H,$e,ue,Ze,Ve,se,st,Ke,I,nt,he,me,Re,ge,rt,_e,ne,Fe,ve,it,V,z,Xe,Je,Nt,re,wt,we,je,J,G,Bt,lt,be,bt,R,ye,Ut,Ht,ah,jr,sh,nh,Pa,rh,ih,lh,Rt,ch,Er,dh,ph,Pr,hh,mh,fh,Hi,uh,gh,Ca,td,Xt,Eo,Ri,qa,_h,Xi,vh,od,ke,Ma,wh,Ji,bh,yh,za,kh,Cr,Th,xh,Wh,Po,Aa,$h,Gi,Vh,Fh,Zi,ad,Jt,Co,Ki,Da,jh,Qi,Eh,sd,Ge,Oa,Ph,Yi,Ch,qh,La,Mh,qr,zh,Ah,Dh,qo,Sa,Oh,el,Lh,nd,Gt,Mo,tl,Ia,Sh,ol,Ih,rd,L,Na,Nh,al,Bh,Uh,Ee,Mr,Hh,Rh,zr,Xh,Jh,Ar,Gh,Zh,Ba,sl,Kh,Qh,Yh,Dr,em,tm,om,zo,Ua,am,ct,sm,Ha,nl,nm,rm,im,Or,lm,cm,Ra,rl,dm,pm,hm,mm,Ao,Xa,fm,dt,um,Lr,gm,_m,Sr,vm,wm,Ir,bm,ym,km,yt,Ja,Tm,Ga,xm,Nr,Wm,$m,Vm,Do,Fm,kt,Za,jm,Zt,Em,il,Pm,Cm,Br,qm,Mm,zm,Oo,Am,Lo,Ka,Dm,Qa,Om,Ur,Lm,Sm,Im,So,Ya,Nm,es,Bm,Hr,Um,Hm,Rm,Io,ts,Xm,ll,Jm,id,Kt,No,cl,os,Gm,dl,Zm,ld,U,as,Km,pl,Qm,Ym,Bo,ss,ef,pt,tf,ns,hl,of,af,sf,Rr,nf,rf,rs,ml,lf,cf,df,pf,Uo,is,hf,ht,mf,Xr,ff,uf,Jr,gf,_f,Gr,vf,wf,bf,Tt,ls,yf,cs,kf,Zr,Tf,xf,Wf,Ho,$f,xt,ds,Vf,Qt,Ff,fl,jf,Ef,Kr,Pf,Cf,qf,Ro,Mf,Wt,ps,zf,ul,Af,Df,Xo,Of,Jo,hs,Lf,gl,Sf,If,Go,ms,Nf,_l,Bf,cd,Yt,Zo,vl,fs,Uf,wl,Hf,dd,eo,us,Rf,gs,Xf,bl,Jf,Gf,pd,to,_s,Zf,vs,Kf,yl,Qf,Yf,hd,oo,ws,eu,bs,tu,Qr,ou,au,md,mt,ys,su,ks,nu,kl,ru,iu,lu,Ko,Ts,cu,Tl,du,fd,ft,xs,pu,Ws,hu,xl,mu,fu,uu,Qo,$s,gu,Wl,_u,ud,ao,Yo,$l,Vs,vu,Vl,wu,gd,Te,Fs,bu,js,yu,Es,ku,Tu,xu,Ps,Wu,Yr,$u,Vu,Fu,Cs,ju,qs,Eu,Pu,Cu,Pe,Ms,qu,so,Mu,ei,zu,Au,Fl,Du,Ou,Lu,ea,Su,jl,Iu,Nu,zs,_d,no,ta,El,As,Bu,Pl,Uu,vd,xe,Ds,Hu,ro,Ru,Cl,Xu,Ju,Os,Gu,Zu,Ku,Ls,Qu,ti,Yu,eg,tg,Ss,og,Is,ag,sg,ng,Ce,Ns,rg,io,ig,oi,lg,cg,ql,dg,pg,hg,oa,mg,Ml,fg,ug,Bs,wd,lo,aa,zl,Us,gg,Al,_g,bd,ie,Hs,vg,Dl,wg,bg,Rs,yg,Xs,kg,Tg,xg,Js,Wg,ai,$g,Vg,Fg,Gs,jg,Zs,Eg,Pg,Cg,qe,Ks,qg,co,Mg,si,zg,Ag,Ol,Dg,Og,Lg,sa,Sg,Ll,Ig,Ng,Qs,yd,po,na,Sl,Ys,Bg,Il,Ug,kd,le,en,Hg,Nl,Rg,Xg,tn,Jg,on,Gg,Zg,Kg,an,Qg,ni,Yg,e_,t_,sn,o_,nn,a_,s_,n_,Me,rn,r_,ho,i_,ri,l_,c_,Bl,d_,p_,h_,ra,m_,Ul,f_,u_,ln,Td,mo,ia,Hl,cn,g_,Rl,__,xd,ce,dn,v_,Xl,w_,b_,pn,y_,hn,k_,T_,x_,mn,W_,ii,$_,V_,F_,fn,j_,un,E_,P_,C_,ze,gn,q_,fo,M_,li,z_,A_,Jl,D_,O_,L_,la,S_,Gl,I_,N_,_n,Wd,uo,ca,Zl,vn,B_,Kl,U_,$d,We,wn,H_,go,R_,Ql,X_,J_,bn,G_,Z_,K_,yn,Q_,ci,Y_,ev,tv,kn,ov,Tn,av,sv,nv,Ae,xn,rv,_o,iv,di,lv,cv,Yl,dv,pv,hv,da,mv,ec,fv,uv,Wn,Vd,vo,pa,tc,$n,gv,oc,_v,Fd,de,Vn,
vv,ac,wv,bv,Fn,yv,pi,kv,Tv,xv,jn,Wv,En,$v,Vv,Fv,ha,jv,De,Pn,Ev,wo,Pv,hi,Cv,qv,sc,Mv,zv,Av,ma,Dv,nc,Ov,Lv,Cn,jd,bo,fa,rc,qn,Sv,ic,Iv,Ed,pe,Mn,Nv,zn,Bv,lc,Uv,Hv,Rv,An,Xv,mi,Jv,Gv,Zv,Dn,Kv,On,Qv,Yv,e2,ua,t2,Oe,Ln,o2,yo,a2,fi,s2,n2,cc,r2,i2,l2,ga,c2,dc,d2,p2,Sn,Pd,ko,_a,pc,In,h2,hc,m2,Cd,Z,Nn,f2,Bn,u2,Un,g2,_2,v2,Hn,w2,ui,b2,y2,k2,Rn,T2,Xn,x2,W2,$2,mc,V2,F2,ut,fc,Jn,j2,E2,uc,Gn,P2,C2,gc,Zn,q2,M2,_c,Kn,z2,A2,Le,Qn,D2,To,O2,vc,L2,S2,wc,I2,N2,B2,va,U2,bc,H2,R2,Yn,qd,xo,wa,yc,er,X2,kc,J2,Md,K,tr,G2,Wo,Z2,Tc,K2,Q2,or,Y2,ew,tw,ar,ow,gi,aw,sw,nw,sr,rw,nr,iw,lw,cw,xc,dw,pw,gt,Wc,rr,hw,mw,$c,ir,fw,uw,Vc,lr,gw,_w,Fc,cr,vw,ww,Se,dr,bw,$o,yw,jc,kw,Tw,Ec,xw,Ww,$w,ba,Vw,Pc,Fw,jw,pr,zd,Vo,ya,Cc,hr,Ew,qc,Pw,Ad,Q,mr,Cw,Fo,qw,Mc,Mw,zw,fr,Aw,Dw,Ow,ur,Lw,_i,Sw,Iw,Nw,gr,Bw,_r,Uw,Hw,Rw,zc,Xw,Jw,_t,Ac,vr,Gw,Zw,Dc,wr,Kw,Qw,Oc,br,Yw,eb,Lc,yr,tb,ob,Ie,kr,ab,jo,sb,vi,nb,rb,Sc,ib,lb,cb,ka,db,Ic,pb,hb,Tr,Dd;return u=new Y({}),N=new Y({}),G=new Y({}),ye=new F({props:{name:"class transformers.Wav2Vec2Config",anchor:"transformers.Wav2Vec2Config",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"feat_quantizer_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"num_codevectors_per_group",val:" = 320"},{name:"num_codevector_groups",val:" = 2"},{name:"contrastive_logits_temperature",val:" = 0.1"},{name:"num_negatives",val:" = 100"},{name:"codevector_dim",val:" = 256"},{name:"proj_codevector_dim",val:" = 256"},{name:"diversity_loss_weight",val:" = 0.1"},{name:"ctc_loss_reduction",val:" = 'sum'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"tdnn_dim",val:" = (512, 512, 512, 512, 1500)"},{name:"tdnn_kernel",val:" = (5, 3, 3, 1, 1)"},{name:"tdnn_dilation",val:" = (1, 2, 3, 1, 1)"},{name:"xvector_output_dim",val:" = 512"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"add_adapter",val:" = False"},{name:"adapter_kernel_size",val:" = 3"},{name:"adapter_stride",val:" = 2"},{name:"num_adapter_layers",val:" = 3"},{name:"output_hidden_size",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/configuration_wav2vec2.py#L29",parametersDescription:[{anchor:"transformers.Wav2Vec2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the Wav2Vec2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Model">Wav2Vec2Model</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.TFWav2Vec2Model">TFWav2Vec2Model</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Model">Wav2Vec2Model</a>.`,name:"vocab_size"},{anchor:"transformers.Wav2Vec2Config.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.Wav2Vec2Config.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.Wav2Vec2Config.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.Wav2Vec2Config.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.Wav2Vec2Config.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.Wav2Vec2Config.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.Wav2Vec2Config.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.Wav2Vec2Config.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.Wav2Vec2Config.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.Wav2Vec2Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.Wav2Vec2Config.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature extractor. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.Wav2Vec2Config.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.Wav2Vec2Config.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.Wav2Vec2Config.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (obj &#x2014;<em>float</em>, <em>optional</em>, defaults to 0.0): The dropout probabilitiy for quantized feature extractor states.`,name:"feat_quantizer_dropout"},{anchor:"transformers.Wav2Vec2Config.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.Wav2Vec2Config.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.Wav2Vec2Config.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.Wav2Vec2Config.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.Wav2Vec2Config.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.Wav2Vec2Config.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.Wav2Vec2Config.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.Wav2Vec2Config.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.Wav2Vec2Config.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.Wav2Vec2Config.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.Wav2Vec2Config.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.Wav2Vec2Config.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.Wav2Vec2Config.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.Wav2Vec2Config.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. 
Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.Wav2Vec2Config.num_codevectors_per_group",description:`<strong>num_codevectors_per_group</strong> (<code>int</code>, <em>optional</em>, defaults to 320) &#x2014; Number of entries in each quantization codebook (group).`,name:"num_codevectors_per_group"},{anchor:"transformers.Wav2Vec2Config.num_codevector_groups",description:`<strong>num_codevector_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of codevector groups for product codevector quantization.`,name:"num_codevector_groups"},{anchor:"transformers.Wav2Vec2Config.contrastive_logits_temperature",description:`<strong>contrastive_logits_temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The temperature <em>kappa</em> in the contrastive loss.`,name:"contrastive_logits_temperature"},{anchor:"transformers.Wav2Vec2Config.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for the output of the feature extractor that&#x2019;s used by the quantizer.`,name:"feat_quantizer_dropout"},{anchor:"transformers.Wav2Vec2Config.num_negatives",description:`<strong>num_negatives</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Number of negative samples for the contrastive loss.`,name:"num_negatives"},{anchor:"transformers.Wav2Vec2Config.codevector_dim",description:`<strong>codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the quantized feature vectors.`,name:"codevector_dim"},{anchor:"transformers.Wav2Vec2Config.proj_codevector_dim",description:`<strong>proj_codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the final projection of both the quantized and the transformer features.`,name:"proj_codevector_dim"},{anchor:"transformers.Wav2Vec2Config.diversity_loss_weight",description:`<strong>diversity_loss_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The weight of the codebook diversity loss component.`,name:"diversity_loss_weight"},{anchor:"transformers.Wav2Vec2Config.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.Wav2Vec2Config.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. 
Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.Wav2Vec2Config.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification">Wav2Vec2ForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.Wav2Vec2Config.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"},{anchor:"transformers.Wav2Vec2Config.tdnn_dim",description:`<strong>tdnn_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 1500)</code>) &#x2014; A tuple of integers defining the number of output channels of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dim</em> defines the number of <em>TDNN</em> layers.`,name:"tdnn_dim"},{anchor:"transformers.Wav2Vec2Config.tdnn_kernel",description:`<strong>tdnn_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 3, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_kernel</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_kernel"},{anchor:"transformers.Wav2Vec2Config.tdnn_dilation",description:`<strong>tdnn_dilation</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(1, 2, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the dilation factor of each 1D convolutional layer in <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dilation</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_dilation"},{anchor:"transformers.Wav2Vec2Config.xvector_output_dim",description:`<strong>xvector_output_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the <em>XVector</em> embedding vectors.`,name:"xvector_output_dim"},{anchor:"transformers.Wav2Vec2Config.add_adapter",description:`<strong>add_adapter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for warm-starting Wav2Vec2 for SpeechEncoderDecoder models.`,name:"add_adapter"},{anchor:"transformers.Wav2Vec2Config.adapter_kernel_size",description:`<strong>adapter_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Kernel size of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"adapter_kernel_size"},{anchor:"transformers.Wav2Vec2Config.adapter_stride",description:`<strong>adapter_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Stride of the convolutional layers in the adapter network. 
Only relevant if <code>add_adapter is True</code>.`,name:"adapter_stride"},{anchor:"transformers.Wav2Vec2Config.num_adapter_layers",description:`<strong>num_adapter_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of convolutional layers that should be used in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"num_adapter_layers"},{anchor:"transformers.Wav2Vec2Config.output_hidden_size",description:`<strong>output_hidden_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimensionality of the encoder output layer. If not defined, this defaults to <em>hidden-size</em>. Only relevant if <code>add_adapter is True</code>.`,name:"output_hidden_size"}]}}),Ca=new at({props:{code:`from transformers import Wav2Vec2Model, Wav2Vec2Config # Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration configuration = Wav2Vec2Config() # Initializing a model from the facebook/wav2vec2-base-960h style configuration model = Wav2Vec2Model(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Model, Wav2Vec2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Wav2Vec2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/wav2vec2-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2Model(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),qa=new Y({}),Ma=new F({props:{name:"class transformers.Wav2Vec2CTCTokenizer",anchor:"transformers.Wav2Vec2CTCTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"word_delimiter_token",val:" = '|'"},{name:"do_lower_case",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/tokenization_wav2vec2.py#L82",parametersDescription:[{anchor:"transformers.Wav2Vec2CTCTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.Wav2Vec2CTCTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.word_delimiter_token",description:`<strong>word_delimiter_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;|&quot;</code>) &#x2014; The token used for defining the end of a word.`,name:"word_delimiter_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to accept lowercase input and lowercase the output when decoding.</p> <p>**kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>`,name:"do_lower_case"}]}}),Aa=new F({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2334",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),Da=new Y({}),Oa=new F({props:{name:"class transformers.Wav2Vec2FeatureExtractor",anchor:"transformers.Wav2Vec2FeatureExtractor",parameters:[{name:"feature_size",val:" = 1"},{name:"sampling_rate",val:" = 16000"},{name:"padding_value",val:" = 0.0"},{name:"return_attention_mask",val:" = False"},{name:"do_normalize",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L32",parametersDescription:[{anchor:"transformers.Wav2Vec2FeatureExtractor.feature_size",description:`<strong>feature_size</strong> (<code>int</code>, defaults to 1) &#x2014; The feature dimension of the extracted features.`,name:"feature_size"},{anchor:"transformers.Wav2Vec2FeatureExtractor.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>, defaults to 16000) &#x2014; The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz).`,name:"sampling_rate"},{anchor:"transformers.Wav2Vec2FeatureExtractor.padding_value",description:`<strong>padding_value</strong> (<code>float</code>, defaults to 0.0) &#x2014; The value that is used to fill the padding values.`,name:"padding_value"},{anchor:"transformers.Wav2Vec2FeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to zero-mean unit-variance normalize the input. 
Normalizing can help to significantly improve the performance for some models, <em>e.g.</em>, <a href="https://huggingface.co/models?search=lv60" rel="nofollow">wav2vec2-lv60</a>.`,name:"do_normalize"},{anchor:"transformers.Wav2Vec2FeatureExtractor.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor.__call__"><strong>call</strong>()</a> should return <code>attention_mask</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;group&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, have <strong>not</strong> been trained using <code>attention_mask</code>. For such models, <code>input_values</code> should simply be padded with 0 and no <code>attention_mask</code> should be passed.</p> <p>For Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;layer&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self" rel="nofollow">wav2vec2-lv60</a>, <code>attention_mask</code> should be passed for batched inference.</p> </div>`,name:"return_attention_mask"}]}}),Sa=new F({props:{name:"__call__",anchor:"transformers.Wav2Vec2FeatureExtractor.__call__",parameters:[{name:"raw_speech",val:": typing.Union[numpy.ndarray, typing.List[float], typing.List[numpy.ndarray], typing.List[typing.List[float]]]"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"truncation",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"sampling_rate",val:": typing.Optional[int] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L101",parametersDescription:[{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.raw_speech",description:`<strong>raw_speech</strong> (<code>np.ndarray</code>, <code>List[float]</code>, <code>List[np.ndarray]</code>, <code>List[List[float]]</code>) &#x2014; The sequence or batch of sequences to be padded. 
Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values.`,name:"raw_speech"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>) &#x2014; Activates truncation to cut input sequences longer than <em>max_length</em> to <em>max_length</em>.`,name:"truncation"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.</p> </blockquote>`,name:"pad_to_multiple_of"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor&#x2019;s default.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;group&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, have <strong>not</strong> been trained using <code>attention_mask</code>. 
For such models, <code>input_values</code> should simply be padded with 0 and no <code>attention_mask</code> should be passed.</p> <p>For Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;layer&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self" rel="nofollow">wav2vec2-lv60</a>, <code>attention_mask</code> should be passed for batched inference.</p> </div>`,name:"return_attention_mask"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>, <em>optional</em>) &#x2014; The sampling rate at which the <code>raw_speech</code> input was sampled. It is strongly recommended to pass <code>sampling_rate</code> at the forward call to prevent silent errors.`,name:"sampling_rate"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.padding_value",description:"<strong>padding_value</strong> (<code>float</code>, defaults to 0.0) &#x2014;",name:"padding_value"}]}}),Ia=new Y({}),Na=new F({props:{name:"class transformers.Wav2Vec2Processor",anchor:"transformers.Wav2Vec2Processor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L27",parametersDescription:[{anchor:"transformers.Wav2Vec2Processor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>Wav2Vec2FeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor">Wav2Vec2FeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Wav2Vec2Processor.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
The tokenizer is a required input.`,name:"tokenizer"}]}}),Ua=new F({props:{name:"__call__",anchor:"transformers.Wav2Vec2Processor.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L132"}}),Xa=new F({props:{name:"pad",anchor:"transformers.Wav2Vec2Processor.pad",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L142"}}),Ja=new F({props:{name:"from_pretrained",anchor:"transformers.Wav2Vec2Processor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L81",parametersDescription:[{anchor:"transformers.Wav2Vec2Processor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor">SequenceFeatureExtractor</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a></li> </ul>`,name:"pretrained_model_name_or_path"}]}}),Do=new oe({props:{$$slots:{default:[ik]},$$scope:{ctx:C}}}),Za=new F({props:{name:"save_pretrained",anchor:"transformers.Wav2Vec2Processor.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L58",parametersDescription:[{anchor:"transformers.Wav2Vec2Processor.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"}]}}),Oo=new oe({props:{$$slots:{default:[lk]},$$scope:{ctx:C}}}),Ka=new F({props:{name:"batch_decode",anchor:"transformers.Wav2Vec2Processor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L152"}}),Ya=new F({props:{name:"decode",anchor:"transformers.Wav2Vec2Processor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L160"}}),ts=new F({props:{name:"as_target_processor",anchor:"transformers.Wav2Vec2Processor.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/processing_wav2vec2.py#L168"}}),os=new Y({}),as=new F({props:{name:"class transformers.Wav2Vec2ProcessorWithLM",anchor:"transformers.Wav2Vec2ProcessorWithLM",parameters:[{name:"feature_extractor",val:": FeatureExtractionMixin"},{name:"tokenizer",val:": PreTrainedTokenizer"},{name:"decoder",val:": BeamSearchDecoderCTC"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L50",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor">Wav2Vec2FeatureExtractor</a>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor">Wav2Vec2FeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer">Wav2Vec2CTCTokenizer</a>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer">Wav2Vec2CTCTokenizer</a>. The tokenizer is a required input.`,name:"tokenizer"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decoder",description:`<strong>decoder</strong> (<code>pyctcdecode.BeamSearchDecoderCTC</code>) &#x2014; An instance of <code>pyctcdecode.BeamSearchDecoderCTC</code>. 
The decoder is a required input.`,name:"decoder"}]}}),ss=new F({props:{name:"__call__",anchor:"transformers.Wav2Vec2ProcessorWithLM.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L219"}}),is=new F({props:{name:"pad",anchor:"transformers.Wav2Vec2ProcessorWithLM.pad",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L229"}}),ls=new F({props:{name:"from_pretrained",anchor:"transformers.Wav2Vec2ProcessorWithLM.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L124",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor">SequenceFeatureExtractor</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a></li> </ul>`,name:"pretrained_model_name_or_path"}]}}),Ho=new oe({props:{$$slots:{default:[ck]},$$scope:{ctx:C}}}),ds=new F({props:{name:"save_pretrained",anchor:"transformers.Wav2Vec2ProcessorWithLM.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L98",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"}]}}),Ro=new oe({props:{$$slots:{default:[dk]},$$scope:{ctx:C}}}),ps=new F({props:{name:"batch_decode",anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode",parameters:[{name:"logits",val:": ndarray"},{name:"num_processes",val:": typing.Optional[int] = None"},{name:"beam_width",val:": typing.Optional[int] = None"},{name:"beam_prune_logp",val:": typing.Optional[float] = None"},{name:"token_min_logp",val:": typing.Optional[float] = None"},{name:"hotwords",val:": typing.Optional[typing.Iterable[str]] = None"},{name:"hotword_weight",val:": typing.Optional[float] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L239",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.logits",description:`<strong>logits</strong> (<code>np.ndarray</code>) &#x2014; The logits output vector of the model representing the log probabilities for each token.`,name:"logits"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.num_processes",description:`<strong>num_processes</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of processes on which the function should be parallelized over. Defaults to the number of available CPUs.`,name:"num_processes"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.beam_width",description:`<strong>beam_width</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum number of beams at each step in decoding. 
Defaults to pyctcdecode&#x2019;s DEFAULT_BEAM_WIDTH.`,name:"beam_width"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.beam_prune_logp",description:`<strong>beam_prune_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; Beams that are much worse than the best beam will be pruned. Defaults to pyctcdecode&#x2019;s DEFAULT_PRUNE_LOGP.`,name:"beam_prune_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.token_min_logp",description:`<strong>token_min_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; Tokens below this log-probability are skipped unless they are the argmax of the frame. Defaults to pyctcdecode&#x2019;s DEFAULT_MIN_TOKEN_LOGP.`,name:"token_min_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.hotwords",description:`<strong>hotwords</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of words with extra importance; they can be OOV for the LM.`,name:"hotwords"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.hotword_weight",description:`<strong>hotword_weight</strong> (<code>int</code>, <em>optional</em>) &#x2014; Weight factor for hotword importance. Defaults to pyctcdecode&#x2019;s DEFAULT_HOTWORD_WEIGHT.`,name:"hotword_weight"}],returnDescription:` <p><code>Wav2Vec2DecoderWithLMOutput</code> or <code>tuple</code>.</p> `}}),Xo=new oe({props:{$$slots:{default:[pk]},$$scope:{ctx:C}}}),hs=new F({props:{name:"decode",anchor:"transformers.Wav2Vec2ProcessorWithLM.decode",parameters:[{name:"logits",val:": ndarray"},{name:"beam_width",val:": typing.Optional[int] = None"},{name:"beam_prune_logp",val:": typing.Optional[float] = None"},{name:"token_min_logp",val:": typing.Optional[float] = None"},{name:"hotwords",val:": typing.Optional[typing.Iterable[str]] = None"},{name:"hotword_weight",val:": typing.Optional[float] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L314",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.logits",description:`<strong>logits</strong> (<code>np.ndarray</code>) &#x2014; The logits output vector of the model representing the log probabilities for each token.`,name:"logits"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.beam_width",description:`<strong>beam_width</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum number of beams at each step in decoding. Defaults to pyctcdecode&#x2019;s DEFAULT_BEAM_WIDTH.`,name:"beam_width"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.beam_prune_logp",description:`<strong>beam_prune_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; A threshold to prune beams with log-probs less than best_beam_logp + beam_prune_logp. The value should be &lt;= 0. Defaults to pyctcdecode&#x2019;s DEFAULT_PRUNE_LOGP.`,name:"beam_prune_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.token_min_logp",description:`<strong>token_min_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; Tokens with log-probs below token_min_logp are skipped unless they have the maximum log-prob for an utterance. Defaults to pyctcdecode&#x2019;s DEFAULT_MIN_TOKEN_LOGP.`,name:"token_min_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.hotwords",description:`<strong>hotwords</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of words with extra importance which can be missing from the LM&#x2019;s vocabulary, e.g. 
[&#x201C;huggingface&#x201D;]`,name:"hotwords"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.hotword_weight",description:`<strong>hotword_weight</strong> (<code>int</code>, <em>optional</em>) &#x2014; Weight multiplier that boosts hotword scores. Defaults to pyctcdecode&#x2019;s DEFAULT_HOTWORD_WEIGHT.`,name:"hotword_weight"}],returnDescription:` <p><code>Wav2Vec2DecoderWithLMOutput</code> or <code>tuple</code>.</p> `}}),ms=new F({props:{name:"as_target_processor",anchor:"transformers.Wav2Vec2ProcessorWithLM.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L372"}}),fs=new Y({}),us=new F({props:{name:"class transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput",anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput",parameters:[{name:"text",val:": typing.Union[typing.List[str], str]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L38",parametersDescription:[{anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput.text",description:`<strong>text</strong> (list of <code>str</code>) &#x2014; Decoded logits in text from. Usually the speech transcription.`,name:"text"}]}}),_s=new F({props:{name:"class transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput",anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"extract_features",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L73",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.`,name:"extract_features"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.attentions",description:`<strong>attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ws=new F({props:{name:"class transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput",anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"projected_states",val:": FloatTensor = None"},{name:"projected_quantized_states",val:": FloatTensor = None"},{name:"codevector_perplexity",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"contrastive_loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"diversity_loss",val:": typing.Optional[torch.FloatTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L101",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> . 
(classification) loss.`,name:"loss"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.projected_states",description:`<strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.`,name:"projected_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.projected_quantized_states",description:`<strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.`,name:"projected_quantized_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.contrastive_loss",description:`<strong>contrastive_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; The contrastive loss (L_m) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> .`,name:"contrastive_loss"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.diversity_loss",description:`<strong>diversity_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; The diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> .`,name:"diversity_loss"}]}}),ys=new F({props:{name:"class transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput",anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"extract_features",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"},{name:"attentions",val:": 
typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L46",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, last_conv_dim)</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model with <code>last_conv_dim</code> being the dimension of the last convolutional layer.`,name:"extract_features"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ts=new F({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/flax/struct.py#L120"}}),xs=new F({props:{name:"class transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput",anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput",parameters:[{name:"projected_states",val:": ndarray = None"},{name:"projected_quantized_states",val:": ndarray = None"},{name:"codevector_perplexity",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L75",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>jnp.ndarray</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a 
href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> . (classification) loss.`,name:"loss"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.projected_states",description:`<strong>projected_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.`,name:"projected_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.projected_quantized_states",description:`<strong>projected_quantized_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.`,name:"projected_quantized_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),$s=new F({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/flax/struct.py#L120"}}),Vs=new Y({}),Fs=new F({props:{name:"class transformers.Wav2Vec2Model",anchor:"transformers.Wav2Vec2Model",parameters:[{name:"config",val:": Wav2Vec2Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1189",parametersDescription:[{anchor:"transformers.Wav2Vec2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ms=new F({props:{name:"forward",anchor:"transformers.Wav2Vec2Model.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1254",parametersDescription:[{anchor:"transformers.Wav2Vec2Model.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ea=new oe({props:{$$slots:{default:[hk]},$$scope:{ctx:C}}}),zs=new at({props:{code:`from transformers import Wav2Vec2Processor, Wav2Vec2Model from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h') model = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h') # audio file is 
decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Wav2Vec2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2Model.from_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),As=new Y({}),Ds=new F({props:{name:"class transformers.Wav2Vec2ForCTC",anchor:"transformers.Wav2Vec2ForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1606",parametersDescription:[{anchor:"transformers.Wav2Vec2ForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ns=new F({props:{name:"forward",anchor:"transformers.Wav2Vec2ForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1632",parametersDescription:[{anchor:"transformers.Wav2Vec2ForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. 
Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. 
Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),oa=new oe({props:{$$slots:{default:[mk]},$$scope:{ctx:C}}}),Bs=new at({props:{code:`from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h') model = Wav2Vec2ForCTC.from_pretrained('facebook/wav2vec2-base-960h') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, 
Wav2Vec2ForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForCTC.from_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),Us=new Y({}),Hs=new F({props:{name:"class transformers.Wav2Vec2ForSequenceClassification",anchor:"transformers.Wav2Vec2ForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1717",parametersDescription:[{anchor:"transformers.Wav2Vec2ForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ks=new F({props:{name:"forward",anchor:"transformers.Wav2Vec2ForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1746",parametersDescription:[{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> 
`}}),sa=new oe({props:{$$slots:{default:[fk]},$$scope:{ctx:C}}}),Qs=new at({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('superb/wav2vec2-base-superb-ks') model = Wav2Vec2ForSequenceClassification.from_pretrained('superb/wav2vec2-base-superb-ks') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;superb/wav2vec2-base-superb-ks&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;superb/wav2vec2-base-superb-ks&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),Ys=new Y({}),en=new F({props:{name:"class transformers.Wav2Vec2ForAudioFrameClassification",anchor:"transformers.Wav2Vec2ForAudioFrameClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1821",parametersDescription:[{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rn=new F({props:{name:"forward",anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1848",parametersDescription:[{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ra=new oe({props:{$$slots:{default:[uk]},$$scope:{ctx:C}}}),ln=new at({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('superb/wav2vec2-base-superb-sd') model = Wav2Vec2ForAudioFrameClassification.from_pretrained('superb/wav2vec2-base-superb-sd') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits probabilities = torch.sigmoid(logits[0]) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (probabilities > 0.5).long(),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset 
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;superb/wav2vec2-base-superb-sd&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForAudioFrameClassification.from_pretrained(<span class="hljs-string">&#x27;superb/wav2vec2-base-superb-sd&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probabilities = torch.sigmoid(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># labels is a one-hot array of shape (num_frames, num_speakers)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = (probabilities &gt; <span class="hljs-number">0.5</span>).long()`}}),cn=new Y({}),dn=new F({props:{name:"class transformers.Wav2Vec2ForXVector",anchor:"transformers.Wav2Vec2ForXVector",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1958",parametersDescription:[{anchor:"transformers.Wav2Vec2ForXVector.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gn=new F({props:{name:"forward",anchor:"transformers.Wav2Vec2ForXVector.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L2008",parametersDescription:[{anchor:"transformers.Wav2Vec2ForXVector.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForXVector.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForXVector.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForXVector.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForXVector.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForXVector.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.wav2vec2.modeling_wav2vec2.XVectorOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Classification hidden states before AMSoftmax.</p> </li> <li> <p><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Utterance embeddings used for vector similarity-based retrieval.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.wav2vec2.modeling_wav2vec2.XVectorOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),la=new oe({props:{$$slots:{default:[gk]},$$scope:{ctx:C}}}),_n=new at({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForXVector from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('superb/wav2vec2-base-superb-sv') model = Wav2Vec2ForXVector.from_pretrained('superb/wav2vec2-base-superb-sv') # audio file is decoded on the fly inputs = feature_extractor(dataset[:2]["audio"]["array"], return_tensors="pt") embeddings = model(**inputs).embeddings embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() # the resulting embeddings can be used for cosine similarity-based retrieval cosine_sim = torch.nn.CosineSimilarity(dim=-1) similarity = cosine_sim(embeddings[0], embeddings[1]) threshold = 0.7 # the optimal threshold is dataset-dependent if similarity < threshold: print("Speakers are not the same!"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;superb/wav2vec2-base-superb-sv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForXVector.from_pretrained(<span class="hljs-string">&#x27;superb/wav2vec2-base-superb-sv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[:<span class="hljs-number">2</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(**inputs).embeddings <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = torch.nn.functional.normalize(embeddings, dim=-<span class="hljs-number">1</span>).cpu() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the resulting embeddings can be used for cosine similarity-based retrieval</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.nn.CosineSimilarity(dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>similarity = cosine_sim(embeddings[<span class="hljs-number">0</span>], embeddings[<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>threshold = <span class="hljs-number">0.7</span> <span class="hljs-comment"># the optimal threshold is dataset-dependent</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">if</span> similarity &lt; threshold: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Speakers are not the same!&quot;</span>)`}}),vn=new Y({}),wn=new F({props:{name:"class transformers.Wav2Vec2ForPreTraining",anchor:"transformers.Wav2Vec2ForPreTraining",parameters:[{name:"config",val:": Wav2Vec2Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1316",parametersDescription:[{anchor:"transformers.Wav2Vec2ForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xn=new F({props:{name:"forward",anchor:"transformers.Wav2Vec2ForPreTraining.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"sampled_negative_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1365",parametersDescription:[{anchor:"transformers.Wav2Vec2ForPreTraining.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.sampled_negative_indices",description:`<strong>sampled_negative_indices</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, sequence_length, num_negatives)</code>, <em>optional</em>) &#x2014; Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss. Required input for pre-training.`,name:"sampled_negative_indices"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> . 
(classification) loss.</p> </li> <li> <p><strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.</p> </li> <li> <p><strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>contrastive_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 The contrastive loss (L_m) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> .</p> </li> <li> <p><strong>diversity_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 The diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> .</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),da=new oe({props:{$$slots:{default:[_k]},$$scope:{ctx:C}}}),Wn=new at({props:{code:`import torch from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices from datasets import load_dataset import soundfile as sf feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("patrickvonplaten/wav2vec2-base") model = Wav2Vec2ForPreTraining.from_pretrained("patrickvonplaten/wav2vec2-base") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = feature_extractor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 # compute masked indices batch_size, raw_sequence_length = input_values.shape sequence_length = 
model._get_feat_extract_output_lengths(raw_sequence_length) mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) with torch.no_grad(): outputs = model(input_values, mask_time_indices=mask_time_indices) # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) cosine_sim = torch.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, dim=-1 ) # show that cosine similarity is much higher than random assert cosine_sim[mask_time_indices].mean() > 0.5 # for contrastive loss training model should be put into train mode model.train() loss = model(input_values, mask_time_indices=mask_time_indices).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.wav2vec2.modeling_wav2vec2 <span class="hljs-keyword">import</span> _compute_mask_indices <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForPreTraining.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute masked indices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size, raw_sequence_length = input_values.shape <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=<span class="hljs-number">0.2</span>, mask_length=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(input_values, mask_time_indices=mask_time_indices) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.cosine_similarity( <span class="hljs-meta">... </span> outputs.projected_states, outputs.projected_quantized_states, dim=-<span class="hljs-number">1</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># show that cosine similarity is much higher than random</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> cosine_sim[mask_time_indices].mean() &gt; <span class="hljs-number">0.5</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># for contrastive loss training model should be put into train mode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, mask_time_indices=mask_time_indices).loss`}}),$n=new Y({}),Vn=new F({props:{name:"class transformers.TFWav2Vec2Model",anchor:"transformers.TFWav2Vec2Model",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1374",parametersDescription:[{anchor:"transformers.TFWav2Vec2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ha=new oe({props:{$$slots:{default:[vk]},$$scope:{ctx:C}}}),Pn=new F({props:{name:"call",anchor:"transformers.TFWav2Vec2Model.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1380",parametersDescription:[{anchor:"transformers.TFWav2Vec2Model.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFWav2Vec2Model.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFWav2Vec2Model.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFWav2Vec2Model.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFWav2Vec2Model.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFWav2Vec2Model.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFWav2Vec2Model.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFWav2Vec2Model.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFWav2Vec2Model.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFWav2Vec2Model.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ma=new oe({props:{$$slots:{default:[wk]},$$scope:{ctx:C}}}),Cn=new at({props:{code:`from transformers import Wav2Vec2Processor, TFWav2Vec2Model from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFWav2Vec2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span 
class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFWav2Vec2Model.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">&gt;&gt;&gt; </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">&gt;&gt;&gt; </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),qn=new Y({}),Mn=new F({props:{name:"class transformers.TFWav2Vec2ForCTC",anchor:"transformers.TFWav2Vec2ForCTC",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1470",parametersDescription:[{anchor:"transformers.TFWav2Vec2ForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ua=new oe({props:{$$slots:{default:[bk]},$$scope:{ctx:C}}}),Ln=new F({props:{name:"call",anchor:"transformers.TFWav2Vec2ForCTC.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1485",parametersDescription:[{anchor:"transformers.TFWav2Vec2ForCTC.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFWav2Vec2ForCTC.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFWav2Vec2ForCTC.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFWav2Vec2ForCTC.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFWav2Vec2ForCTC.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFWav2Vec2ForCTC.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFWav2Vec2ForCTC.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFWav2Vec2ForCTC.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFWav2Vec2ForCTC.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFWav2Vec2ForCTC.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFWav2Vec2ForCTC.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_values</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ga=new oe({props:{$$slots:{default:[yk]},$$scope:{ctx:C}}}),Sn=new at({props:{code:`import tensorflow as tf from transformers import Wav2Vec2Processor, TFWav2Vec2ForCTC from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") model = 
TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) transcription = processor.decode(predicted_ids[0]) # compute loss target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" # wrap processor as target processor to encode labels with processor.as_target_processor(): labels = processor(transcription, return_tensors="tf").input_ids loss = model(input_values, labels=labels).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFWav2Vec2ForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFWav2Vec2ForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">&gt;&gt;&gt; </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">&gt;&gt;&gt; </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = tf.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.decode(predicted_ids[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_transcription = <span class="hljs-string">&quot;A MAN SAID TO THE UNIVERSE SIR I EXIST&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># wrap processor as target 
processor to encode labels</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">&gt;&gt;&gt; </span> labels = processor(transcription, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, labels=labels).loss`}}),In=new Y({}),Nn=new F({props:{name:"class transformers.FlaxWav2Vec2Model",anchor:"transformers.FlaxWav2Vec2Model",parameters:[{name:"config",val:": Wav2Vec2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1024)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L933",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxWav2Vec2Model.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Qn=new F({props:{name:"__call__",anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L795",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.input_values",description:`<strong>input_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. 
Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>jnp.ndarray</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a> .. warning:: <code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.`,name:"attention_mask"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.wav2vec2.configuration_wav2vec2.Wav2Vec2Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, last_conv_dim)</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model with <code>last_conv_dim</code> being the dimension of the last convolutional layer.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),va=new oe({props:{$$slots:{default:[kk]},$$scope:{ctx:C}}}),Yn=new at({props:{code:`from transformers import Wav2Vec2Processor, FlaxWav2Vec2Model from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-lv60") model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="np").input_values # Batch size 1 hidden_states = 
model(input_values).last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, FlaxWav2Vec2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxWav2Vec2Model.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">&gt;&gt;&gt; </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">&gt;&gt;&gt; </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),er=new Y({}),tr=new F({props:{name:"class transformers.FlaxWav2Vec2ForCTC",anchor:"transformers.FlaxWav2Vec2ForCTC",parameters:[{name:"config",val:": Wav2Vec2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1024)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1035",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2ForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxWav2Vec2ForCTC.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. 
Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),dr=new F({props:{name:"__call__",anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L795",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.input_values",description:`<strong>input_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>jnp.ndarray</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a> .. warning:: <code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.`,name:"attention_mask"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.wav2vec2.configuration_wav2vec2.Wav2Vec2Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),ba=new oe({props:{$$slots:{default:[Tk]},$$scope:{ctx:C}}}),pr=new at({props:{code:`import jax.numpy as jnp from transformers import Wav2Vec2Processor, FlaxWav2Vec2ForCTC from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60") model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="np").input_values # Batch size 1 logits = model(input_values).logits predicted_ids = jnp.argmax(logits, axis=-1) transcription = processor.decode(predicted_ids[0]) # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST",`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, FlaxWav2Vec2ForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-960h-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxWav2Vec2ForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-960h-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">&gt;&gt;&gt; </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">&gt;&gt;&gt; </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = jnp.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.decode(predicted_ids[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># should give: &quot;A MAN SAID TO 
THE UNIVERSE SIR I EXIST&quot;</span>`}}),hr=new Y({}),mr=new F({props:{name:"class transformers.FlaxWav2Vec2ForPreTraining",anchor:"transformers.FlaxWav2Vec2ForPreTraining",parameters:[{name:"config",val:": Wav2Vec2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1024)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1171",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2ForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),kr=new F({props:{name:"__call__",anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"gumbel_temperature",val:": int = 1"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"gumbel_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1174",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.input_values",description:`<strong>input_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>jnp.ndarray</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a> .. warning:: <code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.`,name:"attention_mask"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.wav2vec2.configuration_wav2vec2.Wav2Vec2Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>jnp.ndarray</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> . (classification) loss.</p> </li> <li> <p><strong>projected_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.</p> </li> <li> <p><strong>projected_quantized_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ka=new oe({props:{$$slots:{default:[xk]},$$scope:{ctx:C}}}),Tr=new at({props:{code:`import optax import numpy as np import jax.numpy as jnp from transformers import Wav2Vec2FeatureExtractor, FlaxWav2Vec2ForPreTraining from 
transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices from datasets import load_dataset import soundfile as sf feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 # compute masked indices batch_size, raw_sequence_length = input_values.shape sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) outputs = model(input_values, mask_time_indices=mask_time_indices) # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) cosine_sim = optax.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states ) # show that cosine similarity is much higher than random assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> optax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, FlaxWav2Vec2ForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.wav2vec2.modeling_flax_wav2vec2 <span class="hljs-keyword">import</span> _compute_mask_indices <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxWav2Vec2ForPreTraining.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute masked indices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size, raw_sequence_length = input_values.shape <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=<span class="hljs-number">0.2</span>, mask_length=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_values, mask_time_indices=mask_time_indices) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = optax.cosine_similarity( <span class="hljs-meta">... </span> outputs.projected_states, outputs.projected_quantized_states <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># show that cosine similarity is much higher than random</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> np.asarray(cosine_sim)[mask_time_indices].mean() &gt; <span class="hljs-number">0.5</span>`}}),{c(){p=a("meta"),x=c(),f=a("h1"),T=a("a"),W=a("span"),_(u.$$.fragment),g=c(),$=a("span"),q=r("Wav2Vec2"),P=c(),j=a("h2"),A=a("a"),D=a("span"),_(N.$$.fragment),M=c(),E=a("span"),ae=r("Overview"),ee=c(),S=a("p"),B=r("The Wav2Vec2 model was proposed in "),fe=a("a"),He=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),O=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),H=c(),$e=a("p"),ue=r("The abstract from the paper is the following:"),Ze=c(),Ve=a("p"),se=a("em"),st=r(`We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. 
This demonstrates the feasibility of speech recognition with limited amounts of labeled data.`),Ke=c(),I=a("p"),nt=r("Tips:"),he=c(),me=a("ul"),Re=a("li"),ge=r("Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),rt=c(),_e=a("li"),ne=r(`Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),Fe=a("a"),ve=r("Wav2Vec2CTCTokenizer"),it=r("."),V=c(),z=a("p"),Xe=r("This model was contributed by "),Je=a("a"),Nt=r("patrickvonplaten"),re=r("."),wt=c(),we=a("h2"),je=a("a"),J=a("span"),_(G.$$.fragment),Bt=c(),lt=a("span"),be=r("Wav2Vec2Config"),bt=c(),R=a("div"),_(ye.$$.fragment),Ut=c(),Ht=a("p"),ah=r("This is the configuration class to store the configuration of a "),jr=a("a"),sh=r("Wav2Vec2Model"),nh=r(`. It is used to instantiate an Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2 `),Pa=a("a"),rh=r("facebook/wav2vec2-base-960h"),ih=r(" architecture."),lh=c(),Rt=a("p"),ch=r("Configuration objects inherit from "),Er=a("a"),dh=r("PretrainedConfig"),ph=r(` and can be used to control the model outputs. Read the documentation from `),Pr=a("a"),hh=r("PretrainedConfig"),mh=r(" for more information."),fh=c(),Hi=a("p"),uh=r("Example:"),gh=c(),_(Ca.$$.fragment),td=c(),Xt=a("h2"),Eo=a("a"),Ri=a("span"),_(qa.$$.fragment),_h=c(),Xi=a("span"),vh=r("Wav2Vec2CTCTokenizer"),od=c(),ke=a("div"),_(Ma.$$.fragment),wh=c(),Ji=a("p"),bh=r("Constructs a Wav2Vec2CTC tokenizer."),yh=c(),za=a("p"),kh=r("This tokenizer inherits from "),Cr=a("a"),Th=r("PreTrainedTokenizer"),xh=r(` which contains some of the main methods. Users should refer to the superclass for more information regarding such methods.`),Wh=c(),Po=a("div"),_(Aa.$$.fragment),$h=c(),Gi=a("p"),Vh=r(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Fh=c(),Zi=a("div"),ad=c(),Jt=a("h2"),Co=a("a"),Ki=a("span"),_(Da.$$.fragment),jh=c(),Qi=a("span"),Eh=r("Wav2Vec2FeatureExtractor"),sd=c(),Ge=a("div"),_(Oa.$$.fragment),Ph=c(),Yi=a("p"),Ch=r("Constructs a Wav2Vec2 feature extractor."),qh=c(),La=a("p"),Mh=r(`This feature extractor inherits from `),qr=a("a"),zh=r("SequenceFeatureExtractor"),Ah=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Dh=c(),qo=a("div"),_(Sa.$$.fragment),Oh=c(),el=a("p"),Lh=r("Main method to featurize and prepare for the model one or several sequence(s). sequences."),nd=c(),Gt=a("h2"),Mo=a("a"),tl=a("span"),_(Ia.$$.fragment),Sh=c(),ol=a("span"),Ih=r("Wav2Vec2Processor"),rd=c(),L=a("div"),_(Na.$$.fragment),Nh=c(),al=a("p"),Bh=r(`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single processor.`),Uh=c(),Ee=a("p"),Mr=a("a"),Hh=r("Wav2Vec2Processor"),Rh=r(` offers all the functionalities of `),zr=a("a"),Xh=r("Wav2Vec2FeatureExtractor"),Jh=r(" and "),Ar=a("a"),Gh=r("PreTrainedTokenizer"),Zh=r(`. See the docstring of `),Ba=a("a"),sl=a("strong"),Kh=r("call"),Qh=r("()"),Yh=r(" and "),Dr=a("a"),em=r("decode()"),tm=r(` for more information.`),om=c(),zo=a("div"),_(Ua.$$.fragment),am=c(),ct=a("p"),sm=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Ha=a("a"),nl=a("strong"),nm=r("call"),rm=r("()"),im=r(` and returns its output. 
If used in the context `),Or=a("a"),lm=r("as_target_processor()"),cm=r(` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ra=a("a"),rl=a("strong"),dm=r("call"),pm=r("()"),hm=r(`. Please refer to the docstring of the above two methods for more information.`),mm=c(),Ao=a("div"),_(Xa.$$.fragment),fm=c(),dt=a("p"),um=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Lr=a("a"),gm=r("pad()"),_m=r(` and returns its output. If used in the context `),Sr=a("a"),vm=r("as_target_processor()"),wm=r(` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ir=a("a"),bm=r("pad()"),ym=r(`. Please refer to the docstring of the above two methods for more information.`),km=c(),yt=a("div"),_(Ja.$$.fragment),Tm=c(),Ga=a("p"),xm=r("Instantiate a "),Nr=a("a"),Wm=r("Wav2Vec2Processor"),$m=r(" from a pretrained Wav2Vec2 processor."),Vm=c(),_(Do.$$.fragment),Fm=c(),kt=a("div"),_(Za.$$.fragment),jm=c(),Zt=a("p"),Em=r("Save a Wav2Vec2 feature_extractor object and Wav2Vec2 tokenizer object to the directory "),il=a("code"),Pm=r("save_directory"),Cm=r(`, so that it can be re-loaded using the `),Br=a("a"),qm=r("from_pretrained()"),Mm=r(" class method."),zm=c(),_(Oo.$$.fragment),Am=c(),Lo=a("div"),_(Ka.$$.fragment),Dm=c(),Qa=a("p"),Om=r(`This method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ur=a("a"),Lm=r("batch_decode()"),Sm=r(`. Please refer to the docstring of this method for more information.`),Im=c(),So=a("div"),_(Ya.$$.fragment),Nm=c(),es=a("p"),Bm=r(`This method forwards all its arguments to PreTrainedTokenizer\u2019s `),Hr=a("a"),Um=r("decode()"),Hm=r(`. Please refer to the docstring of this method for more information.`),Rm=c(),Io=a("div"),_(ts.$$.fragment),Xm=c(),ll=a("p"),Jm=r(`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Wav2Vec2.`),id=c(),Kt=a("h2"),No=a("a"),cl=a("span"),_(os.$$.fragment),Gm=c(),dl=a("span"),Zm=r("Wav2Vec2ProcessorWithLM"),ld=c(),U=a("div"),_(as.$$.fragment),Km=c(),pl=a("p"),Qm=r(`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder with language model support into a single processor for language model boosted speech recognition decoding.`),Ym=c(),Bo=a("div"),_(ss.$$.fragment),ef=c(),pt=a("p"),tf=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),ns=a("a"),hl=a("strong"),of=r("call"),af=r("()"),sf=r(` and returns its output. If used in the context `),Rr=a("a"),nf=r("as_target_processor()"),rf=r(` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),rs=a("a"),ml=a("strong"),lf=r("call"),cf=r("()"),df=r(`. Please refer to the docstring of the above two methods for more information.`),pf=c(),Uo=a("div"),_(is.$$.fragment),hf=c(),ht=a("p"),mf=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Xr=a("a"),ff=r("pad()"),uf=r(` and returns its output. If used in the context `),Jr=a("a"),gf=r("as_target_processor()"),_f=r(` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),Gr=a("a"),vf=r("pad()"),wf=r(`. 
Please refer to the docstring of the above two methods for more information.`),bf=c(),Tt=a("div"),_(ls.$$.fragment),yf=c(),cs=a("p"),kf=r("Instantiate a "),Zr=a("a"),Tf=r("Wav2Vec2ProcessorWithLM"),xf=r(" from a pretrained Wav2Vec2 processor."),Wf=c(),_(Ho.$$.fragment),$f=c(),xt=a("div"),_(ds.$$.fragment),Vf=c(),Qt=a("p"),Ff=r(`Save the Wav2Vec2 feature_extractor, a tokenizer object and a pyctcdecode decoder to the directory `),fl=a("code"),jf=r("save_directory"),Ef=r(`, so that they can be re-loaded using the `),Kr=a("a"),Pf=r("from_pretrained()"),Cf=r(" class method."),qf=c(),_(Ro.$$.fragment),Mf=c(),Wt=a("div"),_(ps.$$.fragment),zf=c(),ul=a("p"),Af=r("Batch decode output logits to audio transcription with language model support."),Df=c(),_(Xo.$$.fragment),Of=c(),Jo=a("div"),_(hs.$$.fragment),Lf=c(),gl=a("p"),Sf=r("Decode output logits to audio transcription with language model support."),If=c(),Go=a("div"),_(ms.$$.fragment),Nf=c(),_l=a("p"),Bf=r(`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Wav2Vec2.`),cd=c(),Yt=a("h2"),Zo=a("a"),vl=a("span"),_(fs.$$.fragment),Uf=c(),wl=a("span"),Hf=r("Wav2Vec2 specific outputs"),dd=c(),eo=a("div"),_(us.$$.fragment),Rf=c(),gs=a("p"),Xf=r("Output type of "),bl=a("code"),Jf=r("Wav2Vec2DecoderWithLM"),Gf=r(", with transcription."),pd=c(),to=a("div"),_(_s.$$.fragment),Zf=c(),vs=a("p"),Kf=r("Output type of "),yl=a("code"),Qf=r("Wav2Vec2BaseModelOutput"),Yf=r(", with potential hidden states and attentions."),hd=c(),oo=a("div"),_(ws.$$.fragment),eu=c(),bs=a("p"),tu=r("Output type of "),Qr=a("a"),ou=r("Wav2Vec2ForPreTraining"),au=r(", with potential hidden states and attentions."),md=c(),mt=a("div"),_(ys.$$.fragment),su=c(),ks=a("p"),nu=r("Output type of "),kl=a("code"),ru=r("FlaxWav2Vec2BaseModelOutput"),iu=r(", with potential hidden states and attentions."),lu=c(),Ko=a("div"),_(Ts.$$.fragment),cu=c(),Tl=a("p"),du=r("\u201CReturns a new object replacing the specified fields with new values."),fd=c(),ft=a("div"),_(xs.$$.fragment),pu=c(),Ws=a("p"),hu=r("Output type of "),xl=a("code"),mu=r("FlaxWav2Vec2ForPreTrainingOutput"),fu=r(`, with potential hidden states and attentions.`),uu=c(),Qo=a("div"),_($s.$$.fragment),gu=c(),Wl=a("p"),_u=r("\u201CReturns a new object replacing the specified fields with new values."),ud=c(),ao=a("h2"),Yo=a("a"),$l=a("span"),_(Vs.$$.fragment),vu=c(),Vl=a("span"),wu=r("Wav2Vec2Model"),gd=c(),Te=a("div"),_(Fs.$$.fragment),bu=c(),js=a("p"),yu=r(`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Es=a("a"),ku=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Tu=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),xu=c(),Ps=a("p"),Wu=r("This model inherits from "),Yr=a("a"),$u=r("PreTrainedModel"),Vu=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Fu=c(),Cs=a("p"),ju=r("This model is a PyTorch "),qs=a("a"),Eu=r("torch.nn.Module"),Pu=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cu=c(),Pe=a("div"),_(Ms.$$.fragment),qu=c(),so=a("p"),Mu=r("The "),ei=a("a"),zu=r("Wav2Vec2Model"),Au=r(" forward method, overrides the "),Fl=a("code"),Du=r("__call__"),Ou=r(" special method."),Lu=c(),_(ea.$$.fragment),Su=c(),jl=a("p"),Iu=r("Example:"),Nu=c(),_(zs.$$.fragment),_d=c(),no=a("h2"),ta=a("a"),El=a("span"),_(As.$$.fragment),Bu=c(),Pl=a("span"),Uu=r("Wav2Vec2ForCTC"),vd=c(),xe=a("div"),_(Ds.$$.fragment),Hu=c(),ro=a("p"),Ru=r("Wav2Vec2 Model with a "),Cl=a("code"),Xu=r("language modeling"),Ju=r(` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),Os=a("a"),Gu=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Zu=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ku=c(),Ls=a("p"),Qu=r("This model inherits from "),ti=a("a"),Yu=r("PreTrainedModel"),eg=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),tg=c(),Ss=a("p"),og=r("This model is a PyTorch "),Is=a("a"),ag=r("torch.nn.Module"),sg=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ng=c(),Ce=a("div"),_(Ns.$$.fragment),rg=c(),io=a("p"),ig=r("The "),oi=a("a"),lg=r("Wav2Vec2ForCTC"),cg=r(" forward method, overrides the "),ql=a("code"),dg=r("__call__"),pg=r(" special method."),hg=c(),_(oa.$$.fragment),mg=c(),Ml=a("p"),fg=r("Example:"),ug=c(),_(Bs.$$.fragment),wd=c(),lo=a("h2"),aa=a("a"),zl=a("span"),_(Us.$$.fragment),gg=c(),Al=a("span"),_g=r("Wav2Vec2ForSequenceClassification"),bd=c(),ie=a("div"),_(Hs.$$.fragment),vg=c(),Dl=a("p"),wg=r(`Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),bg=c(),Rs=a("p"),yg=r("Wav2Vec2 was proposed in "),Xs=a("a"),kg=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Tg=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),xg=c(),Js=a("p"),Wg=r("This model inherits from "),ai=a("a"),$g=r("PreTrainedModel"),Vg=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Fg=c(),Gs=a("p"),jg=r("This model is a PyTorch "),Zs=a("a"),Eg=r("torch.nn.Module"),Pg=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cg=c(),qe=a("div"),_(Ks.$$.fragment),qg=c(),co=a("p"),Mg=r("The "),si=a("a"),zg=r("Wav2Vec2ForSequenceClassification"),Ag=r(" forward method, overrides the "),Ol=a("code"),Dg=r("__call__"),Og=r(" special method."),Lg=c(),_(sa.$$.fragment),Sg=c(),Ll=a("p"),Ig=r("Example:"),Ng=c(),_(Qs.$$.fragment),yd=c(),po=a("h2"),na=a("a"),Sl=a("span"),_(Ys.$$.fragment),Bg=c(),Il=a("span"),Ug=r("Wav2Vec2ForAudioFrameClassification"),kd=c(),le=a("div"),_(en.$$.fragment),Hg=c(),Nl=a("p"),Rg=r("Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization."),Xg=c(),tn=a("p"),Jg=r("Wav2Vec2 was proposed in "),on=a("a"),Gg=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Zg=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Kg=c(),an=a("p"),Qg=r("This model inherits from "),ni=a("a"),Yg=r("PreTrainedModel"),e_=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),t_=c(),sn=a("p"),o_=r("This model is a PyTorch "),nn=a("a"),a_=r("torch.nn.Module"),s_=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),n_=c(),Me=a("div"),_(rn.$$.fragment),r_=c(),ho=a("p"),i_=r("The "),ri=a("a"),l_=r("Wav2Vec2ForAudioFrameClassification"),c_=r(" forward method, overrides the "),Bl=a("code"),d_=r("__call__"),p_=r(" special method."),h_=c(),_(ra.$$.fragment),m_=c(),Ul=a("p"),f_=r("Example:"),u_=c(),_(ln.$$.fragment),Td=c(),mo=a("h2"),ia=a("a"),Hl=a("span"),_(cn.$$.fragment),g_=c(),Rl=a("span"),__=r("Wav2Vec2ForXVector"),xd=c(),ce=a("div"),_(dn.$$.fragment),v_=c(),Xl=a("p"),w_=r("Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification."),b_=c(),pn=a("p"),y_=r("Wav2Vec2 was proposed in "),hn=a("a"),k_=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),T_=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),x_=c(),mn=a("p"),W_=r("This model inherits from "),ii=a("a"),$_=r("PreTrainedModel"),V_=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),F_=c(),fn=a("p"),j_=r("This model is a PyTorch "),un=a("a"),E_=r("torch.nn.Module"),P_=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),C_=c(),ze=a("div"),_(gn.$$.fragment),q_=c(),fo=a("p"),M_=r("The "),li=a("a"),z_=r("Wav2Vec2ForXVector"),A_=r(" forward method, overrides the "),Jl=a("code"),D_=r("__call__"),O_=r(" special method."),L_=c(),_(la.$$.fragment),S_=c(),Gl=a("p"),I_=r("Example:"),N_=c(),_(_n.$$.fragment),Wd=c(),uo=a("h2"),ca=a("a"),Zl=a("span"),_(vn.$$.fragment),B_=c(),Kl=a("span"),U_=r("Wav2Vec2ForPreTraining"),$d=c(),We=a("div"),_(wn.$$.fragment),H_=c(),go=a("p"),R_=r("Wav2Vec2 Model with a quantizer and "),Ql=a("code"),X_=r("VQ"),J_=r(` head on top. Wav2Vec2 was proposed in `),bn=a("a"),G_=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Z_=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),K_=c(),yn=a("p"),Q_=r("This model inherits from "),ci=a("a"),Y_=r("PreTrainedModel"),ev=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),tv=c(),kn=a("p"),ov=r("This model is a PyTorch "),Tn=a("a"),av=r("torch.nn.Module"),sv=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nv=c(),Ae=a("div"),_(xn.$$.fragment),rv=c(),_o=a("p"),iv=r("The "),di=a("a"),lv=r("Wav2Vec2ForPreTraining"),cv=r(" forward method, overrides the "),Yl=a("code"),dv=r("__call__"),pv=r(" special method."),hv=c(),_(da.$$.fragment),mv=c(),ec=a("p"),fv=r("Example:"),uv=c(),_(Wn.$$.fragment),Vd=c(),vo=a("h2"),pa=a("a"),tc=a("span"),_($n.$$.fragment),gv=c(),oc=a("span"),_v=r("TFWav2Vec2Model"),Fd=c(),de=a("div"),_(Vn.$$.fragment),vv=c(),ac=a("p"),wv=r("The bare TFWav2Vec2 Model transformer outputing raw hidden-states without any specific head on top."),bv=c(),Fn=a("p"),yv=r("This model inherits from "),pi=a("a"),kv=r("TFPreTrainedModel"),Tv=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xv=c(),jn=a("p"),Wv=r("This model is also a "),En=a("a"),$v=r("tf.keras.Model"),Vv=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Fv=c(),_(ha.$$.fragment),jv=c(),De=a("div"),_(Pn.$$.fragment),Ev=c(),wo=a("p"),Pv=r("The "),hi=a("a"),Cv=r("TFWav2Vec2Model"),qv=r(" forward method, overrides the "),sc=a("code"),Mv=r("__call__"),zv=r(" special method."),Av=c(),_(ma.$$.fragment),Dv=c(),nc=a("p"),Ov=r("Example:"),Lv=c(),_(Cn.$$.fragment),jd=c(),bo=a("h2"),fa=a("a"),rc=a("span"),_(qn.$$.fragment),Sv=c(),ic=a("span"),Iv=r("TFWav2Vec2ForCTC"),Ed=c(),pe=a("div"),_(Mn.$$.fragment),Nv=c(),zn=a("p"),Bv=r("TFWav2Vec2 Model with a "),lc=a("code"),Uv=r("language modeling"),Hv=r(" head on top for Connectionist Temporal Classification (CTC)."),Rv=c(),An=a("p"),Xv=r("This model inherits from "),mi=a("a"),Jv=r("TFPreTrainedModel"),Gv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zv=c(),Dn=a("p"),Kv=r("This model is also a "),On=a("a"),Qv=r("tf.keras.Model"),Yv=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),e2=c(),_(ua.$$.fragment),t2=c(),Oe=a("div"),_(Ln.$$.fragment),o2=c(),yo=a("p"),a2=r("The "),fi=a("a"),s2=r("TFWav2Vec2ForCTC"),n2=r(" forward method, overrides the "),cc=a("code"),r2=r("__call__"),i2=r(" special method."),l2=c(),_(ga.$$.fragment),c2=c(),dc=a("p"),d2=r("Example:"),p2=c(),_(Sn.$$.fragment),Pd=c(),ko=a("h2"),_a=a("a"),pc=a("span"),_(In.$$.fragment),h2=c(),hc=a("span"),m2=r("FlaxWav2Vec2Model"),Cd=c(),Z=a("div"),_(Nn.$$.fragment),f2=c(),Bn=a("p"),u2=r(`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Un=a("a"),g2=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),_2=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),v2=c(),Hn=a("p"),w2=r("This model inherits from "),ui=a("a"),b2=r("FlaxPreTrainedModel"),y2=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),k2=c(),Rn=a("p"),T2=r("This model is also a Flax Linen "),Xn=a("a"),x2=r("flax.nn.Module"),W2=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),$2=c(),mc=a("p"),V2=r("Finally, this model supports inherent JAX features such as:"),F2=c(),ut=a("ul"),fc=a("li"),Jn=a("a"),j2=r("Just-In-Time (JIT) compilation"),E2=c(),uc=a("li"),Gn=a("a"),P2=r("Automatic Differentiation"),C2=c(),gc=a("li"),Zn=a("a"),q2=r("Vectorization"),M2=c(),_c=a("li"),Kn=a("a"),z2=r("Parallelization"),A2=c(),Le=a("div"),_(Qn.$$.fragment),D2=c(),To=a("p"),O2=r("The "),vc=a("code"),L2=r("FlaxWav2Vec2PreTrainedModel"),S2=r(" forward method, overrides the "),wc=a("code"),I2=r("__call__"),N2=r(" special method."),B2=c(),_(va.$$.fragment),U2=c(),bc=a("p"),H2=r("Example:"),R2=c(),_(Yn.$$.fragment),qd=c(),xo=a("h2"),wa=a("a"),yc=a("span"),_(er.$$.fragment),X2=c(),kc=a("span"),J2=r("FlaxWav2Vec2ForCTC"),Md=c(),K=a("div"),_(tr.$$.fragment),G2=c(),Wo=a("p"),Z2=r("Wav2Vec2 Model with a "),Tc=a("code"),K2=r("language modeling"),Q2=r(` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),or=a("a"),Y2=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),ew=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),tw=c(),ar=a("p"),ow=r("This model inherits from "),gi=a("a"),aw=r("FlaxPreTrainedModel"),sw=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nw=c(),sr=a("p"),rw=r("This model is also a Flax Linen "),nr=a("a"),iw=r("flax.nn.Module"),lw=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),cw=c(),xc=a("p"),dw=r("Finally, this model supports inherent JAX features such as:"),pw=c(),gt=a("ul"),Wc=a("li"),rr=a("a"),hw=r("Just-In-Time (JIT) compilation"),mw=c(),$c=a("li"),ir=a("a"),fw=r("Automatic Differentiation"),uw=c(),Vc=a("li"),lr=a("a"),gw=r("Vectorization"),_w=c(),Fc=a("li"),cr=a("a"),vw=r("Parallelization"),ww=c(),Se=a("div"),_(dr.$$.fragment),bw=c(),$o=a("p"),yw=r("The "),jc=a("code"),kw=r("FlaxWav2Vec2PreTrainedModel"),Tw=r(" forward method, overrides the "),Ec=a("code"),xw=r("__call__"),Ww=r(" special method."),$w=c(),_(ba.$$.fragment),Vw=c(),Pc=a("p"),Fw=r("Example:"),jw=c(),_(pr.$$.fragment),zd=c(),Vo=a("h2"),ya=a("a"),Cc=a("span"),_(hr.$$.fragment),Ew=c(),qc=a("span"),Pw=r("FlaxWav2Vec2ForPreTraining"),Ad=c(),Q=a("div"),_(mr.$$.fragment),Cw=c(),Fo=a("p"),qw=r("Wav2Vec2 Model with a quantizer and "),Mc=a("code"),Mw=r("VQ"),zw=r(` head on top. Wav2Vec2 was proposed in `),fr=a("a"),Aw=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Dw=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ow=c(),ur=a("p"),Lw=r("This model inherits from "),_i=a("a"),Sw=r("FlaxPreTrainedModel"),Iw=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nw=c(),gr=a("p"),Bw=r("This model is also a Flax Linen "),_r=a("a"),Uw=r("flax.nn.Module"),Hw=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Rw=c(),zc=a("p"),Xw=r("Finally, this model supports inherent JAX features such as:"),Jw=c(),_t=a("ul"),Ac=a("li"),vr=a("a"),Gw=r("Just-In-Time (JIT) compilation"),Zw=c(),Dc=a("li"),wr=a("a"),Kw=r("Automatic Differentiation"),Qw=c(),Oc=a("li"),br=a("a"),Yw=r("Vectorization"),eb=c(),Lc=a("li"),yr=a("a"),tb=r("Parallelization"),ob=c(),Ie=a("div"),_(kr.$$.fragment),ab=c(),jo=a("p"),sb=r("The "),vi=a("a"),nb=r("FlaxWav2Vec2ForPreTraining"),rb=r(" forward method, overrides the "),Sc=a("code"),ib=r("__call__"),lb=r(" special method."),cb=c(),_(ka.$$.fragment),db=c(),Ic=a("p"),pb=r("Example:"),hb=c(),_(Tr.$$.fragment),this.h()},l(o){const h=rk('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(t),x=d(o),f=s(o,"H1",{class:!0});var xr=n(f);T=s(xr,"A",{id:!0,class:!0,href:!0});var Nc=n(T);W=s(Nc,"SPAN",{});var Bc=n(W);v(u.$$.fragment,Bc),Bc.forEach(t),Nc.forEach(t),g=d(xr),$=s(xr,"SPAN",{});var Uc=n($);q=i(Uc,"Wav2Vec2"),Uc.forEach(t),xr.forEach(t),P=d(o),j=s(o,"H2",{class:!0});var Wr=n(j);A=s(Wr,"A",{id:!0,class:!0,href:!0});var Hc=n(A);D=s(Hc,"SPAN",{});var Rc=n(D);v(N.$$.fragment,Rc),Rc.forEach(t),Hc.forEach(t),M=d(Wr),E=s(Wr,"SPAN",{});var Xc=n(E);ae=i(Xc,"Overview"),Xc.forEach(t),Wr.forEach(t),ee=d(o),S=s(o,"P",{});var $r=n(S);B=i($r,"The Wav2Vec2 model was proposed in "),fe=s($r,"A",{href:!0,rel:!0});var Jc=n(fe);He=i(Jc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Jc.forEach(t),O=i($r," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),$r.forEach(t),H=d(o),$e=s(o,"P",{});var Gc=n($e);ue=i(Gc,"The abstract from the paper is the following:"),Gc.forEach(t),Ze=d(o),Ve=s(o,"P",{});var Zc=n(Ve);se=s(Zc,"EM",{});var Kc=n(se);st=i(Kc,`We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. 
This demonstrates the feasibility of speech recognition with limited amounts of labeled data.`),Kc.forEach(t),Zc.forEach(t),Ke=d(o),I=s(o,"P",{});var Qc=n(I);nt=i(Qc,"Tips:"),Qc.forEach(t),he=d(o),me=s(o,"UL",{});var Vr=n(me);Re=s(Vr,"LI",{});var Yc=n(Re);ge=i(Yc,"Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),Yc.forEach(t),rt=d(Vr),_e=s(Vr,"LI",{});var Fr=n(_e);ne=i(Fr,`Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),Fe=s(Fr,"A",{href:!0});var ed=n(Fe);ve=i(ed,"Wav2Vec2CTCTokenizer"),ed.forEach(t),it=i(Fr,"."),Fr.forEach(t),Vr.forEach(t),V=d(o),z=s(o,"P",{});var Od=n(z);Xe=i(Od,"This model was contributed by "),Je=s(Od,"A",{href:!0,rel:!0});var vb=n(Je);Nt=i(vb,"patrickvonplaten"),vb.forEach(t),re=i(Od,"."),Od.forEach(t),wt=d(o),we=s(o,"H2",{class:!0});var Ld=n(we);je=s(Ld,"A",{id:!0,class:!0,href:!0});var wb=n(je);J=s(wb,"SPAN",{});var bb=n(J);v(G.$$.fragment,bb),bb.forEach(t),wb.forEach(t),Bt=d(Ld),lt=s(Ld,"SPAN",{});var yb=n(lt);be=i(yb,"Wav2Vec2Config"),yb.forEach(t),Ld.forEach(t),bt=d(o),R=s(o,"DIV",{class:!0});var $t=n(R);v(ye.$$.fragment,$t),Ut=d($t),Ht=s($t,"P",{});var wi=n(Ht);ah=i(wi,"This is the configuration class to store the configuration of a "),jr=s(wi,"A",{href:!0});var kb=n(jr);sh=i(kb,"Wav2Vec2Model"),kb.forEach(t),nh=i(wi,`. It is used to instantiate an Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2 `),Pa=s(wi,"A",{href:!0,rel:!0});var Tb=n(Pa);rh=i(Tb,"facebook/wav2vec2-base-960h"),Tb.forEach(t),ih=i(wi," architecture."),wi.forEach(t),lh=d($t),Rt=s($t,"P",{});var bi=n(Rt);ch=i(bi,"Configuration objects inherit from "),Er=s(bi,"A",{href:!0});var xb=n(Er);dh=i(xb,"PretrainedConfig"),xb.forEach(t),ph=i(bi,` and can be used to control the model outputs. Read the documentation from `),Pr=s(bi,"A",{href:!0});var Wb=n(Pr);hh=i(Wb,"PretrainedConfig"),Wb.forEach(t),mh=i(bi," for more information."),bi.forEach(t),fh=d($t),Hi=s($t,"P",{});var $b=n(Hi);uh=i($b,"Example:"),$b.forEach(t),gh=d($t),v(Ca.$$.fragment,$t),$t.forEach(t),td=d(o),Xt=s(o,"H2",{class:!0});var Sd=n(Xt);Eo=s(Sd,"A",{id:!0,class:!0,href:!0});var Vb=n(Eo);Ri=s(Vb,"SPAN",{});var Fb=n(Ri);v(qa.$$.fragment,Fb),Fb.forEach(t),Vb.forEach(t),_h=d(Sd),Xi=s(Sd,"SPAN",{});var jb=n(Xi);vh=i(jb,"Wav2Vec2CTCTokenizer"),jb.forEach(t),Sd.forEach(t),od=d(o),ke=s(o,"DIV",{class:!0});var Vt=n(ke);v(Ma.$$.fragment,Vt),wh=d(Vt),Ji=s(Vt,"P",{});var Eb=n(Ji);bh=i(Eb,"Constructs a Wav2Vec2CTC tokenizer."),Eb.forEach(t),yh=d(Vt),za=s(Vt,"P",{});var Id=n(za);kh=i(Id,"This tokenizer inherits from "),Cr=s(Id,"A",{href:!0});var Pb=n(Cr);Th=i(Pb,"PreTrainedTokenizer"),Pb.forEach(t),xh=i(Id,` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),Id.forEach(t),Wh=d(Vt),Po=s(Vt,"DIV",{class:!0});var Nd=n(Po);v(Aa.$$.fragment,Nd),$h=d(Nd),Gi=s(Nd,"P",{});var Cb=n(Gi);Vh=i(Cb,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Cb.forEach(t),Nd.forEach(t),Fh=d(Vt),Zi=s(Vt,"DIV",{class:!0}),n(Zi).forEach(t),Vt.forEach(t),ad=d(o),Jt=s(o,"H2",{class:!0});var Bd=n(Jt);Co=s(Bd,"A",{id:!0,class:!0,href:!0});var qb=n(Co);Ki=s(qb,"SPAN",{});var Mb=n(Ki);v(Da.$$.fragment,Mb),Mb.forEach(t),qb.forEach(t),jh=d(Bd),Qi=s(Bd,"SPAN",{});var zb=n(Qi);Eh=i(zb,"Wav2Vec2FeatureExtractor"),zb.forEach(t),Bd.forEach(t),sd=d(o),Ge=s(o,"DIV",{class:!0});var Ta=n(Ge);v(Oa.$$.fragment,Ta),Ph=d(Ta),Yi=s(Ta,"P",{});var Ab=n(Yi);Ch=i(Ab,"Constructs a Wav2Vec2 feature extractor."),Ab.forEach(t),qh=d(Ta),La=s(Ta,"P",{});var Ud=n(La);Mh=i(Ud,`This feature extractor inherits from `),qr=s(Ud,"A",{href:!0});var Db=n(qr);zh=i(Db,"SequenceFeatureExtractor"),Db.forEach(t),Ah=i(Ud,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ud.forEach(t),Dh=d(Ta),qo=s(Ta,"DIV",{class:!0});var Hd=n(qo);v(Sa.$$.fragment,Hd),Oh=d(Hd),el=s(Hd,"P",{});var Ob=n(el);Lh=i(Ob,"Main method to featurize and prepare for the model one or several sequence(s). sequences."),Ob.forEach(t),Hd.forEach(t),Ta.forEach(t),nd=d(o),Gt=s(o,"H2",{class:!0});var Rd=n(Gt);Mo=s(Rd,"A",{id:!0,class:!0,href:!0});var Lb=n(Mo);tl=s(Lb,"SPAN",{});var Sb=n(tl);v(Ia.$$.fragment,Sb),Sb.forEach(t),Lb.forEach(t),Sh=d(Rd),ol=s(Rd,"SPAN",{});var Ib=n(ol);Ih=i(Ib,"Wav2Vec2Processor"),Ib.forEach(t),Rd.forEach(t),rd=d(o),L=s(o,"DIV",{class:!0});var X=n(L);v(Na.$$.fragment,X),Nh=d(X),al=s(X,"P",{});var Nb=n(al);Bh=i(Nb,`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single processor.`),Nb.forEach(t),Uh=d(X),Ee=s(X,"P",{});var vt=n(Ee);Mr=s(vt,"A",{href:!0});var Bb=n(Mr);Hh=i(Bb,"Wav2Vec2Processor"),Bb.forEach(t),Rh=i(vt,` offers all the functionalities of `),zr=s(vt,"A",{href:!0});var Ub=n(zr);Xh=i(Ub,"Wav2Vec2FeatureExtractor"),Ub.forEach(t),Jh=i(vt," and "),Ar=s(vt,"A",{href:!0});var Hb=n(Ar);Gh=i(Hb,"PreTrainedTokenizer"),Hb.forEach(t),Zh=i(vt,`. See the docstring of `),Ba=s(vt,"A",{href:!0});var mb=n(Ba);sl=s(mb,"STRONG",{});var Rb=n(sl);Kh=i(Rb,"call"),Rb.forEach(t),Qh=i(mb,"()"),mb.forEach(t),Yh=i(vt," and "),Dr=s(vt,"A",{href:!0});var Xb=n(Dr);em=i(Xb,"decode()"),Xb.forEach(t),tm=i(vt,` for more information.`),vt.forEach(t),om=d(X),zo=s(X,"DIV",{class:!0});var Xd=n(zo);v(Ua.$$.fragment,Xd),am=d(Xd),ct=s(Xd,"P",{});var xa=n(ct);sm=i(xa,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Ha=s(xa,"A",{href:!0});var fb=n(Ha);nl=s(fb,"STRONG",{});var Jb=n(nl);nm=i(Jb,"call"),Jb.forEach(t),rm=i(fb,"()"),fb.forEach(t),im=i(xa,` and returns its output. If used in the context `),Or=s(xa,"A",{href:!0});var Gb=n(Or);lm=i(Gb,"as_target_processor()"),Gb.forEach(t),cm=i(xa,` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ra=s(xa,"A",{href:!0});var ub=n(Ra);rl=s(ub,"STRONG",{});var Zb=n(rl);dm=i(Zb,"call"),Zb.forEach(t),pm=i(ub,"()"),ub.forEach(t),hm=i(xa,`. 
Please refer to the docstring of the above two methods for more information.`),xa.forEach(t),Xd.forEach(t),mm=d(X),Ao=s(X,"DIV",{class:!0});var Jd=n(Ao);v(Xa.$$.fragment,Jd),fm=d(Jd),dt=s(Jd,"P",{});var Wa=n(dt);um=i(Wa,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Lr=s(Wa,"A",{href:!0});var Kb=n(Lr);gm=i(Kb,"pad()"),Kb.forEach(t),_m=i(Wa,` and returns its output. If used in the context `),Sr=s(Wa,"A",{href:!0});var Qb=n(Sr);vm=i(Qb,"as_target_processor()"),Qb.forEach(t),wm=i(Wa,` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ir=s(Wa,"A",{href:!0});var Yb=n(Ir);bm=i(Yb,"pad()"),Yb.forEach(t),ym=i(Wa,`. Please refer to the docstring of the above two methods for more information.`),Wa.forEach(t),Jd.forEach(t),km=d(X),yt=s(X,"DIV",{class:!0});var yi=n(yt);v(Ja.$$.fragment,yi),Tm=d(yi),Ga=s(yi,"P",{});var Gd=n(Ga);xm=i(Gd,"Instantiate a "),Nr=s(Gd,"A",{href:!0});var e1=n(Nr);Wm=i(e1,"Wav2Vec2Processor"),e1.forEach(t),$m=i(Gd," from a pretrained Wav2Vec2 processor."),Gd.forEach(t),Vm=d(yi),v(Do.$$.fragment,yi),yi.forEach(t),Fm=d(X),kt=s(X,"DIV",{class:!0});var ki=n(kt);v(Za.$$.fragment,ki),jm=d(ki),Zt=s(ki,"P",{});var Ti=n(Zt);Em=i(Ti,"Save a Wav2Vec2 feature_extractor object and Wav2Vec2 tokenizer object to the directory "),il=s(Ti,"CODE",{});var t1=n(il);Pm=i(t1,"save_directory"),t1.forEach(t),Cm=i(Ti,`, so that it can be re-loaded using the `),Br=s(Ti,"A",{href:!0});var o1=n(Br);qm=i(o1,"from_pretrained()"),o1.forEach(t),Mm=i(Ti," class method."),Ti.forEach(t),zm=d(ki),v(Oo.$$.fragment,ki),ki.forEach(t),Am=d(X),Lo=s(X,"DIV",{class:!0});var Zd=n(Lo);v(Ka.$$.fragment,Zd),Dm=d(Zd),Qa=s(Zd,"P",{});var Kd=n(Qa);Om=i(Kd,`This method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ur=s(Kd,"A",{href:!0});var a1=n(Ur);Lm=i(a1,"batch_decode()"),a1.forEach(t),Sm=i(Kd,`. Please refer to the docstring of this method for more information.`),Kd.forEach(t),Zd.forEach(t),Im=d(X),So=s(X,"DIV",{class:!0});var Qd=n(So);v(Ya.$$.fragment,Qd),Nm=d(Qd),es=s(Qd,"P",{});var Yd=n(es);Bm=i(Yd,`This method forwards all its arguments to PreTrainedTokenizer\u2019s `),Hr=s(Yd,"A",{href:!0});var s1=n(Hr);Um=i(s1,"decode()"),s1.forEach(t),Hm=i(Yd,`. Please refer to the docstring of this method for more information.`),Yd.forEach(t),Qd.forEach(t),Rm=d(X),Io=s(X,"DIV",{class:!0});var ep=n(Io);v(ts.$$.fragment,ep),Xm=d(ep),ll=s(ep,"P",{});var n1=n(ll);Jm=i(n1,`Temporarily sets the tokenizer for processing the input. 
Useful for encoding the labels when fine-tuning Wav2Vec2.`),n1.forEach(t),ep.forEach(t),X.forEach(t),id=d(o),Kt=s(o,"H2",{class:!0});var tp=n(Kt);No=s(tp,"A",{id:!0,class:!0,href:!0});var r1=n(No);cl=s(r1,"SPAN",{});var i1=n(cl);v(os.$$.fragment,i1),i1.forEach(t),r1.forEach(t),Gm=d(tp),dl=s(tp,"SPAN",{});var l1=n(dl);Zm=i(l1,"Wav2Vec2ProcessorWithLM"),l1.forEach(t),tp.forEach(t),ld=d(o),U=s(o,"DIV",{class:!0});var te=n(U);v(as.$$.fragment,te),Km=d(te),pl=s(te,"P",{});var c1=n(pl);Qm=i(c1,`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder with language model support into a single processor for language model boosted speech recognition decoding.`),c1.forEach(t),Ym=d(te),Bo=s(te,"DIV",{class:!0});var op=n(Bo);v(ss.$$.fragment,op),ef=d(op),pt=s(op,"P",{});var $a=n(pt);tf=i($a,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),ns=s($a,"A",{href:!0});var gb=n(ns);hl=s(gb,"STRONG",{});var d1=n(hl);of=i(d1,"call"),d1.forEach(t),af=i(gb,"()"),gb.forEach(t),sf=i($a,` and returns its output. If used in the context `),Rr=s($a,"A",{href:!0});var p1=n(Rr);nf=i(p1,"as_target_processor()"),p1.forEach(t),rf=i($a,` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),rs=s($a,"A",{href:!0});var _b=n(rs);ml=s(_b,"STRONG",{});var h1=n(ml);lf=i(h1,"call"),h1.forEach(t),cf=i(_b,"()"),_b.forEach(t),df=i($a,`. Please refer to the docstring of the above two methods for more information.`),$a.forEach(t),op.forEach(t),pf=d(te),Uo=s(te,"DIV",{class:!0});var ap=n(Uo);v(is.$$.fragment,ap),hf=d(ap),ht=s(ap,"P",{});var Va=n(ht);mf=i(Va,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Xr=s(Va,"A",{href:!0});var m1=n(Xr);ff=i(m1,"pad()"),m1.forEach(t),uf=i(Va,` and returns its output. If used in the context `),Jr=s(Va,"A",{href:!0});var f1=n(Jr);gf=i(f1,"as_target_processor()"),f1.forEach(t),_f=i(Va,` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),Gr=s(Va,"A",{href:!0});var u1=n(Gr);vf=i(u1,"pad()"),u1.forEach(t),wf=i(Va,`. 
Please refer to the docstring of the above two methods for more information.`),Va.forEach(t),ap.forEach(t),bf=d(te),Tt=s(te,"DIV",{class:!0});var xi=n(Tt);v(ls.$$.fragment,xi),yf=d(xi),cs=s(xi,"P",{});var sp=n(cs);kf=i(sp,"Instantiate a "),Zr=s(sp,"A",{href:!0});var g1=n(Zr);Tf=i(g1,"Wav2Vec2ProcessorWithLM"),g1.forEach(t),xf=i(sp," from a pretrained Wav2Vec2 processor."),sp.forEach(t),Wf=d(xi),v(Ho.$$.fragment,xi),xi.forEach(t),$f=d(te),xt=s(te,"DIV",{class:!0});var Wi=n(xt);v(ds.$$.fragment,Wi),Vf=d(Wi),Qt=s(Wi,"P",{});var $i=n(Qt);Ff=i($i,`Save the Wav2Vec2 feature_extractor, a tokenizer object and a pyctcdecode decoder to the directory `),fl=s($i,"CODE",{});var _1=n(fl);jf=i(_1,"save_directory"),_1.forEach(t),Ef=i($i,`, so that they can be re-loaded using the `),Kr=s($i,"A",{href:!0});var v1=n(Kr);Pf=i(v1,"from_pretrained()"),v1.forEach(t),Cf=i($i," class method."),$i.forEach(t),qf=d(Wi),v(Ro.$$.fragment,Wi),Wi.forEach(t),Mf=d(te),Wt=s(te,"DIV",{class:!0});var Vi=n(Wt);v(ps.$$.fragment,Vi),zf=d(Vi),ul=s(Vi,"P",{});var w1=n(ul);Af=i(w1,"Batch decode output logits to audio transcription with language model support."),w1.forEach(t),Df=d(Vi),v(Xo.$$.fragment,Vi),Vi.forEach(t),Of=d(te),Jo=s(te,"DIV",{class:!0});var np=n(Jo);v(hs.$$.fragment,np),Lf=d(np),gl=s(np,"P",{});var b1=n(gl);Sf=i(b1,"Decode output logits to audio transcription with language model support."),b1.forEach(t),np.forEach(t),If=d(te),Go=s(te,"DIV",{class:!0});var rp=n(Go);v(ms.$$.fragment,rp),Nf=d(rp),_l=s(rp,"P",{});var y1=n(_l);Bf=i(y1,`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Wav2Vec2.`),y1.forEach(t),rp.forEach(t),te.forEach(t),cd=d(o),Yt=s(o,"H2",{class:!0});var ip=n(Yt);Zo=s(ip,"A",{id:!0,class:!0,href:!0});var k1=n(Zo);vl=s(k1,"SPAN",{});var T1=n(vl);v(fs.$$.fragment,T1),T1.forEach(t),k1.forEach(t),Uf=d(ip),wl=s(ip,"SPAN",{});var x1=n(wl);Hf=i(x1,"Wav2Vec2 specific outputs"),x1.forEach(t),ip.forEach(t),dd=d(o),eo=s(o,"DIV",{class:!0});var lp=n(eo);v(us.$$.fragment,lp),Rf=d(lp),gs=s(lp,"P",{});var cp=n(gs);Xf=i(cp,"Output type of "),bl=s(cp,"CODE",{});var W1=n(bl);Jf=i(W1,"Wav2Vec2DecoderWithLM"),W1.forEach(t),Gf=i(cp,", with transcription."),cp.forEach(t),lp.forEach(t),pd=d(o),to=s(o,"DIV",{class:!0});var dp=n(to);v(_s.$$.fragment,dp),Zf=d(dp),vs=s(dp,"P",{});var pp=n(vs);Kf=i(pp,"Output type of "),yl=s(pp,"CODE",{});var $1=n(yl);Qf=i($1,"Wav2Vec2BaseModelOutput"),$1.forEach(t),Yf=i(pp,", with potential hidden states and attentions."),pp.forEach(t),dp.forEach(t),hd=d(o),oo=s(o,"DIV",{class:!0});var hp=n(oo);v(ws.$$.fragment,hp),eu=d(hp),bs=s(hp,"P",{});var mp=n(bs);tu=i(mp,"Output type of "),Qr=s(mp,"A",{href:!0});var V1=n(Qr);ou=i(V1,"Wav2Vec2ForPreTraining"),V1.forEach(t),au=i(mp,", with potential hidden states and attentions."),mp.forEach(t),hp.forEach(t),md=d(o),mt=s(o,"DIV",{class:!0});var Fi=n(mt);v(ys.$$.fragment,Fi),su=d(Fi),ks=s(Fi,"P",{});var fp=n(ks);nu=i(fp,"Output type of "),kl=s(fp,"CODE",{});var F1=n(kl);ru=i(F1,"FlaxWav2Vec2BaseModelOutput"),F1.forEach(t),iu=i(fp,", with potential hidden states and attentions."),fp.forEach(t),lu=d(Fi),Ko=s(Fi,"DIV",{class:!0});var up=n(Ko);v(Ts.$$.fragment,up),cu=d(up),Tl=s(up,"P",{});var j1=n(Tl);du=i(j1,"\u201CReturns a new object replacing the specified fields with new values."),j1.forEach(t),up.forEach(t),Fi.forEach(t),fd=d(o),ft=s(o,"DIV",{class:!0});var ji=n(ft);v(xs.$$.fragment,ji),pu=d(ji),Ws=s(ji,"P",{});var gp=n(Ws);hu=i(gp,"Output type of "),xl=s(gp,"CODE",{});var 
E1=n(xl);mu=i(E1,"FlaxWav2Vec2ForPreTrainingOutput"),E1.forEach(t),fu=i(gp,`, with potential hidden states and attentions.`),gp.forEach(t),uu=d(ji),Qo=s(ji,"DIV",{class:!0});var _p=n(Qo);v($s.$$.fragment,_p),gu=d(_p),Wl=s(_p,"P",{});var P1=n(Wl);_u=i(P1,"\u201CReturns a new object replacing the specified fields with new values."),P1.forEach(t),_p.forEach(t),ji.forEach(t),ud=d(o),ao=s(o,"H2",{class:!0});var vp=n(ao);Yo=s(vp,"A",{id:!0,class:!0,href:!0});var C1=n(Yo);$l=s(C1,"SPAN",{});var q1=n($l);v(Vs.$$.fragment,q1),q1.forEach(t),C1.forEach(t),vu=d(vp),Vl=s(vp,"SPAN",{});var M1=n(Vl);wu=i(M1,"Wav2Vec2Model"),M1.forEach(t),vp.forEach(t),gd=d(o),Te=s(o,"DIV",{class:!0});var Ft=n(Te);v(Fs.$$.fragment,Ft),bu=d(Ft),js=s(Ft,"P",{});var wp=n(js);yu=i(wp,`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Es=s(wp,"A",{href:!0,rel:!0});var z1=n(Es);ku=i(z1,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),z1.forEach(t),Tu=i(wp," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),wp.forEach(t),xu=d(Ft),Ps=s(Ft,"P",{});var bp=n(Ps);Wu=i(bp,"This model inherits from "),Yr=s(bp,"A",{href:!0});var A1=n(Yr);$u=i(A1,"PreTrainedModel"),A1.forEach(t),Vu=i(bp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),bp.forEach(t),Fu=d(Ft),Cs=s(Ft,"P",{});var yp=n(Cs);ju=i(yp,"This model is a PyTorch "),qs=s(yp,"A",{href:!0,rel:!0});var D1=n(qs);Eu=i(D1,"torch.nn.Module"),D1.forEach(t),Pu=i(yp,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yp.forEach(t),Cu=d(Ft),Pe=s(Ft,"DIV",{class:!0});var jt=n(Pe);v(Ms.$$.fragment,jt),qu=d(jt),so=s(jt,"P",{});var Ei=n(so);Mu=i(Ei,"The "),ei=s(Ei,"A",{href:!0});var O1=n(ei);zu=i(O1,"Wav2Vec2Model"),O1.forEach(t),Au=i(Ei," forward method, overrides the "),Fl=s(Ei,"CODE",{});var L1=n(Fl);Du=i(L1,"__call__"),L1.forEach(t),Ou=i(Ei," special method."),Ei.forEach(t),Lu=d(jt),v(ea.$$.fragment,jt),Su=d(jt),jl=s(jt,"P",{});var S1=n(jl);Iu=i(S1,"Example:"),S1.forEach(t),Nu=d(jt),v(zs.$$.fragment,jt),jt.forEach(t),Ft.forEach(t),_d=d(o),no=s(o,"H2",{class:!0});var kp=n(no);ta=s(kp,"A",{id:!0,class:!0,href:!0});var I1=n(ta);El=s(I1,"SPAN",{});var N1=n(El);v(As.$$.fragment,N1),N1.forEach(t),I1.forEach(t),Bu=d(kp),Pl=s(kp,"SPAN",{});var B1=n(Pl);Uu=i(B1,"Wav2Vec2ForCTC"),B1.forEach(t),kp.forEach(t),vd=d(o),xe=s(o,"DIV",{class:!0});var Et=n(xe);v(Ds.$$.fragment,Et),Hu=d(Et),ro=s(Et,"P",{});var Pi=n(ro);Ru=i(Pi,"Wav2Vec2 Model with a "),Cl=s(Pi,"CODE",{});var U1=n(Cl);Xu=i(U1,"language modeling"),U1.forEach(t),Ju=i(Pi,` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),Os=s(Pi,"A",{href:!0,rel:!0});var H1=n(Os);Gu=i(H1,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),H1.forEach(t),Zu=i(Pi," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Pi.forEach(t),Ku=d(Et),Ls=s(Et,"P",{});var Tp=n(Ls);Qu=i(Tp,"This model inherits from "),ti=s(Tp,"A",{href:!0});var R1=n(ti);Yu=i(R1,"PreTrainedModel"),R1.forEach(t),eg=i(Tp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Tp.forEach(t),tg=d(Et),Ss=s(Et,"P",{});var xp=n(Ss);og=i(xp,"This model is a PyTorch "),Is=s(xp,"A",{href:!0,rel:!0});var X1=n(Is);ag=i(X1,"torch.nn.Module"),X1.forEach(t),sg=i(xp,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xp.forEach(t),ng=d(Et),Ce=s(Et,"DIV",{class:!0});var Pt=n(Ce);v(Ns.$$.fragment,Pt),rg=d(Pt),io=s(Pt,"P",{});var Ci=n(io);ig=i(Ci,"The "),oi=s(Ci,"A",{href:!0});var J1=n(oi);lg=i(J1,"Wav2Vec2ForCTC"),J1.forEach(t),cg=i(Ci," forward method, overrides the "),ql=s(Ci,"CODE",{});var G1=n(ql);dg=i(G1,"__call__"),G1.forEach(t),pg=i(Ci," special method."),Ci.forEach(t),hg=d(Pt),v(oa.$$.fragment,Pt),mg=d(Pt),Ml=s(Pt,"P",{});var Z1=n(Ml);fg=i(Z1,"Example:"),Z1.forEach(t),ug=d(Pt),v(Bs.$$.fragment,Pt),Pt.forEach(t),Et.forEach(t),wd=d(o),lo=s(o,"H2",{class:!0});var Wp=n(lo);aa=s(Wp,"A",{id:!0,class:!0,href:!0});var K1=n(aa);zl=s(K1,"SPAN",{});var Q1=n(zl);v(Us.$$.fragment,Q1),Q1.forEach(t),K1.forEach(t),gg=d(Wp),Al=s(Wp,"SPAN",{});var Y1=n(Al);_g=i(Y1,"Wav2Vec2ForSequenceClassification"),Y1.forEach(t),Wp.forEach(t),bd=d(o),ie=s(o,"DIV",{class:!0});var Qe=n(ie);v(Hs.$$.fragment,Qe),vg=d(Qe),Dl=s(Qe,"P",{});var ey=n(Dl);wg=i(ey,`Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),ey.forEach(t),bg=d(Qe),Rs=s(Qe,"P",{});var $p=n(Rs);yg=i($p,"Wav2Vec2 was proposed in "),Xs=s($p,"A",{href:!0,rel:!0});var ty=n(Xs);kg=i(ty,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),ty.forEach(t),Tg=i($p," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),$p.forEach(t),xg=d(Qe),Js=s(Qe,"P",{});var Vp=n(Js);Wg=i(Vp,"This model inherits from "),ai=s(Vp,"A",{href:!0});var oy=n(ai);$g=i(oy,"PreTrainedModel"),oy.forEach(t),Vg=i(Vp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Vp.forEach(t),Fg=d(Qe),Gs=s(Qe,"P",{});var Fp=n(Gs);jg=i(Fp,"This model is a PyTorch "),Zs=s(Fp,"A",{href:!0,rel:!0});var ay=n(Zs);Eg=i(ay,"torch.nn.Module"),ay.forEach(t),Pg=i(Fp,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fp.forEach(t),Cg=d(Qe),qe=s(Qe,"DIV",{class:!0});var Ct=n(qe);v(Ks.$$.fragment,Ct),qg=d(Ct),co=s(Ct,"P",{});var qi=n(co);Mg=i(qi,"The "),si=s(qi,"A",{href:!0});var sy=n(si);zg=i(sy,"Wav2Vec2ForSequenceClassification"),sy.forEach(t),Ag=i(qi," forward method, overrides the "),Ol=s(qi,"CODE",{});var ny=n(Ol);Dg=i(ny,"__call__"),ny.forEach(t),Og=i(qi," special method."),qi.forEach(t),Lg=d(Ct),v(sa.$$.fragment,Ct),Sg=d(Ct),Ll=s(Ct,"P",{});var ry=n(Ll);Ig=i(ry,"Example:"),ry.forEach(t),Ng=d(Ct),v(Qs.$$.fragment,Ct),Ct.forEach(t),Qe.forEach(t),yd=d(o),po=s(o,"H2",{class:!0});var jp=n(po);na=s(jp,"A",{id:!0,class:!0,href:!0});var iy=n(na);Sl=s(iy,"SPAN",{});var ly=n(Sl);v(Ys.$$.fragment,ly),ly.forEach(t),iy.forEach(t),Bg=d(jp),Il=s(jp,"SPAN",{});var cy=n(Il);Ug=i(cy,"Wav2Vec2ForAudioFrameClassification"),cy.forEach(t),jp.forEach(t),kd=d(o),le=s(o,"DIV",{class:!0});var Ye=n(le);v(en.$$.fragment,Ye),Hg=d(Ye),Nl=s(Ye,"P",{});var dy=n(Nl);Rg=i(dy,"Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization."),dy.forEach(t),Xg=d(Ye),tn=s(Ye,"P",{});var Ep=n(tn);Jg=i(Ep,"Wav2Vec2 was proposed in "),on=s(Ep,"A",{href:!0,rel:!0});var py=n(on);Gg=i(py,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),py.forEach(t),Zg=i(Ep," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ep.forEach(t),Kg=d(Ye),an=s(Ye,"P",{});var Pp=n(an);Qg=i(Pp,"This model inherits from "),ni=s(Pp,"A",{href:!0});var hy=n(ni);Yg=i(hy,"PreTrainedModel"),hy.forEach(t),e_=i(Pp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Pp.forEach(t),t_=d(Ye),sn=s(Ye,"P",{});var Cp=n(sn);o_=i(Cp,"This model is a PyTorch "),nn=s(Cp,"A",{href:!0,rel:!0});var my=n(nn);a_=i(my,"torch.nn.Module"),my.forEach(t),s_=i(Cp,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cp.forEach(t),n_=d(Ye),Me=s(Ye,"DIV",{class:!0});var qt=n(Me);v(rn.$$.fragment,qt),r_=d(qt),ho=s(qt,"P",{});var Mi=n(ho);i_=i(Mi,"The "),ri=s(Mi,"A",{href:!0});var fy=n(ri);l_=i(fy,"Wav2Vec2ForAudioFrameClassification"),fy.forEach(t),c_=i(Mi," forward method, overrides the "),Bl=s(Mi,"CODE",{});var uy=n(Bl);d_=i(uy,"__call__"),uy.forEach(t),p_=i(Mi," special method."),Mi.forEach(t),h_=d(qt),v(ra.$$.fragment,qt),m_=d(qt),Ul=s(qt,"P",{});var gy=n(Ul);f_=i(gy,"Example:"),gy.forEach(t),u_=d(qt),v(ln.$$.fragment,qt),qt.forEach(t),Ye.forEach(t),Td=d(o),mo=s(o,"H2",{class:!0});var qp=n(mo);ia=s(qp,"A",{id:!0,class:!0,href:!0});var _y=n(ia);Hl=s(_y,"SPAN",{});var vy=n(Hl);v(cn.$$.fragment,vy),vy.forEach(t),_y.forEach(t),g_=d(qp),Rl=s(qp,"SPAN",{});var wy=n(Rl);__=i(wy,"Wav2Vec2ForXVector"),wy.forEach(t),qp.forEach(t),xd=d(o),ce=s(o,"DIV",{class:!0});var et=n(ce);v(dn.$$.fragment,et),v_=d(et),Xl=s(et,"P",{});var by=n(Xl);w_=i(by,"Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification."),by.forEach(t),b_=d(et),pn=s(et,"P",{});var Mp=n(pn);y_=i(Mp,"Wav2Vec2 was proposed in "),hn=s(Mp,"A",{href:!0,rel:!0});var yy=n(hn);k_=i(yy,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),yy.forEach(t),T_=i(Mp," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Mp.forEach(t),x_=d(et),mn=s(et,"P",{});var zp=n(mn);W_=i(zp,"This model inherits from "),ii=s(zp,"A",{href:!0});var ky=n(ii);$_=i(ky,"PreTrainedModel"),ky.forEach(t),V_=i(zp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),zp.forEach(t),F_=d(et),fn=s(et,"P",{});var Ap=n(fn);j_=i(Ap,"This model is a PyTorch "),un=s(Ap,"A",{href:!0,rel:!0});var Ty=n(un);E_=i(Ty,"torch.nn.Module"),Ty.forEach(t),P_=i(Ap,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ap.forEach(t),C_=d(et),ze=s(et,"DIV",{class:!0});var Mt=n(ze);v(gn.$$.fragment,Mt),q_=d(Mt),fo=s(Mt,"P",{});var zi=n(fo);M_=i(zi,"The "),li=s(zi,"A",{href:!0});var xy=n(li);z_=i(xy,"Wav2Vec2ForXVector"),xy.forEach(t),A_=i(zi," forward method, overrides the "),Jl=s(zi,"CODE",{});var Wy=n(Jl);D_=i(Wy,"__call__"),Wy.forEach(t),O_=i(zi," special method."),zi.forEach(t),L_=d(Mt),v(la.$$.fragment,Mt),S_=d(Mt),Gl=s(Mt,"P",{});var $y=n(Gl);I_=i($y,"Example:"),$y.forEach(t),N_=d(Mt),v(_n.$$.fragment,Mt),Mt.forEach(t),et.forEach(t),Wd=d(o),uo=s(o,"H2",{class:!0});var Dp=n(uo);ca=s(Dp,"A",{id:!0,class:!0,href:!0});var Vy=n(ca);Zl=s(Vy,"SPAN",{});var Fy=n(Zl);v(vn.$$.fragment,Fy),Fy.forEach(t),Vy.forEach(t),B_=d(Dp),Kl=s(Dp,"SPAN",{});var jy=n(Kl);U_=i(jy,"Wav2Vec2ForPreTraining"),jy.forEach(t),Dp.forEach(t),$d=d(o),We=s(o,"DIV",{class:!0});var zt=n(We);v(wn.$$.fragment,zt),H_=d(zt),go=s(zt,"P",{});var Ai=n(go);R_=i(Ai,"Wav2Vec2 Model with a quantizer and "),Ql=s(Ai,"CODE",{});var Ey=n(Ql);X_=i(Ey,"VQ"),Ey.forEach(t),J_=i(Ai,` head on top. 
Wav2Vec2 was proposed in `),bn=s(Ai,"A",{href:!0,rel:!0});var Py=n(bn);G_=i(Py,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Py.forEach(t),Z_=i(Ai," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ai.forEach(t),K_=d(zt),yn=s(zt,"P",{});var Op=n(yn);Q_=i(Op,"This model inherits from "),ci=s(Op,"A",{href:!0});var Cy=n(ci);Y_=i(Cy,"PreTrainedModel"),Cy.forEach(t),ev=i(Op,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Op.forEach(t),tv=d(zt),kn=s(zt,"P",{});var Lp=n(kn);ov=i(Lp,"This model is a PyTorch "),Tn=s(Lp,"A",{href:!0,rel:!0});var qy=n(Tn);av=i(qy,"torch.nn.Module"),qy.forEach(t),sv=i(Lp,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lp.forEach(t),nv=d(zt),Ae=s(zt,"DIV",{class:!0});var At=n(Ae);v(xn.$$.fragment,At),rv=d(At),_o=s(At,"P",{});var Di=n(_o);iv=i(Di,"The "),di=s(Di,"A",{href:!0});var My=n(di);lv=i(My,"Wav2Vec2ForPreTraining"),My.forEach(t),cv=i(Di," forward method, overrides the "),Yl=s(Di,"CODE",{});var zy=n(Yl);dv=i(zy,"__call__"),zy.forEach(t),pv=i(Di," special method."),Di.forEach(t),hv=d(At),v(da.$$.fragment,At),mv=d(At),ec=s(At,"P",{});var Ay=n(ec);fv=i(Ay,"Example:"),Ay.forEach(t),uv=d(At),v(Wn.$$.fragment,At),At.forEach(t),zt.forEach(t),Vd=d(o),vo=s(o,"H2",{class:!0});var Sp=n(vo);pa=s(Sp,"A",{id:!0,class:!0,href:!0});var Dy=n(pa);tc=s(Dy,"SPAN",{});var Oy=n(tc);v($n.$$.fragment,Oy),Oy.forEach(t),Dy.forEach(t),gv=d(Sp),oc=s(Sp,"SPAN",{});var Ly=n(oc);_v=i(Ly,"TFWav2Vec2Model"),Ly.forEach(t),Sp.forEach(t),Fd=d(o),de=s(o,"DIV",{class:!0});var tt=n(de);v(Vn.$$.fragment,tt),vv=d(tt),ac=s(tt,"P",{});var Sy=n(ac);wv=i(Sy,"The bare TFWav2Vec2 Model transformer outputing raw hidden-states without any specific head on top."),Sy.forEach(t),bv=d(tt),Fn=s(tt,"P",{});var Ip=n(Fn);yv=i(Ip,"This model inherits from "),pi=s(Ip,"A",{href:!0});var Iy=n(pi);kv=i(Iy,"TFPreTrainedModel"),Iy.forEach(t),Tv=i(Ip,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ip.forEach(t),xv=d(tt),jn=s(tt,"P",{});var Np=n(jn);Wv=i(Np,"This model is also a "),En=s(Np,"A",{href:!0,rel:!0});var Ny=n(En);$v=i(Ny,"tf.keras.Model"),Ny.forEach(t),Vv=i(Np,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Np.forEach(t),Fv=d(tt),v(ha.$$.fragment,tt),jv=d(tt),De=s(tt,"DIV",{class:!0});var Dt=n(De);v(Pn.$$.fragment,Dt),Ev=d(Dt),wo=s(Dt,"P",{});var Oi=n(wo);Pv=i(Oi,"The "),hi=s(Oi,"A",{href:!0});var By=n(hi);Cv=i(By,"TFWav2Vec2Model"),By.forEach(t),qv=i(Oi," forward method, overrides the "),sc=s(Oi,"CODE",{});var Uy=n(sc);Mv=i(Uy,"__call__"),Uy.forEach(t),zv=i(Oi," special method."),Oi.forEach(t),Av=d(Dt),v(ma.$$.fragment,Dt),Dv=d(Dt),nc=s(Dt,"P",{});var Hy=n(nc);Ov=i(Hy,"Example:"),Hy.forEach(t),Lv=d(Dt),v(Cn.$$.fragment,Dt),Dt.forEach(t),tt.forEach(t),jd=d(o),bo=s(o,"H2",{class:!0});var Bp=n(bo);fa=s(Bp,"A",{id:!0,class:!0,href:!0});var Ry=n(fa);rc=s(Ry,"SPAN",{});var Xy=n(rc);v(qn.$$.fragment,Xy),Xy.forEach(t),Ry.forEach(t),Sv=d(Bp),ic=s(Bp,"SPAN",{});var Jy=n(ic);Iv=i(Jy,"TFWav2Vec2ForCTC"),Jy.forEach(t),Bp.forEach(t),Ed=d(o),pe=s(o,"DIV",{class:!0});var ot=n(pe);v(Mn.$$.fragment,ot),Nv=d(ot),zn=s(ot,"P",{});var Up=n(zn);Bv=i(Up,"TFWav2Vec2 Model with a "),lc=s(Up,"CODE",{});var Gy=n(lc);Uv=i(Gy,"language modeling"),Gy.forEach(t),Hv=i(Up," head on top for Connectionist Temporal Classification (CTC)."),Up.forEach(t),Rv=d(ot),An=s(ot,"P",{});var Hp=n(An);Xv=i(Hp,"This model inherits from "),mi=s(Hp,"A",{href:!0});var Zy=n(mi);Jv=i(Zy,"TFPreTrainedModel"),Zy.forEach(t),Gv=i(Hp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hp.forEach(t),Zv=d(ot),Dn=s(ot,"P",{});var Rp=n(Dn);Kv=i(Rp,"This model is also a "),On=s(Rp,"A",{href:!0,rel:!0});var Ky=n(On);Qv=i(Ky,"tf.keras.Model"),Ky.forEach(t),Yv=i(Rp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Rp.forEach(t),e2=d(ot),v(ua.$$.fragment,ot),t2=d(ot),Oe=s(ot,"DIV",{class:!0});var Ot=n(Oe);v(Ln.$$.fragment,Ot),o2=d(Ot),yo=s(Ot,"P",{});var Li=n(yo);a2=i(Li,"The "),fi=s(Li,"A",{href:!0});var Qy=n(fi);s2=i(Qy,"TFWav2Vec2ForCTC"),Qy.forEach(t),n2=i(Li," forward method, overrides the "),cc=s(Li,"CODE",{});var Yy=n(cc);r2=i(Yy,"__call__"),Yy.forEach(t),i2=i(Li," special method."),Li.forEach(t),l2=d(Ot),v(ga.$$.fragment,Ot),c2=d(Ot),dc=s(Ot,"P",{});var e0=n(dc);d2=i(e0,"Example:"),e0.forEach(t),p2=d(Ot),v(Sn.$$.fragment,Ot),Ot.forEach(t),ot.forEach(t),Pd=d(o),ko=s(o,"H2",{class:!0});var Xp=n(ko);_a=s(Xp,"A",{id:!0,class:!0,href:!0});var t0=n(_a);pc=s(t0,"SPAN",{});var o0=n(pc);v(In.$$.fragment,o0),o0.forEach(t),t0.forEach(t),h2=d(Xp),hc=s(Xp,"SPAN",{});var a0=n(hc);m2=i(a0,"FlaxWav2Vec2Model"),a0.forEach(t),Xp.forEach(t),Cd=d(o),Z=s(o,"DIV",{class:!0});var Ne=n(Z);v(Nn.$$.fragment,Ne),f2=d(Ne),Bn=s(Ne,"P",{});var Jp=n(Bn);u2=i(Jp,`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Un=s(Jp,"A",{href:!0,rel:!0});var s0=n(Un);g2=i(s0,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),s0.forEach(t),_2=i(Jp," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Jp.forEach(t),v2=d(Ne),Hn=s(Ne,"P",{});var Gp=n(Hn);w2=i(Gp,"This model inherits from "),ui=s(Gp,"A",{href:!0});var n0=n(ui);b2=i(n0,"FlaxPreTrainedModel"),n0.forEach(t),y2=i(Gp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gp.forEach(t),k2=d(Ne),Rn=s(Ne,"P",{});var Zp=n(Rn);T2=i(Zp,"This model is also a Flax Linen "),Xn=s(Zp,"A",{href:!0,rel:!0});var r0=n(Xn);x2=i(r0,"flax.nn.Module"),r0.forEach(t),W2=i(Zp,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Zp.forEach(t),$2=d(Ne),mc=s(Ne,"P",{});var i0=n(mc);V2=i(i0,"Finally, this model supports inherent JAX features such as:"),i0.forEach(t),F2=d(Ne),ut=s(Ne,"UL",{});var Fa=n(ut);fc=s(Fa,"LI",{});var l0=n(fc);Jn=s(l0,"A",{href:!0,rel:!0});var c0=n(Jn);j2=i(c0,"Just-In-Time (JIT) compilation"),c0.forEach(t),l0.forEach(t),E2=d(Fa),uc=s(Fa,"LI",{});var d0=n(uc);Gn=s(d0,"A",{href:!0,rel:!0});var p0=n(Gn);P2=i(p0,"Automatic Differentiation"),p0.forEach(t),d0.forEach(t),C2=d(Fa),gc=s(Fa,"LI",{});var h0=n(gc);Zn=s(h0,"A",{href:!0,rel:!0});var m0=n(Zn);q2=i(m0,"Vectorization"),m0.forEach(t),h0.forEach(t),M2=d(Fa),_c=s(Fa,"LI",{});var f0=n(_c);Kn=s(f0,"A",{href:!0,rel:!0});var u0=n(Kn);z2=i(u0,"Parallelization"),u0.forEach(t),f0.forEach(t),Fa.forEach(t),A2=d(Ne),Le=s(Ne,"DIV",{class:!0});var Lt=n(Le);v(Qn.$$.fragment,Lt),D2=d(Lt),To=s(Lt,"P",{});var Si=n(To);O2=i(Si,"The "),vc=s(Si,"CODE",{});var g0=n(vc);L2=i(g0,"FlaxWav2Vec2PreTrainedModel"),g0.forEach(t),S2=i(Si," forward method, overrides the "),wc=s(Si,"CODE",{});var _0=n(wc);I2=i(_0,"__call__"),_0.forEach(t),N2=i(Si," special method."),Si.forEach(t),B2=d(Lt),v(va.$$.fragment,Lt),U2=d(Lt),bc=s(Lt,"P",{});var v0=n(bc);H2=i(v0,"Example:"),v0.forEach(t),R2=d(Lt),v(Yn.$$.fragment,Lt),Lt.forEach(t),Ne.forEach(t),qd=d(o),xo=s(o,"H2",{class:!0});var Kp=n(xo);wa=s(Kp,"A",{id:!0,class:!0,href:!0});var w0=n(wa);yc=s(w0,"SPAN",{});var b0=n(yc);v(er.$$.fragment,b0),b0.forEach(t),w0.forEach(t),X2=d(Kp),kc=s(Kp,"SPAN",{});var y0=n(kc);J2=i(y0,"FlaxWav2Vec2ForCTC"),y0.forEach(t),Kp.forEach(t),Md=d(o),K=s(o,"DIV",{class:!0});var Be=n(K);v(tr.$$.fragment,Be),G2=d(Be),Wo=s(Be,"P",{});var Ii=n(Wo);Z2=i(Ii,"Wav2Vec2 Model with a "),Tc=s(Ii,"CODE",{});var k0=n(Tc);K2=i(k0,"language modeling"),k0.forEach(t),Q2=i(Ii,` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),or=s(Ii,"A",{href:!0,rel:!0});var T0=n(or);Y2=i(T0,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),T0.forEach(t),ew=i(Ii," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ii.forEach(t),tw=d(Be),ar=s(Be,"P",{});var Qp=n(ar);ow=i(Qp,"This model inherits from "),gi=s(Qp,"A",{href:!0});var x0=n(gi);aw=i(x0,"FlaxPreTrainedModel"),x0.forEach(t),sw=i(Qp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qp.forEach(t),nw=d(Be),sr=s(Be,"P",{});var Yp=n(sr);rw=i(Yp,"This model is also a Flax Linen "),nr=s(Yp,"A",{href:!0,rel:!0});var W0=n(nr);iw=i(W0,"flax.nn.Module"),W0.forEach(t),lw=i(Yp,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Yp.forEach(t),cw=d(Be),xc=s(Be,"P",{});var $0=n(xc);dw=i($0,"Finally, this model supports inherent JAX features such as:"),$0.forEach(t),pw=d(Be),gt=s(Be,"UL",{});var ja=n(gt);Wc=s(ja,"LI",{});var V0=n(Wc);rr=s(V0,"A",{href:!0,rel:!0});var F0=n(rr);hw=i(F0,"Just-In-Time (JIT) compilation"),F0.forEach(t),V0.forEach(t),mw=d(ja),$c=s(ja,"LI",{});var j0=n($c);ir=s(j0,"A",{href:!0,rel:!0});var E0=n(ir);fw=i(E0,"Automatic Differentiation"),E0.forEach(t),j0.forEach(t),uw=d(ja),Vc=s(ja,"LI",{});var P0=n(Vc);lr=s(P0,"A",{href:!0,rel:!0});var C0=n(lr);gw=i(C0,"Vectorization"),C0.forEach(t),P0.forEach(t),_w=d(ja),Fc=s(ja,"LI",{});var q0=n(Fc);cr=s(q0,"A",{href:!0,rel:!0});var M0=n(cr);vw=i(M0,"Parallelization"),M0.forEach(t),q0.forEach(t),ja.forEach(t),ww=d(Be),Se=s(Be,"DIV",{class:!0});var St=n(Se);v(dr.$$.fragment,St),bw=d(St),$o=s(St,"P",{});var Ni=n($o);yw=i(Ni,"The "),jc=s(Ni,"CODE",{});var z0=n(jc);kw=i(z0,"FlaxWav2Vec2PreTrainedModel"),z0.forEach(t),Tw=i(Ni," forward method, overrides the "),Ec=s(Ni,"CODE",{});var A0=n(Ec);xw=i(A0,"__call__"),A0.forEach(t),Ww=i(Ni," special method."),Ni.forEach(t),$w=d(St),v(ba.$$.fragment,St),Vw=d(St),Pc=s(St,"P",{});var D0=n(Pc);Fw=i(D0,"Example:"),D0.forEach(t),jw=d(St),v(pr.$$.fragment,St),St.forEach(t),Be.forEach(t),zd=d(o),Vo=s(o,"H2",{class:!0});var eh=n(Vo);ya=s(eh,"A",{id:!0,class:!0,href:!0});var O0=n(ya);Cc=s(O0,"SPAN",{});var L0=n(Cc);v(hr.$$.fragment,L0),L0.forEach(t),O0.forEach(t),Ew=d(eh),qc=s(eh,"SPAN",{});var S0=n(qc);Pw=i(S0,"FlaxWav2Vec2ForPreTraining"),S0.forEach(t),eh.forEach(t),Ad=d(o),Q=s(o,"DIV",{class:!0});var Ue=n(Q);v(mr.$$.fragment,Ue),Cw=d(Ue),Fo=s(Ue,"P",{});var Bi=n(Fo);qw=i(Bi,"Wav2Vec2 Model with a quantizer and "),Mc=s(Bi,"CODE",{});var I0=n(Mc);Mw=i(I0,"VQ"),I0.forEach(t),zw=i(Bi,` head on top. Wav2Vec2 was proposed in `),fr=s(Bi,"A",{href:!0,rel:!0});var N0=n(fr);Aw=i(N0,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),N0.forEach(t),Dw=i(Bi," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Bi.forEach(t),Ow=d(Ue),ur=s(Ue,"P",{});var th=n(ur);Lw=i(th,"This model inherits from "),_i=s(th,"A",{href:!0});var B0=n(_i);Sw=i(B0,"FlaxPreTrainedModel"),B0.forEach(t),Iw=i(th,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),th.forEach(t),Nw=d(Ue),gr=s(Ue,"P",{});var oh=n(gr);Bw=i(oh,"This model is also a Flax Linen "),_r=s(oh,"A",{href:!0,rel:!0});var U0=n(_r);Uw=i(U0,"flax.nn.Module"),U0.forEach(t),Hw=i(oh,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),oh.forEach(t),Rw=d(Ue),zc=s(Ue,"P",{});var H0=n(zc);Xw=i(H0,"Finally, this model supports inherent JAX features such as:"),H0.forEach(t),Jw=d(Ue),_t=s(Ue,"UL",{});var Ea=n(_t);Ac=s(Ea,"LI",{});var R0=n(Ac);vr=s(R0,"A",{href:!0,rel:!0});var X0=n(vr);Gw=i(X0,"Just-In-Time (JIT) compilation"),X0.forEach(t),R0.forEach(t),Zw=d(Ea),Dc=s(Ea,"LI",{});var J0=n(Dc);wr=s(J0,"A",{href:!0,rel:!0});var G0=n(wr);Kw=i(G0,"Automatic Differentiation"),G0.forEach(t),J0.forEach(t),Qw=d(Ea),Oc=s(Ea,"LI",{});var Z0=n(Oc);br=s(Z0,"A",{href:!0,rel:!0});var K0=n(br);Yw=i(K0,"Vectorization"),K0.forEach(t),Z0.forEach(t),eb=d(Ea),Lc=s(Ea,"LI",{});var Q0=n(Lc);yr=s(Q0,"A",{href:!0,rel:!0});var Y0=n(yr);tb=i(Y0,"Parallelization"),Y0.forEach(t),Q0.forEach(t),Ea.forEach(t),ob=d(Ue),Ie=s(Ue,"DIV",{class:!0});var It=n(Ie);v(kr.$$.fragment,It),ab=d(It),jo=s(It,"P",{});var Ui=n(jo);sb=i(Ui,"The "),vi=s(Ui,"A",{href:!0});var ek=n(vi);nb=i(ek,"FlaxWav2Vec2ForPreTraining"),ek.forEach(t),rb=i(Ui," forward method, overrides the "),Sc=s(Ui,"CODE",{});var tk=n(Sc);ib=i(tk,"__call__"),tk.forEach(t),lb=i(Ui," special method."),Ui.forEach(t),cb=d(It),v(ka.$$.fragment,It),db=d(It),Ic=s(It,"P",{});var ok=n(Ic);pb=i(ok,"Example:"),ok.forEach(t),hb=d(It),v(Tr.$$.fragment,It),It.forEach(t),Ue.forEach(t),this.h()},h(){l(p,"name","hf:doc:metadata"),l(p,"content",JSON.stringify($k)),l(T,"id","wav2vec2"),l(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(T,"href","#wav2vec2"),l(f,"class","relative group"),l(A,"id","overview"),l(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(A,"href","#overview"),l(j,"class","relative group"),l(fe,"href","https://arxiv.org/abs/2006.11477"),l(fe,"rel","nofollow"),l(Fe,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),l(Je,"href","https://huggingface.co/patrickvonplaten"),l(Je,"rel","nofollow"),l(je,"id","transformers.Wav2Vec2Config"),l(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(je,"href","#transformers.Wav2Vec2Config"),l(we,"class","relative group"),l(jr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Model"),l(Pa,"href","https://huggingface.co/facebook/wav2vec2-base-960h"),l(Pa,"rel","nofollow"),l(Er,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Pr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(R,"class","docstring"),l(Eo,"id","transformers.Wav2Vec2CTCTokenizer"),l(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Eo,"href","#transformers.Wav2Vec2CTCTokenizer"),l(Xt,"class","relative group"),l(Cr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(Po,"class","docstring"),l(Zi,"class","docstring"),l(ke,"class","docstring"),l(Co,"id","transformers.Wav2Vec2FeatureExtractor"),l(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Co,"href","#transformers.Wav2Vec2FeatureExtractor"),l(Jt,"class","relative group"),l(qr,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor"),l(qo,"class","docstring"),l(Ge,"class","docstring"),l(Mo,"id","transformers.Wav2Vec2Processor"),l(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Mo,"href","#transformers.Wav2Vec2Processor"),l(Gt,"class","relative group"),l(Mr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),l(zr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),l(Ar,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(Ba,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__"),l(Dr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.decode"),l(Ha,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor.__call__"),l(Or,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.as_target_processor"),l(Ra,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),l(zo,"class","docstring"),l(Lr,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor.pad"),l(Sr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.as_target_processor"),l(Ir,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.pad"),l(Ao,"class","docstring"),l(Nr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),l(yt,"class","docstring"),l(Br,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.from_pretrained"),l(kt,"class","docstring"),l(Ur,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),l(Lo,"class","docstring"),l(Hr,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),l(So,"class","docstring"),l(Io,"class","docstring"),l(L,"class","docstring"),l(No,"id","transformers.Wav2Vec2ProcessorWithLM"),l(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(No,"href","#transformers.Wav2Vec2ProcessorWithLM"),l(Kt,"class","relative 
group"),l(ns,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor.__call__"),l(Rr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM.as_target_processor"),l(rs,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),l(Bo,"class","docstring"),l(Xr,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor.pad"),l(Jr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM.as_target_processor"),l(Gr,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.pad"),l(Uo,"class","docstring"),l(Zr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM"),l(Tt,"class","docstring"),l(Kr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM.from_pretrained"),l(xt,"class","docstring"),l(Wt,"class","docstring"),l(Jo,"class","docstring"),l(Go,"class","docstring"),l(U,"class","docstring"),l(Zo,"id","transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput"),l(Zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Zo,"href","#transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput"),l(Yt,"class","relative group"),l(eo,"class","docstring"),l(to,"class","docstring"),l(Qr,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForPreTraining"),l(oo,"class","docstring"),l(Ko,"class","docstring"),l(mt,"class","docstring"),l(Qo,"class","docstring"),l(ft,"class","docstring"),l(Yo,"id","transformers.Wav2Vec2Model"),l(Yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Yo,"href","#transformers.Wav2Vec2Model"),l(ao,"class","relative group"),l(Es,"href","https://arxiv.org/abs/2006.11477"),l(Es,"rel","nofollow"),l(Yr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(qs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(qs,"rel","nofollow"),l(ei,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Model"),l(Pe,"class","docstring"),l(Te,"class","docstring"),l(ta,"id","transformers.Wav2Vec2ForCTC"),l(ta,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ta,"href","#transformers.Wav2Vec2ForCTC"),l(no,"class","relative group"),l(Os,"href","https://arxiv.org/abs/2006.11477"),l(Os,"rel","nofollow"),l(ti,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Is,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Is,"rel","nofollow"),l(oi,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC"),l(Ce,"class","docstring"),l(xe,"class","docstring"),l(aa,"id","transformers.Wav2Vec2ForSequenceClassification"),l(aa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(aa,"href","#transformers.Wav2Vec2ForSequenceClassification"),l(lo,"class","relative group"),l(Xs,"href","https://arxiv.org/abs/2006.11477"),l(Xs,"rel","nofollow"),l(ai,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Zs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Zs,"rel","nofollow"),l(si,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification"),l(qe,"class","docstring"),l(ie,"class","docstring"),l(na,"id","transformers.Wav2Vec2ForAudioFrameClassification"),l(na,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(na,"href","#transformers.Wav2Vec2ForAudioFrameClassification"),l(po,"class","relative group"),l(on,"href","https://arxiv.org/abs/2006.11477"),l(on,"rel","nofollow"),l(ni,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(nn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(nn,"rel","nofollow"),l(ri,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForAudioFrameClassification"),l(Me,"class","docstring"),l(le,"class","docstring"),l(ia,"id","transformers.Wav2Vec2ForXVector"),l(ia,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ia,"href","#transformers.Wav2Vec2ForXVector"),l(mo,"class","relative group"),l(hn,"href","https://arxiv.org/abs/2006.11477"),l(hn,"rel","nofollow"),l(ii,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(un,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(un,"rel","nofollow"),l(li,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForXVector"),l(ze,"class","docstring"),l(ce,"class","docstring"),l(ca,"id","transformers.Wav2Vec2ForPreTraining"),l(ca,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ca,"href","#transformers.Wav2Vec2ForPreTraining"),l(uo,"class","relative group"),l(bn,"href","https://arxiv.org/abs/2006.11477"),l(bn,"rel","nofollow"),l(ci,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Tn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Tn,"rel","nofollow"),l(di,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForPreTraining"),l(Ae,"class","docstring"),l(We,"class","docstring"),l(pa,"id","transformers.TFWav2Vec2Model"),l(pa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(pa,"href","#transformers.TFWav2Vec2Model"),l(vo,"class","relative group"),l(pi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),l(En,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(En,"rel","nofollow"),l(hi,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.TFWav2Vec2Model"),l(De,"class","docstring"),l(de,"class","docstring"),l(fa,"id","transformers.TFWav2Vec2ForCTC"),l(fa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),l(fa,"href","#transformers.TFWav2Vec2ForCTC"),l(bo,"class","relative group"),l(mi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),l(On,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(On,"rel","nofollow"),l(fi,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.TFWav2Vec2ForCTC"),l(Oe,"class","docstring"),l(pe,"class","docstring"),l(_a,"id","transformers.FlaxWav2Vec2Model"),l(_a,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_a,"href","#transformers.FlaxWav2Vec2Model"),l(ko,"class","relative group"),l(Un,"href","https://arxiv.org/abs/2006.11477"),l(Un,"rel","nofollow"),l(ui,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(Xn,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(Xn,"rel","nofollow"),l(Jn,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(Jn,"rel","nofollow"),l(Gn,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Gn,"rel","nofollow"),l(Zn,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Zn,"rel","nofollow"),l(Kn,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(Kn,"rel","nofollow"),l(Le,"class","docstring"),l(Z,"class","docstring"),l(wa,"id","transformers.FlaxWav2Vec2ForCTC"),l(wa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(wa,"href","#transformers.FlaxWav2Vec2ForCTC"),l(xo,"class","relative group"),l(or,"href","https://arxiv.org/abs/2006.11477"),l(or,"rel","nofollow"),l(gi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(nr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(nr,"rel","nofollow"),l(rr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(rr,"rel","nofollow"),l(ir,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(ir,"rel","nofollow"),l(lr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(lr,"rel","nofollow"),l(cr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(cr,"rel","nofollow"),l(Se,"class","docstring"),l(K,"class","docstring"),l(ya,"id","transformers.FlaxWav2Vec2ForPreTraining"),l(ya,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ya,"href","#transformers.FlaxWav2Vec2ForPreTraining"),l(Vo,"class","relative 
group"),l(fr,"href","https://arxiv.org/abs/2006.11477"),l(fr,"rel","nofollow"),l(_i,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(_r,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(_r,"rel","nofollow"),l(vr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(vr,"rel","nofollow"),l(wr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(wr,"rel","nofollow"),l(br,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(br,"rel","nofollow"),l(yr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(yr,"rel","nofollow"),l(vi,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.FlaxWav2Vec2ForPreTraining"),l(Ie,"class","docstring"),l(Q,"class","docstring")},m(o,h){e(document.head,p),m(o,x,h),m(o,f,h),e(f,T),e(T,W),w(u,W,null),e(f,g),e(f,$),e($,q),m(o,P,h),m(o,j,h),e(j,A),e(A,D),w(N,D,null),e(j,M),e(j,E),e(E,ae),m(o,ee,h),m(o,S,h),e(S,B),e(S,fe),e(fe,He),e(S,O),m(o,H,h),m(o,$e,h),e($e,ue),m(o,Ze,h),m(o,Ve,h),e(Ve,se),e(se,st),m(o,Ke,h),m(o,I,h),e(I,nt),m(o,he,h),m(o,me,h),e(me,Re),e(Re,ge),e(me,rt),e(me,_e),e(_e,ne),e(_e,Fe),e(Fe,ve),e(_e,it),m(o,V,h),m(o,z,h),e(z,Xe),e(z,Je),e(Je,Nt),e(z,re),m(o,wt,h),m(o,we,h),e(we,je),e(je,J),w(G,J,null),e(we,Bt),e(we,lt),e(lt,be),m(o,bt,h),m(o,R,h),w(ye,R,null),e(R,Ut),e(R,Ht),e(Ht,ah),e(Ht,jr),e(jr,sh),e(Ht,nh),e(Ht,Pa),e(Pa,rh),e(Ht,ih),e(R,lh),e(R,Rt),e(Rt,ch),e(Rt,Er),e(Er,dh),e(Rt,ph),e(Rt,Pr),e(Pr,hh),e(Rt,mh),e(R,fh),e(R,Hi),e(Hi,uh),e(R,gh),w(Ca,R,null),m(o,td,h),m(o,Xt,h),e(Xt,Eo),e(Eo,Ri),w(qa,Ri,null),e(Xt,_h),e(Xt,Xi),e(Xi,vh),m(o,od,h),m(o,ke,h),w(Ma,ke,null),e(ke,wh),e(ke,Ji),e(Ji,bh),e(ke,yh),e(ke,za),e(za,kh),e(za,Cr),e(Cr,Th),e(za,xh),e(ke,Wh),e(ke,Po),w(Aa,Po,null),e(Po,$h),e(Po,Gi),e(Gi,Vh),e(ke,Fh),e(ke,Zi),m(o,ad,h),m(o,Jt,h),e(Jt,Co),e(Co,Ki),w(Da,Ki,null),e(Jt,jh),e(Jt,Qi),e(Qi,Eh),m(o,sd,h),m(o,Ge,h),w(Oa,Ge,null),e(Ge,Ph),e(Ge,Yi),e(Yi,Ch),e(Ge,qh),e(Ge,La),e(La,Mh),e(La,qr),e(qr,zh),e(La,Ah),e(Ge,Dh),e(Ge,qo),w(Sa,qo,null),e(qo,Oh),e(qo,el),e(el,Lh),m(o,nd,h),m(o,Gt,h),e(Gt,Mo),e(Mo,tl),w(Ia,tl,null),e(Gt,Sh),e(Gt,ol),e(ol,Ih),m(o,rd,h),m(o,L,h),w(Na,L,null),e(L,Nh),e(L,al),e(al,Bh),e(L,Uh),e(L,Ee),e(Ee,Mr),e(Mr,Hh),e(Ee,Rh),e(Ee,zr),e(zr,Xh),e(Ee,Jh),e(Ee,Ar),e(Ar,Gh),e(Ee,Zh),e(Ee,Ba),e(Ba,sl),e(sl,Kh),e(Ba,Qh),e(Ee,Yh),e(Ee,Dr),e(Dr,em),e(Ee,tm),e(L,om),e(L,zo),w(Ua,zo,null),e(zo,am),e(zo,ct),e(ct,sm),e(ct,Ha),e(Ha,nl),e(nl,nm),e(Ha,rm),e(ct,im),e(ct,Or),e(Or,lm),e(ct,cm),e(ct,Ra),e(Ra,rl),e(rl,dm),e(Ra,pm),e(ct,hm),e(L,mm),e(L,Ao),w(Xa,Ao,null),e(Ao,fm),e(Ao,dt),e(dt,um),e(dt,Lr),e(Lr,gm),e(dt,_m),e(dt,Sr),e(Sr,vm),e(dt,wm),e(dt,Ir),e(Ir,bm),e(dt,ym),e(L,km),e(L,yt),w(Ja,yt,null),e(yt,Tm),e(yt,Ga),e(Ga,xm),e(Ga,Nr),e(Nr,Wm),e(Ga,$m),e(yt,Vm),w(Do,yt,null),e(L,Fm),e(L,kt),w(Za,kt,null),e(kt,jm),e(kt,Zt),e(Zt,Em),e(Zt,il),e(il,Pm),e(Zt,Cm),e(Zt,Br),e(Br,qm),e(Zt,Mm),e(kt,zm),w(Oo,kt,null),e(L,Am),e(L,Lo),w(Ka,Lo,null),e(Lo,Dm),e(Lo,Qa),e(Qa,Om),e(Qa,Ur),e(Ur,Lm),e(Qa,Sm),e(L,Im),e(L,So),w(Ya,So,null),e(So,Nm),e(So,es),e(es,Bm),e(es,Hr),e(Hr,Um),e(es,Hm),e(L,Rm),e(L,Io),w(ts,Io,null),e(Io,Xm),e(Io,ll),e(ll,Jm),m(o,id,h),m(o,Kt,h),e(Kt,No),e(No,cl),w(os,cl,null),e(Kt,Gm),e(Kt,dl),e(dl,Zm),m(o,ld,h),m(o,U,h),w(as,U,null),e(U,Km),e(U,pl),e(pl,Qm),e(U,Ym),e(U,Bo),w(ss,Bo,null),e(Bo,ef),e(Bo,pt),e(pt,tf),e(pt,ns),e(ns,hl),e(hl,of),e(ns,af),e(pt,sf),e(pt,Rr),e(Rr,nf),e(pt,rf),e(pt,rs),e(rs,ml),e(ml,lf),e(rs,cf),e(pt,df),e(U,pf),e(U
,Uo),w(is,Uo,null),e(Uo,hf),e(Uo,ht),e(ht,mf),e(ht,Xr),e(Xr,ff),e(ht,uf),e(ht,Jr),e(Jr,gf),e(ht,_f),e(ht,Gr),e(Gr,vf),e(ht,wf),e(U,bf),e(U,Tt),w(ls,Tt,null),e(Tt,yf),e(Tt,cs),e(cs,kf),e(cs,Zr),e(Zr,Tf),e(cs,xf),e(Tt,Wf),w(Ho,Tt,null),e(U,$f),e(U,xt),w(ds,xt,null),e(xt,Vf),e(xt,Qt),e(Qt,Ff),e(Qt,fl),e(fl,jf),e(Qt,Ef),e(Qt,Kr),e(Kr,Pf),e(Qt,Cf),e(xt,qf),w(Ro,xt,null),e(U,Mf),e(U,Wt),w(ps,Wt,null),e(Wt,zf),e(Wt,ul),e(ul,Af),e(Wt,Df),w(Xo,Wt,null),e(U,Of),e(U,Jo),w(hs,Jo,null),e(Jo,Lf),e(Jo,gl),e(gl,Sf),e(U,If),e(U,Go),w(ms,Go,null),e(Go,Nf),e(Go,_l),e(_l,Bf),m(o,cd,h),m(o,Yt,h),e(Yt,Zo),e(Zo,vl),w(fs,vl,null),e(Yt,Uf),e(Yt,wl),e(wl,Hf),m(o,dd,h),m(o,eo,h),w(us,eo,null),e(eo,Rf),e(eo,gs),e(gs,Xf),e(gs,bl),e(bl,Jf),e(gs,Gf),m(o,pd,h),m(o,to,h),w(_s,to,null),e(to,Zf),e(to,vs),e(vs,Kf),e(vs,yl),e(yl,Qf),e(vs,Yf),m(o,hd,h),m(o,oo,h),w(ws,oo,null),e(oo,eu),e(oo,bs),e(bs,tu),e(bs,Qr),e(Qr,ou),e(bs,au),m(o,md,h),m(o,mt,h),w(ys,mt,null),e(mt,su),e(mt,ks),e(ks,nu),e(ks,kl),e(kl,ru),e(ks,iu),e(mt,lu),e(mt,Ko),w(Ts,Ko,null),e(Ko,cu),e(Ko,Tl),e(Tl,du),m(o,fd,h),m(o,ft,h),w(xs,ft,null),e(ft,pu),e(ft,Ws),e(Ws,hu),e(Ws,xl),e(xl,mu),e(Ws,fu),e(ft,uu),e(ft,Qo),w($s,Qo,null),e(Qo,gu),e(Qo,Wl),e(Wl,_u),m(o,ud,h),m(o,ao,h),e(ao,Yo),e(Yo,$l),w(Vs,$l,null),e(ao,vu),e(ao,Vl),e(Vl,wu),m(o,gd,h),m(o,Te,h),w(Fs,Te,null),e(Te,bu),e(Te,js),e(js,yu),e(js,Es),e(Es,ku),e(js,Tu),e(Te,xu),e(Te,Ps),e(Ps,Wu),e(Ps,Yr),e(Yr,$u),e(Ps,Vu),e(Te,Fu),e(Te,Cs),e(Cs,ju),e(Cs,qs),e(qs,Eu),e(Cs,Pu),e(Te,Cu),e(Te,Pe),w(Ms,Pe,null),e(Pe,qu),e(Pe,so),e(so,Mu),e(so,ei),e(ei,zu),e(so,Au),e(so,Fl),e(Fl,Du),e(so,Ou),e(Pe,Lu),w(ea,Pe,null),e(Pe,Su),e(Pe,jl),e(jl,Iu),e(Pe,Nu),w(zs,Pe,null),m(o,_d,h),m(o,no,h),e(no,ta),e(ta,El),w(As,El,null),e(no,Bu),e(no,Pl),e(Pl,Uu),m(o,vd,h),m(o,xe,h),w(Ds,xe,null),e(xe,Hu),e(xe,ro),e(ro,Ru),e(ro,Cl),e(Cl,Xu),e(ro,Ju),e(ro,Os),e(Os,Gu),e(ro,Zu),e(xe,Ku),e(xe,Ls),e(Ls,Qu),e(Ls,ti),e(ti,Yu),e(Ls,eg),e(xe,tg),e(xe,Ss),e(Ss,og),e(Ss,Is),e(Is,ag),e(Ss,sg),e(xe,ng),e(xe,Ce),w(Ns,Ce,null),e(Ce,rg),e(Ce,io),e(io,ig),e(io,oi),e(oi,lg),e(io,cg),e(io,ql),e(ql,dg),e(io,pg),e(Ce,hg),w(oa,Ce,null),e(Ce,mg),e(Ce,Ml),e(Ml,fg),e(Ce,ug),w(Bs,Ce,null),m(o,wd,h),m(o,lo,h),e(lo,aa),e(aa,zl),w(Us,zl,null),e(lo,gg),e(lo,Al),e(Al,_g),m(o,bd,h),m(o,ie,h),w(Hs,ie,null),e(ie,vg),e(ie,Dl),e(Dl,wg),e(ie,bg),e(ie,Rs),e(Rs,yg),e(Rs,Xs),e(Xs,kg),e(Rs,Tg),e(ie,xg),e(ie,Js),e(Js,Wg),e(Js,ai),e(ai,$g),e(Js,Vg),e(ie,Fg),e(ie,Gs),e(Gs,jg),e(Gs,Zs),e(Zs,Eg),e(Gs,Pg),e(ie,Cg),e(ie,qe),w(Ks,qe,null),e(qe,qg),e(qe,co),e(co,Mg),e(co,si),e(si,zg),e(co,Ag),e(co,Ol),e(Ol,Dg),e(co,Og),e(qe,Lg),w(sa,qe,null),e(qe,Sg),e(qe,Ll),e(Ll,Ig),e(qe,Ng),w(Qs,qe,null),m(o,yd,h),m(o,po,h),e(po,na),e(na,Sl),w(Ys,Sl,null),e(po,Bg),e(po,Il),e(Il,Ug),m(o,kd,h),m(o,le,h),w(en,le,null),e(le,Hg),e(le,Nl),e(Nl,Rg),e(le,Xg),e(le,tn),e(tn,Jg),e(tn,on),e(on,Gg),e(tn,Zg),e(le,Kg),e(le,an),e(an,Qg),e(an,ni),e(ni,Yg),e(an,e_),e(le,t_),e(le,sn),e(sn,o_),e(sn,nn),e(nn,a_),e(sn,s_),e(le,n_),e(le,Me),w(rn,Me,null),e(Me,r_),e(Me,ho),e(ho,i_),e(ho,ri),e(ri,l_),e(ho,c_),e(ho,Bl),e(Bl,d_),e(ho,p_),e(Me,h_),w(ra,Me,null),e(Me,m_),e(Me,Ul),e(Ul,f_),e(Me,u_),w(ln,Me,null),m(o,Td,h),m(o,mo,h),e(mo,ia),e(ia,Hl),w(cn,Hl,null),e(mo,g_),e(mo,Rl),e(Rl,__),m(o,xd,h),m(o,ce,h),w(dn,ce,null),e(ce,v_),e(ce,Xl),e(Xl,w_),e(ce,b_),e(ce,pn),e(pn,y_),e(pn,hn),e(hn,k_),e(pn,T_),e(ce,x_),e(ce,mn),e(mn,W_),e(mn,ii),e(ii,$_),e(mn,V_),e(ce,F_),e(ce,fn),e(fn,j_),e(fn,un),e(un,E_),e(fn,P_),e(ce,C_),e(ce,ze),w(gn,ze,null),e(ze,q_),e(ze,fo),e(fo,M_),e(fo,li),e(li,z_),e(fo,A_),e(fo,Jl),e(Jl,D_),e(fo,O_),e(ze,L_),w(la,ze
,null),e(ze,S_),e(ze,Gl),e(Gl,I_),e(ze,N_),w(_n,ze,null),m(o,Wd,h),m(o,uo,h),e(uo,ca),e(ca,Zl),w(vn,Zl,null),e(uo,B_),e(uo,Kl),e(Kl,U_),m(o,$d,h),m(o,We,h),w(wn,We,null),e(We,H_),e(We,go),e(go,R_),e(go,Ql),e(Ql,X_),e(go,J_),e(go,bn),e(bn,G_),e(go,Z_),e(We,K_),e(We,yn),e(yn,Q_),e(yn,ci),e(ci,Y_),e(yn,ev),e(We,tv),e(We,kn),e(kn,ov),e(kn,Tn),e(Tn,av),e(kn,sv),e(We,nv),e(We,Ae),w(xn,Ae,null),e(Ae,rv),e(Ae,_o),e(_o,iv),e(_o,di),e(di,lv),e(_o,cv),e(_o,Yl),e(Yl,dv),e(_o,pv),e(Ae,hv),w(da,Ae,null),e(Ae,mv),e(Ae,ec),e(ec,fv),e(Ae,uv),w(Wn,Ae,null),m(o,Vd,h),m(o,vo,h),e(vo,pa),e(pa,tc),w($n,tc,null),e(vo,gv),e(vo,oc),e(oc,_v),m(o,Fd,h),m(o,de,h),w(Vn,de,null),e(de,vv),e(de,ac),e(ac,wv),e(de,bv),e(de,Fn),e(Fn,yv),e(Fn,pi),e(pi,kv),e(Fn,Tv),e(de,xv),e(de,jn),e(jn,Wv),e(jn,En),e(En,$v),e(jn,Vv),e(de,Fv),w(ha,de,null),e(de,jv),e(de,De),w(Pn,De,null),e(De,Ev),e(De,wo),e(wo,Pv),e(wo,hi),e(hi,Cv),e(wo,qv),e(wo,sc),e(sc,Mv),e(wo,zv),e(De,Av),w(ma,De,null),e(De,Dv),e(De,nc),e(nc,Ov),e(De,Lv),w(Cn,De,null),m(o,jd,h),m(o,bo,h),e(bo,fa),e(fa,rc),w(qn,rc,null),e(bo,Sv),e(bo,ic),e(ic,Iv),m(o,Ed,h),m(o,pe,h),w(Mn,pe,null),e(pe,Nv),e(pe,zn),e(zn,Bv),e(zn,lc),e(lc,Uv),e(zn,Hv),e(pe,Rv),e(pe,An),e(An,Xv),e(An,mi),e(mi,Jv),e(An,Gv),e(pe,Zv),e(pe,Dn),e(Dn,Kv),e(Dn,On),e(On,Qv),e(Dn,Yv),e(pe,e2),w(ua,pe,null),e(pe,t2),e(pe,Oe),w(Ln,Oe,null),e(Oe,o2),e(Oe,yo),e(yo,a2),e(yo,fi),e(fi,s2),e(yo,n2),e(yo,cc),e(cc,r2),e(yo,i2),e(Oe,l2),w(ga,Oe,null),e(Oe,c2),e(Oe,dc),e(dc,d2),e(Oe,p2),w(Sn,Oe,null),m(o,Pd,h),m(o,ko,h),e(ko,_a),e(_a,pc),w(In,pc,null),e(ko,h2),e(ko,hc),e(hc,m2),m(o,Cd,h),m(o,Z,h),w(Nn,Z,null),e(Z,f2),e(Z,Bn),e(Bn,u2),e(Bn,Un),e(Un,g2),e(Bn,_2),e(Z,v2),e(Z,Hn),e(Hn,w2),e(Hn,ui),e(ui,b2),e(Hn,y2),e(Z,k2),e(Z,Rn),e(Rn,T2),e(Rn,Xn),e(Xn,x2),e(Rn,W2),e(Z,$2),e(Z,mc),e(mc,V2),e(Z,F2),e(Z,ut),e(ut,fc),e(fc,Jn),e(Jn,j2),e(ut,E2),e(ut,uc),e(uc,Gn),e(Gn,P2),e(ut,C2),e(ut,gc),e(gc,Zn),e(Zn,q2),e(ut,M2),e(ut,_c),e(_c,Kn),e(Kn,z2),e(Z,A2),e(Z,Le),w(Qn,Le,null),e(Le,D2),e(Le,To),e(To,O2),e(To,vc),e(vc,L2),e(To,S2),e(To,wc),e(wc,I2),e(To,N2),e(Le,B2),w(va,Le,null),e(Le,U2),e(Le,bc),e(bc,H2),e(Le,R2),w(Yn,Le,null),m(o,qd,h),m(o,xo,h),e(xo,wa),e(wa,yc),w(er,yc,null),e(xo,X2),e(xo,kc),e(kc,J2),m(o,Md,h),m(o,K,h),w(tr,K,null),e(K,G2),e(K,Wo),e(Wo,Z2),e(Wo,Tc),e(Tc,K2),e(Wo,Q2),e(Wo,or),e(or,Y2),e(Wo,ew),e(K,tw),e(K,ar),e(ar,ow),e(ar,gi),e(gi,aw),e(ar,sw),e(K,nw),e(K,sr),e(sr,rw),e(sr,nr),e(nr,iw),e(sr,lw),e(K,cw),e(K,xc),e(xc,dw),e(K,pw),e(K,gt),e(gt,Wc),e(Wc,rr),e(rr,hw),e(gt,mw),e(gt,$c),e($c,ir),e(ir,fw),e(gt,uw),e(gt,Vc),e(Vc,lr),e(lr,gw),e(gt,_w),e(gt,Fc),e(Fc,cr),e(cr,vw),e(K,ww),e(K,Se),w(dr,Se,null),e(Se,bw),e(Se,$o),e($o,yw),e($o,jc),e(jc,kw),e($o,Tw),e($o,Ec),e(Ec,xw),e($o,Ww),e(Se,$w),w(ba,Se,null),e(Se,Vw),e(Se,Pc),e(Pc,Fw),e(Se,jw),w(pr,Se,null),m(o,zd,h),m(o,Vo,h),e(Vo,ya),e(ya,Cc),w(hr,Cc,null),e(Vo,Ew),e(Vo,qc),e(qc,Pw),m(o,Ad,h),m(o,Q,h),w(mr,Q,null),e(Q,Cw),e(Q,Fo),e(Fo,qw),e(Fo,Mc),e(Mc,Mw),e(Fo,zw),e(Fo,fr),e(fr,Aw),e(Fo,Dw),e(Q,Ow),e(Q,ur),e(ur,Lw),e(ur,_i),e(_i,Sw),e(ur,Iw),e(Q,Nw),e(Q,gr),e(gr,Bw),e(gr,_r),e(_r,Uw),e(gr,Hw),e(Q,Rw),e(Q,zc),e(zc,Xw),e(Q,Jw),e(Q,_t),e(_t,Ac),e(Ac,vr),e(vr,Gw),e(_t,Zw),e(_t,Dc),e(Dc,wr),e(wr,Kw),e(_t,Qw),e(_t,Oc),e(Oc,br),e(br,Yw),e(_t,eb),e(_t,Lc),e(Lc,yr),e(yr,tb),e(Q,ob),e(Q,Ie),w(kr,Ie,null),e(Ie,ab),e(Ie,jo),e(jo,sb),e(jo,vi),e(vi,nb),e(jo,rb),e(jo,Sc),e(Sc,ib),e(jo,lb),e(Ie,cb),w(ka,Ie,null),e(Ie,db),e(Ie,Ic),e(Ic,pb),e(Ie,hb),w(Tr,Ie,null),Dd=!0},p(o,[h]){const xr={};h&2&&(xr.$$scope={dirty:h,ctx:o}),Do.$set(xr);const 
Nc={};h&2&&(Nc.$$scope={dirty:h,ctx:o}),Oo.$set(Nc);const Bc={};h&2&&(Bc.$$scope={dirty:h,ctx:o}),Ho.$set(Bc);const Uc={};h&2&&(Uc.$$scope={dirty:h,ctx:o}),Ro.$set(Uc);const Wr={};h&2&&(Wr.$$scope={dirty:h,ctx:o}),Xo.$set(Wr);const Hc={};h&2&&(Hc.$$scope={dirty:h,ctx:o}),ea.$set(Hc);const Rc={};h&2&&(Rc.$$scope={dirty:h,ctx:o}),oa.$set(Rc);const Xc={};h&2&&(Xc.$$scope={dirty:h,ctx:o}),sa.$set(Xc);const $r={};h&2&&($r.$$scope={dirty:h,ctx:o}),ra.$set($r);const Jc={};h&2&&(Jc.$$scope={dirty:h,ctx:o}),la.$set(Jc);const Gc={};h&2&&(Gc.$$scope={dirty:h,ctx:o}),da.$set(Gc);const Zc={};h&2&&(Zc.$$scope={dirty:h,ctx:o}),ha.$set(Zc);const Kc={};h&2&&(Kc.$$scope={dirty:h,ctx:o}),ma.$set(Kc);const Qc={};h&2&&(Qc.$$scope={dirty:h,ctx:o}),ua.$set(Qc);const Vr={};h&2&&(Vr.$$scope={dirty:h,ctx:o}),ga.$set(Vr);const Yc={};h&2&&(Yc.$$scope={dirty:h,ctx:o}),va.$set(Yc);const Fr={};h&2&&(Fr.$$scope={dirty:h,ctx:o}),ba.$set(Fr);const ed={};h&2&&(ed.$$scope={dirty:h,ctx:o}),ka.$set(ed)},i(o){Dd||(b(u.$$.fragment,o),b(N.$$.fragment,o),b(G.$$.fragment,o),b(ye.$$.fragment,o),b(Ca.$$.fragment,o),b(qa.$$.fragment,o),b(Ma.$$.fragment,o),b(Aa.$$.fragment,o),b(Da.$$.fragment,o),b(Oa.$$.fragment,o),b(Sa.$$.fragment,o),b(Ia.$$.fragment,o),b(Na.$$.fragment,o),b(Ua.$$.fragment,o),b(Xa.$$.fragment,o),b(Ja.$$.fragment,o),b(Do.$$.fragment,o),b(Za.$$.fragment,o),b(Oo.$$.fragment,o),b(Ka.$$.fragment,o),b(Ya.$$.fragment,o),b(ts.$$.fragment,o),b(os.$$.fragment,o),b(as.$$.fragment,o),b(ss.$$.fragment,o),b(is.$$.fragment,o),b(ls.$$.fragment,o),b(Ho.$$.fragment,o),b(ds.$$.fragment,o),b(Ro.$$.fragment,o),b(ps.$$.fragment,o),b(Xo.$$.fragment,o),b(hs.$$.fragment,o),b(ms.$$.fragment,o),b(fs.$$.fragment,o),b(us.$$.fragment,o),b(_s.$$.fragment,o),b(ws.$$.fragment,o),b(ys.$$.fragment,o),b(Ts.$$.fragment,o),b(xs.$$.fragment,o),b($s.$$.fragment,o),b(Vs.$$.fragment,o),b(Fs.$$.fragment,o),b(Ms.$$.fragment,o),b(ea.$$.fragment,o),b(zs.$$.fragment,o),b(As.$$.fragment,o),b(Ds.$$.fragment,o),b(Ns.$$.fragment,o),b(oa.$$.fragment,o),b(Bs.$$.fragment,o),b(Us.$$.fragment,o),b(Hs.$$.fragment,o),b(Ks.$$.fragment,o),b(sa.$$.fragment,o),b(Qs.$$.fragment,o),b(Ys.$$.fragment,o),b(en.$$.fragment,o),b(rn.$$.fragment,o),b(ra.$$.fragment,o),b(ln.$$.fragment,o),b(cn.$$.fragment,o),b(dn.$$.fragment,o),b(gn.$$.fragment,o),b(la.$$.fragment,o),b(_n.$$.fragment,o),b(vn.$$.fragment,o),b(wn.$$.fragment,o),b(xn.$$.fragment,o),b(da.$$.fragment,o),b(Wn.$$.fragment,o),b($n.$$.fragment,o),b(Vn.$$.fragment,o),b(ha.$$.fragment,o),b(Pn.$$.fragment,o),b(ma.$$.fragment,o),b(Cn.$$.fragment,o),b(qn.$$.fragment,o),b(Mn.$$.fragment,o),b(ua.$$.fragment,o),b(Ln.$$.fragment,o),b(ga.$$.fragment,o),b(Sn.$$.fragment,o),b(In.$$.fragment,o),b(Nn.$$.fragment,o),b(Qn.$$.fragment,o),b(va.$$.fragment,o),b(Yn.$$.fragment,o),b(er.$$.fragment,o),b(tr.$$.fragment,o),b(dr.$$.fragment,o),b(ba.$$.fragment,o),b(pr.$$.fragment,o),b(hr.$$.fragment,o),b(mr.$$.fragment,o),b(kr.$$.fragment,o),b(ka.$$.fragment,o),b(Tr.$$.fragment,o),Dd=!0)},o(o){y(u.$$.fragment,o),y(N.$$.fragment,o),y(G.$$.fragment,o),y(ye.$$.fragment,o),y(Ca.$$.fragment,o),y(qa.$$.fragment,o),y(Ma.$$.fragment,o),y(Aa.$$.fragment,o),y(Da.$$.fragment,o),y(Oa.$$.fragment,o),y(Sa.$$.fragment,o),y(Ia.$$.fragment,o),y(Na.$$.fragment,o),y(Ua.$$.fragment,o),y(Xa.$$.fragment,o),y(Ja.$$.fragment,o),y(Do.$$.fragment,o),y(Za.$$.fragment,o),y(Oo.$$.fragment,o),y(Ka.$$.fragment,o),y(Ya.$$.fragment,o),y(ts.$$.fragment,o),y(os.$$.fragment,o),y(as.$$.fragment,o),y(ss.$$.fragment,o),y(is.$$.fragment,o),y(ls.$$.fragment,o),y(Ho.$$.fragment,o),y(ds.$$.fragment,
o),y(Ro.$$.fragment,o),y(ps.$$.fragment,o),y(Xo.$$.fragment,o),y(hs.$$.fragment,o),y(ms.$$.fragment,o),y(fs.$$.fragment,o),y(us.$$.fragment,o),y(_s.$$.fragment,o),y(ws.$$.fragment,o),y(ys.$$.fragment,o),y(Ts.$$.fragment,o),y(xs.$$.fragment,o),y($s.$$.fragment,o),y(Vs.$$.fragment,o),y(Fs.$$.fragment,o),y(Ms.$$.fragment,o),y(ea.$$.fragment,o),y(zs.$$.fragment,o),y(As.$$.fragment,o),y(Ds.$$.fragment,o),y(Ns.$$.fragment,o),y(oa.$$.fragment,o),y(Bs.$$.fragment,o),y(Us.$$.fragment,o),y(Hs.$$.fragment,o),y(Ks.$$.fragment,o),y(sa.$$.fragment,o),y(Qs.$$.fragment,o),y(Ys.$$.fragment,o),y(en.$$.fragment,o),y(rn.$$.fragment,o),y(ra.$$.fragment,o),y(ln.$$.fragment,o),y(cn.$$.fragment,o),y(dn.$$.fragment,o),y(gn.$$.fragment,o),y(la.$$.fragment,o),y(_n.$$.fragment,o),y(vn.$$.fragment,o),y(wn.$$.fragment,o),y(xn.$$.fragment,o),y(da.$$.fragment,o),y(Wn.$$.fragment,o),y($n.$$.fragment,o),y(Vn.$$.fragment,o),y(ha.$$.fragment,o),y(Pn.$$.fragment,o),y(ma.$$.fragment,o),y(Cn.$$.fragment,o),y(qn.$$.fragment,o),y(Mn.$$.fragment,o),y(ua.$$.fragment,o),y(Ln.$$.fragment,o),y(ga.$$.fragment,o),y(Sn.$$.fragment,o),y(In.$$.fragment,o),y(Nn.$$.fragment,o),y(Qn.$$.fragment,o),y(va.$$.fragment,o),y(Yn.$$.fragment,o),y(er.$$.fragment,o),y(tr.$$.fragment,o),y(dr.$$.fragment,o),y(ba.$$.fragment,o),y(pr.$$.fragment,o),y(hr.$$.fragment,o),y(mr.$$.fragment,o),y(kr.$$.fragment,o),y(ka.$$.fragment,o),y(Tr.$$.fragment,o),Dd=!1},d(o){t(p),o&&t(x),o&&t(f),k(u),o&&t(P),o&&t(j),k(N),o&&t(ee),o&&t(S),o&&t(H),o&&t($e),o&&t(Ze),o&&t(Ve),o&&t(Ke),o&&t(I),o&&t(he),o&&t(me),o&&t(V),o&&t(z),o&&t(wt),o&&t(we),k(G),o&&t(bt),o&&t(R),k(ye),k(Ca),o&&t(td),o&&t(Xt),k(qa),o&&t(od),o&&t(ke),k(Ma),k(Aa),o&&t(ad),o&&t(Jt),k(Da),o&&t(sd),o&&t(Ge),k(Oa),k(Sa),o&&t(nd),o&&t(Gt),k(Ia),o&&t(rd),o&&t(L),k(Na),k(Ua),k(Xa),k(Ja),k(Do),k(Za),k(Oo),k(Ka),k(Ya),k(ts),o&&t(id),o&&t(Kt),k(os),o&&t(ld),o&&t(U),k(as),k(ss),k(is),k(ls),k(Ho),k(ds),k(Ro),k(ps),k(Xo),k(hs),k(ms),o&&t(cd),o&&t(Yt),k(fs),o&&t(dd),o&&t(eo),k(us),o&&t(pd),o&&t(to),k(_s),o&&t(hd),o&&t(oo),k(ws),o&&t(md),o&&t(mt),k(ys),k(Ts),o&&t(fd),o&&t(ft),k(xs),k($s),o&&t(ud),o&&t(ao),k(Vs),o&&t(gd),o&&t(Te),k(Fs),k(Ms),k(ea),k(zs),o&&t(_d),o&&t(no),k(As),o&&t(vd),o&&t(xe),k(Ds),k(Ns),k(oa),k(Bs),o&&t(wd),o&&t(lo),k(Us),o&&t(bd),o&&t(ie),k(Hs),k(Ks),k(sa),k(Qs),o&&t(yd),o&&t(po),k(Ys),o&&t(kd),o&&t(le),k(en),k(rn),k(ra),k(ln),o&&t(Td),o&&t(mo),k(cn),o&&t(xd),o&&t(ce),k(dn),k(gn),k(la),k(_n),o&&t(Wd),o&&t(uo),k(vn),o&&t($d),o&&t(We),k(wn),k(xn),k(da),k(Wn),o&&t(Vd),o&&t(vo),k($n),o&&t(Fd),o&&t(de),k(Vn),k(ha),k(Pn),k(ma),k(Cn),o&&t(jd),o&&t(bo),k(qn),o&&t(Ed),o&&t(pe),k(Mn),k(ua),k(Ln),k(ga),k(Sn),o&&t(Pd),o&&t(ko),k(In),o&&t(Cd),o&&t(Z),k(Nn),k(Qn),k(va),k(Yn),o&&t(qd),o&&t(xo),k(er),o&&t(Md),o&&t(K),k(tr),k(dr),k(ba),k(pr),o&&t(zd),o&&t(Vo),k(hr),o&&t(Ad),o&&t(Q),k(mr),k(kr),k(ka),k(Tr)}}}const $k={local:"wav2vec2",sections:[{local:"overview",title:"Overview"},{local:"transformers.Wav2Vec2Config",title:"Wav2Vec2Config"},{local:"transformers.Wav2Vec2CTCTokenizer",title:"Wav2Vec2CTCTokenizer"},{local:"transformers.Wav2Vec2FeatureExtractor",title:"Wav2Vec2FeatureExtractor"},{local:"transformers.Wav2Vec2Processor",title:"Wav2Vec2Processor"},{local:"transformers.Wav2Vec2ProcessorWithLM",title:"Wav2Vec2ProcessorWithLM"},{local:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput",title:"Wav2Vec2 specific 
outputs"},{local:"transformers.Wav2Vec2Model",title:"Wav2Vec2Model"},{local:"transformers.Wav2Vec2ForCTC",title:"Wav2Vec2ForCTC"},{local:"transformers.Wav2Vec2ForSequenceClassification",title:"Wav2Vec2ForSequenceClassification"},{local:"transformers.Wav2Vec2ForAudioFrameClassification",title:"Wav2Vec2ForAudioFrameClassification"},{local:"transformers.Wav2Vec2ForXVector",title:"Wav2Vec2ForXVector"},{local:"transformers.Wav2Vec2ForPreTraining",title:"Wav2Vec2ForPreTraining"},{local:"transformers.TFWav2Vec2Model",title:"TFWav2Vec2Model"},{local:"transformers.TFWav2Vec2ForCTC",title:"TFWav2Vec2ForCTC"},{local:"transformers.FlaxWav2Vec2Model",title:"FlaxWav2Vec2Model"},{local:"transformers.FlaxWav2Vec2ForCTC",title:"FlaxWav2Vec2ForCTC"},{local:"transformers.FlaxWav2Vec2ForPreTraining",title:"FlaxWav2Vec2ForPreTraining"}],title:"Wav2Vec2"};function Vk(C,p,x){let{fw:f}=p;return C.$$set=T=>{"fw"in T&&x(0,f=T.fw)},[f]}class Mk extends ak{constructor(p){super();sk(this,p,Vk,Wk,nk,{fw:0})}}export{Mk as default,$k as metadata};
9,922
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/deit.mdx-cf8cab67.js
import{S as gs,i as _s,s as Ts,e as a,k as d,w as v,t as s,L as vs,c as r,d as o,m as c,a as n,x as w,h as i,b as l,J as e,g as p,y as b,q as y,o as $,B as D}from"../../chunks/vendor-b1433968.js";import{T as so}from"../../chunks/Tip-c3840994.js";import{D as ne}from"../../chunks/Docstring-ff504c58.js";import{C as qo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Ye}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ws(A){let h,T,m,u,x;return{c(){h=a("p"),T=s(`This is a recently introduced model so the API hasn\u2019t been tested extensively. There may be some bugs or slight breaking changes to fix it in the future. If you see something strange, file a `),m=a("a"),u=s("Github Issue"),x=s("."),this.h()},l(g){h=r(g,"P",{});var _=n(h);T=i(_,`This is a recently introduced model so the API hasn\u2019t been tested extensively. There may be some bugs or slight breaking changes to fix it in the future. If you see something strange, file a `),m=r(_,"A",{href:!0,rel:!0});var E=n(m);u=i(E,"Github Issue"),E.forEach(o),x=i(_,"."),_.forEach(o),this.h()},h(){l(m,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),l(m,"rel","nofollow")},m(g,_){p(g,h,_),e(h,T),e(h,m),e(m,u),e(h,x)},d(g){g&&o(h)}}}function bs(A){let h,T;return{c(){h=a("p"),T=s(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(m){h=r(m,"P",{});var u=n(h);T=i(u,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),u.forEach(o)},m(m,u){p(m,h,u),e(h,T)},d(m){m&&o(h)}}}function ys(A){let h,T,m,u,x;return{c(){h=a("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=s("Module"),x=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var _=n(h);T=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var E=n(m);u=i(E,"Module"),E.forEach(o),x=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(g,_){p(g,h,_),e(h,T),e(h,m),e(m,u),e(h,x)},d(g){g&&o(h)}}}function $s(A){let h,T,m,u,x;return{c(){h=a("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=s("Module"),x=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var _=n(h);T=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var E=n(m);u=i(E,"Module"),E.forEach(o),x=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(g,_){p(g,h,_),e(h,T),e(h,m),e(m,u),e(h,x)},d(g){g&&o(h)}}}function Ds(A){let h,T,m,u,x;return{c(){h=a("p"),T=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=s("Module"),x=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter 
silently ignores them.`)},l(g){h=r(g,"P",{});var _=n(h);T=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var E=n(m);u=i(E,"Module"),E.forEach(o),x=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(g,_){p(g,h,_),e(h,T),e(h,m),e(m,u),e(h,x)},d(g){g&&o(h)}}}function xs(A){let h,T,m,u,x,g,_,E,Lo,io,se,lo,J,ie,$t,be,Wo,Dt,So,co,L,Oo,ye,Vo,Ho,et,Bo,Ro,$e,Uo,Jo,ho,tt,Go,fo,ot,xt,Ko,mo,at,Xo,po,P,It,Zo,Qo,G,Yo,rt,ea,ta,nt,oa,aa,ra,Et,na,sa,kt,ia,la,I,da,st,ca,ha,it,fa,ma,Ft,pa,ua,Ct,ga,_a,Pt,Ta,va,jt,wa,ba,lt,ya,$a,uo,le,Da,De,xa,Ia,go,K,de,Mt,xe,Ea,zt,ka,_o,C,Ie,Fa,X,Ca,dt,Pa,ja,Ee,Ma,za,Aa,Z,Na,ct,qa,La,ht,Wa,Sa,Oa,At,Va,Ha,ke,To,Q,ce,Nt,Fe,Ba,qt,Ra,vo,N,Ce,Ua,Lt,Ja,Ga,Pe,Ka,Wt,Xa,Za,Qa,O,je,Ya,St,er,tr,he,wo,Y,fe,Ot,Me,or,Vt,ar,bo,S,ze,rr,Ae,nr,Ne,sr,ir,lr,j,qe,dr,ee,cr,ft,hr,fr,Ht,mr,pr,ur,me,gr,Bt,_r,Tr,Le,yo,te,pe,Rt,We,vr,Ut,wr,$o,q,Se,br,Jt,yr,$r,Oe,Dr,Ve,xr,Ir,Er,M,He,kr,oe,Fr,mt,Cr,Pr,Gt,jr,Mr,zr,ue,Ar,Kt,Nr,qr,Be,Do,ae,ge,Xt,Re,Lr,Zt,Wr,xo,k,Ue,Sr,Qt,Or,Vr,Yt,Hr,Br,eo,Rr,Ur,Je,Jr,Ge,Gr,Kr,Xr,z,Ke,Zr,re,Qr,pt,Yr,en,to,tn,on,an,_e,rn,oo,nn,sn,Xe,Io;return g=new Ye({}),se=new so({props:{$$slots:{default:[ws]},$$scope:{ctx:A}}}),be=new Ye({}),xe=new Ye({}),Ie=new ne({props:{name:"class transformers.DeiTConfig",anchor:"transformers.DeiTConfig",parameters:[{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"is_encoder_decoder",val:" = False"},{name:"image_size",val:" = 224"},{name:"patch_size",val:" = 16"},{name:"num_channels",val:" = 3"},{name:"qkv_bias",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/configuration_deit.py#L29",parametersDescription:[{anchor:"transformers.DeiTConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.DeiTConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.DeiTConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.DeiTConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.DeiTConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.DeiTConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.DeiTConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.DeiTConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.DeiTConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.DeiTConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>224</code>) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.DeiTConfig.patch_size",description:`<strong>patch_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>16</code>) &#x2014; The size (resolution) of each patch.`,name:"patch_size"},{anchor:"transformers.DeiTConfig.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to <code>3</code>) &#x2014; The number of input channels.`,name:"num_channels"},{anchor:"transformers.DeiTConfig.qkv_bias",description:`<strong>qkv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to add a bias to the queries, keys and values.`,name:"qkv_bias"}]}}),ke=new qo({props:{code:`from transformers import DeiTModel, DeiTConfig # Initializing a DeiT deit-base-distilled-patch16-224 style configuration configuration = DeiTConfig() # Initializing a model from the deit-base-distilled-patch16-224 style configuration model = DeiTModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DeiTModel, DeiTConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a DeiT deit-base-distilled-patch16-224 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = DeiTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the deit-base-distilled-patch16-224 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = DeiTModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Fe=new Ye({}),Ce=new ne({props:{name:"class transformers.DeiTFeatureExtractor",anchor:"transformers.DeiTFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 
256"},{name:"resample",val:" = 3"},{name:"do_center_crop",val:" = True"},{name:"crop_size",val:" = 224"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/feature_extraction_deit.py#L37",parametersDescription:[{anchor:"transformers.DeiTFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.DeiTFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 256) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.DeiTFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.DeiTFeatureExtractor.do_center_crop",description:`<strong>do_center_crop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to crop the input at the center. If the input size is smaller than <code>crop_size</code> along any edge, the image is padded with 0&#x2019;s and then center cropped.`,name:"do_center_crop"},{anchor:"transformers.DeiTFeatureExtractor.crop_size",description:`<strong>crop_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; Desired output size when applying center-cropping. 
Only has an effect if <code>do_center_crop</code> is set to <code>True</code>.`,name:"crop_size"},{anchor:"transformers.DeiTFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with <code>image_mean</code> and <code>image_std</code>.`,name:"do_normalize"},{anchor:"transformers.DeiTFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.`,name:"image_mean"},{anchor:"transformers.DeiTFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.`,name:"image_std"}]}}),je=new ne({props:{name:"__call__",anchor:"transformers.DeiTFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/feature_extraction_deit.py#L93",parametersDescription:[{anchor:"transformers.DeiTFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.DeiTFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),he=new so({props:{warning:"&lcub;true}",$$slots:{default:[bs]},$$scope:{ctx:A}}}),Me=new Ye({}),ze=new ne({props:{name:"class transformers.DeiTModel",anchor:"transformers.DeiTModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/modeling_deit.py#L451",parametersDescription:[{anchor:"transformers.DeiTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig">DeiTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qe=new ne({props:{name:"forward",anchor:"transformers.DeiTModel.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/modeling_deit.py#L476",parametersDescription:[{anchor:"transformers.DeiTModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor">DeiTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor.__call__">DeiTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.DeiTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.DeiTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DeiTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DeiTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig" >DeiTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),me=new so({props:{$$slots:{default:[ys]},$$scope:{ctx:A}}}),Le=new qo({props:{code:`from transformers import DeiTFeatureExtractor, DeiTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = DeiTFeatureExtractor.from_pretrained('facebook/deit-base-distilled-patch16-224') model = DeiTModel.from_pretrained('facebook/deit-base-distilled-patch16-224', add_pooling_layer=False) inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DeiTFeatureExtractor, DeiTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = DeiTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/deit-base-distilled-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DeiTModel.from_pretrained(<span class="hljs-string">&#x27;facebook/deit-base-distilled-patch16-224&#x27;</span>, add_pooling_layer=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),We=new Ye({}),Se=new ne({props:{name:"class 
transformers.DeiTForImageClassification",anchor:"transformers.DeiTForImageClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/modeling_deit.py#L570",parametersDescription:[{anchor:"transformers.DeiTForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig">DeiTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),He=new ne({props:{name:"forward",anchor:"transformers.DeiTForImageClassification.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"head_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/modeling_deit.py#L583",parametersDescription:[{anchor:"transformers.DeiTForImageClassification.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor">DeiTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor.__call__">DeiTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.DeiTForImageClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.DeiTForImageClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DeiTForImageClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DeiTForImageClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DeiTForImageClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig" >DeiTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ue=new so({props:{$$slots:{default:[$s]},$$scope:{ctx:A}}}),Be=new qo({props:{code:`from transformers import DeiTFeatureExtractor, DeiTForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) # note: we are loading a 
DeiTForImageClassificationWithTeacher from the hub here, # so the head will be randomly initialized, hence the predictions will be random feature_extractor = DeiTFeatureExtractor.from_pretrained('facebook/deit-base-distilled-patch16-224') model = DeiTForImageClassification.from_pretrained('facebook/deit-base-distilled-patch16-224') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DeiTFeatureExtractor, DeiTForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># note: we are loading a DeiTForImageClassificationWithTeacher from the hub here,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># so the head will be randomly initialized, hence the predictions will be random</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = DeiTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/deit-base-distilled-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DeiTForImageClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/deit-base-distilled-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),Re=new Ye({}),Ue=new ne({props:{name:"class transformers.DeiTForImageClassificationWithTeacher",anchor:"transformers.DeiTForImageClassificationWithTeacher",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/modeling_deit.py#L702",parametersDescription:[{anchor:"transformers.DeiTForImageClassificationWithTeacher.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig">DeiTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ke=new ne({props:{name:"forward",anchor:"transformers.DeiTForImageClassificationWithTeacher.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deit/modeling_deit.py#L720",parametersDescription:[{anchor:"transformers.DeiTForImageClassificationWithTeacher.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor">DeiTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor.__call__">DeiTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.DeiTForImageClassificationWithTeacher.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.DeiTForImageClassificationWithTeacher.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DeiTForImageClassificationWithTeacher.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DeiTForImageClassificationWithTeacher.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.deit.modeling_deit.DeiTForImageClassificationWithTeacherOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTConfig" >DeiTConfig</a>) and inputs.</p> <ul> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Prediction scores as the average of the cls_logits and distillation logits.</li> <li><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the class token).</li> <li><strong>distillation_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the distillation token).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.deit.modeling_deit.DeiTForImageClassificationWithTeacherOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),_e=new so({props:{$$slots:{default:[Ds]},$$scope:{ctx:A}}}),Xe=new qo({props:{code:`from transformers import DeiTFeatureExtractor, DeiTForImageClassificationWithTeacher from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = DeiTFeatureExtractor.from_pretrained('facebook/deit-base-distilled-patch16-224') model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DeiTFeatureExtractor, DeiTForImageClassificationWithTeacher <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = DeiTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/deit-base-distilled-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DeiTForImageClassificationWithTeacher.from_pretrained(<span class="hljs-string">&#x27;facebook/deit-base-distilled-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),{c(){h=a("meta"),T=d(),m=a("h1"),u=a("a"),x=a("span"),v(g.$$.fragment),_=d(),E=a("span"),Lo=s("DeiT"),io=d(),v(se.$$.fragment),lo=d(),J=a("h2"),ie=a("a"),$t=a("span"),v(be.$$.fragment),Wo=d(),Dt=a("span"),So=s("Overview"),co=d(),L=a("p"),Oo=s("The DeiT model was proposed in "),ye=a("a"),Vo=s("Training data-efficient image transformers & distillation through attention"),Ho=s(` by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Herv\xE9 J\xE9gou. 
The `),et=a("a"),Bo=s("Vision Transformer (ViT)"),Ro=s(" introduced in "),$e=a("a"),Uo=s("Dosovitskiy et al., 2020"),Jo=s(` has shown that one can match or even outperform existing convolutional neural networks using a Transformer encoder (BERT-like). However, the ViT models introduced in that paper required training on expensive infrastructure for multiple weeks, using external data. DeiT (data-efficient image transformers) are more efficiently trained transformers for image classification, requiring far less data and far less computing resources compared to the original ViT models.`),ho=d(),tt=a("p"),Go=s("The abstract from the paper is the following:"),fo=d(),ot=a("p"),xt=a("em"),Ko=s(`Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.`),mo=d(),at=a("p"),Xo=s("Tips:"),po=d(),P=a("ul"),It=a("li"),Zo=s(`Compared to ViT, DeiT models use a so-called distillation token to effectively learn from a teacher (which, in the DeiT paper, is a ResNet like-model). The distillation token is learned through backpropagation, by interacting with the class ([CLS]) and patch tokens through the self-attention layers.`),Qo=d(),G=a("li"),Yo=s(`There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top of the final hidden state of the class token and not using the distillation signal, or (2) by placing both a prediction head on top of the class token and on top of the distillation token. In that case, the [CLS] prediction head is trained using regular cross-entropy between the prediction of the head and the ground-truth label, while the distillation prediction head is trained using hard distillation (cross-entropy between the prediction of the distillation head and the label predicted by the teacher). At inference time, one takes the average prediction between both heads as final prediction. (2) is also called \u201Cfine-tuning with distillation\u201D, because one relies on a teacher that has already been fine-tuned on the downstream dataset. 
In terms of models, (1) corresponds to `),rt=a("a"),ea=s("DeiTForImageClassification"),ta=s(` and (2) corresponds to `),nt=a("a"),oa=s("DeiTForImageClassificationWithTeacher"),aa=s("."),ra=d(),Et=a("li"),na=s(`Note that the authors also did try soft distillation for (2) (in which case the distillation prediction head is trained using KL divergence to match the softmax output of the teacher), but hard distillation gave the best results.`),sa=d(),kt=a("li"),ia=s(`All released checkpoints were pre-trained and fine-tuned on ImageNet-1k only. No external data was used. This is in contrast with the original ViT model, which used external data like the JFT-300M dataset/Imagenet-21k for pre-training.`),la=d(),I=a("li"),da=s(`The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into `),st=a("a"),ca=s("ViTModel"),ha=s(" or "),it=a("a"),fa=s("ViTForImageClassification"),ma=s(`. Techniques like data augmentation, optimization, and regularization were used in order to simulate training on a much larger dataset (while only using ImageNet-1k for pre-training). There are 4 variants available (in 3 different sizes): `),Ft=a("em"),pa=s("facebook/deit-tiny-patch16-224"),ua=s(", "),Ct=a("em"),ga=s("facebook/deit-small-patch16-224"),_a=s(", "),Pt=a("em"),Ta=s("facebook/deit-base-patch16-224"),va=s(` and `),jt=a("em"),wa=s("facebook/deit-base-patch16-384"),ba=s(". Note that one should use "),lt=a("a"),ya=s("DeiTFeatureExtractor"),$a=s(` in order to prepare images for the model.`),uo=d(),le=a("p"),Da=s("This model was contributed by "),De=a("a"),xa=s("nielsr"),Ia=s("."),go=d(),K=a("h2"),de=a("a"),Mt=a("span"),v(xe.$$.fragment),Ea=d(),zt=a("span"),ka=s("DeiTConfig"),_o=d(),C=a("div"),v(Ie.$$.fragment),Fa=d(),X=a("p"),Ca=s("This is the configuration class to store the configuration of a "),dt=a("a"),Pa=s("DeiTModel"),ja=s(`. It is used to instantiate an DeiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeiT `),Ee=a("a"),Ma=s("facebook/deit-base-distilled-patch16-224"),za=s(` architecture.`),Aa=d(),Z=a("p"),Na=s("Configuration objects inherit from "),ct=a("a"),qa=s("PretrainedConfig"),La=s(` and can be used to control the model outputs. Read the documentation from `),ht=a("a"),Wa=s("PretrainedConfig"),Sa=s(" for more information."),Oa=d(),At=a("p"),Va=s("Example:"),Ha=d(),v(ke.$$.fragment),To=d(),Q=a("h2"),ce=a("a"),Nt=a("span"),v(Fe.$$.fragment),Ba=d(),qt=a("span"),Ra=s("DeiTFeatureExtractor"),vo=d(),N=a("div"),v(Ce.$$.fragment),Ua=d(),Lt=a("p"),Ja=s("Constructs a DeiT feature extractor."),Ga=d(),Pe=a("p"),Ka=s("This feature extractor inherits from "),Wt=a("code"),Xa=s("FeatureExtractionMixin"),Za=s(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Qa=d(),O=a("div"),v(je.$$.fragment),Ya=d(),St=a("p"),er=s("Main method to prepare for the model one or several image(s)."),tr=d(),v(he.$$.fragment),wo=d(),Y=a("h2"),fe=a("a"),Ot=a("span"),v(Me.$$.fragment),or=d(),Vt=a("span"),ar=s("DeiTModel"),bo=d(),S=a("div"),v(ze.$$.fragment),rr=d(),Ae=a("p"),nr=s(`The bare DeiT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Ne=a("a"),sr=s("torch.nn.Module"),ir=s(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),lr=d(),j=a("div"),v(qe.$$.fragment),dr=d(),ee=a("p"),cr=s("The "),ft=a("a"),hr=s("DeiTModel"),fr=s(" forward method, overrides the "),Ht=a("code"),mr=s("__call__"),pr=s(" special method."),ur=d(),v(me.$$.fragment),gr=d(),Bt=a("p"),_r=s("Examples:"),Tr=d(),v(Le.$$.fragment),yo=d(),te=a("h2"),pe=a("a"),Rt=a("span"),v(We.$$.fragment),vr=d(),Ut=a("span"),wr=s("DeiTForImageClassification"),$o=d(),q=a("div"),v(Se.$$.fragment),br=d(),Jt=a("p"),yr=s(`DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),$r=d(),Oe=a("p"),Dr=s("This model is a PyTorch "),Ve=a("a"),xr=s("torch.nn.Module"),Ir=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Er=d(),M=a("div"),v(He.$$.fragment),kr=d(),oe=a("p"),Fr=s("The "),mt=a("a"),Cr=s("DeiTForImageClassification"),Pr=s(" forward method, overrides the "),Gt=a("code"),jr=s("__call__"),Mr=s(" special method."),zr=d(),v(ue.$$.fragment),Ar=d(),Kt=a("p"),Nr=s("Examples:"),qr=d(),v(Be.$$.fragment),Do=d(),ae=a("h2"),ge=a("a"),Xt=a("span"),v(Re.$$.fragment),Lr=d(),Zt=a("span"),Wr=s("DeiTForImageClassificationWithTeacher"),xo=d(),k=a("div"),v(Ue.$$.fragment),Sr=d(),Qt=a("p"),Or=s(`DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.`),Vr=d(),Yt=a("p"),Hr=s(".. warning::"),Br=d(),eo=a("p"),Rr=s(`This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet supported.`),Ur=d(),Je=a("p"),Jr=s("This model is a PyTorch "),Ge=a("a"),Gr=s("torch.nn.Module"),Kr=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xr=d(),z=a("div"),v(Ke.$$.fragment),Zr=d(),re=a("p"),Qr=s("The "),pt=a("a"),Yr=s("DeiTForImageClassificationWithTeacher"),en=s(" forward method, overrides the "),to=a("code"),tn=s("__call__"),on=s(" special method."),an=d(),v(_e.$$.fragment),rn=d(),oo=a("p"),nn=s("Examples:"),sn=d(),v(Xe.$$.fragment),this.h()},l(t){const f=vs('[data-svelte="svelte-1phssyn"]',document.head);h=r(f,"META",{name:!0,content:!0}),f.forEach(o),T=c(t),m=r(t,"H1",{class:!0});var Ze=n(m);u=r(Ze,"A",{id:!0,class:!0,href:!0});var ao=n(u);x=r(ao,"SPAN",{});var ro=n(x);w(g.$$.fragment,ro),ro.forEach(o),ao.forEach(o),_=c(Ze),E=r(Ze,"SPAN",{});var no=n(E);Lo=i(no,"DeiT"),no.forEach(o),Ze.forEach(o),io=c(t),w(se.$$.fragment,t),lo=c(t),J=r(t,"H2",{class:!0});var Qe=n(J);ie=r(Qe,"A",{id:!0,class:!0,href:!0});var ln=n(ie);$t=r(ln,"SPAN",{});var dn=n($t);w(be.$$.fragment,dn),dn.forEach(o),ln.forEach(o),Wo=c(Qe),Dt=r(Qe,"SPAN",{});var cn=n(Dt);So=i(cn,"Overview"),cn.forEach(o),Qe.forEach(o),co=c(t),L=r(t,"P",{});var Te=n(L);Oo=i(Te,"The DeiT model was proposed in "),ye=r(Te,"A",{href:!0,rel:!0});var hn=n(ye);Vo=i(hn,"Training data-efficient image transformers & distillation through attention"),hn.forEach(o),Ho=i(Te,` by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Herv\xE9 J\xE9gou. 
The `),et=r(Te,"A",{href:!0});var fn=n(et);Bo=i(fn,"Vision Transformer (ViT)"),fn.forEach(o),Ro=i(Te," introduced in "),$e=r(Te,"A",{href:!0,rel:!0});var mn=n($e);Uo=i(mn,"Dosovitskiy et al., 2020"),mn.forEach(o),Jo=i(Te,` has shown that one can match or even outperform existing convolutional neural networks using a Transformer encoder (BERT-like). However, the ViT models introduced in that paper required training on expensive infrastructure for multiple weeks, using external data. DeiT (data-efficient image transformers) are more efficiently trained transformers for image classification, requiring far less data and far less computing resources compared to the original ViT models.`),Te.forEach(o),ho=c(t),tt=r(t,"P",{});var pn=n(tt);Go=i(pn,"The abstract from the paper is the following:"),pn.forEach(o),fo=c(t),ot=r(t,"P",{});var un=n(ot);xt=r(un,"EM",{});var gn=n(xt);Ko=i(gn,`Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.`),gn.forEach(o),un.forEach(o),mo=c(t),at=r(t,"P",{});var _n=n(at);Xo=i(_n,"Tips:"),_n.forEach(o),po=c(t),P=r(t,"UL",{});var V=n(P);It=r(V,"LI",{});var Tn=n(It);Zo=i(Tn,`Compared to ViT, DeiT models use a so-called distillation token to effectively learn from a teacher (which, in the DeiT paper, is a ResNet like-model). The distillation token is learned through backpropagation, by interacting with the class ([CLS]) and patch tokens through the self-attention layers.`),Tn.forEach(o),Qo=c(V),G=r(V,"LI",{});var ut=n(G);Yo=i(ut,`There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top of the final hidden state of the class token and not using the distillation signal, or (2) by placing both a prediction head on top of the class token and on top of the distillation token. In that case, the [CLS] prediction head is trained using regular cross-entropy between the prediction of the head and the ground-truth label, while the distillation prediction head is trained using hard distillation (cross-entropy between the prediction of the distillation head and the label predicted by the teacher). At inference time, one takes the average prediction between both heads as final prediction. (2) is also called \u201Cfine-tuning with distillation\u201D, because one relies on a teacher that has already been fine-tuned on the downstream dataset. 
In terms of models, (1) corresponds to `),rt=r(ut,"A",{href:!0});var vn=n(rt);ea=i(vn,"DeiTForImageClassification"),vn.forEach(o),ta=i(ut,` and (2) corresponds to `),nt=r(ut,"A",{href:!0});var wn=n(nt);oa=i(wn,"DeiTForImageClassificationWithTeacher"),wn.forEach(o),aa=i(ut,"."),ut.forEach(o),ra=c(V),Et=r(V,"LI",{});var bn=n(Et);na=i(bn,`Note that the authors also did try soft distillation for (2) (in which case the distillation prediction head is trained using KL divergence to match the softmax output of the teacher), but hard distillation gave the best results.`),bn.forEach(o),sa=c(V),kt=r(V,"LI",{});var yn=n(kt);ia=i(yn,`All released checkpoints were pre-trained and fine-tuned on ImageNet-1k only. No external data was used. This is in contrast with the original ViT model, which used external data like the JFT-300M dataset/Imagenet-21k for pre-training.`),yn.forEach(o),la=c(V),I=r(V,"LI",{});var F=n(I);da=i(F,`The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into `),st=r(F,"A",{href:!0});var $n=n(st);ca=i($n,"ViTModel"),$n.forEach(o),ha=i(F," or "),it=r(F,"A",{href:!0});var Dn=n(it);fa=i(Dn,"ViTForImageClassification"),Dn.forEach(o),ma=i(F,`. Techniques like data augmentation, optimization, and regularization were used in order to simulate training on a much larger dataset (while only using ImageNet-1k for pre-training). There are 4 variants available (in 3 different sizes): `),Ft=r(F,"EM",{});var xn=n(Ft);pa=i(xn,"facebook/deit-tiny-patch16-224"),xn.forEach(o),ua=i(F,", "),Ct=r(F,"EM",{});var In=n(Ct);ga=i(In,"facebook/deit-small-patch16-224"),In.forEach(o),_a=i(F,", "),Pt=r(F,"EM",{});var En=n(Pt);Ta=i(En,"facebook/deit-base-patch16-224"),En.forEach(o),va=i(F,` and `),jt=r(F,"EM",{});var kn=n(jt);wa=i(kn,"facebook/deit-base-patch16-384"),kn.forEach(o),ba=i(F,". Note that one should use "),lt=r(F,"A",{href:!0});var Fn=n(lt);ya=i(Fn,"DeiTFeatureExtractor"),Fn.forEach(o),$a=i(F,` in order to prepare images for the model.`),F.forEach(o),V.forEach(o),uo=c(t),le=r(t,"P",{});var Eo=n(le);Da=i(Eo,"This model was contributed by "),De=r(Eo,"A",{href:!0,rel:!0});var Cn=n(De);xa=i(Cn,"nielsr"),Cn.forEach(o),Ia=i(Eo,"."),Eo.forEach(o),go=c(t),K=r(t,"H2",{class:!0});var ko=n(K);de=r(ko,"A",{id:!0,class:!0,href:!0});var Pn=n(de);Mt=r(Pn,"SPAN",{});var jn=n(Mt);w(xe.$$.fragment,jn),jn.forEach(o),Pn.forEach(o),Ea=c(ko),zt=r(ko,"SPAN",{});var Mn=n(zt);ka=i(Mn,"DeiTConfig"),Mn.forEach(o),ko.forEach(o),_o=c(t),C=r(t,"DIV",{class:!0});var H=n(C);w(Ie.$$.fragment,H),Fa=c(H),X=r(H,"P",{});var gt=n(X);Ca=i(gt,"This is the configuration class to store the configuration of a "),dt=r(gt,"A",{href:!0});var zn=n(dt);Pa=i(zn,"DeiTModel"),zn.forEach(o),ja=i(gt,`. It is used to instantiate an DeiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeiT `),Ee=r(gt,"A",{href:!0,rel:!0});var An=n(Ee);Ma=i(An,"facebook/deit-base-distilled-patch16-224"),An.forEach(o),za=i(gt,` architecture.`),gt.forEach(o),Aa=c(H),Z=r(H,"P",{});var _t=n(Z);Na=i(_t,"Configuration objects inherit from "),ct=r(_t,"A",{href:!0});var Nn=n(ct);qa=i(Nn,"PretrainedConfig"),Nn.forEach(o),La=i(_t,` and can be used to control the model outputs. 
Read the documentation from `),ht=r(_t,"A",{href:!0});var qn=n(ht);Wa=i(qn,"PretrainedConfig"),qn.forEach(o),Sa=i(_t," for more information."),_t.forEach(o),Oa=c(H),At=r(H,"P",{});var Ln=n(At);Va=i(Ln,"Example:"),Ln.forEach(o),Ha=c(H),w(ke.$$.fragment,H),H.forEach(o),To=c(t),Q=r(t,"H2",{class:!0});var Fo=n(Q);ce=r(Fo,"A",{id:!0,class:!0,href:!0});var Wn=n(ce);Nt=r(Wn,"SPAN",{});var Sn=n(Nt);w(Fe.$$.fragment,Sn),Sn.forEach(o),Wn.forEach(o),Ba=c(Fo),qt=r(Fo,"SPAN",{});var On=n(qt);Ra=i(On,"DeiTFeatureExtractor"),On.forEach(o),Fo.forEach(o),vo=c(t),N=r(t,"DIV",{class:!0});var ve=n(N);w(Ce.$$.fragment,ve),Ua=c(ve),Lt=r(ve,"P",{});var Vn=n(Lt);Ja=i(Vn,"Constructs a DeiT feature extractor."),Vn.forEach(o),Ga=c(ve),Pe=r(ve,"P",{});var Co=n(Pe);Ka=i(Co,"This feature extractor inherits from "),Wt=r(Co,"CODE",{});var Hn=n(Wt);Xa=i(Hn,"FeatureExtractionMixin"),Hn.forEach(o),Za=i(Co,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Co.forEach(o),Qa=c(ve),O=r(ve,"DIV",{class:!0});var Tt=n(O);w(je.$$.fragment,Tt),Ya=c(Tt),St=r(Tt,"P",{});var Bn=n(St);er=i(Bn,"Main method to prepare for the model one or several image(s)."),Bn.forEach(o),tr=c(Tt),w(he.$$.fragment,Tt),Tt.forEach(o),ve.forEach(o),wo=c(t),Y=r(t,"H2",{class:!0});var Po=n(Y);fe=r(Po,"A",{id:!0,class:!0,href:!0});var Rn=n(fe);Ot=r(Rn,"SPAN",{});var Un=n(Ot);w(Me.$$.fragment,Un),Un.forEach(o),Rn.forEach(o),or=c(Po),Vt=r(Po,"SPAN",{});var Jn=n(Vt);ar=i(Jn,"DeiTModel"),Jn.forEach(o),Po.forEach(o),bo=c(t),S=r(t,"DIV",{class:!0});var vt=n(S);w(ze.$$.fragment,vt),rr=c(vt),Ae=r(vt,"P",{});var jo=n(Ae);nr=i(jo,`The bare DeiT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Ne=r(jo,"A",{href:!0,rel:!0});var Gn=n(Ne);sr=i(Gn,"torch.nn.Module"),Gn.forEach(o),ir=i(jo,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jo.forEach(o),lr=c(vt),j=r(vt,"DIV",{class:!0});var B=n(j);w(qe.$$.fragment,B),dr=c(B),ee=r(B,"P",{});var wt=n(ee);cr=i(wt,"The "),ft=r(wt,"A",{href:!0});var Kn=n(ft);hr=i(Kn,"DeiTModel"),Kn.forEach(o),fr=i(wt," forward method, overrides the "),Ht=r(wt,"CODE",{});var Xn=n(Ht);mr=i(Xn,"__call__"),Xn.forEach(o),pr=i(wt," special method."),wt.forEach(o),ur=c(B),w(me.$$.fragment,B),gr=c(B),Bt=r(B,"P",{});var Zn=n(Bt);_r=i(Zn,"Examples:"),Zn.forEach(o),Tr=c(B),w(Le.$$.fragment,B),B.forEach(o),vt.forEach(o),yo=c(t),te=r(t,"H2",{class:!0});var Mo=n(te);pe=r(Mo,"A",{id:!0,class:!0,href:!0});var Qn=n(pe);Rt=r(Qn,"SPAN",{});var Yn=n(Rt);w(We.$$.fragment,Yn),Yn.forEach(o),Qn.forEach(o),vr=c(Mo),Ut=r(Mo,"SPAN",{});var es=n(Ut);wr=i(es,"DeiTForImageClassification"),es.forEach(o),Mo.forEach(o),$o=c(t),q=r(t,"DIV",{class:!0});var we=n(q);w(Se.$$.fragment,we),br=c(we),Jt=r(we,"P",{});var ts=n(Jt);yr=i(ts,`DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),ts.forEach(o),$r=c(we),Oe=r(we,"P",{});var zo=n(Oe);Dr=i(zo,"This model is a PyTorch "),Ve=r(zo,"A",{href:!0,rel:!0});var os=n(Ve);xr=i(os,"torch.nn.Module"),os.forEach(o),Ir=i(zo,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zo.forEach(o),Er=c(we),M=r(we,"DIV",{class:!0});var R=n(M);w(He.$$.fragment,R),kr=c(R),oe=r(R,"P",{});var bt=n(oe);Fr=i(bt,"The "),mt=r(bt,"A",{href:!0});var as=n(mt);Cr=i(as,"DeiTForImageClassification"),as.forEach(o),Pr=i(bt," forward method, overrides the "),Gt=r(bt,"CODE",{});var rs=n(Gt);jr=i(rs,"__call__"),rs.forEach(o),Mr=i(bt," special method."),bt.forEach(o),zr=c(R),w(ue.$$.fragment,R),Ar=c(R),Kt=r(R,"P",{});var ns=n(Kt);Nr=i(ns,"Examples:"),ns.forEach(o),qr=c(R),w(Be.$$.fragment,R),R.forEach(o),we.forEach(o),Do=c(t),ae=r(t,"H2",{class:!0});var Ao=n(ae);ge=r(Ao,"A",{id:!0,class:!0,href:!0});var ss=n(ge);Xt=r(ss,"SPAN",{});var is=n(Xt);w(Re.$$.fragment,is),is.forEach(o),ss.forEach(o),Lr=c(Ao),Zt=r(Ao,"SPAN",{});var ls=n(Zt);Wr=i(ls,"DeiTForImageClassificationWithTeacher"),ls.forEach(o),Ao.forEach(o),xo=c(t),k=r(t,"DIV",{class:!0});var W=n(k);w(Ue.$$.fragment,W),Sr=c(W),Qt=r(W,"P",{});var ds=n(Qt);Or=i(ds,`DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.`),ds.forEach(o),Vr=c(W),Yt=r(W,"P",{});var cs=n(Yt);Hr=i(cs,".. warning::"),cs.forEach(o),Br=c(W),eo=r(W,"P",{});var hs=n(eo);Rr=i(hs,`This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet supported.`),hs.forEach(o),Ur=c(W),Je=r(W,"P",{});var No=n(Je);Jr=i(No,"This model is a PyTorch "),Ge=r(No,"A",{href:!0,rel:!0});var fs=n(Ge);Gr=i(fs,"torch.nn.Module"),fs.forEach(o),Kr=i(No,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),No.forEach(o),Xr=c(W),z=r(W,"DIV",{class:!0});var U=n(z);w(Ke.$$.fragment,U),Zr=c(U),re=r(U,"P",{});var yt=n(re);Qr=i(yt,"The "),pt=r(yt,"A",{href:!0});var ms=n(pt);Yr=i(ms,"DeiTForImageClassificationWithTeacher"),ms.forEach(o),en=i(yt," forward method, overrides the "),to=r(yt,"CODE",{});var ps=n(to);tn=i(ps,"__call__"),ps.forEach(o),on=i(yt," special method."),yt.forEach(o),an=c(U),w(_e.$$.fragment,U),rn=c(U),oo=r(U,"P",{});var us=n(oo);nn=i(us,"Examples:"),us.forEach(o),sn=c(U),w(Xe.$$.fragment,U),U.forEach(o),W.forEach(o),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(Is)),l(u,"id","deit"),l(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(u,"href","#deit"),l(m,"class","relative group"),l(ie,"id","overview"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#overview"),l(J,"class","relative 
group"),l(ye,"href","https://arxiv.org/abs/2012.12877"),l(ye,"rel","nofollow"),l(et,"href","vit"),l($e,"href","https://arxiv.org/abs/2010.11929"),l($e,"rel","nofollow"),l(rt,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassification"),l(nt,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassificationWithTeacher"),l(st,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTModel"),l(it,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTForImageClassification"),l(lt,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor"),l(De,"href","https://huggingface.co/nielsr"),l(De,"rel","nofollow"),l(de,"id","transformers.DeiTConfig"),l(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(de,"href","#transformers.DeiTConfig"),l(K,"class","relative group"),l(dt,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTModel"),l(Ee,"href","https://huggingface.co/facebook/deit-base-distilled-patch16-224"),l(Ee,"rel","nofollow"),l(ct,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(ht,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(C,"class","docstring"),l(ce,"id","transformers.DeiTFeatureExtractor"),l(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ce,"href","#transformers.DeiTFeatureExtractor"),l(Q,"class","relative group"),l(O,"class","docstring"),l(N,"class","docstring"),l(fe,"id","transformers.DeiTModel"),l(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(fe,"href","#transformers.DeiTModel"),l(Y,"class","relative group"),l(Ne,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ne,"rel","nofollow"),l(ft,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTModel"),l(j,"class","docstring"),l(S,"class","docstring"),l(pe,"id","transformers.DeiTForImageClassification"),l(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(pe,"href","#transformers.DeiTForImageClassification"),l(te,"class","relative group"),l(Ve,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ve,"rel","nofollow"),l(mt,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassification"),l(M,"class","docstring"),l(q,"class","docstring"),l(ge,"id","transformers.DeiTForImageClassificationWithTeacher"),l(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ge,"href","#transformers.DeiTForImageClassificationWithTeacher"),l(ae,"class","relative 
group"),l(Ge,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ge,"rel","nofollow"),l(pt,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTForImageClassificationWithTeacher"),l(z,"class","docstring"),l(k,"class","docstring")},m(t,f){e(document.head,h),p(t,T,f),p(t,m,f),e(m,u),e(u,x),b(g,x,null),e(m,_),e(m,E),e(E,Lo),p(t,io,f),b(se,t,f),p(t,lo,f),p(t,J,f),e(J,ie),e(ie,$t),b(be,$t,null),e(J,Wo),e(J,Dt),e(Dt,So),p(t,co,f),p(t,L,f),e(L,Oo),e(L,ye),e(ye,Vo),e(L,Ho),e(L,et),e(et,Bo),e(L,Ro),e(L,$e),e($e,Uo),e(L,Jo),p(t,ho,f),p(t,tt,f),e(tt,Go),p(t,fo,f),p(t,ot,f),e(ot,xt),e(xt,Ko),p(t,mo,f),p(t,at,f),e(at,Xo),p(t,po,f),p(t,P,f),e(P,It),e(It,Zo),e(P,Qo),e(P,G),e(G,Yo),e(G,rt),e(rt,ea),e(G,ta),e(G,nt),e(nt,oa),e(G,aa),e(P,ra),e(P,Et),e(Et,na),e(P,sa),e(P,kt),e(kt,ia),e(P,la),e(P,I),e(I,da),e(I,st),e(st,ca),e(I,ha),e(I,it),e(it,fa),e(I,ma),e(I,Ft),e(Ft,pa),e(I,ua),e(I,Ct),e(Ct,ga),e(I,_a),e(I,Pt),e(Pt,Ta),e(I,va),e(I,jt),e(jt,wa),e(I,ba),e(I,lt),e(lt,ya),e(I,$a),p(t,uo,f),p(t,le,f),e(le,Da),e(le,De),e(De,xa),e(le,Ia),p(t,go,f),p(t,K,f),e(K,de),e(de,Mt),b(xe,Mt,null),e(K,Ea),e(K,zt),e(zt,ka),p(t,_o,f),p(t,C,f),b(Ie,C,null),e(C,Fa),e(C,X),e(X,Ca),e(X,dt),e(dt,Pa),e(X,ja),e(X,Ee),e(Ee,Ma),e(X,za),e(C,Aa),e(C,Z),e(Z,Na),e(Z,ct),e(ct,qa),e(Z,La),e(Z,ht),e(ht,Wa),e(Z,Sa),e(C,Oa),e(C,At),e(At,Va),e(C,Ha),b(ke,C,null),p(t,To,f),p(t,Q,f),e(Q,ce),e(ce,Nt),b(Fe,Nt,null),e(Q,Ba),e(Q,qt),e(qt,Ra),p(t,vo,f),p(t,N,f),b(Ce,N,null),e(N,Ua),e(N,Lt),e(Lt,Ja),e(N,Ga),e(N,Pe),e(Pe,Ka),e(Pe,Wt),e(Wt,Xa),e(Pe,Za),e(N,Qa),e(N,O),b(je,O,null),e(O,Ya),e(O,St),e(St,er),e(O,tr),b(he,O,null),p(t,wo,f),p(t,Y,f),e(Y,fe),e(fe,Ot),b(Me,Ot,null),e(Y,or),e(Y,Vt),e(Vt,ar),p(t,bo,f),p(t,S,f),b(ze,S,null),e(S,rr),e(S,Ae),e(Ae,nr),e(Ae,Ne),e(Ne,sr),e(Ae,ir),e(S,lr),e(S,j),b(qe,j,null),e(j,dr),e(j,ee),e(ee,cr),e(ee,ft),e(ft,hr),e(ee,fr),e(ee,Ht),e(Ht,mr),e(ee,pr),e(j,ur),b(me,j,null),e(j,gr),e(j,Bt),e(Bt,_r),e(j,Tr),b(Le,j,null),p(t,yo,f),p(t,te,f),e(te,pe),e(pe,Rt),b(We,Rt,null),e(te,vr),e(te,Ut),e(Ut,wr),p(t,$o,f),p(t,q,f),b(Se,q,null),e(q,br),e(q,Jt),e(Jt,yr),e(q,$r),e(q,Oe),e(Oe,Dr),e(Oe,Ve),e(Ve,xr),e(Oe,Ir),e(q,Er),e(q,M),b(He,M,null),e(M,kr),e(M,oe),e(oe,Fr),e(oe,mt),e(mt,Cr),e(oe,Pr),e(oe,Gt),e(Gt,jr),e(oe,Mr),e(M,zr),b(ue,M,null),e(M,Ar),e(M,Kt),e(Kt,Nr),e(M,qr),b(Be,M,null),p(t,Do,f),p(t,ae,f),e(ae,ge),e(ge,Xt),b(Re,Xt,null),e(ae,Lr),e(ae,Zt),e(Zt,Wr),p(t,xo,f),p(t,k,f),b(Ue,k,null),e(k,Sr),e(k,Qt),e(Qt,Or),e(k,Vr),e(k,Yt),e(Yt,Hr),e(k,Br),e(k,eo),e(eo,Rr),e(k,Ur),e(k,Je),e(Je,Jr),e(Je,Ge),e(Ge,Gr),e(Je,Kr),e(k,Xr),e(k,z),b(Ke,z,null),e(z,Zr),e(z,re),e(re,Qr),e(re,pt),e(pt,Yr),e(re,en),e(re,to),e(to,tn),e(re,on),e(z,an),b(_e,z,null),e(z,rn),e(z,oo),e(oo,nn),e(z,sn),b(Xe,z,null),Io=!0},p(t,[f]){const Ze={};f&2&&(Ze.$$scope={dirty:f,ctx:t}),se.$set(Ze);const ao={};f&2&&(ao.$$scope={dirty:f,ctx:t}),he.$set(ao);const ro={};f&2&&(ro.$$scope={dirty:f,ctx:t}),me.$set(ro);const no={};f&2&&(no.$$scope={dirty:f,ctx:t}),ue.$set(no);const 
Qe={};f&2&&(Qe.$$scope={dirty:f,ctx:t}),_e.$set(Qe)},i(t){Io||(y(g.$$.fragment,t),y(se.$$.fragment,t),y(be.$$.fragment,t),y(xe.$$.fragment,t),y(Ie.$$.fragment,t),y(ke.$$.fragment,t),y(Fe.$$.fragment,t),y(Ce.$$.fragment,t),y(je.$$.fragment,t),y(he.$$.fragment,t),y(Me.$$.fragment,t),y(ze.$$.fragment,t),y(qe.$$.fragment,t),y(me.$$.fragment,t),y(Le.$$.fragment,t),y(We.$$.fragment,t),y(Se.$$.fragment,t),y(He.$$.fragment,t),y(ue.$$.fragment,t),y(Be.$$.fragment,t),y(Re.$$.fragment,t),y(Ue.$$.fragment,t),y(Ke.$$.fragment,t),y(_e.$$.fragment,t),y(Xe.$$.fragment,t),Io=!0)},o(t){$(g.$$.fragment,t),$(se.$$.fragment,t),$(be.$$.fragment,t),$(xe.$$.fragment,t),$(Ie.$$.fragment,t),$(ke.$$.fragment,t),$(Fe.$$.fragment,t),$(Ce.$$.fragment,t),$(je.$$.fragment,t),$(he.$$.fragment,t),$(Me.$$.fragment,t),$(ze.$$.fragment,t),$(qe.$$.fragment,t),$(me.$$.fragment,t),$(Le.$$.fragment,t),$(We.$$.fragment,t),$(Se.$$.fragment,t),$(He.$$.fragment,t),$(ue.$$.fragment,t),$(Be.$$.fragment,t),$(Re.$$.fragment,t),$(Ue.$$.fragment,t),$(Ke.$$.fragment,t),$(_e.$$.fragment,t),$(Xe.$$.fragment,t),Io=!1},d(t){o(h),t&&o(T),t&&o(m),D(g),t&&o(io),D(se,t),t&&o(lo),t&&o(J),D(be),t&&o(co),t&&o(L),t&&o(ho),t&&o(tt),t&&o(fo),t&&o(ot),t&&o(mo),t&&o(at),t&&o(po),t&&o(P),t&&o(uo),t&&o(le),t&&o(go),t&&o(K),D(xe),t&&o(_o),t&&o(C),D(Ie),D(ke),t&&o(To),t&&o(Q),D(Fe),t&&o(vo),t&&o(N),D(Ce),D(je),D(he),t&&o(wo),t&&o(Y),D(Me),t&&o(bo),t&&o(S),D(ze),D(qe),D(me),D(Le),t&&o(yo),t&&o(te),D(We),t&&o($o),t&&o(q),D(Se),D(He),D(ue),D(Be),t&&o(Do),t&&o(ae),D(Re),t&&o(xo),t&&o(k),D(Ue),D(Ke),D(_e),D(Xe)}}}const Is={local:"deit",sections:[{local:"overview",title:"Overview"},{local:"transformers.DeiTConfig",title:"DeiTConfig"},{local:"transformers.DeiTFeatureExtractor",title:"DeiTFeatureExtractor"},{local:"transformers.DeiTModel",title:"DeiTModel"},{local:"transformers.DeiTForImageClassification",title:"DeiTForImageClassification"},{local:"transformers.DeiTForImageClassificationWithTeacher",title:"DeiTForImageClassificationWithTeacher"}],title:"DeiT"};function Es(A,h,T){let{fw:m}=h;return A.$$set=u=>{"fw"in u&&T(0,m=u.fw)},[m]}class zs extends gs{constructor(h){super();_s(this,h,Es,xs,Ts,{fw:0})}}export{zs as default,Is as metadata};
9,923
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/rembert.mdx-05c4d0ae.js
import{S as dw,i as cw,s as pw,e as s,k as l,w as T,t as o,L as hw,c as r,d as t,m as d,a,x as b,h as n,b as p,J as e,g as m,y,q as w,o as $,B as F}from"../../chunks/vendor-b1433968.js";import{T as qe}from"../../chunks/Tip-c3840994.js";import{D as ee}from"../../chunks/Docstring-ff504c58.js";import{C as je}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function mw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function uw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function fw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function gw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function _w(V){let 
h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function kw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function vw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function Tw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function bw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function yw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like 
PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function ww(V){let 
h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function $w(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE 
ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function Fw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var 
Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function Ew(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function Rw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as 
inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function Bw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function Mw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires 
having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function zw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter 
silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function Cw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce;return{c(){h=s("p"),R=o("TF 2.0 models accepts two formats as inputs:"),g=l(),u=s("ul"),v=s("li"),k=o("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),te=o("This second option is useful when using "),N=s("code"),ne=o("tf.keras.Model.fit"),fe=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),ge=o("model(inputs)"),pe=o("."),J=l(),j=s("p"),se=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),ae=o("a single Tensor with "),U=s("code"),he=o("input_ids"),ie=o(" only and nothing else: "),I=s("code"),_e=o("model(inputs_ids)"),me=l(),C=s("li"),ke=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=s("code"),le=o("model([input_ids, attention_mask])"),ve=o(" or "),W=s("code"),Z=o("model([input_ids, attention_mask, token_type_ids])"),oe=l(),L=s("li"),re=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ce=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var E=a(h);R=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(c),u=r(c,"UL",{});var x=a(u);v=r(x,"LI",{});var be=a(v);k=n(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),_=d(x),B=r(x,"LI",{});var we=a(B);ue=n(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(t),x.forEach(t),X=d(c),M=r(c,"P",{});var P=a(M);te=n(P,"This second option is useful when using "),N=r(P,"CODE",{});var $e=a(N);ne=n($e,"tf.keras.Model.fit"),$e.forEach(t),fe=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(P,"CODE",{});var ye=a(O);ge=n(ye,"model(inputs)"),ye.forEach(t),pe=n(P,"."),P.forEach(t),J=d(c),j=r(c,"P",{});var Fe=a(j);se=n(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),Y=d(c),z=r(c,"UL",{});var D=a(z);q=r(D,"LI",{});var Q=a(q);ae=n(Q,"a single Tensor with "),U=r(Q,"CODE",{});var Te=a(U);he=n(Te,"input_ids"),Te.forEach(t),ie=n(Q," only and nothing else: "),I=r(Q,"CODE",{});var Ee=a(I);_e=n(Ee,"model(inputs_ids)"),Ee.forEach(t),Q.forEach(t),me=d(D),C=r(D,"LI",{});var A=a(C);ke=n(A,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r(A,"CODE",{});var Re=a(S);le=n(Re,"model([input_ids, attention_mask])"),Re.forEach(t),ve=n(A," or "),W=r(A,"CODE",{});var Be=a(W);Z=n(Be,"model([input_ids, attention_mask, token_type_ids])"),Be.forEach(t),A.forEach(t),oe=d(D),L=r(D,"LI",{});var de=a(L);re=n(de,`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(de,"CODE",{});var Me=a(H);ce=n(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(t),de.forEach(t),D.forEach(t)},m(c,E){m(c,h,E),e(h,R),m(c,g,E),m(c,u,E),e(u,v),e(v,k),e(u,_),e(u,B),e(B,ue),m(c,X,E),m(c,M,E),e(M,te),e(M,N),e(N,ne),e(M,fe),e(M,O),e(O,ge),e(M,pe),m(c,J,E),m(c,j,E),e(j,se),m(c,Y,E),m(c,z,E),e(z,q),e(q,ae),e(q,U),e(U,he),e(q,ie),e(q,I),e(I,_e),e(z,me),e(z,C),e(C,ke),e(C,S),e(S,le),e(C,ve),e(C,W),e(W,Z),e(z,oe),e(z,L),e(L,re),e(L,H),e(H,ce)},d(c){c&&t(h),c&&t(g),c&&t(u),c&&t(X),c&&t(M),c&&t(J),c&&t(j),c&&t(Y),c&&t(z)}}}function qw(V){let h,R,g,u,v;return{c(){h=s("p"),R=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),u=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){h=r(k,"P",{});var _=a(h);R=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(_,"CODE",{});var B=a(g);u=n(B,"Module"),B.forEach(t),v=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(k,_){m(k,h,_),e(h,R),e(h,g),e(g,u),e(h,v)},d(k){k&&t(h)}}}function xw(V){let h,R,g,u,v,k,_,B,ue,X,M,te,N,ne,fe,O,ge,pe,J,j,se,Y,z,q,ae,U,he,ie,I,_e,me,C,ke,S,le,ve,W,Z,oe,L,re,H,ce,c,E,x,be,we,P,$e,ye,Fe,D,Q,Te,Ee,A,Re,Be,de,Me,kp,vp,Oa,Tp,bp,$n,yp,Ia,Sa,Wt,Wa,wp,$p,Eo,Ro,Ua,Fn,Fp,Ha,Ep,Rp,Qa,Bp,Mp,Ka,Va,En,Bo,Mo,Ja,Rn,zp,Ga,Cp,qp,Xa,xp,Pp,Ya,Za,Bn,zo,Co,ei,Mn,Lp,ti,jp,Dp,oi,Ap,Ad,Ut,qo,ni,zn,Np,si,Op,Nd,Ce,Cn,Ip,qn,Sp,xn,Wp,Up,Hp,Pn,Qp,Wr,Kp,Vp,Jp,Ht,Gp,ri,Xp,Yp,ai,Zp,eh,th,Bt,Ln,oh,ii,nh,sh,jn,Ur,rh,li,ah,ih,Hr,lh,di,dh,ch,xo,Dn,ph,An,hh,ci,mh,uh,fh,ut,Nn,gh,pi,_h,kh,On,vh,Qt,Th,hi,bh,yh,mi,wh,$h,Fh,ui,Od,Kt,Po,fi,In,Eh,gi,Rh,Id,De,Sn,Bh,Ft,Mh,_i,zh,Ch,Wn,qh,xh,Qr,Ph,Lh,jh,Mt,Un,Dh,ki,Ah,Nh,Hn,Kr,Oh,vi,Ih,Sh,Vr,Wh,Ti,Uh,Hh,Lo,Qn,Qh,Kn,Kh,bi,Vh,Jh,Gh,ft,Vn,Xh,yi,Yh,Zh,Jn,em,wi,tm,om,$i,Sd,Vt,jo,Fi,Gn,nm,Ei,sm,Wd,Je,Xn,rm,Yn,am,Zn,im,lm,dm,es,cm,ts,pm,hm,mm,Ae,um,Ri,fm,gm,Bi,_m,km,Mi,vm,Tm,zi,bm,ym,Ci,wm,$m,qi,Fm,Em,Rm,Ge,os,Bm,Jt,Mm,Jr,zm,Cm,xi,qm,xm,Pm,Do,Lm,Pi,jm,Dm,ns,Ud,Gt,Ao,Li,ss,Am,ji,Nm,Hd,Et,rs,Om,Xt,Im,Di,Sm,Wm,as,Um,Hm,Qm,Xe,is,Km,Yt,Vm,Gr,Jm,Gm,Ai,Xm,Ym,Zm,No,eu,Ni,tu,ou,ls,Qd,Zt,Oo,Oi,ds,nu,Ii,su,Kd,Rt,cs,ru,eo,au,Si,iu,lu,ps,du,cu,pu,Ye,hs,hu,to,mu,Xr,uu,fu,Wi,gu,_u,ku,Io,vu,Ui,Tu,bu,ms,Vd,oo,So,Hi,us,yu,Qi,wu,Jd,ct,fs,$u,Ki,Fu,Eu,gs,Ru,_s,Bu,Mu,zu,Le,ks,Cu,no,qu,Yr,xu,Pu,Vi,Lu,ju,Du,Wo,Au,Ji,Nu,Ou,vs,Iu,Gi,Su,Wu,Ts,Gd,so,Uo,Xi,bs,Uu,Yi,Hu,Xd,pt,ys,Qu,Zi,Ku,Vu,ws,Ju,$s,Gu,Xu,Yu,Ze,Fs,Zu,ro,ef,Zr,tf,of,el,nf,sf,rf,Ho,af,tl,lf,df,Es,Yd,ao,Qo,ol,Rs,cf,nl,pf,Zd,ht,Bs,hf,sl,mf,uf,Ms,ff,zs,gf,_f,kf,et,Cs,vf,io,Tf,ea,bf,yf,rl,wf,$f,Ff,Ko,Ef,al,Rf,Bf,qs,ec,lo,Vo,il,xs,Mf,ll,zf,tc,mt,Ps,Cf,co,qf,dl,xf,Pf,cl,Lf,jf,Df,Ls,Af,js,Nf,Of,If,tt,Ds,Sf,po,Wf,ta,Uf,Hf,pl,Qf,Kf,Vf,Jo,Jf,hl,Gf,Xf,As,oc,ho,Go,ml,Ns,Yf,ul,Zf,nc,Ne,Os,eg,fl,tg,og,Is,ng,oa,sg,rg,ag,Ss,ig,Ws,lg,dg,cg,Xo,pg,ot,Us,hg,mo,mg,na,ug,fg,gl,gg,_g,kg,Yo,vg,_l,Tg,bg,Hs,sc,uo,Zo,kl,Qs,yg,vl,wg,rc,Oe,Ks,$g,Vs,Fg,Tl,Eg,Rg,Bg,Js,Mg,sa,zg,Cg,qg,Gs,xg,Xs,Pg,Lg,jg,en,Dg,nt,Ys,Ag,fo,Ng,ra,Og,Ig,bl,Sg,Wg,Ug,tn,Hg,yl,Qg,Kg,Zs,ac,go,on,wl,er,Vg,$l,Jg,ic,Ie,tr,Gg,or,Xg,Fl,Yg,Zg,e_,nr,t_,aa,o_,n_,s_,sr,r_,rr,a_,i_,l_,nn,d_,Qe,ar,c_,xe,p_,El,h_,m_,Rl,u_,f_,Bl,g_,__,Ml,k_,v_,zl,T_,b_,
Cl,y_,w_,ql,$_,F_,E_,ir,lr,R_,xl,B_,M_,z_,dr,C_,Pl,q_,x_,P_,K,L_,Ll,j_,D_,jl,A_,N_,Dl,O_,I_,Al,S_,W_,Nl,U_,H_,Ol,Q_,K_,Il,V_,J_,Sl,G_,X_,Wl,Y_,Z_,Ul,ek,tk,Hl,ok,nk,Ql,sk,rk,Kl,ak,ik,Vl,lk,dk,Jl,ck,pk,Gl,hk,mk,Xl,uk,fk,Yl,gk,_k,Zl,kk,vk,ed,Tk,bk,yk,td,wk,$k,cr,lc,_o,sn,od,pr,Fk,nd,Ek,dc,Se,hr,Rk,sd,Bk,Mk,mr,zk,ia,Ck,qk,xk,ur,Pk,fr,Lk,jk,Dk,rn,Ak,st,gr,Nk,ko,Ok,la,Ik,Sk,rd,Wk,Uk,Hk,an,Qk,ad,Kk,Vk,_r,cc,vo,ln,id,kr,Jk,ld,Gk,pc,We,vr,Xk,dd,Yk,Zk,Tr,ev,da,tv,ov,nv,br,sv,yr,rv,av,iv,dn,lv,rt,wr,dv,To,cv,ca,pv,hv,cd,mv,uv,fv,cn,gv,pd,_v,kv,$r,hc,bo,pn,hd,Fr,vv,md,Tv,mc,Ue,Er,bv,ud,yv,wv,Rr,$v,pa,Fv,Ev,Rv,Br,Bv,Mr,Mv,zv,Cv,hn,qv,at,zr,xv,yo,Pv,ha,Lv,jv,fd,Dv,Av,Nv,mn,Ov,gd,Iv,Sv,Cr,uc,wo,un,_d,qr,Wv,kd,Uv,fc,He,xr,Hv,$o,Qv,vd,Kv,Vv,Td,Jv,Gv,Xv,Pr,Yv,ma,Zv,eT,tT,Lr,oT,jr,nT,sT,rT,fn,aT,it,Dr,iT,Fo,lT,ua,dT,cT,bd,pT,hT,mT,gn,uT,yd,fT,gT,Ar,gc;return k=new ze({}),ne=new ze({}),re=new ze({}),be=new ee({props:{name:"class transformers.RemBertConfig",anchor:"transformers.RemBertConfig",parameters:[{name:"vocab_size",val:" = 250300"},{name:"hidden_size",val:" = 1152"},{name:"num_hidden_layers",val:" = 32"},{name:"num_attention_heads",val:" = 18"},{name:"input_embedding_size",val:" = 256"},{name:"output_embedding_size",val:" = 1664"},{name:"intermediate_size",val:" = 4608"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"classifier_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 312"},{name:"eos_token_id",val:" = 313"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/configuration_rembert.py#L29",parametersDescription:[{anchor:"transformers.RemBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 250300) &#x2014; Vocabulary size of the RemBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertModel">TFRemBertModel</a>. Vocabulary size of the model. 
Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.RemBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1152) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.RemBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.RemBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 18) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.RemBertConfig.input_embedding_size",description:`<strong>input_embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the input embeddings.`,name:"input_embedding_size"},{anchor:"transformers.RemBertConfig.output_embedding_size",description:`<strong>output_embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1664) &#x2014; Dimensionality of the output embeddings.`,name:"output_embedding_size"},{anchor:"transformers.RemBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4608) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.RemBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.RemBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.RemBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.RemBertConfig.classifier_dropout_prob",description:`<strong>classifier_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the classifier layer when fine-tuning.`,name:"classifier_dropout_prob"},{anchor:"transformers.RemBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.RemBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertModel">TFRemBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.RemBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.RemBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.RemBertConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"}]}}),$n=new je({props:{code:",",highlighted:""}}),Fn=new ze({}),Rn=new ze({}),Mn=new ze({}),zn=new ze({}),Cn=new ee({props:{name:"class transformers.RemBertTokenizer",anchor:"transformers.RemBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = True"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert.py#L43",parametersDescription:[{anchor:"transformers.RemBertTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.RemBertTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.RemBertTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.RemBertTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RemBertTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RemBertTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RemBertTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RemBertTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),Ln=new ee({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RemBertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert.py#L172",parametersDescription:[{anchor:"transformers.RemBertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.RemBertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Dn=new ee({props:{name:"get_special_tokens_mask",anchor:"transformers.RemBertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert.py#L197",parametersDescription:[{anchor:"transformers.RemBertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RemBertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.RemBertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Nn=new ee({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.RemBertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert.py#L228",parametersDescription:[{anchor:"transformers.RemBertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RemBertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` 
<p><code>List[int]</code></p> `}}),On=new je({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),In=new ze({}),Sn=new ee({props:{name:"class transformers.RemBertTokenizerFast",anchor:"transformers.RemBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = False"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert_fast.py#L52",parametersDescription:[{anchor:"transformers.RemBertTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.RemBertTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.RemBertTokenizerFast.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.RemBertTokenizerFast.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.RemBertTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.RemBertTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.`,name:"eos_token"},{anchor:"transformers.RemBertTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RemBertTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RemBertTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RemBertTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RemBertTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),Un=new ee({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RemBertTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert_fast.py#L143",parametersDescription:[{anchor:"transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Qn=new ee({props:{name:"get_special_tokens_mask",anchor:"transformers.RemBertTokenizerFast.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert_fast.py#L168",parametersDescription:[{anchor:"transformers.RemBertTokenizerFast.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.RemBertTokenizerFast.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.RemBertTokenizerFast.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Set to True if the token list is already formatted with special tokens for the model`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Vn=new ee({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/tokenization_rembert_fast.py#L199",parametersDescription:[{anchor:"transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Optional second list of IDs for sequence 
pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Jn=new je({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Gn=new ze({}),Xn=new ee({props:{name:"class transformers.RemBertModel",anchor:"transformers.RemBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L743",parametersDescription:[{anchor:"transformers.RemBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),os=new ee({props:{name:"forward",anchor:"transformers.RemBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L783",parametersDescription:[{anchor:"transformers.RemBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RemBertModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RemBertModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RemBertModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Do=new qe({props:{$$slots:{default:[mw]},$$scope:{ctx:V}}}),ns=new je({props:{code:`from transformers import RemBertTokenizer, RemBertModel import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertModel.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertModel.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ss=new ze({}),rs=new ee({props:{name:"class transformers.RemBertForCausalLM",anchor:"transformers.RemBertForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1016",parametersDescription:[{anchor:"transformers.RemBertForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),is=new ee({props:{name:"forward",anchor:"transformers.RemBertForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1038",parametersDescription:[{anchor:"transformers.RemBertForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RemBertForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RemBertForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RemBertForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels n <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.RemBertForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),No=new qe({props:{$$slots:{default:[uw]},$$scope:{ctx:V}}}),ls=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForCausalLM, RemBertConfig import torch tokenizer = RemBertTokenizer.from_pretrained('google/rembert') config = RemBertConfig.from_pretrained("google/rembert") config.is_decoder = True model = RemBertForCausalLM.from_pretrained('google/rembert', config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForCausalLM, RemBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RemBertConfig.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForCausalLM.from_pretrained(<span class="hljs-string">&#x27;google/rembert&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = 
outputs.logits`}}),ds=new ze({}),cs=new ee({props:{name:"class transformers.RemBertForMaskedLM",anchor:"transformers.RemBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L914",parametersDescription:[{anchor:"transformers.RemBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),hs=new ee({props:{name:"forward",anchor:"transformers.RemBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L936",parametersDescription:[{anchor:"transformers.RemBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Io=new qe({props:{$$slots:{default:[fw]},$$scope:{ctx:V}}}),ms=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForMaskedLM import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertForMaskedLM.from_pretrained('rembert') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),us=new ze({}),fs=new ee({props:{name:"class transformers.RemBertForSequenceClassification",anchor:"transformers.RemBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1169",parametersDescription:[{anchor:"transformers.RemBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ks=new ee({props:{name:"forward",anchor:"transformers.RemBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1180",parametersDescription:[{anchor:"transformers.RemBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Wo=new qe({props:{$$slots:{default:[gw]},$$scope:{ctx:V}}}),vs=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForSequenceClassification import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertForSequenceClassification.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", 
return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ts=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForSequenceClassification import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertForSequenceClassification.from_pretrained('rembert', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),bs=new ze({}),ys=new ee({props:{name:"class 
transformers.RemBertForMultipleChoice",anchor:"transformers.RemBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1265",parametersDescription:[{anchor:"transformers.RemBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fs=new ee({props:{name:"forward",anchor:"transformers.RemBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1276",parametersDescription:[{anchor:"transformers.RemBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ho=new qe({props:{$$slots:{default:[_w]},$$scope:{ctx:V}}}),Es=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForMultipleChoice import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertForMultipleChoice.from_pretrained('rembert') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Rs=new ze({}),Bs=new ee({props:{name:"class transformers.RemBertForTokenClassification",anchor:"transformers.RemBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1356",parametersDescription:[{anchor:"transformers.RemBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Cs=new ee({props:{name:"forward",anchor:"transformers.RemBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1368",parametersDescription:[{anchor:"transformers.RemBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ko=new qe({props:{$$slots:{default:[kw]},$$scope:{ctx:V}}}),qs=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForTokenClassification import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertForTokenClassification.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),xs=new ze({}),Ps=new ee({props:{name:"class transformers.RemBertForQuestionAnswering",anchor:"transformers.RemBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1444",parametersDescription:[{anchor:"transformers.RemBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ds=new ee({props:{name:"forward",anchor:"transformers.RemBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_rembert.py#L1456",parametersDescription:[{anchor:"transformers.RemBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RemBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RemBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RemBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RemBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RemBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RemBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RemBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RemBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RemBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.RemBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute 
the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Jo=new qe({props:{$$slots:{default:[vw]},$$scope:{ctx:V}}}),As=new je({props:{code:`from transformers import RemBertTokenizer, RemBertForQuestionAnswering import torch tokenizer = RemBertTokenizer.from_pretrained('rembert') model = RemBertForQuestionAnswering.from_pretrained('rembert') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Ns=new ze({}),Os=new ee({props:{name:"class transformers.TFRemBertModel",anchor:"transformers.TFRemBertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L949",parametersDescription:[{anchor:"transformers.TFRemBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xo=new qe({props:{$$slots:{default:[Tw]},$$scope:{ctx:V}}}),Us=new ee({props:{name:"call",anchor:"transformers.TFRemBertModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L955",parametersDescription:[{anchor:"transformers.TFRemBertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRemBertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRemBertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRemBertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRemBertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRemBertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRemBertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRemBertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRemBertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRemBertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRemBertModel.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFRemBertModel.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFRemBertModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFRemBertModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> 
`,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Yo=new qe({props:{$$slots:{default:[bw]},$$scope:{ctx:V}}}),Hs=new je({props:{code:`from transformers import RemBertTokenizer, TFRemBertModel import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertModel.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertModel.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Qs=new ze({}),Ks=new ee({props:{name:"class transformers.TFRemBertForMaskedLM",anchor:"transformers.TFRemBertForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1061",parametersDescription:[{anchor:"transformers.TFRemBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),en=new qe({props:{$$slots:{default:[yw]},$$scope:{ctx:V}}}),Ys=new ee({props:{name:"call",anchor:"transformers.TFRemBertForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1077",parametersDescription:[{anchor:"transformers.TFRemBertForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRemBertForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRemBertForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRemBertForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRemBertForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRemBertForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRemBertForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRemBertForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRemBertForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRemBertForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRemBertForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),tn=new qe({props:{$$slots:{default:[ww]},$$scope:{ctx:V}}}),Zs=new je({props:{code:`from transformers import RemBertTokenizer, TFRemBertForMaskedLM import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertForMaskedLM.from_pretrained('rembert') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),er=new ze({}),tr=new ee({props:{name:"class transformers.TFRemBertForCausalLM",anchor:"transformers.TFRemBertForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1159",parametersDescription:[{anchor:"transformers.TFRemBertForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),nn=new qe({props:{$$slots:{default:[$w]},$$scope:{ctx:V}}}),ar=new ee({props:{name:"call",anchor:"transformers.TFRemBertForCausalLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1185",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when 
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),cr=new je({props:{code:`from transformers import RemBertTokenizer, TFRemBertForCausalLM import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertForCausalLM.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForCausalLM.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),pr=new ze({}),hr=new ee({props:{name:"class 
transformers.TFRemBertForSequenceClassification",anchor:"transformers.TFRemBertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1312",parametersDescription:[{anchor:"transformers.TFRemBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rn=new qe({props:{$$slots:{default:[Fw]},$$scope:{ctx:V}}}),gr=new ee({props:{name:"call",anchor:"transformers.TFRemBertForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1326",parametersDescription:[{anchor:"transformers.TFRemBertForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRemBertForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRemBertForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRemBertForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRemBertForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRemBertForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRemBertForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRemBertForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRemBertForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRemBertForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRemBertForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),an=new qe({props:{$$slots:{default:[Ew]},$$scope:{ctx:V}}}),_r=new je({props:{code:`from transformers import RemBertTokenizer, TFRemBertForSequenceClassification import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertForSequenceClassification.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),kr=new ze({}),vr=new ee({props:{name:"class transformers.TFRemBertForMultipleChoice",anchor:"transformers.TFRemBertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1411",parametersDescription:[{anchor:"transformers.TFRemBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),dn=new qe({props:{$$slots:{default:[Rw]},$$scope:{ctx:V}}}),wr=new ee({props:{name:"call",anchor:"transformers.TFRemBertForMultipleChoice.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1431",parametersDescription:[{anchor:"transformers.TFRemBertForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRemBertForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRemBertForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRemBertForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRemBertForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRemBertForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRemBertForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRemBertForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRemBertForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRemBertForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRemBertForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),cn=new qe({props:{$$slots:{default:[Bw]},$$scope:{ctx:V}}}),$r=new je({props:{code:`from transformers import RemBertTokenizer, TFRemBertForMultipleChoice import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertForMultipleChoice.from_pretrained('rembert') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Fr=new ze({}),Er=new ee({props:{name:"class transformers.TFRemBertForTokenClassification",anchor:"transformers.TFRemBertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1562",parametersDescription:[{anchor:"transformers.TFRemBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),hn=new qe({props:{$$slots:{default:[Mw]},$$scope:{ctx:V}}}),zr=new ee({props:{name:"call",anchor:"transformers.TFRemBertForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1574",parametersDescription:[{anchor:"transformers.TFRemBertForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRemBertForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRemBertForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRemBertForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRemBertForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRemBertForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRemBertForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRemBertForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRemBertForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRemBertForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRemBertForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),mn=new qe({props:{$$slots:{default:[zw]},$$scope:{ctx:V}}}),Cr=new je({props:{code:`from transformers import 
RemBertTokenizer, TFRemBertForTokenClassification import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertForTokenClassification.from_pretrained('rembert') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qr=new ze({}),xr=new ee({props:{name:"class transformers.TFRemBertForQuestionAnswering",anchor:"transformers.TFRemBertForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1658",parametersDescription:[{anchor:"transformers.TFRemBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fn=new qe({props:{$$slots:{default:[Cw]},$$scope:{ctx:V}}}),Dr=new ee({props:{name:"call",anchor:"transformers.TFRemBertForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rembert/modeling_tf_rembert.py#L1669",parametersDescription:[{anchor:"transformers.TFRemBertForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFRemBertForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),gn=new qe({props:{$$slots:{default:[qw]},$$scope:{ctx:V}}}),Ar=new je({props:{code:`from transformers import RemBertTokenizer, TFRemBertForQuestionAnswering import tensorflow as tf tokenizer = RemBertTokenizer.from_pretrained('rembert') model = TFRemBertForQuestionAnswering.from_pretrained('rembert') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;rembert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){h=s("meta"),R=l(),g=s("h1"),u=s("a"),v=s("span"),T(k.$$.fragment),_=l(),B=s("span"),ue=o("RemBERT"),X=l(),M=s("h2"),te=s("a"),N=s("span"),T(ne.$$.fragment),fe=l(),O=s("span"),ge=o("Overview"),pe=l(),J=s("p"),j=o("The RemBERT model was proposed in "),se=s("a"),Y=o("Rethinking Embedding Coupling in Pre-trained Language Models"),z=o(" by Hyung Won Chung, Thibault F\xE9vry, Henry Tsai, Melvin Johnson, Sebastian Ruder."),q=l(),ae=s("p"),U=o("The abstract from the paper is the following:"),he=l(),ie=s("p"),I=s("em"),_e=o(`We re-evaluate the standard practice of sharing weights between input and output embeddings in state-of-the-art pre-trained language models. We show that decoupled embeddings provide increased modeling flexibility, allowing us to significantly improve the efficiency of parameter allocation in the input embedding of multilingual models. By reallocating the input embedding parameters in the Transformer layers, we achieve dramatically better performance on standard natural language understanding tasks with the same number of parameters during fine-tuning. We also show that allocating additional capacity to the output embedding provides benefits to the model that persist through the fine-tuning stage even though the output embedding is discarded after pre-training. Our analysis shows that larger output embeddings prevent the model\u2019s last layers from overspecializing to the pre-training task and encourage Transformer representations to be more general and more transferable to other tasks and languages. Harnessing these findings, we are able to train models that achieve strong performance on the XTREME benchmark without increasing the number of parameters at the fine-tuning stage.`),me=l(),C=s("p"),ke=o("Tips:"),S=l(),le=s("p"),ve=o(`For fine-tuning, RemBERT can be thought of as a bigger version of mBERT with an ALBERT-like factorization of the embedding layer. The embeddings are not tied in pre-training, in contrast with BERT, which enables smaller input embeddings (preserved during fine-tuning) and bigger output embeddings (discarded at fine-tuning). 
The tokenizer is also similar to the Albert one rather than the BERT one.`),W=l(),Z=s("h2"),oe=s("a"),L=s("span"),T(re.$$.fragment),H=l(),ce=s("span"),c=o("RemBertConfig"),E=l(),x=s("div"),T(be.$$.fragment),we=l(),P=s("p"),$e=o("This is the configuration class to store the configuration of a "),ye=s("a"),Fe=o("RemBertModel"),D=o(`. It is used to instantiate an RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the remert-large architecture.`),Q=l(),Te=s("p"),Ee=o("Configuration objects inherit from "),A=s("a"),Re=o("PretrainedConfig"),Be=o(` and can be used to control the model outputs. Read the documentation from `),de=s("a"),Me=o("PretrainedConfig"),kp=o(" for more information."),vp=l(),Oa=s("p"),Tp=o("Example:"),bp=l(),T($n.$$.fragment),yp=l(),Ia=s("blockquote"),Sa=s("blockquote"),Wt=s("blockquote"),Wa=s("p"),wp=o("from transformers import RemBertModel, RemBertConfig"),$p=l(),Eo=s("h1"),Ro=s("a"),Ua=s("span"),T(Fn.$$.fragment),Fp=l(),Ha=s("span"),Ep=o("Initializing a RemBERT rembert style configuration"),Rp=l(),Qa=s("p"),Bp=o("configuration = RemBertConfig()"),Mp=l(),Ka=s("blockquote"),Va=s("blockquote"),En=s("blockquote"),Bo=s("h1"),Mo=s("a"),Ja=s("span"),T(Rn.$$.fragment),zp=l(),Ga=s("span"),Cp=o("Initializing a model from the rembert style configuration"),qp=l(),Xa=s("p"),xp=o("model = RemBertModel(configuration)"),Pp=l(),Ya=s("blockquote"),Za=s("blockquote"),Bn=s("blockquote"),zo=s("h1"),Co=s("a"),ei=s("span"),T(Mn.$$.fragment),Lp=l(),ti=s("span"),jp=o("Accessing the model configuration"),Dp=l(),oi=s("p"),Ap=o("configuration = model.config"),Ad=l(),Ut=s("h2"),qo=s("a"),ni=s("span"),T(zn.$$.fragment),Np=l(),si=s("span"),Op=o("RemBertTokenizer"),Nd=l(),Ce=s("div"),T(Cn.$$.fragment),Ip=l(),qn=s("p"),Sp=o("Construct a RemBERT tokenizer. Based on "),xn=s("a"),Wp=o("SentencePiece"),Up=o("."),Hp=l(),Pn=s("p"),Qp=o("This tokenizer inherits from "),Wr=s("a"),Kp=o("PreTrainedTokenizer"),Vp=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Jp=l(),Ht=s("p"),Gp=o(`Attributes: sp_model (`),ri=s("code"),Xp=o("SentencePieceProcessor"),Yp=o(`): The `),ai=s("em"),Zp=o("SentencePiece"),eh=o(" processor that is used for every conversion (string, tokens and IDs)."),th=l(),Bt=s("div"),T(Ln.$$.fragment),oh=l(),ii=s("p"),nh=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REMBERT sequence has the following format:`),sh=l(),jn=s("ul"),Ur=s("li"),rh=o("single sequence: "),li=s("code"),ah=o("[CLS] X [SEP]"),ih=l(),Hr=s("li"),lh=o("pair of sequences: "),di=s("code"),dh=o("[CLS] A [SEP] B [SEP]"),ch=l(),xo=s("div"),T(Dn.$$.fragment),ph=l(),An=s("p"),hh=o(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ci=s("code"),mh=o("prepare_for_model"),uh=o(" method."),fh=l(),ut=s("div"),T(Nn.$$.fragment),gh=l(),pi=s("p"),_h=o(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A RemBERT sequence pair mask has the following format:`),kh=l(),T(On.$$.fragment),vh=l(),Qt=s("p"),Th=o("If "),hi=s("code"),bh=o("token_ids_1"),yh=o(" is "),mi=s("code"),wh=o("None"),$h=o(", this method only returns the first portion of the mask (0s)."),Fh=l(),ui=s("div"),Od=l(),Kt=s("h2"),Po=s("a"),fi=s("span"),T(In.$$.fragment),Eh=l(),gi=s("span"),Rh=o("RemBertTokenizerFast"),Id=l(),De=s("div"),T(Sn.$$.fragment),Bh=l(),Ft=s("p"),Mh=o("Construct a \u201Cfast\u201D RemBert tokenizer (backed by HuggingFace\u2019s "),_i=s("em"),zh=o("tokenizers"),Ch=o(" library). Based on "),Wn=s("a"),qh=o("Unigram"),xh=o(`. This tokenizer inherits from `),Qr=s("a"),Ph=o("PreTrainedTokenizerFast"),Lh=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),jh=l(),Mt=s("div"),T(Un.$$.fragment),Dh=l(),ki=s("p"),Ah=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RemBERT sequence has the following format:`),Nh=l(),Hn=s("ul"),Kr=s("li"),Oh=o("single sequence: "),vi=s("code"),Ih=o("[CLS] X [SEP]"),Sh=l(),Vr=s("li"),Wh=o("pair of sequences: "),Ti=s("code"),Uh=o("[CLS] A [SEP] B [SEP]"),Hh=l(),Lo=s("div"),T(Qn.$$.fragment),Qh=l(),Kn=s("p"),Kh=o(`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),bi=s("code"),Vh=o("prepare_for_model"),Jh=o(" method."),Gh=l(),ft=s("div"),T(Vn.$$.fragment),Xh=l(),yi=s("p"),Yh=o(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT sequence pair mask has the following format:`),Zh=l(),T(Jn.$$.fragment),em=l(),wi=s("p"),tm=o("if token_ids_1 is None, only returns the first portion of the mask (0s)."),om=l(),$i=s("div"),Sd=l(),Vt=s("h2"),jo=s("a"),Fi=s("span"),T(Gn.$$.fragment),nm=l(),Ei=s("span"),sm=o("RemBertModel"),Wd=l(),Je=s("div"),T(Xn.$$.fragment),rm=l(),Yn=s("p"),am=o(`The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Zn=s("a"),im=o("torch.nn.Module"),lm=o(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dm=l(),es=s("p"),cm=o(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),ts=s("a"),pm=o(`Attention is all you need`),hm=o(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),mm=l(),Ae=s("p"),um=o("To behave as an decoder the model needs to be initialized with the "),Ri=s("code"),fm=o("is_decoder"),gm=o(` argument of the configuration set to `),Bi=s("code"),_m=o("True"),km=o(". 
To be used in a Seq2Seq model, the model needs to initialized with both "),Mi=s("code"),vm=o("is_decoder"),Tm=o(` argument and `),zi=s("code"),bm=o("add_cross_attention"),ym=o(" set to "),Ci=s("code"),wm=o("True"),$m=o("; an "),qi=s("code"),Fm=o("encoder_hidden_states"),Em=o(` is then expected as an input to the forward pass.`),Rm=l(),Ge=s("div"),T(os.$$.fragment),Bm=l(),Jt=s("p"),Mm=o("The "),Jr=s("a"),zm=o("RemBertModel"),Cm=o(" forward method, overrides the "),xi=s("code"),qm=o("__call__"),xm=o(" special method."),Pm=l(),T(Do.$$.fragment),Lm=l(),Pi=s("p"),jm=o("Example:"),Dm=l(),T(ns.$$.fragment),Ud=l(),Gt=s("h2"),Ao=s("a"),Li=s("span"),T(ss.$$.fragment),Am=l(),ji=s("span"),Nm=o("RemBertForCausalLM"),Hd=l(),Et=s("div"),T(rs.$$.fragment),Om=l(),Xt=s("p"),Im=o("RemBERT Model with a "),Di=s("code"),Sm=o("language modeling"),Wm=o(` head on top for CLM fine-tuning. This model is a PyTorch `),as=s("a"),Um=o("torch.nn.Module"),Hm=o(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qm=l(),Xe=s("div"),T(is.$$.fragment),Km=l(),Yt=s("p"),Vm=o("The "),Gr=s("a"),Jm=o("RemBertForCausalLM"),Gm=o(" forward method, overrides the "),Ai=s("code"),Xm=o("__call__"),Ym=o(" special method."),Zm=l(),T(No.$$.fragment),eu=l(),Ni=s("p"),tu=o("Example:"),ou=l(),T(ls.$$.fragment),Qd=l(),Zt=s("h2"),Oo=s("a"),Oi=s("span"),T(ds.$$.fragment),nu=l(),Ii=s("span"),su=o("RemBertForMaskedLM"),Kd=l(),Rt=s("div"),T(cs.$$.fragment),ru=l(),eo=s("p"),au=o("RemBERT Model with a "),Si=s("code"),iu=o("language modeling"),lu=o(` head on top. This model is a PyTorch `),ps=s("a"),du=o("torch.nn.Module"),cu=o(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pu=l(),Ye=s("div"),T(hs.$$.fragment),hu=l(),to=s("p"),mu=o("The "),Xr=s("a"),uu=o("RemBertForMaskedLM"),fu=o(" forward method, overrides the "),Wi=s("code"),gu=o("__call__"),_u=o(" special method."),ku=l(),T(Io.$$.fragment),vu=l(),Ui=s("p"),Tu=o("Example:"),bu=l(),T(ms.$$.fragment),Vd=l(),oo=s("h2"),So=s("a"),Hi=s("span"),T(us.$$.fragment),yu=l(),Qi=s("span"),wu=o("RemBertForSequenceClassification"),Jd=l(),ct=s("div"),T(fs.$$.fragment),$u=l(),Ki=s("p"),Fu=o(`RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Eu=l(),gs=s("p"),Ru=o("This model is a PyTorch "),_s=s("a"),Bu=o("torch.nn.Module"),Mu=o(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zu=l(),Le=s("div"),T(ks.$$.fragment),Cu=l(),no=s("p"),qu=o("The "),Yr=s("a"),xu=o("RemBertForSequenceClassification"),Pu=o(" forward method, overrides the "),Vi=s("code"),Lu=o("__call__"),ju=o(" special method."),Du=l(),T(Wo.$$.fragment),Au=l(),Ji=s("p"),Nu=o("Example of single-label classification:"),Ou=l(),T(vs.$$.fragment),Iu=l(),Gi=s("p"),Su=o("Example of multi-label classification:"),Wu=l(),T(Ts.$$.fragment),Gd=l(),so=s("h2"),Uo=s("a"),Xi=s("span"),T(bs.$$.fragment),Uu=l(),Yi=s("span"),Hu=o("RemBertForMultipleChoice"),Xd=l(),pt=s("div"),T(ys.$$.fragment),Qu=l(),Zi=s("p"),Ku=o(`RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Vu=l(),ws=s("p"),Ju=o("This model is a PyTorch "),$s=s("a"),Gu=o("torch.nn.Module"),Xu=o(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yu=l(),Ze=s("div"),T(Fs.$$.fragment),Zu=l(),ro=s("p"),ef=o("The "),Zr=s("a"),tf=o("RemBertForMultipleChoice"),of=o(" forward method, overrides the "),el=s("code"),nf=o("__call__"),sf=o(" special method."),rf=l(),T(Ho.$$.fragment),af=l(),tl=s("p"),lf=o("Example:"),df=l(),T(Es.$$.fragment),Yd=l(),ao=s("h2"),Qo=s("a"),ol=s("span"),T(Rs.$$.fragment),cf=l(),nl=s("span"),pf=o("RemBertForTokenClassification"),Zd=l(),ht=s("div"),T(Bs.$$.fragment),hf=l(),sl=s("p"),mf=o(`RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),uf=l(),Ms=s("p"),ff=o("This model is a PyTorch "),zs=s("a"),gf=o("torch.nn.Module"),_f=o(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kf=l(),et=s("div"),T(Cs.$$.fragment),vf=l(),io=s("p"),Tf=o("The "),ea=s("a"),bf=o("RemBertForTokenClassification"),yf=o(" forward method, overrides the "),rl=s("code"),wf=o("__call__"),$f=o(" special method."),Ff=l(),T(Ko.$$.fragment),Ef=l(),al=s("p"),Rf=o("Example:"),Bf=l(),T(qs.$$.fragment),ec=l(),lo=s("h2"),Vo=s("a"),il=s("span"),T(xs.$$.fragment),Mf=l(),ll=s("span"),zf=o("RemBertForQuestionAnswering"),tc=l(),mt=s("div"),T(Ps.$$.fragment),Cf=l(),co=s("p"),qf=o(`RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),dl=s("code"),xf=o("span start logits"),Pf=o(" and "),cl=s("code"),Lf=o("span end logits"),jf=o(")."),Df=l(),Ls=s("p"),Af=o("This model is a PyTorch "),js=s("a"),Nf=o("torch.nn.Module"),Of=o(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),If=l(),tt=s("div"),T(Ds.$$.fragment),Sf=l(),po=s("p"),Wf=o("The "),ta=s("a"),Uf=o("RemBertForQuestionAnswering"),Hf=o(" forward method, overrides the "),pl=s("code"),Qf=o("__call__"),Kf=o(" special method."),Vf=l(),T(Jo.$$.fragment),Jf=l(),hl=s("p"),Gf=o("Example:"),Xf=l(),T(As.$$.fragment),oc=l(),ho=s("h2"),Go=s("a"),ml=s("span"),T(Ns.$$.fragment),Yf=l(),ul=s("span"),Zf=o("TFRemBertModel"),nc=l(),Ne=s("div"),T(Os.$$.fragment),eg=l(),fl=s("p"),tg=o("The bare RemBERT Model transformer outputing raw hidden-states without any specific head on top."),og=l(),Is=s("p"),ng=o("This model inherits from "),oa=s("a"),sg=o("TFPreTrainedModel"),rg=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ag=l(),Ss=s("p"),ig=o("This model is also a "),Ws=s("a"),lg=o("tf.keras.Model"),dg=o(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cg=l(),T(Xo.$$.fragment),pg=l(),ot=s("div"),T(Us.$$.fragment),hg=l(),mo=s("p"),mg=o("The "),na=s("a"),ug=o("TFRemBertModel"),fg=o(" forward method, overrides the "),gl=s("code"),gg=o("__call__"),_g=o(" special method."),kg=l(),T(Yo.$$.fragment),vg=l(),_l=s("p"),Tg=o("Example:"),bg=l(),T(Hs.$$.fragment),sc=l(),uo=s("h2"),Zo=s("a"),kl=s("span"),T(Qs.$$.fragment),yg=l(),vl=s("span"),wg=o("TFRemBertForMaskedLM"),rc=l(),Oe=s("div"),T(Ks.$$.fragment),$g=l(),Vs=s("p"),Fg=o("RemBERT Model with a "),Tl=s("code"),Eg=o("language modeling"),Rg=o(" head on top."),Bg=l(),Js=s("p"),Mg=o("This model inherits from "),sa=s("a"),zg=o("TFPreTrainedModel"),Cg=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qg=l(),Gs=s("p"),xg=o("This model is also a "),Xs=s("a"),Pg=o("tf.keras.Model"),Lg=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jg=l(),T(en.$$.fragment),Dg=l(),nt=s("div"),T(Ys.$$.fragment),Ag=l(),fo=s("p"),Ng=o("The "),ra=s("a"),Og=o("TFRemBertForMaskedLM"),Ig=o(" forward method, overrides the "),bl=s("code"),Sg=o("__call__"),Wg=o(" special method."),Ug=l(),T(tn.$$.fragment),Hg=l(),yl=s("p"),Qg=o("Example:"),Kg=l(),T(Zs.$$.fragment),ac=l(),go=s("h2"),on=s("a"),wl=s("span"),T(er.$$.fragment),Vg=l(),$l=s("span"),Jg=o("TFRemBertForCausalLM"),ic=l(),Ie=s("div"),T(tr.$$.fragment),Gg=l(),or=s("p"),Xg=o("RemBERT Model with a "),Fl=s("code"),Yg=o("language modeling"),Zg=o(" head on top for CLM fine-tuning."),e_=l(),nr=s("p"),t_=o("This model inherits from "),aa=s("a"),o_=o("TFPreTrainedModel"),n_=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),s_=l(),sr=s("p"),r_=o("This model is also a "),rr=s("a"),a_=o("tf.keras.Model"),i_=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),l_=l(),T(nn.$$.fragment),d_=l(),Qe=s("div"),T(ar.$$.fragment),c_=l(),xe=s("p"),p_=o("encoder_hidden_states ("),El=s("code"),h_=o("tf.Tensor"),m_=o(" of shape "),Rl=s("code"),u_=o("(batch_size, sequence_length, hidden_size)"),f_=o(", "),Bl=s("em"),g_=o("optional"),__=o(`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`),Ml=s("code"),k_=o("tf.Tensor"),v_=o(" of shape "),zl=s("code"),T_=o("(batch_size, sequence_length)"),b_=o(", "),Cl=s("em"),y_=o("optional"),w_=o(`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `),ql=s("code"),$_=o("[0, 1]"),F_=o(":"),E_=l(),ir=s("ul"),lr=s("li"),R_=o("1 for tokens that are "),xl=s("strong"),B_=o("not masked"),M_=o(","),z_=l(),dr=s("li"),C_=o("0 for tokens that are "),Pl=s("strong"),q_=o("masked"),x_=o("."),P_=l(),K=s("p"),L_=o("past_key_values ("),Ll=s("code"),j_=o("Tuple[Tuple[tf.Tensor]]"),D_=o(" of length "),jl=s("code"),A_=o("config.n_layers"),N_=o(`) contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding. If `),Dl=s("code"),O_=o("past_key_values"),I_=o(" are used, the user can optionally input only the last "),Al=s("code"),S_=o("decoder_input_ids"),W_=o(` (those that don\u2019t have their past key value states given to this model) of shape `),Nl=s("code"),U_=o("(batch_size, 1)"),H_=o(` instead of all `),Ol=s("code"),Q_=o("decoder_input_ids"),K_=o(" of shape "),Il=s("code"),V_=o("(batch_size, sequence_length)"),J_=o(`. use_cache (`),Sl=s("code"),G_=o("bool"),X_=o(", "),Wl=s("em"),Y_=o("optional"),Z_=o(", defaults to "),Ul=s("code"),ek=o("True"),tk=o(`): If set to `),Hl=s("code"),ok=o("True"),nk=o(", "),Ql=s("code"),sk=o("past_key_values"),rk=o(` key value states are returned and can be used to speed up decoding (see `),Kl=s("code"),ak=o("past_key_values"),ik=o("). Set to "),Vl=s("code"),lk=o("False"),dk=o(" during training, "),Jl=s("code"),ck=o("True"),pk=o(` during generation labels (`),Gl=s("code"),hk=o("tf.Tensor"),mk=o(" or "),Xl=s("code"),uk=o("np.ndarray"),fk=o(" of shape "),Yl=s("code"),gk=o("(batch_size, sequence_length)"),_k=o(", "),Zl=s("em"),kk=o("optional"),vk=o(`): Labels for computing the cross entropy classification loss. Indices should be in `),ed=s("code"),Tk=o("[0, ..., config.vocab_size - 1]"),bk=o("."),yk=l(),td=s("p"),wk=o("Example:"),$k=l(),T(cr.$$.fragment),lc=l(),_o=s("h2"),sn=s("a"),od=s("span"),T(pr.$$.fragment),Fk=l(),nd=s("span"),Ek=o("TFRemBertForSequenceClassification"),dc=l(),Se=s("div"),T(hr.$$.fragment),Rk=l(),sd=s("p"),Bk=o("RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks."),Mk=l(),mr=s("p"),zk=o("This model inherits from "),ia=s("a"),Ck=o("TFPreTrainedModel"),qk=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xk=l(),ur=s("p"),Pk=o("This model is also a "),fr=s("a"),Lk=o("tf.keras.Model"),jk=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Dk=l(),T(rn.$$.fragment),Ak=l(),st=s("div"),T(gr.$$.fragment),Nk=l(),ko=s("p"),Ok=o("The "),la=s("a"),Ik=o("TFRemBertForSequenceClassification"),Sk=o(" forward method, overrides the "),rd=s("code"),Wk=o("__call__"),Uk=o(" special method."),Hk=l(),T(an.$$.fragment),Qk=l(),ad=s("p"),Kk=o("Example:"),Vk=l(),T(_r.$$.fragment),cc=l(),vo=s("h2"),ln=s("a"),id=s("span"),T(kr.$$.fragment),Jk=l(),ld=s("span"),Gk=o("TFRemBertForMultipleChoice"),pc=l(),We=s("div"),T(vr.$$.fragment),Xk=l(),dd=s("p"),Yk=o(`RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Zk=l(),Tr=s("p"),ev=o("This model inherits from "),da=s("a"),tv=o("TFPreTrainedModel"),ov=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nv=l(),br=s("p"),sv=o("This model is also a "),yr=s("a"),rv=o("tf.keras.Model"),av=o(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),iv=l(),T(dn.$$.fragment),lv=l(),rt=s("div"),T(wr.$$.fragment),dv=l(),To=s("p"),cv=o("The "),ca=s("a"),pv=o("TFRemBertForMultipleChoice"),hv=o(" forward method, overrides the "),cd=s("code"),mv=o("__call__"),uv=o(" special method."),fv=l(),T(cn.$$.fragment),gv=l(),pd=s("p"),_v=o("Example:"),kv=l(),T($r.$$.fragment),hc=l(),bo=s("h2"),pn=s("a"),hd=s("span"),T(Fr.$$.fragment),vv=l(),md=s("span"),Tv=o("TFRemBertForTokenClassification"),mc=l(),Ue=s("div"),T(Er.$$.fragment),bv=l(),ud=s("p"),yv=o(`RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),wv=l(),Rr=s("p"),$v=o("This model inherits from "),pa=s("a"),Fv=o("TFPreTrainedModel"),Ev=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rv=l(),Br=s("p"),Bv=o("This model is also a "),Mr=s("a"),Mv=o("tf.keras.Model"),zv=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cv=l(),T(hn.$$.fragment),qv=l(),at=s("div"),T(zr.$$.fragment),xv=l(),yo=s("p"),Pv=o("The "),ha=s("a"),Lv=o("TFRemBertForTokenClassification"),jv=o(" forward method, overrides the "),fd=s("code"),Dv=o("__call__"),Av=o(" special method."),Nv=l(),T(mn.$$.fragment),Ov=l(),gd=s("p"),Iv=o("Example:"),Sv=l(),T(Cr.$$.fragment),uc=l(),wo=s("h2"),un=s("a"),_d=s("span"),T(qr.$$.fragment),Wv=l(),kd=s("span"),Uv=o("TFRemBertForQuestionAnswering"),fc=l(),He=s("div"),T(xr.$$.fragment),Hv=l(),$o=s("p"),Qv=o(`RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),vd=s("code"),Kv=o("span start logits"),Vv=o(" and "),Td=s("code"),Jv=o("span end logits"),Gv=o(")."),Xv=l(),Pr=s("p"),Yv=o("This model inherits from "),ma=s("a"),Zv=o("TFPreTrainedModel"),eT=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tT=l(),Lr=s("p"),oT=o("This model is also a "),jr=s("a"),nT=o("tf.keras.Model"),sT=o(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),rT=l(),T(fn.$$.fragment),aT=l(),it=s("div"),T(Dr.$$.fragment),iT=l(),Fo=s("p"),lT=o("The "),ua=s("a"),dT=o("TFRemBertForQuestionAnswering"),cT=o(" forward method, overrides the "),bd=s("code"),pT=o("__call__"),hT=o(" special method."),mT=l(),T(gn.$$.fragment),uT=l(),yd=s("p"),fT=o("Example:"),gT=l(),T(Ar.$$.fragment),this.h()},l(i){const f=hw('[data-svelte="svelte-1phssyn"]',document.head);h=r(f,"META",{name:!0,content:!0}),f.forEach(t),R=d(i),g=r(i,"H1",{class:!0});var Nr=a(g);u=r(Nr,"A",{id:!0,class:!0,href:!0});var wd=a(u);v=r(wd,"SPAN",{});var $d=a(v);b(k.$$.fragment,$d),$d.forEach(t),wd.forEach(t),_=d(Nr),B=r(Nr,"SPAN",{});var Fd=a(B);ue=n(Fd,"RemBERT"),Fd.forEach(t),Nr.forEach(t),X=d(i),M=r(i,"H2",{class:!0});var Or=a(M);te=r(Or,"A",{id:!0,class:!0,href:!0});var Ed=a(te);N=r(Ed,"SPAN",{});var Rd=a(N);b(ne.$$.fragment,Rd),Rd.forEach(t),Ed.forEach(t),fe=d(Or),O=r(Or,"SPAN",{});var Bd=a(O);ge=n(Bd,"Overview"),Bd.forEach(t),Or.forEach(t),pe=d(i),J=r(i,"P",{});var Ir=a(J);j=n(Ir,"The RemBERT model was proposed in "),se=r(Ir,"A",{href:!0,rel:!0});var Md=a(se);Y=n(Md,"Rethinking Embedding Coupling in Pre-trained Language Models"),Md.forEach(t),z=n(Ir," by Hyung Won Chung, Thibault F\xE9vry, Henry Tsai, Melvin Johnson, Sebastian Ruder."),Ir.forEach(t),q=d(i),ae=r(i,"P",{});var zd=a(ae);U=n(zd,"The abstract from the paper is the following:"),zd.forEach(t),he=d(i),ie=r(i,"P",{});var Cd=a(ie);I=r(Cd,"EM",{});var qd=a(I);_e=n(qd,`We re-evaluate the standard practice of sharing weights between input and output embeddings in state-of-the-art pre-trained language models. We show that decoupled embeddings provide increased modeling flexibility, allowing us to significantly improve the efficiency of parameter allocation in the input embedding of multilingual models. By reallocating the input embedding parameters in the Transformer layers, we achieve dramatically better performance on standard natural language understanding tasks with the same number of parameters during fine-tuning. We also show that allocating additional capacity to the output embedding provides benefits to the model that persist through the fine-tuning stage even though the output embedding is discarded after pre-training. Our analysis shows that larger output embeddings prevent the model\u2019s last layers from overspecializing to the pre-training task and encourage Transformer representations to be more general and more transferable to other tasks and languages. Harnessing these findings, we are able to train models that achieve strong performance on the XTREME benchmark without increasing the number of parameters at the fine-tuning stage.`),qd.forEach(t),Cd.forEach(t),me=d(i),C=r(i,"P",{});var xd=a(C);ke=n(xd,"Tips:"),xd.forEach(t),S=d(i),le=r(i,"P",{});var Pd=a(le);ve=n(Pd,`For fine-tuning, RemBERT can be thought of as a bigger version of mBERT with an ALBERT-like factorization of the embedding layer. The embeddings are not tied in pre-training, in contrast with BERT, which enables smaller input embeddings (preserved during fine-tuning) and bigger output embeddings (discarded at fine-tuning). 
The tokenizer is also similar to the Albert one rather than the BERT one.`),Pd.forEach(t),W=d(i),Z=r(i,"H2",{class:!0});var Sr=a(Z);oe=r(Sr,"A",{id:!0,class:!0,href:!0});var Ld=a(oe);L=r(Ld,"SPAN",{});var jd=a(L);b(re.$$.fragment,jd),jd.forEach(t),Ld.forEach(t),H=d(Sr),ce=r(Sr,"SPAN",{});var Dd=a(ce);c=n(Dd,"RemBertConfig"),Dd.forEach(t),Sr.forEach(t),E=d(i),x=r(i,"DIV",{class:!0});var Pe=a(x);b(be.$$.fragment,Pe),we=d(Pe),P=r(Pe,"P",{});var _c=a(P);$e=n(_c,"This is the configuration class to store the configuration of a "),ye=r(_c,"A",{href:!0});var bT=a(ye);Fe=n(bT,"RemBertModel"),bT.forEach(t),D=n(_c,`. It is used to instantiate an RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the remert-large architecture.`),_c.forEach(t),Q=d(Pe),Te=r(Pe,"P",{});var fa=a(Te);Ee=n(fa,"Configuration objects inherit from "),A=r(fa,"A",{href:!0});var yT=a(A);Re=n(yT,"PretrainedConfig"),yT.forEach(t),Be=n(fa,` and can be used to control the model outputs. Read the documentation from `),de=r(fa,"A",{href:!0});var wT=a(de);Me=n(wT,"PretrainedConfig"),wT.forEach(t),kp=n(fa," for more information."),fa.forEach(t),vp=d(Pe),Oa=r(Pe,"P",{});var $T=a(Oa);Tp=n($T,"Example:"),$T.forEach(t),bp=d(Pe),b($n.$$.fragment,Pe),yp=d(Pe),Ia=r(Pe,"BLOCKQUOTE",{});var FT=a(Ia);Sa=r(FT,"BLOCKQUOTE",{});var ET=a(Sa);Wt=r(ET,"BLOCKQUOTE",{});var ga=a(Wt);Wa=r(ga,"P",{});var RT=a(Wa);wp=n(RT,"from transformers import RemBertModel, RemBertConfig"),RT.forEach(t),$p=d(ga),Eo=r(ga,"H1",{class:!0});var kc=a(Eo);Ro=r(kc,"A",{id:!0,class:!0,href:!0});var BT=a(Ro);Ua=r(BT,"SPAN",{});var MT=a(Ua);b(Fn.$$.fragment,MT),MT.forEach(t),BT.forEach(t),Fp=d(kc),Ha=r(kc,"SPAN",{});var zT=a(Ha);Ep=n(zT,"Initializing a RemBERT rembert style configuration"),zT.forEach(t),kc.forEach(t),Rp=d(ga),Qa=r(ga,"P",{});var CT=a(Qa);Bp=n(CT,"configuration = RemBertConfig()"),CT.forEach(t),ga.forEach(t),ET.forEach(t),FT.forEach(t),Mp=d(Pe),Ka=r(Pe,"BLOCKQUOTE",{});var qT=a(Ka);Va=r(qT,"BLOCKQUOTE",{});var xT=a(Va);En=r(xT,"BLOCKQUOTE",{});var vc=a(En);Bo=r(vc,"H1",{class:!0});var Tc=a(Bo);Mo=r(Tc,"A",{id:!0,class:!0,href:!0});var PT=a(Mo);Ja=r(PT,"SPAN",{});var LT=a(Ja);b(Rn.$$.fragment,LT),LT.forEach(t),PT.forEach(t),zp=d(Tc),Ga=r(Tc,"SPAN",{});var jT=a(Ga);Cp=n(jT,"Initializing a model from the rembert style configuration"),jT.forEach(t),Tc.forEach(t),qp=d(vc),Xa=r(vc,"P",{});var DT=a(Xa);xp=n(DT,"model = RemBertModel(configuration)"),DT.forEach(t),vc.forEach(t),xT.forEach(t),qT.forEach(t),Pp=d(Pe),Ya=r(Pe,"BLOCKQUOTE",{});var AT=a(Ya);Za=r(AT,"BLOCKQUOTE",{});var NT=a(Za);Bn=r(NT,"BLOCKQUOTE",{});var bc=a(Bn);zo=r(bc,"H1",{class:!0});var yc=a(zo);Co=r(yc,"A",{id:!0,class:!0,href:!0});var OT=a(Co);ei=r(OT,"SPAN",{});var IT=a(ei);b(Mn.$$.fragment,IT),IT.forEach(t),OT.forEach(t),Lp=d(yc),ti=r(yc,"SPAN",{});var ST=a(ti);jp=n(ST,"Accessing the model configuration"),ST.forEach(t),yc.forEach(t),Dp=d(bc),oi=r(bc,"P",{});var WT=a(oi);Ap=n(WT,"configuration = model.config"),WT.forEach(t),bc.forEach(t),NT.forEach(t),AT.forEach(t),Pe.forEach(t),Ad=d(i),Ut=r(i,"H2",{class:!0});var wc=a(Ut);qo=r(wc,"A",{id:!0,class:!0,href:!0});var UT=a(qo);ni=r(UT,"SPAN",{});var HT=a(ni);b(zn.$$.fragment,HT),HT.forEach(t),UT.forEach(t),Np=d(wc),si=r(wc,"SPAN",{});var QT=a(si);Op=n(QT,"RemBertTokenizer"),QT.forEach(t),wc.forEach(t),Nd=d(i),Ce=r(i,"DIV",{class:!0});var Ke=a(Ce);b(Cn.$$.fragment,Ke),Ip=d(Ke),qn=r(Ke,"P",{});var $c=a(qn);Sp=n($c,"Construct a 
RemBERT tokenizer. Based on "),xn=r($c,"A",{href:!0,rel:!0});var KT=a(xn);Wp=n(KT,"SentencePiece"),KT.forEach(t),Up=n($c,"."),$c.forEach(t),Hp=d(Ke),Pn=r(Ke,"P",{});var Fc=a(Pn);Qp=n(Fc,"This tokenizer inherits from "),Wr=r(Fc,"A",{href:!0});var VT=a(Wr);Kp=n(VT,"PreTrainedTokenizer"),VT.forEach(t),Vp=n(Fc,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Fc.forEach(t),Jp=d(Ke),Ht=r(Ke,"P",{});var _a=a(Ht);Gp=n(_a,`Attributes: sp_model (`),ri=r(_a,"CODE",{});var JT=a(ri);Xp=n(JT,"SentencePieceProcessor"),JT.forEach(t),Yp=n(_a,`): The `),ai=r(_a,"EM",{});var GT=a(ai);Zp=n(GT,"SentencePiece"),GT.forEach(t),eh=n(_a," processor that is used for every conversion (string, tokens and IDs)."),_a.forEach(t),th=d(Ke),Bt=r(Ke,"DIV",{class:!0});var ka=a(Bt);b(Ln.$$.fragment,ka),oh=d(ka),ii=r(ka,"P",{});var XT=a(ii);nh=n(XT,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REMBERT sequence has the following format:`),XT.forEach(t),sh=d(ka),jn=r(ka,"UL",{});var Ec=a(jn);Ur=r(Ec,"LI",{});var _T=a(Ur);rh=n(_T,"single sequence: "),li=r(_T,"CODE",{});var YT=a(li);ah=n(YT,"[CLS] X [SEP]"),YT.forEach(t),_T.forEach(t),ih=d(Ec),Hr=r(Ec,"LI",{});var kT=a(Hr);lh=n(kT,"pair of sequences: "),di=r(kT,"CODE",{});var ZT=a(di);dh=n(ZT,"[CLS] A [SEP] B [SEP]"),ZT.forEach(t),kT.forEach(t),Ec.forEach(t),ka.forEach(t),ch=d(Ke),xo=r(Ke,"DIV",{class:!0});var Rc=a(xo);b(Dn.$$.fragment,Rc),ph=d(Rc),An=r(Rc,"P",{});var Bc=a(An);hh=n(Bc,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ci=r(Bc,"CODE",{});var eb=a(ci);mh=n(eb,"prepare_for_model"),eb.forEach(t),uh=n(Bc," method."),Bc.forEach(t),Rc.forEach(t),fh=d(Ke),ut=r(Ke,"DIV",{class:!0});var _n=a(ut);b(Nn.$$.fragment,_n),gh=d(_n),pi=r(_n,"P",{});var tb=a(pi);_h=n(tb,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT sequence pair mask has the following format:`),tb.forEach(t),kh=d(_n),b(On.$$.fragment,_n),vh=d(_n),Qt=r(_n,"P",{});var va=a(Qt);Th=n(va,"If "),hi=r(va,"CODE",{});var ob=a(hi);bh=n(ob,"token_ids_1"),ob.forEach(t),yh=n(va," is "),mi=r(va,"CODE",{});var nb=a(mi);wh=n(nb,"None"),nb.forEach(t),$h=n(va,", this method only returns the first portion of the mask (0s)."),va.forEach(t),_n.forEach(t),Fh=d(Ke),ui=r(Ke,"DIV",{class:!0}),a(ui).forEach(t),Ke.forEach(t),Od=d(i),Kt=r(i,"H2",{class:!0});var Mc=a(Kt);Po=r(Mc,"A",{id:!0,class:!0,href:!0});var sb=a(Po);fi=r(sb,"SPAN",{});var rb=a(fi);b(In.$$.fragment,rb),rb.forEach(t),sb.forEach(t),Eh=d(Mc),gi=r(Mc,"SPAN",{});var ab=a(gi);Rh=n(ab,"RemBertTokenizerFast"),ab.forEach(t),Mc.forEach(t),Id=d(i),De=r(i,"DIV",{class:!0});var gt=a(De);b(Sn.$$.fragment,gt),Bh=d(gt),Ft=r(gt,"P",{});var kn=a(Ft);Mh=n(kn,"Construct a \u201Cfast\u201D RemBert tokenizer (backed by HuggingFace\u2019s "),_i=r(kn,"EM",{});var ib=a(_i);zh=n(ib,"tokenizers"),ib.forEach(t),Ch=n(kn," library). Based on "),Wn=r(kn,"A",{href:!0,rel:!0});var lb=a(Wn);qh=n(lb,"Unigram"),lb.forEach(t),xh=n(kn,`. This tokenizer inherits from `),Qr=r(kn,"A",{href:!0});var db=a(Qr);Ph=n(db,"PreTrainedTokenizerFast"),db.forEach(t),Lh=n(kn,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods`),kn.forEach(t),jh=d(gt),Mt=r(gt,"DIV",{class:!0});var Ta=a(Mt);b(Un.$$.fragment,Ta),Dh=d(Ta),ki=r(Ta,"P",{});var cb=a(ki);Ah=n(cb,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RemBERT sequence has the following format:`),cb.forEach(t),Nh=d(Ta),Hn=r(Ta,"UL",{});var zc=a(Hn);Kr=r(zc,"LI",{});var vT=a(Kr);Oh=n(vT,"single sequence: "),vi=r(vT,"CODE",{});var pb=a(vi);Ih=n(pb,"[CLS] X [SEP]"),pb.forEach(t),vT.forEach(t),Sh=d(zc),Vr=r(zc,"LI",{});var TT=a(Vr);Wh=n(TT,"pair of sequences: "),Ti=r(TT,"CODE",{});var hb=a(Ti);Uh=n(hb,"[CLS] A [SEP] B [SEP]"),hb.forEach(t),TT.forEach(t),zc.forEach(t),Ta.forEach(t),Hh=d(gt),Lo=r(gt,"DIV",{class:!0});var Cc=a(Lo);b(Qn.$$.fragment,Cc),Qh=d(Cc),Kn=r(Cc,"P",{});var qc=a(Kn);Kh=n(qc,`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),bi=r(qc,"CODE",{});var mb=a(bi);Vh=n(mb,"prepare_for_model"),mb.forEach(t),Jh=n(qc," method."),qc.forEach(t),Cc.forEach(t),Gh=d(gt),ft=r(gt,"DIV",{class:!0});var vn=a(ft);b(Vn.$$.fragment,vn),Xh=d(vn),yi=r(vn,"P",{});var ub=a(yi);Yh=n(ub,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT sequence pair mask has the following format:`),ub.forEach(t),Zh=d(vn),b(Jn.$$.fragment,vn),em=d(vn),wi=r(vn,"P",{});var fb=a(wi);tm=n(fb,"if token_ids_1 is None, only returns the first portion of the mask (0s)."),fb.forEach(t),vn.forEach(t),om=d(gt),$i=r(gt,"DIV",{class:!0}),a($i).forEach(t),gt.forEach(t),Sd=d(i),Vt=r(i,"H2",{class:!0});var xc=a(Vt);jo=r(xc,"A",{id:!0,class:!0,href:!0});var gb=a(jo);Fi=r(gb,"SPAN",{});var _b=a(Fi);b(Gn.$$.fragment,_b),_b.forEach(t),gb.forEach(t),nm=d(xc),Ei=r(xc,"SPAN",{});var kb=a(Ei);sm=n(kb,"RemBertModel"),kb.forEach(t),xc.forEach(t),Wd=d(i),Je=r(i,"DIV",{class:!0});var zt=a(Je);b(Xn.$$.fragment,zt),rm=d(zt),Yn=r(zt,"P",{});var Pc=a(Yn);am=n(Pc,`The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Zn=r(Pc,"A",{href:!0,rel:!0});var vb=a(Zn);im=n(vb,"torch.nn.Module"),vb.forEach(t),lm=n(Pc,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pc.forEach(t),dm=d(zt),es=r(zt,"P",{});var Lc=a(es);cm=n(Lc,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),ts=r(Lc,"A",{href:!0,rel:!0});var Tb=a(ts);pm=n(Tb,`Attention is all you need`),Tb.forEach(t),hm=n(Lc,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Lc.forEach(t),mm=d(zt),Ae=r(zt,"P",{});var lt=a(Ae);um=n(lt,"To behave as an decoder the model needs to be initialized with the "),Ri=r(lt,"CODE",{});var bb=a(Ri);fm=n(bb,"is_decoder"),bb.forEach(t),gm=n(lt,` argument of the configuration set to `),Bi=r(lt,"CODE",{});var yb=a(Bi);_m=n(yb,"True"),yb.forEach(t),km=n(lt,". 
To be used in a Seq2Seq model, the model needs to initialized with both "),Mi=r(lt,"CODE",{});var wb=a(Mi);vm=n(wb,"is_decoder"),wb.forEach(t),Tm=n(lt,` argument and `),zi=r(lt,"CODE",{});var $b=a(zi);bm=n($b,"add_cross_attention"),$b.forEach(t),ym=n(lt," set to "),Ci=r(lt,"CODE",{});var Fb=a(Ci);wm=n(Fb,"True"),Fb.forEach(t),$m=n(lt,"; an "),qi=r(lt,"CODE",{});var Eb=a(qi);Fm=n(Eb,"encoder_hidden_states"),Eb.forEach(t),Em=n(lt,` is then expected as an input to the forward pass.`),lt.forEach(t),Rm=d(zt),Ge=r(zt,"DIV",{class:!0});var Ct=a(Ge);b(os.$$.fragment,Ct),Bm=d(Ct),Jt=r(Ct,"P",{});var ba=a(Jt);Mm=n(ba,"The "),Jr=r(ba,"A",{href:!0});var Rb=a(Jr);zm=n(Rb,"RemBertModel"),Rb.forEach(t),Cm=n(ba," forward method, overrides the "),xi=r(ba,"CODE",{});var Bb=a(xi);qm=n(Bb,"__call__"),Bb.forEach(t),xm=n(ba," special method."),ba.forEach(t),Pm=d(Ct),b(Do.$$.fragment,Ct),Lm=d(Ct),Pi=r(Ct,"P",{});var Mb=a(Pi);jm=n(Mb,"Example:"),Mb.forEach(t),Dm=d(Ct),b(ns.$$.fragment,Ct),Ct.forEach(t),zt.forEach(t),Ud=d(i),Gt=r(i,"H2",{class:!0});var jc=a(Gt);Ao=r(jc,"A",{id:!0,class:!0,href:!0});var zb=a(Ao);Li=r(zb,"SPAN",{});var Cb=a(Li);b(ss.$$.fragment,Cb),Cb.forEach(t),zb.forEach(t),Am=d(jc),ji=r(jc,"SPAN",{});var qb=a(ji);Nm=n(qb,"RemBertForCausalLM"),qb.forEach(t),jc.forEach(t),Hd=d(i),Et=r(i,"DIV",{class:!0});var ya=a(Et);b(rs.$$.fragment,ya),Om=d(ya),Xt=r(ya,"P",{});var wa=a(Xt);Im=n(wa,"RemBERT Model with a "),Di=r(wa,"CODE",{});var xb=a(Di);Sm=n(xb,"language modeling"),xb.forEach(t),Wm=n(wa,` head on top for CLM fine-tuning. This model is a PyTorch `),as=r(wa,"A",{href:!0,rel:!0});var Pb=a(as);Um=n(Pb,"torch.nn.Module"),Pb.forEach(t),Hm=n(wa,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wa.forEach(t),Qm=d(ya),Xe=r(ya,"DIV",{class:!0});var qt=a(Xe);b(is.$$.fragment,qt),Km=d(qt),Yt=r(qt,"P",{});var $a=a(Yt);Vm=n($a,"The "),Gr=r($a,"A",{href:!0});var Lb=a(Gr);Jm=n(Lb,"RemBertForCausalLM"),Lb.forEach(t),Gm=n($a," forward method, overrides the "),Ai=r($a,"CODE",{});var jb=a(Ai);Xm=n(jb,"__call__"),jb.forEach(t),Ym=n($a," special method."),$a.forEach(t),Zm=d(qt),b(No.$$.fragment,qt),eu=d(qt),Ni=r(qt,"P",{});var Db=a(Ni);tu=n(Db,"Example:"),Db.forEach(t),ou=d(qt),b(ls.$$.fragment,qt),qt.forEach(t),ya.forEach(t),Qd=d(i),Zt=r(i,"H2",{class:!0});var Dc=a(Zt);Oo=r(Dc,"A",{id:!0,class:!0,href:!0});var Ab=a(Oo);Oi=r(Ab,"SPAN",{});var Nb=a(Oi);b(ds.$$.fragment,Nb),Nb.forEach(t),Ab.forEach(t),nu=d(Dc),Ii=r(Dc,"SPAN",{});var Ob=a(Ii);su=n(Ob,"RemBertForMaskedLM"),Ob.forEach(t),Dc.forEach(t),Kd=d(i),Rt=r(i,"DIV",{class:!0});var Fa=a(Rt);b(cs.$$.fragment,Fa),ru=d(Fa),eo=r(Fa,"P",{});var Ea=a(eo);au=n(Ea,"RemBERT Model with a "),Si=r(Ea,"CODE",{});var Ib=a(Si);iu=n(Ib,"language modeling"),Ib.forEach(t),lu=n(Ea,` head on top. This model is a PyTorch `),ps=r(Ea,"A",{href:!0,rel:!0});var Sb=a(ps);du=n(Sb,"torch.nn.Module"),Sb.forEach(t),cu=n(Ea,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ea.forEach(t),pu=d(Fa),Ye=r(Fa,"DIV",{class:!0});var xt=a(Ye);b(hs.$$.fragment,xt),hu=d(xt),to=r(xt,"P",{});var Ra=a(to);mu=n(Ra,"The "),Xr=r(Ra,"A",{href:!0});var Wb=a(Xr);uu=n(Wb,"RemBertForMaskedLM"),Wb.forEach(t),fu=n(Ra," forward method, overrides the "),Wi=r(Ra,"CODE",{});var Ub=a(Wi);gu=n(Ub,"__call__"),Ub.forEach(t),_u=n(Ra," special method."),Ra.forEach(t),ku=d(xt),b(Io.$$.fragment,xt),vu=d(xt),Ui=r(xt,"P",{});var Hb=a(Ui);Tu=n(Hb,"Example:"),Hb.forEach(t),bu=d(xt),b(ms.$$.fragment,xt),xt.forEach(t),Fa.forEach(t),Vd=d(i),oo=r(i,"H2",{class:!0});var Ac=a(oo);So=r(Ac,"A",{id:!0,class:!0,href:!0});var Qb=a(So);Hi=r(Qb,"SPAN",{});var Kb=a(Hi);b(us.$$.fragment,Kb),Kb.forEach(t),Qb.forEach(t),yu=d(Ac),Qi=r(Ac,"SPAN",{});var Vb=a(Qi);wu=n(Vb,"RemBertForSequenceClassification"),Vb.forEach(t),Ac.forEach(t),Jd=d(i),ct=r(i,"DIV",{class:!0});var Tn=a(ct);b(fs.$$.fragment,Tn),$u=d(Tn),Ki=r(Tn,"P",{});var Jb=a(Ki);Fu=n(Jb,`RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Jb.forEach(t),Eu=d(Tn),gs=r(Tn,"P",{});var Nc=a(gs);Ru=n(Nc,"This model is a PyTorch "),_s=r(Nc,"A",{href:!0,rel:!0});var Gb=a(_s);Bu=n(Gb,"torch.nn.Module"),Gb.forEach(t),Mu=n(Nc,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nc.forEach(t),zu=d(Tn),Le=r(Tn,"DIV",{class:!0});var dt=a(Le);b(ks.$$.fragment,dt),Cu=d(dt),no=r(dt,"P",{});var Ba=a(no);qu=n(Ba,"The "),Yr=r(Ba,"A",{href:!0});var Xb=a(Yr);xu=n(Xb,"RemBertForSequenceClassification"),Xb.forEach(t),Pu=n(Ba," forward method, overrides the "),Vi=r(Ba,"CODE",{});var Yb=a(Vi);Lu=n(Yb,"__call__"),Yb.forEach(t),ju=n(Ba," special method."),Ba.forEach(t),Du=d(dt),b(Wo.$$.fragment,dt),Au=d(dt),Ji=r(dt,"P",{});var Zb=a(Ji);Nu=n(Zb,"Example of single-label classification:"),Zb.forEach(t),Ou=d(dt),b(vs.$$.fragment,dt),Iu=d(dt),Gi=r(dt,"P",{});var ey=a(Gi);Su=n(ey,"Example of multi-label classification:"),ey.forEach(t),Wu=d(dt),b(Ts.$$.fragment,dt),dt.forEach(t),Tn.forEach(t),Gd=d(i),so=r(i,"H2",{class:!0});var Oc=a(so);Uo=r(Oc,"A",{id:!0,class:!0,href:!0});var ty=a(Uo);Xi=r(ty,"SPAN",{});var oy=a(Xi);b(bs.$$.fragment,oy),oy.forEach(t),ty.forEach(t),Uu=d(Oc),Yi=r(Oc,"SPAN",{});var ny=a(Yi);Hu=n(ny,"RemBertForMultipleChoice"),ny.forEach(t),Oc.forEach(t),Xd=d(i),pt=r(i,"DIV",{class:!0});var bn=a(pt);b(ys.$$.fragment,bn),Qu=d(bn),Zi=r(bn,"P",{});var sy=a(Zi);Ku=n(sy,`RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),sy.forEach(t),Vu=d(bn),ws=r(bn,"P",{});var Ic=a(ws);Ju=n(Ic,"This model is a PyTorch "),$s=r(Ic,"A",{href:!0,rel:!0});var ry=a($s);Gu=n(ry,"torch.nn.Module"),ry.forEach(t),Xu=n(Ic,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ic.forEach(t),Yu=d(bn),Ze=r(bn,"DIV",{class:!0});var Pt=a(Ze);b(Fs.$$.fragment,Pt),Zu=d(Pt),ro=r(Pt,"P",{});var Ma=a(ro);ef=n(Ma,"The "),Zr=r(Ma,"A",{href:!0});var ay=a(Zr);tf=n(ay,"RemBertForMultipleChoice"),ay.forEach(t),of=n(Ma," forward method, overrides the "),el=r(Ma,"CODE",{});var iy=a(el);nf=n(iy,"__call__"),iy.forEach(t),sf=n(Ma," special method."),Ma.forEach(t),rf=d(Pt),b(Ho.$$.fragment,Pt),af=d(Pt),tl=r(Pt,"P",{});var ly=a(tl);lf=n(ly,"Example:"),ly.forEach(t),df=d(Pt),b(Es.$$.fragment,Pt),Pt.forEach(t),bn.forEach(t),Yd=d(i),ao=r(i,"H2",{class:!0});var Sc=a(ao);Qo=r(Sc,"A",{id:!0,class:!0,href:!0});var dy=a(Qo);ol=r(dy,"SPAN",{});var cy=a(ol);b(Rs.$$.fragment,cy),cy.forEach(t),dy.forEach(t),cf=d(Sc),nl=r(Sc,"SPAN",{});var py=a(nl);pf=n(py,"RemBertForTokenClassification"),py.forEach(t),Sc.forEach(t),Zd=d(i),ht=r(i,"DIV",{class:!0});var yn=a(ht);b(Bs.$$.fragment,yn),hf=d(yn),sl=r(yn,"P",{});var hy=a(sl);mf=n(hy,`RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),hy.forEach(t),uf=d(yn),Ms=r(yn,"P",{});var Wc=a(Ms);ff=n(Wc,"This model is a PyTorch "),zs=r(Wc,"A",{href:!0,rel:!0});var my=a(zs);gf=n(my,"torch.nn.Module"),my.forEach(t),_f=n(Wc,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wc.forEach(t),kf=d(yn),et=r(yn,"DIV",{class:!0});var Lt=a(et);b(Cs.$$.fragment,Lt),vf=d(Lt),io=r(Lt,"P",{});var za=a(io);Tf=n(za,"The "),ea=r(za,"A",{href:!0});var uy=a(ea);bf=n(uy,"RemBertForTokenClassification"),uy.forEach(t),yf=n(za," forward method, overrides the "),rl=r(za,"CODE",{});var fy=a(rl);wf=n(fy,"__call__"),fy.forEach(t),$f=n(za," special method."),za.forEach(t),Ff=d(Lt),b(Ko.$$.fragment,Lt),Ef=d(Lt),al=r(Lt,"P",{});var gy=a(al);Rf=n(gy,"Example:"),gy.forEach(t),Bf=d(Lt),b(qs.$$.fragment,Lt),Lt.forEach(t),yn.forEach(t),ec=d(i),lo=r(i,"H2",{class:!0});var Uc=a(lo);Vo=r(Uc,"A",{id:!0,class:!0,href:!0});var _y=a(Vo);il=r(_y,"SPAN",{});var ky=a(il);b(xs.$$.fragment,ky),ky.forEach(t),_y.forEach(t),Mf=d(Uc),ll=r(Uc,"SPAN",{});var vy=a(ll);zf=n(vy,"RemBertForQuestionAnswering"),vy.forEach(t),Uc.forEach(t),tc=d(i),mt=r(i,"DIV",{class:!0});var wn=a(mt);b(Ps.$$.fragment,wn),Cf=d(wn),co=r(wn,"P",{});var Ca=a(co);qf=n(Ca,`RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),dl=r(Ca,"CODE",{});var Ty=a(dl);xf=n(Ty,"span start logits"),Ty.forEach(t),Pf=n(Ca," and "),cl=r(Ca,"CODE",{});var by=a(cl);Lf=n(by,"span end logits"),by.forEach(t),jf=n(Ca,")."),Ca.forEach(t),Df=d(wn),Ls=r(wn,"P",{});var Hc=a(Ls);Af=n(Hc,"This model is a PyTorch "),js=r(Hc,"A",{href:!0,rel:!0});var yy=a(js);Nf=n(yy,"torch.nn.Module"),yy.forEach(t),Of=n(Hc,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hc.forEach(t),If=d(wn),tt=r(wn,"DIV",{class:!0});var jt=a(tt);b(Ds.$$.fragment,jt),Sf=d(jt),po=r(jt,"P",{});var qa=a(po);Wf=n(qa,"The "),ta=r(qa,"A",{href:!0});var wy=a(ta);Uf=n(wy,"RemBertForQuestionAnswering"),wy.forEach(t),Hf=n(qa," forward method, overrides the "),pl=r(qa,"CODE",{});var $y=a(pl);Qf=n($y,"__call__"),$y.forEach(t),Kf=n(qa," special method."),qa.forEach(t),Vf=d(jt),b(Jo.$$.fragment,jt),Jf=d(jt),hl=r(jt,"P",{});var Fy=a(hl);Gf=n(Fy,"Example:"),Fy.forEach(t),Xf=d(jt),b(As.$$.fragment,jt),jt.forEach(t),wn.forEach(t),oc=d(i),ho=r(i,"H2",{class:!0});var Qc=a(ho);Go=r(Qc,"A",{id:!0,class:!0,href:!0});var Ey=a(Go);ml=r(Ey,"SPAN",{});var Ry=a(ml);b(Ns.$$.fragment,Ry),Ry.forEach(t),Ey.forEach(t),Yf=d(Qc),ul=r(Qc,"SPAN",{});var By=a(ul);Zf=n(By,"TFRemBertModel"),By.forEach(t),Qc.forEach(t),nc=d(i),Ne=r(i,"DIV",{class:!0});var _t=a(Ne);b(Os.$$.fragment,_t),eg=d(_t),fl=r(_t,"P",{});var My=a(fl);tg=n(My,"The bare RemBERT Model transformer outputing raw hidden-states without any specific head on top."),My.forEach(t),og=d(_t),Is=r(_t,"P",{});var Kc=a(Is);ng=n(Kc,"This model inherits from "),oa=r(Kc,"A",{href:!0});var zy=a(oa);sg=n(zy,"TFPreTrainedModel"),zy.forEach(t),rg=n(Kc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kc.forEach(t),ag=d(_t),Ss=r(_t,"P",{});var Vc=a(Ss);ig=n(Vc,"This model is also a "),Ws=r(Vc,"A",{href:!0,rel:!0});var Cy=a(Ws);lg=n(Cy,"tf.keras.Model"),Cy.forEach(t),dg=n(Vc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vc.forEach(t),cg=d(_t),b(Xo.$$.fragment,_t),pg=d(_t),ot=r(_t,"DIV",{class:!0});var Dt=a(ot);b(Us.$$.fragment,Dt),hg=d(Dt),mo=r(Dt,"P",{});var xa=a(mo);mg=n(xa,"The "),na=r(xa,"A",{href:!0});var qy=a(na);ug=n(qy,"TFRemBertModel"),qy.forEach(t),fg=n(xa," forward method, overrides the "),gl=r(xa,"CODE",{});var xy=a(gl);gg=n(xy,"__call__"),xy.forEach(t),_g=n(xa," special method."),xa.forEach(t),kg=d(Dt),b(Yo.$$.fragment,Dt),vg=d(Dt),_l=r(Dt,"P",{});var Py=a(_l);Tg=n(Py,"Example:"),Py.forEach(t),bg=d(Dt),b(Hs.$$.fragment,Dt),Dt.forEach(t),_t.forEach(t),sc=d(i),uo=r(i,"H2",{class:!0});var Jc=a(uo);Zo=r(Jc,"A",{id:!0,class:!0,href:!0});var Ly=a(Zo);kl=r(Ly,"SPAN",{});var jy=a(kl);b(Qs.$$.fragment,jy),jy.forEach(t),Ly.forEach(t),yg=d(Jc),vl=r(Jc,"SPAN",{});var Dy=a(vl);wg=n(Dy,"TFRemBertForMaskedLM"),Dy.forEach(t),Jc.forEach(t),rc=d(i),Oe=r(i,"DIV",{class:!0});var kt=a(Oe);b(Ks.$$.fragment,kt),$g=d(kt),Vs=r(kt,"P",{});var Gc=a(Vs);Fg=n(Gc,"RemBERT Model with a "),Tl=r(Gc,"CODE",{});var Ay=a(Tl);Eg=n(Ay,"language modeling"),Ay.forEach(t),Rg=n(Gc," head on top."),Gc.forEach(t),Bg=d(kt),Js=r(kt,"P",{});var Xc=a(Js);Mg=n(Xc,"This model inherits from "),sa=r(Xc,"A",{href:!0});var Ny=a(sa);zg=n(Ny,"TFPreTrainedModel"),Ny.forEach(t),Cg=n(Xc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xc.forEach(t),qg=d(kt),Gs=r(kt,"P",{});var Yc=a(Gs);xg=n(Yc,"This model is also a "),Xs=r(Yc,"A",{href:!0,rel:!0});var Oy=a(Xs);Pg=n(Oy,"tf.keras.Model"),Oy.forEach(t),Lg=n(Yc,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Yc.forEach(t),jg=d(kt),b(en.$$.fragment,kt),Dg=d(kt),nt=r(kt,"DIV",{class:!0});var At=a(nt);b(Ys.$$.fragment,At),Ag=d(At),fo=r(At,"P",{});var Pa=a(fo);Ng=n(Pa,"The "),ra=r(Pa,"A",{href:!0});var Iy=a(ra);Og=n(Iy,"TFRemBertForMaskedLM"),Iy.forEach(t),Ig=n(Pa," forward method, overrides the "),bl=r(Pa,"CODE",{});var Sy=a(bl);Sg=n(Sy,"__call__"),Sy.forEach(t),Wg=n(Pa," special method."),Pa.forEach(t),Ug=d(At),b(tn.$$.fragment,At),Hg=d(At),yl=r(At,"P",{});var Wy=a(yl);Qg=n(Wy,"Example:"),Wy.forEach(t),Kg=d(At),b(Zs.$$.fragment,At),At.forEach(t),kt.forEach(t),ac=d(i),go=r(i,"H2",{class:!0});var Zc=a(go);on=r(Zc,"A",{id:!0,class:!0,href:!0});var Uy=a(on);wl=r(Uy,"SPAN",{});var Hy=a(wl);b(er.$$.fragment,Hy),Hy.forEach(t),Uy.forEach(t),Vg=d(Zc),$l=r(Zc,"SPAN",{});var Qy=a($l);Jg=n(Qy,"TFRemBertForCausalLM"),Qy.forEach(t),Zc.forEach(t),ic=d(i),Ie=r(i,"DIV",{class:!0});var vt=a(Ie);b(tr.$$.fragment,vt),Gg=d(vt),or=r(vt,"P",{});var ep=a(or);Xg=n(ep,"RemBERT Model with a "),Fl=r(ep,"CODE",{});var Ky=a(Fl);Yg=n(Ky,"language modeling"),Ky.forEach(t),Zg=n(ep," head on top for CLM fine-tuning."),ep.forEach(t),e_=d(vt),nr=r(vt,"P",{});var tp=a(nr);t_=n(tp,"This model inherits from "),aa=r(tp,"A",{href:!0});var Vy=a(aa);o_=n(Vy,"TFPreTrainedModel"),Vy.forEach(t),n_=n(tp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tp.forEach(t),s_=d(vt),sr=r(vt,"P",{});var op=a(sr);r_=n(op,"This model is also a "),rr=r(op,"A",{href:!0,rel:!0});var Jy=a(rr);a_=n(Jy,"tf.keras.Model"),Jy.forEach(t),i_=n(op,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),op.forEach(t),l_=d(vt),b(nn.$$.fragment,vt),d_=d(vt),Qe=r(vt,"DIV",{class:!0});var Tt=a(Qe);b(ar.$$.fragment,Tt),c_=d(Tt),xe=r(Tt,"P",{});var Ve=a(xe);p_=n(Ve,"encoder_hidden_states ("),El=r(Ve,"CODE",{});var Gy=a(El);h_=n(Gy,"tf.Tensor"),Gy.forEach(t),m_=n(Ve," of shape "),Rl=r(Ve,"CODE",{});var Xy=a(Rl);u_=n(Xy,"(batch_size, sequence_length, hidden_size)"),Xy.forEach(t),f_=n(Ve,", "),Bl=r(Ve,"EM",{});var Yy=a(Bl);g_=n(Yy,"optional"),Yy.forEach(t),__=n(Ve,`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`),Ml=r(Ve,"CODE",{});var Zy=a(Ml);k_=n(Zy,"tf.Tensor"),Zy.forEach(t),v_=n(Ve," of shape "),zl=r(Ve,"CODE",{});var e1=a(zl);T_=n(e1,"(batch_size, sequence_length)"),e1.forEach(t),b_=n(Ve,", "),Cl=r(Ve,"EM",{});var t1=a(Cl);y_=n(t1,"optional"),t1.forEach(t),w_=n(Ve,`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in `),ql=r(Ve,"CODE",{});var o1=a(ql);$_=n(o1,"[0, 1]"),o1.forEach(t),F_=n(Ve,":"),Ve.forEach(t),E_=d(Tt),ir=r(Tt,"UL",{});var np=a(ir);lr=r(np,"LI",{});var sp=a(lr);R_=n(sp,"1 for tokens that are "),xl=r(sp,"STRONG",{});var n1=a(xl);B_=n(n1,"not masked"),n1.forEach(t),M_=n(sp,","),sp.forEach(t),z_=d(np),dr=r(np,"LI",{});var rp=a(dr);C_=n(rp,"0 for tokens that are "),Pl=r(rp,"STRONG",{});var s1=a(Pl);q_=n(s1,"masked"),s1.forEach(t),x_=n(rp,"."),rp.forEach(t),np.forEach(t),P_=d(Tt),K=r(Tt,"P",{});var G=a(K);L_=n(G,"past_key_values ("),Ll=r(G,"CODE",{});var r1=a(Ll);j_=n(r1,"Tuple[Tuple[tf.Tensor]]"),r1.forEach(t),D_=n(G," of length "),jl=r(G,"CODE",{});var a1=a(jl);A_=n(a1,"config.n_layers"),a1.forEach(t),N_=n(G,`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `),Dl=r(G,"CODE",{});var i1=a(Dl);O_=n(i1,"past_key_values"),i1.forEach(t),I_=n(G," are used, the user can optionally input only the last "),Al=r(G,"CODE",{});var l1=a(Al);S_=n(l1,"decoder_input_ids"),l1.forEach(t),W_=n(G,` (those that don\u2019t have their past key value states given to this model) of shape `),Nl=r(G,"CODE",{});var d1=a(Nl);U_=n(d1,"(batch_size, 1)"),d1.forEach(t),H_=n(G,` instead of all `),Ol=r(G,"CODE",{});var c1=a(Ol);Q_=n(c1,"decoder_input_ids"),c1.forEach(t),K_=n(G," of shape "),Il=r(G,"CODE",{});var p1=a(Il);V_=n(p1,"(batch_size, sequence_length)"),p1.forEach(t),J_=n(G,`. use_cache (`),Sl=r(G,"CODE",{});var h1=a(Sl);G_=n(h1,"bool"),h1.forEach(t),X_=n(G,", "),Wl=r(G,"EM",{});var m1=a(Wl);Y_=n(m1,"optional"),m1.forEach(t),Z_=n(G,", defaults to "),Ul=r(G,"CODE",{});var u1=a(Ul);ek=n(u1,"True"),u1.forEach(t),tk=n(G,`): If set to `),Hl=r(G,"CODE",{});var f1=a(Hl);ok=n(f1,"True"),f1.forEach(t),nk=n(G,", "),Ql=r(G,"CODE",{});var g1=a(Ql);sk=n(g1,"past_key_values"),g1.forEach(t),rk=n(G,` key value states are returned and can be used to speed up decoding (see `),Kl=r(G,"CODE",{});var _1=a(Kl);ak=n(_1,"past_key_values"),_1.forEach(t),ik=n(G,"). Set to "),Vl=r(G,"CODE",{});var k1=a(Vl);lk=n(k1,"False"),k1.forEach(t),dk=n(G," during training, "),Jl=r(G,"CODE",{});var v1=a(Jl);ck=n(v1,"True"),v1.forEach(t),pk=n(G,` during generation labels (`),Gl=r(G,"CODE",{});var T1=a(Gl);hk=n(T1,"tf.Tensor"),T1.forEach(t),mk=n(G," or "),Xl=r(G,"CODE",{});var b1=a(Xl);uk=n(b1,"np.ndarray"),b1.forEach(t),fk=n(G," of shape "),Yl=r(G,"CODE",{});var y1=a(Yl);gk=n(y1,"(batch_size, sequence_length)"),y1.forEach(t),_k=n(G,", "),Zl=r(G,"EM",{});var w1=a(Zl);kk=n(w1,"optional"),w1.forEach(t),vk=n(G,`): Labels for computing the cross entropy classification loss. 
Indices should be in `),ed=r(G,"CODE",{});var $1=a(ed);Tk=n($1,"[0, ..., config.vocab_size - 1]"),$1.forEach(t),bk=n(G,"."),G.forEach(t),yk=d(Tt),td=r(Tt,"P",{});var F1=a(td);wk=n(F1,"Example:"),F1.forEach(t),$k=d(Tt),b(cr.$$.fragment,Tt),Tt.forEach(t),vt.forEach(t),lc=d(i),_o=r(i,"H2",{class:!0});var ap=a(_o);sn=r(ap,"A",{id:!0,class:!0,href:!0});var E1=a(sn);od=r(E1,"SPAN",{});var R1=a(od);b(pr.$$.fragment,R1),R1.forEach(t),E1.forEach(t),Fk=d(ap),nd=r(ap,"SPAN",{});var B1=a(nd);Ek=n(B1,"TFRemBertForSequenceClassification"),B1.forEach(t),ap.forEach(t),dc=d(i),Se=r(i,"DIV",{class:!0});var bt=a(Se);b(hr.$$.fragment,bt),Rk=d(bt),sd=r(bt,"P",{});var M1=a(sd);Bk=n(M1,"RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks."),M1.forEach(t),Mk=d(bt),mr=r(bt,"P",{});var ip=a(mr);zk=n(ip,"This model inherits from "),ia=r(ip,"A",{href:!0});var z1=a(ia);Ck=n(z1,"TFPreTrainedModel"),z1.forEach(t),qk=n(ip,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ip.forEach(t),xk=d(bt),ur=r(bt,"P",{});var lp=a(ur);Pk=n(lp,"This model is also a "),fr=r(lp,"A",{href:!0,rel:!0});var C1=a(fr);Lk=n(C1,"tf.keras.Model"),C1.forEach(t),jk=n(lp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),lp.forEach(t),Dk=d(bt),b(rn.$$.fragment,bt),Ak=d(bt),st=r(bt,"DIV",{class:!0});var Nt=a(st);b(gr.$$.fragment,Nt),Nk=d(Nt),ko=r(Nt,"P",{});var La=a(ko);Ok=n(La,"The "),la=r(La,"A",{href:!0});var q1=a(la);Ik=n(q1,"TFRemBertForSequenceClassification"),q1.forEach(t),Sk=n(La," forward method, overrides the "),rd=r(La,"CODE",{});var x1=a(rd);Wk=n(x1,"__call__"),x1.forEach(t),Uk=n(La," special method."),La.forEach(t),Hk=d(Nt),b(an.$$.fragment,Nt),Qk=d(Nt),ad=r(Nt,"P",{});var P1=a(ad);Kk=n(P1,"Example:"),P1.forEach(t),Vk=d(Nt),b(_r.$$.fragment,Nt),Nt.forEach(t),bt.forEach(t),cc=d(i),vo=r(i,"H2",{class:!0});var dp=a(vo);ln=r(dp,"A",{id:!0,class:!0,href:!0});var L1=a(ln);id=r(L1,"SPAN",{});var j1=a(id);b(kr.$$.fragment,j1),j1.forEach(t),L1.forEach(t),Jk=d(dp),ld=r(dp,"SPAN",{});var D1=a(ld);Gk=n(D1,"TFRemBertForMultipleChoice"),D1.forEach(t),dp.forEach(t),pc=d(i),We=r(i,"DIV",{class:!0});var yt=a(We);b(vr.$$.fragment,yt),Xk=d(yt),dd=r(yt,"P",{});var A1=a(dd);Yk=n(A1,`RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),A1.forEach(t),Zk=d(yt),Tr=r(yt,"P",{});var cp=a(Tr);ev=n(cp,"This model inherits from "),da=r(cp,"A",{href:!0});var N1=a(da);tv=n(N1,"TFPreTrainedModel"),N1.forEach(t),ov=n(cp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cp.forEach(t),nv=d(yt),br=r(yt,"P",{});var pp=a(br);sv=n(pp,"This model is also a "),yr=r(pp,"A",{href:!0,rel:!0});var O1=a(yr);rv=n(O1,"tf.keras.Model"),O1.forEach(t),av=n(pp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),pp.forEach(t),iv=d(yt),b(dn.$$.fragment,yt),lv=d(yt),rt=r(yt,"DIV",{class:!0});var Ot=a(rt);b(wr.$$.fragment,Ot),dv=d(Ot),To=r(Ot,"P",{});var ja=a(To);cv=n(ja,"The "),ca=r(ja,"A",{href:!0});var I1=a(ca);pv=n(I1,"TFRemBertForMultipleChoice"),I1.forEach(t),hv=n(ja," forward method, overrides the "),cd=r(ja,"CODE",{});var S1=a(cd);mv=n(S1,"__call__"),S1.forEach(t),uv=n(ja," special method."),ja.forEach(t),fv=d(Ot),b(cn.$$.fragment,Ot),gv=d(Ot),pd=r(Ot,"P",{});var W1=a(pd);_v=n(W1,"Example:"),W1.forEach(t),kv=d(Ot),b($r.$$.fragment,Ot),Ot.forEach(t),yt.forEach(t),hc=d(i),bo=r(i,"H2",{class:!0});var hp=a(bo);pn=r(hp,"A",{id:!0,class:!0,href:!0});var U1=a(pn);hd=r(U1,"SPAN",{});var H1=a(hd);b(Fr.$$.fragment,H1),H1.forEach(t),U1.forEach(t),vv=d(hp),md=r(hp,"SPAN",{});var Q1=a(md);Tv=n(Q1,"TFRemBertForTokenClassification"),Q1.forEach(t),hp.forEach(t),mc=d(i),Ue=r(i,"DIV",{class:!0});var wt=a(Ue);b(Er.$$.fragment,wt),bv=d(wt),ud=r(wt,"P",{});var K1=a(ud);yv=n(K1,`RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),K1.forEach(t),wv=d(wt),Rr=r(wt,"P",{});var mp=a(Rr);$v=n(mp,"This model inherits from "),pa=r(mp,"A",{href:!0});var V1=a(pa);Fv=n(V1,"TFPreTrainedModel"),V1.forEach(t),Ev=n(mp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mp.forEach(t),Rv=d(wt),Br=r(wt,"P",{});var up=a(Br);Bv=n(up,"This model is also a "),Mr=r(up,"A",{href:!0,rel:!0});var J1=a(Mr);Mv=n(J1,"tf.keras.Model"),J1.forEach(t),zv=n(up,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),up.forEach(t),Cv=d(wt),b(hn.$$.fragment,wt),qv=d(wt),at=r(wt,"DIV",{class:!0});var It=a(at);b(zr.$$.fragment,It),xv=d(It),yo=r(It,"P",{});var Da=a(yo);Pv=n(Da,"The "),ha=r(Da,"A",{href:!0});var G1=a(ha);Lv=n(G1,"TFRemBertForTokenClassification"),G1.forEach(t),jv=n(Da," forward method, overrides the "),fd=r(Da,"CODE",{});var X1=a(fd);Dv=n(X1,"__call__"),X1.forEach(t),Av=n(Da," special method."),Da.forEach(t),Nv=d(It),b(mn.$$.fragment,It),Ov=d(It),gd=r(It,"P",{});var Y1=a(gd);Iv=n(Y1,"Example:"),Y1.forEach(t),Sv=d(It),b(Cr.$$.fragment,It),It.forEach(t),wt.forEach(t),uc=d(i),wo=r(i,"H2",{class:!0});var fp=a(wo);un=r(fp,"A",{id:!0,class:!0,href:!0});var Z1=a(un);_d=r(Z1,"SPAN",{});var ew=a(_d);b(qr.$$.fragment,ew),ew.forEach(t),Z1.forEach(t),Wv=d(fp),kd=r(fp,"SPAN",{});var tw=a(kd);Uv=n(tw,"TFRemBertForQuestionAnswering"),tw.forEach(t),fp.forEach(t),fc=d(i),He=r(i,"DIV",{class:!0});var $t=a(He);b(xr.$$.fragment,$t),Hv=d($t),$o=r($t,"P",{});var Aa=a($o);Qv=n(Aa,`RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),vd=r(Aa,"CODE",{});var ow=a(vd);Kv=n(ow,"span start logits"),ow.forEach(t),Vv=n(Aa," and "),Td=r(Aa,"CODE",{});var nw=a(Td);Jv=n(nw,"span end logits"),nw.forEach(t),Gv=n(Aa,")."),Aa.forEach(t),Xv=d($t),Pr=r($t,"P",{});var gp=a(Pr);Yv=n(gp,"This model inherits from "),ma=r(gp,"A",{href:!0});var sw=a(ma);Zv=n(sw,"TFPreTrainedModel"),sw.forEach(t),eT=n(gp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gp.forEach(t),tT=d($t),Lr=r($t,"P",{});var _p=a(Lr);oT=n(_p,"This model is also a "),jr=r(_p,"A",{href:!0,rel:!0});var rw=a(jr);nT=n(rw,"tf.keras.Model"),rw.forEach(t),sT=n(_p,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_p.forEach(t),rT=d($t),b(fn.$$.fragment,$t),aT=d($t),it=r($t,"DIV",{class:!0});var St=a(it);b(Dr.$$.fragment,St),iT=d(St),Fo=r(St,"P",{});var Na=a(Fo);lT=n(Na,"The "),ua=r(Na,"A",{href:!0});var aw=a(ua);dT=n(aw,"TFRemBertForQuestionAnswering"),aw.forEach(t),cT=n(Na," forward method, overrides the "),bd=r(Na,"CODE",{});var iw=a(bd);pT=n(iw,"__call__"),iw.forEach(t),hT=n(Na," special method."),Na.forEach(t),mT=d(St),b(gn.$$.fragment,St),uT=d(St),yd=r(St,"P",{});var lw=a(yd);fT=n(lw,"Example:"),lw.forEach(t),gT=d(St),b(Ar.$$.fragment,St),St.forEach(t),$t.forEach(t),this.h()},h(){p(h,"name","hf:doc:metadata"),p(h,"content",JSON.stringify(Pw)),p(u,"id","rembert"),p(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(u,"href","#rembert"),p(g,"class","relative group"),p(te,"id","overview"),p(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(te,"href","#overview"),p(M,"class","relative group"),p(se,"href","https://arxiv.org/abs/2010.12821"),p(se,"rel","nofollow"),p(oe,"id","transformers.RemBertConfig"),p(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(oe,"href","#transformers.RemBertConfig"),p(Z,"class","relative group"),p(ye,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel"),p(A,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(de,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(Ro,"id","initializing-a-rembert-rembert-style-configuration"),p(Ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ro,"href","#initializing-a-rembert-rembert-style-configuration"),p(Eo,"class","relative group"),p(Mo,"id","initializing-a-model-from-the-rembert-style-configuration"),p(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Mo,"href","#initializing-a-model-from-the-rembert-style-configuration"),p(Bo,"class","relative group"),p(Co,"id","accessing-the-model-configuration"),p(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Co,"href","#accessing-the-model-configuration"),p(zo,"class","relative group"),p(x,"class","docstring"),p(qo,"id","transformers.RemBertTokenizer"),p(qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),p(qo,"href","#transformers.RemBertTokenizer"),p(Ut,"class","relative group"),p(xn,"href","https://github.com/google/sentencepiece"),p(xn,"rel","nofollow"),p(Wr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(Bt,"class","docstring"),p(xo,"class","docstring"),p(ut,"class","docstring"),p(ui,"class","docstring"),p(Ce,"class","docstring"),p(Po,"id","transformers.RemBertTokenizerFast"),p(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Po,"href","#transformers.RemBertTokenizerFast"),p(Kt,"class","relative group"),p(Wn,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),p(Wn,"rel","nofollow"),p(Qr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),p(Mt,"class","docstring"),p(Lo,"class","docstring"),p(ft,"class","docstring"),p($i,"class","docstring"),p(De,"class","docstring"),p(jo,"id","transformers.RemBertModel"),p(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(jo,"href","#transformers.RemBertModel"),p(Vt,"class","relative group"),p(Zn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Zn,"rel","nofollow"),p(ts,"href","https://arxiv.org/abs/1706.03762"),p(ts,"rel","nofollow"),p(Jr,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertModel"),p(Ge,"class","docstring"),p(Je,"class","docstring"),p(Ao,"id","transformers.RemBertForCausalLM"),p(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ao,"href","#transformers.RemBertForCausalLM"),p(Gt,"class","relative group"),p(as,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(as,"rel","nofollow"),p(Gr,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForCausalLM"),p(Xe,"class","docstring"),p(Et,"class","docstring"),p(Oo,"id","transformers.RemBertForMaskedLM"),p(Oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Oo,"href","#transformers.RemBertForMaskedLM"),p(Zt,"class","relative group"),p(ps,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(ps,"rel","nofollow"),p(Xr,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForMaskedLM"),p(Ye,"class","docstring"),p(Rt,"class","docstring"),p(So,"id","transformers.RemBertForSequenceClassification"),p(So,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(So,"href","#transformers.RemBertForSequenceClassification"),p(oo,"class","relative group"),p(_s,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(_s,"rel","nofollow"),p(Yr,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForSequenceClassification"),p(Le,"class","docstring"),p(ct,"class","docstring"),p(Uo,"id","transformers.RemBertForMultipleChoice"),p(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),p(Uo,"href","#transformers.RemBertForMultipleChoice"),p(so,"class","relative group"),p($s,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p($s,"rel","nofollow"),p(Zr,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForMultipleChoice"),p(Ze,"class","docstring"),p(pt,"class","docstring"),p(Qo,"id","transformers.RemBertForTokenClassification"),p(Qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Qo,"href","#transformers.RemBertForTokenClassification"),p(ao,"class","relative group"),p(zs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(zs,"rel","nofollow"),p(ea,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForTokenClassification"),p(et,"class","docstring"),p(ht,"class","docstring"),p(Vo,"id","transformers.RemBertForQuestionAnswering"),p(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Vo,"href","#transformers.RemBertForQuestionAnswering"),p(lo,"class","relative group"),p(js,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(js,"rel","nofollow"),p(ta,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.RemBertForQuestionAnswering"),p(tt,"class","docstring"),p(mt,"class","docstring"),p(Go,"id","transformers.TFRemBertModel"),p(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Go,"href","#transformers.TFRemBertModel"),p(ho,"class","relative group"),p(oa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Ws,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Ws,"rel","nofollow"),p(na,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertModel"),p(ot,"class","docstring"),p(Ne,"class","docstring"),p(Zo,"id","transformers.TFRemBertForMaskedLM"),p(Zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Zo,"href","#transformers.TFRemBertForMaskedLM"),p(uo,"class","relative group"),p(sa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Xs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Xs,"rel","nofollow"),p(ra,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForMaskedLM"),p(nt,"class","docstring"),p(Oe,"class","docstring"),p(on,"id","transformers.TFRemBertForCausalLM"),p(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(on,"href","#transformers.TFRemBertForCausalLM"),p(go,"class","relative group"),p(aa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(rr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(rr,"rel","nofollow"),p(Qe,"class","docstring"),p(Ie,"class","docstring"),p(sn,"id","transformers.TFRemBertForSequenceClassification"),p(sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),p(sn,"href","#transformers.TFRemBertForSequenceClassification"),p(_o,"class","relative group"),p(ia,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(fr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(fr,"rel","nofollow"),p(la,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForSequenceClassification"),p(st,"class","docstring"),p(Se,"class","docstring"),p(ln,"id","transformers.TFRemBertForMultipleChoice"),p(ln,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ln,"href","#transformers.TFRemBertForMultipleChoice"),p(vo,"class","relative group"),p(da,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(yr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(yr,"rel","nofollow"),p(ca,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForMultipleChoice"),p(rt,"class","docstring"),p(We,"class","docstring"),p(pn,"id","transformers.TFRemBertForTokenClassification"),p(pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(pn,"href","#transformers.TFRemBertForTokenClassification"),p(bo,"class","relative group"),p(pa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Mr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Mr,"rel","nofollow"),p(ha,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForTokenClassification"),p(at,"class","docstring"),p(Ue,"class","docstring"),p(un,"id","transformers.TFRemBertForQuestionAnswering"),p(un,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(un,"href","#transformers.TFRemBertForQuestionAnswering"),p(wo,"class","relative 
group"),p(ma,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(jr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(jr,"rel","nofollow"),p(ua,"href","/docs/transformers/v4.15.0/en/model_doc/rembert#transformers.TFRemBertForQuestionAnswering"),p(it,"class","docstring"),p(He,"class","docstring")},m(i,f){e(document.head,h),m(i,R,f),m(i,g,f),e(g,u),e(u,v),y(k,v,null),e(g,_),e(g,B),e(B,ue),m(i,X,f),m(i,M,f),e(M,te),e(te,N),y(ne,N,null),e(M,fe),e(M,O),e(O,ge),m(i,pe,f),m(i,J,f),e(J,j),e(J,se),e(se,Y),e(J,z),m(i,q,f),m(i,ae,f),e(ae,U),m(i,he,f),m(i,ie,f),e(ie,I),e(I,_e),m(i,me,f),m(i,C,f),e(C,ke),m(i,S,f),m(i,le,f),e(le,ve),m(i,W,f),m(i,Z,f),e(Z,oe),e(oe,L),y(re,L,null),e(Z,H),e(Z,ce),e(ce,c),m(i,E,f),m(i,x,f),y(be,x,null),e(x,we),e(x,P),e(P,$e),e(P,ye),e(ye,Fe),e(P,D),e(x,Q),e(x,Te),e(Te,Ee),e(Te,A),e(A,Re),e(Te,Be),e(Te,de),e(de,Me),e(Te,kp),e(x,vp),e(x,Oa),e(Oa,Tp),e(x,bp),y($n,x,null),e(x,yp),e(x,Ia),e(Ia,Sa),e(Sa,Wt),e(Wt,Wa),e(Wa,wp),e(Wt,$p),e(Wt,Eo),e(Eo,Ro),e(Ro,Ua),y(Fn,Ua,null),e(Eo,Fp),e(Eo,Ha),e(Ha,Ep),e(Wt,Rp),e(Wt,Qa),e(Qa,Bp),e(x,Mp),e(x,Ka),e(Ka,Va),e(Va,En),e(En,Bo),e(Bo,Mo),e(Mo,Ja),y(Rn,Ja,null),e(Bo,zp),e(Bo,Ga),e(Ga,Cp),e(En,qp),e(En,Xa),e(Xa,xp),e(x,Pp),e(x,Ya),e(Ya,Za),e(Za,Bn),e(Bn,zo),e(zo,Co),e(Co,ei),y(Mn,ei,null),e(zo,Lp),e(zo,ti),e(ti,jp),e(Bn,Dp),e(Bn,oi),e(oi,Ap),m(i,Ad,f),m(i,Ut,f),e(Ut,qo),e(qo,ni),y(zn,ni,null),e(Ut,Np),e(Ut,si),e(si,Op),m(i,Nd,f),m(i,Ce,f),y(Cn,Ce,null),e(Ce,Ip),e(Ce,qn),e(qn,Sp),e(qn,xn),e(xn,Wp),e(qn,Up),e(Ce,Hp),e(Ce,Pn),e(Pn,Qp),e(Pn,Wr),e(Wr,Kp),e(Pn,Vp),e(Ce,Jp),e(Ce,Ht),e(Ht,Gp),e(Ht,ri),e(ri,Xp),e(Ht,Yp),e(Ht,ai),e(ai,Zp),e(Ht,eh),e(Ce,th),e(Ce,Bt),y(Ln,Bt,null),e(Bt,oh),e(Bt,ii),e(ii,nh),e(Bt,sh),e(Bt,jn),e(jn,Ur),e(Ur,rh),e(Ur,li),e(li,ah),e(jn,ih),e(jn,Hr),e(Hr,lh),e(Hr,di),e(di,dh),e(Ce,ch),e(Ce,xo),y(Dn,xo,null),e(xo,ph),e(xo,An),e(An,hh),e(An,ci),e(ci,mh),e(An,uh),e(Ce,fh),e(Ce,ut),y(Nn,ut,null),e(ut,gh),e(ut,pi),e(pi,_h),e(ut,kh),y(On,ut,null),e(ut,vh),e(ut,Qt),e(Qt,Th),e(Qt,hi),e(hi,bh),e(Qt,yh),e(Qt,mi),e(mi,wh),e(Qt,$h),e(Ce,Fh),e(Ce,ui),m(i,Od,f),m(i,Kt,f),e(Kt,Po),e(Po,fi),y(In,fi,null),e(Kt,Eh),e(Kt,gi),e(gi,Rh),m(i,Id,f),m(i,De,f),y(Sn,De,null),e(De,Bh),e(De,Ft),e(Ft,Mh),e(Ft,_i),e(_i,zh),e(Ft,Ch),e(Ft,Wn),e(Wn,qh),e(Ft,xh),e(Ft,Qr),e(Qr,Ph),e(Ft,Lh),e(De,jh),e(De,Mt),y(Un,Mt,null),e(Mt,Dh),e(Mt,ki),e(ki,Ah),e(Mt,Nh),e(Mt,Hn),e(Hn,Kr),e(Kr,Oh),e(Kr,vi),e(vi,Ih),e(Hn,Sh),e(Hn,Vr),e(Vr,Wh),e(Vr,Ti),e(Ti,Uh),e(De,Hh),e(De,Lo),y(Qn,Lo,null),e(Lo,Qh),e(Lo,Kn),e(Kn,Kh),e(Kn,bi),e(bi,Vh),e(Kn,Jh),e(De,Gh),e(De,ft),y(Vn,ft,null),e(ft,Xh),e(ft,yi),e(yi,Yh),e(ft,Zh),y(Jn,ft,null),e(ft,em),e(ft,wi),e(wi,tm),e(De,om),e(De,$i),m(i,Sd,f),m(i,Vt,f),e(Vt,jo),e(jo,Fi),y(Gn,Fi,null),e(Vt,nm),e(Vt,Ei),e(Ei,sm),m(i,Wd,f),m(i,Je,f),y(Xn,Je,null),e(Je,rm),e(Je,Yn),e(Yn,am),e(Yn,Zn),e(Zn,im),e(Yn,lm),e(Je,dm),e(Je,es),e(es,cm),e(es,ts),e(ts,pm),e(es,hm),e(Je,mm),e(Je,Ae),e(Ae,um),e(Ae,Ri),e(Ri,fm),e(Ae,gm),e(Ae,Bi),e(Bi,_m),e(Ae,km),e(Ae,Mi),e(Mi,vm),e(Ae,Tm),e(Ae,zi),e(zi,bm),e(Ae,ym),e(Ae,Ci),e(Ci,wm),e(Ae,$m),e(Ae,qi),e(qi,Fm),e(Ae,Em),e(Je,Rm),e(Je,Ge),y(os,Ge,null),e(Ge,Bm),e(Ge,Jt),e(Jt,Mm),e(Jt,Jr),e(Jr,zm),e(Jt,Cm),e(Jt,xi),e(xi,qm),e(Jt,xm),e(Ge,Pm),y(Do,Ge,null),e(Ge,Lm),e(Ge,Pi),e(Pi,jm),e(Ge,Dm),y(ns,Ge,null),m(i,Ud,f),m(i,Gt,f),e(Gt,Ao),e(Ao,Li),y(ss,Li,null),e(Gt,Am),e(Gt,ji),e(ji,Nm),m(i,Hd,f),m(i,Et,f),y(rs,Et,null),e(Et,Om),e(Et,Xt),e(Xt,Im),e(Xt,Di),e(Di,Sm),e(Xt,Wm),e(Xt,as),e(as,Um),e(Xt,Hm),e(Et,Qm),e(Et,Xe),y(is,Xe,null),e(Xe,Km),e(Xe,Yt),e(Yt,Vm),e(Yt,Gr),e(Gr,Jm
),e(Yt,Gm),e(Yt,Ai),e(Ai,Xm),e(Yt,Ym),e(Xe,Zm),y(No,Xe,null),e(Xe,eu),e(Xe,Ni),e(Ni,tu),e(Xe,ou),y(ls,Xe,null),m(i,Qd,f),m(i,Zt,f),e(Zt,Oo),e(Oo,Oi),y(ds,Oi,null),e(Zt,nu),e(Zt,Ii),e(Ii,su),m(i,Kd,f),m(i,Rt,f),y(cs,Rt,null),e(Rt,ru),e(Rt,eo),e(eo,au),e(eo,Si),e(Si,iu),e(eo,lu),e(eo,ps),e(ps,du),e(eo,cu),e(Rt,pu),e(Rt,Ye),y(hs,Ye,null),e(Ye,hu),e(Ye,to),e(to,mu),e(to,Xr),e(Xr,uu),e(to,fu),e(to,Wi),e(Wi,gu),e(to,_u),e(Ye,ku),y(Io,Ye,null),e(Ye,vu),e(Ye,Ui),e(Ui,Tu),e(Ye,bu),y(ms,Ye,null),m(i,Vd,f),m(i,oo,f),e(oo,So),e(So,Hi),y(us,Hi,null),e(oo,yu),e(oo,Qi),e(Qi,wu),m(i,Jd,f),m(i,ct,f),y(fs,ct,null),e(ct,$u),e(ct,Ki),e(Ki,Fu),e(ct,Eu),e(ct,gs),e(gs,Ru),e(gs,_s),e(_s,Bu),e(gs,Mu),e(ct,zu),e(ct,Le),y(ks,Le,null),e(Le,Cu),e(Le,no),e(no,qu),e(no,Yr),e(Yr,xu),e(no,Pu),e(no,Vi),e(Vi,Lu),e(no,ju),e(Le,Du),y(Wo,Le,null),e(Le,Au),e(Le,Ji),e(Ji,Nu),e(Le,Ou),y(vs,Le,null),e(Le,Iu),e(Le,Gi),e(Gi,Su),e(Le,Wu),y(Ts,Le,null),m(i,Gd,f),m(i,so,f),e(so,Uo),e(Uo,Xi),y(bs,Xi,null),e(so,Uu),e(so,Yi),e(Yi,Hu),m(i,Xd,f),m(i,pt,f),y(ys,pt,null),e(pt,Qu),e(pt,Zi),e(Zi,Ku),e(pt,Vu),e(pt,ws),e(ws,Ju),e(ws,$s),e($s,Gu),e(ws,Xu),e(pt,Yu),e(pt,Ze),y(Fs,Ze,null),e(Ze,Zu),e(Ze,ro),e(ro,ef),e(ro,Zr),e(Zr,tf),e(ro,of),e(ro,el),e(el,nf),e(ro,sf),e(Ze,rf),y(Ho,Ze,null),e(Ze,af),e(Ze,tl),e(tl,lf),e(Ze,df),y(Es,Ze,null),m(i,Yd,f),m(i,ao,f),e(ao,Qo),e(Qo,ol),y(Rs,ol,null),e(ao,cf),e(ao,nl),e(nl,pf),m(i,Zd,f),m(i,ht,f),y(Bs,ht,null),e(ht,hf),e(ht,sl),e(sl,mf),e(ht,uf),e(ht,Ms),e(Ms,ff),e(Ms,zs),e(zs,gf),e(Ms,_f),e(ht,kf),e(ht,et),y(Cs,et,null),e(et,vf),e(et,io),e(io,Tf),e(io,ea),e(ea,bf),e(io,yf),e(io,rl),e(rl,wf),e(io,$f),e(et,Ff),y(Ko,et,null),e(et,Ef),e(et,al),e(al,Rf),e(et,Bf),y(qs,et,null),m(i,ec,f),m(i,lo,f),e(lo,Vo),e(Vo,il),y(xs,il,null),e(lo,Mf),e(lo,ll),e(ll,zf),m(i,tc,f),m(i,mt,f),y(Ps,mt,null),e(mt,Cf),e(mt,co),e(co,qf),e(co,dl),e(dl,xf),e(co,Pf),e(co,cl),e(cl,Lf),e(co,jf),e(mt,Df),e(mt,Ls),e(Ls,Af),e(Ls,js),e(js,Nf),e(Ls,Of),e(mt,If),e(mt,tt),y(Ds,tt,null),e(tt,Sf),e(tt,po),e(po,Wf),e(po,ta),e(ta,Uf),e(po,Hf),e(po,pl),e(pl,Qf),e(po,Kf),e(tt,Vf),y(Jo,tt,null),e(tt,Jf),e(tt,hl),e(hl,Gf),e(tt,Xf),y(As,tt,null),m(i,oc,f),m(i,ho,f),e(ho,Go),e(Go,ml),y(Ns,ml,null),e(ho,Yf),e(ho,ul),e(ul,Zf),m(i,nc,f),m(i,Ne,f),y(Os,Ne,null),e(Ne,eg),e(Ne,fl),e(fl,tg),e(Ne,og),e(Ne,Is),e(Is,ng),e(Is,oa),e(oa,sg),e(Is,rg),e(Ne,ag),e(Ne,Ss),e(Ss,ig),e(Ss,Ws),e(Ws,lg),e(Ss,dg),e(Ne,cg),y(Xo,Ne,null),e(Ne,pg),e(Ne,ot),y(Us,ot,null),e(ot,hg),e(ot,mo),e(mo,mg),e(mo,na),e(na,ug),e(mo,fg),e(mo,gl),e(gl,gg),e(mo,_g),e(ot,kg),y(Yo,ot,null),e(ot,vg),e(ot,_l),e(_l,Tg),e(ot,bg),y(Hs,ot,null),m(i,sc,f),m(i,uo,f),e(uo,Zo),e(Zo,kl),y(Qs,kl,null),e(uo,yg),e(uo,vl),e(vl,wg),m(i,rc,f),m(i,Oe,f),y(Ks,Oe,null),e(Oe,$g),e(Oe,Vs),e(Vs,Fg),e(Vs,Tl),e(Tl,Eg),e(Vs,Rg),e(Oe,Bg),e(Oe,Js),e(Js,Mg),e(Js,sa),e(sa,zg),e(Js,Cg),e(Oe,qg),e(Oe,Gs),e(Gs,xg),e(Gs,Xs),e(Xs,Pg),e(Gs,Lg),e(Oe,jg),y(en,Oe,null),e(Oe,Dg),e(Oe,nt),y(Ys,nt,null),e(nt,Ag),e(nt,fo),e(fo,Ng),e(fo,ra),e(ra,Og),e(fo,Ig),e(fo,bl),e(bl,Sg),e(fo,Wg),e(nt,Ug),y(tn,nt,null),e(nt,Hg),e(nt,yl),e(yl,Qg),e(nt,Kg),y(Zs,nt,null),m(i,ac,f),m(i,go,f),e(go,on),e(on,wl),y(er,wl,null),e(go,Vg),e(go,$l),e($l,Jg),m(i,ic,f),m(i,Ie,f),y(tr,Ie,null),e(Ie,Gg),e(Ie,or),e(or,Xg),e(or,Fl),e(Fl,Yg),e(or,Zg),e(Ie,e_),e(Ie,nr),e(nr,t_),e(nr,aa),e(aa,o_),e(nr,n_),e(Ie,s_),e(Ie,sr),e(sr,r_),e(sr,rr),e(rr,a_),e(sr,i_),e(Ie,l_),y(nn,Ie,null),e(Ie,d_),e(Ie,Qe),y(ar,Qe,null),e(Qe,c_),e(Qe,xe),e(xe,p_),e(xe,El),e(El,h_),e(xe,m_),e(xe,Rl),e(Rl,u_),e(xe,f_),e(xe,Bl),e(Bl,g_),e(xe,__),e(xe,Ml),e(Ml,k_),e(xe,v_),e(xe,zl),e(zl,T_),e(xe,b_),e(xe,Cl
),e(Cl,y_),e(xe,w_),e(xe,ql),e(ql,$_),e(xe,F_),e(Qe,E_),e(Qe,ir),e(ir,lr),e(lr,R_),e(lr,xl),e(xl,B_),e(lr,M_),e(ir,z_),e(ir,dr),e(dr,C_),e(dr,Pl),e(Pl,q_),e(dr,x_),e(Qe,P_),e(Qe,K),e(K,L_),e(K,Ll),e(Ll,j_),e(K,D_),e(K,jl),e(jl,A_),e(K,N_),e(K,Dl),e(Dl,O_),e(K,I_),e(K,Al),e(Al,S_),e(K,W_),e(K,Nl),e(Nl,U_),e(K,H_),e(K,Ol),e(Ol,Q_),e(K,K_),e(K,Il),e(Il,V_),e(K,J_),e(K,Sl),e(Sl,G_),e(K,X_),e(K,Wl),e(Wl,Y_),e(K,Z_),e(K,Ul),e(Ul,ek),e(K,tk),e(K,Hl),e(Hl,ok),e(K,nk),e(K,Ql),e(Ql,sk),e(K,rk),e(K,Kl),e(Kl,ak),e(K,ik),e(K,Vl),e(Vl,lk),e(K,dk),e(K,Jl),e(Jl,ck),e(K,pk),e(K,Gl),e(Gl,hk),e(K,mk),e(K,Xl),e(Xl,uk),e(K,fk),e(K,Yl),e(Yl,gk),e(K,_k),e(K,Zl),e(Zl,kk),e(K,vk),e(K,ed),e(ed,Tk),e(K,bk),e(Qe,yk),e(Qe,td),e(td,wk),e(Qe,$k),y(cr,Qe,null),m(i,lc,f),m(i,_o,f),e(_o,sn),e(sn,od),y(pr,od,null),e(_o,Fk),e(_o,nd),e(nd,Ek),m(i,dc,f),m(i,Se,f),y(hr,Se,null),e(Se,Rk),e(Se,sd),e(sd,Bk),e(Se,Mk),e(Se,mr),e(mr,zk),e(mr,ia),e(ia,Ck),e(mr,qk),e(Se,xk),e(Se,ur),e(ur,Pk),e(ur,fr),e(fr,Lk),e(ur,jk),e(Se,Dk),y(rn,Se,null),e(Se,Ak),e(Se,st),y(gr,st,null),e(st,Nk),e(st,ko),e(ko,Ok),e(ko,la),e(la,Ik),e(ko,Sk),e(ko,rd),e(rd,Wk),e(ko,Uk),e(st,Hk),y(an,st,null),e(st,Qk),e(st,ad),e(ad,Kk),e(st,Vk),y(_r,st,null),m(i,cc,f),m(i,vo,f),e(vo,ln),e(ln,id),y(kr,id,null),e(vo,Jk),e(vo,ld),e(ld,Gk),m(i,pc,f),m(i,We,f),y(vr,We,null),e(We,Xk),e(We,dd),e(dd,Yk),e(We,Zk),e(We,Tr),e(Tr,ev),e(Tr,da),e(da,tv),e(Tr,ov),e(We,nv),e(We,br),e(br,sv),e(br,yr),e(yr,rv),e(br,av),e(We,iv),y(dn,We,null),e(We,lv),e(We,rt),y(wr,rt,null),e(rt,dv),e(rt,To),e(To,cv),e(To,ca),e(ca,pv),e(To,hv),e(To,cd),e(cd,mv),e(To,uv),e(rt,fv),y(cn,rt,null),e(rt,gv),e(rt,pd),e(pd,_v),e(rt,kv),y($r,rt,null),m(i,hc,f),m(i,bo,f),e(bo,pn),e(pn,hd),y(Fr,hd,null),e(bo,vv),e(bo,md),e(md,Tv),m(i,mc,f),m(i,Ue,f),y(Er,Ue,null),e(Ue,bv),e(Ue,ud),e(ud,yv),e(Ue,wv),e(Ue,Rr),e(Rr,$v),e(Rr,pa),e(pa,Fv),e(Rr,Ev),e(Ue,Rv),e(Ue,Br),e(Br,Bv),e(Br,Mr),e(Mr,Mv),e(Br,zv),e(Ue,Cv),y(hn,Ue,null),e(Ue,qv),e(Ue,at),y(zr,at,null),e(at,xv),e(at,yo),e(yo,Pv),e(yo,ha),e(ha,Lv),e(yo,jv),e(yo,fd),e(fd,Dv),e(yo,Av),e(at,Nv),y(mn,at,null),e(at,Ov),e(at,gd),e(gd,Iv),e(at,Sv),y(Cr,at,null),m(i,uc,f),m(i,wo,f),e(wo,un),e(un,_d),y(qr,_d,null),e(wo,Wv),e(wo,kd),e(kd,Uv),m(i,fc,f),m(i,He,f),y(xr,He,null),e(He,Hv),e(He,$o),e($o,Qv),e($o,vd),e(vd,Kv),e($o,Vv),e($o,Td),e(Td,Jv),e($o,Gv),e(He,Xv),e(He,Pr),e(Pr,Yv),e(Pr,ma),e(ma,Zv),e(Pr,eT),e(He,tT),e(He,Lr),e(Lr,oT),e(Lr,jr),e(jr,nT),e(Lr,sT),e(He,rT),y(fn,He,null),e(He,aT),e(He,it),y(Dr,it,null),e(it,iT),e(it,Fo),e(Fo,lT),e(Fo,ua),e(ua,dT),e(Fo,cT),e(Fo,bd),e(bd,pT),e(Fo,hT),e(it,mT),y(gn,it,null),e(it,uT),e(it,yd),e(yd,fT),e(it,gT),y(Ar,it,null),gc=!0},p(i,[f]){const Nr={};f&2&&(Nr.$$scope={dirty:f,ctx:i}),Do.$set(Nr);const wd={};f&2&&(wd.$$scope={dirty:f,ctx:i}),No.$set(wd);const $d={};f&2&&($d.$$scope={dirty:f,ctx:i}),Io.$set($d);const Fd={};f&2&&(Fd.$$scope={dirty:f,ctx:i}),Wo.$set(Fd);const Or={};f&2&&(Or.$$scope={dirty:f,ctx:i}),Ho.$set(Or);const Ed={};f&2&&(Ed.$$scope={dirty:f,ctx:i}),Ko.$set(Ed);const Rd={};f&2&&(Rd.$$scope={dirty:f,ctx:i}),Jo.$set(Rd);const Bd={};f&2&&(Bd.$$scope={dirty:f,ctx:i}),Xo.$set(Bd);const Ir={};f&2&&(Ir.$$scope={dirty:f,ctx:i}),Yo.$set(Ir);const Md={};f&2&&(Md.$$scope={dirty:f,ctx:i}),en.$set(Md);const zd={};f&2&&(zd.$$scope={dirty:f,ctx:i}),tn.$set(zd);const Cd={};f&2&&(Cd.$$scope={dirty:f,ctx:i}),nn.$set(Cd);const qd={};f&2&&(qd.$$scope={dirty:f,ctx:i}),rn.$set(qd);const xd={};f&2&&(xd.$$scope={dirty:f,ctx:i}),an.$set(xd);const Pd={};f&2&&(Pd.$$scope={dirty:f,ctx:i}),dn.$set(Pd);const 
Sr={};f&2&&(Sr.$$scope={dirty:f,ctx:i}),cn.$set(Sr);const Ld={};f&2&&(Ld.$$scope={dirty:f,ctx:i}),hn.$set(Ld);const jd={};f&2&&(jd.$$scope={dirty:f,ctx:i}),mn.$set(jd);const Dd={};f&2&&(Dd.$$scope={dirty:f,ctx:i}),fn.$set(Dd);const Pe={};f&2&&(Pe.$$scope={dirty:f,ctx:i}),gn.$set(Pe)},i(i){gc||(w(k.$$.fragment,i),w(ne.$$.fragment,i),w(re.$$.fragment,i),w(be.$$.fragment,i),w($n.$$.fragment,i),w(Fn.$$.fragment,i),w(Rn.$$.fragment,i),w(Mn.$$.fragment,i),w(zn.$$.fragment,i),w(Cn.$$.fragment,i),w(Ln.$$.fragment,i),w(Dn.$$.fragment,i),w(Nn.$$.fragment,i),w(On.$$.fragment,i),w(In.$$.fragment,i),w(Sn.$$.fragment,i),w(Un.$$.fragment,i),w(Qn.$$.fragment,i),w(Vn.$$.fragment,i),w(Jn.$$.fragment,i),w(Gn.$$.fragment,i),w(Xn.$$.fragment,i),w(os.$$.fragment,i),w(Do.$$.fragment,i),w(ns.$$.fragment,i),w(ss.$$.fragment,i),w(rs.$$.fragment,i),w(is.$$.fragment,i),w(No.$$.fragment,i),w(ls.$$.fragment,i),w(ds.$$.fragment,i),w(cs.$$.fragment,i),w(hs.$$.fragment,i),w(Io.$$.fragment,i),w(ms.$$.fragment,i),w(us.$$.fragment,i),w(fs.$$.fragment,i),w(ks.$$.fragment,i),w(Wo.$$.fragment,i),w(vs.$$.fragment,i),w(Ts.$$.fragment,i),w(bs.$$.fragment,i),w(ys.$$.fragment,i),w(Fs.$$.fragment,i),w(Ho.$$.fragment,i),w(Es.$$.fragment,i),w(Rs.$$.fragment,i),w(Bs.$$.fragment,i),w(Cs.$$.fragment,i),w(Ko.$$.fragment,i),w(qs.$$.fragment,i),w(xs.$$.fragment,i),w(Ps.$$.fragment,i),w(Ds.$$.fragment,i),w(Jo.$$.fragment,i),w(As.$$.fragment,i),w(Ns.$$.fragment,i),w(Os.$$.fragment,i),w(Xo.$$.fragment,i),w(Us.$$.fragment,i),w(Yo.$$.fragment,i),w(Hs.$$.fragment,i),w(Qs.$$.fragment,i),w(Ks.$$.fragment,i),w(en.$$.fragment,i),w(Ys.$$.fragment,i),w(tn.$$.fragment,i),w(Zs.$$.fragment,i),w(er.$$.fragment,i),w(tr.$$.fragment,i),w(nn.$$.fragment,i),w(ar.$$.fragment,i),w(cr.$$.fragment,i),w(pr.$$.fragment,i),w(hr.$$.fragment,i),w(rn.$$.fragment,i),w(gr.$$.fragment,i),w(an.$$.fragment,i),w(_r.$$.fragment,i),w(kr.$$.fragment,i),w(vr.$$.fragment,i),w(dn.$$.fragment,i),w(wr.$$.fragment,i),w(cn.$$.fragment,i),w($r.$$.fragment,i),w(Fr.$$.fragment,i),w(Er.$$.fragment,i),w(hn.$$.fragment,i),w(zr.$$.fragment,i),w(mn.$$.fragment,i),w(Cr.$$.fragment,i),w(qr.$$.fragment,i),w(xr.$$.fragment,i),w(fn.$$.fragment,i),w(Dr.$$.fragment,i),w(gn.$$.fragment,i),w(Ar.$$.fragment,i),gc=!0)},o(i){$(k.$$.fragment,i),$(ne.$$.fragment,i),$(re.$$.fragment,i),$(be.$$.fragment,i),$($n.$$.fragment,i),$(Fn.$$.fragment,i),$(Rn.$$.fragment,i),$(Mn.$$.fragment,i),$(zn.$$.fragment,i),$(Cn.$$.fragment,i),$(Ln.$$.fragment,i),$(Dn.$$.fragment,i),$(Nn.$$.fragment,i),$(On.$$.fragment,i),$(In.$$.fragment,i),$(Sn.$$.fragment,i),$(Un.$$.fragment,i),$(Qn.$$.fragment,i),$(Vn.$$.fragment,i),$(Jn.$$.fragment,i),$(Gn.$$.fragment,i),$(Xn.$$.fragment,i),$(os.$$.fragment,i),$(Do.$$.fragment,i),$(ns.$$.fragment,i),$(ss.$$.fragment,i),$(rs.$$.fragment,i),$(is.$$.fragment,i),$(No.$$.fragment,i),$(ls.$$.fragment,i),$(ds.$$.fragment,i),$(cs.$$.fragment,i),$(hs.$$.fragment,i),$(Io.$$.fragment,i),$(ms.$$.fragment,i),$(us.$$.fragment,i),$(fs.$$.fragment,i),$(ks.$$.fragment,i),$(Wo.$$.fragment,i),$(vs.$$.fragment,i),$(Ts.$$.fragment,i),$(bs.$$.fragment,i),$(ys.$$.fragment,i),$(Fs.$$.fragment,i),$(Ho.$$.fragment,i),$(Es.$$.fragment,i),$(Rs.$$.fragment,i),$(Bs.$$.fragment,i),$(Cs.$$.fragment,i),$(Ko.$$.fragment,i),$(qs.$$.fragment,i),$(xs.$$.fragment,i),$(Ps.$$.fragment,i),$(Ds.$$.fragment,i),$(Jo.$$.fragment,i),$(As.$$.fragment,i),$(Ns.$$.fragment,i),$(Os.$$.fragment,i),$(Xo.$$.fragment,i),$(Us.$$.fragment,i),$(Yo.$$.fragment,i),$(Hs.$$.fragment,i),$(Qs.$$.fragment,i),$(Ks.$$.fragment,i),$(en.$$.fragment,i),$(Ys.$$.f
ragment,i),$(tn.$$.fragment,i),$(Zs.$$.fragment,i),$(er.$$.fragment,i),$(tr.$$.fragment,i),$(nn.$$.fragment,i),$(ar.$$.fragment,i),$(cr.$$.fragment,i),$(pr.$$.fragment,i),$(hr.$$.fragment,i),$(rn.$$.fragment,i),$(gr.$$.fragment,i),$(an.$$.fragment,i),$(_r.$$.fragment,i),$(kr.$$.fragment,i),$(vr.$$.fragment,i),$(dn.$$.fragment,i),$(wr.$$.fragment,i),$(cn.$$.fragment,i),$($r.$$.fragment,i),$(Fr.$$.fragment,i),$(Er.$$.fragment,i),$(hn.$$.fragment,i),$(zr.$$.fragment,i),$(mn.$$.fragment,i),$(Cr.$$.fragment,i),$(qr.$$.fragment,i),$(xr.$$.fragment,i),$(fn.$$.fragment,i),$(Dr.$$.fragment,i),$(gn.$$.fragment,i),$(Ar.$$.fragment,i),gc=!1},d(i){t(h),i&&t(R),i&&t(g),F(k),i&&t(X),i&&t(M),F(ne),i&&t(pe),i&&t(J),i&&t(q),i&&t(ae),i&&t(he),i&&t(ie),i&&t(me),i&&t(C),i&&t(S),i&&t(le),i&&t(W),i&&t(Z),F(re),i&&t(E),i&&t(x),F(be),F($n),F(Fn),F(Rn),F(Mn),i&&t(Ad),i&&t(Ut),F(zn),i&&t(Nd),i&&t(Ce),F(Cn),F(Ln),F(Dn),F(Nn),F(On),i&&t(Od),i&&t(Kt),F(In),i&&t(Id),i&&t(De),F(Sn),F(Un),F(Qn),F(Vn),F(Jn),i&&t(Sd),i&&t(Vt),F(Gn),i&&t(Wd),i&&t(Je),F(Xn),F(os),F(Do),F(ns),i&&t(Ud),i&&t(Gt),F(ss),i&&t(Hd),i&&t(Et),F(rs),F(is),F(No),F(ls),i&&t(Qd),i&&t(Zt),F(ds),i&&t(Kd),i&&t(Rt),F(cs),F(hs),F(Io),F(ms),i&&t(Vd),i&&t(oo),F(us),i&&t(Jd),i&&t(ct),F(fs),F(ks),F(Wo),F(vs),F(Ts),i&&t(Gd),i&&t(so),F(bs),i&&t(Xd),i&&t(pt),F(ys),F(Fs),F(Ho),F(Es),i&&t(Yd),i&&t(ao),F(Rs),i&&t(Zd),i&&t(ht),F(Bs),F(Cs),F(Ko),F(qs),i&&t(ec),i&&t(lo),F(xs),i&&t(tc),i&&t(mt),F(Ps),F(Ds),F(Jo),F(As),i&&t(oc),i&&t(ho),F(Ns),i&&t(nc),i&&t(Ne),F(Os),F(Xo),F(Us),F(Yo),F(Hs),i&&t(sc),i&&t(uo),F(Qs),i&&t(rc),i&&t(Oe),F(Ks),F(en),F(Ys),F(tn),F(Zs),i&&t(ac),i&&t(go),F(er),i&&t(ic),i&&t(Ie),F(tr),F(nn),F(ar),F(cr),i&&t(lc),i&&t(_o),F(pr),i&&t(dc),i&&t(Se),F(hr),F(rn),F(gr),F(an),F(_r),i&&t(cc),i&&t(vo),F(kr),i&&t(pc),i&&t(We),F(vr),F(dn),F(wr),F(cn),F($r),i&&t(hc),i&&t(bo),F(Fr),i&&t(mc),i&&t(Ue),F(Er),F(hn),F(zr),F(mn),F(Cr),i&&t(uc),i&&t(wo),F(qr),i&&t(fc),i&&t(He),F(xr),F(fn),F(Dr),F(gn),F(Ar)}}}const Pw={local:"rembert",sections:[{local:"overview",title:"Overview"},{local:"transformers.RemBertConfig",title:"RemBertConfig"},{local:"transformers.RemBertTokenizer",title:"RemBertTokenizer"},{local:"transformers.RemBertTokenizerFast",title:"RemBertTokenizerFast"},{local:"transformers.RemBertModel",title:"RemBertModel"},{local:"transformers.RemBertForCausalLM",title:"RemBertForCausalLM"},{local:"transformers.RemBertForMaskedLM",title:"RemBertForMaskedLM"},{local:"transformers.RemBertForSequenceClassification",title:"RemBertForSequenceClassification"},{local:"transformers.RemBertForMultipleChoice",title:"RemBertForMultipleChoice"},{local:"transformers.RemBertForTokenClassification",title:"RemBertForTokenClassification"},{local:"transformers.RemBertForQuestionAnswering",title:"RemBertForQuestionAnswering"},{local:"transformers.TFRemBertModel",title:"TFRemBertModel"},{local:"transformers.TFRemBertForMaskedLM",title:"TFRemBertForMaskedLM"},{local:"transformers.TFRemBertForCausalLM",title:"TFRemBertForCausalLM"},{local:"transformers.TFRemBertForSequenceClassification",title:"TFRemBertForSequenceClassification"},{local:"transformers.TFRemBertForMultipleChoice",title:"TFRemBertForMultipleChoice"},{local:"transformers.TFRemBertForTokenClassification",title:"TFRemBertForTokenClassification"},{local:"transformers.TFRemBertForQuestionAnswering",title:"TFRemBertForQuestionAnswering"}],title:"RemBERT"};function Lw(V,h,R){let{fw:g}=h;return V.$$set=u=>{"fw"in u&&R(0,g=u.fw)},[g]}class Sw extends dw{constructor(h){super();cw(this,h,Lw,xw,pw,{fw:0})}}export{Sw as default,Pw as 
metadata};
9,924
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/transformerxl.mdx-7870d248.js
import{S as Vf,i as Uf,s as Rf,e as n,k as l,w as b,t as r,L as Gf,c as s,d as t,m as d,a,x as w,h as i,b as c,J as e,g as f,y,q as L,o as k,B as x}from"../../chunks/vendor-b1433968.js";import{T as Mt}from"../../chunks/Tip-c3840994.js";import{D as re}from"../../chunks/Docstring-ff504c58.js";import{C as Yt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Ke}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Kf(B){let p,$,u,g,v;return{c(){p=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),g=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=s(T,"P",{});var _=a(p);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(_,"CODE",{});var E=a(u);g=i(E,"Module"),E.forEach(t),v=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){f(T,p,_),e(p,$),e(p,u),e(u,g),e(p,v)},d(T){T&&t(p)}}}function Yf(B){let p,$,u,g,v;return{c(){p=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),g=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=s(T,"P",{});var _=a(p);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(_,"CODE",{});var E=a(u);g=i(E,"Module"),E.forEach(t),v=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){f(T,p,_),e(p,$),e(p,u),e(u,g),e(p,v)},d(T){T&&t(p)}}}function Jf(B){let p,$,u,g,v;return{c(){p=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),g=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=s(T,"P",{});var _=a(p);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(_,"CODE",{});var E=a(u);g=i(E,"Module"),E.forEach(t),v=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){f(T,p,_),e(p,$),e(p,u),e(u,g),e(p,v)},d(T){T&&t(p)}}}function Qf(B){let p,$,u,g,v,T,_,E,Te,J,X,K,H,Q,ve,A,be,fe,D,j,Z,te,F,C,oe,V,he,ne,I,we,me,z,ye,W,Y,ie,U,Le,le,S,pe,q,ke;return{c(){p=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),u=l(),g=n("ul"),v=n("li"),T=r("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=n("li"),Te=r("having all inputs as a list, tuple or dict in the first positional arguments."),J=l(),X=n("p"),K=r("This second option is useful when using "),H=n("code"),Q=r("tf.keras.Model.fit"),ve=r(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),be=r("model(inputs)"),fe=r("."),D=l(),j=n("p"),Z=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first 
positional argument :`),te=l(),F=n("ul"),C=n("li"),oe=r("a single Tensor with "),V=n("code"),he=r("input_ids"),ne=r(" only and nothing else: "),I=n("code"),we=r("model(inputs_ids)"),me=l(),z=n("li"),ye=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),Y=r("model([input_ids, attention_mask])"),ie=r(" or "),U=n("code"),Le=r("model([input_ids, attention_mask, token_type_ids])"),le=l(),S=n("li"),pe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),q=n("code"),ke=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var M=a(p);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),u=d(h),g=s(h,"UL",{});var se=a(g);v=s(se,"LI",{});var ue=a(v);T=i(ue,"having all inputs as keyword arguments (like PyTorch models), or"),ue.forEach(t),_=d(se),E=s(se,"LI",{});var Be=a(E);Te=i(Be,"having all inputs as a list, tuple or dict in the first positional arguments."),Be.forEach(t),se.forEach(t),J=d(h),X=s(h,"P",{});var O=a(X);K=i(O,"This second option is useful when using "),H=s(O,"CODE",{});var He=a(H);Q=i(He,"tf.keras.Model.fit"),He.forEach(t),ve=i(O,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(O,"CODE",{});var ge=a(A);be=i(ge,"model(inputs)"),ge.forEach(t),fe=i(O,"."),O.forEach(t),D=d(h),j=s(h,"P",{});var Fe=a(j);Z=i(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),te=d(h),F=s(h,"UL",{});var N=a(F);C=s(N,"LI",{});var R=a(C);oe=i(R,"a single Tensor with "),V=s(R,"CODE",{});var Ae=a(V);he=i(Ae,"input_ids"),Ae.forEach(t),ne=i(R," only and nothing else: "),I=s(R,"CODE",{});var xe=a(I);we=i(xe,"model(inputs_ids)"),xe.forEach(t),R.forEach(t),me=d(N),z=s(N,"LI",{});var P=a(z);ye=i(P,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(P,"CODE",{});var Ve=a(W);Y=i(Ve,"model([input_ids, attention_mask])"),Ve.forEach(t),ie=i(P," or "),U=s(P,"CODE",{});var ze=a(U);Le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),P.forEach(t),le=d(N),S=s(N,"LI",{});var $e=a(S);pe=i($e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),q=s($e,"CODE",{});var Ue=a(q);ke=i(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),$e.forEach(t),N.forEach(t)},m(h,M){f(h,p,M),e(p,$),f(h,u,M),f(h,g,M),e(g,v),e(v,T),e(g,_),e(g,E),e(E,Te),f(h,J,M),f(h,X,M),e(X,K),e(X,H),e(H,Q),e(X,ve),e(X,A),e(A,be),e(X,fe),f(h,D,M),f(h,j,M),e(j,Z),f(h,te,M),f(h,F,M),e(F,C),e(C,oe),e(C,V),e(V,he),e(C,ne),e(C,I),e(I,we),e(F,me),e(F,z),e(z,ye),e(z,W),e(W,Y),e(z,ie),e(z,U),e(U,Le),e(F,le),e(F,S),e(S,pe),e(S,q),e(q,ke)},d(h){h&&t(p),h&&t(u),h&&t(g),h&&t(J),h&&t(X),h&&t(D),h&&t(j),h&&t(te),h&&t(F)}}}function Zf(B){let p,$,u,g,v;return{c(){p=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),g=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=s(T,"P",{});var _=a(p);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(_,"CODE",{});var E=a(u);g=i(E,"Module"),E.forEach(t),v=i(_,` instance afterwards instead of 
this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){f(T,p,_),e(p,$),e(p,u),e(u,g),e(p,v)},d(T){T&&t(p)}}}function eh(B){let p,$,u,g,v,T,_,E,Te,J,X,K,H,Q,ve,A,be,fe,D,j,Z,te,F,C,oe,V,he,ne,I,we,me,z,ye,W,Y,ie,U,Le,le,S,pe,q,ke;return{c(){p=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),u=l(),g=n("ul"),v=n("li"),T=r("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=n("li"),Te=r("having all inputs as a list, tuple or dict in the first positional arguments."),J=l(),X=n("p"),K=r("This second option is useful when using "),H=n("code"),Q=r("tf.keras.Model.fit"),ve=r(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),be=r("model(inputs)"),fe=r("."),D=l(),j=n("p"),Z=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),te=l(),F=n("ul"),C=n("li"),oe=r("a single Tensor with "),V=n("code"),he=r("input_ids"),ne=r(" only and nothing else: "),I=n("code"),we=r("model(inputs_ids)"),me=l(),z=n("li"),ye=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),Y=r("model([input_ids, attention_mask])"),ie=r(" or "),U=n("code"),Le=r("model([input_ids, attention_mask, token_type_ids])"),le=l(),S=n("li"),pe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),q=n("code"),ke=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var M=a(p);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),u=d(h),g=s(h,"UL",{});var se=a(g);v=s(se,"LI",{});var ue=a(v);T=i(ue,"having all inputs as keyword arguments (like PyTorch models), or"),ue.forEach(t),_=d(se),E=s(se,"LI",{});var Be=a(E);Te=i(Be,"having all inputs as a list, tuple or dict in the first positional arguments."),Be.forEach(t),se.forEach(t),J=d(h),X=s(h,"P",{});var O=a(X);K=i(O,"This second option is useful when using "),H=s(O,"CODE",{});var He=a(H);Q=i(He,"tf.keras.Model.fit"),He.forEach(t),ve=i(O,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(O,"CODE",{});var ge=a(A);be=i(ge,"model(inputs)"),ge.forEach(t),fe=i(O,"."),O.forEach(t),D=d(h),j=s(h,"P",{});var Fe=a(j);Z=i(Fe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),te=d(h),F=s(h,"UL",{});var N=a(F);C=s(N,"LI",{});var R=a(C);oe=i(R,"a single Tensor with "),V=s(R,"CODE",{});var Ae=a(V);he=i(Ae,"input_ids"),Ae.forEach(t),ne=i(R," only and nothing else: "),I=s(R,"CODE",{});var xe=a(I);we=i(xe,"model(inputs_ids)"),xe.forEach(t),R.forEach(t),me=d(N),z=s(N,"LI",{});var P=a(z);ye=i(P,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(P,"CODE",{});var Ve=a(W);Y=i(Ve,"model([input_ids, attention_mask])"),Ve.forEach(t),ie=i(P," or "),U=s(P,"CODE",{});var ze=a(U);Le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),P.forEach(t),le=d(N),S=s(N,"LI",{});var $e=a(S);pe=i($e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),q=s($e,"CODE",{});var Ue=a(q);ke=i(Ue,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Ue.forEach(t),$e.forEach(t),N.forEach(t)},m(h,M){f(h,p,M),e(p,$),f(h,u,M),f(h,g,M),e(g,v),e(v,T),e(g,_),e(g,E),e(E,Te),f(h,J,M),f(h,X,M),e(X,K),e(X,H),e(H,Q),e(X,ve),e(X,A),e(A,be),e(X,fe),f(h,D,M),f(h,j,M),e(j,Z),f(h,te,M),f(h,F,M),e(F,C),e(C,oe),e(C,V),e(V,he),e(C,ne),e(C,I),e(I,we),e(F,me),e(F,z),e(z,ye),e(z,W),e(W,Y),e(z,ie),e(z,U),e(U,Le),e(F,le),e(F,S),e(S,pe),e(S,q),e(q,ke)},d(h){h&&t(p),h&&t(u),h&&t(g),h&&t(J),h&&t(X),h&&t(D),h&&t(j),h&&t(te),h&&t(F)}}}function th(B){let p,$,u,g,v;return{c(){p=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),g=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=s(T,"P",{});var _=a(p);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(_,"CODE",{});var E=a(u);g=i(E,"Module"),E.forEach(t),v=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){f(T,p,_),e(p,$),e(p,u),e(u,g),e(p,v)},d(T){T&&t(p)}}}function oh(B){let p,$,u,g,v,T,_,E,Te,J,X,K,H,Q,ve,A,be,fe,D,j,Z,te,F,C,oe,V,he,ne,I,we,me,z,ye,W,Y,ie,U,Le,le,S,pe,q,ke;return{c(){p=n("p"),$=r("TF 2.0 models accepts two formats as inputs:"),u=l(),g=n("ul"),v=n("li"),T=r("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=n("li"),Te=r("having all inputs as a list, tuple or dict in the first positional arguments."),J=l(),X=n("p"),K=r("This second option is useful when using "),H=n("code"),Q=r("tf.keras.Model.fit"),ve=r(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),be=r("model(inputs)"),fe=r("."),D=l(),j=n("p"),Z=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),te=l(),F=n("ul"),C=n("li"),oe=r("a single Tensor with "),V=n("code"),he=r("input_ids"),ne=r(" only and nothing else: "),I=n("code"),we=r("model(inputs_ids)"),me=l(),z=n("li"),ye=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),Y=r("model([input_ids, attention_mask])"),ie=r(" or "),U=n("code"),Le=r("model([input_ids, attention_mask, token_type_ids])"),le=l(),S=n("li"),pe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),q=n("code"),ke=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=s(h,"P",{});var M=a(p);$=i(M,"TF 2.0 models accepts two formats as inputs:"),M.forEach(t),u=d(h),g=s(h,"UL",{});var se=a(g);v=s(se,"LI",{});var ue=a(v);T=i(ue,"having all inputs as keyword arguments (like PyTorch models), or"),ue.forEach(t),_=d(se),E=s(se,"LI",{});var Be=a(E);Te=i(Be,"having all inputs as a list, tuple or dict in the first positional arguments."),Be.forEach(t),se.forEach(t),J=d(h),X=s(h,"P",{});var O=a(X);K=i(O,"This second option is useful when using "),H=s(O,"CODE",{});var He=a(H);Q=i(He,"tf.keras.Model.fit"),He.forEach(t),ve=i(O,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(O,"CODE",{});var ge=a(A);be=i(ge,"model(inputs)"),ge.forEach(t),fe=i(O,"."),O.forEach(t),D=d(h),j=s(h,"P",{});var Fe=a(j);Z=i(Fe,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),Fe.forEach(t),te=d(h),F=s(h,"UL",{});var N=a(F);C=s(N,"LI",{});var R=a(C);oe=i(R,"a single Tensor with "),V=s(R,"CODE",{});var Ae=a(V);he=i(Ae,"input_ids"),Ae.forEach(t),ne=i(R," only and nothing else: "),I=s(R,"CODE",{});var xe=a(I);we=i(xe,"model(inputs_ids)"),xe.forEach(t),R.forEach(t),me=d(N),z=s(N,"LI",{});var P=a(z);ye=i(P,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(P,"CODE",{});var Ve=a(W);Y=i(Ve,"model([input_ids, attention_mask])"),Ve.forEach(t),ie=i(P," or "),U=s(P,"CODE",{});var ze=a(U);Le=i(ze,"model([input_ids, attention_mask, token_type_ids])"),ze.forEach(t),P.forEach(t),le=d(N),S=s(N,"LI",{});var $e=a(S);pe=i($e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),q=s($e,"CODE",{});var Ue=a(q);ke=i(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),$e.forEach(t),N.forEach(t)},m(h,M){f(h,p,M),e(p,$),f(h,u,M),f(h,g,M),e(g,v),e(v,T),e(g,_),e(g,E),e(E,Te),f(h,J,M),f(h,X,M),e(X,K),e(X,H),e(H,Q),e(X,ve),e(X,A),e(A,be),e(X,fe),f(h,D,M),f(h,j,M),e(j,Z),f(h,te,M),f(h,F,M),e(F,C),e(C,oe),e(C,V),e(V,he),e(C,ne),e(C,I),e(I,we),e(F,me),e(F,z),e(z,ye),e(z,W),e(W,Y),e(z,ie),e(z,U),e(U,Le),e(F,le),e(F,S),e(S,pe),e(S,q),e(q,ke)},d(h){h&&t(p),h&&t(u),h&&t(g),h&&t(J),h&&t(X),h&&t(D),h&&t(j),h&&t(te),h&&t(F)}}}function nh(B){let p,$,u,g,v;return{c(){p=n("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),g=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=s(T,"P",{});var _=a(p);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(_,"CODE",{});var E=a(u);g=i(E,"Module"),E.forEach(t),v=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){f(T,p,_),e(p,$),e(p,u),e(u,g),e(p,v)},d(T){T&&t(p)}}}function sh(B){let 
p,$,u,g,v,T,_,E,Te,J,X,K,H,Q,ve,A,be,fe,D,j,Z,te,F,C,oe,V,he,ne,I,we,me,z,ye,W,Y,ie,U,Le,le,S,pe,q,ke,h,M,se,ue,Be,O,He,ge,Fe,N,R,Ae,xe,P,Ve,ze,$e,Ue,Hn,Va,Ua,Jt,Ra,Vs,it,Et,An,Qt,Ga,Dn,Ka,Us,Xe,Zt,Ya,Ye,Ja,ln,Qa,Za,dn,er,tr,eo,or,nr,sr,lt,ar,cn,rr,ir,fn,lr,dr,cr,In,fr,hr,to,Rs,dt,Ft,Wn,oo,mr,Bn,pr,Gs,De,no,ur,so,gr,ao,_r,Tr,vr,ro,br,hn,wr,yr,Lr,Vn,Ks,ct,zt,Un,io,kr,Rn,xr,Ys,ft,lo,$r,Gn,Xr,Js,ht,co,Mr,Kn,Er,Qs,mt,fo,Fr,Yn,zr,Zs,pt,ho,Cr,Jn,qr,ea,ut,Ct,Qn,mo,Pr,Zn,jr,ta,Me,po,Sr,es,Or,Nr,uo,Hr,mn,Ar,Dr,Ir,go,Wr,_o,Br,Vr,Ur,Ce,To,Rr,gt,Gr,pn,Kr,Yr,ts,Jr,Qr,Zr,qt,ei,os,ti,oi,vo,oa,_t,Pt,ns,bo,ni,ss,si,na,Ee,wo,ai,as,ri,ii,yo,li,un,di,ci,fi,Lo,hi,ko,mi,pi,ui,qe,xo,gi,Tt,_i,gn,Ti,vi,rs,bi,wi,yi,jt,Li,is,ki,xi,$o,sa,vt,St,ls,Xo,$i,ds,Xi,aa,ee,Mo,Mi,cs,Ei,Fi,_n,Tn,zi,Ci,qi,Ie,Pi,fs,ji,Si,hs,Oi,Ni,ms,Hi,Ai,ps,Di,Ii,Wi,Eo,Bi,vn,Vi,Ui,Ri,Fo,Gi,zo,Ki,Yi,Ji,ae,Co,Qi,bt,Zi,bn,el,tl,us,ol,nl,sl,Ot,al,gs,rl,il,qo,ll,_s,dl,cl,Po,ra,wt,Nt,Ts,jo,fl,vs,hl,ia,de,So,ml,bs,pl,ul,Oo,gl,wn,_l,Tl,vl,No,bl,Ho,wl,yl,Ll,Ht,kl,Pe,Ao,xl,yt,$l,yn,Xl,Ml,ws,El,Fl,zl,At,Cl,ys,ql,Pl,Do,la,Lt,Dt,Ls,Io,jl,ks,Sl,da,ce,Wo,Ol,xs,Nl,Hl,Bo,Al,Ln,Dl,Il,Wl,Vo,Bl,Uo,Vl,Ul,Rl,It,Gl,je,Ro,Kl,kt,Yl,kn,Jl,Ql,$s,Zl,ed,td,Wt,od,Xs,nd,sd,Go,ca,xt,Bt,Ms,Ko,ad,Es,rd,fa,G,Yo,id,Fs,ld,dd,xn,$n,cd,fd,hd,We,md,zs,pd,ud,Cs,gd,_d,qs,Td,vd,Ps,bd,wd,yd,Jo,Ld,Xn,kd,xd,$d,Qo,Xd,Zo,Md,Ed,Fd,Vt,zd,Se,en,Cd,$t,qd,Mn,Pd,jd,js,Sd,Od,Nd,Ut,Hd,Ss,Ad,Dd,tn,ha,Xt,Rt,Os,on,Id,Ns,Wd,ma,En,pa,Fn,ua;return T=new Ke({}),Q=new Ke({}),Qt=new Ke({}),Zt=new re({props:{name:"class transformers.TransfoXLConfig",anchor:"transformers.TransfoXLConfig",parameters:[{name:"vocab_size",val:" = 267735"},{name:"cutoffs",val:" = [20000, 40000, 200000]"},{name:"d_model",val:" = 1024"},{name:"d_embed",val:" = 1024"},{name:"n_head",val:" = 16"},{name:"d_head",val:" = 64"},{name:"d_inner",val:" = 4096"},{name:"div_val",val:" = 4"},{name:"pre_lnorm",val:" = False"},{name:"n_layer",val:" = 18"},{name:"mem_len",val:" = 1600"},{name:"clamp_len",val:" = 1000"},{name:"same_length",val:" = True"},{name:"proj_share_all_but_first",val:" = True"},{name:"attn_type",val:" = 0"},{name:"sample_softmax",val:" = -1"},{name:"adaptive",val:" = True"},{name:"dropout",val:" = 0.1"},{name:"dropatt",val:" = 0.0"},{name:"untie_r",val:" = True"},{name:"init",val:" = 'normal'"},{name:"init_range",val:" = 0.01"},{name:"proj_init_std",val:" = 0.01"},{name:"init_std",val:" = 0.02"},{name:"layer_norm_epsilon",val:" = 1e-05"},{name:"eos_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/configuration_transfo_xl.py#L29",parametersDescription:[{anchor:"transformers.TransfoXLConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 267735) &#x2014; Vocabulary size of the BERT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLModel">TransfoXLModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLModel">TFTransfoXLModel</a>.`,name:"vocab_size"},{anchor:"transformers.TransfoXLConfig.cutoffs",description:`<strong>cutoffs</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[20000, 40000, 200000]</code>) &#x2014; Cutoffs for the adaptive softmax.`,name:"cutoffs"},{anchor:"transformers.TransfoXLConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the model&#x2019;s hidden states.`,name:"d_model"},{anchor:"transformers.TransfoXLConfig.d_embed",description:`<strong>d_embed</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the embeddings`,name:"d_embed"},{anchor:"transformers.TransfoXLConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.TransfoXLConfig.d_head",description:`<strong>d_head</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Dimensionality of the model&#x2019;s heads.`,name:"d_head"},{anchor:"transformers.TransfoXLConfig.d_inner",description:`<strong>d_inner</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Inner dimension in FF`,name:"d_inner"},{anchor:"transformers.TransfoXLConfig.div_val",description:`<strong>div_val</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; Divident value for adapative input and softmax`,name:"div_val"},{anchor:"transformers.TransfoXLConfig.pre_lnorm",description:`<strong>pre_lnorm</strong> (<code>boolean</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to apply LayerNorm to the input instead of the output in the blocks.`,name:"pre_lnorm"},{anchor:"transformers.TransfoXLConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 18) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.TransfoXLConfig.mem_len",description:`<strong>mem_len</strong> (<code>int</code>, <em>optional</em>, defaults to 1600) &#x2014; Length of the retained previous heads.`,name:"mem_len"},{anchor:"transformers.TransfoXLConfig.clamp_len",description:`<strong>clamp_len</strong> (<code>int</code>, <em>optional</em>, defaults to 1000) &#x2014; Use the same pos embeddings after clamp_len.`,name:"clamp_len"},{anchor:"transformers.TransfoXLConfig.same_length",description:`<strong>same_length</strong> (<code>boolean</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use the same attn length for all tokens`,name:"same_length"},{anchor:"transformers.TransfoXLConfig.proj_share_all_but_first",description:`<strong>proj_share_all_but_first</strong> (<code>boolean</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; True to share all but first projs, False not to share.`,name:"proj_share_all_but_first"},{anchor:"transformers.TransfoXLConfig.attn_type",description:`<strong>attn_type</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Attention type. 
0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.`,name:"attn_type"},{anchor:"transformers.TransfoXLConfig.sample_softmax",description:`<strong>sample_softmax</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Number of samples in the sampled softmax.`,name:"sample_softmax"},{anchor:"transformers.TransfoXLConfig.adaptive",description:`<strong>adaptive</strong> (<code>boolean</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use adaptive softmax.`,name:"adaptive"},{anchor:"transformers.TransfoXLConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.TransfoXLConfig.dropatt",description:`<strong>dropatt</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout ratio for the attention probabilities.`,name:"dropatt"},{anchor:"transformers.TransfoXLConfig.untie_r",description:`<strong>untie_r</strong> (<code>boolean</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether ot not to untie relative position biases.`,name:"untie_r"},{anchor:"transformers.TransfoXLConfig.init",description:`<strong>init</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;normal&quot;</code>) &#x2014; Parameter initializer to use.`,name:"init"},{anchor:"transformers.TransfoXLConfig.init_range",description:`<strong>init_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.01) &#x2014; Parameters initialized by U(-init_range, init_range).`,name:"init_range"},{anchor:"transformers.TransfoXLConfig.proj_init_std",description:`<strong>proj_init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.01) &#x2014; Parameters initialized by N(0, init_std)`,name:"proj_init_std"},{anchor:"transformers.TransfoXLConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; Parameters initialized by N(0, init_std)`,name:"init_std"},{anchor:"transformers.TransfoXLConfig.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon to use in the layer normalization layers`,name:"layer_norm_epsilon"}]}}),to=new Yt({props:{code:`from transformers import TransfoXLConfig, TransfoXLModel # Initializing a Transformer XL configuration configuration = TransfoXLConfig() # Initializing a model from the configuration model = TransfoXLModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLConfig, TransfoXLModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Transformer XL configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = TransfoXLConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TransfoXLModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),oo=new Ke({}),no=new re({props:{name:"class 
transformers.TransfoXLTokenizer",anchor:"transformers.TransfoXLTokenizer",parameters:[{name:"special",val:" = None"},{name:"min_freq",val:" = 0"},{name:"max_size",val:" = None"},{name:"lower_case",val:" = False"},{name:"delimiter",val:" = None"},{name:"vocab_file",val:" = None"},{name:"pretrained_vocab_file",val:": str = None"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '<unk>'"},{name:"eos_token",val:" = '<eos>'"},{name:"additional_special_tokens",val:" = ['<formula>']"},{name:"language",val:" = 'en'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/tokenization_transfo_xl.py#L115",parametersDescription:[{anchor:"transformers.TransfoXLTokenizer.special",description:`<strong>special</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of special tokens (to be treated by the original implementation of this tokenizer).`,name:"special"},{anchor:"transformers.TransfoXLTokenizer.min_freq",description:`<strong>min_freq</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it will be mapped to <code>unk_token</code>).`,name:"min_freq"},{anchor:"transformers.TransfoXLTokenizer.max_size",description:`<strong>max_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found after excluding the tokens according to the <code>min_freq</code> rule.`,name:"max_size"},{anchor:"transformers.TransfoXLTokenizer.lower_case",description:`<strong>lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"lower_case"},{anchor:"transformers.TransfoXLTokenizer.delimiter",description:`<strong>delimiter</strong> (<code>str</code>, <em>optional</em>) &#x2014; The delimiter used between tokens.`,name:"delimiter"},{anchor:"transformers.TransfoXLTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>, <em>optional</em>) &#x2014; File containing the vocabulary (from the original implementation).`,name:"vocab_file"},{anchor:"transformers.TransfoXLTokenizer.pretrained_vocab_file",description:`<strong>pretrained_vocab_file</strong> (<code>str</code>, <em>optional</em>) &#x2014; File containing the vocabulary as saved with the <code>save_pretrained()</code> method.`,name:"pretrained_vocab_file"},{anchor:"transformers.TransfoXLTokenizer.never_split",description:`<strong>never_split</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of tokens that should never be split. If no list is specified, will simply use the existing special tokens.`,name:"never_split"},{anchor:"transformers.TransfoXLTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.TransfoXLTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;eos&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.TransfoXLTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;formula&gt;&quot;]</code>) &#x2014; A list of additional special tokens (for the HuggingFace functionality).`,name:"additional_special_tokens"},{anchor:"transformers.TransfoXLTokenizer.language",description:`<strong>language</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;en&quot;</code>) &#x2014; The language of this tokenizer (used for mose preprocessing).`,name:"language"}]}}),io=new Ke({}),lo=new re({props:{name:"class transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput",anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"mems",val:": typing.List[torch.FloatTensor] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L606",parametersDescription:[{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"mems"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),co=new re({props:{name:"class transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput",anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput",parameters:[{name:"losses",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prediction_scores",val:": FloatTensor = None"},{name:"mems",val:": typing.List[torch.FloatTensor] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L669",parametersDescription:[{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput.losses",description:`<strong>losses</strong> (<code>torch.FloatTensor</code> of shape <em>(batch_size, sequence_length-1)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling losses (not reduced).`,name:"losses"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput.prediction_scores",description:`<strong>prediction_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).`,name:"prediction_scores"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"mems"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),fo=new re({props:{name:"class transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput",anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"mems",val:": typing.List[tensorflow.python.framework.ops.Tensor] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L702",parametersDescription:[{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"mems"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ho=new re({props:{name:"class transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput",anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput",parameters:[{name:"prediction_scores",val:": Tensor = None"},{name:"mems",val:": typing.List[tensorflow.python.framework.ops.Tensor] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L732",parametersDescription:[{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput.losses",description:`<strong>losses</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, sequence_length-1)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling losses (not reduced).`,name:"losses"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput.prediction_scores",description:`<strong>prediction_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).`,name:"prediction_scores"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"mems"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),mo=new Ke({}),po=new re({props:{name:"class transformers.TransfoXLModel",anchor:"transformers.TransfoXLModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L766",parametersDescription:[{anchor:"transformers.TransfoXLModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),To=new re({props:{name:"forward",anchor:"transformers.TransfoXLModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"mems",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L870",parametersDescription:[{anchor:"transformers.TransfoXLModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLTokenizer">TransfoXLTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TransfoXLModel.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>mems</code> output below). Can be used to speed up sequential decoding. The token ids which have their mems given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.TransfoXLModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TransfoXLModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TransfoXLModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TransfoXLModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TransfoXLModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput" >transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig" >TransfoXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput" >transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qt=new Mt({props:{$$slots:{default:[Kf]},$$scope:{ctx:B}}}),vo=new Yt({props:{code:`from transformers import TransfoXLTokenizer, TransfoXLModel import torch tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TransfoXLModel.from_pretrained('transfo-xl-wt103') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TransfoXLModel <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TransfoXLModel.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),bo=new Ke({}),wo=new re({props:{name:"class transformers.TransfoXLLMHeadModel",anchor:"transformers.TransfoXLLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L1008",parametersDescription:[{anchor:"transformers.TransfoXLLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xo=new re({props:{name:"forward",anchor:"transformers.TransfoXLLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"mems",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L1052",parametersDescription:[{anchor:"transformers.TransfoXLLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLTokenizer">TransfoXLTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TransfoXLLMHeadModel.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>mems</code> output below). Can be used to speed up sequential decoding. 
The token ids which have their mems given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.TransfoXLLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TransfoXLLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TransfoXLLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TransfoXLLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TransfoXLLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TransfoXLLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput" >transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig" >TransfoXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>losses</strong> (<code>torch.FloatTensor</code> of shape <em>(batch_size, sequence_length-1)</em>, <em>optional</em>, returned when <code>labels</code> is provided) Language modeling losses (not reduced).</p> </li> <li> <p><strong>prediction_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput" >transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),jt=new Mt({props:{$$slots:{default:[Yf]},$$scope:{ctx:B}}}),$o=new Yt({props:{code:`import torch from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TransfoXLLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TransfoXLLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Xo=new Ke({}),Mo=new re({props:{name:"class transformers.TransfoXLForSequenceClassification",anchor:"transformers.TransfoXLForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L1164",parametersDescription:[{anchor:"transformers.TransfoXLForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Co=new re({props:{name:"forward",anchor:"transformers.TransfoXLForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"mems",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_transfo_xl.py#L1175",parametersDescription:[{anchor:"transformers.TransfoXLForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLTokenizer">TransfoXLTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>mems</code> output below). Can be used to speed up sequential decoding. The token ids which have their mems given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TransfoXLForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLSequenceClassifierOutputWithPast</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig" >TransfoXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLSequenceClassifierOutputWithPast</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ot=new Mt({props:{$$slots:{default:[Jf]},$$scope:{ctx:B}}}),qo=new Yt({props:{code:`from transformers import TransfoXLTokenizer, TransfoXLForSequenceClassification import torch tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TransfoXLForSequenceClassification.from_pretrained('transfo-xl-wt103') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TransfoXLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TransfoXLForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Po=new Yt({props:{code:`from transformers import TransfoXLTokenizer, TransfoXLForSequenceClassification import torch tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TransfoXLForSequenceClassification.from_pretrained('transfo-xl-wt103', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TransfoXLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TransfoXLForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),jo=new Ke({}),So=new re({props:{name:"class transformers.TFTransfoXLModel",anchor:"transformers.TFTransfoXLModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L878",parametersDescription:[{anchor:"transformers.TFTransfoXLModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ht=new Mt({props:{$$slots:{default:[Qf]},$$scope:{ctx:B}}}),Ao=new re({props:{name:"call",anchor:"transformers.TFTransfoXLModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"mems",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L883",parametersDescription:[{anchor:"transformers.TFTransfoXLModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTransfoXLModel.call.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>mems</code> output below). Can be used to speed up sequential decoding. The token ids which have their mems given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.TFTransfoXLModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTransfoXLModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTransfoXLModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTransfoXLModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTransfoXLModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTransfoXLModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput" >transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig" >TransfoXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput" >transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),At=new Mt({props:{$$slots:{default:[Zf]},$$scope:{ctx:B}}}),Do=new Yt({props:{code:`from transformers import TransfoXLTokenizer, TFTransfoXLModel import tensorflow as tf tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TFTransfoXLModel.from_pretrained('transfo-xl-wt103') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TFTransfoXLModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTransfoXLModel.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Io=new Ke({}),Wo=new re({props:{name:"class transformers.TFTransfoXLLMHeadModel",anchor:"transformers.TFTransfoXLLMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L947",parametersDescription:[{anchor:"transformers.TFTransfoXLLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),It=new Mt({props:{$$slots:{default:[eh]},$$scope:{ctx:B}}}),Ro=new re({props:{name:"call",anchor:"transformers.TFTransfoXLLMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"mems",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L975",parametersDescription:[{anchor:"transformers.TFTransfoXLLMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>mems</code> output below). Can be used to speed up sequential decoding. The token ids which have their mems given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTransfoXLLMHeadModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput" >transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig" >TransfoXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>losses</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, sequence_length-1)</em>, <em>optional</em>, returned when <code>labels</code> is provided) Language modeling losses (not reduced).</p> </li> <li> <p><strong>prediction_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput" >transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Wt=new Mt({props:{$$slots:{default:[th]},$$scope:{ctx:B}}}),Go=new Yt({props:{code:`from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel import tensorflow as tf tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TFTransfoXLLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTransfoXLLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ko=new Ke({}),Yo=new re({props:{name:"class transformers.TFTransfoXLForSequenceClassification",anchor:"transformers.TFTransfoXLForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L1076",parametersDescription:[{anchor:"transformers.TFTransfoXLForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig">TransfoXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vt=new Mt({props:{$$slots:{default:[oh]},$$scope:{ctx:B}}}),en=new re({props:{name:"call",anchor:"transformers.TFTransfoXLForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"mems",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py#L1091",parametersDescription:[{anchor:"transformers.TFTransfoXLForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>mems</code> output below). Can be used to speed up sequential decoding. The token ids which have their mems given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFTransfoXLForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLSequenceClassifierOutputWithPast</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLConfig" >TransfoXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as input ids as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLSequenceClassifierOutputWithPast</code> or <code>tuple(tf.Tensor)</code></p> `}}),Ut=new Mt({props:{$$slots:{default:[nh]},$$scope:{ctx:B}}}),tn=new Yt({props:{code:`from transformers import TransfoXLTokenizer, TFTransfoXLForSequenceClassification import tensorflow as tf tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TFTransfoXLForSequenceClassification.from_pretrained('transfo-xl-wt103') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TransfoXLTokenizer, TFTransfoXLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TransfoXLTokenizer.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTransfoXLForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;transfo-xl-wt103&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),on=new Ke({}),{c(){p=n("meta"),$=l(),u=n("h1"),g=n("a"),v=n("span"),b(T.$$.fragment),_=l(),E=n("span"),Te=r("Transformer XL"),J=l(),X=n("h2"),K=n("a"),H=n("span"),b(Q.$$.fragment),ve=l(),A=n("span"),be=r("Overview"),fe=l(),D=n("p"),j=r("The Transformer-XL model was proposed in "),Z=n("a"),te=r("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),F=r(` by 
Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. It\u2019s a causal (uni-directional) transformer with relative positioning (sinuso\xEFdal) embeddings which can reuse previously computed hidden-states to attend to longer context (memory). This model also uses adaptive softmax inputs and outputs (tied).`),C=l(),oe=n("p"),V=r("The abstract from the paper is the following:"),he=l(),ne=n("p"),I=n("em"),we=r(`Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens.`),me=l(),z=n("p"),ye=r("Tips:"),W=l(),Y=n("ul"),ie=n("li"),U=r(`Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right. The original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left.`),Le=l(),le=n("li"),S=r("Transformer-XL is one of the few models that has no sequence length limit."),pe=l(),q=n("p"),ke=r("This model was contributed by "),h=n("a"),M=r("thomwolf"),se=r(". The original code can be found "),ue=n("a"),Be=r("here"),O=r("."),He=l(),ge=n("p"),Fe=n("strong"),N=r("Note"),R=r(":"),Ae=l(),xe=n("ul"),P=n("li"),Ve=r("TransformerXL does "),ze=n("strong"),$e=r("not"),Ue=r(" work with "),Hn=n("em"),Va=r("torch.nn.DataParallel"),Ua=r(" due to a bug in PyTorch, see "),Jt=n("a"),Ra=r("issue #36035"),Vs=l(),it=n("h2"),Et=n("a"),An=n("span"),b(Qt.$$.fragment),Ga=l(),Dn=n("span"),Ka=r("TransfoXLConfig"),Us=l(),Xe=n("div"),b(Zt.$$.fragment),Ya=l(),Ye=n("p"),Ja=r("This is the configuration class to store the configuration of a "),ln=n("a"),Qa=r("TransfoXLModel"),Za=r(` or a `),dn=n("a"),er=r("TFTransfoXLModel"),tr=r(`. It is used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),eo=n("a"),or=r("Transformer XL"),nr=r(" architecture."),sr=l(),lt=n("p"),ar=r("Configuration objects inherit from "),cn=n("a"),rr=r("PretrainedConfig"),ir=r(` and can be used to control the model outputs. Read the documentation from `),fn=n("a"),lr=r("PretrainedConfig"),dr=r(" for more information."),cr=l(),In=n("p"),fr=r("Examples:"),hr=l(),b(to.$$.fragment),Rs=l(),dt=n("h2"),Ft=n("a"),Wn=n("span"),b(oo.$$.fragment),mr=l(),Bn=n("span"),pr=r("TransfoXLTokenizer"),Gs=l(),De=n("div"),b(no.$$.fragment),ur=l(),so=n("p"),gr=r("Construct a Transformer-XL tokenizer adapted from Vocab class in "),ao=n("a"),_r=r("the original code"),Tr=r(`. 
The Transformer-XL tokenizer is a word-level tokenizer (no sub-word tokenization).`),vr=l(),ro=n("p"),br=r("This tokenizer inherits from "),hn=n("a"),wr=r("PreTrainedTokenizer"),yr=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Lr=l(),Vn=n("div"),Ks=l(),ct=n("h2"),zt=n("a"),Un=n("span"),b(io.$$.fragment),kr=l(),Rn=n("span"),xr=r("TransfoXL specific outputs"),Ys=l(),ft=n("div"),b(lo.$$.fragment),$r=l(),Gn=n("p"),Xr=r("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Js=l(),ht=n("div"),b(co.$$.fragment),Mr=l(),Kn=n("p"),Er=r("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Qs=l(),mt=n("div"),b(fo.$$.fragment),Fr=l(),Yn=n("p"),zr=r("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Zs=l(),pt=n("div"),b(ho.$$.fragment),Cr=l(),Jn=n("p"),qr=r("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),ea=l(),ut=n("h2"),Ct=n("a"),Qn=n("span"),b(mo.$$.fragment),Pr=l(),Zn=n("span"),jr=r("TransfoXLModel"),ta=l(),Me=n("div"),b(po.$$.fragment),Sr=l(),es=n("p"),Or=r("The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),Nr=l(),uo=n("p"),Hr=r("This model inherits from "),mn=n("a"),Ar=r("PreTrainedModel"),Dr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ir=l(),go=n("p"),Wr=r("This model is also a PyTorch "),_o=n("a"),Br=r("torch.nn.Module"),Vr=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ur=l(),Ce=n("div"),b(To.$$.fragment),Rr=l(),gt=n("p"),Gr=r("The "),pn=n("a"),Kr=r("TransfoXLModel"),Yr=r(" forward method, overrides the "),ts=n("code"),Jr=r("__call__"),Qr=r(" special method."),Zr=l(),b(qt.$$.fragment),ei=l(),os=n("p"),ti=r("Example:"),oi=l(),b(vo.$$.fragment),oa=l(),_t=n("h2"),Pt=n("a"),ns=n("span"),b(bo.$$.fragment),ni=l(),ss=n("span"),si=r("TransfoXLLMHeadModel"),na=l(),Ee=n("div"),b(wo.$$.fragment),ai=l(),as=n("p"),ri=r(`The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive input embeddings)`),ii=l(),yo=n("p"),li=r("This model inherits from "),un=n("a"),di=r("PreTrainedModel"),ci=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fi=l(),Lo=n("p"),hi=r("This model is also a PyTorch "),ko=n("a"),mi=r("torch.nn.Module"),pi=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ui=l(),qe=n("div"),b(xo.$$.fragment),gi=l(),Tt=n("p"),_i=r("The "),gn=n("a"),Ti=r("TransfoXLLMHeadModel"),vi=r(" forward method, overrides the "),rs=n("code"),bi=r("__call__"),wi=r(" special method."),yi=l(),b(jt.$$.fragment),Li=l(),is=n("p"),ki=r("Example:"),xi=l(),b($o.$$.fragment),sa=l(),vt=n("h2"),St=n("a"),ls=n("span"),b(Xo.$$.fragment),$i=l(),ds=n("span"),Xi=r("TransfoXLForSequenceClassification"),aa=l(),ee=n("div"),b(Mo.$$.fragment),Mi=l(),cs=n("p"),Ei=r("The Transformer-XL Model transformer with a sequence classification head on top (linear layer)."),Fi=l(),_n=n("p"),Tn=n("a"),zi=r("TransfoXLForSequenceClassification"),Ci=r(` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),qi=l(),Ie=n("p"),Pi=r(`Since it does classification on the last token, it requires to know the position of the last token. If a `),fs=n("code"),ji=r("pad_token_id"),Si=r(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),hs=n("code"),Oi=r("pad_token_id"),Ni=r(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),ms=n("code"),Hi=r("inputs_embeds"),Ai=r(" are passed instead of "),ps=n("code"),Di=r("input_ids"),Ii=r(`, it does the same (take the last value in each row of the batch).`),Wi=l(),Eo=n("p"),Bi=r("This model inherits from "),vn=n("a"),Vi=r("PreTrainedModel"),Ui=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ri=l(),Fo=n("p"),Gi=r("This model is also a PyTorch "),zo=n("a"),Ki=r("torch.nn.Module"),Yi=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ji=l(),ae=n("div"),b(Co.$$.fragment),Qi=l(),bt=n("p"),Zi=r("The "),bn=n("a"),el=r("TransfoXLForSequenceClassification"),tl=r(" forward method, overrides the "),us=n("code"),ol=r("__call__"),nl=r(" special method."),sl=l(),b(Ot.$$.fragment),al=l(),gs=n("p"),rl=r("Example of single-label classification:"),il=l(),b(qo.$$.fragment),ll=l(),_s=n("p"),dl=r("Example of multi-label classification:"),cl=l(),b(Po.$$.fragment),ra=l(),wt=n("h2"),Nt=n("a"),Ts=n("span"),b(jo.$$.fragment),fl=l(),vs=n("span"),hl=r("TFTransfoXLModel"),ia=l(),de=n("div"),b(So.$$.fragment),ml=l(),bs=n("p"),pl=r("The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),ul=l(),Oo=n("p"),gl=r("This model inherits from "),wn=n("a"),_l=r("TFPreTrainedModel"),Tl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vl=l(),No=n("p"),bl=r("This model is also a "),Ho=n("a"),wl=r("tf.keras.Model"),yl=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ll=l(),b(Ht.$$.fragment),kl=l(),Pe=n("div"),b(Ao.$$.fragment),xl=l(),yt=n("p"),$l=r("The "),yn=n("a"),Xl=r("TFTransfoXLModel"),Ml=r(" forward method, overrides the "),ws=n("code"),El=r("__call__"),Fl=r(" special method."),zl=l(),b(At.$$.fragment),Cl=l(),ys=n("p"),ql=r("Example:"),Pl=l(),b(Do.$$.fragment),la=l(),Lt=n("h2"),Dt=n("a"),Ls=n("span"),b(Io.$$.fragment),jl=l(),ks=n("span"),Sl=r("TFTransfoXLLMHeadModel"),da=l(),ce=n("div"),b(Wo.$$.fragment),Ol=l(),xs=n("p"),Nl=r(`The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive input embeddings)`),Hl=l(),Bo=n("p"),Al=r("This model inherits from "),Ln=n("a"),Dl=r("TFPreTrainedModel"),Il=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wl=l(),Vo=n("p"),Bl=r("This model is also a "),Uo=n("a"),Vl=r("tf.keras.Model"),Ul=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Rl=l(),b(It.$$.fragment),Gl=l(),je=n("div"),b(Ro.$$.fragment),Kl=l(),kt=n("p"),Yl=r("The "),kn=n("a"),Jl=r("TFTransfoXLLMHeadModel"),Ql=r(" forward method, overrides the "),$s=n("code"),Zl=r("__call__"),ed=r(" special method."),td=l(),b(Wt.$$.fragment),od=l(),Xs=n("p"),nd=r("Example:"),sd=l(),b(Go.$$.fragment),ca=l(),xt=n("h2"),Bt=n("a"),Ms=n("span"),b(Ko.$$.fragment),ad=l(),Es=n("span"),rd=r("TFTransfoXLForSequenceClassification"),fa=l(),G=n("div"),b(Yo.$$.fragment),id=l(),Fs=n("p"),ld=r("The Transfo XL Model transformer with a sequence classification head on top (linear layer)."),dd=l(),xn=n("p"),$n=n("a"),cd=r("TFTransfoXLForSequenceClassification"),fd=r(` uses the last token in order to do the classification, as other causal models (e.g. GPT-1,GPT-2) do.`),hd=l(),We=n("p"),md=r(`Since it does classification on the last token, it requires to know the position of the last token. If a `),zs=n("code"),pd=r("pad_token_id"),ud=r(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Cs=n("code"),gd=r("pad_token_id"),_d=r(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),qs=n("code"),Td=r("inputs_embeds"),vd=r(" are passed instead of "),Ps=n("code"),bd=r("input_ids"),wd=r(`, it does the same (take the last value in each row of the batch).`),yd=l(),Jo=n("p"),Ld=r("This model inherits from "),Xn=n("a"),kd=r("TFPreTrainedModel"),xd=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$d=l(),Qo=n("p"),Xd=r("This model is also a "),Zo=n("a"),Md=r("tf.keras.Model"),Ed=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Fd=l(),b(Vt.$$.fragment),zd=l(),Se=n("div"),b(en.$$.fragment),Cd=l(),$t=n("p"),qd=r("The "),Mn=n("a"),Pd=r("TFTransfoXLForSequenceClassification"),jd=r(" forward method, overrides the "),js=n("code"),Sd=r("__call__"),Od=r(" special method."),Nd=l(),b(Ut.$$.fragment),Hd=l(),Ss=n("p"),Ad=r("Example:"),Dd=l(),b(tn.$$.fragment),ha=l(),Xt=n("h2"),Rt=n("a"),Os=n("span"),b(on.$$.fragment),Id=l(),Ns=n("span"),Wd=r("Internal Layers"),ma=l(),En=n("div"),pa=l(),Fn=n("div"),this.h()},l(o){const m=Gf('[data-svelte="svelte-1phssyn"]',document.head);p=s(m,"META",{name:!0,content:!0}),m.forEach(t),$=d(o),u=s(o,"H1",{class:!0});var nn=a(u);g=s(nn,"A",{id:!0,class:!0,href:!0});var Hs=a(g);v=s(Hs,"SPAN",{});var As=a(v);w(T.$$.fragment,As),As.forEach(t),Hs.forEach(t),_=d(nn),E=s(nn,"SPAN",{});var Ds=a(E);Te=i(Ds,"Transformer XL"),Ds.forEach(t),nn.forEach(t),J=d(o),X=s(o,"H2",{class:!0});var sn=a(X);K=s(sn,"A",{id:!0,class:!0,href:!0});var Is=a(K);H=s(Is,"SPAN",{});var Ws=a(H);w(Q.$$.fragment,Ws),Ws.forEach(t),Is.forEach(t),ve=d(sn),A=s(sn,"SPAN",{});var Bs=a(A);be=i(Bs,"Overview"),Bs.forEach(t),sn.forEach(t),fe=d(o),D=s(o,"P",{});var an=a(D);j=i(an,"The Transformer-XL model was proposed in "),Z=s(an,"A",{href:!0,rel:!0});var Rd=a(Z);te=i(Rd,"Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),Rd.forEach(t),F=i(an,` by Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. It\u2019s a causal (uni-directional) transformer with relative positioning (sinuso\xEFdal) embeddings which can reuse previously computed hidden-states to attend to longer context (memory). This model also uses adaptive softmax inputs and outputs (tied).`),an.forEach(t),C=d(o),oe=s(o,"P",{});var Gd=a(oe);V=i(Gd,"The abstract from the paper is the following:"),Gd.forEach(t),he=d(o),ne=s(o,"P",{});var Kd=a(ne);I=s(Kd,"EM",{});var Yd=a(I);we=i(Yd,`Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens.`),Yd.forEach(t),Kd.forEach(t),me=d(o),z=s(o,"P",{});var Jd=a(z);ye=i(Jd,"Tips:"),Jd.forEach(t),W=d(o),Y=s(o,"UL",{});var ga=a(Y);ie=s(ga,"LI",{});var Qd=a(ie);U=i(Qd,`Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right. 
The original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left.`),Qd.forEach(t),Le=d(ga),le=s(ga,"LI",{});var Zd=a(le);S=i(Zd,"Transformer-XL is one of the few models that has no sequence length limit."),Zd.forEach(t),ga.forEach(t),pe=d(o),q=s(o,"P",{});var zn=a(q);ke=i(zn,"This model was contributed by "),h=s(zn,"A",{href:!0,rel:!0});var ec=a(h);M=i(ec,"thomwolf"),ec.forEach(t),se=i(zn,". The original code can be found "),ue=s(zn,"A",{href:!0,rel:!0});var tc=a(ue);Be=i(tc,"here"),tc.forEach(t),O=i(zn,"."),zn.forEach(t),He=d(o),ge=s(o,"P",{});var Bd=a(ge);Fe=s(Bd,"STRONG",{});var oc=a(Fe);N=i(oc,"Note"),oc.forEach(t),R=i(Bd,":"),Bd.forEach(t),Ae=d(o),xe=s(o,"UL",{});var nc=a(xe);P=s(nc,"LI",{});var rn=a(P);Ve=i(rn,"TransformerXL does "),ze=s(rn,"STRONG",{});var sc=a(ze);$e=i(sc,"not"),sc.forEach(t),Ue=i(rn," work with "),Hn=s(rn,"EM",{});var ac=a(Hn);Va=i(ac,"torch.nn.DataParallel"),ac.forEach(t),Ua=i(rn," due to a bug in PyTorch, see "),Jt=s(rn,"A",{href:!0,rel:!0});var rc=a(Jt);Ra=i(rc,"issue #36035"),rc.forEach(t),rn.forEach(t),nc.forEach(t),Vs=d(o),it=s(o,"H2",{class:!0});var _a=a(it);Et=s(_a,"A",{id:!0,class:!0,href:!0});var ic=a(Et);An=s(ic,"SPAN",{});var lc=a(An);w(Qt.$$.fragment,lc),lc.forEach(t),ic.forEach(t),Ga=d(_a),Dn=s(_a,"SPAN",{});var dc=a(Dn);Ka=i(dc,"TransfoXLConfig"),dc.forEach(t),_a.forEach(t),Us=d(o),Xe=s(o,"DIV",{class:!0});var Je=a(Xe);w(Zt.$$.fragment,Je),Ya=d(Je),Ye=s(Je,"P",{});var Gt=a(Ye);Ja=i(Gt,"This is the configuration class to store the configuration of a "),ln=s(Gt,"A",{href:!0});var cc=a(ln);Qa=i(cc,"TransfoXLModel"),cc.forEach(t),Za=i(Gt,` or a `),dn=s(Gt,"A",{href:!0});var fc=a(dn);er=i(fc,"TFTransfoXLModel"),fc.forEach(t),tr=i(Gt,`. It is used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),eo=s(Gt,"A",{href:!0,rel:!0});var hc=a(eo);or=i(hc,"Transformer XL"),hc.forEach(t),nr=i(Gt," architecture."),Gt.forEach(t),sr=d(Je),lt=s(Je,"P",{});var Cn=a(lt);ar=i(Cn,"Configuration objects inherit from "),cn=s(Cn,"A",{href:!0});var mc=a(cn);rr=i(mc,"PretrainedConfig"),mc.forEach(t),ir=i(Cn,` and can be used to control the model outputs. Read the documentation from `),fn=s(Cn,"A",{href:!0});var pc=a(fn);lr=i(pc,"PretrainedConfig"),pc.forEach(t),dr=i(Cn," for more information."),Cn.forEach(t),cr=d(Je),In=s(Je,"P",{});var uc=a(In);fr=i(uc,"Examples:"),uc.forEach(t),hr=d(Je),w(to.$$.fragment,Je),Je.forEach(t),Rs=d(o),dt=s(o,"H2",{class:!0});var Ta=a(dt);Ft=s(Ta,"A",{id:!0,class:!0,href:!0});var gc=a(Ft);Wn=s(gc,"SPAN",{});var _c=a(Wn);w(oo.$$.fragment,_c),_c.forEach(t),gc.forEach(t),mr=d(Ta),Bn=s(Ta,"SPAN",{});var Tc=a(Bn);pr=i(Tc,"TransfoXLTokenizer"),Tc.forEach(t),Ta.forEach(t),Gs=d(o),De=s(o,"DIV",{class:!0});var Kt=a(De);w(no.$$.fragment,Kt),ur=d(Kt),so=s(Kt,"P",{});var va=a(so);gr=i(va,"Construct a Transformer-XL tokenizer adapted from Vocab class in "),ao=s(va,"A",{href:!0,rel:!0});var vc=a(ao);_r=i(vc,"the original code"),vc.forEach(t),Tr=i(va,`. The Transformer-XL tokenizer is a word-level tokenizer (no sub-word tokenization).`),va.forEach(t),vr=d(Kt),ro=s(Kt,"P",{});var ba=a(ro);br=i(ba,"This tokenizer inherits from "),hn=s(ba,"A",{href:!0});var bc=a(hn);wr=i(bc,"PreTrainedTokenizer"),bc.forEach(t),yr=i(ba,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),ba.forEach(t),Lr=d(Kt),Vn=s(Kt,"DIV",{class:!0}),a(Vn).forEach(t),Kt.forEach(t),Ks=d(o),ct=s(o,"H2",{class:!0});var wa=a(ct);zt=s(wa,"A",{id:!0,class:!0,href:!0});var wc=a(zt);Un=s(wc,"SPAN",{});var yc=a(Un);w(io.$$.fragment,yc),yc.forEach(t),wc.forEach(t),kr=d(wa),Rn=s(wa,"SPAN",{});var Lc=a(Rn);xr=i(Lc,"TransfoXL specific outputs"),Lc.forEach(t),wa.forEach(t),Ys=d(o),ft=s(o,"DIV",{class:!0});var ya=a(ft);w(lo.$$.fragment,ya),$r=d(ya),Gn=s(ya,"P",{});var kc=a(Gn);Xr=i(kc,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),kc.forEach(t),ya.forEach(t),Js=d(o),ht=s(o,"DIV",{class:!0});var La=a(ht);w(co.$$.fragment,La),Mr=d(La),Kn=s(La,"P",{});var xc=a(Kn);Er=i(xc,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),xc.forEach(t),La.forEach(t),Qs=d(o),mt=s(o,"DIV",{class:!0});var ka=a(mt);w(fo.$$.fragment,ka),Fr=d(ka),Yn=s(ka,"P",{});var $c=a(Yn);zr=i($c,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),$c.forEach(t),ka.forEach(t),Zs=d(o),pt=s(o,"DIV",{class:!0});var xa=a(pt);w(ho.$$.fragment,xa),Cr=d(xa),Jn=s(xa,"P",{});var Xc=a(Jn);qr=i(Xc,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Xc.forEach(t),xa.forEach(t),ea=d(o),ut=s(o,"H2",{class:!0});var $a=a(ut);Ct=s($a,"A",{id:!0,class:!0,href:!0});var Mc=a(Ct);Qn=s(Mc,"SPAN",{});var Ec=a(Qn);w(mo.$$.fragment,Ec),Ec.forEach(t),Mc.forEach(t),Pr=d($a),Zn=s($a,"SPAN",{});var Fc=a(Zn);jr=i(Fc,"TransfoXLModel"),Fc.forEach(t),$a.forEach(t),ta=d(o),Me=s(o,"DIV",{class:!0});var Qe=a(Me);w(po.$$.fragment,Qe),Sr=d(Qe),es=s(Qe,"P",{});var zc=a(es);Or=i(zc,"The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),zc.forEach(t),Nr=d(Qe),uo=s(Qe,"P",{});var Xa=a(uo);Hr=i(Xa,"This model inherits from "),mn=s(Xa,"A",{href:!0});var Cc=a(mn);Ar=i(Cc,"PreTrainedModel"),Cc.forEach(t),Dr=i(Xa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xa.forEach(t),Ir=d(Qe),go=s(Qe,"P",{});var Ma=a(go);Wr=i(Ma,"This model is also a PyTorch "),_o=s(Ma,"A",{href:!0,rel:!0});var qc=a(_o);Br=i(qc,"torch.nn.Module"),qc.forEach(t),Vr=i(Ma,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ma.forEach(t),Ur=d(Qe),Ce=s(Qe,"DIV",{class:!0});var Ze=a(Ce);w(To.$$.fragment,Ze),Rr=d(Ze),gt=s(Ze,"P",{});var qn=a(gt);Gr=i(qn,"The "),pn=s(qn,"A",{href:!0});var Pc=a(pn);Kr=i(Pc,"TransfoXLModel"),Pc.forEach(t),Yr=i(qn," forward method, overrides the "),ts=s(qn,"CODE",{});var jc=a(ts);Jr=i(jc,"__call__"),jc.forEach(t),Qr=i(qn," special method."),qn.forEach(t),Zr=d(Ze),w(qt.$$.fragment,Ze),ei=d(Ze),os=s(Ze,"P",{});var Sc=a(os);ti=i(Sc,"Example:"),Sc.forEach(t),oi=d(Ze),w(vo.$$.fragment,Ze),Ze.forEach(t),Qe.forEach(t),oa=d(o),_t=s(o,"H2",{class:!0});var Ea=a(_t);Pt=s(Ea,"A",{id:!0,class:!0,href:!0});var Oc=a(Pt);ns=s(Oc,"SPAN",{});var Nc=a(ns);w(bo.$$.fragment,Nc),Nc.forEach(t),Oc.forEach(t),ni=d(Ea),ss=s(Ea,"SPAN",{});var Hc=a(ss);si=i(Hc,"TransfoXLLMHeadModel"),Hc.forEach(t),Ea.forEach(t),na=d(o),Ee=s(o,"DIV",{class:!0});var et=a(Ee);w(wo.$$.fragment,et),ai=d(et),as=s(et,"P",{});var Ac=a(as);ri=i(Ac,`The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive input embeddings)`),Ac.forEach(t),ii=d(et),yo=s(et,"P",{});var Fa=a(yo);li=i(Fa,"This model inherits from "),un=s(Fa,"A",{href:!0});var Dc=a(un);di=i(Dc,"PreTrainedModel"),Dc.forEach(t),ci=i(Fa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fa.forEach(t),fi=d(et),Lo=s(et,"P",{});var za=a(Lo);hi=i(za,"This model is also a PyTorch "),ko=s(za,"A",{href:!0,rel:!0});var Ic=a(ko);mi=i(Ic,"torch.nn.Module"),Ic.forEach(t),pi=i(za,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),za.forEach(t),ui=d(et),qe=s(et,"DIV",{class:!0});var tt=a(qe);w(xo.$$.fragment,tt),gi=d(tt),Tt=s(tt,"P",{});var Pn=a(Tt);_i=i(Pn,"The "),gn=s(Pn,"A",{href:!0});var Wc=a(gn);Ti=i(Wc,"TransfoXLLMHeadModel"),Wc.forEach(t),vi=i(Pn," forward method, overrides the "),rs=s(Pn,"CODE",{});var Bc=a(rs);bi=i(Bc,"__call__"),Bc.forEach(t),wi=i(Pn," special method."),Pn.forEach(t),yi=d(tt),w(jt.$$.fragment,tt),Li=d(tt),is=s(tt,"P",{});var Vc=a(is);ki=i(Vc,"Example:"),Vc.forEach(t),xi=d(tt),w($o.$$.fragment,tt),tt.forEach(t),et.forEach(t),sa=d(o),vt=s(o,"H2",{class:!0});var Ca=a(vt);St=s(Ca,"A",{id:!0,class:!0,href:!0});var Uc=a(St);ls=s(Uc,"SPAN",{});var Rc=a(ls);w(Xo.$$.fragment,Rc),Rc.forEach(t),Uc.forEach(t),$i=d(Ca),ds=s(Ca,"SPAN",{});var Gc=a(ds);Xi=i(Gc,"TransfoXLForSequenceClassification"),Gc.forEach(t),Ca.forEach(t),aa=d(o),ee=s(o,"DIV",{class:!0});var Oe=a(ee);w(Mo.$$.fragment,Oe),Mi=d(Oe),cs=s(Oe,"P",{});var Kc=a(cs);Ei=i(Kc,"The Transformer-XL Model transformer with a sequence classification head on top (linear layer)."),Kc.forEach(t),Fi=d(Oe),_n=s(Oe,"P",{});var Vd=a(_n);Tn=s(Vd,"A",{href:!0});var Yc=a(Tn);zi=i(Yc,"TransfoXLForSequenceClassification"),Yc.forEach(t),Ci=i(Vd,` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),Vd.forEach(t),qi=d(Oe),Ie=s(Oe,"P",{});var ot=a(Ie);Pi=i(ot,`Since it does classification on the last token, it requires to know the position of the last token. If a `),fs=s(ot,"CODE",{});var Jc=a(fs);ji=i(Jc,"pad_token_id"),Jc.forEach(t),Si=i(ot,` is defined in the configuration, it finds the last token that is not a padding token in each row. 
If no `),hs=s(ot,"CODE",{});var Qc=a(hs);Oi=i(Qc,"pad_token_id"),Qc.forEach(t),Ni=i(ot,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),ms=s(ot,"CODE",{});var Zc=a(ms);Hi=i(Zc,"inputs_embeds"),Zc.forEach(t),Ai=i(ot," are passed instead of "),ps=s(ot,"CODE",{});var ef=a(ps);Di=i(ef,"input_ids"),ef.forEach(t),Ii=i(ot,`, it does the same (take the last value in each row of the batch).`),ot.forEach(t),Wi=d(Oe),Eo=s(Oe,"P",{});var qa=a(Eo);Bi=i(qa,"This model inherits from "),vn=s(qa,"A",{href:!0});var tf=a(vn);Vi=i(tf,"PreTrainedModel"),tf.forEach(t),Ui=i(qa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qa.forEach(t),Ri=d(Oe),Fo=s(Oe,"P",{});var Pa=a(Fo);Gi=i(Pa,"This model is also a PyTorch "),zo=s(Pa,"A",{href:!0,rel:!0});var of=a(zo);Ki=i(of,"torch.nn.Module"),of.forEach(t),Yi=i(Pa,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pa.forEach(t),Ji=d(Oe),ae=s(Oe,"DIV",{class:!0});var Ne=a(ae);w(Co.$$.fragment,Ne),Qi=d(Ne),bt=s(Ne,"P",{});var jn=a(bt);Zi=i(jn,"The "),bn=s(jn,"A",{href:!0});var nf=a(bn);el=i(nf,"TransfoXLForSequenceClassification"),nf.forEach(t),tl=i(jn," forward method, overrides the "),us=s(jn,"CODE",{});var sf=a(us);ol=i(sf,"__call__"),sf.forEach(t),nl=i(jn," special method."),jn.forEach(t),sl=d(Ne),w(Ot.$$.fragment,Ne),al=d(Ne),gs=s(Ne,"P",{});var af=a(gs);rl=i(af,"Example of single-label classification:"),af.forEach(t),il=d(Ne),w(qo.$$.fragment,Ne),ll=d(Ne),_s=s(Ne,"P",{});var rf=a(_s);dl=i(rf,"Example of multi-label classification:"),rf.forEach(t),cl=d(Ne),w(Po.$$.fragment,Ne),Ne.forEach(t),Oe.forEach(t),ra=d(o),wt=s(o,"H2",{class:!0});var ja=a(wt);Nt=s(ja,"A",{id:!0,class:!0,href:!0});var lf=a(Nt);Ts=s(lf,"SPAN",{});var df=a(Ts);w(jo.$$.fragment,df),df.forEach(t),lf.forEach(t),fl=d(ja),vs=s(ja,"SPAN",{});var cf=a(vs);hl=i(cf,"TFTransfoXLModel"),cf.forEach(t),ja.forEach(t),ia=d(o),de=s(o,"DIV",{class:!0});var Re=a(de);w(So.$$.fragment,Re),ml=d(Re),bs=s(Re,"P",{});var ff=a(bs);pl=i(ff,"The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),ff.forEach(t),ul=d(Re),Oo=s(Re,"P",{});var Sa=a(Oo);gl=i(Sa,"This model inherits from "),wn=s(Sa,"A",{href:!0});var hf=a(wn);_l=i(hf,"TFPreTrainedModel"),hf.forEach(t),Tl=i(Sa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sa.forEach(t),vl=d(Re),No=s(Re,"P",{});var Oa=a(No);bl=i(Oa,"This model is also a "),Ho=s(Oa,"A",{href:!0,rel:!0});var mf=a(Ho);wl=i(mf,"tf.keras.Model"),mf.forEach(t),yl=i(Oa,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Oa.forEach(t),Ll=d(Re),w(Ht.$$.fragment,Re),kl=d(Re),Pe=s(Re,"DIV",{class:!0});var nt=a(Pe);w(Ao.$$.fragment,nt),xl=d(nt),yt=s(nt,"P",{});var Sn=a(yt);$l=i(Sn,"The "),yn=s(Sn,"A",{href:!0});var pf=a(yn);Xl=i(pf,"TFTransfoXLModel"),pf.forEach(t),Ml=i(Sn," forward method, overrides the "),ws=s(Sn,"CODE",{});var uf=a(ws);El=i(uf,"__call__"),uf.forEach(t),Fl=i(Sn," special method."),Sn.forEach(t),zl=d(nt),w(At.$$.fragment,nt),Cl=d(nt),ys=s(nt,"P",{});var gf=a(ys);ql=i(gf,"Example:"),gf.forEach(t),Pl=d(nt),w(Do.$$.fragment,nt),nt.forEach(t),Re.forEach(t),la=d(o),Lt=s(o,"H2",{class:!0});var Na=a(Lt);Dt=s(Na,"A",{id:!0,class:!0,href:!0});var _f=a(Dt);Ls=s(_f,"SPAN",{});var Tf=a(Ls);w(Io.$$.fragment,Tf),Tf.forEach(t),_f.forEach(t),jl=d(Na),ks=s(Na,"SPAN",{});var vf=a(ks);Sl=i(vf,"TFTransfoXLLMHeadModel"),vf.forEach(t),Na.forEach(t),da=d(o),ce=s(o,"DIV",{class:!0});var Ge=a(ce);w(Wo.$$.fragment,Ge),Ol=d(Ge),xs=s(Ge,"P",{});var bf=a(xs);Nl=i(bf,`The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive input embeddings)`),bf.forEach(t),Hl=d(Ge),Bo=s(Ge,"P",{});var Ha=a(Bo);Al=i(Ha,"This model inherits from "),Ln=s(Ha,"A",{href:!0});var wf=a(Ln);Dl=i(wf,"TFPreTrainedModel"),wf.forEach(t),Il=i(Ha,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ha.forEach(t),Wl=d(Ge),Vo=s(Ge,"P",{});var Aa=a(Vo);Bl=i(Aa,"This model is also a "),Uo=s(Aa,"A",{href:!0,rel:!0});var yf=a(Uo);Vl=i(yf,"tf.keras.Model"),yf.forEach(t),Ul=i(Aa,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Aa.forEach(t),Rl=d(Ge),w(It.$$.fragment,Ge),Gl=d(Ge),je=s(Ge,"DIV",{class:!0});var st=a(je);w(Ro.$$.fragment,st),Kl=d(st),kt=s(st,"P",{});var On=a(kt);Yl=i(On,"The "),kn=s(On,"A",{href:!0});var Lf=a(kn);Jl=i(Lf,"TFTransfoXLLMHeadModel"),Lf.forEach(t),Ql=i(On," forward method, overrides the "),$s=s(On,"CODE",{});var kf=a($s);Zl=i(kf,"__call__"),kf.forEach(t),ed=i(On," special method."),On.forEach(t),td=d(st),w(Wt.$$.fragment,st),od=d(st),Xs=s(st,"P",{});var xf=a(Xs);nd=i(xf,"Example:"),xf.forEach(t),sd=d(st),w(Go.$$.fragment,st),st.forEach(t),Ge.forEach(t),ca=d(o),xt=s(o,"H2",{class:!0});var Da=a(xt);Bt=s(Da,"A",{id:!0,class:!0,href:!0});var $f=a(Bt);Ms=s($f,"SPAN",{});var Xf=a(Ms);w(Ko.$$.fragment,Xf),Xf.forEach(t),$f.forEach(t),ad=d(Da),Es=s(Da,"SPAN",{});var Mf=a(Es);rd=i(Mf,"TFTransfoXLForSequenceClassification"),Mf.forEach(t),Da.forEach(t),fa=d(o),G=s(o,"DIV",{class:!0});var _e=a(G);w(Yo.$$.fragment,_e),id=d(_e),Fs=s(_e,"P",{});var Ef=a(Fs);ld=i(Ef,"The Transfo XL Model transformer with a sequence classification head on top (linear layer)."),Ef.forEach(t),dd=d(_e),xn=s(_e,"P",{});var Ud=a(xn);$n=s(Ud,"A",{href:!0});var Ff=a($n);cd=i(Ff,"TFTransfoXLForSequenceClassification"),Ff.forEach(t),fd=i(Ud,` uses the last token in order to do the classification, as other causal models (e.g. GPT-1,GPT-2) do.`),Ud.forEach(t),hd=d(_e),We=s(_e,"P",{});var at=a(We);md=i(at,`Since it does classification on the last token, it requires to know the position of the last token. 
If a `),zs=s(at,"CODE",{});var zf=a(zs);pd=i(zf,"pad_token_id"),zf.forEach(t),ud=i(at,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Cs=s(at,"CODE",{});var Cf=a(Cs);gd=i(Cf,"pad_token_id"),Cf.forEach(t),_d=i(at,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),qs=s(at,"CODE",{});var qf=a(qs);Td=i(qf,"inputs_embeds"),qf.forEach(t),vd=i(at," are passed instead of "),Ps=s(at,"CODE",{});var Pf=a(Ps);bd=i(Pf,"input_ids"),Pf.forEach(t),wd=i(at,`, it does the same (take the last value in each row of the batch).`),at.forEach(t),yd=d(_e),Jo=s(_e,"P",{});var Ia=a(Jo);Ld=i(Ia,"This model inherits from "),Xn=s(Ia,"A",{href:!0});var jf=a(Xn);kd=i(jf,"TFPreTrainedModel"),jf.forEach(t),xd=i(Ia,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ia.forEach(t),$d=d(_e),Qo=s(_e,"P",{});var Wa=a(Qo);Xd=i(Wa,"This model is also a "),Zo=s(Wa,"A",{href:!0,rel:!0});var Sf=a(Zo);Md=i(Sf,"tf.keras.Model"),Sf.forEach(t),Ed=i(Wa,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Wa.forEach(t),Fd=d(_e),w(Vt.$$.fragment,_e),zd=d(_e),Se=s(_e,"DIV",{class:!0});var rt=a(Se);w(en.$$.fragment,rt),Cd=d(rt),$t=s(rt,"P",{});var Nn=a($t);qd=i(Nn,"The "),Mn=s(Nn,"A",{href:!0});var Of=a(Mn);Pd=i(Of,"TFTransfoXLForSequenceClassification"),Of.forEach(t),jd=i(Nn," forward method, overrides the "),js=s(Nn,"CODE",{});var Nf=a(js);Sd=i(Nf,"__call__"),Nf.forEach(t),Od=i(Nn," special method."),Nn.forEach(t),Nd=d(rt),w(Ut.$$.fragment,rt),Hd=d(rt),Ss=s(rt,"P",{});var Hf=a(Ss);Ad=i(Hf,"Example:"),Hf.forEach(t),Dd=d(rt),w(tn.$$.fragment,rt),rt.forEach(t),_e.forEach(t),ha=d(o),Xt=s(o,"H2",{class:!0});var Ba=a(Xt);Rt=s(Ba,"A",{id:!0,class:!0,href:!0});var Af=a(Rt);Os=s(Af,"SPAN",{});var Df=a(Os);w(on.$$.fragment,Df),Df.forEach(t),Af.forEach(t),Id=d(Ba),Ns=s(Ba,"SPAN",{});var If=a(Ns);Wd=i(If,"Internal Layers"),If.forEach(t),Ba.forEach(t),ma=d(o),En=s(o,"DIV",{class:!0});var Wf=a(En);Wf.forEach(t),pa=d(o),Fn=s(o,"DIV",{class:!0});var Bf=a(Fn);Bf.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(ah)),c(g,"id","transformer-xl"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#transformer-xl"),c(u,"class","relative group"),c(K,"id","overview"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#overview"),c(X,"class","relative group"),c(Z,"href","https://arxiv.org/abs/1901.02860"),c(Z,"rel","nofollow"),c(h,"href","https://huggingface.co/thomwolf"),c(h,"rel","nofollow"),c(ue,"href","https://github.com/kimiyoung/transformer-xl"),c(ue,"rel","nofollow"),c(Jt,"href","https://github.com/pytorch/pytorch/issues/36035"),c(Jt,"rel","nofollow"),c(Et,"id","transformers.TransfoXLConfig"),c(Et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Et,"href","#transformers.TransfoXLConfig"),c(it,"class","relative 
group"),c(ln,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLModel"),c(dn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLModel"),c(eo,"href","https://huggingface.co/transfo-xl-wt103"),c(eo,"rel","nofollow"),c(cn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(fn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Xe,"class","docstring"),c(Ft,"id","transformers.TransfoXLTokenizer"),c(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ft,"href","#transformers.TransfoXLTokenizer"),c(dt,"class","relative group"),c(ao,"href","https://github.com/kimiyoung/transformer-xl"),c(ao,"rel","nofollow"),c(hn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Vn,"class","docstring"),c(De,"class","docstring"),c(zt,"id","transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput"),c(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zt,"href","#transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput"),c(ct,"class","relative group"),c(ft,"class","docstring"),c(ht,"class","docstring"),c(mt,"class","docstring"),c(pt,"class","docstring"),c(Ct,"id","transformers.TransfoXLModel"),c(Ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ct,"href","#transformers.TransfoXLModel"),c(ut,"class","relative group"),c(mn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(_o,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(_o,"rel","nofollow"),c(pn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLModel"),c(Ce,"class","docstring"),c(Me,"class","docstring"),c(Pt,"id","transformers.TransfoXLLMHeadModel"),c(Pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Pt,"href","#transformers.TransfoXLLMHeadModel"),c(_t,"class","relative group"),c(un,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ko,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ko,"rel","nofollow"),c(gn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLLMHeadModel"),c(qe,"class","docstring"),c(Ee,"class","docstring"),c(St,"id","transformers.TransfoXLForSequenceClassification"),c(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(St,"href","#transformers.TransfoXLForSequenceClassification"),c(vt,"class","relative 
group"),c(Tn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLForSequenceClassification"),c(vn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(zo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(zo,"rel","nofollow"),c(bn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TransfoXLForSequenceClassification"),c(ae,"class","docstring"),c(ee,"class","docstring"),c(Nt,"id","transformers.TFTransfoXLModel"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#transformers.TFTransfoXLModel"),c(wt,"class","relative group"),c(wn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ho,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ho,"rel","nofollow"),c(yn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLModel"),c(Pe,"class","docstring"),c(de,"class","docstring"),c(Dt,"id","transformers.TFTransfoXLLMHeadModel"),c(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dt,"href","#transformers.TFTransfoXLLMHeadModel"),c(Lt,"class","relative group"),c(Ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Uo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Uo,"rel","nofollow"),c(kn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLLMHeadModel"),c(je,"class","docstring"),c(ce,"class","docstring"),c(Bt,"id","transformers.TFTransfoXLForSequenceClassification"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#transformers.TFTransfoXLForSequenceClassification"),c(xt,"class","relative group"),c($n,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLForSequenceClassification"),c(Xn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Zo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Zo,"rel","nofollow"),c(Mn,"href","/docs/transformers/v4.15.0/en/model_doc/transformerxl#transformers.TFTransfoXLForSequenceClassification"),c(Se,"class","docstring"),c(G,"class","docstring"),c(Rt,"id","transformers.AdaptiveEmbedding"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.AdaptiveEmbedding"),c(Xt,"class","relative 
group"),c(En,"class","docstring"),c(Fn,"class","docstring")},m(o,m){e(document.head,p),f(o,$,m),f(o,u,m),e(u,g),e(g,v),y(T,v,null),e(u,_),e(u,E),e(E,Te),f(o,J,m),f(o,X,m),e(X,K),e(K,H),y(Q,H,null),e(X,ve),e(X,A),e(A,be),f(o,fe,m),f(o,D,m),e(D,j),e(D,Z),e(Z,te),e(D,F),f(o,C,m),f(o,oe,m),e(oe,V),f(o,he,m),f(o,ne,m),e(ne,I),e(I,we),f(o,me,m),f(o,z,m),e(z,ye),f(o,W,m),f(o,Y,m),e(Y,ie),e(ie,U),e(Y,Le),e(Y,le),e(le,S),f(o,pe,m),f(o,q,m),e(q,ke),e(q,h),e(h,M),e(q,se),e(q,ue),e(ue,Be),e(q,O),f(o,He,m),f(o,ge,m),e(ge,Fe),e(Fe,N),e(ge,R),f(o,Ae,m),f(o,xe,m),e(xe,P),e(P,Ve),e(P,ze),e(ze,$e),e(P,Ue),e(P,Hn),e(Hn,Va),e(P,Ua),e(P,Jt),e(Jt,Ra),f(o,Vs,m),f(o,it,m),e(it,Et),e(Et,An),y(Qt,An,null),e(it,Ga),e(it,Dn),e(Dn,Ka),f(o,Us,m),f(o,Xe,m),y(Zt,Xe,null),e(Xe,Ya),e(Xe,Ye),e(Ye,Ja),e(Ye,ln),e(ln,Qa),e(Ye,Za),e(Ye,dn),e(dn,er),e(Ye,tr),e(Ye,eo),e(eo,or),e(Ye,nr),e(Xe,sr),e(Xe,lt),e(lt,ar),e(lt,cn),e(cn,rr),e(lt,ir),e(lt,fn),e(fn,lr),e(lt,dr),e(Xe,cr),e(Xe,In),e(In,fr),e(Xe,hr),y(to,Xe,null),f(o,Rs,m),f(o,dt,m),e(dt,Ft),e(Ft,Wn),y(oo,Wn,null),e(dt,mr),e(dt,Bn),e(Bn,pr),f(o,Gs,m),f(o,De,m),y(no,De,null),e(De,ur),e(De,so),e(so,gr),e(so,ao),e(ao,_r),e(so,Tr),e(De,vr),e(De,ro),e(ro,br),e(ro,hn),e(hn,wr),e(ro,yr),e(De,Lr),e(De,Vn),f(o,Ks,m),f(o,ct,m),e(ct,zt),e(zt,Un),y(io,Un,null),e(ct,kr),e(ct,Rn),e(Rn,xr),f(o,Ys,m),f(o,ft,m),y(lo,ft,null),e(ft,$r),e(ft,Gn),e(Gn,Xr),f(o,Js,m),f(o,ht,m),y(co,ht,null),e(ht,Mr),e(ht,Kn),e(Kn,Er),f(o,Qs,m),f(o,mt,m),y(fo,mt,null),e(mt,Fr),e(mt,Yn),e(Yn,zr),f(o,Zs,m),f(o,pt,m),y(ho,pt,null),e(pt,Cr),e(pt,Jn),e(Jn,qr),f(o,ea,m),f(o,ut,m),e(ut,Ct),e(Ct,Qn),y(mo,Qn,null),e(ut,Pr),e(ut,Zn),e(Zn,jr),f(o,ta,m),f(o,Me,m),y(po,Me,null),e(Me,Sr),e(Me,es),e(es,Or),e(Me,Nr),e(Me,uo),e(uo,Hr),e(uo,mn),e(mn,Ar),e(uo,Dr),e(Me,Ir),e(Me,go),e(go,Wr),e(go,_o),e(_o,Br),e(go,Vr),e(Me,Ur),e(Me,Ce),y(To,Ce,null),e(Ce,Rr),e(Ce,gt),e(gt,Gr),e(gt,pn),e(pn,Kr),e(gt,Yr),e(gt,ts),e(ts,Jr),e(gt,Qr),e(Ce,Zr),y(qt,Ce,null),e(Ce,ei),e(Ce,os),e(os,ti),e(Ce,oi),y(vo,Ce,null),f(o,oa,m),f(o,_t,m),e(_t,Pt),e(Pt,ns),y(bo,ns,null),e(_t,ni),e(_t,ss),e(ss,si),f(o,na,m),f(o,Ee,m),y(wo,Ee,null),e(Ee,ai),e(Ee,as),e(as,ri),e(Ee,ii),e(Ee,yo),e(yo,li),e(yo,un),e(un,di),e(yo,ci),e(Ee,fi),e(Ee,Lo),e(Lo,hi),e(Lo,ko),e(ko,mi),e(Lo,pi),e(Ee,ui),e(Ee,qe),y(xo,qe,null),e(qe,gi),e(qe,Tt),e(Tt,_i),e(Tt,gn),e(gn,Ti),e(Tt,vi),e(Tt,rs),e(rs,bi),e(Tt,wi),e(qe,yi),y(jt,qe,null),e(qe,Li),e(qe,is),e(is,ki),e(qe,xi),y($o,qe,null),f(o,sa,m),f(o,vt,m),e(vt,St),e(St,ls),y(Xo,ls,null),e(vt,$i),e(vt,ds),e(ds,Xi),f(o,aa,m),f(o,ee,m),y(Mo,ee,null),e(ee,Mi),e(ee,cs),e(cs,Ei),e(ee,Fi),e(ee,_n),e(_n,Tn),e(Tn,zi),e(_n,Ci),e(ee,qi),e(ee,Ie),e(Ie,Pi),e(Ie,fs),e(fs,ji),e(Ie,Si),e(Ie,hs),e(hs,Oi),e(Ie,Ni),e(Ie,ms),e(ms,Hi),e(Ie,Ai),e(Ie,ps),e(ps,Di),e(Ie,Ii),e(ee,Wi),e(ee,Eo),e(Eo,Bi),e(Eo,vn),e(vn,Vi),e(Eo,Ui),e(ee,Ri),e(ee,Fo),e(Fo,Gi),e(Fo,zo),e(zo,Ki),e(Fo,Yi),e(ee,Ji),e(ee,ae),y(Co,ae,null),e(ae,Qi),e(ae,bt),e(bt,Zi),e(bt,bn),e(bn,el),e(bt,tl),e(bt,us),e(us,ol),e(bt,nl),e(ae,sl),y(Ot,ae,null),e(ae,al),e(ae,gs),e(gs,rl),e(ae,il),y(qo,ae,null),e(ae,ll),e(ae,_s),e(_s,dl),e(ae,cl),y(Po,ae,null),f(o,ra,m),f(o,wt,m),e(wt,Nt),e(Nt,Ts),y(jo,Ts,null),e(wt,fl),e(wt,vs),e(vs,hl),f(o,ia,m),f(o,de,m),y(So,de,null),e(de,ml),e(de,bs),e(bs,pl),e(de,ul),e(de,Oo),e(Oo,gl),e(Oo,wn),e(wn,_l),e(Oo,Tl),e(de,vl),e(de,No),e(No,bl),e(No,Ho),e(Ho,wl),e(No,yl),e(de,Ll),y(Ht,de,null),e(de,kl),e(de,Pe),y(Ao,Pe,null),e(Pe,xl),e(Pe,yt),e(yt,$l),e(yt,yn),e(yn,Xl),e(yt,Ml),e(yt,ws),e(ws,El),e(yt,Fl),e(Pe,zl),y(At,Pe,null),e(Pe,Cl),e(Pe,ys),e(ys,ql),e(Pe,Pl),y(Do,Pe,null),f(o,la,m),f(o,L
t,m),e(Lt,Dt),e(Dt,Ls),y(Io,Ls,null),e(Lt,jl),e(Lt,ks),e(ks,Sl),f(o,da,m),f(o,ce,m),y(Wo,ce,null),e(ce,Ol),e(ce,xs),e(xs,Nl),e(ce,Hl),e(ce,Bo),e(Bo,Al),e(Bo,Ln),e(Ln,Dl),e(Bo,Il),e(ce,Wl),e(ce,Vo),e(Vo,Bl),e(Vo,Uo),e(Uo,Vl),e(Vo,Ul),e(ce,Rl),y(It,ce,null),e(ce,Gl),e(ce,je),y(Ro,je,null),e(je,Kl),e(je,kt),e(kt,Yl),e(kt,kn),e(kn,Jl),e(kt,Ql),e(kt,$s),e($s,Zl),e(kt,ed),e(je,td),y(Wt,je,null),e(je,od),e(je,Xs),e(Xs,nd),e(je,sd),y(Go,je,null),f(o,ca,m),f(o,xt,m),e(xt,Bt),e(Bt,Ms),y(Ko,Ms,null),e(xt,ad),e(xt,Es),e(Es,rd),f(o,fa,m),f(o,G,m),y(Yo,G,null),e(G,id),e(G,Fs),e(Fs,ld),e(G,dd),e(G,xn),e(xn,$n),e($n,cd),e(xn,fd),e(G,hd),e(G,We),e(We,md),e(We,zs),e(zs,pd),e(We,ud),e(We,Cs),e(Cs,gd),e(We,_d),e(We,qs),e(qs,Td),e(We,vd),e(We,Ps),e(Ps,bd),e(We,wd),e(G,yd),e(G,Jo),e(Jo,Ld),e(Jo,Xn),e(Xn,kd),e(Jo,xd),e(G,$d),e(G,Qo),e(Qo,Xd),e(Qo,Zo),e(Zo,Md),e(Qo,Ed),e(G,Fd),y(Vt,G,null),e(G,zd),e(G,Se),y(en,Se,null),e(Se,Cd),e(Se,$t),e($t,qd),e($t,Mn),e(Mn,Pd),e($t,jd),e($t,js),e(js,Sd),e($t,Od),e(Se,Nd),y(Ut,Se,null),e(Se,Hd),e(Se,Ss),e(Ss,Ad),e(Se,Dd),y(tn,Se,null),f(o,ha,m),f(o,Xt,m),e(Xt,Rt),e(Rt,Os),y(on,Os,null),e(Xt,Id),e(Xt,Ns),e(Ns,Wd),f(o,ma,m),f(o,En,m),f(o,pa,m),f(o,Fn,m),ua=!0},p(o,[m]){const nn={};m&2&&(nn.$$scope={dirty:m,ctx:o}),qt.$set(nn);const Hs={};m&2&&(Hs.$$scope={dirty:m,ctx:o}),jt.$set(Hs);const As={};m&2&&(As.$$scope={dirty:m,ctx:o}),Ot.$set(As);const Ds={};m&2&&(Ds.$$scope={dirty:m,ctx:o}),Ht.$set(Ds);const sn={};m&2&&(sn.$$scope={dirty:m,ctx:o}),At.$set(sn);const Is={};m&2&&(Is.$$scope={dirty:m,ctx:o}),It.$set(Is);const Ws={};m&2&&(Ws.$$scope={dirty:m,ctx:o}),Wt.$set(Ws);const Bs={};m&2&&(Bs.$$scope={dirty:m,ctx:o}),Vt.$set(Bs);const an={};m&2&&(an.$$scope={dirty:m,ctx:o}),Ut.$set(an)},i(o){ua||(L(T.$$.fragment,o),L(Q.$$.fragment,o),L(Qt.$$.fragment,o),L(Zt.$$.fragment,o),L(to.$$.fragment,o),L(oo.$$.fragment,o),L(no.$$.fragment,o),L(io.$$.fragment,o),L(lo.$$.fragment,o),L(co.$$.fragment,o),L(fo.$$.fragment,o),L(ho.$$.fragment,o),L(mo.$$.fragment,o),L(po.$$.fragment,o),L(To.$$.fragment,o),L(qt.$$.fragment,o),L(vo.$$.fragment,o),L(bo.$$.fragment,o),L(wo.$$.fragment,o),L(xo.$$.fragment,o),L(jt.$$.fragment,o),L($o.$$.fragment,o),L(Xo.$$.fragment,o),L(Mo.$$.fragment,o),L(Co.$$.fragment,o),L(Ot.$$.fragment,o),L(qo.$$.fragment,o),L(Po.$$.fragment,o),L(jo.$$.fragment,o),L(So.$$.fragment,o),L(Ht.$$.fragment,o),L(Ao.$$.fragment,o),L(At.$$.fragment,o),L(Do.$$.fragment,o),L(Io.$$.fragment,o),L(Wo.$$.fragment,o),L(It.$$.fragment,o),L(Ro.$$.fragment,o),L(Wt.$$.fragment,o),L(Go.$$.fragment,o),L(Ko.$$.fragment,o),L(Yo.$$.fragment,o),L(Vt.$$.fragment,o),L(en.$$.fragment,o),L(Ut.$$.fragment,o),L(tn.$$.fragment,o),L(on.$$.fragment,o),ua=!0)},o(o){k(T.$$.fragment,o),k(Q.$$.fragment,o),k(Qt.$$.fragment,o),k(Zt.$$.fragment,o),k(to.$$.fragment,o),k(oo.$$.fragment,o),k(no.$$.fragment,o),k(io.$$.fragment,o),k(lo.$$.fragment,o),k(co.$$.fragment,o),k(fo.$$.fragment,o),k(ho.$$.fragment,o),k(mo.$$.fragment,o),k(po.$$.fragment,o),k(To.$$.fragment,o),k(qt.$$.fragment,o),k(vo.$$.fragment,o),k(bo.$$.fragment,o),k(wo.$$.fragment,o),k(xo.$$.fragment,o),k(jt.$$.fragment,o),k($o.$$.fragment,o),k(Xo.$$.fragment,o),k(Mo.$$.fragment,o),k(Co.$$.fragment,o),k(Ot.$$.fragment,o),k(qo.$$.fragment,o),k(Po.$$.fragment,o),k(jo.$$.fragment,o),k(So.$$.fragment,o),k(Ht.$$.fragment,o),k(Ao.$$.fragment,o),k(At.$$.fragment,o),k(Do.$$.fragment,o),k(Io.$$.fragment,o),k(Wo.$$.fragment,o),k(It.$$.fragment,o),k(Ro.$$.fragment,o),k(Wt.$$.fragment,o),k(Go.$$.fragment,o),k(Ko.$$.fragment,o),k(Yo.$$.fragment,o),k(Vt.$$.fragment,o),k(en.$$.fragmen
t,o),k(Ut.$$.fragment,o),k(tn.$$.fragment,o),k(on.$$.fragment,o),ua=!1},d(o){t(p),o&&t($),o&&t(u),x(T),o&&t(J),o&&t(X),x(Q),o&&t(fe),o&&t(D),o&&t(C),o&&t(oe),o&&t(he),o&&t(ne),o&&t(me),o&&t(z),o&&t(W),o&&t(Y),o&&t(pe),o&&t(q),o&&t(He),o&&t(ge),o&&t(Ae),o&&t(xe),o&&t(Vs),o&&t(it),x(Qt),o&&t(Us),o&&t(Xe),x(Zt),x(to),o&&t(Rs),o&&t(dt),x(oo),o&&t(Gs),o&&t(De),x(no),o&&t(Ks),o&&t(ct),x(io),o&&t(Ys),o&&t(ft),x(lo),o&&t(Js),o&&t(ht),x(co),o&&t(Qs),o&&t(mt),x(fo),o&&t(Zs),o&&t(pt),x(ho),o&&t(ea),o&&t(ut),x(mo),o&&t(ta),o&&t(Me),x(po),x(To),x(qt),x(vo),o&&t(oa),o&&t(_t),x(bo),o&&t(na),o&&t(Ee),x(wo),x(xo),x(jt),x($o),o&&t(sa),o&&t(vt),x(Xo),o&&t(aa),o&&t(ee),x(Mo),x(Co),x(Ot),x(qo),x(Po),o&&t(ra),o&&t(wt),x(jo),o&&t(ia),o&&t(de),x(So),x(Ht),x(Ao),x(At),x(Do),o&&t(la),o&&t(Lt),x(Io),o&&t(da),o&&t(ce),x(Wo),x(It),x(Ro),x(Wt),x(Go),o&&t(ca),o&&t(xt),x(Ko),o&&t(fa),o&&t(G),x(Yo),x(Vt),x(en),x(Ut),x(tn),o&&t(ha),o&&t(Xt),x(on),o&&t(ma),o&&t(En),o&&t(pa),o&&t(Fn)}}}const ah={local:"transformer-xl",sections:[{local:"overview",title:"Overview"},{local:"transformers.TransfoXLConfig",title:"TransfoXLConfig"},{local:"transformers.TransfoXLTokenizer",title:"TransfoXLTokenizer"},{local:"transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput",title:"TransfoXL specific outputs"},{local:"transformers.TransfoXLModel",title:"TransfoXLModel"},{local:"transformers.TransfoXLLMHeadModel",title:"TransfoXLLMHeadModel"},{local:"transformers.TransfoXLForSequenceClassification",title:"TransfoXLForSequenceClassification"},{local:"transformers.TFTransfoXLModel",title:"TFTransfoXLModel"},{local:"transformers.TFTransfoXLLMHeadModel",title:"TFTransfoXLLMHeadModel"},{local:"transformers.TFTransfoXLForSequenceClassification",title:"TFTransfoXLForSequenceClassification"},{local:"transformers.AdaptiveEmbedding",title:"Internal Layers"}],title:"Transformer XL"};function rh(B,p,$){let{fw:u}=p;return B.$$set=g=>{"fw"in g&&$(0,u=g.fw)},[u]}class mh extends Vf{constructor(p){super();Uf(this,p,rh,sh,Rf,{fw:0})}}export{mh as default,ah as metadata};
9,925
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/prophetnet.mdx-9fa5aa9f.js
import{S as Zh,i as el,s as tl,e as n,k as c,w as f,t as s,L as ol,c as r,d as o,m as h,a as d,x as _,h as a,b as i,J as e,g as p,y as g,q as v,o as k,B as T}from"../../chunks/vendor-b1433968.js";import{T as Rn}from"../../chunks/Tip-c3840994.js";import{D as q}from"../../chunks/Docstring-ff504c58.js";import{C as Go}from"../../chunks/CodeBlock-a320dbd7.js";import{I as re}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function nl(I){let u,P,m,y,N;return{c(){u=n("p"),P=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),y=s("Module"),N=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){u=r(w,"P",{});var b=d(u);P=a(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(b,"CODE",{});var z=d(m);y=a(z,"Module"),z.forEach(o),N=a(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(w,b){p(w,u,b),e(u,P),e(u,m),e(m,y),e(u,N)},d(w){w&&o(u)}}}function rl(I){let u,P,m,y,N;return{c(){u=n("p"),P=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),y=s("Module"),N=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){u=r(w,"P",{});var b=d(u);P=a(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(b,"CODE",{});var z=d(m);y=a(z,"Module"),z.forEach(o),N=a(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(w,b){p(w,u,b),e(u,P),e(u,m),e(m,y),e(u,N)},d(w){w&&o(u)}}}function sl(I){let u,P,m,y,N;return{c(){u=n("p"),P=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),y=s("Module"),N=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){u=r(w,"P",{});var b=d(u);P=a(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(b,"CODE",{});var z=d(m);y=a(z,"Module"),z.forEach(o),N=a(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(w,b){p(w,u,b),e(u,P),e(u,m),e(m,y),e(u,N)},d(w){w&&o(u)}}}function al(I){let u,P,m,y,N;return{c(){u=n("p"),P=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),y=s("Module"),N=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){u=r(w,"P",{});var b=d(u);P=a(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(b,"CODE",{});var z=d(m);y=a(z,"Module"),z.forEach(o),N=a(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(w,b){p(w,u,b),e(u,P),e(u,m),e(m,y),e(u,N)},d(w){w&&o(u)}}}function dl(I){let 
u,P,m,y,N;return{c(){u=n("p"),P=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),y=s("Module"),N=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){u=r(w,"P",{});var b=d(u);P=a(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(b,"CODE",{});var z=d(m);y=a(z,"Module"),z.forEach(o),N=a(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(w,b){p(w,u,b),e(u,P),e(u,m),e(m,y),e(u,N)},d(w){w&&o(u)}}}function il(I){let u,P,m,y,N,w,b,z,Xr,Jn,se,Wo,Zr,es,Qe,ts,os,Yn,ae,Ee,Ho,Xe,ns,Vo,rs,Kn,Me,ss,Ze,as,ds,Qn,ao,is,Xn,io,cs,Zn,co,Uo,hs,er,Ce,ls,et,ps,us,tr,de,Se,Ro,tt,ms,Jo,fs,or,U,ot,_s,nt,gs,ho,vs,ks,Ts,ie,bs,lo,ws,ys,po,Ps,Ns,nr,ce,De,Yo,rt,zs,Ko,qs,rr,$,st,$s,Qo,Fs,xs,at,Es,uo,Ms,Cs,Ss,R,dt,Ds,Xo,Os,Ls,it,mo,js,Zo,As,Is,fo,Bs,en,Gs,Ws,Oe,ct,Hs,tn,Vs,Us,W,ht,Rs,on,Js,Ys,lt,Ks,he,Qs,nn,Xs,Zs,rn,ea,ta,oa,Le,pt,na,ut,ra,sn,sa,aa,sr,le,je,an,mt,da,dn,ia,ar,pe,ft,ca,cn,ha,dr,ue,_t,la,hn,pa,ir,me,gt,ua,ln,ma,cr,fe,vt,fa,pn,_a,hr,_e,Ae,un,kt,ga,mn,va,lr,E,Tt,ka,bt,Ta,_o,ba,wa,ya,ge,Pa,wt,Na,za,fn,qa,$a,Fa,yt,xa,Pt,Ea,Ma,Ca,S,Nt,Sa,ve,Da,go,Oa,La,_n,ja,Aa,Ia,Ie,Ba,gn,Ga,Wa,zt,pr,ke,Be,vn,qt,Ha,kn,Va,ur,F,$t,Ua,Ft,Ra,vo,Ja,Ya,Ka,Te,Qa,xt,Xa,Za,Tn,ed,td,od,Et,nd,Mt,rd,sd,ad,B,dd,bn,id,cd,wn,hd,ld,yn,pd,ud,ko,md,fd,_d,D,Ct,gd,be,vd,To,kd,Td,Pn,bd,wd,yd,Ge,Pd,Nn,Nd,zd,St,mr,we,We,zn,Dt,qd,qn,$d,fr,x,Ot,Fd,Lt,xd,bo,Ed,Md,Cd,ye,Sd,jt,Dd,Od,$n,Ld,jd,Ad,At,Id,It,Bd,Gd,Wd,G,Hd,Fn,Vd,Ud,xn,Rd,Jd,En,Yd,Kd,wo,Qd,Xd,Zd,O,Bt,ei,Pe,ti,yo,oi,ni,Mn,ri,si,ai,He,di,Cn,ii,ci,Gt,_r,Ne,Ve,Sn,Wt,hi,Dn,li,gr,M,Ht,pi,Vt,ui,Po,mi,fi,_i,ze,gi,Ut,vi,ki,On,Ti,bi,wi,Rt,yi,Jt,Pi,Ni,zi,L,Yt,qi,qe,$i,No,Fi,xi,Ln,Ei,Mi,Ci,Ue,Si,jn,Di,Oi,Kt,vr,$e,Re,An,Qt,Li,In,ji,kr,C,Xt,Ai,Zt,Ii,zo,Bi,Gi,Wi,Fe,Hi,eo,Vi,Ui,Bn,Ri,Ji,Yi,to,Ki,oo,Qi,Xi,Zi,j,no,ec,xe,tc,qo,oc,nc,Gn,rc,sc,ac,Je,dc,Wn,ic,cc,ro,Tr;return w=new re({}),Xe=new re({}),tt=new re({}),ot=new q({props:{name:"class transformers.ProphetNetConfig",anchor:"transformers.ProphetNetConfig",parameters:[{name:"activation_dropout",val:" = 0.1"},{name:"activation_function",val:" = 'gelu'"},{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 1024"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"num_encoder_layers",val:" = 12"},{name:"num_encoder_attention_heads",val:" = 16"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"num_decoder_layers",val:" = 12"},{name:"num_decoder_attention_heads",val:" = 16"},{name:"attention_dropout",val:" = 0.1"},{name:"dropout",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"init_std",val:" = 0.02"},{name:"is_encoder_decoder",val:" = True"},{name:"add_cross_attention",val:" = True"},{name:"decoder_start_token_id",val:" = 0"},{name:"ngram",val:" = 2"},{name:"num_buckets",val:" = 32"},{name:"relative_max_distance",val:" = 128"},{name:"disable_ngram_loss",val:" = False"},{name:"eps",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 
2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/configuration_prophetnet.py#L29",parametersDescription:[{anchor:"transformers.ProphetNetConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.ProphetNetConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.ProphetNetConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetModel">ProphetNetModel</a>.`,name:"vocab_size"},{anchor:"transformers.ProphetNetConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.ProphetNetConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.ProphetNetConfig.num_encoder_layers",description:`<strong>num_encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"num_encoder_layers"},{anchor:"transformers.ProphetNetConfig.num_encoder_attention_heads",description:`<strong>num_encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_encoder_attention_heads"},{anchor:"transformers.ProphetNetConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the <code>intermediate</code> (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.ProphetNetConfig.num_decoder_layers",description:`<strong>num_decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"num_decoder_layers"},{anchor:"transformers.ProphetNetConfig.num_decoder_attention_heads",description:`<strong>num_decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"num_decoder_attention_heads"},{anchor:"transformers.ProphetNetConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention 
probabilities.`,name:"attention_dropout"},{anchor:"transformers.ProphetNetConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.ProphetNetConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.ProphetNetConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"init_std"},{anchor:"transformers.ProphetNetConfig.add_cross_attention",description:`<strong>add_cross_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether cross-attention layers should be added to the model.`,name:"add_cross_attention"},{anchor:"transformers.ProphetNetConfig.is_encoder_decoder",description:`<strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether this is an encoder/decoder model.`,name:"is_encoder_decoder"},{anchor:"transformers.ProphetNetConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Padding token id.`,name:"pad_token_id"},{anchor:"transformers.ProphetNetConfig.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Beginning of stream token id.`,name:"bos_token_id"},{anchor:"transformers.ProphetNetConfig.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; End of stream token id.`,name:"eos_token_id"},{anchor:"transformers.ProphetNetConfig.ngram",description:`<strong>ngram</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first token.`,name:"ngram"},{anchor:"transformers.ProphetNetConfig.num_buckets",description:`<strong>num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer. This is for relative position calculation. See the [T5 paper](see <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">https://arxiv.org/abs/1910.10683</a>) for more details.`,name:"num_buckets"},{anchor:"transformers.ProphetNetConfig.relative_max_distance",description:`<strong>relative_max_distance</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Relative distances greater than this number will be put into the last same bucket. This is for relative position calculation. 
See the [T5 paper](see <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">https://arxiv.org/abs/1910.10683</a>) for more details.`,name:"relative_max_distance"},{anchor:"transformers.ProphetNetConfig.disable_ngram_loss",description:`<strong>disable_ngram_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether be trained predicting only the next first token.`,name:"disable_ngram_loss"},{anchor:"transformers.ProphetNetConfig.eps",description:`<strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Controls the <code>epsilon</code> parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed.`,name:"eps"},{anchor:"transformers.ProphetNetConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),rt=new re({}),st=new q({props:{name:"class transformers.ProphetNetTokenizer",anchor:"transformers.ProphetNetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"x_sep_token",val:" = '[X_SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/tokenization_prophetnet.py#L55",parametersDescription:[{anchor:"transformers.ProphetNetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.ProphetNetTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.ProphetNetTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.ProphetNetTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.ProphetNetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.ProphetNetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.ProphetNetTokenizer.x_sep_token",description:`<strong>x_sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[X_SEP]&quot;</code>) &#x2014; Special second separator token, which can be generated by <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration">ProphetNetForConditionalGeneration</a>. It is used to separate bullet-point like sentences in summarization, <em>e.g.</em>.`,name:"x_sep_token"},{anchor:"transformers.ProphetNetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.ProphetNetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.ProphetNetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.ProphetNetTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),dt=new q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.ProphetNetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/tokenization_prophetnet.py#L262",parametersDescription:[{anchor:"transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ct=new q({props:{name:"convert_tokens_to_string",anchor:"transformers.ProphetNetTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/tokenization_prophetnet.py#L181"}}),ht=new q({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/tokenization_prophetnet.py#L213",parametersDescription:[{anchor:"transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),lt=new Go({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),pt=new q({props:{name:"get_special_tokens_mask",anchor:"transformers.ProphetNetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/tokenization_prophetnet.py#L186",parametersDescription:[{anchor:"transformers.ProphetNetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.ProphetNetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.ProphetNetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),mt=new re({}),ft=new q({props:{name:"class transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput",anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"logits_ngram",val:": typing.Optional[torch.FloatTensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_ngram_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_ngram_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L257",parametersDescription:[{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.logits_ngram",description:`<strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the predict stream language modeling head (scores for each vocabulary token 
before SoftMax).`,name:"logits_ngram"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_ngram_hidden_states",description:`<strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_ngram_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_ngram_attentions",description:`<strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention 
heads.`,name:"decoder_ngram_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"cross_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}]}}),_t=new q({props:{name:"class transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput",anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"last_hidden_state_ngram",val:": typing.Optional[torch.FloatTensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_ngram_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_ngram_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L336",parametersDescription:[{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) &#x2014; Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.last_hidden_state_ngram",description:`<strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.`,name:"last_hidden_state_ngram"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; 
Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_ngram_hidden_states",description:`<strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_ngram_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_ngram_attentions",description:`<strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"decoder_ngram_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"cross_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the 
model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}]}}),gt=new q({props:{name:"class transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput",anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"last_hidden_state_ngram",val:": typing.Optional[torch.FloatTensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"hidden_states_ngram",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"ngram_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L415",parametersDescription:[{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) &#x2014; Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.last_hidden_state_ngram",description:`<strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.`,name:"last_hidden_state_ngram"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.past_key_values",description:`<strong>past_key_values</strong> 
(<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.ngram_hidden_states",description:`<strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"ngram_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.ngram_attentions",description:`<strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"ngram_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"cross_attentions"}]}}),vt=new q({props:{name:"class transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput",anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"logits_ngram",val:": typing.Optional[torch.FloatTensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"hidden_states_ngram",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"ngram_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L470",parametersDescription:[{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.logits_ngram",description:`<strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits_ngram"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.ngram_hidden_states",description:`<strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.`,name:"ngram_hidden_states"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.ngram_attentions",description:`<strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"ngram_attentions"},{anchor:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the`,name:"cross_attentions"}]}}),kt=new re({}),Tt=new q({props:{name:"class transformers.ProphetNetModel",anchor:"transformers.ProphetNetModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1737",parametersDescription:[{anchor:"transformers.ProphetNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model 
configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Nt=new q({props:{name:"forward",anchor:"transformers.ProphetNetModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[typing.Tuple] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1769",parametersDescription:[{anchor:"transformers.ProphetNetModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ProphetNetModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ProphetNetModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>ProphetNet uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.ProphetNetModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.ProphetNetModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ProphetNetModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.ProphetNetModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.ProphetNetModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.ProphetNetModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.ProphetNetModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ProphetNetModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ProphetNetModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ProphetNetModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) \u2014 Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,ngram * decoder_sequence_length, config.vocab_size)</code>) \u2014 Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when 
<code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned 
when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ie=new Rn({props:{$$slots:{default:[nl]},$$scope:{ctx:I}}}),zt=new Go({props:{code:`from transformers import ProphetNetTokenizer, ProphetNetModel tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') model = ProphetNetModel.from_pretrained('microsoft/prophetnet-large-uncased') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state # main stream hidden states last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-comment"># main stream hidden states</span> <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states_ngram = outputs.last_hidden_state_ngram <span class="hljs-comment"># predict hidden states</span>`}}),qt=new re({}),$t=new 
q({props:{name:"class transformers.ProphetNetEncoder",anchor:"transformers.ProphetNetEncoder",parameters:[{name:"config",val:": ProphetNetConfig"},{name:"word_embeddings",val:": Embedding = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1229",parametersDescription:[{anchor:"transformers.ProphetNetEncoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ct=new q({props:{name:"forward",anchor:"transformers.ProphetNetEncoder.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1259",parametersDescription:[{anchor:"transformers.ProphetNetEncoder.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ProphetNetEncoder.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ProphetNetEncoder.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ProphetNetEncoder.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ProphetNetEncoder.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ProphetNetEncoder.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ge=new Rn({props:{$$slots:{default:[rl]},$$scope:{ctx:I}}}),St=new Go({props:{code:`from transformers import ProphetNetTokenizer, ProphetNetEncoder import torch tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') model = ProphetNetEncoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = 
outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetEncoder.from_pretrained(<span class="hljs-string">&#x27;patrickvonplaten/prophetnet-large-uncased-standalone&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Dt=new re({}),Ot=new q({props:{name:"class transformers.ProphetNetDecoder",anchor:"transformers.ProphetNetDecoder",parameters:[{name:"config",val:": ProphetNetConfig"},{name:"word_embeddings",val:": Embedding = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1369",parametersDescription:[{anchor:"transformers.ProphetNetDecoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bt=new q({props:{name:"forward",anchor:"transformers.ProphetNetDecoder.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1406",parametersDescription:[{anchor:"transformers.ProphetNetDecoder.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ProphetNetDecoder.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ProphetNetDecoder.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ProphetNetDecoder.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ProphetNetDecoder.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ProphetNetDecoder.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ProphetNetDecoder.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ProphetNetDecoder.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.ProphetNetDecoder.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.ProphetNetDecoder.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.ProphetNetDecoder.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) \u2014 Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) \u2014 Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder 
that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),He=new Rn({props:{$$slots:{default:[sl]},$$scope:{ctx:I}}}),Gt=new Go({props:{code:`from transformers import ProphetNetTokenizer, ProphetNetDecoder import torch tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') model = ProphetNetDecoder.from_pretrained('microsoft/prophetnet-large-uncased', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetDecoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetDecoder.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Wt=new re({}),Ht=new q({props:{name:"class transformers.ProphetNetForConditionalGeneration",anchor:"transformers.ProphetNetForConditionalGeneration",parameters:[{name:"config",val:": ProphetNetConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1862",parametersDescription:[{anchor:"transformers.ProphetNetForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Yt=new q({props:{name:"forward",anchor:"transformers.ProphetNetForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L1883",parametersDescription:[{anchor:"transformers.ProphetNetForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>ProphetNet uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ProphetNetForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ue=new Rn({props:{$$slots:{default:[al]},$$scope:{ctx:I}}}),Kt=new Go({props:{code:`from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) logits_next_token = outputs.logits # logits to predict next token as usual logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_next_token = outputs.logits <span class="hljs-comment"># logits to predict next token as usual</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_ngram_next_tokens = outputs.logits_ngram <span class="hljs-comment"># logits to predict 2nd, 3rd, ... next tokens</span>`}}),Qt=new re({}),Xt=new q({props:{name:"class transformers.ProphetNetForCausalLM",anchor:"transformers.ProphetNetForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L2068",parametersDescription:[{anchor:"transformers.ProphetNetForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),no=new q({props:{name:"forward",anchor:"transformers.ProphetNetForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/prophetnet/modeling_prophetnet.py#L2103",parametersDescription:[{anchor:"transformers.ProphetNetForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ProphetNetForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ProphetNetForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ProphetNetForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ProphetNetForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ProphetNetForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ProphetNetForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.ProphetNetForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.ProphetNetForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.ProphetNetForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.ProphetNetForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.ProphetNetForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels n <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, 
decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Je=new Rn({props:{$$slots:{default:[dl]},$$scope:{ctx:I}}}),ro=new Go({props:{code:`from transformers import ProphetNetTokenizer, ProphetNetForCausalLM import torch tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') model = ProphetNetForCausalLM.from_pretrained('microsoft/prophetnet-large-uncased') assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # Model can also be used with EncoderDecoder framework from transformers import BertTokenizer, EncoderDecoderModel, ProphetNetTokenizer import torch tokenizer_enc = BertTokenizer.from_pretrained('bert-large-uncased') tokenizer_dec = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-large-uncased", "microsoft/prophetnet-large-uncased") ARTICLE = ( "the us state department said wednesday it had received no " "formal word from bolivia that it was expelling the us ambassador there " "but said the charges made against him are \`\` baseless ." 
) input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids labels = tokenizer_dec("us rejects charges against its ambassador in bolivia", return_tensors="pt").input_ids outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:]) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetForCausalLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model can also be used with EncoderDecoder framework</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, EncoderDecoderModel, ProphetNetTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_enc = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_dec = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/prophetnet-large-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>, <span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE = ( <span class="hljs-meta">... </span><span class="hljs-string">&quot;the us state department said wednesday it had received no &quot;</span> <span class="hljs-meta">... </span><span class="hljs-string">&quot;formal word from bolivia that it was expelling the us ambassador there &quot;</span> <span class="hljs-meta">... </span><span class="hljs-string">&quot;but said the charges made against him are \`\` baseless .&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer_enc(ARTICLE, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer_dec(<span class="hljs-string">&quot;us rejects charges against its ambassador in bolivia&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-<span class="hljs-number">1</span>], labels=labels[:, <span class="hljs-number">1</span>:]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),{c(){u=n("meta"),P=c(),m=n("h1"),y=n("a"),N=n("span"),f(w.$$.fragment),b=c(),z=n("span"),Xr=s("ProphetNet"),Jn=c(),se=n("p"),Wo=n("strong"),Zr=s("DISCLAIMER:"),es=s(" If you see something strange, file a "),Qe=n("a"),ts=s("Github Issue"),os=s(` and assign @patrickvonplaten`),Yn=c(),ae=n("h2"),Ee=n("a"),Ho=n("span"),f(Xe.$$.fragment),ns=c(),Vo=n("span"),rs=s("Overview"),Kn=c(),Me=n("p"),ss=s("The ProphetNet model was proposed in "),Ze=n("a"),as=s("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),ds=s(` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020.`),Qn=c(),ao=n("p"),is=s(`ProphetNet is an encoder-decoder model and can predict n-future tokens for \u201Cngram\u201D language modeling instead of just the next token.`),Xn=c(),io=n("p"),cs=s("The abstract from the paper is the following:"),Zn=c(),co=n("p"),Uo=n("em"),hs=s(`In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.`),er=c(),Ce=n("p"),ls=s("The Authors\u2019 code can be found "),et=n("a"),ps=s("here"),us=s("."),tr=c(),de=n("h2"),Se=n("a"),Ro=n("span"),f(tt.$$.fragment),ms=c(),Jo=n("span"),fs=s("ProphetNetConfig"),or=c(),U=n("div"),f(ot.$$.fragment),_s=c(),nt=n("p"),gs=s("This is the configuration class to store the configuration of a "),ho=n("a"),vs=s("ProphetNetModel"),ks=s(`. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture.`),Ts=c(),ie=n("p"),bs=s("Configuration objects inherit from "),lo=n("a"),ws=s("PretrainedConfig"),ys=s(` and can be used to control the model outputs. 
Read the documentation from `),po=n("a"),Ps=s("PretrainedConfig"),Ns=s(" for more information."),nr=c(),ce=n("h2"),De=n("a"),Yo=n("span"),f(rt.$$.fragment),zs=c(),Ko=n("span"),qs=s("ProphetNetTokenizer"),rr=c(),$=n("div"),f(st.$$.fragment),$s=c(),Qo=n("p"),Fs=s("Construct a ProphetNetTokenizer. Based on WordPiece."),xs=c(),at=n("p"),Es=s("This tokenizer inherits from "),uo=n("a"),Ms=s("PreTrainedTokenizer"),Cs=s(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ss=c(),R=n("div"),f(dt.$$.fragment),Ds=c(),Xo=n("p"),Os=s(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),Ls=c(),it=n("ul"),mo=n("li"),js=s("single sequence: "),Zo=n("code"),As=s("[CLS] X [SEP]"),Is=c(),fo=n("li"),Bs=s("pair of sequences: "),en=n("code"),Gs=s("[CLS] A [SEP] B [SEP]"),Ws=c(),Oe=n("div"),f(ct.$$.fragment),Hs=c(),tn=n("p"),Vs=s("Converts a sequence of tokens (string) in a single string."),Us=c(),W=n("div"),f(ht.$$.fragment),Rs=c(),on=n("p"),Js=s(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ProphetNet sequence pair mask has the following format:`),Ys=c(),f(lt.$$.fragment),Ks=c(),he=n("p"),Qs=s("If "),nn=n("code"),Xs=s("token_ids_1"),Zs=s(" is "),rn=n("code"),ea=s("None"),ta=s(", this method only returns the first portion of the mask (0s)."),oa=c(),Le=n("div"),f(pt.$$.fragment),na=c(),ut=n("p"),ra=s(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),sn=n("code"),sa=s("prepare_for_model"),aa=s(" method."),sr=c(),le=n("h2"),je=n("a"),an=n("span"),f(mt.$$.fragment),da=c(),dn=n("span"),ia=s("ProphetNet specific outputs"),ar=c(),pe=n("div"),f(ft.$$.fragment),ca=c(),cn=n("p"),ha=s("Base class for sequence-to-sequence language models outputs."),dr=c(),ue=n("div"),f(_t.$$.fragment),la=c(),hn=n("p"),pa=s(`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),ir=c(),me=n("div"),f(gt.$$.fragment),ua=c(),ln=n("p"),ma=s("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),cr=c(),fe=n("div"),f(vt.$$.fragment),fa=c(),pn=n("p"),_a=s("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),hr=c(),_e=n("h2"),Ae=n("a"),un=n("span"),f(kt.$$.fragment),ga=c(),mn=n("span"),va=s("ProphetNetModel"),lr=c(),E=n("div"),f(Tt.$$.fragment),ka=c(),bt=n("p"),Ta=s(`The bare ProphetNet Model outputting raw hidden-states without any specific head on top. This model inherits from `),_o=n("a"),ba=s("PreTrainedModel"),wa=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ya=c(),ge=n("p"),Pa=s("Original ProphetNet code can be found at <"),wt=n("a"),Na=s("https://github.com/microsoft/ProphetNet>"),za=s(` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),fn=n("code"),qa=s("convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),$a=s("."),Fa=c(),yt=n("p"),xa=s("This model is a PyTorch "),Pt=n("a"),Ea=s("torch.nn.Module"),Ma=s(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Ca=c(),S=n("div"),f(Nt.$$.fragment),Sa=c(),ve=n("p"),Da=s("The "),go=n("a"),Oa=s("ProphetNetModel"),La=s(" forward method, overrides the "),_n=n("code"),ja=s("__call__"),Aa=s(" special method."),Ia=c(),f(Ie.$$.fragment),Ba=c(),gn=n("p"),Ga=s("Example:"),Wa=c(),f(zt.$$.fragment),pr=c(),ke=n("h2"),Be=n("a"),vn=n("span"),f(qt.$$.fragment),Ha=c(),kn=n("span"),Va=s("ProphetNetEncoder"),ur=c(),F=n("div"),f($t.$$.fragment),Ua=c(),Ft=n("p"),Ra=s(`The standalone encoder part of the ProphetNetModel. This model inherits from `),vo=n("a"),Ja=s("PreTrainedModel"),Ya=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ka=c(),Te=n("p"),Qa=s("Original ProphetNet code can be found at <"),xt=n("a"),Xa=s("https://github.com/microsoft/ProphetNet>"),Za=s(` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),Tn=n("code"),ed=s("convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),td=s("."),od=c(),Et=n("p"),nd=s("This model is a PyTorch "),Mt=n("a"),rd=s("torch.nn.Module"),sd=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),ad=c(),B=n("p"),dd=s("word_embeddings ("),bn=n("code"),id=s("torch.nn.Embeddings"),cd=s(" of shape "),wn=n("code"),hd=s("(config.vocab_size, config.hidden_size)"),ld=s(", "),yn=n("em"),pd=s("optional"),ud=s(`): The word embedding parameters. This can be used to initialize `),ko=n("a"),md=s("ProphetNetEncoder"),fd=s(` with pre-defined word embeddings instead of randomly initialized word embeddings.`),_d=c(),D=n("div"),f(Ct.$$.fragment),gd=c(),be=n("p"),vd=s("The "),To=n("a"),kd=s("ProphetNetEncoder"),Td=s(" forward method, overrides the "),Pn=n("code"),bd=s("__call__"),wd=s(" special method."),yd=c(),f(Ge.$$.fragment),Pd=c(),Nn=n("p"),Nd=s("Example:"),zd=c(),f(St.$$.fragment),mr=c(),we=n("h2"),We=n("a"),zn=n("span"),f(Dt.$$.fragment),qd=c(),qn=n("span"),$d=s("ProphetNetDecoder"),fr=c(),x=n("div"),f(Ot.$$.fragment),Fd=c(),Lt=n("p"),xd=s(`The standalone decoder part of the ProphetNetModel. This model inherits from `),bo=n("a"),Ed=s("PreTrainedModel"),Md=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cd=c(),ye=n("p"),Sd=s("Original ProphetNet code can be found at <"),jt=n("a"),Dd=s("https://github.com/microsoft/ProphetNet>"),Od=s(` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),$n=n("code"),Ld=s("convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),jd=s("."),Ad=c(),At=n("p"),Id=s("This model is a PyTorch "),It=n("a"),Bd=s("torch.nn.Module"),Gd=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Wd=c(),G=n("p"),Hd=s("word_embeddings ("),Fn=n("code"),Vd=s("torch.nn.Embeddings"),Ud=s(" of shape "),xn=n("code"),Rd=s("(config.vocab_size, config.hidden_size)"),Jd=s(", "),En=n("em"),Yd=s("optional"),Kd=s(`): The word embedding parameters. 
This can be used to initialize `),wo=n("a"),Qd=s("ProphetNetEncoder"),Xd=s(` with pre-defined word embeddings instead of randomly initialized word embeddings.`),Zd=c(),O=n("div"),f(Bt.$$.fragment),ei=c(),Pe=n("p"),ti=s("The "),yo=n("a"),oi=s("ProphetNetDecoder"),ni=s(" forward method, overrides the "),Mn=n("code"),ri=s("__call__"),si=s(" special method."),ai=c(),f(He.$$.fragment),di=c(),Cn=n("p"),ii=s("Example:"),ci=c(),f(Gt.$$.fragment),_r=c(),Ne=n("h2"),Ve=n("a"),Sn=n("span"),f(Wt.$$.fragment),hi=c(),Dn=n("span"),li=s("ProphetNetForConditionalGeneration"),gr=c(),M=n("div"),f(Ht.$$.fragment),pi=c(),Vt=n("p"),ui=s(`The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks. This model inherits from `),Po=n("a"),mi=s("PreTrainedModel"),fi=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_i=c(),ze=n("p"),gi=s("Original ProphetNet code can be found at <"),Ut=n("a"),vi=s("https://github.com/microsoft/ProphetNet>"),ki=s(` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),On=n("code"),Ti=s("convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),bi=s("."),wi=c(),Rt=n("p"),yi=s("This model is a PyTorch "),Jt=n("a"),Pi=s("torch.nn.Module"),Ni=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),zi=c(),L=n("div"),f(Yt.$$.fragment),qi=c(),qe=n("p"),$i=s("The "),No=n("a"),Fi=s("ProphetNetForConditionalGeneration"),xi=s(" forward method, overrides the "),Ln=n("code"),Ei=s("__call__"),Mi=s(" special method."),Ci=c(),f(Ue.$$.fragment),Si=c(),jn=n("p"),Di=s("Example:"),Oi=c(),f(Kt.$$.fragment),vr=c(),$e=n("h2"),Re=n("a"),An=n("span"),f(Qt.$$.fragment),Li=c(),In=n("span"),ji=s("ProphetNetForCausalLM"),kr=c(),C=n("div"),f(Xt.$$.fragment),Ai=c(),Zt=n("p"),Ii=s(`The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal language modeling. This model inherits from `),zo=n("a"),Bi=s("PreTrainedModel"),Gi=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wi=c(),Fe=n("p"),Hi=s("Original ProphetNet code can be found at <"),eo=n("a"),Vi=s("https://github.com/microsoft/ProphetNet>"),Ui=s(` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),Bn=n("code"),Ri=s("convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),Ji=s("."),Yi=c(),to=n("p"),Ki=s("This model is a PyTorch "),oo=n("a"),Qi=s("torch.nn.Module"),Xi=s(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Zi=c(),j=n("div"),f(no.$$.fragment),ec=c(),xe=n("p"),tc=s("The "),qo=n("a"),oc=s("ProphetNetForCausalLM"),nc=s(" forward method, overrides the "),Gn=n("code"),rc=s("__call__"),sc=s(" special method."),ac=c(),f(Je.$$.fragment),dc=c(),Wn=n("p"),ic=s("Example:"),cc=c(),f(ro.$$.fragment),this.h()},l(t){const l=ol('[data-svelte="svelte-1phssyn"]',document.head);u=r(l,"META",{name:!0,content:!0}),l.forEach(o),P=h(t),m=r(t,"H1",{class:!0});var so=d(m);y=r(so,"A",{id:!0,class:!0,href:!0});var Hn=d(y);N=r(Hn,"SPAN",{});var Vn=d(N);_(w.$$.fragment,Vn),Vn.forEach(o),Hn.forEach(o),b=h(so),z=r(so,"SPAN",{});var Un=d(z);Xr=a(Un,"ProphetNet"),Un.forEach(o),so.forEach(o),Jn=h(t),se=r(t,"P",{});var Ye=d(se);Wo=r(Ye,"STRONG",{});var pc=d(Wo);Zr=a(pc,"DISCLAIMER:"),pc.forEach(o),es=a(Ye," If you see something strange, file a "),Qe=r(Ye,"A",{href:!0,rel:!0});var uc=d(Qe);ts=a(uc,"Github Issue"),uc.forEach(o),os=a(Ye,` and assign @patrickvonplaten`),Ye.forEach(o),Yn=h(t),ae=r(t,"H2",{class:!0});var br=d(ae);Ee=r(br,"A",{id:!0,class:!0,href:!0});var mc=d(Ee);Ho=r(mc,"SPAN",{});var fc=d(Ho);_(Xe.$$.fragment,fc),fc.forEach(o),mc.forEach(o),ns=h(br),Vo=r(br,"SPAN",{});var _c=d(Vo);rs=a(_c,"Overview"),_c.forEach(o),br.forEach(o),Kn=h(t),Me=r(t,"P",{});var wr=d(Me);ss=a(wr,"The ProphetNet model was proposed in "),Ze=r(wr,"A",{href:!0,rel:!0});var gc=d(Ze);as=a(gc,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),gc.forEach(o),ds=a(wr,` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020.`),wr.forEach(o),Qn=h(t),ao=r(t,"P",{});var vc=d(ao);is=a(vc,`ProphetNet is an encoder-decoder model and can predict n-future tokens for \u201Cngram\u201D language modeling instead of just the next token.`),vc.forEach(o),Xn=h(t),io=r(t,"P",{});var kc=d(io);cs=a(kc,"The abstract from the paper is the following:"),kc.forEach(o),Zn=h(t),co=r(t,"P",{});var Tc=d(co);Uo=r(Tc,"EM",{});var bc=d(Uo);hs=a(bc,`In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. 
Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.`),bc.forEach(o),Tc.forEach(o),er=h(t),Ce=r(t,"P",{});var yr=d(Ce);ls=a(yr,"The Authors\u2019 code can be found "),et=r(yr,"A",{href:!0,rel:!0});var wc=d(et);ps=a(wc,"here"),wc.forEach(o),us=a(yr,"."),yr.forEach(o),tr=h(t),de=r(t,"H2",{class:!0});var Pr=d(de);Se=r(Pr,"A",{id:!0,class:!0,href:!0});var yc=d(Se);Ro=r(yc,"SPAN",{});var Pc=d(Ro);_(tt.$$.fragment,Pc),Pc.forEach(o),yc.forEach(o),ms=h(Pr),Jo=r(Pr,"SPAN",{});var Nc=d(Jo);fs=a(Nc,"ProphetNetConfig"),Nc.forEach(o),Pr.forEach(o),or=h(t),U=r(t,"DIV",{class:!0});var $o=d(U);_(ot.$$.fragment,$o),_s=h($o),nt=r($o,"P",{});var Nr=d(nt);gs=a(Nr,"This is the configuration class to store the configuration of a "),ho=r(Nr,"A",{href:!0});var zc=d(ho);vs=a(zc,"ProphetNetModel"),zc.forEach(o),ks=a(Nr,`. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture.`),Nr.forEach(o),Ts=h($o),ie=r($o,"P",{});var Fo=d(ie);bs=a(Fo,"Configuration objects inherit from "),lo=r(Fo,"A",{href:!0});var qc=d(lo);ws=a(qc,"PretrainedConfig"),qc.forEach(o),ys=a(Fo,` and can be used to control the model outputs. Read the documentation from `),po=r(Fo,"A",{href:!0});var $c=d(po);Ps=a($c,"PretrainedConfig"),$c.forEach(o),Ns=a(Fo," for more information."),Fo.forEach(o),$o.forEach(o),nr=h(t),ce=r(t,"H2",{class:!0});var zr=d(ce);De=r(zr,"A",{id:!0,class:!0,href:!0});var Fc=d(De);Yo=r(Fc,"SPAN",{});var xc=d(Yo);_(rt.$$.fragment,xc),xc.forEach(o),Fc.forEach(o),zs=h(zr),Ko=r(zr,"SPAN",{});var Ec=d(Ko);qs=a(Ec,"ProphetNetTokenizer"),Ec.forEach(o),zr.forEach(o),rr=h(t),$=r(t,"DIV",{class:!0});var A=d($);_(st.$$.fragment,A),$s=h(A),Qo=r(A,"P",{});var Mc=d(Qo);Fs=a(Mc,"Construct a ProphetNetTokenizer. Based on WordPiece."),Mc.forEach(o),xs=h(A),at=r(A,"P",{});var qr=d(at);Es=a(qr,"This tokenizer inherits from "),uo=r(qr,"A",{href:!0});var Cc=d(uo);Ms=a(Cc,"PreTrainedTokenizer"),Cc.forEach(o),Cs=a(qr,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),qr.forEach(o),Ss=h(A),R=r(A,"DIV",{class:!0});var xo=d(R);_(dt.$$.fragment,xo),Ds=h(xo),Xo=r(xo,"P",{});var Sc=d(Xo);Os=a(Sc,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),Sc.forEach(o),Ls=h(xo),it=r(xo,"UL",{});var $r=d(it);mo=r($r,"LI",{});var hc=d(mo);js=a(hc,"single sequence: "),Zo=r(hc,"CODE",{});var Dc=d(Zo);As=a(Dc,"[CLS] X [SEP]"),Dc.forEach(o),hc.forEach(o),Is=h($r),fo=r($r,"LI",{});var lc=d(fo);Bs=a(lc,"pair of sequences: "),en=r(lc,"CODE",{});var Oc=d(en);Gs=a(Oc,"[CLS] A [SEP] B [SEP]"),Oc.forEach(o),lc.forEach(o),$r.forEach(o),xo.forEach(o),Ws=h(A),Oe=r(A,"DIV",{class:!0});var Fr=d(Oe);_(ct.$$.fragment,Fr),Hs=h(Fr),tn=r(Fr,"P",{});var Lc=d(tn);Vs=a(Lc,"Converts a sequence of tokens (string) in a single string."),Lc.forEach(o),Fr.forEach(o),Us=h(A),W=r(A,"DIV",{class:!0});var Ke=d(W);_(ht.$$.fragment,Ke),Rs=h(Ke),on=r(Ke,"P",{});var jc=d(on);Js=a(jc,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A ProphetNet sequence pair mask has the following format:`),jc.forEach(o),Ys=h(Ke),_(lt.$$.fragment,Ke),Ks=h(Ke),he=r(Ke,"P",{});var Eo=d(he);Qs=a(Eo,"If "),nn=r(Eo,"CODE",{});var Ac=d(nn);Xs=a(Ac,"token_ids_1"),Ac.forEach(o),Zs=a(Eo," is "),rn=r(Eo,"CODE",{});var Ic=d(rn);ea=a(Ic,"None"),Ic.forEach(o),ta=a(Eo,", this method only returns the first portion of the mask (0s)."),Eo.forEach(o),Ke.forEach(o),oa=h(A),Le=r(A,"DIV",{class:!0});var xr=d(Le);_(pt.$$.fragment,xr),na=h(xr),ut=r(xr,"P",{});var Er=d(ut);ra=a(Er,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),sn=r(Er,"CODE",{});var Bc=d(sn);sa=a(Bc,"prepare_for_model"),Bc.forEach(o),aa=a(Er," method."),Er.forEach(o),xr.forEach(o),A.forEach(o),sr=h(t),le=r(t,"H2",{class:!0});var Mr=d(le);je=r(Mr,"A",{id:!0,class:!0,href:!0});var Gc=d(je);an=r(Gc,"SPAN",{});var Wc=d(an);_(mt.$$.fragment,Wc),Wc.forEach(o),Gc.forEach(o),da=h(Mr),dn=r(Mr,"SPAN",{});var Hc=d(dn);ia=a(Hc,"ProphetNet specific outputs"),Hc.forEach(o),Mr.forEach(o),ar=h(t),pe=r(t,"DIV",{class:!0});var Cr=d(pe);_(ft.$$.fragment,Cr),ca=h(Cr),cn=r(Cr,"P",{});var Vc=d(cn);ha=a(Vc,"Base class for sequence-to-sequence language models outputs."),Vc.forEach(o),Cr.forEach(o),dr=h(t),ue=r(t,"DIV",{class:!0});var Sr=d(ue);_(_t.$$.fragment,Sr),la=h(Sr),hn=r(Sr,"P",{});var Uc=d(hn);pa=a(Uc,`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Uc.forEach(o),Sr.forEach(o),ir=h(t),me=r(t,"DIV",{class:!0});var Dr=d(me);_(gt.$$.fragment,Dr),ua=h(Dr),ln=r(Dr,"P",{});var Rc=d(ln);ma=a(Rc,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Rc.forEach(o),Dr.forEach(o),cr=h(t),fe=r(t,"DIV",{class:!0});var Or=d(fe);_(vt.$$.fragment,Or),fa=h(Or),pn=r(Or,"P",{});var Jc=d(pn);_a=a(Jc,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Jc.forEach(o),Or.forEach(o),hr=h(t),_e=r(t,"H2",{class:!0});var Lr=d(_e);Ae=r(Lr,"A",{id:!0,class:!0,href:!0});var Yc=d(Ae);un=r(Yc,"SPAN",{});var Kc=d(un);_(kt.$$.fragment,Kc),Kc.forEach(o),Yc.forEach(o),ga=h(Lr),mn=r(Lr,"SPAN",{});var Qc=d(mn);va=a(Qc,"ProphetNetModel"),Qc.forEach(o),Lr.forEach(o),lr=h(t),E=r(t,"DIV",{class:!0});var J=d(E);_(Tt.$$.fragment,J),ka=h(J),bt=r(J,"P",{});var jr=d(bt);Ta=a(jr,`The bare ProphetNet Model outputting raw hidden-states without any specific head on top. This model inherits from `),_o=r(jr,"A",{href:!0});var Xc=d(_o);ba=a(Xc,"PreTrainedModel"),Xc.forEach(o),wa=a(jr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jr.forEach(o),ya=h(J),ge=r(J,"P",{});var Mo=d(ge);Pa=a(Mo,"Original ProphetNet code can be found at <"),wt=r(Mo,"A",{href:!0,rel:!0});var Zc=d(wt);Na=a(Zc,"https://github.com/microsoft/ProphetNet>"),Zc.forEach(o),za=a(Mo,` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),fn=r(Mo,"CODE",{});var eh=d(fn);qa=a(eh,"convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),eh.forEach(o),$a=a(Mo,"."),Mo.forEach(o),Fa=h(J),yt=r(J,"P",{});var Ar=d(yt);xa=a(Ar,"This model is a PyTorch "),Pt=r(Ar,"A",{href:!0,rel:!0});var th=d(Pt);Ea=a(th,"torch.nn.Module"),th.forEach(o),Ma=a(Ar,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Ar.forEach(o),Ca=h(J),S=r(J,"DIV",{class:!0});var Y=d(S);_(Nt.$$.fragment,Y),Sa=h(Y),ve=r(Y,"P",{});var Co=d(ve);Da=a(Co,"The "),go=r(Co,"A",{href:!0});var oh=d(go);Oa=a(oh,"ProphetNetModel"),oh.forEach(o),La=a(Co," forward method, overrides the "),_n=r(Co,"CODE",{});var nh=d(_n);ja=a(nh,"__call__"),nh.forEach(o),Aa=a(Co," special method."),Co.forEach(o),Ia=h(Y),_(Ie.$$.fragment,Y),Ba=h(Y),gn=r(Y,"P",{});var rh=d(gn);Ga=a(rh,"Example:"),rh.forEach(o),Wa=h(Y),_(zt.$$.fragment,Y),Y.forEach(o),J.forEach(o),pr=h(t),ke=r(t,"H2",{class:!0});var Ir=d(ke);Be=r(Ir,"A",{id:!0,class:!0,href:!0});var sh=d(Be);vn=r(sh,"SPAN",{});var ah=d(vn);_(qt.$$.fragment,ah),ah.forEach(o),sh.forEach(o),Ha=h(Ir),kn=r(Ir,"SPAN",{});var dh=d(kn);Va=a(dh,"ProphetNetEncoder"),dh.forEach(o),Ir.forEach(o),ur=h(t),F=r(t,"DIV",{class:!0});var H=d(F);_($t.$$.fragment,H),Ua=h(H),Ft=r(H,"P",{});var Br=d(Ft);Ra=a(Br,`The standalone encoder part of the ProphetNetModel. This model inherits from `),vo=r(Br,"A",{href:!0});var ih=d(vo);Ja=a(ih,"PreTrainedModel"),ih.forEach(o),Ya=a(Br,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Br.forEach(o),Ka=h(H),Te=r(H,"P",{});var So=d(Te);Qa=a(So,"Original ProphetNet code can be found at <"),xt=r(So,"A",{href:!0,rel:!0});var ch=d(xt);Xa=a(ch,"https://github.com/microsoft/ProphetNet>"),ch.forEach(o),Za=a(So,` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),Tn=r(So,"CODE",{});var hh=d(Tn);ed=a(hh,"convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),hh.forEach(o),td=a(So,"."),So.forEach(o),od=h(H),Et=r(H,"P",{});var Gr=d(Et);nd=a(Gr,"This model is a PyTorch "),Mt=r(Gr,"A",{href:!0,rel:!0});var lh=d(Mt);rd=a(lh,"torch.nn.Module"),lh.forEach(o),sd=a(Gr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Gr.forEach(o),ad=h(H),B=r(H,"P",{});var K=d(B);dd=a(K,"word_embeddings ("),bn=r(K,"CODE",{});var ph=d(bn);id=a(ph,"torch.nn.Embeddings"),ph.forEach(o),cd=a(K," of shape "),wn=r(K,"CODE",{});var uh=d(wn);hd=a(uh,"(config.vocab_size, config.hidden_size)"),uh.forEach(o),ld=a(K,", "),yn=r(K,"EM",{});var mh=d(yn);pd=a(mh,"optional"),mh.forEach(o),ud=a(K,`): The word embedding parameters. 
This can be used to initialize `),ko=r(K,"A",{href:!0});var fh=d(ko);md=a(fh,"ProphetNetEncoder"),fh.forEach(o),fd=a(K,` with pre-defined word embeddings instead of randomly initialized word embeddings.`),K.forEach(o),_d=h(H),D=r(H,"DIV",{class:!0});var Q=d(D);_(Ct.$$.fragment,Q),gd=h(Q),be=r(Q,"P",{});var Do=d(be);vd=a(Do,"The "),To=r(Do,"A",{href:!0});var _h=d(To);kd=a(_h,"ProphetNetEncoder"),_h.forEach(o),Td=a(Do," forward method, overrides the "),Pn=r(Do,"CODE",{});var gh=d(Pn);bd=a(gh,"__call__"),gh.forEach(o),wd=a(Do," special method."),Do.forEach(o),yd=h(Q),_(Ge.$$.fragment,Q),Pd=h(Q),Nn=r(Q,"P",{});var vh=d(Nn);Nd=a(vh,"Example:"),vh.forEach(o),zd=h(Q),_(St.$$.fragment,Q),Q.forEach(o),H.forEach(o),mr=h(t),we=r(t,"H2",{class:!0});var Wr=d(we);We=r(Wr,"A",{id:!0,class:!0,href:!0});var kh=d(We);zn=r(kh,"SPAN",{});var Th=d(zn);_(Dt.$$.fragment,Th),Th.forEach(o),kh.forEach(o),qd=h(Wr),qn=r(Wr,"SPAN",{});var bh=d(qn);$d=a(bh,"ProphetNetDecoder"),bh.forEach(o),Wr.forEach(o),fr=h(t),x=r(t,"DIV",{class:!0});var V=d(x);_(Ot.$$.fragment,V),Fd=h(V),Lt=r(V,"P",{});var Hr=d(Lt);xd=a(Hr,`The standalone decoder part of the ProphetNetModel. This model inherits from `),bo=r(Hr,"A",{href:!0});var wh=d(bo);Ed=a(wh,"PreTrainedModel"),wh.forEach(o),Md=a(Hr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hr.forEach(o),Cd=h(V),ye=r(V,"P",{});var Oo=d(ye);Sd=a(Oo,"Original ProphetNet code can be found at <"),jt=r(Oo,"A",{href:!0,rel:!0});var yh=d(jt);Dd=a(yh,"https://github.com/microsoft/ProphetNet>"),yh.forEach(o),Od=a(Oo,` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),$n=r(Oo,"CODE",{});var Ph=d($n);Ld=a(Ph,"convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),Ph.forEach(o),jd=a(Oo,"."),Oo.forEach(o),Ad=h(V),At=r(V,"P",{});var Vr=d(At);Id=a(Vr,"This model is a PyTorch "),It=r(Vr,"A",{href:!0,rel:!0});var Nh=d(It);Bd=a(Nh,"torch.nn.Module"),Nh.forEach(o),Gd=a(Vr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Vr.forEach(o),Wd=h(V),G=r(V,"P",{});var X=d(G);Hd=a(X,"word_embeddings ("),Fn=r(X,"CODE",{});var zh=d(Fn);Vd=a(zh,"torch.nn.Embeddings"),zh.forEach(o),Ud=a(X," of shape "),xn=r(X,"CODE",{});var qh=d(xn);Rd=a(qh,"(config.vocab_size, config.hidden_size)"),qh.forEach(o),Jd=a(X,", "),En=r(X,"EM",{});var $h=d(En);Yd=a($h,"optional"),$h.forEach(o),Kd=a(X,`): The word embedding parameters. 
This can be used to initialize `),wo=r(X,"A",{href:!0});var Fh=d(wo);Qd=a(Fh,"ProphetNetEncoder"),Fh.forEach(o),Xd=a(X,` with pre-defined word embeddings instead of randomly initialized word embeddings.`),X.forEach(o),Zd=h(V),O=r(V,"DIV",{class:!0});var Z=d(O);_(Bt.$$.fragment,Z),ei=h(Z),Pe=r(Z,"P",{});var Lo=d(Pe);ti=a(Lo,"The "),yo=r(Lo,"A",{href:!0});var xh=d(yo);oi=a(xh,"ProphetNetDecoder"),xh.forEach(o),ni=a(Lo," forward method, overrides the "),Mn=r(Lo,"CODE",{});var Eh=d(Mn);ri=a(Eh,"__call__"),Eh.forEach(o),si=a(Lo," special method."),Lo.forEach(o),ai=h(Z),_(He.$$.fragment,Z),di=h(Z),Cn=r(Z,"P",{});var Mh=d(Cn);ii=a(Mh,"Example:"),Mh.forEach(o),ci=h(Z),_(Gt.$$.fragment,Z),Z.forEach(o),V.forEach(o),_r=h(t),Ne=r(t,"H2",{class:!0});var Ur=d(Ne);Ve=r(Ur,"A",{id:!0,class:!0,href:!0});var Ch=d(Ve);Sn=r(Ch,"SPAN",{});var Sh=d(Sn);_(Wt.$$.fragment,Sh),Sh.forEach(o),Ch.forEach(o),hi=h(Ur),Dn=r(Ur,"SPAN",{});var Dh=d(Dn);li=a(Dh,"ProphetNetForConditionalGeneration"),Dh.forEach(o),Ur.forEach(o),gr=h(t),M=r(t,"DIV",{class:!0});var ee=d(M);_(Ht.$$.fragment,ee),pi=h(ee),Vt=r(ee,"P",{});var Rr=d(Vt);ui=a(Rr,`The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks. This model inherits from `),Po=r(Rr,"A",{href:!0});var Oh=d(Po);mi=a(Oh,"PreTrainedModel"),Oh.forEach(o),fi=a(Rr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rr.forEach(o),_i=h(ee),ze=r(ee,"P",{});var jo=d(ze);gi=a(jo,"Original ProphetNet code can be found at <"),Ut=r(jo,"A",{href:!0,rel:!0});var Lh=d(Ut);vi=a(Lh,"https://github.com/microsoft/ProphetNet>"),Lh.forEach(o),ki=a(jo,` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),On=r(jo,"CODE",{});var jh=d(On);Ti=a(jh,"convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),jh.forEach(o),bi=a(jo,"."),jo.forEach(o),wi=h(ee),Rt=r(ee,"P",{});var Jr=d(Rt);yi=a(Jr,"This model is a PyTorch "),Jt=r(Jr,"A",{href:!0,rel:!0});var Ah=d(Jt);Pi=a(Ah,"torch.nn.Module"),Ah.forEach(o),Ni=a(Jr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Jr.forEach(o),zi=h(ee),L=r(ee,"DIV",{class:!0});var te=d(L);_(Yt.$$.fragment,te),qi=h(te),qe=r(te,"P",{});var Ao=d(qe);$i=a(Ao,"The "),No=r(Ao,"A",{href:!0});var Ih=d(No);Fi=a(Ih,"ProphetNetForConditionalGeneration"),Ih.forEach(o),xi=a(Ao," forward method, overrides the "),Ln=r(Ao,"CODE",{});var Bh=d(Ln);Ei=a(Bh,"__call__"),Bh.forEach(o),Mi=a(Ao," special method."),Ao.forEach(o),Ci=h(te),_(Ue.$$.fragment,te),Si=h(te),jn=r(te,"P",{});var Gh=d(jn);Di=a(Gh,"Example:"),Gh.forEach(o),Oi=h(te),_(Kt.$$.fragment,te),te.forEach(o),ee.forEach(o),vr=h(t),$e=r(t,"H2",{class:!0});var Yr=d($e);Re=r(Yr,"A",{id:!0,class:!0,href:!0});var Wh=d(Re);An=r(Wh,"SPAN",{});var Hh=d(An);_(Qt.$$.fragment,Hh),Hh.forEach(o),Wh.forEach(o),Li=h(Yr),In=r(Yr,"SPAN",{});var Vh=d(In);ji=a(Vh,"ProphetNetForCausalLM"),Vh.forEach(o),Yr.forEach(o),kr=h(t),C=r(t,"DIV",{class:!0});var oe=d(C);_(Xt.$$.fragment,oe),Ai=h(oe),Zt=r(oe,"P",{});var Kr=d(Zt);Ii=a(Kr,`The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal language modeling. This model inherits from `),zo=r(Kr,"A",{href:!0});var Uh=d(zo);Bi=a(Uh,"PreTrainedModel"),Uh.forEach(o),Gi=a(Kr,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kr.forEach(o),Wi=h(oe),Fe=r(oe,"P",{});var Io=d(Fe);Hi=a(Io,"Original ProphetNet code can be found at <"),eo=r(Io,"A",{href:!0,rel:!0});var Rh=d(eo);Vi=a(Rh,"https://github.com/microsoft/ProphetNet>"),Rh.forEach(o),Ui=a(Io,` . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `),Bn=r(Io,"CODE",{});var Jh=d(Bn);Ri=a(Jh,"convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py"),Jh.forEach(o),Ji=a(Io,"."),Io.forEach(o),Yi=h(oe),to=r(oe,"P",{});var Qr=d(to);Ki=a(Qr,"This model is a PyTorch "),oo=r(Qr,"A",{href:!0,rel:!0});var Yh=d(oo);Qi=a(Yh,"torch.nn.Module"),Yh.forEach(o),Xi=a(Qr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.`),Qr.forEach(o),Zi=h(oe),j=r(oe,"DIV",{class:!0});var ne=d(j);_(no.$$.fragment,ne),ec=h(ne),xe=r(ne,"P",{});var Bo=d(xe);tc=a(Bo,"The "),qo=r(Bo,"A",{href:!0});var Kh=d(qo);oc=a(Kh,"ProphetNetForCausalLM"),Kh.forEach(o),nc=a(Bo," forward method, overrides the "),Gn=r(Bo,"CODE",{});var Qh=d(Gn);rc=a(Qh,"__call__"),Qh.forEach(o),sc=a(Bo," special method."),Bo.forEach(o),ac=h(ne),_(Je.$$.fragment,ne),dc=h(ne),Wn=r(ne,"P",{});var Xh=d(Wn);ic=a(Xh,"Example:"),Xh.forEach(o),cc=h(ne),_(ro.$$.fragment,ne),ne.forEach(o),oe.forEach(o),this.h()},h(){i(u,"name","hf:doc:metadata"),i(u,"content",JSON.stringify(cl)),i(y,"id","prophetnet"),i(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(y,"href","#prophetnet"),i(m,"class","relative group"),i(Qe,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),i(Qe,"rel","nofollow"),i(Ee,"id","overview"),i(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ee,"href","#overview"),i(ae,"class","relative group"),i(Ze,"href","https://arxiv.org/abs/2001.04063"),i(Ze,"rel","nofollow"),i(et,"href","https://github.com/microsoft/ProphetNet"),i(et,"rel","nofollow"),i(Se,"id","transformers.ProphetNetConfig"),i(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Se,"href","#transformers.ProphetNetConfig"),i(de,"class","relative group"),i(ho,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetModel"),i(lo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(po,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(U,"class","docstring"),i(De,"id","transformers.ProphetNetTokenizer"),i(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(De,"href","#transformers.ProphetNetTokenizer"),i(ce,"class","relative 
group"),i(uo,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),i(R,"class","docstring"),i(Oe,"class","docstring"),i(W,"class","docstring"),i(Le,"class","docstring"),i($,"class","docstring"),i(je,"id","transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput"),i(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(je,"href","#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput"),i(le,"class","relative group"),i(pe,"class","docstring"),i(ue,"class","docstring"),i(me,"class","docstring"),i(fe,"class","docstring"),i(Ae,"id","transformers.ProphetNetModel"),i(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ae,"href","#transformers.ProphetNetModel"),i(_e,"class","relative group"),i(_o,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(wt,"href","https://github.com/microsoft/ProphetNet%3E"),i(wt,"rel","nofollow"),i(Pt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(Pt,"rel","nofollow"),i(go,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetModel"),i(S,"class","docstring"),i(E,"class","docstring"),i(Be,"id","transformers.ProphetNetEncoder"),i(Be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Be,"href","#transformers.ProphetNetEncoder"),i(ke,"class","relative group"),i(vo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(xt,"href","https://github.com/microsoft/ProphetNet%3E"),i(xt,"rel","nofollow"),i(Mt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(Mt,"rel","nofollow"),i(ko,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetEncoder"),i(To,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetEncoder"),i(D,"class","docstring"),i(F,"class","docstring"),i(We,"id","transformers.ProphetNetDecoder"),i(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(We,"href","#transformers.ProphetNetDecoder"),i(we,"class","relative group"),i(bo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(jt,"href","https://github.com/microsoft/ProphetNet%3E"),i(jt,"rel","nofollow"),i(It,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(It,"rel","nofollow"),i(wo,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetEncoder"),i(yo,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetDecoder"),i(O,"class","docstring"),i(x,"class","docstring"),i(Ve,"id","transformers.ProphetNetForConditionalGeneration"),i(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ve,"href","#transformers.ProphetNetForConditionalGeneration"),i(Ne,"class","relative 
group"),i(Po,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(Ut,"href","https://github.com/microsoft/ProphetNet%3E"),i(Ut,"rel","nofollow"),i(Jt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(Jt,"rel","nofollow"),i(No,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration"),i(L,"class","docstring"),i(M,"class","docstring"),i(Re,"id","transformers.ProphetNetForCausalLM"),i(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Re,"href","#transformers.ProphetNetForCausalLM"),i($e,"class","relative group"),i(zo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(eo,"href","https://github.com/microsoft/ProphetNet%3E"),i(eo,"rel","nofollow"),i(oo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(oo,"rel","nofollow"),i(qo,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForCausalLM"),i(j,"class","docstring"),i(C,"class","docstring")},m(t,l){e(document.head,u),p(t,P,l),p(t,m,l),e(m,y),e(y,N),g(w,N,null),e(m,b),e(m,z),e(z,Xr),p(t,Jn,l),p(t,se,l),e(se,Wo),e(Wo,Zr),e(se,es),e(se,Qe),e(Qe,ts),e(se,os),p(t,Yn,l),p(t,ae,l),e(ae,Ee),e(Ee,Ho),g(Xe,Ho,null),e(ae,ns),e(ae,Vo),e(Vo,rs),p(t,Kn,l),p(t,Me,l),e(Me,ss),e(Me,Ze),e(Ze,as),e(Me,ds),p(t,Qn,l),p(t,ao,l),e(ao,is),p(t,Xn,l),p(t,io,l),e(io,cs),p(t,Zn,l),p(t,co,l),e(co,Uo),e(Uo,hs),p(t,er,l),p(t,Ce,l),e(Ce,ls),e(Ce,et),e(et,ps),e(Ce,us),p(t,tr,l),p(t,de,l),e(de,Se),e(Se,Ro),g(tt,Ro,null),e(de,ms),e(de,Jo),e(Jo,fs),p(t,or,l),p(t,U,l),g(ot,U,null),e(U,_s),e(U,nt),e(nt,gs),e(nt,ho),e(ho,vs),e(nt,ks),e(U,Ts),e(U,ie),e(ie,bs),e(ie,lo),e(lo,ws),e(ie,ys),e(ie,po),e(po,Ps),e(ie,Ns),p(t,nr,l),p(t,ce,l),e(ce,De),e(De,Yo),g(rt,Yo,null),e(ce,zs),e(ce,Ko),e(Ko,qs),p(t,rr,l),p(t,$,l),g(st,$,null),e($,$s),e($,Qo),e(Qo,Fs),e($,xs),e($,at),e(at,Es),e(at,uo),e(uo,Ms),e(at,Cs),e($,Ss),e($,R),g(dt,R,null),e(R,Ds),e(R,Xo),e(Xo,Os),e(R,Ls),e(R,it),e(it,mo),e(mo,js),e(mo,Zo),e(Zo,As),e(it,Is),e(it,fo),e(fo,Bs),e(fo,en),e(en,Gs),e($,Ws),e($,Oe),g(ct,Oe,null),e(Oe,Hs),e(Oe,tn),e(tn,Vs),e($,Us),e($,W),g(ht,W,null),e(W,Rs),e(W,on),e(on,Js),e(W,Ys),g(lt,W,null),e(W,Ks),e(W,he),e(he,Qs),e(he,nn),e(nn,Xs),e(he,Zs),e(he,rn),e(rn,ea),e(he,ta),e($,oa),e($,Le),g(pt,Le,null),e(Le,na),e(Le,ut),e(ut,ra),e(ut,sn),e(sn,sa),e(ut,aa),p(t,sr,l),p(t,le,l),e(le,je),e(je,an),g(mt,an,null),e(le,da),e(le,dn),e(dn,ia),p(t,ar,l),p(t,pe,l),g(ft,pe,null),e(pe,ca),e(pe,cn),e(cn,ha),p(t,dr,l),p(t,ue,l),g(_t,ue,null),e(ue,la),e(ue,hn),e(hn,pa),p(t,ir,l),p(t,me,l),g(gt,me,null),e(me,ua),e(me,ln),e(ln,ma),p(t,cr,l),p(t,fe,l),g(vt,fe,null),e(fe,fa),e(fe,pn),e(pn,_a),p(t,hr,l),p(t,_e,l),e(_e,Ae),e(Ae,un),g(kt,un,null),e(_e,ga),e(_e,mn),e(mn,va),p(t,lr,l),p(t,E,l),g(Tt,E,null),e(E,ka),e(E,bt),e(bt,Ta),e(bt,_o),e(_o,ba),e(bt,wa),e(E,ya),e(E,ge),e(ge,Pa),e(ge,wt),e(wt,Na),e(ge,za),e(ge,fn),e(fn,qa),e(ge,$a),e(E,Fa),e(E,yt),e(yt,xa),e(yt,Pt),e(Pt,Ea),e(yt,Ma),e(E,Ca),e(E,S),g(Nt,S,null),e(S,Sa),e(S,ve),e(ve,Da),e(ve,go),e(go,Oa),e(ve,La),e(ve,_n),e(_n,ja),e(ve,Aa),e(S,Ia),g(Ie,S,null),e(S,Ba),e(S,gn),e(gn,Ga),e(S,Wa),g(zt,S,null),p(t,pr,l),p(t,ke,l),e(ke,Be),e(Be,vn),g(qt,vn,null),e(ke,Ha),e(ke,kn),e(kn,Va),p(t,ur,l),p(t,F,l),g($t,F,null),e(F,Ua),e(F,Ft),e(Ft,Ra),e(Ft,vo),e(vo,Ja),e(Ft,Ya),e(F,Ka),e(F,Te),e(Te,Qa),e(Te,xt),e(xt,Xa),e(Te,Za),e(Te,Tn),e(Tn,ed),e(Te,td),e(F,od),e(F,Et),e(Et,nd),
e(Et,Mt),e(Mt,rd),e(Et,sd),e(F,ad),e(F,B),e(B,dd),e(B,bn),e(bn,id),e(B,cd),e(B,wn),e(wn,hd),e(B,ld),e(B,yn),e(yn,pd),e(B,ud),e(B,ko),e(ko,md),e(B,fd),e(F,_d),e(F,D),g(Ct,D,null),e(D,gd),e(D,be),e(be,vd),e(be,To),e(To,kd),e(be,Td),e(be,Pn),e(Pn,bd),e(be,wd),e(D,yd),g(Ge,D,null),e(D,Pd),e(D,Nn),e(Nn,Nd),e(D,zd),g(St,D,null),p(t,mr,l),p(t,we,l),e(we,We),e(We,zn),g(Dt,zn,null),e(we,qd),e(we,qn),e(qn,$d),p(t,fr,l),p(t,x,l),g(Ot,x,null),e(x,Fd),e(x,Lt),e(Lt,xd),e(Lt,bo),e(bo,Ed),e(Lt,Md),e(x,Cd),e(x,ye),e(ye,Sd),e(ye,jt),e(jt,Dd),e(ye,Od),e(ye,$n),e($n,Ld),e(ye,jd),e(x,Ad),e(x,At),e(At,Id),e(At,It),e(It,Bd),e(At,Gd),e(x,Wd),e(x,G),e(G,Hd),e(G,Fn),e(Fn,Vd),e(G,Ud),e(G,xn),e(xn,Rd),e(G,Jd),e(G,En),e(En,Yd),e(G,Kd),e(G,wo),e(wo,Qd),e(G,Xd),e(x,Zd),e(x,O),g(Bt,O,null),e(O,ei),e(O,Pe),e(Pe,ti),e(Pe,yo),e(yo,oi),e(Pe,ni),e(Pe,Mn),e(Mn,ri),e(Pe,si),e(O,ai),g(He,O,null),e(O,di),e(O,Cn),e(Cn,ii),e(O,ci),g(Gt,O,null),p(t,_r,l),p(t,Ne,l),e(Ne,Ve),e(Ve,Sn),g(Wt,Sn,null),e(Ne,hi),e(Ne,Dn),e(Dn,li),p(t,gr,l),p(t,M,l),g(Ht,M,null),e(M,pi),e(M,Vt),e(Vt,ui),e(Vt,Po),e(Po,mi),e(Vt,fi),e(M,_i),e(M,ze),e(ze,gi),e(ze,Ut),e(Ut,vi),e(ze,ki),e(ze,On),e(On,Ti),e(ze,bi),e(M,wi),e(M,Rt),e(Rt,yi),e(Rt,Jt),e(Jt,Pi),e(Rt,Ni),e(M,zi),e(M,L),g(Yt,L,null),e(L,qi),e(L,qe),e(qe,$i),e(qe,No),e(No,Fi),e(qe,xi),e(qe,Ln),e(Ln,Ei),e(qe,Mi),e(L,Ci),g(Ue,L,null),e(L,Si),e(L,jn),e(jn,Di),e(L,Oi),g(Kt,L,null),p(t,vr,l),p(t,$e,l),e($e,Re),e(Re,An),g(Qt,An,null),e($e,Li),e($e,In),e(In,ji),p(t,kr,l),p(t,C,l),g(Xt,C,null),e(C,Ai),e(C,Zt),e(Zt,Ii),e(Zt,zo),e(zo,Bi),e(Zt,Gi),e(C,Wi),e(C,Fe),e(Fe,Hi),e(Fe,eo),e(eo,Vi),e(Fe,Ui),e(Fe,Bn),e(Bn,Ri),e(Fe,Ji),e(C,Yi),e(C,to),e(to,Ki),e(to,oo),e(oo,Qi),e(to,Xi),e(C,Zi),e(C,j),g(no,j,null),e(j,ec),e(j,xe),e(xe,tc),e(xe,qo),e(qo,oc),e(xe,nc),e(xe,Gn),e(Gn,rc),e(xe,sc),e(j,ac),g(Je,j,null),e(j,dc),e(j,Wn),e(Wn,ic),e(j,cc),g(ro,j,null),Tr=!0},p(t,[l]){const so={};l&2&&(so.$$scope={dirty:l,ctx:t}),Ie.$set(so);const Hn={};l&2&&(Hn.$$scope={dirty:l,ctx:t}),Ge.$set(Hn);const Vn={};l&2&&(Vn.$$scope={dirty:l,ctx:t}),He.$set(Vn);const Un={};l&2&&(Un.$$scope={dirty:l,ctx:t}),Ue.$set(Un);const 
Ye={};l&2&&(Ye.$$scope={dirty:l,ctx:t}),Je.$set(Ye)},i(t){Tr||(v(w.$$.fragment,t),v(Xe.$$.fragment,t),v(tt.$$.fragment,t),v(ot.$$.fragment,t),v(rt.$$.fragment,t),v(st.$$.fragment,t),v(dt.$$.fragment,t),v(ct.$$.fragment,t),v(ht.$$.fragment,t),v(lt.$$.fragment,t),v(pt.$$.fragment,t),v(mt.$$.fragment,t),v(ft.$$.fragment,t),v(_t.$$.fragment,t),v(gt.$$.fragment,t),v(vt.$$.fragment,t),v(kt.$$.fragment,t),v(Tt.$$.fragment,t),v(Nt.$$.fragment,t),v(Ie.$$.fragment,t),v(zt.$$.fragment,t),v(qt.$$.fragment,t),v($t.$$.fragment,t),v(Ct.$$.fragment,t),v(Ge.$$.fragment,t),v(St.$$.fragment,t),v(Dt.$$.fragment,t),v(Ot.$$.fragment,t),v(Bt.$$.fragment,t),v(He.$$.fragment,t),v(Gt.$$.fragment,t),v(Wt.$$.fragment,t),v(Ht.$$.fragment,t),v(Yt.$$.fragment,t),v(Ue.$$.fragment,t),v(Kt.$$.fragment,t),v(Qt.$$.fragment,t),v(Xt.$$.fragment,t),v(no.$$.fragment,t),v(Je.$$.fragment,t),v(ro.$$.fragment,t),Tr=!0)},o(t){k(w.$$.fragment,t),k(Xe.$$.fragment,t),k(tt.$$.fragment,t),k(ot.$$.fragment,t),k(rt.$$.fragment,t),k(st.$$.fragment,t),k(dt.$$.fragment,t),k(ct.$$.fragment,t),k(ht.$$.fragment,t),k(lt.$$.fragment,t),k(pt.$$.fragment,t),k(mt.$$.fragment,t),k(ft.$$.fragment,t),k(_t.$$.fragment,t),k(gt.$$.fragment,t),k(vt.$$.fragment,t),k(kt.$$.fragment,t),k(Tt.$$.fragment,t),k(Nt.$$.fragment,t),k(Ie.$$.fragment,t),k(zt.$$.fragment,t),k(qt.$$.fragment,t),k($t.$$.fragment,t),k(Ct.$$.fragment,t),k(Ge.$$.fragment,t),k(St.$$.fragment,t),k(Dt.$$.fragment,t),k(Ot.$$.fragment,t),k(Bt.$$.fragment,t),k(He.$$.fragment,t),k(Gt.$$.fragment,t),k(Wt.$$.fragment,t),k(Ht.$$.fragment,t),k(Yt.$$.fragment,t),k(Ue.$$.fragment,t),k(Kt.$$.fragment,t),k(Qt.$$.fragment,t),k(Xt.$$.fragment,t),k(no.$$.fragment,t),k(Je.$$.fragment,t),k(ro.$$.fragment,t),Tr=!1},d(t){o(u),t&&o(P),t&&o(m),T(w),t&&o(Jn),t&&o(se),t&&o(Yn),t&&o(ae),T(Xe),t&&o(Kn),t&&o(Me),t&&o(Qn),t&&o(ao),t&&o(Xn),t&&o(io),t&&o(Zn),t&&o(co),t&&o(er),t&&o(Ce),t&&o(tr),t&&o(de),T(tt),t&&o(or),t&&o(U),T(ot),t&&o(nr),t&&o(ce),T(rt),t&&o(rr),t&&o($),T(st),T(dt),T(ct),T(ht),T(lt),T(pt),t&&o(sr),t&&o(le),T(mt),t&&o(ar),t&&o(pe),T(ft),t&&o(dr),t&&o(ue),T(_t),t&&o(ir),t&&o(me),T(gt),t&&o(cr),t&&o(fe),T(vt),t&&o(hr),t&&o(_e),T(kt),t&&o(lr),t&&o(E),T(Tt),T(Nt),T(Ie),T(zt),t&&o(pr),t&&o(ke),T(qt),t&&o(ur),t&&o(F),T($t),T(Ct),T(Ge),T(St),t&&o(mr),t&&o(we),T(Dt),t&&o(fr),t&&o(x),T(Ot),T(Bt),T(He),T(Gt),t&&o(_r),t&&o(Ne),T(Wt),t&&o(gr),t&&o(M),T(Ht),T(Yt),T(Ue),T(Kt),t&&o(vr),t&&o($e),T(Qt),t&&o(kr),t&&o(C),T(Xt),T(no),T(Je),T(ro)}}}const cl={local:"prophetnet",sections:[{local:"overview",title:"Overview"},{local:"transformers.ProphetNetConfig",title:"ProphetNetConfig"},{local:"transformers.ProphetNetTokenizer",title:"ProphetNetTokenizer"},{local:"transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput",title:"ProphetNet specific outputs"},{local:"transformers.ProphetNetModel",title:"ProphetNetModel"},{local:"transformers.ProphetNetEncoder",title:"ProphetNetEncoder"},{local:"transformers.ProphetNetDecoder",title:"ProphetNetDecoder"},{local:"transformers.ProphetNetForConditionalGeneration",title:"ProphetNetForConditionalGeneration"},{local:"transformers.ProphetNetForCausalLM",title:"ProphetNetForCausalLM"}],title:"ProphetNet"};function hl(I,u,P){let{fw:m}=u;return I.$$set=y=>{"fw"in y&&P(0,m=y.fw)},[m]}class gl extends Zh{constructor(u){super();el(this,u,hl,il,tl,{fw:0})}}export{gl as default,cl as metadata};
9,926
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/fnet.mdx-1f573e04.js
import{S as Yh,i as Zh,s as ef,e as n,k as l,w as g,t as a,L as tf,c as s,d as o,m as c,a as r,x as _,h as i,b as d,J as e,g as f,y as k,q as v,o as F,B as b}from"../../chunks/vendor-b1433968.js";import{T as Ft}from"../../chunks/Tip-c3840994.js";import{D as z}from"../../chunks/Docstring-ff504c58.js";import{C as Y}from"../../chunks/CodeBlock-a320dbd7.js";import{I as G}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function of(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function nf(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function sf(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function rf(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function af(q){let 
p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function lf(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function cf(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function df(q){let p,N,m,T,y;return{c(){p=n("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),y=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,N),e(p,m),e(m,T),e(p,y)},d(w){w&&o(p)}}}function pf(q){let 
p,N,m,T,y,w,u,$,Yr,er,me,We,Mn,bt,Zr,jn,ea,tr,Ue,ta,wt,oa,na,or,Wo,Cn,sa,nr,Uo,ra,sr,Ho,Sn,aa,rr,oe,ia,Tt,la,ca,Nt,da,pa,ar,ue,He,An,yt,ha,Ln,fa,ir,P,$t,ma,ge,ua,Qo,ga,_a,zt,ka,va,Fa,_e,ba,Vo,wa,Ta,Ro,Na,ya,$a,In,za,qa,qt,lr,ke,Qe,Dn,xt,xa,On,Ea,cr,x,Et,Pa,Z,Ma,Jo,ja,Ca,Pt,Sa,Aa,Go,La,Ia,Da,ve,Oa,Bn,Ba,Wa,Wn,Ua,Ha,Qa,ne,Mt,Va,Un,Ra,Ja,jt,Ko,Ga,Hn,Ka,Xa,Xo,Ya,Qn,Za,ei,Ve,Ct,ti,St,oi,Vn,ni,si,ri,K,At,ai,Rn,ii,li,Lt,ci,Fe,di,Jn,pi,hi,Gn,fi,mi,ui,Kn,dr,be,Re,Xn,It,gi,Yn,_i,pr,B,Dt,ki,W,vi,Zn,Fi,bi,Yo,wi,Ti,Ot,Ni,yi,Zo,$i,zi,qi,se,Bt,xi,es,Ei,Pi,Wt,en,Mi,ts,ji,Ci,tn,Si,os,Ai,Li,X,Ut,Ii,ns,Di,Oi,Ht,Bi,ss,Wi,hr,we,Je,rs,Qt,Ui,as,Hi,fr,U,Vt,Qi,Rt,Vi,Jt,Ri,Ji,Gi,Gt,Ki,is,Xi,Yi,Zi,M,Kt,el,Te,tl,on,ol,nl,ls,sl,rl,al,Ge,il,cs,ll,cl,Xt,mr,Ne,Ke,ds,Yt,dl,ps,pl,ur,H,Zt,hl,ye,fl,hs,ml,ul,fs,gl,_l,kl,eo,vl,to,Fl,bl,wl,j,oo,Tl,$e,Nl,nn,yl,$l,ms,zl,ql,xl,Xe,El,us,Pl,Ml,no,gr,ze,Ye,gs,so,jl,_s,Cl,_r,ee,ro,Sl,qe,Al,ks,Ll,Il,ao,Dl,Ol,Bl,C,io,Wl,xe,Ul,sn,Hl,Ql,vs,Vl,Rl,Jl,Ze,Gl,Fs,Kl,Xl,lo,kr,Ee,et,bs,co,Yl,ws,Zl,vr,te,po,ec,Pe,tc,Ts,oc,nc,ho,sc,rc,ac,S,fo,ic,Me,lc,rn,cc,dc,Ns,pc,hc,fc,tt,mc,ys,uc,gc,mo,Fr,je,ot,$s,uo,_c,zs,kc,br,Q,go,vc,qs,Fc,bc,_o,wc,ko,Tc,Nc,yc,E,vo,$c,Ce,zc,an,qc,xc,xs,Ec,Pc,Mc,nt,jc,Es,Cc,Sc,Fo,Ac,Ps,Lc,Ic,bo,wr,Se,st,Ms,wo,Dc,js,Oc,Tr,V,To,Bc,Cs,Wc,Uc,No,Hc,yo,Qc,Vc,Rc,A,$o,Jc,Ae,Gc,ln,Kc,Xc,Ss,Yc,Zc,ed,rt,td,As,od,nd,zo,Nr,Le,at,Ls,qo,sd,Is,rd,yr,R,xo,ad,Ds,id,ld,Eo,cd,Po,dd,pd,hd,L,Mo,fd,Ie,md,cn,ud,gd,Os,_d,kd,vd,it,Fd,Bs,bd,wd,jo,$r,De,lt,Ws,Co,Td,Us,Nd,zr,J,So,yd,Oe,$d,Hs,zd,qd,Qs,xd,Ed,Pd,Ao,Md,Lo,jd,Cd,Sd,I,Io,Ad,Be,Ld,dn,Id,Dd,Vs,Od,Bd,Wd,ct,Ud,Rs,Hd,Qd,Do,qr;return w=new G({}),bt=new G({}),yt=new G({}),$t=new z({props:{name:"class transformers.FNetConfig",anchor:"transformers.FNetConfig",parameters:[{name:"vocab_size",val:" = 32000"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 4"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_tpu_fourier_optimizations",val:" = False"},{name:"tpu_short_seq_length",val:" = 512"},{name:"pad_token_id",val:" = 3"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/configuration_fnet.py#L30",parametersDescription:[{anchor:"transformers.FNetConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32000) &#x2014; Vocabulary size of the FNet model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetModel">FNetModel</a> or <code>TFFNetModel</code>.`,name:"vocab_size"},{anchor:"transformers.FNetConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.FNetConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.FNetConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.FNetConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.FNetConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.FNetConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.FNetConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetModel">FNetModel</a> or <code>TFFNetModel</code>.`,name:"type_vocab_size"},{anchor:"transformers.FNetConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.FNetConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.FNetConfig.use_tpu_fourier_optimizations",description:`<strong>use_tpu_fourier_optimizations</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Determines whether to use TPU optimized FFTs. If <code>True</code>, the model will favor axis-wise FFTs transforms. 
Set to <code>False</code> for GPU/CPU hardware, in which case n-dimensional FFTs are used.`,name:"use_tpu_fourier_optimizations"},{anchor:"transformers.FNetConfig.tpu_short_seq_length",description:`<strong>tpu_short_seq_length</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT matrix only when <em>use_tpu_fourier_optimizations</em> is set to <code>True</code> and the input sequence is shorter than or equal to 4096 tokens.`,name:"tpu_short_seq_length"}]}}),qt=new Y({props:{code:`from transformers import FNetModel, FNetConfig # Initializing a FNet fnet-base style configuration configuration = FNetConfig() # Initializing a model from the fnet-base style configuration model = FNetModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetModel, FNetConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a FNet fnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = FNetConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the fnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),xt=new G({}),Et=new z({props:{name:"class transformers.FNetTokenizer",anchor:"transformers.FNetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = True"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet.py#L46",parametersDescription:[{anchor:"transformers.FNetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.FNetTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.FNetTokenizer.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.FNetTokenizer.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to keep accents when 
tokenizing.`,name:"keep_accents"},{anchor:"transformers.FNetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.FNetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.FNetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.FNetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.FNetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.FNetTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Mt=new z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.FNetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet.py#L215",parametersDescription:[{anchor:"transformers.FNetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ct=new z({props:{name:"get_special_tokens_mask",anchor:"transformers.FNetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet.py#L240",parametersDescription:[{anchor:"transformers.FNetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.FNetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),At=new z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FNetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet.py#L268",parametersDescription:[{anchor:"transformers.FNetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Lt=new Y({props:{code:"0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,",highlighted:'<span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> | first sequence | second sequence |'}}),It=new G({}),Dt=new z({props:{name:"class transformers.FNetTokenizerFast",anchor:"transformers.FNetTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = True"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet_fast.py#L55",parametersDescription:[{anchor:"transformers.FNetTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.FNetTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.FNetTokenizerFast.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.FNetTokenizerFast.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.FNetTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.FNetTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.FNetTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.FNetTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.FNetTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"}]}}),Bt=new z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.FNetTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet_fast.py#L137",parametersDescription:[{anchor:"transformers.FNetTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ut=new z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FNetTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/tokenization_fnet_fast.py#L162",parametersDescription:[{anchor:"transformers.FNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second 
list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ht=new Y({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Qt=new G({}),Vt=new z({props:{name:"class transformers.FNetModel",anchor:"transformers.FNetModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L518",parametersDescription:[{anchor:"transformers.FNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new z({props:{name:"forward",anchor:"transformers.FNetModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L545",parametersDescription:[{anchor:"transformers.FNetModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to 
compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ge=new Ft({props:{$$slots:{default:[of]},$$scope:{ctx:q}}}),Xt=new Y({props:{code:`from transformers import FNetTokenizer, FNetModel import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetModel.from_pretrained('google/fnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetModel.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Yt=new G({}),Zt=new z({props:{name:"class transformers.FNetForPreTraining",anchor:"transformers.FNetForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L628",parametersDescription:[{anchor:"transformers.FNetForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),oo=new z({props:{name:"forward",anchor:"transformers.FNetForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"next_sentence_label",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L644",parametersDescription:[{anchor:"transformers.FNetForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.FNetForPreTraining.forward.next_sentence_label",description:`<strong>next_sentence_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"next_sentence_label"},{anchor:"transformers.FNetForPreTraining.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.fnet.modeling_fnet.FNetForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> </ul> `,returnType:` <p><code>transformers.models.fnet.modeling_fnet.FNetForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xe=new Ft({props:{$$slots:{default:[nf]},$$scope:{ctx:q}}}),no=new Y({props:{code:`from transformers import FNetTokenizer, FNetForPreTraining import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForPreTraining.from_pretrained('google/fnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForPreTraining.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),so=new G({}),ro=new z({props:{name:"class transformers.FNetForMaskedLM",anchor:"transformers.FNetForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L718",parametersDescription:[{anchor:"transformers.FNetForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),io=new z({props:{name:"forward",anchor:"transformers.FNetForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L734",parametersDescription:[{anchor:"transformers.FNetForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ze=new Ft({props:{$$slots:{default:[sf]},$$scope:{ctx:q}}}),lo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForMaskedLM import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForMaskedLM.from_pretrained('google/fnet-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),co=new G({}),po=new z({props:{name:"class transformers.FNetForNextSentencePrediction",anchor:"transformers.FNetForNextSentencePrediction",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L786",parametersDescription:[{anchor:"transformers.FNetForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fo=new z({props:{name:"forward",anchor:"transformers.FNetForNextSentencePrediction.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L796",parametersDescription:[{anchor:"transformers.FNetForNextSentencePrediction.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForNextSentencePrediction.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForNextSentencePrediction.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForNextSentencePrediction.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForNextSentencePrediction.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForNextSentencePrediction.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForNextSentencePrediction.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring). 
Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) \u2014 Next sequence prediction (classification) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tt=new Ft({props:{$$slots:{default:[rf]},$$scope:{ctx:q}}}),mo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForNextSentencePrediction import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForNextSentencePrediction.from_pretrained('google/fnet-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
encoding = tokenizer(prompt, next_sentence, return_tensors='pt') outputs = model(**encoding, labels=torch.LongTensor([1])) logits = outputs.logits assert logits[0, 0] < logits[0, 1] # next sentence was random,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=torch.LongTensor([<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># next sentence was random</span>`}}),uo=new G({}),go=new z({props:{name:"class transformers.FNetForSequenceClassification",anchor:"transformers.FNetForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L879",parametersDescription:[{anchor:"transformers.FNetForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),vo=new z({props:{name:"forward",anchor:"transformers.FNetForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L891",parametersDescription:[{anchor:"transformers.FNetForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nt=new Ft({props:{$$slots:{default:[af]},$$scope:{ctx:q}}}),Fo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForSequenceClassification import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForSequenceClassification.from_pretrained('google/fnet-base') inputs = tokenizer("Hello, my dog is cute", 
return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),bo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForSequenceClassification import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForSequenceClassification.from_pretrained('google/fnet-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),wo=new G({}),To=new z({props:{name:"class 
transformers.FNetForMultipleChoice",anchor:"transformers.FNetForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L964",parametersDescription:[{anchor:"transformers.FNetForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$o=new z({props:{name:"forward",anchor:"transformers.FNetForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L975",parametersDescription:[{anchor:"transformers.FNetForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),rt=new Ft({props:{$$slots:{default:[lf]},$$scope:{ctx:q}}}),zo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForMultipleChoice import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForMultipleChoice.from_pretrained('google/fnet-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qo=new G({}),xo=new z({props:{name:"class transformers.FNetForTokenClassification",anchor:"transformers.FNetForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L1043",parametersDescription:[{anchor:"transformers.FNetForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mo=new z({props:{name:"forward",anchor:"transformers.FNetForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L1056",parametersDescription:[{anchor:"transformers.FNetForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),it=new Ft({props:{$$slots:{default:[cf]},$$scope:{ctx:q}}}),jo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForTokenClassification import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForTokenClassification.from_pretrained('google/fnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Co=new G({}),So=new z({props:{name:"class transformers.FNetForQuestionAnswering",anchor:"transformers.FNetForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L1113",parametersDescription:[{anchor:"transformers.FNetForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Io=new z({props:{name:"forward",anchor:"transformers.FNetForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/fnet/modeling_fnet.py#L1125",parametersDescription:[{anchor:"transformers.FNetForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.FNetForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ct=new Ft({props:{$$slots:{default:[df]},$$scope:{ctx:q}}}),Do=new Y({props:{code:`from transformers import FNetTokenizer, FNetForQuestionAnswering import torch tokenizer = FNetTokenizer.from_pretrained('google/fnet-base') model = FNetForQuestionAnswering.from_pretrained('google/fnet-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/fnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){p=n("meta"),N=l(),m=n("h1"),T=n("a"),y=n("span"),g(w.$$.fragment),u=l(),$=n("span"),Yr=a("FNet"),er=l(),me=n("h2"),We=n("a"),Mn=n("span"),g(bt.$$.fragment),Zr=l(),jn=n("span"),ea=a("Overview"),tr=l(),Ue=n("p"),ta=a("The FNet model was proposed in "),wt=n("a"),oa=a("FNet: Mixing Tokens with Fourier Transforms"),na=a(` by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT model with a fourier transform which returns only the real parts of the transform. The model is significantly faster than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97% accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the paper is the following:`),or=l(),Wo=n("p"),Cn=n("em"),sa=a(`We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that \u201Cmix\u201D input tokens. These linear mixers, along with standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. At longer input lengths, our FNet model is significantly faster: when compared to the \u201Cefficient\u201D Transformers on the Long Range Arena benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.`),nr=l(),Uo=n("p"),ra=a("Tips on usage:"),sr=l(),Ho=n("ul"),Sn=n("li"),aa=a(`The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with maximum sequence length 512 which includes pad tokens. 
Hence, it is highly recommended to use the same maximum sequence length for fine-tuning and inference.`),rr=l(),oe=n("p"),ia=a("This model was contributed by "),Tt=n("a"),la=a("gchhablani"),ca=a(". The original code can be found "),Nt=n("a"),da=a("here"),pa=a("."),ar=l(),ue=n("h2"),He=n("a"),An=n("span"),g(yt.$$.fragment),ha=l(),Ln=n("span"),fa=a("FNetConfig"),ir=l(),P=n("div"),g($t.$$.fragment),ma=l(),ge=n("p"),ua=a("This is the configuration class to store the configuration of a "),Qo=n("a"),ga=a("FNetModel"),_a=a(`. It is used to instantiate an FNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FNet `),zt=n("a"),ka=a("fnet-base"),va=a(" architecture."),Fa=l(),_e=n("p"),ba=a("Configuration objects inherit from "),Vo=n("a"),wa=a("PretrainedConfig"),Ta=a(` and can be used to control the model outputs. Read the documentation from `),Ro=n("a"),Na=a("PretrainedConfig"),ya=a(" for more information."),$a=l(),In=n("p"),za=a("Example:"),qa=l(),g(qt.$$.fragment),lr=l(),ke=n("h2"),Qe=n("a"),Dn=n("span"),g(xt.$$.fragment),xa=l(),On=n("span"),Ea=a("FNetTokenizer"),cr=l(),x=n("div"),g(Et.$$.fragment),Pa=l(),Z=n("p"),Ma=a("Construct an FNet tokenizer. Adapted from "),Jo=n("a"),ja=a("AlbertTokenizer"),Ca=a(". Based on "),Pt=n("a"),Sa=a("SentencePiece"),Aa=a(`. This tokenizer inherits from `),Go=n("a"),La=a("PreTrainedTokenizer"),Ia=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Da=l(),ve=n("p"),Oa=a(`Attributes: sp_model (`),Bn=n("code"),Ba=a("SentencePieceProcessor"),Wa=a(`): The `),Wn=n("em"),Ua=a("SentencePiece"),Ha=a(" processor that is used for every conversion (string, tokens and IDs)."),Qa=l(),ne=n("div"),g(Mt.$$.fragment),Va=l(),Un=n("p"),Ra=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),Ja=l(),jt=n("ul"),Ko=n("li"),Ga=a("single sequence: "),Hn=n("code"),Ka=a("[CLS] X [SEP]"),Xa=l(),Xo=n("li"),Ya=a("pair of sequences: "),Qn=n("code"),Za=a("[CLS] A [SEP] B [SEP]"),ei=l(),Ve=n("div"),g(Ct.$$.fragment),ti=l(),St=n("p"),oi=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Vn=n("code"),ni=a("prepare_for_model"),si=a(" method."),ri=l(),K=n("div"),g(At.$$.fragment),ai=l(),Rn=n("p"),ii=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format: :`),li=l(),g(Lt.$$.fragment),ci=l(),Fe=n("p"),di=a("If "),Jn=n("code"),pi=a("token_ids_1"),hi=a(" is "),Gn=n("code"),fi=a("None"),mi=a(", this method only returns the first portion of the mask (0s)."),ui=l(),Kn=n("div"),dr=l(),be=n("h2"),Re=n("a"),Xn=n("span"),g(It.$$.fragment),gi=l(),Yn=n("span"),_i=a("FNetTokenizerFast"),pr=l(),B=n("div"),g(Dt.$$.fragment),ki=l(),W=n("p"),vi=a("Construct a \u201Cfast\u201D FNetTokenizer (backed by HuggingFace\u2019s "),Zn=n("em"),Fi=a("tokenizers"),bi=a(` library). Adapted from `),Yo=n("a"),wi=a("AlbertTokenizerFast"),Ti=a(". Based on "),Ot=n("a"),Ni=a("Unigram"),yi=a(`. This tokenizer inherits from `),Zo=n("a"),$i=a("PreTrainedTokenizerFast"),zi=a(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods`),qi=l(),se=n("div"),g(Bt.$$.fragment),xi=l(),es=n("p"),Ei=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),Pi=l(),Wt=n("ul"),en=n("li"),Mi=a("single sequence: "),ts=n("code"),ji=a("[CLS] X [SEP]"),Ci=l(),tn=n("li"),Si=a("pair of sequences: "),os=n("code"),Ai=a("[CLS] A [SEP] B [SEP]"),Li=l(),X=n("div"),g(Ut.$$.fragment),Ii=l(),ns=n("p"),Di=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format:`),Oi=l(),g(Ht.$$.fragment),Bi=l(),ss=n("p"),Wi=a("if token_ids_1 is None, only returns the first portion of the mask (0s)."),hr=l(),we=n("h2"),Je=n("a"),rs=n("span"),g(Qt.$$.fragment),Ui=l(),as=n("span"),Hi=a("FNetModel"),fr=l(),U=n("div"),g(Vt.$$.fragment),Qi=l(),Rt=n("p"),Vi=a(`The bare FNet Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Jt=n("a"),Ri=a("torch.nn.Module"),Ji=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gi=l(),Gt=n("p"),Ki=a("The model can behave as an encoder, following the architecture described in "),is=n("code"),Xi=a("FNet: Mixing Tokens with Fourier Transforms <https://arxiv.org/abs/2105.03824>"),Yi=a(`__ by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.`),Zi=l(),M=n("div"),g(Kt.$$.fragment),el=l(),Te=n("p"),tl=a("The "),on=n("a"),ol=a("FNetModel"),nl=a(" forward method, overrides the "),ls=n("code"),sl=a("__call__"),rl=a(" special method."),al=l(),g(Ge.$$.fragment),il=l(),cs=n("p"),ll=a("Example:"),cl=l(),g(Xt.$$.fragment),mr=l(),Ne=n("h2"),Ke=n("a"),ds=n("span"),g(Yt.$$.fragment),dl=l(),ps=n("span"),pl=a("FNetForPreTraining"),ur=l(),H=n("div"),g(Zt.$$.fragment),hl=l(),ye=n("p"),fl=a("FNet Model with two heads on top as done during the pretraining: a "),hs=n("code"),ml=a("masked language modeling"),ul=a(" head and a "),fs=n("code"),gl=a("next sentence prediction (classification)"),_l=a(" head."),kl=l(),eo=n("p"),vl=a("This model is a PyTorch "),to=n("a"),Fl=a("torch.nn.Module"),bl=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wl=l(),j=n("div"),g(oo.$$.fragment),Tl=l(),$e=n("p"),Nl=a("The "),nn=n("a"),yl=a("FNetForPreTraining"),$l=a(" forward method, overrides the "),ms=n("code"),zl=a("__call__"),ql=a(" special method."),xl=l(),g(Xe.$$.fragment),El=l(),us=n("p"),Pl=a("Example:"),Ml=l(),g(no.$$.fragment),gr=l(),ze=n("h2"),Ye=n("a"),gs=n("span"),g(so.$$.fragment),jl=l(),_s=n("span"),Cl=a("FNetForMaskedLM"),_r=l(),ee=n("div"),g(ro.$$.fragment),Sl=l(),qe=n("p"),Al=a("FNet Model with a "),ks=n("code"),Ll=a("language modeling"),Il=a(` head on top. This model is a PyTorch `),ao=n("a"),Dl=a("torch.nn.Module"),Ol=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bl=l(),C=n("div"),g(io.$$.fragment),Wl=l(),xe=n("p"),Ul=a("The "),sn=n("a"),Hl=a("FNetForMaskedLM"),Ql=a(" forward method, overrides the "),vs=n("code"),Vl=a("__call__"),Rl=a(" special method."),Jl=l(),g(Ze.$$.fragment),Gl=l(),Fs=n("p"),Kl=a("Example:"),Xl=l(),g(lo.$$.fragment),kr=l(),Ee=n("h2"),et=n("a"),bs=n("span"),g(co.$$.fragment),Yl=l(),ws=n("span"),Zl=a("FNetForNextSentencePrediction"),vr=l(),te=n("div"),g(po.$$.fragment),ec=l(),Pe=n("p"),tc=a("FNet Model with a "),Ts=n("code"),oc=a("next sentence prediction (classification)"),nc=a(` head on top. This model is a PyTorch `),ho=n("a"),sc=a("torch.nn.Module"),rc=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ac=l(),S=n("div"),g(fo.$$.fragment),ic=l(),Me=n("p"),lc=a("The "),rn=n("a"),cc=a("FNetForNextSentencePrediction"),dc=a(" forward method, overrides the "),Ns=n("code"),pc=a("__call__"),hc=a(" special method."),fc=l(),g(tt.$$.fragment),mc=l(),ys=n("p"),uc=a("Example:"),gc=l(),g(mo.$$.fragment),Fr=l(),je=n("h2"),ot=n("a"),$s=n("span"),g(uo.$$.fragment),_c=l(),zs=n("span"),kc=a("FNetForSequenceClassification"),br=l(),Q=n("div"),g(go.$$.fragment),vc=l(),qs=n("p"),Fc=a(`FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),bc=l(),_o=n("p"),wc=a("This model is a PyTorch "),ko=n("a"),Tc=a("torch.nn.Module"),Nc=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yc=l(),E=n("div"),g(vo.$$.fragment),$c=l(),Ce=n("p"),zc=a("The "),an=n("a"),qc=a("FNetForSequenceClassification"),xc=a(" forward method, overrides the "),xs=n("code"),Ec=a("__call__"),Pc=a(" special method."),Mc=l(),g(nt.$$.fragment),jc=l(),Es=n("p"),Cc=a("Example of single-label classification:"),Sc=l(),g(Fo.$$.fragment),Ac=l(),Ps=n("p"),Lc=a("Example of multi-label classification:"),Ic=l(),g(bo.$$.fragment),wr=l(),Se=n("h2"),st=n("a"),Ms=n("span"),g(wo.$$.fragment),Dc=l(),js=n("span"),Oc=a("FNetForMultipleChoice"),Tr=l(),V=n("div"),g(To.$$.fragment),Bc=l(),Cs=n("p"),Wc=a(`FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Uc=l(),No=n("p"),Hc=a("This model is a PyTorch "),yo=n("a"),Qc=a("torch.nn.Module"),Vc=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rc=l(),A=n("div"),g($o.$$.fragment),Jc=l(),Ae=n("p"),Gc=a("The "),ln=n("a"),Kc=a("FNetForMultipleChoice"),Xc=a(" forward method, overrides the "),Ss=n("code"),Yc=a("__call__"),Zc=a(" special method."),ed=l(),g(rt.$$.fragment),td=l(),As=n("p"),od=a("Example:"),nd=l(),g(zo.$$.fragment),Nr=l(),Le=n("h2"),at=n("a"),Ls=n("span"),g(qo.$$.fragment),sd=l(),Is=n("span"),rd=a("FNetForTokenClassification"),yr=l(),R=n("div"),g(xo.$$.fragment),ad=l(),Ds=n("p"),id=a(`FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ld=l(),Eo=n("p"),cd=a("This model is a PyTorch "),Po=n("a"),dd=a("torch.nn.Module"),pd=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hd=l(),L=n("div"),g(Mo.$$.fragment),fd=l(),Ie=n("p"),md=a("The "),cn=n("a"),ud=a("FNetForTokenClassification"),gd=a(" forward method, overrides the "),Os=n("code"),_d=a("__call__"),kd=a(" special method."),vd=l(),g(it.$$.fragment),Fd=l(),Bs=n("p"),bd=a("Example:"),wd=l(),g(jo.$$.fragment),$r=l(),De=n("h2"),lt=n("a"),Ws=n("span"),g(Co.$$.fragment),Td=l(),Us=n("span"),Nd=a("FNetForQuestionAnswering"),zr=l(),J=n("div"),g(So.$$.fragment),yd=l(),Oe=n("p"),$d=a(`FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Hs=n("code"),zd=a("span start logits"),qd=a(" and "),Qs=n("code"),xd=a("span end logits"),Ed=a(")."),Pd=l(),Ao=n("p"),Md=a("This model is a PyTorch "),Lo=n("a"),jd=a("torch.nn.Module"),Cd=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sd=l(),I=n("div"),g(Io.$$.fragment),Ad=l(),Be=n("p"),Ld=a("The "),dn=n("a"),Id=a("FNetForQuestionAnswering"),Dd=a(" forward method, overrides the "),Vs=n("code"),Od=a("__call__"),Bd=a(" special method."),Wd=l(),g(ct.$$.fragment),Ud=l(),Rs=n("p"),Hd=a("Example:"),Qd=l(),g(Do.$$.fragment),this.h()},l(t){const h=tf('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(o),N=c(t),m=s(t,"H1",{class:!0});var Oo=r(m);T=s(Oo,"A",{id:!0,class:!0,href:!0});var Js=r(T);y=s(Js,"SPAN",{});var Gs=r(y);_(w.$$.fragment,Gs),Gs.forEach(o),Js.forEach(o),u=c(Oo),$=s(Oo,"SPAN",{});var Ks=r($);Yr=i(Ks,"FNet"),Ks.forEach(o),Oo.forEach(o),er=c(t),me=s(t,"H2",{class:!0});var Bo=r(me);We=s(Bo,"A",{id:!0,class:!0,href:!0});var Xs=r(We);Mn=s(Xs,"SPAN",{});var Ys=r(Mn);_(bt.$$.fragment,Ys),Ys.forEach(o),Xs.forEach(o),Zr=c(Bo),jn=s(Bo,"SPAN",{});var Zs=r(jn);ea=i(Zs,"Overview"),Zs.forEach(o),Bo.forEach(o),tr=c(t),Ue=s(t,"P",{});var xr=r(Ue);ta=i(xr,"The FNet model was proposed in "),wt=s(xr,"A",{href:!0,rel:!0});var Kd=r(wt);oa=i(Kd,"FNet: Mixing Tokens with Fourier Transforms"),Kd.forEach(o),na=i(xr,` by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT model with a fourier transform which returns only the real parts of the transform. The model is significantly faster than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97% accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the paper is the following:`),xr.forEach(o),or=c(t),Wo=s(t,"P",{});var Xd=r(Wo);Cn=s(Xd,"EM",{});var Yd=r(Cn);sa=i(Yd,`We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that \u201Cmix\u201D input tokens. These linear mixers, along with standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. 
At longer input lengths, our FNet model is significantly faster: when compared to the \u201Cefficient\u201D Transformers on the Long Range Arena benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.`),Yd.forEach(o),Xd.forEach(o),nr=c(t),Uo=s(t,"P",{});var Zd=r(Uo);ra=i(Zd,"Tips on usage:"),Zd.forEach(o),sr=c(t),Ho=s(t,"UL",{});var ep=r(Ho);Sn=s(ep,"LI",{});var tp=r(Sn);aa=i(tp,`The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum sequence length for fine-tuning and inference.`),tp.forEach(o),ep.forEach(o),rr=c(t),oe=s(t,"P",{});var pn=r(oe);ia=i(pn,"This model was contributed by "),Tt=s(pn,"A",{href:!0,rel:!0});var op=r(Tt);la=i(op,"gchhablani"),op.forEach(o),ca=i(pn,". The original code can be found "),Nt=s(pn,"A",{href:!0,rel:!0});var np=r(Nt);da=i(np,"here"),np.forEach(o),pa=i(pn,"."),pn.forEach(o),ar=c(t),ue=s(t,"H2",{class:!0});var Er=r(ue);He=s(Er,"A",{id:!0,class:!0,href:!0});var sp=r(He);An=s(sp,"SPAN",{});var rp=r(An);_(yt.$$.fragment,rp),rp.forEach(o),sp.forEach(o),ha=c(Er),Ln=s(Er,"SPAN",{});var ap=r(Ln);fa=i(ap,"FNetConfig"),ap.forEach(o),Er.forEach(o),ir=c(t),P=s(t,"DIV",{class:!0});var re=r(P);_($t.$$.fragment,re),ma=c(re),ge=s(re,"P",{});var hn=r(ge);ua=i(hn,"This is the configuration class to store the configuration of a "),Qo=s(hn,"A",{href:!0});var ip=r(Qo);ga=i(ip,"FNetModel"),ip.forEach(o),_a=i(hn,`. It is used to instantiate an FNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FNet `),zt=s(hn,"A",{href:!0,rel:!0});var lp=r(zt);ka=i(lp,"fnet-base"),lp.forEach(o),va=i(hn," architecture."),hn.forEach(o),Fa=c(re),_e=s(re,"P",{});var fn=r(_e);ba=i(fn,"Configuration objects inherit from "),Vo=s(fn,"A",{href:!0});var cp=r(Vo);wa=i(cp,"PretrainedConfig"),cp.forEach(o),Ta=i(fn,` and can be used to control the model outputs. Read the documentation from `),Ro=s(fn,"A",{href:!0});var dp=r(Ro);Na=i(dp,"PretrainedConfig"),dp.forEach(o),ya=i(fn," for more information."),fn.forEach(o),$a=c(re),In=s(re,"P",{});var pp=r(In);za=i(pp,"Example:"),pp.forEach(o),qa=c(re),_(qt.$$.fragment,re),re.forEach(o),lr=c(t),ke=s(t,"H2",{class:!0});var Pr=r(ke);Qe=s(Pr,"A",{id:!0,class:!0,href:!0});var hp=r(Qe);Dn=s(hp,"SPAN",{});var fp=r(Dn);_(xt.$$.fragment,fp),fp.forEach(o),hp.forEach(o),xa=c(Pr),On=s(Pr,"SPAN",{});var mp=r(On);Ea=i(mp,"FNetTokenizer"),mp.forEach(o),Pr.forEach(o),cr=c(t),x=s(t,"DIV",{class:!0});var D=r(x);_(Et.$$.fragment,D),Pa=c(D),Z=s(D,"P",{});var dt=r(Z);Ma=i(dt,"Construct an FNet tokenizer. Adapted from "),Jo=s(dt,"A",{href:!0});var up=r(Jo);ja=i(up,"AlbertTokenizer"),up.forEach(o),Ca=i(dt,". Based on "),Pt=s(dt,"A",{href:!0,rel:!0});var gp=r(Pt);Sa=i(gp,"SentencePiece"),gp.forEach(o),Aa=i(dt,`. This tokenizer inherits from `),Go=s(dt,"A",{href:!0});var _p=r(Go);La=i(_p,"PreTrainedTokenizer"),_p.forEach(o),Ia=i(dt,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),dt.forEach(o),Da=c(D),ve=s(D,"P",{});var mn=r(ve);Oa=i(mn,`Attributes: sp_model (`),Bn=s(mn,"CODE",{});var kp=r(Bn);Ba=i(kp,"SentencePieceProcessor"),kp.forEach(o),Wa=i(mn,`): The `),Wn=s(mn,"EM",{});var vp=r(Wn);Ua=i(vp,"SentencePiece"),vp.forEach(o),Ha=i(mn," processor that is used for every conversion (string, tokens and IDs)."),mn.forEach(o),Qa=c(D),ne=s(D,"DIV",{class:!0});var un=r(ne);_(Mt.$$.fragment,un),Va=c(un),Un=s(un,"P",{});var Fp=r(Un);Ra=i(Fp,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),Fp.forEach(o),Ja=c(un),jt=s(un,"UL",{});var Mr=r(jt);Ko=s(Mr,"LI",{});var Vd=r(Ko);Ga=i(Vd,"single sequence: "),Hn=s(Vd,"CODE",{});var bp=r(Hn);Ka=i(bp,"[CLS] X [SEP]"),bp.forEach(o),Vd.forEach(o),Xa=c(Mr),Xo=s(Mr,"LI",{});var Rd=r(Xo);Ya=i(Rd,"pair of sequences: "),Qn=s(Rd,"CODE",{});var wp=r(Qn);Za=i(wp,"[CLS] A [SEP] B [SEP]"),wp.forEach(o),Rd.forEach(o),Mr.forEach(o),un.forEach(o),ei=c(D),Ve=s(D,"DIV",{class:!0});var jr=r(Ve);_(Ct.$$.fragment,jr),ti=c(jr),St=s(jr,"P",{});var Cr=r(St);oi=i(Cr,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Vn=s(Cr,"CODE",{});var Tp=r(Vn);ni=i(Tp,"prepare_for_model"),Tp.forEach(o),si=i(Cr," method."),Cr.forEach(o),jr.forEach(o),ri=c(D),K=s(D,"DIV",{class:!0});var pt=r(K);_(At.$$.fragment,pt),ai=c(pt),Rn=s(pt,"P",{});var Np=r(Rn);ii=i(Np,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format: :`),Np.forEach(o),li=c(pt),_(Lt.$$.fragment,pt),ci=c(pt),Fe=s(pt,"P",{});var gn=r(Fe);di=i(gn,"If "),Jn=s(gn,"CODE",{});var yp=r(Jn);pi=i(yp,"token_ids_1"),yp.forEach(o),hi=i(gn," is "),Gn=s(gn,"CODE",{});var $p=r(Gn);fi=i($p,"None"),$p.forEach(o),mi=i(gn,", this method only returns the first portion of the mask (0s)."),gn.forEach(o),pt.forEach(o),ui=c(D),Kn=s(D,"DIV",{class:!0}),r(Kn).forEach(o),D.forEach(o),dr=c(t),be=s(t,"H2",{class:!0});var Sr=r(be);Re=s(Sr,"A",{id:!0,class:!0,href:!0});var zp=r(Re);Xn=s(zp,"SPAN",{});var qp=r(Xn);_(It.$$.fragment,qp),qp.forEach(o),zp.forEach(o),gi=c(Sr),Yn=s(Sr,"SPAN",{});var xp=r(Yn);_i=i(xp,"FNetTokenizerFast"),xp.forEach(o),Sr.forEach(o),pr=c(t),B=s(t,"DIV",{class:!0});var ht=r(B);_(Dt.$$.fragment,ht),ki=c(ht),W=s(ht,"P",{});var ae=r(W);vi=i(ae,"Construct a \u201Cfast\u201D FNetTokenizer (backed by HuggingFace\u2019s "),Zn=s(ae,"EM",{});var Ep=r(Zn);Fi=i(Ep,"tokenizers"),Ep.forEach(o),bi=i(ae,` library). Adapted from `),Yo=s(ae,"A",{href:!0});var Pp=r(Yo);wi=i(Pp,"AlbertTokenizerFast"),Pp.forEach(o),Ti=i(ae,". Based on "),Ot=s(ae,"A",{href:!0,rel:!0});var Mp=r(Ot);Ni=i(Mp,"Unigram"),Mp.forEach(o),yi=i(ae,`. This tokenizer inherits from `),Zo=s(ae,"A",{href:!0});var jp=r(Zo);$i=i(jp,"PreTrainedTokenizerFast"),jp.forEach(o),zi=i(ae,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),ae.forEach(o),qi=c(ht),se=s(ht,"DIV",{class:!0});var _n=r(se);_(Bt.$$.fragment,_n),xi=c(_n),es=s(_n,"P",{});var Cp=r(es);Ei=i(Cp,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An FNet sequence has the following format:`),Cp.forEach(o),Pi=c(_n),Wt=s(_n,"UL",{});var Ar=r(Wt);en=s(Ar,"LI",{});var Jd=r(en);Mi=i(Jd,"single sequence: "),ts=s(Jd,"CODE",{});var Sp=r(ts);ji=i(Sp,"[CLS] X [SEP]"),Sp.forEach(o),Jd.forEach(o),Ci=c(Ar),tn=s(Ar,"LI",{});var Gd=r(tn);Si=i(Gd,"pair of sequences: "),os=s(Gd,"CODE",{});var Ap=r(os);Ai=i(Ap,"[CLS] A [SEP] B [SEP]"),Ap.forEach(o),Gd.forEach(o),Ar.forEach(o),_n.forEach(o),Li=c(ht),X=s(ht,"DIV",{class:!0});var ft=r(X);_(Ut.$$.fragment,ft),Ii=c(ft),ns=s(ft,"P",{});var Lp=r(ns);Di=i(Lp,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format:`),Lp.forEach(o),Oi=c(ft),_(Ht.$$.fragment,ft),Bi=c(ft),ss=s(ft,"P",{});var Ip=r(ss);Wi=i(Ip,"if token_ids_1 is None, only returns the first portion of the mask (0s)."),Ip.forEach(o),ft.forEach(o),ht.forEach(o),hr=c(t),we=s(t,"H2",{class:!0});var Lr=r(we);Je=s(Lr,"A",{id:!0,class:!0,href:!0});var Dp=r(Je);rs=s(Dp,"SPAN",{});var Op=r(rs);_(Qt.$$.fragment,Op),Op.forEach(o),Dp.forEach(o),Ui=c(Lr),as=s(Lr,"SPAN",{});var Bp=r(as);Hi=i(Bp,"FNetModel"),Bp.forEach(o),Lr.forEach(o),fr=c(t),U=s(t,"DIV",{class:!0});var mt=r(U);_(Vt.$$.fragment,mt),Qi=c(mt),Rt=s(mt,"P",{});var Ir=r(Rt);Vi=i(Ir,`The bare FNet Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Jt=s(Ir,"A",{href:!0,rel:!0});var Wp=r(Jt);Ri=i(Wp,"torch.nn.Module"),Wp.forEach(o),Ji=i(Ir,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ir.forEach(o),Gi=c(mt),Gt=s(mt,"P",{});var Dr=r(Gt);Ki=i(Dr,"The model can behave as an encoder, following the architecture described in "),is=s(Dr,"CODE",{});var Up=r(is);Xi=i(Up,"FNet: Mixing Tokens with Fourier Transforms <https://arxiv.org/abs/2105.03824>"),Up.forEach(o),Yi=i(Dr,`__ by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.`),Dr.forEach(o),Zi=c(mt),M=s(mt,"DIV",{class:!0});var ie=r(M);_(Kt.$$.fragment,ie),el=c(ie),Te=s(ie,"P",{});var kn=r(Te);tl=i(kn,"The "),on=s(kn,"A",{href:!0});var Hp=r(on);ol=i(Hp,"FNetModel"),Hp.forEach(o),nl=i(kn," forward method, overrides the "),ls=s(kn,"CODE",{});var Qp=r(ls);sl=i(Qp,"__call__"),Qp.forEach(o),rl=i(kn," special method."),kn.forEach(o),al=c(ie),_(Ge.$$.fragment,ie),il=c(ie),cs=s(ie,"P",{});var Vp=r(cs);ll=i(Vp,"Example:"),Vp.forEach(o),cl=c(ie),_(Xt.$$.fragment,ie),ie.forEach(o),mt.forEach(o),mr=c(t),Ne=s(t,"H2",{class:!0});var Or=r(Ne);Ke=s(Or,"A",{id:!0,class:!0,href:!0});var Rp=r(Ke);ds=s(Rp,"SPAN",{});var Jp=r(ds);_(Yt.$$.fragment,Jp),Jp.forEach(o),Rp.forEach(o),dl=c(Or),ps=s(Or,"SPAN",{});var Gp=r(ps);pl=i(Gp,"FNetForPreTraining"),Gp.forEach(o),Or.forEach(o),ur=c(t),H=s(t,"DIV",{class:!0});var ut=r(H);_(Zt.$$.fragment,ut),hl=c(ut),ye=s(ut,"P",{});var vn=r(ye);fl=i(vn,"FNet Model with two heads on top as done during the pretraining: a "),hs=s(vn,"CODE",{});var Kp=r(hs);ml=i(Kp,"masked language modeling"),Kp.forEach(o),ul=i(vn," head and a "),fs=s(vn,"CODE",{});var Xp=r(fs);gl=i(Xp,"next sentence prediction (classification)"),Xp.forEach(o),_l=i(vn," head."),vn.forEach(o),kl=c(ut),eo=s(ut,"P",{});var Br=r(eo);vl=i(Br,"This model is a PyTorch "),to=s(Br,"A",{href:!0,rel:!0});var Yp=r(to);Fl=i(Yp,"torch.nn.Module"),Yp.forEach(o),bl=i(Br,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Br.forEach(o),wl=c(ut),j=s(ut,"DIV",{class:!0});var le=r(j);_(oo.$$.fragment,le),Tl=c(le),$e=s(le,"P",{});var Fn=r($e);Nl=i(Fn,"The "),nn=s(Fn,"A",{href:!0});var Zp=r(nn);yl=i(Zp,"FNetForPreTraining"),Zp.forEach(o),$l=i(Fn," forward method, overrides the "),ms=s(Fn,"CODE",{});var eh=r(ms);zl=i(eh,"__call__"),eh.forEach(o),ql=i(Fn," special method."),Fn.forEach(o),xl=c(le),_(Xe.$$.fragment,le),El=c(le),us=s(le,"P",{});var th=r(us);Pl=i(th,"Example:"),th.forEach(o),Ml=c(le),_(no.$$.fragment,le),le.forEach(o),ut.forEach(o),gr=c(t),ze=s(t,"H2",{class:!0});var Wr=r(ze);Ye=s(Wr,"A",{id:!0,class:!0,href:!0});var oh=r(Ye);gs=s(oh,"SPAN",{});var nh=r(gs);_(so.$$.fragment,nh),nh.forEach(o),oh.forEach(o),jl=c(Wr),_s=s(Wr,"SPAN",{});var sh=r(_s);Cl=i(sh,"FNetForMaskedLM"),sh.forEach(o),Wr.forEach(o),_r=c(t),ee=s(t,"DIV",{class:!0});var bn=r(ee);_(ro.$$.fragment,bn),Sl=c(bn),qe=s(bn,"P",{});var wn=r(qe);Al=i(wn,"FNet Model with a "),ks=s(wn,"CODE",{});var rh=r(ks);Ll=i(rh,"language modeling"),rh.forEach(o),Il=i(wn,` head on top. This model is a PyTorch `),ao=s(wn,"A",{href:!0,rel:!0});var ah=r(ao);Dl=i(ah,"torch.nn.Module"),ah.forEach(o),Ol=i(wn,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wn.forEach(o),Bl=c(bn),C=s(bn,"DIV",{class:!0});var ce=r(C);_(io.$$.fragment,ce),Wl=c(ce),xe=s(ce,"P",{});var Tn=r(xe);Ul=i(Tn,"The "),sn=s(Tn,"A",{href:!0});var ih=r(sn);Hl=i(ih,"FNetForMaskedLM"),ih.forEach(o),Ql=i(Tn," forward method, overrides the "),vs=s(Tn,"CODE",{});var lh=r(vs);Vl=i(lh,"__call__"),lh.forEach(o),Rl=i(Tn," special method."),Tn.forEach(o),Jl=c(ce),_(Ze.$$.fragment,ce),Gl=c(ce),Fs=s(ce,"P",{});var ch=r(Fs);Kl=i(ch,"Example:"),ch.forEach(o),Xl=c(ce),_(lo.$$.fragment,ce),ce.forEach(o),bn.forEach(o),kr=c(t),Ee=s(t,"H2",{class:!0});var Ur=r(Ee);et=s(Ur,"A",{id:!0,class:!0,href:!0});var dh=r(et);bs=s(dh,"SPAN",{});var ph=r(bs);_(co.$$.fragment,ph),ph.forEach(o),dh.forEach(o),Yl=c(Ur),ws=s(Ur,"SPAN",{});var hh=r(ws);Zl=i(hh,"FNetForNextSentencePrediction"),hh.forEach(o),Ur.forEach(o),vr=c(t),te=s(t,"DIV",{class:!0});var Nn=r(te);_(po.$$.fragment,Nn),ec=c(Nn),Pe=s(Nn,"P",{});var yn=r(Pe);tc=i(yn,"FNet Model with a "),Ts=s(yn,"CODE",{});var fh=r(Ts);oc=i(fh,"next sentence prediction (classification)"),fh.forEach(o),nc=i(yn,` head on top. This model is a PyTorch `),ho=s(yn,"A",{href:!0,rel:!0});var mh=r(ho);sc=i(mh,"torch.nn.Module"),mh.forEach(o),rc=i(yn,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yn.forEach(o),ac=c(Nn),S=s(Nn,"DIV",{class:!0});var de=r(S);_(fo.$$.fragment,de),ic=c(de),Me=s(de,"P",{});var $n=r(Me);lc=i($n,"The "),rn=s($n,"A",{href:!0});var uh=r(rn);cc=i(uh,"FNetForNextSentencePrediction"),uh.forEach(o),dc=i($n," forward method, overrides the "),Ns=s($n,"CODE",{});var gh=r(Ns);pc=i(gh,"__call__"),gh.forEach(o),hc=i($n," special method."),$n.forEach(o),fc=c(de),_(tt.$$.fragment,de),mc=c(de),ys=s(de,"P",{});var _h=r(ys);uc=i(_h,"Example:"),_h.forEach(o),gc=c(de),_(mo.$$.fragment,de),de.forEach(o),Nn.forEach(o),Fr=c(t),je=s(t,"H2",{class:!0});var Hr=r(je);ot=s(Hr,"A",{id:!0,class:!0,href:!0});var kh=r(ot);$s=s(kh,"SPAN",{});var vh=r($s);_(uo.$$.fragment,vh),vh.forEach(o),kh.forEach(o),_c=c(Hr),zs=s(Hr,"SPAN",{});var Fh=r(zs);kc=i(Fh,"FNetForSequenceClassification"),Fh.forEach(o),Hr.forEach(o),br=c(t),Q=s(t,"DIV",{class:!0});var gt=r(Q);_(go.$$.fragment,gt),vc=c(gt),qs=s(gt,"P",{});var bh=r(qs);Fc=i(bh,`FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),bh.forEach(o),bc=c(gt),_o=s(gt,"P",{});var Qr=r(_o);wc=i(Qr,"This model is a PyTorch "),ko=s(Qr,"A",{href:!0,rel:!0});var wh=r(ko);Tc=i(wh,"torch.nn.Module"),wh.forEach(o),Nc=i(Qr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qr.forEach(o),yc=c(gt),E=s(gt,"DIV",{class:!0});var O=r(E);_(vo.$$.fragment,O),$c=c(O),Ce=s(O,"P",{});var zn=r(Ce);zc=i(zn,"The "),an=s(zn,"A",{href:!0});var Th=r(an);qc=i(Th,"FNetForSequenceClassification"),Th.forEach(o),xc=i(zn," forward method, overrides the "),xs=s(zn,"CODE",{});var Nh=r(xs);Ec=i(Nh,"__call__"),Nh.forEach(o),Pc=i(zn," special method."),zn.forEach(o),Mc=c(O),_(nt.$$.fragment,O),jc=c(O),Es=s(O,"P",{});var yh=r(Es);Cc=i(yh,"Example of single-label classification:"),yh.forEach(o),Sc=c(O),_(Fo.$$.fragment,O),Ac=c(O),Ps=s(O,"P",{});var $h=r(Ps);Lc=i($h,"Example of multi-label classification:"),$h.forEach(o),Ic=c(O),_(bo.$$.fragment,O),O.forEach(o),gt.forEach(o),wr=c(t),Se=s(t,"H2",{class:!0});var Vr=r(Se);st=s(Vr,"A",{id:!0,class:!0,href:!0});var zh=r(st);Ms=s(zh,"SPAN",{});var qh=r(Ms);_(wo.$$.fragment,qh),qh.forEach(o),zh.forEach(o),Dc=c(Vr),js=s(Vr,"SPAN",{});var xh=r(js);Oc=i(xh,"FNetForMultipleChoice"),xh.forEach(o),Vr.forEach(o),Tr=c(t),V=s(t,"DIV",{class:!0});var _t=r(V);_(To.$$.fragment,_t),Bc=c(_t),Cs=s(_t,"P",{});var Eh=r(Cs);Wc=i(Eh,`FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Eh.forEach(o),Uc=c(_t),No=s(_t,"P",{});var Rr=r(No);Hc=i(Rr,"This model is a PyTorch "),yo=s(Rr,"A",{href:!0,rel:!0});var Ph=r(yo);Qc=i(Ph,"torch.nn.Module"),Ph.forEach(o),Vc=i(Rr,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rr.forEach(o),Rc=c(_t),A=s(_t,"DIV",{class:!0});var pe=r(A);_($o.$$.fragment,pe),Jc=c(pe),Ae=s(pe,"P",{});var qn=r(Ae);Gc=i(qn,"The "),ln=s(qn,"A",{href:!0});var Mh=r(ln);Kc=i(Mh,"FNetForMultipleChoice"),Mh.forEach(o),Xc=i(qn," forward method, overrides the "),Ss=s(qn,"CODE",{});var jh=r(Ss);Yc=i(jh,"__call__"),jh.forEach(o),Zc=i(qn," special method."),qn.forEach(o),ed=c(pe),_(rt.$$.fragment,pe),td=c(pe),As=s(pe,"P",{});var Ch=r(As);od=i(Ch,"Example:"),Ch.forEach(o),nd=c(pe),_(zo.$$.fragment,pe),pe.forEach(o),_t.forEach(o),Nr=c(t),Le=s(t,"H2",{class:!0});var Jr=r(Le);at=s(Jr,"A",{id:!0,class:!0,href:!0});var Sh=r(at);Ls=s(Sh,"SPAN",{});var Ah=r(Ls);_(qo.$$.fragment,Ah),Ah.forEach(o),Sh.forEach(o),sd=c(Jr),Is=s(Jr,"SPAN",{});var Lh=r(Is);rd=i(Lh,"FNetForTokenClassification"),Lh.forEach(o),Jr.forEach(o),yr=c(t),R=s(t,"DIV",{class:!0});var kt=r(R);_(xo.$$.fragment,kt),ad=c(kt),Ds=s(kt,"P",{});var Ih=r(Ds);id=i(Ih,`FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ih.forEach(o),ld=c(kt),Eo=s(kt,"P",{});var Gr=r(Eo);cd=i(Gr,"This model is a PyTorch "),Po=s(Gr,"A",{href:!0,rel:!0});var Dh=r(Po);dd=i(Dh,"torch.nn.Module"),Dh.forEach(o),pd=i(Gr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gr.forEach(o),hd=c(kt),L=s(kt,"DIV",{class:!0});var he=r(L);_(Mo.$$.fragment,he),fd=c(he),Ie=s(he,"P",{});var xn=r(Ie);md=i(xn,"The "),cn=s(xn,"A",{href:!0});var Oh=r(cn);ud=i(Oh,"FNetForTokenClassification"),Oh.forEach(o),gd=i(xn," forward method, overrides the "),Os=s(xn,"CODE",{});var Bh=r(Os);_d=i(Bh,"__call__"),Bh.forEach(o),kd=i(xn," special method."),xn.forEach(o),vd=c(he),_(it.$$.fragment,he),Fd=c(he),Bs=s(he,"P",{});var Wh=r(Bs);bd=i(Wh,"Example:"),Wh.forEach(o),wd=c(he),_(jo.$$.fragment,he),he.forEach(o),kt.forEach(o),$r=c(t),De=s(t,"H2",{class:!0});var Kr=r(De);lt=s(Kr,"A",{id:!0,class:!0,href:!0});var Uh=r(lt);Ws=s(Uh,"SPAN",{});var Hh=r(Ws);_(Co.$$.fragment,Hh),Hh.forEach(o),Uh.forEach(o),Td=c(Kr),Us=s(Kr,"SPAN",{});var Qh=r(Us);Nd=i(Qh,"FNetForQuestionAnswering"),Qh.forEach(o),Kr.forEach(o),zr=c(t),J=s(t,"DIV",{class:!0});var vt=r(J);_(So.$$.fragment,vt),yd=c(vt),Oe=s(vt,"P",{});var En=r(Oe);$d=i(En,`FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Hs=s(En,"CODE",{});var Vh=r(Hs);zd=i(Vh,"span start logits"),Vh.forEach(o),qd=i(En," and "),Qs=s(En,"CODE",{});var Rh=r(Qs);xd=i(Rh,"span end logits"),Rh.forEach(o),Ed=i(En,")."),En.forEach(o),Pd=c(vt),Ao=s(vt,"P",{});var Xr=r(Ao);Md=i(Xr,"This model is a PyTorch "),Lo=s(Xr,"A",{href:!0,rel:!0});var Jh=r(Lo);jd=i(Jh,"torch.nn.Module"),Jh.forEach(o),Cd=i(Xr,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xr.forEach(o),Sd=c(vt),I=s(vt,"DIV",{class:!0});var fe=r(I);_(Io.$$.fragment,fe),Ad=c(fe),Be=s(fe,"P",{});var Pn=r(Be);Ld=i(Pn,"The "),dn=s(Pn,"A",{href:!0});var Gh=r(dn);Id=i(Gh,"FNetForQuestionAnswering"),Gh.forEach(o),Dd=i(Pn," forward method, overrides the "),Vs=s(Pn,"CODE",{});var Kh=r(Vs);Od=i(Kh,"__call__"),Kh.forEach(o),Bd=i(Pn," special method."),Pn.forEach(o),Wd=c(fe),_(ct.$$.fragment,fe),Ud=c(fe),Rs=s(fe,"P",{});var Xh=r(Rs);Hd=i(Xh,"Example:"),Xh.forEach(o),Qd=c(fe),_(Do.$$.fragment,fe),fe.forEach(o),vt.forEach(o),this.h()},h(){d(p,"name","hf:doc:metadata"),d(p,"content",JSON.stringify(hf)),d(T,"id","fnet"),d(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(T,"href","#fnet"),d(m,"class","relative group"),d(We,"id","overview"),d(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(We,"href","#overview"),d(me,"class","relative group"),d(wt,"href","https://arxiv.org/abs/2105.03824"),d(wt,"rel","nofollow"),d(Tt,"href","https://huggingface.co/gchhablani"),d(Tt,"rel","nofollow"),d(Nt,"href","https://github.com/google-research/google-research/tree/master/f_net"),d(Nt,"rel","nofollow"),d(He,"id","transformers.FNetConfig"),d(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(He,"href","#transformers.FNetConfig"),d(ue,"class","relative group"),d(Qo,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetModel"),d(zt,"href","https://huggingface.co/google/fnet-base"),d(zt,"rel","nofollow"),d(Vo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(Ro,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(P,"class","docstring"),d(Qe,"id","transformers.FNetTokenizer"),d(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Qe,"href","#transformers.FNetTokenizer"),d(ke,"class","relative group"),d(Jo,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizer"),d(Pt,"href","https://github.com/google/sentencepiece"),d(Pt,"rel","nofollow"),d(Go,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(ne,"class","docstring"),d(Ve,"class","docstring"),d(K,"class","docstring"),d(Kn,"class","docstring"),d(x,"class","docstring"),d(Re,"id","transformers.FNetTokenizerFast"),d(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Re,"href","#transformers.FNetTokenizerFast"),d(be,"class","relative 
group"),d(Yo,"href","/docs/transformers/v4.15.0/en/model_doc/albert#transformers.AlbertTokenizerFast"),d(Ot,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),d(Ot,"rel","nofollow"),d(Zo,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),d(se,"class","docstring"),d(X,"class","docstring"),d(B,"class","docstring"),d(Je,"id","transformers.FNetModel"),d(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Je,"href","#transformers.FNetModel"),d(we,"class","relative group"),d(Jt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Jt,"rel","nofollow"),d(on,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetModel"),d(M,"class","docstring"),d(U,"class","docstring"),d(Ke,"id","transformers.FNetForPreTraining"),d(Ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ke,"href","#transformers.FNetForPreTraining"),d(Ne,"class","relative group"),d(to,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(to,"rel","nofollow"),d(nn,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForPreTraining"),d(j,"class","docstring"),d(H,"class","docstring"),d(Ye,"id","transformers.FNetForMaskedLM"),d(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ye,"href","#transformers.FNetForMaskedLM"),d(ze,"class","relative group"),d(ao,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(ao,"rel","nofollow"),d(sn,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForMaskedLM"),d(C,"class","docstring"),d(ee,"class","docstring"),d(et,"id","transformers.FNetForNextSentencePrediction"),d(et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(et,"href","#transformers.FNetForNextSentencePrediction"),d(Ee,"class","relative group"),d(ho,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(ho,"rel","nofollow"),d(rn,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForNextSentencePrediction"),d(S,"class","docstring"),d(te,"class","docstring"),d(ot,"id","transformers.FNetForSequenceClassification"),d(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ot,"href","#transformers.FNetForSequenceClassification"),d(je,"class","relative group"),d(ko,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(ko,"rel","nofollow"),d(an,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForSequenceClassification"),d(E,"class","docstring"),d(Q,"class","docstring"),d(st,"id","transformers.FNetForMultipleChoice"),d(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(st,"href","#transformers.FNetForMultipleChoice"),d(Se,"class","relative 
group"),d(yo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(yo,"rel","nofollow"),d(ln,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForMultipleChoice"),d(A,"class","docstring"),d(V,"class","docstring"),d(at,"id","transformers.FNetForTokenClassification"),d(at,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(at,"href","#transformers.FNetForTokenClassification"),d(Le,"class","relative group"),d(Po,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Po,"rel","nofollow"),d(cn,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForTokenClassification"),d(L,"class","docstring"),d(R,"class","docstring"),d(lt,"id","transformers.FNetForQuestionAnswering"),d(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(lt,"href","#transformers.FNetForQuestionAnswering"),d(De,"class","relative group"),d(Lo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Lo,"rel","nofollow"),d(dn,"href","/docs/transformers/v4.15.0/en/model_doc/fnet#transformers.FNetForQuestionAnswering"),d(I,"class","docstring"),d(J,"class","docstring")},m(t,h){e(document.head,p),f(t,N,h),f(t,m,h),e(m,T),e(T,y),k(w,y,null),e(m,u),e(m,$),e($,Yr),f(t,er,h),f(t,me,h),e(me,We),e(We,Mn),k(bt,Mn,null),e(me,Zr),e(me,jn),e(jn,ea),f(t,tr,h),f(t,Ue,h),e(Ue,ta),e(Ue,wt),e(wt,oa),e(Ue,na),f(t,or,h),f(t,Wo,h),e(Wo,Cn),e(Cn,sa),f(t,nr,h),f(t,Uo,h),e(Uo,ra),f(t,sr,h),f(t,Ho,h),e(Ho,Sn),e(Sn,aa),f(t,rr,h),f(t,oe,h),e(oe,ia),e(oe,Tt),e(Tt,la),e(oe,ca),e(oe,Nt),e(Nt,da),e(oe,pa),f(t,ar,h),f(t,ue,h),e(ue,He),e(He,An),k(yt,An,null),e(ue,ha),e(ue,Ln),e(Ln,fa),f(t,ir,h),f(t,P,h),k($t,P,null),e(P,ma),e(P,ge),e(ge,ua),e(ge,Qo),e(Qo,ga),e(ge,_a),e(ge,zt),e(zt,ka),e(ge,va),e(P,Fa),e(P,_e),e(_e,ba),e(_e,Vo),e(Vo,wa),e(_e,Ta),e(_e,Ro),e(Ro,Na),e(_e,ya),e(P,$a),e(P,In),e(In,za),e(P,qa),k(qt,P,null),f(t,lr,h),f(t,ke,h),e(ke,Qe),e(Qe,Dn),k(xt,Dn,null),e(ke,xa),e(ke,On),e(On,Ea),f(t,cr,h),f(t,x,h),k(Et,x,null),e(x,Pa),e(x,Z),e(Z,Ma),e(Z,Jo),e(Jo,ja),e(Z,Ca),e(Z,Pt),e(Pt,Sa),e(Z,Aa),e(Z,Go),e(Go,La),e(Z,Ia),e(x,Da),e(x,ve),e(ve,Oa),e(ve,Bn),e(Bn,Ba),e(ve,Wa),e(ve,Wn),e(Wn,Ua),e(ve,Ha),e(x,Qa),e(x,ne),k(Mt,ne,null),e(ne,Va),e(ne,Un),e(Un,Ra),e(ne,Ja),e(ne,jt),e(jt,Ko),e(Ko,Ga),e(Ko,Hn),e(Hn,Ka),e(jt,Xa),e(jt,Xo),e(Xo,Ya),e(Xo,Qn),e(Qn,Za),e(x,ei),e(x,Ve),k(Ct,Ve,null),e(Ve,ti),e(Ve,St),e(St,oi),e(St,Vn),e(Vn,ni),e(St,si),e(x,ri),e(x,K),k(At,K,null),e(K,ai),e(K,Rn),e(Rn,ii),e(K,li),k(Lt,K,null),e(K,ci),e(K,Fe),e(Fe,di),e(Fe,Jn),e(Jn,pi),e(Fe,hi),e(Fe,Gn),e(Gn,fi),e(Fe,mi),e(x,ui),e(x,Kn),f(t,dr,h),f(t,be,h),e(be,Re),e(Re,Xn),k(It,Xn,null),e(be,gi),e(be,Yn),e(Yn,_i),f(t,pr,h),f(t,B,h),k(Dt,B,null),e(B,ki),e(B,W),e(W,vi),e(W,Zn),e(Zn,Fi),e(W,bi),e(W,Yo),e(Yo,wi),e(W,Ti),e(W,Ot),e(Ot,Ni),e(W,yi),e(W,Zo),e(Zo,$i),e(W,zi),e(B,qi),e(B,se),k(Bt,se,null),e(se,xi),e(se,es),e(es,Ei),e(se,Pi),e(se,Wt),e(Wt,en),e(en,Mi),e(en,ts),e(ts,ji),e(Wt,Ci),e(Wt,tn),e(tn,Si),e(tn,os),e(os,Ai),e(B,Li),e(B,X),k(Ut,X,null),e(X,Ii),e(X,ns),e(ns,Di),e(X,Oi),k(Ht,X,null),e(X,Bi),e(X,ss),e(ss,Wi),f(t,hr,h),f(t,we,h),e(we,Je),e(Je,rs),k(Qt,rs,null),e(we,Ui),e(we,as),e(as,Hi),f(t,fr,h),f(t,U,h),k(Vt,U,null),e(U,Qi),e(U,Rt),e(Rt,Vi),e(Rt,Jt),e(Jt,Ri),e(Rt,Ji),e(U,Gi),e(U,Gt),e(Gt,Ki),e(Gt,is),e(is,Xi),e(Gt,Yi),e(U,Zi),e(U,M),k(Kt,M,null),e(M,el),e(M,Te),e(Te,tl),e
(Te,on),e(on,ol),e(Te,nl),e(Te,ls),e(ls,sl),e(Te,rl),e(M,al),k(Ge,M,null),e(M,il),e(M,cs),e(cs,ll),e(M,cl),k(Xt,M,null),f(t,mr,h),f(t,Ne,h),e(Ne,Ke),e(Ke,ds),k(Yt,ds,null),e(Ne,dl),e(Ne,ps),e(ps,pl),f(t,ur,h),f(t,H,h),k(Zt,H,null),e(H,hl),e(H,ye),e(ye,fl),e(ye,hs),e(hs,ml),e(ye,ul),e(ye,fs),e(fs,gl),e(ye,_l),e(H,kl),e(H,eo),e(eo,vl),e(eo,to),e(to,Fl),e(eo,bl),e(H,wl),e(H,j),k(oo,j,null),e(j,Tl),e(j,$e),e($e,Nl),e($e,nn),e(nn,yl),e($e,$l),e($e,ms),e(ms,zl),e($e,ql),e(j,xl),k(Xe,j,null),e(j,El),e(j,us),e(us,Pl),e(j,Ml),k(no,j,null),f(t,gr,h),f(t,ze,h),e(ze,Ye),e(Ye,gs),k(so,gs,null),e(ze,jl),e(ze,_s),e(_s,Cl),f(t,_r,h),f(t,ee,h),k(ro,ee,null),e(ee,Sl),e(ee,qe),e(qe,Al),e(qe,ks),e(ks,Ll),e(qe,Il),e(qe,ao),e(ao,Dl),e(qe,Ol),e(ee,Bl),e(ee,C),k(io,C,null),e(C,Wl),e(C,xe),e(xe,Ul),e(xe,sn),e(sn,Hl),e(xe,Ql),e(xe,vs),e(vs,Vl),e(xe,Rl),e(C,Jl),k(Ze,C,null),e(C,Gl),e(C,Fs),e(Fs,Kl),e(C,Xl),k(lo,C,null),f(t,kr,h),f(t,Ee,h),e(Ee,et),e(et,bs),k(co,bs,null),e(Ee,Yl),e(Ee,ws),e(ws,Zl),f(t,vr,h),f(t,te,h),k(po,te,null),e(te,ec),e(te,Pe),e(Pe,tc),e(Pe,Ts),e(Ts,oc),e(Pe,nc),e(Pe,ho),e(ho,sc),e(Pe,rc),e(te,ac),e(te,S),k(fo,S,null),e(S,ic),e(S,Me),e(Me,lc),e(Me,rn),e(rn,cc),e(Me,dc),e(Me,Ns),e(Ns,pc),e(Me,hc),e(S,fc),k(tt,S,null),e(S,mc),e(S,ys),e(ys,uc),e(S,gc),k(mo,S,null),f(t,Fr,h),f(t,je,h),e(je,ot),e(ot,$s),k(uo,$s,null),e(je,_c),e(je,zs),e(zs,kc),f(t,br,h),f(t,Q,h),k(go,Q,null),e(Q,vc),e(Q,qs),e(qs,Fc),e(Q,bc),e(Q,_o),e(_o,wc),e(_o,ko),e(ko,Tc),e(_o,Nc),e(Q,yc),e(Q,E),k(vo,E,null),e(E,$c),e(E,Ce),e(Ce,zc),e(Ce,an),e(an,qc),e(Ce,xc),e(Ce,xs),e(xs,Ec),e(Ce,Pc),e(E,Mc),k(nt,E,null),e(E,jc),e(E,Es),e(Es,Cc),e(E,Sc),k(Fo,E,null),e(E,Ac),e(E,Ps),e(Ps,Lc),e(E,Ic),k(bo,E,null),f(t,wr,h),f(t,Se,h),e(Se,st),e(st,Ms),k(wo,Ms,null),e(Se,Dc),e(Se,js),e(js,Oc),f(t,Tr,h),f(t,V,h),k(To,V,null),e(V,Bc),e(V,Cs),e(Cs,Wc),e(V,Uc),e(V,No),e(No,Hc),e(No,yo),e(yo,Qc),e(No,Vc),e(V,Rc),e(V,A),k($o,A,null),e(A,Jc),e(A,Ae),e(Ae,Gc),e(Ae,ln),e(ln,Kc),e(Ae,Xc),e(Ae,Ss),e(Ss,Yc),e(Ae,Zc),e(A,ed),k(rt,A,null),e(A,td),e(A,As),e(As,od),e(A,nd),k(zo,A,null),f(t,Nr,h),f(t,Le,h),e(Le,at),e(at,Ls),k(qo,Ls,null),e(Le,sd),e(Le,Is),e(Is,rd),f(t,yr,h),f(t,R,h),k(xo,R,null),e(R,ad),e(R,Ds),e(Ds,id),e(R,ld),e(R,Eo),e(Eo,cd),e(Eo,Po),e(Po,dd),e(Eo,pd),e(R,hd),e(R,L),k(Mo,L,null),e(L,fd),e(L,Ie),e(Ie,md),e(Ie,cn),e(cn,ud),e(Ie,gd),e(Ie,Os),e(Os,_d),e(Ie,kd),e(L,vd),k(it,L,null),e(L,Fd),e(L,Bs),e(Bs,bd),e(L,wd),k(jo,L,null),f(t,$r,h),f(t,De,h),e(De,lt),e(lt,Ws),k(Co,Ws,null),e(De,Td),e(De,Us),e(Us,Nd),f(t,zr,h),f(t,J,h),k(So,J,null),e(J,yd),e(J,Oe),e(Oe,$d),e(Oe,Hs),e(Hs,zd),e(Oe,qd),e(Oe,Qs),e(Qs,xd),e(Oe,Ed),e(J,Pd),e(J,Ao),e(Ao,Md),e(Ao,Lo),e(Lo,jd),e(Ao,Cd),e(J,Sd),e(J,I),k(Io,I,null),e(I,Ad),e(I,Be),e(Be,Ld),e(Be,dn),e(dn,Id),e(Be,Dd),e(Be,Vs),e(Vs,Od),e(Be,Bd),e(I,Wd),k(ct,I,null),e(I,Ud),e(I,Rs),e(Rs,Hd),e(I,Qd),k(Do,I,null),qr=!0},p(t,[h]){const Oo={};h&2&&(Oo.$$scope={dirty:h,ctx:t}),Ge.$set(Oo);const Js={};h&2&&(Js.$$scope={dirty:h,ctx:t}),Xe.$set(Js);const Gs={};h&2&&(Gs.$$scope={dirty:h,ctx:t}),Ze.$set(Gs);const Ks={};h&2&&(Ks.$$scope={dirty:h,ctx:t}),tt.$set(Ks);const Bo={};h&2&&(Bo.$$scope={dirty:h,ctx:t}),nt.$set(Bo);const Xs={};h&2&&(Xs.$$scope={dirty:h,ctx:t}),rt.$set(Xs);const Ys={};h&2&&(Ys.$$scope={dirty:h,ctx:t}),it.$set(Ys);const 
Zs={};h&2&&(Zs.$$scope={dirty:h,ctx:t}),ct.$set(Zs)},i(t){qr||(v(w.$$.fragment,t),v(bt.$$.fragment,t),v(yt.$$.fragment,t),v($t.$$.fragment,t),v(qt.$$.fragment,t),v(xt.$$.fragment,t),v(Et.$$.fragment,t),v(Mt.$$.fragment,t),v(Ct.$$.fragment,t),v(At.$$.fragment,t),v(Lt.$$.fragment,t),v(It.$$.fragment,t),v(Dt.$$.fragment,t),v(Bt.$$.fragment,t),v(Ut.$$.fragment,t),v(Ht.$$.fragment,t),v(Qt.$$.fragment,t),v(Vt.$$.fragment,t),v(Kt.$$.fragment,t),v(Ge.$$.fragment,t),v(Xt.$$.fragment,t),v(Yt.$$.fragment,t),v(Zt.$$.fragment,t),v(oo.$$.fragment,t),v(Xe.$$.fragment,t),v(no.$$.fragment,t),v(so.$$.fragment,t),v(ro.$$.fragment,t),v(io.$$.fragment,t),v(Ze.$$.fragment,t),v(lo.$$.fragment,t),v(co.$$.fragment,t),v(po.$$.fragment,t),v(fo.$$.fragment,t),v(tt.$$.fragment,t),v(mo.$$.fragment,t),v(uo.$$.fragment,t),v(go.$$.fragment,t),v(vo.$$.fragment,t),v(nt.$$.fragment,t),v(Fo.$$.fragment,t),v(bo.$$.fragment,t),v(wo.$$.fragment,t),v(To.$$.fragment,t),v($o.$$.fragment,t),v(rt.$$.fragment,t),v(zo.$$.fragment,t),v(qo.$$.fragment,t),v(xo.$$.fragment,t),v(Mo.$$.fragment,t),v(it.$$.fragment,t),v(jo.$$.fragment,t),v(Co.$$.fragment,t),v(So.$$.fragment,t),v(Io.$$.fragment,t),v(ct.$$.fragment,t),v(Do.$$.fragment,t),qr=!0)},o(t){F(w.$$.fragment,t),F(bt.$$.fragment,t),F(yt.$$.fragment,t),F($t.$$.fragment,t),F(qt.$$.fragment,t),F(xt.$$.fragment,t),F(Et.$$.fragment,t),F(Mt.$$.fragment,t),F(Ct.$$.fragment,t),F(At.$$.fragment,t),F(Lt.$$.fragment,t),F(It.$$.fragment,t),F(Dt.$$.fragment,t),F(Bt.$$.fragment,t),F(Ut.$$.fragment,t),F(Ht.$$.fragment,t),F(Qt.$$.fragment,t),F(Vt.$$.fragment,t),F(Kt.$$.fragment,t),F(Ge.$$.fragment,t),F(Xt.$$.fragment,t),F(Yt.$$.fragment,t),F(Zt.$$.fragment,t),F(oo.$$.fragment,t),F(Xe.$$.fragment,t),F(no.$$.fragment,t),F(so.$$.fragment,t),F(ro.$$.fragment,t),F(io.$$.fragment,t),F(Ze.$$.fragment,t),F(lo.$$.fragment,t),F(co.$$.fragment,t),F(po.$$.fragment,t),F(fo.$$.fragment,t),F(tt.$$.fragment,t),F(mo.$$.fragment,t),F(uo.$$.fragment,t),F(go.$$.fragment,t),F(vo.$$.fragment,t),F(nt.$$.fragment,t),F(Fo.$$.fragment,t),F(bo.$$.fragment,t),F(wo.$$.fragment,t),F(To.$$.fragment,t),F($o.$$.fragment,t),F(rt.$$.fragment,t),F(zo.$$.fragment,t),F(qo.$$.fragment,t),F(xo.$$.fragment,t),F(Mo.$$.fragment,t),F(it.$$.fragment,t),F(jo.$$.fragment,t),F(Co.$$.fragment,t),F(So.$$.fragment,t),F(Io.$$.fragment,t),F(ct.$$.fragment,t),F(Do.$$.fragment,t),qr=!1},d(t){o(p),t&&o(N),t&&o(m),b(w),t&&o(er),t&&o(me),b(bt),t&&o(tr),t&&o(Ue),t&&o(or),t&&o(Wo),t&&o(nr),t&&o(Uo),t&&o(sr),t&&o(Ho),t&&o(rr),t&&o(oe),t&&o(ar),t&&o(ue),b(yt),t&&o(ir),t&&o(P),b($t),b(qt),t&&o(lr),t&&o(ke),b(xt),t&&o(cr),t&&o(x),b(Et),b(Mt),b(Ct),b(At),b(Lt),t&&o(dr),t&&o(be),b(It),t&&o(pr),t&&o(B),b(Dt),b(Bt),b(Ut),b(Ht),t&&o(hr),t&&o(we),b(Qt),t&&o(fr),t&&o(U),b(Vt),b(Kt),b(Ge),b(Xt),t&&o(mr),t&&o(Ne),b(Yt),t&&o(ur),t&&o(H),b(Zt),b(oo),b(Xe),b(no),t&&o(gr),t&&o(ze),b(so),t&&o(_r),t&&o(ee),b(ro),b(io),b(Ze),b(lo),t&&o(kr),t&&o(Ee),b(co),t&&o(vr),t&&o(te),b(po),b(fo),b(tt),b(mo),t&&o(Fr),t&&o(je),b(uo),t&&o(br),t&&o(Q),b(go),b(vo),b(nt),b(Fo),b(bo),t&&o(wr),t&&o(Se),b(wo),t&&o(Tr),t&&o(V),b(To),b($o),b(rt),b(zo),t&&o(Nr),t&&o(Le),b(qo),t&&o(yr),t&&o(R),b(xo),b(Mo),b(it),b(jo),t&&o($r),t&&o(De),b(Co),t&&o(zr),t&&o(J),b(So),b(Io),b(ct),b(Do)}}}const 
hf={local:"fnet",sections:[{local:"overview",title:"Overview"},{local:"transformers.FNetConfig",title:"FNetConfig"},{local:"transformers.FNetTokenizer",title:"FNetTokenizer"},{local:"transformers.FNetTokenizerFast",title:"FNetTokenizerFast"},{local:"transformers.FNetModel",title:"FNetModel"},{local:"transformers.FNetForPreTraining",title:"FNetForPreTraining"},{local:"transformers.FNetForMaskedLM",title:"FNetForMaskedLM"},{local:"transformers.FNetForNextSentencePrediction",title:"FNetForNextSentencePrediction"},{local:"transformers.FNetForSequenceClassification",title:"FNetForSequenceClassification"},{local:"transformers.FNetForMultipleChoice",title:"FNetForMultipleChoice"},{local:"transformers.FNetForTokenClassification",title:"FNetForTokenClassification"},{local:"transformers.FNetForQuestionAnswering",title:"FNetForQuestionAnswering"}],title:"FNet"};function ff(q,p,N){let{fw:m}=p;return q.$$set=T=>{"fw"in T&&N(0,m=T.fw)},[m]}class Ff extends Yh{constructor(p){super();Zh(this,p,ff,pf,ef,{fw:0})}}export{Ff as default,hf as metadata};
9,927
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/unispeech_sat.mdx-9970ac04.js
import{S as ed,i as td,s as od,e as s,k as c,w as _,t as a,L as ad,c as r,d as o,m as d,a as i,x as v,h as n,b as l,J as e,g as m,y as S,q as b,o as w,B as y}from"../../chunks/vendor-b1433968.js";import{T as zo}from"../../chunks/Tip-c3840994.js";import{D as M}from"../../chunks/Docstring-ff504c58.js";import{C as oo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ae}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function nd(C){let h,T,u,k,U;return{c(){h=s("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),k=a("Module"),U=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);T=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r(f,"CODE",{});var $=i(u);k=n($,"Module"),$.forEach(o),U=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,T),e(h,u),e(u,k),e(h,U)},d(g){g&&o(h)}}}function sd(C){let h,T,u,k,U;return{c(){h=s("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),k=a("Module"),U=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);T=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r(f,"CODE",{});var $=i(u);k=n($,"Module"),$.forEach(o),U=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,T),e(h,u),e(u,k),e(h,U)},d(g){g&&o(h)}}}function rd(C){let h,T,u,k,U;return{c(){h=s("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),k=a("Module"),U=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);T=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r(f,"CODE",{});var $=i(u);k=n($,"Module"),$.forEach(o),U=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,T),e(h,u),e(u,k),e(h,U)},d(g){g&&o(h)}}}function id(C){let h,T,u,k,U;return{c(){h=s("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),k=a("Module"),U=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);T=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r(f,"CODE",{});var $=i(u);k=n($,"Module"),$.forEach(o),U=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,T),e(h,u),e(u,k),e(h,U)},d(g){g&&o(h)}}}function ld(C){let 
h,T,u,k,U;return{c(){h=s("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),k=a("Module"),U=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);T=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r(f,"CODE",{});var $=i(u);k=n($,"Module"),$.forEach(o),U=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,T),e(h,u),e(u,k),e(h,U)},d(g){g&&o(h)}}}function cd(C){let h,T,u,k,U;return{c(){h=s("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s("code"),k=a("Module"),U=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);T=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r(f,"CODE",{});var $=i(u);k=n($,"Module"),$.forEach(o),U=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,T),e(h,u),e(u,k),e(h,U)},d(g){g&&o(h)}}}function dd(C){let h,T,u,k,U,g,f,$,Cn,Fa,ne,Ue,Do,Ne,qn,Oo,En,xa,$e,Pn,We,An,Mn,ja,ao,zn,Ca,no,Vo,Dn,qa,so,On,Ea,H,Ie,Vn,ro,Ln,Nn,Wn,Be,In,io,Bn,Hn,Xn,Lo,Rn,Pa,X,Zn,He,Jn,Qn,Xe,Yn,Kn,Aa,se,Fe,No,Re,Gn,Wo,es,Ma,q,Ze,ts,re,os,lo,as,ns,Je,ss,rs,is,ie,ls,co,cs,ds,po,ps,hs,ms,Io,us,fs,Qe,za,le,xe,Bo,Ye,gs,Ho,_s,Da,ce,Ke,vs,Ge,Ss,Xo,bs,ws,Oa,de,et,ys,tt,ks,Ro,Ts,Us,Va,pe,je,Zo,ot,$s,Jo,Fs,La,E,at,xs,nt,js,st,Cs,qs,Es,rt,Ps,ho,As,Ms,zs,it,Ds,lt,Os,Vs,Ls,z,ct,Ns,he,Ws,mo,Is,Bs,Qo,Hs,Xs,Rs,Ce,Zs,Yo,Js,Qs,dt,Na,me,qe,Ko,pt,Ys,Go,Ks,Wa,P,ht,Gs,ue,er,ea,tr,or,mt,ar,nr,sr,ut,rr,uo,ir,lr,cr,ft,dr,gt,pr,hr,mr,D,_t,ur,fe,fr,fo,gr,_r,ta,vr,Sr,br,Ee,wr,oa,yr,kr,vt,Ia,ge,Pe,aa,St,Tr,na,Ur,Ba,F,bt,$r,sa,Fr,xr,wt,jr,yt,Cr,qr,Er,kt,Pr,go,Ar,Mr,zr,Tt,Dr,Ut,Or,Vr,Lr,O,$t,Nr,_e,Wr,_o,Ir,Br,ra,Hr,Xr,Rr,Ae,Zr,ia,Jr,Qr,Ft,Ha,ve,Me,la,xt,Yr,ca,Kr,Xa,x,jt,Gr,da,ei,ti,Ct,oi,qt,ai,ni,si,Et,ri,vo,ii,li,ci,Pt,di,At,pi,hi,mi,V,Mt,ui,Se,fi,So,gi,_i,pa,vi,Si,bi,ze,wi,ha,yi,ki,zt,Ra,be,De,ma,Dt,Ti,ua,Ui,Za,j,Ot,$i,fa,Fi,xi,Vt,ji,Lt,Ci,qi,Ei,Nt,Pi,bo,Ai,Mi,zi,Wt,Di,It,Oi,Vi,Li,L,Bt,Ni,we,Wi,wo,Ii,Bi,ga,Hi,Xi,Ri,Oe,Zi,_a,Ji,Qi,Ht,Ja,ye,Ve,va,Xt,Yi,Sa,Ki,Qa,A,Rt,Gi,ke,el,ba,tl,ol,Zt,al,nl,sl,Jt,rl,yo,il,ll,cl,Qt,dl,Yt,pl,hl,ml,N,Kt,ul,Te,fl,ko,gl,_l,wa,vl,Sl,bl,Le,wl,ya,yl,kl,Gt,Ya;return g=new ae({}),Ne=new ae({}),Re=new ae({}),Ze=new M({props:{name:"class transformers.UniSpeechSatConfig",anchor:"transformers.UniSpeechSatConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"feat_quantizer_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 
'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"num_codevectors_per_group",val:" = 320"},{name:"num_codevector_groups",val:" = 2"},{name:"contrastive_logits_temperature",val:" = 0.1"},{name:"num_negatives",val:" = 100"},{name:"codevector_dim",val:" = 256"},{name:"proj_codevector_dim",val:" = 256"},{name:"diversity_loss_weight",val:" = 0.1"},{name:"ctc_loss_reduction",val:" = 'mean'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"tdnn_dim",val:" = (512, 512, 512, 512, 1500)"},{name:"tdnn_kernel",val:" = (5, 3, 3, 1, 1)"},{name:"tdnn_dilation",val:" = (1, 2, 3, 1, 1)"},{name:"xvector_output_dim",val:" = 512"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"num_clusters",val:" = 504"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py#L29",parametersDescription:[{anchor:"transformers.UniSpeechSatConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the UniSpeechSat model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatModel">UniSpeechSatModel</a>. Vocabulary size of the model. 
Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatModel">UniSpeechSatModel</a>.`,name:"vocab_size"},{anchor:"transformers.UniSpeechSatConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.UniSpeechSatConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.UniSpeechSatConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.UniSpeechSatConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.UniSpeechSatConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.UniSpeechSatConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.UniSpeechSatConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.UniSpeechSatConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForCTC">UniSpeechSatForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.UniSpeechSatConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.UniSpeechSatConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.UniSpeechSatConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature extractor. 
One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.UniSpeechSatConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.UniSpeechSatConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.UniSpeechSatConfig.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (obj &#x2014;<em>float</em>, <em>optional</em>, defaults to 0.0): The dropout probabilitiy for quantized feature extractor states.`,name:"feat_quantizer_dropout"},{anchor:"transformers.UniSpeechSatConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.UniSpeechSatConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.UniSpeechSatConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.UniSpeechSatConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.UniSpeechSatConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. 
Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.UniSpeechSatConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.UniSpeechSatConfig.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.UniSpeechSatConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.UniSpeechSatConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.UniSpeechSatConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.UniSpeechSatConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.UniSpeechSatConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. 
Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.UniSpeechSatConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.UniSpeechSatConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.UniSpeechSatConfig.num_codevectors_per_group",description:`<strong>num_codevectors_per_group</strong> (<code>int</code>, <em>optional</em>, defaults to 320) &#x2014; Number of entries in each quantization codebook (group).`,name:"num_codevectors_per_group"},{anchor:"transformers.UniSpeechSatConfig.num_codevector_groups",description:`<strong>num_codevector_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of codevector groups for product codevector quantization.`,name:"num_codevector_groups"},{anchor:"transformers.UniSpeechSatConfig.contrastive_logits_temperature",description:`<strong>contrastive_logits_temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The temperature <em>kappa</em> in the contrastive loss.`,name:"contrastive_logits_temperature"},{anchor:"transformers.UniSpeechSatConfig.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for the output of the feature extractor that&#x2019;s used by the quantizer.`,name:"feat_quantizer_dropout"},{anchor:"transformers.UniSpeechSatConfig.num_negatives",description:`<strong>num_negatives</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Number of negative samples for the contrastive loss.`,name:"num_negatives"},{anchor:"transformers.UniSpeechSatConfig.codevector_dim",description:`<strong>codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the quantized feature vectors.`,name:"codevector_dim"},{anchor:"transformers.UniSpeechSatConfig.proj_codevector_dim",description:`<strong>proj_codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the final projection of both the quantized and the transformer features.`,name:"proj_codevector_dim"},{anchor:"transformers.UniSpeechSatConfig.diversity_loss_weight",description:`<strong>diversity_loss_weight</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The weight of the codebook diversity loss component.`,name:"diversity_loss_weight"},{anchor:"transformers.UniSpeechSatConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;mean&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. 
Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForCTC">UniSpeechSatForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.UniSpeechSatConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForCTC">UniSpeechSatForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.UniSpeechSatConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForSequenceClassification">UniSpeechSatForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.UniSpeechSatConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"},{anchor:"transformers.UniSpeechSatConfig.tdnn_dim",description:`<strong>tdnn_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 1500)</code>) &#x2014; A tuple of integers defining the number of output channels of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dim</em> defines the number of <em>TDNN</em> layers.`,name:"tdnn_dim"},{anchor:"transformers.UniSpeechSatConfig.tdnn_kernel",description:`<strong>tdnn_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 3, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_kernel</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_kernel"},{anchor:"transformers.UniSpeechSatConfig.tdnn_dilation",description:`<strong>tdnn_dilation</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(1, 2, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the dilation factor of each 1D convolutional layer in <em>TDNN</em> module of the <em>XVector</em> model. 
The length of <em>tdnn_dilation</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_dilation"},{anchor:"transformers.UniSpeechSatConfig.xvector_output_dim",description:`<strong>xvector_output_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the <em>XVector</em> embedding vectors.`,name:"xvector_output_dim"}]}}),Qe=new oo({props:{code:`from transformers import UniSpeechSatModel, UniSpeechSatConfig # Initializing a UniSpeechSat facebook/unispeech_sat-base-960h style configuration configuration = UniSpeechSatConfig() # Initializing a model from the facebook/unispeech_sat-base-960h style configuration model = UniSpeechSatModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> UniSpeechSatModel, UniSpeechSatConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a UniSpeechSat facebook/unispeech_sat-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = UniSpeechSatConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/unispeech_sat-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ye=new ae({}),Ke=new M({props:{name:"class transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput",anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"extract_features",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L62",parametersDescription:[{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.`,name:"extract_features"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),et=new M({props:{name:"class transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput",anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"projected_states",val:": FloatTensor = None"},{name:"projected_quantized_states",val:": FloatTensor = None"},{name:"codevector_perplexity",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L90",parametersDescription:[{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> . 
(classification) loss.`,name:"loss"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput.projected_states",description:`<strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.`,name:"projected_states"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput.projected_quantized_states",description:`<strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.`,name:"projected_quantized_states"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ot=new ae({}),at=new M({props:{name:"class transformers.UniSpeechSatModel",anchor:"transformers.UniSpeechSatModel",parameters:[{name:"config",val:": UniSpeechSatConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1079",parametersDescription:[{anchor:"transformers.UniSpeechSatModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ct=new M({props:{name:"forward",anchor:"transformers.UniSpeechSatModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1143",parametersDescription:[{anchor:"transformers.UniSpeechSatModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechSatProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechSatProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechSatModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/unispeech_sat-base-960h" rel="nofollow">unispeech_sat-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechSatModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechSatModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechSatModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput" >transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig" >UniSpeechSatConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput" >transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ce=new zo({props:{$$slots:{default:[nd]},$$scope:{ctx:C}}}),dt=new oo({props:{code:`from transformers import Wav2Vec2Processor, UniSpeechSatModel from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('microsoft/unispeech-sat-base-plus') model = UniSpeechSatModel.from_pretrained('microsoft/unispeech-sat-base-plus') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, UniSpeechSatModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),pt=new ae({}),ht=new M({props:{name:"class transformers.UniSpeechSatForCTC",anchor:"transformers.UniSpeechSatForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1363",parametersDescription:[{anchor:"transformers.UniSpeechSatForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),_t=new M({props:{name:"forward",anchor:"transformers.UniSpeechSatForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1389",parametersDescription:[{anchor:"transformers.UniSpeechSatForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>input_values</em>, the <code>UniSpeechSatProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechSatProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechSatForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/unispeech_sat-base-960h" rel="nofollow">unispeech_sat-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechSatForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechSatForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechSatForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechSatForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig" >UniSpeechSatConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ee=new zo({props:{$$slots:{default:[sd]},$$scope:{ctx:C}}}),vt=new oo({props:{code:`from transformers import Wav2Vec2Processor, UniSpeechSatForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('microsoft/unispeech-sat-base-plus') model = UniSpeechSatForCTC.from_pretrained('microsoft/unispeech-sat-base-plus') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, UniSpeechSatForCTC <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatForCTC.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),St=new ae({}),bt=new M({props:{name:"class transformers.UniSpeechSatForSequenceClassification",anchor:"transformers.UniSpeechSatForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1475",parametersDescription:[{anchor:"transformers.UniSpeechSatForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$t=new M({props:{name:"forward",anchor:"transformers.UniSpeechSatForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1504",parametersDescription:[{anchor:"transformers.UniSpeechSatForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechSatProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechSatProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechSatForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/unispeech_sat-base-960h" rel="nofollow">unispeech_sat-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechSatForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechSatForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechSatForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechSatForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig" >UniSpeechSatConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new zo({props:{$$slots:{default:[rd]},$$scope:{ctx:C}}}),Ft=new oo({props:{code:`from transformers import Wav2Vec2FeatureExtractor, UniSpeechSatForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/unispeech-sat-base-plus') model = UniSpeechSatForSequenceClassification.from_pretrained('microsoft/unispeech-sat-base-plus') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, UniSpeechSatForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),xt=new ae({}),jt=new M({props:{name:"class transformers.UniSpeechSatForAudioFrameClassification",anchor:"transformers.UniSpeechSatForAudioFrameClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1580",parametersDescription:[{anchor:"transformers.UniSpeechSatForAudioFrameClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mt=new M({props:{name:"forward",anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1607",parametersDescription:[{anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechSatProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechSatProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. 
For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/unispeech_sat-base-960h" rel="nofollow">unispeech_sat-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechSatForAudioFrameClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig" >UniSpeechSatConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ze=new zo({props:{$$slots:{default:[id]},$$scope:{ctx:C}}}),zt=new oo({props:{code:`from transformers import Wav2Vec2FeatureExtractor, UniSpeechSatForAudioFrameClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/unispeech-sat-base-plus-sd') model = UniSpeechSatForAudioFrameClassification.from_pretrained('microsoft/unispeech-sat-base-plus-sd') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits probabilities = torch.sigmoid(logits[0]) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (probabilities > 0.5).long(),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, UniSpeechSatForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span 
class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus-sd&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatForAudioFrameClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus-sd&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probabilities = torch.sigmoid(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># labels is a one-hot array of shape (num_frames, num_speakers)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = (probabilities &gt; <span class="hljs-number">0.5</span>).long()`}}),Dt=new ae({}),Ot=new M({props:{name:"class transformers.UniSpeechSatForXVector",anchor:"transformers.UniSpeechSatForXVector",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1720",parametersDescription:[{anchor:"transformers.UniSpeechSatForXVector.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bt=new M({props:{name:"forward",anchor:"transformers.UniSpeechSatForXVector.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1770",parametersDescription:[{anchor:"transformers.UniSpeechSatForXVector.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. 
Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechSatProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechSatProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechSatForXVector.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/unispeech_sat-base-960h" rel="nofollow">unispeech_sat-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechSatForXVector.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechSatForXVector.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechSatForXVector.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechSatForXVector.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.unispeech_sat.modeling_unispeech_sat.XVectorOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig" >UniSpeechSatConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Classification hidden states before AMSoftmax.</p> </li> <li> <p><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Utterance embeddings used for vector similarity-based retrieval.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.unispeech_sat.modeling_unispeech_sat.XVectorOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Oe=new zo({props:{$$slots:{default:[ld]},$$scope:{ctx:C}}}),Ht=new oo({props:{code:`from transformers import Wav2Vec2FeatureExtractor, UniSpeechSatForXVector from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/unispeech-sat-base-plus-sv') model = UniSpeechSatForXVector.from_pretrained('microsoft/unispeech-sat-base-plus-sv') # audio file is decoded on the fly inputs = feature_extractor(dataset[:2]["audio"]["array"], return_tensors="pt") embeddings = model(**inputs).embeddings embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() # the resulting embeddings can be used for cosine similarity-based retrieval cosine_sim = torch.nn.CosineSimilarity(dim=-1) similarity = cosine_sim(embeddings[0], embeddings[1]) threshold = 0.7 # the optimal threshold is dataset-dependent if similarity < threshold: print("Speakers are not the same!"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, UniSpeechSatForXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus-sv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatForXVector.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-sat-base-plus-sv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[:<span class="hljs-number">2</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(**inputs).embeddings <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = torch.nn.functional.normalize(embeddings, dim=-<span class="hljs-number">1</span>).cpu() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the resulting embeddings can be used for cosine similarity-based retrieval</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.nn.CosineSimilarity(dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>similarity = cosine_sim(embeddings[<span class="hljs-number">0</span>], embeddings[<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>threshold = <span class="hljs-number">0.7</span> <span class="hljs-comment"># the optimal threshold is dataset-dependent</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">if</span> similarity &lt; threshold: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Speakers are not the same!&quot;</span>)`}}),Xt=new ae({}),Rt=new M({props:{name:"class transformers.UniSpeechSatForPreTraining",anchor:"transformers.UniSpeechSatForPreTraining",parameters:[{name:"config",val:": UniSpeechSatConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1200",parametersDescription:[{anchor:"transformers.UniSpeechSatForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig">UniSpeechSatConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new M({props:{name:"forward",anchor:"transformers.UniSpeechSatForPreTraining.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py#L1256",parametersDescription:[{anchor:"transformers.UniSpeechSatForPreTraining.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechSatProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechSatProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechSatForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/unispeech_sat-base-960h" rel="nofollow">unispeech_sat-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechSatForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechSatForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechSatForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput" >transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatConfig" >UniSpeechSatConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> . (classification) loss.</p> </li> <li> <p><strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.</p> </li> <li> <p><strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput" >transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Le=new zo({props:{$$slots:{default:[cd]},$$scope:{ctx:C}}}),Gt=new oo({props:{code:`import torch from transformers import UniSpeechSatFeatureExtractor, 
UniSpeechSatForPreTraining from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices from datasets import load_dataset import soundfile as sf feature_extractor = UniSpeechSatFeatureExtractor.from_pretrained("patrickvonplaten/unispeech_sat-base") model = UniSpeechSatForPreTraining.from_pretrained("patrickvonplaten/unispeech_sat-base") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = feature_extractor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 # compute masked indices batch_size, raw_sequence_length = input_values.shape sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) with torch.no_grad(): outputs = model(input_values, mask_time_indices=mask_time_indices) # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) cosine_sim = torch.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, dim=-1 ) # show that cosine similarity is much higher than random assert cosine_sim[mask_time_indices].mean() > 0.5 # for contrastive loss training model should be put into train mode model.train() loss = model(input_values, mask_time_indices=mask_time_indices).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> UniSpeechSatFeatureExtractor, UniSpeechSatForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.unispeech_sat.modeling_unispeech_sat <span class="hljs-keyword">import</span> _compute_mask_indices <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = UniSpeechSatFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/unispeech_sat-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechSatForPreTraining.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/unispeech_sat-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;patrickvonplaten/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute masked indices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size, raw_sequence_length = input_values.shape <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=<span class="hljs-number">0.2</span>, mask_length=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(input_values, mask_time_indices=mask_time_indices) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.cosine_similarity( <span class="hljs-meta">... </span> outputs.projected_states, outputs.projected_quantized_states, dim=-<span class="hljs-number">1</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># show that cosine similarity is much higher than random</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> cosine_sim[mask_time_indices].mean() &gt; <span class="hljs-number">0.5</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># for contrastive loss training model should be put into train mode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, mask_time_indices=mask_time_indices).loss`}}),{c(){h=s("meta"),T=c(),u=s("h1"),k=s("a"),U=s("span"),_(g.$$.fragment),f=c(),$=s("span"),Cn=a("UniSpeech-SAT"),Fa=c(),ne=s("h2"),Ue=s("a"),Do=s("span"),_(Ne.$$.fragment),qn=c(),Oo=s("span"),En=a("Overview"),xa=c(),$e=s("p"),Pn=a("The UniSpeech-SAT model was proposed in "),We=s("a"),An=a(`UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training`),Mn=a(` by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu .`),ja=c(),ao=s("p"),zn=a("The abstract from the paper is the following:"),Ca=c(),no=s("p"),Vo=s("em"),Dn=a(`Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years witness great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. 
In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply the multi-task learning to the current SSL framework, where we integrate the utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisely and incorporate during training. We integrate the proposed methods into the HuBERT framework. Experiment results on SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.`),qa=c(),so=s("p"),On=a("Tips:"),Ea=c(),H=s("ul"),Ie=s("li"),Vn=a(`UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use `),ro=s("a"),Ln=a("Wav2Vec2Processor"),Nn=a(" for the feature extraction."),Wn=c(),Be=s("li"),In=a(`UniSpeechSat model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),io=s("a"),Bn=a("Wav2Vec2CTCTokenizer"),Hn=a("."),Xn=c(),Lo=s("li"),Rn=a("UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks."),Pa=c(),X=s("p"),Zn=a("This model was contributed by "),He=s("a"),Jn=a("patrickvonplaten"),Qn=a(`. The Authors\u2019 code can be found `),Xe=s("a"),Yn=a("here"),Kn=a("."),Aa=c(),se=s("h2"),Fe=s("a"),No=s("span"),_(Re.$$.fragment),Gn=c(),Wo=s("span"),es=a("UniSpeechSatConfig"),Ma=c(),q=s("div"),_(Ze.$$.fragment),ts=c(),re=s("p"),os=a("This is the configuration class to store the configuration of a "),lo=s("a"),as=a("UniSpeechSatModel"),ns=a(`. It is used to instantiate an UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeechSat `),Je=s("a"),ss=a("facebook/unispeech_sat-base-960h"),rs=a(" architecture."),is=c(),ie=s("p"),ls=a("Configuration objects inherit from "),co=s("a"),cs=a("PretrainedConfig"),ds=a(` and can be used to control the model outputs. Read the documentation from `),po=s("a"),ps=a("PretrainedConfig"),hs=a(" for more information."),ms=c(),Io=s("p"),us=a("Example:"),fs=c(),_(Qe.$$.fragment),za=c(),le=s("h2"),xe=s("a"),Bo=s("span"),_(Ye.$$.fragment),gs=c(),Ho=s("span"),_s=a("UniSpeechSat specific outputs"),Da=c(),ce=s("div"),_(Ke.$$.fragment),vs=c(),Ge=s("p"),Ss=a("Output type of "),Xo=s("code"),bs=a("UniSpeechSatBaseModelOutput"),ws=a(", with potential hidden states and attentions."),Oa=c(),de=s("div"),_(et.$$.fragment),ys=c(),tt=s("p"),ks=a("Output type of "),Ro=s("code"),Ts=a("UniSpeechSatForPreTrainingOutput"),Us=a(`, with potential hidden states and attentions.`),Va=c(),pe=s("h2"),je=s("a"),Zo=s("span"),_(ot.$$.fragment),$s=c(),Jo=s("span"),Fs=a("UniSpeechSatModel"),La=c(),E=s("div"),_(at.$$.fragment),xs=c(),nt=s("p"),js=a(`The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top. 
UniSpeechSat was proposed in `),st=s("a"),Cs=a("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),qs=a(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Es=c(),rt=s("p"),Ps=a("This model inherits from "),ho=s("a"),As=a("PreTrainedModel"),Ms=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),zs=c(),it=s("p"),Ds=a("This model is a PyTorch "),lt=s("a"),Os=a("torch.nn.Module"),Vs=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ls=c(),z=s("div"),_(ct.$$.fragment),Ns=c(),he=s("p"),Ws=a("The "),mo=s("a"),Is=a("UniSpeechSatModel"),Bs=a(" forward method, overrides the "),Qo=s("code"),Hs=a("__call__"),Xs=a(" special method."),Rs=c(),_(Ce.$$.fragment),Zs=c(),Yo=s("p"),Js=a("Example:"),Qs=c(),_(dt.$$.fragment),Na=c(),me=s("h2"),qe=s("a"),Ko=s("span"),_(pt.$$.fragment),Ys=c(),Go=s("span"),Ks=a("UniSpeechSatForCTC"),Wa=c(),P=s("div"),_(ht.$$.fragment),Gs=c(),ue=s("p"),er=a("UniSpeechSat Model with a "),ea=s("code"),tr=a("language modeling"),or=a(` head on top for Connectionist Temporal Classification (CTC). UniSpeechSat was proposed in `),mt=s("a"),ar=a("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),nr=a(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),sr=c(),ut=s("p"),rr=a("This model inherits from "),uo=s("a"),ir=a("PreTrainedModel"),lr=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),cr=c(),ft=s("p"),dr=a("This model is a PyTorch "),gt=s("a"),pr=a("torch.nn.Module"),hr=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mr=c(),D=s("div"),_(_t.$$.fragment),ur=c(),fe=s("p"),fr=a("The "),fo=s("a"),gr=a("UniSpeechSatForCTC"),_r=a(" forward method, overrides the "),ta=s("code"),vr=a("__call__"),Sr=a(" special method."),br=c(),_(Ee.$$.fragment),wr=c(),oa=s("p"),yr=a("Example:"),kr=c(),_(vt.$$.fragment),Ia=c(),ge=s("h2"),Pe=s("a"),aa=s("span"),_(St.$$.fragment),Tr=c(),na=s("span"),Ur=a("UniSpeechSatForSequenceClassification"),Ba=c(),F=s("div"),_(bt.$$.fragment),$r=c(),sa=s("p"),Fr=a(`UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),xr=c(),wt=s("p"),jr=a("UniSpeechSat was proposed in "),yt=s("a"),Cr=a("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),qr=a(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Er=c(),kt=s("p"),Pr=a("This model inherits from "),go=s("a"),Ar=a("PreTrainedModel"),Mr=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),zr=c(),Tt=s("p"),Dr=a("This model is a PyTorch "),Ut=s("a"),Or=a("torch.nn.Module"),Vr=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lr=c(),O=s("div"),_($t.$$.fragment),Nr=c(),_e=s("p"),Wr=a("The "),_o=s("a"),Ir=a("UniSpeechSatForSequenceClassification"),Br=a(" forward method, overrides the "),ra=s("code"),Hr=a("__call__"),Xr=a(" special method."),Rr=c(),_(Ae.$$.fragment),Zr=c(),ia=s("p"),Jr=a("Example:"),Qr=c(),_(Ft.$$.fragment),Ha=c(),ve=s("h2"),Me=s("a"),la=s("span"),_(xt.$$.fragment),Yr=c(),ca=s("span"),Kr=a("UniSpeechSatForAudioFrameClassification"),Xa=c(),x=s("div"),_(jt.$$.fragment),Gr=c(),da=s("p"),ei=a("UniSpeech-SAT Model with a frame classification head on top for tasks like Speaker Diarization."),ti=c(),Ct=s("p"),oi=a("UniSpeechSat was proposed in "),qt=s("a"),ai=a("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),ni=a(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),si=c(),Et=s("p"),ri=a("This model inherits from "),vo=s("a"),ii=a("PreTrainedModel"),li=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),ci=c(),Pt=s("p"),di=a("This model is a PyTorch "),At=s("a"),pi=a("torch.nn.Module"),hi=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mi=c(),V=s("div"),_(Mt.$$.fragment),ui=c(),Se=s("p"),fi=a("The "),So=s("a"),gi=a("UniSpeechSatForAudioFrameClassification"),_i=a(" forward method, overrides the "),pa=s("code"),vi=a("__call__"),Si=a(" special method."),bi=c(),_(ze.$$.fragment),wi=c(),ha=s("p"),yi=a("Example:"),ki=c(),_(zt.$$.fragment),Ra=c(),be=s("h2"),De=s("a"),ma=s("span"),_(Dt.$$.fragment),Ti=c(),ua=s("span"),Ui=a("UniSpeechSatForXVector"),Za=c(),j=s("div"),_(Ot.$$.fragment),$i=c(),fa=s("p"),Fi=a("UniSpeech-SAT Model with an XVector feature extraction head on top for tasks like Speaker Verification."),xi=c(),Vt=s("p"),ji=a("UniSpeechSat was proposed in "),Lt=s("a"),Ci=a("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),qi=a(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ei=c(),Nt=s("p"),Pi=a("This model inherits from "),bo=s("a"),Ai=a("PreTrainedModel"),Mi=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),zi=c(),Wt=s("p"),Di=a("This model is a PyTorch "),It=s("a"),Oi=a("torch.nn.Module"),Vi=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Li=c(),L=s("div"),_(Bt.$$.fragment),Ni=c(),we=s("p"),Wi=a("The "),wo=s("a"),Ii=a("UniSpeechSatForXVector"),Bi=a(" forward method, overrides the "),ga=s("code"),Hi=a("__call__"),Xi=a(" special method."),Ri=c(),_(Oe.$$.fragment),Zi=c(),_a=s("p"),Ji=a("Example:"),Qi=c(),_(Ht.$$.fragment),Ja=c(),ye=s("h2"),Ve=s("a"),va=s("span"),_(Xt.$$.fragment),Yi=c(),Sa=s("span"),Ki=a("UniSpeechSatForPreTraining"),Qa=c(),A=s("div"),_(Rt.$$.fragment),Gi=c(),ke=s("p"),el=a("UniSpeechSat Model with a quantizer and "),ba=s("code"),tl=a("VQ"),ol=a(` head on top. UniSpeechSat was proposed in `),Zt=s("a"),al=a("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),nl=a(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),sl=c(),Jt=s("p"),rl=a("This model inherits from "),yo=s("a"),il=a("PreTrainedModel"),ll=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),cl=c(),Qt=s("p"),dl=a("This model is a PyTorch "),Yt=s("a"),pl=a("torch.nn.Module"),hl=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ml=c(),N=s("div"),_(Kt.$$.fragment),ul=c(),Te=s("p"),fl=a("The "),ko=s("a"),gl=a("UniSpeechSatForPreTraining"),_l=a(" forward method, overrides the "),wa=s("code"),vl=a("__call__"),Sl=a(" special method."),bl=c(),_(Le.$$.fragment),wl=c(),ya=s("p"),yl=a("Example:"),kl=c(),_(Gt.$$.fragment),this.h()},l(t){const p=ad('[data-svelte="svelte-1phssyn"]',document.head);h=r(p,"META",{name:!0,content:!0}),p.forEach(o),T=d(t),u=r(t,"H1",{class:!0});var eo=i(u);k=r(eo,"A",{id:!0,class:!0,href:!0});var ka=i(k);U=r(ka,"SPAN",{});var Ta=i(U);v(g.$$.fragment,Ta),Ta.forEach(o),ka.forEach(o),f=d(eo),$=r(eo,"SPAN",{});var Ua=i($);Cn=n(Ua,"UniSpeech-SAT"),Ua.forEach(o),eo.forEach(o),Fa=d(t),ne=r(t,"H2",{class:!0});var to=i(ne);Ue=r(to,"A",{id:!0,class:!0,href:!0});var $a=i(Ue);Do=r($a,"SPAN",{});var Tl=i(Do);v(Ne.$$.fragment,Tl),Tl.forEach(o),$a.forEach(o),qn=d(to),Oo=r(to,"SPAN",{});var Ul=i(Oo);En=n(Ul,"Overview"),Ul.forEach(o),to.forEach(o),xa=d(t),$e=r(t,"P",{});var Ka=i($e);Pn=n(Ka,"The UniSpeech-SAT model was proposed in "),We=r(Ka,"A",{href:!0,rel:!0});var $l=i(We);An=n($l,`UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training`),$l.forEach(o),Mn=n(Ka,` by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu .`),Ka.forEach(o),ja=d(t),ao=r(t,"P",{});var Fl=i(ao);zn=n(Fl,"The abstract from the paper is the following:"),Fl.forEach(o),Ca=d(t),no=r(t,"P",{});var xl=i(no);Vo=r(xl,"EM",{});var jl=i(Vo);Dn=n(jl,`Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years witness great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply the multi-task learning to the current SSL framework, where we integrate the utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisely and incorporate during training. We integrate the proposed methods into the HuBERT framework. Experiment results on SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. 
Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.`),jl.forEach(o),xl.forEach(o),qa=d(t),so=r(t,"P",{});var Cl=i(so);On=n(Cl,"Tips:"),Cl.forEach(o),Ea=d(t),H=r(t,"UL",{});var To=i(H);Ie=r(To,"LI",{});var Ga=i(Ie);Vn=n(Ga,`UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use `),ro=r(Ga,"A",{href:!0});var ql=i(ro);Ln=n(ql,"Wav2Vec2Processor"),ql.forEach(o),Nn=n(Ga," for the feature extraction."),Ga.forEach(o),Wn=d(To),Be=r(To,"LI",{});var en=i(Be);In=n(en,`UniSpeechSat model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),io=r(en,"A",{href:!0});var El=i(io);Bn=n(El,"Wav2Vec2CTCTokenizer"),El.forEach(o),Hn=n(en,"."),en.forEach(o),Xn=d(To),Lo=r(To,"LI",{});var Pl=i(Lo);Rn=n(Pl,"UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks."),Pl.forEach(o),To.forEach(o),Pa=d(t),X=r(t,"P",{});var Uo=i(X);Zn=n(Uo,"This model was contributed by "),He=r(Uo,"A",{href:!0,rel:!0});var Al=i(He);Jn=n(Al,"patrickvonplaten"),Al.forEach(o),Qn=n(Uo,`. The Authors\u2019 code can be found `),Xe=r(Uo,"A",{href:!0,rel:!0});var Ml=i(Xe);Yn=n(Ml,"here"),Ml.forEach(o),Kn=n(Uo,"."),Uo.forEach(o),Aa=d(t),se=r(t,"H2",{class:!0});var tn=i(se);Fe=r(tn,"A",{id:!0,class:!0,href:!0});var zl=i(Fe);No=r(zl,"SPAN",{});var Dl=i(No);v(Re.$$.fragment,Dl),Dl.forEach(o),zl.forEach(o),Gn=d(tn),Wo=r(tn,"SPAN",{});var Ol=i(Wo);es=n(Ol,"UniSpeechSatConfig"),Ol.forEach(o),tn.forEach(o),Ma=d(t),q=r(t,"DIV",{class:!0});var R=i(q);v(Ze.$$.fragment,R),ts=d(R),re=r(R,"P",{});var $o=i(re);os=n($o,"This is the configuration class to store the configuration of a "),lo=r($o,"A",{href:!0});var Vl=i(lo);as=n(Vl,"UniSpeechSatModel"),Vl.forEach(o),ns=n($o,`. It is used to instantiate an UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeechSat `),Je=r($o,"A",{href:!0,rel:!0});var Ll=i(Je);ss=n(Ll,"facebook/unispeech_sat-base-960h"),Ll.forEach(o),rs=n($o," architecture."),$o.forEach(o),is=d(R),ie=r(R,"P",{});var Fo=i(ie);ls=n(Fo,"Configuration objects inherit from "),co=r(Fo,"A",{href:!0});var Nl=i(co);cs=n(Nl,"PretrainedConfig"),Nl.forEach(o),ds=n(Fo,` and can be used to control the model outputs. 
Read the documentation from `),po=r(Fo,"A",{href:!0});var Wl=i(po);ps=n(Wl,"PretrainedConfig"),Wl.forEach(o),hs=n(Fo," for more information."),Fo.forEach(o),ms=d(R),Io=r(R,"P",{});var Il=i(Io);us=n(Il,"Example:"),Il.forEach(o),fs=d(R),v(Qe.$$.fragment,R),R.forEach(o),za=d(t),le=r(t,"H2",{class:!0});var on=i(le);xe=r(on,"A",{id:!0,class:!0,href:!0});var Bl=i(xe);Bo=r(Bl,"SPAN",{});var Hl=i(Bo);v(Ye.$$.fragment,Hl),Hl.forEach(o),Bl.forEach(o),gs=d(on),Ho=r(on,"SPAN",{});var Xl=i(Ho);_s=n(Xl,"UniSpeechSat specific outputs"),Xl.forEach(o),on.forEach(o),Da=d(t),ce=r(t,"DIV",{class:!0});var an=i(ce);v(Ke.$$.fragment,an),vs=d(an),Ge=r(an,"P",{});var nn=i(Ge);Ss=n(nn,"Output type of "),Xo=r(nn,"CODE",{});var Rl=i(Xo);bs=n(Rl,"UniSpeechSatBaseModelOutput"),Rl.forEach(o),ws=n(nn,", with potential hidden states and attentions."),nn.forEach(o),an.forEach(o),Oa=d(t),de=r(t,"DIV",{class:!0});var sn=i(de);v(et.$$.fragment,sn),ys=d(sn),tt=r(sn,"P",{});var rn=i(tt);ks=n(rn,"Output type of "),Ro=r(rn,"CODE",{});var Zl=i(Ro);Ts=n(Zl,"UniSpeechSatForPreTrainingOutput"),Zl.forEach(o),Us=n(rn,`, with potential hidden states and attentions.`),rn.forEach(o),sn.forEach(o),Va=d(t),pe=r(t,"H2",{class:!0});var ln=i(pe);je=r(ln,"A",{id:!0,class:!0,href:!0});var Jl=i(je);Zo=r(Jl,"SPAN",{});var Ql=i(Zo);v(ot.$$.fragment,Ql),Ql.forEach(o),Jl.forEach(o),$s=d(ln),Jo=r(ln,"SPAN",{});var Yl=i(Jo);Fs=n(Yl,"UniSpeechSatModel"),Yl.forEach(o),ln.forEach(o),La=d(t),E=r(t,"DIV",{class:!0});var Z=i(E);v(at.$$.fragment,Z),xs=d(Z),nt=r(Z,"P",{});var cn=i(nt);js=n(cn,`The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top. UniSpeechSat was proposed in `),st=r(cn,"A",{href:!0,rel:!0});var Kl=i(st);Cs=n(Kl,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Kl.forEach(o),qs=n(cn," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),cn.forEach(o),Es=d(Z),rt=r(Z,"P",{});var dn=i(rt);Ps=n(dn,"This model inherits from "),ho=r(dn,"A",{href:!0});var Gl=i(ho);As=n(Gl,"PreTrainedModel"),Gl.forEach(o),Ms=n(dn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),dn.forEach(o),zs=d(Z),it=r(Z,"P",{});var pn=i(it);Ds=n(pn,"This model is a PyTorch "),lt=r(pn,"A",{href:!0,rel:!0});var ec=i(lt);Os=n(ec,"torch.nn.Module"),ec.forEach(o),Vs=n(pn,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pn.forEach(o),Ls=d(Z),z=r(Z,"DIV",{class:!0});var J=i(z);v(ct.$$.fragment,J),Ns=d(J),he=r(J,"P",{});var xo=i(he);Ws=n(xo,"The "),mo=r(xo,"A",{href:!0});var tc=i(mo);Is=n(tc,"UniSpeechSatModel"),tc.forEach(o),Bs=n(xo," forward method, overrides the "),Qo=r(xo,"CODE",{});var oc=i(Qo);Hs=n(oc,"__call__"),oc.forEach(o),Xs=n(xo," special method."),xo.forEach(o),Rs=d(J),v(Ce.$$.fragment,J),Zs=d(J),Yo=r(J,"P",{});var ac=i(Yo);Js=n(ac,"Example:"),ac.forEach(o),Qs=d(J),v(dt.$$.fragment,J),J.forEach(o),Z.forEach(o),Na=d(t),me=r(t,"H2",{class:!0});var hn=i(me);qe=r(hn,"A",{id:!0,class:!0,href:!0});var nc=i(qe);Ko=r(nc,"SPAN",{});var sc=i(Ko);v(pt.$$.fragment,sc),sc.forEach(o),nc.forEach(o),Ys=d(hn),Go=r(hn,"SPAN",{});var rc=i(Go);Ks=n(rc,"UniSpeechSatForCTC"),rc.forEach(o),hn.forEach(o),Wa=d(t),P=r(t,"DIV",{class:!0});var Q=i(P);v(ht.$$.fragment,Q),Gs=d(Q),ue=r(Q,"P",{});var jo=i(ue);er=n(jo,"UniSpeechSat Model with a "),ea=r(jo,"CODE",{});var ic=i(ea);tr=n(ic,"language modeling"),ic.forEach(o),or=n(jo,` head on top for Connectionist Temporal Classification (CTC). UniSpeechSat was proposed in `),mt=r(jo,"A",{href:!0,rel:!0});var lc=i(mt);ar=n(lc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),lc.forEach(o),nr=n(jo," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),jo.forEach(o),sr=d(Q),ut=r(Q,"P",{});var mn=i(ut);rr=n(mn,"This model inherits from "),uo=r(mn,"A",{href:!0});var cc=i(uo);ir=n(cc,"PreTrainedModel"),cc.forEach(o),lr=n(mn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),mn.forEach(o),cr=d(Q),ft=r(Q,"P",{});var un=i(ft);dr=n(un,"This model is a PyTorch "),gt=r(un,"A",{href:!0,rel:!0});var dc=i(gt);pr=n(dc,"torch.nn.Module"),dc.forEach(o),hr=n(un,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),un.forEach(o),mr=d(Q),D=r(Q,"DIV",{class:!0});var Y=i(D);v(_t.$$.fragment,Y),ur=d(Y),fe=r(Y,"P",{});var Co=i(fe);fr=n(Co,"The "),fo=r(Co,"A",{href:!0});var pc=i(fo);gr=n(pc,"UniSpeechSatForCTC"),pc.forEach(o),_r=n(Co," forward method, overrides the "),ta=r(Co,"CODE",{});var hc=i(ta);vr=n(hc,"__call__"),hc.forEach(o),Sr=n(Co," special method."),Co.forEach(o),br=d(Y),v(Ee.$$.fragment,Y),wr=d(Y),oa=r(Y,"P",{});var mc=i(oa);yr=n(mc,"Example:"),mc.forEach(o),kr=d(Y),v(vt.$$.fragment,Y),Y.forEach(o),Q.forEach(o),Ia=d(t),ge=r(t,"H2",{class:!0});var fn=i(ge);Pe=r(fn,"A",{id:!0,class:!0,href:!0});var uc=i(Pe);aa=r(uc,"SPAN",{});var fc=i(aa);v(St.$$.fragment,fc),fc.forEach(o),uc.forEach(o),Tr=d(fn),na=r(fn,"SPAN",{});var gc=i(na);Ur=n(gc,"UniSpeechSatForSequenceClassification"),gc.forEach(o),fn.forEach(o),Ba=d(t),F=r(t,"DIV",{class:!0});var W=i(F);v(bt.$$.fragment,W),$r=d(W),sa=r(W,"P",{});var _c=i(sa);Fr=n(_c,`UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),_c.forEach(o),xr=d(W),wt=r(W,"P",{});var gn=i(wt);jr=n(gn,"UniSpeechSat was proposed in "),yt=r(gn,"A",{href:!0,rel:!0});var vc=i(yt);Cr=n(vc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),vc.forEach(o),qr=n(gn," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),gn.forEach(o),Er=d(W),kt=r(W,"P",{});var _n=i(kt);Pr=n(_n,"This model inherits from "),go=r(_n,"A",{href:!0});var Sc=i(go);Ar=n(Sc,"PreTrainedModel"),Sc.forEach(o),Mr=n(_n,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),_n.forEach(o),zr=d(W),Tt=r(W,"P",{});var vn=i(Tt);Dr=n(vn,"This model is a PyTorch "),Ut=r(vn,"A",{href:!0,rel:!0});var bc=i(Ut);Or=n(bc,"torch.nn.Module"),bc.forEach(o),Vr=n(vn,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vn.forEach(o),Lr=d(W),O=r(W,"DIV",{class:!0});var K=i(O);v($t.$$.fragment,K),Nr=d(K),_e=r(K,"P",{});var qo=i(_e);Wr=n(qo,"The "),_o=r(qo,"A",{href:!0});var wc=i(_o);Ir=n(wc,"UniSpeechSatForSequenceClassification"),wc.forEach(o),Br=n(qo," forward method, overrides the "),ra=r(qo,"CODE",{});var yc=i(ra);Hr=n(yc,"__call__"),yc.forEach(o),Xr=n(qo," special method."),qo.forEach(o),Rr=d(K),v(Ae.$$.fragment,K),Zr=d(K),ia=r(K,"P",{});var kc=i(ia);Jr=n(kc,"Example:"),kc.forEach(o),Qr=d(K),v(Ft.$$.fragment,K),K.forEach(o),W.forEach(o),Ha=d(t),ve=r(t,"H2",{class:!0});var Sn=i(ve);Me=r(Sn,"A",{id:!0,class:!0,href:!0});var Tc=i(Me);la=r(Tc,"SPAN",{});var Uc=i(la);v(xt.$$.fragment,Uc),Uc.forEach(o),Tc.forEach(o),Yr=d(Sn),ca=r(Sn,"SPAN",{});var $c=i(ca);Kr=n($c,"UniSpeechSatForAudioFrameClassification"),$c.forEach(o),Sn.forEach(o),Xa=d(t),x=r(t,"DIV",{class:!0});var I=i(x);v(jt.$$.fragment,I),Gr=d(I),da=r(I,"P",{});var Fc=i(da);ei=n(Fc,"UniSpeech-SAT Model with a frame classification head on top for tasks like Speaker Diarization."),Fc.forEach(o),ti=d(I),Ct=r(I,"P",{});var bn=i(Ct);oi=n(bn,"UniSpeechSat was proposed in "),qt=r(bn,"A",{href:!0,rel:!0});var xc=i(qt);ai=n(xc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),xc.forEach(o),ni=n(bn," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),bn.forEach(o),si=d(I),Et=r(I,"P",{});var wn=i(Et);ri=n(wn,"This model inherits from "),vo=r(wn,"A",{href:!0});var jc=i(vo);ii=n(jc,"PreTrainedModel"),jc.forEach(o),li=n(wn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),wn.forEach(o),ci=d(I),Pt=r(I,"P",{});var yn=i(Pt);di=n(yn,"This model is a PyTorch "),At=r(yn,"A",{href:!0,rel:!0});var Cc=i(At);pi=n(Cc,"torch.nn.Module"),Cc.forEach(o),hi=n(yn,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yn.forEach(o),mi=d(I),V=r(I,"DIV",{class:!0});var G=i(V);v(Mt.$$.fragment,G),ui=d(G),Se=r(G,"P",{});var Eo=i(Se);fi=n(Eo,"The "),So=r(Eo,"A",{href:!0});var qc=i(So);gi=n(qc,"UniSpeechSatForAudioFrameClassification"),qc.forEach(o),_i=n(Eo," forward method, overrides the "),pa=r(Eo,"CODE",{});var Ec=i(pa);vi=n(Ec,"__call__"),Ec.forEach(o),Si=n(Eo," special method."),Eo.forEach(o),bi=d(G),v(ze.$$.fragment,G),wi=d(G),ha=r(G,"P",{});var Pc=i(ha);yi=n(Pc,"Example:"),Pc.forEach(o),ki=d(G),v(zt.$$.fragment,G),G.forEach(o),I.forEach(o),Ra=d(t),be=r(t,"H2",{class:!0});var kn=i(be);De=r(kn,"A",{id:!0,class:!0,href:!0});var Ac=i(De);ma=r(Ac,"SPAN",{});var Mc=i(ma);v(Dt.$$.fragment,Mc),Mc.forEach(o),Ac.forEach(o),Ti=d(kn),ua=r(kn,"SPAN",{});var zc=i(ua);Ui=n(zc,"UniSpeechSatForXVector"),zc.forEach(o),kn.forEach(o),Za=d(t),j=r(t,"DIV",{class:!0});var B=i(j);v(Ot.$$.fragment,B),$i=d(B),fa=r(B,"P",{});var Dc=i(fa);Fi=n(Dc,"UniSpeech-SAT Model with an XVector feature extraction head on top for tasks like Speaker Verification."),Dc.forEach(o),xi=d(B),Vt=r(B,"P",{});var Tn=i(Vt);ji=n(Tn,"UniSpeechSat was proposed in "),Lt=r(Tn,"A",{href:!0,rel:!0});var Oc=i(Lt);Ci=n(Oc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Oc.forEach(o),qi=n(Tn," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Tn.forEach(o),Ei=d(B),Nt=r(B,"P",{});var Un=i(Nt);Pi=n(Un,"This model inherits from "),bo=r(Un,"A",{href:!0});var Vc=i(bo);Ai=n(Vc,"PreTrainedModel"),Vc.forEach(o),Mi=n(Un,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Un.forEach(o),zi=d(B),Wt=r(B,"P",{});var $n=i(Wt);Di=n($n,"This model is a PyTorch "),It=r($n,"A",{href:!0,rel:!0});var Lc=i(It);Oi=n(Lc,"torch.nn.Module"),Lc.forEach(o),Vi=n($n,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$n.forEach(o),Li=d(B),L=r(B,"DIV",{class:!0});var ee=i(L);v(Bt.$$.fragment,ee),Ni=d(ee),we=r(ee,"P",{});var Po=i(we);Wi=n(Po,"The "),wo=r(Po,"A",{href:!0});var Nc=i(wo);Ii=n(Nc,"UniSpeechSatForXVector"),Nc.forEach(o),Bi=n(Po," forward method, overrides the "),ga=r(Po,"CODE",{});var Wc=i(ga);Hi=n(Wc,"__call__"),Wc.forEach(o),Xi=n(Po," special method."),Po.forEach(o),Ri=d(ee),v(Oe.$$.fragment,ee),Zi=d(ee),_a=r(ee,"P",{});var Ic=i(_a);Ji=n(Ic,"Example:"),Ic.forEach(o),Qi=d(ee),v(Ht.$$.fragment,ee),ee.forEach(o),B.forEach(o),Ja=d(t),ye=r(t,"H2",{class:!0});var Fn=i(ye);Ve=r(Fn,"A",{id:!0,class:!0,href:!0});var Bc=i(Ve);va=r(Bc,"SPAN",{});var Hc=i(va);v(Xt.$$.fragment,Hc),Hc.forEach(o),Bc.forEach(o),Yi=d(Fn),Sa=r(Fn,"SPAN",{});var Xc=i(Sa);Ki=n(Xc,"UniSpeechSatForPreTraining"),Xc.forEach(o),Fn.forEach(o),Qa=d(t),A=r(t,"DIV",{class:!0});var te=i(A);v(Rt.$$.fragment,te),Gi=d(te),ke=r(te,"P",{});var Ao=i(ke);el=n(Ao,"UniSpeechSat Model with a quantizer and "),ba=r(Ao,"CODE",{});var Rc=i(ba);tl=n(Rc,"VQ"),Rc.forEach(o),ol=n(Ao,` head on top. 
UniSpeechSat was proposed in `),Zt=r(Ao,"A",{href:!0,rel:!0});var Zc=i(Zt);al=n(Zc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Zc.forEach(o),nl=n(Ao," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Ao.forEach(o),sl=d(te),Jt=r(te,"P",{});var xn=i(Jt);rl=n(xn,"This model inherits from "),yo=r(xn,"A",{href:!0});var Jc=i(yo);il=n(Jc,"PreTrainedModel"),Jc.forEach(o),ll=n(xn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),xn.forEach(o),cl=d(te),Qt=r(te,"P",{});var jn=i(Qt);dl=n(jn,"This model is a PyTorch "),Yt=r(jn,"A",{href:!0,rel:!0});var Qc=i(Yt);pl=n(Qc,"torch.nn.Module"),Qc.forEach(o),hl=n(jn,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jn.forEach(o),ml=d(te),N=r(te,"DIV",{class:!0});var oe=i(N);v(Kt.$$.fragment,oe),ul=d(oe),Te=r(oe,"P",{});var Mo=i(Te);fl=n(Mo,"The "),ko=r(Mo,"A",{href:!0});var Yc=i(ko);gl=n(Yc,"UniSpeechSatForPreTraining"),Yc.forEach(o),_l=n(Mo," forward method, overrides the "),wa=r(Mo,"CODE",{});var Kc=i(wa);vl=n(Kc,"__call__"),Kc.forEach(o),Sl=n(Mo," special method."),Mo.forEach(o),bl=d(oe),v(Le.$$.fragment,oe),wl=d(oe),ya=r(oe,"P",{});var Gc=i(ya);yl=n(Gc,"Example:"),Gc.forEach(o),kl=d(oe),v(Gt.$$.fragment,oe),oe.forEach(o),te.forEach(o),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(pd)),l(k,"id","unispeechsat"),l(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(k,"href","#unispeechsat"),l(u,"class","relative group"),l(Ue,"id","overview"),l(Ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ue,"href","#overview"),l(ne,"class","relative group"),l(We,"href","https://arxiv.org/abs/2110.05752"),l(We,"rel","nofollow"),l(ro,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),l(io,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),l(He,"href","https://huggingface.co/patrickvonplaten"),l(He,"rel","nofollow"),l(Xe,"href","https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT"),l(Xe,"rel","nofollow"),l(Fe,"id","transformers.UniSpeechSatConfig"),l(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Fe,"href","#transformers.UniSpeechSatConfig"),l(se,"class","relative group"),l(lo,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatModel"),l(Je,"href","https://huggingface.co/facebook/unispeech_sat-base-960h"),l(Je,"rel","nofollow"),l(co,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(po,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(q,"class","docstring"),l(xe,"id","transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput"),l(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(xe,"href","#transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput"),l(le,"class","relative group"),l(ce,"class","docstring"),l(de,"class","docstring"),l(je,"id","transformers.UniSpeechSatModel"),l(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(je,"href","#transformers.UniSpeechSatModel"),l(pe,"class","relative group"),l(st,"href","https://arxiv.org/abs/2006.11477"),l(st,"rel","nofollow"),l(ho,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(lt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(lt,"rel","nofollow"),l(mo,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatModel"),l(z,"class","docstring"),l(E,"class","docstring"),l(qe,"id","transformers.UniSpeechSatForCTC"),l(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(qe,"href","#transformers.UniSpeechSatForCTC"),l(me,"class","relative group"),l(mt,"href","https://arxiv.org/abs/2006.11477"),l(mt,"rel","nofollow"),l(uo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(gt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(gt,"rel","nofollow"),l(fo,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForCTC"),l(D,"class","docstring"),l(P,"class","docstring"),l(Pe,"id","transformers.UniSpeechSatForSequenceClassification"),l(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Pe,"href","#transformers.UniSpeechSatForSequenceClassification"),l(ge,"class","relative group"),l(yt,"href","https://arxiv.org/abs/2006.11477"),l(yt,"rel","nofollow"),l(go,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ut,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ut,"rel","nofollow"),l(_o,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForSequenceClassification"),l(O,"class","docstring"),l(F,"class","docstring"),l(Me,"id","transformers.UniSpeechSatForAudioFrameClassification"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.UniSpeechSatForAudioFrameClassification"),l(ve,"class","relative group"),l(qt,"href","https://arxiv.org/abs/2006.11477"),l(qt,"rel","nofollow"),l(vo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(At,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(At,"rel","nofollow"),l(So,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForAudioFrameClassification"),l(V,"class","docstring"),l(x,"class","docstring"),l(De,"id","transformers.UniSpeechSatForXVector"),l(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(De,"href","#transformers.UniSpeechSatForXVector"),l(be,"class","relative 
group"),l(Lt,"href","https://arxiv.org/abs/2006.11477"),l(Lt,"rel","nofollow"),l(bo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(It,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(It,"rel","nofollow"),l(wo,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForXVector"),l(L,"class","docstring"),l(j,"class","docstring"),l(Ve,"id","transformers.UniSpeechSatForPreTraining"),l(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ve,"href","#transformers.UniSpeechSatForPreTraining"),l(ye,"class","relative group"),l(Zt,"href","https://arxiv.org/abs/2006.11477"),l(Zt,"rel","nofollow"),l(yo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Yt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Yt,"rel","nofollow"),l(ko,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech_sat#transformers.UniSpeechSatForPreTraining"),l(N,"class","docstring"),l(A,"class","docstring")},m(t,p){e(document.head,h),m(t,T,p),m(t,u,p),e(u,k),e(k,U),S(g,U,null),e(u,f),e(u,$),e($,Cn),m(t,Fa,p),m(t,ne,p),e(ne,Ue),e(Ue,Do),S(Ne,Do,null),e(ne,qn),e(ne,Oo),e(Oo,En),m(t,xa,p),m(t,$e,p),e($e,Pn),e($e,We),e(We,An),e($e,Mn),m(t,ja,p),m(t,ao,p),e(ao,zn),m(t,Ca,p),m(t,no,p),e(no,Vo),e(Vo,Dn),m(t,qa,p),m(t,so,p),e(so,On),m(t,Ea,p),m(t,H,p),e(H,Ie),e(Ie,Vn),e(Ie,ro),e(ro,Ln),e(Ie,Nn),e(H,Wn),e(H,Be),e(Be,In),e(Be,io),e(io,Bn),e(Be,Hn),e(H,Xn),e(H,Lo),e(Lo,Rn),m(t,Pa,p),m(t,X,p),e(X,Zn),e(X,He),e(He,Jn),e(X,Qn),e(X,Xe),e(Xe,Yn),e(X,Kn),m(t,Aa,p),m(t,se,p),e(se,Fe),e(Fe,No),S(Re,No,null),e(se,Gn),e(se,Wo),e(Wo,es),m(t,Ma,p),m(t,q,p),S(Ze,q,null),e(q,ts),e(q,re),e(re,os),e(re,lo),e(lo,as),e(re,ns),e(re,Je),e(Je,ss),e(re,rs),e(q,is),e(q,ie),e(ie,ls),e(ie,co),e(co,cs),e(ie,ds),e(ie,po),e(po,ps),e(ie,hs),e(q,ms),e(q,Io),e(Io,us),e(q,fs),S(Qe,q,null),m(t,za,p),m(t,le,p),e(le,xe),e(xe,Bo),S(Ye,Bo,null),e(le,gs),e(le,Ho),e(Ho,_s),m(t,Da,p),m(t,ce,p),S(Ke,ce,null),e(ce,vs),e(ce,Ge),e(Ge,Ss),e(Ge,Xo),e(Xo,bs),e(Ge,ws),m(t,Oa,p),m(t,de,p),S(et,de,null),e(de,ys),e(de,tt),e(tt,ks),e(tt,Ro),e(Ro,Ts),e(tt,Us),m(t,Va,p),m(t,pe,p),e(pe,je),e(je,Zo),S(ot,Zo,null),e(pe,$s),e(pe,Jo),e(Jo,Fs),m(t,La,p),m(t,E,p),S(at,E,null),e(E,xs),e(E,nt),e(nt,js),e(nt,st),e(st,Cs),e(nt,qs),e(E,Es),e(E,rt),e(rt,Ps),e(rt,ho),e(ho,As),e(rt,Ms),e(E,zs),e(E,it),e(it,Ds),e(it,lt),e(lt,Os),e(it,Vs),e(E,Ls),e(E,z),S(ct,z,null),e(z,Ns),e(z,he),e(he,Ws),e(he,mo),e(mo,Is),e(he,Bs),e(he,Qo),e(Qo,Hs),e(he,Xs),e(z,Rs),S(Ce,z,null),e(z,Zs),e(z,Yo),e(Yo,Js),e(z,Qs),S(dt,z,null),m(t,Na,p),m(t,me,p),e(me,qe),e(qe,Ko),S(pt,Ko,null),e(me,Ys),e(me,Go),e(Go,Ks),m(t,Wa,p),m(t,P,p),S(ht,P,null),e(P,Gs),e(P,ue),e(ue,er),e(ue,ea),e(ea,tr),e(ue,or),e(ue,mt),e(mt,ar),e(ue,nr),e(P,sr),e(P,ut),e(ut,rr),e(ut,uo),e(uo,ir),e(ut,lr),e(P,cr),e(P,ft),e(ft,dr),e(ft,gt),e(gt,pr),e(ft,hr),e(P,mr),e(P,D),S(_t,D,null),e(D,ur),e(D,fe),e(fe,fr),e(fe,fo),e(fo,gr),e(fe,_r),e(fe,ta),e(ta,vr),e(fe,Sr),e(D,br),S(Ee,D,null),e(D,wr),e(D,oa),e(oa,yr),e(D,kr),S(vt,D,null),m(t,Ia,p),m(t,ge,p),e(ge,Pe),e(Pe,aa),S(St,aa,null),e(ge,Tr),e(ge,na),e(na,Ur),m(t,Ba,p),m(t,F,p),S(bt,F,null),e(F,$r),e(F,sa),e(sa,Fr),e(F,xr),e(F,wt),e(wt,jr),e(wt,yt),e(yt,Cr),e(wt,qr),e(F,Er),e(F,kt),e(kt,Pr),e(kt,go),e(go,Ar),e(kt,Mr),e(F,zr),e(F,Tt),e(Tt,Dr),e(Tt,Ut),e(Ut,Or),e(Tt,Vr),e(F,Lr),e(F,O),S($t,O,null),e(O,Nr),e(O,_e),e(_e,Wr),e(_e,_o),e(_o,Ir),e(_e,Br),e(_e,
ra),e(ra,Hr),e(_e,Xr),e(O,Rr),S(Ae,O,null),e(O,Zr),e(O,ia),e(ia,Jr),e(O,Qr),S(Ft,O,null),m(t,Ha,p),m(t,ve,p),e(ve,Me),e(Me,la),S(xt,la,null),e(ve,Yr),e(ve,ca),e(ca,Kr),m(t,Xa,p),m(t,x,p),S(jt,x,null),e(x,Gr),e(x,da),e(da,ei),e(x,ti),e(x,Ct),e(Ct,oi),e(Ct,qt),e(qt,ai),e(Ct,ni),e(x,si),e(x,Et),e(Et,ri),e(Et,vo),e(vo,ii),e(Et,li),e(x,ci),e(x,Pt),e(Pt,di),e(Pt,At),e(At,pi),e(Pt,hi),e(x,mi),e(x,V),S(Mt,V,null),e(V,ui),e(V,Se),e(Se,fi),e(Se,So),e(So,gi),e(Se,_i),e(Se,pa),e(pa,vi),e(Se,Si),e(V,bi),S(ze,V,null),e(V,wi),e(V,ha),e(ha,yi),e(V,ki),S(zt,V,null),m(t,Ra,p),m(t,be,p),e(be,De),e(De,ma),S(Dt,ma,null),e(be,Ti),e(be,ua),e(ua,Ui),m(t,Za,p),m(t,j,p),S(Ot,j,null),e(j,$i),e(j,fa),e(fa,Fi),e(j,xi),e(j,Vt),e(Vt,ji),e(Vt,Lt),e(Lt,Ci),e(Vt,qi),e(j,Ei),e(j,Nt),e(Nt,Pi),e(Nt,bo),e(bo,Ai),e(Nt,Mi),e(j,zi),e(j,Wt),e(Wt,Di),e(Wt,It),e(It,Oi),e(Wt,Vi),e(j,Li),e(j,L),S(Bt,L,null),e(L,Ni),e(L,we),e(we,Wi),e(we,wo),e(wo,Ii),e(we,Bi),e(we,ga),e(ga,Hi),e(we,Xi),e(L,Ri),S(Oe,L,null),e(L,Zi),e(L,_a),e(_a,Ji),e(L,Qi),S(Ht,L,null),m(t,Ja,p),m(t,ye,p),e(ye,Ve),e(Ve,va),S(Xt,va,null),e(ye,Yi),e(ye,Sa),e(Sa,Ki),m(t,Qa,p),m(t,A,p),S(Rt,A,null),e(A,Gi),e(A,ke),e(ke,el),e(ke,ba),e(ba,tl),e(ke,ol),e(ke,Zt),e(Zt,al),e(ke,nl),e(A,sl),e(A,Jt),e(Jt,rl),e(Jt,yo),e(yo,il),e(Jt,ll),e(A,cl),e(A,Qt),e(Qt,dl),e(Qt,Yt),e(Yt,pl),e(Qt,hl),e(A,ml),e(A,N),S(Kt,N,null),e(N,ul),e(N,Te),e(Te,fl),e(Te,ko),e(ko,gl),e(Te,_l),e(Te,wa),e(wa,vl),e(Te,Sl),e(N,bl),S(Le,N,null),e(N,wl),e(N,ya),e(ya,yl),e(N,kl),S(Gt,N,null),Ya=!0},p(t,[p]){const eo={};p&2&&(eo.$$scope={dirty:p,ctx:t}),Ce.$set(eo);const ka={};p&2&&(ka.$$scope={dirty:p,ctx:t}),Ee.$set(ka);const Ta={};p&2&&(Ta.$$scope={dirty:p,ctx:t}),Ae.$set(Ta);const Ua={};p&2&&(Ua.$$scope={dirty:p,ctx:t}),ze.$set(Ua);const to={};p&2&&(to.$$scope={dirty:p,ctx:t}),Oe.$set(to);const 
$a={};p&2&&($a.$$scope={dirty:p,ctx:t}),Le.$set($a)},i(t){Ya||(b(g.$$.fragment,t),b(Ne.$$.fragment,t),b(Re.$$.fragment,t),b(Ze.$$.fragment,t),b(Qe.$$.fragment,t),b(Ye.$$.fragment,t),b(Ke.$$.fragment,t),b(et.$$.fragment,t),b(ot.$$.fragment,t),b(at.$$.fragment,t),b(ct.$$.fragment,t),b(Ce.$$.fragment,t),b(dt.$$.fragment,t),b(pt.$$.fragment,t),b(ht.$$.fragment,t),b(_t.$$.fragment,t),b(Ee.$$.fragment,t),b(vt.$$.fragment,t),b(St.$$.fragment,t),b(bt.$$.fragment,t),b($t.$$.fragment,t),b(Ae.$$.fragment,t),b(Ft.$$.fragment,t),b(xt.$$.fragment,t),b(jt.$$.fragment,t),b(Mt.$$.fragment,t),b(ze.$$.fragment,t),b(zt.$$.fragment,t),b(Dt.$$.fragment,t),b(Ot.$$.fragment,t),b(Bt.$$.fragment,t),b(Oe.$$.fragment,t),b(Ht.$$.fragment,t),b(Xt.$$.fragment,t),b(Rt.$$.fragment,t),b(Kt.$$.fragment,t),b(Le.$$.fragment,t),b(Gt.$$.fragment,t),Ya=!0)},o(t){w(g.$$.fragment,t),w(Ne.$$.fragment,t),w(Re.$$.fragment,t),w(Ze.$$.fragment,t),w(Qe.$$.fragment,t),w(Ye.$$.fragment,t),w(Ke.$$.fragment,t),w(et.$$.fragment,t),w(ot.$$.fragment,t),w(at.$$.fragment,t),w(ct.$$.fragment,t),w(Ce.$$.fragment,t),w(dt.$$.fragment,t),w(pt.$$.fragment,t),w(ht.$$.fragment,t),w(_t.$$.fragment,t),w(Ee.$$.fragment,t),w(vt.$$.fragment,t),w(St.$$.fragment,t),w(bt.$$.fragment,t),w($t.$$.fragment,t),w(Ae.$$.fragment,t),w(Ft.$$.fragment,t),w(xt.$$.fragment,t),w(jt.$$.fragment,t),w(Mt.$$.fragment,t),w(ze.$$.fragment,t),w(zt.$$.fragment,t),w(Dt.$$.fragment,t),w(Ot.$$.fragment,t),w(Bt.$$.fragment,t),w(Oe.$$.fragment,t),w(Ht.$$.fragment,t),w(Xt.$$.fragment,t),w(Rt.$$.fragment,t),w(Kt.$$.fragment,t),w(Le.$$.fragment,t),w(Gt.$$.fragment,t),Ya=!1},d(t){o(h),t&&o(T),t&&o(u),y(g),t&&o(Fa),t&&o(ne),y(Ne),t&&o(xa),t&&o($e),t&&o(ja),t&&o(ao),t&&o(Ca),t&&o(no),t&&o(qa),t&&o(so),t&&o(Ea),t&&o(H),t&&o(Pa),t&&o(X),t&&o(Aa),t&&o(se),y(Re),t&&o(Ma),t&&o(q),y(Ze),y(Qe),t&&o(za),t&&o(le),y(Ye),t&&o(Da),t&&o(ce),y(Ke),t&&o(Oa),t&&o(de),y(et),t&&o(Va),t&&o(pe),y(ot),t&&o(La),t&&o(E),y(at),y(ct),y(Ce),y(dt),t&&o(Na),t&&o(me),y(pt),t&&o(Wa),t&&o(P),y(ht),y(_t),y(Ee),y(vt),t&&o(Ia),t&&o(ge),y(St),t&&o(Ba),t&&o(F),y(bt),y($t),y(Ae),y(Ft),t&&o(Ha),t&&o(ve),y(xt),t&&o(Xa),t&&o(x),y(jt),y(Mt),y(ze),y(zt),t&&o(Ra),t&&o(be),y(Dt),t&&o(Za),t&&o(j),y(Ot),y(Bt),y(Oe),y(Ht),t&&o(Ja),t&&o(ye),y(Xt),t&&o(Qa),t&&o(A),y(Rt),y(Kt),y(Le),y(Gt)}}}const pd={local:"unispeechsat",sections:[{local:"overview",title:"Overview"},{local:"transformers.UniSpeechSatConfig",title:"UniSpeechSatConfig"},{local:"transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatBaseModelOutput",title:"UniSpeechSat specific outputs"},{local:"transformers.UniSpeechSatModel",title:"UniSpeechSatModel"},{local:"transformers.UniSpeechSatForCTC",title:"UniSpeechSatForCTC"},{local:"transformers.UniSpeechSatForSequenceClassification",title:"UniSpeechSatForSequenceClassification"},{local:"transformers.UniSpeechSatForAudioFrameClassification",title:"UniSpeechSatForAudioFrameClassification"},{local:"transformers.UniSpeechSatForXVector",title:"UniSpeechSatForXVector"},{local:"transformers.UniSpeechSatForPreTraining",title:"UniSpeechSatForPreTraining"}],title:"UniSpeech-SAT"};function hd(C,h,T){let{fw:u}=h;return C.$$set=k=>{"fw"in k&&T(0,u=k.fw)},[u]}class Sd extends ed{constructor(h){super();td(this,h,hd,dd,od,{fw:0})}}export{Sd as default,pd as metadata};
9,928
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/visual_bert.mdx-aaa579bd.js
import{S as Dd,i as Ld,s as Sd,e as o,k as d,w as _,t as r,L as Wd,c as n,d as s,m as c,a,x as v,h as i,b as l,J as e,g as p,y as b,q as k,o as T,B as y}from"../../chunks/vendor-b1433968.js";import{T as zs}from"../../chunks/Tip-c3840994.js";import{D as L}from"../../chunks/Docstring-ff504c58.js";import{C as Le}from"../../chunks/CodeBlock-a320dbd7.js";import{I as se}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Od(z){let h,B,m,w,V;return{c(){h=o("p"),B=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=o("code"),w=r("Module"),V=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=n(g,"P",{});var f=a(h);B=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(f,"CODE",{});var $=a(m);w=i($,"Module"),$.forEach(s),V=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(s)},m(g,f){p(g,h,f),e(h,B),e(h,m),e(m,w),e(h,V)},d(g){g&&s(h)}}}function Qd(z){let h,B,m,w,V;return{c(){h=o("p"),B=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=o("code"),w=r("Module"),V=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=n(g,"P",{});var f=a(h);B=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(f,"CODE",{});var $=a(m);w=i($,"Module"),$.forEach(s),V=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(s)},m(g,f){p(g,h,f),e(h,B),e(h,m),e(m,w),e(h,V)},d(g){g&&s(h)}}}function Hd(z){let h,B,m,w,V;return{c(){h=o("p"),B=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=o("code"),w=r("Module"),V=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=n(g,"P",{});var f=a(h);B=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(f,"CODE",{});var $=a(m);w=i($,"Module"),$.forEach(s),V=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(s)},m(g,f){p(g,h,f),e(h,B),e(h,m),e(m,w),e(h,V)},d(g){g&&s(h)}}}function Ud(z){let h,B,m,w,V;return{c(){h=o("p"),B=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=o("code"),w=r("Module"),V=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=n(g,"P",{});var f=a(h);B=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(f,"CODE",{});var $=a(m);w=i($,"Module"),$.forEach(s),V=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(s)},m(g,f){p(g,h,f),e(h,B),e(h,m),e(m,w),e(h,V)},d(g){g&&s(h)}}}function Kd(z){let 
h,B,m,w,V;return{c(){h=o("p"),B=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=o("code"),w=r("Module"),V=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=n(g,"P",{});var f=a(h);B=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(f,"CODE",{});var $=a(m);w=i($,"Module"),$.forEach(s),V=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(s)},m(g,f){p(g,h,f),e(h,B),e(h,m),e(m,w),e(h,V)},d(g){g&&s(h)}}}function Jd(z){let h,B,m,w,V;return{c(){h=o("p"),B=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=o("code"),w=r("Module"),V=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=n(g,"P",{});var f=a(h);B=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(f,"CODE",{});var $=a(m);w=i($,"Module"),$.forEach(s),V=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(s)},m(g,f){p(g,h,f),e(h,B),e(h,m),e(m,w),e(h,V)},d(g){g&&s(h)}}}function Gd(z){let h,B,m,w,V,g,f,$,$n,wo,oe,Te,xs,Se,zn,Fs,xn,Bo,ye,Fn,We,qn,En,Vo,Ht,Pn,$o,Ut,qs,Mn,zo,Kt,An,xo,we,Es,Oe,jn,Jt,Cn,Rn,Nn,Ps,Ms,In,Fo,ne,Be,As,Qe,Dn,js,Ln,qo,Gt,Sn,Eo,Yt,Wn,Po,Ve,On,Xt,Qn,Hn,Mo,$e,Cs,Zt,He,Un,Kn,Jn,Rs,es,Ue,Gn,Yn,Ao,ze,Xn,ts,Zn,ea,jo,Ke,Co,S,ta,Je,sa,oa,Ge,na,aa,Ro,ae,xe,Ns,Ye,ra,Is,ia,No,x,Xe,la,re,da,ss,ca,ua,Ze,ha,pa,ma,ie,fa,os,ga,_a,ns,va,ba,ka,Ds,Ta,ya,et,Io,le,Fe,Ls,tt,wa,Ss,Ba,Do,F,st,Va,ot,$a,as,za,xa,Fa,nt,qa,at,Ea,Pa,Ma,rt,Aa,Ws,ja,Ca,Ra,j,it,Na,de,Ia,rs,Da,La,Os,Sa,Wa,Oa,qe,Qa,Qs,Ha,Ua,lt,Lo,ce,Ee,Hs,dt,Ka,Us,Ja,So,q,ct,Ga,ue,Ya,Ks,Xa,Za,Js,er,tr,sr,ut,or,is,nr,ar,rr,ht,ir,pt,lr,dr,cr,C,mt,ur,he,hr,ls,pr,mr,Gs,fr,gr,_r,Pe,vr,Ys,br,kr,ft,Wo,pe,Me,Xs,gt,Tr,Zs,yr,Oo,E,_t,wr,eo,Br,Vr,vt,$r,ds,zr,xr,Fr,bt,qr,kt,Er,Pr,Mr,R,Tt,Ar,me,jr,cs,Cr,Rr,to,Nr,Ir,Dr,Ae,Lr,so,Sr,Wr,yt,Qo,fe,je,oo,wt,Or,no,Qr,Ho,P,Bt,Hr,ao,Ur,Kr,Vt,Jr,us,Gr,Yr,Xr,$t,Zr,zt,ei,ti,si,N,xt,oi,ge,ni,hs,ai,ri,ro,ii,li,di,Ce,ci,io,ui,hi,Ft,Uo,_e,Re,lo,qt,pi,co,mi,Ko,M,Et,fi,uo,gi,_i,Pt,vi,ps,bi,ki,Ti,Mt,yi,At,wi,Bi,Vi,I,jt,$i,ve,zi,ms,xi,Fi,ho,qi,Ei,Pi,Ne,Mi,po,Ai,ji,Ct,Jo,be,Ie,mo,Rt,Ci,fo,Ri,Go,A,Nt,Ni,go,Ii,Di,It,Li,fs,Si,Wi,Oi,Dt,Qi,Lt,Hi,Ui,Ki,D,St,Ji,ke,Gi,gs,Yi,Xi,_o,Zi,el,tl,De,sl,vo,ol,nl,Wt,Yo;return g=new se({}),Se=new se({}),Qe=new se({}),Ke=new Le({props:{code:`import torch from transformers import BertTokenizer, VisualBertModel model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") inputs = tokenizer("What is the man eating?", return_tensors="pt") # this is a custom function that returns the visual embeddings given the image path visual_embeds = get_visual_embeddings(image_path) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update({ "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask }) outputs = model(**inputs) 
last_hidden_state = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> torch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertModel</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">model = VisualBertModel.from_pretrained(<span class="hljs-string">&quot;uclanlp/visualbert-vqa-coco-pre&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">inputs = tokenizer(<span class="hljs-string">&quot;What is the man eating?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># this is a custom function that returns the visual embeddings given the image path</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">visual_embeds = get_visual_embeddings(image_path)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">inputs.update({</span> <span class="hljs-meta">...</span> <span class="language-python"> <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds,</span> <span class="hljs-meta">...</span> <span class="language-python"> <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids,</span> <span class="hljs-meta">...</span> <span class="language-python"> <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask</span> <span class="hljs-meta">...</span> <span class="language-python">})</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">outputs = model(**inputs)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">last_hidden_state = outputs.last_hidden_state</span>`}}),Ye=new se({}),Xe=new L({props:{name:"class transformers.VisualBertConfig",anchor:"transformers.VisualBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"visual_embedding_dim",val:" = 512"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"bypass_transformer",val:" = False"},{name:"special_visual_initialize",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 
2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/configuration_visual_bert.py#L37",parametersDescription:[{anchor:"transformers.VisualBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the VisualBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel">VisualBertModel</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <code>inputs_ids</code> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel">VisualBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.VisualBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.VisualBertConfig.visual_embedding_dim",description:`<strong>visual_embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the visual embeddings to be passed to the model.`,name:"visual_embedding_dim"},{anchor:"transformers.VisualBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.VisualBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.VisualBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.VisualBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.VisualBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.VisualBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.VisualBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.VisualBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel">VisualBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.VisualBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.VisualBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.VisualBertConfig.bypass_transformer",description:`<strong>bypass_transformer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should bypass the transformer for the visual embeddings. If set to <code>True</code>, the model directly concatenates the visual embeddings from <code>VisualBertEmbeddings</code> with text output from transformers, and then pass it to a self-attention layer.`,name:"bypass_transformer"},{anchor:"transformers.VisualBertConfig.special_visual_initialize",description:`<strong>special_visual_initialize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the visual token type and position type embedding weights should be initialized the same as the textual token type and positive type embeddings. 
When set to <code>True</code>, the weights of the textual token type and position type embeddings are copied to the respective visual embedding layers.`,name:"special_visual_initialize"}]}}),et=new Le({props:{code:`from transformers import VisualBertModel, VisualBertConfig # Initializing a VisualBERT visualbert-vqa-coco-pre style configuration configuration = VisualBertConfig.from_pretrained('visualbert-vqa-coco-pre') # Initializing a model from the visualbert-vqa-coco-pre style configuration model = VisualBertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisualBertModel, VisualBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a VisualBERT visualbert-vqa-coco-pre style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = VisualBertConfig.from_pretrained(<span class="hljs-string">&#x27;visualbert-vqa-coco-pre&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the visualbert-vqa-coco-pre style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisualBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),tt=new se({}),st=new L({props:{name:"class transformers.VisualBertModel",anchor:"transformers.VisualBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L679",parametersDescription:[{anchor:"transformers.VisualBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),it=new L({props:{name:"forward",anchor:"transformers.VisualBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"visual_embeds",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"visual_token_type_ids",val:" = None"},{name:"image_text_alignment",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L718",parametersDescription:[{anchor:"transformers.VisualBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisualBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisualBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.VisualBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisualBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.VisualBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.VisualBertModel.forward.visual_embeds",description:`<strong>visual_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length, visual_embedding_dim)</code>, <em>optional</em>) &#x2014; The embedded representation of the visual inputs, generally derived using using an object detector.`,name:"visual_embeds"},{anchor:"transformers.VisualBertModel.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on visual embeddings. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.VisualBertModel.forward.visual_token_type_ids",description:`<strong>visual_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate different portions of the visual embeds.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a> The authors of VisualBERT set the <em>visual_token_type_ids</em> to <em>1</em> for all tokens.`,name:"visual_token_type_ids"},{anchor:"transformers.VisualBertModel.forward.image_text_alignment",description:`<strong>image_text_alignment</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length, alignment_number)</code>, <em>optional</em>) &#x2014; Image-Text alignment uses to decide the position IDs of the visual embeddings.`,name:"image_text_alignment"},{anchor:"transformers.VisualBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisualBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisualBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig" >VisualBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qe=new zs({props:{$$slots:{default:[Od]},$$scope:{ctx:z}}}),lt=new Le({props:{code:`# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image. 
from transformers import BertTokenizer, VisualBertModel import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = VisualBertModel.from_pretrained('uclanlp/visualbert-vqa-coco-pre') inputs = tokenizer("The capital of France is Paris.", return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update({ "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask }) outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-comment"># Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image.</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertModel <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) model = VisualBertModel.from_pretrained(<span class="hljs-string">&#x27;uclanlp/visualbert-vqa-coco-pre&#x27;</span>) inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) visual_embeds = get_visual_embeddings(image).unsqueeze(<span class="hljs-number">0</span>) visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>) inputs.update({ <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds, <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids, <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask }) outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`}}),dt=new se({}),ct=new L({props:{name:"class transformers.VisualBertForPreTraining",anchor:"transformers.VisualBertForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L873",parametersDescription:[{anchor:"transformers.VisualBertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mt=new L({props:{name:"forward",anchor:"transformers.VisualBertForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"visual_embeds",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"visual_token_type_ids",val:" = None"},{name:"image_text_alignment",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"sentence_image_labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L889",parametersDescription:[{anchor:"transformers.VisualBertForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisualBertForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisualBertForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.VisualBertForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisualBertForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.VisualBertForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.VisualBertForPreTraining.forward.visual_embeds",description:`<strong>visual_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length, visual_embedding_dim)</code>, <em>optional</em>) &#x2014; The embedded representation of the visual inputs, generally derived using using an object detector.`,name:"visual_embeds"},{anchor:"transformers.VisualBertForPreTraining.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on visual embeddings. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.VisualBertForPreTraining.forward.visual_token_type_ids",description:`<strong>visual_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate different portions of the visual embeds.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a> The authors of VisualBERT set the <em>visual_token_type_ids</em> to <em>1</em> for all tokens.`,name:"visual_token_type_ids"},{anchor:"transformers.VisualBertForPreTraining.forward.image_text_alignment",description:`<strong>image_text_alignment</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length, alignment_number)</code>, <em>optional</em>) &#x2014; Image-Text alignment uses to decide the position IDs of the visual embeddings.`,name:"image_text_alignment"},{anchor:"transformers.VisualBertForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisualBertForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisualBertForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.VisualBertForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, total_sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.VisualBertForPreTraining.forward.sentence_image_labels",description:`<strong>sentence_image_labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sentence-image prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a matching pair of sequence A for the given image,</li> <li>1 indicates sequence B is a random sequence w.r.t A for the given image.</li> </ul>`,name:"sentence_image_labels"}],returnDescription:` <p>A <code>transformers.models.visual_bert.modeling_visual_bert.VisualBertForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig" >VisualBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the sentence-image prediction (classification) loss.</p> </li> <li> <p><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the sentence-image prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output 
of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.visual_bert.modeling_visual_bert.VisualBertForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Pe=new zs({props:{$$slots:{default:[Qd]},$$scope:{ctx:z}}}),ft=new Le({props:{code:`# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import BertTokenizer, VisualBertForPreTraining tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = VisualBertForPreTraining.from_pretrained('uclanlp/visualbert-vqa-coco-pre') inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update({ "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask }) max_length = inputs["input_ids"].shape[-1]+visual_embeds.shape[-2] labels = tokenizer("The capital of France is Paris.", return_tensors="pt", padding="max_length", max_length=max_length)["input_ids"] sentence_image_labels = torch.tensor(1).unsqueeze(0) # Batch_size outputs = model(**inputs, labels=labels, sentence_image_labels=sentence_image_labels) loss = outputs.loss prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-comment"># Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertForPreTraining tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) model = VisualBertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;uclanlp/visualbert-vqa-coco-pre&#x27;</span>) inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is {mask}.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) visual_embeds = get_visual_embeddings(image).unsqueeze(<span class="hljs-number">0</span>) visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>) inputs.update({ <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds, <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids, <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask }) max_length = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].shape[-<span 
class="hljs-number">1</span>]+visual_embeds.shape[-<span class="hljs-number">2</span>] labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-string">&quot;max_length&quot;</span>, max_length=max_length)[<span class="hljs-string">&quot;input_ids&quot;</span>] sentence_image_labels = torch.tensor(<span class="hljs-number">1</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch_size</span> outputs = model(**inputs, labels=labels, sentence_image_labels=sentence_image_labels) loss = outputs.loss prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits`}}),gt=new se({}),_t=new L({props:{name:"class transformers.VisualBertForQuestionAnswering",anchor:"transformers.VisualBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1167",parametersDescription:[{anchor:"transformers.VisualBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tt=new L({props:{name:"forward",anchor:"transformers.VisualBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"visual_embeds",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"visual_token_type_ids",val:" = None"},{name:"image_text_alignment",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1179",parametersDescription:[{anchor:"transformers.VisualBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.visual_embeds",description:`<strong>visual_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length, visual_embedding_dim)</code>, <em>optional</em>) &#x2014; The embedded representation of the visual inputs, generally derived using using an object detector.`,name:"visual_embeds"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on visual embeddings. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.visual_token_type_ids",description:`<strong>visual_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate different portions of the visual embeds.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a> The authors of VisualBERT set the <em>visual_token_type_ids</em> to <em>1</em> for all tokens.`,name:"visual_token_type_ids"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.image_text_alignment",description:`<strong>image_text_alignment</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length, alignment_number)</code>, <em>optional</em>) &#x2014; Image-Text alignment uses to decide the position IDs of the visual embeddings.`,name:"image_text_alignment"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.VisualBertForQuestionAnswering.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, total_sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
A KLDivLoss is computed between the labels and the returned logits.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig" >VisualBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new zs({props:{$$slots:{default:[Hd]},$$scope:{ctx:z}}}),yt=new Le({props:{code:`# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import BertTokenizer, VisualBertForQuestionAnswering import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = VisualBertForQuestionAnswering.from_pretrained('uclanlp/visualbert-vqa') text = "Who is eating the apple?" 
inputs = tokenizer(text, return_tensors='pt') visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update({ "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask }) labels = torch.tensor([[0.0,1.0]]).unsqueeze(0) # Batch size 1, Num labels 2 outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits,`,highlighted:`<span class="hljs-comment"># Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertForQuestionAnswering <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) model = VisualBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;uclanlp/visualbert-vqa&#x27;</span>) text = <span class="hljs-string">&quot;Who is eating the apple?&quot;</span> inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) visual_embeds = get_visual_embeddings(image).unsqueeze(<span class="hljs-number">0</span>) visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>) inputs.update({ <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds, <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids, <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask }) labels = torch.tensor([[<span class="hljs-number">0.0</span>,<span class="hljs-number">1.0</span>]]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1, Num labels 2</span> outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits`}}),wt=new se({}),Bt=new L({props:{name:"class transformers.VisualBertForMultipleChoice",anchor:"transformers.VisualBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1017",parametersDescription:[{anchor:"transformers.VisualBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xt=new L({props:{name:"forward",anchor:"transformers.VisualBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"visual_embeds",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"visual_token_type_ids",val:" = None"},{name:"image_text_alignment",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1028",parametersDescription:[{anchor:"transformers.VisualBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisualBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisualBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.VisualBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisualBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.VisualBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.VisualBertForMultipleChoice.forward.visual_embeds",description:`<strong>visual_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length, visual_embedding_dim)</code>, <em>optional</em>) &#x2014; The embedded representation of the visual inputs, generally derived using using an object detector.`,name:"visual_embeds"},{anchor:"transformers.VisualBertForMultipleChoice.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on visual embeddings. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.VisualBertForMultipleChoice.forward.visual_token_type_ids",description:`<strong>visual_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate different portions of the visual embeds.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a> The authors of VisualBERT set the <em>visual_token_type_ids</em> to <em>1</em> for all tokens.`,name:"visual_token_type_ids"},{anchor:"transformers.VisualBertForMultipleChoice.forward.image_text_alignment",description:`<strong>image_text_alignment</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length, alignment_number)</code>, <em>optional</em>) &#x2014; Image-Text alignment uses to decide the position IDs of the visual embeddings.`,name:"image_text_alignment"},{anchor:"transformers.VisualBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisualBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisualBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.VisualBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig" >VisualBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ce=new zs({props:{$$slots:{default:[Ud]},$$scope:{ctx:z}}}),Ft=new Le({props:{code:`# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import BertTokenizer, VisualBertForMultipleChoice import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = VisualBertForMultipleChoice.from_pretrained('uclanlp/visualbert-vcr') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
visual_embeds = get_visual_embeddings(image) # (batch_size, num_choices, visual_seq_length, visual_embedding_dim) visual_embeds = visual_embeds.expand(1, 2, *visual_embeds.shape) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True) # batch size is 1 inputs_dict = {k: v.unsqueeze(0) for k,v in encoding.items()} inputs_dict.update({ "visual_embeds": visual_embeds, "visual_attention_mask": visual_attention_mask, "visual_token_type_ids": visual_token_type_ids, "labels": labels }) outputs = model(**inputs_dict) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-comment"># Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertForMultipleChoice <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) model = VisualBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;uclanlp/visualbert-vcr&#x27;</span>) prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> visual_embeds = get_visual_embeddings(image) <span class="hljs-comment"># (batch_size, num_choices, visual_seq_length, visual_embedding_dim)</span> visual_embeds = visual_embeds.expand(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, *visual_embeds.shape) visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>) labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-comment"># batch size is 1</span> inputs_dict = {k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()} inputs_dict.update({ <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds, <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask, <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids, <span class="hljs-string">&quot;labels&quot;</span>: labels }) outputs = model(**inputs_dict) loss = outputs.loss logits = outputs.logits`}}),qt=new se({}),Et=new L({props:{name:"class 
transformers.VisualBertForVisualReasoning",anchor:"transformers.VisualBertForVisualReasoning",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1290",parametersDescription:[{anchor:"transformers.VisualBertForVisualReasoning.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),jt=new L({props:{name:"forward",anchor:"transformers.VisualBertForVisualReasoning.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"visual_embeds",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"visual_token_type_ids",val:" = None"},{name:"image_text_alignment",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1302",parametersDescription:[{anchor:"transformers.VisualBertForVisualReasoning.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisualBertForVisualReasoning.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisualBertForVisualReasoning.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.VisualBertForVisualReasoning.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisualBertForVisualReasoning.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.VisualBertForVisualReasoning.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.VisualBertForVisualReasoning.forward.visual_embeds",description:`<strong>visual_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length, visual_embedding_dim)</code>, <em>optional</em>) &#x2014; The embedded representation of the visual inputs, generally derived using using an object detector.`,name:"visual_embeds"},{anchor:"transformers.VisualBertForVisualReasoning.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on visual embeddings. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.VisualBertForVisualReasoning.forward.visual_token_type_ids",description:`<strong>visual_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate different portions of the visual embeds.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a> The authors of VisualBERT set the <em>visual_token_type_ids</em> to <em>1</em> for all tokens.`,name:"visual_token_type_ids"},{anchor:"transformers.VisualBertForVisualReasoning.forward.image_text_alignment",description:`<strong>image_text_alignment</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length, alignment_number)</code>, <em>optional</em>) &#x2014; Image-Text alignment uses to decide the position IDs of the visual embeddings.`,name:"image_text_alignment"},{anchor:"transformers.VisualBertForVisualReasoning.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisualBertForVisualReasoning.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisualBertForVisualReasoning.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.VisualBertForVisualReasoning.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
A classification loss is computed (Cross-Entropy) against these labels.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig" >VisualBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new zs({props:{$$slots:{default:[Kd]},$$scope:{ctx:z}}}),Ct=new Le({props:{code:`# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import BertTokenizer, VisualBertForVisualReasoning import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = VisualBertForVisualReasoning.from_pretrained('uclanlp/visualbert-nlvr2') text = "Who is eating the apple?" 
inputs = tokenizer(text, return_tensors='pt') visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update({ "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask }) labels = torch.tensor(1).unsqueeze(0) # Batch size 1, Num choices 2 outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits,`,highlighted:`<span class="hljs-comment"># Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertForVisualReasoning <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) model = VisualBertForVisualReasoning.from_pretrained(<span class="hljs-string">&#x27;uclanlp/visualbert-nlvr2&#x27;</span>) text = <span class="hljs-string">&quot;Who is eating the apple?&quot;</span> inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) visual_embeds = get_visual_embeddings(image).unsqueeze(<span class="hljs-number">0</span>) visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>) inputs.update({ <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds, <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids, <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask }) labels = torch.tensor(<span class="hljs-number">1</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1, Num choices 2</span> outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits`}}),Rt=new se({}),Nt=new L({props:{name:"class transformers.VisualBertForRegionToPhraseAlignment",anchor:"transformers.VisualBertForRegionToPhraseAlignment",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1447",parametersDescription:[{anchor:"transformers.VisualBertForRegionToPhraseAlignment.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig">VisualBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),St=new L({props:{name:"forward",anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"visual_embeds",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"visual_token_type_ids",val:" = None"},{name:"image_text_alignment",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"region_to_phrase_position",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/visual_bert/modeling_visual_bert.py#L1459",parametersDescription:[{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.visual_embeds",description:`<strong>visual_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length, visual_embedding_dim)</code>, <em>optional</em>) &#x2014; The embedded representation of the visual inputs, generally derived using using an object detector.`,name:"visual_embeds"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on visual embeddings. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.visual_token_type_ids",description:`<strong>visual_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate different portions of the visual embeds.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a> The authors of VisualBERT set the <em>visual_token_type_ids</em> to <em>1</em> for all tokens.`,name:"visual_token_type_ids"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.image_text_alignment",description:`<strong>image_text_alignment</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, visual_seq_length, alignment_number)</code>, <em>optional</em>) &#x2014; Image-Text alignment uses to decide the position IDs of the visual embeddings.`,name:"image_text_alignment"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.region_to_phrase_position",description:`<strong>region_to_phrase_position</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, total_sequence_length)</code>, <em>optional</em>) &#x2014; The positions depicting the position of the image embedding corresponding to the textual tokens.`,name:"region_to_phrase_position"},{anchor:"transformers.VisualBertForRegionToPhraseAlignment.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, total_sequence_length, visual_sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and the outputs from the attention layer.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertConfig" >VisualBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in 
the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),De=new zs({props:{$$slots:{default:[Jd]},$$scope:{ctx:z}}}),Wt=new Le({props:{code:`# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import BertTokenizer, VisualBertForRegionToPhraseAlignment import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = VisualBertForRegionToPhraseAlignment.from_pretrained('uclanlp/visualbert-vqa-coco-pre') text = "Who is eating the apple?" inputs = tokenizer(text, return_tensors='pt') visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) region_to_phrase_position = torch.ones((1, inputs["input_ids"].shape[-1]+visual_embeds.shape[-2])) inputs.update({ "region_to_phrase_position": region_to_phrase_position, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask }) labels = torch.ones((1, inputs["input_ids"].shape[-1]+visual_embeds.shape[-2], visual_embeds.shape[-2])) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits,`,highlighted:`<span class="hljs-comment"># Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, VisualBertForRegionToPhraseAlignment <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) model = VisualBertForRegionToPhraseAlignment.from_pretrained(<span class="hljs-string">&#x27;uclanlp/visualbert-vqa-coco-pre&#x27;</span>) text = <span class="hljs-string">&quot;Who is eating the apple?&quot;</span> inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) visual_embeds = get_visual_embeddings(image).unsqueeze(<span class="hljs-number">0</span>) visual_token_type_ids = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-<span class="hljs-number">1</span>], dtype=torch.<span class="hljs-built_in">float</span>) region_to_phrase_position = torch.ones((<span class="hljs-number">1</span>, inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].shape[-<span class="hljs-number">1</span>]+visual_embeds.shape[-<span class="hljs-number">2</span>])) inputs.update({ <span class="hljs-string">&quot;region_to_phrase_position&quot;</span>: region_to_phrase_position, <span class="hljs-string">&quot;visual_embeds&quot;</span>: visual_embeds, <span class="hljs-string">&quot;visual_token_type_ids&quot;</span>: visual_token_type_ids, <span class="hljs-string">&quot;visual_attention_mask&quot;</span>: visual_attention_mask }) labels = torch.ones((<span class="hljs-number">1</span>, inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].shape[-<span class="hljs-number">1</span>]+visual_embeds.shape[-<span class="hljs-number">2</span>], visual_embeds.shape[-<span class="hljs-number">2</span>])) <span class="hljs-comment"># Batch 
size 1</span> outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits`}}),{c(){h=o("meta"),B=d(),m=o("h1"),w=o("a"),V=o("span"),_(g.$$.fragment),f=d(),$=o("span"),$n=r("VisualBERT"),wo=d(),oe=o("h2"),Te=o("a"),xs=o("span"),_(Se.$$.fragment),zn=d(),Fs=o("span"),xn=r("Overview"),Bo=d(),ye=o("p"),Fn=r("The VisualBERT model was proposed in "),We=o("a"),qn=r("VisualBERT: A Simple and Performant Baseline for Vision and Language"),En=r(` by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. VisualBERT is a neural network trained on a variety of (image, text) pairs.`),Vo=d(),Ht=o("p"),Pn=r("The abstract from the paper is the following:"),$o=d(),Ut=o("p"),qs=o("em"),Mn=r(`We propose VisualBERT, a simple and flexible framework for modeling a broad range of vision-and-language tasks. VisualBERT consists of a stack of Transformer layers that implicitly align elements of an input text and regions in an associated input image with self-attention. We further propose two visually-grounded language model objectives for pre-training VisualBERT on image caption data. Experiments on four vision-and-language tasks including VQA, VCR, NLVR2, and Flickr30K show that VisualBERT outperforms or rivals with state-of-the-art models while being significantly simpler. Further analysis demonstrates that VisualBERT can ground elements of language to image regions without any explicit supervision and is even sensitive to syntactic relationships, tracking, for example, associations between verbs and image regions corresponding to their arguments.`),zo=d(),Kt=o("p"),An=r("Tips:"),xo=d(),we=o("ol"),Es=o("li"),Oe=o("p"),jn=r("Most of the checkpoints provided work with the "),Jt=o("a"),Cn=r("VisualBertForPreTraining"),Rn=r(` configuration. Other checkpoints provided are the fine-tuned checkpoints for down-stream tasks - VQA (\u2018visualbert-vqa\u2019), VCR (\u2018visualbert-vcr\u2019), NLVR2 (\u2018visualbert-nlvr2\u2019). Hence, if you are not working on these downstream tasks, it is recommended that you use the pretrained checkpoints.`),Nn=d(),Ps=o("li"),Ms=o("p"),In=r(`For the VCR task, the authors use a fine-tuned detector for generating visual embeddings, for all the checkpoints. We do not provide the detector and its weights as a part of the package, but it will be available in the research projects, and the states can be loaded directly into the detector provided.`),Fo=d(),ne=o("h2"),Be=o("a"),As=o("span"),_(Qe.$$.fragment),Dn=d(),js=o("span"),Ln=r("Usage"),qo=d(),Gt=o("p"),Sn=r(`VisualBERT is a multi-modal vision and language model. It can be used for visual question answering, multiple choice, visual reasoning and region-to-phrase correspondence tasks. VisualBERT uses a BERT-like transformer to prepare embeddings for image-text pairs. Both the text and visual features are then projected to a latent space with identical dimension.`),Eo=d(),Yt=o("p"),Wn=r(`To feed images to the model, each image is passed through a pre-trained object detector and the regions and the bounding boxes are extracted. The authors use the features generated after passing these regions through a pre-trained CNN like ResNet as visual embeddings. They also add absolute position embeddings, and feed the resulting sequence of vectors to a standard BERT model. The text input is concatenated in the front of the visual embeddings in the embedding layer, and is expected to be bound by [CLS] and a [SEP] tokens, as in BERT. 
The segment IDs must also be set appropriately for the textual and visual parts.`),Po=d(),Ve=o("p"),On=r("The "),Xt=o("a"),Qn=r("BertTokenizer"),Hn=r(` is used to encode the text. A custom detector/feature extractor must be used to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models:`),Mo=d(),$e=o("ul"),Cs=o("li"),Zt=o("p"),He=o("a"),Un=r("VisualBERT VQA demo notebook"),Kn=r(` : This notebook contains an example on VisualBERT VQA.`),Jn=d(),Rs=o("li"),es=o("p"),Ue=o("a"),Gn=r("Generate Embeddings for VisualBERT (Colab Notebook)"),Yn=r(` : This notebook contains an example on how to generate visual embeddings.`),Ao=d(),ze=o("p"),Xn=r("The following example shows how to get the last hidden state using "),ts=o("a"),Zn=r("VisualBertModel"),ea=r(":"),jo=d(),_(Ke.$$.fragment),Co=d(),S=o("p"),ta=r("This model was contributed by "),Je=o("a"),sa=r("gchhablani"),oa=r(". The original code can be found "),Ge=o("a"),na=r("here"),aa=r("."),Ro=d(),ae=o("h2"),xe=o("a"),Ns=o("span"),_(Ye.$$.fragment),ra=d(),Is=o("span"),ia=r("VisualBertConfig"),No=d(),x=o("div"),_(Xe.$$.fragment),la=d(),re=o("p"),da=r("This is the configuration class to store the configuration of a "),ss=o("a"),ca=r("VisualBertModel"),ua=r(`. It is used to instantiate an VisualBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VisualBERT `),Ze=o("a"),ha=r("visualbert-vqa-coco-pre"),pa=r(" architecture."),ma=d(),ie=o("p"),fa=r("Configuration objects inherit from "),os=o("a"),ga=r("PretrainedConfig"),_a=r(` and can be used to control the model outputs. Read the documentation from `),ns=o("a"),va=r("PretrainedConfig"),ba=r(" for more information."),ka=d(),Ds=o("p"),Ta=r("Example:"),ya=d(),_(et.$$.fragment),Io=d(),le=o("h2"),Fe=o("a"),Ls=o("span"),_(tt.$$.fragment),wa=d(),Ss=o("span"),Ba=r("VisualBertModel"),Do=d(),F=o("div"),_(st.$$.fragment),Va=d(),ot=o("p"),$a=r(`The bare VisualBert Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),as=o("a"),za=r("PreTrainedModel"),xa=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fa=d(),nt=o("p"),qa=r("This model is also a PyTorch "),at=o("a"),Ea=r("torch.nn.Module"),Pa=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ma=d(),rt=o("p"),Aa=r("The model can behave as an encoder (with only self-attention) following the architecture described in "),Ws=o("code"),ja=r("Attention is all you need <https://arxiv.org/abs/1706.03762>"),Ca=r(`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Ra=d(),j=o("div"),_(it.$$.fragment),Na=d(),de=o("p"),Ia=r("The "),rs=o("a"),Da=r("VisualBertModel"),La=r(" forward method, overrides the "),Os=o("code"),Sa=r("__call__"),Wa=r(" special method."),Oa=d(),_(qe.$$.fragment),Qa=d(),Qs=o("p"),Ha=r("Example:"),Ua=d(),_(lt.$$.fragment),Lo=d(),ce=o("h2"),Ee=o("a"),Hs=o("span"),_(dt.$$.fragment),Ka=d(),Us=o("span"),Ja=r("VisualBertForPreTraining"),So=d(),q=o("div"),_(ct.$$.fragment),Ga=d(),ue=o("p"),Ya=r("VisualBert Model with two heads on top as done during the pretraining: a "),Ks=o("code"),Xa=r("masked language modeling"),Za=r(` head and a `),Js=o("code"),er=r("sentence-image prediction (classification)"),tr=r(" head."),sr=d(),ut=o("p"),or=r("This model inherits from "),is=o("a"),nr=r("PreTrainedModel"),ar=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rr=d(),ht=o("p"),ir=r("This model is also a PyTorch "),pt=o("a"),lr=r("torch.nn.Module"),dr=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cr=d(),C=o("div"),_(mt.$$.fragment),ur=d(),he=o("p"),hr=r("The "),ls=o("a"),pr=r("VisualBertForPreTraining"),mr=r(" forward method, overrides the "),Gs=o("code"),fr=r("__call__"),gr=r(" special method."),_r=d(),_(Pe.$$.fragment),vr=d(),Ys=o("p"),br=r("Example:"),kr=d(),_(ft.$$.fragment),Wo=d(),pe=o("h2"),Me=o("a"),Xs=o("span"),_(gt.$$.fragment),Tr=d(),Zs=o("span"),yr=r("VisualBertForQuestionAnswering"),Oo=d(),E=o("div"),_(_t.$$.fragment),wr=d(),eo=o("p"),Br=r(`VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled output) for VQA.`),Vr=d(),vt=o("p"),$r=r("This model inherits from "),ds=o("a"),zr=r("PreTrainedModel"),xr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fr=d(),bt=o("p"),qr=r("This model is also a PyTorch "),kt=o("a"),Er=r("torch.nn.Module"),Pr=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Mr=d(),R=o("div"),_(Tt.$$.fragment),Ar=d(),me=o("p"),jr=r("The "),cs=o("a"),Cr=r("VisualBertForQuestionAnswering"),Rr=r(" forward method, overrides the "),to=o("code"),Nr=r("__call__"),Ir=r(" special method."),Dr=d(),_(Ae.$$.fragment),Lr=d(),so=o("p"),Sr=r("Example:"),Wr=d(),_(yt.$$.fragment),Qo=d(),fe=o("h2"),je=o("a"),oo=o("span"),_(wt.$$.fragment),Or=d(),no=o("span"),Qr=r("VisualBertForMultipleChoice"),Ho=d(),P=o("div"),_(Bt.$$.fragment),Hr=d(),ao=o("p"),Ur=r(`VisualBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for VCR tasks.`),Kr=d(),Vt=o("p"),Jr=r("This model inherits from "),us=o("a"),Gr=r("PreTrainedModel"),Yr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xr=d(),$t=o("p"),Zr=r("This model is also a PyTorch "),zt=o("a"),ei=r("torch.nn.Module"),ti=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),si=d(),N=o("div"),_(xt.$$.fragment),oi=d(),ge=o("p"),ni=r("The "),hs=o("a"),ai=r("VisualBertForMultipleChoice"),ri=r(" forward method, overrides the "),ro=o("code"),ii=r("__call__"),li=r(" special method."),di=d(),_(Ce.$$.fragment),ci=d(),io=o("p"),ui=r("Example:"),hi=d(),_(Ft.$$.fragment),Uo=d(),_e=o("h2"),Re=o("a"),lo=o("span"),_(qt.$$.fragment),pi=d(),co=o("span"),mi=r("VisualBertForVisualReasoning"),Ko=d(),M=o("div"),_(Et.$$.fragment),fi=d(),uo=o("p"),gi=r(`VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled output) for Visual Reasoning e.g. for NLVR task.`),_i=d(),Pt=o("p"),vi=r("This model inherits from "),ps=o("a"),bi=r("PreTrainedModel"),ki=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ti=d(),Mt=o("p"),yi=r("This model is also a PyTorch "),At=o("a"),wi=r("torch.nn.Module"),Bi=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vi=d(),I=o("div"),_(jt.$$.fragment),$i=d(),ve=o("p"),zi=r("The "),ms=o("a"),xi=r("VisualBertForVisualReasoning"),Fi=r(" forward method, overrides the "),ho=o("code"),qi=r("__call__"),Ei=r(" special method."),Pi=d(),_(Ne.$$.fragment),Mi=d(),po=o("p"),Ai=r("Example:"),ji=d(),_(Ct.$$.fragment),Jo=d(),be=o("h2"),Ie=o("a"),mo=o("span"),_(Rt.$$.fragment),Ci=d(),fo=o("span"),Ri=r("VisualBertForRegionToPhraseAlignment"),Go=d(),A=o("div"),_(Nt.$$.fragment),Ni=d(),go=o("p"),Ii=r(`VisualBert Model with a Masked Language Modeling head and an attention layer on top for Region-to-Phrase Alignment e.g. for Flickr30 Entities task.`),Di=d(),It=o("p"),Li=r("This model inherits from "),fs=o("a"),Si=r("PreTrainedModel"),Wi=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Oi=d(),Dt=o("p"),Qi=r("This model is also a PyTorch "),Lt=o("a"),Hi=r("torch.nn.Module"),Ui=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ki=d(),D=o("div"),_(St.$$.fragment),Ji=d(),ke=o("p"),Gi=r("The "),gs=o("a"),Yi=r("VisualBertForRegionToPhraseAlignment"),Xi=r(" forward method, overrides the "),_o=o("code"),Zi=r("__call__"),el=r(" special method."),tl=d(),_(De.$$.fragment),sl=d(),vo=o("p"),ol=r("Example:"),nl=d(),_(Wt.$$.fragment),this.h()},l(t){const u=Wd('[data-svelte="svelte-1phssyn"]',document.head);h=n(u,"META",{name:!0,content:!0}),u.forEach(s),B=c(t),m=n(t,"H1",{class:!0});var Ot=a(m);w=n(Ot,"A",{id:!0,class:!0,href:!0});var bo=a(w);V=n(bo,"SPAN",{});var ko=a(V);v(g.$$.fragment,ko),ko.forEach(s),bo.forEach(s),f=c(Ot),$=n(Ot,"SPAN",{});var To=a($);$n=i(To,"VisualBERT"),To.forEach(s),Ot.forEach(s),wo=c(t),oe=n(t,"H2",{class:!0});var Qt=a(oe);Te=n(Qt,"A",{id:!0,class:!0,href:!0});var yo=a(Te);xs=n(yo,"SPAN",{});var il=a(xs);v(Se.$$.fragment,il),il.forEach(s),yo.forEach(s),zn=c(Qt),Fs=n(Qt,"SPAN",{});var ll=a(Fs);xn=i(ll,"Overview"),ll.forEach(s),Qt.forEach(s),Bo=c(t),ye=n(t,"P",{});var Xo=a(ye);Fn=i(Xo,"The VisualBERT model was proposed in "),We=n(Xo,"A",{href:!0,rel:!0});var dl=a(We);qn=i(dl,"VisualBERT: A Simple and Performant Baseline for Vision and Language"),dl.forEach(s),En=i(Xo,` by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. VisualBERT is a neural network trained on a variety of (image, text) pairs.`),Xo.forEach(s),Vo=c(t),Ht=n(t,"P",{});var cl=a(Ht);Pn=i(cl,"The abstract from the paper is the following:"),cl.forEach(s),$o=c(t),Ut=n(t,"P",{});var ul=a(Ut);qs=n(ul,"EM",{});var hl=a(qs);Mn=i(hl,`We propose VisualBERT, a simple and flexible framework for modeling a broad range of vision-and-language tasks. VisualBERT consists of a stack of Transformer layers that implicitly align elements of an input text and regions in an associated input image with self-attention. We further propose two visually-grounded language model objectives for pre-training VisualBERT on image caption data. Experiments on four vision-and-language tasks including VQA, VCR, NLVR2, and Flickr30K show that VisualBERT outperforms or rivals with state-of-the-art models while being significantly simpler. Further analysis demonstrates that VisualBERT can ground elements of language to image regions without any explicit supervision and is even sensitive to syntactic relationships, tracking, for example, associations between verbs and image regions corresponding to their arguments.`),hl.forEach(s),ul.forEach(s),zo=c(t),Kt=n(t,"P",{});var pl=a(Kt);An=i(pl,"Tips:"),pl.forEach(s),xo=c(t),we=n(t,"OL",{});var Zo=a(we);Es=n(Zo,"LI",{});var ml=a(Es);Oe=n(ml,"P",{});var en=a(Oe);jn=i(en,"Most of the checkpoints provided work with the "),Jt=n(en,"A",{href:!0});var fl=a(Jt);Cn=i(fl,"VisualBertForPreTraining"),fl.forEach(s),Rn=i(en,` configuration. Other checkpoints provided are the fine-tuned checkpoints for down-stream tasks - VQA (\u2018visualbert-vqa\u2019), VCR (\u2018visualbert-vcr\u2019), NLVR2 (\u2018visualbert-nlvr2\u2019). Hence, if you are not working on these downstream tasks, it is recommended that you use the pretrained checkpoints.`),en.forEach(s),ml.forEach(s),Nn=c(Zo),Ps=n(Zo,"LI",{});var gl=a(Ps);Ms=n(gl,"P",{});var _l=a(Ms);In=i(_l,`For the VCR task, the authors use a fine-tuned detector for generating visual embeddings, for all the checkpoints. 
We do not provide the detector and its weights as a part of the package, but it will be available in the research projects, and the states can be loaded directly into the detector provided.`),_l.forEach(s),gl.forEach(s),Zo.forEach(s),Fo=c(t),ne=n(t,"H2",{class:!0});var tn=a(ne);Be=n(tn,"A",{id:!0,class:!0,href:!0});var vl=a(Be);As=n(vl,"SPAN",{});var bl=a(As);v(Qe.$$.fragment,bl),bl.forEach(s),vl.forEach(s),Dn=c(tn),js=n(tn,"SPAN",{});var kl=a(js);Ln=i(kl,"Usage"),kl.forEach(s),tn.forEach(s),qo=c(t),Gt=n(t,"P",{});var Tl=a(Gt);Sn=i(Tl,`VisualBERT is a multi-modal vision and language model. It can be used for visual question answering, multiple choice, visual reasoning and region-to-phrase correspondence tasks. VisualBERT uses a BERT-like transformer to prepare embeddings for image-text pairs. Both the text and visual features are then projected to a latent space with identical dimension.`),Tl.forEach(s),Eo=c(t),Yt=n(t,"P",{});var yl=a(Yt);Wn=i(yl,`To feed images to the model, each image is passed through a pre-trained object detector and the regions and the bounding boxes are extracted. The authors use the features generated after passing these regions through a pre-trained CNN like ResNet as visual embeddings. They also add absolute position embeddings, and feed the resulting sequence of vectors to a standard BERT model. The text input is concatenated in the front of the visual embeddings in the embedding layer, and is expected to be bound by [CLS] and a [SEP] tokens, as in BERT. The segment IDs must also be set appropriately for the textual and visual parts.`),yl.forEach(s),Po=c(t),Ve=n(t,"P",{});var sn=a(Ve);On=i(sn,"The "),Xt=n(sn,"A",{href:!0});var wl=a(Xt);Qn=i(wl,"BertTokenizer"),wl.forEach(s),Hn=i(sn,` is used to encode the text. A custom detector/feature extractor must be used to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models:`),sn.forEach(s),Mo=c(t),$e=n(t,"UL",{});var on=a($e);Cs=n(on,"LI",{});var Bl=a(Cs);Zt=n(Bl,"P",{});var al=a(Zt);He=n(al,"A",{href:!0,rel:!0});var Vl=a(He);Un=i(Vl,"VisualBERT VQA demo notebook"),Vl.forEach(s),Kn=i(al,` : This notebook contains an example on VisualBERT VQA.`),al.forEach(s),Bl.forEach(s),Jn=c(on),Rs=n(on,"LI",{});var $l=a(Rs);es=n($l,"P",{});var rl=a(es);Ue=n(rl,"A",{href:!0,rel:!0});var zl=a(Ue);Gn=i(zl,"Generate Embeddings for VisualBERT (Colab Notebook)"),zl.forEach(s),Yn=i(rl,` : This notebook contains an example on how to generate visual embeddings.`),rl.forEach(s),$l.forEach(s),on.forEach(s),Ao=c(t),ze=n(t,"P",{});var nn=a(ze);Xn=i(nn,"The following example shows how to get the last hidden state using "),ts=n(nn,"A",{href:!0});var xl=a(ts);Zn=i(xl,"VisualBertModel"),xl.forEach(s),ea=i(nn,":"),nn.forEach(s),jo=c(t),v(Ke.$$.fragment,t),Co=c(t),S=n(t,"P",{});var _s=a(S);ta=i(_s,"This model was contributed by "),Je=n(_s,"A",{href:!0,rel:!0});var Fl=a(Je);sa=i(Fl,"gchhablani"),Fl.forEach(s),oa=i(_s,". 
The original code can be found "),Ge=n(_s,"A",{href:!0,rel:!0});var ql=a(Ge);na=i(ql,"here"),ql.forEach(s),aa=i(_s,"."),_s.forEach(s),Ro=c(t),ae=n(t,"H2",{class:!0});var an=a(ae);xe=n(an,"A",{id:!0,class:!0,href:!0});var El=a(xe);Ns=n(El,"SPAN",{});var Pl=a(Ns);v(Ye.$$.fragment,Pl),Pl.forEach(s),El.forEach(s),ra=c(an),Is=n(an,"SPAN",{});var Ml=a(Is);ia=i(Ml,"VisualBertConfig"),Ml.forEach(s),an.forEach(s),No=c(t),x=n(t,"DIV",{class:!0});var W=a(x);v(Xe.$$.fragment,W),la=c(W),re=n(W,"P",{});var vs=a(re);da=i(vs,"This is the configuration class to store the configuration of a "),ss=n(vs,"A",{href:!0});var Al=a(ss);ca=i(Al,"VisualBertModel"),Al.forEach(s),ua=i(vs,`. It is used to instantiate an VisualBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VisualBERT `),Ze=n(vs,"A",{href:!0,rel:!0});var jl=a(Ze);ha=i(jl,"visualbert-vqa-coco-pre"),jl.forEach(s),pa=i(vs," architecture."),vs.forEach(s),ma=c(W),ie=n(W,"P",{});var bs=a(ie);fa=i(bs,"Configuration objects inherit from "),os=n(bs,"A",{href:!0});var Cl=a(os);ga=i(Cl,"PretrainedConfig"),Cl.forEach(s),_a=i(bs,` and can be used to control the model outputs. Read the documentation from `),ns=n(bs,"A",{href:!0});var Rl=a(ns);va=i(Rl,"PretrainedConfig"),Rl.forEach(s),ba=i(bs," for more information."),bs.forEach(s),ka=c(W),Ds=n(W,"P",{});var Nl=a(Ds);Ta=i(Nl,"Example:"),Nl.forEach(s),ya=c(W),v(et.$$.fragment,W),W.forEach(s),Io=c(t),le=n(t,"H2",{class:!0});var rn=a(le);Fe=n(rn,"A",{id:!0,class:!0,href:!0});var Il=a(Fe);Ls=n(Il,"SPAN",{});var Dl=a(Ls);v(tt.$$.fragment,Dl),Dl.forEach(s),Il.forEach(s),wa=c(rn),Ss=n(rn,"SPAN",{});var Ll=a(Ss);Ba=i(Ll,"VisualBertModel"),Ll.forEach(s),rn.forEach(s),Do=c(t),F=n(t,"DIV",{class:!0});var O=a(F);v(st.$$.fragment,O),Va=c(O),ot=n(O,"P",{});var ln=a(ot);$a=i(ln,`The bare VisualBert Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),as=n(ln,"A",{href:!0});var Sl=a(as);za=i(Sl,"PreTrainedModel"),Sl.forEach(s),xa=i(ln,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ln.forEach(s),Fa=c(O),nt=n(O,"P",{});var dn=a(nt);qa=i(dn,"This model is also a PyTorch "),at=n(dn,"A",{href:!0,rel:!0});var Wl=a(at);Ea=i(Wl,"torch.nn.Module"),Wl.forEach(s),Pa=i(dn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dn.forEach(s),Ma=c(O),rt=n(O,"P",{});var cn=a(rt);Aa=i(cn,"The model can behave as an encoder (with only self-attention) following the architecture described in "),Ws=n(cn,"CODE",{});var Ol=a(Ws);ja=i(Ol,"Attention is all you need <https://arxiv.org/abs/1706.03762>"),Ol.forEach(s),Ca=i(cn,`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),cn.forEach(s),Ra=c(O),j=n(O,"DIV",{class:!0});var Q=a(j);v(it.$$.fragment,Q),Na=c(Q),de=n(Q,"P",{});var ks=a(de);Ia=i(ks,"The "),rs=n(ks,"A",{href:!0});var Ql=a(rs);Da=i(Ql,"VisualBertModel"),Ql.forEach(s),La=i(ks," forward method, overrides the "),Os=n(ks,"CODE",{});var Hl=a(Os);Sa=i(Hl,"__call__"),Hl.forEach(s),Wa=i(ks," special method."),ks.forEach(s),Oa=c(Q),v(qe.$$.fragment,Q),Qa=c(Q),Qs=n(Q,"P",{});var Ul=a(Qs);Ha=i(Ul,"Example:"),Ul.forEach(s),Ua=c(Q),v(lt.$$.fragment,Q),Q.forEach(s),O.forEach(s),Lo=c(t),ce=n(t,"H2",{class:!0});var un=a(ce);Ee=n(un,"A",{id:!0,class:!0,href:!0});var Kl=a(Ee);Hs=n(Kl,"SPAN",{});var Jl=a(Hs);v(dt.$$.fragment,Jl),Jl.forEach(s),Kl.forEach(s),Ka=c(un),Us=n(un,"SPAN",{});var Gl=a(Us);Ja=i(Gl,"VisualBertForPreTraining"),Gl.forEach(s),un.forEach(s),So=c(t),q=n(t,"DIV",{class:!0});var H=a(q);v(ct.$$.fragment,H),Ga=c(H),ue=n(H,"P",{});var Ts=a(ue);Ya=i(Ts,"VisualBert Model with two heads on top as done during the pretraining: a "),Ks=n(Ts,"CODE",{});var Yl=a(Ks);Xa=i(Yl,"masked language modeling"),Yl.forEach(s),Za=i(Ts,` head and a `),Js=n(Ts,"CODE",{});var Xl=a(Js);er=i(Xl,"sentence-image prediction (classification)"),Xl.forEach(s),tr=i(Ts," head."),Ts.forEach(s),sr=c(H),ut=n(H,"P",{});var hn=a(ut);or=i(hn,"This model inherits from "),is=n(hn,"A",{href:!0});var Zl=a(is);nr=i(Zl,"PreTrainedModel"),Zl.forEach(s),ar=i(hn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hn.forEach(s),rr=c(H),ht=n(H,"P",{});var pn=a(ht);ir=i(pn,"This model is also a PyTorch "),pt=n(pn,"A",{href:!0,rel:!0});var ed=a(pt);lr=i(ed,"torch.nn.Module"),ed.forEach(s),dr=i(pn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pn.forEach(s),cr=c(H),C=n(H,"DIV",{class:!0});var U=a(C);v(mt.$$.fragment,U),ur=c(U),he=n(U,"P",{});var ys=a(he);hr=i(ys,"The "),ls=n(ys,"A",{href:!0});var td=a(ls);pr=i(td,"VisualBertForPreTraining"),td.forEach(s),mr=i(ys," forward method, overrides the "),Gs=n(ys,"CODE",{});var sd=a(Gs);fr=i(sd,"__call__"),sd.forEach(s),gr=i(ys," special method."),ys.forEach(s),_r=c(U),v(Pe.$$.fragment,U),vr=c(U),Ys=n(U,"P",{});var od=a(Ys);br=i(od,"Example:"),od.forEach(s),kr=c(U),v(ft.$$.fragment,U),U.forEach(s),H.forEach(s),Wo=c(t),pe=n(t,"H2",{class:!0});var mn=a(pe);Me=n(mn,"A",{id:!0,class:!0,href:!0});var nd=a(Me);Xs=n(nd,"SPAN",{});var ad=a(Xs);v(gt.$$.fragment,ad),ad.forEach(s),nd.forEach(s),Tr=c(mn),Zs=n(mn,"SPAN",{});var rd=a(Zs);yr=i(rd,"VisualBertForQuestionAnswering"),rd.forEach(s),mn.forEach(s),Oo=c(t),E=n(t,"DIV",{class:!0});var K=a(E);v(_t.$$.fragment,K),wr=c(K),eo=n(K,"P",{});var id=a(eo);Br=i(id,`VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled output) for VQA.`),id.forEach(s),Vr=c(K),vt=n(K,"P",{});var fn=a(vt);$r=i(fn,"This model inherits from "),ds=n(fn,"A",{href:!0});var ld=a(ds);zr=i(ld,"PreTrainedModel"),ld.forEach(s),xr=i(fn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fn.forEach(s),Fr=c(K),bt=n(K,"P",{});var gn=a(bt);qr=i(gn,"This model is also a PyTorch "),kt=n(gn,"A",{href:!0,rel:!0});var dd=a(kt);Er=i(dd,"torch.nn.Module"),dd.forEach(s),Pr=i(gn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gn.forEach(s),Mr=c(K),R=n(K,"DIV",{class:!0});var J=a(R);v(Tt.$$.fragment,J),Ar=c(J),me=n(J,"P",{});var ws=a(me);jr=i(ws,"The "),cs=n(ws,"A",{href:!0});var cd=a(cs);Cr=i(cd,"VisualBertForQuestionAnswering"),cd.forEach(s),Rr=i(ws," forward method, overrides the "),to=n(ws,"CODE",{});var ud=a(to);Nr=i(ud,"__call__"),ud.forEach(s),Ir=i(ws," special method."),ws.forEach(s),Dr=c(J),v(Ae.$$.fragment,J),Lr=c(J),so=n(J,"P",{});var hd=a(so);Sr=i(hd,"Example:"),hd.forEach(s),Wr=c(J),v(yt.$$.fragment,J),J.forEach(s),K.forEach(s),Qo=c(t),fe=n(t,"H2",{class:!0});var _n=a(fe);je=n(_n,"A",{id:!0,class:!0,href:!0});var pd=a(je);oo=n(pd,"SPAN",{});var md=a(oo);v(wt.$$.fragment,md),md.forEach(s),pd.forEach(s),Or=c(_n),no=n(_n,"SPAN",{});var fd=a(no);Qr=i(fd,"VisualBertForMultipleChoice"),fd.forEach(s),_n.forEach(s),Ho=c(t),P=n(t,"DIV",{class:!0});var G=a(P);v(Bt.$$.fragment,G),Hr=c(G),ao=n(G,"P",{});var gd=a(ao);Ur=i(gd,`VisualBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for VCR tasks.`),gd.forEach(s),Kr=c(G),Vt=n(G,"P",{});var vn=a(Vt);Jr=i(vn,"This model inherits from "),us=n(vn,"A",{href:!0});var _d=a(us);Gr=i(_d,"PreTrainedModel"),_d.forEach(s),Yr=i(vn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vn.forEach(s),Xr=c(G),$t=n(G,"P",{});var bn=a($t);Zr=i(bn,"This model is also a PyTorch "),zt=n(bn,"A",{href:!0,rel:!0});var vd=a(zt);ei=i(vd,"torch.nn.Module"),vd.forEach(s),ti=i(bn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bn.forEach(s),si=c(G),N=n(G,"DIV",{class:!0});var Y=a(N);v(xt.$$.fragment,Y),oi=c(Y),ge=n(Y,"P",{});var Bs=a(ge);ni=i(Bs,"The "),hs=n(Bs,"A",{href:!0});var bd=a(hs);ai=i(bd,"VisualBertForMultipleChoice"),bd.forEach(s),ri=i(Bs," forward method, overrides the "),ro=n(Bs,"CODE",{});var kd=a(ro);ii=i(kd,"__call__"),kd.forEach(s),li=i(Bs," special method."),Bs.forEach(s),di=c(Y),v(Ce.$$.fragment,Y),ci=c(Y),io=n(Y,"P",{});var Td=a(io);ui=i(Td,"Example:"),Td.forEach(s),hi=c(Y),v(Ft.$$.fragment,Y),Y.forEach(s),G.forEach(s),Uo=c(t),_e=n(t,"H2",{class:!0});var kn=a(_e);Re=n(kn,"A",{id:!0,class:!0,href:!0});var yd=a(Re);lo=n(yd,"SPAN",{});var wd=a(lo);v(qt.$$.fragment,wd),wd.forEach(s),yd.forEach(s),pi=c(kn),co=n(kn,"SPAN",{});var Bd=a(co);mi=i(Bd,"VisualBertForVisualReasoning"),Bd.forEach(s),kn.forEach(s),Ko=c(t),M=n(t,"DIV",{class:!0});var X=a(M);v(Et.$$.fragment,X),fi=c(X),uo=n(X,"P",{});var Vd=a(uo);gi=i(Vd,`VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled output) for Visual Reasoning e.g. for NLVR task.`),Vd.forEach(s),_i=c(X),Pt=n(X,"P",{});var Tn=a(Pt);vi=i(Tn,"This model inherits from "),ps=n(Tn,"A",{href:!0});var $d=a(ps);bi=i($d,"PreTrainedModel"),$d.forEach(s),ki=i(Tn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tn.forEach(s),Ti=c(X),Mt=n(X,"P",{});var yn=a(Mt);yi=i(yn,"This model is also a PyTorch "),At=n(yn,"A",{href:!0,rel:!0});var zd=a(At);wi=i(zd,"torch.nn.Module"),zd.forEach(s),Bi=i(yn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yn.forEach(s),Vi=c(X),I=n(X,"DIV",{class:!0});var Z=a(I);v(jt.$$.fragment,Z),$i=c(Z),ve=n(Z,"P",{});var Vs=a(ve);zi=i(Vs,"The "),ms=n(Vs,"A",{href:!0});var xd=a(ms);xi=i(xd,"VisualBertForVisualReasoning"),xd.forEach(s),Fi=i(Vs," forward method, overrides the "),ho=n(Vs,"CODE",{});var Fd=a(ho);qi=i(Fd,"__call__"),Fd.forEach(s),Ei=i(Vs," special method."),Vs.forEach(s),Pi=c(Z),v(Ne.$$.fragment,Z),Mi=c(Z),po=n(Z,"P",{});var qd=a(po);Ai=i(qd,"Example:"),qd.forEach(s),ji=c(Z),v(Ct.$$.fragment,Z),Z.forEach(s),X.forEach(s),Jo=c(t),be=n(t,"H2",{class:!0});var wn=a(be);Ie=n(wn,"A",{id:!0,class:!0,href:!0});var Ed=a(Ie);mo=n(Ed,"SPAN",{});var Pd=a(mo);v(Rt.$$.fragment,Pd),Pd.forEach(s),Ed.forEach(s),Ci=c(wn),fo=n(wn,"SPAN",{});var Md=a(fo);Ri=i(Md,"VisualBertForRegionToPhraseAlignment"),Md.forEach(s),wn.forEach(s),Go=c(t),A=n(t,"DIV",{class:!0});var ee=a(A);v(Nt.$$.fragment,ee),Ni=c(ee),go=n(ee,"P",{});var Ad=a(go);Ii=i(Ad,`VisualBert Model with a Masked Language Modeling head and an attention layer on top for Region-to-Phrase Alignment e.g. for Flickr30 Entities task.`),Ad.forEach(s),Di=c(ee),It=n(ee,"P",{});var Bn=a(It);Li=i(Bn,"This model inherits from "),fs=n(Bn,"A",{href:!0});var jd=a(fs);Si=i(jd,"PreTrainedModel"),jd.forEach(s),Wi=i(Bn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bn.forEach(s),Oi=c(ee),Dt=n(ee,"P",{});var Vn=a(Dt);Qi=i(Vn,"This model is also a PyTorch "),Lt=n(Vn,"A",{href:!0,rel:!0});var Cd=a(Lt);Hi=i(Cd,"torch.nn.Module"),Cd.forEach(s),Ui=i(Vn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vn.forEach(s),Ki=c(ee),D=n(ee,"DIV",{class:!0});var te=a(D);v(St.$$.fragment,te),Ji=c(te),ke=n(te,"P",{});var $s=a(ke);Gi=i($s,"The "),gs=n($s,"A",{href:!0});var Rd=a(gs);Yi=i(Rd,"VisualBertForRegionToPhraseAlignment"),Rd.forEach(s),Xi=i($s," forward method, overrides the "),_o=n($s,"CODE",{});var Nd=a(_o);Zi=i(Nd,"__call__"),Nd.forEach(s),el=i($s," special method."),$s.forEach(s),tl=c(te),v(De.$$.fragment,te),sl=c(te),vo=n(te,"P",{});var Id=a(vo);ol=i(Id,"Example:"),Id.forEach(s),nl=c(te),v(Wt.$$.fragment,te),te.forEach(s),ee.forEach(s),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(Yd)),l(w,"id","visualbert"),l(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(w,"href","#visualbert"),l(m,"class","relative group"),l(Te,"id","overview"),l(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Te,"href","#overview"),l(oe,"class","relative group"),l(We,"href","https://arxiv.org/pdf/1908.03557"),l(We,"rel","nofollow"),l(Jt,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForPreTraining"),l(Be,"id","usage"),l(Be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Be,"href","#usage"),l(ne,"class","relative 
group"),l(Xt,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),l(He,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects/visual_bert"),l(He,"rel","nofollow"),l(Ue,"href","https://colab.research.google.com/drive/1bLGxKdldwqnMVA5x4neY7-l_8fKGWQYI?usp=sharing"),l(Ue,"rel","nofollow"),l(ts,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel"),l(Je,"href","https://huggingface.co/gchhablani"),l(Je,"rel","nofollow"),l(Ge,"href","https://github.com/uclanlp/visualbert"),l(Ge,"rel","nofollow"),l(xe,"id","transformers.VisualBertConfig"),l(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(xe,"href","#transformers.VisualBertConfig"),l(ae,"class","relative group"),l(ss,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel"),l(Ze,"href","https://huggingface.co/uclanlp/visualbert-vqa-coco-pre"),l(Ze,"rel","nofollow"),l(os,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(ns,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(x,"class","docstring"),l(Fe,"id","transformers.VisualBertModel"),l(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Fe,"href","#transformers.VisualBertModel"),l(le,"class","relative group"),l(as,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(at,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(at,"rel","nofollow"),l(rs,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertModel"),l(j,"class","docstring"),l(F,"class","docstring"),l(Ee,"id","transformers.VisualBertForPreTraining"),l(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ee,"href","#transformers.VisualBertForPreTraining"),l(ce,"class","relative group"),l(is,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(pt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(pt,"rel","nofollow"),l(ls,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForPreTraining"),l(C,"class","docstring"),l(q,"class","docstring"),l(Me,"id","transformers.VisualBertForQuestionAnswering"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.VisualBertForQuestionAnswering"),l(pe,"class","relative group"),l(ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(kt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(kt,"rel","nofollow"),l(cs,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForQuestionAnswering"),l(R,"class","docstring"),l(E,"class","docstring"),l(je,"id","transformers.VisualBertForMultipleChoice"),l(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(je,"href","#transformers.VisualBertForMultipleChoice"),l(fe,"class","relative group"),l(us,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(zt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(zt,"rel","nofollow"),l(hs,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForMultipleChoice"),l(N,"class","docstring"),l(P,"class","docstring"),l(Re,"id","transformers.VisualBertForVisualReasoning"),l(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Re,"href","#transformers.VisualBertForVisualReasoning"),l(_e,"class","relative group"),l(ps,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(At,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(At,"rel","nofollow"),l(ms,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForVisualReasoning"),l(I,"class","docstring"),l(M,"class","docstring"),l(Ie,"id","transformers.VisualBertForRegionToPhraseAlignment"),l(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ie,"href","#transformers.VisualBertForRegionToPhraseAlignment"),l(be,"class","relative group"),l(fs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Lt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Lt,"rel","nofollow"),l(gs,"href","/docs/transformers/v4.15.0/en/model_doc/visual_bert#transformers.VisualBertForRegionToPhraseAlignment"),l(D,"class","docstring"),l(A,"class","docstring")},m(t,u){e(document.head,h),p(t,B,u),p(t,m,u),e(m,w),e(w,V),b(g,V,null),e(m,f),e(m,$),e($,$n),p(t,wo,u),p(t,oe,u),e(oe,Te),e(Te,xs),b(Se,xs,null),e(oe,zn),e(oe,Fs),e(Fs,xn),p(t,Bo,u),p(t,ye,u),e(ye,Fn),e(ye,We),e(We,qn),e(ye,En),p(t,Vo,u),p(t,Ht,u),e(Ht,Pn),p(t,$o,u),p(t,Ut,u),e(Ut,qs),e(qs,Mn),p(t,zo,u),p(t,Kt,u),e(Kt,An),p(t,xo,u),p(t,we,u),e(we,Es),e(Es,Oe),e(Oe,jn),e(Oe,Jt),e(Jt,Cn),e(Oe,Rn),e(we,Nn),e(we,Ps),e(Ps,Ms),e(Ms,In),p(t,Fo,u),p(t,ne,u),e(ne,Be),e(Be,As),b(Qe,As,null),e(ne,Dn),e(ne,js),e(js,Ln),p(t,qo,u),p(t,Gt,u),e(Gt,Sn),p(t,Eo,u),p(t,Yt,u),e(Yt,Wn),p(t,Po,u),p(t,Ve,u),e(Ve,On),e(Ve,Xt),e(Xt,Qn),e(Ve,Hn),p(t,Mo,u),p(t,$e,u),e($e,Cs),e(Cs,Zt),e(Zt,He),e(He,Un),e(Zt,Kn),e($e,Jn),e($e,Rs),e(Rs,es),e(es,Ue),e(Ue,Gn),e(es,Yn),p(t,Ao,u),p(t,ze,u),e(ze,Xn),e(ze,ts),e(ts,Zn),e(ze,ea),p(t,jo,u),b(Ke,t,u),p(t,Co,u),p(t,S,u),e(S,ta),e(S,Je),e(Je,sa),e(S,oa),e(S,Ge),e(Ge,na),e(S,aa),p(t,Ro,u),p(t,ae,u),e(ae,xe),e(xe,Ns),b(Ye,Ns,null),e(ae,ra),e(ae,Is),e(Is,ia),p(t,No,u),p(t,x,u),b(Xe,x,null),e(x,la),e(x,re),e(re,da),e(re,ss),e(ss,ca),e(re,ua),e(re,Ze),e(Ze,ha),e(re,pa),e(x,ma),e(x,ie),e(ie,fa),e(ie,os),e(os,ga),e(ie,_a),e(ie,ns),e(ns,va),e(ie,ba),e(x,ka),e(x,Ds),e(Ds,Ta),e(x,ya),b(et,x,null),p(t,Io,u),p(t,le,u),e(le,Fe),e(Fe,Ls),b(tt,Ls,null),e(le,wa),e(le,Ss),e(Ss,Ba),p(t,Do,u),p(t,F,u),b(st,F,null),e(F,Va),e(F,ot),e(ot,$a),e(ot,as),e(as,za),e(ot,xa),e(F,Fa),e(F,nt),e(nt,qa),e(nt,at),e(at,Ea),e(nt,Pa),e(F,Ma),e(F,rt),e(rt,Aa),e(rt,Ws),e(Ws,ja),e(rt,Ca),e(F,Ra),e(F,j),b(it,j,null),e(j,Na),e(j,de),e(de,Ia),e(de,rs),e(rs,Da),e(de,La),e(de,Os),e(Os,Sa),e(de,Wa),e(j,Oa),b(qe,j,null),e(j,Qa),e(j,Qs),e(Qs,Ha),e(j,Ua),b(lt,j,null),p(t,Lo,u),p(t,ce,u),e(ce,Ee),e(Ee,Hs),b(dt,Hs,null),e(ce,Ka),e(ce,Us),e(Us,Ja),
p(t,So,u),p(t,q,u),b(ct,q,null),e(q,Ga),e(q,ue),e(ue,Ya),e(ue,Ks),e(Ks,Xa),e(ue,Za),e(ue,Js),e(Js,er),e(ue,tr),e(q,sr),e(q,ut),e(ut,or),e(ut,is),e(is,nr),e(ut,ar),e(q,rr),e(q,ht),e(ht,ir),e(ht,pt),e(pt,lr),e(ht,dr),e(q,cr),e(q,C),b(mt,C,null),e(C,ur),e(C,he),e(he,hr),e(he,ls),e(ls,pr),e(he,mr),e(he,Gs),e(Gs,fr),e(he,gr),e(C,_r),b(Pe,C,null),e(C,vr),e(C,Ys),e(Ys,br),e(C,kr),b(ft,C,null),p(t,Wo,u),p(t,pe,u),e(pe,Me),e(Me,Xs),b(gt,Xs,null),e(pe,Tr),e(pe,Zs),e(Zs,yr),p(t,Oo,u),p(t,E,u),b(_t,E,null),e(E,wr),e(E,eo),e(eo,Br),e(E,Vr),e(E,vt),e(vt,$r),e(vt,ds),e(ds,zr),e(vt,xr),e(E,Fr),e(E,bt),e(bt,qr),e(bt,kt),e(kt,Er),e(bt,Pr),e(E,Mr),e(E,R),b(Tt,R,null),e(R,Ar),e(R,me),e(me,jr),e(me,cs),e(cs,Cr),e(me,Rr),e(me,to),e(to,Nr),e(me,Ir),e(R,Dr),b(Ae,R,null),e(R,Lr),e(R,so),e(so,Sr),e(R,Wr),b(yt,R,null),p(t,Qo,u),p(t,fe,u),e(fe,je),e(je,oo),b(wt,oo,null),e(fe,Or),e(fe,no),e(no,Qr),p(t,Ho,u),p(t,P,u),b(Bt,P,null),e(P,Hr),e(P,ao),e(ao,Ur),e(P,Kr),e(P,Vt),e(Vt,Jr),e(Vt,us),e(us,Gr),e(Vt,Yr),e(P,Xr),e(P,$t),e($t,Zr),e($t,zt),e(zt,ei),e($t,ti),e(P,si),e(P,N),b(xt,N,null),e(N,oi),e(N,ge),e(ge,ni),e(ge,hs),e(hs,ai),e(ge,ri),e(ge,ro),e(ro,ii),e(ge,li),e(N,di),b(Ce,N,null),e(N,ci),e(N,io),e(io,ui),e(N,hi),b(Ft,N,null),p(t,Uo,u),p(t,_e,u),e(_e,Re),e(Re,lo),b(qt,lo,null),e(_e,pi),e(_e,co),e(co,mi),p(t,Ko,u),p(t,M,u),b(Et,M,null),e(M,fi),e(M,uo),e(uo,gi),e(M,_i),e(M,Pt),e(Pt,vi),e(Pt,ps),e(ps,bi),e(Pt,ki),e(M,Ti),e(M,Mt),e(Mt,yi),e(Mt,At),e(At,wi),e(Mt,Bi),e(M,Vi),e(M,I),b(jt,I,null),e(I,$i),e(I,ve),e(ve,zi),e(ve,ms),e(ms,xi),e(ve,Fi),e(ve,ho),e(ho,qi),e(ve,Ei),e(I,Pi),b(Ne,I,null),e(I,Mi),e(I,po),e(po,Ai),e(I,ji),b(Ct,I,null),p(t,Jo,u),p(t,be,u),e(be,Ie),e(Ie,mo),b(Rt,mo,null),e(be,Ci),e(be,fo),e(fo,Ri),p(t,Go,u),p(t,A,u),b(Nt,A,null),e(A,Ni),e(A,go),e(go,Ii),e(A,Di),e(A,It),e(It,Li),e(It,fs),e(fs,Si),e(It,Wi),e(A,Oi),e(A,Dt),e(Dt,Qi),e(Dt,Lt),e(Lt,Hi),e(Dt,Ui),e(A,Ki),e(A,D),b(St,D,null),e(D,Ji),e(D,ke),e(ke,Gi),e(ke,gs),e(gs,Yi),e(ke,Xi),e(ke,_o),e(_o,Zi),e(ke,el),e(D,tl),b(De,D,null),e(D,sl),e(D,vo),e(vo,ol),e(D,nl),b(Wt,D,null),Yo=!0},p(t,[u]){const Ot={};u&2&&(Ot.$$scope={dirty:u,ctx:t}),qe.$set(Ot);const bo={};u&2&&(bo.$$scope={dirty:u,ctx:t}),Pe.$set(bo);const ko={};u&2&&(ko.$$scope={dirty:u,ctx:t}),Ae.$set(ko);const To={};u&2&&(To.$$scope={dirty:u,ctx:t}),Ce.$set(To);const Qt={};u&2&&(Qt.$$scope={dirty:u,ctx:t}),Ne.$set(Qt);const 
yo={};u&2&&(yo.$$scope={dirty:u,ctx:t}),De.$set(yo)},i(t){Yo||(k(g.$$.fragment,t),k(Se.$$.fragment,t),k(Qe.$$.fragment,t),k(Ke.$$.fragment,t),k(Ye.$$.fragment,t),k(Xe.$$.fragment,t),k(et.$$.fragment,t),k(tt.$$.fragment,t),k(st.$$.fragment,t),k(it.$$.fragment,t),k(qe.$$.fragment,t),k(lt.$$.fragment,t),k(dt.$$.fragment,t),k(ct.$$.fragment,t),k(mt.$$.fragment,t),k(Pe.$$.fragment,t),k(ft.$$.fragment,t),k(gt.$$.fragment,t),k(_t.$$.fragment,t),k(Tt.$$.fragment,t),k(Ae.$$.fragment,t),k(yt.$$.fragment,t),k(wt.$$.fragment,t),k(Bt.$$.fragment,t),k(xt.$$.fragment,t),k(Ce.$$.fragment,t),k(Ft.$$.fragment,t),k(qt.$$.fragment,t),k(Et.$$.fragment,t),k(jt.$$.fragment,t),k(Ne.$$.fragment,t),k(Ct.$$.fragment,t),k(Rt.$$.fragment,t),k(Nt.$$.fragment,t),k(St.$$.fragment,t),k(De.$$.fragment,t),k(Wt.$$.fragment,t),Yo=!0)},o(t){T(g.$$.fragment,t),T(Se.$$.fragment,t),T(Qe.$$.fragment,t),T(Ke.$$.fragment,t),T(Ye.$$.fragment,t),T(Xe.$$.fragment,t),T(et.$$.fragment,t),T(tt.$$.fragment,t),T(st.$$.fragment,t),T(it.$$.fragment,t),T(qe.$$.fragment,t),T(lt.$$.fragment,t),T(dt.$$.fragment,t),T(ct.$$.fragment,t),T(mt.$$.fragment,t),T(Pe.$$.fragment,t),T(ft.$$.fragment,t),T(gt.$$.fragment,t),T(_t.$$.fragment,t),T(Tt.$$.fragment,t),T(Ae.$$.fragment,t),T(yt.$$.fragment,t),T(wt.$$.fragment,t),T(Bt.$$.fragment,t),T(xt.$$.fragment,t),T(Ce.$$.fragment,t),T(Ft.$$.fragment,t),T(qt.$$.fragment,t),T(Et.$$.fragment,t),T(jt.$$.fragment,t),T(Ne.$$.fragment,t),T(Ct.$$.fragment,t),T(Rt.$$.fragment,t),T(Nt.$$.fragment,t),T(St.$$.fragment,t),T(De.$$.fragment,t),T(Wt.$$.fragment,t),Yo=!1},d(t){s(h),t&&s(B),t&&s(m),y(g),t&&s(wo),t&&s(oe),y(Se),t&&s(Bo),t&&s(ye),t&&s(Vo),t&&s(Ht),t&&s($o),t&&s(Ut),t&&s(zo),t&&s(Kt),t&&s(xo),t&&s(we),t&&s(Fo),t&&s(ne),y(Qe),t&&s(qo),t&&s(Gt),t&&s(Eo),t&&s(Yt),t&&s(Po),t&&s(Ve),t&&s(Mo),t&&s($e),t&&s(Ao),t&&s(ze),t&&s(jo),y(Ke,t),t&&s(Co),t&&s(S),t&&s(Ro),t&&s(ae),y(Ye),t&&s(No),t&&s(x),y(Xe),y(et),t&&s(Io),t&&s(le),y(tt),t&&s(Do),t&&s(F),y(st),y(it),y(qe),y(lt),t&&s(Lo),t&&s(ce),y(dt),t&&s(So),t&&s(q),y(ct),y(mt),y(Pe),y(ft),t&&s(Wo),t&&s(pe),y(gt),t&&s(Oo),t&&s(E),y(_t),y(Tt),y(Ae),y(yt),t&&s(Qo),t&&s(fe),y(wt),t&&s(Ho),t&&s(P),y(Bt),y(xt),y(Ce),y(Ft),t&&s(Uo),t&&s(_e),y(qt),t&&s(Ko),t&&s(M),y(Et),y(jt),y(Ne),y(Ct),t&&s(Jo),t&&s(be),y(Rt),t&&s(Go),t&&s(A),y(Nt),y(St),y(De),y(Wt)}}}const Yd={local:"visualbert",sections:[{local:"overview",title:"Overview"},{local:"usage",title:"Usage"},{local:"transformers.VisualBertConfig",title:"VisualBertConfig"},{local:"transformers.VisualBertModel",title:"VisualBertModel"},{local:"transformers.VisualBertForPreTraining",title:"VisualBertForPreTraining"},{local:"transformers.VisualBertForQuestionAnswering",title:"VisualBertForQuestionAnswering"},{local:"transformers.VisualBertForMultipleChoice",title:"VisualBertForMultipleChoice"},{local:"transformers.VisualBertForVisualReasoning",title:"VisualBertForVisualReasoning"},{local:"transformers.VisualBertForRegionToPhraseAlignment",title:"VisualBertForRegionToPhraseAlignment"}],title:"VisualBERT"};function Xd(z,h,B){let{fw:m}=h;return z.$$set=w=>{"fw"in w&&B(0,m=w.fw)},[m]}class ac extends Dd{constructor(h){super();Ld(this,h,Xd,Gd,Sd,{fw:0})}}export{ac as default,Yd as metadata};
9,929
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/ibert.mdx-3f6dfaa4.js
import{S as bl,i as vl,s as kl,e as n,k as d,w as b,t as a,L as wl,c as s,d as o,m as c,a as r,x as v,h as i,b as l,J as e,g as m,y as k,q as w,o as T,B as y}from"../../chunks/vendor-b1433968.js";import{T as io}from"../../chunks/Tip-c3840994.js";import{D}from"../../chunks/Docstring-ff504c58.js";import{C as xt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as _e}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Tl(q){let h,I,u,_,$;return{c(){h=n("p"),I=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),_=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var f=r(h);I=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var B=r(u);_=i(B,"Module"),B.forEach(o),$=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,I),e(h,u),e(u,_),e(h,$)},d(g){g&&o(h)}}}function yl(q){let h,I,u,_,$;return{c(){h=n("p"),I=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),_=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var f=r(h);I=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var B=r(u);_=i(B,"Module"),B.forEach(o),$=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,I),e(h,u),e(u,_),e(h,$)},d(g){g&&o(h)}}}function Il(q){let h,I,u,_,$;return{c(){h=n("p"),I=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),_=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var f=r(h);I=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var B=r(u);_=i(B,"Module"),B.forEach(o),$=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,I),e(h,u),e(u,_),e(h,$)},d(g){g&&o(h)}}}function $l(q){let h,I,u,_,$;return{c(){h=n("p"),I=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),_=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var f=r(h);I=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var B=r(u);_=i(B,"Module"),B.forEach(o),$=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,I),e(h,u),e(u,_),e(h,$)},d(g){g&&o(h)}}}function Bl(q){let 
h,I,u,_,$;return{c(){h=n("p"),I=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),_=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var f=r(h);I=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var B=r(u);_=i(B,"Module"),B.forEach(o),$=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,I),e(h,u),e(u,_),e(h,$)},d(g){g&&o(h)}}}function zl(q){let h,I,u,_,$;return{c(){h=n("p"),I=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),_=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var f=r(h);I=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var B=r(u);_=i(B,"Module"),B.forEach(o),$=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){m(g,h,f),e(h,I),e(h,u),e(u,_),e(h,$)},d(g){g&&o(h)}}}function Ml(q){let h,I,u,_,$,g,f,B,Dn,Yo,te,be,lo,xe,Wn,co,On,Zo,ve,Qn,Pe,Hn,Un,Xo,Pt,Gn,en,jt,ho,Vn,tn,Q,Jn,je,Kn,Yn,Ae,Zn,Xn,on,oe,ke,po,Se,es,uo,ts,nn,O,Le,os,Re,ns,At,ss,rs,as,ne,is,St,ls,ds,Lt,cs,hs,sn,se,we,mo,Ne,ps,fo,us,rn,M,De,ms,go,fs,gs,We,_s,Rt,bs,vs,ks,Oe,ws,Qe,Ts,ys,Is,He,$s,_o,Bs,zs,Ms,j,Ue,qs,re,Fs,Nt,Es,Cs,bo,xs,Ps,js,Te,As,vo,Ss,Ls,Ge,an,ae,ye,ko,Ve,Rs,wo,Ns,ln,F,Je,Ds,Ke,Ws,To,Os,Qs,Hs,Ye,Us,Dt,Gs,Vs,Js,Ze,Ks,Xe,Ys,Zs,Xs,A,et,er,ie,tr,Wt,or,nr,yo,sr,rr,ar,Ie,ir,Io,lr,dr,tt,dn,le,$e,$o,ot,cr,Bo,hr,cn,E,nt,pr,zo,ur,mr,st,fr,Ot,gr,_r,br,rt,vr,at,kr,wr,Tr,z,it,yr,de,Ir,Qt,$r,Br,Mo,zr,Mr,qr,Be,Fr,qo,Er,Cr,lt,xr,Fo,Pr,jr,dt,hn,ce,ze,Eo,ct,Ar,Co,Sr,pn,C,ht,Lr,xo,Rr,Nr,pt,Dr,Ht,Wr,Or,Qr,ut,Hr,mt,Ur,Gr,Vr,S,ft,Jr,he,Kr,Ut,Yr,Zr,Po,Xr,ea,ta,Me,oa,jo,na,sa,gt,un,pe,qe,Ao,_t,ra,So,aa,mn,x,bt,ia,Lo,la,da,vt,ca,Gt,ha,pa,ua,kt,ma,wt,fa,ga,_a,L,Tt,ba,ue,va,Vt,ka,wa,Ro,Ta,ya,Ia,Fe,$a,No,Ba,za,yt,fn,me,Ee,Do,It,Ma,Wo,qa,gn,P,$t,Fa,fe,Ea,Oo,Ca,xa,Qo,Pa,ja,Aa,Bt,Sa,Jt,La,Ra,Na,zt,Da,Mt,Wa,Oa,Qa,R,qt,Ha,ge,Ua,Kt,Ga,Va,Ho,Ja,Ka,Ya,Ce,Za,Uo,Xa,ei,Ft,_n;return g=new _e({}),xe=new _e({}),Se=new _e({}),Le=new D({props:{name:"class transformers.IBertConfig",anchor:"transformers.IBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"quant_mode",val:" = False"},{name:"force_dequant",val:" = 
'none'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/configuration_ibert.py#L32",parametersDescription:[{anchor:"transformers.IBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertModel">IBertModel</a>`,name:"vocab_size"},{anchor:"transformers.IBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.IBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.IBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.IBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.IBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.IBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.IBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.IBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.IBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertModel">IBertModel</a>`,name:"type_vocab_size"},{anchor:"transformers.IBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.IBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.IBertConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.IBertConfig.quant_mode",description:`<strong>quant_mode</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to quantize the model or not.`,name:"quant_mode"},{anchor:"transformers.IBertConfig.force_dequant",description:`<strong>force_dequant</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; Force dequantize specific nonlinear layer. Dequantized layers are then executed with full precision. <code>&quot;none&quot;</code>, <code>&quot;gelu&quot;</code>, <code>&quot;softmax&quot;</code>, <code>&quot;layernorm&quot;</code> and <code>&quot;nonlinear&quot;</code> are supported. By default, it is set to <code>&quot;none&quot;</code>, which does not dequantize any layers. Please specify <code>&quot;gelu&quot;</code>, <code>&quot;softmax&quot;</code>, or <code>&quot;layernorm&quot;</code> to dequantize GELU, Softmax, or LayerNorm, respectively. 
<code>&quot;nonlinear&quot;</code> will dequantize all nonlinear layers, i.e., GELU, Softmax, and LayerNorm.`,name:"force_dequant"}]}}),Ne=new _e({}),De=new D({props:{name:"class transformers.IBertModel",anchor:"transformers.IBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L733",parametersDescription:[{anchor:"transformers.IBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ue=new D({props:{name:"forward",anchor:"transformers.IBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L772",parametersDescription:[{anchor:"transformers.IBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when 
<code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Te=new io({props:{$$slots:{default:[Tl]},$$scope:{ctx:q}}}),Ge=new xt({props:{code:`from transformers import RobertaTokenizer, IBertModel import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertModel.from_pretrained('kssteven/ibert-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertModel.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ve=new _e({}),Je=new D({props:{name:"class transformers.IBertForMaskedLM",anchor:"transformers.IBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L857",parametersDescription:[{anchor:"transformers.IBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),et=new D({props:{name:"forward",anchor:"transformers.IBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L876",parametersDescription:[{anchor:"transformers.IBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.IBertForMaskedLM.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ie=new io({props:{$$slots:{default:[yl]},$$scope:{ctx:q}}}),tt=new xt({props:{code:`from transformers import RobertaTokenizer, IBertForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertForMaskedLM.from_pretrained('kssteven/ibert-roberta-base') inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ot=new _e({}),nt=new D({props:{name:"class transformers.IBertForSequenceClassification",anchor:"transformers.IBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L971",parametersDescription:[{anchor:"transformers.IBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),it=new D({props:{name:"forward",anchor:"transformers.IBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L984",parametersDescription:[{anchor:"transformers.IBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Be=new 
io({props:{$$slots:{default:[Il]},$$scope:{ctx:q}}}),lt=new xt({props:{code:`from transformers import RobertaTokenizer, IBertForSequenceClassification import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertForSequenceClassification.from_pretrained('kssteven/ibert-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),dt=new xt({props:{code:`from transformers import RobertaTokenizer, IBertForSequenceClassification import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertForSequenceClassification.from_pretrained('kssteven/ibert-roberta-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ct=new _e({}),ht=new D({props:{name:"class transformers.IBertForMultipleChoice",anchor:"transformers.IBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L1066",parametersDescription:[{anchor:"transformers.IBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ft=new D({props:{name:"forward",anchor:"transformers.IBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L1079",parametersDescription:[{anchor:"transformers.IBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Me=new io({props:{$$slots:{default:[$l]},$$scope:{ctx:q}}}),gt=new xt({props:{code:`from transformers import RobertaTokenizer, IBertForMultipleChoice import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertForMultipleChoice.from_pretrained('kssteven/ibert-roberta-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),_t=new _e({}),bt=new D({props:{name:"class transformers.IBertForTokenClassification",anchor:"transformers.IBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L1158",parametersDescription:[{anchor:"transformers.IBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tt=new D({props:{name:"forward",anchor:"transformers.IBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L1173",parametersDescription:[{anchor:"transformers.IBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fe=new io({props:{$$slots:{default:[Bl]},$$scope:{ctx:q}}}),yt=new xt({props:{code:`from transformers import RobertaTokenizer, IBertForTokenClassification import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertForTokenClassification.from_pretrained('kssteven/ibert-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, 
return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),It=new _e({}),$t=new D({props:{name:"class transformers.IBertForQuestionAnswering",anchor:"transformers.IBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L1268",parametersDescription:[{anchor:"transformers.IBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qt=new D({props:{name:"forward",anchor:"transformers.IBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ibert/modeling_ibert.py#L1282",parametersDescription:[{anchor:"transformers.IBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.IBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ce=new io({props:{$$slots:{default:[zl]},$$scope:{ctx:q}}}),Ft=new xt({props:{code:`from transformers import RobertaTokenizer, IBertForQuestionAnswering import torch tokenizer = RobertaTokenizer.from_pretrained('kssteven/ibert-roberta-base') model = IBertForQuestionAnswering.from_pretrained('kssteven/ibert-roberta-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;kssteven/ibert-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){h=n("meta"),I=d(),u=n("h1"),_=n("a"),$=n("span"),b(g.$$.fragment),f=d(),B=n("span"),Dn=a("I-BERT"),Yo=d(),te=n("h2"),be=n("a"),lo=n("span"),b(xe.$$.fragment),Wn=d(),co=n("span"),On=a("Overview"),Zo=d(),ve=n("p"),Qn=a("The I-BERT model was proposed in "),Pe=n("a"),Hn=a("I-BERT: Integer-only BERT Quantization"),Un=a(` by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It\u2019s a quantized version of RoBERTa running inference up to four times faster.`),Xo=d(),Pt=n("p"),Gn=a("The abstract from the paper is the following:"),en=d(),jt=n("p"),ho=n("em"),Vn=a(`Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for efficient inference at the edge, and even at the data center. 
While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.`),tn=d(),Q=n("p"),Jn=a("This model was contributed by "),je=n("a"),Kn=a("kssteven"),Yn=a(". The original code can be found "),Ae=n("a"),Zn=a("here"),Xn=a("."),on=d(),oe=n("h2"),ke=n("a"),po=n("span"),b(Se.$$.fragment),es=d(),uo=n("span"),ts=a("IBertConfig"),nn=d(),O=n("div"),b(Le.$$.fragment),os=d(),Re=n("p"),ns=a("This is the configuration class to store the configuration of a "),At=n("a"),ss=a("IBertModel"),rs=a(`. It is used to instantiate a I-BERT model according to the specified arguments,`),as=d(),ne=n("p"),is=a("Configuration objects inherit from "),St=n("a"),ls=a("PretrainedConfig"),ds=a(` and can be used to control the model outputs. Read the documentation from `),Lt=n("a"),cs=a("PretrainedConfig"),hs=a(" for more information."),sn=d(),se=n("h2"),we=n("a"),mo=n("span"),b(Ne.$$.fragment),ps=d(),fo=n("span"),us=a("IBertModel"),rn=d(),M=n("div"),b(De.$$.fragment),ms=d(),go=n("p"),fs=a("The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top."),gs=d(),We=n("p"),_s=a("This model inherits from "),Rt=n("a"),bs=a("PreTrainedModel"),vs=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ks=d(),Oe=n("p"),ws=a("This model is also a PyTorch "),Qe=n("a"),Ts=a("torch.nn.Module"),ys=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Is=d(),He=n("p"),$s=a(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),_o=n("code"),Bs=a("Attention is all you need <https://arxiv.org/abs/1706.03762>"),zs=a(`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Ms=d(),j=n("div"),b(Ue.$$.fragment),qs=d(),re=n("p"),Fs=a("The "),Nt=n("a"),Es=a("IBertModel"),Cs=a(" forward method, overrides the "),bo=n("code"),xs=a("__call__"),Ps=a(" special method."),js=d(),b(Te.$$.fragment),As=d(),vo=n("p"),Ss=a("Example:"),Ls=d(),b(Ge.$$.fragment),an=d(),ae=n("h2"),ye=n("a"),ko=n("span"),b(Ve.$$.fragment),Rs=d(),wo=n("span"),Ns=a("IBertForMaskedLM"),ln=d(),F=n("div"),b(Je.$$.fragment),Ds=d(),Ke=n("p"),Ws=a("I-BERT Model with a "),To=n("code"),Os=a("language modeling"),Qs=a(" head on top."),Hs=d(),Ye=n("p"),Us=a("This model inherits from "),Dt=n("a"),Gs=a("PreTrainedModel"),Vs=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Js=d(),Ze=n("p"),Ks=a("This model is also a PyTorch "),Xe=n("a"),Ys=a("torch.nn.Module"),Zs=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xs=d(),A=n("div"),b(et.$$.fragment),er=d(),ie=n("p"),tr=a("The "),Wt=n("a"),or=a("IBertForMaskedLM"),nr=a(" forward method, overrides the "),yo=n("code"),sr=a("__call__"),rr=a(" special method."),ar=d(),b(Ie.$$.fragment),ir=d(),Io=n("p"),lr=a("Example:"),dr=d(),b(tt.$$.fragment),dn=d(),le=n("h2"),$e=n("a"),$o=n("span"),b(ot.$$.fragment),cr=d(),Bo=n("span"),hr=a("IBertForSequenceClassification"),cn=d(),E=n("div"),b(nt.$$.fragment),pr=d(),zo=n("p"),ur=a(`I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),mr=d(),st=n("p"),fr=a("This model inherits from "),Ot=n("a"),gr=a("PreTrainedModel"),_r=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),br=d(),rt=n("p"),vr=a("This model is also a PyTorch "),at=n("a"),kr=a("torch.nn.Module"),wr=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tr=d(),z=n("div"),b(it.$$.fragment),yr=d(),de=n("p"),Ir=a("The "),Qt=n("a"),$r=a("IBertForSequenceClassification"),Br=a(" forward method, overrides the "),Mo=n("code"),zr=a("__call__"),Mr=a(" special method."),qr=d(),b(Be.$$.fragment),Fr=d(),qo=n("p"),Er=a("Example of single-label classification:"),Cr=d(),b(lt.$$.fragment),xr=d(),Fo=n("p"),Pr=a("Example of multi-label classification:"),jr=d(),b(dt.$$.fragment),hn=d(),ce=n("h2"),ze=n("a"),Eo=n("span"),b(ct.$$.fragment),Ar=d(),Co=n("span"),Sr=a("IBertForMultipleChoice"),pn=d(),C=n("div"),b(ht.$$.fragment),Lr=d(),xo=n("p"),Rr=a(`I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Nr=d(),pt=n("p"),Dr=a("This model inherits from "),Ht=n("a"),Wr=a("PreTrainedModel"),Or=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qr=d(),ut=n("p"),Hr=a("This model is also a PyTorch "),mt=n("a"),Ur=a("torch.nn.Module"),Gr=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vr=d(),S=n("div"),b(ft.$$.fragment),Jr=d(),he=n("p"),Kr=a("The "),Ut=n("a"),Yr=a("IBertForMultipleChoice"),Zr=a(" forward method, overrides the "),Po=n("code"),Xr=a("__call__"),ea=a(" special method."),ta=d(),b(Me.$$.fragment),oa=d(),jo=n("p"),na=a("Example:"),sa=d(),b(gt.$$.fragment),un=d(),pe=n("h2"),qe=n("a"),Ao=n("span"),b(_t.$$.fragment),ra=d(),So=n("span"),aa=a("IBertForTokenClassification"),mn=d(),x=n("div"),b(bt.$$.fragment),ia=d(),Lo=n("p"),la=a(`I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),da=d(),vt=n("p"),ca=a("This model inherits from "),Gt=n("a"),ha=a("PreTrainedModel"),pa=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ua=d(),kt=n("p"),ma=a("This model is also a PyTorch "),wt=n("a"),fa=a("torch.nn.Module"),ga=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_a=d(),L=n("div"),b(Tt.$$.fragment),ba=d(),ue=n("p"),va=a("The "),Vt=n("a"),ka=a("IBertForTokenClassification"),wa=a(" forward method, overrides the "),Ro=n("code"),Ta=a("__call__"),ya=a(" special method."),Ia=d(),b(Fe.$$.fragment),$a=d(),No=n("p"),Ba=a("Example:"),za=d(),b(yt.$$.fragment),fn=d(),me=n("h2"),Ee=n("a"),Do=n("span"),b(It.$$.fragment),Ma=d(),Wo=n("span"),qa=a("IBertForQuestionAnswering"),gn=d(),P=n("div"),b($t.$$.fragment),Fa=d(),fe=n("p"),Ea=a(`I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Oo=n("code"),Ca=a("span start logits"),xa=a(" and "),Qo=n("code"),Pa=a("span end logits"),ja=a(")."),Aa=d(),Bt=n("p"),Sa=a("This model inherits from "),Jt=n("a"),La=a("PreTrainedModel"),Ra=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Na=d(),zt=n("p"),Da=a("This model is also a PyTorch "),Mt=n("a"),Wa=a("torch.nn.Module"),Oa=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qa=d(),R=n("div"),b(qt.$$.fragment),Ha=d(),ge=n("p"),Ua=a("The "),Kt=n("a"),Ga=a("IBertForQuestionAnswering"),Va=a(" forward method, overrides the "),Ho=n("code"),Ja=a("__call__"),Ka=a(" special method."),Ya=d(),b(Ce.$$.fragment),Za=d(),Uo=n("p"),Xa=a("Example:"),ei=d(),b(Ft.$$.fragment),this.h()},l(t){const p=wl('[data-svelte="svelte-1phssyn"]',document.head);h=s(p,"META",{name:!0,content:!0}),p.forEach(o),I=c(t),u=s(t,"H1",{class:!0});var Et=r(u);_=s(Et,"A",{id:!0,class:!0,href:!0});var Go=r(_);$=s(Go,"SPAN",{});var Vo=r($);v(g.$$.fragment,Vo),Vo.forEach(o),Go.forEach(o),f=c(Et),B=s(Et,"SPAN",{});var Jo=r(B);Dn=i(Jo,"I-BERT"),Jo.forEach(o),Et.forEach(o),Yo=c(t),te=s(t,"H2",{class:!0});var Ct=r(te);be=s(Ct,"A",{id:!0,class:!0,href:!0});var Ko=r(be);lo=s(Ko,"SPAN",{});var ti=r(lo);v(xe.$$.fragment,ti),ti.forEach(o),Ko.forEach(o),Wn=c(Ct),co=s(Ct,"SPAN",{});var oi=r(co);On=i(oi,"Overview"),oi.forEach(o),Ct.forEach(o),Zo=c(t),ve=s(t,"P",{});var bn=r(ve);Qn=i(bn,"The I-BERT model was proposed in "),Pe=s(bn,"A",{href:!0,rel:!0});var ni=r(Pe);Hn=i(ni,"I-BERT: Integer-only BERT Quantization"),ni.forEach(o),Un=i(bn,` by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It\u2019s a quantized version of RoBERTa running inference up to four times faster.`),bn.forEach(o),Xo=c(t),Pt=s(t,"P",{});var si=r(Pt);Gn=i(si,"The abstract from the paper is the following:"),si.forEach(o),en=c(t),jt=s(t,"P",{});var ri=r(jt);ho=s(ri,"EM",{});var ai=r(ho);Vn=i(ai,`Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.`),ai.forEach(o),ri.forEach(o),tn=c(t),Q=s(t,"P",{});var Yt=r(Q);Jn=i(Yt,"This model was contributed by "),je=s(Yt,"A",{href:!0,rel:!0});var ii=r(je);Kn=i(ii,"kssteven"),ii.forEach(o),Yn=i(Yt,". 
The original code can be found "),Ae=s(Yt,"A",{href:!0,rel:!0});var li=r(Ae);Zn=i(li,"here"),li.forEach(o),Xn=i(Yt,"."),Yt.forEach(o),on=c(t),oe=s(t,"H2",{class:!0});var vn=r(oe);ke=s(vn,"A",{id:!0,class:!0,href:!0});var di=r(ke);po=s(di,"SPAN",{});var ci=r(po);v(Se.$$.fragment,ci),ci.forEach(o),di.forEach(o),es=c(vn),uo=s(vn,"SPAN",{});var hi=r(uo);ts=i(hi,"IBertConfig"),hi.forEach(o),vn.forEach(o),nn=c(t),O=s(t,"DIV",{class:!0});var Zt=r(O);v(Le.$$.fragment,Zt),os=c(Zt),Re=s(Zt,"P",{});var kn=r(Re);ns=i(kn,"This is the configuration class to store the configuration of a "),At=s(kn,"A",{href:!0});var pi=r(At);ss=i(pi,"IBertModel"),pi.forEach(o),rs=i(kn,`. It is used to instantiate a I-BERT model according to the specified arguments,`),kn.forEach(o),as=c(Zt),ne=s(Zt,"P",{});var Xt=r(ne);is=i(Xt,"Configuration objects inherit from "),St=s(Xt,"A",{href:!0});var ui=r(St);ls=i(ui,"PretrainedConfig"),ui.forEach(o),ds=i(Xt,` and can be used to control the model outputs. Read the documentation from `),Lt=s(Xt,"A",{href:!0});var mi=r(Lt);cs=i(mi,"PretrainedConfig"),mi.forEach(o),hs=i(Xt," for more information."),Xt.forEach(o),Zt.forEach(o),sn=c(t),se=s(t,"H2",{class:!0});var wn=r(se);we=s(wn,"A",{id:!0,class:!0,href:!0});var fi=r(we);mo=s(fi,"SPAN",{});var gi=r(mo);v(Ne.$$.fragment,gi),gi.forEach(o),fi.forEach(o),ps=c(wn),fo=s(wn,"SPAN",{});var _i=r(fo);us=i(_i,"IBertModel"),_i.forEach(o),wn.forEach(o),rn=c(t),M=s(t,"DIV",{class:!0});var W=r(M);v(De.$$.fragment,W),ms=c(W),go=s(W,"P",{});var bi=r(go);fs=i(bi,"The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top."),bi.forEach(o),gs=c(W),We=s(W,"P",{});var Tn=r(We);_s=i(Tn,"This model inherits from "),Rt=s(Tn,"A",{href:!0});var vi=r(Rt);bs=i(vi,"PreTrainedModel"),vi.forEach(o),vs=i(Tn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tn.forEach(o),ks=c(W),Oe=s(W,"P",{});var yn=r(Oe);ws=i(yn,"This model is also a PyTorch "),Qe=s(yn,"A",{href:!0,rel:!0});var ki=r(Qe);Ts=i(ki,"torch.nn.Module"),ki.forEach(o),ys=i(yn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yn.forEach(o),Is=c(W),He=s(W,"P",{});var In=r(He);$s=i(In,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),_o=s(In,"CODE",{});var wi=r(_o);Bs=i(wi,"Attention is all you need <https://arxiv.org/abs/1706.03762>"),wi.forEach(o),zs=i(In,`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),In.forEach(o),Ms=c(W),j=s(W,"DIV",{class:!0});var H=r(j);v(Ue.$$.fragment,H),qs=c(H),re=s(H,"P",{});var eo=r(re);Fs=i(eo,"The "),Nt=s(eo,"A",{href:!0});var Ti=r(Nt);Es=i(Ti,"IBertModel"),Ti.forEach(o),Cs=i(eo," forward method, overrides the "),bo=s(eo,"CODE",{});var yi=r(bo);xs=i(yi,"__call__"),yi.forEach(o),Ps=i(eo," special method."),eo.forEach(o),js=c(H),v(Te.$$.fragment,H),As=c(H),vo=s(H,"P",{});var Ii=r(vo);Ss=i(Ii,"Example:"),Ii.forEach(o),Ls=c(H),v(Ge.$$.fragment,H),H.forEach(o),W.forEach(o),an=c(t),ae=s(t,"H2",{class:!0});var $n=r(ae);ye=s($n,"A",{id:!0,class:!0,href:!0});var $i=r(ye);ko=s($i,"SPAN",{});var Bi=r(ko);v(Ve.$$.fragment,Bi),Bi.forEach(o),$i.forEach(o),Rs=c($n),wo=s($n,"SPAN",{});var zi=r(wo);Ns=i(zi,"IBertForMaskedLM"),zi.forEach(o),$n.forEach(o),ln=c(t),F=s(t,"DIV",{class:!0});var U=r(F);v(Je.$$.fragment,U),Ds=c(U),Ke=s(U,"P",{});var Bn=r(Ke);Ws=i(Bn,"I-BERT Model with a "),To=s(Bn,"CODE",{});var Mi=r(To);Os=i(Mi,"language modeling"),Mi.forEach(o),Qs=i(Bn," head on top."),Bn.forEach(o),Hs=c(U),Ye=s(U,"P",{});var zn=r(Ye);Us=i(zn,"This model inherits from "),Dt=s(zn,"A",{href:!0});var qi=r(Dt);Gs=i(qi,"PreTrainedModel"),qi.forEach(o),Vs=i(zn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zn.forEach(o),Js=c(U),Ze=s(U,"P",{});var Mn=r(Ze);Ks=i(Mn,"This model is also a PyTorch "),Xe=s(Mn,"A",{href:!0,rel:!0});var Fi=r(Xe);Ys=i(Fi,"torch.nn.Module"),Fi.forEach(o),Zs=i(Mn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Mn.forEach(o),Xs=c(U),A=s(U,"DIV",{class:!0});var G=r(A);v(et.$$.fragment,G),er=c(G),ie=s(G,"P",{});var to=r(ie);tr=i(to,"The "),Wt=s(to,"A",{href:!0});var Ei=r(Wt);or=i(Ei,"IBertForMaskedLM"),Ei.forEach(o),nr=i(to," forward method, overrides the "),yo=s(to,"CODE",{});var Ci=r(yo);sr=i(Ci,"__call__"),Ci.forEach(o),rr=i(to," special method."),to.forEach(o),ar=c(G),v(Ie.$$.fragment,G),ir=c(G),Io=s(G,"P",{});var xi=r(Io);lr=i(xi,"Example:"),xi.forEach(o),dr=c(G),v(tt.$$.fragment,G),G.forEach(o),U.forEach(o),dn=c(t),le=s(t,"H2",{class:!0});var qn=r(le);$e=s(qn,"A",{id:!0,class:!0,href:!0});var Pi=r($e);$o=s(Pi,"SPAN",{});var ji=r($o);v(ot.$$.fragment,ji),ji.forEach(o),Pi.forEach(o),cr=c(qn),Bo=s(qn,"SPAN",{});var Ai=r(Bo);hr=i(Ai,"IBertForSequenceClassification"),Ai.forEach(o),qn.forEach(o),cn=c(t),E=s(t,"DIV",{class:!0});var V=r(E);v(nt.$$.fragment,V),pr=c(V),zo=s(V,"P",{});var Si=r(zo);ur=i(Si,`I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Si.forEach(o),mr=c(V),st=s(V,"P",{});var Fn=r(st);fr=i(Fn,"This model inherits from "),Ot=s(Fn,"A",{href:!0});var Li=r(Ot);gr=i(Li,"PreTrainedModel"),Li.forEach(o),_r=i(Fn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fn.forEach(o),br=c(V),rt=s(V,"P",{});var En=r(rt);vr=i(En,"This model is also a PyTorch "),at=s(En,"A",{href:!0,rel:!0});var Ri=r(at);kr=i(Ri,"torch.nn.Module"),Ri.forEach(o),wr=i(En,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),En.forEach(o),Tr=c(V),z=s(V,"DIV",{class:!0});var N=r(z);v(it.$$.fragment,N),yr=c(N),de=s(N,"P",{});var oo=r(de);Ir=i(oo,"The "),Qt=s(oo,"A",{href:!0});var Ni=r(Qt);$r=i(Ni,"IBertForSequenceClassification"),Ni.forEach(o),Br=i(oo," forward method, overrides the "),Mo=s(oo,"CODE",{});var Di=r(Mo);zr=i(Di,"__call__"),Di.forEach(o),Mr=i(oo," special method."),oo.forEach(o),qr=c(N),v(Be.$$.fragment,N),Fr=c(N),qo=s(N,"P",{});var Wi=r(qo);Er=i(Wi,"Example of single-label classification:"),Wi.forEach(o),Cr=c(N),v(lt.$$.fragment,N),xr=c(N),Fo=s(N,"P",{});var Oi=r(Fo);Pr=i(Oi,"Example of multi-label classification:"),Oi.forEach(o),jr=c(N),v(dt.$$.fragment,N),N.forEach(o),V.forEach(o),hn=c(t),ce=s(t,"H2",{class:!0});var Cn=r(ce);ze=s(Cn,"A",{id:!0,class:!0,href:!0});var Qi=r(ze);Eo=s(Qi,"SPAN",{});var Hi=r(Eo);v(ct.$$.fragment,Hi),Hi.forEach(o),Qi.forEach(o),Ar=c(Cn),Co=s(Cn,"SPAN",{});var Ui=r(Co);Sr=i(Ui,"IBertForMultipleChoice"),Ui.forEach(o),Cn.forEach(o),pn=c(t),C=s(t,"DIV",{class:!0});var J=r(C);v(ht.$$.fragment,J),Lr=c(J),xo=s(J,"P",{});var Gi=r(xo);Rr=i(Gi,`I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Gi.forEach(o),Nr=c(J),pt=s(J,"P",{});var xn=r(pt);Dr=i(xn,"This model inherits from "),Ht=s(xn,"A",{href:!0});var Vi=r(Ht);Wr=i(Vi,"PreTrainedModel"),Vi.forEach(o),Or=i(xn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xn.forEach(o),Qr=c(J),ut=s(J,"P",{});var Pn=r(ut);Hr=i(Pn,"This model is also a PyTorch "),mt=s(Pn,"A",{href:!0,rel:!0});var Ji=r(mt);Ur=i(Ji,"torch.nn.Module"),Ji.forEach(o),Gr=i(Pn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pn.forEach(o),Vr=c(J),S=s(J,"DIV",{class:!0});var K=r(S);v(ft.$$.fragment,K),Jr=c(K),he=s(K,"P",{});var no=r(he);Kr=i(no,"The "),Ut=s(no,"A",{href:!0});var Ki=r(Ut);Yr=i(Ki,"IBertForMultipleChoice"),Ki.forEach(o),Zr=i(no," forward method, overrides the "),Po=s(no,"CODE",{});var Yi=r(Po);Xr=i(Yi,"__call__"),Yi.forEach(o),ea=i(no," special method."),no.forEach(o),ta=c(K),v(Me.$$.fragment,K),oa=c(K),jo=s(K,"P",{});var Zi=r(jo);na=i(Zi,"Example:"),Zi.forEach(o),sa=c(K),v(gt.$$.fragment,K),K.forEach(o),J.forEach(o),un=c(t),pe=s(t,"H2",{class:!0});var jn=r(pe);qe=s(jn,"A",{id:!0,class:!0,href:!0});var Xi=r(qe);Ao=s(Xi,"SPAN",{});var el=r(Ao);v(_t.$$.fragment,el),el.forEach(o),Xi.forEach(o),ra=c(jn),So=s(jn,"SPAN",{});var tl=r(So);aa=i(tl,"IBertForTokenClassification"),tl.forEach(o),jn.forEach(o),mn=c(t),x=s(t,"DIV",{class:!0});var Y=r(x);v(bt.$$.fragment,Y),ia=c(Y),Lo=s(Y,"P",{});var ol=r(Lo);la=i(ol,`I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ol.forEach(o),da=c(Y),vt=s(Y,"P",{});var An=r(vt);ca=i(An,"This model inherits from "),Gt=s(An,"A",{href:!0});var nl=r(Gt);ha=i(nl,"PreTrainedModel"),nl.forEach(o),pa=i(An,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),An.forEach(o),ua=c(Y),kt=s(Y,"P",{});var Sn=r(kt);ma=i(Sn,"This model is also a PyTorch "),wt=s(Sn,"A",{href:!0,rel:!0});var sl=r(wt);fa=i(sl,"torch.nn.Module"),sl.forEach(o),ga=i(Sn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sn.forEach(o),_a=c(Y),L=s(Y,"DIV",{class:!0});var Z=r(L);v(Tt.$$.fragment,Z),ba=c(Z),ue=s(Z,"P",{});var so=r(ue);va=i(so,"The "),Vt=s(so,"A",{href:!0});var rl=r(Vt);ka=i(rl,"IBertForTokenClassification"),rl.forEach(o),wa=i(so," forward method, overrides the "),Ro=s(so,"CODE",{});var al=r(Ro);Ta=i(al,"__call__"),al.forEach(o),ya=i(so," special method."),so.forEach(o),Ia=c(Z),v(Fe.$$.fragment,Z),$a=c(Z),No=s(Z,"P",{});var il=r(No);Ba=i(il,"Example:"),il.forEach(o),za=c(Z),v(yt.$$.fragment,Z),Z.forEach(o),Y.forEach(o),fn=c(t),me=s(t,"H2",{class:!0});var Ln=r(me);Ee=s(Ln,"A",{id:!0,class:!0,href:!0});var ll=r(Ee);Do=s(ll,"SPAN",{});var dl=r(Do);v(It.$$.fragment,dl),dl.forEach(o),ll.forEach(o),Ma=c(Ln),Wo=s(Ln,"SPAN",{});var cl=r(Wo);qa=i(cl,"IBertForQuestionAnswering"),cl.forEach(o),Ln.forEach(o),gn=c(t),P=s(t,"DIV",{class:!0});var X=r(P);v($t.$$.fragment,X),Fa=c(X),fe=s(X,"P",{});var ro=r(fe);Ea=i(ro,`I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Oo=s(ro,"CODE",{});var hl=r(Oo);Ca=i(hl,"span start logits"),hl.forEach(o),xa=i(ro," and "),Qo=s(ro,"CODE",{});var pl=r(Qo);Pa=i(pl,"span end logits"),pl.forEach(o),ja=i(ro,")."),ro.forEach(o),Aa=c(X),Bt=s(X,"P",{});var Rn=r(Bt);Sa=i(Rn,"This model inherits from "),Jt=s(Rn,"A",{href:!0});var ul=r(Jt);La=i(ul,"PreTrainedModel"),ul.forEach(o),Ra=i(Rn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rn.forEach(o),Na=c(X),zt=s(X,"P",{});var Nn=r(zt);Da=i(Nn,"This model is also a PyTorch "),Mt=s(Nn,"A",{href:!0,rel:!0});var ml=r(Mt);Wa=i(ml,"torch.nn.Module"),ml.forEach(o),Oa=i(Nn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nn.forEach(o),Qa=c(X),R=s(X,"DIV",{class:!0});var ee=r(R);v(qt.$$.fragment,ee),Ha=c(ee),ge=s(ee,"P",{});var ao=r(ge);Ua=i(ao,"The "),Kt=s(ao,"A",{href:!0});var fl=r(Kt);Ga=i(fl,"IBertForQuestionAnswering"),fl.forEach(o),Va=i(ao," forward method, overrides the "),Ho=s(ao,"CODE",{});var gl=r(Ho);Ja=i(gl,"__call__"),gl.forEach(o),Ka=i(ao," special method."),ao.forEach(o),Ya=c(ee),v(Ce.$$.fragment,ee),Za=c(ee),Uo=s(ee,"P",{});var _l=r(Uo);Xa=i(_l,"Example:"),_l.forEach(o),ei=c(ee),v(Ft.$$.fragment,ee),ee.forEach(o),X.forEach(o),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(ql)),l(_,"id","ibert"),l(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_,"href","#ibert"),l(u,"class","relative group"),l(be,"id","overview"),l(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(be,"href","#overview"),l(te,"class","relative group"),l(Pe,"href","https://arxiv.org/abs/2101.01321"),l(Pe,"rel","nofollow"),l(je,"href","https://huggingface.co/kssteven"),l(je,"rel","nofollow"),l(Ae,"href","https://github.com/kssteven418/I-BERT"),l(Ae,"rel","nofollow"),l(ke,"id","transformers.IBertConfig"),l(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ke,"href","#transformers.IBertConfig"),l(oe,"class","relative group"),l(At,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertModel"),l(St,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Lt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(O,"class","docstring"),l(we,"id","transformers.IBertModel"),l(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(we,"href","#transformers.IBertModel"),l(se,"class","relative group"),l(Rt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Qe,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Qe,"rel","nofollow"),l(Nt,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertModel"),l(j,"class","docstring"),l(M,"class","docstring"),l(ye,"id","transformers.IBertForMaskedLM"),l(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ye,"href","#transformers.IBertForMaskedLM"),l(ae,"class","relative group"),l(Dt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Xe,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Xe,"rel","nofollow"),l(Wt,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMaskedLM"),l(A,"class","docstring"),l(F,"class","docstring"),l($e,"id","transformers.IBertForSequenceClassification"),l($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l($e,"href","#transformers.IBertForSequenceClassification"),l(le,"class","relative group"),l(Ot,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(at,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(at,"rel","nofollow"),l(Qt,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForSequenceClassification"),l(z,"class","docstring"),l(E,"class","docstring"),l(ze,"id","transformers.IBertForMultipleChoice"),l(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ze,"href","#transformers.IBertForMultipleChoice"),l(ce,"class","relative group"),l(Ht,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(mt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(mt,"rel","nofollow"),l(Ut,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForMultipleChoice"),l(S,"class","docstring"),l(C,"class","docstring"),l(qe,"id","transformers.IBertForTokenClassification"),l(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(qe,"href","#transformers.IBertForTokenClassification"),l(pe,"class","relative group"),l(Gt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(wt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(wt,"rel","nofollow"),l(Vt,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForTokenClassification"),l(L,"class","docstring"),l(x,"class","docstring"),l(Ee,"id","transformers.IBertForQuestionAnswering"),l(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ee,"href","#transformers.IBertForQuestionAnswering"),l(me,"class","relative 
group"),l(Jt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Mt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Mt,"rel","nofollow"),l(Kt,"href","/docs/transformers/v4.15.0/en/model_doc/ibert#transformers.IBertForQuestionAnswering"),l(R,"class","docstring"),l(P,"class","docstring")},m(t,p){e(document.head,h),m(t,I,p),m(t,u,p),e(u,_),e(_,$),k(g,$,null),e(u,f),e(u,B),e(B,Dn),m(t,Yo,p),m(t,te,p),e(te,be),e(be,lo),k(xe,lo,null),e(te,Wn),e(te,co),e(co,On),m(t,Zo,p),m(t,ve,p),e(ve,Qn),e(ve,Pe),e(Pe,Hn),e(ve,Un),m(t,Xo,p),m(t,Pt,p),e(Pt,Gn),m(t,en,p),m(t,jt,p),e(jt,ho),e(ho,Vn),m(t,tn,p),m(t,Q,p),e(Q,Jn),e(Q,je),e(je,Kn),e(Q,Yn),e(Q,Ae),e(Ae,Zn),e(Q,Xn),m(t,on,p),m(t,oe,p),e(oe,ke),e(ke,po),k(Se,po,null),e(oe,es),e(oe,uo),e(uo,ts),m(t,nn,p),m(t,O,p),k(Le,O,null),e(O,os),e(O,Re),e(Re,ns),e(Re,At),e(At,ss),e(Re,rs),e(O,as),e(O,ne),e(ne,is),e(ne,St),e(St,ls),e(ne,ds),e(ne,Lt),e(Lt,cs),e(ne,hs),m(t,sn,p),m(t,se,p),e(se,we),e(we,mo),k(Ne,mo,null),e(se,ps),e(se,fo),e(fo,us),m(t,rn,p),m(t,M,p),k(De,M,null),e(M,ms),e(M,go),e(go,fs),e(M,gs),e(M,We),e(We,_s),e(We,Rt),e(Rt,bs),e(We,vs),e(M,ks),e(M,Oe),e(Oe,ws),e(Oe,Qe),e(Qe,Ts),e(Oe,ys),e(M,Is),e(M,He),e(He,$s),e(He,_o),e(_o,Bs),e(He,zs),e(M,Ms),e(M,j),k(Ue,j,null),e(j,qs),e(j,re),e(re,Fs),e(re,Nt),e(Nt,Es),e(re,Cs),e(re,bo),e(bo,xs),e(re,Ps),e(j,js),k(Te,j,null),e(j,As),e(j,vo),e(vo,Ss),e(j,Ls),k(Ge,j,null),m(t,an,p),m(t,ae,p),e(ae,ye),e(ye,ko),k(Ve,ko,null),e(ae,Rs),e(ae,wo),e(wo,Ns),m(t,ln,p),m(t,F,p),k(Je,F,null),e(F,Ds),e(F,Ke),e(Ke,Ws),e(Ke,To),e(To,Os),e(Ke,Qs),e(F,Hs),e(F,Ye),e(Ye,Us),e(Ye,Dt),e(Dt,Gs),e(Ye,Vs),e(F,Js),e(F,Ze),e(Ze,Ks),e(Ze,Xe),e(Xe,Ys),e(Ze,Zs),e(F,Xs),e(F,A),k(et,A,null),e(A,er),e(A,ie),e(ie,tr),e(ie,Wt),e(Wt,or),e(ie,nr),e(ie,yo),e(yo,sr),e(ie,rr),e(A,ar),k(Ie,A,null),e(A,ir),e(A,Io),e(Io,lr),e(A,dr),k(tt,A,null),m(t,dn,p),m(t,le,p),e(le,$e),e($e,$o),k(ot,$o,null),e(le,cr),e(le,Bo),e(Bo,hr),m(t,cn,p),m(t,E,p),k(nt,E,null),e(E,pr),e(E,zo),e(zo,ur),e(E,mr),e(E,st),e(st,fr),e(st,Ot),e(Ot,gr),e(st,_r),e(E,br),e(E,rt),e(rt,vr),e(rt,at),e(at,kr),e(rt,wr),e(E,Tr),e(E,z),k(it,z,null),e(z,yr),e(z,de),e(de,Ir),e(de,Qt),e(Qt,$r),e(de,Br),e(de,Mo),e(Mo,zr),e(de,Mr),e(z,qr),k(Be,z,null),e(z,Fr),e(z,qo),e(qo,Er),e(z,Cr),k(lt,z,null),e(z,xr),e(z,Fo),e(Fo,Pr),e(z,jr),k(dt,z,null),m(t,hn,p),m(t,ce,p),e(ce,ze),e(ze,Eo),k(ct,Eo,null),e(ce,Ar),e(ce,Co),e(Co,Sr),m(t,pn,p),m(t,C,p),k(ht,C,null),e(C,Lr),e(C,xo),e(xo,Rr),e(C,Nr),e(C,pt),e(pt,Dr),e(pt,Ht),e(Ht,Wr),e(pt,Or),e(C,Qr),e(C,ut),e(ut,Hr),e(ut,mt),e(mt,Ur),e(ut,Gr),e(C,Vr),e(C,S),k(ft,S,null),e(S,Jr),e(S,he),e(he,Kr),e(he,Ut),e(Ut,Yr),e(he,Zr),e(he,Po),e(Po,Xr),e(he,ea),e(S,ta),k(Me,S,null),e(S,oa),e(S,jo),e(jo,na),e(S,sa),k(gt,S,null),m(t,un,p),m(t,pe,p),e(pe,qe),e(qe,Ao),k(_t,Ao,null),e(pe,ra),e(pe,So),e(So,aa),m(t,mn,p),m(t,x,p),k(bt,x,null),e(x,ia),e(x,Lo),e(Lo,la),e(x,da),e(x,vt),e(vt,ca),e(vt,Gt),e(Gt,ha),e(vt,pa),e(x,ua),e(x,kt),e(kt,ma),e(kt,wt),e(wt,fa),e(kt,ga),e(x,_a),e(x,L),k(Tt,L,null),e(L,ba),e(L,ue),e(ue,va),e(ue,Vt),e(Vt,ka),e(ue,wa),e(ue,Ro),e(Ro,Ta),e(ue,ya),e(L,Ia),k(Fe,L,null),e(L,$a),e(L,No),e(No,Ba),e(L,za),k(yt,L,null),m(t,fn,p),m(t,me,p),e(me,Ee),e(Ee,Do),k(It,Do,null),e(me,Ma),e(me,Wo),e(Wo,qa),m(t,gn,p),m(t,P,p),k($t,P,null),e(P,Fa),e(P,fe),e(fe,Ea),e(fe,Oo),e(Oo,Ca),e(fe,xa),e(fe,Qo),e(Qo,Pa),e(fe,ja),e(P,Aa),e(P,Bt),e(Bt,Sa),e(Bt,Jt),e(Jt,La),e(Bt,Ra),e(P,Na),e(P,zt),e(zt,Da),e(zt,Mt),e(Mt,Wa),e(zt,Oa),e(P,Qa),e(P,R),k(qt,R,null),e(R,Ha),e(R,ge),e(ge,Ua),e(ge,Kt),e(Kt,Ga),e(ge,Va),e(ge,Ho),e(Ho,Ja),e(ge,Ka),
e(R,Ya),k(Ce,R,null),e(R,Za),e(R,Uo),e(Uo,Xa),e(R,ei),k(Ft,R,null),_n=!0},p(t,[p]){const Et={};p&2&&(Et.$$scope={dirty:p,ctx:t}),Te.$set(Et);const Go={};p&2&&(Go.$$scope={dirty:p,ctx:t}),Ie.$set(Go);const Vo={};p&2&&(Vo.$$scope={dirty:p,ctx:t}),Be.$set(Vo);const Jo={};p&2&&(Jo.$$scope={dirty:p,ctx:t}),Me.$set(Jo);const Ct={};p&2&&(Ct.$$scope={dirty:p,ctx:t}),Fe.$set(Ct);const Ko={};p&2&&(Ko.$$scope={dirty:p,ctx:t}),Ce.$set(Ko)},i(t){_n||(w(g.$$.fragment,t),w(xe.$$.fragment,t),w(Se.$$.fragment,t),w(Le.$$.fragment,t),w(Ne.$$.fragment,t),w(De.$$.fragment,t),w(Ue.$$.fragment,t),w(Te.$$.fragment,t),w(Ge.$$.fragment,t),w(Ve.$$.fragment,t),w(Je.$$.fragment,t),w(et.$$.fragment,t),w(Ie.$$.fragment,t),w(tt.$$.fragment,t),w(ot.$$.fragment,t),w(nt.$$.fragment,t),w(it.$$.fragment,t),w(Be.$$.fragment,t),w(lt.$$.fragment,t),w(dt.$$.fragment,t),w(ct.$$.fragment,t),w(ht.$$.fragment,t),w(ft.$$.fragment,t),w(Me.$$.fragment,t),w(gt.$$.fragment,t),w(_t.$$.fragment,t),w(bt.$$.fragment,t),w(Tt.$$.fragment,t),w(Fe.$$.fragment,t),w(yt.$$.fragment,t),w(It.$$.fragment,t),w($t.$$.fragment,t),w(qt.$$.fragment,t),w(Ce.$$.fragment,t),w(Ft.$$.fragment,t),_n=!0)},o(t){T(g.$$.fragment,t),T(xe.$$.fragment,t),T(Se.$$.fragment,t),T(Le.$$.fragment,t),T(Ne.$$.fragment,t),T(De.$$.fragment,t),T(Ue.$$.fragment,t),T(Te.$$.fragment,t),T(Ge.$$.fragment,t),T(Ve.$$.fragment,t),T(Je.$$.fragment,t),T(et.$$.fragment,t),T(Ie.$$.fragment,t),T(tt.$$.fragment,t),T(ot.$$.fragment,t),T(nt.$$.fragment,t),T(it.$$.fragment,t),T(Be.$$.fragment,t),T(lt.$$.fragment,t),T(dt.$$.fragment,t),T(ct.$$.fragment,t),T(ht.$$.fragment,t),T(ft.$$.fragment,t),T(Me.$$.fragment,t),T(gt.$$.fragment,t),T(_t.$$.fragment,t),T(bt.$$.fragment,t),T(Tt.$$.fragment,t),T(Fe.$$.fragment,t),T(yt.$$.fragment,t),T(It.$$.fragment,t),T($t.$$.fragment,t),T(qt.$$.fragment,t),T(Ce.$$.fragment,t),T(Ft.$$.fragment,t),_n=!1},d(t){o(h),t&&o(I),t&&o(u),y(g),t&&o(Yo),t&&o(te),y(xe),t&&o(Zo),t&&o(ve),t&&o(Xo),t&&o(Pt),t&&o(en),t&&o(jt),t&&o(tn),t&&o(Q),t&&o(on),t&&o(oe),y(Se),t&&o(nn),t&&o(O),y(Le),t&&o(sn),t&&o(se),y(Ne),t&&o(rn),t&&o(M),y(De),y(Ue),y(Te),y(Ge),t&&o(an),t&&o(ae),y(Ve),t&&o(ln),t&&o(F),y(Je),y(et),y(Ie),y(tt),t&&o(dn),t&&o(le),y(ot),t&&o(cn),t&&o(E),y(nt),y(it),y(Be),y(lt),y(dt),t&&o(hn),t&&o(ce),y(ct),t&&o(pn),t&&o(C),y(ht),y(ft),y(Me),y(gt),t&&o(un),t&&o(pe),y(_t),t&&o(mn),t&&o(x),y(bt),y(Tt),y(Fe),y(yt),t&&o(fn),t&&o(me),y(It),t&&o(gn),t&&o(P),y($t),y(qt),y(Ce),y(Ft)}}}const ql={local:"ibert",sections:[{local:"overview",title:"Overview"},{local:"transformers.IBertConfig",title:"IBertConfig"},{local:"transformers.IBertModel",title:"IBertModel"},{local:"transformers.IBertForMaskedLM",title:"IBertForMaskedLM"},{local:"transformers.IBertForSequenceClassification",title:"IBertForSequenceClassification"},{local:"transformers.IBertForMultipleChoice",title:"IBertForMultipleChoice"},{local:"transformers.IBertForTokenClassification",title:"IBertForTokenClassification"},{local:"transformers.IBertForQuestionAnswering",title:"IBertForQuestionAnswering"}],title:"I-BERT"};function Fl(q,h,I){let{fw:u}=h;return q.$$set=_=>{"fw"in _&&I(0,u=_.fw)},[u]}class Sl extends bl{constructor(h){super();vl(this,h,Fl,Ml,kl,{fw:0})}}export{Sl as default,ql as metadata};
9,930
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/deberta.mdx-b415826c.js
import{S as d_,i as c_,s as p_,e as a,k as l,w as k,t as n,L as h_,c as r,d as t,m as d,a as i,x as v,h as s,b as c,J as e,g as f,y as T,q as w,o as y,B as D}from"../../chunks/vendor-b1433968.js";import{T as Ie}from"../../chunks/Tip-c3840994.js";import{D as J}from"../../chunks/Docstring-ff504c58.js";import{C as et}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Pe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function f_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function u_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function m_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function g_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function __(O){let 
h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function b_(O){let h,F,m,g,E,b,_,z,he,X,q,Y,A,ee,fe,I,ue,le,W,B,te,Z,M,x,ne,H,de,se,U,ce,ae,C,me,L,ge,_e,S,K,be,P,ke,Q,oe;return{c(){h=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),E=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),q=a("p"),Y=n("This second option is useful when using "),A=a("code"),ee=n("tf.keras.Model.fit"),fe=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=a("code"),ue=n("model(inputs)"),le=n("."),W=l(),B=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),M=a("ul"),x=a("li"),ne=n("a single Tensor with "),H=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),U=a("code"),ce=n("model(inputs_ids)"),ae=l(),C=a("li"),me=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),ge=n("model([input_ids, attention_mask])"),_e=n(" or "),S=a("code"),K=n("model([input_ids, attention_mask, token_type_ids])"),be=l(),P=a("li"),ke=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var $=i(h);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),m=d(p),g=r(p,"UL",{});var V=i(g);E=r(V,"LI",{});var Ee=i(E);b=s(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),_=d(V),z=r(V,"LI",{});var $e=i(z);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),V.forEach(t),X=d(p),q=r(p,"P",{});var j=i(q);Y=s(j,"This second option is useful when using "),A=r(j,"CODE",{});var pe=i(A);ee=s(pe,"tf.keras.Model.fit"),pe.forEach(t),fe=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(j,"CODE",{});var ve=i(I);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(j,"."),j.forEach(t),W=d(p),B=r(p,"P",{});var ye=i(B);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),Z=d(p),M=r(p,"UL",{});var R=i(M);x=r(R,"LI",{});var G=i(x);ne=s(G,"a single Tensor with "),H=r(G,"CODE",{});var De=i(H);de=s(De,"input_ids"),De.forEach(t),se=s(G," only and nothing else: "),U=r(G,"CODE",{});var Fe=i(U);ce=s(Fe,"model(inputs_ids)"),Fe.forEach(t),G.forEach(t),ae=d(R),C=r(R,"LI",{});var N=i(C);me=s(N,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),L=r(N,"CODE",{});var re=i(L);ge=s(re,"model([input_ids, attention_mask])"),re.forEach(t),_e=s(N," or "),S=r(N,"CODE",{});var we=i(S);K=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),N.forEach(t),be=d(R),P=r(R,"LI",{});var Te=i(P);ke=s(Te,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=r(Te,"CODE",{});var ie=i(Q);oe=s(ie,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ie.forEach(t),Te.forEach(t),R.forEach(t)},m(p,$){f(p,h,$),e(h,F),f(p,m,$),f(p,g,$),e(g,E),e(E,b),e(g,_),e(g,z),e(z,he),f(p,X,$),f(p,q,$),e(q,Y),e(q,A),e(A,ee),e(q,fe),e(q,I),e(I,ue),e(q,le),f(p,W,$),f(p,B,$),e(B,te),f(p,Z,$),f(p,M,$),e(M,x),e(x,ne),e(x,H),e(H,de),e(x,se),e(x,U),e(U,ce),e(M,ae),e(M,C),e(C,me),e(C,L),e(L,ge),e(C,_e),e(C,S),e(S,K),e(M,be),e(M,P),e(P,ke),e(P,Q),e(Q,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(X),p&&t(q),p&&t(W),p&&t(B),p&&t(Z),p&&t(M)}}}function k_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function v_(O){let h,F,m,g,E,b,_,z,he,X,q,Y,A,ee,fe,I,ue,le,W,B,te,Z,M,x,ne,H,de,se,U,ce,ae,C,me,L,ge,_e,S,K,be,P,ke,Q,oe;return{c(){h=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),E=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),q=a("p"),Y=n("This second option is useful when using "),A=a("code"),ee=n("tf.keras.Model.fit"),fe=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=a("code"),ue=n("model(inputs)"),le=n("."),W=l(),B=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),M=a("ul"),x=a("li"),ne=n("a single Tensor with "),H=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),U=a("code"),ce=n("model(inputs_ids)"),ae=l(),C=a("li"),me=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),ge=n("model([input_ids, attention_mask])"),_e=n(" or "),S=a("code"),K=n("model([input_ids, attention_mask, token_type_ids])"),be=l(),P=a("li"),ke=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var $=i(h);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),m=d(p),g=r(p,"UL",{});var V=i(g);E=r(V,"LI",{});var Ee=i(E);b=s(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),_=d(V),z=r(V,"LI",{});var $e=i(z);he=s($e,"having all inputs as a list, tuple or dict in the first positional 
arguments."),$e.forEach(t),V.forEach(t),X=d(p),q=r(p,"P",{});var j=i(q);Y=s(j,"This second option is useful when using "),A=r(j,"CODE",{});var pe=i(A);ee=s(pe,"tf.keras.Model.fit"),pe.forEach(t),fe=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(j,"CODE",{});var ve=i(I);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(j,"."),j.forEach(t),W=d(p),B=r(p,"P",{});var ye=i(B);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),Z=d(p),M=r(p,"UL",{});var R=i(M);x=r(R,"LI",{});var G=i(x);ne=s(G,"a single Tensor with "),H=r(G,"CODE",{});var De=i(H);de=s(De,"input_ids"),De.forEach(t),se=s(G," only and nothing else: "),U=r(G,"CODE",{});var Fe=i(U);ce=s(Fe,"model(inputs_ids)"),Fe.forEach(t),G.forEach(t),ae=d(R),C=r(R,"LI",{});var N=i(C);me=s(N,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(N,"CODE",{});var re=i(L);ge=s(re,"model([input_ids, attention_mask])"),re.forEach(t),_e=s(N," or "),S=r(N,"CODE",{});var we=i(S);K=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),N.forEach(t),be=d(R),P=r(R,"LI",{});var Te=i(P);ke=s(Te,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=r(Te,"CODE",{});var ie=i(Q);oe=s(ie,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ie.forEach(t),Te.forEach(t),R.forEach(t)},m(p,$){f(p,h,$),e(h,F),f(p,m,$),f(p,g,$),e(g,E),e(E,b),e(g,_),e(g,z),e(z,he),f(p,X,$),f(p,q,$),e(q,Y),e(q,A),e(A,ee),e(q,fe),e(q,I),e(I,ue),e(q,le),f(p,W,$),f(p,B,$),e(B,te),f(p,Z,$),f(p,M,$),e(M,x),e(x,ne),e(x,H),e(H,de),e(x,se),e(x,U),e(U,ce),e(M,ae),e(M,C),e(C,me),e(C,L),e(L,ge),e(C,_e),e(C,S),e(S,K),e(M,be),e(M,P),e(P,ke),e(P,Q),e(Q,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(X),p&&t(q),p&&t(W),p&&t(B),p&&t(Z),p&&t(M)}}}function T_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function w_(O){let h,F,m,g,E,b,_,z,he,X,q,Y,A,ee,fe,I,ue,le,W,B,te,Z,M,x,ne,H,de,se,U,ce,ae,C,me,L,ge,_e,S,K,be,P,ke,Q,oe;return{c(){h=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),E=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),q=a("p"),Y=n("This second option is useful when using "),A=a("code"),ee=n("tf.keras.Model.fit"),fe=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=a("code"),ue=n("model(inputs)"),le=n("."),W=l(),B=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),Z=l(),M=a("ul"),x=a("li"),ne=n("a single Tensor with "),H=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),U=a("code"),ce=n("model(inputs_ids)"),ae=l(),C=a("li"),me=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),ge=n("model([input_ids, attention_mask])"),_e=n(" or "),S=a("code"),K=n("model([input_ids, attention_mask, token_type_ids])"),be=l(),P=a("li"),ke=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var $=i(h);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),m=d(p),g=r(p,"UL",{});var V=i(g);E=r(V,"LI",{});var Ee=i(E);b=s(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),_=d(V),z=r(V,"LI",{});var $e=i(z);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),V.forEach(t),X=d(p),q=r(p,"P",{});var j=i(q);Y=s(j,"This second option is useful when using "),A=r(j,"CODE",{});var pe=i(A);ee=s(pe,"tf.keras.Model.fit"),pe.forEach(t),fe=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(j,"CODE",{});var ve=i(I);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(j,"."),j.forEach(t),W=d(p),B=r(p,"P",{});var ye=i(B);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),Z=d(p),M=r(p,"UL",{});var R=i(M);x=r(R,"LI",{});var G=i(x);ne=s(G,"a single Tensor with "),H=r(G,"CODE",{});var De=i(H);de=s(De,"input_ids"),De.forEach(t),se=s(G," only and nothing else: "),U=r(G,"CODE",{});var Fe=i(U);ce=s(Fe,"model(inputs_ids)"),Fe.forEach(t),G.forEach(t),ae=d(R),C=r(R,"LI",{});var N=i(C);me=s(N,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(N,"CODE",{});var re=i(L);ge=s(re,"model([input_ids, attention_mask])"),re.forEach(t),_e=s(N," or "),S=r(N,"CODE",{});var we=i(S);K=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),N.forEach(t),be=d(R),P=r(R,"LI",{});var Te=i(P);ke=s(Te,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=r(Te,"CODE",{});var ie=i(Q);oe=s(ie,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ie.forEach(t),Te.forEach(t),R.forEach(t)},m(p,$){f(p,h,$),e(h,F),f(p,m,$),f(p,g,$),e(g,E),e(E,b),e(g,_),e(g,z),e(z,he),f(p,X,$),f(p,q,$),e(q,Y),e(q,A),e(A,ee),e(q,fe),e(q,I),e(I,ue),e(q,le),f(p,W,$),f(p,B,$),e(B,te),f(p,Z,$),f(p,M,$),e(M,x),e(x,ne),e(x,H),e(H,de),e(x,se),e(x,U),e(U,ce),e(M,ae),e(M,C),e(C,me),e(C,L),e(L,ge),e(C,_e),e(C,S),e(S,K),e(M,be),e(M,P),e(P,ke),e(P,Q),e(Q,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(X),p&&t(q),p&&t(W),p&&t(B),p&&t(Z),p&&t(M)}}}function y_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function D_(O){let h,F,m,g,E,b,_,z,he,X,q,Y,A,ee,fe,I,ue,le,W,B,te,Z,M,x,ne,H,de,se,U,ce,ae,C,me,L,ge,_e,S,K,be,P,ke,Q,oe;return{c(){h=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),E=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),q=a("p"),Y=n("This second option is useful when using "),A=a("code"),ee=n("tf.keras.Model.fit"),fe=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=a("code"),ue=n("model(inputs)"),le=n("."),W=l(),B=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),M=a("ul"),x=a("li"),ne=n("a single Tensor with "),H=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),U=a("code"),ce=n("model(inputs_ids)"),ae=l(),C=a("li"),me=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),ge=n("model([input_ids, attention_mask])"),_e=n(" or "),S=a("code"),K=n("model([input_ids, attention_mask, token_type_ids])"),be=l(),P=a("li"),ke=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var $=i(h);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),m=d(p),g=r(p,"UL",{});var V=i(g);E=r(V,"LI",{});var Ee=i(E);b=s(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),_=d(V),z=r(V,"LI",{});var $e=i(z);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),V.forEach(t),X=d(p),q=r(p,"P",{});var j=i(q);Y=s(j,"This second option is useful when using "),A=r(j,"CODE",{});var pe=i(A);ee=s(pe,"tf.keras.Model.fit"),pe.forEach(t),fe=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(j,"CODE",{});var ve=i(I);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(j,"."),j.forEach(t),W=d(p),B=r(p,"P",{});var ye=i(B);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),Z=d(p),M=r(p,"UL",{});var R=i(M);x=r(R,"LI",{});var G=i(x);ne=s(G,"a single Tensor with "),H=r(G,"CODE",{});var De=i(H);de=s(De,"input_ids"),De.forEach(t),se=s(G," only and nothing else: "),U=r(G,"CODE",{});var Fe=i(U);ce=s(Fe,"model(inputs_ids)"),Fe.forEach(t),G.forEach(t),ae=d(R),C=r(R,"LI",{});var N=i(C);me=s(N,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(N,"CODE",{});var re=i(L);ge=s(re,"model([input_ids, attention_mask])"),re.forEach(t),_e=s(N," or "),S=r(N,"CODE",{});var we=i(S);K=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),N.forEach(t),be=d(R),P=r(R,"LI",{});var Te=i(P);ke=s(Te,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=r(Te,"CODE",{});var ie=i(Q);oe=s(ie,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),ie.forEach(t),Te.forEach(t),R.forEach(t)},m(p,$){f(p,h,$),e(h,F),f(p,m,$),f(p,g,$),e(g,E),e(E,b),e(g,_),e(g,z),e(z,he),f(p,X,$),f(p,q,$),e(q,Y),e(q,A),e(A,ee),e(q,fe),e(q,I),e(I,ue),e(q,le),f(p,W,$),f(p,B,$),e(B,te),f(p,Z,$),f(p,M,$),e(M,x),e(x,ne),e(x,H),e(H,de),e(x,se),e(x,U),e(U,ce),e(M,ae),e(M,C),e(C,me),e(C,L),e(L,ge),e(C,_e),e(C,S),e(S,K),e(M,be),e(M,P),e(P,ke),e(P,Q),e(Q,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(X),p&&t(q),p&&t(W),p&&t(B),p&&t(Z),p&&t(M)}}}function E_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function $_(O){let h,F,m,g,E,b,_,z,he,X,q,Y,A,ee,fe,I,ue,le,W,B,te,Z,M,x,ne,H,de,se,U,ce,ae,C,me,L,ge,_e,S,K,be,P,ke,Q,oe;return{c(){h=a("p"),F=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),E=a("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),q=a("p"),Y=n("This second option is useful when using "),A=a("code"),ee=n("tf.keras.Model.fit"),fe=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=a("code"),ue=n("model(inputs)"),le=n("."),W=l(),B=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),M=a("ul"),x=a("li"),ne=n("a single Tensor with "),H=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),U=a("code"),ce=n("model(inputs_ids)"),ae=l(),C=a("li"),me=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),ge=n("model([input_ids, attention_mask])"),_e=n(" or "),S=a("code"),K=n("model([input_ids, attention_mask, token_type_ids])"),be=l(),P=a("li"),ke=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=a("code"),oe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var $=i(h);F=s($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),m=d(p),g=r(p,"UL",{});var V=i(g);E=r(V,"LI",{});var Ee=i(E);b=s(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),_=d(V),z=r(V,"LI",{});var $e=i(z);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),V.forEach(t),X=d(p),q=r(p,"P",{});var j=i(q);Y=s(j,"This second option is useful when using "),A=r(j,"CODE",{});var pe=i(A);ee=s(pe,"tf.keras.Model.fit"),pe.forEach(t),fe=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(j,"CODE",{});var ve=i(I);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(j,"."),j.forEach(t),W=d(p),B=r(p,"P",{});var ye=i(B);te=s(ye,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),Z=d(p),M=r(p,"UL",{});var R=i(M);x=r(R,"LI",{});var G=i(x);ne=s(G,"a single Tensor with "),H=r(G,"CODE",{});var De=i(H);de=s(De,"input_ids"),De.forEach(t),se=s(G," only and nothing else: "),U=r(G,"CODE",{});var Fe=i(U);ce=s(Fe,"model(inputs_ids)"),Fe.forEach(t),G.forEach(t),ae=d(R),C=r(R,"LI",{});var N=i(C);me=s(N,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(N,"CODE",{});var re=i(L);ge=s(re,"model([input_ids, attention_mask])"),re.forEach(t),_e=s(N," or "),S=r(N,"CODE",{});var we=i(S);K=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),N.forEach(t),be=d(R),P=r(R,"LI",{});var Te=i(P);ke=s(Te,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=r(Te,"CODE",{});var ie=i(Q);oe=s(ie,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ie.forEach(t),Te.forEach(t),R.forEach(t)},m(p,$){f(p,h,$),e(h,F),f(p,m,$),f(p,g,$),e(g,E),e(E,b),e(g,_),e(g,z),e(z,he),f(p,X,$),f(p,q,$),e(q,Y),e(q,A),e(A,ee),e(q,fe),e(q,I),e(I,ue),e(q,le),f(p,W,$),f(p,B,$),e(B,te),f(p,Z,$),f(p,M,$),e(M,x),e(x,ne),e(x,H),e(H,de),e(x,se),e(x,U),e(U,ce),e(M,ae),e(M,C),e(C,me),e(C,L),e(L,ge),e(C,_e),e(C,S),e(S,K),e(M,be),e(M,P),e(P,ke),e(P,Q),e(Q,oe)},d(p){p&&t(h),p&&t(m),p&&t(g),p&&t(X),p&&t(q),p&&t(W),p&&t(B),p&&t(Z),p&&t(M)}}}function F_(O){let h,F,m,g,E;return{c(){h=a("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),E=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var _=i(h);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=i(m);g=s(z,"Module"),z.forEach(t),E=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){f(b,h,_),e(h,F),e(h,m),e(m,g),e(h,E)},d(b){b&&t(h)}}}function z_(O){let 
h,F,m,g,E,b,_,z,he,X,q,Y,A,ee,fe,I,ue,le,W,B,te,Z,M,x,ne,H,de,se,U,ce,ae,C,me,L,ge,_e,S,K,be,P,ke,Q,oe,p,$,V,Ee,$e,j,pe,ve,ye,R,G,De,Fe,N,re,we,Te,ie,Dl,Ns,El,$l,Os,Fl,zl,Ho,ql,Ml,Cl,$t,xl,Ws,Pl,Rl,Hs,jl,Bl,ci,Ft,ao,ya,Uo,Ll,Da,Al,pi,qe,Qo,Il,Ea,Sl,Nl,dt,Go,Ol,$a,Wl,Hl,Ko,Fa,Ul,Ql,za,Gl,Kl,ro,Vo,Vl,zt,Jl,qa,Xl,Yl,Ma,Zl,ed,td,tt,Jo,od,Ca,nd,sd,Xo,ad,qt,rd,xa,id,ld,Pa,dd,cd,pd,Ra,hi,Mt,io,ja,Yo,hd,Ba,fd,fi,Je,Zo,ud,en,md,La,gd,_d,bd,ct,tn,kd,Aa,vd,Td,on,Ia,wd,yd,Sa,Dd,Ed,ot,nn,$d,Na,Fd,zd,sn,qd,Ct,Md,Oa,Cd,xd,Wa,Pd,Rd,ui,xt,lo,Ha,an,jd,Ua,Bd,mi,Xe,rn,Ld,ln,Ad,dn,Id,Sd,Nd,cn,Od,pn,Wd,Hd,Ud,Se,hn,Qd,Pt,Gd,Us,Kd,Vd,Qa,Jd,Xd,Yd,co,Zd,Ga,ec,tc,fn,gi,Rt,po,Ka,un,oc,Va,nc,_i,jt,mn,sc,Ja,ac,bi,Bt,ho,Xa,gn,rc,Ya,ic,ki,Ye,_n,lc,Lt,dc,Za,cc,pc,bn,hc,fc,uc,kn,mc,vn,gc,_c,bc,Ne,Tn,kc,At,vc,Qs,Tc,wc,er,yc,Dc,Ec,fo,$c,tr,Fc,zc,wn,vi,It,uo,or,yn,qc,nr,Mc,Ti,Re,Dn,Cc,sr,xc,Pc,En,Rc,$n,jc,Bc,Lc,Fn,Ac,zn,Ic,Sc,Nc,ze,qn,Oc,St,Wc,Gs,Hc,Uc,ar,Qc,Gc,Kc,mo,Vc,rr,Jc,Xc,Mn,Yc,ir,Zc,ep,Cn,wi,Nt,go,lr,xn,tp,dr,op,yi,je,Pn,np,cr,sp,ap,Rn,rp,jn,ip,lp,dp,Bn,cp,Ln,pp,hp,fp,Oe,An,up,Ot,mp,Ks,gp,_p,pr,bp,kp,vp,_o,Tp,hr,wp,yp,In,Di,Wt,bo,fr,Sn,Dp,ur,Ep,Ei,Be,Nn,$p,Ht,Fp,mr,zp,qp,gr,Mp,Cp,xp,On,Pp,Wn,Rp,jp,Bp,Hn,Lp,Un,Ap,Ip,Sp,We,Qn,Np,Ut,Op,Vs,Wp,Hp,_r,Up,Qp,Gp,ko,Kp,br,Vp,Jp,Gn,$i,Qt,vo,kr,Kn,Xp,vr,Yp,Fi,Le,Vn,Zp,Jn,eh,Xn,th,oh,nh,Yn,sh,Zn,ah,rh,ih,To,lh,He,es,dh,Gt,ch,Js,ph,hh,Tr,fh,uh,mh,wo,gh,wr,_h,bh,ts,zi,Kt,yo,yr,os,kh,Dr,vh,qi,lt,ns,Th,Er,wh,yh,nt,ss,Dh,$r,Eh,$h,as,Fh,Fr,zh,qh,Mh,Ze,Ch,zr,xh,Ph,qr,Rh,jh,Mr,Bh,Lh,Cr,Ah,Ih,Mi,Vt,Do,xr,rs,Sh,Pr,Nh,Ci,Ae,is,Oh,Jt,Wh,Rr,Hh,Uh,ls,Qh,Gh,Kh,ds,Vh,cs,Jh,Xh,Yh,Eo,Zh,Ue,ps,ef,Xt,tf,Xs,of,nf,jr,sf,af,rf,$o,lf,Br,df,cf,hs,xi,Yt,Fo,Lr,fs,pf,Ar,hf,Pi,Me,us,ff,Ir,uf,mf,ms,gf,gs,_f,bf,kf,_s,vf,bs,Tf,wf,yf,zo,Df,Qe,ks,Ef,Zt,$f,Ys,Ff,zf,Sr,qf,Mf,Cf,qo,xf,Nr,Pf,Rf,vs,Ri,eo,Mo,Or,Ts,jf,Wr,Bf,ji,Ce,ws,Lf,Hr,Af,If,ys,Sf,Ds,Nf,Of,Wf,Es,Hf,$s,Uf,Qf,Gf,Co,Kf,Ge,Fs,Vf,to,Jf,Zs,Xf,Yf,Ur,Zf,eu,tu,xo,ou,Qr,nu,su,zs,Bi,oo,Po,Gr,qs,au,Kr,ru,Li,xe,Ms,iu,no,lu,Vr,du,cu,Jr,pu,hu,fu,Cs,uu,xs,mu,gu,_u,Ps,bu,Rs,ku,vu,Tu,Ro,wu,Ke,js,yu,so,Du,ea,Eu,$u,Xr,Fu,zu,qu,jo,Mu,Yr,Cu,xu,Bs,Ai;return b=new Pe({}),ee=new Pe({}),R=new Pe({}),we=new J({props:{name:"class transformers.DebertaConfig",anchor:"transformers.DebertaConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 0"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-07"},{name:"relative_attention",val:" = False"},{name:"max_relative_positions",val:" = -1"},{name:"pad_token_id",val:" = 0"},{name:"position_biased_input",val:" = True"},{name:"pos_att_type",val:" = None"},{name:"pooler_dropout",val:" = 0"},{name:"pooler_hidden_act",val:" = 'gelu'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/configuration_deberta.py#L33",parametersDescription:[{anchor:"transformers.DebertaConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the DeBERTa model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a>.`,name:"vocab_size"},{anchor:"transformers.DebertaConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.DebertaConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.DebertaConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.DebertaConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.DebertaConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code>, <code>&quot;gelu&quot;</code>, <code>&quot;tanh&quot;</code>, <code>&quot;gelu_fast&quot;</code>, <code>&quot;mish&quot;</code>, <code>&quot;linear&quot;</code>, <code>&quot;sigmoid&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.DebertaConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.DebertaConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.DebertaConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.DebertaConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.DebertaConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.DebertaConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.DebertaConfig.relative_attention",description:`<strong>relative_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether use relative position encoding.`,name:"relative_attention"},{anchor:"transformers.DebertaConfig.max_relative_positions",description:`<strong>max_relative_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The range of relative positions <code>[-max_position_embeddings, max_position_embeddings]</code>. Use the same value as <code>max_position_embeddings</code>.`,name:"max_relative_positions"},{anchor:"transformers.DebertaConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The value used to pad input_ids.`,name:"pad_token_id"},{anchor:"transformers.DebertaConfig.position_biased_input",description:`<strong>position_biased_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether add absolute position embedding to content embedding.`,name:"position_biased_input"},{anchor:"transformers.DebertaConfig.pos_att_type",description:`<strong>pos_att_type</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The type of relative position attention, it can be a combination of <code>[&quot;p2c&quot;, &quot;c2p&quot;]</code>, e.g. 
<code>[&quot;p2c&quot;]</code>, <code>[&quot;p2c&quot;, &quot;c2p&quot;]</code>.`,name:"pos_att_type"},{anchor:"transformers.DebertaConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, optional, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"}]}}),Uo=new Pe({}),Qo=new J({props:{name:"class transformers.DebertaTokenizer",anchor:"transformers.DebertaTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"unk_token",val:" = '[UNK]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta.py#L62",parametersDescription:[{anchor:"transformers.DebertaTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.DebertaTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.DebertaTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.DebertaTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.DebertaTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.DebertaTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.DebertaTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),Go=new J({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.DebertaTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta.py#L133",parametersDescription:[{anchor:"transformers.DebertaTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.DebertaTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Vo=new J({props:{name:"get_special_tokens_mask",anchor:"transformers.DebertaTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta.py#L158",parametersDescription:[{anchor:"transformers.DebertaTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.DebertaTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.DebertaTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Jo=new J({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.DebertaTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta.py#L185",parametersDescription:[{anchor:"transformers.DebertaTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.DebertaTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` 
<p><code>List[int]</code></p> `}}),Xo=new et({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Yo=new Pe({}),Zo=new J({props:{name:"class transformers.DebertaTokenizerFast",anchor:"transformers.DebertaTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '[CLS]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"unk_token",val:" = '[UNK]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta_fast.py#L63",parametersDescription:[{anchor:"transformers.DebertaTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.DebertaTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.DebertaTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.DebertaTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.DebertaTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.DebertaTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.DebertaTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),tn=new J({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.DebertaTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta_fast.py#L153",parametersDescription:[{anchor:"transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),nn=new J({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/tokenization_deberta_fast.py#L178",parametersDescription:[{anchor:"transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),sn=new et({props:{code:`0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0 0 | first sequence | second sequence |`}}),an=new Pe({}),rn=new J({props:{name:"class transformers.DebertaModel",anchor:"transformers.DebertaModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L877",parametersDescription:[{anchor:"transformers.DebertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),hn=new J({props:{name:"forward",anchor:"transformers.DebertaModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L901",parametersDescription:[{anchor:"transformers.DebertaModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),co=new Ie({props:{$$slots:{default:[f_]},$$scope:{ctx:O}}}),fn=new et({props:{code:`from transformers import DebertaTokenizer, DebertaModel import torch 
tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base') model = DebertaModel.from_pretrained('microsoft/deberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),un=new Pe({}),mn=new J({props:{name:"class transformers.DebertaPreTrainedModel",anchor:"transformers.DebertaPreTrainedModel",parameters:[{name:"config",val:": PretrainedConfig"},{name:"*inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L783"}}),gn=new Pe({}),_n=new J({props:{name:"class transformers.DebertaForMaskedLM",anchor:"transformers.DebertaForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L989",parametersDescription:[{anchor:"transformers.DebertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tn=new J({props:{name:"forward",anchor:"transformers.DebertaForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1008",parametersDescription:[{anchor:"transformers.DebertaForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),fo=new Ie({props:{$$slots:{default:[u_]},$$scope:{ctx:O}}}),wn=new et({props:{code:`from transformers import DebertaTokenizer, DebertaForMaskedLM import torch tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base') model = DebertaForMaskedLM.from_pretrained('microsoft/deberta-base') inputs = tokenizer("The capital of France is [MASK].", 
return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),yn=new Pe({}),Dn=new J({props:{name:"class transformers.DebertaForSequenceClassification",anchor:"transformers.DebertaForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1123",parametersDescription:[{anchor:"transformers.DebertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qn=new J({props:{name:"forward",anchor:"transformers.DebertaForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1148",parametersDescription:[{anchor:"transformers.DebertaForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),mo=new Ie({props:{$$slots:{default:[m_]},$$scope:{ctx:O}}}),Mn=new et({props:{code:`from transformers import DebertaTokenizer, DebertaForSequenceClassification import torch tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base') model = DebertaForSequenceClassification.from_pretrained('microsoft/deberta-base') inputs = 
tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Cn=new et({props:{code:`from transformers import DebertaTokenizer, DebertaForSequenceClassification import torch tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base') model = DebertaForSequenceClassification.from_pretrained('microsoft/deberta-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),xn=new Pe({}),Pn=new J({props:{name:"class 
transformers.DebertaForTokenClassification",anchor:"transformers.DebertaForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1241",parametersDescription:[{anchor:"transformers.DebertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),An=new J({props:{name:"forward",anchor:"transformers.DebertaForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1255",parametersDescription:[{anchor:"transformers.DebertaForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_o=new Ie({props:{$$slots:{default:[g_]},$$scope:{ctx:O}}}),In=new et({props:{code:`from transformers import DebertaTokenizer, DebertaForTokenClassification import torch tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base') model = DebertaForTokenClassification.from_pretrained('microsoft/deberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Sn=new Pe({}),Nn=new J({props:{name:"class transformers.DebertaForQuestionAnswering",anchor:"transformers.DebertaForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1326",parametersDescription:[{anchor:"transformers.DebertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qn=new J({props:{name:"forward",anchor:"transformers.DebertaForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_deberta.py#L1339",parametersDescription:[{anchor:"transformers.DebertaForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.DebertaForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.DebertaForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.DebertaForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.DebertaForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.DebertaForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DebertaForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DebertaForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DebertaForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.DebertaForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ko=new Ie({props:{$$slots:{default:[__]},$$scope:{ctx:O}}}),Gn=new et({props:{code:`from transformers import DebertaTokenizer, DebertaForQuestionAnswering import torch tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base') model = DebertaForQuestionAnswering.from_pretrained('microsoft/deberta-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = 
model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;microsoft/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Kn=new Pe({}),Vn=new J({props:{name:"class transformers.TFDebertaModel",anchor:"transformers.TFDebertaModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1087",parametersDescription:[{anchor:"transformers.TFDebertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),To=new Ie({props:{$$slots:{default:[b_]},$$scope:{ctx:O}}}),es=new J({props:{name:"call",anchor:"transformers.TFDebertaModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1093",parametersDescription:[{anchor:"transformers.TFDebertaModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaModel.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),wo=new Ie({props:{$$slots:{default:[k_]},$$scope:{ctx:O}}}),ts=new et({props:{code:`from transformers import DebertaTokenizer, TFDebertaModel import tensorflow as tf tokenizer = DebertaTokenizer.from_pretrained('kamalkraj/deberta-base') model = TFDebertaModel.from_pretrained('kamalkraj/deberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaModel.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),os=new Pe({}),ns=new J({props:{name:"class transformers.TFDebertaPreTrainedModel",anchor:"transformers.TFDebertaPreTrainedModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L993"}}),ss=new J({props:{name:"call",anchor:"None",parameters:[{name:"inputs",val:""},{name:"training",val:" = None"},{name:"mask",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/keras/engine/training.py#L450",returnDescription:` <p>A tensor if there is a single output, or a list of tensors if there are more than one outputs.</p> `}}),rs=new Pe({}),is=new J({props:{name:"class transformers.TFDebertaForMaskedLM",anchor:"transformers.TFDebertaForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1149",parametersDescription:[{anchor:"transformers.TFDebertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Eo=new Ie({props:{$$slots:{default:[v_]},$$scope:{ctx:O}}}),ps=new J({props:{name:"call",anchor:"transformers.TFDebertaForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1165",parametersDescription:[{anchor:"transformers.TFDebertaForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, 
<code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaForMaskedLM.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),$o=new Ie({props:{$$slots:{default:[T_]},$$scope:{ctx:O}}}),hs=new et({props:{code:`from 
transformers import DebertaTokenizer, TFDebertaForMaskedLM import tensorflow as tf tokenizer = DebertaTokenizer.from_pretrained('kamalkraj/deberta-base') model = TFDebertaForMaskedLM.from_pretrained('kamalkraj/deberta-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),fs=new Pe({}),us=new J({props:{name:"class transformers.TFDebertaForSequenceClassification",anchor:"transformers.TFDebertaForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1248",parametersDescription:[{anchor:"transformers.TFDebertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zo=new Ie({props:{$$slots:{default:[w_]},$$scope:{ctx:O}}}),ks=new J({props:{name:"call",anchor:"transformers.TFDebertaForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1266",parametersDescription:[{anchor:"transformers.TFDebertaForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaForSequenceClassification.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),qo=new Ie({props:{$$slots:{default:[y_]},$$scope:{ctx:O}}}),vs=new et({props:{code:`from transformers import DebertaTokenizer, TFDebertaForSequenceClassification import tensorflow as tf tokenizer = DebertaTokenizer.from_pretrained('kamalkraj/deberta-base') model = TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ts=new Pe({}),ws=new J({props:{name:"class transformers.TFDebertaForTokenClassification",anchor:"transformers.TFDebertaForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1350",parametersDescription:[{anchor:"transformers.TFDebertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Co=new Ie({props:{$$slots:{default:[D_]},$$scope:{ctx:O}}}),Fs=new J({props:{name:"call",anchor:"transformers.TFDebertaForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1362",parametersDescription:[{anchor:"transformers.TFDebertaForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or 
<code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaForTokenClassification.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),xo=new Ie({props:{$$slots:{default:[E_]},$$scope:{ctx:O}}}),zs=new et({props:{code:`from transformers import DebertaTokenizer, TFDebertaForTokenClassification import tensorflow as tf tokenizer = DebertaTokenizer.from_pretrained('kamalkraj/deberta-base') model = TFDebertaForTokenClassification.from_pretrained('kamalkraj/deberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qs=new Pe({}),Ms=new J({props:{name:"class transformers.TFDebertaForQuestionAnswering",anchor:"transformers.TFDebertaForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1443",parametersDescription:[{anchor:"transformers.TFDebertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ro=new Ie({props:{$$slots:{default:[$_]},$$scope:{ctx:O}}}),js=new J({props:{name:"call",anchor:"transformers.TFDebertaForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/deberta/modeling_tf_deberta.py#L1454",parametersDescription:[{anchor:"transformers.TFDebertaForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.return_dict",description:"<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;\nWhether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.",name:"return_dict"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFDebertaForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),jo=new Ie({props:{$$slots:{default:[F_]},$$scope:{ctx:O}}}),Bs=new et({props:{code:`from transformers import DebertaTokenizer, TFDebertaForQuestionAnswering import tensorflow as tf tokenizer = DebertaTokenizer.from_pretrained('kamalkraj/deberta-base') model = TFDebertaForQuestionAnswering.from_pretrained('kamalkraj/deberta-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = 
outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;kamalkraj/deberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){h=a("meta"),F=l(),m=a("h1"),g=a("a"),E=a("span"),k(b.$$.fragment),_=l(),z=a("span"),he=n("DeBERTa"),X=l(),q=a("h2"),Y=a("a"),A=a("span"),k(ee.$$.fragment),fe=l(),I=a("span"),ue=n("Overview"),le=l(),W=a("p"),B=n("The DeBERTa model was proposed in "),te=a("a"),Z=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),M=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen It is based on Google\u2019s BERT model released in 2018 and Facebook\u2019s RoBERTa model released in 2019.`),x=l(),ne=a("p"),H=n(`It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa.`),de=l(),se=a("p"),U=n("The abstract from the paper is the following:"),ce=l(),ae=a("p"),C=a("em"),me=n(`Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. 
We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at `),L=a("a"),ge=n("https://github.com/microsoft/DeBERTa"),_e=n("."),S=l(),K=a("p"),be=n("This model was contributed by "),P=a("a"),ke=n("DeBERTa"),Q=n(`. This model TF 2.0 implementation was contributed by `),oe=a("a"),p=n("kamalkraj"),$=n(" . The original code can be found "),V=a("a"),Ee=n("here"),$e=n("."),j=l(),pe=a("h2"),ve=a("a"),ye=a("span"),k(R.$$.fragment),G=l(),De=a("span"),Fe=n("DebertaConfig"),N=l(),re=a("div"),k(we.$$.fragment),Te=l(),ie=a("p"),Dl=n("This is the configuration class to store the configuration of a "),Ns=a("a"),El=n("DebertaModel"),$l=n(` or a `),Os=a("a"),Fl=n("TFDebertaModel"),zl=n(`. It is used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa `),Ho=a("a"),ql=n("microsoft/deberta-base"),Ml=n(` architecture.`),Cl=l(),$t=a("p"),xl=n("Configuration objects inherit from "),Ws=a("a"),Pl=n("PretrainedConfig"),Rl=n(` and can be used to control the model outputs. Read the documentation from `),Hs=a("a"),jl=n("PretrainedConfig"),Bl=n(" for more information."),ci=l(),Ft=a("h2"),ao=a("a"),ya=a("span"),k(Uo.$$.fragment),Ll=l(),Da=a("span"),Al=n("DebertaTokenizer"),pi=l(),qe=a("div"),k(Qo.$$.fragment),Il=l(),Ea=a("p"),Sl=n("Constructs a DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece"),Nl=l(),dt=a("div"),k(Go.$$.fragment),Ol=l(),$a=a("p"),Wl=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:`),Hl=l(),Ko=a("ul"),Fa=a("li"),Ul=n("single sequence: [CLS] X [SEP]"),Ql=l(),za=a("li"),Gl=n("pair of sequences: [CLS] A [SEP] B [SEP]"),Kl=l(),ro=a("div"),k(Vo.$$.fragment),Vl=l(),zt=a("p"),Jl=n(`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),qa=a("code"),Xl=n("prepare_for_model"),Yl=n(" or "),Ma=a("code"),Zl=n("encode_plus"),ed=n(" methods."),td=l(),tt=a("div"),k(Jo.$$.fragment),od=l(),Ca=a("p"),nd=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:`),sd=l(),k(Xo.$$.fragment),ad=l(),qt=a("p"),rd=n("If "),xa=a("code"),id=n("token_ids_1"),ld=n(" is "),Pa=a("code"),dd=n("None"),cd=n(", this method only returns the first portion of the mask (0s)."),pd=l(),Ra=a("div"),hi=l(),Mt=a("h2"),io=a("a"),ja=a("span"),k(Yo.$$.fragment),hd=l(),Ba=a("span"),fd=n("DebertaTokenizerFast"),fi=l(),Je=a("div"),k(Zo.$$.fragment),ud=l(),en=a("p"),md=n(`Constructs a \u201Cfast\u201D DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece. 
It is backed by HuggingFace\u2019s `),La=a("em"),gd=n("tokenizers"),_d=n(" library."),bd=l(),ct=a("div"),k(tn.$$.fragment),kd=l(),Aa=a("p"),vd=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:`),Td=l(),on=a("ul"),Ia=a("li"),wd=n("single sequence: [CLS] X [SEP]"),yd=l(),Sa=a("li"),Dd=n("pair of sequences: [CLS] A [SEP] B [SEP]"),Ed=l(),ot=a("div"),k(nn.$$.fragment),$d=l(),Na=a("p"),Fd=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:`),zd=l(),k(sn.$$.fragment),qd=l(),Ct=a("p"),Md=n("If "),Oa=a("code"),Cd=n("token_ids_1"),xd=n(" is "),Wa=a("code"),Pd=n("None"),Rd=n(", this method only returns the first portion of the mask (0s)."),ui=l(),xt=a("h2"),lo=a("a"),Ha=a("span"),k(an.$$.fragment),jd=l(),Ua=a("span"),Bd=n("DebertaModel"),mi=l(),Xe=a("div"),k(rn.$$.fragment),Ld=l(),ln=a("p"),Ad=n(`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),dn=a("a"),Id=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Sd=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Nd=l(),cn=a("p"),Od=n("This model is also a PyTorch "),pn=a("a"),Wd=n("torch.nn.Module"),Hd=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Ud=l(),Se=a("div"),k(hn.$$.fragment),Qd=l(),Pt=a("p"),Gd=n("The "),Us=a("a"),Kd=n("DebertaModel"),Vd=n(" forward method, overrides the "),Qa=a("code"),Jd=n("__call__"),Xd=n(" special method."),Yd=l(),k(co.$$.fragment),Zd=l(),Ga=a("p"),ec=n("Example:"),tc=l(),k(fn.$$.fragment),gi=l(),Rt=a("h2"),po=a("a"),Ka=a("span"),k(un.$$.fragment),oc=l(),Va=a("span"),nc=n("DebertaPreTrainedModel"),_i=l(),jt=a("div"),k(mn.$$.fragment),sc=l(),Ja=a("p"),ac=n(`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),bi=l(),Bt=a("h2"),ho=a("a"),Xa=a("span"),k(gn.$$.fragment),rc=l(),Ya=a("span"),ic=n("DebertaForMaskedLM"),ki=l(),Ye=a("div"),k(_n.$$.fragment),lc=l(),Lt=a("p"),dc=n("DeBERTa Model with a "),Za=a("code"),cc=n("language modeling"),pc=n(` head on top. The DeBERTa model was proposed in `),bn=a("a"),hc=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),fc=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),uc=l(),kn=a("p"),mc=n("This model is also a PyTorch "),vn=a("a"),gc=n("torch.nn.Module"),_c=n("\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),bc=l(),Ne=a("div"),k(Tn.$$.fragment),kc=l(),At=a("p"),vc=n("The "),Qs=a("a"),Tc=n("DebertaForMaskedLM"),wc=n(" forward method, overrides the "),er=a("code"),yc=n("__call__"),Dc=n(" special method."),Ec=l(),k(fo.$$.fragment),$c=l(),tr=a("p"),Fc=n("Example:"),zc=l(),k(wn.$$.fragment),vi=l(),It=a("h2"),uo=a("a"),or=a("span"),k(yn.$$.fragment),qc=l(),nr=a("span"),Mc=n("DebertaForSequenceClassification"),Ti=l(),Re=a("div"),k(Dn.$$.fragment),Cc=l(),sr=a("p"),xc=n(`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Pc=l(),En=a("p"),Rc=n("The DeBERTa model was proposed in "),$n=a("a"),jc=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Bc=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Lc=l(),Fn=a("p"),Ac=n("This model is also a PyTorch "),zn=a("a"),Ic=n("torch.nn.Module"),Sc=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Nc=l(),ze=a("div"),k(qn.$$.fragment),Oc=l(),St=a("p"),Wc=n("The "),Gs=a("a"),Hc=n("DebertaForSequenceClassification"),Uc=n(" forward method, overrides the "),ar=a("code"),Qc=n("__call__"),Gc=n(" special method."),Kc=l(),k(mo.$$.fragment),Vc=l(),rr=a("p"),Jc=n("Example of single-label classification:"),Xc=l(),k(Mn.$$.fragment),Yc=l(),ir=a("p"),Zc=n("Example of multi-label classification:"),ep=l(),k(Cn.$$.fragment),wi=l(),Nt=a("h2"),go=a("a"),lr=a("span"),k(xn.$$.fragment),tp=l(),dr=a("span"),op=n("DebertaForTokenClassification"),yi=l(),je=a("div"),k(Pn.$$.fragment),np=l(),cr=a("p"),sp=n(`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ap=l(),Rn=a("p"),rp=n("The DeBERTa model was proposed in "),jn=a("a"),ip=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),lp=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),dp=l(),Bn=a("p"),cp=n("This model is also a PyTorch "),Ln=a("a"),pp=n("torch.nn.Module"),hp=n("\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),fp=l(),Oe=a("div"),k(An.$$.fragment),up=l(),Ot=a("p"),mp=n("The "),Ks=a("a"),gp=n("DebertaForTokenClassification"),_p=n(" forward method, overrides the "),pr=a("code"),bp=n("__call__"),kp=n(" special method."),vp=l(),k(_o.$$.fragment),Tp=l(),hr=a("p"),wp=n("Example:"),yp=l(),k(In.$$.fragment),Di=l(),Wt=a("h2"),bo=a("a"),fr=a("span"),k(Sn.$$.fragment),Dp=l(),ur=a("span"),Ep=n("DebertaForQuestionAnswering"),Ei=l(),Be=a("div"),k(Nn.$$.fragment),$p=l(),Ht=a("p"),Fp=n(`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),mr=a("code"),zp=n("span start logits"),qp=n(" and "),gr=a("code"),Mp=n("span end logits"),Cp=n(")."),xp=l(),On=a("p"),Pp=n("The DeBERTa model was proposed in "),Wn=a("a"),Rp=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),jp=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Bp=l(),Hn=a("p"),Lp=n("This model is also a PyTorch "),Un=a("a"),Ap=n("torch.nn.Module"),Ip=n("\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Sp=l(),We=a("div"),k(Qn.$$.fragment),Np=l(),Ut=a("p"),Op=n("The "),Vs=a("a"),Wp=n("DebertaForQuestionAnswering"),Hp=n(" forward method, overrides the "),_r=a("code"),Up=n("__call__"),Qp=n(" special method."),Gp=l(),k(ko.$$.fragment),Kp=l(),br=a("p"),Vp=n("Example:"),Jp=l(),k(Gn.$$.fragment),$i=l(),Qt=a("h2"),vo=a("a"),kr=a("span"),k(Kn.$$.fragment),Xp=l(),vr=a("span"),Yp=n("TFDebertaModel"),Fi=l(),Le=a("div"),k(Vn.$$.fragment),Zp=l(),Jn=a("p"),eh=n(`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),Xn=a("a"),th=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),oh=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),nh=l(),Yn=a("p"),sh=n("This model is also a "),Zn=a("a"),ah=n("tf.keras.Model"),rh=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ih=l(),k(To.$$.fragment),lh=l(),He=a("div"),k(es.$$.fragment),dh=l(),Gt=a("p"),ch=n("The "),Js=a("a"),ph=n("TFDebertaModel"),hh=n(" forward method, overrides the "),Tr=a("code"),fh=n("__call__"),uh=n(" special method."),mh=l(),k(wo.$$.fragment),gh=l(),wr=a("p"),_h=n("Example:"),bh=l(),k(ts.$$.fragment),zi=l(),Kt=a("h2"),yo=a("a"),yr=a("span"),k(os.$$.fragment),kh=l(),Dr=a("span"),vh=n("TFDebertaPreTrainedModel"),qi=l(),lt=a("div"),k(ns.$$.fragment),Th=l(),Er=a("p"),wh=n(`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),yh=l(),nt=a("div"),k(ss.$$.fragment),Dh=l(),$r=a("p"),Eh=n("Calls the model on new inputs and returns the outputs as tensors."),$h=l(),as=a("p"),Fh=n("In this case "),Fr=a("code"),zh=n("call()"),qh=n(` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs).`),Mh=l(),Ze=a("p"),Ch=n(`Note: This method should not be called directly. It is only meant to be overridden when subclassing `),zr=a("code"),xh=n("tf.keras.Model"),Ph=n(`. To call a model on an input, always use the `),qr=a("code"),Rh=n("__call__()"),jh=n(` method, i.e. `),Mr=a("code"),Bh=n("model(inputs)"),Lh=n(", which relies on the underlying "),Cr=a("code"),Ah=n("call()"),Ih=n(" method."),Mi=l(),Vt=a("h2"),Do=a("a"),xr=a("span"),k(rs.$$.fragment),Sh=l(),Pr=a("span"),Nh=n("TFDebertaForMaskedLM"),Ci=l(),Ae=a("div"),k(is.$$.fragment),Oh=l(),Jt=a("p"),Wh=n("DeBERTa Model with a "),Rr=a("code"),Hh=n("language modeling"),Uh=n(` head on top. The DeBERTa model was proposed in `),ls=a("a"),Qh=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Gh=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Kh=l(),ds=a("p"),Vh=n("This model is also a "),cs=a("a"),Jh=n("tf.keras.Model"),Xh=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Yh=l(),k(Eo.$$.fragment),Zh=l(),Ue=a("div"),k(ps.$$.fragment),ef=l(),Xt=a("p"),tf=n("The "),Xs=a("a"),of=n("TFDebertaForMaskedLM"),nf=n(" forward method, overrides the "),jr=a("code"),sf=n("__call__"),af=n(" special method."),rf=l(),k($o.$$.fragment),lf=l(),Br=a("p"),df=n("Example:"),cf=l(),k(hs.$$.fragment),xi=l(),Yt=a("h2"),Fo=a("a"),Lr=a("span"),k(fs.$$.fragment),pf=l(),Ar=a("span"),hf=n("TFDebertaForSequenceClassification"),Pi=l(),Me=a("div"),k(us.$$.fragment),ff=l(),Ir=a("p"),uf=n(`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),mf=l(),ms=a("p"),gf=n("The DeBERTa model was proposed in "),gs=a("a"),_f=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),bf=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),kf=l(),_s=a("p"),vf=n("This model is also a "),bs=a("a"),Tf=n("tf.keras.Model"),wf=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yf=l(),k(zo.$$.fragment),Df=l(),Qe=a("div"),k(ks.$$.fragment),Ef=l(),Zt=a("p"),$f=n("The "),Ys=a("a"),Ff=n("TFDebertaForSequenceClassification"),zf=n(" forward method, overrides the "),Sr=a("code"),qf=n("__call__"),Mf=n(" special method."),Cf=l(),k(qo.$$.fragment),xf=l(),Nr=a("p"),Pf=n("Example:"),Rf=l(),k(vs.$$.fragment),Ri=l(),eo=a("h2"),Mo=a("a"),Or=a("span"),k(Ts.$$.fragment),jf=l(),Wr=a("span"),Bf=n("TFDebertaForTokenClassification"),ji=l(),Ce=a("div"),k(ws.$$.fragment),Lf=l(),Hr=a("p"),Af=n(`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),If=l(),ys=a("p"),Sf=n("The DeBERTa model was proposed in "),Ds=a("a"),Nf=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Of=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Wf=l(),Es=a("p"),Hf=n("This model is also a "),$s=a("a"),Uf=n("tf.keras.Model"),Qf=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Gf=l(),k(Co.$$.fragment),Kf=l(),Ge=a("div"),k(Fs.$$.fragment),Vf=l(),to=a("p"),Jf=n("The "),Zs=a("a"),Xf=n("TFDebertaForTokenClassification"),Yf=n(" forward method, overrides the "),Ur=a("code"),Zf=n("__call__"),eu=n(" special method."),tu=l(),k(xo.$$.fragment),ou=l(),Qr=a("p"),nu=n("Example:"),su=l(),k(zs.$$.fragment),Bi=l(),oo=a("h2"),Po=a("a"),Gr=a("span"),k(qs.$$.fragment),au=l(),Kr=a("span"),ru=n("TFDebertaForQuestionAnswering"),Li=l(),xe=a("div"),k(Ms.$$.fragment),iu=l(),no=a("p"),lu=n(`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Vr=a("code"),du=n("span start logits"),cu=n(" and "),Jr=a("code"),pu=n("span end logits"),hu=n(")."),fu=l(),Cs=a("p"),uu=n("The DeBERTa model was proposed in "),xs=a("a"),mu=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),gu=n(` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),_u=l(),Ps=a("p"),bu=n("This model is also a "),Rs=a("a"),ku=n("tf.keras.Model"),vu=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Tu=l(),k(Ro.$$.fragment),wu=l(),Ke=a("div"),k(js.$$.fragment),yu=l(),so=a("p"),Du=n("The "),ea=a("a"),Eu=n("TFDebertaForQuestionAnswering"),$u=n(" forward method, overrides the "),Xr=a("code"),Fu=n("__call__"),zu=n(" special method."),qu=l(),k(jo.$$.fragment),Mu=l(),Yr=a("p"),Cu=n("Example:"),xu=l(),k(Bs.$$.fragment),this.h()},l(o){const u=h_('[data-svelte="svelte-1phssyn"]',document.head);h=r(u,"META",{name:!0,content:!0}),u.forEach(t),F=d(o),m=r(o,"H1",{class:!0});var Ls=i(m);g=r(Ls,"A",{id:!0,class:!0,href:!0});var Zr=i(g);E=r(Zr,"SPAN",{});var ei=i(E);v(b.$$.fragment,ei),ei.forEach(t),Zr.forEach(t),_=d(Ls),z=r(Ls,"SPAN",{});var ti=i(z);he=s(ti,"DeBERTa"),ti.forEach(t),Ls.forEach(t),X=d(o),q=r(o,"H2",{class:!0});var As=i(q);Y=r(As,"A",{id:!0,class:!0,href:!0});var oi=i(Y);A=r(oi,"SPAN",{});var ni=i(A);v(ee.$$.fragment,ni),ni.forEach(t),oi.forEach(t),fe=d(As),I=r(As,"SPAN",{});var si=i(I);ue=s(si,"Overview"),si.forEach(t),As.forEach(t),le=d(o),W=r(o,"P",{});var Is=i(W);B=s(Is,"The DeBERTa model was proposed in "),te=r(Is,"A",{href:!0,rel:!0});var ai=i(te);Z=s(ai,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),ai.forEach(t),M=s(Is,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen It is based on Google\u2019s BERT model released in 2018 and Facebook\u2019s RoBERTa model released in 2019.`),Is.forEach(t),x=d(o),ne=r(o,"P",{});var ri=i(ne);H=s(ri,`It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa.`),ri.forEach(t),de=d(o),se=r(o,"P",{});var ii=i(se);U=s(ii,"The abstract from the paper is the following:"),ii.forEach(t),ce=d(o),ae=r(o,"P",{});var li=i(ae);C=r(li,"EM",{});var Ss=i(C);me=s(Ss,`Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at `),L=r(Ss,"A",{href:!0,rel:!0});var di=i(L);ge=s(di,"https://github.com/microsoft/DeBERTa"),di.forEach(t),_e=s(Ss,"."),Ss.forEach(t),li.forEach(t),S=d(o),K=r(o,"P",{});var Bo=i(K);be=s(Bo,"This model was contributed by "),P=r(Bo,"A",{href:!0,rel:!0});var Pu=i(P);ke=s(Pu,"DeBERTa"),Pu.forEach(t),Q=s(Bo,`. This model TF 2.0 implementation was contributed by `),oe=r(Bo,"A",{href:!0,rel:!0});var Ru=i(oe);p=s(Ru,"kamalkraj"),Ru.forEach(t),$=s(Bo," . 
The original code can be found "),V=r(Bo,"A",{href:!0,rel:!0});var ju=i(V);Ee=s(ju,"here"),ju.forEach(t),$e=s(Bo,"."),Bo.forEach(t),j=d(o),pe=r(o,"H2",{class:!0});var Ii=i(pe);ve=r(Ii,"A",{id:!0,class:!0,href:!0});var Bu=i(ve);ye=r(Bu,"SPAN",{});var Lu=i(ye);v(R.$$.fragment,Lu),Lu.forEach(t),Bu.forEach(t),G=d(Ii),De=r(Ii,"SPAN",{});var Au=i(De);Fe=s(Au,"DebertaConfig"),Au.forEach(t),Ii.forEach(t),N=d(o),re=r(o,"DIV",{class:!0});var ta=i(re);v(we.$$.fragment,ta),Te=d(ta),ie=r(ta,"P",{});var Lo=i(ie);Dl=s(Lo,"This is the configuration class to store the configuration of a "),Ns=r(Lo,"A",{href:!0});var Iu=i(Ns);El=s(Iu,"DebertaModel"),Iu.forEach(t),$l=s(Lo,` or a `),Os=r(Lo,"A",{href:!0});var Su=i(Os);Fl=s(Su,"TFDebertaModel"),Su.forEach(t),zl=s(Lo,`. It is used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa `),Ho=r(Lo,"A",{href:!0,rel:!0});var Nu=i(Ho);ql=s(Nu,"microsoft/deberta-base"),Nu.forEach(t),Ml=s(Lo,` architecture.`),Lo.forEach(t),Cl=d(ta),$t=r(ta,"P",{});var oa=i($t);xl=s(oa,"Configuration objects inherit from "),Ws=r(oa,"A",{href:!0});var Ou=i(Ws);Pl=s(Ou,"PretrainedConfig"),Ou.forEach(t),Rl=s(oa,` and can be used to control the model outputs. Read the documentation from `),Hs=r(oa,"A",{href:!0});var Wu=i(Hs);jl=s(Wu,"PretrainedConfig"),Wu.forEach(t),Bl=s(oa," for more information."),oa.forEach(t),ta.forEach(t),ci=d(o),Ft=r(o,"H2",{class:!0});var Si=i(Ft);ao=r(Si,"A",{id:!0,class:!0,href:!0});var Hu=i(ao);ya=r(Hu,"SPAN",{});var Uu=i(ya);v(Uo.$$.fragment,Uu),Uu.forEach(t),Hu.forEach(t),Ll=d(Si),Da=r(Si,"SPAN",{});var Qu=i(Da);Al=s(Qu,"DebertaTokenizer"),Qu.forEach(t),Si.forEach(t),pi=d(o),qe=r(o,"DIV",{class:!0});var st=i(qe);v(Qo.$$.fragment,st),Il=d(st),Ea=r(st,"P",{});var Gu=i(Ea);Sl=s(Gu,"Constructs a DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece"),Gu.forEach(t),Nl=d(st),dt=r(st,"DIV",{class:!0});var na=i(dt);v(Go.$$.fragment,na),Ol=d(na),$a=r(na,"P",{});var Ku=i($a);Wl=s(Ku,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:`),Ku.forEach(t),Hl=d(na),Ko=r(na,"UL",{});var Ni=i(Ko);Fa=r(Ni,"LI",{});var Vu=i(Fa);Ul=s(Vu,"single sequence: [CLS] X [SEP]"),Vu.forEach(t),Ql=d(Ni),za=r(Ni,"LI",{});var Ju=i(za);Gl=s(Ju,"pair of sequences: [CLS] A [SEP] B [SEP]"),Ju.forEach(t),Ni.forEach(t),na.forEach(t),Kl=d(st),ro=r(st,"DIV",{class:!0});var Oi=i(ro);v(Vo.$$.fragment,Oi),Vl=d(Oi),zt=r(Oi,"P",{});var sa=i(zt);Jl=s(sa,`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),qa=r(sa,"CODE",{});var Xu=i(qa);Xl=s(Xu,"prepare_for_model"),Xu.forEach(t),Yl=s(sa," or "),Ma=r(sa,"CODE",{});var Yu=i(Ma);Zl=s(Yu,"encode_plus"),Yu.forEach(t),ed=s(sa," methods."),sa.forEach(t),Oi.forEach(t),td=d(st),tt=r(st,"DIV",{class:!0});var Ao=i(tt);v(Jo.$$.fragment,Ao),od=d(Ao),Ca=r(Ao,"P",{});var Zu=i(Ca);nd=s(Zu,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A DeBERTa sequence pair mask has the following format:`),Zu.forEach(t),sd=d(Ao),v(Xo.$$.fragment,Ao),ad=d(Ao),qt=r(Ao,"P",{});var aa=i(qt);rd=s(aa,"If "),xa=r(aa,"CODE",{});var em=i(xa);id=s(em,"token_ids_1"),em.forEach(t),ld=s(aa," is "),Pa=r(aa,"CODE",{});var tm=i(Pa);dd=s(tm,"None"),tm.forEach(t),cd=s(aa,", this method only returns the first portion of the mask (0s)."),aa.forEach(t),Ao.forEach(t),pd=d(st),Ra=r(st,"DIV",{class:!0}),i(Ra).forEach(t),st.forEach(t),hi=d(o),Mt=r(o,"H2",{class:!0});var Wi=i(Mt);io=r(Wi,"A",{id:!0,class:!0,href:!0});var om=i(io);ja=r(om,"SPAN",{});var nm=i(ja);v(Yo.$$.fragment,nm),nm.forEach(t),om.forEach(t),hd=d(Wi),Ba=r(Wi,"SPAN",{});var sm=i(Ba);fd=s(sm,"DebertaTokenizerFast"),sm.forEach(t),Wi.forEach(t),fi=d(o),Je=r(o,"DIV",{class:!0});var Io=i(Je);v(Zo.$$.fragment,Io),ud=d(Io),en=r(Io,"P",{});var Hi=i(en);md=s(Hi,`Constructs a \u201Cfast\u201D DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece. It is backed by HuggingFace\u2019s `),La=r(Hi,"EM",{});var am=i(La);gd=s(am,"tokenizers"),am.forEach(t),_d=s(Hi," library."),Hi.forEach(t),bd=d(Io),ct=r(Io,"DIV",{class:!0});var ra=i(ct);v(tn.$$.fragment,ra),kd=d(ra),Aa=r(ra,"P",{});var rm=i(Aa);vd=s(rm,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:`),rm.forEach(t),Td=d(ra),on=r(ra,"UL",{});var Ui=i(on);Ia=r(Ui,"LI",{});var im=i(Ia);wd=s(im,"single sequence: [CLS] X [SEP]"),im.forEach(t),yd=d(Ui),Sa=r(Ui,"LI",{});var lm=i(Sa);Dd=s(lm,"pair of sequences: [CLS] A [SEP] B [SEP]"),lm.forEach(t),Ui.forEach(t),ra.forEach(t),Ed=d(Io),ot=r(Io,"DIV",{class:!0});var So=i(ot);v(nn.$$.fragment,So),$d=d(So),Na=r(So,"P",{});var dm=i(Na);Fd=s(dm,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:`),dm.forEach(t),zd=d(So),v(sn.$$.fragment,So),qd=d(So),Ct=r(So,"P",{});var ia=i(Ct);Md=s(ia,"If "),Oa=r(ia,"CODE",{});var cm=i(Oa);Cd=s(cm,"token_ids_1"),cm.forEach(t),xd=s(ia," is "),Wa=r(ia,"CODE",{});var pm=i(Wa);Pd=s(pm,"None"),pm.forEach(t),Rd=s(ia,", this method only returns the first portion of the mask (0s)."),ia.forEach(t),So.forEach(t),Io.forEach(t),ui=d(o),xt=r(o,"H2",{class:!0});var Qi=i(xt);lo=r(Qi,"A",{id:!0,class:!0,href:!0});var hm=i(lo);Ha=r(hm,"SPAN",{});var fm=i(Ha);v(an.$$.fragment,fm),fm.forEach(t),hm.forEach(t),jd=d(Qi),Ua=r(Qi,"SPAN",{});var um=i(Ua);Bd=s(um,"DebertaModel"),um.forEach(t),Qi.forEach(t),mi=d(o),Xe=r(o,"DIV",{class:!0});var No=i(Xe);v(rn.$$.fragment,No),Ld=d(No),ln=r(No,"P",{});var Gi=i(ln);Ad=s(Gi,`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),dn=r(Gi,"A",{href:!0,rel:!0});var mm=i(dn);Id=s(mm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),mm.forEach(t),Sd=s(Gi,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),Gi.forEach(t),Nd=d(No),cn=r(No,"P",{});var Ki=i(cn);Od=s(Ki,"This model is also a PyTorch "),pn=r(Ki,"A",{href:!0,rel:!0});var gm=i(pn);Wd=s(gm,"torch.nn.Module"),gm.forEach(t),Hd=s(Ki,"\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Ki.forEach(t),Ud=d(No),Se=r(No,"DIV",{class:!0});var pt=i(Se);v(hn.$$.fragment,pt),Qd=d(pt),Pt=r(pt,"P",{});var la=i(Pt);Gd=s(la,"The "),Us=r(la,"A",{href:!0});var _m=i(Us);Kd=s(_m,"DebertaModel"),_m.forEach(t),Vd=s(la," forward method, overrides the "),Qa=r(la,"CODE",{});var bm=i(Qa);Jd=s(bm,"__call__"),bm.forEach(t),Xd=s(la," special method."),la.forEach(t),Yd=d(pt),v(co.$$.fragment,pt),Zd=d(pt),Ga=r(pt,"P",{});var km=i(Ga);ec=s(km,"Example:"),km.forEach(t),tc=d(pt),v(fn.$$.fragment,pt),pt.forEach(t),No.forEach(t),gi=d(o),Rt=r(o,"H2",{class:!0});var Vi=i(Rt);po=r(Vi,"A",{id:!0,class:!0,href:!0});var vm=i(po);Ka=r(vm,"SPAN",{});var Tm=i(Ka);v(un.$$.fragment,Tm),Tm.forEach(t),vm.forEach(t),oc=d(Vi),Va=r(Vi,"SPAN",{});var wm=i(Va);nc=s(wm,"DebertaPreTrainedModel"),wm.forEach(t),Vi.forEach(t),_i=d(o),jt=r(o,"DIV",{class:!0});var Ji=i(jt);v(mn.$$.fragment,Ji),sc=d(Ji),Ja=r(Ji,"P",{});var ym=i(Ja);ac=s(ym,`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),ym.forEach(t),Ji.forEach(t),bi=d(o),Bt=r(o,"H2",{class:!0});var Xi=i(Bt);ho=r(Xi,"A",{id:!0,class:!0,href:!0});var Dm=i(ho);Xa=r(Dm,"SPAN",{});var Em=i(Xa);v(gn.$$.fragment,Em),Em.forEach(t),Dm.forEach(t),rc=d(Xi),Ya=r(Xi,"SPAN",{});var $m=i(Ya);ic=s($m,"DebertaForMaskedLM"),$m.forEach(t),Xi.forEach(t),ki=d(o),Ye=r(o,"DIV",{class:!0});var Oo=i(Ye);v(_n.$$.fragment,Oo),lc=d(Oo),Lt=r(Oo,"P",{});var da=i(Lt);dc=s(da,"DeBERTa Model with a "),Za=r(da,"CODE",{});var Fm=i(Za);cc=s(Fm,"language modeling"),Fm.forEach(t),pc=s(da,` head on top. The DeBERTa model was proposed in `),bn=r(da,"A",{href:!0,rel:!0});var zm=i(bn);hc=s(zm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),zm.forEach(t),fc=s(da,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),da.forEach(t),uc=d(Oo),kn=r(Oo,"P",{});var Yi=i(kn);mc=s(Yi,"This model is also a PyTorch "),vn=r(Yi,"A",{href:!0,rel:!0});var qm=i(vn);gc=s(qm,"torch.nn.Module"),qm.forEach(t),_c=s(Yi,"\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),Yi.forEach(t),bc=d(Oo),Ne=r(Oo,"DIV",{class:!0});var ht=i(Ne);v(Tn.$$.fragment,ht),kc=d(ht),At=r(ht,"P",{});var ca=i(At);vc=s(ca,"The "),Qs=r(ca,"A",{href:!0});var Mm=i(Qs);Tc=s(Mm,"DebertaForMaskedLM"),Mm.forEach(t),wc=s(ca," forward method, overrides the "),er=r(ca,"CODE",{});var Cm=i(er);yc=s(Cm,"__call__"),Cm.forEach(t),Dc=s(ca," special method."),ca.forEach(t),Ec=d(ht),v(fo.$$.fragment,ht),$c=d(ht),tr=r(ht,"P",{});var xm=i(tr);Fc=s(xm,"Example:"),xm.forEach(t),zc=d(ht),v(wn.$$.fragment,ht),ht.forEach(t),Oo.forEach(t),vi=d(o),It=r(o,"H2",{class:!0});var Zi=i(It);uo=r(Zi,"A",{id:!0,class:!0,href:!0});var Pm=i(uo);or=r(Pm,"SPAN",{});var Rm=i(or);v(yn.$$.fragment,Rm),Rm.forEach(t),Pm.forEach(t),qc=d(Zi),nr=r(Zi,"SPAN",{});var jm=i(nr);Mc=s(jm,"DebertaForSequenceClassification"),jm.forEach(t),Zi.forEach(t),Ti=d(o),Re=r(o,"DIV",{class:!0});var ft=i(Re);v(Dn.$$.fragment,ft),Cc=d(ft),sr=r(ft,"P",{});var Bm=i(sr);xc=s(Bm,`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Bm.forEach(t),Pc=d(ft),En=r(ft,"P",{});var el=i(En);Rc=s(el,"The DeBERTa model was proposed in "),$n=r(el,"A",{href:!0,rel:!0});var Lm=i($n);jc=s(Lm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Lm.forEach(t),Bc=s(el,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),el.forEach(t),Lc=d(ft),Fn=r(ft,"P",{});var tl=i(Fn);Ac=s(tl,"This model is also a PyTorch "),zn=r(tl,"A",{href:!0,rel:!0});var Am=i(zn);Ic=s(Am,"torch.nn.Module"),Am.forEach(t),Sc=s(tl,"\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),tl.forEach(t),Nc=d(ft),ze=r(ft,"DIV",{class:!0});var Ve=i(ze);v(qn.$$.fragment,Ve),Oc=d(Ve),St=r(Ve,"P",{});var pa=i(St);Wc=s(pa,"The "),Gs=r(pa,"A",{href:!0});var Im=i(Gs);Hc=s(Im,"DebertaForSequenceClassification"),Im.forEach(t),Uc=s(pa," forward method, overrides the "),ar=r(pa,"CODE",{});var Sm=i(ar);Qc=s(Sm,"__call__"),Sm.forEach(t),Gc=s(pa," special method."),pa.forEach(t),Kc=d(Ve),v(mo.$$.fragment,Ve),Vc=d(Ve),rr=r(Ve,"P",{});var Nm=i(rr);Jc=s(Nm,"Example of single-label classification:"),Nm.forEach(t),Xc=d(Ve),v(Mn.$$.fragment,Ve),Yc=d(Ve),ir=r(Ve,"P",{});var Om=i(ir);Zc=s(Om,"Example of multi-label classification:"),Om.forEach(t),ep=d(Ve),v(Cn.$$.fragment,Ve),Ve.forEach(t),ft.forEach(t),wi=d(o),Nt=r(o,"H2",{class:!0});var ol=i(Nt);go=r(ol,"A",{id:!0,class:!0,href:!0});var Wm=i(go);lr=r(Wm,"SPAN",{});var Hm=i(lr);v(xn.$$.fragment,Hm),Hm.forEach(t),Wm.forEach(t),tp=d(ol),dr=r(ol,"SPAN",{});var Um=i(dr);op=s(Um,"DebertaForTokenClassification"),Um.forEach(t),ol.forEach(t),yi=d(o),je=r(o,"DIV",{class:!0});var ut=i(je);v(Pn.$$.fragment,ut),np=d(ut),cr=r(ut,"P",{});var Qm=i(cr);sp=s(Qm,`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),Qm.forEach(t),ap=d(ut),Rn=r(ut,"P",{});var nl=i(Rn);rp=s(nl,"The DeBERTa model was proposed in "),jn=r(nl,"A",{href:!0,rel:!0});var Gm=i(jn);ip=s(Gm,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Gm.forEach(t),lp=s(nl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),nl.forEach(t),dp=d(ut),Bn=r(ut,"P",{});var sl=i(Bn);cp=s(sl,"This model is also a PyTorch "),Ln=r(sl,"A",{href:!0,rel:!0});var Km=i(Ln);pp=s(Km,"torch.nn.Module"),Km.forEach(t),hp=s(sl,"\nsubclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),sl.forEach(t),fp=d(ut),Oe=r(ut,"DIV",{class:!0});var mt=i(Oe);v(An.$$.fragment,mt),up=d(mt),Ot=r(mt,"P",{});var ha=i(Ot);mp=s(ha,"The "),Ks=r(ha,"A",{href:!0});var Vm=i(Ks);gp=s(Vm,"DebertaForTokenClassification"),Vm.forEach(t),_p=s(ha," forward method, overrides the "),pr=r(ha,"CODE",{});var Jm=i(pr);bp=s(Jm,"__call__"),Jm.forEach(t),kp=s(ha," special method."),ha.forEach(t),vp=d(mt),v(_o.$$.fragment,mt),Tp=d(mt),hr=r(mt,"P",{});var Xm=i(hr);wp=s(Xm,"Example:"),Xm.forEach(t),yp=d(mt),v(In.$$.fragment,mt),mt.forEach(t),ut.forEach(t),Di=d(o),Wt=r(o,"H2",{class:!0});var al=i(Wt);bo=r(al,"A",{id:!0,class:!0,href:!0});var Ym=i(bo);fr=r(Ym,"SPAN",{});var Zm=i(fr);v(Sn.$$.fragment,Zm),Zm.forEach(t),Ym.forEach(t),Dp=d(al),ur=r(al,"SPAN",{});var eg=i(ur);Ep=s(eg,"DebertaForQuestionAnswering"),eg.forEach(t),al.forEach(t),Ei=d(o),Be=r(o,"DIV",{class:!0});var gt=i(Be);v(Nn.$$.fragment,gt),$p=d(gt),Ht=r(gt,"P",{});var fa=i(Ht);Fp=s(fa,`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),mr=r(fa,"CODE",{});var tg=i(mr);zp=s(tg,"span start logits"),tg.forEach(t),qp=s(fa," and "),gr=r(fa,"CODE",{});var og=i(gr);Mp=s(og,"span end logits"),og.forEach(t),Cp=s(fa,")."),fa.forEach(t),xp=d(gt),On=r(gt,"P",{});var rl=i(On);Pp=s(rl,"The DeBERTa model was proposed in "),Wn=r(rl,"A",{href:!0,rel:!0});var ng=i(Wn);Rp=s(ng,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),ng.forEach(t),jp=s(rl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),rl.forEach(t),Bp=d(gt),Hn=r(gt,"P",{});var il=i(Hn);Lp=s(il,"This model is also a PyTorch "),Un=r(il,"A",{href:!0,rel:!0});var sg=i(Un);Ap=s(sg,"torch.nn.Module"),sg.forEach(t),Ip=s(il,"\nsubclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\ngeneral usage and behavior.```"),il.forEach(t),Sp=d(gt),We=r(gt,"DIV",{class:!0});var _t=i(We);v(Qn.$$.fragment,_t),Np=d(_t),Ut=r(_t,"P",{});var ua=i(Ut);Op=s(ua,"The "),Vs=r(ua,"A",{href:!0});var ag=i(Vs);Wp=s(ag,"DebertaForQuestionAnswering"),ag.forEach(t),Hp=s(ua," forward method, overrides the "),_r=r(ua,"CODE",{});var rg=i(_r);Up=s(rg,"__call__"),rg.forEach(t),Qp=s(ua," special method."),ua.forEach(t),Gp=d(_t),v(ko.$$.fragment,_t),Kp=d(_t),br=r(_t,"P",{});var ig=i(br);Vp=s(ig,"Example:"),ig.forEach(t),Jp=d(_t),v(Gn.$$.fragment,_t),_t.forEach(t),gt.forEach(t),$i=d(o),Qt=r(o,"H2",{class:!0});var ll=i(Qt);vo=r(ll,"A",{id:!0,class:!0,href:!0});var lg=i(vo);kr=r(lg,"SPAN",{});var dg=i(kr);v(Kn.$$.fragment,dg),dg.forEach(t),lg.forEach(t),Xp=d(ll),vr=r(ll,"SPAN",{});var cg=i(vr);Yp=s(cg,"TFDebertaModel"),cg.forEach(t),ll.forEach(t),Fi=d(o),Le=r(o,"DIV",{class:!0});var bt=i(Le);v(Vn.$$.fragment,bt),Zp=d(bt),Jn=r(bt,"P",{});var dl=i(Jn);eh=s(dl,`The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in `),Xn=r(dl,"A",{href:!0,rel:!0});var pg=i(Xn);th=s(pg,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),pg.forEach(t),oh=s(dl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),dl.forEach(t),nh=d(bt),Yn=r(bt,"P",{});var cl=i(Yn);sh=s(cl,"This model is also a "),Zn=r(cl,"A",{href:!0,rel:!0});var hg=i(Zn);ah=s(hg,"tf.keras.Model"),hg.forEach(t),rh=s(cl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cl.forEach(t),ih=d(bt),v(To.$$.fragment,bt),lh=d(bt),He=r(bt,"DIV",{class:!0});var kt=i(He);v(es.$$.fragment,kt),dh=d(kt),Gt=r(kt,"P",{});var ma=i(Gt);ch=s(ma,"The "),Js=r(ma,"A",{href:!0});var fg=i(Js);ph=s(fg,"TFDebertaModel"),fg.forEach(t),hh=s(ma," forward method, overrides the "),Tr=r(ma,"CODE",{});var ug=i(Tr);fh=s(ug,"__call__"),ug.forEach(t),uh=s(ma," special method."),ma.forEach(t),mh=d(kt),v(wo.$$.fragment,kt),gh=d(kt),wr=r(kt,"P",{});var mg=i(wr);_h=s(mg,"Example:"),mg.forEach(t),bh=d(kt),v(ts.$$.fragment,kt),kt.forEach(t),bt.forEach(t),zi=d(o),Kt=r(o,"H2",{class:!0});var pl=i(Kt);yo=r(pl,"A",{id:!0,class:!0,href:!0});var gg=i(yo);yr=r(gg,"SPAN",{});var _g=i(yr);v(os.$$.fragment,_g),_g.forEach(t),gg.forEach(t),kh=d(pl),Dr=r(pl,"SPAN",{});var bg=i(Dr);vh=s(bg,"TFDebertaPreTrainedModel"),bg.forEach(t),pl.forEach(t),qi=d(o),lt=r(o,"DIV",{class:!0});var ga=i(lt);v(ns.$$.fragment,ga),Th=d(ga),Er=r(ga,"P",{});var kg=i(Er);wh=s(kg,`An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.`),kg.forEach(t),yh=d(ga),nt=r(ga,"DIV",{class:!0});var Wo=i(nt);v(ss.$$.fragment,Wo),Dh=d(Wo),$r=r(Wo,"P",{});var vg=i($r);Eh=s(vg,"Calls the model on new inputs and returns the outputs as tensors."),vg.forEach(t),$h=d(Wo),as=r(Wo,"P",{});var hl=i(as);Fh=s(hl,"In this case "),Fr=r(hl,"CODE",{});var Tg=i(Fr);zh=s(Tg,"call()"),Tg.forEach(t),qh=s(hl,` just reapplies all ops in the graph to the new inputs (e.g. 
build a new computational graph from the provided inputs).`),hl.forEach(t),Mh=d(Wo),Ze=r(Wo,"P",{});var vt=i(Ze);Ch=s(vt,`Note: This method should not be called directly. It is only meant to be overridden when subclassing `),zr=r(vt,"CODE",{});var wg=i(zr);xh=s(wg,"tf.keras.Model"),wg.forEach(t),Ph=s(vt,`. To call a model on an input, always use the `),qr=r(vt,"CODE",{});var yg=i(qr);Rh=s(yg,"__call__()"),yg.forEach(t),jh=s(vt,` method, i.e. `),Mr=r(vt,"CODE",{});var Dg=i(Mr);Bh=s(Dg,"model(inputs)"),Dg.forEach(t),Lh=s(vt,", which relies on the underlying "),Cr=r(vt,"CODE",{});var Eg=i(Cr);Ah=s(Eg,"call()"),Eg.forEach(t),Ih=s(vt," method."),vt.forEach(t),Wo.forEach(t),ga.forEach(t),Mi=d(o),Vt=r(o,"H2",{class:!0});var fl=i(Vt);Do=r(fl,"A",{id:!0,class:!0,href:!0});var $g=i(Do);xr=r($g,"SPAN",{});var Fg=i(xr);v(rs.$$.fragment,Fg),Fg.forEach(t),$g.forEach(t),Sh=d(fl),Pr=r(fl,"SPAN",{});var zg=i(Pr);Nh=s(zg,"TFDebertaForMaskedLM"),zg.forEach(t),fl.forEach(t),Ci=d(o),Ae=r(o,"DIV",{class:!0});var Tt=i(Ae);v(is.$$.fragment,Tt),Oh=d(Tt),Jt=r(Tt,"P",{});var _a=i(Jt);Wh=s(_a,"DeBERTa Model with a "),Rr=r(_a,"CODE",{});var qg=i(Rr);Hh=s(qg,"language modeling"),qg.forEach(t),Uh=s(_a,` head on top. The DeBERTa model was proposed in `),ls=r(_a,"A",{href:!0,rel:!0});var Mg=i(ls);Qh=s(Mg,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Mg.forEach(t),Gh=s(_a,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),_a.forEach(t),Kh=d(Tt),ds=r(Tt,"P",{});var ul=i(ds);Vh=s(ul,"This model is also a "),cs=r(ul,"A",{href:!0,rel:!0});var Cg=i(cs);Jh=s(Cg,"tf.keras.Model"),Cg.forEach(t),Xh=s(ul,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ul.forEach(t),Yh=d(Tt),v(Eo.$$.fragment,Tt),Zh=d(Tt),Ue=r(Tt,"DIV",{class:!0});var wt=i(Ue);v(ps.$$.fragment,wt),ef=d(wt),Xt=r(wt,"P",{});var ba=i(Xt);tf=s(ba,"The "),Xs=r(ba,"A",{href:!0});var xg=i(Xs);of=s(xg,"TFDebertaForMaskedLM"),xg.forEach(t),nf=s(ba," forward method, overrides the "),jr=r(ba,"CODE",{});var Pg=i(jr);sf=s(Pg,"__call__"),Pg.forEach(t),af=s(ba," special method."),ba.forEach(t),rf=d(wt),v($o.$$.fragment,wt),lf=d(wt),Br=r(wt,"P",{});var Rg=i(Br);df=s(Rg,"Example:"),Rg.forEach(t),cf=d(wt),v(hs.$$.fragment,wt),wt.forEach(t),Tt.forEach(t),xi=d(o),Yt=r(o,"H2",{class:!0});var ml=i(Yt);Fo=r(ml,"A",{id:!0,class:!0,href:!0});var jg=i(Fo);Lr=r(jg,"SPAN",{});var Bg=i(Lr);v(fs.$$.fragment,Bg),Bg.forEach(t),jg.forEach(t),pf=d(ml),Ar=r(ml,"SPAN",{});var Lg=i(Ar);hf=s(Lg,"TFDebertaForSequenceClassification"),Lg.forEach(t),ml.forEach(t),Pi=d(o),Me=r(o,"DIV",{class:!0});var at=i(Me);v(us.$$.fragment,at),ff=d(at),Ir=r(at,"P",{});var Ag=i(Ir);uf=s(Ag,`DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ag.forEach(t),mf=d(at),ms=r(at,"P",{});var gl=i(ms);gf=s(gl,"The DeBERTa model was proposed in "),gs=r(gl,"A",{href:!0,rel:!0});var Ig=i(gs);_f=s(Ig,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Ig.forEach(t),bf=s(gl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. 
With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),gl.forEach(t),kf=d(at),_s=r(at,"P",{});var _l=i(_s);vf=s(_l,"This model is also a "),bs=r(_l,"A",{href:!0,rel:!0});var Sg=i(bs);Tf=s(Sg,"tf.keras.Model"),Sg.forEach(t),wf=s(_l,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_l.forEach(t),yf=d(at),v(zo.$$.fragment,at),Df=d(at),Qe=r(at,"DIV",{class:!0});var yt=i(Qe);v(ks.$$.fragment,yt),Ef=d(yt),Zt=r(yt,"P",{});var ka=i(Zt);$f=s(ka,"The "),Ys=r(ka,"A",{href:!0});var Ng=i(Ys);Ff=s(Ng,"TFDebertaForSequenceClassification"),Ng.forEach(t),zf=s(ka," forward method, overrides the "),Sr=r(ka,"CODE",{});var Og=i(Sr);qf=s(Og,"__call__"),Og.forEach(t),Mf=s(ka," special method."),ka.forEach(t),Cf=d(yt),v(qo.$$.fragment,yt),xf=d(yt),Nr=r(yt,"P",{});var Wg=i(Nr);Pf=s(Wg,"Example:"),Wg.forEach(t),Rf=d(yt),v(vs.$$.fragment,yt),yt.forEach(t),at.forEach(t),Ri=d(o),eo=r(o,"H2",{class:!0});var bl=i(eo);Mo=r(bl,"A",{id:!0,class:!0,href:!0});var Hg=i(Mo);Or=r(Hg,"SPAN",{});var Ug=i(Or);v(Ts.$$.fragment,Ug),Ug.forEach(t),Hg.forEach(t),jf=d(bl),Wr=r(bl,"SPAN",{});var Qg=i(Wr);Bf=s(Qg,"TFDebertaForTokenClassification"),Qg.forEach(t),bl.forEach(t),ji=d(o),Ce=r(o,"DIV",{class:!0});var rt=i(Ce);v(ws.$$.fragment,rt),Lf=d(rt),Hr=r(rt,"P",{});var Gg=i(Hr);Af=s(Gg,`DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Gg.forEach(t),If=d(rt),ys=r(rt,"P",{});var kl=i(ys);Sf=s(kl,"The DeBERTa model was proposed in "),Ds=r(kl,"A",{href:!0,rel:!0});var Kg=i(Ds);Nf=s(Kg,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),Kg.forEach(t),Of=s(kl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),kl.forEach(t),Wf=d(rt),Es=r(rt,"P",{});var vl=i(Es);Hf=s(vl,"This model is also a "),$s=r(vl,"A",{href:!0,rel:!0});var Vg=i($s);Uf=s(Vg,"tf.keras.Model"),Vg.forEach(t),Qf=s(vl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vl.forEach(t),Gf=d(rt),v(Co.$$.fragment,rt),Kf=d(rt),Ge=r(rt,"DIV",{class:!0});var Dt=i(Ge);v(Fs.$$.fragment,Dt),Vf=d(Dt),to=r(Dt,"P",{});var va=i(to);Jf=s(va,"The "),Zs=r(va,"A",{href:!0});var Jg=i(Zs);Xf=s(Jg,"TFDebertaForTokenClassification"),Jg.forEach(t),Yf=s(va," forward method, overrides the "),Ur=r(va,"CODE",{});var Xg=i(Ur);Zf=s(Xg,"__call__"),Xg.forEach(t),eu=s(va," special method."),va.forEach(t),tu=d(Dt),v(xo.$$.fragment,Dt),ou=d(Dt),Qr=r(Dt,"P",{});var Yg=i(Qr);nu=s(Yg,"Example:"),Yg.forEach(t),su=d(Dt),v(zs.$$.fragment,Dt),Dt.forEach(t),rt.forEach(t),Bi=d(o),oo=r(o,"H2",{class:!0});var Tl=i(oo);Po=r(Tl,"A",{id:!0,class:!0,href:!0});var Zg=i(Po);Gr=r(Zg,"SPAN",{});var e_=i(Gr);v(qs.$$.fragment,e_),e_.forEach(t),Zg.forEach(t),au=d(Tl),Kr=r(Tl,"SPAN",{});var t_=i(Kr);ru=s(t_,"TFDebertaForQuestionAnswering"),t_.forEach(t),Tl.forEach(t),Li=d(o),xe=r(o,"DIV",{class:!0});var it=i(xe);v(Ms.$$.fragment,it),iu=d(it),no=r(it,"P",{});var Ta=i(no);lu=s(Ta,`DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Vr=r(Ta,"CODE",{});var o_=i(Vr);du=s(o_,"span start logits"),o_.forEach(t),cu=s(Ta," and "),Jr=r(Ta,"CODE",{});var n_=i(Jr);pu=s(n_,"span end logits"),n_.forEach(t),hu=s(Ta,")."),Ta.forEach(t),fu=d(it),Cs=r(it,"P",{});var wl=i(Cs);uu=s(wl,"The DeBERTa model was proposed in "),xs=r(wl,"A",{href:!0,rel:!0});var s_=i(xs);mu=s(s_,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),s_.forEach(t),gu=s(wl,` by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\u2019s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.`),wl.forEach(t),_u=d(it),Ps=r(it,"P",{});var yl=i(Ps);bu=s(yl,"This model is also a "),Rs=r(yl,"A",{href:!0,rel:!0});var a_=i(Rs);ku=s(a_,"tf.keras.Model"),a_.forEach(t),vu=s(yl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yl.forEach(t),Tu=d(it),v(Ro.$$.fragment,it),wu=d(it),Ke=r(it,"DIV",{class:!0});var Et=i(Ke);v(js.$$.fragment,Et),yu=d(Et),so=r(Et,"P",{});var wa=i(so);Du=s(wa,"The "),ea=r(wa,"A",{href:!0});var r_=i(ea);Eu=s(r_,"TFDebertaForQuestionAnswering"),r_.forEach(t),$u=s(wa," forward method, overrides the "),Xr=r(wa,"CODE",{});var i_=i(Xr);Fu=s(i_,"__call__"),i_.forEach(t),zu=s(wa," special method."),wa.forEach(t),qu=d(Et),v(jo.$$.fragment,Et),Mu=d(Et),Yr=r(Et,"P",{});var l_=i(Yr);Cu=s(l_,"Example:"),l_.forEach(t),xu=d(Et),v(Bs.$$.fragment,Et),Et.forEach(t),it.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(q_)),c(g,"id","deberta"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#deberta"),c(m,"class","relative group"),c(Y,"id","overview"),c(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Y,"href","#overview"),c(q,"class","relative group"),c(te,"href","https://arxiv.org/abs/2006.03654"),c(te,"rel","nofollow"),c(L,"href","https://github.com/microsoft/DeBERTa"),c(L,"rel","nofollow"),c(P,"href","https://huggingface.co/DeBERTa"),c(P,"rel","nofollow"),c(oe,"href","https://huggingface.co/kamalkraj"),c(oe,"rel","nofollow"),c(V,"href","https://github.com/microsoft/DeBERTa"),c(V,"rel","nofollow"),c(ve,"id","transformers.DebertaConfig"),c(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ve,"href","#transformers.DebertaConfig"),c(pe,"class","relative group"),c(Ns,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel"),c(Os,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel"),c(Ho,"href","https://huggingface.co/microsoft/deberta-base"),c(Ho,"rel","nofollow"),c(Ws,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Hs,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(re,"class","docstring"),c(ao,"id","transformers.DebertaTokenizer"),c(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ao,"href","#transformers.DebertaTokenizer"),c(Ft,"class","relative group"),c(dt,"class","docstring"),c(ro,"class","docstring"),c(tt,"class","docstring"),c(Ra,"class","docstring"),c(qe,"class","docstring"),c(io,"id","transformers.DebertaTokenizerFast"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.DebertaTokenizerFast"),c(Mt,"class","relative group"),c(ct,"class","docstring"),c(ot,"class","docstring"),c(Je,"class","docstring"),c(lo,"id","transformers.DebertaModel"),c(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(lo,"href","#transformers.DebertaModel"),c(xt,"class","relative 
group"),c(dn,"href","https://arxiv.org/abs/2006.03654"),c(dn,"rel","nofollow"),c(pn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(pn,"rel","nofollow"),c(Us,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaModel"),c(Se,"class","docstring"),c(Xe,"class","docstring"),c(po,"id","transformers.DebertaPreTrainedModel"),c(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(po,"href","#transformers.DebertaPreTrainedModel"),c(Rt,"class","relative group"),c(jt,"class","docstring"),c(ho,"id","transformers.DebertaForMaskedLM"),c(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ho,"href","#transformers.DebertaForMaskedLM"),c(Bt,"class","relative group"),c(bn,"href","https://arxiv.org/abs/2006.03654"),c(bn,"rel","nofollow"),c(vn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(vn,"rel","nofollow"),c(Qs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForMaskedLM"),c(Ne,"class","docstring"),c(Ye,"class","docstring"),c(uo,"id","transformers.DebertaForSequenceClassification"),c(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(uo,"href","#transformers.DebertaForSequenceClassification"),c(It,"class","relative group"),c($n,"href","https://arxiv.org/abs/2006.03654"),c($n,"rel","nofollow"),c(zn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(zn,"rel","nofollow"),c(Gs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForSequenceClassification"),c(ze,"class","docstring"),c(Re,"class","docstring"),c(go,"id","transformers.DebertaForTokenClassification"),c(go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(go,"href","#transformers.DebertaForTokenClassification"),c(Nt,"class","relative group"),c(jn,"href","https://arxiv.org/abs/2006.03654"),c(jn,"rel","nofollow"),c(Ln,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ln,"rel","nofollow"),c(Ks,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForTokenClassification"),c(Oe,"class","docstring"),c(je,"class","docstring"),c(bo,"id","transformers.DebertaForQuestionAnswering"),c(bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bo,"href","#transformers.DebertaForQuestionAnswering"),c(Wt,"class","relative group"),c(Wn,"href","https://arxiv.org/abs/2006.03654"),c(Wn,"rel","nofollow"),c(Un,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Un,"rel","nofollow"),c(Vs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.DebertaForQuestionAnswering"),c(We,"class","docstring"),c(Be,"class","docstring"),c(vo,"id","transformers.TFDebertaModel"),c(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vo,"href","#transformers.TFDebertaModel"),c(Qt,"class","relative 
group"),c(Xn,"href","https://arxiv.org/abs/2006.03654"),c(Xn,"rel","nofollow"),c(Zn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Zn,"rel","nofollow"),c(Js,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaModel"),c(He,"class","docstring"),c(Le,"class","docstring"),c(yo,"id","transformers.TFDebertaPreTrainedModel"),c(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(yo,"href","#transformers.TFDebertaPreTrainedModel"),c(Kt,"class","relative group"),c(nt,"class","docstring"),c(lt,"class","docstring"),c(Do,"id","transformers.TFDebertaForMaskedLM"),c(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Do,"href","#transformers.TFDebertaForMaskedLM"),c(Vt,"class","relative group"),c(ls,"href","https://arxiv.org/abs/2006.03654"),c(ls,"rel","nofollow"),c(cs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(cs,"rel","nofollow"),c(Xs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForMaskedLM"),c(Ue,"class","docstring"),c(Ae,"class","docstring"),c(Fo,"id","transformers.TFDebertaForSequenceClassification"),c(Fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fo,"href","#transformers.TFDebertaForSequenceClassification"),c(Yt,"class","relative group"),c(gs,"href","https://arxiv.org/abs/2006.03654"),c(gs,"rel","nofollow"),c(bs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(bs,"rel","nofollow"),c(Ys,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForSequenceClassification"),c(Qe,"class","docstring"),c(Me,"class","docstring"),c(Mo,"id","transformers.TFDebertaForTokenClassification"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mo,"href","#transformers.TFDebertaForTokenClassification"),c(eo,"class","relative group"),c(Ds,"href","https://arxiv.org/abs/2006.03654"),c(Ds,"rel","nofollow"),c($s,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c($s,"rel","nofollow"),c(Zs,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForTokenClassification"),c(Ge,"class","docstring"),c(Ce,"class","docstring"),c(Po,"id","transformers.TFDebertaForQuestionAnswering"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.TFDebertaForQuestionAnswering"),c(oo,"class","relative 
group"),c(xs,"href","https://arxiv.org/abs/2006.03654"),c(xs,"rel","nofollow"),c(Rs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Rs,"rel","nofollow"),c(ea,"href","/docs/transformers/v4.15.0/en/model_doc/deberta#transformers.TFDebertaForQuestionAnswering"),c(Ke,"class","docstring"),c(xe,"class","docstring")},m(o,u){e(document.head,h),f(o,F,u),f(o,m,u),e(m,g),e(g,E),T(b,E,null),e(m,_),e(m,z),e(z,he),f(o,X,u),f(o,q,u),e(q,Y),e(Y,A),T(ee,A,null),e(q,fe),e(q,I),e(I,ue),f(o,le,u),f(o,W,u),e(W,B),e(W,te),e(te,Z),e(W,M),f(o,x,u),f(o,ne,u),e(ne,H),f(o,de,u),f(o,se,u),e(se,U),f(o,ce,u),f(o,ae,u),e(ae,C),e(C,me),e(C,L),e(L,ge),e(C,_e),f(o,S,u),f(o,K,u),e(K,be),e(K,P),e(P,ke),e(K,Q),e(K,oe),e(oe,p),e(K,$),e(K,V),e(V,Ee),e(K,$e),f(o,j,u),f(o,pe,u),e(pe,ve),e(ve,ye),T(R,ye,null),e(pe,G),e(pe,De),e(De,Fe),f(o,N,u),f(o,re,u),T(we,re,null),e(re,Te),e(re,ie),e(ie,Dl),e(ie,Ns),e(Ns,El),e(ie,$l),e(ie,Os),e(Os,Fl),e(ie,zl),e(ie,Ho),e(Ho,ql),e(ie,Ml),e(re,Cl),e(re,$t),e($t,xl),e($t,Ws),e(Ws,Pl),e($t,Rl),e($t,Hs),e(Hs,jl),e($t,Bl),f(o,ci,u),f(o,Ft,u),e(Ft,ao),e(ao,ya),T(Uo,ya,null),e(Ft,Ll),e(Ft,Da),e(Da,Al),f(o,pi,u),f(o,qe,u),T(Qo,qe,null),e(qe,Il),e(qe,Ea),e(Ea,Sl),e(qe,Nl),e(qe,dt),T(Go,dt,null),e(dt,Ol),e(dt,$a),e($a,Wl),e(dt,Hl),e(dt,Ko),e(Ko,Fa),e(Fa,Ul),e(Ko,Ql),e(Ko,za),e(za,Gl),e(qe,Kl),e(qe,ro),T(Vo,ro,null),e(ro,Vl),e(ro,zt),e(zt,Jl),e(zt,qa),e(qa,Xl),e(zt,Yl),e(zt,Ma),e(Ma,Zl),e(zt,ed),e(qe,td),e(qe,tt),T(Jo,tt,null),e(tt,od),e(tt,Ca),e(Ca,nd),e(tt,sd),T(Xo,tt,null),e(tt,ad),e(tt,qt),e(qt,rd),e(qt,xa),e(xa,id),e(qt,ld),e(qt,Pa),e(Pa,dd),e(qt,cd),e(qe,pd),e(qe,Ra),f(o,hi,u),f(o,Mt,u),e(Mt,io),e(io,ja),T(Yo,ja,null),e(Mt,hd),e(Mt,Ba),e(Ba,fd),f(o,fi,u),f(o,Je,u),T(Zo,Je,null),e(Je,ud),e(Je,en),e(en,md),e(en,La),e(La,gd),e(en,_d),e(Je,bd),e(Je,ct),T(tn,ct,null),e(ct,kd),e(ct,Aa),e(Aa,vd),e(ct,Td),e(ct,on),e(on,Ia),e(Ia,wd),e(on,yd),e(on,Sa),e(Sa,Dd),e(Je,Ed),e(Je,ot),T(nn,ot,null),e(ot,$d),e(ot,Na),e(Na,Fd),e(ot,zd),T(sn,ot,null),e(ot,qd),e(ot,Ct),e(Ct,Md),e(Ct,Oa),e(Oa,Cd),e(Ct,xd),e(Ct,Wa),e(Wa,Pd),e(Ct,Rd),f(o,ui,u),f(o,xt,u),e(xt,lo),e(lo,Ha),T(an,Ha,null),e(xt,jd),e(xt,Ua),e(Ua,Bd),f(o,mi,u),f(o,Xe,u),T(rn,Xe,null),e(Xe,Ld),e(Xe,ln),e(ln,Ad),e(ln,dn),e(dn,Id),e(ln,Sd),e(Xe,Nd),e(Xe,cn),e(cn,Od),e(cn,pn),e(pn,Wd),e(cn,Hd),e(Xe,Ud),e(Xe,Se),T(hn,Se,null),e(Se,Qd),e(Se,Pt),e(Pt,Gd),e(Pt,Us),e(Us,Kd),e(Pt,Vd),e(Pt,Qa),e(Qa,Jd),e(Pt,Xd),e(Se,Yd),T(co,Se,null),e(Se,Zd),e(Se,Ga),e(Ga,ec),e(Se,tc),T(fn,Se,null),f(o,gi,u),f(o,Rt,u),e(Rt,po),e(po,Ka),T(un,Ka,null),e(Rt,oc),e(Rt,Va),e(Va,nc),f(o,_i,u),f(o,jt,u),T(mn,jt,null),e(jt,sc),e(jt,Ja),e(Ja,ac),f(o,bi,u),f(o,Bt,u),e(Bt,ho),e(ho,Xa),T(gn,Xa,null),e(Bt,rc),e(Bt,Ya),e(Ya,ic),f(o,ki,u),f(o,Ye,u),T(_n,Ye,null),e(Ye,lc),e(Ye,Lt),e(Lt,dc),e(Lt,Za),e(Za,cc),e(Lt,pc),e(Lt,bn),e(bn,hc),e(Lt,fc),e(Ye,uc),e(Ye,kn),e(kn,mc),e(kn,vn),e(vn,gc),e(kn,_c),e(Ye,bc),e(Ye,Ne),T(Tn,Ne,null),e(Ne,kc),e(Ne,At),e(At,vc),e(At,Qs),e(Qs,Tc),e(At,wc),e(At,er),e(er,yc),e(At,Dc),e(Ne,Ec),T(fo,Ne,null),e(Ne,$c),e(Ne,tr),e(tr,Fc),e(Ne,zc),T(wn,Ne,null),f(o,vi,u),f(o,It,u),e(It,uo),e(uo,or),T(yn,or,null),e(It,qc),e(It,nr),e(nr,Mc),f(o,Ti,u),f(o,Re,u),T(Dn,Re,null),e(Re,Cc),e(Re,sr),e(sr,xc),e(Re,Pc),e(Re,En),e(En,Rc),e(En,$n),e($n,jc),e(En,Bc),e(Re,Lc),e(Re,Fn),e(Fn,Ac),e(Fn,zn),e(zn,Ic),e(Fn,Sc),e(Re,Nc),e(Re,ze),T(qn,ze,null),e(ze,Oc),e(ze,St),e(St,Wc),e(St,Gs),e(Gs,Hc),e(St,Uc),e(St,ar),e(ar,Qc),e(St,Gc),e(ze,Kc),T(mo,ze,null),e(ze,Vc),e(ze,rr),e(rr,Jc),e(ze,Xc),T(Mn,ze,null),e(ze,Yc),e(ze,ir),e(ir,Zc),e(ze,ep),T(Cn,ze,null),f(o,wi,u),f(o,Nt,u),e(Nt,go),e(go
,lr),T(xn,lr,null),e(Nt,tp),e(Nt,dr),e(dr,op),f(o,yi,u),f(o,je,u),T(Pn,je,null),e(je,np),e(je,cr),e(cr,sp),e(je,ap),e(je,Rn),e(Rn,rp),e(Rn,jn),e(jn,ip),e(Rn,lp),e(je,dp),e(je,Bn),e(Bn,cp),e(Bn,Ln),e(Ln,pp),e(Bn,hp),e(je,fp),e(je,Oe),T(An,Oe,null),e(Oe,up),e(Oe,Ot),e(Ot,mp),e(Ot,Ks),e(Ks,gp),e(Ot,_p),e(Ot,pr),e(pr,bp),e(Ot,kp),e(Oe,vp),T(_o,Oe,null),e(Oe,Tp),e(Oe,hr),e(hr,wp),e(Oe,yp),T(In,Oe,null),f(o,Di,u),f(o,Wt,u),e(Wt,bo),e(bo,fr),T(Sn,fr,null),e(Wt,Dp),e(Wt,ur),e(ur,Ep),f(o,Ei,u),f(o,Be,u),T(Nn,Be,null),e(Be,$p),e(Be,Ht),e(Ht,Fp),e(Ht,mr),e(mr,zp),e(Ht,qp),e(Ht,gr),e(gr,Mp),e(Ht,Cp),e(Be,xp),e(Be,On),e(On,Pp),e(On,Wn),e(Wn,Rp),e(On,jp),e(Be,Bp),e(Be,Hn),e(Hn,Lp),e(Hn,Un),e(Un,Ap),e(Hn,Ip),e(Be,Sp),e(Be,We),T(Qn,We,null),e(We,Np),e(We,Ut),e(Ut,Op),e(Ut,Vs),e(Vs,Wp),e(Ut,Hp),e(Ut,_r),e(_r,Up),e(Ut,Qp),e(We,Gp),T(ko,We,null),e(We,Kp),e(We,br),e(br,Vp),e(We,Jp),T(Gn,We,null),f(o,$i,u),f(o,Qt,u),e(Qt,vo),e(vo,kr),T(Kn,kr,null),e(Qt,Xp),e(Qt,vr),e(vr,Yp),f(o,Fi,u),f(o,Le,u),T(Vn,Le,null),e(Le,Zp),e(Le,Jn),e(Jn,eh),e(Jn,Xn),e(Xn,th),e(Jn,oh),e(Le,nh),e(Le,Yn),e(Yn,sh),e(Yn,Zn),e(Zn,ah),e(Yn,rh),e(Le,ih),T(To,Le,null),e(Le,lh),e(Le,He),T(es,He,null),e(He,dh),e(He,Gt),e(Gt,ch),e(Gt,Js),e(Js,ph),e(Gt,hh),e(Gt,Tr),e(Tr,fh),e(Gt,uh),e(He,mh),T(wo,He,null),e(He,gh),e(He,wr),e(wr,_h),e(He,bh),T(ts,He,null),f(o,zi,u),f(o,Kt,u),e(Kt,yo),e(yo,yr),T(os,yr,null),e(Kt,kh),e(Kt,Dr),e(Dr,vh),f(o,qi,u),f(o,lt,u),T(ns,lt,null),e(lt,Th),e(lt,Er),e(Er,wh),e(lt,yh),e(lt,nt),T(ss,nt,null),e(nt,Dh),e(nt,$r),e($r,Eh),e(nt,$h),e(nt,as),e(as,Fh),e(as,Fr),e(Fr,zh),e(as,qh),e(nt,Mh),e(nt,Ze),e(Ze,Ch),e(Ze,zr),e(zr,xh),e(Ze,Ph),e(Ze,qr),e(qr,Rh),e(Ze,jh),e(Ze,Mr),e(Mr,Bh),e(Ze,Lh),e(Ze,Cr),e(Cr,Ah),e(Ze,Ih),f(o,Mi,u),f(o,Vt,u),e(Vt,Do),e(Do,xr),T(rs,xr,null),e(Vt,Sh),e(Vt,Pr),e(Pr,Nh),f(o,Ci,u),f(o,Ae,u),T(is,Ae,null),e(Ae,Oh),e(Ae,Jt),e(Jt,Wh),e(Jt,Rr),e(Rr,Hh),e(Jt,Uh),e(Jt,ls),e(ls,Qh),e(Jt,Gh),e(Ae,Kh),e(Ae,ds),e(ds,Vh),e(ds,cs),e(cs,Jh),e(ds,Xh),e(Ae,Yh),T(Eo,Ae,null),e(Ae,Zh),e(Ae,Ue),T(ps,Ue,null),e(Ue,ef),e(Ue,Xt),e(Xt,tf),e(Xt,Xs),e(Xs,of),e(Xt,nf),e(Xt,jr),e(jr,sf),e(Xt,af),e(Ue,rf),T($o,Ue,null),e(Ue,lf),e(Ue,Br),e(Br,df),e(Ue,cf),T(hs,Ue,null),f(o,xi,u),f(o,Yt,u),e(Yt,Fo),e(Fo,Lr),T(fs,Lr,null),e(Yt,pf),e(Yt,Ar),e(Ar,hf),f(o,Pi,u),f(o,Me,u),T(us,Me,null),e(Me,ff),e(Me,Ir),e(Ir,uf),e(Me,mf),e(Me,ms),e(ms,gf),e(ms,gs),e(gs,_f),e(ms,bf),e(Me,kf),e(Me,_s),e(_s,vf),e(_s,bs),e(bs,Tf),e(_s,wf),e(Me,yf),T(zo,Me,null),e(Me,Df),e(Me,Qe),T(ks,Qe,null),e(Qe,Ef),e(Qe,Zt),e(Zt,$f),e(Zt,Ys),e(Ys,Ff),e(Zt,zf),e(Zt,Sr),e(Sr,qf),e(Zt,Mf),e(Qe,Cf),T(qo,Qe,null),e(Qe,xf),e(Qe,Nr),e(Nr,Pf),e(Qe,Rf),T(vs,Qe,null),f(o,Ri,u),f(o,eo,u),e(eo,Mo),e(Mo,Or),T(Ts,Or,null),e(eo,jf),e(eo,Wr),e(Wr,Bf),f(o,ji,u),f(o,Ce,u),T(ws,Ce,null),e(Ce,Lf),e(Ce,Hr),e(Hr,Af),e(Ce,If),e(Ce,ys),e(ys,Sf),e(ys,Ds),e(Ds,Nf),e(ys,Of),e(Ce,Wf),e(Ce,Es),e(Es,Hf),e(Es,$s),e($s,Uf),e(Es,Qf),e(Ce,Gf),T(Co,Ce,null),e(Ce,Kf),e(Ce,Ge),T(Fs,Ge,null),e(Ge,Vf),e(Ge,to),e(to,Jf),e(to,Zs),e(Zs,Xf),e(to,Yf),e(to,Ur),e(Ur,Zf),e(to,eu),e(Ge,tu),T(xo,Ge,null),e(Ge,ou),e(Ge,Qr),e(Qr,nu),e(Ge,su),T(zs,Ge,null),f(o,Bi,u),f(o,oo,u),e(oo,Po),e(Po,Gr),T(qs,Gr,null),e(oo,au),e(oo,Kr),e(Kr,ru),f(o,Li,u),f(o,xe,u),T(Ms,xe,null),e(xe,iu),e(xe,no),e(no,lu),e(no,Vr),e(Vr,du),e(no,cu),e(no,Jr),e(Jr,pu),e(no,hu),e(xe,fu),e(xe,Cs),e(Cs,uu),e(Cs,xs),e(xs,mu),e(Cs,gu),e(xe,_u),e(xe,Ps),e(Ps,bu),e(Ps,Rs),e(Rs,ku),e(Ps,vu),e(xe,Tu),T(Ro,xe,null),e(xe,wu),e(xe,Ke),T(js,Ke,null),e(Ke,yu),e(Ke,so),e(so,Du),e(so,ea),e(ea,Eu),e(so,$u),e(so,Xr),e(Xr,Fu),e(so,zu),e(Ke,qu),T(jo,Ke,null),e(Ke,Mu),e(Ke,Yr
),e(Yr,Cu),e(Ke,xu),T(Bs,Ke,null),Ai=!0},p(o,[u]){const Ls={};u&2&&(Ls.$$scope={dirty:u,ctx:o}),co.$set(Ls);const Zr={};u&2&&(Zr.$$scope={dirty:u,ctx:o}),fo.$set(Zr);const ei={};u&2&&(ei.$$scope={dirty:u,ctx:o}),mo.$set(ei);const ti={};u&2&&(ti.$$scope={dirty:u,ctx:o}),_o.$set(ti);const As={};u&2&&(As.$$scope={dirty:u,ctx:o}),ko.$set(As);const oi={};u&2&&(oi.$$scope={dirty:u,ctx:o}),To.$set(oi);const ni={};u&2&&(ni.$$scope={dirty:u,ctx:o}),wo.$set(ni);const si={};u&2&&(si.$$scope={dirty:u,ctx:o}),Eo.$set(si);const Is={};u&2&&(Is.$$scope={dirty:u,ctx:o}),$o.$set(Is);const ai={};u&2&&(ai.$$scope={dirty:u,ctx:o}),zo.$set(ai);const ri={};u&2&&(ri.$$scope={dirty:u,ctx:o}),qo.$set(ri);const ii={};u&2&&(ii.$$scope={dirty:u,ctx:o}),Co.$set(ii);const li={};u&2&&(li.$$scope={dirty:u,ctx:o}),xo.$set(li);const Ss={};u&2&&(Ss.$$scope={dirty:u,ctx:o}),Ro.$set(Ss);const di={};u&2&&(di.$$scope={dirty:u,ctx:o}),jo.$set(di)},i(o){Ai||(w(b.$$.fragment,o),w(ee.$$.fragment,o),w(R.$$.fragment,o),w(we.$$.fragment,o),w(Uo.$$.fragment,o),w(Qo.$$.fragment,o),w(Go.$$.fragment,o),w(Vo.$$.fragment,o),w(Jo.$$.fragment,o),w(Xo.$$.fragment,o),w(Yo.$$.fragment,o),w(Zo.$$.fragment,o),w(tn.$$.fragment,o),w(nn.$$.fragment,o),w(sn.$$.fragment,o),w(an.$$.fragment,o),w(rn.$$.fragment,o),w(hn.$$.fragment,o),w(co.$$.fragment,o),w(fn.$$.fragment,o),w(un.$$.fragment,o),w(mn.$$.fragment,o),w(gn.$$.fragment,o),w(_n.$$.fragment,o),w(Tn.$$.fragment,o),w(fo.$$.fragment,o),w(wn.$$.fragment,o),w(yn.$$.fragment,o),w(Dn.$$.fragment,o),w(qn.$$.fragment,o),w(mo.$$.fragment,o),w(Mn.$$.fragment,o),w(Cn.$$.fragment,o),w(xn.$$.fragment,o),w(Pn.$$.fragment,o),w(An.$$.fragment,o),w(_o.$$.fragment,o),w(In.$$.fragment,o),w(Sn.$$.fragment,o),w(Nn.$$.fragment,o),w(Qn.$$.fragment,o),w(ko.$$.fragment,o),w(Gn.$$.fragment,o),w(Kn.$$.fragment,o),w(Vn.$$.fragment,o),w(To.$$.fragment,o),w(es.$$.fragment,o),w(wo.$$.fragment,o),w(ts.$$.fragment,o),w(os.$$.fragment,o),w(ns.$$.fragment,o),w(ss.$$.fragment,o),w(rs.$$.fragment,o),w(is.$$.fragment,o),w(Eo.$$.fragment,o),w(ps.$$.fragment,o),w($o.$$.fragment,o),w(hs.$$.fragment,o),w(fs.$$.fragment,o),w(us.$$.fragment,o),w(zo.$$.fragment,o),w(ks.$$.fragment,o),w(qo.$$.fragment,o),w(vs.$$.fragment,o),w(Ts.$$.fragment,o),w(ws.$$.fragment,o),w(Co.$$.fragment,o),w(Fs.$$.fragment,o),w(xo.$$.fragment,o),w(zs.$$.fragment,o),w(qs.$$.fragment,o),w(Ms.$$.fragment,o),w(Ro.$$.fragment,o),w(js.$$.fragment,o),w(jo.$$.fragment,o),w(Bs.$$.fragment,o),Ai=!0)},o(o){y(b.$$.fragment,o),y(ee.$$.fragment,o),y(R.$$.fragment,o),y(we.$$.fragment,o),y(Uo.$$.fragment,o),y(Qo.$$.fragment,o),y(Go.$$.fragment,o),y(Vo.$$.fragment,o),y(Jo.$$.fragment,o),y(Xo.$$.fragment,o),y(Yo.$$.fragment,o),y(Zo.$$.fragment,o),y(tn.$$.fragment,o),y(nn.$$.fragment,o),y(sn.$$.fragment,o),y(an.$$.fragment,o),y(rn.$$.fragment,o),y(hn.$$.fragment,o),y(co.$$.fragment,o),y(fn.$$.fragment,o),y(un.$$.fragment,o),y(mn.$$.fragment,o),y(gn.$$.fragment,o),y(_n.$$.fragment,o),y(Tn.$$.fragment,o),y(fo.$$.fragment,o),y(wn.$$.fragment,o),y(yn.$$.fragment,o),y(Dn.$$.fragment,o),y(qn.$$.fragment,o),y(mo.$$.fragment,o),y(Mn.$$.fragment,o),y(Cn.$$.fragment,o),y(xn.$$.fragment,o),y(Pn.$$.fragment,o),y(An.$$.fragment,o),y(_o.$$.fragment,o),y(In.$$.fragment,o),y(Sn.$$.fragment,o),y(Nn.$$.fragment,o),y(Qn.$$.fragment,o),y(ko.$$.fragment,o),y(Gn.$$.fragment,o),y(Kn.$$.fragment,o),y(Vn.$$.fragment,o),y(To.$$.fragment,o),y(es.$$.fragment,o),y(wo.$$.fragment,o),y(ts.$$.fragment,o),y(os.$$.fragment,o),y(ns.$$.fragment,o),y(ss.$$.fragment,o),y(rs.$$.fragment,o),y(is.$$.fragment,o),y(Eo.$$.fragmen
t,o),y(ps.$$.fragment,o),y($o.$$.fragment,o),y(hs.$$.fragment,o),y(fs.$$.fragment,o),y(us.$$.fragment,o),y(zo.$$.fragment,o),y(ks.$$.fragment,o),y(qo.$$.fragment,o),y(vs.$$.fragment,o),y(Ts.$$.fragment,o),y(ws.$$.fragment,o),y(Co.$$.fragment,o),y(Fs.$$.fragment,o),y(xo.$$.fragment,o),y(zs.$$.fragment,o),y(qs.$$.fragment,o),y(Ms.$$.fragment,o),y(Ro.$$.fragment,o),y(js.$$.fragment,o),y(jo.$$.fragment,o),y(Bs.$$.fragment,o),Ai=!1},d(o){t(h),o&&t(F),o&&t(m),D(b),o&&t(X),o&&t(q),D(ee),o&&t(le),o&&t(W),o&&t(x),o&&t(ne),o&&t(de),o&&t(se),o&&t(ce),o&&t(ae),o&&t(S),o&&t(K),o&&t(j),o&&t(pe),D(R),o&&t(N),o&&t(re),D(we),o&&t(ci),o&&t(Ft),D(Uo),o&&t(pi),o&&t(qe),D(Qo),D(Go),D(Vo),D(Jo),D(Xo),o&&t(hi),o&&t(Mt),D(Yo),o&&t(fi),o&&t(Je),D(Zo),D(tn),D(nn),D(sn),o&&t(ui),o&&t(xt),D(an),o&&t(mi),o&&t(Xe),D(rn),D(hn),D(co),D(fn),o&&t(gi),o&&t(Rt),D(un),o&&t(_i),o&&t(jt),D(mn),o&&t(bi),o&&t(Bt),D(gn),o&&t(ki),o&&t(Ye),D(_n),D(Tn),D(fo),D(wn),o&&t(vi),o&&t(It),D(yn),o&&t(Ti),o&&t(Re),D(Dn),D(qn),D(mo),D(Mn),D(Cn),o&&t(wi),o&&t(Nt),D(xn),o&&t(yi),o&&t(je),D(Pn),D(An),D(_o),D(In),o&&t(Di),o&&t(Wt),D(Sn),o&&t(Ei),o&&t(Be),D(Nn),D(Qn),D(ko),D(Gn),o&&t($i),o&&t(Qt),D(Kn),o&&t(Fi),o&&t(Le),D(Vn),D(To),D(es),D(wo),D(ts),o&&t(zi),o&&t(Kt),D(os),o&&t(qi),o&&t(lt),D(ns),D(ss),o&&t(Mi),o&&t(Vt),D(rs),o&&t(Ci),o&&t(Ae),D(is),D(Eo),D(ps),D($o),D(hs),o&&t(xi),o&&t(Yt),D(fs),o&&t(Pi),o&&t(Me),D(us),D(zo),D(ks),D(qo),D(vs),o&&t(Ri),o&&t(eo),D(Ts),o&&t(ji),o&&t(Ce),D(ws),D(Co),D(Fs),D(xo),D(zs),o&&t(Bi),o&&t(oo),D(qs),o&&t(Li),o&&t(xe),D(Ms),D(Ro),D(js),D(jo),D(Bs)}}}const q_={local:"deberta",sections:[{local:"overview",title:"Overview"},{local:"transformers.DebertaConfig",title:"DebertaConfig"},{local:"transformers.DebertaTokenizer",title:"DebertaTokenizer"},{local:"transformers.DebertaTokenizerFast",title:"DebertaTokenizerFast"},{local:"transformers.DebertaModel",title:"DebertaModel"},{local:"transformers.DebertaPreTrainedModel",title:"DebertaPreTrainedModel"},{local:"transformers.DebertaForMaskedLM",title:"DebertaForMaskedLM"},{local:"transformers.DebertaForSequenceClassification",title:"DebertaForSequenceClassification"},{local:"transformers.DebertaForTokenClassification",title:"DebertaForTokenClassification"},{local:"transformers.DebertaForQuestionAnswering",title:"DebertaForQuestionAnswering"},{local:"transformers.TFDebertaModel",title:"TFDebertaModel"},{local:"transformers.TFDebertaPreTrainedModel",title:"TFDebertaPreTrainedModel"},{local:"transformers.TFDebertaForMaskedLM",title:"TFDebertaForMaskedLM"},{local:"transformers.TFDebertaForSequenceClassification",title:"TFDebertaForSequenceClassification"},{local:"transformers.TFDebertaForTokenClassification",title:"TFDebertaForTokenClassification"},{local:"transformers.TFDebertaForQuestionAnswering",title:"TFDebertaForQuestionAnswering"}],title:"DeBERTa"};function M_(O,h,F){let{fw:m}=h;return O.$$set=g=>{"fw"in g&&F(0,m=g.fw)},[m]}class L_ extends d_{constructor(h){super();c_(this,h,M_,z_,p_,{fw:0})}}export{L_ as default,q_ as metadata};
9,931
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/unispeech.mdx-f34fe971.js
import{S as vi,i as bi,s as wi,e as n,k as l,w as _,t as r,L as yi,c as a,d as o,m as d,a as s,x as v,h as i,b as c,J as e,g as m,y as b,q as w,o as y,B as S}from"../../chunks/vendor-b1433968.js";import{T as vn}from"../../chunks/Tip-c3840994.js";import{D as W}from"../../chunks/Docstring-ff504c58.js";import{C as To}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Te}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Si(A){let h,k,u,T,U;return{c(){h=n("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),T=r("Module"),U=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){h=a(f,"P",{});var g=s(h);k=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(g,"CODE",{});var $=s(u);T=i($,"Module"),$.forEach(o),U=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(f,g){m(f,h,g),e(h,k),e(h,u),e(u,T),e(h,U)},d(f){f&&o(h)}}}function Ti(A){let h,k,u,T,U;return{c(){h=n("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),T=r("Module"),U=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){h=a(f,"P",{});var g=s(h);k=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(g,"CODE",{});var $=s(u);T=i($,"Module"),$.forEach(o),U=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(f,g){m(f,h,g),e(h,k),e(h,u),e(u,T),e(h,U)},d(f){f&&o(h)}}}function ki(A){let h,k,u,T,U;return{c(){h=n("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),T=r("Module"),U=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){h=a(f,"P",{});var g=s(h);k=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(g,"CODE",{});var $=s(u);T=i($,"Module"),$.forEach(o),U=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(f,g){m(f,h,g),e(h,k),e(h,u),e(u,T),e(h,U)},d(f){f&&o(h)}}}function Ui(A){let h,k,u,T,U;return{c(){h=n("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),T=r("Module"),U=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){h=a(f,"P",{});var g=s(h);k=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a(g,"CODE",{});var $=s(u);T=i($,"Module"),$.forEach(o),U=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(f,g){m(f,h,g),e(h,k),e(h,u),e(u,T),e(h,U)},d(f){f&&o(h)}}}function $i(A){let 
h,k,u,T,U,f,g,$,bn,ko,R,de,Ht,ke,wn,Kt,yn,Uo,pe,Sn,Ue,Tn,kn,$o,yt,Un,Co,St,Yt,$n,jo,Tt,Cn,xo,he,$e,jn,kt,xn,Fn,qn,Ce,Pn,Ut,En,Mn,Fo,O,zn,je,Dn,An,xe,Wn,On,qo,Q,me,Rt,Fe,Ln,Qt,Nn,Po,j,qe,Vn,X,In,$t,Bn,Hn,Pe,Kn,Yn,Rn,Z,Qn,Ct,Xn,Zn,jt,Jn,Gn,ea,Xt,ta,oa,Ee,Eo,J,ue,Zt,Me,na,Jt,aa,Mo,G,ze,sa,De,ra,Gt,ia,ca,zo,ee,Ae,la,We,da,eo,pa,ha,Do,te,fe,to,Oe,ma,oo,ua,Ao,x,Le,fa,Ne,ga,Ve,_a,va,ba,Ie,wa,xt,ya,Sa,Ta,Be,ka,He,Ua,$a,Ca,P,Ke,ja,oe,xa,Ft,Fa,qa,no,Pa,Ea,Ma,ge,za,ao,Da,Aa,Ye,Wo,ne,_e,so,Re,Wa,ro,Oa,Oo,F,Qe,La,ae,Na,io,Va,Ia,Xe,Ba,Ha,Ka,Ze,Ya,qt,Ra,Qa,Xa,Je,Za,Ge,Ja,Ga,es,E,et,ts,se,os,Pt,ns,as,co,ss,rs,is,ve,cs,lo,ls,ds,tt,Lo,re,be,po,ot,ps,ho,hs,No,C,nt,ms,mo,us,fs,at,gs,st,_s,vs,bs,rt,ws,Et,ys,Ss,Ts,it,ks,ct,Us,$s,Cs,M,lt,js,ie,xs,Mt,Fs,qs,uo,Ps,Es,Ms,we,zs,fo,Ds,As,dt,Vo,ce,ye,go,pt,Ws,_o,Os,Io,q,ht,Ls,mt,Ns,ut,Vs,Is,Bs,ft,Hs,zt,Ks,Ys,Rs,gt,Qs,_t,Xs,Zs,Js,z,vt,Gs,le,er,Dt,tr,or,vo,nr,ar,sr,Se,rr,bo,ir,cr,bt,Bo;return f=new Te({}),ke=new Te({}),Fe=new Te({}),qe=new W({props:{name:"class transformers.UniSpeechConfig",anchor:"transformers.UniSpeechConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"feat_quantizer_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"num_codevectors_per_group",val:" = 320"},{name:"num_codevector_groups",val:" = 2"},{name:"contrastive_logits_temperature",val:" = 0.1"},{name:"num_negatives",val:" = 100"},{name:"codevector_dim",val:" = 256"},{name:"proj_codevector_dim",val:" = 256"},{name:"diversity_loss_weight",val:" = 0.1"},{name:"ctc_loss_reduction",val:" = 'mean'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"num_ctc_classes",val:" = 80"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"replace_prob",val:" = 0.5"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/configuration_unispeech.py#L29",parametersDescription:[{anchor:"transformers.UniSpeechConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the UniSpeech model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechModel">UniSpeechModel</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechModel">UniSpeechModel</a>.`,name:"vocab_size"},{anchor:"transformers.UniSpeechConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.UniSpeechConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.UniSpeechConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.UniSpeechConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.UniSpeechConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.UniSpeechConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.UniSpeechConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.UniSpeechConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForCTC">UniSpeechForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.UniSpeechConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.UniSpeechConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.UniSpeechConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature extractor. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.UniSpeechConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.UniSpeechConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.UniSpeechConfig.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (obj &#x2014;<em>float</em>, <em>optional</em>, defaults to 0.0): The dropout probabilitiy for quantized feature extractor states.`,name:"feat_quantizer_dropout"},{anchor:"transformers.UniSpeechConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.UniSpeechConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.UniSpeechConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.UniSpeechConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.UniSpeechConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.UniSpeechConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.UniSpeechConfig.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.UniSpeechConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.UniSpeechConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.UniSpeechConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.UniSpeechConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.UniSpeechConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.UniSpeechConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.UniSpeechConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. 
Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.UniSpeechConfig.num_codevectors_per_group",description:`<strong>num_codevectors_per_group</strong> (<code>int</code>, <em>optional</em>, defaults to 320) &#x2014; Number of entries in each quantization codebook (group).`,name:"num_codevectors_per_group"},{anchor:"transformers.UniSpeechConfig.num_codevector_groups",description:`<strong>num_codevector_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of codevector groups for product codevector quantization.`,name:"num_codevector_groups"},{anchor:"transformers.UniSpeechConfig.contrastive_logits_temperature",description:`<strong>contrastive_logits_temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The temperature <em>kappa</em> in the contrastive loss.`,name:"contrastive_logits_temperature"},{anchor:"transformers.UniSpeechConfig.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for the output of the feature extractor that&#x2019;s used by the quantizer.`,name:"feat_quantizer_dropout"},{anchor:"transformers.UniSpeechConfig.num_negatives",description:`<strong>num_negatives</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Number of negative samples for the contrastive loss.`,name:"num_negatives"},{anchor:"transformers.UniSpeechConfig.codevector_dim",description:`<strong>codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the quantized feature vectors.`,name:"codevector_dim"},{anchor:"transformers.UniSpeechConfig.proj_codevector_dim",description:`<strong>proj_codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the final projection of both the quantized and the transformer features.`,name:"proj_codevector_dim"},{anchor:"transformers.UniSpeechConfig.diversity_loss_weight",description:`<strong>diversity_loss_weight</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The weight of the codebook diversity loss component.`,name:"diversity_loss_weight"},{anchor:"transformers.UniSpeechConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;mean&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForCTC">UniSpeechForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.UniSpeechConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. 
Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForCTC">UniSpeechForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.UniSpeechConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForSequenceClassification">UniSpeechForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.UniSpeechConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"},{anchor:"transformers.UniSpeechConfig.replace_prob",description:`<strong>replace_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Propability that transformer feature is replaced by quantized feature for pretraining.`,name:"replace_prob"}]}}),Ee=new To({props:{code:`from transformers import UniSpeechModel, UniSpeechConfig # Initializing a UniSpeech facebook/unispeech-base-960h style configuration configuration = UniSpeechConfig() # Initializing a model from the facebook/unispeech-base-960h style configuration model = UniSpeechModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> UniSpeechModel, UniSpeechConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a UniSpeech facebook/unispeech-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = UniSpeechConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/unispeech-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Me=new Te({}),ze=new W({props:{name:"class transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput",anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"extract_features",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L61",parametersDescription:[{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the 
model.`,name:"last_hidden_state"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.`,name:"extract_features"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ae=new W({props:{name:"class transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput",anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"projected_states",val:": FloatTensor = None"},{name:"projected_quantized_states",val:": FloatTensor = None"},{name:"codevector_perplexity",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L89",parametersDescription:[{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> . 
(classification) loss.`,name:"loss"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput.projected_states",description:`<strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.`,name:"projected_states"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput.projected_quantized_states",description:`<strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.`,name:"projected_quantized_states"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Oe=new Te({}),Le=new W({props:{name:"class transformers.UniSpeechModel",anchor:"transformers.UniSpeechModel",parameters:[{name:"config",val:": UniSpeechConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1046",parametersDescription:[{anchor:"transformers.UniSpeechModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ke=new W({props:{name:"forward",anchor:"transformers.UniSpeechModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1110",parametersDescription:[{anchor:"transformers.UniSpeechModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput" >transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig" >UniSpeechConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput" >transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ge=new vn({props:{$$slots:{default:[Si]},$$scope:{ctx:A}}}),Ye=new To({props:{code:`from transformers import Wav2Vec2Processor, UniSpeechModel from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('microsoft/unispeech-large-1500h-cv') model = UniSpeechModel.from_pretrained('microsoft/unispeech-large-1500h-cv') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, UniSpeechModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-large-1500h-cv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-large-1500h-cv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Re=new Te({}),Qe=new W({props:{name:"class transformers.UniSpeechForCTC",anchor:"transformers.UniSpeechForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1340",parametersDescription:[{anchor:"transformers.UniSpeechForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),et=new W({props:{name:"forward",anchor:"transformers.UniSpeechForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1366",parametersDescription:[{anchor:"transformers.UniSpeechForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <code>UniSpeechProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig" >UniSpeechConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ve=new vn({props:{$$slots:{default:[Ti]},$$scope:{ctx:A}}}),tt=new To({props:{code:`from transformers import Wav2Vec2Processor, UniSpeechForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('microsoft/unispeech-large-1500h-cv') model = UniSpeechForCTC.from_pretrained('microsoft/unispeech-large-1500h-cv') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, UniSpeechForCTC <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-large-1500h-cv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechForCTC.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-large-1500h-cv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),ot=new Te({}),nt=new W({props:{name:"class transformers.UniSpeechForSequenceClassification",anchor:"transformers.UniSpeechForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1452",parametersDescription:[{anchor:"transformers.UniSpeechForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lt=new W({props:{name:"forward",anchor:"transformers.UniSpeechForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1481",parametersDescription:[{anchor:"transformers.UniSpeechForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig" >UniSpeechConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),we=new vn({props:{$$slots:{default:[ki]},$$scope:{ctx:A}}}),dt=new To({props:{code:`from transformers import Wav2Vec2FeatureExtractor, UniSpeechForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = 
dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/unispeech-large-1500h-cv') model = UniSpeechForSequenceClassification.from_pretrained('microsoft/unispeech-large-1500h-cv') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, UniSpeechForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-large-1500h-cv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = UniSpeechForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/unispeech-large-1500h-cv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),pt=new Te({}),ht=new W({props:{name:"class transformers.UniSpeechForPreTraining",anchor:"transformers.UniSpeechForPreTraining",parameters:[{name:"config",val:": UniSpeechConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1169",parametersDescription:[{anchor:"transformers.UniSpeechForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig">UniSpeechConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),vt=new W({props:{name:"forward",anchor:"transformers.UniSpeechForPreTraining.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/unispeech/modeling_unispeech.py#L1219",parametersDescription:[{anchor:"transformers.UniSpeechForPreTraining.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>UniSpeechProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>UniSpeechProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.UniSpeechForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. 
For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.UniSpeechForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.UniSpeechForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.UniSpeechForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.UniSpeechForPreTraining.forward.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.UniSpeechForPreTraining.forward.sampled_negative_indices",description:`<strong>sampled_negative_indices</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, sequence_length, num_negatives)</code>, <em>optional</em>) &#x2014; Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss. Required input for pre-training.`,name:"sampled_negative_indices"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput" >transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechConfig" >UniSpeechConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> . 
(classification) loss.</p> </li> <li> <p><strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.</p> </li> <li> <p><strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput" >transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Se=new vn({props:{$$slots:{default:[Ui]},$$scope:{ctx:A}}}),bt=new To({props:{code:`import torch from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices from datasets import load_dataset import soundfile as sf feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("patrickvonplaten/wav2vec2-base") model = Wav2Vec2ForPreTraining.from_pretrained("patrickvonplaten/wav2vec2-base") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = feature_extractor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 # compute masked indices batch_size, raw_sequence_length = input_values.shape sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) with torch.no_grad(): outputs = model(input_values, mask_time_indices=mask_time_indices) # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) cosine_sim = torch.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, dim=-1 ) # show that cosine similarity is much higher than random assert cosine_sim[mask_time_indices].mean() > 0.5 # for contrastive loss training model should be put into train mode model.train() loss = model(input_values, 
mask_time_indices=mask_time_indices).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.wav2vec2.modeling_wav2vec2 <span class="hljs-keyword">import</span> _compute_mask_indices <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForPreTraining.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;patrickvonplaten/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute masked indices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size, raw_sequence_length = input_values.shape <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=<span class="hljs-number">0.2</span>, mask_length=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(input_values, mask_time_indices=mask_time_indices) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.cosine_similarity( <span class="hljs-meta">... </span> outputs.projected_states, outputs.projected_quantized_states, dim=-<span class="hljs-number">1</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># show that cosine similarity is much higher than random</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> cosine_sim[mask_time_indices].mean() &gt; <span class="hljs-number">0.5</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># for contrastive loss training model should be put into train mode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, mask_time_indices=mask_time_indices).loss`}}),{c(){h=n("meta"),k=l(),u=n("h1"),T=n("a"),U=n("span"),_(f.$$.fragment),g=l(),$=n("span"),bn=r("UniSpeech"),ko=l(),R=n("h2"),de=n("a"),Ht=n("span"),_(ke.$$.fragment),wn=l(),Kt=n("span"),yn=r("Overview"),Uo=l(),pe=n("p"),Sn=r("The UniSpeech model was proposed in "),Ue=n("a"),Tn=r("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),kn=r(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang .`),$o=l(),yt=n("p"),Un=r("The abstract from the paper is the following:"),Co=l(),St=n("p"),Yt=n("em"),$n=r(`In this paper, we propose a unified pre-training approach called UniSpeech to learn speech representations with both unlabeled and labeled data, in which supervised phonetic CTC learning and phonetically-aware contrastive self-supervised learning are conducted in a multi-task learning manner. The resultant representations can capture information more correlated with phonetic structures and improve the generalization across languages and domains. We evaluate the effectiveness of UniSpeech for cross-lingual representation learning on public CommonVoice corpus. The results show that UniSpeech outperforms self-supervised pretraining and supervised transfer learning for speech recognition by a maximum of 13.4% and 17.8% relative phone error rate reductions respectively (averaged over all testing languages). The transferability of UniSpeech is also demonstrated on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.`),jo=l(),Tt=n("p"),Cn=r("Tips:"),xo=l(),he=n("ul"),$e=n("li"),jn=r(`UniSpeech is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use `),kt=n("a"),xn=r("Wav2Vec2Processor"),Fn=r(" for the feature extraction."),qn=l(),Ce=n("li"),Pn=r(`UniSpeech model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Ut=n("a"),En=r("Wav2Vec2CTCTokenizer"),Mn=r("."),Fo=l(),O=n("p"),zn=r("This model was contributed by "),je=n("a"),Dn=r("patrickvonplaten"),An=r(`. The Authors\u2019 code can be found `),xe=n("a"),Wn=r("here"),On=r("."),qo=l(),Q=n("h2"),me=n("a"),Rt=n("span"),_(Fe.$$.fragment),Ln=l(),Qt=n("span"),Nn=r("UniSpeechConfig"),Po=l(),j=n("div"),_(qe.$$.fragment),Vn=l(),X=n("p"),In=r("This is the configuration class to store the configuration of a "),$t=n("a"),Bn=r("UniSpeechModel"),Hn=r(`. It is used to instantiate an UniSpeech model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeech `),Pe=n("a"),Kn=r("facebook/unispeech-base-960h"),Yn=r(" architecture."),Rn=l(),Z=n("p"),Qn=r("Configuration objects inherit from "),Ct=n("a"),Xn=r("PretrainedConfig"),Zn=r(` and can be used to control the model outputs. 
Read the documentation from `),jt=n("a"),Jn=r("PretrainedConfig"),Gn=r(" for more information."),ea=l(),Xt=n("p"),ta=r("Example:"),oa=l(),_(Ee.$$.fragment),Eo=l(),J=n("h2"),ue=n("a"),Zt=n("span"),_(Me.$$.fragment),na=l(),Jt=n("span"),aa=r("UniSpeech specific outputs"),Mo=l(),G=n("div"),_(ze.$$.fragment),sa=l(),De=n("p"),ra=r("Output type of "),Gt=n("code"),ia=r("UniSpeechBaseModelOutput"),ca=r(", with potential hidden states and attentions."),zo=l(),ee=n("div"),_(Ae.$$.fragment),la=l(),We=n("p"),da=r("Output type of "),eo=n("code"),pa=r("UniSpeechForPreTrainingOutput"),ha=r(", with potential hidden states and attentions."),Do=l(),te=n("h2"),fe=n("a"),to=n("span"),_(Oe.$$.fragment),ma=l(),oo=n("span"),ua=r("UniSpeechModel"),Ao=l(),x=n("div"),_(Le.$$.fragment),fa=l(),Ne=n("p"),ga=r(`The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top. UniSpeech was proposed in `),Ve=n("a"),_a=r("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),va=r(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),ba=l(),Ie=n("p"),wa=r("This model inherits from "),xt=n("a"),ya=r("PreTrainedModel"),Sa=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ta=l(),Be=n("p"),ka=r("This model is a PyTorch "),He=n("a"),Ua=r("torch.nn.Module"),$a=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ca=l(),P=n("div"),_(Ke.$$.fragment),ja=l(),oe=n("p"),xa=r("The "),Ft=n("a"),Fa=r("UniSpeechModel"),qa=r(" forward method, overrides the "),no=n("code"),Pa=r("__call__"),Ea=r(" special method."),Ma=l(),_(ge.$$.fragment),za=l(),ao=n("p"),Da=r("Example:"),Aa=l(),_(Ye.$$.fragment),Wo=l(),ne=n("h2"),_e=n("a"),so=n("span"),_(Re.$$.fragment),Wa=l(),ro=n("span"),Oa=r("UniSpeechForCTC"),Oo=l(),F=n("div"),_(Qe.$$.fragment),La=l(),ae=n("p"),Na=r("UniSpeech Model with a "),io=n("code"),Va=r("language modeling"),Ia=r(` head on top for Connectionist Temporal Classification (CTC). UniSpeech was proposed in `),Xe=n("a"),Ba=r("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Ha=r(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Ka=l(),Ze=n("p"),Ya=r("This model inherits from "),qt=n("a"),Ra=r("PreTrainedModel"),Qa=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Xa=l(),Je=n("p"),Za=r("This model is a PyTorch "),Ge=n("a"),Ja=r("torch.nn.Module"),Ga=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),es=l(),E=n("div"),_(et.$$.fragment),ts=l(),se=n("p"),os=r("The "),Pt=n("a"),ns=r("UniSpeechForCTC"),as=r(" forward method, overrides the "),co=n("code"),ss=r("__call__"),rs=r(" special method."),is=l(),_(ve.$$.fragment),cs=l(),lo=n("p"),ls=r("Example:"),ds=l(),_(tt.$$.fragment),Lo=l(),re=n("h2"),be=n("a"),po=n("span"),_(ot.$$.fragment),ps=l(),ho=n("span"),hs=r("UniSpeechForSequenceClassification"),No=l(),C=n("div"),_(nt.$$.fragment),ms=l(),mo=n("p"),us=r(`UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),fs=l(),at=n("p"),gs=r("UniSpeech was proposed in "),st=n("a"),_s=r("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),vs=r(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),bs=l(),rt=n("p"),ws=r("This model inherits from "),Et=n("a"),ys=r("PreTrainedModel"),Ss=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ts=l(),it=n("p"),ks=r("This model is a PyTorch "),ct=n("a"),Us=r("torch.nn.Module"),$s=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cs=l(),M=n("div"),_(lt.$$.fragment),js=l(),ie=n("p"),xs=r("The "),Mt=n("a"),Fs=r("UniSpeechForSequenceClassification"),qs=r(" forward method, overrides the "),uo=n("code"),Ps=r("__call__"),Es=r(" special method."),Ms=l(),_(we.$$.fragment),zs=l(),fo=n("p"),Ds=r("Example:"),As=l(),_(dt.$$.fragment),Vo=l(),ce=n("h2"),ye=n("a"),go=n("span"),_(pt.$$.fragment),Ws=l(),_o=n("span"),Os=r("UniSpeechForPreTraining"),Io=l(),q=n("div"),_(ht.$$.fragment),Ls=l(),mt=n("p"),Ns=r(`UniSpeech Model with a vector-quantization module and ctc loss for pre-training. UniSpeech was proposed in `),ut=n("a"),Vs=r("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Is=r(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Bs=l(),ft=n("p"),Hs=r("This model inherits from "),zt=n("a"),Ks=r("PreTrainedModel"),Ys=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Rs=l(),gt=n("p"),Qs=r("This model is a PyTorch "),_t=n("a"),Xs=r("torch.nn.Module"),Zs=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Js=l(),z=n("div"),_(vt.$$.fragment),Gs=l(),le=n("p"),er=r("The "),Dt=n("a"),tr=r("UniSpeechForPreTraining"),or=r(" forward method, overrides the "),vo=n("code"),nr=r("__call__"),ar=r(" special method."),sr=l(),_(Se.$$.fragment),rr=l(),bo=n("p"),ir=r("Example:"),cr=l(),_(bt.$$.fragment),this.h()},l(t){const p=yi('[data-svelte="svelte-1phssyn"]',document.head);h=a(p,"META",{name:!0,content:!0}),p.forEach(o),k=d(t),u=a(t,"H1",{class:!0});var wt=s(u);T=a(wt,"A",{id:!0,class:!0,href:!0});var wo=s(T);U=a(wo,"SPAN",{});var yo=s(U);v(f.$$.fragment,yo),yo.forEach(o),wo.forEach(o),g=d(wt),$=a(wt,"SPAN",{});var So=s($);bn=i(So,"UniSpeech"),So.forEach(o),wt.forEach(o),ko=d(t),R=a(t,"H2",{class:!0});var Ho=s(R);de=a(Ho,"A",{id:!0,class:!0,href:!0});var lr=s(de);Ht=a(lr,"SPAN",{});var dr=s(Ht);v(ke.$$.fragment,dr),dr.forEach(o),lr.forEach(o),wn=d(Ho),Kt=a(Ho,"SPAN",{});var pr=s(Kt);yn=i(pr,"Overview"),pr.forEach(o),Ho.forEach(o),Uo=d(t),pe=a(t,"P",{});var Ko=s(pe);Sn=i(Ko,"The UniSpeech model was proposed in "),Ue=a(Ko,"A",{href:!0,rel:!0});var hr=s(Ue);Tn=i(hr,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),hr.forEach(o),kn=i(Ko,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang .`),Ko.forEach(o),$o=d(t),yt=a(t,"P",{});var mr=s(yt);Un=i(mr,"The abstract from the paper is the following:"),mr.forEach(o),Co=d(t),St=a(t,"P",{});var ur=s(St);Yt=a(ur,"EM",{});var fr=s(Yt);$n=i(fr,`In this paper, we propose a unified pre-training approach called UniSpeech to learn speech representations with both unlabeled and labeled data, in which supervised phonetic CTC learning and phonetically-aware contrastive self-supervised learning are conducted in a multi-task learning manner. The resultant representations can capture information more correlated with phonetic structures and improve the generalization across languages and domains. We evaluate the effectiveness of UniSpeech for cross-lingual representation learning on public CommonVoice corpus. The results show that UniSpeech outperforms self-supervised pretraining and supervised transfer learning for speech recognition by a maximum of 13.4% and 17.8% relative phone error rate reductions respectively (averaged over all testing languages). The transferability of UniSpeech is also demonstrated on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.`),fr.forEach(o),ur.forEach(o),jo=d(t),Tt=a(t,"P",{});var gr=s(Tt);Cn=i(gr,"Tips:"),gr.forEach(o),xo=d(t),he=a(t,"UL",{});var Yo=s(he);$e=a(Yo,"LI",{});var Ro=s($e);jn=i(Ro,`UniSpeech is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use `),kt=a(Ro,"A",{href:!0});var _r=s(kt);xn=i(_r,"Wav2Vec2Processor"),_r.forEach(o),Fn=i(Ro," for the feature extraction."),Ro.forEach(o),qn=d(Yo),Ce=a(Yo,"LI",{});var Qo=s(Ce);Pn=i(Qo,`UniSpeech model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Ut=a(Qo,"A",{href:!0});var vr=s(Ut);En=i(vr,"Wav2Vec2CTCTokenizer"),vr.forEach(o),Mn=i(Qo,"."),Qo.forEach(o),Yo.forEach(o),Fo=d(t),O=a(t,"P",{});var At=s(O);zn=i(At,"This model was contributed by "),je=a(At,"A",{href:!0,rel:!0});var br=s(je);Dn=i(br,"patrickvonplaten"),br.forEach(o),An=i(At,`. 
The Authors\u2019 code can be found `),xe=a(At,"A",{href:!0,rel:!0});var wr=s(xe);Wn=i(wr,"here"),wr.forEach(o),On=i(At,"."),At.forEach(o),qo=d(t),Q=a(t,"H2",{class:!0});var Xo=s(Q);me=a(Xo,"A",{id:!0,class:!0,href:!0});var yr=s(me);Rt=a(yr,"SPAN",{});var Sr=s(Rt);v(Fe.$$.fragment,Sr),Sr.forEach(o),yr.forEach(o),Ln=d(Xo),Qt=a(Xo,"SPAN",{});var Tr=s(Qt);Nn=i(Tr,"UniSpeechConfig"),Tr.forEach(o),Xo.forEach(o),Po=d(t),j=a(t,"DIV",{class:!0});var L=s(j);v(qe.$$.fragment,L),Vn=d(L),X=a(L,"P",{});var Wt=s(X);In=i(Wt,"This is the configuration class to store the configuration of a "),$t=a(Wt,"A",{href:!0});var kr=s($t);Bn=i(kr,"UniSpeechModel"),kr.forEach(o),Hn=i(Wt,`. It is used to instantiate an UniSpeech model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeech `),Pe=a(Wt,"A",{href:!0,rel:!0});var Ur=s(Pe);Kn=i(Ur,"facebook/unispeech-base-960h"),Ur.forEach(o),Yn=i(Wt," architecture."),Wt.forEach(o),Rn=d(L),Z=a(L,"P",{});var Ot=s(Z);Qn=i(Ot,"Configuration objects inherit from "),Ct=a(Ot,"A",{href:!0});var $r=s(Ct);Xn=i($r,"PretrainedConfig"),$r.forEach(o),Zn=i(Ot,` and can be used to control the model outputs. Read the documentation from `),jt=a(Ot,"A",{href:!0});var Cr=s(jt);Jn=i(Cr,"PretrainedConfig"),Cr.forEach(o),Gn=i(Ot," for more information."),Ot.forEach(o),ea=d(L),Xt=a(L,"P",{});var jr=s(Xt);ta=i(jr,"Example:"),jr.forEach(o),oa=d(L),v(Ee.$$.fragment,L),L.forEach(o),Eo=d(t),J=a(t,"H2",{class:!0});var Zo=s(J);ue=a(Zo,"A",{id:!0,class:!0,href:!0});var xr=s(ue);Zt=a(xr,"SPAN",{});var Fr=s(Zt);v(Me.$$.fragment,Fr),Fr.forEach(o),xr.forEach(o),na=d(Zo),Jt=a(Zo,"SPAN",{});var qr=s(Jt);aa=i(qr,"UniSpeech specific outputs"),qr.forEach(o),Zo.forEach(o),Mo=d(t),G=a(t,"DIV",{class:!0});var Jo=s(G);v(ze.$$.fragment,Jo),sa=d(Jo),De=a(Jo,"P",{});var Go=s(De);ra=i(Go,"Output type of "),Gt=a(Go,"CODE",{});var Pr=s(Gt);ia=i(Pr,"UniSpeechBaseModelOutput"),Pr.forEach(o),ca=i(Go,", with potential hidden states and attentions."),Go.forEach(o),Jo.forEach(o),zo=d(t),ee=a(t,"DIV",{class:!0});var en=s(ee);v(Ae.$$.fragment,en),la=d(en),We=a(en,"P",{});var tn=s(We);da=i(tn,"Output type of "),eo=a(tn,"CODE",{});var Er=s(eo);pa=i(Er,"UniSpeechForPreTrainingOutput"),Er.forEach(o),ha=i(tn,", with potential hidden states and attentions."),tn.forEach(o),en.forEach(o),Do=d(t),te=a(t,"H2",{class:!0});var on=s(te);fe=a(on,"A",{id:!0,class:!0,href:!0});var Mr=s(fe);to=a(Mr,"SPAN",{});var zr=s(to);v(Oe.$$.fragment,zr),zr.forEach(o),Mr.forEach(o),ma=d(on),oo=a(on,"SPAN",{});var Dr=s(oo);ua=i(Dr,"UniSpeechModel"),Dr.forEach(o),on.forEach(o),Ao=d(t),x=a(t,"DIV",{class:!0});var N=s(x);v(Le.$$.fragment,N),fa=d(N),Ne=a(N,"P",{});var nn=s(Ne);ga=i(nn,`The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top. UniSpeech was proposed in `),Ve=a(nn,"A",{href:!0,rel:!0});var Ar=s(Ve);_a=i(Ar,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Ar.forEach(o),va=i(nn,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),nn.forEach(o),ba=d(N),Ie=a(N,"P",{});var an=s(Ie);wa=i(an,"This model inherits from "),xt=a(an,"A",{href:!0});var Wr=s(xt);ya=i(Wr,"PreTrainedModel"),Wr.forEach(o),Sa=i(an,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),an.forEach(o),Ta=d(N),Be=a(N,"P",{});var sn=s(Be);ka=i(sn,"This model is a PyTorch "),He=a(sn,"A",{href:!0,rel:!0});var Or=s(He);Ua=i(Or,"torch.nn.Module"),Or.forEach(o),$a=i(sn,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sn.forEach(o),Ca=d(N),P=a(N,"DIV",{class:!0});var V=s(P);v(Ke.$$.fragment,V),ja=d(V),oe=a(V,"P",{});var Lt=s(oe);xa=i(Lt,"The "),Ft=a(Lt,"A",{href:!0});var Lr=s(Ft);Fa=i(Lr,"UniSpeechModel"),Lr.forEach(o),qa=i(Lt," forward method, overrides the "),no=a(Lt,"CODE",{});var Nr=s(no);Pa=i(Nr,"__call__"),Nr.forEach(o),Ea=i(Lt," special method."),Lt.forEach(o),Ma=d(V),v(ge.$$.fragment,V),za=d(V),ao=a(V,"P",{});var Vr=s(ao);Da=i(Vr,"Example:"),Vr.forEach(o),Aa=d(V),v(Ye.$$.fragment,V),V.forEach(o),N.forEach(o),Wo=d(t),ne=a(t,"H2",{class:!0});var rn=s(ne);_e=a(rn,"A",{id:!0,class:!0,href:!0});var Ir=s(_e);so=a(Ir,"SPAN",{});var Br=s(so);v(Re.$$.fragment,Br),Br.forEach(o),Ir.forEach(o),Wa=d(rn),ro=a(rn,"SPAN",{});var Hr=s(ro);Oa=i(Hr,"UniSpeechForCTC"),Hr.forEach(o),rn.forEach(o),Oo=d(t),F=a(t,"DIV",{class:!0});var I=s(F);v(Qe.$$.fragment,I),La=d(I),ae=a(I,"P",{});var Nt=s(ae);Na=i(Nt,"UniSpeech Model with a "),io=a(Nt,"CODE",{});var Kr=s(io);Va=i(Kr,"language modeling"),Kr.forEach(o),Ia=i(Nt,` head on top for Connectionist Temporal Classification (CTC). UniSpeech was proposed in `),Xe=a(Nt,"A",{href:!0,rel:!0});var Yr=s(Xe);Ba=i(Yr,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Yr.forEach(o),Ha=i(Nt,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Nt.forEach(o),Ka=d(I),Ze=a(I,"P",{});var cn=s(Ze);Ya=i(cn,"This model inherits from "),qt=a(cn,"A",{href:!0});var Rr=s(qt);Ra=i(Rr,"PreTrainedModel"),Rr.forEach(o),Qa=i(cn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),cn.forEach(o),Xa=d(I),Je=a(I,"P",{});var ln=s(Je);Za=i(ln,"This model is a PyTorch "),Ge=a(ln,"A",{href:!0,rel:!0});var Qr=s(Ge);Ja=i(Qr,"torch.nn.Module"),Qr.forEach(o),Ga=i(ln,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ln.forEach(o),es=d(I),E=a(I,"DIV",{class:!0});var B=s(E);v(et.$$.fragment,B),ts=d(B),se=a(B,"P",{});var Vt=s(se);os=i(Vt,"The "),Pt=a(Vt,"A",{href:!0});var Xr=s(Pt);ns=i(Xr,"UniSpeechForCTC"),Xr.forEach(o),as=i(Vt," forward method, overrides the "),co=a(Vt,"CODE",{});var Zr=s(co);ss=i(Zr,"__call__"),Zr.forEach(o),rs=i(Vt," special method."),Vt.forEach(o),is=d(B),v(ve.$$.fragment,B),cs=d(B),lo=a(B,"P",{});var Jr=s(lo);ls=i(Jr,"Example:"),Jr.forEach(o),ds=d(B),v(tt.$$.fragment,B),B.forEach(o),I.forEach(o),Lo=d(t),re=a(t,"H2",{class:!0});var dn=s(re);be=a(dn,"A",{id:!0,class:!0,href:!0});var Gr=s(be);po=a(Gr,"SPAN",{});var ei=s(po);v(ot.$$.fragment,ei),ei.forEach(o),Gr.forEach(o),ps=d(dn),ho=a(dn,"SPAN",{});var ti=s(ho);hs=i(ti,"UniSpeechForSequenceClassification"),ti.forEach(o),dn.forEach(o),No=d(t),C=a(t,"DIV",{class:!0});var D=s(C);v(nt.$$.fragment,D),ms=d(D),mo=a(D,"P",{});var oi=s(mo);us=i(oi,`UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),oi.forEach(o),fs=d(D),at=a(D,"P",{});var pn=s(at);gs=i(pn,"UniSpeech was proposed in "),st=a(pn,"A",{href:!0,rel:!0});var ni=s(st);_s=i(ni,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),ni.forEach(o),vs=i(pn,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),pn.forEach(o),bs=d(D),rt=a(D,"P",{});var hn=s(rt);ws=i(hn,"This model inherits from "),Et=a(hn,"A",{href:!0});var ai=s(Et);ys=i(ai,"PreTrainedModel"),ai.forEach(o),Ss=i(hn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),hn.forEach(o),Ts=d(D),it=a(D,"P",{});var mn=s(it);ks=i(mn,"This model is a PyTorch "),ct=a(mn,"A",{href:!0,rel:!0});var si=s(ct);Us=i(si,"torch.nn.Module"),si.forEach(o),$s=i(mn,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mn.forEach(o),Cs=d(D),M=a(D,"DIV",{class:!0});var H=s(M);v(lt.$$.fragment,H),js=d(H),ie=a(H,"P",{});var It=s(ie);xs=i(It,"The "),Mt=a(It,"A",{href:!0});var ri=s(Mt);Fs=i(ri,"UniSpeechForSequenceClassification"),ri.forEach(o),qs=i(It," forward method, overrides the "),uo=a(It,"CODE",{});var ii=s(uo);Ps=i(ii,"__call__"),ii.forEach(o),Es=i(It," special method."),It.forEach(o),Ms=d(H),v(we.$$.fragment,H),zs=d(H),fo=a(H,"P",{});var ci=s(fo);Ds=i(ci,"Example:"),ci.forEach(o),As=d(H),v(dt.$$.fragment,H),H.forEach(o),D.forEach(o),Vo=d(t),ce=a(t,"H2",{class:!0});var un=s(ce);ye=a(un,"A",{id:!0,class:!0,href:!0});var li=s(ye);go=a(li,"SPAN",{});var di=s(go);v(pt.$$.fragment,di),di.forEach(o),li.forEach(o),Ws=d(un),_o=a(un,"SPAN",{});var pi=s(_o);Os=i(pi,"UniSpeechForPreTraining"),pi.forEach(o),un.forEach(o),Io=d(t),q=a(t,"DIV",{class:!0});var K=s(q);v(ht.$$.fragment,K),Ls=d(K),mt=a(K,"P",{});var fn=s(mt);Ns=i(fn,`UniSpeech Model with a vector-quantization module and ctc loss for pre-training. 
UniSpeech was proposed in `),ut=a(fn,"A",{href:!0,rel:!0});var hi=s(ut);Vs=i(hi,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),hi.forEach(o),Is=i(fn,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),fn.forEach(o),Bs=d(K),ft=a(K,"P",{});var gn=s(ft);Hs=i(gn,"This model inherits from "),zt=a(gn,"A",{href:!0});var mi=s(zt);Ks=i(mi,"PreTrainedModel"),mi.forEach(o),Ys=i(gn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),gn.forEach(o),Rs=d(K),gt=a(K,"P",{});var _n=s(gt);Qs=i(_n,"This model is a PyTorch "),_t=a(_n,"A",{href:!0,rel:!0});var ui=s(_t);Xs=i(ui,"torch.nn.Module"),ui.forEach(o),Zs=i(_n,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_n.forEach(o),Js=d(K),z=a(K,"DIV",{class:!0});var Y=s(z);v(vt.$$.fragment,Y),Gs=d(Y),le=a(Y,"P",{});var Bt=s(le);er=i(Bt,"The "),Dt=a(Bt,"A",{href:!0});var fi=s(Dt);tr=i(fi,"UniSpeechForPreTraining"),fi.forEach(o),or=i(Bt," forward method, overrides the "),vo=a(Bt,"CODE",{});var gi=s(vo);nr=i(gi,"__call__"),gi.forEach(o),ar=i(Bt," special method."),Bt.forEach(o),sr=d(Y),v(Se.$$.fragment,Y),rr=d(Y),bo=a(Y,"P",{});var _i=s(bo);ir=i(_i,"Example:"),_i.forEach(o),cr=d(Y),v(bt.$$.fragment,Y),Y.forEach(o),K.forEach(o),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(Ci)),c(T,"id","unispeech"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#unispeech"),c(u,"class","relative group"),c(de,"id","overview"),c(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(de,"href","#overview"),c(R,"class","relative group"),c(Ue,"href","https://arxiv.org/abs/2101.07597"),c(Ue,"rel","nofollow"),c(kt,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),c(Ut,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),c(je,"href","https://huggingface.co/patrickvonplaten"),c(je,"rel","nofollow"),c(xe,"href","https://github.com/microsoft/UniSpeech/tree/main/UniSpeech"),c(xe,"rel","nofollow"),c(me,"id","transformers.UniSpeechConfig"),c(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(me,"href","#transformers.UniSpeechConfig"),c(Q,"class","relative group"),c($t,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechModel"),c(Pe,"href","https://huggingface.co/facebook/unispeech-base-960h"),c(Pe,"rel","nofollow"),c(Ct,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(jt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(j,"class","docstring"),c(ue,"id","transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput"),c(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(ue,"href","#transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput"),c(J,"class","relative group"),c(G,"class","docstring"),c(ee,"class","docstring"),c(fe,"id","transformers.UniSpeechModel"),c(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fe,"href","#transformers.UniSpeechModel"),c(te,"class","relative group"),c(Ve,"href","https://arxiv.org/abs/2101.07597"),c(Ve,"rel","nofollow"),c(xt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(He,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(He,"rel","nofollow"),c(Ft,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechModel"),c(P,"class","docstring"),c(x,"class","docstring"),c(_e,"id","transformers.UniSpeechForCTC"),c(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_e,"href","#transformers.UniSpeechForCTC"),c(ne,"class","relative group"),c(Xe,"href","https://arxiv.org/abs/2101.07597"),c(Xe,"rel","nofollow"),c(qt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ge,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ge,"rel","nofollow"),c(Pt,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForCTC"),c(E,"class","docstring"),c(F,"class","docstring"),c(be,"id","transformers.UniSpeechForSequenceClassification"),c(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(be,"href","#transformers.UniSpeechForSequenceClassification"),c(re,"class","relative group"),c(st,"href","https://arxiv.org/abs/2101.07597"),c(st,"rel","nofollow"),c(Et,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ct,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ct,"rel","nofollow"),c(Mt,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForSequenceClassification"),c(M,"class","docstring"),c(C,"class","docstring"),c(ye,"id","transformers.UniSpeechForPreTraining"),c(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ye,"href","#transformers.UniSpeechForPreTraining"),c(ce,"class","relative 
group"),c(ut,"href","https://arxiv.org/abs/2101.07597"),c(ut,"rel","nofollow"),c(zt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(_t,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(_t,"rel","nofollow"),c(Dt,"href","/docs/transformers/v4.15.0/en/model_doc/unispeech#transformers.UniSpeechForPreTraining"),c(z,"class","docstring"),c(q,"class","docstring")},m(t,p){e(document.head,h),m(t,k,p),m(t,u,p),e(u,T),e(T,U),b(f,U,null),e(u,g),e(u,$),e($,bn),m(t,ko,p),m(t,R,p),e(R,de),e(de,Ht),b(ke,Ht,null),e(R,wn),e(R,Kt),e(Kt,yn),m(t,Uo,p),m(t,pe,p),e(pe,Sn),e(pe,Ue),e(Ue,Tn),e(pe,kn),m(t,$o,p),m(t,yt,p),e(yt,Un),m(t,Co,p),m(t,St,p),e(St,Yt),e(Yt,$n),m(t,jo,p),m(t,Tt,p),e(Tt,Cn),m(t,xo,p),m(t,he,p),e(he,$e),e($e,jn),e($e,kt),e(kt,xn),e($e,Fn),e(he,qn),e(he,Ce),e(Ce,Pn),e(Ce,Ut),e(Ut,En),e(Ce,Mn),m(t,Fo,p),m(t,O,p),e(O,zn),e(O,je),e(je,Dn),e(O,An),e(O,xe),e(xe,Wn),e(O,On),m(t,qo,p),m(t,Q,p),e(Q,me),e(me,Rt),b(Fe,Rt,null),e(Q,Ln),e(Q,Qt),e(Qt,Nn),m(t,Po,p),m(t,j,p),b(qe,j,null),e(j,Vn),e(j,X),e(X,In),e(X,$t),e($t,Bn),e(X,Hn),e(X,Pe),e(Pe,Kn),e(X,Yn),e(j,Rn),e(j,Z),e(Z,Qn),e(Z,Ct),e(Ct,Xn),e(Z,Zn),e(Z,jt),e(jt,Jn),e(Z,Gn),e(j,ea),e(j,Xt),e(Xt,ta),e(j,oa),b(Ee,j,null),m(t,Eo,p),m(t,J,p),e(J,ue),e(ue,Zt),b(Me,Zt,null),e(J,na),e(J,Jt),e(Jt,aa),m(t,Mo,p),m(t,G,p),b(ze,G,null),e(G,sa),e(G,De),e(De,ra),e(De,Gt),e(Gt,ia),e(De,ca),m(t,zo,p),m(t,ee,p),b(Ae,ee,null),e(ee,la),e(ee,We),e(We,da),e(We,eo),e(eo,pa),e(We,ha),m(t,Do,p),m(t,te,p),e(te,fe),e(fe,to),b(Oe,to,null),e(te,ma),e(te,oo),e(oo,ua),m(t,Ao,p),m(t,x,p),b(Le,x,null),e(x,fa),e(x,Ne),e(Ne,ga),e(Ne,Ve),e(Ve,_a),e(Ne,va),e(x,ba),e(x,Ie),e(Ie,wa),e(Ie,xt),e(xt,ya),e(Ie,Sa),e(x,Ta),e(x,Be),e(Be,ka),e(Be,He),e(He,Ua),e(Be,$a),e(x,Ca),e(x,P),b(Ke,P,null),e(P,ja),e(P,oe),e(oe,xa),e(oe,Ft),e(Ft,Fa),e(oe,qa),e(oe,no),e(no,Pa),e(oe,Ea),e(P,Ma),b(ge,P,null),e(P,za),e(P,ao),e(ao,Da),e(P,Aa),b(Ye,P,null),m(t,Wo,p),m(t,ne,p),e(ne,_e),e(_e,so),b(Re,so,null),e(ne,Wa),e(ne,ro),e(ro,Oa),m(t,Oo,p),m(t,F,p),b(Qe,F,null),e(F,La),e(F,ae),e(ae,Na),e(ae,io),e(io,Va),e(ae,Ia),e(ae,Xe),e(Xe,Ba),e(ae,Ha),e(F,Ka),e(F,Ze),e(Ze,Ya),e(Ze,qt),e(qt,Ra),e(Ze,Qa),e(F,Xa),e(F,Je),e(Je,Za),e(Je,Ge),e(Ge,Ja),e(Je,Ga),e(F,es),e(F,E),b(et,E,null),e(E,ts),e(E,se),e(se,os),e(se,Pt),e(Pt,ns),e(se,as),e(se,co),e(co,ss),e(se,rs),e(E,is),b(ve,E,null),e(E,cs),e(E,lo),e(lo,ls),e(E,ds),b(tt,E,null),m(t,Lo,p),m(t,re,p),e(re,be),e(be,po),b(ot,po,null),e(re,ps),e(re,ho),e(ho,hs),m(t,No,p),m(t,C,p),b(nt,C,null),e(C,ms),e(C,mo),e(mo,us),e(C,fs),e(C,at),e(at,gs),e(at,st),e(st,_s),e(at,vs),e(C,bs),e(C,rt),e(rt,ws),e(rt,Et),e(Et,ys),e(rt,Ss),e(C,Ts),e(C,it),e(it,ks),e(it,ct),e(ct,Us),e(it,$s),e(C,Cs),e(C,M),b(lt,M,null),e(M,js),e(M,ie),e(ie,xs),e(ie,Mt),e(Mt,Fs),e(ie,qs),e(ie,uo),e(uo,Ps),e(ie,Es),e(M,Ms),b(we,M,null),e(M,zs),e(M,fo),e(fo,Ds),e(M,As),b(dt,M,null),m(t,Vo,p),m(t,ce,p),e(ce,ye),e(ye,go),b(pt,go,null),e(ce,Ws),e(ce,_o),e(_o,Os),m(t,Io,p),m(t,q,p),b(ht,q,null),e(q,Ls),e(q,mt),e(mt,Ns),e(mt,ut),e(ut,Vs),e(mt,Is),e(q,Bs),e(q,ft),e(ft,Hs),e(ft,zt),e(zt,Ks),e(ft,Ys),e(q,Rs),e(q,gt),e(gt,Qs),e(gt,_t),e(_t,Xs),e(gt,Zs),e(q,Js),e(q,z),b(vt,z,null),e(z,Gs),e(z,le),e(le,er),e(le,Dt),e(Dt,tr),e(le,or),e(le,vo),e(vo,nr),e(le,ar),e(z,sr),b(Se,z,null),e(z,rr),e(z,bo),e(bo,ir),e(z,cr),b(bt,z,null),Bo=!0},p(t,[p]){const wt={};p&2&&(wt.$$scope={dirty:p,ctx:t}),ge.$set(wt);const wo={};p&2&&(wo.$$scope={dirty:p,ctx:t}),ve.$set(wo);const yo={};p&2&&(yo.$$scope={dirty:p,ctx:t}),we.$set(yo);const 
So={};p&2&&(So.$$scope={dirty:p,ctx:t}),Se.$set(So)},i(t){Bo||(w(f.$$.fragment,t),w(ke.$$.fragment,t),w(Fe.$$.fragment,t),w(qe.$$.fragment,t),w(Ee.$$.fragment,t),w(Me.$$.fragment,t),w(ze.$$.fragment,t),w(Ae.$$.fragment,t),w(Oe.$$.fragment,t),w(Le.$$.fragment,t),w(Ke.$$.fragment,t),w(ge.$$.fragment,t),w(Ye.$$.fragment,t),w(Re.$$.fragment,t),w(Qe.$$.fragment,t),w(et.$$.fragment,t),w(ve.$$.fragment,t),w(tt.$$.fragment,t),w(ot.$$.fragment,t),w(nt.$$.fragment,t),w(lt.$$.fragment,t),w(we.$$.fragment,t),w(dt.$$.fragment,t),w(pt.$$.fragment,t),w(ht.$$.fragment,t),w(vt.$$.fragment,t),w(Se.$$.fragment,t),w(bt.$$.fragment,t),Bo=!0)},o(t){y(f.$$.fragment,t),y(ke.$$.fragment,t),y(Fe.$$.fragment,t),y(qe.$$.fragment,t),y(Ee.$$.fragment,t),y(Me.$$.fragment,t),y(ze.$$.fragment,t),y(Ae.$$.fragment,t),y(Oe.$$.fragment,t),y(Le.$$.fragment,t),y(Ke.$$.fragment,t),y(ge.$$.fragment,t),y(Ye.$$.fragment,t),y(Re.$$.fragment,t),y(Qe.$$.fragment,t),y(et.$$.fragment,t),y(ve.$$.fragment,t),y(tt.$$.fragment,t),y(ot.$$.fragment,t),y(nt.$$.fragment,t),y(lt.$$.fragment,t),y(we.$$.fragment,t),y(dt.$$.fragment,t),y(pt.$$.fragment,t),y(ht.$$.fragment,t),y(vt.$$.fragment,t),y(Se.$$.fragment,t),y(bt.$$.fragment,t),Bo=!1},d(t){o(h),t&&o(k),t&&o(u),S(f),t&&o(ko),t&&o(R),S(ke),t&&o(Uo),t&&o(pe),t&&o($o),t&&o(yt),t&&o(Co),t&&o(St),t&&o(jo),t&&o(Tt),t&&o(xo),t&&o(he),t&&o(Fo),t&&o(O),t&&o(qo),t&&o(Q),S(Fe),t&&o(Po),t&&o(j),S(qe),S(Ee),t&&o(Eo),t&&o(J),S(Me),t&&o(Mo),t&&o(G),S(ze),t&&o(zo),t&&o(ee),S(Ae),t&&o(Do),t&&o(te),S(Oe),t&&o(Ao),t&&o(x),S(Le),S(Ke),S(ge),S(Ye),t&&o(Wo),t&&o(ne),S(Re),t&&o(Oo),t&&o(F),S(Qe),S(et),S(ve),S(tt),t&&o(Lo),t&&o(re),S(ot),t&&o(No),t&&o(C),S(nt),S(lt),S(we),S(dt),t&&o(Vo),t&&o(ce),S(pt),t&&o(Io),t&&o(q),S(ht),S(vt),S(Se),S(bt)}}}const Ci={local:"unispeech",sections:[{local:"overview",title:"Overview"},{local:"transformers.UniSpeechConfig",title:"UniSpeechConfig"},{local:"transformers.models.unispeech.modeling_unispeech.UniSpeechBaseModelOutput",title:"UniSpeech specific outputs"},{local:"transformers.UniSpeechModel",title:"UniSpeechModel"},{local:"transformers.UniSpeechForCTC",title:"UniSpeechForCTC"},{local:"transformers.UniSpeechForSequenceClassification",title:"UniSpeechForSequenceClassification"},{local:"transformers.UniSpeechForPreTraining",title:"UniSpeechForPreTraining"}],title:"UniSpeech"};function ji(A,h,k){let{fw:u}=h;return A.$$set=T=>{"fw"in T&&k(0,u=T.fw)},[u]}class zi extends vi{constructor(h){super();bi(this,h,ji,$i,wi,{fw:0})}}export{zi as default,Ci as metadata};
9,932
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/xlm.mdx-5aeef7aa.js
import{S as f1,i as g1,s as _1,e as a,k as l,w as b,t as n,L as v1,c as r,d as t,m as d,a as i,x as M,h as s,b as p,J as e,g as m,y as w,q as y,o as L,B as $}from"../../chunks/vendor-b1433968.js";import{T as Xe}from"../../chunks/Tip-c3840994.js";import{D as Z}from"../../chunks/Docstring-ff504c58.js";import{C as Ie}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function k1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function T1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function b1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function M1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function w1(I){let 
h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function y1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function L1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function $1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve;return{c(){h=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),k=a("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),F=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Y=n("This second option is useful when using "),D=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=a("code"),pe=n("model(inputs)"),re=n("."),R=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),E=a("ul"),q=a("li"),J=n("a single Tensor with "),W=a("code"),he=n("input_ids"),me=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),fe=l(),z=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),ne=n("model([input_ids, attention_mask])"),_e=n(" or "),B=a("code"),se=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),P=a("li"),ie=n(`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ve=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var T=i(h);x=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),_=r(c,"UL",{});var V=i(_);k=r(V,"LI",{});var Te=i(k);v=s(Te,"having all inputs as keyword arguments (like PyTorch models), or"),Te.forEach(t),g=d(V),F=r(V,"LI",{});var be=i(F);de=s(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),V.forEach(t),G=d(c),X=r(c,"P",{});var C=i(X);Y=s(C,"This second option is useful when using "),D=r(C,"CODE",{});var Me=i(D);ee=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(C,"CODE",{});var ke=i(O);pe=s(ke,"model(inputs)"),ke.forEach(t),re=s(C,"."),C.forEach(t),R=d(c),j=r(c,"P",{});var we=i(j);te=s(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),K=d(c),E=r(c,"UL",{});var A=i(E);q=r(A,"LI",{});var N=i(q);J=s(N,"a single Tensor with "),W=r(N,"CODE",{});var ye=i(W);he=s(ye,"input_ids"),ye.forEach(t),me=s(N," only and nothing else: "),H=r(N,"CODE",{});var Le=i(H);ue=s(Le,"model(inputs_ids)"),Le.forEach(t),N.forEach(t),fe=d(A),z=r(A,"LI",{});var U=i(z);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(U,"CODE",{});var le=i(Q);ne=s(le,"model([input_ids, attention_mask])"),le.forEach(t),_e=s(U," or "),B=r(U,"CODE",{});var $e=i(B);se=s($e,"model([input_ids, attention_mask, token_type_ids])"),$e.forEach(t),U.forEach(t),ae=d(A),P=r(A,"LI",{});var oe=i(P);ie=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(oe,"CODE",{});var xe=i(S);ve=s(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),oe.forEach(t),A.forEach(t)},m(c,T){m(c,h,T),e(h,x),m(c,f,T),m(c,_,T),e(_,k),e(k,v),e(_,g),e(_,F),e(F,de),m(c,G,T),m(c,X,T),e(X,Y),e(X,D),e(D,ee),e(X,ce),e(X,O),e(O,pe),e(X,re),m(c,R,T),m(c,j,T),e(j,te),m(c,K,T),m(c,E,T),e(E,q),e(q,J),e(q,W),e(W,he),e(q,me),e(q,H),e(H,ue),e(E,fe),e(E,z),e(z,ge),e(z,Q),e(Q,ne),e(z,_e),e(z,B),e(B,se),e(E,ae),e(E,P),e(P,ie),e(P,S),e(S,ve)},d(c){c&&t(h),c&&t(f),c&&t(_),c&&t(G),c&&t(X),c&&t(R),c&&t(j),c&&t(K),c&&t(E)}}}function x1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function F1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve;return{c(){h=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),k=a("li"),v=n("having all inputs as keyword arguments (like PyTorch 
models), or"),g=l(),F=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Y=n("This second option is useful when using "),D=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=a("code"),pe=n("model(inputs)"),re=n("."),R=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),E=a("ul"),q=a("li"),J=n("a single Tensor with "),W=a("code"),he=n("input_ids"),me=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),fe=l(),z=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),ne=n("model([input_ids, attention_mask])"),_e=n(" or "),B=a("code"),se=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),P=a("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ve=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var T=i(h);x=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),_=r(c,"UL",{});var V=i(_);k=r(V,"LI",{});var Te=i(k);v=s(Te,"having all inputs as keyword arguments (like PyTorch models), or"),Te.forEach(t),g=d(V),F=r(V,"LI",{});var be=i(F);de=s(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),V.forEach(t),G=d(c),X=r(c,"P",{});var C=i(X);Y=s(C,"This second option is useful when using "),D=r(C,"CODE",{});var Me=i(D);ee=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(C,"CODE",{});var ke=i(O);pe=s(ke,"model(inputs)"),ke.forEach(t),re=s(C,"."),C.forEach(t),R=d(c),j=r(c,"P",{});var we=i(j);te=s(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),K=d(c),E=r(c,"UL",{});var A=i(E);q=r(A,"LI",{});var N=i(q);J=s(N,"a single Tensor with "),W=r(N,"CODE",{});var ye=i(W);he=s(ye,"input_ids"),ye.forEach(t),me=s(N," only and nothing else: "),H=r(N,"CODE",{});var Le=i(H);ue=s(Le,"model(inputs_ids)"),Le.forEach(t),N.forEach(t),fe=d(A),z=r(A,"LI",{});var U=i(z);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(U,"CODE",{});var le=i(Q);ne=s(le,"model([input_ids, attention_mask])"),le.forEach(t),_e=s(U," or "),B=r(U,"CODE",{});var $e=i(B);se=s($e,"model([input_ids, attention_mask, token_type_ids])"),$e.forEach(t),U.forEach(t),ae=d(A),P=r(A,"LI",{});var oe=i(P);ie=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(oe,"CODE",{});var xe=i(S);ve=s(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),oe.forEach(t),A.forEach(t)},m(c,T){m(c,h,T),e(h,x),m(c,f,T),m(c,_,T),e(_,k),e(k,v),e(_,g),e(_,F),e(F,de),m(c,G,T),m(c,X,T),e(X,Y),e(X,D),e(D,ee),e(X,ce),e(X,O),e(O,pe),e(X,re),m(c,R,T),m(c,j,T),e(j,te),m(c,K,T),m(c,E,T),e(E,q),e(q,J),e(q,W),e(W,he),e(q,me),e(q,H),e(H,ue),e(E,fe),e(E,z),e(z,ge),e(z,Q),e(Q,ne),e(z,_e),e(z,B),e(B,se),e(E,ae),e(E,P),e(P,ie),e(P,S),e(S,ve)},d(c){c&&t(h),c&&t(f),c&&t(_),c&&t(G),c&&t(X),c&&t(R),c&&t(j),c&&t(K),c&&t(E)}}}function X1(I){let 
h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function E1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve;return{c(){h=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),k=a("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),F=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Y=n("This second option is useful when using "),D=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=a("code"),pe=n("model(inputs)"),re=n("."),R=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),E=a("ul"),q=a("li"),J=n("a single Tensor with "),W=a("code"),he=n("input_ids"),me=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),fe=l(),z=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),ne=n("model([input_ids, attention_mask])"),_e=n(" or "),B=a("code"),se=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),P=a("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ve=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var T=i(h);x=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),_=r(c,"UL",{});var V=i(_);k=r(V,"LI",{});var Te=i(k);v=s(Te,"having all inputs as keyword arguments (like PyTorch models), or"),Te.forEach(t),g=d(V),F=r(V,"LI",{});var be=i(F);de=s(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),V.forEach(t),G=d(c),X=r(c,"P",{});var C=i(X);Y=s(C,"This second option is useful when using "),D=r(C,"CODE",{});var Me=i(D);ee=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(C,"CODE",{});var ke=i(O);pe=s(ke,"model(inputs)"),ke.forEach(t),re=s(C,"."),C.forEach(t),R=d(c),j=r(c,"P",{});var we=i(j);te=s(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),K=d(c),E=r(c,"UL",{});var A=i(E);q=r(A,"LI",{});var N=i(q);J=s(N,"a single Tensor with "),W=r(N,"CODE",{});var ye=i(W);he=s(ye,"input_ids"),ye.forEach(t),me=s(N," only and nothing else: "),H=r(N,"CODE",{});var Le=i(H);ue=s(Le,"model(inputs_ids)"),Le.forEach(t),N.forEach(t),fe=d(A),z=r(A,"LI",{});var U=i(z);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),Q=r(U,"CODE",{});var le=i(Q);ne=s(le,"model([input_ids, attention_mask])"),le.forEach(t),_e=s(U," or "),B=r(U,"CODE",{});var $e=i(B);se=s($e,"model([input_ids, attention_mask, token_type_ids])"),$e.forEach(t),U.forEach(t),ae=d(A),P=r(A,"LI",{});var oe=i(P);ie=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(oe,"CODE",{});var xe=i(S);ve=s(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),oe.forEach(t),A.forEach(t)},m(c,T){m(c,h,T),e(h,x),m(c,f,T),m(c,_,T),e(_,k),e(k,v),e(_,g),e(_,F),e(F,de),m(c,G,T),m(c,X,T),e(X,Y),e(X,D),e(D,ee),e(X,ce),e(X,O),e(O,pe),e(X,re),m(c,R,T),m(c,j,T),e(j,te),m(c,K,T),m(c,E,T),e(E,q),e(q,J),e(q,W),e(W,he),e(q,me),e(q,H),e(H,ue),e(E,fe),e(E,z),e(z,ge),e(z,Q),e(Q,ne),e(z,_e),e(z,B),e(B,se),e(E,ae),e(E,P),e(P,ie),e(P,S),e(S,ve)},d(c){c&&t(h),c&&t(f),c&&t(_),c&&t(G),c&&t(X),c&&t(R),c&&t(j),c&&t(K),c&&t(E)}}}function z1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function q1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve;return{c(){h=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),k=a("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),F=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Y=n("This second option is useful when using "),D=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=a("code"),pe=n("model(inputs)"),re=n("."),R=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),E=a("ul"),q=a("li"),J=n("a single Tensor with "),W=a("code"),he=n("input_ids"),me=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),fe=l(),z=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),ne=n("model([input_ids, attention_mask])"),_e=n(" or "),B=a("code"),se=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),P=a("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ve=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var T=i(h);x=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),_=r(c,"UL",{});var V=i(_);k=r(V,"LI",{});var Te=i(k);v=s(Te,"having all inputs as keyword arguments (like PyTorch models), or"),Te.forEach(t),g=d(V),F=r(V,"LI",{});var be=i(F);de=s(be,"having all inputs as a list, tuple or dict in the first positional 
arguments."),be.forEach(t),V.forEach(t),G=d(c),X=r(c,"P",{});var C=i(X);Y=s(C,"This second option is useful when using "),D=r(C,"CODE",{});var Me=i(D);ee=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(C,"CODE",{});var ke=i(O);pe=s(ke,"model(inputs)"),ke.forEach(t),re=s(C,"."),C.forEach(t),R=d(c),j=r(c,"P",{});var we=i(j);te=s(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),K=d(c),E=r(c,"UL",{});var A=i(E);q=r(A,"LI",{});var N=i(q);J=s(N,"a single Tensor with "),W=r(N,"CODE",{});var ye=i(W);he=s(ye,"input_ids"),ye.forEach(t),me=s(N," only and nothing else: "),H=r(N,"CODE",{});var Le=i(H);ue=s(Le,"model(inputs_ids)"),Le.forEach(t),N.forEach(t),fe=d(A),z=r(A,"LI",{});var U=i(z);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(U,"CODE",{});var le=i(Q);ne=s(le,"model([input_ids, attention_mask])"),le.forEach(t),_e=s(U," or "),B=r(U,"CODE",{});var $e=i(B);se=s($e,"model([input_ids, attention_mask, token_type_ids])"),$e.forEach(t),U.forEach(t),ae=d(A),P=r(A,"LI",{});var oe=i(P);ie=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(oe,"CODE",{});var xe=i(S);ve=s(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),oe.forEach(t),A.forEach(t)},m(c,T){m(c,h,T),e(h,x),m(c,f,T),m(c,_,T),e(_,k),e(k,v),e(_,g),e(_,F),e(F,de),m(c,G,T),m(c,X,T),e(X,Y),e(X,D),e(D,ee),e(X,ce),e(X,O),e(O,pe),e(X,re),m(c,R,T),m(c,j,T),e(j,te),m(c,K,T),m(c,E,T),e(E,q),e(q,J),e(q,W),e(W,he),e(q,me),e(q,H),e(H,ue),e(E,fe),e(E,z),e(z,ge),e(z,Q),e(Q,ne),e(z,_e),e(z,B),e(B,se),e(E,ae),e(E,P),e(P,ie),e(P,S),e(S,ve)},d(c){c&&t(h),c&&t(f),c&&t(_),c&&t(G),c&&t(X),c&&t(R),c&&t(j),c&&t(K),c&&t(E)}}}function C1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function j1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve;return{c(){h=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),k=a("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),F=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Y=n("This second option is useful when using "),D=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=a("code"),pe=n("model(inputs)"),re=n("."),R=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),K=l(),E=a("ul"),q=a("li"),J=n("a single Tensor with "),W=a("code"),he=n("input_ids"),me=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),fe=l(),z=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),ne=n("model([input_ids, attention_mask])"),_e=n(" or "),B=a("code"),se=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),P=a("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ve=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var T=i(h);x=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),_=r(c,"UL",{});var V=i(_);k=r(V,"LI",{});var Te=i(k);v=s(Te,"having all inputs as keyword arguments (like PyTorch models), or"),Te.forEach(t),g=d(V),F=r(V,"LI",{});var be=i(F);de=s(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),V.forEach(t),G=d(c),X=r(c,"P",{});var C=i(X);Y=s(C,"This second option is useful when using "),D=r(C,"CODE",{});var Me=i(D);ee=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(C,"CODE",{});var ke=i(O);pe=s(ke,"model(inputs)"),ke.forEach(t),re=s(C,"."),C.forEach(t),R=d(c),j=r(c,"P",{});var we=i(j);te=s(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),K=d(c),E=r(c,"UL",{});var A=i(E);q=r(A,"LI",{});var N=i(q);J=s(N,"a single Tensor with "),W=r(N,"CODE",{});var ye=i(W);he=s(ye,"input_ids"),ye.forEach(t),me=s(N," only and nothing else: "),H=r(N,"CODE",{});var Le=i(H);ue=s(Le,"model(inputs_ids)"),Le.forEach(t),N.forEach(t),fe=d(A),z=r(A,"LI",{});var U=i(z);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(U,"CODE",{});var le=i(Q);ne=s(le,"model([input_ids, attention_mask])"),le.forEach(t),_e=s(U," or "),B=r(U,"CODE",{});var $e=i(B);se=s($e,"model([input_ids, attention_mask, token_type_ids])"),$e.forEach(t),U.forEach(t),ae=d(A),P=r(A,"LI",{});var oe=i(P);ie=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(oe,"CODE",{});var xe=i(S);ve=s(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),oe.forEach(t),A.forEach(t)},m(c,T){m(c,h,T),e(h,x),m(c,f,T),m(c,_,T),e(_,k),e(k,v),e(_,g),e(_,F),e(F,de),m(c,G,T),m(c,X,T),e(X,Y),e(X,D),e(D,ee),e(X,ce),e(X,O),e(O,pe),e(X,re),m(c,R,T),m(c,j,T),e(j,te),m(c,K,T),m(c,E,T),e(E,q),e(q,J),e(q,W),e(W,he),e(q,me),e(q,H),e(H,ue),e(E,fe),e(E,z),e(z,ge),e(z,Q),e(Q,ne),e(z,_e),e(z,B),e(B,se),e(E,ae),e(E,P),e(P,ie),e(P,S),e(S,ve)},d(c){c&&t(h),c&&t(f),c&&t(_),c&&t(G),c&&t(X),c&&t(R),c&&t(j),c&&t(K),c&&t(E)}}}function P1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function A1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve;return{c(){h=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),k=a("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),F=a("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Y=n("This second option is useful when using "),D=a("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=a("code"),pe=n("model(inputs)"),re=n("."),R=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),E=a("ul"),q=a("li"),J=n("a single Tensor with "),W=a("code"),he=n("input_ids"),me=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),fe=l(),z=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),ne=n("model([input_ids, attention_mask])"),_e=n(" or "),B=a("code"),se=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),P=a("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ve=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=r(c,"P",{});var T=i(h);x=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),_=r(c,"UL",{});var V=i(_);k=r(V,"LI",{});var Te=i(k);v=s(Te,"having all inputs as keyword arguments (like PyTorch models), or"),Te.forEach(t),g=d(V),F=r(V,"LI",{});var be=i(F);de=s(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),V.forEach(t),G=d(c),X=r(c,"P",{});var C=i(X);Y=s(C,"This second option is useful when using "),D=r(C,"CODE",{});var Me=i(D);ee=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ce=s(C,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(C,"CODE",{});var ke=i(O);pe=s(ke,"model(inputs)"),ke.forEach(t),re=s(C,"."),C.forEach(t),R=d(c),j=r(c,"P",{});var we=i(j);te=s(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),K=d(c),E=r(c,"UL",{});var A=i(E);q=r(A,"LI",{});var N=i(q);J=s(N,"a single Tensor with "),W=r(N,"CODE",{});var ye=i(W);he=s(ye,"input_ids"),ye.forEach(t),me=s(N," only and nothing else: "),H=r(N,"CODE",{});var Le=i(H);ue=s(Le,"model(inputs_ids)"),Le.forEach(t),N.forEach(t),fe=d(A),z=r(A,"LI",{});var U=i(z);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(U,"CODE",{});var le=i(Q);ne=s(le,"model([input_ids, attention_mask])"),le.forEach(t),_e=s(U," or "),B=r(U,"CODE",{});var $e=i(B);se=s($e,"model([input_ids, attention_mask, token_type_ids])"),$e.forEach(t),U.forEach(t),ae=d(A),P=r(A,"LI",{});var oe=i(P);ie=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(oe,"CODE",{});var xe=i(S);ve=s(xe,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),xe.forEach(t),oe.forEach(t),A.forEach(t)},m(c,T){m(c,h,T),e(h,x),m(c,f,T),m(c,_,T),e(_,k),e(k,v),e(_,g),e(_,F),e(F,de),m(c,G,T),m(c,X,T),e(X,Y),e(X,D),e(D,ee),e(X,ce),e(X,O),e(O,pe),e(X,re),m(c,R,T),m(c,j,T),e(j,te),m(c,K,T),m(c,E,T),e(E,q),e(q,J),e(q,W),e(W,he),e(q,me),e(q,H),e(H,ue),e(E,fe),e(E,z),e(z,ge),e(z,Q),e(Q,ne),e(z,_e),e(z,B),e(B,se),e(E,ae),e(E,P),e(P,ie),e(P,S),e(S,ve)},d(c){c&&t(h),c&&t(f),c&&t(_),c&&t(G),c&&t(X),c&&t(R),c&&t(j),c&&t(K),c&&t(E)}}}function S1(I){let h,x,f,_,k;return{c(){h=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=r(v,"P",{});var g=i(h);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var F=i(f);_=s(F,"Module"),F.forEach(t),k=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(v,g){m(v,h,g),e(h,x),e(h,f),e(f,_),e(h,k)},d(v){v&&t(h)}}}function N1(I){let h,x,f,_,k,v,g,F,de,G,X,Y,D,ee,ce,O,pe,re,R,j,te,K,E,q,J,W,he,me,H,ue,fe,z,ge,Q,ne,_e,B,se,ae,P,ie,S,ve,c,T,V,Te,be,C,Me,ke,we,A,N,ye,Le,U,le,$e,oe,xe,Qd,ln,Bd,Ud,fl,It,Mo,fr,dn,Rd,gr,Vd,gl,De,cn,Gd,gt,Jd,fa,Kd,Yd,ga,Zd,ec,pn,tc,oc,nc,Dt,sc,_a,ac,rc,va,ic,lc,dc,_r,cc,pc,hn,_l,Ot,wo,vr,mn,hc,kr,mc,vl,Fe,un,uc,Tr,fc,gc,Oe,br,_c,vc,Mr,kc,Tc,wr,bc,Mc,_t,wc,yr,yc,Lc,Lr,$c,xc,$r,Fc,Xc,Ec,fn,zc,xr,qc,Cc,jc,gn,Pc,Fr,Ac,Sc,Nc,_n,Ic,ka,Dc,Oc,Wc,vt,vn,Hc,Xr,Qc,Bc,kn,Ta,Uc,Er,Rc,Vc,ba,Gc,zr,Jc,Kc,yo,Tn,Yc,bn,Zc,qr,ep,tp,op,lt,Mn,np,Cr,sp,ap,wn,rp,Wt,ip,jr,lp,dp,Pr,cp,pp,hp,Ar,kl,Ht,Lo,Sr,yn,mp,Nr,up,Tl,Qt,Ln,fp,$n,gp,Ir,_p,vp,bl,Bt,$o,Dr,xn,kp,Or,Tp,Ml,We,Fn,bp,Wr,Mp,wp,Xn,yp,Ma,Lp,$p,xp,En,Fp,zn,Xp,Ep,zp,Ge,qn,qp,Ut,Cp,wa,jp,Pp,Hr,Ap,Sp,Np,xo,Ip,Qr,Dp,Op,Cn,wl,Rt,Fo,Br,jn,Wp,Ur,Hp,yl,He,Pn,Qp,Rr,Bp,Up,An,Rp,ya,Vp,Gp,Jp,Sn,Kp,Nn,Yp,Zp,eh,Je,In,th,Vt,oh,La,nh,sh,Vr,ah,rh,ih,Xo,lh,Gr,dh,ch,Dn,Ll,Gt,Eo,Jr,On,ph,Kr,hh,$l,Qe,Wn,mh,Yr,uh,fh,Hn,gh,$a,_h,vh,kh,Qn,Th,Bn,bh,Mh,wh,Ee,Un,yh,Jt,Lh,xa,$h,xh,Zr,Fh,Xh,Eh,zo,zh,ei,qh,Ch,Rn,jh,ti,Ph,Ah,Vn,xl,Kt,qo,oi,Gn,Sh,ni,Nh,Fl,Be,Jn,Ih,si,Dh,Oh,Kn,Wh,Fa,Hh,Qh,Bh,Yn,Uh,Zn,Rh,Vh,Gh,Ke,es,Jh,Yt,Kh,Xa,Yh,Zh,ai,em,tm,om,Co,nm,ri,sm,am,ts,Xl,Zt,jo,ii,os,rm,li,im,El,Ue,ns,lm,di,dm,cm,ss,pm,Ea,hm,mm,um,as,fm,rs,gm,_m,vm,Ye,is,km,eo,Tm,za,bm,Mm,ci,wm,ym,Lm,Po,$m,pi,xm,Fm,ls,zl,to,Ao,hi,ds,Xm,mi,Em,ql,Re,cs,zm,oo,qm,ui,Cm,jm,fi,Pm,Am,Sm,ps,Nm,qa,Im,Dm,Om,hs,Wm,ms,Hm,Qm,Bm,Ze,us,Um,no,Rm,Ca,Vm,Gm,gi,Jm,Km,Ym,So,Zm,_i,eu,tu,fs,Cl,so,No,vi,gs,ou,ki,nu,jl,Ve,_s,su,ao,au,Ti,ru,iu,bi,lu,du,cu,vs,pu,ja,hu,mu,uu,ks,fu,Ts,gu,_u,vu,et,bs,ku,ro,Tu,Pa,bu,Mu,Mi,wu,yu,Lu,Io,$u,wi,xu,Fu,Ms,Pl,io,Do,yi,ws,Xu,Li,Eu,Al,qe,ys,zu,$i,qu,Cu,Ls,ju,Aa,Pu,Au,Su,$s,Nu,xs,Iu,Du,Ou,Oo,Wu,tt,Fs,Hu,lo,Qu,Sa,Bu,Uu,xi,Ru,Vu,Gu,Wo,Ju,Fi,Ku,Yu,Xs,Sl,co,Ho,Xi,Es,Zu,Ei,ef,Nl,Ce,zs,tf,zi,of,nf,qs,sf,Na,af,rf,lf,Cs,df,js,cf,pf,hf,Qo,mf,ot,Ps,uf,po,ff,Ia,gf,_f,qi,vf,kf,Tf,Bo,bf,Ci,Mf,wf,As,Il,ho,Uo,ji,Ss,yf,Pi,Lf,Dl,je,Ns,$f,Ai,xf,Ff,Is,Xf,Da,Ef,zf,qf,Ds,Cf,Os,jf,Pf,Af,Ro,Sf,nt,Ws,Nf,mo,If,Oa,Df,Of,Si,Wf,Hf,Qf,Vo,Bf,Ni,Uf,Rf,Hs,Ol,uo,Go,Ii,Qs,Vf,Di,Gf,Wl,Pe,Bs,Jf,Oi,Kf,Yf,Us,Zf,Wa,eg,tg,og,Rs,ng,Vs,sg,ag,rg,Jo,ig,st,Gs,lg,fo,dg,Ha,cg,pg,Wi,hg,mg,ug,Ko,fg,Hi,gg,_g,Js,Hl,go,Yo,Qi,Ks,vg,Bi,kg,Ql,Ae,Ys,Tg,Ui,bg,Mg,Zs,wg,Qa,yg,Lg,$g,ea,xg,ta,Fg,Xg,Eg,Zo,zg,at,oa,qg,_o,Cg,
Ba,jg,Pg,Ri,Ag,Sg,Ng,en,Ig,Vi,Dg,Og,na,Bl,vo,tn,Gi,sa,Wg,Ji,Hg,Ul,Se,aa,Qg,ko,Bg,Ki,Ug,Rg,Yi,Vg,Gg,Jg,ra,Kg,Ua,Yg,Zg,e_,ia,t_,la,o_,n_,s_,on,a_,rt,da,r_,To,i_,Ra,l_,d_,Zi,c_,p_,h_,nn,m_,el,u_,f_,ca,Rl;return v=new ze({}),ee=new ze({}),dn=new ze({}),cn=new Z({props:{name:"class transformers.XLMConfig",anchor:"transformers.XLMConfig",parameters:[{name:"vocab_size",val:" = 30145"},{name:"emb_dim",val:" = 2048"},{name:"n_layers",val:" = 12"},{name:"n_heads",val:" = 16"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"gelu_activation",val:" = True"},{name:"sinusoidal_embeddings",val:" = False"},{name:"causal",val:" = False"},{name:"asm",val:" = False"},{name:"n_langs",val:" = 1"},{name:"use_lang_emb",val:" = True"},{name:"max_position_embeddings",val:" = 512"},{name:"embed_init_std",val:" = 0.02209708691207961"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"init_std",val:" = 0.02"},{name:"bos_index",val:" = 0"},{name:"eos_index",val:" = 1"},{name:"pad_index",val:" = 2"},{name:"unk_index",val:" = 3"},{name:"mask_index",val:" = 5"},{name:"is_encoder",val:" = True"},{name:"summary_type",val:" = 'first'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = None"},{name:"summary_proj_to_labels",val:" = True"},{name:"summary_first_dropout",val:" = 0.1"},{name:"start_n_top",val:" = 5"},{name:"end_n_top",val:" = 5"},{name:"mask_token_id",val:" = 0"},{name:"lang_id",val:" = 0"},{name:"pad_token_id",val:" = 2"},{name:"bos_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/configuration_xlm.py#L37",parametersDescription:[{anchor:"transformers.XLMConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30145) &#x2014; Vocabulary size of the BERT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMModel">XLMModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMModel">TFXLMModel</a>.`,name:"vocab_size"},{anchor:"transformers.XLMConfig.emb_dim",description:`<strong>emb_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"emb_dim"},{anchor:"transformers.XLMConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.XLMConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.XLMConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.XLMConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the attention mechanism`,name:"attention_dropout"},{anchor:"transformers.XLMConfig.gelu_activation",description:`<strong>gelu_activation</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use <em>gelu</em> for the activations instead of <em>relu</em>.`,name:"gelu_activation"},{anchor:"transformers.XLMConfig.sinusoidal_embeddings",description:`<strong>sinusoidal_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.`,name:"sinusoidal_embeddings"},{anchor:"transformers.XLMConfig.causal",description:`<strong>causal</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in order to only attend to the left-side context instead if a bidirectional context.`,name:"causal"},{anchor:"transformers.XLMConfig.asm",description:`<strong>asm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction layer.`,name:"asm"},{anchor:"transformers.XLMConfig.n_langs",description:`<strong>n_langs</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of languages the model handles. Set to 1 for monolingual models.`,name:"n_langs"},{anchor:"transformers.XLMConfig.use_lang_emb",description:`<strong>use_lang_emb</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use language embeddings. 
Some models use additional language embeddings, see <a href="http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings" rel="nofollow">the multilingual models page</a> for information on how to use them.`,name:"use_lang_emb"},{anchor:"transformers.XLMConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.XLMConfig.embed_init_std",description:`<strong>embed_init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 2048^-0.5) &#x2014; The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.`,name:"embed_init_std"},{anchor:"transformers.XLMConfig.init_std",description:`<strong>init_std</strong> (<code>int</code>, <em>optional</em>, defaults to 50257) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the embedding matrices.`,name:"init_std"},{anchor:"transformers.XLMConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.XLMConfig.bos_index",description:`<strong>bos_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index of the beginning of sentence token in the vocabulary.`,name:"bos_index"},{anchor:"transformers.XLMConfig.eos_index",description:`<strong>eos_index</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The index of the end of sentence token in the vocabulary.`,name:"eos_index"},{anchor:"transformers.XLMConfig.pad_index",description:`<strong>pad_index</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The index of the padding token in the vocabulary.`,name:"pad_index"},{anchor:"transformers.XLMConfig.unk_index",description:`<strong>unk_index</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The index of the unknown token in the vocabulary.`,name:"unk_index"},{anchor:"transformers.XLMConfig.mask_index",description:`<strong>mask_index</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The index of the masking token in the vocabulary.`,name:"mask_index"},{anchor:"transformers.XLMConfig.is_encoder(bool,",description:`<strong>is_encoder(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.`,name:"is_encoder(bool,"},{anchor:"transformers.XLMConfig.summary_type",description:`<strong>summary_type</strong> (<code>string</code>, <em>optional</em>, defaults to &#x201C;first&#x201D;) &#x2014; Argument used when doing sequence summary. 
Used in the sequence classification and multiple choice models.</p> <p>Has to be one of the following options:</p> <ul> <li><code>&quot;last&quot;</code>: Take the last token hidden state (like XLNet).</li> <li><code>&quot;first&quot;</code>: Take the first token hidden state (like BERT).</li> <li><code>&quot;mean&quot;</code>: Take the mean of all tokens hidden states.</li> <li><code>&quot;cls_index&quot;</code>: Supply a Tensor of classification token position (like GPT/GPT-2).</li> <li><code>&quot;attn&quot;</code>: Not implemented now, use multi-head attention.</li> </ul>`,name:"summary_type"},{anchor:"transformers.XLMConfig.summary_use_proj",description:`<strong>summary_use_proj</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Whether or not to add a projection after the vector extraction.`,name:"summary_use_proj"},{anchor:"transformers.XLMConfig.summary_activation",description:`<strong>summary_activation</strong> (<code>str</code>, <em>optional</em>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Pass <code>&quot;tanh&quot;</code> for a tanh activation to the output, any other value will result in no activation.`,name:"summary_activation"},{anchor:"transformers.XLMConfig.summary_proj_to_labels",description:`<strong>summary_proj_to_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Used in the sequence classification and multiple choice models.</p> <p>Whether the projection outputs should have <code>config.num_labels</code> or <code>config.hidden_size</code> classes.`,name:"summary_proj_to_labels"},{anchor:"transformers.XLMConfig.summary_first_dropout",description:`<strong>summary_first_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Used in the sequence classification and multiple choice models.</p> <p>The dropout ratio to be used after the projection and activation.`,name:"summary_first_dropout"},{anchor:"transformers.XLMConfig.start_n_top",description:`<strong>start_n_top</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Used in the SQuAD evaluation script.`,name:"start_n_top"},{anchor:"transformers.XLMConfig.end_n_top",description:`<strong>end_n_top</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Used in the SQuAD evaluation script.`,name:"end_n_top"},{anchor:"transformers.XLMConfig.mask_token_id",description:`<strong>mask_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Model agnostic parameter to identify masked tokens when generating text in an MLM context.`,name:"mask_token_id"},{anchor:"transformers.XLMConfig.lang_id",description:`<strong>lang_id</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The ID of the language used by the model. 
This parameter is used when generating text in a given language.`,name:"lang_id"}]}}),hn=new Ie({props:{code:`from transformers import XLMConfig, XLMModel # Initializing a XLM configuration configuration = XLMConfig() # Initializing a model from the configuration model = XLMModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMConfig, XLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a XLM configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = XLMConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),mn=new ze({}),un=new Z({props:{name:"class transformers.XLMTokenizer",anchor:"transformers.XLMTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"unk_token",val:" = '<unk>'"},{name:"bos_token",val:" = '<s>'"},{name:"sep_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '</s>'"},{name:"mask_token",val:" = '<special1>'"},{name:"additional_special_tokens",val:" = ['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']"},{name:"lang2id",val:" = None"},{name:"id2lang",val:" = None"},{name:"do_lowercase_and_remove_accent",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/tokenization_xlm.py#L530",parametersDescription:[{anchor:"transformers.XLMTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Vocabulary file.`,name:"vocab_file"},{anchor:"transformers.XLMTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Merges file.`,name:"merges_file"},{anchor:"transformers.XLMTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.XLMTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.XLMTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.XLMTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.XLMTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.XLMTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;special1&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.XLMTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;special0&gt;&quot;,&quot;&lt;special1&gt;&quot;,&quot;&lt;special2&gt;&quot;,&quot;&lt;special3&gt;&quot;,&quot;&lt;special4&gt;&quot;,&quot;&lt;special5&gt;&quot;,&quot;&lt;special6&gt;&quot;,&quot;&lt;special7&gt;&quot;,&quot;&lt;special8&gt;&quot;,&quot;&lt;special9&gt;&quot;]</code>) &#x2014; List of additional special tokens.`,name:"additional_special_tokens"},{anchor:"transformers.XLMTokenizer.lang2id",description:`<strong>lang2id</strong> (<code>Dict[str, int]</code>, <em>optional</em>) &#x2014; Dictionary mapping languages string identifiers to their IDs.`,name:"lang2id"},{anchor:"transformers.XLMTokenizer.id2lang",description:`<strong>id2lang</strong> (<code>Dict[int, str]</code>, <em>optional</em>) &#x2014; Dictionary mapping language IDs to their string identifiers.`,name:"id2lang"},{anchor:"transformers.XLMTokenizer.do_lowercase_and_remove_accent",description:`<strong>do_lowercase_and_remove_accent</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to lowercase and remove accents when tokenizing.`,name:"do_lowercase_and_remove_accent"}]}}),vn=new Z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.XLMTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/tokenization_xlm.py#L865",parametersDescription:[{anchor:"transformers.XLMTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be 
added.`,name:"token_ids_0"},{anchor:"transformers.XLMTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Tn=new Z({props:{name:"get_special_tokens_mask",anchor:"transformers.XLMTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/tokenization_xlm.py#L892",parametersDescription:[{anchor:"transformers.XLMTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.XLMTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Mn=new Z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.XLMTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/tokenization_xlm.py#L920",parametersDescription:[{anchor:"transformers.XLMTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),wn=new Ie({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),yn=new ze({}),Ln=new Z({props:{name:"class transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput",anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput",parameters:[{name:"loss",val:": 
typing.Optional[torch.FloatTensor] = None"},{name:"start_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"end_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_logits",val:": typing.Optional[torch.FloatTensor] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L270",parametersDescription:[{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.`,name:"loss"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.start_top_log_probs",description:`<strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_log_probs"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.start_top_index",description:`<strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_index"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.end_top_log_probs",description:`<strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_log_probs"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.end_top_index",description:`<strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_index"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.cls_logits",description:`<strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the <code>is_impossible</code> label of the 
answers.`,name:"cls_logits"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),xn=new ze({}),Fn=new Z({props:{name:"class transformers.XLMModel",anchor:"transformers.XLMModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L400",parametersDescription:[{anchor:"transformers.XLMModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qn=new Z({props:{name:"forward",anchor:"transformers.XLMModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L487",parametersDescription:[{anchor:"transformers.XLMModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMModel.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMModel.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. 
Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMModel.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xo=new Xe({props:{$$slots:{default:[k1]},$$scope:{ctx:I}}}),Cn=new Ie({props:{code:`from transformers import XLMTokenizer, XLMModel import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMModel.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMModel.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),jn=new ze({}),Pn=new Z({props:{name:"class transformers.XLMWithLMHeadModel",anchor:"transformers.XLMWithLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L682",parametersDescription:[{anchor:"transformers.XLMWithLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),In=new Z({props:{name:"forward",anchor:"transformers.XLMWithLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L710",parametersDescription:[{anchor:"transformers.XLMWithLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMWithLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMWithLMHeadModel.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMWithLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMWithLMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMWithLMHeadModel.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMWithLMHeadModel.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMWithLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMWithLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMWithLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMWithLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMWithLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMWithLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, 
sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xo=new Xe({props:{$$slots:{default:[T1]},$$scope:{ctx:I}}}),Dn=new Ie({props:{code:`from transformers import XLMTokenizer, XLMWithLMHeadModel import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("The capital of France is <special1>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;special1&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),On=new ze({}),Wn=new Z({props:{name:"class transformers.XLMForSequenceClassification",anchor:"transformers.XLMForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L778",parametersDescription:[{anchor:"transformers.XLMForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Un=new Z({props:{name:"forward",anchor:"transformers.XLMForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L790",parametersDescription:[{anchor:"transformers.XLMForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForSequenceClassification.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForSequenceClassification.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForSequenceClassification.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),zo=new Xe({props:{$$slots:{default:[b1]},$$scope:{ctx:I}}}),Rn=new Ie({props:{code:`from transformers import XLMTokenizer, XLMForSequenceClassification import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") 
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Vn=new Ie({props:{code:`from transformers import XLMTokenizer, XLMForSequenceClassification import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Gn=new ze({}),Jn=new Z({props:{name:"class 
transformers.XLMForMultipleChoice",anchor:"transformers.XLMForMultipleChoice",parameters:[{name:"config",val:""},{name:"*inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L1198",parametersDescription:[{anchor:"transformers.XLMForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),es=new Z({props:{name:"forward",anchor:"transformers.XLMForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L1209",parametersDescription:[{anchor:"transformers.XLMForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForMultipleChoice.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForMultipleChoice.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForMultipleChoice.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Co=new Xe({props:{$$slots:{default:[M1]},$$scope:{ctx:I}}}),ts=new Ie({props:{code:`from transformers import XLMTokenizer, XLMForMultipleChoice import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForMultipleChoice.from_pretrained('xlm-mlm-en-2048') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),os=new ze({}),ns=new Z({props:{name:"class transformers.XLMForTokenClassification",anchor:"transformers.XLMForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L1104",parametersDescription:[{anchor:"transformers.XLMForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),is=new Z({props:{name:"forward",anchor:"transformers.XLMForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L1116",parametersDescription:[{anchor:"transformers.XLMForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForTokenClassification.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForTokenClassification.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForTokenClassification.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Po=new Xe({props:{$$slots:{default:[w1]},$$scope:{ctx:I}}}),ls=new Ie({props:{code:`from transformers import XLMTokenizer, XLMForTokenClassification import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForTokenClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ds=new ze({}),cs=new Z({props:{name:"class transformers.XLMForQuestionAnsweringSimple",anchor:"transformers.XLMForQuestionAnsweringSimple",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L880",parametersDescription:[{anchor:"transformers.XLMForQuestionAnsweringSimple.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),us=new Z({props:{name:"forward",anchor:"transformers.XLMForQuestionAnsweringSimple.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L890",parametersDescription:[{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. 
Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),So=new Xe({props:{$$slots:{default:[y1]},$$scope:{ctx:I}}}),fs=new Ie({props:{code:`from transformers import XLMTokenizer, XLMForQuestionAnsweringSimple import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, 
start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForQuestionAnsweringSimple <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForQuestionAnsweringSimple.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),gs=new ze({}),_s=new Z({props:{name:"class transformers.XLMForQuestionAnswering",anchor:"transformers.XLMForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L985",parametersDescription:[{anchor:"transformers.XLMForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bs=new Z({props:{name:"forward",anchor:"transformers.XLMForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"is_impossible",val:" = None"},{name:"cls_index",val:" = None"},{name:"p_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L995",parametersDescription:[{anchor:"transformers.XLMForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForQuestionAnswering.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForQuestionAnswering.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForQuestionAnswering.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.XLMForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"},{anchor:"transformers.XLMForQuestionAnswering.forward.is_impossible",description:`<strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels whether a question has an answer or no answer (SQuAD 2.0)`,name:"is_impossible"},{anchor:"transformers.XLMForQuestionAnswering.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the classification token to use as input for computing plausibility of the answer.`,name:"cls_index"},{anchor:"transformers.XLMForQuestionAnswering.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Optional mask of tokens which can&#x2019;t be in answers (e.g. [CLS], [PAD], &#x2026;). 1.0 means token should be masked. 
0.0 mean token is not masked.`,name:"p_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput" >transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</p> </li> <li> <p><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top config.start_n_top start token possibilities (beam-search).</p> </li> <li> <p><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top config.start_n_top start token possibilities (beam-search).</p> </li> <li> <p><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</p> </li> <li> <p><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</p> </li> <li> <p><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the <code>is_impossible</code> label of the answers.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights 
after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput" >transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Io=new Xe({props:{$$slots:{default:[L1]},$$scope:{ctx:I}}}),Ms=new Ie({props:{code:`from transformers import XLMTokenizer, XLMForQuestionAnswering import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),ws=new ze({}),ys=new Z({props:{name:"class transformers.TFXLMModel",anchor:"transformers.TFXLMModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L698",parametersDescription:[{anchor:"transformers.TFXLMModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Oo=new Xe({props:{$$slots:{default:[$1]},$$scope:{ctx:I}}}),Fs=new Z({props:{name:"call",anchor:"transformers.TFXLMModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L703",parametersDescription:[{anchor:"transformers.TFXLMModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMModel.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMModel.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMModel.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Wo=new Xe({props:{$$slots:{default:[x1]},$$scope:{ctx:I}}}),Xs=new Ie({props:{code:`from transformers import XLMTokenizer, TFXLMModel import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMModel.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMModel.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Es=new ze({}),zs=new Z({props:{name:"class transformers.TFXLMWithLMHeadModel",anchor:"transformers.TFXLMWithLMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L829",parametersDescription:[{anchor:"transformers.TFXLMWithLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qo=new Xe({props:{$$slots:{default:[F1]},$$scope:{ctx:I}}}),Ps=new Z({props:{name:"call",anchor:"transformers.TFXLMWithLMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L856",parametersDescription:[{anchor:"transformers.TFXLMWithLMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMWithLMHeadModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMWithLMHeadModel.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMWithLMHeadModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMWithLMHeadModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMWithLMHeadModel.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMWithLMHeadModel.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). 
Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMWithLMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMWithLMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMWithLMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMWithLMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMWithLMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMWithLMHeadModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <code>transformers.models.xlm.modeling_tf_xlm.TFXLMWithLMHeadModelOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.xlm.modeling_tf_xlm.TFXLMWithLMHeadModelOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),Bo=new Xe({props:{$$slots:{default:[X1]},$$scope:{ctx:I}}}),As=new Ie({props:{code:`from transformers import XLMTokenizer, TFXLMWithLMHeadModel import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; 
</span>logits = outputs.logits`}}),Ss=new ze({}),Ns=new Z({props:{name:"class transformers.TFXLMForSequenceClassification",anchor:"transformers.TFXLMForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L938",parametersDescription:[{anchor:"transformers.TFXLMForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ro=new Xe({props:{$$slots:{default:[E1]},$$scope:{ctx:I}}}),Ws=new Z({props:{name:"call",anchor:"transformers.TFXLMForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L946",parametersDescription:[{anchor:"transformers.TFXLMForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForSequenceClassification.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. 
Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForSequenceClassification.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForSequenceClassification.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFXLMForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Vo=new Xe({props:{$$slots:{default:[z1]},$$scope:{ctx:I}}}),Hs=new Ie({props:{code:`from transformers import XLMTokenizer, TFXLMForSequenceClassification import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Qs=new ze({}),Bs=new Z({props:{name:"class transformers.TFXLMForMultipleChoice",anchor:"transformers.TFXLMForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1042",parametersDescription:[{anchor:"transformers.TFXLMForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jo=new Xe({props:{$$slots:{default:[q1]},$$scope:{ctx:I}}}),Gs=new Z({props:{name:"call",anchor:"transformers.TFXLMForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1071",parametersDescription:[{anchor:"transformers.TFXLMForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForMultipleChoice.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForMultipleChoice.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForMultipleChoice.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). 
Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ko=new Xe({props:{$$slots:{default:[C1]},$$scope:{ctx:I}}}),Js=new Ie({props:{code:`from transformers import XLMTokenizer, TFXLMForMultipleChoice import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForMultipleChoice.from_pretrained('xlm-mlm-en-2048') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ks=new ze({}),Ys=new Z({props:{name:"class transformers.TFXLMForTokenClassification",anchor:"transformers.TFXLMForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1210",parametersDescription:[{anchor:"transformers.TFXLMForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Zo=new Xe({props:{$$slots:{default:[j1]},$$scope:{ctx:I}}}),oa=new Z({props:{name:"call",anchor:"transformers.TFXLMForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1221",parametersDescription:[{anchor:"transformers.TFXLMForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForTokenClassification.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForTokenClassification.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForTokenClassification.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFXLMForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),en=new Xe({props:{$$slots:{default:[P1]},$$scope:{ctx:I}}}),na=new Ie({props:{code:`from transformers import XLMTokenizer, TFXLMForTokenClassification import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForTokenClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is 
cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sa=new ze({}),aa=new Z({props:{name:"class transformers.TFXLMForQuestionAnsweringSimple",anchor:"transformers.TFXLMForQuestionAnsweringSimple",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1317",parametersDescription:[{anchor:"transformers.TFXLMForQuestionAnsweringSimple.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig">XLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),on=new Xe({props:{$$slots:{default:[A1]},$$scope:{ctx:I}}}),da=new Z({props:{name:"call",anchor:"transformers.TFXLMForQuestionAnsweringSimple.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1325",parametersDescription:[{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. 
Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),nn=new Xe({props:{$$slots:{default:[S1]},$$scope:{ctx:I}}}),ca=new Ie({props:{code:`from transformers import XLMTokenizer, TFXLMForQuestionAnsweringSimple import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForQuestionAnsweringSimple <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForQuestionAnsweringSimple.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){h=a("meta"),x=l(),f=a("h1"),_=a("a"),k=a("span"),b(v.$$.fragment),g=l(),F=a("span"),de=n("XLM"),G=l(),X=a("h2"),Y=a("a"),D=a("span"),b(ee.$$.fragment),ce=l(),O=a("span"),pe=n("Overview"),re=l(),R=a("p"),j=n("The XLM model was proposed in "),te=a("a"),K=n("Cross-lingual Language Model Pretraining"),E=n(` by Guillaume Lample, Alexis Conneau. 
It\u2019s a transformer pretrained using one of the following objectives:`),q=l(),J=a("ul"),W=a("li"),he=n("a causal language modeling (CLM) objective (next token prediction),"),me=l(),H=a("li"),ue=n("a masked language modeling (MLM) objective (BERT-like), or"),fe=l(),z=a("li"),ge=n("a Translation Language Modeling (TLM) object (extension of BERT\u2019s MLM to multiple language inputs)"),Q=l(),ne=a("p"),_e=n("The abstract from the paper is the following:"),B=l(),se=a("p"),ae=a("em"),P=n(`Recent studies have demonstrated the efficiency of generative pretraining for English natural language understanding. In this work, we extend this approach to multiple languages and show the effectiveness of cross-lingual pretraining. We propose two methods to learn cross-lingual language models (XLMs): one unsupervised that only relies on monolingual data, and one supervised that leverages parallel data with a new cross-lingual language model objective. We obtain state-of-the-art results on cross-lingual classification, unsupervised and supervised machine translation. On XNLI, our approach pushes the state of the art by an absolute gain of 4.9% accuracy. On unsupervised machine translation, we obtain 34.3 BLEU on WMT\u201916 German-English, improving the previous state of the art by more than 9 BLEU. On supervised machine translation, we obtain a new state of the art of 38.5 BLEU on WMT\u201916 Romanian-English, outperforming the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.`),ie=l(),S=a("p"),ve=n("Tips:"),c=l(),T=a("ul"),V=a("li"),Te=n(`XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation).`),be=l(),C=a("li"),Me=n("XLM has multilingual checkpoints which leverage a specific "),ke=a("code"),we=n("lang"),A=n(" parameter. Check out the "),N=a("a"),ye=n("multi-lingual"),Le=n(" page for more information."),U=l(),le=a("p"),$e=n("This model was contributed by "),oe=a("a"),xe=n("thomwolf"),Qd=n(". The original code can be found "),ln=a("a"),Bd=n("here"),Ud=n("."),fl=l(),It=a("h2"),Mo=a("a"),fr=a("span"),b(dn.$$.fragment),Rd=l(),gr=a("span"),Vd=n("XLMConfig"),gl=l(),De=a("div"),b(cn.$$.fragment),Gd=l(),gt=a("p"),Jd=n("This is the configuration class to store the configuration of a "),fa=a("a"),Kd=n("XLMModel"),Yd=n(` or a `),ga=a("a"),Zd=n("TFXLMModel"),ec=n(`. It is used to instantiate a XLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),pn=a("a"),tc=n("xlm-mlm-en-2048"),oc=n(" architecture."),nc=l(),Dt=a("p"),sc=n("Configuration objects inherit from "),_a=a("a"),ac=n("PretrainedConfig"),rc=n(` and can be used to control the model outputs. Read the documentation from `),va=a("a"),ic=n("PretrainedConfig"),lc=n(" for more information."),dc=l(),_r=a("p"),cc=n("Examples:"),pc=l(),b(hn.$$.fragment),_l=l(),Ot=a("h2"),wo=a("a"),vr=a("span"),b(mn.$$.fragment),hc=l(),kr=a("span"),mc=n("XLMTokenizer"),vl=l(),Fe=a("div"),b(un.$$.fragment),uc=l(),Tr=a("p"),fc=n("Construct an XLM tokenizer. Based on Byte-Pair Encoding. 
The tokenization process is the following:"),gc=l(),Oe=a("ul"),br=a("li"),_c=n("Moses preprocessing and tokenization for most supported languages."),vc=l(),Mr=a("li"),kc=n("Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP)."),Tc=l(),wr=a("li"),bc=n("Optionally lowercases and normalizes all inputs text."),Mc=l(),_t=a("li"),wc=n("The arguments "),yr=a("code"),yc=n("special_tokens"),Lc=n(" and the function "),Lr=a("code"),$c=n("set_special_tokens"),xc=n(`, can be used to add additional symbols (like \u201D`),$r=a("strong"),Fc=n("classify"),Xc=n("\u201D) to a vocabulary."),Ec=l(),fn=a("li"),zc=n("The "),xr=a("code"),qc=n("lang2id"),Cc=n(` attribute maps the languages supported by the model with their IDs if provided (automatically set for pretrained vocabularies).`),jc=l(),gn=a("li"),Pc=n("The "),Fr=a("code"),Ac=n("id2lang"),Sc=n(" attributes does reverse mapping if provided (automatically set for pretrained vocabularies)."),Nc=l(),_n=a("p"),Ic=n("This tokenizer inherits from "),ka=a("a"),Dc=n("PreTrainedTokenizer"),Oc=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Wc=l(),vt=a("div"),b(vn.$$.fragment),Hc=l(),Xr=a("p"),Qc=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM sequence has the following format:`),Bc=l(),kn=a("ul"),Ta=a("li"),Uc=n("single sequence: "),Er=a("code"),Rc=n("<s> X </s>"),Vc=l(),ba=a("li"),Gc=n("pair of sequences: "),zr=a("code"),Jc=n("<s> A </s> B </s>"),Kc=l(),yo=a("div"),b(Tn.$$.fragment),Yc=l(),bn=a("p"),Zc=n(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),qr=a("code"),ep=n("prepare_for_model"),tp=n(" method."),op=l(),lt=a("div"),b(Mn.$$.fragment),np=l(),Cr=a("p"),sp=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence pair mask has the following format:`),ap=l(),b(wn.$$.fragment),rp=l(),Wt=a("p"),ip=n("If "),jr=a("code"),lp=n("token_ids_1"),dp=n(" is "),Pr=a("code"),cp=n("None"),pp=n(", this method only returns the first portion of the mask (0s)."),hp=l(),Ar=a("div"),kl=l(),Ht=a("h2"),Lo=a("a"),Sr=a("span"),b(yn.$$.fragment),mp=l(),Nr=a("span"),up=n("XLM specific outputs"),Tl=l(),Qt=a("div"),b(Ln.$$.fragment),fp=l(),$n=a("p"),gp=n("Base class for outputs of question answering models using a "),Ir=a("code"),_p=n("SquadHead"),vp=n("."),bl=l(),Bt=a("h2"),$o=a("a"),Dr=a("span"),b(xn.$$.fragment),kp=l(),Or=a("span"),Tp=n("XLMModel"),Ml=l(),We=a("div"),b(Fn.$$.fragment),bp=l(),Wr=a("p"),Mp=n("The bare XLM Model transformer outputting raw hidden-states without any specific head on top."),wp=l(),Xn=a("p"),yp=n("This model inherits from "),Ma=a("a"),Lp=n("PreTrainedModel"),$p=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xp=l(),En=a("p"),Fp=n("This model is also a PyTorch "),zn=a("a"),Xp=n("torch.nn.Module"),Ep=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zp=l(),Ge=a("div"),b(qn.$$.fragment),qp=l(),Ut=a("p"),Cp=n("The "),wa=a("a"),jp=n("XLMModel"),Pp=n(" forward method, overrides the "),Hr=a("code"),Ap=n("__call__"),Sp=n(" special method."),Np=l(),b(xo.$$.fragment),Ip=l(),Qr=a("p"),Dp=n("Example:"),Op=l(),b(Cn.$$.fragment),wl=l(),Rt=a("h2"),Fo=a("a"),Br=a("span"),b(jn.$$.fragment),Wp=l(),Ur=a("span"),Hp=n("XLMWithLMHeadModel"),yl=l(),He=a("div"),b(Pn.$$.fragment),Qp=l(),Rr=a("p"),Bp=n(`The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Up=l(),An=a("p"),Rp=n("This model inherits from "),ya=a("a"),Vp=n("PreTrainedModel"),Gp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jp=l(),Sn=a("p"),Kp=n("This model is also a PyTorch "),Nn=a("a"),Yp=n("torch.nn.Module"),Zp=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),eh=l(),Je=a("div"),b(In.$$.fragment),th=l(),Vt=a("p"),oh=n("The "),La=a("a"),nh=n("XLMWithLMHeadModel"),sh=n(" forward method, overrides the "),Vr=a("code"),ah=n("__call__"),rh=n(" special method."),ih=l(),b(Xo.$$.fragment),lh=l(),Gr=a("p"),dh=n("Example:"),ch=l(),b(Dn.$$.fragment),Ll=l(),Gt=a("h2"),Eo=a("a"),Jr=a("span"),b(On.$$.fragment),ph=l(),Kr=a("span"),hh=n("XLMForSequenceClassification"),$l=l(),Qe=a("div"),b(Wn.$$.fragment),mh=l(),Yr=a("p"),uh=n(`XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),fh=l(),Hn=a("p"),gh=n("This model inherits from "),$a=a("a"),_h=n("PreTrainedModel"),vh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kh=l(),Qn=a("p"),Th=n("This model is also a PyTorch "),Bn=a("a"),bh=n("torch.nn.Module"),Mh=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wh=l(),Ee=a("div"),b(Un.$$.fragment),yh=l(),Jt=a("p"),Lh=n("The "),xa=a("a"),$h=n("XLMForSequenceClassification"),xh=n(" forward method, overrides the "),Zr=a("code"),Fh=n("__call__"),Xh=n(" special method."),Eh=l(),b(zo.$$.fragment),zh=l(),ei=a("p"),qh=n("Example of single-label classification:"),Ch=l(),b(Rn.$$.fragment),jh=l(),ti=a("p"),Ph=n("Example of multi-label classification:"),Ah=l(),b(Vn.$$.fragment),xl=l(),Kt=a("h2"),qo=a("a"),oi=a("span"),b(Gn.$$.fragment),Sh=l(),ni=a("span"),Nh=n("XLMForMultipleChoice"),Fl=l(),Be=a("div"),b(Jn.$$.fragment),Ih=l(),si=a("p"),Dh=n(`XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Oh=l(),Kn=a("p"),Wh=n("This model inherits from "),Fa=a("a"),Hh=n("PreTrainedModel"),Qh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bh=l(),Yn=a("p"),Uh=n("This model is also a PyTorch "),Zn=a("a"),Rh=n("torch.nn.Module"),Vh=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gh=l(),Ke=a("div"),b(es.$$.fragment),Jh=l(),Yt=a("p"),Kh=n("The "),Xa=a("a"),Yh=n("XLMForMultipleChoice"),Zh=n(" forward method, overrides the "),ai=a("code"),em=n("__call__"),tm=n(" special method."),om=l(),b(Co.$$.fragment),nm=l(),ri=a("p"),sm=n("Example:"),am=l(),b(ts.$$.fragment),Xl=l(),Zt=a("h2"),jo=a("a"),ii=a("span"),b(os.$$.fragment),rm=l(),li=a("span"),im=n("XLMForTokenClassification"),El=l(),Ue=a("div"),b(ns.$$.fragment),lm=l(),di=a("p"),dm=n(`XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),cm=l(),ss=a("p"),pm=n("This model inherits from "),Ea=a("a"),hm=n("PreTrainedModel"),mm=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),um=l(),as=a("p"),fm=n("This model is also a PyTorch "),rs=a("a"),gm=n("torch.nn.Module"),_m=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vm=l(),Ye=a("div"),b(is.$$.fragment),km=l(),eo=a("p"),Tm=n("The "),za=a("a"),bm=n("XLMForTokenClassification"),Mm=n(" forward method, overrides the "),ci=a("code"),wm=n("__call__"),ym=n(" special method."),Lm=l(),b(Po.$$.fragment),$m=l(),pi=a("p"),xm=n("Example:"),Fm=l(),b(ls.$$.fragment),zl=l(),to=a("h2"),Ao=a("a"),hi=a("span"),b(ds.$$.fragment),Xm=l(),mi=a("span"),Em=n("XLMForQuestionAnsweringSimple"),ql=l(),Re=a("div"),b(cs.$$.fragment),zm=l(),oo=a("p"),qm=n(`XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),ui=a("code"),Cm=n("span start logits"),jm=n(" and "),fi=a("code"),Pm=n("span end logits"),Am=n(")."),Sm=l(),ps=a("p"),Nm=n("This model inherits from "),qa=a("a"),Im=n("PreTrainedModel"),Dm=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Om=l(),hs=a("p"),Wm=n("This model is also a PyTorch "),ms=a("a"),Hm=n("torch.nn.Module"),Qm=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bm=l(),Ze=a("div"),b(us.$$.fragment),Um=l(),no=a("p"),Rm=n("The "),Ca=a("a"),Vm=n("XLMForQuestionAnsweringSimple"),Gm=n(" forward method, overrides the "),gi=a("code"),Jm=n("__call__"),Km=n(" special method."),Ym=l(),b(So.$$.fragment),Zm=l(),_i=a("p"),eu=n("Example:"),tu=l(),b(fs.$$.fragment),Cl=l(),so=a("h2"),No=a("a"),vi=a("span"),b(gs.$$.fragment),ou=l(),ki=a("span"),nu=n("XLMForQuestionAnswering"),jl=l(),Ve=a("div"),b(_s.$$.fragment),su=l(),ao=a("p"),au=n(`XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ti=a("code"),ru=n("span start logits"),iu=n(" and "),bi=a("code"),lu=n("span end logits"),du=n(")."),cu=l(),vs=a("p"),pu=n("This model inherits from "),ja=a("a"),hu=n("PreTrainedModel"),mu=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uu=l(),ks=a("p"),fu=n("This model is also a PyTorch "),Ts=a("a"),gu=n("torch.nn.Module"),_u=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vu=l(),et=a("div"),b(bs.$$.fragment),ku=l(),ro=a("p"),Tu=n("The "),Pa=a("a"),bu=n("XLMForQuestionAnswering"),Mu=n(" forward method, overrides the "),Mi=a("code"),wu=n("__call__"),yu=n(" special method."),Lu=l(),b(Io.$$.fragment),$u=l(),wi=a("p"),xu=n("Example:"),Fu=l(),b(Ms.$$.fragment),Pl=l(),io=a("h2"),Do=a("a"),yi=a("span"),b(ws.$$.fragment),Xu=l(),Li=a("span"),Eu=n("TFXLMModel"),Al=l(),qe=a("div"),b(ys.$$.fragment),zu=l(),$i=a("p"),qu=n("The bare XLM Model transformer outputting raw hidden-states without any specific head on top."),Cu=l(),Ls=a("p"),ju=n("This model inherits from "),Aa=a("a"),Pu=n("TFPreTrainedModel"),Au=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Su=l(),$s=a("p"),Nu=n("This model is also a "),xs=a("a"),Iu=n("tf.keras.Model"),Du=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ou=l(),b(Oo.$$.fragment),Wu=l(),tt=a("div"),b(Fs.$$.fragment),Hu=l(),lo=a("p"),Qu=n("The "),Sa=a("a"),Bu=n("TFXLMModel"),Uu=n(" forward method, overrides the "),xi=a("code"),Ru=n("__call__"),Vu=n(" special method."),Gu=l(),b(Wo.$$.fragment),Ju=l(),Fi=a("p"),Ku=n("Example:"),Yu=l(),b(Xs.$$.fragment),Sl=l(),co=a("h2"),Ho=a("a"),Xi=a("span"),b(Es.$$.fragment),Zu=l(),Ei=a("span"),ef=n("TFXLMWithLMHeadModel"),Nl=l(),Ce=a("div"),b(zs.$$.fragment),tf=l(),zi=a("p"),of=n(`The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),nf=l(),qs=a("p"),sf=n("This model inherits from "),Na=a("a"),af=n("TFPreTrainedModel"),rf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lf=l(),Cs=a("p"),df=n("This model is also a "),js=a("a"),cf=n("tf.keras.Model"),pf=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hf=l(),b(Qo.$$.fragment),mf=l(),ot=a("div"),b(Ps.$$.fragment),uf=l(),po=a("p"),ff=n("The "),Ia=a("a"),gf=n("TFXLMWithLMHeadModel"),_f=n(" forward method, overrides the "),qi=a("code"),vf=n("__call__"),kf=n(" special method."),Tf=l(),b(Bo.$$.fragment),bf=l(),Ci=a("p"),Mf=n("Example:"),wf=l(),b(As.$$.fragment),Il=l(),ho=a("h2"),Uo=a("a"),ji=a("span"),b(Ss.$$.fragment),yf=l(),Pi=a("span"),Lf=n("TFXLMForSequenceClassification"),Dl=l(),je=a("div"),b(Ns.$$.fragment),$f=l(),Ai=a("p"),xf=n(`XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ff=l(),Is=a("p"),Xf=n("This model inherits from "),Da=a("a"),Ef=n("TFPreTrainedModel"),zf=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qf=l(),Ds=a("p"),Cf=n("This model is also a "),Os=a("a"),jf=n("tf.keras.Model"),Pf=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Af=l(),b(Ro.$$.fragment),Sf=l(),nt=a("div"),b(Ws.$$.fragment),Nf=l(),mo=a("p"),If=n("The "),Oa=a("a"),Df=n("TFXLMForSequenceClassification"),Of=n(" forward method, overrides the "),Si=a("code"),Wf=n("__call__"),Hf=n(" special method."),Qf=l(),b(Vo.$$.fragment),Bf=l(),Ni=a("p"),Uf=n("Example:"),Rf=l(),b(Hs.$$.fragment),Ol=l(),uo=a("h2"),Go=a("a"),Ii=a("span"),b(Qs.$$.fragment),Vf=l(),Di=a("span"),Gf=n("TFXLMForMultipleChoice"),Wl=l(),Pe=a("div"),b(Bs.$$.fragment),Jf=l(),Oi=a("p"),Kf=n(`XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Yf=l(),Us=a("p"),Zf=n("This model inherits from "),Wa=a("a"),eg=n("TFPreTrainedModel"),tg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),og=l(),Rs=a("p"),ng=n("This model is also a "),Vs=a("a"),sg=n("tf.keras.Model"),ag=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),rg=l(),b(Jo.$$.fragment),ig=l(),st=a("div"),b(Gs.$$.fragment),lg=l(),fo=a("p"),dg=n("The "),Ha=a("a"),cg=n("TFXLMForMultipleChoice"),pg=n(" forward method, overrides the "),Wi=a("code"),hg=n("__call__"),mg=n(" special method."),ug=l(),b(Ko.$$.fragment),fg=l(),Hi=a("p"),gg=n("Example:"),_g=l(),b(Js.$$.fragment),Hl=l(),go=a("h2"),Yo=a("a"),Qi=a("span"),b(Ks.$$.fragment),vg=l(),Bi=a("span"),kg=n("TFXLMForTokenClassification"),Ql=l(),Ae=a("div"),b(Ys.$$.fragment),Tg=l(),Ui=a("p"),bg=n(`XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Mg=l(),Zs=a("p"),wg=n("This model inherits from "),Qa=a("a"),yg=n("TFPreTrainedModel"),Lg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$g=l(),ea=a("p"),xg=n("This model is also a "),ta=a("a"),Fg=n("tf.keras.Model"),Xg=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Eg=l(),b(Zo.$$.fragment),zg=l(),at=a("div"),b(oa.$$.fragment),qg=l(),_o=a("p"),Cg=n("The "),Ba=a("a"),jg=n("TFXLMForTokenClassification"),Pg=n(" forward method, overrides the "),Ri=a("code"),Ag=n("__call__"),Sg=n(" special method."),Ng=l(),b(en.$$.fragment),Ig=l(),Vi=a("p"),Dg=n("Example:"),Og=l(),b(na.$$.fragment),Bl=l(),vo=a("h2"),tn=a("a"),Gi=a("span"),b(sa.$$.fragment),Wg=l(),Ji=a("span"),Hg=n("TFXLMForQuestionAnsweringSimple"),Ul=l(),Se=a("div"),b(aa.$$.fragment),Qg=l(),ko=a("p"),Bg=n(`XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Ki=a("code"),Ug=n("span start logits"),Rg=n(" and "),Yi=a("code"),Vg=n("span end logits"),Gg=n(")."),Jg=l(),ra=a("p"),Kg=n("This model inherits from "),Ua=a("a"),Yg=n("TFPreTrainedModel"),Zg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),e_=l(),ia=a("p"),t_=n("This model is also a "),la=a("a"),o_=n("tf.keras.Model"),n_=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),s_=l(),b(on.$$.fragment),a_=l(),rt=a("div"),b(da.$$.fragment),r_=l(),To=a("p"),i_=n("The "),Ra=a("a"),l_=n("TFXLMForQuestionAnsweringSimple"),d_=n(" forward method, overrides the "),Zi=a("code"),c_=n("__call__"),p_=n(" special method."),h_=l(),b(nn.$$.fragment),m_=l(),el=a("p"),u_=n("Example:"),f_=l(),b(ca.$$.fragment),this.h()},l(o){const u=v1('[data-svelte="svelte-1phssyn"]',document.head);h=r(u,"META",{name:!0,content:!0}),u.forEach(t),x=d(o),f=r(o,"H1",{class:!0});var pa=i(f);_=r(pa,"A",{id:!0,class:!0,href:!0});var tl=i(_);k=r(tl,"SPAN",{});var ol=i(k);M(v.$$.fragment,ol),ol.forEach(t),tl.forEach(t),g=d(pa),F=r(pa,"SPAN",{});var nl=i(F);de=s(nl,"XLM"),nl.forEach(t),pa.forEach(t),G=d(o),X=r(o,"H2",{class:!0});var ha=i(X);Y=r(ha,"A",{id:!0,class:!0,href:!0});var sl=i(Y);D=r(sl,"SPAN",{});var al=i(D);M(ee.$$.fragment,al),al.forEach(t),sl.forEach(t),ce=d(ha),O=r(ha,"SPAN",{});var rl=i(O);pe=s(rl,"Overview"),rl.forEach(t),ha.forEach(t),re=d(o),R=r(o,"P",{});var ma=i(R);j=s(ma,"The XLM model was proposed in "),te=r(ma,"A",{href:!0,rel:!0});var il=i(te);K=s(il,"Cross-lingual Language Model Pretraining"),il.forEach(t),E=s(ma,` by Guillaume Lample, Alexis Conneau. It\u2019s a transformer pretrained using one of the following objectives:`),ma.forEach(t),q=d(o),J=r(o,"UL",{});var bo=i(J);W=r(bo,"LI",{});var ll=i(W);he=s(ll,"a causal language modeling (CLM) objective (next token prediction),"),ll.forEach(t),me=d(bo),H=r(bo,"LI",{});var dl=i(H);ue=s(dl,"a masked language modeling (MLM) objective (BERT-like), or"),dl.forEach(t),fe=d(bo),z=r(bo,"LI",{});var cl=i(z);ge=s(cl,"a Translation Language Modeling (TLM) object (extension of BERT\u2019s MLM to multiple language inputs)"),cl.forEach(t),bo.forEach(t),Q=d(o),ne=r(o,"P",{});var pl=i(ne);_e=s(pl,"The abstract from the paper is the following:"),pl.forEach(t),B=d(o),se=r(o,"P",{});var hl=i(se);ae=r(hl,"EM",{});var ml=i(ae);P=s(ml,`Recent studies have demonstrated the efficiency of generative pretraining for English natural language understanding. In this work, we extend this approach to multiple languages and show the effectiveness of cross-lingual pretraining. 
We propose two methods to learn cross-lingual language models (XLMs): one unsupervised that only relies on monolingual data, and one supervised that leverages parallel data with a new cross-lingual language model objective. We obtain state-of-the-art results on cross-lingual classification, unsupervised and supervised machine translation. On XNLI, our approach pushes the state of the art by an absolute gain of 4.9% accuracy. On unsupervised machine translation, we obtain 34.3 BLEU on WMT\u201916 German-English, improving the previous state of the art by more than 9 BLEU. On supervised machine translation, we obtain a new state of the art of 38.5 BLEU on WMT\u201916 Romanian-English, outperforming the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.`),ml.forEach(t),hl.forEach(t),ie=d(o),S=r(o,"P",{});var ul=i(S);ve=s(ul,"Tips:"),ul.forEach(t),c=d(o),T=r(o,"UL",{});var ua=i(T);V=r(ua,"LI",{});var v_=i(V);Te=s(v_,`XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation).`),v_.forEach(t),be=d(ua),C=r(ua,"LI",{});var Va=i(C);Me=s(Va,"XLM has multilingual checkpoints which leverage a specific "),ke=r(Va,"CODE",{});var k_=i(ke);we=s(k_,"lang"),k_.forEach(t),A=s(Va," parameter. Check out the "),N=r(Va,"A",{href:!0});var T_=i(N);ye=s(T_,"multi-lingual"),T_.forEach(t),Le=s(Va," page for more information."),Va.forEach(t),ua.forEach(t),U=d(o),le=r(o,"P",{});var Ga=i(le);$e=s(Ga,"This model was contributed by "),oe=r(Ga,"A",{href:!0,rel:!0});var b_=i(oe);xe=s(b_,"thomwolf"),b_.forEach(t),Qd=s(Ga,". The original code can be found "),ln=r(Ga,"A",{href:!0,rel:!0});var M_=i(ln);Bd=s(M_,"here"),M_.forEach(t),Ud=s(Ga,"."),Ga.forEach(t),fl=d(o),It=r(o,"H2",{class:!0});var Vl=i(It);Mo=r(Vl,"A",{id:!0,class:!0,href:!0});var w_=i(Mo);fr=r(w_,"SPAN",{});var y_=i(fr);M(dn.$$.fragment,y_),y_.forEach(t),w_.forEach(t),Rd=d(Vl),gr=r(Vl,"SPAN",{});var L_=i(gr);Vd=s(L_,"XLMConfig"),L_.forEach(t),Vl.forEach(t),gl=d(o),De=r(o,"DIV",{class:!0});var kt=i(De);M(cn.$$.fragment,kt),Gd=d(kt),gt=r(kt,"P",{});var sn=i(gt);Jd=s(sn,"This is the configuration class to store the configuration of a "),fa=r(sn,"A",{href:!0});var $_=i(fa);Kd=s($_,"XLMModel"),$_.forEach(t),Yd=s(sn,` or a `),ga=r(sn,"A",{href:!0});var x_=i(ga);Zd=s(x_,"TFXLMModel"),x_.forEach(t),ec=s(sn,`. It is used to instantiate a XLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),pn=r(sn,"A",{href:!0,rel:!0});var F_=i(pn);tc=s(F_,"xlm-mlm-en-2048"),F_.forEach(t),oc=s(sn," architecture."),sn.forEach(t),nc=d(kt),Dt=r(kt,"P",{});var Ja=i(Dt);sc=s(Ja,"Configuration objects inherit from "),_a=r(Ja,"A",{href:!0});var X_=i(_a);ac=s(X_,"PretrainedConfig"),X_.forEach(t),rc=s(Ja,` and can be used to control the model outputs. 
Read the documentation from `),va=r(Ja,"A",{href:!0});var E_=i(va);ic=s(E_,"PretrainedConfig"),E_.forEach(t),lc=s(Ja," for more information."),Ja.forEach(t),dc=d(kt),_r=r(kt,"P",{});var z_=i(_r);cc=s(z_,"Examples:"),z_.forEach(t),pc=d(kt),M(hn.$$.fragment,kt),kt.forEach(t),_l=d(o),Ot=r(o,"H2",{class:!0});var Gl=i(Ot);wo=r(Gl,"A",{id:!0,class:!0,href:!0});var q_=i(wo);vr=r(q_,"SPAN",{});var C_=i(vr);M(mn.$$.fragment,C_),C_.forEach(t),q_.forEach(t),hc=d(Gl),kr=r(Gl,"SPAN",{});var j_=i(kr);mc=s(j_,"XLMTokenizer"),j_.forEach(t),Gl.forEach(t),vl=d(o),Fe=r(o,"DIV",{class:!0});var Ne=i(Fe);M(un.$$.fragment,Ne),uc=d(Ne),Tr=r(Ne,"P",{});var P_=i(Tr);fc=s(P_,"Construct an XLM tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:"),P_.forEach(t),gc=d(Ne),Oe=r(Ne,"UL",{});var dt=i(Oe);br=r(dt,"LI",{});var A_=i(br);_c=s(A_,"Moses preprocessing and tokenization for most supported languages."),A_.forEach(t),vc=d(dt),Mr=r(dt,"LI",{});var S_=i(Mr);kc=s(S_,"Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP)."),S_.forEach(t),Tc=d(dt),wr=r(dt,"LI",{});var N_=i(wr);bc=s(N_,"Optionally lowercases and normalizes all inputs text."),N_.forEach(t),Mc=d(dt),_t=r(dt,"LI",{});var an=i(_t);wc=s(an,"The arguments "),yr=r(an,"CODE",{});var I_=i(yr);yc=s(I_,"special_tokens"),I_.forEach(t),Lc=s(an," and the function "),Lr=r(an,"CODE",{});var D_=i(Lr);$c=s(D_,"set_special_tokens"),D_.forEach(t),xc=s(an,`, can be used to add additional symbols (like \u201D`),$r=r(an,"STRONG",{});var O_=i($r);Fc=s(O_,"classify"),O_.forEach(t),Xc=s(an,"\u201D) to a vocabulary."),an.forEach(t),Ec=d(dt),fn=r(dt,"LI",{});var Jl=i(fn);zc=s(Jl,"The "),xr=r(Jl,"CODE",{});var W_=i(xr);qc=s(W_,"lang2id"),W_.forEach(t),Cc=s(Jl,` attribute maps the languages supported by the model with their IDs if provided (automatically set for pretrained vocabularies).`),Jl.forEach(t),jc=d(dt),gn=r(dt,"LI",{});var Kl=i(gn);Pc=s(Kl,"The "),Fr=r(Kl,"CODE",{});var H_=i(Fr);Ac=s(H_,"id2lang"),H_.forEach(t),Sc=s(Kl," attributes does reverse mapping if provided (automatically set for pretrained vocabularies)."),Kl.forEach(t),dt.forEach(t),Nc=d(Ne),_n=r(Ne,"P",{});var Yl=i(_n);Ic=s(Yl,"This tokenizer inherits from "),ka=r(Yl,"A",{href:!0});var Q_=i(ka);Dc=s(Q_,"PreTrainedTokenizer"),Q_.forEach(t),Oc=s(Yl,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Yl.forEach(t),Wc=d(Ne),vt=r(Ne,"DIV",{class:!0});var Ka=i(vt);M(vn.$$.fragment,Ka),Hc=d(Ka),Xr=r(Ka,"P",{});var B_=i(Xr);Qc=s(B_,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM sequence has the following format:`),B_.forEach(t),Bc=d(Ka),kn=r(Ka,"UL",{});var Zl=i(kn);Ta=r(Zl,"LI",{});var g_=i(Ta);Uc=s(g_,"single sequence: "),Er=r(g_,"CODE",{});var U_=i(Er);Rc=s(U_,"<s> X </s>"),U_.forEach(t),g_.forEach(t),Vc=d(Zl),ba=r(Zl,"LI",{});var __=i(ba);Gc=s(__,"pair of sequences: "),zr=r(__,"CODE",{});var R_=i(zr);Jc=s(R_,"<s> A </s> B </s>"),R_.forEach(t),__.forEach(t),Zl.forEach(t),Ka.forEach(t),Kc=d(Ne),yo=r(Ne,"DIV",{class:!0});var ed=i(yo);M(Tn.$$.fragment,ed),Yc=d(ed),bn=r(ed,"P",{});var td=i(bn);Zc=s(td,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),qr=r(td,"CODE",{});var V_=i(qr);ep=s(V_,"prepare_for_model"),V_.forEach(t),tp=s(td," method."),td.forEach(t),ed.forEach(t),op=d(Ne),lt=r(Ne,"DIV",{class:!0});var rn=i(lt);M(Mn.$$.fragment,rn),np=d(rn),Cr=r(rn,"P",{});var G_=i(Cr);sp=s(G_,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence pair mask has the following format:`),G_.forEach(t),ap=d(rn),M(wn.$$.fragment,rn),rp=d(rn),Wt=r(rn,"P",{});var Ya=i(Wt);ip=s(Ya,"If "),jr=r(Ya,"CODE",{});var J_=i(jr);lp=s(J_,"token_ids_1"),J_.forEach(t),dp=s(Ya," is "),Pr=r(Ya,"CODE",{});var K_=i(Pr);cp=s(K_,"None"),K_.forEach(t),pp=s(Ya,", this method only returns the first portion of the mask (0s)."),Ya.forEach(t),rn.forEach(t),hp=d(Ne),Ar=r(Ne,"DIV",{class:!0}),i(Ar).forEach(t),Ne.forEach(t),kl=d(o),Ht=r(o,"H2",{class:!0});var od=i(Ht);Lo=r(od,"A",{id:!0,class:!0,href:!0});var Y_=i(Lo);Sr=r(Y_,"SPAN",{});var Z_=i(Sr);M(yn.$$.fragment,Z_),Z_.forEach(t),Y_.forEach(t),mp=d(od),Nr=r(od,"SPAN",{});var ev=i(Nr);up=s(ev,"XLM specific outputs"),ev.forEach(t),od.forEach(t),Tl=d(o),Qt=r(o,"DIV",{class:!0});var nd=i(Qt);M(Ln.$$.fragment,nd),fp=d(nd),$n=r(nd,"P",{});var sd=i($n);gp=s(sd,"Base class for outputs of question answering models using a "),Ir=r(sd,"CODE",{});var tv=i(Ir);_p=s(tv,"SquadHead"),tv.forEach(t),vp=s(sd,"."),sd.forEach(t),nd.forEach(t),bl=d(o),Bt=r(o,"H2",{class:!0});var ad=i(Bt);$o=r(ad,"A",{id:!0,class:!0,href:!0});var ov=i($o);Dr=r(ov,"SPAN",{});var nv=i(Dr);M(xn.$$.fragment,nv),nv.forEach(t),ov.forEach(t),kp=d(ad),Or=r(ad,"SPAN",{});var sv=i(Or);Tp=s(sv,"XLMModel"),sv.forEach(t),ad.forEach(t),Ml=d(o),We=r(o,"DIV",{class:!0});var Tt=i(We);M(Fn.$$.fragment,Tt),bp=d(Tt),Wr=r(Tt,"P",{});var av=i(Wr);Mp=s(av,"The bare XLM Model transformer outputting raw hidden-states without any specific head on top."),av.forEach(t),wp=d(Tt),Xn=r(Tt,"P",{});var rd=i(Xn);yp=s(rd,"This model inherits from "),Ma=r(rd,"A",{href:!0});var rv=i(Ma);Lp=s(rv,"PreTrainedModel"),rv.forEach(t),$p=s(rd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rd.forEach(t),xp=d(Tt),En=r(Tt,"P",{});var id=i(En);Fp=s(id,"This model is also a PyTorch "),zn=r(id,"A",{href:!0,rel:!0});var iv=i(zn);Xp=s(iv,"torch.nn.Module"),iv.forEach(t),Ep=s(id,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),id.forEach(t),zp=d(Tt),Ge=r(Tt,"DIV",{class:!0});var bt=i(Ge);M(qn.$$.fragment,bt),qp=d(bt),Ut=r(bt,"P",{});var Za=i(Ut);Cp=s(Za,"The "),wa=r(Za,"A",{href:!0});var lv=i(wa);jp=s(lv,"XLMModel"),lv.forEach(t),Pp=s(Za," forward method, overrides the "),Hr=r(Za,"CODE",{});var dv=i(Hr);Ap=s(dv,"__call__"),dv.forEach(t),Sp=s(Za," special method."),Za.forEach(t),Np=d(bt),M(xo.$$.fragment,bt),Ip=d(bt),Qr=r(bt,"P",{});var cv=i(Qr);Dp=s(cv,"Example:"),cv.forEach(t),Op=d(bt),M(Cn.$$.fragment,bt),bt.forEach(t),Tt.forEach(t),wl=d(o),Rt=r(o,"H2",{class:!0});var ld=i(Rt);Fo=r(ld,"A",{id:!0,class:!0,href:!0});var pv=i(Fo);Br=r(pv,"SPAN",{});var hv=i(Br);M(jn.$$.fragment,hv),hv.forEach(t),pv.forEach(t),Wp=d(ld),Ur=r(ld,"SPAN",{});var mv=i(Ur);Hp=s(mv,"XLMWithLMHeadModel"),mv.forEach(t),ld.forEach(t),yl=d(o),He=r(o,"DIV",{class:!0});var Mt=i(He);M(Pn.$$.fragment,Mt),Qp=d(Mt),Rr=r(Mt,"P",{});var uv=i(Rr);Bp=s(uv,`The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),uv.forEach(t),Up=d(Mt),An=r(Mt,"P",{});var dd=i(An);Rp=s(dd,"This model inherits from "),ya=r(dd,"A",{href:!0});var fv=i(ya);Vp=s(fv,"PreTrainedModel"),fv.forEach(t),Gp=s(dd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dd.forEach(t),Jp=d(Mt),Sn=r(Mt,"P",{});var cd=i(Sn);Kp=s(cd,"This model is also a PyTorch "),Nn=r(cd,"A",{href:!0,rel:!0});var gv=i(Nn);Yp=s(gv,"torch.nn.Module"),gv.forEach(t),Zp=s(cd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cd.forEach(t),eh=d(Mt),Je=r(Mt,"DIV",{class:!0});var wt=i(Je);M(In.$$.fragment,wt),th=d(wt),Vt=r(wt,"P",{});var er=i(Vt);oh=s(er,"The "),La=r(er,"A",{href:!0});var _v=i(La);nh=s(_v,"XLMWithLMHeadModel"),_v.forEach(t),sh=s(er," forward method, overrides the "),Vr=r(er,"CODE",{});var vv=i(Vr);ah=s(vv,"__call__"),vv.forEach(t),rh=s(er," special method."),er.forEach(t),ih=d(wt),M(Xo.$$.fragment,wt),lh=d(wt),Gr=r(wt,"P",{});var kv=i(Gr);dh=s(kv,"Example:"),kv.forEach(t),ch=d(wt),M(Dn.$$.fragment,wt),wt.forEach(t),Mt.forEach(t),Ll=d(o),Gt=r(o,"H2",{class:!0});var pd=i(Gt);Eo=r(pd,"A",{id:!0,class:!0,href:!0});var Tv=i(Eo);Jr=r(Tv,"SPAN",{});var bv=i(Jr);M(On.$$.fragment,bv),bv.forEach(t),Tv.forEach(t),ph=d(pd),Kr=r(pd,"SPAN",{});var Mv=i(Kr);hh=s(Mv,"XLMForSequenceClassification"),Mv.forEach(t),pd.forEach(t),$l=d(o),Qe=r(o,"DIV",{class:!0});var yt=i(Qe);M(Wn.$$.fragment,yt),mh=d(yt),Yr=r(yt,"P",{});var wv=i(Yr);uh=s(wv,`XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),wv.forEach(t),fh=d(yt),Hn=r(yt,"P",{});var hd=i(Hn);gh=s(hd,"This model inherits from "),$a=r(hd,"A",{href:!0});var yv=i($a);_h=s(yv,"PreTrainedModel"),yv.forEach(t),vh=s(hd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hd.forEach(t),kh=d(yt),Qn=r(yt,"P",{});var md=i(Qn);Th=s(md,"This model is also a PyTorch "),Bn=r(md,"A",{href:!0,rel:!0});var Lv=i(Bn);bh=s(Lv,"torch.nn.Module"),Lv.forEach(t),Mh=s(md,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),md.forEach(t),wh=d(yt),Ee=r(yt,"DIV",{class:!0});var it=i(Ee);M(Un.$$.fragment,it),yh=d(it),Jt=r(it,"P",{});var tr=i(Jt);Lh=s(tr,"The "),xa=r(tr,"A",{href:!0});var $v=i(xa);$h=s($v,"XLMForSequenceClassification"),$v.forEach(t),xh=s(tr," forward method, overrides the "),Zr=r(tr,"CODE",{});var xv=i(Zr);Fh=s(xv,"__call__"),xv.forEach(t),Xh=s(tr," special method."),tr.forEach(t),Eh=d(it),M(zo.$$.fragment,it),zh=d(it),ei=r(it,"P",{});var Fv=i(ei);qh=s(Fv,"Example of single-label classification:"),Fv.forEach(t),Ch=d(it),M(Rn.$$.fragment,it),jh=d(it),ti=r(it,"P",{});var Xv=i(ti);Ph=s(Xv,"Example of multi-label classification:"),Xv.forEach(t),Ah=d(it),M(Vn.$$.fragment,it),it.forEach(t),yt.forEach(t),xl=d(o),Kt=r(o,"H2",{class:!0});var ud=i(Kt);qo=r(ud,"A",{id:!0,class:!0,href:!0});var Ev=i(qo);oi=r(Ev,"SPAN",{});var zv=i(oi);M(Gn.$$.fragment,zv),zv.forEach(t),Ev.forEach(t),Sh=d(ud),ni=r(ud,"SPAN",{});var qv=i(ni);Nh=s(qv,"XLMForMultipleChoice"),qv.forEach(t),ud.forEach(t),Fl=d(o),Be=r(o,"DIV",{class:!0});var Lt=i(Be);M(Jn.$$.fragment,Lt),Ih=d(Lt),si=r(Lt,"P",{});var Cv=i(si);Dh=s(Cv,`XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Cv.forEach(t),Oh=d(Lt),Kn=r(Lt,"P",{});var fd=i(Kn);Wh=s(fd,"This model inherits from "),Fa=r(fd,"A",{href:!0});var jv=i(Fa);Hh=s(jv,"PreTrainedModel"),jv.forEach(t),Qh=s(fd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fd.forEach(t),Bh=d(Lt),Yn=r(Lt,"P",{});var gd=i(Yn);Uh=s(gd,"This model is also a PyTorch "),Zn=r(gd,"A",{href:!0,rel:!0});var Pv=i(Zn);Rh=s(Pv,"torch.nn.Module"),Pv.forEach(t),Vh=s(gd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gd.forEach(t),Gh=d(Lt),Ke=r(Lt,"DIV",{class:!0});var $t=i(Ke);M(es.$$.fragment,$t),Jh=d($t),Yt=r($t,"P",{});var or=i(Yt);Kh=s(or,"The "),Xa=r(or,"A",{href:!0});var Av=i(Xa);Yh=s(Av,"XLMForMultipleChoice"),Av.forEach(t),Zh=s(or," forward method, overrides the "),ai=r(or,"CODE",{});var Sv=i(ai);em=s(Sv,"__call__"),Sv.forEach(t),tm=s(or," special method."),or.forEach(t),om=d($t),M(Co.$$.fragment,$t),nm=d($t),ri=r($t,"P",{});var Nv=i(ri);sm=s(Nv,"Example:"),Nv.forEach(t),am=d($t),M(ts.$$.fragment,$t),$t.forEach(t),Lt.forEach(t),Xl=d(o),Zt=r(o,"H2",{class:!0});var _d=i(Zt);jo=r(_d,"A",{id:!0,class:!0,href:!0});var Iv=i(jo);ii=r(Iv,"SPAN",{});var Dv=i(ii);M(os.$$.fragment,Dv),Dv.forEach(t),Iv.forEach(t),rm=d(_d),li=r(_d,"SPAN",{});var Ov=i(li);im=s(Ov,"XLMForTokenClassification"),Ov.forEach(t),_d.forEach(t),El=d(o),Ue=r(o,"DIV",{class:!0});var xt=i(Ue);M(ns.$$.fragment,xt),lm=d(xt),di=r(xt,"P",{});var Wv=i(di);dm=s(Wv,`XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Wv.forEach(t),cm=d(xt),ss=r(xt,"P",{});var vd=i(ss);pm=s(vd,"This model inherits from "),Ea=r(vd,"A",{href:!0});var Hv=i(Ea);hm=s(Hv,"PreTrainedModel"),Hv.forEach(t),mm=s(vd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vd.forEach(t),um=d(xt),as=r(xt,"P",{});var kd=i(as);fm=s(kd,"This model is also a PyTorch "),rs=r(kd,"A",{href:!0,rel:!0});var Qv=i(rs);gm=s(Qv,"torch.nn.Module"),Qv.forEach(t),_m=s(kd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kd.forEach(t),vm=d(xt),Ye=r(xt,"DIV",{class:!0});var Ft=i(Ye);M(is.$$.fragment,Ft),km=d(Ft),eo=r(Ft,"P",{});var nr=i(eo);Tm=s(nr,"The "),za=r(nr,"A",{href:!0});var Bv=i(za);bm=s(Bv,"XLMForTokenClassification"),Bv.forEach(t),Mm=s(nr," forward method, overrides the "),ci=r(nr,"CODE",{});var Uv=i(ci);wm=s(Uv,"__call__"),Uv.forEach(t),ym=s(nr," special method."),nr.forEach(t),Lm=d(Ft),M(Po.$$.fragment,Ft),$m=d(Ft),pi=r(Ft,"P",{});var Rv=i(pi);xm=s(Rv,"Example:"),Rv.forEach(t),Fm=d(Ft),M(ls.$$.fragment,Ft),Ft.forEach(t),xt.forEach(t),zl=d(o),to=r(o,"H2",{class:!0});var Td=i(to);Ao=r(Td,"A",{id:!0,class:!0,href:!0});var Vv=i(Ao);hi=r(Vv,"SPAN",{});var Gv=i(hi);M(ds.$$.fragment,Gv),Gv.forEach(t),Vv.forEach(t),Xm=d(Td),mi=r(Td,"SPAN",{});var Jv=i(mi);Em=s(Jv,"XLMForQuestionAnsweringSimple"),Jv.forEach(t),Td.forEach(t),ql=d(o),Re=r(o,"DIV",{class:!0});var Xt=i(Re);M(cs.$$.fragment,Xt),zm=d(Xt),oo=r(Xt,"P",{});var sr=i(oo);qm=s(sr,`XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),ui=r(sr,"CODE",{});var Kv=i(ui);Cm=s(Kv,"span start logits"),Kv.forEach(t),jm=s(sr," and "),fi=r(sr,"CODE",{});var Yv=i(fi);Pm=s(Yv,"span end logits"),Yv.forEach(t),Am=s(sr,")."),sr.forEach(t),Sm=d(Xt),ps=r(Xt,"P",{});var bd=i(ps);Nm=s(bd,"This model inherits from "),qa=r(bd,"A",{href:!0});var Zv=i(qa);Im=s(Zv,"PreTrainedModel"),Zv.forEach(t),Dm=s(bd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bd.forEach(t),Om=d(Xt),hs=r(Xt,"P",{});var Md=i(hs);Wm=s(Md,"This model is also a PyTorch "),ms=r(Md,"A",{href:!0,rel:!0});var ek=i(ms);Hm=s(ek,"torch.nn.Module"),ek.forEach(t),Qm=s(Md,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Md.forEach(t),Bm=d(Xt),Ze=r(Xt,"DIV",{class:!0});var Et=i(Ze);M(us.$$.fragment,Et),Um=d(Et),no=r(Et,"P",{});var ar=i(no);Rm=s(ar,"The "),Ca=r(ar,"A",{href:!0});var tk=i(Ca);Vm=s(tk,"XLMForQuestionAnsweringSimple"),tk.forEach(t),Gm=s(ar," forward method, overrides the "),gi=r(ar,"CODE",{});var ok=i(gi);Jm=s(ok,"__call__"),ok.forEach(t),Km=s(ar," special method."),ar.forEach(t),Ym=d(Et),M(So.$$.fragment,Et),Zm=d(Et),_i=r(Et,"P",{});var nk=i(_i);eu=s(nk,"Example:"),nk.forEach(t),tu=d(Et),M(fs.$$.fragment,Et),Et.forEach(t),Xt.forEach(t),Cl=d(o),so=r(o,"H2",{class:!0});var wd=i(so);No=r(wd,"A",{id:!0,class:!0,href:!0});var sk=i(No);vi=r(sk,"SPAN",{});var ak=i(vi);M(gs.$$.fragment,ak),ak.forEach(t),sk.forEach(t),ou=d(wd),ki=r(wd,"SPAN",{});var rk=i(ki);nu=s(rk,"XLMForQuestionAnswering"),rk.forEach(t),wd.forEach(t),jl=d(o),Ve=r(o,"DIV",{class:!0});var zt=i(Ve);M(_s.$$.fragment,zt),su=d(zt),ao=r(zt,"P",{});var rr=i(ao);au=s(rr,`XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ti=r(rr,"CODE",{});var ik=i(Ti);ru=s(ik,"span start logits"),ik.forEach(t),iu=s(rr," and "),bi=r(rr,"CODE",{});var lk=i(bi);lu=s(lk,"span end logits"),lk.forEach(t),du=s(rr,")."),rr.forEach(t),cu=d(zt),vs=r(zt,"P",{});var yd=i(vs);pu=s(yd,"This model inherits from "),ja=r(yd,"A",{href:!0});var dk=i(ja);hu=s(dk,"PreTrainedModel"),dk.forEach(t),mu=s(yd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yd.forEach(t),uu=d(zt),ks=r(zt,"P",{});var Ld=i(ks);fu=s(Ld,"This model is also a PyTorch "),Ts=r(Ld,"A",{href:!0,rel:!0});var ck=i(Ts);gu=s(ck,"torch.nn.Module"),ck.forEach(t),_u=s(Ld,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ld.forEach(t),vu=d(zt),et=r(zt,"DIV",{class:!0});var qt=i(et);M(bs.$$.fragment,qt),ku=d(qt),ro=r(qt,"P",{});var ir=i(ro);Tu=s(ir,"The "),Pa=r(ir,"A",{href:!0});var pk=i(Pa);bu=s(pk,"XLMForQuestionAnswering"),pk.forEach(t),Mu=s(ir," forward method, overrides the "),Mi=r(ir,"CODE",{});var hk=i(Mi);wu=s(hk,"__call__"),hk.forEach(t),yu=s(ir," special method."),ir.forEach(t),Lu=d(qt),M(Io.$$.fragment,qt),$u=d(qt),wi=r(qt,"P",{});var mk=i(wi);xu=s(mk,"Example:"),mk.forEach(t),Fu=d(qt),M(Ms.$$.fragment,qt),qt.forEach(t),zt.forEach(t),Pl=d(o),io=r(o,"H2",{class:!0});var $d=i(io);Do=r($d,"A",{id:!0,class:!0,href:!0});var uk=i(Do);yi=r(uk,"SPAN",{});var fk=i(yi);M(ws.$$.fragment,fk),fk.forEach(t),uk.forEach(t),Xu=d($d),Li=r($d,"SPAN",{});var gk=i(Li);Eu=s(gk,"TFXLMModel"),gk.forEach(t),$d.forEach(t),Al=d(o),qe=r(o,"DIV",{class:!0});var ct=i(qe);M(ys.$$.fragment,ct),zu=d(ct),$i=r(ct,"P",{});var _k=i($i);qu=s(_k,"The bare XLM Model transformer outputting raw hidden-states without any specific head on top."),_k.forEach(t),Cu=d(ct),Ls=r(ct,"P",{});var xd=i(Ls);ju=s(xd,"This model inherits from "),Aa=r(xd,"A",{href:!0});var vk=i(Aa);Pu=s(vk,"TFPreTrainedModel"),vk.forEach(t),Au=s(xd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xd.forEach(t),Su=d(ct),$s=r(ct,"P",{});var Fd=i($s);Nu=s(Fd,"This model is also a "),xs=r(Fd,"A",{href:!0,rel:!0});var kk=i(xs);Iu=s(kk,"tf.keras.Model"),kk.forEach(t),Du=s(Fd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Fd.forEach(t),Ou=d(ct),M(Oo.$$.fragment,ct),Wu=d(ct),tt=r(ct,"DIV",{class:!0});var Ct=i(tt);M(Fs.$$.fragment,Ct),Hu=d(Ct),lo=r(Ct,"P",{});var lr=i(lo);Qu=s(lr,"The "),Sa=r(lr,"A",{href:!0});var Tk=i(Sa);Bu=s(Tk,"TFXLMModel"),Tk.forEach(t),Uu=s(lr," forward method, overrides the "),xi=r(lr,"CODE",{});var bk=i(xi);Ru=s(bk,"__call__"),bk.forEach(t),Vu=s(lr," special method."),lr.forEach(t),Gu=d(Ct),M(Wo.$$.fragment,Ct),Ju=d(Ct),Fi=r(Ct,"P",{});var Mk=i(Fi);Ku=s(Mk,"Example:"),Mk.forEach(t),Yu=d(Ct),M(Xs.$$.fragment,Ct),Ct.forEach(t),ct.forEach(t),Sl=d(o),co=r(o,"H2",{class:!0});var Xd=i(co);Ho=r(Xd,"A",{id:!0,class:!0,href:!0});var wk=i(Ho);Xi=r(wk,"SPAN",{});var yk=i(Xi);M(Es.$$.fragment,yk),yk.forEach(t),wk.forEach(t),Zu=d(Xd),Ei=r(Xd,"SPAN",{});var Lk=i(Ei);ef=s(Lk,"TFXLMWithLMHeadModel"),Lk.forEach(t),Xd.forEach(t),Nl=d(o),Ce=r(o,"DIV",{class:!0});var pt=i(Ce);M(zs.$$.fragment,pt),tf=d(pt),zi=r(pt,"P",{});var $k=i(zi);of=s($k,`The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),$k.forEach(t),nf=d(pt),qs=r(pt,"P",{});var Ed=i(qs);sf=s(Ed,"This model inherits from "),Na=r(Ed,"A",{href:!0});var xk=i(Na);af=s(xk,"TFPreTrainedModel"),xk.forEach(t),rf=s(Ed,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ed.forEach(t),lf=d(pt),Cs=r(pt,"P",{});var zd=i(Cs);df=s(zd,"This model is also a "),js=r(zd,"A",{href:!0,rel:!0});var Fk=i(js);cf=s(Fk,"tf.keras.Model"),Fk.forEach(t),pf=s(zd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),zd.forEach(t),hf=d(pt),M(Qo.$$.fragment,pt),mf=d(pt),ot=r(pt,"DIV",{class:!0});var jt=i(ot);M(Ps.$$.fragment,jt),uf=d(jt),po=r(jt,"P",{});var dr=i(po);ff=s(dr,"The "),Ia=r(dr,"A",{href:!0});var Xk=i(Ia);gf=s(Xk,"TFXLMWithLMHeadModel"),Xk.forEach(t),_f=s(dr," forward method, overrides the "),qi=r(dr,"CODE",{});var Ek=i(qi);vf=s(Ek,"__call__"),Ek.forEach(t),kf=s(dr," special method."),dr.forEach(t),Tf=d(jt),M(Bo.$$.fragment,jt),bf=d(jt),Ci=r(jt,"P",{});var zk=i(Ci);Mf=s(zk,"Example:"),zk.forEach(t),wf=d(jt),M(As.$$.fragment,jt),jt.forEach(t),pt.forEach(t),Il=d(o),ho=r(o,"H2",{class:!0});var qd=i(ho);Uo=r(qd,"A",{id:!0,class:!0,href:!0});var qk=i(Uo);ji=r(qk,"SPAN",{});var Ck=i(ji);M(Ss.$$.fragment,Ck),Ck.forEach(t),qk.forEach(t),yf=d(qd),Pi=r(qd,"SPAN",{});var jk=i(Pi);Lf=s(jk,"TFXLMForSequenceClassification"),jk.forEach(t),qd.forEach(t),Dl=d(o),je=r(o,"DIV",{class:!0});var ht=i(je);M(Ns.$$.fragment,ht),$f=d(ht),Ai=r(ht,"P",{});var Pk=i(Ai);xf=s(Pk,`XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Pk.forEach(t),Ff=d(ht),Is=r(ht,"P",{});var Cd=i(Is);Xf=s(Cd,"This model inherits from "),Da=r(Cd,"A",{href:!0});var Ak=i(Da);Ef=s(Ak,"TFPreTrainedModel"),Ak.forEach(t),zf=s(Cd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cd.forEach(t),qf=d(ht),Ds=r(ht,"P",{});var jd=i(Ds);Cf=s(jd,"This model is also a "),Os=r(jd,"A",{href:!0,rel:!0});var Sk=i(Os);jf=s(Sk,"tf.keras.Model"),Sk.forEach(t),Pf=s(jd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jd.forEach(t),Af=d(ht),M(Ro.$$.fragment,ht),Sf=d(ht),nt=r(ht,"DIV",{class:!0});var Pt=i(nt);M(Ws.$$.fragment,Pt),Nf=d(Pt),mo=r(Pt,"P",{});var cr=i(mo);If=s(cr,"The "),Oa=r(cr,"A",{href:!0});var Nk=i(Oa);Df=s(Nk,"TFXLMForSequenceClassification"),Nk.forEach(t),Of=s(cr," forward method, overrides the "),Si=r(cr,"CODE",{});var Ik=i(Si);Wf=s(Ik,"__call__"),Ik.forEach(t),Hf=s(cr," special method."),cr.forEach(t),Qf=d(Pt),M(Vo.$$.fragment,Pt),Bf=d(Pt),Ni=r(Pt,"P",{});var Dk=i(Ni);Uf=s(Dk,"Example:"),Dk.forEach(t),Rf=d(Pt),M(Hs.$$.fragment,Pt),Pt.forEach(t),ht.forEach(t),Ol=d(o),uo=r(o,"H2",{class:!0});var Pd=i(uo);Go=r(Pd,"A",{id:!0,class:!0,href:!0});var Ok=i(Go);Ii=r(Ok,"SPAN",{});var Wk=i(Ii);M(Qs.$$.fragment,Wk),Wk.forEach(t),Ok.forEach(t),Vf=d(Pd),Di=r(Pd,"SPAN",{});var Hk=i(Di);Gf=s(Hk,"TFXLMForMultipleChoice"),Hk.forEach(t),Pd.forEach(t),Wl=d(o),Pe=r(o,"DIV",{class:!0});var mt=i(Pe);M(Bs.$$.fragment,mt),Jf=d(mt),Oi=r(mt,"P",{});var Qk=i(Oi);Kf=s(Qk,`XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Qk.forEach(t),Yf=d(mt),Us=r(mt,"P",{});var Ad=i(Us);Zf=s(Ad,"This model inherits from "),Wa=r(Ad,"A",{href:!0});var Bk=i(Wa);eg=s(Bk,"TFPreTrainedModel"),Bk.forEach(t),tg=s(Ad,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ad.forEach(t),og=d(mt),Rs=r(mt,"P",{});var Sd=i(Rs);ng=s(Sd,"This model is also a "),Vs=r(Sd,"A",{href:!0,rel:!0});var Uk=i(Vs);sg=s(Uk,"tf.keras.Model"),Uk.forEach(t),ag=s(Sd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sd.forEach(t),rg=d(mt),M(Jo.$$.fragment,mt),ig=d(mt),st=r(mt,"DIV",{class:!0});var At=i(st);M(Gs.$$.fragment,At),lg=d(At),fo=r(At,"P",{});var pr=i(fo);dg=s(pr,"The "),Ha=r(pr,"A",{href:!0});var Rk=i(Ha);cg=s(Rk,"TFXLMForMultipleChoice"),Rk.forEach(t),pg=s(pr," forward method, overrides the "),Wi=r(pr,"CODE",{});var Vk=i(Wi);hg=s(Vk,"__call__"),Vk.forEach(t),mg=s(pr," special method."),pr.forEach(t),ug=d(At),M(Ko.$$.fragment,At),fg=d(At),Hi=r(At,"P",{});var Gk=i(Hi);gg=s(Gk,"Example:"),Gk.forEach(t),_g=d(At),M(Js.$$.fragment,At),At.forEach(t),mt.forEach(t),Hl=d(o),go=r(o,"H2",{class:!0});var Nd=i(go);Yo=r(Nd,"A",{id:!0,class:!0,href:!0});var Jk=i(Yo);Qi=r(Jk,"SPAN",{});var Kk=i(Qi);M(Ks.$$.fragment,Kk),Kk.forEach(t),Jk.forEach(t),vg=d(Nd),Bi=r(Nd,"SPAN",{});var Yk=i(Bi);kg=s(Yk,"TFXLMForTokenClassification"),Yk.forEach(t),Nd.forEach(t),Ql=d(o),Ae=r(o,"DIV",{class:!0});var ut=i(Ae);M(Ys.$$.fragment,ut),Tg=d(ut),Ui=r(ut,"P",{});var Zk=i(Ui);bg=s(Zk,`XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),Zk.forEach(t),Mg=d(ut),Zs=r(ut,"P",{});var Id=i(Zs);wg=s(Id,"This model inherits from "),Qa=r(Id,"A",{href:!0});var e1=i(Qa);yg=s(e1,"TFPreTrainedModel"),e1.forEach(t),Lg=s(Id,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Id.forEach(t),$g=d(ut),ea=r(ut,"P",{});var Dd=i(ea);xg=s(Dd,"This model is also a "),ta=r(Dd,"A",{href:!0,rel:!0});var t1=i(ta);Fg=s(t1,"tf.keras.Model"),t1.forEach(t),Xg=s(Dd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Dd.forEach(t),Eg=d(ut),M(Zo.$$.fragment,ut),zg=d(ut),at=r(ut,"DIV",{class:!0});var St=i(at);M(oa.$$.fragment,St),qg=d(St),_o=r(St,"P",{});var hr=i(_o);Cg=s(hr,"The "),Ba=r(hr,"A",{href:!0});var o1=i(Ba);jg=s(o1,"TFXLMForTokenClassification"),o1.forEach(t),Pg=s(hr," forward method, overrides the "),Ri=r(hr,"CODE",{});var n1=i(Ri);Ag=s(n1,"__call__"),n1.forEach(t),Sg=s(hr," special method."),hr.forEach(t),Ng=d(St),M(en.$$.fragment,St),Ig=d(St),Vi=r(St,"P",{});var s1=i(Vi);Dg=s(s1,"Example:"),s1.forEach(t),Og=d(St),M(na.$$.fragment,St),St.forEach(t),ut.forEach(t),Bl=d(o),vo=r(o,"H2",{class:!0});var Od=i(vo);tn=r(Od,"A",{id:!0,class:!0,href:!0});var a1=i(tn);Gi=r(a1,"SPAN",{});var r1=i(Gi);M(sa.$$.fragment,r1),r1.forEach(t),a1.forEach(t),Wg=d(Od),Ji=r(Od,"SPAN",{});var i1=i(Ji);Hg=s(i1,"TFXLMForQuestionAnsweringSimple"),i1.forEach(t),Od.forEach(t),Ul=d(o),Se=r(o,"DIV",{class:!0});var ft=i(Se);M(aa.$$.fragment,ft),Qg=d(ft),ko=r(ft,"P",{});var mr=i(ko);Bg=s(mr,`XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Ki=r(mr,"CODE",{});var l1=i(Ki);Ug=s(l1,"span start logits"),l1.forEach(t),Rg=s(mr," and "),Yi=r(mr,"CODE",{});var d1=i(Yi);Vg=s(d1,"span end logits"),d1.forEach(t),Gg=s(mr,")."),mr.forEach(t),Jg=d(ft),ra=r(ft,"P",{});var Wd=i(ra);Kg=s(Wd,"This model inherits from "),Ua=r(Wd,"A",{href:!0});var c1=i(Ua);Yg=s(c1,"TFPreTrainedModel"),c1.forEach(t),Zg=s(Wd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wd.forEach(t),e_=d(ft),ia=r(ft,"P",{});var Hd=i(ia);t_=s(Hd,"This model is also a "),la=r(Hd,"A",{href:!0,rel:!0});var p1=i(la);o_=s(p1,"tf.keras.Model"),p1.forEach(t),n_=s(Hd,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hd.forEach(t),s_=d(ft),M(on.$$.fragment,ft),a_=d(ft),rt=r(ft,"DIV",{class:!0});var Nt=i(rt);M(da.$$.fragment,Nt),r_=d(Nt),To=r(Nt,"P",{});var ur=i(To);i_=s(ur,"The "),Ra=r(ur,"A",{href:!0});var h1=i(Ra);l_=s(h1,"TFXLMForQuestionAnsweringSimple"),h1.forEach(t),d_=s(ur," forward method, overrides the "),Zi=r(ur,"CODE",{});var m1=i(Zi);c_=s(m1,"__call__"),m1.forEach(t),p_=s(ur," special method."),ur.forEach(t),h_=d(Nt),M(nn.$$.fragment,Nt),m_=d(Nt),el=r(Nt,"P",{});var u1=i(el);u_=s(u1,"Example:"),u1.forEach(t),f_=d(Nt),M(ca.$$.fragment,Nt),Nt.forEach(t),ft.forEach(t),this.h()},h(){p(h,"name","hf:doc:metadata"),p(h,"content",JSON.stringify(I1)),p(_,"id","xlm"),p(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(_,"href","#xlm"),p(f,"class","relative group"),p(Y,"id","overview"),p(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Y,"href","#overview"),p(X,"class","relative group"),p(te,"href","https://arxiv.org/abs/1901.07291"),p(te,"rel","nofollow"),p(N,"href","/docs/transformers/v4.15.0/en/../multilingual"),p(oe,"href","https://huggingface.co/thomwolf"),p(oe,"rel","nofollow"),p(ln,"href","https://github.com/facebookresearch/XLM/"),p(ln,"rel","nofollow"),p(Mo,"id","transformers.XLMConfig"),p(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Mo,"href","#transformers.XLMConfig"),p(It,"class","relative group"),p(fa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMModel"),p(ga,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMModel"),p(pn,"href","https://huggingface.co/xlm-mlm-en-2048"),p(pn,"rel","nofollow"),p(_a,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(va,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(De,"class","docstring"),p(wo,"id","transformers.XLMTokenizer"),p(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(wo,"href","#transformers.XLMTokenizer"),p(Ot,"class","relative group"),p(ka,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(vt,"class","docstring"),p(yo,"class","docstring"),p(lt,"class","docstring"),p(Ar,"class","docstring"),p(Fe,"class","docstring"),p(Lo,"id","transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput"),p(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Lo,"href","#transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput"),p(Ht,"class","relative group"),p(Qt,"class","docstring"),p($o,"id","transformers.XLMModel"),p($o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p($o,"href","#transformers.XLMModel"),p(Bt,"class","relative 
group"),p(Ma,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(zn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(zn,"rel","nofollow"),p(wa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMModel"),p(Ge,"class","docstring"),p(We,"class","docstring"),p(Fo,"id","transformers.XLMWithLMHeadModel"),p(Fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Fo,"href","#transformers.XLMWithLMHeadModel"),p(Rt,"class","relative group"),p(ya,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Nn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Nn,"rel","nofollow"),p(La,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel"),p(Je,"class","docstring"),p(He,"class","docstring"),p(Eo,"id","transformers.XLMForSequenceClassification"),p(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Eo,"href","#transformers.XLMForSequenceClassification"),p(Gt,"class","relative group"),p($a,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Bn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Bn,"rel","nofollow"),p(xa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForSequenceClassification"),p(Ee,"class","docstring"),p(Qe,"class","docstring"),p(qo,"id","transformers.XLMForMultipleChoice"),p(qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(qo,"href","#transformers.XLMForMultipleChoice"),p(Kt,"class","relative group"),p(Fa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Zn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Zn,"rel","nofollow"),p(Xa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForMultipleChoice"),p(Ke,"class","docstring"),p(Be,"class","docstring"),p(jo,"id","transformers.XLMForTokenClassification"),p(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(jo,"href","#transformers.XLMForTokenClassification"),p(Zt,"class","relative group"),p(Ea,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(rs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(rs,"rel","nofollow"),p(za,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForTokenClassification"),p(Ye,"class","docstring"),p(Ue,"class","docstring"),p(Ao,"id","transformers.XLMForQuestionAnsweringSimple"),p(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ao,"href","#transformers.XLMForQuestionAnsweringSimple"),p(to,"class","relative 
group"),p(qa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(ms,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(ms,"rel","nofollow"),p(Ca,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnsweringSimple"),p(Ze,"class","docstring"),p(Re,"class","docstring"),p(No,"id","transformers.XLMForQuestionAnswering"),p(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(No,"href","#transformers.XLMForQuestionAnswering"),p(so,"class","relative group"),p(ja,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Ts,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Ts,"rel","nofollow"),p(Pa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnswering"),p(et,"class","docstring"),p(Ve,"class","docstring"),p(Do,"id","transformers.TFXLMModel"),p(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Do,"href","#transformers.TFXLMModel"),p(io,"class","relative group"),p(Aa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(xs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(xs,"rel","nofollow"),p(Sa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMModel"),p(tt,"class","docstring"),p(qe,"class","docstring"),p(Ho,"id","transformers.TFXLMWithLMHeadModel"),p(Ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ho,"href","#transformers.TFXLMWithLMHeadModel"),p(co,"class","relative group"),p(Na,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(js,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(js,"rel","nofollow"),p(Ia,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMWithLMHeadModel"),p(ot,"class","docstring"),p(Ce,"class","docstring"),p(Uo,"id","transformers.TFXLMForSequenceClassification"),p(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Uo,"href","#transformers.TFXLMForSequenceClassification"),p(ho,"class","relative group"),p(Da,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Os,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Os,"rel","nofollow"),p(Oa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForSequenceClassification"),p(nt,"class","docstring"),p(je,"class","docstring"),p(Go,"id","transformers.TFXLMForMultipleChoice"),p(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Go,"href","#transformers.TFXLMForMultipleChoice"),p(uo,"class","relative 
group"),p(Wa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Vs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Vs,"rel","nofollow"),p(Ha,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForMultipleChoice"),p(st,"class","docstring"),p(Pe,"class","docstring"),p(Yo,"id","transformers.TFXLMForTokenClassification"),p(Yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Yo,"href","#transformers.TFXLMForTokenClassification"),p(go,"class","relative group"),p(Qa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(ta,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(ta,"rel","nofollow"),p(Ba,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForTokenClassification"),p(at,"class","docstring"),p(Ae,"class","docstring"),p(tn,"id","transformers.TFXLMForQuestionAnsweringSimple"),p(tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(tn,"href","#transformers.TFXLMForQuestionAnsweringSimple"),p(vo,"class","relative group"),p(Ua,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(la,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(la,"rel","nofollow"),p(Ra,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForQuestionAnsweringSimple"),p(rt,"class","docstring"),p(Se,"class","docstring")},m(o,u){e(document.head,h),m(o,x,u),m(o,f,u),e(f,_),e(_,k),w(v,k,null),e(f,g),e(f,F),e(F,de),m(o,G,u),m(o,X,u),e(X,Y),e(Y,D),w(ee,D,null),e(X,ce),e(X,O),e(O,pe),m(o,re,u),m(o,R,u),e(R,j),e(R,te),e(te,K),e(R,E),m(o,q,u),m(o,J,u),e(J,W),e(W,he),e(J,me),e(J,H),e(H,ue),e(J,fe),e(J,z),e(z,ge),m(o,Q,u),m(o,ne,u),e(ne,_e),m(o,B,u),m(o,se,u),e(se,ae),e(ae,P),m(o,ie,u),m(o,S,u),e(S,ve),m(o,c,u),m(o,T,u),e(T,V),e(V,Te),e(T,be),e(T,C),e(C,Me),e(C,ke),e(ke,we),e(C,A),e(C,N),e(N,ye),e(C,Le),m(o,U,u),m(o,le,u),e(le,$e),e(le,oe),e(oe,xe),e(le,Qd),e(le,ln),e(ln,Bd),e(le,Ud),m(o,fl,u),m(o,It,u),e(It,Mo),e(Mo,fr),w(dn,fr,null),e(It,Rd),e(It,gr),e(gr,Vd),m(o,gl,u),m(o,De,u),w(cn,De,null),e(De,Gd),e(De,gt),e(gt,Jd),e(gt,fa),e(fa,Kd),e(gt,Yd),e(gt,ga),e(ga,Zd),e(gt,ec),e(gt,pn),e(pn,tc),e(gt,oc),e(De,nc),e(De,Dt),e(Dt,sc),e(Dt,_a),e(_a,ac),e(Dt,rc),e(Dt,va),e(va,ic),e(Dt,lc),e(De,dc),e(De,_r),e(_r,cc),e(De,pc),w(hn,De,null),m(o,_l,u),m(o,Ot,u),e(Ot,wo),e(wo,vr),w(mn,vr,null),e(Ot,hc),e(Ot,kr),e(kr,mc),m(o,vl,u),m(o,Fe,u),w(un,Fe,null),e(Fe,uc),e(Fe,Tr),e(Tr,fc),e(Fe,gc),e(Fe,Oe),e(Oe,br),e(br,_c),e(Oe,vc),e(Oe,Mr),e(Mr,kc),e(Oe,Tc),e(Oe,wr),e(wr,bc),e(Oe,Mc),e(Oe,_t),e(_t,wc),e(_t,yr),e(yr,yc),e(_t,Lc),e(_t,Lr),e(Lr,$c),e(_t,xc),e(_t,$r),e($r,Fc),e(_t,Xc),e(Oe,Ec),e(Oe,fn),e(fn,zc),e(fn,xr),e(xr,qc),e(fn,Cc),e(Oe,jc),e(Oe,gn),e(gn,Pc),e(gn,Fr),e(Fr,Ac),e(gn,Sc),e(Fe,Nc),e(Fe,_n),e(_n,Ic),e(_n,ka),e(ka,Dc),e(_n,Oc),e(Fe,Wc),e(Fe,vt),w(vn,vt,null),e(vt,Hc),e(vt,Xr),e(Xr,Qc),e(vt,Bc),e(vt,kn),e(kn,Ta),e(Ta,Uc),e(Ta,Er),e(Er,Rc),e(kn,Vc),e(kn,ba),e(ba,Gc),e(ba,zr),e(zr,Jc),e(Fe,Kc),e(Fe,yo),w(Tn,yo,null),e(yo,Yc),e(yo,bn),e(bn,Zc),e(bn,qr),e(qr,ep),e(bn,tp),e(Fe,op),e(Fe,lt),w(Mn,lt,null),e(lt,np),e(lt,Cr),e(Cr,sp),e(lt,ap),w(wn,lt,null),e(lt,rp),e(lt,Wt),e(Wt,ip),e(Wt,jr),e(jr,lp),e(Wt,dp),e(Wt,Pr),e(Pr,cp),e(Wt,pp),e(Fe,hp),e(Fe,Ar),m(o,kl,u),m(o,Ht,u),e(Ht,
Lo),e(Lo,Sr),w(yn,Sr,null),e(Ht,mp),e(Ht,Nr),e(Nr,up),m(o,Tl,u),m(o,Qt,u),w(Ln,Qt,null),e(Qt,fp),e(Qt,$n),e($n,gp),e($n,Ir),e(Ir,_p),e($n,vp),m(o,bl,u),m(o,Bt,u),e(Bt,$o),e($o,Dr),w(xn,Dr,null),e(Bt,kp),e(Bt,Or),e(Or,Tp),m(o,Ml,u),m(o,We,u),w(Fn,We,null),e(We,bp),e(We,Wr),e(Wr,Mp),e(We,wp),e(We,Xn),e(Xn,yp),e(Xn,Ma),e(Ma,Lp),e(Xn,$p),e(We,xp),e(We,En),e(En,Fp),e(En,zn),e(zn,Xp),e(En,Ep),e(We,zp),e(We,Ge),w(qn,Ge,null),e(Ge,qp),e(Ge,Ut),e(Ut,Cp),e(Ut,wa),e(wa,jp),e(Ut,Pp),e(Ut,Hr),e(Hr,Ap),e(Ut,Sp),e(Ge,Np),w(xo,Ge,null),e(Ge,Ip),e(Ge,Qr),e(Qr,Dp),e(Ge,Op),w(Cn,Ge,null),m(o,wl,u),m(o,Rt,u),e(Rt,Fo),e(Fo,Br),w(jn,Br,null),e(Rt,Wp),e(Rt,Ur),e(Ur,Hp),m(o,yl,u),m(o,He,u),w(Pn,He,null),e(He,Qp),e(He,Rr),e(Rr,Bp),e(He,Up),e(He,An),e(An,Rp),e(An,ya),e(ya,Vp),e(An,Gp),e(He,Jp),e(He,Sn),e(Sn,Kp),e(Sn,Nn),e(Nn,Yp),e(Sn,Zp),e(He,eh),e(He,Je),w(In,Je,null),e(Je,th),e(Je,Vt),e(Vt,oh),e(Vt,La),e(La,nh),e(Vt,sh),e(Vt,Vr),e(Vr,ah),e(Vt,rh),e(Je,ih),w(Xo,Je,null),e(Je,lh),e(Je,Gr),e(Gr,dh),e(Je,ch),w(Dn,Je,null),m(o,Ll,u),m(o,Gt,u),e(Gt,Eo),e(Eo,Jr),w(On,Jr,null),e(Gt,ph),e(Gt,Kr),e(Kr,hh),m(o,$l,u),m(o,Qe,u),w(Wn,Qe,null),e(Qe,mh),e(Qe,Yr),e(Yr,uh),e(Qe,fh),e(Qe,Hn),e(Hn,gh),e(Hn,$a),e($a,_h),e(Hn,vh),e(Qe,kh),e(Qe,Qn),e(Qn,Th),e(Qn,Bn),e(Bn,bh),e(Qn,Mh),e(Qe,wh),e(Qe,Ee),w(Un,Ee,null),e(Ee,yh),e(Ee,Jt),e(Jt,Lh),e(Jt,xa),e(xa,$h),e(Jt,xh),e(Jt,Zr),e(Zr,Fh),e(Jt,Xh),e(Ee,Eh),w(zo,Ee,null),e(Ee,zh),e(Ee,ei),e(ei,qh),e(Ee,Ch),w(Rn,Ee,null),e(Ee,jh),e(Ee,ti),e(ti,Ph),e(Ee,Ah),w(Vn,Ee,null),m(o,xl,u),m(o,Kt,u),e(Kt,qo),e(qo,oi),w(Gn,oi,null),e(Kt,Sh),e(Kt,ni),e(ni,Nh),m(o,Fl,u),m(o,Be,u),w(Jn,Be,null),e(Be,Ih),e(Be,si),e(si,Dh),e(Be,Oh),e(Be,Kn),e(Kn,Wh),e(Kn,Fa),e(Fa,Hh),e(Kn,Qh),e(Be,Bh),e(Be,Yn),e(Yn,Uh),e(Yn,Zn),e(Zn,Rh),e(Yn,Vh),e(Be,Gh),e(Be,Ke),w(es,Ke,null),e(Ke,Jh),e(Ke,Yt),e(Yt,Kh),e(Yt,Xa),e(Xa,Yh),e(Yt,Zh),e(Yt,ai),e(ai,em),e(Yt,tm),e(Ke,om),w(Co,Ke,null),e(Ke,nm),e(Ke,ri),e(ri,sm),e(Ke,am),w(ts,Ke,null),m(o,Xl,u),m(o,Zt,u),e(Zt,jo),e(jo,ii),w(os,ii,null),e(Zt,rm),e(Zt,li),e(li,im),m(o,El,u),m(o,Ue,u),w(ns,Ue,null),e(Ue,lm),e(Ue,di),e(di,dm),e(Ue,cm),e(Ue,ss),e(ss,pm),e(ss,Ea),e(Ea,hm),e(ss,mm),e(Ue,um),e(Ue,as),e(as,fm),e(as,rs),e(rs,gm),e(as,_m),e(Ue,vm),e(Ue,Ye),w(is,Ye,null),e(Ye,km),e(Ye,eo),e(eo,Tm),e(eo,za),e(za,bm),e(eo,Mm),e(eo,ci),e(ci,wm),e(eo,ym),e(Ye,Lm),w(Po,Ye,null),e(Ye,$m),e(Ye,pi),e(pi,xm),e(Ye,Fm),w(ls,Ye,null),m(o,zl,u),m(o,to,u),e(to,Ao),e(Ao,hi),w(ds,hi,null),e(to,Xm),e(to,mi),e(mi,Em),m(o,ql,u),m(o,Re,u),w(cs,Re,null),e(Re,zm),e(Re,oo),e(oo,qm),e(oo,ui),e(ui,Cm),e(oo,jm),e(oo,fi),e(fi,Pm),e(oo,Am),e(Re,Sm),e(Re,ps),e(ps,Nm),e(ps,qa),e(qa,Im),e(ps,Dm),e(Re,Om),e(Re,hs),e(hs,Wm),e(hs,ms),e(ms,Hm),e(hs,Qm),e(Re,Bm),e(Re,Ze),w(us,Ze,null),e(Ze,Um),e(Ze,no),e(no,Rm),e(no,Ca),e(Ca,Vm),e(no,Gm),e(no,gi),e(gi,Jm),e(no,Km),e(Ze,Ym),w(So,Ze,null),e(Ze,Zm),e(Ze,_i),e(_i,eu),e(Ze,tu),w(fs,Ze,null),m(o,Cl,u),m(o,so,u),e(so,No),e(No,vi),w(gs,vi,null),e(so,ou),e(so,ki),e(ki,nu),m(o,jl,u),m(o,Ve,u),w(_s,Ve,null),e(Ve,su),e(Ve,ao),e(ao,au),e(ao,Ti),e(Ti,ru),e(ao,iu),e(ao,bi),e(bi,lu),e(ao,du),e(Ve,cu),e(Ve,vs),e(vs,pu),e(vs,ja),e(ja,hu),e(vs,mu),e(Ve,uu),e(Ve,ks),e(ks,fu),e(ks,Ts),e(Ts,gu),e(ks,_u),e(Ve,vu),e(Ve,et),w(bs,et,null),e(et,ku),e(et,ro),e(ro,Tu),e(ro,Pa),e(Pa,bu),e(ro,Mu),e(ro,Mi),e(Mi,wu),e(ro,yu),e(et,Lu),w(Io,et,null),e(et,$u),e(et,wi),e(wi,xu),e(et,Fu),w(Ms,et,null),m(o,Pl,u),m(o,io,u),e(io,Do),e(Do,yi),w(ws,yi,null),e(io,Xu),e(io,Li),e(Li,Eu),m(o,Al,u),m(o,qe,u),w(ys,qe,null),e(qe,zu),e(qe,$i),e($i,qu),e(qe,Cu),e(qe,Ls),e(Ls,ju),e(Ls,Aa),e(Aa,Pu),e(Ls,Au),e(qe,Su),e(qe,$s),e($s,
Nu),e($s,xs),e(xs,Iu),e($s,Du),e(qe,Ou),w(Oo,qe,null),e(qe,Wu),e(qe,tt),w(Fs,tt,null),e(tt,Hu),e(tt,lo),e(lo,Qu),e(lo,Sa),e(Sa,Bu),e(lo,Uu),e(lo,xi),e(xi,Ru),e(lo,Vu),e(tt,Gu),w(Wo,tt,null),e(tt,Ju),e(tt,Fi),e(Fi,Ku),e(tt,Yu),w(Xs,tt,null),m(o,Sl,u),m(o,co,u),e(co,Ho),e(Ho,Xi),w(Es,Xi,null),e(co,Zu),e(co,Ei),e(Ei,ef),m(o,Nl,u),m(o,Ce,u),w(zs,Ce,null),e(Ce,tf),e(Ce,zi),e(zi,of),e(Ce,nf),e(Ce,qs),e(qs,sf),e(qs,Na),e(Na,af),e(qs,rf),e(Ce,lf),e(Ce,Cs),e(Cs,df),e(Cs,js),e(js,cf),e(Cs,pf),e(Ce,hf),w(Qo,Ce,null),e(Ce,mf),e(Ce,ot),w(Ps,ot,null),e(ot,uf),e(ot,po),e(po,ff),e(po,Ia),e(Ia,gf),e(po,_f),e(po,qi),e(qi,vf),e(po,kf),e(ot,Tf),w(Bo,ot,null),e(ot,bf),e(ot,Ci),e(Ci,Mf),e(ot,wf),w(As,ot,null),m(o,Il,u),m(o,ho,u),e(ho,Uo),e(Uo,ji),w(Ss,ji,null),e(ho,yf),e(ho,Pi),e(Pi,Lf),m(o,Dl,u),m(o,je,u),w(Ns,je,null),e(je,$f),e(je,Ai),e(Ai,xf),e(je,Ff),e(je,Is),e(Is,Xf),e(Is,Da),e(Da,Ef),e(Is,zf),e(je,qf),e(je,Ds),e(Ds,Cf),e(Ds,Os),e(Os,jf),e(Ds,Pf),e(je,Af),w(Ro,je,null),e(je,Sf),e(je,nt),w(Ws,nt,null),e(nt,Nf),e(nt,mo),e(mo,If),e(mo,Oa),e(Oa,Df),e(mo,Of),e(mo,Si),e(Si,Wf),e(mo,Hf),e(nt,Qf),w(Vo,nt,null),e(nt,Bf),e(nt,Ni),e(Ni,Uf),e(nt,Rf),w(Hs,nt,null),m(o,Ol,u),m(o,uo,u),e(uo,Go),e(Go,Ii),w(Qs,Ii,null),e(uo,Vf),e(uo,Di),e(Di,Gf),m(o,Wl,u),m(o,Pe,u),w(Bs,Pe,null),e(Pe,Jf),e(Pe,Oi),e(Oi,Kf),e(Pe,Yf),e(Pe,Us),e(Us,Zf),e(Us,Wa),e(Wa,eg),e(Us,tg),e(Pe,og),e(Pe,Rs),e(Rs,ng),e(Rs,Vs),e(Vs,sg),e(Rs,ag),e(Pe,rg),w(Jo,Pe,null),e(Pe,ig),e(Pe,st),w(Gs,st,null),e(st,lg),e(st,fo),e(fo,dg),e(fo,Ha),e(Ha,cg),e(fo,pg),e(fo,Wi),e(Wi,hg),e(fo,mg),e(st,ug),w(Ko,st,null),e(st,fg),e(st,Hi),e(Hi,gg),e(st,_g),w(Js,st,null),m(o,Hl,u),m(o,go,u),e(go,Yo),e(Yo,Qi),w(Ks,Qi,null),e(go,vg),e(go,Bi),e(Bi,kg),m(o,Ql,u),m(o,Ae,u),w(Ys,Ae,null),e(Ae,Tg),e(Ae,Ui),e(Ui,bg),e(Ae,Mg),e(Ae,Zs),e(Zs,wg),e(Zs,Qa),e(Qa,yg),e(Zs,Lg),e(Ae,$g),e(Ae,ea),e(ea,xg),e(ea,ta),e(ta,Fg),e(ea,Xg),e(Ae,Eg),w(Zo,Ae,null),e(Ae,zg),e(Ae,at),w(oa,at,null),e(at,qg),e(at,_o),e(_o,Cg),e(_o,Ba),e(Ba,jg),e(_o,Pg),e(_o,Ri),e(Ri,Ag),e(_o,Sg),e(at,Ng),w(en,at,null),e(at,Ig),e(at,Vi),e(Vi,Dg),e(at,Og),w(na,at,null),m(o,Bl,u),m(o,vo,u),e(vo,tn),e(tn,Gi),w(sa,Gi,null),e(vo,Wg),e(vo,Ji),e(Ji,Hg),m(o,Ul,u),m(o,Se,u),w(aa,Se,null),e(Se,Qg),e(Se,ko),e(ko,Bg),e(ko,Ki),e(Ki,Ug),e(ko,Rg),e(ko,Yi),e(Yi,Vg),e(ko,Gg),e(Se,Jg),e(Se,ra),e(ra,Kg),e(ra,Ua),e(Ua,Yg),e(ra,Zg),e(Se,e_),e(Se,ia),e(ia,t_),e(ia,la),e(la,o_),e(ia,n_),e(Se,s_),w(on,Se,null),e(Se,a_),e(Se,rt),w(da,rt,null),e(rt,r_),e(rt,To),e(To,i_),e(To,Ra),e(Ra,l_),e(To,d_),e(To,Zi),e(Zi,c_),e(To,p_),e(rt,h_),w(nn,rt,null),e(rt,m_),e(rt,el),e(el,u_),e(rt,f_),w(ca,rt,null),Rl=!0},p(o,[u]){const pa={};u&2&&(pa.$$scope={dirty:u,ctx:o}),xo.$set(pa);const tl={};u&2&&(tl.$$scope={dirty:u,ctx:o}),Xo.$set(tl);const ol={};u&2&&(ol.$$scope={dirty:u,ctx:o}),zo.$set(ol);const nl={};u&2&&(nl.$$scope={dirty:u,ctx:o}),Co.$set(nl);const ha={};u&2&&(ha.$$scope={dirty:u,ctx:o}),Po.$set(ha);const sl={};u&2&&(sl.$$scope={dirty:u,ctx:o}),So.$set(sl);const al={};u&2&&(al.$$scope={dirty:u,ctx:o}),Io.$set(al);const rl={};u&2&&(rl.$$scope={dirty:u,ctx:o}),Oo.$set(rl);const ma={};u&2&&(ma.$$scope={dirty:u,ctx:o}),Wo.$set(ma);const il={};u&2&&(il.$$scope={dirty:u,ctx:o}),Qo.$set(il);const bo={};u&2&&(bo.$$scope={dirty:u,ctx:o}),Bo.$set(bo);const ll={};u&2&&(ll.$$scope={dirty:u,ctx:o}),Ro.$set(ll);const dl={};u&2&&(dl.$$scope={dirty:u,ctx:o}),Vo.$set(dl);const cl={};u&2&&(cl.$$scope={dirty:u,ctx:o}),Jo.$set(cl);const pl={};u&2&&(pl.$$scope={dirty:u,ctx:o}),Ko.$set(pl);const hl={};u&2&&(hl.$$scope={dirty:u,ctx:o}),Zo.$set(hl);const 
ml={};u&2&&(ml.$$scope={dirty:u,ctx:o}),en.$set(ml);const ul={};u&2&&(ul.$$scope={dirty:u,ctx:o}),on.$set(ul);const ua={};u&2&&(ua.$$scope={dirty:u,ctx:o}),nn.$set(ua)},i(o){Rl||(y(v.$$.fragment,o),y(ee.$$.fragment,o),y(dn.$$.fragment,o),y(cn.$$.fragment,o),y(hn.$$.fragment,o),y(mn.$$.fragment,o),y(un.$$.fragment,o),y(vn.$$.fragment,o),y(Tn.$$.fragment,o),y(Mn.$$.fragment,o),y(wn.$$.fragment,o),y(yn.$$.fragment,o),y(Ln.$$.fragment,o),y(xn.$$.fragment,o),y(Fn.$$.fragment,o),y(qn.$$.fragment,o),y(xo.$$.fragment,o),y(Cn.$$.fragment,o),y(jn.$$.fragment,o),y(Pn.$$.fragment,o),y(In.$$.fragment,o),y(Xo.$$.fragment,o),y(Dn.$$.fragment,o),y(On.$$.fragment,o),y(Wn.$$.fragment,o),y(Un.$$.fragment,o),y(zo.$$.fragment,o),y(Rn.$$.fragment,o),y(Vn.$$.fragment,o),y(Gn.$$.fragment,o),y(Jn.$$.fragment,o),y(es.$$.fragment,o),y(Co.$$.fragment,o),y(ts.$$.fragment,o),y(os.$$.fragment,o),y(ns.$$.fragment,o),y(is.$$.fragment,o),y(Po.$$.fragment,o),y(ls.$$.fragment,o),y(ds.$$.fragment,o),y(cs.$$.fragment,o),y(us.$$.fragment,o),y(So.$$.fragment,o),y(fs.$$.fragment,o),y(gs.$$.fragment,o),y(_s.$$.fragment,o),y(bs.$$.fragment,o),y(Io.$$.fragment,o),y(Ms.$$.fragment,o),y(ws.$$.fragment,o),y(ys.$$.fragment,o),y(Oo.$$.fragment,o),y(Fs.$$.fragment,o),y(Wo.$$.fragment,o),y(Xs.$$.fragment,o),y(Es.$$.fragment,o),y(zs.$$.fragment,o),y(Qo.$$.fragment,o),y(Ps.$$.fragment,o),y(Bo.$$.fragment,o),y(As.$$.fragment,o),y(Ss.$$.fragment,o),y(Ns.$$.fragment,o),y(Ro.$$.fragment,o),y(Ws.$$.fragment,o),y(Vo.$$.fragment,o),y(Hs.$$.fragment,o),y(Qs.$$.fragment,o),y(Bs.$$.fragment,o),y(Jo.$$.fragment,o),y(Gs.$$.fragment,o),y(Ko.$$.fragment,o),y(Js.$$.fragment,o),y(Ks.$$.fragment,o),y(Ys.$$.fragment,o),y(Zo.$$.fragment,o),y(oa.$$.fragment,o),y(en.$$.fragment,o),y(na.$$.fragment,o),y(sa.$$.fragment,o),y(aa.$$.fragment,o),y(on.$$.fragment,o),y(da.$$.fragment,o),y(nn.$$.fragment,o),y(ca.$$.fragment,o),Rl=!0)},o(o){L(v.$$.fragment,o),L(ee.$$.fragment,o),L(dn.$$.fragment,o),L(cn.$$.fragment,o),L(hn.$$.fragment,o),L(mn.$$.fragment,o),L(un.$$.fragment,o),L(vn.$$.fragment,o),L(Tn.$$.fragment,o),L(Mn.$$.fragment,o),L(wn.$$.fragment,o),L(yn.$$.fragment,o),L(Ln.$$.fragment,o),L(xn.$$.fragment,o),L(Fn.$$.fragment,o),L(qn.$$.fragment,o),L(xo.$$.fragment,o),L(Cn.$$.fragment,o),L(jn.$$.fragment,o),L(Pn.$$.fragment,o),L(In.$$.fragment,o),L(Xo.$$.fragment,o),L(Dn.$$.fragment,o),L(On.$$.fragment,o),L(Wn.$$.fragment,o),L(Un.$$.fragment,o),L(zo.$$.fragment,o),L(Rn.$$.fragment,o),L(Vn.$$.fragment,o),L(Gn.$$.fragment,o),L(Jn.$$.fragment,o),L(es.$$.fragment,o),L(Co.$$.fragment,o),L(ts.$$.fragment,o),L(os.$$.fragment,o),L(ns.$$.fragment,o),L(is.$$.fragment,o),L(Po.$$.fragment,o),L(ls.$$.fragment,o),L(ds.$$.fragment,o),L(cs.$$.fragment,o),L(us.$$.fragment,o),L(So.$$.fragment,o),L(fs.$$.fragment,o),L(gs.$$.fragment,o),L(_s.$$.fragment,o),L(bs.$$.fragment,o),L(Io.$$.fragment,o),L(Ms.$$.fragment,o),L(ws.$$.fragment,o),L(ys.$$.fragment,o),L(Oo.$$.fragment,o),L(Fs.$$.fragment,o),L(Wo.$$.fragment,o),L(Xs.$$.fragment,o),L(Es.$$.fragment,o),L(zs.$$.fragment,o),L(Qo.$$.fragment,o),L(Ps.$$.fragment,o),L(Bo.$$.fragment,o),L(As.$$.fragment,o),L(Ss.$$.fragment,o),L(Ns.$$.fragment,o),L(Ro.$$.fragment,o),L(Ws.$$.fragment,o),L(Vo.$$.fragment,o),L(Hs.$$.fragment,o),L(Qs.$$.fragment,o),L(Bs.$$.fragment,o),L(Jo.$$.fragment,o),L(Gs.$$.fragment,o),L(Ko.$$.fragment,o),L(Js.$$.fragment,o),L(Ks.$$.fragment,o),L(Ys.$$.fragment,o),L(Zo.$$.fragment,o),L(oa.$$.fragment,o),L(en.$$.fragment,o),L(na.$$.fragment,o),L(sa.$$.fragment,o),L(aa.$$.fragment,o),L(on.$$.fragment,o),L(da.$$.fragment,o),L(nn.
$$.fragment,o),L(ca.$$.fragment,o),Rl=!1},d(o){t(h),o&&t(x),o&&t(f),$(v),o&&t(G),o&&t(X),$(ee),o&&t(re),o&&t(R),o&&t(q),o&&t(J),o&&t(Q),o&&t(ne),o&&t(B),o&&t(se),o&&t(ie),o&&t(S),o&&t(c),o&&t(T),o&&t(U),o&&t(le),o&&t(fl),o&&t(It),$(dn),o&&t(gl),o&&t(De),$(cn),$(hn),o&&t(_l),o&&t(Ot),$(mn),o&&t(vl),o&&t(Fe),$(un),$(vn),$(Tn),$(Mn),$(wn),o&&t(kl),o&&t(Ht),$(yn),o&&t(Tl),o&&t(Qt),$(Ln),o&&t(bl),o&&t(Bt),$(xn),o&&t(Ml),o&&t(We),$(Fn),$(qn),$(xo),$(Cn),o&&t(wl),o&&t(Rt),$(jn),o&&t(yl),o&&t(He),$(Pn),$(In),$(Xo),$(Dn),o&&t(Ll),o&&t(Gt),$(On),o&&t($l),o&&t(Qe),$(Wn),$(Un),$(zo),$(Rn),$(Vn),o&&t(xl),o&&t(Kt),$(Gn),o&&t(Fl),o&&t(Be),$(Jn),$(es),$(Co),$(ts),o&&t(Xl),o&&t(Zt),$(os),o&&t(El),o&&t(Ue),$(ns),$(is),$(Po),$(ls),o&&t(zl),o&&t(to),$(ds),o&&t(ql),o&&t(Re),$(cs),$(us),$(So),$(fs),o&&t(Cl),o&&t(so),$(gs),o&&t(jl),o&&t(Ve),$(_s),$(bs),$(Io),$(Ms),o&&t(Pl),o&&t(io),$(ws),o&&t(Al),o&&t(qe),$(ys),$(Oo),$(Fs),$(Wo),$(Xs),o&&t(Sl),o&&t(co),$(Es),o&&t(Nl),o&&t(Ce),$(zs),$(Qo),$(Ps),$(Bo),$(As),o&&t(Il),o&&t(ho),$(Ss),o&&t(Dl),o&&t(je),$(Ns),$(Ro),$(Ws),$(Vo),$(Hs),o&&t(Ol),o&&t(uo),$(Qs),o&&t(Wl),o&&t(Pe),$(Bs),$(Jo),$(Gs),$(Ko),$(Js),o&&t(Hl),o&&t(go),$(Ks),o&&t(Ql),o&&t(Ae),$(Ys),$(Zo),$(oa),$(en),$(na),o&&t(Bl),o&&t(vo),$(sa),o&&t(Ul),o&&t(Se),$(aa),$(on),$(da),$(nn),$(ca)}}}const I1={local:"xlm",sections:[{local:"overview",title:"Overview"},{local:"transformers.XLMConfig",title:"XLMConfig"},{local:"transformers.XLMTokenizer",title:"XLMTokenizer"},{local:"transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput",title:"XLM specific outputs"},{local:"transformers.XLMModel",title:"XLMModel"},{local:"transformers.XLMWithLMHeadModel",title:"XLMWithLMHeadModel"},{local:"transformers.XLMForSequenceClassification",title:"XLMForSequenceClassification"},{local:"transformers.XLMForMultipleChoice",title:"XLMForMultipleChoice"},{local:"transformers.XLMForTokenClassification",title:"XLMForTokenClassification"},{local:"transformers.XLMForQuestionAnsweringSimple",title:"XLMForQuestionAnsweringSimple"},{local:"transformers.XLMForQuestionAnswering",title:"XLMForQuestionAnswering"},{local:"transformers.TFXLMModel",title:"TFXLMModel"},{local:"transformers.TFXLMWithLMHeadModel",title:"TFXLMWithLMHeadModel"},{local:"transformers.TFXLMForSequenceClassification",title:"TFXLMForSequenceClassification"},{local:"transformers.TFXLMForMultipleChoice",title:"TFXLMForMultipleChoice"},{local:"transformers.TFXLMForTokenClassification",title:"TFXLMForTokenClassification"},{local:"transformers.TFXLMForQuestionAnsweringSimple",title:"TFXLMForQuestionAnsweringSimple"}],title:"XLM"};function D1(I,h,x){let{fw:f}=h;return I.$$set=_=>{"fw"in _&&x(0,f=_.fw)},[f]}class R1 extends f1{constructor(h){super();g1(this,h,D1,N1,_1,{fw:0})}}export{R1 as default,I1 as metadata};
9933
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/layoutlmv2.mdx-314d5480.js
import{S as vv,i as yv,s as bv,e as s,k as c,w as h,t as n,L as Lv,c as r,d as t,m as d,a as i,x as m,h as a,b as l,J as e,g as p,y as f,q as g,o as _,B as v}from"../../chunks/vendor-b1433968.js";import{T as ji}from"../../chunks/Tip-c3840994.js";import{D as N}from"../../chunks/Docstring-ff504c58.js";import{C as Y}from"../../chunks/CodeBlock-a320dbd7.js";import{I as re}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function wv(ie){let y,q,b,M,P;return{c(){y=s("p"),q=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s("code"),M=n("Module"),P=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(L){y=r(L,"P",{});var w=i(y);q=a(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=r(w,"CODE",{});var I=i(b);M=a(I,"Module"),I.forEach(t),P=a(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(L,w){p(L,y,w),e(y,q),e(y,b),e(b,M),e(y,P)},d(L){L&&t(y)}}}function kv(ie){let y,q,b,M,P;return{c(){y=s("p"),q=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s("code"),M=n("Module"),P=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(L){y=r(L,"P",{});var w=i(y);q=a(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=r(w,"CODE",{});var I=i(b);M=a(I,"Module"),I.forEach(t),P=a(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(L,w){p(L,y,w),e(y,q),e(y,b),e(b,M),e(y,P)},d(L){L&&t(y)}}}function xv(ie){let y,q,b,M,P;return{c(){y=s("p"),q=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s("code"),M=n("Module"),P=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(L){y=r(L,"P",{});var w=i(y);q=a(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=r(w,"CODE",{});var I=i(b);M=a(I,"Module"),I.forEach(t),P=a(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(L,w){p(L,y,w),e(y,q),e(y,b),e(b,M),e(y,P)},d(L){L&&t(y)}}}function Mv(ie){let y,q,b,M,P;return{c(){y=s("p"),q=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s("code"),M=n("Module"),P=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(L){y=r(L,"P",{});var w=i(y);q=a(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=r(w,"CODE",{});var I=i(b);M=a(I,"Module"),I.forEach(t),P=a(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(L,w){p(L,y,w),e(y,q),e(y,b),e(b,M),e(y,P)},d(L){L&&t(y)}}}function 
Tv(ie){let y,q,b,M,P,L,w,I,Ii,sr,ze,Qe,na,bo,Ai,aa,Di,rr,pe,Ni,Lo,Oi,Si,Vt,Ri,Ui,ir,he,Z,Bi,wo,Wi,Vi,ko,Qi,Gi,xo,Hi,Ki,Mo,Xi,Yi,Zi,To,Ji,Eo,el,ol,tl,zo,nl,$o,al,sl,lr,Qt,rl,cr,Gt,sa,il,dr,Ht,ll,ur,B,ra,cl,dl,qo,ul,Fo,pl,hl,ml,Po,fl,Co,gl,_l,vl,$e,yl,jo,bl,Ll,Io,wl,kl,xl,k,Ml,ia,Tl,El,Kt,zl,$l,la,ql,Fl,ca,Pl,Cl,da,jl,Il,ua,Al,Dl,pa,Nl,Ol,ha,Sl,Rl,ma,Ul,Bl,Xt,Wl,Vl,Ao,Ql,Gl,Do,Hl,Kl,pr,No,hr,me,Xl,fa,Yl,Zl,ga,Jl,ec,mr,Oo,fr,Ge,oc,Yt,tc,nc,gr,fe,j,ac,Zt,sc,rc,_a,ic,lc,va,cc,dc,Jt,uc,pc,ya,hc,mc,ba,fc,gc,La,_c,vc,yc,So,bc,en,Lc,wc,kc,Ro,xc,wa,Mc,Tc,_r,He,Ec,on,zc,$c,vr,qe,Ke,ka,Uo,qc,xa,Fc,yr,W,Pc,tn,Cc,jc,nn,Ic,Ac,an,Dc,Nc,sn,Oc,Sc,br,Bo,Lr,T,Rc,rn,Uc,Bc,ln,Wc,Vc,Ma,Qc,Gc,cn,Hc,Kc,dn,Xc,Yc,Ta,Zc,Jc,Ea,ed,od,za,td,nd,$a,ad,sd,qa,rd,id,wr,S,un,ld,cd,Wo,dd,ud,pn,pd,hd,Fa,md,fd,Pa,gd,_d,kr,hn,vd,xr,mn,Ca,yd,Mr,fn,bd,Tr,Vo,Er,gn,ja,Ld,zr,ge,wd,Ia,kd,xd,Aa,Md,Td,$r,Qo,qr,_n,Da,Ed,Fr,V,zd,Na,$d,qd,Oa,Fd,Pd,Sa,Cd,jd,Ra,Id,Ad,Pr,Go,Cr,vn,Ua,Dd,jr,yn,Nd,Ir,Ho,Ar,bn,Ba,Od,Dr,Ln,Sd,Nr,Ko,Or,Fe,Xe,Wa,Xo,Rd,Va,Ud,Sr,R,Yo,Bd,Pe,Wd,wn,Vd,Qd,Zo,Gd,Hd,Kd,Ce,Xd,kn,Yd,Zd,xn,Jd,eu,ou,Qa,tu,nu,Jo,Rr,je,Ye,Ga,et,au,Ha,su,Ur,J,ot,ru,Ka,iu,lu,tt,cu,Xa,du,uu,pu,ae,nt,hu,Ya,mu,fu,Za,gu,_u,at,Br,Ie,Ze,Ja,st,vu,es,yu,Wr,A,rt,bu,D,Lu,Mn,wu,ku,os,xu,Mu,ts,Tu,Eu,ns,zu,$u,as,qu,Fu,ss,Pu,Cu,ju,it,Iu,Tn,Au,Du,Nu,En,zn,Ou,Su,Ru,Je,lt,Uu,rs,Bu,Wu,is,Vr,Ae,eo,ls,ct,Vu,cs,Qu,Qr,ee,dt,Gu,ut,Hu,ds,Ku,Xu,Yu,pt,Zu,$n,Ju,ep,op,oo,ht,tp,us,np,Gr,De,to,ps,mt,ap,hs,sp,Hr,U,ft,rp,ms,ip,lp,qn,Fn,cp,dp,up,$,pp,Pn,hp,mp,Cn,fp,gp,jn,_p,vp,fs,yp,bp,gs,Lp,wp,_s,kp,xp,vs,Mp,Tp,ys,Ep,zp,bs,$p,qp,Fp,_e,gt,Pp,x,Cp,Ls,jp,Ip,_t,ws,Ap,Dp,Np,In,Op,Sp,ks,Rp,Up,xs,Bp,Wp,vt,Ms,Vp,Qp,Gp,Ts,Hp,Kp,An,Xp,Yp,Es,Zp,Jp,zs,eh,oh,$s,th,nh,qs,ah,sh,rh,Fs,ih,Kr,Ne,no,Ps,yt,lh,Cs,ch,Xr,le,bt,dh,Lt,uh,wt,ph,hh,mh,Q,kt,fh,Oe,gh,Dn,_h,vh,js,yh,bh,Lh,ao,wh,Is,kh,xh,xt,Yr,Se,so,As,Mt,Mh,Ds,Th,Zr,oe,Tt,Eh,Et,zh,Ns,$h,qh,Fh,zt,Ph,$t,Ch,jh,Ih,G,qt,Ah,Re,Dh,Nn,Nh,Oh,Os,Sh,Rh,Uh,ro,Bh,Ss,Wh,Vh,Ft,Jr,Ue,io,Rs,Pt,Qh,Us,Gh,ei,te,Ct,Hh,ce,Kh,Bs,Xh,On,Yh,Ws,Zh,Jh,Vs,em,Sn,om,Qs,tm,nm,am,jt,sm,It,rm,im,lm,H,At,cm,Be,dm,Rn,um,pm,Gs,hm,mm,fm,lo,gm,Hs,_m,vm,Dt,oi,We,co,Ks,Nt,ym,Xs,bm,ti,ne,Ot,Lm,de,wm,Ys,km,xm,Zs,Mm,Tm,Js,Em,zm,$m,St,qm,Rt,Fm,Pm,Cm,K,Ut,jm,Ve,Im,Un,Am,Dm,er,Nm,Om,Sm,uo,Rm,or,Um,Bm,Bt,ni;return L=new re({}),bo=new re({}),No=new Y({props:{code:`def normalize_bbox(bbox, width, height): return [ int(1000 * (bbox[0] / width)), int(1000 * (bbox[1] / height)), int(1000 * (bbox[2] / width)), int(1000 * (bbox[3] / height)), ],`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">normalize_bbox</span>(<span class="hljs-params">bbox, width, height</span>): <span class="hljs-keyword">return</span> [ <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">0</span>] / width)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">1</span>] / height)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">2</span>] / width)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">3</span>] / height)), ]`}}),Oo=new Y({props:{code:`from PIL import Image image = Image.open("name_of_your_document - can be a png file, pdf, etc.") width, height = image.size,`,highlighted:`<span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image image = Image.<span class="hljs-built_in">open</span>(<span 
class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>) width, height = image.size`}}),Uo=new re({}),Bo=new Y({props:{code:`from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2TokenizerFast, LayoutLMv2Processor feature_extractor = LayoutLMv2FeatureExtractor() # apply_ocr is set to True by default tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased") processor = LayoutLMv2Processor(feature_extractor, tokenizer),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2FeatureExtractor, LayoutLMv2TokenizerFast, LayoutLMv2Processor feature_extractor = LayoutLMv2FeatureExtractor() <span class="hljs-comment"># apply_ocr is set to True by default</span> tokenizer = LayoutLMv2TokenizerFast.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) processor = LayoutLMv2Processor(feature_extractor, tokenizer)`}}),Vo=new Y({props:{code:`from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") encoding = processor(image, return_tensors="pt") # you can also add all tokenizer parameters here such as padding, truncation print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) encoding = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-comment"># you can also add all tokenizer parameters here such as padding, truncation</span> <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span>`}}),Qo=new Y({props:{code:`from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes encoding = processor(image, words, boxes=boxes, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span 
class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> encoding = processor(image, words, boxes=boxes, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span>`}}),Go=new Y({props:{code:`from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes word_labels = [1, 2] encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'labels', 'image']),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> word_labels = [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>] encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;labels&#x27;, &#x27;image&#x27;])</span>`}}),Ho=new Y({props:{code:`from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") question = "What's his name?" 
encoding = processor(image, question, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) question = <span class="hljs-string">&quot;What&#x27;s his name?&quot;</span> encoding = processor(image, question, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span>`}}),Ko=new Y({props:{code:`from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") question = "What's his name?" words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes encoding = processor(image, question, words, boxes=boxes, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) question = <span class="hljs-string">&quot;What&#x27;s his name?&quot;</span> words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> encoding = processor(image, question, words, boxes=boxes, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span>`}}),Xo=new re({}),Yo=new N({props:{name:"class transformers.LayoutLMv2Config",anchor:"transformers.LayoutLMv2Config",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 
12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"max_2d_position_embeddings",val:" = 1024"},{name:"max_rel_pos",val:" = 128"},{name:"rel_pos_bins",val:" = 32"},{name:"fast_qkv",val:" = True"},{name:"max_rel_2d_pos",val:" = 256"},{name:"rel_2d_pos_bins",val:" = 64"},{name:"convert_sync_batchnorm",val:" = True"},{name:"image_feature_pool_shape",val:" = [7, 7, 256]"},{name:"coordinate_size",val:" = 128"},{name:"shape_size",val:" = 128"},{name:"has_relative_attention_bias",val:" = True"},{name:"has_spatial_attention_bias",val:" = True"},{name:"has_visual_segment_embedding",val:" = False"},{name:"detectron2_config_args",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py#L35",parametersDescription:[{anchor:"transformers.LayoutLMv2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> or <code>TFLayoutLMv2Model</code>.`,name:"vocab_size"},{anchor:"transformers.LayoutLMv2Config.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.LayoutLMv2Config.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.LayoutLMv2Config.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.LayoutLMv2Config.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.LayoutLMv2Config.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.LayoutLMv2Config.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.LayoutLMv2Config.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.LayoutLMv2Config.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.LayoutLMv2Config.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> or <code>TFLayoutLMv2Model</code>.`,name:"type_vocab_size"},{anchor:"transformers.LayoutLMv2Config.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.LayoutLMv2Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.LayoutLMv2Config.max_2d_position_embeddings",description:`<strong>max_2d_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum value that the 2D position embedding might ever be used with. 
Typically set this to something large just in case (e.g., 1024).`,name:"max_2d_position_embeddings"},{anchor:"transformers.LayoutLMv2Config.max_rel_pos",description:`<strong>max_rel_pos</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The maximum number of relative positions to be used in the self-attention mechanism.`,name:"max_rel_pos"},{anchor:"transformers.LayoutLMv2Config.rel_pos_bins",description:`<strong>rel_pos_bins</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of relative position bins to be used in the self-attention mechanism.`,name:"rel_pos_bins"},{anchor:"transformers.LayoutLMv2Config.fast_qkv",description:`<strong>fast_qkv</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.`,name:"fast_qkv"},{anchor:"transformers.LayoutLMv2Config.max_rel_2d_pos",description:`<strong>max_rel_2d_pos</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The maximum number of relative 2D positions in the self-attention mechanism.`,name:"max_rel_2d_pos"},{anchor:"transformers.LayoutLMv2Config.rel_2d_pos_bins",description:`<strong>rel_2d_pos_bins</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The number of 2D relative position bins in the self-attention mechanism.`,name:"rel_2d_pos_bins"},{anchor:"transformers.LayoutLMv2Config.image_feature_pool_shape",description:`<strong>image_feature_pool_shape</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [7, 7, 256]) &#x2014; The shape of the average-pooled feature map.`,name:"image_feature_pool_shape"},{anchor:"transformers.LayoutLMv2Config.coordinate_size",description:`<strong>coordinate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimension of the coordinate embeddings.`,name:"coordinate_size"},{anchor:"transformers.LayoutLMv2Config.shape_size",description:`<strong>shape_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimension of the width and height embeddings.`,name:"shape_size"},{anchor:"transformers.LayoutLMv2Config.has_relative_attention_bias",description:`<strong>has_relative_attention_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a relative attention bias in the self-attention mechanism.`,name:"has_relative_attention_bias"},{anchor:"transformers.LayoutLMv2Config.has_spatial_attention_bias",description:`<strong>has_spatial_attention_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a spatial attention bias in the self-attention mechanism.`,name:"has_spatial_attention_bias"},{anchor:"transformers.LayoutLMv2Config.has_visual_segment_embedding",description:`<strong>has_visual_segment_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add visual segment embeddings.`,name:"has_visual_segment_embedding"},{anchor:"transformers.LayoutLMv2Config.detectron2_config_args",description:`<strong>detectron2_config_args</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Dictionary containing the configuration arguments of the Detectron2 visual backbone. 
Refer to <a href="https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py" rel="nofollow">this file</a> for details regarding default values.`,name:"detectron2_config_args"}]}}),Jo=new Y({props:{code:`from transformers import LayoutLMv2Model, LayoutLMv2Config # Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration configuration = LayoutLMv2Config() # Initializing a model from the microsoft/layoutlmv2-base-uncased style configuration model = LayoutLMv2Model(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Model, LayoutLMv2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = LayoutLMv2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the microsoft/layoutlmv2-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2Model(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),et=new re({}),ot=new N({props:{name:"class transformers.LayoutLMv2FeatureExtractor",anchor:"transformers.LayoutLMv2FeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 224"},{name:"resample",val:" = 2"},{name:"apply_ocr",val:" = True"},{name:"ocr_lang",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py#L83",parametersDescription:[{anchor:"transformers.LayoutLMv2FeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.LayoutLMv2FeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.LayoutLMv2FeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.LayoutLMv2FeatureExtractor.apply_ocr",description:`<strong>apply_ocr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.`,name:"apply_ocr"},{anchor:"transformers.LayoutLMv2FeatureExtractor.ocr_lang",description:`<strong>ocr_lang</strong> (<code>Optional[str]</code>, <em>optional</em>) &#x2014; The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>LayoutLMv2FeatureExtractor uses Google&#x2019;s Tesseract OCR engine under the hood.</p> </div>`,name:"ocr_lang"}]}}),nt=new N({props:{name:"__call__",anchor:"transformers.LayoutLMv2FeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py#L127",parametersDescription:[{anchor:"transformers.LayoutLMv2FeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.LayoutLMv2FeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> <li><strong>words</strong> \u2014 Optional words as identified by Tesseract OCR (only when <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor" >LayoutLMv2FeatureExtractor</a> was initialized with <code>apply_ocr</code> set to <code>True</code>).</li> <li><strong>boxes</strong> \u2014 Optional bounding boxes as identified by Tesseract OCR, normalized based on the image size (only when <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor" >LayoutLMv2FeatureExtractor</a> was initialized with <code>apply_ocr</code> set to <code>True</code>).</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),at=new Y({props:{code:`from transformers import LayoutLMv2FeatureExtractor from PIL import Image image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") # option 1: with apply_ocr=True (default) feature_extractor = LayoutLMv2FeatureExtractor() encoding = feature_extractor(image, return_tensors="pt") print(encoding.keys()) # dict_keys(['pixel_values', 'words', 'boxes']) # option 2: with apply_ocr=False feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False) encoding = feature_extractor(image, return_tensors="pt") print(encoding.keys()) # dict_keys(['pixel_values']),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># option 1: with apply_ocr=True (default)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = LayoutLMv2FeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># dict_keys([&#x27;pixel_values&#x27;, &#x27;words&#x27;, &#x27;boxes&#x27;])</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># option 2: with apply_ocr=False</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=<span 
class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># dict_keys([&#x27;pixel_values&#x27;])</span>`}}),st=new re({}),rt=new N({props:{name:"class transformers.LayoutLMv2Tokenizer",anchor:"transformers.LayoutLMv2Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"cls_token_box",val:" = [0, 0, 0, 0]"},{name:"sep_token_box",val:" = [1000, 1000, 1000, 1000]"},{name:"pad_token_box",val:" = [0, 0, 0, 0]"},{name:"pad_token_label",val:" = -100"},{name:"only_label_first_subword",val:" = True"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"model_max_length",val:": int = 512"},{name:"additional_special_tokens",val:": typing.Optional[typing.List[str]] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py#L146"}}),lt=new N({props:{name:"__call__",anchor:"transformers.LayoutLMv2Tokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"boxes",val:": typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None"},{name:"word_labels",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py#L367",parametersDescription:[{anchor:"transformers.LayoutLMv2Tokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. 
Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words).`,name:"text"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string).`,name:"text_pair"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.boxes",description:`<strong>boxes</strong> (<code>List[List[int]]</code>, <code>List[List[List[int]]]</code>) &#x2014; Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.`,name:"boxes"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.word_labels",description:`<strong>word_labels</strong> (<code>List[int]</code>, <code>List[List[int]]</code>, <em>optional</em>) &#x2014; Word-level integer labels (for token classification tasks such as FUNSD, CORD).`,name:"word_labels"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutLMv2Tokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}]}}),ct=new re({}),dt=new N({props:{name:"class transformers.LayoutLMv2TokenizerFast",anchor:"transformers.LayoutLMv2TokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"cls_token_box",val:" = [0, 0, 0, 0]"},{name:"sep_token_box",val:" = [1000, 1000, 1000, 1000]"},{name:"pad_token_box",val:" = [0, 0, 0, 0]"},{name:"pad_token_label",val:" = -100"},{name:"only_label_first_subword",val:" = True"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py#L62",parametersDescription:[{anchor:"transformers.LayoutLMv2TokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.LayoutLMv2TokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.LayoutLMv2TokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.LayoutLMv2TokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.LayoutLMv2TokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.LayoutLMv2TokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.LayoutLMv2TokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.LayoutLMv2TokenizerFast.cls_token_box",description:`<strong>cls_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [CLS] token.`,name:"cls_token_box"},{anchor:"transformers.LayoutLMv2TokenizerFast.sep_token_box",description:`<strong>sep_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[1000, 1000, 1000, 1000]</code>) &#x2014; The bounding box to use for the special [SEP] token.`,name:"sep_token_box"},{anchor:"transformers.LayoutLMv2TokenizerFast.pad_token_box",description:`<strong>pad_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [PAD] token.`,name:"pad_token_box"},{anchor:"transformers.LayoutLMv2TokenizerFast.pad_token_label",description:`<strong>pad_token_label</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The label to use for padding tokens. Defaults to -100, which is the <code>ignore_index</code> of PyTorch&#x2019;s CrossEntropyLoss.`,name:"pad_token_label"},{anchor:"transformers.LayoutLMv2TokenizerFast.only_label_first_subword",description:`<strong>only_label_first_subword</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to only label the first subword, in case word labels are provided.`,name:"only_label_first_subword"},{anchor:"transformers.LayoutLMv2TokenizerFast.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. 
This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original LayoutLMv2).`,name:"tokenize_chinese_chars"}]}}),ht=new N({props:{name:"__call__",anchor:"transformers.LayoutLMv2TokenizerFast.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"boxes",val:": typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None"},{name:"word_labels",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py#L171",parametersDescription:[{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words).`,name:"text"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.text_pair",description:`<strong>text_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string).`,name:"text_pair"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.boxes",description:`<strong>boxes</strong> (<code>List[List[int]]</code>, <code>List[List[List[int]]]</code>) &#x2014; Word-level bounding boxes. 
Each bounding box should be normalized to be on a 0-1000 scale.`,name:"boxes"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.word_labels",description:`<strong>word_labels</strong> (<code>List[int]</code>, <code>List[List[int]]</code>, <em>optional</em>) &#x2014; Word-level integer labels (for token classification tasks such as FUNSD, CORD).`,name:"word_labels"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LayoutLMv2TokenizerFast.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}]}}),mt=new re({}),ft=new N({props:{name:"class transformers.LayoutLMv2Processor",anchor:"transformers.LayoutLMv2Processor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/processing_layoutlmv2.py#L27",parametersDescription:[{anchor:"transformers.LayoutLMv2Processor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>LayoutLMv2FeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.LayoutLMv2Processor.tokenizer",description:`<strong>tokenizer</strong> (<code>LayoutLMv2Tokenizer</code> or <code>LayoutLMv2TokenizerFast</code>) &#x2014; An instance of <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast">LayoutLMv2TokenizerFast</a>. 
The tokenizer is a required input.`,name:"tokenizer"}]}}),gt=new N({props:{name:"__call__",anchor:"transformers.LayoutLMv2Processor.__call__",parameters:[{name:"images",val:""},{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"boxes",val:": typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None"},{name:"word_labels",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/processing_layoutlmv2.py#L129"}}),yt=new re({}),bt=new N({props:{name:"class transformers.LayoutLMv2Model",anchor:"transformers.LayoutLMv2Model",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L706",parametersDescription:[{anchor:"transformers.LayoutLMv2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kt=new N({props:{name:"forward",anchor:"transformers.LayoutLMv2Model.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"image",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L804",parametersDescription:[{anchor:"transformers.LayoutLMv2Model.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMv2Model.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>((batch_size, sequence_length), 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.`,name:"bbox"},{anchor:"transformers.LayoutLMv2Model.forward.image",description:`<strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.`,name:"image"},{anchor:"transformers.LayoutLMv2Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMv2Model.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMv2Model.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMv2Model.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LayoutLMv2Model.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMv2Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMv2Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMv2Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ao=new ji({props:{$$slots:{default:[wv]},$$scope:{ctx:ie}}}),xt=new Y({props:{code:`from transformers import LayoutLMv2Processor, LayoutLMv2Model from PIL import Image processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased') model = LayoutLMv2Model.from_pretrained('microsoft/layoutlmv2-base-uncased') image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") encoding = processor(image, return_tensors="pt") outputs = model(**encoding) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2Model.from_pretrained(<span 
class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Mt=new re({}),Tt=new N({props:{name:"class transformers.LayoutLMv2ForSequenceClassification",anchor:"transformers.LayoutLMv2ForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L951",parametersDescription:[{anchor:"transformers.LayoutLMv2ForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qt=new N({props:{name:"forward",anchor:"transformers.LayoutLMv2ForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"image",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L965",parametersDescription:[{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. 
Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.`,name:"bbox"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.image",description:`<strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.`,name:"image"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LayoutLMv2ForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of 
shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ro=new ji({props:{$$slots:{default:[kv]},$$scope:{ctx:ie}}}),Ft=new Y({props:{code:`from transformers import LayoutLMv2Processor, LayoutLMv2ForSequenceClassification from PIL import Image import torch processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased') model = LayoutLMv2ForSequenceClassification.from_pretrained('microsoft/layoutlmv2-base-uncased') image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") encoding = processor(image, return_tensors="pt") sequence_label = torch.tensor([1]) outputs = model(**encoding, labels=sequence_label) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_label = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=sequence_label) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pt=new re({}),Ct=new N({props:{name:"class transformers.LayoutLMv2ForTokenClassification",anchor:"transformers.LayoutLMv2ForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1119",parametersDescription:[{anchor:"transformers.LayoutLMv2ForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),At=new N({props:{name:"forward",anchor:"transformers.LayoutLMv2ForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"image",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1133",parametersDescription:[{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.`,name:"bbox"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.image",description:`<strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.`,name:"image"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LayoutLMv2ForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),lo=new ji({props:{$$slots:{default:[xv]},$$scope:{ctx:ie}}}),Dt=new Y({props:{code:`from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification from PIL import Image processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased', revision="no_ocr") model = LayoutLMv2ForTokenClassification.from_pretrained('microsoft/layoutlmv2-base-uncased') image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes word_labels = [0, 1] encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt") outputs = model(**encoding) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2ForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>, revision=<span 
class="hljs-string">&quot;no_ocr&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2ForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_labels = [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Nt=new re({}),Ot=new N({props:{name:"class transformers.LayoutLMv2ForQuestionAnswering",anchor:"transformers.LayoutLMv2ForQuestionAnswering",parameters:[{name:"config",val:""},{name:"has_visual_segment_embedding",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1235",parametersDescription:[{anchor:"transformers.LayoutLMv2ForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ut=new N({props:{name:"forward",anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"image",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1249",parametersDescription:[{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.`,name:"bbox"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.image",description:`<strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.`,name:"image"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.LayoutLMv2ForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),uo=new ji({props:{$$slots:{default:[Mv]},$$scope:{ctx:ie}}}),Bt=new Y({props:{code:`from transformers import LayoutLMv2Processor, LayoutLMv2ForQuestionAnswering from PIL import Image import torch processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased') model = LayoutLMv2ForQuestionAnswering.from_pretrained('microsoft/layoutlmv2-base-uncased') image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") question = "what's his name?" encoding = processor(image, question, return_tensors="pt") start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2ForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2ForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlmv2-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question = <span class="hljs-string">&quot;what&#x27;s his name?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, question, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){y=s("meta"),q=c(),b=s("h1"),M=s("a"),P=s("span"),h(L.$$.fragment),w=c(),I=s("span"),Ii=n("LayoutLMV2"),sr=c(),ze=s("h2"),Qe=s("a"),na=s("span"),h(bo.$$.fragment),Ai=c(),aa=s("span"),Di=n("Overview"),rr=c(),pe=s("p"),Ni=n("The LayoutLMV2 model was proposed in "),Lo=s("a"),Oi=n("LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding"),Si=n(` by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 
LayoutLMV2 improves `),Vt=s("a"),Ri=n("LayoutLM"),Ui=n(` to obtain state-of-the-art results across several document image understanding benchmarks:`),ir=c(),he=s("ul"),Z=s("li"),Bi=n("information extraction from scanned documents: the "),wo=s("a"),Wi=n("FUNSD"),Vi=n(` dataset (a collection of 199 annotated forms comprising more than 30,000 words), the `),ko=s("a"),Qi=n("CORD"),Gi=n(` dataset (a collection of 800 receipts for training, 100 for validation and 100 for testing), the `),xo=s("a"),Hi=n("SROIE"),Ki=n(` dataset (a collection of 626 receipts for training and 347 receipts for testing) and the `),Mo=s("a"),Xi=n("Kleister-NDA"),Yi=n(` dataset (a collection of non-disclosure agreements from the EDGAR database, including 254 documents for training, 83 documents for validation, and 203 documents for testing).`),Zi=c(),To=s("li"),Ji=n("document image classification: the "),Eo=s("a"),el=n("RVL-CDIP"),ol=n(` dataset (a collection of 400,000 images belonging to one of 16 classes).`),tl=c(),zo=s("li"),nl=n("document visual question answering: the "),$o=s("a"),al=n("DocVQA"),sl=n(` dataset (a collection of 50,000 questions defined on 12,000+ document images).`),lr=c(),Qt=s("p"),rl=n("The abstract from the paper is the following:"),cr=c(),Gt=s("p"),sa=s("em"),il=n(`Pre-training of text and layout has proved effective in a variety of visually-rich document understanding tasks due to its effective model architecture and the advantage of large-scale unlabeled scanned/digital-born documents. In this paper, we present LayoutLMv2 by pre-training text, layout and image in a multi-modal framework, where new model architectures and pre-training tasks are leveraged. Specifically, LayoutLMv2 not only uses the existing masked visual-language modeling task but also the new text-image alignment and text-image matching tasks in the pre-training stage, where cross-modality interaction is better learned. Meanwhile, it also integrates a spatial-aware self-attention mechanism into the Transformer architecture, so that the model can fully understand the relative positional relationship among different text blocks. Experiment results show that LayoutLMv2 outperforms strong baselines and achieves new state-of-the-art results on a wide variety of downstream visually-rich document understanding tasks, including FUNSD (0.7895 -> 0.8420), CORD (0.9493 -> 0.9601), SROIE (0.9524 -> 0.9781), Kleister-NDA (0.834 -> 0.852), RVL-CDIP (0.9443 -> 0.9564), and DocVQA (0.7295 -> 0.8672). The pre-trained LayoutLMv2 model is publicly available at this https URL.`),dr=c(),Ht=s("p"),ll=n("Tips:"),ur=c(),B=s("ul"),ra=s("li"),cl=n(`The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning).`),dl=c(),qo=s("li"),ul=n(`LayoutLMv2 adds both a relative 1D attention bias as well as a spatial 2D attention bias to the attention scores in the self-attention layers. Details can be found on page 5 of the `),Fo=s("a"),pl=n("paper"),hl=n("."),ml=c(),Po=s("li"),fl=n("Demo notebooks on how to use the LayoutLMv2 model on RVL-CDIP, FUNSD, DocVQA, CORD can be found "),Co=s("a"),gl=n("here"),_l=n("."),vl=c(),$e=s("li"),yl=n("LayoutLMv2 uses Facebook AI\u2019s "),jo=s("a"),bl=n("Detectron2"),Ll=n(` package for its visual backbone. 
See `),Io=s("a"),wl=n("this link"),kl=n(` for installation instructions.`),xl=c(),k=s("li"),Ml=n("In addition to "),ia=s("code"),Tl=n("input_ids"),El=n(", "),Kt=s("a"),zl=n("forward()"),$l=n(` expects 2 additional inputs, namely `),la=s("code"),ql=n("image"),Fl=n(" and "),ca=s("code"),Pl=n("bbox"),Cl=n(". The "),da=s("code"),jl=n("image"),Il=n(` input corresponds to the original document image in which the text tokens occur. The model expects each document image to be of size 224x224. This means that if you have a batch of document images, `),ua=s("code"),Al=n("image"),Dl=n(` should be a tensor of shape (batch_size, 3, 224, 224). This can be either a `),pa=s("code"),Nl=n("torch.Tensor"),Ol=n(" or a "),ha=s("code"),Sl=n("Detectron2.structures.ImageList"),Rl=n(`. You don\u2019t need to normalize the channels, as this is done by the model. Important to note is that the visual backbone expects BGR channels instead of RGB, as all models in Detectron2 are pre-trained using the BGR format. The `),ma=s("code"),Ul=n("bbox"),Bl=n(` input are the bounding boxes (i.e. 2D-positions) of the input text tokens. This is identical to `),Xt=s("a"),Wl=n("LayoutLMModel"),Vl=n(`. These can be obtained using an external OCR engine such as Google\u2019s `),Ao=s("a"),Ql=n("Tesseract"),Gl=n(" (there\u2019s a "),Do=s("a"),Hl=n(`Python wrapper`),Kl=n(` available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. To normalize, you can use the following function:`),pr=c(),h(No.$$.fragment),hr=c(),me=s("p"),Xl=n("Here, "),fa=s("code"),Yl=n("width"),Zl=n(" and "),ga=s("code"),Jl=n("height"),ec=n(` correspond to the width and height of the original document in which the token occurs (before resizing the image). Those can be obtained using the Python Image Library (PIL) library for example, as follows:`),mr=c(),h(Oo.$$.fragment),fr=c(),Ge=s("p"),oc=n("However, this model includes a brand new "),Yt=s("a"),tc=n("LayoutLMv2Processor"),nc=n(` which can be used to directly prepare data for the model (including applying OCR under the hood). More information can be found in the \u201CUsage\u201D section below.`),gr=c(),fe=s("ul"),j=s("li"),ac=n("Internally, "),Zt=s("a"),sc=n("LayoutLMv2Model"),rc=n(" will send the "),_a=s("code"),ic=n("image"),lc=n(` input through its visual backbone to obtain a lower-resolution feature map, whose shape is equal to the `),va=s("code"),cc=n("image_feature_pool_shape"),dc=n(` attribute of `),Jt=s("a"),uc=n("LayoutLMv2Config"),pc=n(`. This feature map is then flattened to obtain a sequence of image tokens. As the size of the feature map is 7x7 by default, one obtains 49 image tokens. These are then concatenated with the text tokens, and send through the Transformer encoder. This means that the last hidden states of the model will have a length of 512 + 49 = 561, if you pad the text tokens up to the max length. More generally, the last hidden states will have a shape of `),ya=s("code"),hc=n("seq_length"),mc=n(" + "),ba=s("code"),fc=n("image_feature_pool_shape[0]"),gc=n(` * `),La=s("code"),_c=n("config.image_feature_pool_shape[1]"),vc=n("."),yc=c(),So=s("li"),bc=n("When calling "),en=s("a"),Lc=n("from_pretrained()"),wc=n(`, a warning will be printed with a long list of parameter names that are not initialized. 
This is not a problem, as these parameters are batch normalization statistics, which are going to have values when fine-tuning on a custom dataset.`),kc=c(),Ro=s("li"),xc=n("If you want to train the model in a distributed environment, make sure to call "),wa=s("code"),Mc=n("synchronize_batch_norm"),Tc=n(` on the model in order to properly synchronize the batch normalization layers of the visual backbone.`),_r=c(),He=s("p"),Ec=n(`In addition, there\u2019s LayoutXLM, which is a multilingual version of LayoutLMv2. More information can be found on `),on=s("a"),zc=n("LayoutXLM\u2019s documentation page"),$c=n("."),vr=c(),qe=s("h2"),Ke=s("a"),ka=s("span"),h(Uo.$$.fragment),qc=c(),xa=s("span"),Fc=n("Usage: LayoutLMv2Processor"),yr=c(),W=s("p"),Pc=n("The easiest way to prepare data for the model is to use "),tn=s("a"),Cc=n("LayoutLMv2Processor"),jc=n(`, which internally combines a feature extractor (`),nn=s("a"),Ic=n("LayoutLMv2FeatureExtractor"),Ac=n(`) and a tokenizer (`),an=s("a"),Dc=n("LayoutLMv2Tokenizer"),Nc=n(" or "),sn=s("a"),Oc=n("LayoutLMv2TokenizerFast"),Sc=n(`). The feature extractor handles the image modality, while the tokenizer handles the text modality. A processor combines both, which is ideal for a multi-modal model like LayoutLMv2. Note that you can still use both separately, if you only want to handle one modality.`),br=c(),h(Bo.$$.fragment),Lr=c(),T=s("p"),Rc=n("In short, one can provide a document image (and possibly additional data) to "),rn=s("a"),Uc=n("LayoutLMv2Processor"),Bc=n(`, and it will create the inputs expected by the model. Internally, the processor first uses `),ln=s("a"),Wc=n("LayoutLMv2FeatureExtractor"),Vc=n(` to apply OCR on the image to get a list of words and normalized bounding boxes, as well to resize the image to a given size in order to get the `),Ma=s("code"),Qc=n("image"),Gc=n(` input. The words and normalized bounding boxes are then provided to `),cn=s("a"),Hc=n("LayoutLMv2Tokenizer"),Kc=n(` or `),dn=s("a"),Xc=n("LayoutLMv2TokenizerFast"),Yc=n(", which converts them to token-level "),Ta=s("code"),Zc=n("input_ids"),Jc=n(`, `),Ea=s("code"),ed=n("attention_mask"),od=n(", "),za=s("code"),td=n("token_type_ids"),nd=n(", "),$a=s("code"),ad=n("bbox"),sd=n(`. Optionally, one can provide word labels to the processor, which are turned into token-level `),qa=s("code"),rd=n("labels"),id=n("."),wr=c(),S=s("p"),un=s("a"),ld=n("LayoutLMv2Processor"),cd=n(" uses "),Wo=s("a"),dd=n("PyTesseract"),ud=n(`, a Python wrapper around Google\u2019s Tesseract OCR engine, under the hood. Note that you can still use your own OCR engine of choice, and provide the words and normalized boxes yourself. This requires initializing `),pn=s("a"),pd=n("LayoutLMv2FeatureExtractor"),hd=n(" with "),Fa=s("code"),md=n("apply_ocr"),fd=n(" set to "),Pa=s("code"),gd=n("False"),_d=n("."),kr=c(),hn=s("p"),vd=n(`In total, there are 5 use cases that are supported by the processor. Below, we list them all. 
Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs).`),xr=c(),mn=s("p"),Ca=s("strong"),yd=n(`Use case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True`),Mr=c(),fn=s("p"),bd=n(`This is the simplest case, in which the processor (actually the feature extractor) will perform OCR on the image to get the words and normalized bounding boxes.`),Tr=c(),h(Vo.$$.fragment),Er=c(),gn=s("p"),ja=s("strong"),Ld=n("Use case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False"),zr=c(),ge=s("p"),wd=n("In case one wants to do OCR themselves, one can initialize the feature extractor with "),Ia=s("code"),kd=n("apply_ocr"),xd=n(` set to `),Aa=s("code"),Md=n("False"),Td=n(`. In that case, one should provide the words and corresponding (normalized) bounding boxes themselves to the processor.`),$r=c(),h(Qo.$$.fragment),qr=c(),_n=s("p"),Da=s("strong"),Ed=n("Use case 3: token classification (training), apply_ocr=False"),Fr=c(),V=s("p"),zd=n(`For token classification tasks (such as FUNSD, CORD, SROIE, Kleister-NDA), one can also provide the corresponding word labels in order to train a model. The processor will then convert these into token-level `),Na=s("code"),$d=n("labels"),qd=n(`. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the `),Oa=s("code"),Fd=n("ignore_index"),Pd=n(` of PyTorch\u2019s CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with `),Sa=s("code"),Cd=n("only_label_first_subword"),jd=n(" set to "),Ra=s("code"),Id=n("False"),Ad=n("."),Pr=c(),h(Go.$$.fragment),Cr=c(),vn=s("p"),Ua=s("strong"),Dd=n("Use case 4: visual question answering (inference), apply_ocr=True"),jr=c(),yn=s("p"),Nd=n(`For visual question answering tasks (such as DocVQA), you can provide a question to the processor. By default, the processor will apply OCR on the image, and create [CLS] question tokens [SEP] word tokens [SEP].`),Ir=c(),h(Ho.$$.fragment),Ar=c(),bn=s("p"),Ba=s("strong"),Od=n("Use case 5: visual question answering (inference), apply_ocr=False"),Dr=c(),Ln=s("p"),Sd=n(`For visual question answering tasks (such as DocVQA), you can provide a question to the processor. If you want to perform OCR yourself, you can provide your own words and (normalized) bounding boxes to the processor.`),Nr=c(),h(Ko.$$.fragment),Or=c(),Fe=s("h2"),Xe=s("a"),Wa=s("span"),h(Xo.$$.fragment),Rd=c(),Va=s("span"),Ud=n("LayoutLMv2Config"),Sr=c(),R=s("div"),h(Yo.$$.fragment),Bd=c(),Pe=s("p"),Wd=n("This is the configuration class to store the configuration of a "),wn=s("a"),Vd=n("LayoutLMv2Model"),Qd=n(`. It is used to instantiate an LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLMv2 `),Zo=s("a"),Gd=n("microsoft/layoutlmv2-base-uncased"),Hd=n(" architecture."),Kd=c(),Ce=s("p"),Xd=n("Configuration objects inherit from "),kn=s("a"),Yd=n("PretrainedConfig"),Zd=n(` and can be used to control the model outputs. 
Read the documentation from `),xn=s("a"),Jd=n("PretrainedConfig"),eu=n(" for more information."),ou=c(),Qa=s("p"),tu=n("Example:"),nu=c(),h(Jo.$$.fragment),Rr=c(),je=s("h2"),Ye=s("a"),Ga=s("span"),h(et.$$.fragment),au=c(),Ha=s("span"),su=n("LayoutLMv2FeatureExtractor"),Ur=c(),J=s("div"),h(ot.$$.fragment),ru=c(),Ka=s("p"),iu=n(`Constructs a LayoutLMv2 feature extractor. This can be used to resize document images to the same size, as well as to apply OCR on them in order to get a list of words and normalized bounding boxes.`),lu=c(),tt=s("p"),cu=n("This feature extractor inherits from "),Xa=s("code"),du=n("PreTrainedFeatureExtractor()"),uu=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),pu=c(),ae=s("div"),h(nt.$$.fragment),hu=c(),Ya=s("p"),mu=n("Main method to prepare for the model one or several image(s)."),fu=c(),Za=s("p"),gu=n("Examples:"),_u=c(),h(at.$$.fragment),Br=c(),Ie=s("h2"),Ze=s("a"),Ja=s("span"),h(st.$$.fragment),vu=c(),es=s("span"),yu=n("LayoutLMv2Tokenizer"),Wr=c(),A=s("div"),h(rt.$$.fragment),bu=c(),D=s("p"),Lu=n("Construct a LayoutLMv2 tokenizer. Based on WordPiece. "),Mn=s("a"),wu=n("LayoutLMv2Tokenizer"),ku=n(` can be used to turn words, word-level bounding boxes and optional word labels to token-level `),os=s("code"),xu=n("input_ids"),Mu=n(`, `),ts=s("code"),Tu=n("attention_mask"),Eu=n(", "),ns=s("code"),zu=n("token_type_ids"),$u=n(", "),as=s("code"),qu=n("bbox"),Fu=n(", and optional "),ss=s("code"),Pu=n("labels"),Cu=n(" (for token classification)."),ju=c(),it=s("p"),Iu=n("This tokenizer inherits from "),Tn=s("a"),Au=n("PreTrainedTokenizer"),Du=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Nu=c(),En=s("p"),zn=s("a"),Ou=n("LayoutLMv2Tokenizer"),Su=n(` runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes.`),Ru=c(),Je=s("div"),h(lt.$$.fragment),Uu=c(),rs=s("p"),Bu=n(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),Wu=c(),is=s("div"),Vr=c(),Ae=s("h2"),eo=s("a"),ls=s("span"),h(ct.$$.fragment),Vu=c(),cs=s("span"),Qu=n("LayoutLMv2TokenizerFast"),Qr=c(),ee=s("div"),h(dt.$$.fragment),Gu=c(),ut=s("p"),Hu=n("Construct a \u201Cfast\u201D LayoutLMv2 tokenizer (backed by HuggingFace\u2019s "),ds=s("em"),Ku=n("tokenizers"),Xu=n(" library). Based on WordPiece."),Yu=c(),pt=s("p"),Zu=n("This tokenizer inherits from "),$n=s("a"),Ju=n("PreTrainedTokenizerFast"),ep=n(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),op=c(),oo=s("div"),h(ht.$$.fragment),tp=c(),us=s("p"),np=n(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),Gr=c(),De=s("h2"),to=s("a"),ps=s("span"),h(mt.$$.fragment),ap=c(),hs=s("span"),sp=n("LayoutLMv2Processor"),Hr=c(),U=s("div"),h(ft.$$.fragment),rp=c(),ms=s("p"),ip=n(`Constructs a LayoutLMv2 processor which combines a LayoutLMv2 feature extractor and a LayoutLMv2 tokenizer into a single processor.`),lp=c(),qn=s("p"),Fn=s("a"),cp=n("LayoutLMv2Processor"),dp=n(" offers all the functionalities you need to prepare data for the model."),up=c(),$=s("p"),pp=n("It first uses "),Pn=s("a"),hp=n("LayoutLMv2FeatureExtractor"),mp=n(` to resize document images to a fixed size, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to `),Cn=s("a"),fp=n("LayoutLMv2Tokenizer"),gp=n(" or "),jn=s("a"),_p=n("LayoutLMv2TokenizerFast"),vp=n(`, which turns the words and bounding boxes into token-level `),fs=s("code"),yp=n("input_ids"),bp=n(", "),gs=s("code"),Lp=n("attention_mask"),wp=n(", "),_s=s("code"),kp=n("token_type_ids"),xp=n(", "),vs=s("code"),Mp=n("bbox"),Tp=n(`. Optionally, one can provide integer `),ys=s("code"),Ep=n("word_labels"),zp=n(", which are turned into token-level "),bs=s("code"),$p=n("labels"),qp=n(` for token classification tasks (such as FUNSD, CORD).`),Fp=c(),_e=s("div"),h(gt.$$.fragment),Pp=c(),x=s("p"),Cp=n("This method first forwards the "),Ls=s("code"),jp=n("images"),Ip=n(` argument to `),_t=s("a"),ws=s("strong"),Ap=n("call"),Dp=n("()"),Np=n(". In case "),In=s("a"),Op=n("LayoutLMv2FeatureExtractor"),Sp=n(` was initialized with `),ks=s("code"),Rp=n("apply_ocr"),Up=n(" set to "),xs=s("code"),Bp=n("True"),Wp=n(`, it passes the obtained words and bounding boxes along with the additional arguments to `),vt=s("a"),Ms=s("strong"),Vp=n("call"),Qp=n("()"),Gp=n(` and returns the output, together with resized `),Ts=s("code"),Hp=n("images"),Kp=n(". In case "),An=s("a"),Xp=n("LayoutLMv2FeatureExtractor"),Yp=n(" was initialized with "),Es=s("code"),Zp=n("apply_ocr"),Jp=n(` set to `),zs=s("code"),eh=n("False"),oh=n(", it passes the words ("),$s=s("code"),th=n("text"),nh=n("/"),qs=s("code"),ah=n("text_pair`) and `boxes` specified by the user along with the additional arguments to [__call__()](/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer.__call__) and returns the output, together with resized `images"),sh=n("."),rh=c(),Fs=s("p"),ih=n("Please refer to the docstring of the above two methods for more information."),Kr=c(),Ne=s("h2"),no=s("a"),Ps=s("span"),h(yt.$$.fragment),lh=c(),Cs=s("span"),ch=n("LayoutLMv2Model"),Xr=c(),le=s("div"),h(bt.$$.fragment),dh=c(),Lt=s("p"),uh=n(`The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),wt=s("a"),ph=n("torch.nn.Module"),hh=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mh=c(),Q=s("div"),h(kt.$$.fragment),fh=c(),Oe=s("p"),gh=n("The "),Dn=s("a"),_h=n("LayoutLMv2Model"),vh=n(" forward method, overrides the "),js=s("code"),yh=n("__call__"),bh=n(" special method."),Lh=c(),h(ao.$$.fragment),wh=c(),Is=s("p"),kh=n("Examples:"),xh=c(),h(xt.$$.fragment),Yr=c(),Se=s("h2"),so=s("a"),As=s("span"),h(Mt.$$.fragment),Mh=c(),Ds=s("span"),Th=n("LayoutLMv2ForSequenceClassification"),Zr=c(),oe=s("div"),h(Tt.$$.fragment),Eh=c(),Et=s("p"),zh=n(`LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual embeddings, e.g. for document image classification tasks such as the `),Ns=s("code"),$h=n("RVL-CDIP <https://www.cs.cmu.edu/~aharley/rvl-cdip/>"),qh=n("__ dataset."),Fh=c(),zt=s("p"),Ph=n("This model is a PyTorch "),$t=s("a"),Ch=n("torch.nn.Module"),jh=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ih=c(),G=s("div"),h(qt.$$.fragment),Ah=c(),Re=s("p"),Dh=n("The "),Nn=s("a"),Nh=n("LayoutLMv2ForSequenceClassification"),Oh=n(" forward method, overrides the "),Os=s("code"),Sh=n("__call__"),Rh=n(" special method."),Uh=c(),h(ro.$$.fragment),Bh=c(),Ss=s("p"),Wh=n("Examples:"),Vh=c(),h(Ft.$$.fragment),Jr=c(),Ue=s("h2"),io=s("a"),Rs=s("span"),h(Pt.$$.fragment),Qh=c(),Us=s("span"),Gh=n("LayoutLMv2ForTokenClassification"),ei=c(),te=s("div"),h(Ct.$$.fragment),Hh=c(),ce=s("p"),Kh=n(`LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden states) e.g. for sequence labeling (information extraction) tasks such as `),Bs=s("code"),Xh=n("FUNSD <https://guillaumejaume.github.io/FUNSD/>"),On=s("strong"),Yh=n(", "),Ws=s("code"),Zh=n("SROIE <https://rrc.cvc.uab.es/?ch=13>"),Jh=n(", "),Vs=s("code"),em=n("CORD <https://github.com/clovaai/cord>"),Sn=s("strong"),om=n("and "),Qs=s("code"),tm=n("Kleister-NDA <https://github.com/applicaai/kleister-nda>"),nm=n("."),am=c(),jt=s("p"),sm=n("This model is a PyTorch "),It=s("a"),rm=n("torch.nn.Module"),im=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),lm=c(),H=s("div"),h(At.$$.fragment),cm=c(),Be=s("p"),dm=n("The "),Rn=s("a"),um=n("LayoutLMv2ForTokenClassification"),pm=n(" forward method, overrides the "),Gs=s("code"),hm=n("__call__"),mm=n(" special method."),fm=c(),h(lo.$$.fragment),gm=c(),Hs=s("p"),_m=n("Examples:"),vm=c(),h(Dt.$$.fragment),oi=c(),We=s("h2"),co=s("a"),Ks=s("span"),h(Nt.$$.fragment),ym=c(),Xs=s("span"),bm=n("LayoutLMv2ForQuestionAnswering"),ti=c(),ne=s("div"),h(Ot.$$.fragment),Lm=c(),de=s("p"),wm=n("LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as "),Ys=s("code"),km=n("DocVQA <https://rrc.cvc.uab.es/?ch=17>"),xm=n(`__ (a linear layer on top of the text part of the hidden-states output to compute `),Zs=s("code"),Mm=n("span start logits"),Tm=n(" and "),Js=s("code"),Em=n("span end logits"),zm=n(")."),$m=c(),St=s("p"),qm=n("This model is a PyTorch "),Rt=s("a"),Fm=n("torch.nn.Module"),Pm=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cm=c(),K=s("div"),h(Ut.$$.fragment),jm=c(),Ve=s("p"),Im=n("The "),Un=s("a"),Am=n("LayoutLMv2ForQuestionAnswering"),Dm=n(" forward method, overrides the "),er=s("code"),Nm=n("__call__"),Om=n(" special method."),Sm=c(),h(uo.$$.fragment),Rm=c(),or=s("p"),Um=n("Examples:"),Bm=c(),h(Bt.$$.fragment),this.h()},l(o){const u=Lv('[data-svelte="svelte-1phssyn"]',document.head);y=r(u,"META",{name:!0,content:!0}),u.forEach(t),q=d(o),b=r(o,"H1",{class:!0});var Wt=i(b);M=r(Wt,"A",{id:!0,class:!0,href:!0});var tr=i(M);P=r(tr,"SPAN",{});var nr=i(P);m(L.$$.fragment,nr),nr.forEach(t),tr.forEach(t),w=d(Wt),I=r(Wt,"SPAN",{});var ar=i(I);Ii=a(ar,"LayoutLMV2"),ar.forEach(t),Wt.forEach(t),sr=d(o),ze=r(o,"H2",{class:!0});var ai=i(ze);Qe=r(ai,"A",{id:!0,class:!0,href:!0});var Xm=i(Qe);na=r(Xm,"SPAN",{});var Ym=i(na);m(bo.$$.fragment,Ym),Ym.forEach(t),Xm.forEach(t),Ai=d(ai),aa=r(ai,"SPAN",{});var Zm=i(aa);Di=a(Zm,"Overview"),Zm.forEach(t),ai.forEach(t),rr=d(o),pe=r(o,"P",{});var Bn=i(pe);Ni=a(Bn,"The LayoutLMV2 model was proposed in "),Lo=r(Bn,"A",{href:!0,rel:!0});var Jm=i(Lo);Oi=a(Jm,"LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding"),Jm.forEach(t),Si=a(Bn,` by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. LayoutLMV2 improves `),Vt=r(Bn,"A",{href:!0});var ef=i(Vt);Ri=a(ef,"LayoutLM"),ef.forEach(t),Ui=a(Bn,` to obtain state-of-the-art results across several document image understanding benchmarks:`),Bn.forEach(t),ir=d(o),he=r(o,"UL",{});var Wn=i(he);Z=r(Wn,"LI",{});var ve=i(Z);Bi=a(ve,"information extraction from scanned documents: the "),wo=r(ve,"A",{href:!0,rel:!0});var of=i(wo);Wi=a(of,"FUNSD"),of.forEach(t),Vi=a(ve,` dataset (a collection of 199 annotated forms comprising more than 30,000 words), the `),ko=r(ve,"A",{href:!0,rel:!0});var tf=i(ko);Qi=a(tf,"CORD"),tf.forEach(t),Gi=a(ve,` dataset (a collection of 800 receipts for training, 100 for validation and 100 for testing), the `),xo=r(ve,"A",{href:!0,rel:!0});var nf=i(xo);Hi=a(nf,"SROIE"),nf.forEach(t),Ki=a(ve,` dataset (a collection of 626 receipts for training and 347 receipts for testing) and the `),Mo=r(ve,"A",{href:!0,rel:!0});var af=i(Mo);Xi=a(af,"Kleister-NDA"),af.forEach(t),Yi=a(ve,` dataset (a collection of non-disclosure agreements from the EDGAR database, including 254 documents for training, 83 documents for validation, and 203 documents for testing).`),ve.forEach(t),Zi=d(Wn),To=r(Wn,"LI",{});var si=i(To);Ji=a(si,"document image classification: the "),Eo=r(si,"A",{href:!0,rel:!0});var sf=i(Eo);el=a(sf,"RVL-CDIP"),sf.forEach(t),ol=a(si,` dataset (a collection of 400,000 images belonging to one of 16 classes).`),si.forEach(t),tl=d(Wn),zo=r(Wn,"LI",{});var ri=i(zo);nl=a(ri,"document visual question answering: the "),$o=r(ri,"A",{href:!0,rel:!0});var rf=i($o);al=a(rf,"DocVQA"),rf.forEach(t),sl=a(ri,` dataset (a collection of 50,000 questions defined on 12,000+ document images).`),ri.forEach(t),Wn.forEach(t),lr=d(o),Qt=r(o,"P",{});var lf=i(Qt);rl=a(lf,"The abstract from the paper is the following:"),lf.forEach(t),cr=d(o),Gt=r(o,"P",{});var cf=i(Gt);sa=r(cf,"EM",{});var df=i(sa);il=a(df,`Pre-training of text and layout has proved effective in a variety of visually-rich document understanding tasks due to its effective model architecture and the advantage of large-scale unlabeled scanned/digital-born 
documents. In this paper, we present LayoutLMv2 by pre-training text, layout and image in a multi-modal framework, where new model architectures and pre-training tasks are leveraged. Specifically, LayoutLMv2 not only uses the existing masked visual-language modeling task but also the new text-image alignment and text-image matching tasks in the pre-training stage, where cross-modality interaction is better learned. Meanwhile, it also integrates a spatial-aware self-attention mechanism into the Transformer architecture, so that the model can fully understand the relative positional relationship among different text blocks. Experiment results show that LayoutLMv2 outperforms strong baselines and achieves new state-of-the-art results on a wide variety of downstream visually-rich document understanding tasks, including FUNSD (0.7895 -> 0.8420), CORD (0.9493 -> 0.9601), SROIE (0.9524 -> 0.9781), Kleister-NDA (0.834 -> 0.852), RVL-CDIP (0.9443 -> 0.9564), and DocVQA (0.7295 -> 0.8672). The pre-trained LayoutLMv2 model is publicly available at this https URL.`),df.forEach(t),cf.forEach(t),dr=d(o),Ht=r(o,"P",{});var uf=i(Ht);ll=a(uf,"Tips:"),uf.forEach(t),ur=d(o),B=r(o,"UL",{});var ye=i(B);ra=r(ye,"LI",{});var pf=i(ra);cl=a(pf,`The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning).`),pf.forEach(t),dl=d(ye),qo=r(ye,"LI",{});var ii=i(qo);ul=a(ii,`LayoutLMv2 adds both a relative 1D attention bias as well as a spatial 2D attention bias to the attention scores in the self-attention layers. Details can be found on page 5 of the `),Fo=r(ii,"A",{href:!0,rel:!0});var hf=i(Fo);pl=a(hf,"paper"),hf.forEach(t),hl=a(ii,"."),ii.forEach(t),ml=d(ye),Po=r(ye,"LI",{});var li=i(Po);fl=a(li,"Demo notebooks on how to use the LayoutLMv2 model on RVL-CDIP, FUNSD, DocVQA, CORD can be found "),Co=r(li,"A",{href:!0,rel:!0});var mf=i(Co);gl=a(mf,"here"),mf.forEach(t),_l=a(li,"."),li.forEach(t),vl=d(ye),$e=r(ye,"LI",{});var Vn=i($e);yl=a(Vn,"LayoutLMv2 uses Facebook AI\u2019s "),jo=r(Vn,"A",{href:!0,rel:!0});var ff=i(jo);bl=a(ff,"Detectron2"),ff.forEach(t),Ll=a(Vn,` package for its visual backbone. See `),Io=r(Vn,"A",{href:!0,rel:!0});var gf=i(Io);wl=a(gf,"this link"),gf.forEach(t),kl=a(Vn,` for installation instructions.`),Vn.forEach(t),xl=d(ye),k=r(ye,"LI",{});var E=i(k);Ml=a(E,"In addition to "),ia=r(E,"CODE",{});var _f=i(ia);Tl=a(_f,"input_ids"),_f.forEach(t),El=a(E,", "),Kt=r(E,"A",{href:!0});var vf=i(Kt);zl=a(vf,"forward()"),vf.forEach(t),$l=a(E,` expects 2 additional inputs, namely `),la=r(E,"CODE",{});var yf=i(la);ql=a(yf,"image"),yf.forEach(t),Fl=a(E," and "),ca=r(E,"CODE",{});var bf=i(ca);Pl=a(bf,"bbox"),bf.forEach(t),Cl=a(E,". The "),da=r(E,"CODE",{});var Lf=i(da);jl=a(Lf,"image"),Lf.forEach(t),Il=a(E,` input corresponds to the original document image in which the text tokens occur. The model expects each document image to be of size 224x224. This means that if you have a batch of document images, `),ua=r(E,"CODE",{});var wf=i(ua);Al=a(wf,"image"),wf.forEach(t),Dl=a(E,` should be a tensor of shape (batch_size, 3, 224, 224). This can be either a `),pa=r(E,"CODE",{});var kf=i(pa);Nl=a(kf,"torch.Tensor"),kf.forEach(t),Ol=a(E," or a "),ha=r(E,"CODE",{});var xf=i(ha);Sl=a(xf,"Detectron2.structures.ImageList"),xf.forEach(t),Rl=a(E,`. You don\u2019t need to normalize the channels, as this is done by the model. 
Important to note is that the visual backbone expects BGR channels instead of RGB, as all models in Detectron2 are pre-trained using the BGR format. The `),ma=r(E,"CODE",{});var Mf=i(ma);Ul=a(Mf,"bbox"),Mf.forEach(t),Bl=a(E,` input are the bounding boxes (i.e. 2D-positions) of the input text tokens. This is identical to `),Xt=r(E,"A",{href:!0});var Tf=i(Xt);Wl=a(Tf,"LayoutLMModel"),Tf.forEach(t),Vl=a(E,`. These can be obtained using an external OCR engine such as Google\u2019s `),Ao=r(E,"A",{href:!0,rel:!0});var Ef=i(Ao);Ql=a(Ef,"Tesseract"),Ef.forEach(t),Gl=a(E," (there\u2019s a "),Do=r(E,"A",{href:!0,rel:!0});var zf=i(Do);Hl=a(zf,`Python wrapper`),zf.forEach(t),Kl=a(E,` available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. To normalize, you can use the following function:`),E.forEach(t),ye.forEach(t),pr=d(o),m(No.$$.fragment,o),hr=d(o),me=r(o,"P",{});var Qn=i(me);Xl=a(Qn,"Here, "),fa=r(Qn,"CODE",{});var $f=i(fa);Yl=a($f,"width"),$f.forEach(t),Zl=a(Qn," and "),ga=r(Qn,"CODE",{});var qf=i(ga);Jl=a(qf,"height"),qf.forEach(t),ec=a(Qn,` correspond to the width and height of the original document in which the token occurs (before resizing the image). Those can be obtained using the Python Image Library (PIL) library for example, as follows:`),Qn.forEach(t),mr=d(o),m(Oo.$$.fragment,o),fr=d(o),Ge=r(o,"P",{});var ci=i(Ge);oc=a(ci,"However, this model includes a brand new "),Yt=r(ci,"A",{href:!0});var Ff=i(Yt);tc=a(Ff,"LayoutLMv2Processor"),Ff.forEach(t),nc=a(ci,` which can be used to directly prepare data for the model (including applying OCR under the hood). More information can be found in the \u201CUsage\u201D section below.`),ci.forEach(t),gr=d(o),fe=r(o,"UL",{});var Gn=i(fe);j=r(Gn,"LI",{});var O=i(j);ac=a(O,"Internally, "),Zt=r(O,"A",{href:!0});var Pf=i(Zt);sc=a(Pf,"LayoutLMv2Model"),Pf.forEach(t),rc=a(O," will send the "),_a=r(O,"CODE",{});var Cf=i(_a);ic=a(Cf,"image"),Cf.forEach(t),lc=a(O,` input through its visual backbone to obtain a lower-resolution feature map, whose shape is equal to the `),va=r(O,"CODE",{});var jf=i(va);cc=a(jf,"image_feature_pool_shape"),jf.forEach(t),dc=a(O,` attribute of `),Jt=r(O,"A",{href:!0});var If=i(Jt);uc=a(If,"LayoutLMv2Config"),If.forEach(t),pc=a(O,`. This feature map is then flattened to obtain a sequence of image tokens. As the size of the feature map is 7x7 by default, one obtains 49 image tokens. These are then concatenated with the text tokens, and send through the Transformer encoder. This means that the last hidden states of the model will have a length of 512 + 49 = 561, if you pad the text tokens up to the max length. More generally, the last hidden states will have a shape of `),ya=r(O,"CODE",{});var Af=i(ya);hc=a(Af,"seq_length"),Af.forEach(t),mc=a(O," + "),ba=r(O,"CODE",{});var Df=i(ba);fc=a(Df,"image_feature_pool_shape[0]"),Df.forEach(t),gc=a(O,` * `),La=r(O,"CODE",{});var Nf=i(La);_c=a(Nf,"config.image_feature_pool_shape[1]"),Nf.forEach(t),vc=a(O,"."),O.forEach(t),yc=d(Gn),So=r(Gn,"LI",{});var di=i(So);bc=a(di,"When calling "),en=r(di,"A",{href:!0});var Of=i(en);Lc=a(Of,"from_pretrained()"),Of.forEach(t),wc=a(di,`, a warning will be printed with a long list of parameter names that are not initialized. 
This is not a problem, as these parameters are batch normalization statistics, which are going to have values when fine-tuning on a custom dataset.`),di.forEach(t),kc=d(Gn),Ro=r(Gn,"LI",{});var ui=i(Ro);xc=a(ui,"If you want to train the model in a distributed environment, make sure to call "),wa=r(ui,"CODE",{});var Sf=i(wa);Mc=a(Sf,"synchronize_batch_norm"),Sf.forEach(t),Tc=a(ui,` on the model in order to properly synchronize the batch normalization layers of the visual backbone.`),ui.forEach(t),Gn.forEach(t),_r=d(o),He=r(o,"P",{});var pi=i(He);Ec=a(pi,`In addition, there\u2019s LayoutXLM, which is a multilingual version of LayoutLMv2. More information can be found on `),on=r(pi,"A",{href:!0});var Rf=i(on);zc=a(Rf,"LayoutXLM\u2019s documentation page"),Rf.forEach(t),$c=a(pi,"."),pi.forEach(t),vr=d(o),qe=r(o,"H2",{class:!0});var hi=i(qe);Ke=r(hi,"A",{id:!0,class:!0,href:!0});var Uf=i(Ke);ka=r(Uf,"SPAN",{});var Bf=i(ka);m(Uo.$$.fragment,Bf),Bf.forEach(t),Uf.forEach(t),qc=d(hi),xa=r(hi,"SPAN",{});var Wf=i(xa);Fc=a(Wf,"Usage: LayoutLMv2Processor"),Wf.forEach(t),hi.forEach(t),yr=d(o),W=r(o,"P",{});var be=i(W);Pc=a(be,"The easiest way to prepare data for the model is to use "),tn=r(be,"A",{href:!0});var Vf=i(tn);Cc=a(Vf,"LayoutLMv2Processor"),Vf.forEach(t),jc=a(be,`, which internally combines a feature extractor (`),nn=r(be,"A",{href:!0});var Qf=i(nn);Ic=a(Qf,"LayoutLMv2FeatureExtractor"),Qf.forEach(t),Ac=a(be,`) and a tokenizer (`),an=r(be,"A",{href:!0});var Gf=i(an);Dc=a(Gf,"LayoutLMv2Tokenizer"),Gf.forEach(t),Nc=a(be," or "),sn=r(be,"A",{href:!0});var Hf=i(sn);Oc=a(Hf,"LayoutLMv2TokenizerFast"),Hf.forEach(t),Sc=a(be,`). The feature extractor handles the image modality, while the tokenizer handles the text modality. A processor combines both, which is ideal for a multi-modal model like LayoutLMv2. Note that you can still use both separately, if you only want to handle one modality.`),be.forEach(t),br=d(o),m(Bo.$$.fragment,o),Lr=d(o),T=r(o,"P",{});var F=i(T);Rc=a(F,"In short, one can provide a document image (and possibly additional data) to "),rn=r(F,"A",{href:!0});var Kf=i(rn);Uc=a(Kf,"LayoutLMv2Processor"),Kf.forEach(t),Bc=a(F,`, and it will create the inputs expected by the model. Internally, the processor first uses `),ln=r(F,"A",{href:!0});var Xf=i(ln);Wc=a(Xf,"LayoutLMv2FeatureExtractor"),Xf.forEach(t),Vc=a(F,` to apply OCR on the image to get a list of words and normalized bounding boxes, as well to resize the image to a given size in order to get the `),Ma=r(F,"CODE",{});var Yf=i(Ma);Qc=a(Yf,"image"),Yf.forEach(t),Gc=a(F,` input. The words and normalized bounding boxes are then provided to `),cn=r(F,"A",{href:!0});var Zf=i(cn);Hc=a(Zf,"LayoutLMv2Tokenizer"),Zf.forEach(t),Kc=a(F,` or `),dn=r(F,"A",{href:!0});var Jf=i(dn);Xc=a(Jf,"LayoutLMv2TokenizerFast"),Jf.forEach(t),Yc=a(F,", which converts them to token-level "),Ta=r(F,"CODE",{});var eg=i(Ta);Zc=a(eg,"input_ids"),eg.forEach(t),Jc=a(F,`, `),Ea=r(F,"CODE",{});var og=i(Ea);ed=a(og,"attention_mask"),og.forEach(t),od=a(F,", "),za=r(F,"CODE",{});var tg=i(za);td=a(tg,"token_type_ids"),tg.forEach(t),nd=a(F,", "),$a=r(F,"CODE",{});var ng=i($a);ad=a(ng,"bbox"),ng.forEach(t),sd=a(F,`. 
Optionally, one can provide word labels to the processor, which are turned into token-level `),qa=r(F,"CODE",{});var ag=i(qa);rd=a(ag,"labels"),ag.forEach(t),id=a(F,"."),F.forEach(t),wr=d(o),S=r(o,"P",{});var ue=i(S);un=r(ue,"A",{href:!0});var sg=i(un);ld=a(sg,"LayoutLMv2Processor"),sg.forEach(t),cd=a(ue," uses "),Wo=r(ue,"A",{href:!0,rel:!0});var rg=i(Wo);dd=a(rg,"PyTesseract"),rg.forEach(t),ud=a(ue,`, a Python wrapper around Google\u2019s Tesseract OCR engine, under the hood. Note that you can still use your own OCR engine of choice, and provide the words and normalized boxes yourself. This requires initializing `),pn=r(ue,"A",{href:!0});var ig=i(pn);pd=a(ig,"LayoutLMv2FeatureExtractor"),ig.forEach(t),hd=a(ue," with "),Fa=r(ue,"CODE",{});var lg=i(Fa);md=a(lg,"apply_ocr"),lg.forEach(t),fd=a(ue," set to "),Pa=r(ue,"CODE",{});var cg=i(Pa);gd=a(cg,"False"),cg.forEach(t),_d=a(ue,"."),ue.forEach(t),kr=d(o),hn=r(o,"P",{});var dg=i(hn);vd=a(dg,`In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs).`),dg.forEach(t),xr=d(o),mn=r(o,"P",{});var ug=i(mn);Ca=r(ug,"STRONG",{});var pg=i(Ca);yd=a(pg,`Use case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True`),pg.forEach(t),ug.forEach(t),Mr=d(o),fn=r(o,"P",{});var hg=i(fn);bd=a(hg,`This is the simplest case, in which the processor (actually the feature extractor) will perform OCR on the image to get the words and normalized bounding boxes.`),hg.forEach(t),Tr=d(o),m(Vo.$$.fragment,o),Er=d(o),gn=r(o,"P",{});var mg=i(gn);ja=r(mg,"STRONG",{});var fg=i(ja);Ld=a(fg,"Use case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False"),fg.forEach(t),mg.forEach(t),zr=d(o),ge=r(o,"P",{});var Hn=i(ge);wd=a(Hn,"In case one wants to do OCR themselves, one can initialize the feature extractor with "),Ia=r(Hn,"CODE",{});var gg=i(Ia);kd=a(gg,"apply_ocr"),gg.forEach(t),xd=a(Hn,` set to `),Aa=r(Hn,"CODE",{});var _g=i(Aa);Md=a(_g,"False"),_g.forEach(t),Td=a(Hn,`. In that case, one should provide the words and corresponding (normalized) bounding boxes themselves to the processor.`),Hn.forEach(t),$r=d(o),m(Qo.$$.fragment,o),qr=d(o),_n=r(o,"P",{});var vg=i(_n);Da=r(vg,"STRONG",{});var yg=i(Da);Ed=a(yg,"Use case 3: token classification (training), apply_ocr=False"),yg.forEach(t),vg.forEach(t),Fr=d(o),V=r(o,"P",{});var Le=i(V);zd=a(Le,`For token classification tasks (such as FUNSD, CORD, SROIE, Kleister-NDA), one can also provide the corresponding word labels in order to train a model. The processor will then convert these into token-level `),Na=r(Le,"CODE",{});var bg=i(Na);$d=a(bg,"labels"),bg.forEach(t),qd=a(Le,`. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the `),Oa=r(Le,"CODE",{});var Lg=i(Oa);Fd=a(Lg,"ignore_index"),Lg.forEach(t),Pd=a(Le,` of PyTorch\u2019s CrossEntropyLoss. 
In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with `),Sa=r(Le,"CODE",{});var wg=i(Sa);Cd=a(wg,"only_label_first_subword"),wg.forEach(t),jd=a(Le," set to "),Ra=r(Le,"CODE",{});var kg=i(Ra);Id=a(kg,"False"),kg.forEach(t),Ad=a(Le,"."),Le.forEach(t),Pr=d(o),m(Go.$$.fragment,o),Cr=d(o),vn=r(o,"P",{});var xg=i(vn);Ua=r(xg,"STRONG",{});var Mg=i(Ua);Dd=a(Mg,"Use case 4: visual question answering (inference), apply_ocr=True"),Mg.forEach(t),xg.forEach(t),jr=d(o),yn=r(o,"P",{});var Tg=i(yn);Nd=a(Tg,`For visual question answering tasks (such as DocVQA), you can provide a question to the processor. By default, the processor will apply OCR on the image, and create [CLS] question tokens [SEP] word tokens [SEP].`),Tg.forEach(t),Ir=d(o),m(Ho.$$.fragment,o),Ar=d(o),bn=r(o,"P",{});var Eg=i(bn);Ba=r(Eg,"STRONG",{});var zg=i(Ba);Od=a(zg,"Use case 5: visual question answering (inference), apply_ocr=False"),zg.forEach(t),Eg.forEach(t),Dr=d(o),Ln=r(o,"P",{});var $g=i(Ln);Sd=a($g,`For visual question answering tasks (such as DocVQA), you can provide a question to the processor. If you want to perform OCR yourself, you can provide your own words and (normalized) bounding boxes to the processor.`),$g.forEach(t),Nr=d(o),m(Ko.$$.fragment,o),Or=d(o),Fe=r(o,"H2",{class:!0});var mi=i(Fe);Xe=r(mi,"A",{id:!0,class:!0,href:!0});var qg=i(Xe);Wa=r(qg,"SPAN",{});var Fg=i(Wa);m(Xo.$$.fragment,Fg),Fg.forEach(t),qg.forEach(t),Rd=d(mi),Va=r(mi,"SPAN",{});var Pg=i(Va);Ud=a(Pg,"LayoutLMv2Config"),Pg.forEach(t),mi.forEach(t),Sr=d(o),R=r(o,"DIV",{class:!0});var we=i(R);m(Yo.$$.fragment,we),Bd=d(we),Pe=r(we,"P",{});var Kn=i(Pe);Wd=a(Kn,"This is the configuration class to store the configuration of a "),wn=r(Kn,"A",{href:!0});var Cg=i(wn);Vd=a(Cg,"LayoutLMv2Model"),Cg.forEach(t),Qd=a(Kn,`. It is used to instantiate an LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLMv2 `),Zo=r(Kn,"A",{href:!0,rel:!0});var jg=i(Zo);Gd=a(jg,"microsoft/layoutlmv2-base-uncased"),jg.forEach(t),Hd=a(Kn," architecture."),Kn.forEach(t),Kd=d(we),Ce=r(we,"P",{});var Xn=i(Ce);Xd=a(Xn,"Configuration objects inherit from "),kn=r(Xn,"A",{href:!0});var Ig=i(kn);Yd=a(Ig,"PretrainedConfig"),Ig.forEach(t),Zd=a(Xn,` and can be used to control the model outputs. Read the documentation from `),xn=r(Xn,"A",{href:!0});var Ag=i(xn);Jd=a(Ag,"PretrainedConfig"),Ag.forEach(t),eu=a(Xn," for more information."),Xn.forEach(t),ou=d(we),Qa=r(we,"P",{});var Dg=i(Qa);tu=a(Dg,"Example:"),Dg.forEach(t),nu=d(we),m(Jo.$$.fragment,we),we.forEach(t),Rr=d(o),je=r(o,"H2",{class:!0});var fi=i(je);Ye=r(fi,"A",{id:!0,class:!0,href:!0});var Ng=i(Ye);Ga=r(Ng,"SPAN",{});var Og=i(Ga);m(et.$$.fragment,Og),Og.forEach(t),Ng.forEach(t),au=d(fi),Ha=r(fi,"SPAN",{});var Sg=i(Ha);su=a(Sg,"LayoutLMv2FeatureExtractor"),Sg.forEach(t),fi.forEach(t),Ur=d(o),J=r(o,"DIV",{class:!0});var po=i(J);m(ot.$$.fragment,po),ru=d(po),Ka=r(po,"P",{});var Rg=i(Ka);iu=a(Rg,`Constructs a LayoutLMv2 feature extractor. This can be used to resize document images to the same size, as well as to apply OCR on them in order to get a list of words and normalized bounding boxes.`),Rg.forEach(t),lu=d(po),tt=r(po,"P",{});var gi=i(tt);cu=a(gi,"This feature extractor inherits from "),Xa=r(gi,"CODE",{});var Ug=i(Xa);du=a(Ug,"PreTrainedFeatureExtractor()"),Ug.forEach(t),uu=a(gi,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),gi.forEach(t),pu=d(po),ae=r(po,"DIV",{class:!0});var ho=i(ae);m(nt.$$.fragment,ho),hu=d(ho),Ya=r(ho,"P",{});var Bg=i(Ya);mu=a(Bg,"Main method to prepare for the model one or several image(s)."),Bg.forEach(t),fu=d(ho),Za=r(ho,"P",{});var Wg=i(Za);gu=a(Wg,"Examples:"),Wg.forEach(t),_u=d(ho),m(at.$$.fragment,ho),ho.forEach(t),po.forEach(t),Br=d(o),Ie=r(o,"H2",{class:!0});var _i=i(Ie);Ze=r(_i,"A",{id:!0,class:!0,href:!0});var Vg=i(Ze);Ja=r(Vg,"SPAN",{});var Qg=i(Ja);m(st.$$.fragment,Qg),Qg.forEach(t),Vg.forEach(t),vu=d(_i),es=r(_i,"SPAN",{});var Gg=i(es);yu=a(Gg,"LayoutLMv2Tokenizer"),Gg.forEach(t),_i.forEach(t),Wr=d(o),A=r(o,"DIV",{class:!0});var se=i(A);m(rt.$$.fragment,se),bu=d(se),D=r(se,"P",{});var X=i(D);Lu=a(X,"Construct a LayoutLMv2 tokenizer. Based on WordPiece. "),Mn=r(X,"A",{href:!0});var Hg=i(Mn);wu=a(Hg,"LayoutLMv2Tokenizer"),Hg.forEach(t),ku=a(X,` can be used to turn words, word-level bounding boxes and optional word labels to token-level `),os=r(X,"CODE",{});var Kg=i(os);xu=a(Kg,"input_ids"),Kg.forEach(t),Mu=a(X,`, `),ts=r(X,"CODE",{});var Xg=i(ts);Tu=a(Xg,"attention_mask"),Xg.forEach(t),Eu=a(X,", "),ns=r(X,"CODE",{});var Yg=i(ns);zu=a(Yg,"token_type_ids"),Yg.forEach(t),$u=a(X,", "),as=r(X,"CODE",{});var Zg=i(as);qu=a(Zg,"bbox"),Zg.forEach(t),Fu=a(X,", and optional "),ss=r(X,"CODE",{});var Jg=i(ss);Pu=a(Jg,"labels"),Jg.forEach(t),Cu=a(X," (for token classification)."),X.forEach(t),ju=d(se),it=r(se,"P",{});var vi=i(it);Iu=a(vi,"This tokenizer inherits from "),Tn=r(vi,"A",{href:!0});var e_=i(Tn);Au=a(e_,"PreTrainedTokenizer"),e_.forEach(t),Du=a(vi,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),vi.forEach(t),Nu=d(se),En=r(se,"P",{});var Wm=i(En);zn=r(Wm,"A",{href:!0});var o_=i(zn);Ou=a(o_,"LayoutLMv2Tokenizer"),o_.forEach(t),Su=a(Wm,` runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes.`),Wm.forEach(t),Ru=d(se),Je=r(se,"DIV",{class:!0});var yi=i(Je);m(lt.$$.fragment,yi),Uu=d(yi),rs=r(yi,"P",{});var t_=i(rs);Bu=a(t_,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),t_.forEach(t),yi.forEach(t),Wu=d(se),is=r(se,"DIV",{class:!0}),i(is).forEach(t),se.forEach(t),Vr=d(o),Ae=r(o,"H2",{class:!0});var bi=i(Ae);eo=r(bi,"A",{id:!0,class:!0,href:!0});var n_=i(eo);ls=r(n_,"SPAN",{});var a_=i(ls);m(ct.$$.fragment,a_),a_.forEach(t),n_.forEach(t),Vu=d(bi),cs=r(bi,"SPAN",{});var s_=i(cs);Qu=a(s_,"LayoutLMv2TokenizerFast"),s_.forEach(t),bi.forEach(t),Qr=d(o),ee=r(o,"DIV",{class:!0});var mo=i(ee);m(dt.$$.fragment,mo),Gu=d(mo),ut=r(mo,"P",{});var Li=i(ut);Hu=a(Li,"Construct a \u201Cfast\u201D LayoutLMv2 tokenizer (backed by HuggingFace\u2019s "),ds=r(Li,"EM",{});var r_=i(ds);Ku=a(r_,"tokenizers"),r_.forEach(t),Xu=a(Li," library). Based on WordPiece."),Li.forEach(t),Yu=d(mo),pt=r(mo,"P",{});var wi=i(pt);Zu=a(wi,"This tokenizer inherits from "),$n=r(wi,"A",{href:!0});var i_=i($n);Ju=a(i_,"PreTrainedTokenizerFast"),i_.forEach(t),ep=a(wi,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),wi.forEach(t),op=d(mo),oo=r(mo,"DIV",{class:!0});var ki=i(oo);m(ht.$$.fragment,ki),tp=d(ki),us=r(ki,"P",{});var l_=i(us);np=a(l_,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.`),l_.forEach(t),ki.forEach(t),mo.forEach(t),Gr=d(o),De=r(o,"H2",{class:!0});var xi=i(De);to=r(xi,"A",{id:!0,class:!0,href:!0});var c_=i(to);ps=r(c_,"SPAN",{});var d_=i(ps);m(mt.$$.fragment,d_),d_.forEach(t),c_.forEach(t),ap=d(xi),hs=r(xi,"SPAN",{});var u_=i(hs);sp=a(u_,"LayoutLMv2Processor"),u_.forEach(t),xi.forEach(t),Hr=d(o),U=r(o,"DIV",{class:!0});var ke=i(U);m(ft.$$.fragment,ke),rp=d(ke),ms=r(ke,"P",{});var p_=i(ms);ip=a(p_,`Constructs a LayoutLMv2 processor which combines a LayoutLMv2 feature extractor and a LayoutLMv2 tokenizer into a single processor.`),p_.forEach(t),lp=d(ke),qn=r(ke,"P",{});var Vm=i(qn);Fn=r(Vm,"A",{href:!0});var h_=i(Fn);cp=a(h_,"LayoutLMv2Processor"),h_.forEach(t),dp=a(Vm," offers all the functionalities you need to prepare data for the model."),Vm.forEach(t),up=d(ke),$=r(ke,"P",{});var C=i($);pp=a(C,"It first uses "),Pn=r(C,"A",{href:!0});var m_=i(Pn);hp=a(m_,"LayoutLMv2FeatureExtractor"),m_.forEach(t),mp=a(C,` to resize document images to a fixed size, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to `),Cn=r(C,"A",{href:!0});var f_=i(Cn);fp=a(f_,"LayoutLMv2Tokenizer"),f_.forEach(t),gp=a(C," or "),jn=r(C,"A",{href:!0});var g_=i(jn);_p=a(g_,"LayoutLMv2TokenizerFast"),g_.forEach(t),vp=a(C,`, which turns the words and bounding boxes into token-level `),fs=r(C,"CODE",{});var __=i(fs);yp=a(__,"input_ids"),__.forEach(t),bp=a(C,", "),gs=r(C,"CODE",{});var v_=i(gs);Lp=a(v_,"attention_mask"),v_.forEach(t),wp=a(C,", "),_s=r(C,"CODE",{});var y_=i(_s);kp=a(y_,"token_type_ids"),y_.forEach(t),xp=a(C,", "),vs=r(C,"CODE",{});var b_=i(vs);Mp=a(b_,"bbox"),b_.forEach(t),Tp=a(C,`. Optionally, one can provide integer `),ys=r(C,"CODE",{});var L_=i(ys);Ep=a(L_,"word_labels"),L_.forEach(t),zp=a(C,", which are turned into token-level "),bs=r(C,"CODE",{});var w_=i(bs);$p=a(w_,"labels"),w_.forEach(t),qp=a(C,` for token classification tasks (such as FUNSD, CORD).`),C.forEach(t),Fp=d(ke),_e=r(ke,"DIV",{class:!0});var Yn=i(_e);m(gt.$$.fragment,Yn),Pp=d(Yn),x=r(Yn,"P",{});var z=i(x);Cp=a(z,"This method first forwards the "),Ls=r(z,"CODE",{});var k_=i(Ls);jp=a(k_,"images"),k_.forEach(t),Ip=a(z,` argument to `),_t=r(z,"A",{href:!0});var Qm=i(_t);ws=r(Qm,"STRONG",{});var x_=i(ws);Ap=a(x_,"call"),x_.forEach(t),Dp=a(Qm,"()"),Qm.forEach(t),Np=a(z,". In case "),In=r(z,"A",{href:!0});var M_=i(In);Op=a(M_,"LayoutLMv2FeatureExtractor"),M_.forEach(t),Sp=a(z,` was initialized with `),ks=r(z,"CODE",{});var T_=i(ks);Rp=a(T_,"apply_ocr"),T_.forEach(t),Up=a(z," set to "),xs=r(z,"CODE",{});var E_=i(xs);Bp=a(E_,"True"),E_.forEach(t),Wp=a(z,`, it passes the obtained words and bounding boxes along with the additional arguments to `),vt=r(z,"A",{href:!0});var Gm=i(vt);Ms=r(Gm,"STRONG",{});var z_=i(Ms);Vp=a(z_,"call"),z_.forEach(t),Qp=a(Gm,"()"),Gm.forEach(t),Gp=a(z,` and returns the output, together with resized `),Ts=r(z,"CODE",{});var $_=i(Ts);Hp=a($_,"images"),$_.forEach(t),Kp=a(z,". 
In case "),An=r(z,"A",{href:!0});var q_=i(An);Xp=a(q_,"LayoutLMv2FeatureExtractor"),q_.forEach(t),Yp=a(z," was initialized with "),Es=r(z,"CODE",{});var F_=i(Es);Zp=a(F_,"apply_ocr"),F_.forEach(t),Jp=a(z,` set to `),zs=r(z,"CODE",{});var P_=i(zs);eh=a(P_,"False"),P_.forEach(t),oh=a(z,", it passes the words ("),$s=r(z,"CODE",{});var C_=i($s);th=a(C_,"text"),C_.forEach(t),nh=a(z,"/"),qs=r(z,"CODE",{});var j_=i(qs);ah=a(j_,"text_pair`) and `boxes` specified by the user along with the additional arguments to [__call__()](/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer.__call__) and returns the output, together with resized `images"),j_.forEach(t),sh=a(z,"."),z.forEach(t),rh=d(Yn),Fs=r(Yn,"P",{});var I_=i(Fs);ih=a(I_,"Please refer to the docstring of the above two methods for more information."),I_.forEach(t),Yn.forEach(t),ke.forEach(t),Kr=d(o),Ne=r(o,"H2",{class:!0});var Mi=i(Ne);no=r(Mi,"A",{id:!0,class:!0,href:!0});var A_=i(no);Ps=r(A_,"SPAN",{});var D_=i(Ps);m(yt.$$.fragment,D_),D_.forEach(t),A_.forEach(t),lh=d(Mi),Cs=r(Mi,"SPAN",{});var N_=i(Cs);ch=a(N_,"LayoutLMv2Model"),N_.forEach(t),Mi.forEach(t),Xr=d(o),le=r(o,"DIV",{class:!0});var Zn=i(le);m(bt.$$.fragment,Zn),dh=d(Zn),Lt=r(Zn,"P",{});var Ti=i(Lt);uh=a(Ti,`The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),wt=r(Ti,"A",{href:!0,rel:!0});var O_=i(wt);ph=a(O_,"torch.nn.Module"),O_.forEach(t),hh=a(Ti,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ti.forEach(t),mh=d(Zn),Q=r(Zn,"DIV",{class:!0});var xe=i(Q);m(kt.$$.fragment,xe),fh=d(xe),Oe=r(xe,"P",{});var Jn=i(Oe);gh=a(Jn,"The "),Dn=r(Jn,"A",{href:!0});var S_=i(Dn);_h=a(S_,"LayoutLMv2Model"),S_.forEach(t),vh=a(Jn," forward method, overrides the "),js=r(Jn,"CODE",{});var R_=i(js);yh=a(R_,"__call__"),R_.forEach(t),bh=a(Jn," special method."),Jn.forEach(t),Lh=d(xe),m(ao.$$.fragment,xe),wh=d(xe),Is=r(xe,"P",{});var U_=i(Is);kh=a(U_,"Examples:"),U_.forEach(t),xh=d(xe),m(xt.$$.fragment,xe),xe.forEach(t),Zn.forEach(t),Yr=d(o),Se=r(o,"H2",{class:!0});var Ei=i(Se);so=r(Ei,"A",{id:!0,class:!0,href:!0});var B_=i(so);As=r(B_,"SPAN",{});var W_=i(As);m(Mt.$$.fragment,W_),W_.forEach(t),B_.forEach(t),Mh=d(Ei),Ds=r(Ei,"SPAN",{});var V_=i(Ds);Th=a(V_,"LayoutLMv2ForSequenceClassification"),V_.forEach(t),Ei.forEach(t),Zr=d(o),oe=r(o,"DIV",{class:!0});var fo=i(oe);m(Tt.$$.fragment,fo),Eh=d(fo),Et=r(fo,"P",{});var zi=i(Et);zh=a(zi,`LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual embeddings, e.g. for document image classification tasks such as the `),Ns=r(zi,"CODE",{});var Q_=i(Ns);$h=a(Q_,"RVL-CDIP <https://www.cs.cmu.edu/~aharley/rvl-cdip/>"),Q_.forEach(t),qh=a(zi,"__ dataset."),zi.forEach(t),Fh=d(fo),zt=r(fo,"P",{});var $i=i(zt);Ph=a($i,"This model is a PyTorch "),$t=r($i,"A",{href:!0,rel:!0});var G_=i($t);Ch=a(G_,"torch.nn.Module"),G_.forEach(t),jh=a($i,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$i.forEach(t),Ih=d(fo),G=r(fo,"DIV",{class:!0});var Me=i(G);m(qt.$$.fragment,Me),Ah=d(Me),Re=r(Me,"P",{});var ea=i(Re);Dh=a(ea,"The "),Nn=r(ea,"A",{href:!0});var H_=i(Nn);Nh=a(H_,"LayoutLMv2ForSequenceClassification"),H_.forEach(t),Oh=a(ea," forward method, overrides the "),Os=r(ea,"CODE",{});var K_=i(Os);Sh=a(K_,"__call__"),K_.forEach(t),Rh=a(ea," special method."),ea.forEach(t),Uh=d(Me),m(ro.$$.fragment,Me),Bh=d(Me),Ss=r(Me,"P",{});var X_=i(Ss);Wh=a(X_,"Examples:"),X_.forEach(t),Vh=d(Me),m(Ft.$$.fragment,Me),Me.forEach(t),fo.forEach(t),Jr=d(o),Ue=r(o,"H2",{class:!0});var qi=i(Ue);io=r(qi,"A",{id:!0,class:!0,href:!0});var Y_=i(io);Rs=r(Y_,"SPAN",{});var Z_=i(Rs);m(Pt.$$.fragment,Z_),Z_.forEach(t),Y_.forEach(t),Qh=d(qi),Us=r(qi,"SPAN",{});var J_=i(Us);Gh=a(J_,"LayoutLMv2ForTokenClassification"),J_.forEach(t),qi.forEach(t),ei=d(o),te=r(o,"DIV",{class:!0});var go=i(te);m(Ct.$$.fragment,go),Hh=d(go),ce=r(go,"P",{});var _o=i(ce);Kh=a(_o,`LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden states) e.g. for sequence labeling (information extraction) tasks such as `),Bs=r(_o,"CODE",{});var ev=i(Bs);Xh=a(ev,"FUNSD <https://guillaumejaume.github.io/FUNSD/>"),ev.forEach(t),On=r(_o,"STRONG",{});var Hm=i(On);Yh=a(Hm,", "),Ws=r(Hm,"CODE",{});var ov=i(Ws);Zh=a(ov,"SROIE <https://rrc.cvc.uab.es/?ch=13>"),ov.forEach(t),Hm.forEach(t),Jh=a(_o,", "),Vs=r(_o,"CODE",{});var tv=i(Vs);em=a(tv,"CORD <https://github.com/clovaai/cord>"),tv.forEach(t),Sn=r(_o,"STRONG",{});var Km=i(Sn);om=a(Km,"and "),Qs=r(Km,"CODE",{});var nv=i(Qs);tm=a(nv,"Kleister-NDA <https://github.com/applicaai/kleister-nda>"),nv.forEach(t),Km.forEach(t),nm=a(_o,"."),_o.forEach(t),am=d(go),jt=r(go,"P",{});var Fi=i(jt);sm=a(Fi,"This model is a PyTorch "),It=r(Fi,"A",{href:!0,rel:!0});var av=i(It);rm=a(av,"torch.nn.Module"),av.forEach(t),im=a(Fi,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fi.forEach(t),lm=d(go),H=r(go,"DIV",{class:!0});var Te=i(H);m(At.$$.fragment,Te),cm=d(Te),Be=r(Te,"P",{});var oa=i(Be);dm=a(oa,"The "),Rn=r(oa,"A",{href:!0});var sv=i(Rn);um=a(sv,"LayoutLMv2ForTokenClassification"),sv.forEach(t),pm=a(oa," forward method, overrides the "),Gs=r(oa,"CODE",{});var rv=i(Gs);hm=a(rv,"__call__"),rv.forEach(t),mm=a(oa," special method."),oa.forEach(t),fm=d(Te),m(lo.$$.fragment,Te),gm=d(Te),Hs=r(Te,"P",{});var iv=i(Hs);_m=a(iv,"Examples:"),iv.forEach(t),vm=d(Te),m(Dt.$$.fragment,Te),Te.forEach(t),go.forEach(t),oi=d(o),We=r(o,"H2",{class:!0});var Pi=i(We);co=r(Pi,"A",{id:!0,class:!0,href:!0});var lv=i(co);Ks=r(lv,"SPAN",{});var cv=i(Ks);m(Nt.$$.fragment,cv),cv.forEach(t),lv.forEach(t),ym=d(Pi),Xs=r(Pi,"SPAN",{});var dv=i(Xs);bm=a(dv,"LayoutLMv2ForQuestionAnswering"),dv.forEach(t),Pi.forEach(t),ti=d(o),ne=r(o,"DIV",{class:!0});var vo=i(ne);m(Ot.$$.fragment,vo),Lm=d(vo),de=r(vo,"P",{});var yo=i(de);wm=a(yo,"LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as "),Ys=r(yo,"CODE",{});var uv=i(Ys);km=a(uv,"DocVQA <https://rrc.cvc.uab.es/?ch=17>"),uv.forEach(t),xm=a(yo,`__ (a linear layer on top of the text part of the hidden-states output to compute `),Zs=r(yo,"CODE",{});var pv=i(Zs);Mm=a(pv,"span start logits"),pv.forEach(t),Tm=a(yo," and "),Js=r(yo,"CODE",{});var hv=i(Js);Em=a(hv,"span end logits"),hv.forEach(t),zm=a(yo,")."),yo.forEach(t),$m=d(vo),St=r(vo,"P",{});var Ci=i(St);qm=a(Ci,"This model is a PyTorch "),Rt=r(Ci,"A",{href:!0,rel:!0});var mv=i(Rt);Fm=a(mv,"torch.nn.Module"),mv.forEach(t),Pm=a(Ci,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ci.forEach(t),Cm=d(vo),K=r(vo,"DIV",{class:!0});var Ee=i(K);m(Ut.$$.fragment,Ee),jm=d(Ee),Ve=r(Ee,"P",{});var ta=i(Ve);Im=a(ta,"The "),Un=r(ta,"A",{href:!0});var fv=i(Un);Am=a(fv,"LayoutLMv2ForQuestionAnswering"),fv.forEach(t),Dm=a(ta," forward method, overrides the "),er=r(ta,"CODE",{});var gv=i(er);Nm=a(gv,"__call__"),gv.forEach(t),Om=a(ta," special method."),ta.forEach(t),Sm=d(Ee),m(uo.$$.fragment,Ee),Rm=d(Ee),or=r(Ee,"P",{});var _v=i(or);Um=a(_v,"Examples:"),_v.forEach(t),Bm=d(Ee),m(Bt.$$.fragment,Ee),Ee.forEach(t),vo.forEach(t),this.h()},h(){l(y,"name","hf:doc:metadata"),l(y,"content",JSON.stringify(Ev)),l(M,"id","layoutlmv2"),l(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(M,"href","#layoutlmv2"),l(b,"class","relative group"),l(Qe,"id","overview"),l(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Qe,"href","#overview"),l(ze,"class","relative 
group"),l(Lo,"href","https://arxiv.org/abs/2012.14740"),l(Lo,"rel","nofollow"),l(Vt,"href","layoutlm"),l(wo,"href","https://guillaumejaume.github.io/FUNSD/"),l(wo,"rel","nofollow"),l(ko,"href","https://github.com/clovaai/cord"),l(ko,"rel","nofollow"),l(xo,"href","https://rrc.cvc.uab.es/?ch=13"),l(xo,"rel","nofollow"),l(Mo,"href","https://github.com/applicaai/kleister-nda"),l(Mo,"rel","nofollow"),l(Eo,"href","https://www.cs.cmu.edu/~aharley/rvl-cdip/"),l(Eo,"rel","nofollow"),l($o,"href","https://arxiv.org/abs/2007.00398"),l($o,"rel","nofollow"),l(Fo,"href","https://arxiv.org/abs/2012.14740"),l(Fo,"rel","nofollow"),l(Co,"href","https://github.com/NielsRogge/Transformers-Tutorials"),l(Co,"rel","nofollow"),l(jo,"href","https://github.com/facebookresearch/detectron2/"),l(jo,"rel","nofollow"),l(Io,"href","https://detectron2.readthedocs.io/en/latest/tutorials/install.html"),l(Io,"rel","nofollow"),l(Kt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model.forward"),l(Xt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel"),l(Ao,"href","https://github.com/tesseract-ocr/tesseract"),l(Ao,"rel","nofollow"),l(Do,"href","https://pypi.org/project/pytesseract/"),l(Do,"rel","nofollow"),l(Yt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor"),l(Zt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model"),l(Jt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config"),l(en,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),l(on,"href","layoutxlm"),l(Ke,"id","usage-layoutlmv2processor"),l(Ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ke,"href","#usage-layoutlmv2processor"),l(qe,"class","relative group"),l(tn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor"),l(nn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(an,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer"),l(sn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast"),l(rn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor"),l(ln,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(cn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer"),l(dn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast"),l(un,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor"),l(Wo,"href","https://pypi.org/project/pytesseract/"),l(Wo,"rel","nofollow"),l(pn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(Xe,"id","transformers.LayoutLMv2Config"),l(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Xe,"href","#transformers.LayoutLMv2Config"),l(Fe,"class","relative 
group"),l(wn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model"),l(Zo,"href","https://huggingface.co/microsoft/layoutlmv2-base-uncased"),l(Zo,"rel","nofollow"),l(kn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(xn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(R,"class","docstring"),l(Ye,"id","transformers.LayoutLMv2FeatureExtractor"),l(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ye,"href","#transformers.LayoutLMv2FeatureExtractor"),l(je,"class","relative group"),l(ae,"class","docstring"),l(J,"class","docstring"),l(Ze,"id","transformers.LayoutLMv2Tokenizer"),l(Ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ze,"href","#transformers.LayoutLMv2Tokenizer"),l(Ie,"class","relative group"),l(Mn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer"),l(Tn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(zn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer"),l(Je,"class","docstring"),l(is,"class","docstring"),l(A,"class","docstring"),l(eo,"id","transformers.LayoutLMv2TokenizerFast"),l(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(eo,"href","#transformers.LayoutLMv2TokenizerFast"),l(Ae,"class","relative group"),l($n,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(oo,"class","docstring"),l(ee,"class","docstring"),l(to,"id","transformers.LayoutLMv2Processor"),l(to,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(to,"href","#transformers.LayoutLMv2Processor"),l(De,"class","relative group"),l(Fn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor"),l(Pn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(Cn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer"),l(jn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast"),l(_t,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor.__call__"),l(In,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(vt,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer.__call__"),l(An,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor"),l(_e,"class","docstring"),l(U,"class","docstring"),l(no,"id","transformers.LayoutLMv2Model"),l(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(no,"href","#transformers.LayoutLMv2Model"),l(Ne,"class","relative 
group"),l(wt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(wt,"rel","nofollow"),l(Dn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model"),l(Q,"class","docstring"),l(le,"class","docstring"),l(so,"id","transformers.LayoutLMv2ForSequenceClassification"),l(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(so,"href","#transformers.LayoutLMv2ForSequenceClassification"),l(Se,"class","relative group"),l($t,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l($t,"rel","nofollow"),l(Nn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForSequenceClassification"),l(G,"class","docstring"),l(oe,"class","docstring"),l(io,"id","transformers.LayoutLMv2ForTokenClassification"),l(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(io,"href","#transformers.LayoutLMv2ForTokenClassification"),l(Ue,"class","relative group"),l(It,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(It,"rel","nofollow"),l(Rn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForTokenClassification"),l(H,"class","docstring"),l(te,"class","docstring"),l(co,"id","transformers.LayoutLMv2ForQuestionAnswering"),l(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(co,"href","#transformers.LayoutLMv2ForQuestionAnswering"),l(We,"class","relative group"),l(Rt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Rt,"rel","nofollow"),l(Un,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForQuestionAnswering"),l(K,"class","docstring"),l(ne,"class","docstring")},m(o,u){e(document.head,y),p(o,q,u),p(o,b,u),e(b,M),e(M,P),f(L,P,null),e(b,w),e(b,I),e(I,Ii),p(o,sr,u),p(o,ze,u),e(ze,Qe),e(Qe,na),f(bo,na,null),e(ze,Ai),e(ze,aa),e(aa,Di),p(o,rr,u),p(o,pe,u),e(pe,Ni),e(pe,Lo),e(Lo,Oi),e(pe,Si),e(pe,Vt),e(Vt,Ri),e(pe,Ui),p(o,ir,u),p(o,he,u),e(he,Z),e(Z,Bi),e(Z,wo),e(wo,Wi),e(Z,Vi),e(Z,ko),e(ko,Qi),e(Z,Gi),e(Z,xo),e(xo,Hi),e(Z,Ki),e(Z,Mo),e(Mo,Xi),e(Z,Yi),e(he,Zi),e(he,To),e(To,Ji),e(To,Eo),e(Eo,el),e(To,ol),e(he,tl),e(he,zo),e(zo,nl),e(zo,$o),e($o,al),e(zo,sl),p(o,lr,u),p(o,Qt,u),e(Qt,rl),p(o,cr,u),p(o,Gt,u),e(Gt,sa),e(sa,il),p(o,dr,u),p(o,Ht,u),e(Ht,ll),p(o,ur,u),p(o,B,u),e(B,ra),e(ra,cl),e(B,dl),e(B,qo),e(qo,ul),e(qo,Fo),e(Fo,pl),e(qo,hl),e(B,ml),e(B,Po),e(Po,fl),e(Po,Co),e(Co,gl),e(Po,_l),e(B,vl),e(B,$e),e($e,yl),e($e,jo),e(jo,bl),e($e,Ll),e($e,Io),e(Io,wl),e($e,kl),e(B,xl),e(B,k),e(k,Ml),e(k,ia),e(ia,Tl),e(k,El),e(k,Kt),e(Kt,zl),e(k,$l),e(k,la),e(la,ql),e(k,Fl),e(k,ca),e(ca,Pl),e(k,Cl),e(k,da),e(da,jl),e(k,Il),e(k,ua),e(ua,Al),e(k,Dl),e(k,pa),e(pa,Nl),e(k,Ol),e(k,ha),e(ha,Sl),e(k,Rl),e(k,ma),e(ma,Ul),e(k,Bl),e(k,Xt),e(Xt,Wl),e(k,Vl),e(k,Ao),e(Ao,Ql),e(k,Gl),e(k,Do),e(Do,Hl),e(k,Kl),p(o,pr,u),f(No,o,u),p(o,hr,u),p(o,me,u),e(me,Xl),e(me,fa),e(fa,Yl),e(me,Zl),e(me,ga),e(ga,Jl),e(me,ec),p(o,mr,u),f(Oo,o,u),p(o,fr,u),p(o,Ge,u),e(Ge,oc),e(Ge,Yt),e(Yt,tc),e(Ge,nc),p(o,gr,u),p(o,fe,u),e(fe,j),e(j,ac),e(j,Zt),e(Zt,sc),e(j,rc),e(j,_a),e(_a,ic),e(j,lc),e(j,va),e(va,cc),e(j,dc),e(j,Jt),e(Jt,uc),e(j,pc),e(j,ya),e(ya,hc),e(j,mc),e(j,ba),e(ba,fc),e(j,gc),e(j,La),e(La,_c),e(j,vc),e(fe,yc),e
(fe,So),e(So,bc),e(So,en),e(en,Lc),e(So,wc),e(fe,kc),e(fe,Ro),e(Ro,xc),e(Ro,wa),e(wa,Mc),e(Ro,Tc),p(o,_r,u),p(o,He,u),e(He,Ec),e(He,on),e(on,zc),e(He,$c),p(o,vr,u),p(o,qe,u),e(qe,Ke),e(Ke,ka),f(Uo,ka,null),e(qe,qc),e(qe,xa),e(xa,Fc),p(o,yr,u),p(o,W,u),e(W,Pc),e(W,tn),e(tn,Cc),e(W,jc),e(W,nn),e(nn,Ic),e(W,Ac),e(W,an),e(an,Dc),e(W,Nc),e(W,sn),e(sn,Oc),e(W,Sc),p(o,br,u),f(Bo,o,u),p(o,Lr,u),p(o,T,u),e(T,Rc),e(T,rn),e(rn,Uc),e(T,Bc),e(T,ln),e(ln,Wc),e(T,Vc),e(T,Ma),e(Ma,Qc),e(T,Gc),e(T,cn),e(cn,Hc),e(T,Kc),e(T,dn),e(dn,Xc),e(T,Yc),e(T,Ta),e(Ta,Zc),e(T,Jc),e(T,Ea),e(Ea,ed),e(T,od),e(T,za),e(za,td),e(T,nd),e(T,$a),e($a,ad),e(T,sd),e(T,qa),e(qa,rd),e(T,id),p(o,wr,u),p(o,S,u),e(S,un),e(un,ld),e(S,cd),e(S,Wo),e(Wo,dd),e(S,ud),e(S,pn),e(pn,pd),e(S,hd),e(S,Fa),e(Fa,md),e(S,fd),e(S,Pa),e(Pa,gd),e(S,_d),p(o,kr,u),p(o,hn,u),e(hn,vd),p(o,xr,u),p(o,mn,u),e(mn,Ca),e(Ca,yd),p(o,Mr,u),p(o,fn,u),e(fn,bd),p(o,Tr,u),f(Vo,o,u),p(o,Er,u),p(o,gn,u),e(gn,ja),e(ja,Ld),p(o,zr,u),p(o,ge,u),e(ge,wd),e(ge,Ia),e(Ia,kd),e(ge,xd),e(ge,Aa),e(Aa,Md),e(ge,Td),p(o,$r,u),f(Qo,o,u),p(o,qr,u),p(o,_n,u),e(_n,Da),e(Da,Ed),p(o,Fr,u),p(o,V,u),e(V,zd),e(V,Na),e(Na,$d),e(V,qd),e(V,Oa),e(Oa,Fd),e(V,Pd),e(V,Sa),e(Sa,Cd),e(V,jd),e(V,Ra),e(Ra,Id),e(V,Ad),p(o,Pr,u),f(Go,o,u),p(o,Cr,u),p(o,vn,u),e(vn,Ua),e(Ua,Dd),p(o,jr,u),p(o,yn,u),e(yn,Nd),p(o,Ir,u),f(Ho,o,u),p(o,Ar,u),p(o,bn,u),e(bn,Ba),e(Ba,Od),p(o,Dr,u),p(o,Ln,u),e(Ln,Sd),p(o,Nr,u),f(Ko,o,u),p(o,Or,u),p(o,Fe,u),e(Fe,Xe),e(Xe,Wa),f(Xo,Wa,null),e(Fe,Rd),e(Fe,Va),e(Va,Ud),p(o,Sr,u),p(o,R,u),f(Yo,R,null),e(R,Bd),e(R,Pe),e(Pe,Wd),e(Pe,wn),e(wn,Vd),e(Pe,Qd),e(Pe,Zo),e(Zo,Gd),e(Pe,Hd),e(R,Kd),e(R,Ce),e(Ce,Xd),e(Ce,kn),e(kn,Yd),e(Ce,Zd),e(Ce,xn),e(xn,Jd),e(Ce,eu),e(R,ou),e(R,Qa),e(Qa,tu),e(R,nu),f(Jo,R,null),p(o,Rr,u),p(o,je,u),e(je,Ye),e(Ye,Ga),f(et,Ga,null),e(je,au),e(je,Ha),e(Ha,su),p(o,Ur,u),p(o,J,u),f(ot,J,null),e(J,ru),e(J,Ka),e(Ka,iu),e(J,lu),e(J,tt),e(tt,cu),e(tt,Xa),e(Xa,du),e(tt,uu),e(J,pu),e(J,ae),f(nt,ae,null),e(ae,hu),e(ae,Ya),e(Ya,mu),e(ae,fu),e(ae,Za),e(Za,gu),e(ae,_u),f(at,ae,null),p(o,Br,u),p(o,Ie,u),e(Ie,Ze),e(Ze,Ja),f(st,Ja,null),e(Ie,vu),e(Ie,es),e(es,yu),p(o,Wr,u),p(o,A,u),f(rt,A,null),e(A,bu),e(A,D),e(D,Lu),e(D,Mn),e(Mn,wu),e(D,ku),e(D,os),e(os,xu),e(D,Mu),e(D,ts),e(ts,Tu),e(D,Eu),e(D,ns),e(ns,zu),e(D,$u),e(D,as),e(as,qu),e(D,Fu),e(D,ss),e(ss,Pu),e(D,Cu),e(A,ju),e(A,it),e(it,Iu),e(it,Tn),e(Tn,Au),e(it,Du),e(A,Nu),e(A,En),e(En,zn),e(zn,Ou),e(En,Su),e(A,Ru),e(A,Je),f(lt,Je,null),e(Je,Uu),e(Je,rs),e(rs,Bu),e(A,Wu),e(A,is),p(o,Vr,u),p(o,Ae,u),e(Ae,eo),e(eo,ls),f(ct,ls,null),e(Ae,Vu),e(Ae,cs),e(cs,Qu),p(o,Qr,u),p(o,ee,u),f(dt,ee,null),e(ee,Gu),e(ee,ut),e(ut,Hu),e(ut,ds),e(ds,Ku),e(ut,Xu),e(ee,Yu),e(ee,pt),e(pt,Zu),e(pt,$n),e($n,Ju),e(pt,ep),e(ee,op),e(ee,oo),f(ht,oo,null),e(oo,tp),e(oo,us),e(us,np),p(o,Gr,u),p(o,De,u),e(De,to),e(to,ps),f(mt,ps,null),e(De,ap),e(De,hs),e(hs,sp),p(o,Hr,u),p(o,U,u),f(ft,U,null),e(U,rp),e(U,ms),e(ms,ip),e(U,lp),e(U,qn),e(qn,Fn),e(Fn,cp),e(qn,dp),e(U,up),e(U,$),e($,pp),e($,Pn),e(Pn,hp),e($,mp),e($,Cn),e(Cn,fp),e($,gp),e($,jn),e(jn,_p),e($,vp),e($,fs),e(fs,yp),e($,bp),e($,gs),e(gs,Lp),e($,wp),e($,_s),e(_s,kp),e($,xp),e($,vs),e(vs,Mp),e($,Tp),e($,ys),e(ys,Ep),e($,zp),e($,bs),e(bs,$p),e($,qp),e(U,Fp),e(U,_e),f(gt,_e,null),e(_e,Pp),e(_e,x),e(x,Cp),e(x,Ls),e(Ls,jp),e(x,Ip),e(x,_t),e(_t,ws),e(ws,Ap),e(_t,Dp),e(x,Np),e(x,In),e(In,Op),e(x,Sp),e(x,ks),e(ks,Rp),e(x,Up),e(x,xs),e(xs,Bp),e(x,Wp),e(x,vt),e(vt,Ms),e(Ms,Vp),e(vt,Qp),e(x,Gp),e(x,Ts),e(Ts,Hp),e(x,Kp),e(x,An),e(An,Xp),e(x,Yp),e(x,Es),e(Es,Zp),e(x,Jp),e(x,zs),e(zs,eh),e(x,oh),e(x,$s),e($s,th),e(x,nh),e(x
,qs),e(qs,ah),e(x,sh),e(_e,rh),e(_e,Fs),e(Fs,ih),p(o,Kr,u),p(o,Ne,u),e(Ne,no),e(no,Ps),f(yt,Ps,null),e(Ne,lh),e(Ne,Cs),e(Cs,ch),p(o,Xr,u),p(o,le,u),f(bt,le,null),e(le,dh),e(le,Lt),e(Lt,uh),e(Lt,wt),e(wt,ph),e(Lt,hh),e(le,mh),e(le,Q),f(kt,Q,null),e(Q,fh),e(Q,Oe),e(Oe,gh),e(Oe,Dn),e(Dn,_h),e(Oe,vh),e(Oe,js),e(js,yh),e(Oe,bh),e(Q,Lh),f(ao,Q,null),e(Q,wh),e(Q,Is),e(Is,kh),e(Q,xh),f(xt,Q,null),p(o,Yr,u),p(o,Se,u),e(Se,so),e(so,As),f(Mt,As,null),e(Se,Mh),e(Se,Ds),e(Ds,Th),p(o,Zr,u),p(o,oe,u),f(Tt,oe,null),e(oe,Eh),e(oe,Et),e(Et,zh),e(Et,Ns),e(Ns,$h),e(Et,qh),e(oe,Fh),e(oe,zt),e(zt,Ph),e(zt,$t),e($t,Ch),e(zt,jh),e(oe,Ih),e(oe,G),f(qt,G,null),e(G,Ah),e(G,Re),e(Re,Dh),e(Re,Nn),e(Nn,Nh),e(Re,Oh),e(Re,Os),e(Os,Sh),e(Re,Rh),e(G,Uh),f(ro,G,null),e(G,Bh),e(G,Ss),e(Ss,Wh),e(G,Vh),f(Ft,G,null),p(o,Jr,u),p(o,Ue,u),e(Ue,io),e(io,Rs),f(Pt,Rs,null),e(Ue,Qh),e(Ue,Us),e(Us,Gh),p(o,ei,u),p(o,te,u),f(Ct,te,null),e(te,Hh),e(te,ce),e(ce,Kh),e(ce,Bs),e(Bs,Xh),e(ce,On),e(On,Yh),e(On,Ws),e(Ws,Zh),e(ce,Jh),e(ce,Vs),e(Vs,em),e(ce,Sn),e(Sn,om),e(Sn,Qs),e(Qs,tm),e(ce,nm),e(te,am),e(te,jt),e(jt,sm),e(jt,It),e(It,rm),e(jt,im),e(te,lm),e(te,H),f(At,H,null),e(H,cm),e(H,Be),e(Be,dm),e(Be,Rn),e(Rn,um),e(Be,pm),e(Be,Gs),e(Gs,hm),e(Be,mm),e(H,fm),f(lo,H,null),e(H,gm),e(H,Hs),e(Hs,_m),e(H,vm),f(Dt,H,null),p(o,oi,u),p(o,We,u),e(We,co),e(co,Ks),f(Nt,Ks,null),e(We,ym),e(We,Xs),e(Xs,bm),p(o,ti,u),p(o,ne,u),f(Ot,ne,null),e(ne,Lm),e(ne,de),e(de,wm),e(de,Ys),e(Ys,km),e(de,xm),e(de,Zs),e(Zs,Mm),e(de,Tm),e(de,Js),e(Js,Em),e(de,zm),e(ne,$m),e(ne,St),e(St,qm),e(St,Rt),e(Rt,Fm),e(St,Pm),e(ne,Cm),e(ne,K),f(Ut,K,null),e(K,jm),e(K,Ve),e(Ve,Im),e(Ve,Un),e(Un,Am),e(Ve,Dm),e(Ve,er),e(er,Nm),e(Ve,Om),e(K,Sm),f(uo,K,null),e(K,Rm),e(K,or),e(or,Um),e(K,Bm),f(Bt,K,null),ni=!0},p(o,[u]){const Wt={};u&2&&(Wt.$$scope={dirty:u,ctx:o}),ao.$set(Wt);const tr={};u&2&&(tr.$$scope={dirty:u,ctx:o}),ro.$set(tr);const nr={};u&2&&(nr.$$scope={dirty:u,ctx:o}),lo.$set(nr);const 
ar={};u&2&&(ar.$$scope={dirty:u,ctx:o}),uo.$set(ar)},i(o){ni||(g(L.$$.fragment,o),g(bo.$$.fragment,o),g(No.$$.fragment,o),g(Oo.$$.fragment,o),g(Uo.$$.fragment,o),g(Bo.$$.fragment,o),g(Vo.$$.fragment,o),g(Qo.$$.fragment,o),g(Go.$$.fragment,o),g(Ho.$$.fragment,o),g(Ko.$$.fragment,o),g(Xo.$$.fragment,o),g(Yo.$$.fragment,o),g(Jo.$$.fragment,o),g(et.$$.fragment,o),g(ot.$$.fragment,o),g(nt.$$.fragment,o),g(at.$$.fragment,o),g(st.$$.fragment,o),g(rt.$$.fragment,o),g(lt.$$.fragment,o),g(ct.$$.fragment,o),g(dt.$$.fragment,o),g(ht.$$.fragment,o),g(mt.$$.fragment,o),g(ft.$$.fragment,o),g(gt.$$.fragment,o),g(yt.$$.fragment,o),g(bt.$$.fragment,o),g(kt.$$.fragment,o),g(ao.$$.fragment,o),g(xt.$$.fragment,o),g(Mt.$$.fragment,o),g(Tt.$$.fragment,o),g(qt.$$.fragment,o),g(ro.$$.fragment,o),g(Ft.$$.fragment,o),g(Pt.$$.fragment,o),g(Ct.$$.fragment,o),g(At.$$.fragment,o),g(lo.$$.fragment,o),g(Dt.$$.fragment,o),g(Nt.$$.fragment,o),g(Ot.$$.fragment,o),g(Ut.$$.fragment,o),g(uo.$$.fragment,o),g(Bt.$$.fragment,o),ni=!0)},o(o){_(L.$$.fragment,o),_(bo.$$.fragment,o),_(No.$$.fragment,o),_(Oo.$$.fragment,o),_(Uo.$$.fragment,o),_(Bo.$$.fragment,o),_(Vo.$$.fragment,o),_(Qo.$$.fragment,o),_(Go.$$.fragment,o),_(Ho.$$.fragment,o),_(Ko.$$.fragment,o),_(Xo.$$.fragment,o),_(Yo.$$.fragment,o),_(Jo.$$.fragment,o),_(et.$$.fragment,o),_(ot.$$.fragment,o),_(nt.$$.fragment,o),_(at.$$.fragment,o),_(st.$$.fragment,o),_(rt.$$.fragment,o),_(lt.$$.fragment,o),_(ct.$$.fragment,o),_(dt.$$.fragment,o),_(ht.$$.fragment,o),_(mt.$$.fragment,o),_(ft.$$.fragment,o),_(gt.$$.fragment,o),_(yt.$$.fragment,o),_(bt.$$.fragment,o),_(kt.$$.fragment,o),_(ao.$$.fragment,o),_(xt.$$.fragment,o),_(Mt.$$.fragment,o),_(Tt.$$.fragment,o),_(qt.$$.fragment,o),_(ro.$$.fragment,o),_(Ft.$$.fragment,o),_(Pt.$$.fragment,o),_(Ct.$$.fragment,o),_(At.$$.fragment,o),_(lo.$$.fragment,o),_(Dt.$$.fragment,o),_(Nt.$$.fragment,o),_(Ot.$$.fragment,o),_(Ut.$$.fragment,o),_(uo.$$.fragment,o),_(Bt.$$.fragment,o),ni=!1},d(o){t(y),o&&t(q),o&&t(b),v(L),o&&t(sr),o&&t(ze),v(bo),o&&t(rr),o&&t(pe),o&&t(ir),o&&t(he),o&&t(lr),o&&t(Qt),o&&t(cr),o&&t(Gt),o&&t(dr),o&&t(Ht),o&&t(ur),o&&t(B),o&&t(pr),v(No,o),o&&t(hr),o&&t(me),o&&t(mr),v(Oo,o),o&&t(fr),o&&t(Ge),o&&t(gr),o&&t(fe),o&&t(_r),o&&t(He),o&&t(vr),o&&t(qe),v(Uo),o&&t(yr),o&&t(W),o&&t(br),v(Bo,o),o&&t(Lr),o&&t(T),o&&t(wr),o&&t(S),o&&t(kr),o&&t(hn),o&&t(xr),o&&t(mn),o&&t(Mr),o&&t(fn),o&&t(Tr),v(Vo,o),o&&t(Er),o&&t(gn),o&&t(zr),o&&t(ge),o&&t($r),v(Qo,o),o&&t(qr),o&&t(_n),o&&t(Fr),o&&t(V),o&&t(Pr),v(Go,o),o&&t(Cr),o&&t(vn),o&&t(jr),o&&t(yn),o&&t(Ir),v(Ho,o),o&&t(Ar),o&&t(bn),o&&t(Dr),o&&t(Ln),o&&t(Nr),v(Ko,o),o&&t(Or),o&&t(Fe),v(Xo),o&&t(Sr),o&&t(R),v(Yo),v(Jo),o&&t(Rr),o&&t(je),v(et),o&&t(Ur),o&&t(J),v(ot),v(nt),v(at),o&&t(Br),o&&t(Ie),v(st),o&&t(Wr),o&&t(A),v(rt),v(lt),o&&t(Vr),o&&t(Ae),v(ct),o&&t(Qr),o&&t(ee),v(dt),v(ht),o&&t(Gr),o&&t(De),v(mt),o&&t(Hr),o&&t(U),v(ft),v(gt),o&&t(Kr),o&&t(Ne),v(yt),o&&t(Xr),o&&t(le),v(bt),v(kt),v(ao),v(xt),o&&t(Yr),o&&t(Se),v(Mt),o&&t(Zr),o&&t(oe),v(Tt),v(qt),v(ro),v(Ft),o&&t(Jr),o&&t(Ue),v(Pt),o&&t(ei),o&&t(te),v(Ct),v(At),v(lo),v(Dt),o&&t(oi),o&&t(We),v(Nt),o&&t(ti),o&&t(ne),v(Ot),v(Ut),v(uo),v(Bt)}}}const Ev={local:"layoutlmv2",sections:[{local:"overview",title:"Overview"},{local:"usage-layoutlmv2processor",title:"Usage: 
LayoutLMv2Processor"},{local:"transformers.LayoutLMv2Config",title:"LayoutLMv2Config"},{local:"transformers.LayoutLMv2FeatureExtractor",title:"LayoutLMv2FeatureExtractor"},{local:"transformers.LayoutLMv2Tokenizer",title:"LayoutLMv2Tokenizer"},{local:"transformers.LayoutLMv2TokenizerFast",title:"LayoutLMv2TokenizerFast"},{local:"transformers.LayoutLMv2Processor",title:"LayoutLMv2Processor"},{local:"transformers.LayoutLMv2Model",title:"LayoutLMv2Model"},{local:"transformers.LayoutLMv2ForSequenceClassification",title:"LayoutLMv2ForSequenceClassification"},{local:"transformers.LayoutLMv2ForTokenClassification",title:"LayoutLMv2ForTokenClassification"},{local:"transformers.LayoutLMv2ForQuestionAnswering",title:"LayoutLMv2ForQuestionAnswering"}],title:"LayoutLMV2"};function zv(ie,y,q){let{fw:b}=y;return ie.$$set=M=>{"fw"in M&&q(0,b=M.fw)},[b]}class Iv extends vv{constructor(y){super();yv(this,y,zv,Tv,bv,{fw:0})}}export{Iv as default,Ev as metadata};
9,934
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/byt5.mdx-2bba9a19.js
import{S as Go,i as Mo,s as Wo,e as s,k as d,w as k,t as a,L as Uo,c as n,d as o,m as c,a as r,x as g,h as i,b as p,J as t,g as f,y,K as Vo,q as v,o as b,B as w}from"../../chunks/vendor-b1433968.js";import{D as Le}from"../../chunks/Docstring-ff504c58.js";import{C as Ro}from"../../chunks/CodeBlock-a320dbd7.js";import{I as at}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ho(it){let T,te,u,_,he,N,lt,ue,dt,Ae,q,B,_e,F,ct,ke,pt,je,L,ft,O,mt,ht,De,oe,ut,Pe,se,ge,_t,Ce,$,kt,R,gt,yt,G,vt,bt,Ie,A,wt,ne,Tt,$t,Se,re,xt,Ne,z,j,ye,M,qt,ve,zt,Fe,ae,Et,Oe,W,Re,ie,Bt,Ge,U,Me,E,D,be,V,Lt,we,At,We,m,H,jt,Te,Dt,Pt,X,Ct,le,It,St,Nt,x,K,Ft,$e,Ot,Rt,J,de,Gt,xe,Mt,Wt,ce,Ut,qe,Vt,Ht,P,Q,Xt,ze,Kt,Jt,C,Y,Qt,Ee,Yt,Zt,I,Z,eo,ee,to,Be,oo,so,Ue,S,no,pe,ro,ao,Ve;return N=new at({}),F=new at({}),M=new at({}),W=new Ro({props:{code:`from transformers import T5ForConditionalGeneration import torch model = T5ForConditionalGeneration.from_pretrained('google/byt5-small') input_ids = torch.tensor([list("Life is like a box of chocolates.".encode("utf-8"))]) + 3 # add 3 for special tokens labels = torch.tensor([list("La vie est comme une bo\xEEte de chocolat.".encode("utf-8"))]) + 3 # add 3 for special tokens loss = model(input_ids, labels=labels).loss # forward pass,`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration <span class="hljs-keyword">import</span> torch model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;google/byt5-small&#x27;</span>) input_ids = torch.tensor([<span class="hljs-built_in">list</span>(<span class="hljs-string">&quot;Life is like a box of chocolates.&quot;</span>.encode(<span class="hljs-string">&quot;utf-8&quot;</span>))]) + <span class="hljs-number">3</span> <span class="hljs-comment"># add 3 for special tokens</span> labels = torch.tensor([<span class="hljs-built_in">list</span>(<span class="hljs-string">&quot;La vie est comme une bo\xEEte de chocolat.&quot;</span>.encode(<span class="hljs-string">&quot;utf-8&quot;</span>))]) + <span class="hljs-number">3</span> <span class="hljs-comment"># add 3 for special tokens</span> loss = model(input_ids, labels=labels).loss <span class="hljs-comment"># forward pass</span>`}}),U=new Ro({props:{code:`from transformers import T5ForConditionalGeneration, AutoTokenizer model = T5ForConditionalGeneration.from_pretrained('google/byt5-small') tokenizer = AutoTokenizer.from_pretrained('google/byt5-small') model_inputs = tokenizer(["Life is like a box of chocolates.", "Today is Monday."], padding="longest", return_tensors="pt") labels = tokenizer(["La vie est comme une bo\xEEte de chocolat.", "Aujourd'hui c'est lundi."], padding="longest", return_tensors="pt").input_ids loss = model(**model_inputs, labels=labels).loss # forward pass,`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration, AutoTokenizer model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;google/byt5-small&#x27;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/byt5-small&#x27;</span>) model_inputs = tokenizer([<span class="hljs-string">&quot;Life is like a box of chocolates.&quot;</span>, <span class="hljs-string">&quot;Today is Monday.&quot;</span>], padding=<span class="hljs-string">&quot;longest&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) labels = 
tokenizer([<span class="hljs-string">&quot;La vie est comme une bo\xEEte de chocolat.&quot;</span>, <span class="hljs-string">&quot;Aujourd&#x27;hui c&#x27;est lundi.&quot;</span>], padding=<span class="hljs-string">&quot;longest&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids loss = model(**model_inputs, labels=labels).loss <span class="hljs-comment"># forward pass</span>`}}),V=new at({}),H=new Le({props:{name:"class transformers.ByT5Tokenizer",anchor:"transformers.ByT5Tokenizer",parameters:[{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"extra_ids",val:" = 125"},{name:"additional_special_tokens",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/byt5/tokenization_byt5.py#L28",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.ByT5Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.ByT5Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.ByT5Tokenizer.extra_ids",description:`<strong>extra_ids</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as &#x201C;<extra<em>id{%d}&gt;&#x201D; where &#x201D;{%d}&#x201D; is a number between 0 and extra_ids-1. 
Extra tokens are indexed from the end of the vocabulary up to beginning (&#x201C;<extra_id_0>&#x201D; is the last token in the vocabulary like in ByT5 preprocessing see <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117" rel="nofollow">here</a>).</extra_id_0></extra<em>`,name:"extra_ids"},{anchor:"transformers.ByT5Tokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),K=new Le({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.ByT5Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/byt5/tokenization_byt5.py#L176",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.ByT5Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Q=new Le({props:{name:"convert_tokens_to_string",anchor:"transformers.ByT5Tokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/byt5/tokenization_byt5.py#L227"}}),Y=new Le({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.ByT5Tokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/byt5/tokenization_byt5.py#L154",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.ByT5Tokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Z=new Le({props:{name:"get_special_tokens_mask",anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/byt5/tokenization_byt5.py#L116",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){T=s("meta"),te=d(),u=s("h1"),_=s("a"),he=s("span"),k(N.$$.fragment),lt=d(),ue=s("span"),dt=a("ByT5"),Ae=d(),q=s("h2"),B=s("a"),_e=s("span"),k(F.$$.fragment),ct=d(),ke=s("span"),pt=a("Overview"),je=d(),L=s("p"),ft=a("The ByT5 model was presented in "),O=s("a"),mt=a("ByT5: Towards a token-free future with pre-trained byte-to-byte models"),ht=a(` by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.`),De=d(),oe=s("p"),ut=a("The abstract from the paper is the following:"),Pe=d(),se=s("p"),ge=s("em"),_t=a(`Most widely-used pre-trained language models operate on sequences of tokens corresponding to word or subword units. Encoding text as a sequence of tokens requires a tokenizer, which is typically created as an independent artifact from the model. Token-free models that instead operate directly on raw text (bytes or characters) have many benefits: they can process text in any language out of the box, they are more robust to noise, and they minimize technical debt by removing complex and error-prone text preprocessing pipelines. Since byte or character sequences are longer than token sequences, past work on token-free models has often introduced new model architectures designed to amortize the cost of operating directly on raw text. In this paper, we show that a standard Transformer architecture can be used with minimal modifications to process byte sequences. We carefully characterize the trade-offs in terms of parameter count, training FLOPs, and inference speed, and show that byte-level models are competitive with their token-level counterparts. We also demonstrate that byte-level models are significantly more robust to noise and perform better on tasks that are sensitive to spelling and pronunciation. As part of our contribution, we release a new set of pre-trained byte-level Transformer models based on the T5 architecture, as well as all code and data used in our experiments.`),Ce=d(),$=s("p"),kt=a("This model was contributed by "),R=s("a"),gt=a("patrickvonplaten"),yt=a(`. The original code can be found `),G=s("a"),vt=a("here"),bt=a("."),Ie=d(),A=s("p"),wt=a("ByT5\u2019s architecture is based on the T5v1.1 model, so one can refer to "),ne=s("a"),Tt=a("T5v1.1\u2019s documentation page"),$t=a(`. They only differ in how inputs should be prepared for the model, see the code examples below.`),Se=d(),re=s("p"),xt=a(`Since ByT5 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. 
If you are doing multi-task fine-tuning, you should use a prefix.`),Ne=d(),z=s("h3"),j=s("a"),ye=s("span"),k(M.$$.fragment),qt=d(),ve=s("span"),zt=a("Example"),Fe=d(),ae=s("p"),Et=a("ByT5 works on raw UTF-8 bytes, so it can be used without a tokenizer:"),Oe=d(),k(W.$$.fragment),Re=d(),ie=s("p"),Bt=a("For batched inference and training it is however recommended to make use of the tokenizer:"),Ge=d(),k(U.$$.fragment),Me=d(),E=s("h2"),D=s("a"),be=s("span"),k(V.$$.fragment),Lt=d(),we=s("span"),At=a("ByT5Tokenizer"),We=d(),m=s("div"),k(H.$$.fragment),jt=d(),Te=s("p"),Dt=a("Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding."),Pt=d(),X=s("p"),Ct=a("This tokenizer inherits from "),le=s("a"),It=a("PreTrainedTokenizer"),St=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Nt=d(),x=s("div"),k(K.$$.fragment),Ft=d(),$e=s("p"),Ot=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Rt=d(),J=s("ul"),de=s("li"),Gt=a("single sequence: "),xe=s("code"),Mt=a("X </s>"),Wt=d(),ce=s("li"),Ut=a("pair of sequences: "),qe=s("code"),Vt=a("A </s> B </s>"),Ht=d(),P=s("div"),k(Q.$$.fragment),Xt=d(),ze=s("p"),Kt=a("Converts a sequence of tokens (string) in a single string."),Jt=d(),C=s("div"),k(Y.$$.fragment),Qt=d(),Ee=s("p"),Yt=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not make use of token type ids, therefore a list of zeros is returned.`),Zt=d(),I=s("div"),k(Z.$$.fragment),eo=d(),ee=s("p"),to=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Be=s("code"),oo=a("prepare_for_model"),so=a(" method."),Ue=d(),S=s("p"),no=a("See "),pe=s("a"),ro=a("ByT5Tokenizer"),ao=a(" for all details."),this.h()},l(e){const l=Uo('[data-svelte="svelte-1phssyn"]',document.head);T=n(l,"META",{name:!0,content:!0}),l.forEach(o),te=c(e),u=n(e,"H1",{class:!0});var He=r(u);_=n(He,"A",{id:!0,class:!0,href:!0});var co=r(_);he=n(co,"SPAN",{});var po=r(he);g(N.$$.fragment,po),po.forEach(o),co.forEach(o),lt=c(He),ue=n(He,"SPAN",{});var fo=r(ue);dt=i(fo,"ByT5"),fo.forEach(o),He.forEach(o),Ae=c(e),q=n(e,"H2",{class:!0});var Xe=r(q);B=n(Xe,"A",{id:!0,class:!0,href:!0});var mo=r(B);_e=n(mo,"SPAN",{});var ho=r(_e);g(F.$$.fragment,ho),ho.forEach(o),mo.forEach(o),ct=c(Xe),ke=n(Xe,"SPAN",{});var uo=r(ke);pt=i(uo,"Overview"),uo.forEach(o),Xe.forEach(o),je=c(e),L=n(e,"P",{});var Ke=r(L);ft=i(Ke,"The ByT5 model was presented in "),O=n(Ke,"A",{href:!0,rel:!0});var _o=r(O);mt=i(_o,"ByT5: Towards a token-free future with pre-trained byte-to-byte models"),_o.forEach(o),ht=i(Ke,` by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.`),Ke.forEach(o),De=c(e),oe=n(e,"P",{});var ko=r(oe);ut=i(ko,"The abstract from the paper is the following:"),ko.forEach(o),Pe=c(e),se=n(e,"P",{});var go=r(se);ge=n(go,"EM",{});var yo=r(ge);_t=i(yo,`Most widely-used pre-trained language models operate on sequences of tokens corresponding to word or subword units. Encoding text as a sequence of tokens requires a tokenizer, which is typically created as an independent artifact from the model. 
Token-free models that instead operate directly on raw text (bytes or characters) have many benefits: they can process text in any language out of the box, they are more robust to noise, and they minimize technical debt by removing complex and error-prone text preprocessing pipelines. Since byte or character sequences are longer than token sequences, past work on token-free models has often introduced new model architectures designed to amortize the cost of operating directly on raw text. In this paper, we show that a standard Transformer architecture can be used with minimal modifications to process byte sequences. We carefully characterize the trade-offs in terms of parameter count, training FLOPs, and inference speed, and show that byte-level models are competitive with their token-level counterparts. We also demonstrate that byte-level models are significantly more robust to noise and perform better on tasks that are sensitive to spelling and pronunciation. As part of our contribution, we release a new set of pre-trained byte-level Transformer models based on the T5 architecture, as well as all code and data used in our experiments.`),yo.forEach(o),go.forEach(o),Ce=c(e),$=n(e,"P",{});var fe=r($);kt=i(fe,"This model was contributed by "),R=n(fe,"A",{href:!0,rel:!0});var vo=r(R);gt=i(vo,"patrickvonplaten"),vo.forEach(o),yt=i(fe,`. The original code can be found `),G=n(fe,"A",{href:!0,rel:!0});var bo=r(G);vt=i(bo,"here"),bo.forEach(o),bt=i(fe,"."),fe.forEach(o),Ie=c(e),A=n(e,"P",{});var Je=r(A);wt=i(Je,"ByT5\u2019s architecture is based on the T5v1.1 model, so one can refer to "),ne=n(Je,"A",{href:!0});var wo=r(ne);Tt=i(wo,"T5v1.1\u2019s documentation page"),wo.forEach(o),$t=i(Je,`. They only differ in how inputs should be prepared for the model, see the code examples below.`),Je.forEach(o),Se=c(e),re=n(e,"P",{});var To=r(re);xt=i(To,`Since ByT5 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix.`),To.forEach(o),Ne=c(e),z=n(e,"H3",{class:!0});var Qe=r(z);j=n(Qe,"A",{id:!0,class:!0,href:!0});var $o=r(j);ye=n($o,"SPAN",{});var xo=r(ye);g(M.$$.fragment,xo),xo.forEach(o),$o.forEach(o),qt=c(Qe),ve=n(Qe,"SPAN",{});var qo=r(ve);zt=i(qo,"Example"),qo.forEach(o),Qe.forEach(o),Fe=c(e),ae=n(e,"P",{});var zo=r(ae);Et=i(zo,"ByT5 works on raw UTF-8 bytes, so it can be used without a tokenizer:"),zo.forEach(o),Oe=c(e),g(W.$$.fragment,e),Re=c(e),ie=n(e,"P",{});var Eo=r(ie);Bt=i(Eo,"For batched inference and training it is however recommended to make use of the tokenizer:"),Eo.forEach(o),Ge=c(e),g(U.$$.fragment,e),Me=c(e),E=n(e,"H2",{class:!0});var Ye=r(E);D=n(Ye,"A",{id:!0,class:!0,href:!0});var Bo=r(D);be=n(Bo,"SPAN",{});var Lo=r(be);g(V.$$.fragment,Lo),Lo.forEach(o),Bo.forEach(o),Lt=c(Ye),we=n(Ye,"SPAN",{});var Ao=r(we);At=i(Ao,"ByT5Tokenizer"),Ao.forEach(o),Ye.forEach(o),We=c(e),m=n(e,"DIV",{class:!0});var h=r(m);g(H.$$.fragment,h),jt=c(h),Te=n(h,"P",{});var jo=r(Te);Dt=i(jo,"Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding."),jo.forEach(o),Pt=c(h),X=n(h,"P",{});var Ze=r(X);Ct=i(Ze,"This tokenizer inherits from "),le=n(Ze,"A",{href:!0});var Do=r(le);It=i(Do,"PreTrainedTokenizer"),Do.forEach(o),St=i(Ze,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ze.forEach(o),Nt=c(h),x=n(h,"DIV",{class:!0});var me=r(x);g(K.$$.fragment,me),Ft=c(me),$e=n(me,"P",{});var Po=r($e);Ot=i(Po,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Po.forEach(o),Rt=c(me),J=n(me,"UL",{});var et=r(J);de=n(et,"LI",{});var io=r(de);Gt=i(io,"single sequence: "),xe=n(io,"CODE",{});var Co=r(xe);Mt=i(Co,"X </s>"),Co.forEach(o),io.forEach(o),Wt=c(et),ce=n(et,"LI",{});var lo=r(ce);Ut=i(lo,"pair of sequences: "),qe=n(lo,"CODE",{});var Io=r(qe);Vt=i(Io,"A </s> B </s>"),Io.forEach(o),lo.forEach(o),et.forEach(o),me.forEach(o),Ht=c(h),P=n(h,"DIV",{class:!0});var tt=r(P);g(Q.$$.fragment,tt),Xt=c(tt),ze=n(tt,"P",{});var So=r(ze);Kt=i(So,"Converts a sequence of tokens (string) in a single string."),So.forEach(o),tt.forEach(o),Jt=c(h),C=n(h,"DIV",{class:!0});var ot=r(C);g(Y.$$.fragment,ot),Qt=c(ot),Ee=n(ot,"P",{});var No=r(Ee);Yt=i(No,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not make use of token type ids, therefore a list of zeros is returned.`),No.forEach(o),ot.forEach(o),Zt=c(h),I=n(h,"DIV",{class:!0});var st=r(I);g(Z.$$.fragment,st),eo=c(st),ee=n(st,"P",{});var nt=r(ee);to=i(nt,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Be=n(nt,"CODE",{});var Fo=r(Be);oo=i(Fo,"prepare_for_model"),Fo.forEach(o),so=i(nt," method."),nt.forEach(o),st.forEach(o),h.forEach(o),Ue=c(e),S=n(e,"P",{});var rt=r(S);no=i(rt,"See "),pe=n(rt,"A",{href:!0});var Oo=r(pe);ro=i(Oo,"ByT5Tokenizer"),Oo.forEach(o),ao=i(rt," for all details."),rt.forEach(o),this.h()},h(){p(T,"name","hf:doc:metadata"),p(T,"content",JSON.stringify(Xo)),p(_,"id","byt5"),p(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(_,"href","#byt5"),p(u,"class","relative group"),p(B,"id","overview"),p(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(B,"href","#overview"),p(q,"class","relative group"),p(O,"href","https://arxiv.org/abs/2105.13626"),p(O,"rel","nofollow"),p(R,"href","https://huggingface.co/patrickvonplaten"),p(R,"rel","nofollow"),p(G,"href","https://github.com/google-research/byt5"),p(G,"rel","nofollow"),p(ne,"href","t5v1.1"),p(j,"id","example"),p(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(j,"href","#example"),p(z,"class","relative group"),p(D,"id","transformers.ByT5Tokenizer"),p(D,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(D,"href","#transformers.ByT5Tokenizer"),p(E,"class","relative 
group"),p(le,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(x,"class","docstring"),p(P,"class","docstring"),p(C,"class","docstring"),p(I,"class","docstring"),p(m,"class","docstring"),p(pe,"href","/docs/transformers/v4.15.0/en/model_doc/byt5#transformers.ByT5Tokenizer")},m(e,l){t(document.head,T),f(e,te,l),f(e,u,l),t(u,_),t(_,he),y(N,he,null),t(u,lt),t(u,ue),t(ue,dt),f(e,Ae,l),f(e,q,l),t(q,B),t(B,_e),y(F,_e,null),t(q,ct),t(q,ke),t(ke,pt),f(e,je,l),f(e,L,l),t(L,ft),t(L,O),t(O,mt),t(L,ht),f(e,De,l),f(e,oe,l),t(oe,ut),f(e,Pe,l),f(e,se,l),t(se,ge),t(ge,_t),f(e,Ce,l),f(e,$,l),t($,kt),t($,R),t(R,gt),t($,yt),t($,G),t(G,vt),t($,bt),f(e,Ie,l),f(e,A,l),t(A,wt),t(A,ne),t(ne,Tt),t(A,$t),f(e,Se,l),f(e,re,l),t(re,xt),f(e,Ne,l),f(e,z,l),t(z,j),t(j,ye),y(M,ye,null),t(z,qt),t(z,ve),t(ve,zt),f(e,Fe,l),f(e,ae,l),t(ae,Et),f(e,Oe,l),y(W,e,l),f(e,Re,l),f(e,ie,l),t(ie,Bt),f(e,Ge,l),y(U,e,l),f(e,Me,l),f(e,E,l),t(E,D),t(D,be),y(V,be,null),t(E,Lt),t(E,we),t(we,At),f(e,We,l),f(e,m,l),y(H,m,null),t(m,jt),t(m,Te),t(Te,Dt),t(m,Pt),t(m,X),t(X,Ct),t(X,le),t(le,It),t(X,St),t(m,Nt),t(m,x),y(K,x,null),t(x,Ft),t(x,$e),t($e,Ot),t(x,Rt),t(x,J),t(J,de),t(de,Gt),t(de,xe),t(xe,Mt),t(J,Wt),t(J,ce),t(ce,Ut),t(ce,qe),t(qe,Vt),t(m,Ht),t(m,P),y(Q,P,null),t(P,Xt),t(P,ze),t(ze,Kt),t(m,Jt),t(m,C),y(Y,C,null),t(C,Qt),t(C,Ee),t(Ee,Yt),t(m,Zt),t(m,I),y(Z,I,null),t(I,eo),t(I,ee),t(ee,to),t(ee,Be),t(Be,oo),t(ee,so),f(e,Ue,l),f(e,S,l),t(S,no),t(S,pe),t(pe,ro),t(S,ao),Ve=!0},p:Vo,i(e){Ve||(v(N.$$.fragment,e),v(F.$$.fragment,e),v(M.$$.fragment,e),v(W.$$.fragment,e),v(U.$$.fragment,e),v(V.$$.fragment,e),v(H.$$.fragment,e),v(K.$$.fragment,e),v(Q.$$.fragment,e),v(Y.$$.fragment,e),v(Z.$$.fragment,e),Ve=!0)},o(e){b(N.$$.fragment,e),b(F.$$.fragment,e),b(M.$$.fragment,e),b(W.$$.fragment,e),b(U.$$.fragment,e),b(V.$$.fragment,e),b(H.$$.fragment,e),b(K.$$.fragment,e),b(Q.$$.fragment,e),b(Y.$$.fragment,e),b(Z.$$.fragment,e),Ve=!1},d(e){o(T),e&&o(te),e&&o(u),w(N),e&&o(Ae),e&&o(q),w(F),e&&o(je),e&&o(L),e&&o(De),e&&o(oe),e&&o(Pe),e&&o(se),e&&o(Ce),e&&o($),e&&o(Ie),e&&o(A),e&&o(Se),e&&o(re),e&&o(Ne),e&&o(z),w(M),e&&o(Fe),e&&o(ae),e&&o(Oe),w(W,e),e&&o(Re),e&&o(ie),e&&o(Ge),w(U,e),e&&o(Me),e&&o(E),w(V),e&&o(We),e&&o(m),w(H),w(K),w(Q),w(Y),w(Z),e&&o(Ue),e&&o(S)}}}const Xo={local:"byt5",sections:[{local:"overview",sections:[{local:"example",title:"Example"}],title:"Overview"},{local:"transformers.ByT5Tokenizer",title:"ByT5Tokenizer"}],title:"ByT5"};function Ko(it,T,te){let{fw:u}=T;return it.$$set=_=>{"fw"in _&&te(0,u=_.fw)},[u]}class ts extends Go{constructor(T){super();Mo(this,T,Ko,Ho,Wo,{fw:0})}}export{ts as default,Xo as metadata};
9,935
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/mluke.mdx-11ebc830.js
import{S as un,i as mn,s as hn,e as o,k as p,w as O,t as a,L as fn,c as s,d as n,m as u,a as i,x as B,h as r,b as l,J as t,g as d,y as C,K as _n,q as X,o as Y,B as H}from"../../chunks/vendor-b1433968.js";import{D as dn}from"../../chunks/Docstring-ff504c58.js";import{C as pn}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Ut}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function gn(Re){let k,V,h,_,le,$,je,ce,Fe,ye,b,L,de,P,Oe,pe,Be,ve,y,Ce,A,Xe,Ye,I,He,Ve,be,J,Je,we,G,Ge,Te,Q,ue,Qe,Le,Z,Ze,xe,N,qe,x,et,ee,tt,nt,ze,U,Ee,q,ot,te,st,it,Me,v,at,S,rt,lt,W,ct,dt,$e,w,z,me,K,pt,he,ut,Pe,m,D,mt,g,ht,ne,ft,_t,oe,gt,kt,R,yt,vt,bt,j,wt,se,Tt,Lt,xt,T,qt,fe,zt,Et,_e,Mt,$t,Pt,E,F,At,ge,It,Nt,ke,Ae;return $=new Ut({}),P=new Ut({}),N=new pn({props:{code:`from transformers import LukeModel model = LukeModel.from_pretrained('studio-ousia/mluke-base'),`,highlighted:`from transformers import LukeModel model = <span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">LukeModel</span>.</span></span>from<span class="hljs-constructor">_pretrained(&#x27;<span class="hljs-params">studio</span>-<span class="hljs-params">ousia</span><span class="hljs-operator">/</span><span class="hljs-params">mluke</span>-<span class="hljs-params">base</span>&#x27;)</span>`}}),U=new pn({props:{code:`from transformers import MLukeTokenizer tokenizer = MLukeTokenizer.from_pretrained('studio-ousia/mluke-base'),`,highlighted:`from transformers import MLukeTokenizer tokenizer = <span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">MLukeTokenizer</span>.</span></span>from<span class="hljs-constructor">_pretrained(&#x27;<span class="hljs-params">studio</span>-<span class="hljs-params">ousia</span><span class="hljs-operator">/</span><span class="hljs-params">mluke</span>-<span class="hljs-params">base</span>&#x27;)</span>`}}),K=new Ut({}),D=new dn({props:{name:"class transformers.MLukeTokenizer",anchor:"transformers.MLukeTokenizer",parameters:[{name:"vocab_file",val:""},{name:"entity_vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"task",val:" = None"},{name:"max_entity_length",val:" = 32"},{name:"max_mention_length",val:" = 30"},{name:"entity_token_1",val:" = '<ent>'"},{name:"entity_token_2",val:" = '<ent2>'"},{name:"entity_unk_token",val:" = '[UNK]'"},{name:"entity_pad_token",val:" = '[PAD]'"},{name:"entity_mask_token",val:" = '[MASK]'"},{name:"entity_mask2_token",val:" = '[MASK2]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mluke/tokenization_mluke.py#L152",parametersDescription:[{anchor:"transformers.MLukeTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.MLukeTokenizer.entity_vocab_file",description:`<strong>entity_vocab_file</strong> (<code>str</code>) &#x2014; Path to the entity vocabulary file.`,name:"entity_vocab_file"},{anchor:"transformers.MLukeTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence 
token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.MLukeTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.MLukeTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.MLukeTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.MLukeTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MLukeTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MLukeTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.MLukeTokenizer.task",description:`<strong>task</strong> (<code>str</code>, <em>optional</em>) &#x2014; Task for which you want to prepare sequences. One of <code>&quot;entity_classification&quot;</code>, <code>&quot;entity_pair_classification&quot;</code>, or <code>&quot;entity_span_classification&quot;</code>. 
If you specify this argument, the entity sequence is automatically created based on the given entity span(s).`,name:"task"},{anchor:"transformers.MLukeTokenizer.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.MLukeTokenizer.max_mention_length",description:`<strong>max_mention_length</strong> (<code>int</code>, <em>optional</em>, defaults to 30) &#x2014; The maximum number of tokens inside an entity span.`,name:"max_mention_length"},{anchor:"transformers.MLukeTokenizer.entity_token_1",description:`<strong>entity_token_1</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. This token is only used when <code>task</code> is set to <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_1"},{anchor:"transformers.MLukeTokenizer.entity_token_2",description:`<strong>entity_token_2</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent2&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. This token is only used when <code>task</code> is set to <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_2"},{anchor:"transformers.MLukeTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.MLukeTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),F=new dn({props:{name:"__call__",anchor:"transformers.MLukeTokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"entity_spans",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entity_spans_pair",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entities",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"entities_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_entity_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": typing.Optional[bool] = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mluke/tokenization_mluke.py#L366",parametersDescription:[{anchor:"transformers.MLukeTokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text"},{anchor:"transformers.MLukeTokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text_pair"},{anchor:"transformers.MLukeTokenizer.__call__.entity_spans",description:`<strong>entity_spans</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. 
Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code> as the <code>task</code> argument in the constructor, the length of each sequence must be 1 or 2, respectively. If you specify <code>entities</code>, the length of each sequence must be equal to the length of each sequence of <code>entities</code>.`,name:"entity_spans"},{anchor:"transformers.MLukeTokenizer.__call__.entity_spans_pair",description:`<strong>entity_spans_pair</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the <code>task</code> argument in the constructor, this argument is ignored. If you specify <code>entities_pair</code>, the length of each sequence must be equal to the length of each sequence of <code>entities_pair</code>.`,name:"entity_spans_pair"},{anchor:"transformers.MLukeTokenizer.__call__.entities",description:`<strong>entities</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans</code>. If you specify <code>entity_spans</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities"},{anchor:"transformers.MLukeTokenizer.__call__.entities_pair",description:`<strong>entities_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans_pair</code>. 
If you specify <code>entity_spans_pair</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities_pair"},{anchor:"transformers.MLukeTokenizer.__call__.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.MLukeTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.MLukeTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.MLukeTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.MLukeTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.MLukeTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.MLukeTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.MLukeTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.MLukeTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.MLukeTokenizer.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.MLukeTokenizer.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.MLukeTokenizer.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.MLukeTokenizer.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.MLukeTokenizer.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.MLukeTokenizer.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.MLukeTokenizer.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_ids</strong> \u2014 List of entity ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>entity_position_ids</strong> \u2014 List of entity positions in the input sequence to be fed to a model.</p> </li> <li> <p><strong>entity_token_type_ids</strong> \u2014 List of entity token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Centity_token_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>entity_attention_mask</strong> \u2014 List of indices specifying which entities should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Centity_attention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_start_positions</strong> \u2014 List of the start positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>entity_end_positions</strong> \u2014 List of the end positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),{c(){k=o("meta"),V=p(),h=o("h1"),_=o("a"),le=o("span"),O($.$$.fragment),je=p(),ce=o("span"),Fe=a("mLUKE"),ye=p(),b=o("h2"),L=o("a"),de=o("span"),O(P.$$.fragment),Oe=p(),pe=o("span"),Be=a("Overview"),ve=p(),y=o("p"),Ce=a("The mLUKE model was proposed in 
"),A=o("a"),Xe=a("mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),Ye=a(` by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka. It\u2019s a multilingual extension of the `),I=o("a"),He=a("LUKE model"),Ve=a(" trained on the basis of XLM-RoBERTa."),be=p(),J=o("p"),Je=a(`It is based on XLM-RoBERTa and adds entity embeddings, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive question answering, relation classification, cloze-style knowledge completion.`),we=p(),G=o("p"),Ge=a("The abstract from the paper is the following:"),Te=p(),Q=o("p"),ue=o("em"),Qe=a(`Recent studies have shown that multilingual pretrained language models can be effectively improved with cross-lingual alignment information from Wikipedia entities. However, existing methods only exploit entity information in pretraining and do not explicitly use entities in downstream tasks. In this study, we explore the effectiveness of leveraging entity representations for downstream cross-lingual tasks. We train a multilingual language model with 24 languages with entity representations and show the model consistently outperforms word-based pretrained models in various cross-lingual transfer tasks. We also analyze the model and the key insight is that incorporating entity representations into the input allows us to extract more language-agnostic features. We also evaluate the model with a multilingual cloze prompt task with the mLAMA dataset. We show that entity-based prompt elicits correct factual knowledge more likely than using only word representations.`),Le=p(),Z=o("p"),Ze=a("One can directly plug in the weights of mLUKE into a LUKE model, like so:"),xe=p(),O(N.$$.fragment),qe=p(),x=o("p"),et=a("Note that mLUKE has its own tokenizer, "),ee=o("a"),tt=a("MLukeTokenizer"),nt=a(". You can initialize it as follows:"),ze=p(),O(U.$$.fragment),Ee=p(),q=o("p"),ot=a("As mLUKE\u2019s architecture is equivalent to that of LUKE, one can refer to "),te=o("a"),st=a("LUKE\u2019s documentation page"),it=a(` for all tips, code examples and notebooks.`),Me=p(),v=o("p"),at=a("This model was contributed by "),S=o("a"),rt=a("ryo0634"),lt=a(". The original code can be found "),W=o("a"),ct=a("here"),dt=a("."),$e=p(),w=o("h2"),z=o("a"),me=o("span"),O(K.$$.fragment),pt=p(),he=o("span"),ut=a("MLukeTokenizer"),Pe=p(),m=o("div"),O(D.$$.fragment),mt=p(),g=o("p"),ht=a("Adapted from "),ne=o("a"),ft=a("XLMRobertaTokenizer"),_t=a(" and "),oe=o("a"),gt=a("LukeTokenizer"),kt=a(`. Based on `),R=o("a"),yt=a("SentencePiece"),vt=a("."),bt=p(),j=o("p"),wt=a("This tokenizer inherits from "),se=o("a"),Tt=a("PreTrainedTokenizer"),Lt=a(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),xt=p(),T=o("p"),qt=a(`Attributes: sp_model (`),fe=o("code"),zt=a("SentencePieceProcessor"),Et=a(`): The `),_e=o("em"),Mt=a("SentencePiece"),$t=a(" processor that is used for every conversion (string, tokens and IDs)."),Pt=p(),E=o("div"),O(F.$$.fragment),At=p(),ge=o("p"),It=a(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),Nt=p(),ke=o("div"),this.h()},l(e){const c=fn('[data-svelte="svelte-1phssyn"]',document.head);k=s(c,"META",{name:!0,content:!0}),c.forEach(n),V=u(e),h=s(e,"H1",{class:!0});var Ie=i(h);_=s(Ie,"A",{id:!0,class:!0,href:!0});var St=i(_);le=s(St,"SPAN",{});var Wt=i(le);B($.$$.fragment,Wt),Wt.forEach(n),St.forEach(n),je=u(Ie),ce=s(Ie,"SPAN",{});var Kt=i(ce);Fe=r(Kt,"mLUKE"),Kt.forEach(n),Ie.forEach(n),ye=u(e),b=s(e,"H2",{class:!0});var Ne=i(b);L=s(Ne,"A",{id:!0,class:!0,href:!0});var Dt=i(L);de=s(Dt,"SPAN",{});var Rt=i(de);B(P.$$.fragment,Rt),Rt.forEach(n),Dt.forEach(n),Oe=u(Ne),pe=s(Ne,"SPAN",{});var jt=i(pe);Be=r(jt,"Overview"),jt.forEach(n),Ne.forEach(n),ve=u(e),y=s(e,"P",{});var ie=i(y);Ce=r(ie,"The mLUKE model was proposed in "),A=s(ie,"A",{href:!0,rel:!0});var Ft=i(A);Xe=r(Ft,"mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),Ft.forEach(n),Ye=r(ie,` by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka. It\u2019s a multilingual extension of the `),I=s(ie,"A",{href:!0,rel:!0});var Ot=i(I);He=r(Ot,"LUKE model"),Ot.forEach(n),Ve=r(ie," trained on the basis of XLM-RoBERTa."),ie.forEach(n),be=u(e),J=s(e,"P",{});var Bt=i(J);Je=r(Bt,`It is based on XLM-RoBERTa and adds entity embeddings, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive question answering, relation classification, cloze-style knowledge completion.`),Bt.forEach(n),we=u(e),G=s(e,"P",{});var Ct=i(G);Ge=r(Ct,"The abstract from the paper is the following:"),Ct.forEach(n),Te=u(e),Q=s(e,"P",{});var Xt=i(Q);ue=s(Xt,"EM",{});var Yt=i(ue);Qe=r(Yt,`Recent studies have shown that multilingual pretrained language models can be effectively improved with cross-lingual alignment information from Wikipedia entities. However, existing methods only exploit entity information in pretraining and do not explicitly use entities in downstream tasks. In this study, we explore the effectiveness of leveraging entity representations for downstream cross-lingual tasks. We train a multilingual language model with 24 languages with entity representations and show the model consistently outperforms word-based pretrained models in various cross-lingual transfer tasks. We also analyze the model and the key insight is that incorporating entity representations into the input allows us to extract more language-agnostic features. We also evaluate the model with a multilingual cloze prompt task with the mLAMA dataset. We show that entity-based prompt elicits correct factual knowledge more likely than using only word representations.`),Yt.forEach(n),Xt.forEach(n),Le=u(e),Z=s(e,"P",{});var Ht=i(Z);Ze=r(Ht,"One can directly plug in the weights of mLUKE into a LUKE model, like so:"),Ht.forEach(n),xe=u(e),B(N.$$.fragment,e),qe=u(e),x=s(e,"P",{});var Ue=i(x);et=r(Ue,"Note that mLUKE has its own tokenizer, "),ee=s(Ue,"A",{href:!0});var Vt=i(ee);tt=r(Vt,"MLukeTokenizer"),Vt.forEach(n),nt=r(Ue,". 
You can initialize it as follows:"),Ue.forEach(n),ze=u(e),B(U.$$.fragment,e),Ee=u(e),q=s(e,"P",{});var Se=i(q);ot=r(Se,"As mLUKE\u2019s architecture is equivalent to that of LUKE, one can refer to "),te=s(Se,"A",{href:!0});var Jt=i(te);st=r(Jt,"LUKE\u2019s documentation page"),Jt.forEach(n),it=r(Se,` for all tips, code examples and notebooks.`),Se.forEach(n),Me=u(e),v=s(e,"P",{});var ae=i(v);at=r(ae,"This model was contributed by "),S=s(ae,"A",{href:!0,rel:!0});var Gt=i(S);rt=r(Gt,"ryo0634"),Gt.forEach(n),lt=r(ae,". The original code can be found "),W=s(ae,"A",{href:!0,rel:!0});var Qt=i(W);ct=r(Qt,"here"),Qt.forEach(n),dt=r(ae,"."),ae.forEach(n),$e=u(e),w=s(e,"H2",{class:!0});var We=i(w);z=s(We,"A",{id:!0,class:!0,href:!0});var Zt=i(z);me=s(Zt,"SPAN",{});var en=i(me);B(K.$$.fragment,en),en.forEach(n),Zt.forEach(n),pt=u(We),he=s(We,"SPAN",{});var tn=i(he);ut=r(tn,"MLukeTokenizer"),tn.forEach(n),We.forEach(n),Pe=u(e),m=s(e,"DIV",{class:!0});var f=i(m);B(D.$$.fragment,f),mt=u(f),g=s(f,"P",{});var M=i(g);ht=r(M,"Adapted from "),ne=s(M,"A",{href:!0});var nn=i(ne);ft=r(nn,"XLMRobertaTokenizer"),nn.forEach(n),_t=r(M," and "),oe=s(M,"A",{href:!0});var on=i(oe);gt=r(on,"LukeTokenizer"),on.forEach(n),kt=r(M,`. Based on `),R=s(M,"A",{href:!0,rel:!0});var sn=i(R);yt=r(sn,"SentencePiece"),sn.forEach(n),vt=r(M,"."),M.forEach(n),bt=u(f),j=s(f,"P",{});var Ke=i(j);wt=r(Ke,"This tokenizer inherits from "),se=s(Ke,"A",{href:!0});var an=i(se);Tt=r(an,"PreTrainedTokenizer"),an.forEach(n),Lt=r(Ke,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ke.forEach(n),xt=u(f),T=s(f,"P",{});var re=i(T);qt=r(re,`Attributes: sp_model (`),fe=s(re,"CODE",{});var rn=i(fe);zt=r(rn,"SentencePieceProcessor"),rn.forEach(n),Et=r(re,`): The `),_e=s(re,"EM",{});var ln=i(_e);Mt=r(ln,"SentencePiece"),ln.forEach(n),$t=r(re," processor that is used for every conversion (string, tokens and IDs)."),re.forEach(n),Pt=u(f),E=s(f,"DIV",{class:!0});var De=i(E);B(F.$$.fragment,De),At=u(De),ge=s(De,"P",{});var cn=i(ge);It=r(cn,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),cn.forEach(n),De.forEach(n),Nt=u(f),ke=s(f,"DIV",{class:!0}),i(ke).forEach(n),f.forEach(n),this.h()},h(){l(k,"name","hf:doc:metadata"),l(k,"content",JSON.stringify(kn)),l(_,"id","mluke"),l(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_,"href","#mluke"),l(h,"class","relative group"),l(L,"id","overview"),l(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(L,"href","#overview"),l(b,"class","relative group"),l(A,"href","https://arxiv.org/abs/2110.08151"),l(A,"rel","nofollow"),l(I,"href","https://arxiv.org/abs/2010.01057"),l(I,"rel","nofollow"),l(ee,"href","/docs/transformers/v4.15.0/en/model_doc/mluke#transformers.MLukeTokenizer"),l(te,"href","/docs/transformers/v4.15.0/en/luke"),l(S,"href","https://huggingface.co/ryo0634"),l(S,"rel","nofollow"),l(W,"href","https://github.com/studio-ousia/luke"),l(W,"rel","nofollow"),l(z,"id","transformers.MLukeTokenizer"),l(z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(z,"href","#transformers.MLukeTokenizer"),l(w,"class","relative group"),l(ne,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizer"),l(oe,"href","/docs/transformers/v4.15.0/en/model_doc/luke#transformers.LukeTokenizer"),l(R,"href","https://github.com/google/sentencepiece"),l(R,"rel","nofollow"),l(se,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(E,"class","docstring"),l(ke,"class","docstring"),l(m,"class","docstring")},m(e,c){t(document.head,k),d(e,V,c),d(e,h,c),t(h,_),t(_,le),C($,le,null),t(h,je),t(h,ce),t(ce,Fe),d(e,ye,c),d(e,b,c),t(b,L),t(L,de),C(P,de,null),t(b,Oe),t(b,pe),t(pe,Be),d(e,ve,c),d(e,y,c),t(y,Ce),t(y,A),t(A,Xe),t(y,Ye),t(y,I),t(I,He),t(y,Ve),d(e,be,c),d(e,J,c),t(J,Je),d(e,we,c),d(e,G,c),t(G,Ge),d(e,Te,c),d(e,Q,c),t(Q,ue),t(ue,Qe),d(e,Le,c),d(e,Z,c),t(Z,Ze),d(e,xe,c),C(N,e,c),d(e,qe,c),d(e,x,c),t(x,et),t(x,ee),t(ee,tt),t(x,nt),d(e,ze,c),C(U,e,c),d(e,Ee,c),d(e,q,c),t(q,ot),t(q,te),t(te,st),t(q,it),d(e,Me,c),d(e,v,c),t(v,at),t(v,S),t(S,rt),t(v,lt),t(v,W),t(W,ct),t(v,dt),d(e,$e,c),d(e,w,c),t(w,z),t(z,me),C(K,me,null),t(w,pt),t(w,he),t(he,ut),d(e,Pe,c),d(e,m,c),C(D,m,null),t(m,mt),t(m,g),t(g,ht),t(g,ne),t(ne,ft),t(g,_t),t(g,oe),t(oe,gt),t(g,kt),t(g,R),t(R,yt),t(g,vt),t(m,bt),t(m,j),t(j,wt),t(j,se),t(se,Tt),t(j,Lt),t(m,xt),t(m,T),t(T,qt),t(T,fe),t(fe,zt),t(T,Et),t(T,_e),t(_e,Mt),t(T,$t),t(m,Pt),t(m,E),C(F,E,null),t(E,At),t(E,ge),t(ge,It),t(m,Nt),t(m,ke),Ae=!0},p:_n,i(e){Ae||(X($.$$.fragment,e),X(P.$$.fragment,e),X(N.$$.fragment,e),X(U.$$.fragment,e),X(K.$$.fragment,e),X(D.$$.fragment,e),X(F.$$.fragment,e),Ae=!0)},o(e){Y($.$$.fragment,e),Y(P.$$.fragment,e),Y(N.$$.fragment,e),Y(U.$$.fragment,e),Y(K.$$.fragment,e),Y(D.$$.fragment,e),Y(F.$$.fragment,e),Ae=!1},d(e){n(k),e&&n(V),e&&n(h),H($),e&&n(ye),e&&n(b),H(P),e&&n(ve),e&&n(y),e&&n(be),e&&n(J),e&&n(we),e&&n(G),e&&n(Te),e&&n(Q),e&&n(Le),e&&n(Z),e&&n(xe),H(N,e),e&&n(qe),e&&n(x),e&&n(ze),H(U,e),e&&n(Ee),e&&n(q),e&&n(Me),e&&n(v),e&&n($e),e&&n(w),H(K),e&&n(Pe),e&&n(m),H(D),H(F)}}}const kn={local:"mluke",sections:[{local:"overview",title:"Overview"},{local:"transformers.MLukeTokenizer",title:"MLukeTokenizer"}],title:"mLUKE"};function yn(Re,k,V){let{fw:h}=k;return Re.$$set=_=>{"fw"in _&&V(0,h=_.fw)},[h]}class xn extends un{constructor(k){super();mn(this,k,yn,gn,hn,{fw:0})}}export{xn as default,kn as metadata};
9,936
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bart.mdx-5df68fbe.js
import{S as h2,i as u2,s as f2,e as o,k as d,w as f,t as s,L as m2,c as n,d as t,m as l,a,x as m,h as i,b as c,J as e,g as u,y as _,q as g,o as b,B as k}from"../../chunks/vendor-b1433968.js";import{T as Qe}from"../../chunks/Tip-c3840994.js";import{D as q}from"../../chunks/Docstring-ff504c58.js";import{C as X}from"../../chunks/CodeBlock-a320dbd7.js";import{I as D}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function _2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function g2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function b2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function k2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function v2(C){let 
p,B,v,x,z,T,y,F,Re,xe,$,Pe,he,He,ue,fe,Ke,Ue,K,G,Oe,oe,S,I,Ve,ie,de,Je,V,Xe,Ze,U,we,me,We,Z,ne,Ye,et,J,tt,_e,Ae;return{c(){p=o("p"),B=s("TF 2.0 models accepts two formats as inputs:"),v=d(),x=o("ul"),z=o("li"),T=s("having all inputs as keyword arguments (like PyTorch models), or"),y=d(),F=o("li"),Re=s("having all inputs as a list, tuple or dict in the first positional arguments."),xe=d(),$=o("p"),Pe=s("This second option is useful when using "),he=o("code"),He=s("tf.keras.Model.fit"),ue=s(` method which currently requires having all the tensors in the first argument of the model call function: `),fe=o("code"),Ke=s("model(inputs)"),Ue=s("."),K=d(),G=o("p"),Oe=s(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),oe=d(),S=o("ul"),I=o("li"),Ve=s("a single Tensor with "),ie=o("code"),de=s("input_ids"),Je=s(" only and nothing else: "),V=o("code"),Xe=s("model(input_ids)"),Ze=d(),U=o("li"),we=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),me=o("code"),We=s("model([input_ids, attention_mask])"),Z=s(" or "),ne=o("code"),Ye=s("model([input_ids, attention_mask, token_type_ids])"),et=d(),J=o("li"),tt=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),_e=o("code"),Ae=s('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(w){p=n(w,"P",{});var E=a(p);B=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),v=l(w),x=n(w,"UL",{});var H=a(x);z=n(H,"LI",{});var Ot=a(z);T=i(Ot,"having all inputs as keyword arguments (like PyTorch models), or"),Ot.forEach(t),y=l(H),F=n(H,"LI",{});var Se=a(F);Re=i(Se,"having all inputs as a list, tuple or dict in the first positional arguments."),Se.forEach(t),H.forEach(t),xe=l(w),$=n(w,"P",{});var ae=a($);Pe=i(ae,"This second option is useful when using "),he=n(ae,"CODE",{});var At=a(he);He=i(At,"tf.keras.Model.fit"),At.forEach(t),ue=i(ae,` method which currently requires having all the tensors in the first argument of the model call function: `),fe=n(ae,"CODE",{});var Ie=a(fe);Ke=i(Ie,"model(inputs)"),Ie.forEach(t),Ue=i(ae,"."),ae.forEach(t),K=l(w),G=n(w,"P",{});var St=a(G);Oe=i(St,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),St.forEach(t),oe=l(w),S=n(w,"UL",{});var re=a(S);I=n(re,"LI",{});var le=a(I);Ve=i(le,"a single Tensor with "),ie=n(le,"CODE",{});var ge=a(ie);de=i(ge,"input_ids"),ge.forEach(t),Je=i(le," only and nothing else: "),V=n(le,"CODE",{});var ze=a(V);Xe=i(ze,"model(input_ids)"),ze.forEach(t),le.forEach(t),Ze=l(re),U=n(re,"LI",{});var se=a(U);we=i(se,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),me=n(se,"CODE",{});var Ne=a(me);We=i(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),Z=i(se," or "),ne=n(se,"CODE",{});var It=a(ne);Ye=i(It,"model([input_ids, attention_mask, token_type_ids])"),It.forEach(t),se.forEach(t),et=l(re),J=n(re,"LI",{});var Le=a(J);tt=i(Le,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),_e=n(Le,"CODE",{});var Nt=a(_e);Ae=i(Nt,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Nt.forEach(t),Le.forEach(t),re.forEach(t)},m(w,E){u(w,p,E),e(p,B),u(w,v,E),u(w,x,E),e(x,z),e(z,T),e(x,y),e(x,F),e(F,Re),u(w,xe,E),u(w,$,E),e($,Pe),e($,he),e(he,He),e($,ue),e($,fe),e(fe,Ke),e($,Ue),u(w,K,E),u(w,G,E),e(G,Oe),u(w,oe,E),u(w,S,E),e(S,I),e(I,Ve),e(I,ie),e(ie,de),e(I,Je),e(I,V),e(V,Xe),e(S,Ze),e(S,U),e(U,we),e(U,me),e(me,We),e(U,Z),e(U,ne),e(ne,Ye),e(S,et),e(S,J),e(J,tt),e(J,_e),e(_e,Ae)},d(w){w&&t(p),w&&t(v),w&&t(x),w&&t(xe),w&&t($),w&&t(K),w&&t(G),w&&t(oe),w&&t(S)}}}function y2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function T2(C){let p,B,v,x,z,T,y,F,Re,xe,$,Pe,he,He,ue,fe,Ke,Ue,K,G,Oe,oe,S,I,Ve,ie,de,Je,V,Xe,Ze,U,we,me,We,Z,ne,Ye,et,J,tt,_e,Ae;return{c(){p=o("p"),B=s("TF 2.0 models accepts two formats as inputs:"),v=d(),x=o("ul"),z=o("li"),T=s("having all inputs as keyword arguments (like PyTorch models), or"),y=d(),F=o("li"),Re=s("having all inputs as a list, tuple or dict in the first positional arguments."),xe=d(),$=o("p"),Pe=s("This second option is useful when using "),he=o("code"),He=s("tf.keras.Model.fit"),ue=s(` method which currently requires having all the tensors in the first argument of the model call function: `),fe=o("code"),Ke=s("model(inputs)"),Ue=s("."),K=d(),G=o("p"),Oe=s(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),oe=d(),S=o("ul"),I=o("li"),Ve=s("a single Tensor with "),ie=o("code"),de=s("input_ids"),Je=s(" only and nothing else: "),V=o("code"),Xe=s("model(input_ids)"),Ze=d(),U=o("li"),we=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),me=o("code"),We=s("model([input_ids, attention_mask])"),Z=s(" or "),ne=o("code"),Ye=s("model([input_ids, attention_mask, token_type_ids])"),et=d(),J=o("li"),tt=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),_e=o("code"),Ae=s('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(w){p=n(w,"P",{});var E=a(p);B=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),v=l(w),x=n(w,"UL",{});var H=a(x);z=n(H,"LI",{});var Ot=a(z);T=i(Ot,"having all inputs as keyword arguments (like PyTorch models), or"),Ot.forEach(t),y=l(H),F=n(H,"LI",{});var Se=a(F);Re=i(Se,"having all inputs as a list, tuple or dict in the first positional arguments."),Se.forEach(t),H.forEach(t),xe=l(w),$=n(w,"P",{});var ae=a($);Pe=i(ae,"This second option is useful when using "),he=n(ae,"CODE",{});var At=a(he);He=i(At,"tf.keras.Model.fit"),At.forEach(t),ue=i(ae,` method which currently requires having all the tensors in the first argument of the model call function: `),fe=n(ae,"CODE",{});var Ie=a(fe);Ke=i(Ie,"model(inputs)"),Ie.forEach(t),Ue=i(ae,"."),ae.forEach(t),K=l(w),G=n(w,"P",{});var 
St=a(G);Oe=i(St,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),St.forEach(t),oe=l(w),S=n(w,"UL",{});var re=a(S);I=n(re,"LI",{});var le=a(I);Ve=i(le,"a single Tensor with "),ie=n(le,"CODE",{});var ge=a(ie);de=i(ge,"input_ids"),ge.forEach(t),Je=i(le," only and nothing else: "),V=n(le,"CODE",{});var ze=a(V);Xe=i(ze,"model(input_ids)"),ze.forEach(t),le.forEach(t),Ze=l(re),U=n(re,"LI",{});var se=a(U);we=i(se,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),me=n(se,"CODE",{});var Ne=a(me);We=i(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),Z=i(se," or "),ne=n(se,"CODE",{});var It=a(ne);Ye=i(It,"model([input_ids, attention_mask, token_type_ids])"),It.forEach(t),se.forEach(t),et=l(re),J=n(re,"LI",{});var Le=a(J);tt=i(Le,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),_e=n(Le,"CODE",{});var Nt=a(_e);Ae=i(Nt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Nt.forEach(t),Le.forEach(t),re.forEach(t)},m(w,E){u(w,p,E),e(p,B),u(w,v,E),u(w,x,E),e(x,z),e(z,T),e(x,y),e(x,F),e(F,Re),u(w,xe,E),u(w,$,E),e($,Pe),e($,he),e(he,He),e($,ue),e($,fe),e(fe,Ke),e($,Ue),u(w,K,E),u(w,G,E),e(G,Oe),u(w,oe,E),u(w,S,E),e(S,I),e(I,Ve),e(I,ie),e(ie,de),e(I,Je),e(I,V),e(V,Xe),e(S,Ze),e(S,U),e(U,we),e(U,me),e(me,We),e(U,Z),e(U,ne),e(ne,Ye),e(S,et),e(S,J),e(J,tt),e(J,_e),e(_e,Ae)},d(w){w&&t(p),w&&t(v),w&&t(x),w&&t(xe),w&&t($),w&&t(K),w&&t(G),w&&t(oe),w&&t(S)}}}function x2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function w2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function z2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined 
within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function B2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function F2(C){let p,B,v,x,z;return{c(){p=o("p"),B=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),x=s("Module"),z=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=n(T,"P",{});var y=a(p);B=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var F=a(v);x=i(F,"Module"),F.forEach(t),z=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,p,y),e(p,B),e(p,v),e(v,x),e(p,z)},d(T){T&&t(p)}}}function q2(C){let 
p,B,v,x,z,T,y,F,Re,xe,$,Pe,he,He,ue,fe,Ke,Ue,K,G,Oe,oe,S,I,Ve,ie,de,Je,V,Xe,Ze,U,we,me,We,Z,ne,Ye,et,J,tt,_e,Ae,w,E,H,Ot,Se,ae,At,Ie,St,re,le,ge,ze,se,Ne,It,Le,Nt,Lc,ct,un,fh,fn,mh,_h,gh,ot,bh,Nr,kh,vh,Ks,yh,Th,mn,xh,wh,zh,vo,_n,Bh,Fh,gn,qh,$h,Dc,Lt,yo,Vs,bn,Eh,Js,Mh,Gc,Be,nt,jh,Xs,Ch,Ph,Lr,Oh,Ah,Dr,Sh,Ih,Nh,Dt,Lh,Gr,Dh,Gh,Zs,Uh,Wh,Qh,Gt,Rh,Ys,Hh,Kh,ei,Vh,Jh,Xh,Ur,Wr,Zh,Yh,eu,Ut,tu,ti,ou,nu,oi,au,ru,Uc,Wt,To,ni,kn,su,ai,iu,Wc,pt,du,ri,lu,cu,si,pu,hu,Qc,vn,Rc,Qt,xo,ii,yn,uu,di,fu,Hc,be,Tn,mu,Rt,_u,Qr,gu,bu,xn,ku,vu,yu,Ht,Tu,Rr,xu,wu,Hr,zu,Bu,Fu,li,qu,$u,wn,Kc,Kt,wo,ci,zn,Eu,pi,Mu,Vc,at,Bn,ju,hi,Cu,Pu,ht,Kr,Ou,Au,Vr,Su,Iu,Jr,Nu,Lu,Jc,Vt,zo,ui,Fn,Du,fi,Gu,Xc,rt,qn,Uu,$n,Wu,mi,Qu,Ru,Hu,ut,Xr,Ku,Vu,Zr,Ju,Xu,Yr,Zu,Yu,Zc,Jt,Bo,_i,En,ef,gi,tf,Yc,De,Mn,of,jn,nf,es,af,rf,sf,Cn,df,Pn,lf,cf,pf,Fe,On,hf,Xt,uf,ts,ff,mf,bi,_f,gf,bf,Fo,kf,ki,vf,yf,An,ep,Zt,qo,vi,Sn,Tf,yi,xf,tp,Ge,In,wf,Nn,zf,os,Bf,Ff,qf,Ln,$f,Dn,Ef,Mf,jf,M,Gn,Cf,Yt,Pf,ns,Of,Af,Ti,Sf,If,Nf,$o,Lf,xi,Df,Gf,wi,zi,Bi,Fi,Uf,Wf,qi,$i,Ei,Mi,Qf,Rf,ji,Ci,Pi,Oi,Hf,Kf,Ai,Si,Un,Eo,Mo,Ii,Wn,Vf,Ni,Jf,Xf,Li,Zf,Yf,Di,em,tm,Gi,Ui,Wi,Qi,om,nm,Ri,Hi,Ki,Vi,am,rm,Ji,Xi,Zi,Yi,sm,im,ed,td,od,nd,dm,op,eo,jo,ad,Qn,lm,rd,cm,np,ke,Rn,pm,sd,hm,um,Hn,fm,as,mm,_m,gm,Kn,bm,Vn,km,vm,ym,Y,Jn,Tm,to,xm,rs,wm,zm,id,Bm,Fm,qm,Co,$m,dd,Em,Mm,Xn,jm,ld,Cm,Pm,Zn,ap,oo,Po,cd,Yn,Om,pd,Am,rp,ve,ea,Sm,no,Im,hd,Nm,Lm,ud,Dm,Gm,Um,ta,Wm,ss,Qm,Rm,Hm,oa,Km,na,Vm,Jm,Xm,qe,aa,Zm,ao,Ym,is,e_,t_,fd,o_,n_,a_,Oo,r_,md,s_,i_,ra,sp,ro,Ao,_d,sa,d_,gd,l_,ip,ia,ft,da,c_,bd,p_,h_,la,dp,so,So,kd,ca,u_,vd,f_,lp,ye,pa,m_,ha,__,ds,g_,b_,k_,ua,v_,fa,y_,T_,x_,Io,w_,$e,ma,z_,io,B_,ls,F_,q_,yd,$_,E_,M_,No,j_,Td,C_,P_,_a,cp,lo,Lo,xd,ga,O_,wd,A_,pp,Te,ba,S_,ka,I_,cs,N_,L_,D_,va,G_,ya,U_,W_,Q_,Do,R_,P,Ta,H_,co,K_,ps,V_,J_,zd,X_,Z_,Y_,Go,eg,Bd,tg,og,Fd,qd,$d,Ed,ng,ag,Md,jd,Cd,Pd,rg,sg,Od,Ad,Sd,Id,ig,dg,Nd,Ld,xa,Uo,Wo,Dd,wa,lg,Gd,cg,pg,Ud,hg,ug,Wd,fg,mg,Qd,Rd,Hd,Kd,_g,gg,Vd,Jd,za,Xd,bg,kg,Qo,Ro,Zd,Ba,vg,Yd,yg,hp,po,Ho,el,Fa,Tg,tl,xg,up,Q,qa,wg,$a,zg,hs,Bg,Fg,qg,Ea,$g,Ma,Eg,Mg,jg,ol,Cg,Pg,st,nl,ja,Og,Ag,al,Ca,Sg,Ig,rl,Pa,Ng,Lg,sl,Oa,Dg,Gg,Ee,Aa,Ug,ho,Wg,il,Qg,Rg,dl,Hg,Kg,Vg,Ko,Jg,ll,Xg,Zg,Sa,Yg,mt,Ia,eb,cl,tb,ob,Na,nb,_t,La,ab,pl,rb,sb,Da,fp,uo,Vo,hl,Ga,ib,ul,db,mp,R,Ua,lb,Wa,cb,us,pb,hb,ub,Qa,fb,Ra,mb,_b,gb,fl,bb,kb,it,ml,Ha,vb,yb,_l,Ka,Tb,xb,gl,Va,wb,zb,bl,Ja,Bb,Fb,j,Xa,qb,fo,$b,kl,Eb,Mb,vl,jb,Cb,Pb,Jo,Ob,yl,Ab,Sb,Tl,xl,wl,zl,Ib,Nb,Bl,Fl,ql,$l,Lb,Db,El,Ml,jl,Cl,Gb,Ub,Pl,Ol,Za,Xo,Zo,Al,Ya,Wb,Sl,Qb,Rb,Il,Hb,Kb,Nl,Vb,Jb,Ll,Dl,Gl,Ul,Xb,Zb,Wl,Ql,Rl,Hl,Yb,ek,Kl,Vl,Jl,Xl,tk,ok,Zl,Yl,ec,tc,nk,ak,gt,er,rk,oc,sk,ik,tr,dk,bt,or,lk,nc,ck,pk,nr,_p,mo,Yo,ac,ar,hk,rc,uk,gp,N,rr,fk,sc,mk,_k,sr,gk,fs,bk,kk,vk,ir,yk,dr,Tk,xk,wk,ic,zk,Bk,dt,dc,lr,Fk,qk,lc,cr,$k,Ek,cc,pr,Mk,jk,pc,hr,Ck,Pk,Me,ur,Ok,_o,Ak,hc,Sk,Ik,uc,Nk,Lk,Dk,en,Gk,fc,Uk,Wk,fr,Qk,kt,mr,Rk,mc,Hk,Kk,_r,Vk,vt,gr,Jk,_c,Xk,Zk,br,bp,go,tn,gc,kr,Yk,bc,ev,kp,L,vr,tv,bo,ov,kc,nv,av,vc,rv,sv,iv,yr,dv,ms,lv,cv,pv,Tr,hv,xr,uv,fv,mv,yc,_v,gv,lt,Tc,wr,bv,kv,xc,zr,vv,yv,wc,Br,Tv,xv,zc,Fr,wv,zv,je,qr,Bv,ko,Fv,Bc,qv,$v,Fc,Ev,Mv,jv,on,Cv,qc,Pv,Ov,$r,Av,yt,Er,Sv,$c,Iv,Nv,Mr,Lv,Tt,jr,Dv,Ec,Gv,Uv,Cr,vp;return T=new D({}),oe=new D({}),Ne=new D({}),bn=new D({}),kn=new D({}),vn=new X({props:{code:`from transformers import BartForConditionalGeneration, BartTokenizer model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0) tok = BartTokenizer.from_pretrained("facebook/bart-large") example_english_phrase = "UN Chief Says There Is No <mask> in Syria" batch = tok(example_english_phrase, return_tensors='pt') generated_ids = 
model.generate(batch['input_ids']) assert tok.batch_decode(generated_ids, skip_special_tokens=True) == ['UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria'],`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartForConditionalGeneration, BartTokenizer model = BartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/bart-large&quot;</span>, forced_bos_token_id=<span class="hljs-number">0</span>) tok = BartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/bart-large&quot;</span>) example_english_phrase = <span class="hljs-string">&quot;UN Chief Says There Is No &lt;mask&gt; in Syria&quot;</span> batch = tok(example_english_phrase, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) generated_ids = model.generate(batch[<span class="hljs-string">&#x27;input_ids&#x27;</span>]) <span class="hljs-keyword">assert</span> tok.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>) == [<span class="hljs-string">&#x27;UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria&#x27;</span>]`}}),yn=new D({}),Tn=new q({props:{name:"class transformers.BartConfig",anchor:"transformers.BartConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"use_cache",val:" = True"},{name:"num_labels",val:" = 3"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"is_encoder_decoder",val:" = True"},{name:"decoder_start_token_id",val:" = 2"},{name:"forced_eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/configuration_bart.py#L36",parametersDescription:[{anchor:"transformers.BartConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the BART model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel">BartModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartModel">TFBartModel</a>.`,name:"vocab_size"},{anchor:"transformers.BartConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.BartConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.BartConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.BartConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.BartConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.BartConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.BartConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in encoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.BartConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.BartConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.BartConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.BartConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.BartConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.BartConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.BartConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.BartConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by dividing by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.BartConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). num_labels &#x2014; (<code>int</code>, <em>optional</em>, defaults to 3): The number of labels to use in <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForSequenceClassification">BartForSequenceClassification</a>.`,name:"use_cache"},{anchor:"transformers.BartConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),wn=new X({props:{code:`from transformers import BartModel, BartConfig # Initializing a BART facebook/bart-large style configuration configuration = BartConfig() # Initializing a model from the facebook/bart-large style configuration model = BartModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartModel, BartConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BART facebook/bart-large style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BartConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/bart-large style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BartModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),zn=new D({}),Bn=new q({props:{name:"class transformers.BartTokenizer",anchor:"transformers.BartTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/tokenization_bart.py#L55"}}),Fn=new D({}),qn=new q({props:{name:"class transformers.BartTokenizerFast",anchor:"transformers.BartTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/tokenization_bart_fast.py#L64"}}),En=new D({}),Mn=new q({props:{name:"class transformers.BartModel",anchor:"transformers.BartModel",parameters:[{name:"config",val:": BartConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1118",parametersDescription:[{anchor:"transformers.BartModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),On=new q({props:{name:"forward",anchor:"transformers.BartModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1145",parametersDescription:[{anchor:"transformers.BartModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BartModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BartModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Bart uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BartModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bart._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BartModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BartModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BartModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BartModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BartModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BartModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BartModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BartModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BartModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BartModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, 
after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fo=new Qe({props:{$$slots:{default:[_2]},$$scope:{ctx:C}}}),An=new X({props:{code:`from transformers import BartTokenizer, BartModel import torch tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') model = BartModel.from_pretrained('facebook/bart-large') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, BartModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BartModel.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Sn=new D({}),In=new q({props:{name:"class transformers.BartForConditionalGeneration",anchor:"transformers.BartForConditionalGeneration",parameters:[{name:"config",val:": BartConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1244",parametersDescription:[{anchor:"transformers.BartForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters 
of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gn=new q({props:{name:"forward",anchor:"transformers.BartForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1283",parametersDescription:[{anchor:"transformers.BartForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BartForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BartForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Bart uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BartForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bart._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BartForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BartForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BartForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BartForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BartForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BartForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BartForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BartForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BartForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BartForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BartForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$o=new Qe({props:{$$slots:{default:[g2]},$$scope:{ctx:C}}}),Wn=new D({}),Qn=new D({}),Rn=new q({props:{name:"class transformers.BartForSequenceClassification",anchor:"transformers.BartForSequenceClassification",parameters:[{name:"config",val:": BartConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1409",parametersDescription:[{anchor:"transformers.BartForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jn=new q({props:{name:"forward",anchor:"transformers.BartForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1422",parametersDescription:[{anchor:"transformers.BartForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BartForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BartForSequenceClassification.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Bart uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BartForSequenceClassification.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bart._prepare_decoder_inputs</code> and modify to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BartForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BartForSequenceClassification.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BartForSequenceClassification.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BartForSequenceClassification.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BartForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. 
inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BartForSequenceClassification.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BartForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BartForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BartForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BartForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BartForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Co=new Qe({props:{$$slots:{default:[b2]},$$scope:{ctx:C}}}),Xn=new X({props:{code:`from transformers import BartTokenizer, BartForSequenceClassification import torch tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForSequenceClassification.from_pretrained('facebook/bart-large') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, BartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BartForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Zn=new X({props:{code:`from transformers import BartTokenizer, BartForSequenceClassification import torch tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForSequenceClassification.from_pretrained('facebook/bart-large', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 
1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, BartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BartForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Yn=new D({}),ea=new q({props:{name:"class transformers.BartForQuestionAnswering",anchor:"transformers.BartForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1533",parametersDescription:[{anchor:"transformers.BartForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),aa=new q({props:{name:"forward",anchor:"transformers.BartForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1545",parametersDescription:[{anchor:"transformers.BartForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BartForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BartForQuestionAnswering.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Bart uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BartForQuestionAnswering.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bart._prepare_decoder_inputs</code> and modify to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BartForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BartForQuestionAnswering.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BartForQuestionAnswering.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BartForQuestionAnswering.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BartForQuestionAnswering.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. 
inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BartForQuestionAnswering.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BartForQuestionAnswering.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BartForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BartForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BartForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BartForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.BartForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention 
layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Oo=new Qe({props:{$$slots:{default:[k2]},$$scope:{ctx:C}}}),ra=new X({props:{code:`from transformers import BartTokenizer, BartForQuestionAnswering import torch tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForQuestionAnswering.from_pretrained('facebook/bart-large') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, BartForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BartForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),sa=new D({}),da=new q({props:{name:"forward",anchor:"transformers.BartForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_bart.py#L1692",parametersDescription:[{anchor:"transformers.BartForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BartForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BartForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BartForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.BartForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BartForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BartForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BartForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.BartForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.BartForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BartForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BartForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),la=new X({props:{code:`from transformers import BartTokenizer, BartForCausalLM tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, BartForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BartForCausalLM.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ca=new D({}),pa=new q({props:{name:"class transformers.TFBartModel",anchor:"transformers.TFBartModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_tf_bart.py#L1180",parametersDescription:[{anchor:"transformers.TFBartModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Io=new Qe({props:{$$slots:{default:[v2]},$$scope:{ctx:C}}}),ma=new q({props:{name:"call",anchor:"transformers.TFBartModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_tf_bart.py#L1195",parametersDescription:[{anchor:"transformers.TFBartModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBartModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBartModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Bart uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.TFBartModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBartModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBartModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBartModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBartModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBartModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBartModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBartModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBartModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBartModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBartModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),No=new Qe({props:{$$slots:{default:[y2]},$$scope:{ctx:C}}}),_a=new X({props:{code:`from transformers import BartTokenizer, TFBartModel import tensorflow as tf tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') model = TFBartModel.from_pretrained('facebook/bart-large') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, TFBartModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBartModel.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ga=new D({}),ba=new q({props:{name:"class transformers.TFBartForConditionalGeneration",anchor:"transformers.TFBartForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_tf_bart.py#L1289",parametersDescription:[{anchor:"transformers.TFBartForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Do=new Qe({props:{$$slots:{default:[T2]},$$scope:{ctx:C}}}),Ta=new q({props:{name:"call",anchor:"transformers.TFBartForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_tf_bart.py#L1324",parametersDescription:[{anchor:"transformers.TFBartForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBartForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBartForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Bart uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.TFBartForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBartForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBartForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBartForConditionalGeneration.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBartForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBartForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBartForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBartForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBartForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBartForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBartForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBartForConditionalGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> 
<p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Go=new Qe({props:{$$slots:{default:[x2]},$$scope:{ctx:C}}}),wa=new D({}),Ba=new D({}),Fa=new D({}),qa=new q({props:{name:"class transformers.FlaxBartModel",anchor:"transformers.FlaxBartModel",parameters:[{name:"config",val:": BartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1243",parametersDescription:[{anchor:"transformers.FlaxBartModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBartModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Aa=new q({props:{name:"__call__",anchor:"transformers.FlaxBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1178",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention 
softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ko=new Qe({props:{$$slots:{default:[w2]},$$scope:{ctx:C}}}),Sa=new X({props:{code:`from transformers import BartTokenizer, FlaxBartModel tokenizer = BartTokenizer.from_pretrained('facebook/bart-base') model = FlaxBartModel.from_pretrained('facebook/bart-base') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartModel.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ia=new q({props:{name:"encode",anchor:"transformers.FlaxBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1002",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Na=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),La=new q({props:{name:"decode",anchor:"transformers.FlaxBartPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1065",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Da=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),Ga=new D({}),Ua=new q({props:{name:"class transformers.FlaxBartForConditionalGeneration",anchor:"transformers.FlaxBartForConditionalGeneration",parameters:[{name:"config",val:": BartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1329",parametersDescription:[{anchor:"transformers.FlaxBartForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBartForConditionalGeneration.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Xa=new q({props:{name:"__call__",anchor:"transformers.FlaxBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1178",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Jo=new Qe({props:{$$slots:{default:[z2]},$$scope:{ctx:C}}}),Ya=new D({}),er=new q({props:{name:"encode",anchor:"transformers.FlaxBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1002",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" 
>transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tr=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),or=new q({props:{name:"decode",anchor:"transformers.FlaxBartForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1333",parametersDescription:[{anchor:"transformers.FlaxBartForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nr=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ar=new D({}),rr=new q({props:{name:"class transformers.FlaxBartForSequenceClassification",anchor:"transformers.FlaxBartForSequenceClassification",parameters:[{name:"config",val:": BartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1635",parametersDescription:[{anchor:"transformers.FlaxBartForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBartForSequenceClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ur=new q({props:{name:"__call__",anchor:"transformers.FlaxBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1178",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),en=new Qe({props:{$$slots:{default:[B2]},$$scope:{ctx:C}}}),fr=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForSequenceClassification tokenizer = BartTokenizer.from_pretrained('facebook/bart-base') model = FlaxBartForSequenceClassification.from_pretrained('facebook/bart-base') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),mr=new q({props:{name:"encode",anchor:"transformers.FlaxBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1002",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_r=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),gr=new q({props:{name:"decode",anchor:"transformers.FlaxBartPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1065",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),br=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),kr=new D({}),vr=new q({props:{name:"class transformers.FlaxBartForQuestionAnswering",anchor:"transformers.FlaxBartForQuestionAnswering",parameters:[{name:"config",val:": BartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1722",parametersDescription:[{anchor:"transformers.FlaxBartForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig">BartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBartForQuestionAnswering.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),qr=new q({props:{name:"__call__",anchor:"transformers.FlaxBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1178",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartConfig" >BartConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute 
the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),on=new Qe({props:{$$slots:{default:[F2]},$$scope:{ctx:C}}}),$r=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForQuestionAnswering tokenizer = BartTokenizer.from_pretrained('facebook/bart-base') model = FlaxBartForQuestionAnswering.from_pretrained('facebook/bart-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='jax') outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Er=new q({props:{name:"encode",anchor:"transformers.FlaxBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = 
None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1002",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mr=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),jr=new q({props:{name:"decode",anchor:"transformers.FlaxBartPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bart/modeling_flax_bart.py#L1065",parametersDescription:[{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBartPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.bart.configuration_bart.BartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Cr=new X({props:{code:`from transformers import BartTokenizer, FlaxBartForConditionalGeneration model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BartTokenizer, FlaxBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large-cnn&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),{c(){p=o("meta"),B=d(),v=o("h1"),x=o("a"),z=o("span"),f(T.$$.fragment),y=d(),F=o("span"),Re=s("BART"),xe=d(),$=o("p"),Pe=o("strong"),he=s("DISCLAIMER:"),He=s(" If you see something strange, file a "),ue=o("a"),fe=s("Github Issue"),Ke=s(` and assign @patrickvonplaten`),Ue=d(),K=o("h2"),G=o("a"),Oe=o("span"),f(oe.$$.fragment),S=d(),I=o("span"),Ve=s("Overview"),ie=d(),de=o("p"),Je=s("The Bart model was proposed in "),V=o("a"),Xe=s(`BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension`),Ze=s(` by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer on 29 Oct, 2019.`),U=d(),we=o("p"),me=s("According to the abstract,"),We=d(),Z=o("ul"),ne=o("li"),Ye=s(`Bart uses a standard 
seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a left-to-right decoder (like GPT).`),et=d(),J=o("li"),tt=s(`The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme, where spans of text are replaced with a single mask token.`),_e=d(),Ae=o("li"),w=s(`BART is particularly effective when fine tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 6 ROUGE.`),E=d(),H=o("p"),Ot=s("This model was contributed by "),Se=o("a"),ae=s("sshleifer"),At=s(". The Authors\u2019 code can be found "),Ie=o("a"),St=s("here"),re=s("."),le=d(),ge=o("h3"),ze=o("a"),se=o("span"),f(Ne.$$.fragment),It=d(),Le=o("span"),Nt=s("Examples"),Lc=d(),ct=o("ul"),un=o("li"),fh=s(`Examples and scripts for fine-tuning BART and other models for sequence to sequence tasks can be found in `),fn=o("a"),mh=s("examples/pytorch/summarization/"),_h=s("."),gh=d(),ot=o("li"),bh=s("An example of how to train "),Nr=o("a"),kh=s("BartForConditionalGeneration"),vh=s(" with a Hugging Face "),Ks=o("code"),yh=s("datasets"),Th=s(` object can be found in this `),mn=o("a"),xh=s("forum discussion"),wh=s("."),zh=d(),vo=o("li"),_n=o("a"),Bh=s("Distilled checkpoints"),Fh=s(" are described in this "),gn=o("a"),qh=s("paper"),$h=s("."),Dc=d(),Lt=o("h2"),yo=o("a"),Vs=o("span"),f(bn.$$.fragment),Eh=d(),Js=o("span"),Mh=s("Implementation Notes"),Gc=d(),Be=o("ul"),nt=o("li"),jh=s("Bart doesn\u2019t use "),Xs=o("code"),Ch=s("token_type_ids"),Ph=s(" for sequence classification. Use "),Lr=o("a"),Oh=s("BartTokenizer"),Ah=s(` or `),Dr=o("a"),Sh=s("encode()"),Ih=s(" to get the proper splitting."),Nh=d(),Dt=o("li"),Lh=s("The forward pass of "),Gr=o("a"),Dh=s("BartModel"),Gh=s(" will create the "),Zs=o("code"),Uh=s("decoder_input_ids"),Wh=s(` if they are not passed. This is different than some other modeling APIs. A typical use case of this feature is mask filling.`),Qh=d(),Gt=o("li"),Rh=s(`Model predictions are intended to be identical to the original implementation when `),Ys=o("code"),Hh=s("force_bos_token_to_be_generated=True"),Kh=s(`. This only works, however, if the string you pass to `),ei=o("code"),Vh=s("fairseq.encode"),Jh=s(" starts with a space."),Xh=d(),Ur=o("li"),Wr=o("a"),Zh=s("generate()"),Yh=s(` should be used for conditional generation tasks like summarization, see the example in that docstrings.`),eu=d(),Ut=o("li"),tu=s("Models that load the "),ti=o("em"),ou=s("facebook/bart-large-cnn"),nu=s(" weights will not have a "),oi=o("code"),au=s("mask_token_id"),ru=s(`, or be able to perform mask-filling tasks.`),Uc=d(),Wt=o("h2"),To=o("a"),ni=o("span"),f(kn.$$.fragment),su=d(),ai=o("span"),iu=s("Mask Filling"),Wc=d(),pt=o("p"),du=s("The "),ri=o("code"),lu=s("facebook/bart-base"),cu=s(" and "),si=o("code"),pu=s("facebook/bart-large"),hu=s(" checkpoints can be used to fill multi-token masks."),Qc=d(),f(vn.$$.fragment),Rc=d(),Qt=o("h2"),xo=o("a"),ii=o("span"),f(yn.$$.fragment),uu=d(),di=o("span"),fu=s("BartConfig"),Hc=d(),be=o("div"),f(Tn.$$.fragment),mu=d(),Rt=o("p"),_u=s("This is the configuration class to store the configuration of a "),Qr=o("a"),gu=s("BartModel"),bu=s(`. It is used to instantiate a BART model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the BART `),xn=o("a"),ku=s("facebook/bart-large"),vu=s(" architecture."),yu=d(),Ht=o("p"),Tu=s("Configuration objects inherit from "),Rr=o("a"),xu=s("PretrainedConfig"),wu=s(` and can be used to control the model outputs. Read the documentation from `),Hr=o("a"),zu=s("PretrainedConfig"),Bu=s(" for more information."),Fu=d(),li=o("p"),qu=s("Example:"),$u=d(),f(wn.$$.fragment),Kc=d(),Kt=o("h2"),wo=o("a"),ci=o("span"),f(zn.$$.fragment),Eu=d(),pi=o("span"),Mu=s("BartTokenizer"),Vc=d(),at=o("div"),f(Bn.$$.fragment),ju=d(),hi=o("p"),Cu=s("Construct a BART tokenizer."),Pu=d(),ht=o("p"),Kr=o("a"),Ou=s("BartTokenizer"),Au=s(" is identical to "),Vr=o("a"),Su=s("RobertaTokenizer"),Iu=s(`. Refer to superclass `),Jr=o("a"),Nu=s("RobertaTokenizer"),Lu=s(` for usage examples and documentation concerning the initialization parameters and other methods.`),Jc=d(),Vt=o("h2"),zo=o("a"),ui=o("span"),f(Fn.$$.fragment),Du=d(),fi=o("span"),Gu=s("BartTokenizerFast"),Xc=d(),rt=o("div"),f(qn.$$.fragment),Uu=d(),$n=o("p"),Wu=s("Construct a \u201Cfast\u201D BART tokenizer (backed by HuggingFace\u2019s "),mi=o("em"),Qu=s("tokenizers"),Ru=s(" library)."),Hu=d(),ut=o("p"),Xr=o("a"),Ku=s("BartTokenizerFast"),Vu=s(" is identical to "),Zr=o("a"),Ju=s("RobertaTokenizerFast"),Xu=s(`. Refer to superclass `),Yr=o("a"),Zu=s("RobertaTokenizerFast"),Yu=s(` for usage examples and documentation concerning the initialization parameters and other methods.`),Zc=d(),Jt=o("h2"),Bo=o("a"),_i=o("span"),f(En.$$.fragment),ef=d(),gi=o("span"),tf=s("BartModel"),Yc=d(),De=o("div"),f(Mn.$$.fragment),of=d(),jn=o("p"),nf=s(`The bare BART Model outputting raw hidden-states without any specific head on top. This model inherits from `),es=o("a"),af=s("PreTrainedModel"),rf=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sf=d(),Cn=o("p"),df=s("This model is also a PyTorch "),Pn=o("a"),lf=s("torch.nn.Module"),cf=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pf=d(),Fe=o("div"),f(On.$$.fragment),hf=d(),Xt=o("p"),uf=s("The "),ts=o("a"),ff=s("BartModel"),mf=s(" forward method, overrides the "),bi=o("code"),_f=s("__call__"),gf=s(" special method."),bf=d(),f(Fo.$$.fragment),kf=d(),ki=o("p"),vf=s("Example:"),yf=d(),f(An.$$.fragment),ep=d(),Zt=o("h2"),qo=o("a"),vi=o("span"),f(Sn.$$.fragment),Tf=d(),yi=o("span"),xf=s("BartForConditionalGeneration"),tp=d(),Ge=o("div"),f(In.$$.fragment),wf=d(),Nn=o("p"),zf=s(`The BART Model with a language modeling head. Can be used for summarization. This model inherits from `),os=o("a"),Bf=s("PreTrainedModel"),Ff=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qf=d(),Ln=o("p"),$f=s("This model is also a PyTorch "),Dn=o("a"),Ef=s("torch.nn.Module"),Mf=s(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jf=d(),M=o("div"),f(Gn.$$.fragment),Cf=d(),Yt=o("p"),Pf=s("The "),ns=o("a"),Of=s("BartForConditionalGeneration"),Af=s(" forward method, overrides the "),Ti=o("code"),Sf=s("__call__"),If=s(" special method."),Nf=d(),f($o.$$.fragment),Lf=d(),xi=o("p"),Df=s("Summarization example::"),Gf=d(),wi=o("blockquote"),zi=o("blockquote"),Bi=o("blockquote"),Fi=o("p"),Uf=s("from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig"),Wf=d(),qi=o("blockquote"),$i=o("blockquote"),Ei=o("blockquote"),Mi=o("p"),Qf=s(`model = BartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large-cnn\u2019) tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large-cnn\u2019)`),Rf=d(),ji=o("blockquote"),Ci=o("blockquote"),Pi=o("blockquote"),Oi=o("p"),Hf=s(`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018pt\u2019)`),Kf=d(),Ai=o("blockquote"),Si=o("blockquote"),Un=o("blockquote"),Eo=o("h1"),Mo=o("a"),Ii=o("span"),f(Wn.$$.fragment),Vf=d(),Ni=o("span"),Jf=s("Generate Summary"),Xf=d(),Li=o("p"),Zf=s(`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),Yf=d(),Di=o("p"),em=s("Mask filling example::"),tm=d(),Gi=o("blockquote"),Ui=o("blockquote"),Wi=o("blockquote"),Qi=o("p"),om=s(`from transformers import BartTokenizer, BartForConditionalGeneration tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),nm=d(),Ri=o("blockquote"),Hi=o("blockquote"),Ki=o("blockquote"),Vi=o("p"),am=s(`model = BartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018pt\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),rm=d(),Ji=o("blockquote"),Xi=o("blockquote"),Zi=o("blockquote"),Yi=o("p"),sm=s(`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5)`),im=d(),ed=o("blockquote"),td=o("blockquote"),od=o("blockquote"),nd=o("p"),dm=s("tokenizer.decode(predictions).split()"),op=d(),eo=o("h2"),jo=o("a"),ad=o("span"),f(Qn.$$.fragment),lm=d(),rd=o("span"),cm=s("BartForSequenceClassification"),np=d(),ke=o("div"),f(Rn.$$.fragment),pm=d(),sd=o("p"),hm=s(`Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),um=d(),Hn=o("p"),fm=s("This model inherits from "),as=o("a"),mm=s("PreTrainedModel"),_m=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gm=d(),Kn=o("p"),bm=s("This model is also a PyTorch "),Vn=o("a"),km=s("torch.nn.Module"),vm=s(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ym=d(),Y=o("div"),f(Jn.$$.fragment),Tm=d(),to=o("p"),xm=s("The "),rs=o("a"),wm=s("BartForSequenceClassification"),zm=s(" forward method, overrides the "),id=o("code"),Bm=s("__call__"),Fm=s(" special method."),qm=d(),f(Co.$$.fragment),$m=d(),dd=o("p"),Em=s("Example of single-label classification:"),Mm=d(),f(Xn.$$.fragment),jm=d(),ld=o("p"),Cm=s("Example of multi-label classification:"),Pm=d(),f(Zn.$$.fragment),ap=d(),oo=o("h2"),Po=o("a"),cd=o("span"),f(Yn.$$.fragment),Om=d(),pd=o("span"),Am=s("BartForQuestionAnswering"),rp=d(),ve=o("div"),f(ea.$$.fragment),Sm=d(),no=o("p"),Im=s(`BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),hd=o("code"),Nm=s("span start logits"),Lm=s(" and "),ud=o("code"),Dm=s("span end logits"),Gm=s(")."),Um=d(),ta=o("p"),Wm=s("This model inherits from "),ss=o("a"),Qm=s("PreTrainedModel"),Rm=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hm=d(),oa=o("p"),Km=s("This model is also a PyTorch "),na=o("a"),Vm=s("torch.nn.Module"),Jm=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xm=d(),qe=o("div"),f(aa.$$.fragment),Zm=d(),ao=o("p"),Ym=s("The "),is=o("a"),e_=s("BartForQuestionAnswering"),t_=s(" forward method, overrides the "),fd=o("code"),o_=s("__call__"),n_=s(" special method."),a_=d(),f(Oo.$$.fragment),r_=d(),md=o("p"),s_=s("Example:"),i_=d(),f(ra.$$.fragment),sp=d(),ro=o("h2"),Ao=o("a"),_d=o("span"),f(sa.$$.fragment),d_=d(),gd=o("span"),l_=s("BartForCausalLM"),ip=d(),ia=o("div"),ft=o("div"),f(da.$$.fragment),c_=d(),bd=o("p"),p_=s("Example:"),h_=d(),f(la.$$.fragment),dp=d(),so=o("h2"),So=o("a"),kd=o("span"),f(ca.$$.fragment),u_=d(),vd=o("span"),f_=s("TFBartModel"),lp=d(),ye=o("div"),f(pa.$$.fragment),m_=d(),ha=o("p"),__=s(`The bare BART Model outputting raw hidden-states without any specific head on top. This model inherits from `),ds=o("a"),g_=s("TFPreTrainedModel"),b_=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),k_=d(),ua=o("p"),v_=s("This model is also a "),fa=o("a"),y_=s("tf.keras.Model"),T_=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),x_=d(),f(Io.$$.fragment),w_=d(),$e=o("div"),f(ma.$$.fragment),z_=d(),io=o("p"),B_=s("The "),ls=o("a"),F_=s("TFBartModel"),q_=s(" forward method, overrides the "),yd=o("code"),$_=s("__call__"),E_=s(" special method."),M_=d(),f(No.$$.fragment),j_=d(),Td=o("p"),C_=s("Example:"),P_=d(),f(_a.$$.fragment),cp=d(),lo=o("h2"),Lo=o("a"),xd=o("span"),f(ga.$$.fragment),O_=d(),wd=o("span"),A_=s("TFBartForConditionalGeneration"),pp=d(),Te=o("div"),f(ba.$$.fragment),S_=d(),ka=o("p"),I_=s(`The BART Model with a language modeling head. Can be used for summarization. This model inherits from `),cs=o("a"),N_=s("TFPreTrainedModel"),L_=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),D_=d(),va=o("p"),G_=s("This model is also a "),ya=o("a"),U_=s("tf.keras.Model"),W_=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Q_=d(),f(Do.$$.fragment),R_=d(),P=o("div"),f(Ta.$$.fragment),H_=d(),co=o("p"),K_=s("The "),ps=o("a"),V_=s("TFBartForConditionalGeneration"),J_=s(" forward method, overrides the "),zd=o("code"),X_=s("__call__"),Z_=s(" special method."),Y_=d(),f(Go.$$.fragment),eg=d(),Bd=o("p"),tg=s("Summarization example::"),og=d(),Fd=o("blockquote"),qd=o("blockquote"),$d=o("blockquote"),Ed=o("p"),ng=s("from transformers import BartTokenizer, TFBartForConditionalGeneration, BartConfig"),ag=d(),Md=o("blockquote"),jd=o("blockquote"),Cd=o("blockquote"),Pd=o("p"),rg=s(`model = TFBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019)`),sg=d(),Od=o("blockquote"),Ad=o("blockquote"),Sd=o("blockquote"),Id=o("p"),ig=s(`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018tf\u2019)`),dg=d(),Nd=o("blockquote"),Ld=o("blockquote"),xa=o("blockquote"),Uo=o("h1"),Wo=o("a"),Dd=o("span"),f(wa.$$.fragment),lg=d(),Gd=o("span"),cg=s("Generate Summary"),pg=d(),Ud=o("p"),hg=s(`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),ug=d(),Wd=o("p"),fg=s("Mask filling example::"),mg=d(),Qd=o("blockquote"),Rd=o("blockquote"),Hd=o("blockquote"),Kd=o("p"),_g=s(`from transformers import BartTokenizer, TFBartForConditionalGeneration tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),gg=d(),Vd=o("blockquote"),Jd=o("blockquote"),za=o("blockquote"),Xd=o("p"),bg=s(`model = TFBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018tf\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits probs = tf.nn.softmax(logits[0])`),kg=d(),Qo=o("h1"),Ro=o("a"),Zd=o("span"),f(Ba.$$.fragment),vg=d(),Yd=o("span"),yg=s("probs[5] is associated with the mask token"),hp=d(),po=o("h2"),Ho=o("a"),el=o("span"),f(Fa.$$.fragment),Tg=d(),tl=o("span"),xg=s("FlaxBartModel"),up=d(),Q=o("div"),f(qa.$$.fragment),wg=d(),$a=o("p"),zg=s(`The bare Bart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),hs=o("a"),Bg=s("FlaxPreTrainedModel"),Fg=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qg=d(),Ea=o("p"),$g=s("This model is also a Flax Linen "),Ma=o("a"),Eg=s("flax.nn.Module"),Mg=s(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),jg=d(),ol=o("p"),Cg=s("Finally, this model supports inherent JAX features such as:"),Pg=d(),st=o("ul"),nl=o("li"),ja=o("a"),Og=s("Just-In-Time (JIT) compilation"),Ag=d(),al=o("li"),Ca=o("a"),Sg=s("Automatic Differentiation"),Ig=d(),rl=o("li"),Pa=o("a"),Ng=s("Vectorization"),Lg=d(),sl=o("li"),Oa=o("a"),Dg=s("Parallelization"),Gg=d(),Ee=o("div"),f(Aa.$$.fragment),Ug=d(),ho=o("p"),Wg=s("The "),il=o("code"),Qg=s("FlaxBartPreTrainedModel"),Rg=s(" forward method, overrides the "),dl=o("code"),Hg=s("__call__"),Kg=s(" special method."),Vg=d(),f(Ko.$$.fragment),Jg=d(),ll=o("p"),Xg=s("Example:"),Zg=d(),f(Sa.$$.fragment),Yg=d(),mt=o("div"),f(Ia.$$.fragment),eb=d(),cl=o("p"),tb=s("Example:"),ob=d(),f(Na.$$.fragment),nb=d(),_t=o("div"),f(La.$$.fragment),ab=d(),pl=o("p"),rb=s("Example:"),sb=d(),f(Da.$$.fragment),fp=d(),uo=o("h2"),Vo=o("a"),hl=o("span"),f(Ga.$$.fragment),ib=d(),ul=o("span"),db=s("FlaxBartForConditionalGeneration"),mp=d(),R=o("div"),f(Ua.$$.fragment),lb=d(),Wa=o("p"),cb=s(`The BART Model with a language modeling head. Can be used for summarization. This model inherits from `),us=o("a"),pb=s("FlaxPreTrainedModel"),hb=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ub=d(),Qa=o("p"),fb=s("This model is also a Flax Linen "),Ra=o("a"),mb=s("flax.nn.Module"),_b=s(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),gb=d(),fl=o("p"),bb=s("Finally, this model supports inherent JAX features such as:"),kb=d(),it=o("ul"),ml=o("li"),Ha=o("a"),vb=s("Just-In-Time (JIT) compilation"),yb=d(),_l=o("li"),Ka=o("a"),Tb=s("Automatic Differentiation"),xb=d(),gl=o("li"),Va=o("a"),wb=s("Vectorization"),zb=d(),bl=o("li"),Ja=o("a"),Bb=s("Parallelization"),Fb=d(),j=o("div"),f(Xa.$$.fragment),qb=d(),fo=o("p"),$b=s("The "),kl=o("code"),Eb=s("FlaxBartPreTrainedModel"),Mb=s(" forward method, overrides the "),vl=o("code"),jb=s("__call__"),Cb=s(" special method."),Pb=d(),f(Jo.$$.fragment),Ob=d(),yl=o("p"),Ab=s("Summarization example::"),Sb=d(),Tl=o("blockquote"),xl=o("blockquote"),wl=o("blockquote"),zl=o("p"),Ib=s("from transformers import BartTokenizer, FlaxBartForConditionalGeneration"),Nb=d(),Bl=o("blockquote"),Fl=o("blockquote"),ql=o("blockquote"),$l=o("p"),Lb=s(`model = FlaxBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large-cnn\u2019) tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large-cnn\u2019)`),Db=d(),El=o("blockquote"),Ml=o("blockquote"),jl=o("blockquote"),Cl=o("p"),Gb=s(`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018jax\u2019)`),Ub=d(),Pl=o("blockquote"),Ol=o("blockquote"),Za=o("blockquote"),Xo=o("h1"),Zo=o("a"),Al=o("span"),f(Ya.$$.fragment),Wb=d(),Sl=o("span"),Qb=s("Generate Summary"),Rb=d(),Il=o("p"),Hb=s(`summary_ids = model.generate(inputs[\u2018input_ids\u2019]).sequences print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))`),Kb=d(),Nl=o("p"),Vb=s("Mask filling example::"),Jb=d(),Ll=o("blockquote"),Dl=o("blockquote"),Gl=o("blockquote"),Ul=o("p"),Xb=s(`from transformers import BartTokenizer, FlaxBartForConditionalGeneration tokenizer = 
BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),Zb=d(),Wl=o("blockquote"),Ql=o("blockquote"),Rl=o("blockquote"),Hl=o("p"),Yb=s(`model = FlaxBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018jax\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),ek=d(),Kl=o("blockquote"),Vl=o("blockquote"),Jl=o("blockquote"),Xl=o("p"),tk=s(`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item() probs = jax.nn.softmax(logits[0, masked_index], axis=0) values, predictions = jax.lax.top_k(probs)`),ok=d(),Zl=o("blockquote"),Yl=o("blockquote"),ec=o("blockquote"),tc=o("p"),nk=s("tokenizer.decode(predictions).split()"),ak=d(),gt=o("div"),f(er.$$.fragment),rk=d(),oc=o("p"),sk=s("Example:"),ik=d(),f(tr.$$.fragment),dk=d(),bt=o("div"),f(or.$$.fragment),lk=d(),nc=o("p"),ck=s("Example:"),pk=d(),f(nr.$$.fragment),_p=d(),mo=o("h2"),Yo=o("a"),ac=o("span"),f(ar.$$.fragment),hk=d(),rc=o("span"),uk=s("FlaxBartForSequenceClassification"),gp=d(),N=o("div"),f(rr.$$.fragment),fk=d(),sc=o("p"),mk=s(`Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),_k=d(),sr=o("p"),gk=s("This model inherits from "),fs=o("a"),bk=s("FlaxPreTrainedModel"),kk=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vk=d(),ir=o("p"),yk=s("This model is also a Flax Linen "),dr=o("a"),Tk=s("flax.nn.Module"),xk=s(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),wk=d(),ic=o("p"),zk=s("Finally, this model supports inherent JAX features such as:"),Bk=d(),dt=o("ul"),dc=o("li"),lr=o("a"),Fk=s("Just-In-Time (JIT) compilation"),qk=d(),lc=o("li"),cr=o("a"),$k=s("Automatic Differentiation"),Ek=d(),cc=o("li"),pr=o("a"),Mk=s("Vectorization"),jk=d(),pc=o("li"),hr=o("a"),Ck=s("Parallelization"),Pk=d(),Me=o("div"),f(ur.$$.fragment),Ok=d(),_o=o("p"),Ak=s("The "),hc=o("code"),Sk=s("FlaxBartPreTrainedModel"),Ik=s(" forward method, overrides the "),uc=o("code"),Nk=s("__call__"),Lk=s(" special method."),Dk=d(),f(en.$$.fragment),Gk=d(),fc=o("p"),Uk=s("Example:"),Wk=d(),f(fr.$$.fragment),Qk=d(),kt=o("div"),f(mr.$$.fragment),Rk=d(),mc=o("p"),Hk=s("Example:"),Kk=d(),f(_r.$$.fragment),Vk=d(),vt=o("div"),f(gr.$$.fragment),Jk=d(),_c=o("p"),Xk=s("Example:"),Zk=d(),f(br.$$.fragment),bp=d(),go=o("h2"),tn=o("a"),gc=o("span"),f(kr.$$.fragment),Yk=d(),bc=o("span"),ev=s("FlaxBartForQuestionAnswering"),kp=d(),L=o("div"),f(vr.$$.fragment),tv=d(),bo=o("p"),ov=s(`BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),kc=o("code"),nv=s("span start logits"),av=s(" and "),vc=o("code"),rv=s("span end logits"),sv=s(")."),iv=d(),yr=o("p"),dv=s("This model inherits from "),ms=o("a"),lv=s("FlaxPreTrainedModel"),cv=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pv=d(),Tr=o("p"),hv=s("This model is also a Flax Linen "),xr=o("a"),uv=s("flax.nn.Module"),fv=s(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),mv=d(),yc=o("p"),_v=s("Finally, this model supports inherent JAX features such as:"),gv=d(),lt=o("ul"),Tc=o("li"),wr=o("a"),bv=s("Just-In-Time (JIT) compilation"),kv=d(),xc=o("li"),zr=o("a"),vv=s("Automatic Differentiation"),yv=d(),wc=o("li"),Br=o("a"),Tv=s("Vectorization"),xv=d(),zc=o("li"),Fr=o("a"),wv=s("Parallelization"),zv=d(),je=o("div"),f(qr.$$.fragment),Bv=d(),ko=o("p"),Fv=s("The "),Bc=o("code"),qv=s("FlaxBartPreTrainedModel"),$v=s(" forward method, overrides the "),Fc=o("code"),Ev=s("__call__"),Mv=s(" special method."),jv=d(),f(on.$$.fragment),Cv=d(),qc=o("p"),Pv=s("Example:"),Ov=d(),f($r.$$.fragment),Av=d(),yt=o("div"),f(Er.$$.fragment),Sv=d(),$c=o("p"),Iv=s("Example:"),Nv=d(),f(Mr.$$.fragment),Lv=d(),Tt=o("div"),f(jr.$$.fragment),Dv=d(),Ec=o("p"),Gv=s("Example:"),Uv=d(),f(Cr.$$.fragment),this.h()},l(r){const h=m2('[data-svelte="svelte-1phssyn"]',document.head);p=n(h,"META",{name:!0,content:!0}),h.forEach(t),B=l(r),v=n(r,"H1",{class:!0});var Pr=a(v);x=n(Pr,"A",{id:!0,class:!0,href:!0});var Mc=a(x);z=n(Mc,"SPAN",{});var jc=a(z);m(T.$$.fragment,jc),jc.forEach(t),Mc.forEach(t),y=l(Pr),F=n(Pr,"SPAN",{});var Cc=a(F);Re=i(Cc,"BART"),Cc.forEach(t),Pr.forEach(t),xe=l(r),$=n(r,"P",{});var nn=a($);Pe=n(nn,"STRONG",{});var Pc=a(Pe);he=i(Pc,"DISCLAIMER:"),Pc.forEach(t),He=i(nn," If you see something strange, file a "),ue=n(nn,"A",{href:!0,rel:!0});var Oc=a(ue);fe=i(Oc,"Github Issue"),Oc.forEach(t),Ke=i(nn,` and assign @patrickvonplaten`),nn.forEach(t),Ue=l(r),K=n(r,"H2",{class:!0});var Or=a(K);G=n(Or,"A",{id:!0,class:!0,href:!0});var Ac=a(G);Oe=n(Ac,"SPAN",{});var Sc=a(Oe);m(oe.$$.fragment,Sc),Sc.forEach(t),Ac.forEach(t),S=l(Or),I=n(Or,"SPAN",{});var Ic=a(I);Ve=i(Ic,"Overview"),Ic.forEach(t),Or.forEach(t),ie=l(r),de=n(r,"P",{});var Ar=a(de);Je=i(Ar,"The Bart model was proposed in "),V=n(Ar,"A",{href:!0,rel:!0});var Qv=a(V);Xe=i(Qv,`BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension`),Qv.forEach(t),Ze=i(Ar,` by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer on 29 Oct, 2019.`),Ar.forEach(t),U=l(r),we=n(r,"P",{});var Rv=a(we);me=i(Rv,"According to the abstract,"),Rv.forEach(t),We=l(r),Z=n(r,"UL",{});var _s=a(Z);ne=n(_s,"LI",{});var Hv=a(ne);Ye=i(Hv,`Bart uses a standard seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a left-to-right decoder (like GPT).`),Hv.forEach(t),et=l(_s),J=n(_s,"LI",{});var Kv=a(J);tt=i(Kv,`The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme, where spans of text are replaced with a single mask token.`),Kv.forEach(t),_e=l(_s),Ae=n(_s,"LI",{});var Vv=a(Ae);w=i(Vv,`BART is particularly effective when fine tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 6 ROUGE.`),Vv.forEach(t),_s.forEach(t),E=l(r),H=n(r,"P",{});var gs=a(H);Ot=i(gs,"This model was contributed by "),Se=n(gs,"A",{href:!0,rel:!0});var Jv=a(Se);ae=i(Jv,"sshleifer"),Jv.forEach(t),At=i(gs,". 
The Authors\u2019 code can be found "),Ie=n(gs,"A",{href:!0,rel:!0});var Xv=a(Ie);St=i(Xv,"here"),Xv.forEach(t),re=i(gs,"."),gs.forEach(t),le=l(r),ge=n(r,"H3",{class:!0});var yp=a(ge);ze=n(yp,"A",{id:!0,class:!0,href:!0});var Zv=a(ze);se=n(Zv,"SPAN",{});var Yv=a(se);m(Ne.$$.fragment,Yv),Yv.forEach(t),Zv.forEach(t),It=l(yp),Le=n(yp,"SPAN",{});var ey=a(Le);Nt=i(ey,"Examples"),ey.forEach(t),yp.forEach(t),Lc=l(r),ct=n(r,"UL",{});var bs=a(ct);un=n(bs,"LI",{});var Tp=a(un);fh=i(Tp,`Examples and scripts for fine-tuning BART and other models for sequence to sequence tasks can be found in `),fn=n(Tp,"A",{href:!0,rel:!0});var ty=a(fn);mh=i(ty,"examples/pytorch/summarization/"),ty.forEach(t),_h=i(Tp,"."),Tp.forEach(t),gh=l(bs),ot=n(bs,"LI",{});var an=a(ot);bh=i(an,"An example of how to train "),Nr=n(an,"A",{href:!0});var oy=a(Nr);kh=i(oy,"BartForConditionalGeneration"),oy.forEach(t),vh=i(an," with a Hugging Face "),Ks=n(an,"CODE",{});var ny=a(Ks);yh=i(ny,"datasets"),ny.forEach(t),Th=i(an,` object can be found in this `),mn=n(an,"A",{href:!0,rel:!0});var ay=a(mn);xh=i(ay,"forum discussion"),ay.forEach(t),wh=i(an,"."),an.forEach(t),zh=l(bs),vo=n(bs,"LI",{});var Nc=a(vo);_n=n(Nc,"A",{href:!0,rel:!0});var ry=a(_n);Bh=i(ry,"Distilled checkpoints"),ry.forEach(t),Fh=i(Nc," are described in this "),gn=n(Nc,"A",{href:!0,rel:!0});var sy=a(gn);qh=i(sy,"paper"),sy.forEach(t),$h=i(Nc,"."),Nc.forEach(t),bs.forEach(t),Dc=l(r),Lt=n(r,"H2",{class:!0});var xp=a(Lt);yo=n(xp,"A",{id:!0,class:!0,href:!0});var iy=a(yo);Vs=n(iy,"SPAN",{});var dy=a(Vs);m(bn.$$.fragment,dy),dy.forEach(t),iy.forEach(t),Eh=l(xp),Js=n(xp,"SPAN",{});var ly=a(Js);Mh=i(ly,"Implementation Notes"),ly.forEach(t),xp.forEach(t),Gc=l(r),Be=n(r,"UL",{});var xt=a(Be);nt=n(xt,"LI",{});var rn=a(nt);jh=i(rn,"Bart doesn\u2019t use "),Xs=n(rn,"CODE",{});var cy=a(Xs);Ch=i(cy,"token_type_ids"),cy.forEach(t),Ph=i(rn," for sequence classification. Use "),Lr=n(rn,"A",{href:!0});var py=a(Lr);Oh=i(py,"BartTokenizer"),py.forEach(t),Ah=i(rn,` or `),Dr=n(rn,"A",{href:!0});var hy=a(Dr);Sh=i(hy,"encode()"),hy.forEach(t),Ih=i(rn," to get the proper splitting."),rn.forEach(t),Nh=l(xt),Dt=n(xt,"LI",{});var ks=a(Dt);Lh=i(ks,"The forward pass of "),Gr=n(ks,"A",{href:!0});var uy=a(Gr);Dh=i(uy,"BartModel"),uy.forEach(t),Gh=i(ks," will create the "),Zs=n(ks,"CODE",{});var fy=a(Zs);Uh=i(fy,"decoder_input_ids"),fy.forEach(t),Wh=i(ks,` if they are not passed. This is different than some other modeling APIs. A typical use case of this feature is mask filling.`),ks.forEach(t),Qh=l(xt),Gt=n(xt,"LI",{});var vs=a(Gt);Rh=i(vs,`Model predictions are intended to be identical to the original implementation when `),Ys=n(vs,"CODE",{});var my=a(Ys);Hh=i(my,"force_bos_token_to_be_generated=True"),my.forEach(t),Kh=i(vs,`. 
This only works, however, if the string you pass to `),ei=n(vs,"CODE",{});var _y=a(ei);Vh=i(_y,"fairseq.encode"),_y.forEach(t),Jh=i(vs," starts with a space."),vs.forEach(t),Xh=l(xt),Ur=n(xt,"LI",{});var Wv=a(Ur);Wr=n(Wv,"A",{href:!0});var gy=a(Wr);Zh=i(gy,"generate()"),gy.forEach(t),Yh=i(Wv,` should be used for conditional generation tasks like summarization, see the example in that docstrings.`),Wv.forEach(t),eu=l(xt),Ut=n(xt,"LI",{});var ys=a(Ut);tu=i(ys,"Models that load the "),ti=n(ys,"EM",{});var by=a(ti);ou=i(by,"facebook/bart-large-cnn"),by.forEach(t),nu=i(ys," weights will not have a "),oi=n(ys,"CODE",{});var ky=a(oi);au=i(ky,"mask_token_id"),ky.forEach(t),ru=i(ys,`, or be able to perform mask-filling tasks.`),ys.forEach(t),xt.forEach(t),Uc=l(r),Wt=n(r,"H2",{class:!0});var wp=a(Wt);To=n(wp,"A",{id:!0,class:!0,href:!0});var vy=a(To);ni=n(vy,"SPAN",{});var yy=a(ni);m(kn.$$.fragment,yy),yy.forEach(t),vy.forEach(t),su=l(wp),ai=n(wp,"SPAN",{});var Ty=a(ai);iu=i(Ty,"Mask Filling"),Ty.forEach(t),wp.forEach(t),Wc=l(r),pt=n(r,"P",{});var Ts=a(pt);du=i(Ts,"The "),ri=n(Ts,"CODE",{});var xy=a(ri);lu=i(xy,"facebook/bart-base"),xy.forEach(t),cu=i(Ts," and "),si=n(Ts,"CODE",{});var wy=a(si);pu=i(wy,"facebook/bart-large"),wy.forEach(t),hu=i(Ts," checkpoints can be used to fill multi-token masks."),Ts.forEach(t),Qc=l(r),m(vn.$$.fragment,r),Rc=l(r),Qt=n(r,"H2",{class:!0});var zp=a(Qt);xo=n(zp,"A",{id:!0,class:!0,href:!0});var zy=a(xo);ii=n(zy,"SPAN",{});var By=a(ii);m(yn.$$.fragment,By),By.forEach(t),zy.forEach(t),uu=l(zp),di=n(zp,"SPAN",{});var Fy=a(di);fu=i(Fy,"BartConfig"),Fy.forEach(t),zp.forEach(t),Hc=l(r),be=n(r,"DIV",{class:!0});var wt=a(be);m(Tn.$$.fragment,wt),mu=l(wt),Rt=n(wt,"P",{});var xs=a(Rt);_u=i(xs,"This is the configuration class to store the configuration of a "),Qr=n(xs,"A",{href:!0});var qy=a(Qr);gu=i(qy,"BartModel"),qy.forEach(t),bu=i(xs,`. It is used to instantiate a BART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BART `),xn=n(xs,"A",{href:!0,rel:!0});var $y=a(xn);ku=i($y,"facebook/bart-large"),$y.forEach(t),vu=i(xs," architecture."),xs.forEach(t),yu=l(wt),Ht=n(wt,"P",{});var ws=a(Ht);Tu=i(ws,"Configuration objects inherit from "),Rr=n(ws,"A",{href:!0});var Ey=a(Rr);xu=i(Ey,"PretrainedConfig"),Ey.forEach(t),wu=i(ws,` and can be used to control the model outputs. Read the documentation from `),Hr=n(ws,"A",{href:!0});var My=a(Hr);zu=i(My,"PretrainedConfig"),My.forEach(t),Bu=i(ws," for more information."),ws.forEach(t),Fu=l(wt),li=n(wt,"P",{});var jy=a(li);qu=i(jy,"Example:"),jy.forEach(t),$u=l(wt),m(wn.$$.fragment,wt),wt.forEach(t),Kc=l(r),Kt=n(r,"H2",{class:!0});var Bp=a(Kt);wo=n(Bp,"A",{id:!0,class:!0,href:!0});var Cy=a(wo);ci=n(Cy,"SPAN",{});var Py=a(ci);m(zn.$$.fragment,Py),Py.forEach(t),Cy.forEach(t),Eu=l(Bp),pi=n(Bp,"SPAN",{});var Oy=a(pi);Mu=i(Oy,"BartTokenizer"),Oy.forEach(t),Bp.forEach(t),Vc=l(r),at=n(r,"DIV",{class:!0});var zs=a(at);m(Bn.$$.fragment,zs),ju=l(zs),hi=n(zs,"P",{});var Ay=a(hi);Cu=i(Ay,"Construct a BART tokenizer."),Ay.forEach(t),Pu=l(zs),ht=n(zs,"P",{});var Sr=a(ht);Kr=n(Sr,"A",{href:!0});var Sy=a(Kr);Ou=i(Sy,"BartTokenizer"),Sy.forEach(t),Au=i(Sr," is identical to "),Vr=n(Sr,"A",{href:!0});var Iy=a(Vr);Su=i(Iy,"RobertaTokenizer"),Iy.forEach(t),Iu=i(Sr,`. 
Refer to superclass `),Jr=n(Sr,"A",{href:!0});var Ny=a(Jr);Nu=i(Ny,"RobertaTokenizer"),Ny.forEach(t),Lu=i(Sr,` for usage examples and documentation concerning the initialization parameters and other methods.`),Sr.forEach(t),zs.forEach(t),Jc=l(r),Vt=n(r,"H2",{class:!0});var Fp=a(Vt);zo=n(Fp,"A",{id:!0,class:!0,href:!0});var Ly=a(zo);ui=n(Ly,"SPAN",{});var Dy=a(ui);m(Fn.$$.fragment,Dy),Dy.forEach(t),Ly.forEach(t),Du=l(Fp),fi=n(Fp,"SPAN",{});var Gy=a(fi);Gu=i(Gy,"BartTokenizerFast"),Gy.forEach(t),Fp.forEach(t),Xc=l(r),rt=n(r,"DIV",{class:!0});var Bs=a(rt);m(qn.$$.fragment,Bs),Uu=l(Bs),$n=n(Bs,"P",{});var qp=a($n);Wu=i(qp,"Construct a \u201Cfast\u201D BART tokenizer (backed by HuggingFace\u2019s "),mi=n(qp,"EM",{});var Uy=a(mi);Qu=i(Uy,"tokenizers"),Uy.forEach(t),Ru=i(qp," library)."),qp.forEach(t),Hu=l(Bs),ut=n(Bs,"P",{});var Ir=a(ut);Xr=n(Ir,"A",{href:!0});var Wy=a(Xr);Ku=i(Wy,"BartTokenizerFast"),Wy.forEach(t),Vu=i(Ir," is identical to "),Zr=n(Ir,"A",{href:!0});var Qy=a(Zr);Ju=i(Qy,"RobertaTokenizerFast"),Qy.forEach(t),Xu=i(Ir,`. Refer to superclass `),Yr=n(Ir,"A",{href:!0});var Ry=a(Yr);Zu=i(Ry,"RobertaTokenizerFast"),Ry.forEach(t),Yu=i(Ir,` for usage examples and documentation concerning the initialization parameters and other methods.`),Ir.forEach(t),Bs.forEach(t),Zc=l(r),Jt=n(r,"H2",{class:!0});var $p=a(Jt);Bo=n($p,"A",{id:!0,class:!0,href:!0});var Hy=a(Bo);_i=n(Hy,"SPAN",{});var Ky=a(_i);m(En.$$.fragment,Ky),Ky.forEach(t),Hy.forEach(t),ef=l($p),gi=n($p,"SPAN",{});var Vy=a(gi);tf=i(Vy,"BartModel"),Vy.forEach(t),$p.forEach(t),Yc=l(r),De=n(r,"DIV",{class:!0});var sn=a(De);m(Mn.$$.fragment,sn),of=l(sn),jn=n(sn,"P",{});var Ep=a(jn);nf=i(Ep,`The bare BART Model outputting raw hidden-states without any specific head on top. This model inherits from `),es=n(Ep,"A",{href:!0});var Jy=a(es);af=i(Jy,"PreTrainedModel"),Jy.forEach(t),rf=i(Ep,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ep.forEach(t),sf=l(sn),Cn=n(sn,"P",{});var Mp=a(Cn);df=i(Mp,"This model is also a PyTorch "),Pn=n(Mp,"A",{href:!0,rel:!0});var Xy=a(Pn);lf=i(Xy,"torch.nn.Module"),Xy.forEach(t),cf=i(Mp,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Mp.forEach(t),pf=l(sn),Fe=n(sn,"DIV",{class:!0});var zt=a(Fe);m(On.$$.fragment,zt),hf=l(zt),Xt=n(zt,"P",{});var Fs=a(Xt);uf=i(Fs,"The "),ts=n(Fs,"A",{href:!0});var Zy=a(ts);ff=i(Zy,"BartModel"),Zy.forEach(t),mf=i(Fs," forward method, overrides the "),bi=n(Fs,"CODE",{});var Yy=a(bi);_f=i(Yy,"__call__"),Yy.forEach(t),gf=i(Fs," special method."),Fs.forEach(t),bf=l(zt),m(Fo.$$.fragment,zt),kf=l(zt),ki=n(zt,"P",{});var eT=a(ki);vf=i(eT,"Example:"),eT.forEach(t),yf=l(zt),m(An.$$.fragment,zt),zt.forEach(t),sn.forEach(t),ep=l(r),Zt=n(r,"H2",{class:!0});var jp=a(Zt);qo=n(jp,"A",{id:!0,class:!0,href:!0});var tT=a(qo);vi=n(tT,"SPAN",{});var oT=a(vi);m(Sn.$$.fragment,oT),oT.forEach(t),tT.forEach(t),Tf=l(jp),yi=n(jp,"SPAN",{});var nT=a(yi);xf=i(nT,"BartForConditionalGeneration"),nT.forEach(t),jp.forEach(t),tp=l(r),Ge=n(r,"DIV",{class:!0});var dn=a(Ge);m(In.$$.fragment,dn),wf=l(dn),Nn=n(dn,"P",{});var Cp=a(Nn);zf=i(Cp,`The BART Model with a language modeling head. Can be used for summarization. This model inherits from `),os=n(Cp,"A",{href:!0});var aT=a(os);Bf=i(aT,"PreTrainedModel"),aT.forEach(t),Ff=i(Cp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cp.forEach(t),qf=l(dn),Ln=n(dn,"P",{});var Pp=a(Ln);$f=i(Pp,"This model is also a PyTorch "),Dn=n(Pp,"A",{href:!0,rel:!0});var rT=a(Dn);Ef=i(rT,"torch.nn.Module"),rT.forEach(t),Mf=i(Pp,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pp.forEach(t),jf=l(dn),M=n(dn,"DIV",{class:!0});var O=a(M);m(Gn.$$.fragment,O),Cf=l(O),Yt=n(O,"P",{});var qs=a(Yt);Pf=i(qs,"The "),ns=n(qs,"A",{href:!0});var sT=a(ns);Of=i(sT,"BartForConditionalGeneration"),sT.forEach(t),Af=i(qs," forward method, overrides the "),Ti=n(qs,"CODE",{});var iT=a(Ti);Sf=i(iT,"__call__"),iT.forEach(t),If=i(qs," special method."),qs.forEach(t),Nf=l(O),m($o.$$.fragment,O),Lf=l(O),xi=n(O,"P",{});var dT=a(xi);Df=i(dT,"Summarization example::"),dT.forEach(t),Gf=l(O),wi=n(O,"BLOCKQUOTE",{});var lT=a(wi);zi=n(lT,"BLOCKQUOTE",{});var cT=a(zi);Bi=n(cT,"BLOCKQUOTE",{});var pT=a(Bi);Fi=n(pT,"P",{});var hT=a(Fi);Uf=i(hT,"from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig"),hT.forEach(t),pT.forEach(t),cT.forEach(t),lT.forEach(t),Wf=l(O),qi=n(O,"BLOCKQUOTE",{});var uT=a(qi);$i=n(uT,"BLOCKQUOTE",{});var fT=a($i);Ei=n(fT,"BLOCKQUOTE",{});var mT=a(Ei);Mi=n(mT,"P",{});var _T=a(Mi);Qf=i(_T,`model = BartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large-cnn\u2019) tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large-cnn\u2019)`),_T.forEach(t),mT.forEach(t),fT.forEach(t),uT.forEach(t),Rf=l(O),ji=n(O,"BLOCKQUOTE",{});var gT=a(ji);Ci=n(gT,"BLOCKQUOTE",{});var bT=a(Ci);Pi=n(bT,"BLOCKQUOTE",{});var kT=a(Pi);Oi=n(kT,"P",{});var vT=a(Oi);Hf=i(vT,`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018pt\u2019)`),vT.forEach(t),kT.forEach(t),bT.forEach(t),gT.forEach(t),Kf=l(O),Ai=n(O,"BLOCKQUOTE",{});var yT=a(Ai);Si=n(yT,"BLOCKQUOTE",{});var TT=a(Si);Un=n(TT,"BLOCKQUOTE",{});var Op=a(Un);Eo=n(Op,"H1",{class:!0});var Ap=a(Eo);Mo=n(Ap,"A",{id:!0,class:!0,href:!0});var xT=a(Mo);Ii=n(xT,"SPAN",{});var wT=a(Ii);m(Wn.$$.fragment,wT),wT.forEach(t),xT.forEach(t),Vf=l(Ap),Ni=n(Ap,"SPAN",{});var zT=a(Ni);Jf=i(zT,"Generate Summary"),zT.forEach(t),Ap.forEach(t),Xf=l(Op),Li=n(Op,"P",{});var BT=a(Li);Zf=i(BT,`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),BT.forEach(t),Op.forEach(t),TT.forEach(t),yT.forEach(t),Yf=l(O),Di=n(O,"P",{});var FT=a(Di);em=i(FT,"Mask filling example::"),FT.forEach(t),tm=l(O),Gi=n(O,"BLOCKQUOTE",{});var qT=a(Gi);Ui=n(qT,"BLOCKQUOTE",{});var $T=a(Ui);Wi=n($T,"BLOCKQUOTE",{});var ET=a(Wi);Qi=n(ET,"P",{});var MT=a(Qi);om=i(MT,`from transformers import BartTokenizer, BartForConditionalGeneration tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),MT.forEach(t),ET.forEach(t),$T.forEach(t),qT.forEach(t),nm=l(O),Ri=n(O,"BLOCKQUOTE",{});var jT=a(Ri);Hi=n(jT,"BLOCKQUOTE",{});var CT=a(Hi);Ki=n(CT,"BLOCKQUOTE",{});var PT=a(Ki);Vi=n(PT,"P",{});var OT=a(Vi);am=i(OT,`model = BartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) input_ids 
= tokenizer([TXT], return_tensors=\u2018pt\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),OT.forEach(t),PT.forEach(t),CT.forEach(t),jT.forEach(t),rm=l(O),Ji=n(O,"BLOCKQUOTE",{});var AT=a(Ji);Xi=n(AT,"BLOCKQUOTE",{});var ST=a(Xi);Zi=n(ST,"BLOCKQUOTE",{});var IT=a(Zi);Yi=n(IT,"P",{});var NT=a(Yi);sm=i(NT,`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5)`),NT.forEach(t),IT.forEach(t),ST.forEach(t),AT.forEach(t),im=l(O),ed=n(O,"BLOCKQUOTE",{});var LT=a(ed);td=n(LT,"BLOCKQUOTE",{});var DT=a(td);od=n(DT,"BLOCKQUOTE",{});var GT=a(od);nd=n(GT,"P",{});var UT=a(nd);dm=i(UT,"tokenizer.decode(predictions).split()"),UT.forEach(t),GT.forEach(t),DT.forEach(t),LT.forEach(t),O.forEach(t),dn.forEach(t),op=l(r),eo=n(r,"H2",{class:!0});var Sp=a(eo);jo=n(Sp,"A",{id:!0,class:!0,href:!0});var WT=a(jo);ad=n(WT,"SPAN",{});var QT=a(ad);m(Qn.$$.fragment,QT),QT.forEach(t),WT.forEach(t),lm=l(Sp),rd=n(Sp,"SPAN",{});var RT=a(rd);cm=i(RT,"BartForSequenceClassification"),RT.forEach(t),Sp.forEach(t),np=l(r),ke=n(r,"DIV",{class:!0});var Bt=a(ke);m(Rn.$$.fragment,Bt),pm=l(Bt),sd=n(Bt,"P",{});var HT=a(sd);hm=i(HT,`Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),HT.forEach(t),um=l(Bt),Hn=n(Bt,"P",{});var Ip=a(Hn);fm=i(Ip,"This model inherits from "),as=n(Ip,"A",{href:!0});var KT=a(as);mm=i(KT,"PreTrainedModel"),KT.forEach(t),_m=i(Ip,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ip.forEach(t),gm=l(Bt),Kn=n(Bt,"P",{});var Np=a(Kn);bm=i(Np,"This model is also a PyTorch "),Vn=n(Np,"A",{href:!0,rel:!0});var VT=a(Vn);km=i(VT,"torch.nn.Module"),VT.forEach(t),vm=i(Np,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Np.forEach(t),ym=l(Bt),Y=n(Bt,"DIV",{class:!0});var Ce=a(Y);m(Jn.$$.fragment,Ce),Tm=l(Ce),to=n(Ce,"P",{});var $s=a(to);xm=i($s,"The "),rs=n($s,"A",{href:!0});var JT=a(rs);wm=i(JT,"BartForSequenceClassification"),JT.forEach(t),zm=i($s," forward method, overrides the "),id=n($s,"CODE",{});var XT=a(id);Bm=i(XT,"__call__"),XT.forEach(t),Fm=i($s," special method."),$s.forEach(t),qm=l(Ce),m(Co.$$.fragment,Ce),$m=l(Ce),dd=n(Ce,"P",{});var ZT=a(dd);Em=i(ZT,"Example of single-label classification:"),ZT.forEach(t),Mm=l(Ce),m(Xn.$$.fragment,Ce),jm=l(Ce),ld=n(Ce,"P",{});var YT=a(ld);Cm=i(YT,"Example of multi-label classification:"),YT.forEach(t),Pm=l(Ce),m(Zn.$$.fragment,Ce),Ce.forEach(t),Bt.forEach(t),ap=l(r),oo=n(r,"H2",{class:!0});var Lp=a(oo);Po=n(Lp,"A",{id:!0,class:!0,href:!0});var e1=a(Po);cd=n(e1,"SPAN",{});var t1=a(cd);m(Yn.$$.fragment,t1),t1.forEach(t),e1.forEach(t),Om=l(Lp),pd=n(Lp,"SPAN",{});var o1=a(pd);Am=i(o1,"BartForQuestionAnswering"),o1.forEach(t),Lp.forEach(t),rp=l(r),ve=n(r,"DIV",{class:!0});var Ft=a(ve);m(ea.$$.fragment,Ft),Sm=l(Ft),no=n(Ft,"P",{});var Es=a(no);Im=i(Es,`BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),hd=n(Es,"CODE",{});var n1=a(hd);Nm=i(n1,"span start logits"),n1.forEach(t),Lm=i(Es," and "),ud=n(Es,"CODE",{});var a1=a(ud);Dm=i(a1,"span end logits"),a1.forEach(t),Gm=i(Es,")."),Es.forEach(t),Um=l(Ft),ta=n(Ft,"P",{});var Dp=a(ta);Wm=i(Dp,"This model inherits from "),ss=n(Dp,"A",{href:!0});var r1=a(ss);Qm=i(r1,"PreTrainedModel"),r1.forEach(t),Rm=i(Dp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Dp.forEach(t),Hm=l(Ft),oa=n(Ft,"P",{});var Gp=a(oa);Km=i(Gp,"This model is also a PyTorch "),na=n(Gp,"A",{href:!0,rel:!0});var s1=a(na);Vm=i(s1,"torch.nn.Module"),s1.forEach(t),Jm=i(Gp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gp.forEach(t),Xm=l(Ft),qe=n(Ft,"DIV",{class:!0});var qt=a(qe);m(aa.$$.fragment,qt),Zm=l(qt),ao=n(qt,"P",{});var Ms=a(ao);Ym=i(Ms,"The "),is=n(Ms,"A",{href:!0});var i1=a(is);e_=i(i1,"BartForQuestionAnswering"),i1.forEach(t),t_=i(Ms," forward method, overrides the "),fd=n(Ms,"CODE",{});var d1=a(fd);o_=i(d1,"__call__"),d1.forEach(t),n_=i(Ms," special method."),Ms.forEach(t),a_=l(qt),m(Oo.$$.fragment,qt),r_=l(qt),md=n(qt,"P",{});var l1=a(md);s_=i(l1,"Example:"),l1.forEach(t),i_=l(qt),m(ra.$$.fragment,qt),qt.forEach(t),Ft.forEach(t),sp=l(r),ro=n(r,"H2",{class:!0});var Up=a(ro);Ao=n(Up,"A",{id:!0,class:!0,href:!0});var c1=a(Ao);_d=n(c1,"SPAN",{});var p1=a(_d);m(sa.$$.fragment,p1),p1.forEach(t),c1.forEach(t),d_=l(Up),gd=n(Up,"SPAN",{});var h1=a(gd);l_=i(h1,"BartForCausalLM"),h1.forEach(t),Up.forEach(t),ip=l(r),ia=n(r,"DIV",{class:!0});var u1=a(ia);ft=n(u1,"DIV",{class:!0});var js=a(ft);m(da.$$.fragment,js),c_=l(js),bd=n(js,"P",{});var f1=a(bd);p_=i(f1,"Example:"),f1.forEach(t),h_=l(js),m(la.$$.fragment,js),js.forEach(t),u1.forEach(t),dp=l(r),so=n(r,"H2",{class:!0});var Wp=a(so);So=n(Wp,"A",{id:!0,class:!0,href:!0});var m1=a(So);kd=n(m1,"SPAN",{});var _1=a(kd);m(ca.$$.fragment,_1),_1.forEach(t),m1.forEach(t),u_=l(Wp),vd=n(Wp,"SPAN",{});var g1=a(vd);f_=i(g1,"TFBartModel"),g1.forEach(t),Wp.forEach(t),lp=l(r),ye=n(r,"DIV",{class:!0});var $t=a(ye);m(pa.$$.fragment,$t),m_=l($t),ha=n($t,"P",{});var Qp=a(ha);__=i(Qp,`The bare BART Model outputting raw hidden-states without any specific head on top. This model inherits from `),ds=n(Qp,"A",{href:!0});var b1=a(ds);g_=i(b1,"TFPreTrainedModel"),b1.forEach(t),b_=i(Qp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qp.forEach(t),k_=l($t),ua=n($t,"P",{});var Rp=a(ua);v_=i(Rp,"This model is also a "),fa=n(Rp,"A",{href:!0,rel:!0});var k1=a(fa);y_=i(k1,"tf.keras.Model"),k1.forEach(t),T_=i(Rp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Rp.forEach(t),x_=l($t),m(Io.$$.fragment,$t),w_=l($t),$e=n($t,"DIV",{class:!0});var Et=a($e);m(ma.$$.fragment,Et),z_=l(Et),io=n(Et,"P",{});var Cs=a(io);B_=i(Cs,"The "),ls=n(Cs,"A",{href:!0});var v1=a(ls);F_=i(v1,"TFBartModel"),v1.forEach(t),q_=i(Cs," forward method, overrides the "),yd=n(Cs,"CODE",{});var y1=a(yd);$_=i(y1,"__call__"),y1.forEach(t),E_=i(Cs," special method."),Cs.forEach(t),M_=l(Et),m(No.$$.fragment,Et),j_=l(Et),Td=n(Et,"P",{});var T1=a(Td);C_=i(T1,"Example:"),T1.forEach(t),P_=l(Et),m(_a.$$.fragment,Et),Et.forEach(t),$t.forEach(t),cp=l(r),lo=n(r,"H2",{class:!0});var Hp=a(lo);Lo=n(Hp,"A",{id:!0,class:!0,href:!0});var x1=a(Lo);xd=n(x1,"SPAN",{});var w1=a(xd);m(ga.$$.fragment,w1),w1.forEach(t),x1.forEach(t),O_=l(Hp),wd=n(Hp,"SPAN",{});var z1=a(wd);A_=i(z1,"TFBartForConditionalGeneration"),z1.forEach(t),Hp.forEach(t),pp=l(r),Te=n(r,"DIV",{class:!0});var Mt=a(Te);m(ba.$$.fragment,Mt),S_=l(Mt),ka=n(Mt,"P",{});var Kp=a(ka);I_=i(Kp,`The BART Model with a language modeling head. Can be used for summarization. This model inherits from `),cs=n(Kp,"A",{href:!0});var B1=a(cs);N_=i(B1,"TFPreTrainedModel"),B1.forEach(t),L_=i(Kp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kp.forEach(t),D_=l(Mt),va=n(Mt,"P",{});var Vp=a(va);G_=i(Vp,"This model is also a "),ya=n(Vp,"A",{href:!0,rel:!0});var F1=a(ya);U_=i(F1,"tf.keras.Model"),F1.forEach(t),W_=i(Vp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vp.forEach(t),Q_=l(Mt),m(Do.$$.fragment,Mt),R_=l(Mt),P=n(Mt,"DIV",{class:!0});var W=a(P);m(Ta.$$.fragment,W),H_=l(W),co=n(W,"P",{});var Ps=a(co);K_=i(Ps,"The "),ps=n(Ps,"A",{href:!0});var q1=a(ps);V_=i(q1,"TFBartForConditionalGeneration"),q1.forEach(t),J_=i(Ps," forward method, overrides the "),zd=n(Ps,"CODE",{});var $1=a(zd);X_=i($1,"__call__"),$1.forEach(t),Z_=i(Ps," special method."),Ps.forEach(t),Y_=l(W),m(Go.$$.fragment,W),eg=l(W),Bd=n(W,"P",{});var E1=a(Bd);tg=i(E1,"Summarization example::"),E1.forEach(t),og=l(W),Fd=n(W,"BLOCKQUOTE",{});var M1=a(Fd);qd=n(M1,"BLOCKQUOTE",{});var j1=a(qd);$d=n(j1,"BLOCKQUOTE",{});var C1=a($d);Ed=n(C1,"P",{});var P1=a(Ed);ng=i(P1,"from transformers import BartTokenizer, TFBartForConditionalGeneration, BartConfig"),P1.forEach(t),C1.forEach(t),j1.forEach(t),M1.forEach(t),ag=l(W),Md=n(W,"BLOCKQUOTE",{});var O1=a(Md);jd=n(O1,"BLOCKQUOTE",{});var A1=a(jd);Cd=n(A1,"BLOCKQUOTE",{});var S1=a(Cd);Pd=n(S1,"P",{});var I1=a(Pd);rg=i(I1,`model = TFBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019)`),I1.forEach(t),S1.forEach(t),A1.forEach(t),O1.forEach(t),sg=l(W),Od=n(W,"BLOCKQUOTE",{});var N1=a(Od);Ad=n(N1,"BLOCKQUOTE",{});var L1=a(Ad);Sd=n(L1,"BLOCKQUOTE",{});var D1=a(Sd);Id=n(D1,"P",{});var G1=a(Id);ig=i(G1,`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018tf\u2019)`),G1.forEach(t),D1.forEach(t),L1.forEach(t),N1.forEach(t),dg=l(W),Nd=n(W,"BLOCKQUOTE",{});var U1=a(Nd);Ld=n(U1,"BLOCKQUOTE",{});var W1=a(Ld);xa=n(W1,"BLOCKQUOTE",{});var Jp=a(xa);Uo=n(Jp,"H1",{class:!0});var Xp=a(Uo);Wo=n(Xp,"A",{id:!0,class:!0,href:!0});var Q1=a(Wo);Dd=n(Q1,"SPAN",{});var R1=a(Dd);m(wa.$$.fragment,R1),R1.forEach(t),Q1.forEach(t),lg=l(Xp),Gd=n(Xp,"SPAN",{});var H1=a(Gd);cg=i(H1,"Generate Summary"),H1.forEach(t),Xp.forEach(t),pg=l(Jp),Ud=n(Jp,"P",{});var K1=a(Ud);hg=i(K1,`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),K1.forEach(t),Jp.forEach(t),W1.forEach(t),U1.forEach(t),ug=l(W),Wd=n(W,"P",{});var V1=a(Wd);fg=i(V1,"Mask filling example::"),V1.forEach(t),mg=l(W),Qd=n(W,"BLOCKQUOTE",{});var J1=a(Qd);Rd=n(J1,"BLOCKQUOTE",{});var X1=a(Rd);Hd=n(X1,"BLOCKQUOTE",{});var Z1=a(Hd);Kd=n(Z1,"P",{});var Y1=a(Kd);_g=i(Y1,`from transformers import BartTokenizer, TFBartForConditionalGeneration tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),Y1.forEach(t),Z1.forEach(t),X1.forEach(t),J1.forEach(t),gg=l(W),Vd=n(W,"BLOCKQUOTE",{});var ex=a(Vd);Jd=n(ex,"BLOCKQUOTE",{});var tx=a(Jd);za=n(tx,"BLOCKQUOTE",{});var Zp=a(za);Xd=n(Zp,"P",{});var ox=a(Xd);bg=i(ox,`model = 
TFBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018tf\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits probs = tf.nn.softmax(logits[0])`),ox.forEach(t),kg=l(Zp),Qo=n(Zp,"H1",{class:!0});var Yp=a(Qo);Ro=n(Yp,"A",{id:!0,class:!0,href:!0});var nx=a(Ro);Zd=n(nx,"SPAN",{});var ax=a(Zd);m(Ba.$$.fragment,ax),ax.forEach(t),nx.forEach(t),vg=l(Yp),Yd=n(Yp,"SPAN",{});var rx=a(Yd);yg=i(rx,"probs[5] is associated with the mask token"),rx.forEach(t),Yp.forEach(t),Zp.forEach(t),tx.forEach(t),ex.forEach(t),W.forEach(t),Mt.forEach(t),hp=l(r),po=n(r,"H2",{class:!0});var eh=a(po);Ho=n(eh,"A",{id:!0,class:!0,href:!0});var sx=a(Ho);el=n(sx,"SPAN",{});var ix=a(el);m(Fa.$$.fragment,ix),ix.forEach(t),sx.forEach(t),Tg=l(eh),tl=n(eh,"SPAN",{});var dx=a(tl);xg=i(dx,"FlaxBartModel"),dx.forEach(t),eh.forEach(t),up=l(r),Q=n(r,"DIV",{class:!0});var ce=a(Q);m(qa.$$.fragment,ce),wg=l(ce),$a=n(ce,"P",{});var th=a($a);zg=i(th,`The bare Bart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),hs=n(th,"A",{href:!0});var lx=a(hs);Bg=i(lx,"FlaxPreTrainedModel"),lx.forEach(t),Fg=i(th,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),th.forEach(t),qg=l(ce),Ea=n(ce,"P",{});var oh=a(Ea);$g=i(oh,"This model is also a Flax Linen "),Ma=n(oh,"A",{href:!0,rel:!0});var cx=a(Ma);Eg=i(cx,"flax.nn.Module"),cx.forEach(t),Mg=i(oh,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),oh.forEach(t),jg=l(ce),ol=n(ce,"P",{});var px=a(ol);Cg=i(px,"Finally, this model supports inherent JAX features such as:"),px.forEach(t),Pg=l(ce),st=n(ce,"UL",{});var ln=a(st);nl=n(ln,"LI",{});var hx=a(nl);ja=n(hx,"A",{href:!0,rel:!0});var ux=a(ja);Og=i(ux,"Just-In-Time (JIT) compilation"),ux.forEach(t),hx.forEach(t),Ag=l(ln),al=n(ln,"LI",{});var fx=a(al);Ca=n(fx,"A",{href:!0,rel:!0});var mx=a(Ca);Sg=i(mx,"Automatic Differentiation"),mx.forEach(t),fx.forEach(t),Ig=l(ln),rl=n(ln,"LI",{});var _x=a(rl);Pa=n(_x,"A",{href:!0,rel:!0});var gx=a(Pa);Ng=i(gx,"Vectorization"),gx.forEach(t),_x.forEach(t),Lg=l(ln),sl=n(ln,"LI",{});var bx=a(sl);Oa=n(bx,"A",{href:!0,rel:!0});var kx=a(Oa);Dg=i(kx,"Parallelization"),kx.forEach(t),bx.forEach(t),ln.forEach(t),Gg=l(ce),Ee=n(ce,"DIV",{class:!0});var jt=a(Ee);m(Aa.$$.fragment,jt),Ug=l(jt),ho=n(jt,"P",{});var Os=a(ho);Wg=i(Os,"The "),il=n(Os,"CODE",{});var vx=a(il);Qg=i(vx,"FlaxBartPreTrainedModel"),vx.forEach(t),Rg=i(Os," forward method, overrides the "),dl=n(Os,"CODE",{});var yx=a(dl);Hg=i(yx,"__call__"),yx.forEach(t),Kg=i(Os," special method."),Os.forEach(t),Vg=l(jt),m(Ko.$$.fragment,jt),Jg=l(jt),ll=n(jt,"P",{});var Tx=a(ll);Xg=i(Tx,"Example:"),Tx.forEach(t),Zg=l(jt),m(Sa.$$.fragment,jt),jt.forEach(t),Yg=l(ce),mt=n(ce,"DIV",{class:!0});var As=a(mt);m(Ia.$$.fragment,As),eb=l(As),cl=n(As,"P",{});var xx=a(cl);tb=i(xx,"Example:"),xx.forEach(t),ob=l(As),m(Na.$$.fragment,As),As.forEach(t),nb=l(ce),_t=n(ce,"DIV",{class:!0});var Ss=a(_t);m(La.$$.fragment,Ss),ab=l(Ss),pl=n(Ss,"P",{});var wx=a(pl);rb=i(wx,"Example:"),wx.forEach(t),sb=l(Ss),m(Da.$$.fragment,Ss),Ss.forEach(t),ce.forEach(t),fp=l(r),uo=n(r,"H2",{class:!0});var nh=a(uo);Vo=n(nh,"A",{id:!0,class:!0,href:!0});var zx=a(Vo);hl=n(zx,"SPAN",{});var 
Bx=a(hl);m(Ga.$$.fragment,Bx),Bx.forEach(t),zx.forEach(t),ib=l(nh),ul=n(nh,"SPAN",{});var Fx=a(ul);db=i(Fx,"FlaxBartForConditionalGeneration"),Fx.forEach(t),nh.forEach(t),mp=l(r),R=n(r,"DIV",{class:!0});var pe=a(R);m(Ua.$$.fragment,pe),lb=l(pe),Wa=n(pe,"P",{});var ah=a(Wa);cb=i(ah,`The BART Model with a language modeling head. Can be used for summarization. This model inherits from `),us=n(ah,"A",{href:!0});var qx=a(us);pb=i(qx,"FlaxPreTrainedModel"),qx.forEach(t),hb=i(ah,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ah.forEach(t),ub=l(pe),Qa=n(pe,"P",{});var rh=a(Qa);fb=i(rh,"This model is also a Flax Linen "),Ra=n(rh,"A",{href:!0,rel:!0});var $x=a(Ra);mb=i($x,"flax.nn.Module"),$x.forEach(t),_b=i(rh,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),rh.forEach(t),gb=l(pe),fl=n(pe,"P",{});var Ex=a(fl);bb=i(Ex,"Finally, this model supports inherent JAX features such as:"),Ex.forEach(t),kb=l(pe),it=n(pe,"UL",{});var cn=a(it);ml=n(cn,"LI",{});var Mx=a(ml);Ha=n(Mx,"A",{href:!0,rel:!0});var jx=a(Ha);vb=i(jx,"Just-In-Time (JIT) compilation"),jx.forEach(t),Mx.forEach(t),yb=l(cn),_l=n(cn,"LI",{});var Cx=a(_l);Ka=n(Cx,"A",{href:!0,rel:!0});var Px=a(Ka);Tb=i(Px,"Automatic Differentiation"),Px.forEach(t),Cx.forEach(t),xb=l(cn),gl=n(cn,"LI",{});var Ox=a(gl);Va=n(Ox,"A",{href:!0,rel:!0});var Ax=a(Va);wb=i(Ax,"Vectorization"),Ax.forEach(t),Ox.forEach(t),zb=l(cn),bl=n(cn,"LI",{});var Sx=a(bl);Ja=n(Sx,"A",{href:!0,rel:!0});var Ix=a(Ja);Bb=i(Ix,"Parallelization"),Ix.forEach(t),Sx.forEach(t),cn.forEach(t),Fb=l(pe),j=n(pe,"DIV",{class:!0});var A=a(j);m(Xa.$$.fragment,A),qb=l(A),fo=n(A,"P",{});var Is=a(fo);$b=i(Is,"The "),kl=n(Is,"CODE",{});var Nx=a(kl);Eb=i(Nx,"FlaxBartPreTrainedModel"),Nx.forEach(t),Mb=i(Is," forward method, overrides the "),vl=n(Is,"CODE",{});var Lx=a(vl);jb=i(Lx,"__call__"),Lx.forEach(t),Cb=i(Is," special method."),Is.forEach(t),Pb=l(A),m(Jo.$$.fragment,A),Ob=l(A),yl=n(A,"P",{});var Dx=a(yl);Ab=i(Dx,"Summarization example::"),Dx.forEach(t),Sb=l(A),Tl=n(A,"BLOCKQUOTE",{});var Gx=a(Tl);xl=n(Gx,"BLOCKQUOTE",{});var Ux=a(xl);wl=n(Ux,"BLOCKQUOTE",{});var Wx=a(wl);zl=n(Wx,"P",{});var Qx=a(zl);Ib=i(Qx,"from transformers import BartTokenizer, FlaxBartForConditionalGeneration"),Qx.forEach(t),Wx.forEach(t),Ux.forEach(t),Gx.forEach(t),Nb=l(A),Bl=n(A,"BLOCKQUOTE",{});var Rx=a(Bl);Fl=n(Rx,"BLOCKQUOTE",{});var Hx=a(Fl);ql=n(Hx,"BLOCKQUOTE",{});var Kx=a(ql);$l=n(Kx,"P",{});var Vx=a($l);Lb=i(Vx,`model = FlaxBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large-cnn\u2019) tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large-cnn\u2019)`),Vx.forEach(t),Kx.forEach(t),Hx.forEach(t),Rx.forEach(t),Db=l(A),El=n(A,"BLOCKQUOTE",{});var Jx=a(El);Ml=n(Jx,"BLOCKQUOTE",{});var Xx=a(Ml);jl=n(Xx,"BLOCKQUOTE",{});var Zx=a(jl);Cl=n(Zx,"P",{});var Yx=a(Cl);Gb=i(Yx,`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018jax\u2019)`),Yx.forEach(t),Zx.forEach(t),Xx.forEach(t),Jx.forEach(t),Ub=l(A),Pl=n(A,"BLOCKQUOTE",{});var ew=a(Pl);Ol=n(ew,"BLOCKQUOTE",{});var tw=a(Ol);Za=n(tw,"BLOCKQUOTE",{});var sh=a(Za);Xo=n(sh,"H1",{class:!0});var ih=a(Xo);Zo=n(ih,"A",{id:!0,class:!0,href:!0});var ow=a(Zo);Al=n(ow,"SPAN",{});var 
nw=a(Al);m(Ya.$$.fragment,nw),nw.forEach(t),ow.forEach(t),Wb=l(ih),Sl=n(ih,"SPAN",{});var aw=a(Sl);Qb=i(aw,"Generate Summary"),aw.forEach(t),ih.forEach(t),Rb=l(sh),Il=n(sh,"P",{});var rw=a(Il);Hb=i(rw,`summary_ids = model.generate(inputs[\u2018input_ids\u2019]).sequences print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))`),rw.forEach(t),sh.forEach(t),tw.forEach(t),ew.forEach(t),Kb=l(A),Nl=n(A,"P",{});var sw=a(Nl);Vb=i(sw,"Mask filling example::"),sw.forEach(t),Jb=l(A),Ll=n(A,"BLOCKQUOTE",{});var iw=a(Ll);Dl=n(iw,"BLOCKQUOTE",{});var dw=a(Dl);Gl=n(dw,"BLOCKQUOTE",{});var lw=a(Gl);Ul=n(lw,"P",{});var cw=a(Ul);Xb=i(cw,`from transformers import BartTokenizer, FlaxBartForConditionalGeneration tokenizer = BartTokenizer.from_pretrained(\u2018facebook/bart-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),cw.forEach(t),lw.forEach(t),dw.forEach(t),iw.forEach(t),Zb=l(A),Wl=n(A,"BLOCKQUOTE",{});var pw=a(Wl);Ql=n(pw,"BLOCKQUOTE",{});var hw=a(Ql);Rl=n(hw,"BLOCKQUOTE",{});var uw=a(Rl);Hl=n(uw,"P",{});var fw=a(Hl);Yb=i(fw,`model = FlaxBartForConditionalGeneration.from_pretrained(\u2018facebook/bart-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018jax\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),fw.forEach(t),uw.forEach(t),hw.forEach(t),pw.forEach(t),ek=l(A),Kl=n(A,"BLOCKQUOTE",{});var mw=a(Kl);Vl=n(mw,"BLOCKQUOTE",{});var _w=a(Vl);Jl=n(_w,"BLOCKQUOTE",{});var gw=a(Jl);Xl=n(gw,"P",{});var bw=a(Xl);tk=i(bw,`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item() probs = jax.nn.softmax(logits[0, masked_index], axis=0) values, predictions = jax.lax.top_k(probs)`),bw.forEach(t),gw.forEach(t),_w.forEach(t),mw.forEach(t),ok=l(A),Zl=n(A,"BLOCKQUOTE",{});var kw=a(Zl);Yl=n(kw,"BLOCKQUOTE",{});var vw=a(Yl);ec=n(vw,"BLOCKQUOTE",{});var yw=a(ec);tc=n(yw,"P",{});var Tw=a(tc);nk=i(Tw,"tokenizer.decode(predictions).split()"),Tw.forEach(t),yw.forEach(t),vw.forEach(t),kw.forEach(t),A.forEach(t),ak=l(pe),gt=n(pe,"DIV",{class:!0});var Ns=a(gt);m(er.$$.fragment,Ns),rk=l(Ns),oc=n(Ns,"P",{});var xw=a(oc);sk=i(xw,"Example:"),xw.forEach(t),ik=l(Ns),m(tr.$$.fragment,Ns),Ns.forEach(t),dk=l(pe),bt=n(pe,"DIV",{class:!0});var Ls=a(bt);m(or.$$.fragment,Ls),lk=l(Ls),nc=n(Ls,"P",{});var ww=a(nc);ck=i(ww,"Example:"),ww.forEach(t),pk=l(Ls),m(nr.$$.fragment,Ls),Ls.forEach(t),pe.forEach(t),_p=l(r),mo=n(r,"H2",{class:!0});var dh=a(mo);Yo=n(dh,"A",{id:!0,class:!0,href:!0});var zw=a(Yo);ac=n(zw,"SPAN",{});var Bw=a(ac);m(ar.$$.fragment,Bw),Bw.forEach(t),zw.forEach(t),hk=l(dh),rc=n(dh,"SPAN",{});var Fw=a(rc);uk=i(Fw,"FlaxBartForSequenceClassification"),Fw.forEach(t),dh.forEach(t),gp=l(r),N=n(r,"DIV",{class:!0});var ee=a(N);m(rr.$$.fragment,ee),fk=l(ee),sc=n(ee,"P",{});var qw=a(sc);mk=i(qw,`Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),qw.forEach(t),_k=l(ee),sr=n(ee,"P",{});var lh=a(sr);gk=i(lh,"This model inherits from "),fs=n(lh,"A",{href:!0});var $w=a(fs);bk=i($w,"FlaxPreTrainedModel"),$w.forEach(t),kk=i(lh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lh.forEach(t),vk=l(ee),ir=n(ee,"P",{});var ch=a(ir);yk=i(ch,"This model is also a Flax Linen "),dr=n(ch,"A",{href:!0,rel:!0});var Ew=a(dr);Tk=i(Ew,"flax.nn.Module"),Ew.forEach(t),xk=i(ch,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ch.forEach(t),wk=l(ee),ic=n(ee,"P",{});var Mw=a(ic);zk=i(Mw,"Finally, this model supports inherent JAX features such as:"),Mw.forEach(t),Bk=l(ee),dt=n(ee,"UL",{});var pn=a(dt);dc=n(pn,"LI",{});var jw=a(dc);lr=n(jw,"A",{href:!0,rel:!0});var Cw=a(lr);Fk=i(Cw,"Just-In-Time (JIT) compilation"),Cw.forEach(t),jw.forEach(t),qk=l(pn),lc=n(pn,"LI",{});var Pw=a(lc);cr=n(Pw,"A",{href:!0,rel:!0});var Ow=a(cr);$k=i(Ow,"Automatic Differentiation"),Ow.forEach(t),Pw.forEach(t),Ek=l(pn),cc=n(pn,"LI",{});var Aw=a(cc);pr=n(Aw,"A",{href:!0,rel:!0});var Sw=a(pr);Mk=i(Sw,"Vectorization"),Sw.forEach(t),Aw.forEach(t),jk=l(pn),pc=n(pn,"LI",{});var Iw=a(pc);hr=n(Iw,"A",{href:!0,rel:!0});var Nw=a(hr);Ck=i(Nw,"Parallelization"),Nw.forEach(t),Iw.forEach(t),pn.forEach(t),Pk=l(ee),Me=n(ee,"DIV",{class:!0});var Ct=a(Me);m(ur.$$.fragment,Ct),Ok=l(Ct),_o=n(Ct,"P",{});var Ds=a(_o);Ak=i(Ds,"The "),hc=n(Ds,"CODE",{});var Lw=a(hc);Sk=i(Lw,"FlaxBartPreTrainedModel"),Lw.forEach(t),Ik=i(Ds," forward method, overrides the "),uc=n(Ds,"CODE",{});var Dw=a(uc);Nk=i(Dw,"__call__"),Dw.forEach(t),Lk=i(Ds," special method."),Ds.forEach(t),Dk=l(Ct),m(en.$$.fragment,Ct),Gk=l(Ct),fc=n(Ct,"P",{});var Gw=a(fc);Uk=i(Gw,"Example:"),Gw.forEach(t),Wk=l(Ct),m(fr.$$.fragment,Ct),Ct.forEach(t),Qk=l(ee),kt=n(ee,"DIV",{class:!0});var Gs=a(kt);m(mr.$$.fragment,Gs),Rk=l(Gs),mc=n(Gs,"P",{});var Uw=a(mc);Hk=i(Uw,"Example:"),Uw.forEach(t),Kk=l(Gs),m(_r.$$.fragment,Gs),Gs.forEach(t),Vk=l(ee),vt=n(ee,"DIV",{class:!0});var Us=a(vt);m(gr.$$.fragment,Us),Jk=l(Us),_c=n(Us,"P",{});var Ww=a(_c);Xk=i(Ww,"Example:"),Ww.forEach(t),Zk=l(Us),m(br.$$.fragment,Us),Us.forEach(t),ee.forEach(t),bp=l(r),go=n(r,"H2",{class:!0});var ph=a(go);tn=n(ph,"A",{id:!0,class:!0,href:!0});var Qw=a(tn);gc=n(Qw,"SPAN",{});var Rw=a(gc);m(kr.$$.fragment,Rw),Rw.forEach(t),Qw.forEach(t),Yk=l(ph),bc=n(ph,"SPAN",{});var Hw=a(bc);ev=i(Hw,"FlaxBartForQuestionAnswering"),Hw.forEach(t),ph.forEach(t),kp=l(r),L=n(r,"DIV",{class:!0});var te=a(L);m(vr.$$.fragment,te),tv=l(te),bo=n(te,"P",{});var Ws=a(bo);ov=i(Ws,`BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),kc=n(Ws,"CODE",{});var Kw=a(kc);nv=i(Kw,"span start logits"),Kw.forEach(t),av=i(Ws," and "),vc=n(Ws,"CODE",{});var Vw=a(vc);rv=i(Vw,"span end logits"),Vw.forEach(t),sv=i(Ws,")."),Ws.forEach(t),iv=l(te),yr=n(te,"P",{});var hh=a(yr);dv=i(hh,"This model inherits from "),ms=n(hh,"A",{href:!0});var Jw=a(ms);lv=i(Jw,"FlaxPreTrainedModel"),Jw.forEach(t),cv=i(hh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hh.forEach(t),pv=l(te),Tr=n(te,"P",{});var uh=a(Tr);hv=i(uh,"This model is also a Flax Linen "),xr=n(uh,"A",{href:!0,rel:!0});var Xw=a(xr);uv=i(Xw,"flax.nn.Module"),Xw.forEach(t),fv=i(uh,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),uh.forEach(t),mv=l(te),yc=n(te,"P",{});var Zw=a(yc);_v=i(Zw,"Finally, this model supports inherent JAX features such as:"),Zw.forEach(t),gv=l(te),lt=n(te,"UL",{});var hn=a(lt);Tc=n(hn,"LI",{});var Yw=a(Tc);wr=n(Yw,"A",{href:!0,rel:!0});var e2=a(wr);bv=i(e2,"Just-In-Time (JIT) compilation"),e2.forEach(t),Yw.forEach(t),kv=l(hn),xc=n(hn,"LI",{});var t2=a(xc);zr=n(t2,"A",{href:!0,rel:!0});var o2=a(zr);vv=i(o2,"Automatic Differentiation"),o2.forEach(t),t2.forEach(t),yv=l(hn),wc=n(hn,"LI",{});var n2=a(wc);Br=n(n2,"A",{href:!0,rel:!0});var a2=a(Br);Tv=i(a2,"Vectorization"),a2.forEach(t),n2.forEach(t),xv=l(hn),zc=n(hn,"LI",{});var r2=a(zc);Fr=n(r2,"A",{href:!0,rel:!0});var s2=a(Fr);wv=i(s2,"Parallelization"),s2.forEach(t),r2.forEach(t),hn.forEach(t),zv=l(te),je=n(te,"DIV",{class:!0});var Pt=a(je);m(qr.$$.fragment,Pt),Bv=l(Pt),ko=n(Pt,"P",{});var Qs=a(ko);Fv=i(Qs,"The "),Bc=n(Qs,"CODE",{});var i2=a(Bc);qv=i(i2,"FlaxBartPreTrainedModel"),i2.forEach(t),$v=i(Qs," forward method, overrides the "),Fc=n(Qs,"CODE",{});var d2=a(Fc);Ev=i(d2,"__call__"),d2.forEach(t),Mv=i(Qs," special method."),Qs.forEach(t),jv=l(Pt),m(on.$$.fragment,Pt),Cv=l(Pt),qc=n(Pt,"P",{});var l2=a(qc);Pv=i(l2,"Example:"),l2.forEach(t),Ov=l(Pt),m($r.$$.fragment,Pt),Pt.forEach(t),Av=l(te),yt=n(te,"DIV",{class:!0});var Rs=a(yt);m(Er.$$.fragment,Rs),Sv=l(Rs),$c=n(Rs,"P",{});var c2=a($c);Iv=i(c2,"Example:"),c2.forEach(t),Nv=l(Rs),m(Mr.$$.fragment,Rs),Rs.forEach(t),Lv=l(te),Tt=n(te,"DIV",{class:!0});var Hs=a(Tt);m(jr.$$.fragment,Hs),Dv=l(Hs),Ec=n(Hs,"P",{});var p2=a(Ec);Gv=i(p2,"Example:"),p2.forEach(t),Uv=l(Hs),m(Cr.$$.fragment,Hs),Hs.forEach(t),te.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify($2)),c(x,"id","bart"),c(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(x,"href","#bart"),c(v,"class","relative group"),c(ue,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),c(ue,"rel","nofollow"),c(G,"id","overview"),c(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(G,"href","#overview"),c(K,"class","relative group"),c(V,"href","https://arxiv.org/abs/1910.13461"),c(V,"rel","nofollow"),c(Se,"href","https://huggingface.co/sshleifer"),c(Se,"rel","nofollow"),c(Ie,"href","https://github.com/pytorch/fairseq/tree/master/examples/bart"),c(Ie,"rel","nofollow"),c(ze,"id","examples"),c(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ze,"href","#examples"),c(ge,"class","relative 
group"),c(fn,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization/README.md"),c(fn,"rel","nofollow"),c(Nr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(mn,"href","https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904"),c(mn,"rel","nofollow"),c(_n,"href","https://huggingface.co/models?search=distilbart"),c(_n,"rel","nofollow"),c(gn,"href","https://arxiv.org/abs/2010.13002"),c(gn,"rel","nofollow"),c(yo,"id","implementation-notes"),c(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(yo,"href","#implementation-notes"),c(Lt,"class","relative group"),c(Lr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),c(Dr,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode"),c(Gr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel"),c(Wr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),c(To,"id","mask-filling"),c(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(To,"href","#mask-filling"),c(Wt,"class","relative group"),c(xo,"id","transformers.BartConfig"),c(xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xo,"href","#transformers.BartConfig"),c(Qt,"class","relative group"),c(Qr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel"),c(xn,"href","https://huggingface.co/facebook/bart-large"),c(xn,"rel","nofollow"),c(Rr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Hr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(be,"class","docstring"),c(wo,"id","transformers.BartTokenizer"),c(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wo,"href","#transformers.BartTokenizer"),c(Kt,"class","relative group"),c(Kr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizer"),c(Vr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Jr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(at,"class","docstring"),c(zo,"id","transformers.BartTokenizerFast"),c(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zo,"href","#transformers.BartTokenizerFast"),c(Vt,"class","relative group"),c(Xr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartTokenizerFast"),c(Zr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(Yr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(rt,"class","docstring"),c(Bo,"id","transformers.BartModel"),c(Bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bo,"href","#transformers.BartModel"),c(Jt,"class","relative group"),c(es,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Pn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Pn,"rel","nofollow"),c(ts,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartModel"),c(Fe,"class","docstring"),c(De,"class","docstring"),c(qo,"id","transformers.BartForConditionalGeneration"),c(qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qo,"href","#transformers.BartForConditionalGeneration"),c(Zt,"class","relative group"),c(os,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Dn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Dn,"rel","nofollow"),c(ns,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(Mo,"id","generate-summary"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mo,"href","#generate-summary"),c(Eo,"class","relative group"),c(M,"class","docstring"),c(Ge,"class","docstring"),c(jo,"id","transformers.BartForSequenceClassification"),c(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jo,"href","#transformers.BartForSequenceClassification"),c(eo,"class","relative group"),c(as,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Vn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Vn,"rel","nofollow"),c(rs,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForSequenceClassification"),c(Y,"class","docstring"),c(ke,"class","docstring"),c(Po,"id","transformers.BartForQuestionAnswering"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.BartForQuestionAnswering"),c(oo,"class","relative group"),c(ss,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(na,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(na,"rel","nofollow"),c(is,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForQuestionAnswering"),c(qe,"class","docstring"),c(ve,"class","docstring"),c(Ao,"id","transformers.BartForCausalLM"),c(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ao,"href","#transformers.BartForCausalLM"),c(ro,"class","relative group"),c(ft,"class","docstring"),c(ia,"class","docstring"),c(So,"id","transformers.TFBartModel"),c(So,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(So,"href","#transformers.TFBartModel"),c(so,"class","relative 
group"),c(ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(fa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(fa,"rel","nofollow"),c(ls,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartModel"),c($e,"class","docstring"),c(ye,"class","docstring"),c(Lo,"id","transformers.TFBartForConditionalGeneration"),c(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lo,"href","#transformers.TFBartForConditionalGeneration"),c(lo,"class","relative group"),c(cs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ya,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ya,"rel","nofollow"),c(ps,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),c(Wo,"id","generate-summary"),c(Wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wo,"href","#generate-summary"),c(Uo,"class","relative group"),c(Ro,"id","probs[5]-is-associated-with-the-mask-token"),c(Ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ro,"href","#probs[5]-is-associated-with-the-mask-token"),c(Qo,"class","relative group"),c(P,"class","docstring"),c(Te,"class","docstring"),c(Ho,"id","transformers.FlaxBartModel"),c(Ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ho,"href","#transformers.FlaxBartModel"),c(po,"class","relative group"),c(hs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ma,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Ma,"rel","nofollow"),c(ja,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(ja,"rel","nofollow"),c(Ca,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ca,"rel","nofollow"),c(Pa,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Pa,"rel","nofollow"),c(Oa,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Oa,"rel","nofollow"),c(Ee,"class","docstring"),c(mt,"class","docstring"),c(_t,"class","docstring"),c(Q,"class","docstring"),c(Vo,"id","transformers.FlaxBartForConditionalGeneration"),c(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vo,"href","#transformers.FlaxBartForConditionalGeneration"),c(uo,"class","relative 
group"),c(us,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ra,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Ra,"rel","nofollow"),c(Ha,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Ha,"rel","nofollow"),c(Ka,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ka,"rel","nofollow"),c(Va,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Va,"rel","nofollow"),c(Ja,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Ja,"rel","nofollow"),c(Zo,"id","generate-summary"),c(Zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Zo,"href","#generate-summary"),c(Xo,"class","relative group"),c(j,"class","docstring"),c(gt,"class","docstring"),c(bt,"class","docstring"),c(R,"class","docstring"),c(Yo,"id","transformers.FlaxBartForSequenceClassification"),c(Yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yo,"href","#transformers.FlaxBartForSequenceClassification"),c(mo,"class","relative group"),c(fs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(dr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(dr,"rel","nofollow"),c(lr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(lr,"rel","nofollow"),c(cr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(cr,"rel","nofollow"),c(pr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(pr,"rel","nofollow"),c(hr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(hr,"rel","nofollow"),c(Me,"class","docstring"),c(kt,"class","docstring"),c(vt,"class","docstring"),c(N,"class","docstring"),c(tn,"id","transformers.FlaxBartForQuestionAnswering"),c(tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(tn,"href","#transformers.FlaxBartForQuestionAnswering"),c(go,"class","relative 
group"),c(ms,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(xr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(xr,"rel","nofollow"),c(wr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(wr,"rel","nofollow"),c(zr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(zr,"rel","nofollow"),c(Br,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Br,"rel","nofollow"),c(Fr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Fr,"rel","nofollow"),c(je,"class","docstring"),c(yt,"class","docstring"),c(Tt,"class","docstring"),c(L,"class","docstring")},m(r,h){e(document.head,p),u(r,B,h),u(r,v,h),e(v,x),e(x,z),_(T,z,null),e(v,y),e(v,F),e(F,Re),u(r,xe,h),u(r,$,h),e($,Pe),e(Pe,he),e($,He),e($,ue),e(ue,fe),e($,Ke),u(r,Ue,h),u(r,K,h),e(K,G),e(G,Oe),_(oe,Oe,null),e(K,S),e(K,I),e(I,Ve),u(r,ie,h),u(r,de,h),e(de,Je),e(de,V),e(V,Xe),e(de,Ze),u(r,U,h),u(r,we,h),e(we,me),u(r,We,h),u(r,Z,h),e(Z,ne),e(ne,Ye),e(Z,et),e(Z,J),e(J,tt),e(Z,_e),e(Z,Ae),e(Ae,w),u(r,E,h),u(r,H,h),e(H,Ot),e(H,Se),e(Se,ae),e(H,At),e(H,Ie),e(Ie,St),e(H,re),u(r,le,h),u(r,ge,h),e(ge,ze),e(ze,se),_(Ne,se,null),e(ge,It),e(ge,Le),e(Le,Nt),u(r,Lc,h),u(r,ct,h),e(ct,un),e(un,fh),e(un,fn),e(fn,mh),e(un,_h),e(ct,gh),e(ct,ot),e(ot,bh),e(ot,Nr),e(Nr,kh),e(ot,vh),e(ot,Ks),e(Ks,yh),e(ot,Th),e(ot,mn),e(mn,xh),e(ot,wh),e(ct,zh),e(ct,vo),e(vo,_n),e(_n,Bh),e(vo,Fh),e(vo,gn),e(gn,qh),e(vo,$h),u(r,Dc,h),u(r,Lt,h),e(Lt,yo),e(yo,Vs),_(bn,Vs,null),e(Lt,Eh),e(Lt,Js),e(Js,Mh),u(r,Gc,h),u(r,Be,h),e(Be,nt),e(nt,jh),e(nt,Xs),e(Xs,Ch),e(nt,Ph),e(nt,Lr),e(Lr,Oh),e(nt,Ah),e(nt,Dr),e(Dr,Sh),e(nt,Ih),e(Be,Nh),e(Be,Dt),e(Dt,Lh),e(Dt,Gr),e(Gr,Dh),e(Dt,Gh),e(Dt,Zs),e(Zs,Uh),e(Dt,Wh),e(Be,Qh),e(Be,Gt),e(Gt,Rh),e(Gt,Ys),e(Ys,Hh),e(Gt,Kh),e(Gt,ei),e(ei,Vh),e(Gt,Jh),e(Be,Xh),e(Be,Ur),e(Ur,Wr),e(Wr,Zh),e(Ur,Yh),e(Be,eu),e(Be,Ut),e(Ut,tu),e(Ut,ti),e(ti,ou),e(Ut,nu),e(Ut,oi),e(oi,au),e(Ut,ru),u(r,Uc,h),u(r,Wt,h),e(Wt,To),e(To,ni),_(kn,ni,null),e(Wt,su),e(Wt,ai),e(ai,iu),u(r,Wc,h),u(r,pt,h),e(pt,du),e(pt,ri),e(ri,lu),e(pt,cu),e(pt,si),e(si,pu),e(pt,hu),u(r,Qc,h),_(vn,r,h),u(r,Rc,h),u(r,Qt,h),e(Qt,xo),e(xo,ii),_(yn,ii,null),e(Qt,uu),e(Qt,di),e(di,fu),u(r,Hc,h),u(r,be,h),_(Tn,be,null),e(be,mu),e(be,Rt),e(Rt,_u),e(Rt,Qr),e(Qr,gu),e(Rt,bu),e(Rt,xn),e(xn,ku),e(Rt,vu),e(be,yu),e(be,Ht),e(Ht,Tu),e(Ht,Rr),e(Rr,xu),e(Ht,wu),e(Ht,Hr),e(Hr,zu),e(Ht,Bu),e(be,Fu),e(be,li),e(li,qu),e(be,$u),_(wn,be,null),u(r,Kc,h),u(r,Kt,h),e(Kt,wo),e(wo,ci),_(zn,ci,null),e(Kt,Eu),e(Kt,pi),e(pi,Mu),u(r,Vc,h),u(r,at,h),_(Bn,at,null),e(at,ju),e(at,hi),e(hi,Cu),e(at,Pu),e(at,ht),e(ht,Kr),e(Kr,Ou),e(ht,Au),e(ht,Vr),e(Vr,Su),e(ht,Iu),e(ht,Jr),e(Jr,Nu),e(ht,Lu),u(r,Jc,h),u(r,Vt,h),e(Vt,zo),e(zo,ui),_(Fn,ui,null),e(Vt,Du),e(Vt,fi),e(fi,Gu),u(r,Xc,h),u(r,rt,h),_(qn,rt,null),e(rt,Uu),e(rt,$n),e($n,Wu),e($n,mi),e(mi,Qu),e($n,Ru),e(rt,Hu),e(rt,ut),e(ut,Xr),e(Xr,Ku),e(ut,Vu),e(ut,Zr),e(Zr,Ju),e(ut,Xu),e(ut,Yr),e(Yr,Zu),e(ut,Yu),u(r,Zc,h),u(r,Jt,h),e(Jt,Bo),e(Bo,_i),_(En,_i,null),e(Jt,ef),e(Jt,gi),e(gi,tf),u(r,Yc,h),u(r,De,h),_(Mn,De,null),e(De,of),e(De,jn),e(jn,nf),e(jn,es),e(es,af),e(jn,rf),e(De,sf),e(De,Cn),e(Cn,df),e(Cn,Pn),e(Pn,lf),e(Cn,cf),e(De,pf),e(De,Fe),_(On,Fe,null),e(Fe,hf),e(Fe,Xt),e(Xt,uf),e(Xt,ts),e(ts,ff),e(Xt,mf),e(Xt,bi),e(bi,_f),e(Xt,gf),e(Fe,bf),_(Fo,Fe,null),e(Fe,kf),e(Fe,ki),e(ki,vf),e(Fe,yf),_(An,Fe,null),u(r,ep,h),u(r,Zt,h),e(Zt,qo),e(qo,vi),_(Sn,vi,null),e(Zt,Tf),e(Zt,yi),e(yi,xf),u(r,tp,h),u(r,Ge,h),_(In,G
e,null),e(Ge,wf),e(Ge,Nn),e(Nn,zf),e(Nn,os),e(os,Bf),e(Nn,Ff),e(Ge,qf),e(Ge,Ln),e(Ln,$f),e(Ln,Dn),e(Dn,Ef),e(Ln,Mf),e(Ge,jf),e(Ge,M),_(Gn,M,null),e(M,Cf),e(M,Yt),e(Yt,Pf),e(Yt,ns),e(ns,Of),e(Yt,Af),e(Yt,Ti),e(Ti,Sf),e(Yt,If),e(M,Nf),_($o,M,null),e(M,Lf),e(M,xi),e(xi,Df),e(M,Gf),e(M,wi),e(wi,zi),e(zi,Bi),e(Bi,Fi),e(Fi,Uf),e(M,Wf),e(M,qi),e(qi,$i),e($i,Ei),e(Ei,Mi),e(Mi,Qf),e(M,Rf),e(M,ji),e(ji,Ci),e(Ci,Pi),e(Pi,Oi),e(Oi,Hf),e(M,Kf),e(M,Ai),e(Ai,Si),e(Si,Un),e(Un,Eo),e(Eo,Mo),e(Mo,Ii),_(Wn,Ii,null),e(Eo,Vf),e(Eo,Ni),e(Ni,Jf),e(Un,Xf),e(Un,Li),e(Li,Zf),e(M,Yf),e(M,Di),e(Di,em),e(M,tm),e(M,Gi),e(Gi,Ui),e(Ui,Wi),e(Wi,Qi),e(Qi,om),e(M,nm),e(M,Ri),e(Ri,Hi),e(Hi,Ki),e(Ki,Vi),e(Vi,am),e(M,rm),e(M,Ji),e(Ji,Xi),e(Xi,Zi),e(Zi,Yi),e(Yi,sm),e(M,im),e(M,ed),e(ed,td),e(td,od),e(od,nd),e(nd,dm),u(r,op,h),u(r,eo,h),e(eo,jo),e(jo,ad),_(Qn,ad,null),e(eo,lm),e(eo,rd),e(rd,cm),u(r,np,h),u(r,ke,h),_(Rn,ke,null),e(ke,pm),e(ke,sd),e(sd,hm),e(ke,um),e(ke,Hn),e(Hn,fm),e(Hn,as),e(as,mm),e(Hn,_m),e(ke,gm),e(ke,Kn),e(Kn,bm),e(Kn,Vn),e(Vn,km),e(Kn,vm),e(ke,ym),e(ke,Y),_(Jn,Y,null),e(Y,Tm),e(Y,to),e(to,xm),e(to,rs),e(rs,wm),e(to,zm),e(to,id),e(id,Bm),e(to,Fm),e(Y,qm),_(Co,Y,null),e(Y,$m),e(Y,dd),e(dd,Em),e(Y,Mm),_(Xn,Y,null),e(Y,jm),e(Y,ld),e(ld,Cm),e(Y,Pm),_(Zn,Y,null),u(r,ap,h),u(r,oo,h),e(oo,Po),e(Po,cd),_(Yn,cd,null),e(oo,Om),e(oo,pd),e(pd,Am),u(r,rp,h),u(r,ve,h),_(ea,ve,null),e(ve,Sm),e(ve,no),e(no,Im),e(no,hd),e(hd,Nm),e(no,Lm),e(no,ud),e(ud,Dm),e(no,Gm),e(ve,Um),e(ve,ta),e(ta,Wm),e(ta,ss),e(ss,Qm),e(ta,Rm),e(ve,Hm),e(ve,oa),e(oa,Km),e(oa,na),e(na,Vm),e(oa,Jm),e(ve,Xm),e(ve,qe),_(aa,qe,null),e(qe,Zm),e(qe,ao),e(ao,Ym),e(ao,is),e(is,e_),e(ao,t_),e(ao,fd),e(fd,o_),e(ao,n_),e(qe,a_),_(Oo,qe,null),e(qe,r_),e(qe,md),e(md,s_),e(qe,i_),_(ra,qe,null),u(r,sp,h),u(r,ro,h),e(ro,Ao),e(Ao,_d),_(sa,_d,null),e(ro,d_),e(ro,gd),e(gd,l_),u(r,ip,h),u(r,ia,h),e(ia,ft),_(da,ft,null),e(ft,c_),e(ft,bd),e(bd,p_),e(ft,h_),_(la,ft,null),u(r,dp,h),u(r,so,h),e(so,So),e(So,kd),_(ca,kd,null),e(so,u_),e(so,vd),e(vd,f_),u(r,lp,h),u(r,ye,h),_(pa,ye,null),e(ye,m_),e(ye,ha),e(ha,__),e(ha,ds),e(ds,g_),e(ha,b_),e(ye,k_),e(ye,ua),e(ua,v_),e(ua,fa),e(fa,y_),e(ua,T_),e(ye,x_),_(Io,ye,null),e(ye,w_),e(ye,$e),_(ma,$e,null),e($e,z_),e($e,io),e(io,B_),e(io,ls),e(ls,F_),e(io,q_),e(io,yd),e(yd,$_),e(io,E_),e($e,M_),_(No,$e,null),e($e,j_),e($e,Td),e(Td,C_),e($e,P_),_(_a,$e,null),u(r,cp,h),u(r,lo,h),e(lo,Lo),e(Lo,xd),_(ga,xd,null),e(lo,O_),e(lo,wd),e(wd,A_),u(r,pp,h),u(r,Te,h),_(ba,Te,null),e(Te,S_),e(Te,ka),e(ka,I_),e(ka,cs),e(cs,N_),e(ka,L_),e(Te,D_),e(Te,va),e(va,G_),e(va,ya),e(ya,U_),e(va,W_),e(Te,Q_),_(Do,Te,null),e(Te,R_),e(Te,P),_(Ta,P,null),e(P,H_),e(P,co),e(co,K_),e(co,ps),e(ps,V_),e(co,J_),e(co,zd),e(zd,X_),e(co,Z_),e(P,Y_),_(Go,P,null),e(P,eg),e(P,Bd),e(Bd,tg),e(P,og),e(P,Fd),e(Fd,qd),e(qd,$d),e($d,Ed),e(Ed,ng),e(P,ag),e(P,Md),e(Md,jd),e(jd,Cd),e(Cd,Pd),e(Pd,rg),e(P,sg),e(P,Od),e(Od,Ad),e(Ad,Sd),e(Sd,Id),e(Id,ig),e(P,dg),e(P,Nd),e(Nd,Ld),e(Ld,xa),e(xa,Uo),e(Uo,Wo),e(Wo,Dd),_(wa,Dd,null),e(Uo,lg),e(Uo,Gd),e(Gd,cg),e(xa,pg),e(xa,Ud),e(Ud,hg),e(P,ug),e(P,Wd),e(Wd,fg),e(P,mg),e(P,Qd),e(Qd,Rd),e(Rd,Hd),e(Hd,Kd),e(Kd,_g),e(P,gg),e(P,Vd),e(Vd,Jd),e(Jd,za),e(za,Xd),e(Xd,bg),e(za,kg),e(za,Qo),e(Qo,Ro),e(Ro,Zd),_(Ba,Zd,null),e(Qo,vg),e(Qo,Yd),e(Yd,yg),u(r,hp,h),u(r,po,h),e(po,Ho),e(Ho,el),_(Fa,el,null),e(po,Tg),e(po,tl),e(tl,xg),u(r,up,h),u(r,Q,h),_(qa,Q,null),e(Q,wg),e(Q,$a),e($a,zg),e($a,hs),e(hs,Bg),e($a,Fg),e(Q,qg),e(Q,Ea),e(Ea,$g),e(Ea,Ma),e(Ma,Eg),e(Ea,Mg),e(Q,jg),e(Q,ol),e(ol,Cg),e(Q,Pg),e(Q,st),e(st,nl),e(nl,ja),e(ja,Og),e(st,Ag),e(st,al),e(al,Ca),e(Ca,S
g),e(st,Ig),e(st,rl),e(rl,Pa),e(Pa,Ng),e(st,Lg),e(st,sl),e(sl,Oa),e(Oa,Dg),e(Q,Gg),e(Q,Ee),_(Aa,Ee,null),e(Ee,Ug),e(Ee,ho),e(ho,Wg),e(ho,il),e(il,Qg),e(ho,Rg),e(ho,dl),e(dl,Hg),e(ho,Kg),e(Ee,Vg),_(Ko,Ee,null),e(Ee,Jg),e(Ee,ll),e(ll,Xg),e(Ee,Zg),_(Sa,Ee,null),e(Q,Yg),e(Q,mt),_(Ia,mt,null),e(mt,eb),e(mt,cl),e(cl,tb),e(mt,ob),_(Na,mt,null),e(Q,nb),e(Q,_t),_(La,_t,null),e(_t,ab),e(_t,pl),e(pl,rb),e(_t,sb),_(Da,_t,null),u(r,fp,h),u(r,uo,h),e(uo,Vo),e(Vo,hl),_(Ga,hl,null),e(uo,ib),e(uo,ul),e(ul,db),u(r,mp,h),u(r,R,h),_(Ua,R,null),e(R,lb),e(R,Wa),e(Wa,cb),e(Wa,us),e(us,pb),e(Wa,hb),e(R,ub),e(R,Qa),e(Qa,fb),e(Qa,Ra),e(Ra,mb),e(Qa,_b),e(R,gb),e(R,fl),e(fl,bb),e(R,kb),e(R,it),e(it,ml),e(ml,Ha),e(Ha,vb),e(it,yb),e(it,_l),e(_l,Ka),e(Ka,Tb),e(it,xb),e(it,gl),e(gl,Va),e(Va,wb),e(it,zb),e(it,bl),e(bl,Ja),e(Ja,Bb),e(R,Fb),e(R,j),_(Xa,j,null),e(j,qb),e(j,fo),e(fo,$b),e(fo,kl),e(kl,Eb),e(fo,Mb),e(fo,vl),e(vl,jb),e(fo,Cb),e(j,Pb),_(Jo,j,null),e(j,Ob),e(j,yl),e(yl,Ab),e(j,Sb),e(j,Tl),e(Tl,xl),e(xl,wl),e(wl,zl),e(zl,Ib),e(j,Nb),e(j,Bl),e(Bl,Fl),e(Fl,ql),e(ql,$l),e($l,Lb),e(j,Db),e(j,El),e(El,Ml),e(Ml,jl),e(jl,Cl),e(Cl,Gb),e(j,Ub),e(j,Pl),e(Pl,Ol),e(Ol,Za),e(Za,Xo),e(Xo,Zo),e(Zo,Al),_(Ya,Al,null),e(Xo,Wb),e(Xo,Sl),e(Sl,Qb),e(Za,Rb),e(Za,Il),e(Il,Hb),e(j,Kb),e(j,Nl),e(Nl,Vb),e(j,Jb),e(j,Ll),e(Ll,Dl),e(Dl,Gl),e(Gl,Ul),e(Ul,Xb),e(j,Zb),e(j,Wl),e(Wl,Ql),e(Ql,Rl),e(Rl,Hl),e(Hl,Yb),e(j,ek),e(j,Kl),e(Kl,Vl),e(Vl,Jl),e(Jl,Xl),e(Xl,tk),e(j,ok),e(j,Zl),e(Zl,Yl),e(Yl,ec),e(ec,tc),e(tc,nk),e(R,ak),e(R,gt),_(er,gt,null),e(gt,rk),e(gt,oc),e(oc,sk),e(gt,ik),_(tr,gt,null),e(R,dk),e(R,bt),_(or,bt,null),e(bt,lk),e(bt,nc),e(nc,ck),e(bt,pk),_(nr,bt,null),u(r,_p,h),u(r,mo,h),e(mo,Yo),e(Yo,ac),_(ar,ac,null),e(mo,hk),e(mo,rc),e(rc,uk),u(r,gp,h),u(r,N,h),_(rr,N,null),e(N,fk),e(N,sc),e(sc,mk),e(N,_k),e(N,sr),e(sr,gk),e(sr,fs),e(fs,bk),e(sr,kk),e(N,vk),e(N,ir),e(ir,yk),e(ir,dr),e(dr,Tk),e(ir,xk),e(N,wk),e(N,ic),e(ic,zk),e(N,Bk),e(N,dt),e(dt,dc),e(dc,lr),e(lr,Fk),e(dt,qk),e(dt,lc),e(lc,cr),e(cr,$k),e(dt,Ek),e(dt,cc),e(cc,pr),e(pr,Mk),e(dt,jk),e(dt,pc),e(pc,hr),e(hr,Ck),e(N,Pk),e(N,Me),_(ur,Me,null),e(Me,Ok),e(Me,_o),e(_o,Ak),e(_o,hc),e(hc,Sk),e(_o,Ik),e(_o,uc),e(uc,Nk),e(_o,Lk),e(Me,Dk),_(en,Me,null),e(Me,Gk),e(Me,fc),e(fc,Uk),e(Me,Wk),_(fr,Me,null),e(N,Qk),e(N,kt),_(mr,kt,null),e(kt,Rk),e(kt,mc),e(mc,Hk),e(kt,Kk),_(_r,kt,null),e(N,Vk),e(N,vt),_(gr,vt,null),e(vt,Jk),e(vt,_c),e(_c,Xk),e(vt,Zk),_(br,vt,null),u(r,bp,h),u(r,go,h),e(go,tn),e(tn,gc),_(kr,gc,null),e(go,Yk),e(go,bc),e(bc,ev),u(r,kp,h),u(r,L,h),_(vr,L,null),e(L,tv),e(L,bo),e(bo,ov),e(bo,kc),e(kc,nv),e(bo,av),e(bo,vc),e(vc,rv),e(bo,sv),e(L,iv),e(L,yr),e(yr,dv),e(yr,ms),e(ms,lv),e(yr,cv),e(L,pv),e(L,Tr),e(Tr,hv),e(Tr,xr),e(xr,uv),e(Tr,fv),e(L,mv),e(L,yc),e(yc,_v),e(L,gv),e(L,lt),e(lt,Tc),e(Tc,wr),e(wr,bv),e(lt,kv),e(lt,xc),e(xc,zr),e(zr,vv),e(lt,yv),e(lt,wc),e(wc,Br),e(Br,Tv),e(lt,xv),e(lt,zc),e(zc,Fr),e(Fr,wv),e(L,zv),e(L,je),_(qr,je,null),e(je,Bv),e(je,ko),e(ko,Fv),e(ko,Bc),e(Bc,qv),e(ko,$v),e(ko,Fc),e(Fc,Ev),e(ko,Mv),e(je,jv),_(on,je,null),e(je,Cv),e(je,qc),e(qc,Pv),e(je,Ov),_($r,je,null),e(L,Av),e(L,yt),_(Er,yt,null),e(yt,Sv),e(yt,$c),e($c,Iv),e(yt,Nv),_(Mr,yt,null),e(L,Lv),e(L,Tt),_(jr,Tt,null),e(Tt,Dv),e(Tt,Ec),e(Ec,Gv),e(Tt,Uv),_(Cr,Tt,null),vp=!0},p(r,[h]){const Pr={};h&2&&(Pr.$$scope={dirty:h,ctx:r}),Fo.$set(Pr);const Mc={};h&2&&(Mc.$$scope={dirty:h,ctx:r}),$o.$set(Mc);const jc={};h&2&&(jc.$$scope={dirty:h,ctx:r}),Co.$set(jc);const Cc={};h&2&&(Cc.$$scope={dirty:h,ctx:r}),Oo.$set(Cc);const nn={};h&2&&(nn.$$scope={dirty:h,ctx:r}),Io.$set(nn);const 
Pc={};h&2&&(Pc.$$scope={dirty:h,ctx:r}),No.$set(Pc);const Oc={};h&2&&(Oc.$$scope={dirty:h,ctx:r}),Do.$set(Oc);const Or={};h&2&&(Or.$$scope={dirty:h,ctx:r}),Go.$set(Or);const Ac={};h&2&&(Ac.$$scope={dirty:h,ctx:r}),Ko.$set(Ac);const Sc={};h&2&&(Sc.$$scope={dirty:h,ctx:r}),Jo.$set(Sc);const Ic={};h&2&&(Ic.$$scope={dirty:h,ctx:r}),en.$set(Ic);const Ar={};h&2&&(Ar.$$scope={dirty:h,ctx:r}),on.$set(Ar)},i(r){vp||(g(T.$$.fragment,r),g(oe.$$.fragment,r),g(Ne.$$.fragment,r),g(bn.$$.fragment,r),g(kn.$$.fragment,r),g(vn.$$.fragment,r),g(yn.$$.fragment,r),g(Tn.$$.fragment,r),g(wn.$$.fragment,r),g(zn.$$.fragment,r),g(Bn.$$.fragment,r),g(Fn.$$.fragment,r),g(qn.$$.fragment,r),g(En.$$.fragment,r),g(Mn.$$.fragment,r),g(On.$$.fragment,r),g(Fo.$$.fragment,r),g(An.$$.fragment,r),g(Sn.$$.fragment,r),g(In.$$.fragment,r),g(Gn.$$.fragment,r),g($o.$$.fragment,r),g(Wn.$$.fragment,r),g(Qn.$$.fragment,r),g(Rn.$$.fragment,r),g(Jn.$$.fragment,r),g(Co.$$.fragment,r),g(Xn.$$.fragment,r),g(Zn.$$.fragment,r),g(Yn.$$.fragment,r),g(ea.$$.fragment,r),g(aa.$$.fragment,r),g(Oo.$$.fragment,r),g(ra.$$.fragment,r),g(sa.$$.fragment,r),g(da.$$.fragment,r),g(la.$$.fragment,r),g(ca.$$.fragment,r),g(pa.$$.fragment,r),g(Io.$$.fragment,r),g(ma.$$.fragment,r),g(No.$$.fragment,r),g(_a.$$.fragment,r),g(ga.$$.fragment,r),g(ba.$$.fragment,r),g(Do.$$.fragment,r),g(Ta.$$.fragment,r),g(Go.$$.fragment,r),g(wa.$$.fragment,r),g(Ba.$$.fragment,r),g(Fa.$$.fragment,r),g(qa.$$.fragment,r),g(Aa.$$.fragment,r),g(Ko.$$.fragment,r),g(Sa.$$.fragment,r),g(Ia.$$.fragment,r),g(Na.$$.fragment,r),g(La.$$.fragment,r),g(Da.$$.fragment,r),g(Ga.$$.fragment,r),g(Ua.$$.fragment,r),g(Xa.$$.fragment,r),g(Jo.$$.fragment,r),g(Ya.$$.fragment,r),g(er.$$.fragment,r),g(tr.$$.fragment,r),g(or.$$.fragment,r),g(nr.$$.fragment,r),g(ar.$$.fragment,r),g(rr.$$.fragment,r),g(ur.$$.fragment,r),g(en.$$.fragment,r),g(fr.$$.fragment,r),g(mr.$$.fragment,r),g(_r.$$.fragment,r),g(gr.$$.fragment,r),g(br.$$.fragment,r),g(kr.$$.fragment,r),g(vr.$$.fragment,r),g(qr.$$.fragment,r),g(on.$$.fragment,r),g($r.$$.fragment,r),g(Er.$$.fragment,r),g(Mr.$$.fragment,r),g(jr.$$.fragment,r),g(Cr.$$.fragment,r),vp=!0)},o(r){b(T.$$.fragment,r),b(oe.$$.fragment,r),b(Ne.$$.fragment,r),b(bn.$$.fragment,r),b(kn.$$.fragment,r),b(vn.$$.fragment,r),b(yn.$$.fragment,r),b(Tn.$$.fragment,r),b(wn.$$.fragment,r),b(zn.$$.fragment,r),b(Bn.$$.fragment,r),b(Fn.$$.fragment,r),b(qn.$$.fragment,r),b(En.$$.fragment,r),b(Mn.$$.fragment,r),b(On.$$.fragment,r),b(Fo.$$.fragment,r),b(An.$$.fragment,r),b(Sn.$$.fragment,r),b(In.$$.fragment,r),b(Gn.$$.fragment,r),b($o.$$.fragment,r),b(Wn.$$.fragment,r),b(Qn.$$.fragment,r),b(Rn.$$.fragment,r),b(Jn.$$.fragment,r),b(Co.$$.fragment,r),b(Xn.$$.fragment,r),b(Zn.$$.fragment,r),b(Yn.$$.fragment,r),b(ea.$$.fragment,r),b(aa.$$.fragment,r),b(Oo.$$.fragment,r),b(ra.$$.fragment,r),b(sa.$$.fragment,r),b(da.$$.fragment,r),b(la.$$.fragment,r),b(ca.$$.fragment,r),b(pa.$$.fragment,r),b(Io.$$.fragment,r),b(ma.$$.fragment,r),b(No.$$.fragment,r),b(_a.$$.fragment,r),b(ga.$$.fragment,r),b(ba.$$.fragment,r),b(Do.$$.fragment,r),b(Ta.$$.fragment,r),b(Go.$$.fragment,r),b(wa.$$.fragment,r),b(Ba.$$.fragment,r),b(Fa.$$.fragment,r),b(qa.$$.fragment,r),b(Aa.$$.fragment,r),b(Ko.$$.fragment,r),b(Sa.$$.fragment,r),b(Ia.$$.fragment,r),b(Na.$$.fragment,r),b(La.$$.fragment,r),b(Da.$$.fragment,r),b(Ga.$$.fragment,r),b(Ua.$$.fragment,r),b(Xa.$$.fragment,r),b(Jo.$$.fragment,r),b(Ya.$$.fragment,r),b(er.$$.fragment,r),b(tr.$$.fragment,r),b(or.$$.fragment,r),b(nr.$$.fragment,r),b(ar.$$.fragment,r),b(rr.$$.fragment,r),b(ur.$$.fragm
ent,r),b(en.$$.fragment,r),b(fr.$$.fragment,r),b(mr.$$.fragment,r),b(_r.$$.fragment,r),b(gr.$$.fragment,r),b(br.$$.fragment,r),b(kr.$$.fragment,r),b(vr.$$.fragment,r),b(qr.$$.fragment,r),b(on.$$.fragment,r),b($r.$$.fragment,r),b(Er.$$.fragment,r),b(Mr.$$.fragment,r),b(jr.$$.fragment,r),b(Cr.$$.fragment,r),vp=!1},d(r){t(p),r&&t(B),r&&t(v),k(T),r&&t(xe),r&&t($),r&&t(Ue),r&&t(K),k(oe),r&&t(ie),r&&t(de),r&&t(U),r&&t(we),r&&t(We),r&&t(Z),r&&t(E),r&&t(H),r&&t(le),r&&t(ge),k(Ne),r&&t(Lc),r&&t(ct),r&&t(Dc),r&&t(Lt),k(bn),r&&t(Gc),r&&t(Be),r&&t(Uc),r&&t(Wt),k(kn),r&&t(Wc),r&&t(pt),r&&t(Qc),k(vn,r),r&&t(Rc),r&&t(Qt),k(yn),r&&t(Hc),r&&t(be),k(Tn),k(wn),r&&t(Kc),r&&t(Kt),k(zn),r&&t(Vc),r&&t(at),k(Bn),r&&t(Jc),r&&t(Vt),k(Fn),r&&t(Xc),r&&t(rt),k(qn),r&&t(Zc),r&&t(Jt),k(En),r&&t(Yc),r&&t(De),k(Mn),k(On),k(Fo),k(An),r&&t(ep),r&&t(Zt),k(Sn),r&&t(tp),r&&t(Ge),k(In),k(Gn),k($o),k(Wn),r&&t(op),r&&t(eo),k(Qn),r&&t(np),r&&t(ke),k(Rn),k(Jn),k(Co),k(Xn),k(Zn),r&&t(ap),r&&t(oo),k(Yn),r&&t(rp),r&&t(ve),k(ea),k(aa),k(Oo),k(ra),r&&t(sp),r&&t(ro),k(sa),r&&t(ip),r&&t(ia),k(da),k(la),r&&t(dp),r&&t(so),k(ca),r&&t(lp),r&&t(ye),k(pa),k(Io),k(ma),k(No),k(_a),r&&t(cp),r&&t(lo),k(ga),r&&t(pp),r&&t(Te),k(ba),k(Do),k(Ta),k(Go),k(wa),k(Ba),r&&t(hp),r&&t(po),k(Fa),r&&t(up),r&&t(Q),k(qa),k(Aa),k(Ko),k(Sa),k(Ia),k(Na),k(La),k(Da),r&&t(fp),r&&t(uo),k(Ga),r&&t(mp),r&&t(R),k(Ua),k(Xa),k(Jo),k(Ya),k(er),k(tr),k(or),k(nr),r&&t(_p),r&&t(mo),k(ar),r&&t(gp),r&&t(N),k(rr),k(ur),k(en),k(fr),k(mr),k(_r),k(gr),k(br),r&&t(bp),r&&t(go),k(kr),r&&t(kp),r&&t(L),k(vr),k(qr),k(on),k($r),k(Er),k(Mr),k(jr),k(Cr)}}}const $2={local:"bart",sections:[{local:"overview",sections:[{local:"examples",title:"Examples"}],title:"Overview"},{local:"implementation-notes",title:"Implementation Notes"},{local:"mask-filling",title:"Mask Filling"},{local:"transformers.BartConfig",title:"BartConfig"},{local:"transformers.BartTokenizer",title:"BartTokenizer"},{local:"transformers.BartTokenizerFast",title:"BartTokenizerFast"},{local:"transformers.BartModel",title:"BartModel"},{local:"transformers.BartForConditionalGeneration",title:"BartForConditionalGeneration"},{local:"transformers.BartForSequenceClassification",title:"BartForSequenceClassification"},{local:"transformers.BartForQuestionAnswering",title:"BartForQuestionAnswering"},{local:"transformers.BartForCausalLM",title:"BartForCausalLM"},{local:"transformers.TFBartModel",title:"TFBartModel"},{local:"transformers.TFBartForConditionalGeneration",title:"TFBartForConditionalGeneration"},{local:"transformers.FlaxBartModel",title:"FlaxBartModel"},{local:"transformers.FlaxBartForConditionalGeneration",title:"FlaxBartForConditionalGeneration"},{local:"transformers.FlaxBartForSequenceClassification",title:"FlaxBartForSequenceClassification"},{local:"transformers.FlaxBartForQuestionAnswering",title:"FlaxBartForQuestionAnswering"}],title:"BART"};function E2(C,p,B){let{fw:v}=p;return C.$$set=x=>{"fw"in x&&B(0,v=x.fw)},[v]}class S2 extends h2{constructor(p){super();u2(this,p,E2,q2,f2,{fw:0})}}export{S2 as default,$2 as metadata};
9,937
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/xlmroberta.mdx-d158a4a7.js
import{S as QT,i as HT,s as UT,e as s,k as l,w as v,t as o,L as VT,c as r,d as t,m as d,a as i,x as T,h as n,b as c,J as e,g as u,y as w,q as y,o as M,B as $}from"../../chunks/vendor-b1433968.js";import{T as Ae}from"../../chunks/Tip-c3840994.js";import{D as G}from"../../chunks/Docstring-ff504c58.js";import{C as ht}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Xe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function GT(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function KT(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function JT(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function ZT(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function YT(N){let 
h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function e1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function t1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function o1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be;return{c(){h=s("p"),F=o("TF 2.0 models accepts two formats as inputs:"),m=l(),_=s("ul"),k=s("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),E=s("li"),pe=o("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=s("p"),Z=o("This second option is useful when using "),D=s("code"),ee=o("tf.keras.Model.fit"),he=o(` method which currently requires having all the tensors in the first argument of the model call function: `),S=s("code"),ue=o("model(inputs)"),ie=o("."),U=l(),P=s("p"),te=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),L=s("ul"),C=s("li"),se=o("a single Tensor with "),B=s("code"),le=o("input_ids"),re=o(" only and nothing else: "),O=s("code"),fe=o("model(inputs_ids)"),de=l(),q=s("li"),me=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s("code"),Y=o("model([input_ids, attention_mask])"),oe=o(" or "),Q=s("code"),ae=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),A=s("li"),_e=o(`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=s("code"),be=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var R=i(h);F=n(R,"TF 2.0 models accepts two formats as inputs:"),R.forEach(t),m=d(p),_=r(p,"UL",{});var J=i(_);k=r(J,"LI",{});var we=i(k);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),g=d(J),E=r(J,"LI",{});var ce=i(E);pe=n(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),J.forEach(t),V=d(p),z=r(p,"P",{});var j=i(z);Z=n(j,"This second option is useful when using "),D=r(j,"CODE",{});var ve=i(D);ee=n(ve,"tf.keras.Model.fit"),ve.forEach(t),he=n(j,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(j,"CODE",{});var Me=i(S);ue=n(Me,"model(inputs)"),Me.forEach(t),ie=n(j,"."),j.forEach(t),U=d(p),P=r(p,"P",{});var $e=i(P);te=n($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),K=d(p),L=r(p,"UL",{});var x=i(L);C=r(x,"LI",{});var H=i(C);se=n(H,"a single Tensor with "),B=r(H,"CODE",{});var Fe=i(B);le=n(Fe,"input_ids"),Fe.forEach(t),re=n(H," only and nothing else: "),O=r(H,"CODE",{});var ye=i(O);fe=n(ye,"model(inputs_ids)"),ye.forEach(t),H.forEach(t),de=d(x),q=r(x,"LI",{});var X=i(q);me=n(X,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r(X,"CODE",{});var ke=i(W);Y=n(ke,"model([input_ids, attention_mask])"),ke.forEach(t),oe=n(X," or "),Q=r(X,"CODE",{});var Te=i(Q);ae=n(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(t),X.forEach(t),ge=d(x),A=r(x,"LI",{});var ne=i(A);_e=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=r(ne,"CODE",{});var Re=i(I);be=n(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),ne.forEach(t),x.forEach(t)},m(p,R){u(p,h,R),e(h,F),u(p,m,R),u(p,_,R),e(_,k),e(k,b),e(_,g),e(_,E),e(E,pe),u(p,V,R),u(p,z,R),e(z,Z),e(z,D),e(D,ee),e(z,he),e(z,S),e(S,ue),e(z,ie),u(p,U,R),u(p,P,R),e(P,te),u(p,K,R),u(p,L,R),e(L,C),e(C,se),e(C,B),e(B,le),e(C,re),e(C,O),e(O,fe),e(L,de),e(L,q),e(q,me),e(q,W),e(W,Y),e(q,oe),e(q,Q),e(Q,ae),e(L,ge),e(L,A),e(A,_e),e(A,I),e(I,be)},d(p){p&&t(h),p&&t(m),p&&t(_),p&&t(V),p&&t(z),p&&t(U),p&&t(P),p&&t(K),p&&t(L)}}}function n1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function s1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be;return{c(){h=s("p"),F=o("TF 2.0 models accepts two formats as inputs:"),m=l(),_=s("ul"),k=s("li"),b=o("having all inputs as keyword arguments (like PyTorch 
models), or"),g=l(),E=s("li"),pe=o("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=s("p"),Z=o("This second option is useful when using "),D=s("code"),ee=o("tf.keras.Model.fit"),he=o(` method which currently requires having all the tensors in the first argument of the model call function: `),S=s("code"),ue=o("model(inputs)"),ie=o("."),U=l(),P=s("p"),te=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),L=s("ul"),C=s("li"),se=o("a single Tensor with "),B=s("code"),le=o("input_ids"),re=o(" only and nothing else: "),O=s("code"),fe=o("model(inputs_ids)"),de=l(),q=s("li"),me=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s("code"),Y=o("model([input_ids, attention_mask])"),oe=o(" or "),Q=s("code"),ae=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),A=s("li"),_e=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=s("code"),be=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var R=i(h);F=n(R,"TF 2.0 models accepts two formats as inputs:"),R.forEach(t),m=d(p),_=r(p,"UL",{});var J=i(_);k=r(J,"LI",{});var we=i(k);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),g=d(J),E=r(J,"LI",{});var ce=i(E);pe=n(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),J.forEach(t),V=d(p),z=r(p,"P",{});var j=i(z);Z=n(j,"This second option is useful when using "),D=r(j,"CODE",{});var ve=i(D);ee=n(ve,"tf.keras.Model.fit"),ve.forEach(t),he=n(j,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(j,"CODE",{});var Me=i(S);ue=n(Me,"model(inputs)"),Me.forEach(t),ie=n(j,"."),j.forEach(t),U=d(p),P=r(p,"P",{});var $e=i(P);te=n($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),K=d(p),L=r(p,"UL",{});var x=i(L);C=r(x,"LI",{});var H=i(C);se=n(H,"a single Tensor with "),B=r(H,"CODE",{});var Fe=i(B);le=n(Fe,"input_ids"),Fe.forEach(t),re=n(H," only and nothing else: "),O=r(H,"CODE",{});var ye=i(O);fe=n(ye,"model(inputs_ids)"),ye.forEach(t),H.forEach(t),de=d(x),q=r(x,"LI",{});var X=i(q);me=n(X,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r(X,"CODE",{});var ke=i(W);Y=n(ke,"model([input_ids, attention_mask])"),ke.forEach(t),oe=n(X," or "),Q=r(X,"CODE",{});var Te=i(Q);ae=n(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(t),X.forEach(t),ge=d(x),A=r(x,"LI",{});var ne=i(A);_e=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=r(ne,"CODE",{});var Re=i(I);be=n(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),ne.forEach(t),x.forEach(t)},m(p,R){u(p,h,R),e(h,F),u(p,m,R),u(p,_,R),e(_,k),e(k,b),e(_,g),e(_,E),e(E,pe),u(p,V,R),u(p,z,R),e(z,Z),e(z,D),e(D,ee),e(z,he),e(z,S),e(S,ue),e(z,ie),u(p,U,R),u(p,P,R),e(P,te),u(p,K,R),u(p,L,R),e(L,C),e(C,se),e(C,B),e(B,le),e(C,re),e(C,O),e(O,fe),e(L,de),e(L,q),e(q,me),e(q,W),e(W,Y),e(q,oe),e(q,Q),e(Q,ae),e(L,ge),e(L,A),e(A,_e),e(A,I),e(I,be)},d(p){p&&t(h),p&&t(m),p&&t(_),p&&t(V),p&&t(z),p&&t(U),p&&t(P),p&&t(K),p&&t(L)}}}function r1(N){let 
h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function a1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be;return{c(){h=s("p"),F=o("TF 2.0 models accepts two formats as inputs:"),m=l(),_=s("ul"),k=s("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),E=s("li"),pe=o("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=s("p"),Z=o("This second option is useful when using "),D=s("code"),ee=o("tf.keras.Model.fit"),he=o(` method which currently requires having all the tensors in the first argument of the model call function: `),S=s("code"),ue=o("model(inputs)"),ie=o("."),U=l(),P=s("p"),te=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),L=s("ul"),C=s("li"),se=o("a single Tensor with "),B=s("code"),le=o("input_ids"),re=o(" only and nothing else: "),O=s("code"),fe=o("model(inputs_ids)"),de=l(),q=s("li"),me=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s("code"),Y=o("model([input_ids, attention_mask])"),oe=o(" or "),Q=s("code"),ae=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),A=s("li"),_e=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=s("code"),be=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var R=i(h);F=n(R,"TF 2.0 models accepts two formats as inputs:"),R.forEach(t),m=d(p),_=r(p,"UL",{});var J=i(_);k=r(J,"LI",{});var we=i(k);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),g=d(J),E=r(J,"LI",{});var ce=i(E);pe=n(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),J.forEach(t),V=d(p),z=r(p,"P",{});var j=i(z);Z=n(j,"This second option is useful when using "),D=r(j,"CODE",{});var ve=i(D);ee=n(ve,"tf.keras.Model.fit"),ve.forEach(t),he=n(j,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(j,"CODE",{});var Me=i(S);ue=n(Me,"model(inputs)"),Me.forEach(t),ie=n(j,"."),j.forEach(t),U=d(p),P=r(p,"P",{});var $e=i(P);te=n($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),K=d(p),L=r(p,"UL",{});var x=i(L);C=r(x,"LI",{});var H=i(C);se=n(H,"a single Tensor with "),B=r(H,"CODE",{});var Fe=i(B);le=n(Fe,"input_ids"),Fe.forEach(t),re=n(H," only and nothing else: "),O=r(H,"CODE",{});var ye=i(O);fe=n(ye,"model(inputs_ids)"),ye.forEach(t),H.forEach(t),de=d(x),q=r(x,"LI",{});var X=i(q);me=n(X,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),W=r(X,"CODE",{});var ke=i(W);Y=n(ke,"model([input_ids, attention_mask])"),ke.forEach(t),oe=n(X," or "),Q=r(X,"CODE",{});var Te=i(Q);ae=n(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(t),X.forEach(t),ge=d(x),A=r(x,"LI",{});var ne=i(A);_e=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=r(ne,"CODE",{});var Re=i(I);be=n(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),ne.forEach(t),x.forEach(t)},m(p,R){u(p,h,R),e(h,F),u(p,m,R),u(p,_,R),e(_,k),e(k,b),e(_,g),e(_,E),e(E,pe),u(p,V,R),u(p,z,R),e(z,Z),e(z,D),e(D,ee),e(z,he),e(z,S),e(S,ue),e(z,ie),u(p,U,R),u(p,P,R),e(P,te),u(p,K,R),u(p,L,R),e(L,C),e(C,se),e(C,B),e(B,le),e(C,re),e(C,O),e(O,fe),e(L,de),e(L,q),e(q,me),e(q,W),e(W,Y),e(q,oe),e(q,Q),e(Q,ae),e(L,ge),e(L,A),e(A,_e),e(A,I),e(I,be)},d(p){p&&t(h),p&&t(m),p&&t(_),p&&t(V),p&&t(z),p&&t(U),p&&t(P),p&&t(K),p&&t(L)}}}function i1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function l1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be;return{c(){h=s("p"),F=o("TF 2.0 models accepts two formats as inputs:"),m=l(),_=s("ul"),k=s("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),E=s("li"),pe=o("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=s("p"),Z=o("This second option is useful when using "),D=s("code"),ee=o("tf.keras.Model.fit"),he=o(` method which currently requires having all the tensors in the first argument of the model call function: `),S=s("code"),ue=o("model(inputs)"),ie=o("."),U=l(),P=s("p"),te=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),L=s("ul"),C=s("li"),se=o("a single Tensor with "),B=s("code"),le=o("input_ids"),re=o(" only and nothing else: "),O=s("code"),fe=o("model(inputs_ids)"),de=l(),q=s("li"),me=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s("code"),Y=o("model([input_ids, attention_mask])"),oe=o(" or "),Q=s("code"),ae=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),A=s("li"),_e=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=s("code"),be=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var R=i(h);F=n(R,"TF 2.0 models accepts two formats as inputs:"),R.forEach(t),m=d(p),_=r(p,"UL",{});var J=i(_);k=r(J,"LI",{});var we=i(k);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),g=d(J),E=r(J,"LI",{});var ce=i(E);pe=n(ce,"having all inputs as a list, tuple or dict in the first positional 
arguments."),ce.forEach(t),J.forEach(t),V=d(p),z=r(p,"P",{});var j=i(z);Z=n(j,"This second option is useful when using "),D=r(j,"CODE",{});var ve=i(D);ee=n(ve,"tf.keras.Model.fit"),ve.forEach(t),he=n(j,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(j,"CODE",{});var Me=i(S);ue=n(Me,"model(inputs)"),Me.forEach(t),ie=n(j,"."),j.forEach(t),U=d(p),P=r(p,"P",{});var $e=i(P);te=n($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),K=d(p),L=r(p,"UL",{});var x=i(L);C=r(x,"LI",{});var H=i(C);se=n(H,"a single Tensor with "),B=r(H,"CODE",{});var Fe=i(B);le=n(Fe,"input_ids"),Fe.forEach(t),re=n(H," only and nothing else: "),O=r(H,"CODE",{});var ye=i(O);fe=n(ye,"model(inputs_ids)"),ye.forEach(t),H.forEach(t),de=d(x),q=r(x,"LI",{});var X=i(q);me=n(X,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r(X,"CODE",{});var ke=i(W);Y=n(ke,"model([input_ids, attention_mask])"),ke.forEach(t),oe=n(X," or "),Q=r(X,"CODE",{});var Te=i(Q);ae=n(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(t),X.forEach(t),ge=d(x),A=r(x,"LI",{});var ne=i(A);_e=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=r(ne,"CODE",{});var Re=i(I);be=n(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),ne.forEach(t),x.forEach(t)},m(p,R){u(p,h,R),e(h,F),u(p,m,R),u(p,_,R),e(_,k),e(k,b),e(_,g),e(_,E),e(E,pe),u(p,V,R),u(p,z,R),e(z,Z),e(z,D),e(D,ee),e(z,he),e(z,S),e(S,ue),e(z,ie),u(p,U,R),u(p,P,R),e(P,te),u(p,K,R),u(p,L,R),e(L,C),e(C,se),e(C,B),e(B,le),e(C,re),e(C,O),e(O,fe),e(L,de),e(L,q),e(q,me),e(q,W),e(W,Y),e(q,oe),e(q,Q),e(Q,ae),e(L,ge),e(L,A),e(A,_e),e(A,I),e(I,be)},d(p){p&&t(h),p&&t(m),p&&t(_),p&&t(V),p&&t(z),p&&t(U),p&&t(P),p&&t(K),p&&t(L)}}}function d1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function c1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be;return{c(){h=s("p"),F=o("TF 2.0 models accepts two formats as inputs:"),m=l(),_=s("ul"),k=s("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),E=s("li"),pe=o("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=s("p"),Z=o("This second option is useful when using "),D=s("code"),ee=o("tf.keras.Model.fit"),he=o(` method which currently requires having all the tensors in the first argument of the model call function: `),S=s("code"),ue=o("model(inputs)"),ie=o("."),U=l(),P=s("p"),te=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),K=l(),L=s("ul"),C=s("li"),se=o("a single Tensor with "),B=s("code"),le=o("input_ids"),re=o(" only and nothing else: "),O=s("code"),fe=o("model(inputs_ids)"),de=l(),q=s("li"),me=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s("code"),Y=o("model([input_ids, attention_mask])"),oe=o(" or "),Q=s("code"),ae=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),A=s("li"),_e=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=s("code"),be=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var R=i(h);F=n(R,"TF 2.0 models accepts two formats as inputs:"),R.forEach(t),m=d(p),_=r(p,"UL",{});var J=i(_);k=r(J,"LI",{});var we=i(k);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),g=d(J),E=r(J,"LI",{});var ce=i(E);pe=n(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),J.forEach(t),V=d(p),z=r(p,"P",{});var j=i(z);Z=n(j,"This second option is useful when using "),D=r(j,"CODE",{});var ve=i(D);ee=n(ve,"tf.keras.Model.fit"),ve.forEach(t),he=n(j,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(j,"CODE",{});var Me=i(S);ue=n(Me,"model(inputs)"),Me.forEach(t),ie=n(j,"."),j.forEach(t),U=d(p),P=r(p,"P",{});var $e=i(P);te=n($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),K=d(p),L=r(p,"UL",{});var x=i(L);C=r(x,"LI",{});var H=i(C);se=n(H,"a single Tensor with "),B=r(H,"CODE",{});var Fe=i(B);le=n(Fe,"input_ids"),Fe.forEach(t),re=n(H," only and nothing else: "),O=r(H,"CODE",{});var ye=i(O);fe=n(ye,"model(inputs_ids)"),ye.forEach(t),H.forEach(t),de=d(x),q=r(x,"LI",{});var X=i(q);me=n(X,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r(X,"CODE",{});var ke=i(W);Y=n(ke,"model([input_ids, attention_mask])"),ke.forEach(t),oe=n(X," or "),Q=r(X,"CODE",{});var Te=i(Q);ae=n(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(t),X.forEach(t),ge=d(x),A=r(x,"LI",{});var ne=i(A);_e=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=r(ne,"CODE",{});var Re=i(I);be=n(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),ne.forEach(t),x.forEach(t)},m(p,R){u(p,h,R),e(h,F),u(p,m,R),u(p,_,R),e(_,k),e(k,b),e(_,g),e(_,E),e(E,pe),u(p,V,R),u(p,z,R),e(z,Z),e(z,D),e(D,ee),e(z,he),e(z,S),e(S,ue),e(z,ie),u(p,U,R),u(p,P,R),e(P,te),u(p,K,R),u(p,L,R),e(L,C),e(C,se),e(C,B),e(B,le),e(C,re),e(C,O),e(O,fe),e(L,de),e(L,q),e(q,me),e(q,W),e(W,Y),e(q,oe),e(q,Q),e(Q,ae),e(L,ge),e(L,A),e(A,_e),e(A,I),e(I,be)},d(p){p&&t(h),p&&t(m),p&&t(_),p&&t(V),p&&t(z),p&&t(U),p&&t(P),p&&t(K),p&&t(L)}}}function p1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function h1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be;return{c(){h=s("p"),F=o("TF 2.0 models accepts two formats as inputs:"),m=l(),_=s("ul"),k=s("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),E=s("li"),pe=o("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=s("p"),Z=o("This second option is useful when using "),D=s("code"),ee=o("tf.keras.Model.fit"),he=o(` method which currently requires having all the tensors in the first argument of the model call function: `),S=s("code"),ue=o("model(inputs)"),ie=o("."),U=l(),P=s("p"),te=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),K=l(),L=s("ul"),C=s("li"),se=o("a single Tensor with "),B=s("code"),le=o("input_ids"),re=o(" only and nothing else: "),O=s("code"),fe=o("model(inputs_ids)"),de=l(),q=s("li"),me=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s("code"),Y=o("model([input_ids, attention_mask])"),oe=o(" or "),Q=s("code"),ae=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),A=s("li"),_e=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=s("code"),be=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var R=i(h);F=n(R,"TF 2.0 models accepts two formats as inputs:"),R.forEach(t),m=d(p),_=r(p,"UL",{});var J=i(_);k=r(J,"LI",{});var we=i(k);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),g=d(J),E=r(J,"LI",{});var ce=i(E);pe=n(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),J.forEach(t),V=d(p),z=r(p,"P",{});var j=i(z);Z=n(j,"This second option is useful when using "),D=r(j,"CODE",{});var ve=i(D);ee=n(ve,"tf.keras.Model.fit"),ve.forEach(t),he=n(j,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(j,"CODE",{});var Me=i(S);ue=n(Me,"model(inputs)"),Me.forEach(t),ie=n(j,"."),j.forEach(t),U=d(p),P=r(p,"P",{});var $e=i(P);te=n($e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),$e.forEach(t),K=d(p),L=r(p,"UL",{});var x=i(L);C=r(x,"LI",{});var H=i(C);se=n(H,"a single Tensor with "),B=r(H,"CODE",{});var Fe=i(B);le=n(Fe,"input_ids"),Fe.forEach(t),re=n(H," only and nothing else: "),O=r(H,"CODE",{});var ye=i(O);fe=n(ye,"model(inputs_ids)"),ye.forEach(t),H.forEach(t),de=d(x),q=r(x,"LI",{});var X=i(q);me=n(X,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r(X,"CODE",{});var ke=i(W);Y=n(ke,"model([input_ids, attention_mask])"),ke.forEach(t),oe=n(X," or "),Q=r(X,"CODE",{});var Te=i(Q);ae=n(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(t),X.forEach(t),ge=d(x),A=r(x,"LI",{});var ne=i(A);_e=n(ne,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),I=r(ne,"CODE",{});var Re=i(I);be=n(Re,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Re.forEach(t),ne.forEach(t),x.forEach(t)},m(p,R){u(p,h,R),e(h,F),u(p,m,R),u(p,_,R),e(_,k),e(k,b),e(_,g),e(_,E),e(E,pe),u(p,V,R),u(p,z,R),e(z,Z),e(z,D),e(D,ee),e(z,he),e(z,S),e(S,ue),e(z,ie),u(p,U,R),u(p,P,R),e(P,te),u(p,K,R),u(p,L,R),e(L,C),e(C,se),e(C,B),e(B,le),e(C,re),e(C,O),e(O,fe),e(L,de),e(L,q),e(q,me),e(q,W),e(W,Y),e(q,oe),e(q,Q),e(Q,ae),e(L,ge),e(L,A),e(A,_e),e(A,I),e(I,be)},d(p){p&&t(h),p&&t(m),p&&t(_),p&&t(V),p&&t(z),p&&t(U),p&&t(P),p&&t(K),p&&t(L)}}}function u1(N){let h,F,m,_,k;return{c(){h=s("p"),F=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),_=o("Module"),k=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=r(b,"P",{});var g=i(h);F=n(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(g,"CODE",{});var E=i(m);_=n(E,"Module"),E.forEach(t),k=n(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(b,g){u(b,h,g),e(h,F),e(h,m),e(m,_),e(h,k)},d(b){b&&t(h)}}}function f1(N){let h,F,m,_,k,b,g,E,pe,V,z,Z,D,ee,he,S,ue,ie,U,P,te,K,L,C,se,B,le,re,O,fe,de,q,me,W,Y,oe,Q,ae,ge,A,_e,I,be,p,R,J,we,ce,j,ve,Me,$e,x,H,Fe,ye,X,ke,Te,ne,Re,Da,Mc,Cl,It,tn,$c,on,Fc,Fr,Rc,Ec,xl,Nt,_o,Sa,nn,zc,Oa,Lc,Pl,Ee,sn,qc,Tt,Cc,Rr,xc,Pc,Er,Ac,jc,rn,Xc,Ic,Nc,an,Dc,zr,Sc,Oc,Wc,Dt,Bc,Wa,Qc,Hc,Ba,Uc,Vc,Gc,wt,ln,Kc,Qa,Jc,Zc,dn,Lr,Yc,Ha,ep,tp,qr,op,Ua,np,sp,bo,cn,rp,pn,ap,Va,ip,lp,dp,ko,hn,cp,Ga,pp,hp,Ka,Al,St,vo,Ja,un,up,Za,fp,jl,He,fn,mp,ut,gp,Ya,_p,bp,Cr,kp,vp,xr,Tp,wp,mn,yp,Mp,$p,gn,Fp,Pr,Rp,Ep,zp,yt,_n,Lp,ei,qp,Cp,bn,Ar,xp,ti,Pp,Ap,jr,jp,oi,Xp,Ip,To,kn,Np,ni,Dp,Xl,Ot,wo,si,vn,Sp,ri,Op,Il,Ie,Tn,Wp,ai,Bp,Qp,wn,Hp,Xr,Up,Vp,Gp,yn,Kp,Mn,Jp,Zp,Yp,$n,eh,Ir,th,oh,nh,Ue,Fn,sh,Wt,rh,Nr,ah,ih,ii,lh,dh,ch,yo,ph,li,hh,uh,Rn,Nl,Bt,Mo,di,En,fh,ci,mh,Dl,Ne,zn,gh,Ln,_h,pi,bh,kh,vh,qn,Th,Dr,wh,yh,Mh,Cn,$h,xn,Fh,Rh,Eh,Pn,zh,Sr,Lh,qh,Ch,Ve,An,xh,Qt,Ph,Or,Ah,jh,hi,Xh,Ih,Nh,$o,Dh,ui,Sh,Oh,jn,Sl,Ht,Fo,fi,Xn,Wh,mi,Bh,Ol,De,In,Qh,Nn,Hh,gi,Uh,Vh,Gh,Dn,Kh,Wr,Jh,Zh,Yh,Sn,eu,On,tu,ou,nu,Wn,su,Br,ru,au,iu,Ge,Bn,lu,Ut,du,Qr,cu,pu,_i,hu,uu,fu,Ro,mu,bi,gu,_u,Qn,Wl,Vt,Eo,ki,Hn,bu,vi,ku,Bl,Se,Un,vu,Ti,Tu,wu,Vn,yu,Hr,Mu,$u,Fu,Gn,Ru,Kn,Eu,zu,Lu,Jn,qu,Ur,Cu,xu,Pu,je,Zn,Au,Gt,ju,Vr,Xu,Iu,wi,Nu,Du,Su,zo,Ou,yi,Wu,Bu,Yn,Qu,Mi,Hu,Uu,es,Ql,Kt,Lo,$i,ts,Vu,Fi,Gu,Hl,Oe,os,Ku,Ri,Ju,Zu,ns,Yu,Gr,ef,tf,of,ss,nf,rs,sf,rf,af,as,lf,Kr,df,cf,pf,Ke,is,hf,Jt,uf,Jr,ff,mf,Ei,gf,_f,bf,qo,kf,zi,vf,Tf,ls,Ul,Zt,Co,Li,ds,wf,qi,yf,Vl,We,cs,Mf,Ci,$f,Ff,ps,Rf,Zr,Ef,zf,Lf,hs,qf,us,Cf,xf,Pf,fs,Af,Yr,jf,Xf,If,Je,ms,Nf,Yt,Df,ea,Sf,Of,xi,Wf,Bf,Qf,xo,Hf,Pi,Uf,Vf,gs,Gl,eo,Po,Ai,_s,Gf,ji,Kf,Kl,Be,bs,Jf,to,Zf,Xi,Yf,em,Ii,tm,om,nm,ks,sm,ta,rm,am,im,vs,lm,Ts,dm,cm,pm,ws,hm,oa,um,fm,mm,Ze,ys,gm,oo,_m,na,bm,km,Ni,vm,Tm,wm,Ao,ym,Di,Mm,$m,Ms,Jl,no,jo,Si,$s,Fm,Oi,Rm,Zl,ze,Fs,Em,Wi,zm,Lm,Rs,qm,sa,Cm,xm,Pm,Es,Am,zs,jm,Xm,Im,Xo,Nm,Ls,Dm,ra,Sm,Om,Wm,Ye,qs,Bm,so,Qm,aa,Hm,Um,Bi,Vm,Gm,Km,Io,Jm,Qi,Zm,Ym,Cs,Yl,ro,No,Hi,xs,eg,Ui,tg,ed,Le,Ps,og,As,ng,Vi,sg,rg,ag,js,ig,ia,lg,dg,cg,Xs,pg,Is,hg,ug,fg,Do,mg,Ns,gg,la,_g,bg,kg,et,Ds,vg,ao,Tg,da,wg,yg,Gi,Mg,$g,Fg,So,Rg,Ki,Eg,zg,Ss,td,io,Oo,Ji,Os,Lg,Zi,qg,od,qe,Ws,Cg,Yi,xg,Pg,Bs,Ag,ca,jg,Xg,Ig,Qs,Ng,Hs,Dg,Sg,Og,Wo,Wg,Us,Bg,pa,Qg,Hg,Ug,tt,Vs,Vg,lo,Gg,ha,Kg,Jg,el,Zg,Yg,e_,Bo,t_,tl,o_,n_,Gs,nd,co,Qo,ol,Ks,s_,nl,r_,sd,Ce,Js,a_,sl,i_,l_,Zs,d_,ua,c_,p_,h_,Ys,u_,er,f_,m_,g_,Ho,__,tr,b_,fa,k_,v_,T_,ot,or,
w_,po,y_,ma,M_,$_,rl,F_,R_,E_,Uo,z_,al,L_,q_,nr,rd,ho,Vo,il,sr,C_,ll,x_,ad,xe,rr,P_,dl,A_,j_,ar,X_,ga,I_,N_,D_,ir,S_,lr,O_,W_,B_,Go,Q_,dr,H_,_a,U_,V_,G_,nt,cr,K_,uo,J_,ba,Z_,Y_,cl,eb,tb,ob,Ko,nb,pl,sb,rb,pr,id,fo,Jo,hl,hr,ab,ul,ib,ld,Pe,ur,lb,mo,db,fl,cb,pb,ml,hb,ub,fb,fr,mb,ka,gb,_b,bb,mr,kb,gr,vb,Tb,wb,Zo,yb,_r,Mb,gl,$b,Fb,Rb,st,br,Eb,go,zb,va,Lb,qb,_l,Cb,xb,Pb,Yo,Ab,bl,jb,Xb,kr,dd;return b=new Xe({}),ee=new Xe({}),ne=new Xe({}),tn=new G({props:{name:"class transformers.XLMRobertaConfig",anchor:"transformers.XLMRobertaConfig",parameters:[{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py#L37"}}),nn=new Xe({}),sn=new G({props:{name:"class transformers.XLMRobertaTokenizer",anchor:"transformers.XLMRobertaTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py#L55",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.XLMRobertaTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.XLMRobertaTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.XLMRobertaTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.XLMRobertaTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.XLMRobertaTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.XLMRobertaTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.XLMRobertaTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.XLMRobertaTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.XLMRobertaTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),ln=new G({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.XLMRobertaTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py#L192",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.XLMRobertaTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),cn=new G({props:{name:"get_special_tokens_mask",anchor:"transformers.XLMRobertaTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py#L218",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMRobertaTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.XLMRobertaTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),hn=new G({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.XLMRobertaTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py#L246",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMRobertaTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),un=new Xe({}),fn=new G({props:{name:"class transformers.XLMRobertaTokenizerFast",anchor:"transformers.XLMRobertaTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py#L67",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.XLMRobertaTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.XLMRobertaTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.XLMRobertaTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.XLMRobertaTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.XLMRobertaTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.XLMRobertaTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.XLMRobertaTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.XLMRobertaTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),_n=new G({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.XLMRobertaTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py#L155",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.XLMRobertaTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),kn=new G({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.XLMRobertaTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py#L181",parametersDescription:[{anchor:"transformers.XLMRobertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMRobertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),vn=new Xe({}),Tn=new G({props:{name:"class transformers.XLMRobertaModel",anchor:"transformers.XLMRobertaModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L67",parametersDescription:[{anchor:"transformers.XLMRobertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fn=new G({props:{name:"forward",anchor:"transformers.RobertaModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L741",parametersDescription:[{anchor:"transformers.RobertaModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RobertaModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RobertaModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RobertaModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yo=new Ae({props:{$$slots:{default:[GT]},$$scope:{ctx:N}}}),Rn=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaModel import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaModel.from_pretrained('roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaModel.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),En=new Xe({}),zn=new G({props:{name:"class transformers.XLMRobertaForCausalLM",anchor:"transformers.XLMRobertaForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L80",parametersDescription:[{anchor:"transformers.XLMRobertaForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),An=new G({props:{name:"forward",anchor:"transformers.RobertaForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L908",parametersDescription:[{anchor:"transformers.RobertaForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RobertaForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RobertaForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.RobertaForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RobertaForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each 
tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$o=new Ae({props:{$$slots:{default:[KT]},$$scope:{ctx:N}}}),jn=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') config = RobertaConfig.from_pretrained("roberta-base") config.is_decoder = True model = RobertaForCausalLM.from_pretrained('roberta-base', config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForCausalLM, RobertaConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RobertaConfig.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForCausalLM.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),Xn=new Xe({}),In=new G({props:{name:"class transformers.XLMRobertaForMaskedLM",anchor:"transformers.XLMRobertaForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L93",parametersDescription:[{anchor:"transformers.XLMRobertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bn=new G({props:{name:"forward",anchor:"transformers.RobertaForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L1063",parametersDescription:[{anchor:"transformers.RobertaForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.RobertaForMaskedLM.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ro=new Ae({props:{$$slots:{default:[JT]},$$scope:{ctx:N}}}),Qn=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForMaskedLM.from_pretrained('roberta-base') inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Hn=new Xe({}),Un=new G({props:{name:"class transformers.XLMRobertaForSequenceClassification",anchor:"transformers.XLMRobertaForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L109",parametersDescription:[{anchor:"transformers.XLMRobertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Zn=new G({props:{name:"forward",anchor:"transformers.RobertaForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L1176",parametersDescription:[{anchor:"transformers.RobertaForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),zo=new 
Ae({props:{$$slots:{default:[ZT]},$$scope:{ctx:N}}}),Yn=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForSequenceClassification import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForSequenceClassification.from_pretrained('roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),es=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForSequenceClassification import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForSequenceClassification.from_pretrained('roberta-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = 
outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ts=new Xe({}),os=new G({props:{name:"class transformers.XLMRobertaForMultipleChoice",anchor:"transformers.XLMRobertaForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L125",parametersDescription:[{anchor:"transformers.XLMRobertaForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),is=new G({props:{name:"forward",anchor:"transformers.RobertaForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L1272",parametersDescription:[{anchor:"transformers.RobertaForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qo=new Ae({props:{$$slots:{default:[YT]},$$scope:{ctx:N}}}),ls=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForMultipleChoice import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForMultipleChoice.from_pretrained('roberta-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ds=new Xe({}),cs=new G({props:{name:"class transformers.XLMRobertaForTokenClassification",anchor:"transformers.XLMRobertaForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L141",parametersDescription:[{anchor:"transformers.XLMRobertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ms=new G({props:{name:"forward",anchor:"transformers.RobertaForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L1369",parametersDescription:[{anchor:"transformers.RobertaForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xo=new Ae({props:{$$slots:{default:[e1]},$$scope:{ctx:N}}}),gs=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForTokenClassification import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForTokenClassification.from_pretrained('roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),_s=new Xe({}),bs=new G({props:{name:"class transformers.XLMRobertaForQuestionAnswering",anchor:"transformers.XLMRobertaForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L157",parametersDescription:[{anchor:"transformers.XLMRobertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ys=new G({props:{name:"forward",anchor:"transformers.RobertaForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_roberta.py#L1481",parametersDescription:[{anchor:"transformers.RobertaForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.RobertaForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute 
the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ao=new Ae({props:{$$slots:{default:[t1]},$$scope:{ctx:N}}}),Ms=new ht({props:{code:`from transformers import RobertaTokenizer, RobertaForQuestionAnswering import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForQuestionAnswering.from_pretrained('roberta-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),$s=new Xe({}),Fs=new G({props:{name:"class transformers.TFXLMRobertaModel",anchor:"transformers.TFXLMRobertaModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py#L82",parametersDescription:[{anchor:"transformers.TFXLMRobertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xo=new Ae({props:{$$slots:{default:[o1]},$$scope:{ctx:N}}}),qs=new G({props:{name:"call",anchor:"transformers.TFRobertaModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_tf_roberta.py#L932",parametersDescription:[{anchor:"transformers.TFRobertaModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaModel.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFRobertaModel.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. 
This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFRobertaModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFRobertaModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Io=new Ae({props:{$$slots:{default:[n1]},$$scope:{ctx:N}}}),Cs=new ht({props:{code:`from transformers import RobertaTokenizer, TFRobertaModel import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = TFRobertaModel.from_pretrained('roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaModel.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),xs=new Xe({}),Ps=new G({props:{name:"class transformers.TFXLMRobertaForMaskedLM",anchor:"transformers.TFXLMRobertaForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py#L108",parametersDescription:[{anchor:"transformers.TFXLMRobertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Do=new Ae({props:{$$slots:{default:[s1]},$$scope:{ctx:N}}}),Ds=new G({props:{name:"call",anchor:"transformers.TFRobertaForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_tf_roberta.py#L1107",parametersDescription:[{anchor:"transformers.TFRobertaForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" 
>transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),So=new Ae({props:{$$slots:{default:[r1]},$$scope:{ctx:N}}}),Ss=new ht({props:{code:`from transformers import RobertaTokenizer, TFRobertaForMaskedLM import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = TFRobertaForMaskedLM.from_pretrained('roberta-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Os=new Xe({}),Ws=new G({props:{name:"class transformers.TFXLMRobertaForSequenceClassification",anchor:"transformers.TFXLMRobertaForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py#L124",parametersDescription:[{anchor:"transformers.TFXLMRobertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wo=new Ae({props:{$$slots:{default:[a1]},$$scope:{ctx:N}}}),Vs=new G({props:{name:"call",anchor:"transformers.TFRobertaForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_tf_roberta.py#L1389",parametersDescription:[{anchor:"transformers.TFRobertaForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Bo=new Ae({props:{$$slots:{default:[i1]},$$scope:{ctx:N}}}),Gs=new ht({props:{code:`from transformers import RobertaTokenizer, TFRobertaForSequenceClassification import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = TFRobertaForSequenceClassification.from_pretrained('roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ks=new Xe({}),Js=new G({props:{name:"class transformers.TFXLMRobertaForMultipleChoice",anchor:"transformers.TFXLMRobertaForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py#L172",parametersDescription:[{anchor:"transformers.TFXLMRobertaForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ho=new Ae({props:{$$slots:{default:[l1]},$$scope:{ctx:N}}}),or=new G({props:{name:"call",anchor:"transformers.TFRobertaForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_tf_roberta.py#L1499",parametersDescription:[{anchor:"transformers.TFRobertaForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Uo=new Ae({props:{$$slots:{default:[d1]},$$scope:{ctx:N}}}),nr=new ht({props:{code:`from transformers import RobertaTokenizer, TFRobertaForMultipleChoice import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = TFRobertaForMultipleChoice.from_pretrained('roberta-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sr=new Xe({}),rr=new G({props:{name:"class transformers.TFXLMRobertaForTokenClassification",anchor:"transformers.TFXLMRobertaForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py#L140",parametersDescription:[{anchor:"transformers.TFXLMRobertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Go=new Ae({props:{$$slots:{default:[c1]},$$scope:{ctx:N}}}),cr=new G({props:{name:"call",anchor:"transformers.TFRobertaForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_tf_roberta.py#L1636",parametersDescription:[{anchor:"transformers.TFRobertaForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ko=new Ae({props:{$$slots:{default:[p1]},$$scope:{ctx:N}}}),pr=new ht({props:{code:`from transformers import RobertaTokenizer, TFRobertaForTokenClassification import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = TFRobertaForTokenClassification.from_pretrained('roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),hr=new Xe({}),ur=new G({props:{name:"class transformers.TFXLMRobertaForQuestionAnswering",anchor:"transformers.TFXLMRobertaForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py#L156",parametersDescription:[{anchor:"transformers.TFXLMRobertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaConfig">XLMRobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Zo=new Ae({props:{$$slots:{default:[h1]},$$scope:{ctx:N}}}),br=new G({props:{name:"call",anchor:"transformers.TFRobertaForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roberta/modeling_tf_roberta.py#L1736",parametersDescription:[{anchor:"transformers.TFRobertaForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Yo=new Ae({props:{$$slots:{default:[u1]},$$scope:{ctx:N}}}),kr=new ht({props:{code:`from transformers import RobertaTokenizer, TFRobertaForQuestionAnswering import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = TFRobertaForQuestionAnswering.from_pretrained('roberta-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span 
class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){h=s("meta"),F=l(),m=s("h1"),_=s("a"),k=s("span"),v(b.$$.fragment),g=l(),E=s("span"),pe=o("XLM-RoBERTa"),V=l(),z=s("h2"),Z=s("a"),D=s("span"),v(ee.$$.fragment),he=l(),S=s("span"),ue=o("Overview"),ie=l(),U=s("p"),P=o("The XLM-RoBERTa model was proposed in "),te=s("a"),K=o("Unsupervised Cross-lingual Representation Learning at Scale"),L=o(` by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\xE1n, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook\u2019s RoBERTa model released in 2019. It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data.`),C=l(),se=s("p"),B=o("The abstract from the paper is the following:"),le=l(),re=s("p"),O=s("em"),fe=o(`This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +13.8% average accuracy on XNLI, +12.3% average F1 score on MLQA, and +2.1% average F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 11.8% in XNLI accuracy for Swahili and 9.2% for Urdu over the previous XLM model. We also present a detailed empirical evaluation of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-Ris very competitive with strong monolingual models on the GLUE and XNLI benchmarks. 
We will make XLM-R code, data, and models publicly available.`),de=l(),q=s("p"),me=o("Tips:"),W=l(),Y=s("ul"),oe=s("li"),Q=o(`XLM-RoBERTa is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require `),ae=s("code"),ge=o("lang"),A=o(` tensors to understand which language is used, and should be able to determine the correct language from the input ids.`),_e=l(),I=s("li"),be=o("This implementation is the same as RoBERTa. Refer to the "),p=s("a"),R=o("documentation of RoBERTa"),J=o(` for usage examples as well as the information relative to the inputs and outputs.`),we=l(),ce=s("p"),j=o("This model was contributed by "),ve=s("a"),Me=o("stefan-it"),$e=o(". The original code can be found "),x=s("a"),H=o("here"),Fe=o("."),ye=l(),X=s("h2"),ke=s("a"),Te=s("span"),v(ne.$$.fragment),Re=l(),Da=s("span"),Mc=o("XLMRobertaConfig"),Cl=l(),It=s("div"),v(tn.$$.fragment),$c=l(),on=s("p"),Fc=o("This class overrides "),Fr=s("a"),Rc=o("RobertaConfig"),Ec=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),xl=l(),Nt=s("h2"),_o=s("a"),Sa=s("span"),v(nn.$$.fragment),zc=l(),Oa=s("span"),Lc=o("XLMRobertaTokenizer"),Pl=l(),Ee=s("div"),v(sn.$$.fragment),qc=l(),Tt=s("p"),Cc=o("Adapted from "),Rr=s("a"),xc=o("RobertaTokenizer"),Pc=o(" and "),Er=s("a"),Ac=o("XLNetTokenizer"),jc=o(`. Based on `),rn=s("a"),Xc=o("SentencePiece"),Ic=o("."),Nc=l(),an=s("p"),Dc=o("This tokenizer inherits from "),zr=s("a"),Sc=o("PreTrainedTokenizer"),Oc=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Wc=l(),Dt=s("p"),Bc=o(`Attributes: sp_model (`),Wa=s("code"),Qc=o("SentencePieceProcessor"),Hc=o(`): The `),Ba=s("em"),Uc=o("SentencePiece"),Vc=o(" processor that is used for every conversion (string, tokens and IDs)."),Gc=l(),wt=s("div"),v(ln.$$.fragment),Kc=l(),Qa=s("p"),Jc=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:`),Zc=l(),dn=s("ul"),Lr=s("li"),Yc=o("single sequence: "),Ha=s("code"),ep=o("<s> X </s>"),tp=l(),qr=s("li"),op=o("pair of sequences: "),Ua=s("code"),np=o("<s> A </s></s> B </s>"),sp=l(),bo=s("div"),v(cn.$$.fragment),rp=l(),pn=s("p"),ap=o(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Va=s("code"),ip=o("prepare_for_model"),lp=o(" method."),dp=l(),ko=s("div"),v(hn.$$.fragment),cp=l(),Ga=s("p"),pp=o(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),hp=l(),Ka=s("div"),Al=l(),St=s("h2"),vo=s("a"),Ja=s("span"),v(un.$$.fragment),up=l(),Za=s("span"),fp=o("XLMRobertaTokenizerFast"),jl=l(),He=s("div"),v(fn.$$.fragment),mp=l(),ut=s("p"),gp=o("Construct a \u201Cfast\u201D XLM-RoBERTa tokenizer (backed by HuggingFace\u2019s "),Ya=s("em"),_p=o("tokenizers"),bp=o(` library). Adapted from `),Cr=s("a"),kp=o("RobertaTokenizer"),vp=o(" and "),xr=s("a"),Tp=o("XLNetTokenizer"),wp=o(". Based on "),mn=s("a"),yp=o("BPE"),Mp=o("."),$p=l(),gn=s("p"),Fp=o("This tokenizer inherits from "),Pr=s("a"),Rp=o("PreTrainedTokenizerFast"),Ep=o(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),zp=l(),yt=s("div"),v(_n.$$.fragment),Lp=l(),ei=s("p"),qp=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:`),Cp=l(),bn=s("ul"),Ar=s("li"),xp=o("single sequence: "),ti=s("code"),Pp=o("<s> X </s>"),Ap=l(),jr=s("li"),jp=o("pair of sequences: "),oi=s("code"),Xp=o("<s> A </s></s> B </s>"),Ip=l(),To=s("div"),v(kn.$$.fragment),Np=l(),ni=s("p"),Dp=o(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),Xl=l(),Ot=s("h2"),wo=s("a"),si=s("span"),v(vn.$$.fragment),Sp=l(),ri=s("span"),Op=o("XLMRobertaModel"),Il=l(),Ie=s("div"),v(Tn.$$.fragment),Wp=l(),ai=s("p"),Bp=o("The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),Qp=l(),wn=s("p"),Hp=o("This model inherits from "),Xr=s("a"),Up=o("PreTrainedModel"),Vp=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gp=l(),yn=s("p"),Kp=o("This model is also a PyTorch "),Mn=s("a"),Jp=o("torch.nn.Module"),Zp=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yp=l(),$n=s("p"),eh=o("This class overrides "),Ir=s("a"),th=o("RobertaModel"),oh=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),nh=l(),Ue=s("div"),v(Fn.$$.fragment),sh=l(),Wt=s("p"),rh=o("The "),Nr=s("a"),ah=o("RobertaModel"),ih=o(" forward method, overrides the "),ii=s("code"),lh=o("__call__"),dh=o(" special method."),ch=l(),v(yo.$$.fragment),ph=l(),li=s("p"),hh=o("Example:"),uh=l(),v(Rn.$$.fragment),Nl=l(),Bt=s("h2"),Mo=s("a"),di=s("span"),v(En.$$.fragment),fh=l(),ci=s("span"),mh=o("XLMRobertaForCausalLM"),Dl=l(),Ne=s("div"),v(zn.$$.fragment),gh=l(),Ln=s("p"),_h=o("XLM-RoBERTa Model with a "),pi=s("code"),bh=o("language modeling"),kh=o(" head on top for CLM fine-tuning."),vh=l(),qn=s("p"),Th=o("This model inherits from "),Dr=s("a"),wh=o("PreTrainedModel"),yh=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mh=l(),Cn=s("p"),$h=o("This model is also a PyTorch "),xn=s("a"),Fh=o("torch.nn.Module"),Rh=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Eh=l(),Pn=s("p"),zh=o("This class overrides "),Sr=s("a"),Lh=o("RobertaForCausalLM"),qh=o(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ch=l(),Ve=s("div"),v(An.$$.fragment),xh=l(),Qt=s("p"),Ph=o("The "),Or=s("a"),Ah=o("RobertaForCausalLM"),jh=o(" forward method, overrides the "),hi=s("code"),Xh=o("__call__"),Ih=o(" special method."),Nh=l(),v($o.$$.fragment),Dh=l(),ui=s("p"),Sh=o("Example:"),Oh=l(),v(jn.$$.fragment),Sl=l(),Ht=s("h2"),Fo=s("a"),fi=s("span"),v(Xn.$$.fragment),Wh=l(),mi=s("span"),Bh=o("XLMRobertaForMaskedLM"),Ol=l(),De=s("div"),v(In.$$.fragment),Qh=l(),Nn=s("p"),Hh=o("XLM-RoBERTa Model with a "),gi=s("code"),Uh=o("language modeling"),Vh=o(" head on top."),Gh=l(),Dn=s("p"),Kh=o("This model inherits from "),Wr=s("a"),Jh=o("PreTrainedModel"),Zh=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yh=l(),Sn=s("p"),eu=o("This model is also a PyTorch "),On=s("a"),tu=o("torch.nn.Module"),ou=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nu=l(),Wn=s("p"),su=o("This class overrides "),Br=s("a"),ru=o("RobertaForMaskedLM"),au=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),iu=l(),Ge=s("div"),v(Bn.$$.fragment),lu=l(),Ut=s("p"),du=o("The "),Qr=s("a"),cu=o("RobertaForMaskedLM"),pu=o(" forward method, overrides the "),_i=s("code"),hu=o("__call__"),uu=o(" special method."),fu=l(),v(Ro.$$.fragment),mu=l(),bi=s("p"),gu=o("Example:"),_u=l(),v(Qn.$$.fragment),Wl=l(),Vt=s("h2"),Eo=s("a"),ki=s("span"),v(Hn.$$.fragment),bu=l(),vi=s("span"),ku=o("XLMRobertaForSequenceClassification"),Bl=l(),Se=s("div"),v(Un.$$.fragment),vu=l(),Ti=s("p"),Tu=o(`XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),wu=l(),Vn=s("p"),yu=o("This model inherits from "),Hr=s("a"),Mu=o("PreTrainedModel"),$u=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fu=l(),Gn=s("p"),Ru=o("This model is also a PyTorch "),Kn=s("a"),Eu=o("torch.nn.Module"),zu=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lu=l(),Jn=s("p"),qu=o("This class overrides "),Ur=s("a"),Cu=o("RobertaForSequenceClassification"),xu=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Pu=l(),je=s("div"),v(Zn.$$.fragment),Au=l(),Gt=s("p"),ju=o("The "),Vr=s("a"),Xu=o("RobertaForSequenceClassification"),Iu=o(" forward method, overrides the "),wi=s("code"),Nu=o("__call__"),Du=o(" special method."),Su=l(),v(zo.$$.fragment),Ou=l(),yi=s("p"),Wu=o("Example of single-label classification:"),Bu=l(),v(Yn.$$.fragment),Qu=l(),Mi=s("p"),Hu=o("Example of multi-label classification:"),Uu=l(),v(es.$$.fragment),Ql=l(),Kt=s("h2"),Lo=s("a"),$i=s("span"),v(ts.$$.fragment),Vu=l(),Fi=s("span"),Gu=o("XLMRobertaForMultipleChoice"),Hl=l(),Oe=s("div"),v(os.$$.fragment),Ku=l(),Ri=s("p"),Ju=o(`XLM-RoBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Zu=l(),ns=s("p"),Yu=o("This model inherits from "),Gr=s("a"),ef=o("PreTrainedModel"),tf=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),of=l(),ss=s("p"),nf=o("This model is also a PyTorch "),rs=s("a"),sf=o("torch.nn.Module"),rf=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),af=l(),as=s("p"),lf=o("This class overrides "),Kr=s("a"),df=o("RobertaForMultipleChoice"),cf=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),pf=l(),Ke=s("div"),v(is.$$.fragment),hf=l(),Jt=s("p"),uf=o("The "),Jr=s("a"),ff=o("RobertaForMultipleChoice"),mf=o(" forward method, overrides the "),Ei=s("code"),gf=o("__call__"),_f=o(" special method."),bf=l(),v(qo.$$.fragment),kf=l(),zi=s("p"),vf=o("Example:"),Tf=l(),v(ls.$$.fragment),Ul=l(),Zt=s("h2"),Co=s("a"),Li=s("span"),v(ds.$$.fragment),wf=l(),qi=s("span"),yf=o("XLMRobertaForTokenClassification"),Vl=l(),We=s("div"),v(cs.$$.fragment),Mf=l(),Ci=s("p"),$f=o(`XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ff=l(),ps=s("p"),Rf=o("This model inherits from "),Zr=s("a"),Ef=o("PreTrainedModel"),zf=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lf=l(),hs=s("p"),qf=o("This model is also a PyTorch "),us=s("a"),Cf=o("torch.nn.Module"),xf=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pf=l(),fs=s("p"),Af=o("This class overrides "),Yr=s("a"),jf=o("RobertaForTokenClassification"),Xf=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),If=l(),Je=s("div"),v(ms.$$.fragment),Nf=l(),Yt=s("p"),Df=o("The "),ea=s("a"),Sf=o("RobertaForTokenClassification"),Of=o(" forward method, overrides the "),xi=s("code"),Wf=o("__call__"),Bf=o(" special method."),Qf=l(),v(xo.$$.fragment),Hf=l(),Pi=s("p"),Uf=o("Example:"),Vf=l(),v(gs.$$.fragment),Gl=l(),eo=s("h2"),Po=s("a"),Ai=s("span"),v(_s.$$.fragment),Gf=l(),ji=s("span"),Kf=o("XLMRobertaForQuestionAnswering"),Kl=l(),Be=s("div"),v(bs.$$.fragment),Jf=l(),to=s("p"),Zf=o(`XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Xi=s("code"),Yf=o("span start logits"),em=o(" and "),Ii=s("code"),tm=o("span end logits"),om=o(")."),nm=l(),ks=s("p"),sm=o("This model inherits from "),ta=s("a"),rm=o("PreTrainedModel"),am=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),im=l(),vs=s("p"),lm=o("This model is also a PyTorch "),Ts=s("a"),dm=o("torch.nn.Module"),cm=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pm=l(),ws=s("p"),hm=o("This class overrides "),oa=s("a"),um=o("RobertaForQuestionAnswering"),fm=o(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),mm=l(),Ze=s("div"),v(ys.$$.fragment),gm=l(),oo=s("p"),_m=o("The "),na=s("a"),bm=o("RobertaForQuestionAnswering"),km=o(" forward method, overrides the "),Ni=s("code"),vm=o("__call__"),Tm=o(" special method."),wm=l(),v(Ao.$$.fragment),ym=l(),Di=s("p"),Mm=o("Example:"),$m=l(),v(Ms.$$.fragment),Jl=l(),no=s("h2"),jo=s("a"),Si=s("span"),v($s.$$.fragment),Fm=l(),Oi=s("span"),Rm=o("TFXLMRobertaModel"),Zl=l(),ze=s("div"),v(Fs.$$.fragment),Em=l(),Wi=s("p"),zm=o("The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),Lm=l(),Rs=s("p"),qm=o("This model inherits from "),sa=s("a"),Cm=o("TFPreTrainedModel"),xm=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pm=l(),Es=s("p"),Am=o("This model is also a "),zs=s("a"),jm=o("tf.keras.Model"),Xm=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Im=l(),v(Xo.$$.fragment),Nm=l(),Ls=s("p"),Dm=o("This class overrides "),ra=s("a"),Sm=o("TFRobertaModel"),Om=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Wm=l(),Ye=s("div"),v(qs.$$.fragment),Bm=l(),so=s("p"),Qm=o("The "),aa=s("a"),Hm=o("TFRobertaModel"),Um=o(" forward method, overrides the "),Bi=s("code"),Vm=o("__call__"),Gm=o(" special method."),Km=l(),v(Io.$$.fragment),Jm=l(),Qi=s("p"),Zm=o("Example:"),Ym=l(),v(Cs.$$.fragment),Yl=l(),ro=s("h2"),No=s("a"),Hi=s("span"),v(xs.$$.fragment),eg=l(),Ui=s("span"),tg=o("TFXLMRobertaForMaskedLM"),ed=l(),Le=s("div"),v(Ps.$$.fragment),og=l(),As=s("p"),ng=o("XLM-RoBERTa Model with a "),Vi=s("code"),sg=o("language modeling"),rg=o(" head on top."),ag=l(),js=s("p"),ig=o("This model inherits from "),ia=s("a"),lg=o("TFPreTrainedModel"),dg=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cg=l(),Xs=s("p"),pg=o("This model is also a "),Is=s("a"),hg=o("tf.keras.Model"),ug=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fg=l(),v(Do.$$.fragment),mg=l(),Ns=s("p"),gg=o("This class overrides "),la=s("a"),_g=o("TFRobertaForMaskedLM"),bg=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),kg=l(),et=s("div"),v(Ds.$$.fragment),vg=l(),ao=s("p"),Tg=o("The "),da=s("a"),wg=o("TFRobertaForMaskedLM"),yg=o(" forward method, overrides the "),Gi=s("code"),Mg=o("__call__"),$g=o(" special method."),Fg=l(),v(So.$$.fragment),Rg=l(),Ki=s("p"),Eg=o("Example:"),zg=l(),v(Ss.$$.fragment),td=l(),io=s("h2"),Oo=s("a"),Ji=s("span"),v(Os.$$.fragment),Lg=l(),Zi=s("span"),qg=o("TFXLMRobertaForSequenceClassification"),od=l(),qe=s("div"),v(Ws.$$.fragment),Cg=l(),Yi=s("p"),xg=o(`XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Pg=l(),Bs=s("p"),Ag=o("This model inherits from "),ca=s("a"),jg=o("TFPreTrainedModel"),Xg=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ig=l(),Qs=s("p"),Ng=o("This model is also a "),Hs=s("a"),Dg=o("tf.keras.Model"),Sg=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Og=l(),v(Wo.$$.fragment),Wg=l(),Us=s("p"),Bg=o("This class overrides "),pa=s("a"),Qg=o("TFRobertaForSequenceClassification"),Hg=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Ug=l(),tt=s("div"),v(Vs.$$.fragment),Vg=l(),lo=s("p"),Gg=o("The "),ha=s("a"),Kg=o("TFRobertaForSequenceClassification"),Jg=o(" forward method, overrides the "),el=s("code"),Zg=o("__call__"),Yg=o(" special method."),e_=l(),v(Bo.$$.fragment),t_=l(),tl=s("p"),o_=o("Example:"),n_=l(),v(Gs.$$.fragment),nd=l(),co=s("h2"),Qo=s("a"),ol=s("span"),v(Ks.$$.fragment),s_=l(),nl=s("span"),r_=o("TFXLMRobertaForMultipleChoice"),sd=l(),Ce=s("div"),v(Js.$$.fragment),a_=l(),sl=s("p"),i_=o(`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),l_=l(),Zs=s("p"),d_=o("This model inherits from "),ua=s("a"),c_=o("TFPreTrainedModel"),p_=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),h_=l(),Ys=s("p"),u_=o("This model is also a "),er=s("a"),f_=o("tf.keras.Model"),m_=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),g_=l(),v(Ho.$$.fragment),__=l(),tr=s("p"),b_=o("This class overrides "),fa=s("a"),k_=o("TFRobertaForMultipleChoice"),v_=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),T_=l(),ot=s("div"),v(or.$$.fragment),w_=l(),po=s("p"),y_=o("The "),ma=s("a"),M_=o("TFRobertaForMultipleChoice"),$_=o(" forward method, overrides the "),rl=s("code"),F_=o("__call__"),R_=o(" special method."),E_=l(),v(Uo.$$.fragment),z_=l(),al=s("p"),L_=o("Example:"),q_=l(),v(nr.$$.fragment),rd=l(),ho=s("h2"),Vo=s("a"),il=s("span"),v(sr.$$.fragment),C_=l(),ll=s("span"),x_=o("TFXLMRobertaForTokenClassification"),ad=l(),xe=s("div"),v(rr.$$.fragment),P_=l(),dl=s("p"),A_=o(`XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),j_=l(),ar=s("p"),X_=o("This model inherits from "),ga=s("a"),I_=o("TFPreTrainedModel"),N_=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),D_=l(),ir=s("p"),S_=o("This model is also a "),lr=s("a"),O_=o("tf.keras.Model"),W_=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),B_=l(),v(Go.$$.fragment),Q_=l(),dr=s("p"),H_=o("This class overrides "),_a=s("a"),U_=o("TFRobertaForTokenClassification"),V_=o(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),G_=l(),nt=s("div"),v(cr.$$.fragment),K_=l(),uo=s("p"),J_=o("The "),ba=s("a"),Z_=o("TFRobertaForTokenClassification"),Y_=o(" forward method, overrides the "),cl=s("code"),eb=o("__call__"),tb=o(" special method."),ob=l(),v(Ko.$$.fragment),nb=l(),pl=s("p"),sb=o("Example:"),rb=l(),v(pr.$$.fragment),id=l(),fo=s("h2"),Jo=s("a"),hl=s("span"),v(hr.$$.fragment),ab=l(),ul=s("span"),ib=o("TFXLMRobertaForQuestionAnswering"),ld=l(),Pe=s("div"),v(ur.$$.fragment),lb=l(),mo=s("p"),db=o(`XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),fl=s("code"),cb=o("span start logits"),pb=o(" and "),ml=s("code"),hb=o("span end logits"),ub=o(")."),fb=l(),fr=s("p"),mb=o("This model inherits from "),ka=s("a"),gb=o("TFPreTrainedModel"),_b=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bb=l(),mr=s("p"),kb=o("This model is also a "),gr=s("a"),vb=o("tf.keras.Model"),Tb=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),wb=l(),v(Zo.$$.fragment),yb=l(),_r=s("p"),Mb=o("This class overrides "),gl=s("code"),$b=o("TFRobertaForQuestionAnsweringSimple"),Fb=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Rb=l(),st=s("div"),v(br.$$.fragment),Eb=l(),go=s("p"),zb=o("The "),va=s("a"),Lb=o("TFRobertaForQuestionAnswering"),qb=o(" forward method, overrides the "),_l=s("code"),Cb=o("__call__"),xb=o(" special method."),Pb=l(),v(Yo.$$.fragment),Ab=l(),bl=s("p"),jb=o("Example:"),Xb=l(),v(kr.$$.fragment),this.h()},l(a){const f=VT('[data-svelte="svelte-1phssyn"]',document.head);h=r(f,"META",{name:!0,content:!0}),f.forEach(t),F=d(a),m=r(a,"H1",{class:!0});var vr=i(m);_=r(vr,"A",{id:!0,class:!0,href:!0});var kl=i(_);k=r(kl,"SPAN",{});var vl=i(k);T(b.$$.fragment,vl),vl.forEach(t),kl.forEach(t),g=d(vr),E=r(vr,"SPAN",{});var Tl=i(E);pe=n(Tl,"XLM-RoBERTa"),Tl.forEach(t),vr.forEach(t),V=d(a),z=r(a,"H2",{class:!0});var Tr=i(z);Z=r(Tr,"A",{id:!0,class:!0,href:!0});var wl=i(Z);D=r(wl,"SPAN",{});var yl=i(D);T(ee.$$.fragment,yl),yl.forEach(t),wl.forEach(t),he=d(Tr),S=r(Tr,"SPAN",{});var Ml=i(S);ue=n(Ml,"Overview"),Ml.forEach(t),Tr.forEach(t),ie=d(a),U=r(a,"P",{});var wr=i(U);P=n(wr,"The XLM-RoBERTa model was proposed in "),te=r(wr,"A",{href:!0,rel:!0});var $l=i(te);K=n($l,"Unsupervised Cross-lingual Representation Learning at Scale"),$l.forEach(t),L=n(wr,` by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\xE1n, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook\u2019s RoBERTa model released in 2019. It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data.`),wr.forEach(t),C=d(a),se=r(a,"P",{});var Fl=i(se);B=n(Fl,"The abstract from the paper is the following:"),Fl.forEach(t),le=d(a),re=r(a,"P",{});var Rl=i(re);O=r(Rl,"EM",{});var El=i(O);fe=n(El,`This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. 
Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +13.8% average accuracy on XNLI, +12.3% average F1 score on MLQA, and +2.1% average F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 11.8% in XNLI accuracy for Swahili and 9.2% for Urdu over the previous XLM model. We also present a detailed empirical evaluation of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-Ris very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make XLM-R code, data, and models publicly available.`),El.forEach(t),Rl.forEach(t),de=d(a),q=r(a,"P",{});var zl=i(q);me=n(zl,"Tips:"),zl.forEach(t),W=d(a),Y=r(a,"UL",{});var yr=i(Y);oe=r(yr,"LI",{});var Mr=i(oe);Q=n(Mr,`XLM-RoBERTa is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require `),ae=r(Mr,"CODE",{});var Ll=i(ae);ge=n(Ll,"lang"),Ll.forEach(t),A=n(Mr,` tensors to understand which language is used, and should be able to determine the correct language from the input ids.`),Mr.forEach(t),_e=d(yr),I=r(yr,"LI",{});var $r=i(I);be=n($r,"This implementation is the same as RoBERTa. Refer to the "),p=r($r,"A",{href:!0});var ql=i(p);R=n(ql,"documentation of RoBERTa"),ql.forEach(t),J=n($r,` for usage examples as well as the information relative to the inputs and outputs.`),$r.forEach(t),yr.forEach(t),we=d(a),ce=r(a,"P",{});var Ta=i(ce);j=n(Ta,"This model was contributed by "),ve=r(Ta,"A",{href:!0,rel:!0});var Ob=i(ve);Me=n(Ob,"stefan-it"),Ob.forEach(t),$e=n(Ta,". The original code can be found "),x=r(Ta,"A",{href:!0,rel:!0});var Wb=i(x);H=n(Wb,"here"),Wb.forEach(t),Fe=n(Ta,"."),Ta.forEach(t),ye=d(a),X=r(a,"H2",{class:!0});var cd=i(X);ke=r(cd,"A",{id:!0,class:!0,href:!0});var Bb=i(ke);Te=r(Bb,"SPAN",{});var Qb=i(Te);T(ne.$$.fragment,Qb),Qb.forEach(t),Bb.forEach(t),Re=d(cd),Da=r(cd,"SPAN",{});var Hb=i(Da);Mc=n(Hb,"XLMRobertaConfig"),Hb.forEach(t),cd.forEach(t),Cl=d(a),It=r(a,"DIV",{class:!0});var pd=i(It);T(tn.$$.fragment,pd),$c=d(pd),on=r(pd,"P",{});var hd=i(on);Fc=n(hd,"This class overrides "),Fr=r(hd,"A",{href:!0});var Ub=i(Fr);Rc=n(Ub,"RobertaConfig"),Ub.forEach(t),Ec=n(hd,`. Please check the superclass for the appropriate documentation alongside usage examples.`),hd.forEach(t),pd.forEach(t),xl=d(a),Nt=r(a,"H2",{class:!0});var ud=i(Nt);_o=r(ud,"A",{id:!0,class:!0,href:!0});var Vb=i(_o);Sa=r(Vb,"SPAN",{});var Gb=i(Sa);T(nn.$$.fragment,Gb),Gb.forEach(t),Vb.forEach(t),zc=d(ud),Oa=r(ud,"SPAN",{});var Kb=i(Oa);Lc=n(Kb,"XLMRobertaTokenizer"),Kb.forEach(t),ud.forEach(t),Pl=d(a),Ee=r(a,"DIV",{class:!0});var Qe=i(Ee);T(sn.$$.fragment,Qe),qc=d(Qe),Tt=r(Qe,"P",{});var en=i(Tt);Cc=n(en,"Adapted from "),Rr=r(en,"A",{href:!0});var Jb=i(Rr);xc=n(Jb,"RobertaTokenizer"),Jb.forEach(t),Pc=n(en," and "),Er=r(en,"A",{href:!0});var Zb=i(Er);Ac=n(Zb,"XLNetTokenizer"),Zb.forEach(t),jc=n(en,`. 
Based on `),rn=r(en,"A",{href:!0,rel:!0});var Yb=i(rn);Xc=n(Yb,"SentencePiece"),Yb.forEach(t),Ic=n(en,"."),en.forEach(t),Nc=d(Qe),an=r(Qe,"P",{});var fd=i(an);Dc=n(fd,"This tokenizer inherits from "),zr=r(fd,"A",{href:!0});var ek=i(zr);Sc=n(ek,"PreTrainedTokenizer"),ek.forEach(t),Oc=n(fd,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),fd.forEach(t),Wc=d(Qe),Dt=r(Qe,"P",{});var wa=i(Dt);Bc=n(wa,`Attributes: sp_model (`),Wa=r(wa,"CODE",{});var tk=i(Wa);Qc=n(tk,"SentencePieceProcessor"),tk.forEach(t),Hc=n(wa,`): The `),Ba=r(wa,"EM",{});var ok=i(Ba);Uc=n(ok,"SentencePiece"),ok.forEach(t),Vc=n(wa," processor that is used for every conversion (string, tokens and IDs)."),wa.forEach(t),Gc=d(Qe),wt=r(Qe,"DIV",{class:!0});var ya=i(wt);T(ln.$$.fragment,ya),Kc=d(ya),Qa=r(ya,"P",{});var nk=i(Qa);Jc=n(nk,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:`),nk.forEach(t),Zc=d(ya),dn=r(ya,"UL",{});var md=i(dn);Lr=r(md,"LI",{});var Ib=i(Lr);Yc=n(Ib,"single sequence: "),Ha=r(Ib,"CODE",{});var sk=i(Ha);ep=n(sk,"<s> X </s>"),sk.forEach(t),Ib.forEach(t),tp=d(md),qr=r(md,"LI",{});var Nb=i(qr);op=n(Nb,"pair of sequences: "),Ua=r(Nb,"CODE",{});var rk=i(Ua);np=n(rk,"<s> A </s></s> B </s>"),rk.forEach(t),Nb.forEach(t),md.forEach(t),ya.forEach(t),sp=d(Qe),bo=r(Qe,"DIV",{class:!0});var gd=i(bo);T(cn.$$.fragment,gd),rp=d(gd),pn=r(gd,"P",{});var _d=i(pn);ap=n(_d,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Va=r(_d,"CODE",{});var ak=i(Va);ip=n(ak,"prepare_for_model"),ak.forEach(t),lp=n(_d," method."),_d.forEach(t),gd.forEach(t),dp=d(Qe),ko=r(Qe,"DIV",{class:!0});var bd=i(ko);T(hn.$$.fragment,bd),cp=d(bd),Ga=r(bd,"P",{});var ik=i(Ga);pp=n(ik,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),ik.forEach(t),bd.forEach(t),hp=d(Qe),Ka=r(Qe,"DIV",{class:!0}),i(Ka).forEach(t),Qe.forEach(t),Al=d(a),St=r(a,"H2",{class:!0});var kd=i(St);vo=r(kd,"A",{id:!0,class:!0,href:!0});var lk=i(vo);Ja=r(lk,"SPAN",{});var dk=i(Ja);T(un.$$.fragment,dk),dk.forEach(t),lk.forEach(t),up=d(kd),Za=r(kd,"SPAN",{});var ck=i(Za);fp=n(ck,"XLMRobertaTokenizerFast"),ck.forEach(t),kd.forEach(t),jl=d(a),He=r(a,"DIV",{class:!0});var Mt=i(He);T(fn.$$.fragment,Mt),mp=d(Mt),ut=r(Mt,"P",{});var $t=i(ut);gp=n($t,"Construct a \u201Cfast\u201D XLM-RoBERTa tokenizer (backed by HuggingFace\u2019s "),Ya=r($t,"EM",{});var pk=i(Ya);_p=n(pk,"tokenizers"),pk.forEach(t),bp=n($t,` library). Adapted from `),Cr=r($t,"A",{href:!0});var hk=i(Cr);kp=n(hk,"RobertaTokenizer"),hk.forEach(t),vp=n($t," and "),xr=r($t,"A",{href:!0});var uk=i(xr);Tp=n(uk,"XLNetTokenizer"),uk.forEach(t),wp=n($t,". Based on "),mn=r($t,"A",{href:!0,rel:!0});var fk=i(mn);yp=n(fk,"BPE"),fk.forEach(t),Mp=n($t,"."),$t.forEach(t),$p=d(Mt),gn=r(Mt,"P",{});var vd=i(gn);Fp=n(vd,"This tokenizer inherits from "),Pr=r(vd,"A",{href:!0});var mk=i(Pr);Rp=n(mk,"PreTrainedTokenizerFast"),mk.forEach(t),Ep=n(vd,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),vd.forEach(t),zp=d(Mt),yt=r(Mt,"DIV",{class:!0});var Ma=i(yt);T(_n.$$.fragment,Ma),Lp=d(Ma),ei=r(Ma,"P",{});var gk=i(ei);qp=n(gk,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:`),gk.forEach(t),Cp=d(Ma),bn=r(Ma,"UL",{});var Td=i(bn);Ar=r(Td,"LI",{});var Db=i(Ar);xp=n(Db,"single sequence: "),ti=r(Db,"CODE",{});var _k=i(ti);Pp=n(_k,"<s> X </s>"),_k.forEach(t),Db.forEach(t),Ap=d(Td),jr=r(Td,"LI",{});var Sb=i(jr);jp=n(Sb,"pair of sequences: "),oi=r(Sb,"CODE",{});var bk=i(oi);Xp=n(bk,"<s> A </s></s> B </s>"),bk.forEach(t),Sb.forEach(t),Td.forEach(t),Ma.forEach(t),Ip=d(Mt),To=r(Mt,"DIV",{class:!0});var wd=i(To);T(kn.$$.fragment,wd),Np=d(wd),ni=r(wd,"P",{});var kk=i(ni);Dp=n(kk,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),kk.forEach(t),wd.forEach(t),Mt.forEach(t),Xl=d(a),Ot=r(a,"H2",{class:!0});var yd=i(Ot);wo=r(yd,"A",{id:!0,class:!0,href:!0});var vk=i(wo);si=r(vk,"SPAN",{});var Tk=i(si);T(vn.$$.fragment,Tk),Tk.forEach(t),vk.forEach(t),Sp=d(yd),ri=r(yd,"SPAN",{});var wk=i(ri);Op=n(wk,"XLMRobertaModel"),wk.forEach(t),yd.forEach(t),Il=d(a),Ie=r(a,"DIV",{class:!0});var ft=i(Ie);T(Tn.$$.fragment,ft),Wp=d(ft),ai=r(ft,"P",{});var yk=i(ai);Bp=n(yk,"The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),yk.forEach(t),Qp=d(ft),wn=r(ft,"P",{});var Md=i(wn);Hp=n(Md,"This model inherits from "),Xr=r(Md,"A",{href:!0});var Mk=i(Xr);Up=n(Mk,"PreTrainedModel"),Mk.forEach(t),Vp=n(Md,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Md.forEach(t),Gp=d(ft),yn=r(ft,"P",{});var $d=i(yn);Kp=n($d,"This model is also a PyTorch "),Mn=r($d,"A",{href:!0,rel:!0});var $k=i(Mn);Jp=n($k,"torch.nn.Module"),$k.forEach(t),Zp=n($d,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$d.forEach(t),Yp=d(ft),$n=r(ft,"P",{});var Fd=i($n);eh=n(Fd,"This class overrides "),Ir=r(Fd,"A",{href:!0});var Fk=i(Ir);th=n(Fk,"RobertaModel"),Fk.forEach(t),oh=n(Fd,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Fd.forEach(t),nh=d(ft),Ue=r(ft,"DIV",{class:!0});var Ft=i(Ue);T(Fn.$$.fragment,Ft),sh=d(Ft),Wt=r(Ft,"P",{});var $a=i(Wt);rh=n($a,"The "),Nr=r($a,"A",{href:!0});var Rk=i(Nr);ah=n(Rk,"RobertaModel"),Rk.forEach(t),ih=n($a," forward method, overrides the "),ii=r($a,"CODE",{});var Ek=i(ii);lh=n(Ek,"__call__"),Ek.forEach(t),dh=n($a," special method."),$a.forEach(t),ch=d(Ft),T(yo.$$.fragment,Ft),ph=d(Ft),li=r(Ft,"P",{});var zk=i(li);hh=n(zk,"Example:"),zk.forEach(t),uh=d(Ft),T(Rn.$$.fragment,Ft),Ft.forEach(t),ft.forEach(t),Nl=d(a),Bt=r(a,"H2",{class:!0});var Rd=i(Bt);Mo=r(Rd,"A",{id:!0,class:!0,href:!0});var Lk=i(Mo);di=r(Lk,"SPAN",{});var qk=i(di);T(En.$$.fragment,qk),qk.forEach(t),Lk.forEach(t),fh=d(Rd),ci=r(Rd,"SPAN",{});var Ck=i(ci);mh=n(Ck,"XLMRobertaForCausalLM"),Ck.forEach(t),Rd.forEach(t),Dl=d(a),Ne=r(a,"DIV",{class:!0});var mt=i(Ne);T(zn.$$.fragment,mt),gh=d(mt),Ln=r(mt,"P",{});var Ed=i(Ln);_h=n(Ed,"XLM-RoBERTa Model with a "),pi=r(Ed,"CODE",{});var xk=i(pi);bh=n(xk,"language modeling"),xk.forEach(t),kh=n(Ed," head on top for CLM fine-tuning."),Ed.forEach(t),vh=d(mt),qn=r(mt,"P",{});var zd=i(qn);Th=n(zd,"This model inherits from "),Dr=r(zd,"A",{href:!0});var Pk=i(Dr);wh=n(Pk,"PreTrainedModel"),Pk.forEach(t),yh=n(zd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zd.forEach(t),Mh=d(mt),Cn=r(mt,"P",{});var Ld=i(Cn);$h=n(Ld,"This model is also a PyTorch "),xn=r(Ld,"A",{href:!0,rel:!0});var Ak=i(xn);Fh=n(Ak,"torch.nn.Module"),Ak.forEach(t),Rh=n(Ld,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ld.forEach(t),Eh=d(mt),Pn=r(mt,"P",{});var qd=i(Pn);zh=n(qd,"This class overrides "),Sr=r(qd,"A",{href:!0});var jk=i(Sr);Lh=n(jk,"RobertaForCausalLM"),jk.forEach(t),qh=n(qd,`. Please check the superclass for the appropriate documentation alongside usage examples.`),qd.forEach(t),Ch=d(mt),Ve=r(mt,"DIV",{class:!0});var Rt=i(Ve);T(An.$$.fragment,Rt),xh=d(Rt),Qt=r(Rt,"P",{});var Fa=i(Qt);Ph=n(Fa,"The "),Or=r(Fa,"A",{href:!0});var Xk=i(Or);Ah=n(Xk,"RobertaForCausalLM"),Xk.forEach(t),jh=n(Fa," forward method, overrides the "),hi=r(Fa,"CODE",{});var Ik=i(hi);Xh=n(Ik,"__call__"),Ik.forEach(t),Ih=n(Fa," special method."),Fa.forEach(t),Nh=d(Rt),T($o.$$.fragment,Rt),Dh=d(Rt),ui=r(Rt,"P",{});var Nk=i(ui);Sh=n(Nk,"Example:"),Nk.forEach(t),Oh=d(Rt),T(jn.$$.fragment,Rt),Rt.forEach(t),mt.forEach(t),Sl=d(a),Ht=r(a,"H2",{class:!0});var Cd=i(Ht);Fo=r(Cd,"A",{id:!0,class:!0,href:!0});var Dk=i(Fo);fi=r(Dk,"SPAN",{});var Sk=i(fi);T(Xn.$$.fragment,Sk),Sk.forEach(t),Dk.forEach(t),Wh=d(Cd),mi=r(Cd,"SPAN",{});var Ok=i(mi);Bh=n(Ok,"XLMRobertaForMaskedLM"),Ok.forEach(t),Cd.forEach(t),Ol=d(a),De=r(a,"DIV",{class:!0});var gt=i(De);T(In.$$.fragment,gt),Qh=d(gt),Nn=r(gt,"P",{});var xd=i(Nn);Hh=n(xd,"XLM-RoBERTa Model with a "),gi=r(xd,"CODE",{});var Wk=i(gi);Uh=n(Wk,"language modeling"),Wk.forEach(t),Vh=n(xd," head on top."),xd.forEach(t),Gh=d(gt),Dn=r(gt,"P",{});var Pd=i(Dn);Kh=n(Pd,"This model inherits from "),Wr=r(Pd,"A",{href:!0});var Bk=i(Wr);Jh=n(Bk,"PreTrainedModel"),Bk.forEach(t),Zh=n(Pd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pd.forEach(t),Yh=d(gt),Sn=r(gt,"P",{});var Ad=i(Sn);eu=n(Ad,"This model is also a PyTorch "),On=r(Ad,"A",{href:!0,rel:!0});var Qk=i(On);tu=n(Qk,"torch.nn.Module"),Qk.forEach(t),ou=n(Ad,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ad.forEach(t),nu=d(gt),Wn=r(gt,"P",{});var jd=i(Wn);su=n(jd,"This class overrides "),Br=r(jd,"A",{href:!0});var Hk=i(Br);ru=n(Hk,"RobertaForMaskedLM"),Hk.forEach(t),au=n(jd,`. Please check the superclass for the appropriate documentation alongside usage examples.`),jd.forEach(t),iu=d(gt),Ge=r(gt,"DIV",{class:!0});var Et=i(Ge);T(Bn.$$.fragment,Et),lu=d(Et),Ut=r(Et,"P",{});var Ra=i(Ut);du=n(Ra,"The "),Qr=r(Ra,"A",{href:!0});var Uk=i(Qr);cu=n(Uk,"RobertaForMaskedLM"),Uk.forEach(t),pu=n(Ra," forward method, overrides the "),_i=r(Ra,"CODE",{});var Vk=i(_i);hu=n(Vk,"__call__"),Vk.forEach(t),uu=n(Ra," special method."),Ra.forEach(t),fu=d(Et),T(Ro.$$.fragment,Et),mu=d(Et),bi=r(Et,"P",{});var Gk=i(bi);gu=n(Gk,"Example:"),Gk.forEach(t),_u=d(Et),T(Qn.$$.fragment,Et),Et.forEach(t),gt.forEach(t),Wl=d(a),Vt=r(a,"H2",{class:!0});var Xd=i(Vt);Eo=r(Xd,"A",{id:!0,class:!0,href:!0});var Kk=i(Eo);ki=r(Kk,"SPAN",{});var Jk=i(ki);T(Hn.$$.fragment,Jk),Jk.forEach(t),Kk.forEach(t),bu=d(Xd),vi=r(Xd,"SPAN",{});var Zk=i(vi);ku=n(Zk,"XLMRobertaForSequenceClassification"),Zk.forEach(t),Xd.forEach(t),Bl=d(a),Se=r(a,"DIV",{class:!0});var _t=i(Se);T(Un.$$.fragment,_t),vu=d(_t),Ti=r(_t,"P",{});var Yk=i(Ti);Tu=n(Yk,`XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Yk.forEach(t),wu=d(_t),Vn=r(_t,"P",{});var Id=i(Vn);yu=n(Id,"This model inherits from "),Hr=r(Id,"A",{href:!0});var ev=i(Hr);Mu=n(ev,"PreTrainedModel"),ev.forEach(t),$u=n(Id,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Id.forEach(t),Fu=d(_t),Gn=r(_t,"P",{});var Nd=i(Gn);Ru=n(Nd,"This model is also a PyTorch "),Kn=r(Nd,"A",{href:!0,rel:!0});var tv=i(Kn);Eu=n(tv,"torch.nn.Module"),tv.forEach(t),zu=n(Nd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nd.forEach(t),Lu=d(_t),Jn=r(_t,"P",{});var Dd=i(Jn);qu=n(Dd,"This class overrides "),Ur=r(Dd,"A",{href:!0});var ov=i(Ur);Cu=n(ov,"RobertaForSequenceClassification"),ov.forEach(t),xu=n(Dd,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Dd.forEach(t),Pu=d(_t),je=r(_t,"DIV",{class:!0});var rt=i(je);T(Zn.$$.fragment,rt),Au=d(rt),Gt=r(rt,"P",{});var Ea=i(Gt);ju=n(Ea,"The "),Vr=r(Ea,"A",{href:!0});var nv=i(Vr);Xu=n(nv,"RobertaForSequenceClassification"),nv.forEach(t),Iu=n(Ea," forward method, overrides the "),wi=r(Ea,"CODE",{});var sv=i(wi);Nu=n(sv,"__call__"),sv.forEach(t),Du=n(Ea," special method."),Ea.forEach(t),Su=d(rt),T(zo.$$.fragment,rt),Ou=d(rt),yi=r(rt,"P",{});var rv=i(yi);Wu=n(rv,"Example of single-label classification:"),rv.forEach(t),Bu=d(rt),T(Yn.$$.fragment,rt),Qu=d(rt),Mi=r(rt,"P",{});var av=i(Mi);Hu=n(av,"Example of multi-label classification:"),av.forEach(t),Uu=d(rt),T(es.$$.fragment,rt),rt.forEach(t),_t.forEach(t),Ql=d(a),Kt=r(a,"H2",{class:!0});var Sd=i(Kt);Lo=r(Sd,"A",{id:!0,class:!0,href:!0});var iv=i(Lo);$i=r(iv,"SPAN",{});var lv=i($i);T(ts.$$.fragment,lv),lv.forEach(t),iv.forEach(t),Vu=d(Sd),Fi=r(Sd,"SPAN",{});var dv=i(Fi);Gu=n(dv,"XLMRobertaForMultipleChoice"),dv.forEach(t),Sd.forEach(t),Hl=d(a),Oe=r(a,"DIV",{class:!0});var bt=i(Oe);T(os.$$.fragment,bt),Ku=d(bt),Ri=r(bt,"P",{});var cv=i(Ri);Ju=n(cv,`XLM-RoBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),cv.forEach(t),Zu=d(bt),ns=r(bt,"P",{});var Od=i(ns);Yu=n(Od,"This model inherits from "),Gr=r(Od,"A",{href:!0});var pv=i(Gr);ef=n(pv,"PreTrainedModel"),pv.forEach(t),tf=n(Od,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Od.forEach(t),of=d(bt),ss=r(bt,"P",{});var Wd=i(ss);nf=n(Wd,"This model is also a PyTorch "),rs=r(Wd,"A",{href:!0,rel:!0});var hv=i(rs);sf=n(hv,"torch.nn.Module"),hv.forEach(t),rf=n(Wd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wd.forEach(t),af=d(bt),as=r(bt,"P",{});var Bd=i(as);lf=n(Bd,"This class overrides "),Kr=r(Bd,"A",{href:!0});var uv=i(Kr);df=n(uv,"RobertaForMultipleChoice"),uv.forEach(t),cf=n(Bd,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Bd.forEach(t),pf=d(bt),Ke=r(bt,"DIV",{class:!0});var zt=i(Ke);T(is.$$.fragment,zt),hf=d(zt),Jt=r(zt,"P",{});var za=i(Jt);uf=n(za,"The "),Jr=r(za,"A",{href:!0});var fv=i(Jr);ff=n(fv,"RobertaForMultipleChoice"),fv.forEach(t),mf=n(za," forward method, overrides the "),Ei=r(za,"CODE",{});var mv=i(Ei);gf=n(mv,"__call__"),mv.forEach(t),_f=n(za," special method."),za.forEach(t),bf=d(zt),T(qo.$$.fragment,zt),kf=d(zt),zi=r(zt,"P",{});var gv=i(zi);vf=n(gv,"Example:"),gv.forEach(t),Tf=d(zt),T(ls.$$.fragment,zt),zt.forEach(t),bt.forEach(t),Ul=d(a),Zt=r(a,"H2",{class:!0});var Qd=i(Zt);Co=r(Qd,"A",{id:!0,class:!0,href:!0});var _v=i(Co);Li=r(_v,"SPAN",{});var bv=i(Li);T(ds.$$.fragment,bv),bv.forEach(t),_v.forEach(t),wf=d(Qd),qi=r(Qd,"SPAN",{});var kv=i(qi);yf=n(kv,"XLMRobertaForTokenClassification"),kv.forEach(t),Qd.forEach(t),Vl=d(a),We=r(a,"DIV",{class:!0});var kt=i(We);T(cs.$$.fragment,kt),Mf=d(kt),Ci=r(kt,"P",{});var vv=i(Ci);$f=n(vv,`XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),vv.forEach(t),Ff=d(kt),ps=r(kt,"P",{});var Hd=i(ps);Rf=n(Hd,"This model inherits from "),Zr=r(Hd,"A",{href:!0});var Tv=i(Zr);Ef=n(Tv,"PreTrainedModel"),Tv.forEach(t),zf=n(Hd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hd.forEach(t),Lf=d(kt),hs=r(kt,"P",{});var Ud=i(hs);qf=n(Ud,"This model is also a PyTorch "),us=r(Ud,"A",{href:!0,rel:!0});var wv=i(us);Cf=n(wv,"torch.nn.Module"),wv.forEach(t),xf=n(Ud,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ud.forEach(t),Pf=d(kt),fs=r(kt,"P",{});var Vd=i(fs);Af=n(Vd,"This class overrides "),Yr=r(Vd,"A",{href:!0});var yv=i(Yr);jf=n(yv,"RobertaForTokenClassification"),yv.forEach(t),Xf=n(Vd,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Vd.forEach(t),If=d(kt),Je=r(kt,"DIV",{class:!0});var Lt=i(Je);T(ms.$$.fragment,Lt),Nf=d(Lt),Yt=r(Lt,"P",{});var La=i(Yt);Df=n(La,"The "),ea=r(La,"A",{href:!0});var Mv=i(ea);Sf=n(Mv,"RobertaForTokenClassification"),Mv.forEach(t),Of=n(La," forward method, overrides the "),xi=r(La,"CODE",{});var $v=i(xi);Wf=n($v,"__call__"),$v.forEach(t),Bf=n(La," special method."),La.forEach(t),Qf=d(Lt),T(xo.$$.fragment,Lt),Hf=d(Lt),Pi=r(Lt,"P",{});var Fv=i(Pi);Uf=n(Fv,"Example:"),Fv.forEach(t),Vf=d(Lt),T(gs.$$.fragment,Lt),Lt.forEach(t),kt.forEach(t),Gl=d(a),eo=r(a,"H2",{class:!0});var Gd=i(eo);Po=r(Gd,"A",{id:!0,class:!0,href:!0});var Rv=i(Po);Ai=r(Rv,"SPAN",{});var Ev=i(Ai);T(_s.$$.fragment,Ev),Ev.forEach(t),Rv.forEach(t),Gf=d(Gd),ji=r(Gd,"SPAN",{});var zv=i(ji);Kf=n(zv,"XLMRobertaForQuestionAnswering"),zv.forEach(t),Gd.forEach(t),Kl=d(a),Be=r(a,"DIV",{class:!0});var vt=i(Be);T(bs.$$.fragment,vt),Jf=d(vt),to=r(vt,"P",{});var qa=i(to);Zf=n(qa,`XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Xi=r(qa,"CODE",{});var Lv=i(Xi);Yf=n(Lv,"span start logits"),Lv.forEach(t),em=n(qa," and "),Ii=r(qa,"CODE",{});var qv=i(Ii);tm=n(qv,"span end logits"),qv.forEach(t),om=n(qa,")."),qa.forEach(t),nm=d(vt),ks=r(vt,"P",{});var Kd=i(ks);sm=n(Kd,"This model inherits from "),ta=r(Kd,"A",{href:!0});var Cv=i(ta);rm=n(Cv,"PreTrainedModel"),Cv.forEach(t),am=n(Kd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kd.forEach(t),im=d(vt),vs=r(vt,"P",{});var Jd=i(vs);lm=n(Jd,"This model is also a PyTorch "),Ts=r(Jd,"A",{href:!0,rel:!0});var xv=i(Ts);dm=n(xv,"torch.nn.Module"),xv.forEach(t),cm=n(Jd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Jd.forEach(t),pm=d(vt),ws=r(vt,"P",{});var Zd=i(ws);hm=n(Zd,"This class overrides "),oa=r(Zd,"A",{href:!0});var Pv=i(oa);um=n(Pv,"RobertaForQuestionAnswering"),Pv.forEach(t),fm=n(Zd,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Zd.forEach(t),mm=d(vt),Ze=r(vt,"DIV",{class:!0});var qt=i(Ze);T(ys.$$.fragment,qt),gm=d(qt),oo=r(qt,"P",{});var Ca=i(oo);_m=n(Ca,"The "),na=r(Ca,"A",{href:!0});var Av=i(na);bm=n(Av,"RobertaForQuestionAnswering"),Av.forEach(t),km=n(Ca," forward method, overrides the "),Ni=r(Ca,"CODE",{});var jv=i(Ni);vm=n(jv,"__call__"),jv.forEach(t),Tm=n(Ca," special method."),Ca.forEach(t),wm=d(qt),T(Ao.$$.fragment,qt),ym=d(qt),Di=r(qt,"P",{});var Xv=i(Di);Mm=n(Xv,"Example:"),Xv.forEach(t),$m=d(qt),T(Ms.$$.fragment,qt),qt.forEach(t),vt.forEach(t),Jl=d(a),no=r(a,"H2",{class:!0});var Yd=i(no);jo=r(Yd,"A",{id:!0,class:!0,href:!0});var Iv=i(jo);Si=r(Iv,"SPAN",{});var Nv=i(Si);T($s.$$.fragment,Nv),Nv.forEach(t),Iv.forEach(t),Fm=d(Yd),Oi=r(Yd,"SPAN",{});var Dv=i(Oi);Rm=n(Dv,"TFXLMRobertaModel"),Dv.forEach(t),Yd.forEach(t),Zl=d(a),ze=r(a,"DIV",{class:!0});var at=i(ze);T(Fs.$$.fragment,at),Em=d(at),Wi=r(at,"P",{});var Sv=i(Wi);zm=n(Sv,"The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),Sv.forEach(t),Lm=d(at),Rs=r(at,"P",{});var ec=i(Rs);qm=n(ec,"This model inherits from "),sa=r(ec,"A",{href:!0});var Ov=i(sa);Cm=n(Ov,"TFPreTrainedModel"),Ov.forEach(t),xm=n(ec,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ec.forEach(t),Pm=d(at),Es=r(at,"P",{});var tc=i(Es);Am=n(tc,"This model is also a "),zs=r(tc,"A",{href:!0,rel:!0});var Wv=i(zs);jm=n(Wv,"tf.keras.Model"),Wv.forEach(t),Xm=n(tc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),tc.forEach(t),Im=d(at),T(Xo.$$.fragment,at),Nm=d(at),Ls=r(at,"P",{});var oc=i(Ls);Dm=n(oc,"This class overrides "),ra=r(oc,"A",{href:!0});var Bv=i(ra);Sm=n(Bv,"TFRobertaModel"),Bv.forEach(t),Om=n(oc,`. Please check the superclass for the appropriate documentation alongside usage examples.`),oc.forEach(t),Wm=d(at),Ye=r(at,"DIV",{class:!0});var Ct=i(Ye);T(qs.$$.fragment,Ct),Bm=d(Ct),so=r(Ct,"P",{});var xa=i(so);Qm=n(xa,"The "),aa=r(xa,"A",{href:!0});var Qv=i(aa);Hm=n(Qv,"TFRobertaModel"),Qv.forEach(t),Um=n(xa," forward method, overrides the "),Bi=r(xa,"CODE",{});var Hv=i(Bi);Vm=n(Hv,"__call__"),Hv.forEach(t),Gm=n(xa," special method."),xa.forEach(t),Km=d(Ct),T(Io.$$.fragment,Ct),Jm=d(Ct),Qi=r(Ct,"P",{});var Uv=i(Qi);Zm=n(Uv,"Example:"),Uv.forEach(t),Ym=d(Ct),T(Cs.$$.fragment,Ct),Ct.forEach(t),at.forEach(t),Yl=d(a),ro=r(a,"H2",{class:!0});var nc=i(ro);No=r(nc,"A",{id:!0,class:!0,href:!0});var Vv=i(No);Hi=r(Vv,"SPAN",{});var Gv=i(Hi);T(xs.$$.fragment,Gv),Gv.forEach(t),Vv.forEach(t),eg=d(nc),Ui=r(nc,"SPAN",{});var Kv=i(Ui);tg=n(Kv,"TFXLMRobertaForMaskedLM"),Kv.forEach(t),nc.forEach(t),ed=d(a),Le=r(a,"DIV",{class:!0});var it=i(Le);T(Ps.$$.fragment,it),og=d(it),As=r(it,"P",{});var sc=i(As);ng=n(sc,"XLM-RoBERTa Model with a "),Vi=r(sc,"CODE",{});var Jv=i(Vi);sg=n(Jv,"language modeling"),Jv.forEach(t),rg=n(sc," head on top."),sc.forEach(t),ag=d(it),js=r(it,"P",{});var rc=i(js);ig=n(rc,"This model inherits from "),ia=r(rc,"A",{href:!0});var Zv=i(ia);lg=n(Zv,"TFPreTrainedModel"),Zv.forEach(t),dg=n(rc,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rc.forEach(t),cg=d(it),Xs=r(it,"P",{});var ac=i(Xs);pg=n(ac,"This model is also a "),Is=r(ac,"A",{href:!0,rel:!0});var Yv=i(Is);hg=n(Yv,"tf.keras.Model"),Yv.forEach(t),ug=n(ac,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ac.forEach(t),fg=d(it),T(Do.$$.fragment,it),mg=d(it),Ns=r(it,"P",{});var ic=i(Ns);gg=n(ic,"This class overrides "),la=r(ic,"A",{href:!0});var eT=i(la);_g=n(eT,"TFRobertaForMaskedLM"),eT.forEach(t),bg=n(ic,`. Please check the superclass for the appropriate documentation alongside usage examples.`),ic.forEach(t),kg=d(it),et=r(it,"DIV",{class:!0});var xt=i(et);T(Ds.$$.fragment,xt),vg=d(xt),ao=r(xt,"P",{});var Pa=i(ao);Tg=n(Pa,"The "),da=r(Pa,"A",{href:!0});var tT=i(da);wg=n(tT,"TFRobertaForMaskedLM"),tT.forEach(t),yg=n(Pa," forward method, overrides the "),Gi=r(Pa,"CODE",{});var oT=i(Gi);Mg=n(oT,"__call__"),oT.forEach(t),$g=n(Pa," special method."),Pa.forEach(t),Fg=d(xt),T(So.$$.fragment,xt),Rg=d(xt),Ki=r(xt,"P",{});var nT=i(Ki);Eg=n(nT,"Example:"),nT.forEach(t),zg=d(xt),T(Ss.$$.fragment,xt),xt.forEach(t),it.forEach(t),td=d(a),io=r(a,"H2",{class:!0});var lc=i(io);Oo=r(lc,"A",{id:!0,class:!0,href:!0});var sT=i(Oo);Ji=r(sT,"SPAN",{});var rT=i(Ji);T(Os.$$.fragment,rT),rT.forEach(t),sT.forEach(t),Lg=d(lc),Zi=r(lc,"SPAN",{});var aT=i(Zi);qg=n(aT,"TFXLMRobertaForSequenceClassification"),aT.forEach(t),lc.forEach(t),od=d(a),qe=r(a,"DIV",{class:!0});var lt=i(qe);T(Ws.$$.fragment,lt),Cg=d(lt),Yi=r(lt,"P",{});var iT=i(Yi);xg=n(iT,`XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),iT.forEach(t),Pg=d(lt),Bs=r(lt,"P",{});var dc=i(Bs);Ag=n(dc,"This model inherits from "),ca=r(dc,"A",{href:!0});var lT=i(ca);jg=n(lT,"TFPreTrainedModel"),lT.forEach(t),Xg=n(dc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dc.forEach(t),Ig=d(lt),Qs=r(lt,"P",{});var cc=i(Qs);Ng=n(cc,"This model is also a "),Hs=r(cc,"A",{href:!0,rel:!0});var dT=i(Hs);Dg=n(dT,"tf.keras.Model"),dT.forEach(t),Sg=n(cc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cc.forEach(t),Og=d(lt),T(Wo.$$.fragment,lt),Wg=d(lt),Us=r(lt,"P",{});var pc=i(Us);Bg=n(pc,"This class overrides "),pa=r(pc,"A",{href:!0});var cT=i(pa);Qg=n(cT,"TFRobertaForSequenceClassification"),cT.forEach(t),Hg=n(pc,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),pc.forEach(t),Ug=d(lt),tt=r(lt,"DIV",{class:!0});var Pt=i(tt);T(Vs.$$.fragment,Pt),Vg=d(Pt),lo=r(Pt,"P",{});var Aa=i(lo);Gg=n(Aa,"The "),ha=r(Aa,"A",{href:!0});var pT=i(ha);Kg=n(pT,"TFRobertaForSequenceClassification"),pT.forEach(t),Jg=n(Aa," forward method, overrides the "),el=r(Aa,"CODE",{});var hT=i(el);Zg=n(hT,"__call__"),hT.forEach(t),Yg=n(Aa," special method."),Aa.forEach(t),e_=d(Pt),T(Bo.$$.fragment,Pt),t_=d(Pt),tl=r(Pt,"P",{});var uT=i(tl);o_=n(uT,"Example:"),uT.forEach(t),n_=d(Pt),T(Gs.$$.fragment,Pt),Pt.forEach(t),lt.forEach(t),nd=d(a),co=r(a,"H2",{class:!0});var hc=i(co);Qo=r(hc,"A",{id:!0,class:!0,href:!0});var fT=i(Qo);ol=r(fT,"SPAN",{});var mT=i(ol);T(Ks.$$.fragment,mT),mT.forEach(t),fT.forEach(t),s_=d(hc),nl=r(hc,"SPAN",{});var gT=i(nl);r_=n(gT,"TFXLMRobertaForMultipleChoice"),gT.forEach(t),hc.forEach(t),sd=d(a),Ce=r(a,"DIV",{class:!0});var dt=i(Ce);T(Js.$$.fragment,dt),a_=d(dt),sl=r(dt,"P",{});var _T=i(sl);i_=n(_T,`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),_T.forEach(t),l_=d(dt),Zs=r(dt,"P",{});var uc=i(Zs);d_=n(uc,"This model inherits from "),ua=r(uc,"A",{href:!0});var bT=i(ua);c_=n(bT,"TFPreTrainedModel"),bT.forEach(t),p_=n(uc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uc.forEach(t),h_=d(dt),Ys=r(dt,"P",{});var fc=i(Ys);u_=n(fc,"This model is also a "),er=r(fc,"A",{href:!0,rel:!0});var kT=i(er);f_=n(kT,"tf.keras.Model"),kT.forEach(t),m_=n(fc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fc.forEach(t),g_=d(dt),T(Ho.$$.fragment,dt),__=d(dt),tr=r(dt,"P",{});var mc=i(tr);b_=n(mc,"This class overrides "),fa=r(mc,"A",{href:!0});var vT=i(fa);k_=n(vT,"TFRobertaForMultipleChoice"),vT.forEach(t),v_=n(mc,`. Please check the superclass for the appropriate documentation alongside usage examples.`),mc.forEach(t),T_=d(dt),ot=r(dt,"DIV",{class:!0});var At=i(ot);T(or.$$.fragment,At),w_=d(At),po=r(At,"P",{});var ja=i(po);y_=n(ja,"The "),ma=r(ja,"A",{href:!0});var TT=i(ma);M_=n(TT,"TFRobertaForMultipleChoice"),TT.forEach(t),$_=n(ja," forward method, overrides the "),rl=r(ja,"CODE",{});var wT=i(rl);F_=n(wT,"__call__"),wT.forEach(t),R_=n(ja," special method."),ja.forEach(t),E_=d(At),T(Uo.$$.fragment,At),z_=d(At),al=r(At,"P",{});var yT=i(al);L_=n(yT,"Example:"),yT.forEach(t),q_=d(At),T(nr.$$.fragment,At),At.forEach(t),dt.forEach(t),rd=d(a),ho=r(a,"H2",{class:!0});var gc=i(ho);Vo=r(gc,"A",{id:!0,class:!0,href:!0});var MT=i(Vo);il=r(MT,"SPAN",{});var $T=i(il);T(sr.$$.fragment,$T),$T.forEach(t),MT.forEach(t),C_=d(gc),ll=r(gc,"SPAN",{});var FT=i(ll);x_=n(FT,"TFXLMRobertaForTokenClassification"),FT.forEach(t),gc.forEach(t),ad=d(a),xe=r(a,"DIV",{class:!0});var ct=i(xe);T(rr.$$.fragment,ct),P_=d(ct),dl=r(ct,"P",{});var RT=i(dl);A_=n(RT,`XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),RT.forEach(t),j_=d(ct),ar=r(ct,"P",{});var _c=i(ar);X_=n(_c,"This model inherits from "),ga=r(_c,"A",{href:!0});var ET=i(ga);I_=n(ET,"TFPreTrainedModel"),ET.forEach(t),N_=n(_c,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_c.forEach(t),D_=d(ct),ir=r(ct,"P",{});var bc=i(ir);S_=n(bc,"This model is also a "),lr=r(bc,"A",{href:!0,rel:!0});var zT=i(lr);O_=n(zT,"tf.keras.Model"),zT.forEach(t),W_=n(bc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bc.forEach(t),B_=d(ct),T(Go.$$.fragment,ct),Q_=d(ct),dr=r(ct,"P",{});var kc=i(dr);H_=n(kc,"This class overrides "),_a=r(kc,"A",{href:!0});var LT=i(_a);U_=n(LT,"TFRobertaForTokenClassification"),LT.forEach(t),V_=n(kc,`. Please check the superclass for the appropriate documentation alongside usage examples.`),kc.forEach(t),G_=d(ct),nt=r(ct,"DIV",{class:!0});var jt=i(nt);T(cr.$$.fragment,jt),K_=d(jt),uo=r(jt,"P",{});var Xa=i(uo);J_=n(Xa,"The "),ba=r(Xa,"A",{href:!0});var qT=i(ba);Z_=n(qT,"TFRobertaForTokenClassification"),qT.forEach(t),Y_=n(Xa," forward method, overrides the "),cl=r(Xa,"CODE",{});var CT=i(cl);eb=n(CT,"__call__"),CT.forEach(t),tb=n(Xa," special method."),Xa.forEach(t),ob=d(jt),T(Ko.$$.fragment,jt),nb=d(jt),pl=r(jt,"P",{});var xT=i(pl);sb=n(xT,"Example:"),xT.forEach(t),rb=d(jt),T(pr.$$.fragment,jt),jt.forEach(t),ct.forEach(t),id=d(a),fo=r(a,"H2",{class:!0});var vc=i(fo);Jo=r(vc,"A",{id:!0,class:!0,href:!0});var PT=i(Jo);hl=r(PT,"SPAN",{});var AT=i(hl);T(hr.$$.fragment,AT),AT.forEach(t),PT.forEach(t),ab=d(vc),ul=r(vc,"SPAN",{});var jT=i(ul);ib=n(jT,"TFXLMRobertaForQuestionAnswering"),jT.forEach(t),vc.forEach(t),ld=d(a),Pe=r(a,"DIV",{class:!0});var pt=i(Pe);T(ur.$$.fragment,pt),lb=d(pt),mo=r(pt,"P",{});var Ia=i(mo);db=n(Ia,`XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),fl=r(Ia,"CODE",{});var XT=i(fl);cb=n(XT,"span start logits"),XT.forEach(t),pb=n(Ia," and "),ml=r(Ia,"CODE",{});var IT=i(ml);hb=n(IT,"span end logits"),IT.forEach(t),ub=n(Ia,")."),Ia.forEach(t),fb=d(pt),fr=r(pt,"P",{});var Tc=i(fr);mb=n(Tc,"This model inherits from "),ka=r(Tc,"A",{href:!0});var NT=i(ka);gb=n(NT,"TFPreTrainedModel"),NT.forEach(t),_b=n(Tc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tc.forEach(t),bb=d(pt),mr=r(pt,"P",{});var wc=i(mr);kb=n(wc,"This model is also a "),gr=r(wc,"A",{href:!0,rel:!0});var DT=i(gr);vb=n(DT,"tf.keras.Model"),DT.forEach(t),Tb=n(wc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),wc.forEach(t),wb=d(pt),T(Zo.$$.fragment,pt),yb=d(pt),_r=r(pt,"P",{});var yc=i(_r);Mb=n(yc,"This class overrides "),gl=r(yc,"CODE",{});var ST=i(gl);$b=n(ST,"TFRobertaForQuestionAnsweringSimple"),ST.forEach(t),Fb=n(yc,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),yc.forEach(t),Rb=d(pt),st=r(pt,"DIV",{class:!0});var Xt=i(st);T(br.$$.fragment,Xt),Eb=d(Xt),go=r(Xt,"P",{});var Na=i(go);zb=n(Na,"The "),va=r(Na,"A",{href:!0});var OT=i(va);Lb=n(OT,"TFRobertaForQuestionAnswering"),OT.forEach(t),qb=n(Na," forward method, overrides the "),_l=r(Na,"CODE",{});var WT=i(_l);Cb=n(WT,"__call__"),WT.forEach(t),xb=n(Na," special method."),Na.forEach(t),Pb=d(Xt),T(Yo.$$.fragment,Xt),Ab=d(Xt),bl=r(Xt,"P",{});var BT=i(bl);jb=n(BT,"Example:"),BT.forEach(t),Xb=d(Xt),T(kr.$$.fragment,Xt),Xt.forEach(t),pt.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(m1)),c(_,"id","xlmroberta"),c(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_,"href","#xlmroberta"),c(m,"class","relative group"),c(Z,"id","overview"),c(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Z,"href","#overview"),c(z,"class","relative group"),c(te,"href","https://arxiv.org/abs/1911.02116"),c(te,"rel","nofollow"),c(p,"href","/docs/transformers/v4.15.0/en/roberta"),c(ve,"href","https://huggingface.co/stefan-it"),c(ve,"rel","nofollow"),c(x,"href","https://github.com/pytorch/fairseq/tree/master/examples/xlmr"),c(x,"rel","nofollow"),c(ke,"id","transformers.XLMRobertaConfig"),c(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ke,"href","#transformers.XLMRobertaConfig"),c(X,"class","relative group"),c(Fr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig"),c(It,"class","docstring"),c(_o,"id","transformers.XLMRobertaTokenizer"),c(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_o,"href","#transformers.XLMRobertaTokenizer"),c(Nt,"class","relative group"),c(Rr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Er,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),c(rn,"href","https://github.com/google/sentencepiece"),c(rn,"rel","nofollow"),c(zr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(wt,"class","docstring"),c(bo,"class","docstring"),c(ko,"class","docstring"),c(Ka,"class","docstring"),c(Ee,"class","docstring"),c(vo,"id","transformers.XLMRobertaTokenizerFast"),c(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vo,"href","#transformers.XLMRobertaTokenizerFast"),c(St,"class","relative 
group"),c(Cr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(xr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),c(mn,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models"),c(mn,"rel","nofollow"),c(Pr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(yt,"class","docstring"),c(To,"class","docstring"),c(He,"class","docstring"),c(wo,"id","transformers.XLMRobertaModel"),c(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wo,"href","#transformers.XLMRobertaModel"),c(Ot,"class","relative group"),c(Xr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Mn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Mn,"rel","nofollow"),c(Ir,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel"),c(Nr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel"),c(Ue,"class","docstring"),c(Ie,"class","docstring"),c(Mo,"id","transformers.XLMRobertaForCausalLM"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mo,"href","#transformers.XLMRobertaForCausalLM"),c(Bt,"class","relative group"),c(Dr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(xn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(xn,"rel","nofollow"),c(Sr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForCausalLM"),c(Or,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForCausalLM"),c(Ve,"class","docstring"),c(Ne,"class","docstring"),c(Fo,"id","transformers.XLMRobertaForMaskedLM"),c(Fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fo,"href","#transformers.XLMRobertaForMaskedLM"),c(Ht,"class","relative group"),c(Wr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(On,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(On,"rel","nofollow"),c(Br,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM"),c(Qr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM"),c(Ge,"class","docstring"),c(De,"class","docstring"),c(Eo,"id","transformers.XLMRobertaForSequenceClassification"),c(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Eo,"href","#transformers.XLMRobertaForSequenceClassification"),c(Vt,"class","relative 
group"),c(Hr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Kn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Kn,"rel","nofollow"),c(Ur,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForSequenceClassification"),c(Vr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForSequenceClassification"),c(je,"class","docstring"),c(Se,"class","docstring"),c(Lo,"id","transformers.XLMRobertaForMultipleChoice"),c(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lo,"href","#transformers.XLMRobertaForMultipleChoice"),c(Kt,"class","relative group"),c(Gr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(rs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(rs,"rel","nofollow"),c(Kr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMultipleChoice"),c(Jr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMultipleChoice"),c(Ke,"class","docstring"),c(Oe,"class","docstring"),c(Co,"id","transformers.XLMRobertaForTokenClassification"),c(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Co,"href","#transformers.XLMRobertaForTokenClassification"),c(Zt,"class","relative group"),c(Zr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(us,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(us,"rel","nofollow"),c(Yr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForTokenClassification"),c(ea,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForTokenClassification"),c(Je,"class","docstring"),c(We,"class","docstring"),c(Po,"id","transformers.XLMRobertaForQuestionAnswering"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.XLMRobertaForQuestionAnswering"),c(eo,"class","relative group"),c(ta,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ts,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ts,"rel","nofollow"),c(oa,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForQuestionAnswering"),c(na,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForQuestionAnswering"),c(Ze,"class","docstring"),c(Be,"class","docstring"),c(jo,"id","transformers.TFXLMRobertaModel"),c(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jo,"href","#transformers.TFXLMRobertaModel"),c(no,"class","relative 
group"),c(sa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(zs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(zs,"rel","nofollow"),c(ra,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaModel"),c(aa,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaModel"),c(Ye,"class","docstring"),c(ze,"class","docstring"),c(No,"id","transformers.TFXLMRobertaForMaskedLM"),c(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(No,"href","#transformers.TFXLMRobertaForMaskedLM"),c(ro,"class","relative group"),c(ia,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Is,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Is,"rel","nofollow"),c(la,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM"),c(da,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM"),c(et,"class","docstring"),c(Le,"class","docstring"),c(Oo,"id","transformers.TFXLMRobertaForSequenceClassification"),c(Oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oo,"href","#transformers.TFXLMRobertaForSequenceClassification"),c(io,"class","relative group"),c(ca,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Hs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Hs,"rel","nofollow"),c(pa,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification"),c(ha,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification"),c(tt,"class","docstring"),c(qe,"class","docstring"),c(Qo,"id","transformers.TFXLMRobertaForMultipleChoice"),c(Qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qo,"href","#transformers.TFXLMRobertaForMultipleChoice"),c(co,"class","relative group"),c(ua,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(er,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(er,"rel","nofollow"),c(fa,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice"),c(ma,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice"),c(ot,"class","docstring"),c(Ce,"class","docstring"),c(Vo,"id","transformers.TFXLMRobertaForTokenClassification"),c(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vo,"href","#transformers.TFXLMRobertaForTokenClassification"),c(ho,"class","relative 
group"),c(ga,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(lr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(lr,"rel","nofollow"),c(_a,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForTokenClassification"),c(ba,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForTokenClassification"),c(nt,"class","docstring"),c(xe,"class","docstring"),c(Jo,"id","transformers.TFXLMRobertaForQuestionAnswering"),c(Jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jo,"href","#transformers.TFXLMRobertaForQuestionAnswering"),c(fo,"class","relative group"),c(ka,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(gr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(gr,"rel","nofollow"),c(va,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForQuestionAnswering"),c(st,"class","docstring"),c(Pe,"class","docstring")},m(a,f){e(document.head,h),u(a,F,f),u(a,m,f),e(m,_),e(_,k),w(b,k,null),e(m,g),e(m,E),e(E,pe),u(a,V,f),u(a,z,f),e(z,Z),e(Z,D),w(ee,D,null),e(z,he),e(z,S),e(S,ue),u(a,ie,f),u(a,U,f),e(U,P),e(U,te),e(te,K),e(U,L),u(a,C,f),u(a,se,f),e(se,B),u(a,le,f),u(a,re,f),e(re,O),e(O,fe),u(a,de,f),u(a,q,f),e(q,me),u(a,W,f),u(a,Y,f),e(Y,oe),e(oe,Q),e(oe,ae),e(ae,ge),e(oe,A),e(Y,_e),e(Y,I),e(I,be),e(I,p),e(p,R),e(I,J),u(a,we,f),u(a,ce,f),e(ce,j),e(ce,ve),e(ve,Me),e(ce,$e),e(ce,x),e(x,H),e(ce,Fe),u(a,ye,f),u(a,X,f),e(X,ke),e(ke,Te),w(ne,Te,null),e(X,Re),e(X,Da),e(Da,Mc),u(a,Cl,f),u(a,It,f),w(tn,It,null),e(It,$c),e(It,on),e(on,Fc),e(on,Fr),e(Fr,Rc),e(on,Ec),u(a,xl,f),u(a,Nt,f),e(Nt,_o),e(_o,Sa),w(nn,Sa,null),e(Nt,zc),e(Nt,Oa),e(Oa,Lc),u(a,Pl,f),u(a,Ee,f),w(sn,Ee,null),e(Ee,qc),e(Ee,Tt),e(Tt,Cc),e(Tt,Rr),e(Rr,xc),e(Tt,Pc),e(Tt,Er),e(Er,Ac),e(Tt,jc),e(Tt,rn),e(rn,Xc),e(Tt,Ic),e(Ee,Nc),e(Ee,an),e(an,Dc),e(an,zr),e(zr,Sc),e(an,Oc),e(Ee,Wc),e(Ee,Dt),e(Dt,Bc),e(Dt,Wa),e(Wa,Qc),e(Dt,Hc),e(Dt,Ba),e(Ba,Uc),e(Dt,Vc),e(Ee,Gc),e(Ee,wt),w(ln,wt,null),e(wt,Kc),e(wt,Qa),e(Qa,Jc),e(wt,Zc),e(wt,dn),e(dn,Lr),e(Lr,Yc),e(Lr,Ha),e(Ha,ep),e(dn,tp),e(dn,qr),e(qr,op),e(qr,Ua),e(Ua,np),e(Ee,sp),e(Ee,bo),w(cn,bo,null),e(bo,rp),e(bo,pn),e(pn,ap),e(pn,Va),e(Va,ip),e(pn,lp),e(Ee,dp),e(Ee,ko),w(hn,ko,null),e(ko,cp),e(ko,Ga),e(Ga,pp),e(Ee,hp),e(Ee,Ka),u(a,Al,f),u(a,St,f),e(St,vo),e(vo,Ja),w(un,Ja,null),e(St,up),e(St,Za),e(Za,fp),u(a,jl,f),u(a,He,f),w(fn,He,null),e(He,mp),e(He,ut),e(ut,gp),e(ut,Ya),e(Ya,_p),e(ut,bp),e(ut,Cr),e(Cr,kp),e(ut,vp),e(ut,xr),e(xr,Tp),e(ut,wp),e(ut,mn),e(mn,yp),e(ut,Mp),e(He,$p),e(He,gn),e(gn,Fp),e(gn,Pr),e(Pr,Rp),e(gn,Ep),e(He,zp),e(He,yt),w(_n,yt,null),e(yt,Lp),e(yt,ei),e(ei,qp),e(yt,Cp),e(yt,bn),e(bn,Ar),e(Ar,xp),e(Ar,ti),e(ti,Pp),e(bn,Ap),e(bn,jr),e(jr,jp),e(jr,oi),e(oi,Xp),e(He,Ip),e(He,To),w(kn,To,null),e(To,Np),e(To,ni),e(ni,Dp),u(a,Xl,f),u(a,Ot,f),e(Ot,wo),e(wo,si),w(vn,si,null),e(Ot,Sp),e(Ot,ri),e(ri,Op),u(a,Il,f),u(a,Ie,f),w(Tn,Ie,null),e(Ie,Wp),e(Ie,ai),e(ai,Bp),e(Ie,Qp),e(Ie,wn),e(wn,Hp),e(wn,Xr),e(Xr,Up),e(wn,Vp),e(Ie,Gp),e(Ie,yn),e(yn,Kp),e(yn,Mn),e(Mn,Jp),e(yn,Zp),e(Ie,Yp),e(Ie,$n),e($n,eh),e($n,Ir),e(Ir,th),e($n,oh),e(Ie,nh),e(Ie,Ue),w(Fn,Ue,null),e(Ue,sh),e(Ue,Wt),e(Wt,rh),e(Wt,Nr),e(Nr,ah),e(Wt,ih),e(Wt,ii),e(ii,lh),e(Wt,dh),e(Ue,ch),w(yo,Ue,null),e(Ue,ph),e(Ue,li),e(li,hh),e(Ue,uh),w(Rn,Ue,null),u(a,Nl,f),u(a,Bt,f),e(Bt,Mo),e(Mo,di),w(En,di,null),e(Bt,fh),e(Bt,ci),e(ci,mh
),u(a,Dl,f),u(a,Ne,f),w(zn,Ne,null),e(Ne,gh),e(Ne,Ln),e(Ln,_h),e(Ln,pi),e(pi,bh),e(Ln,kh),e(Ne,vh),e(Ne,qn),e(qn,Th),e(qn,Dr),e(Dr,wh),e(qn,yh),e(Ne,Mh),e(Ne,Cn),e(Cn,$h),e(Cn,xn),e(xn,Fh),e(Cn,Rh),e(Ne,Eh),e(Ne,Pn),e(Pn,zh),e(Pn,Sr),e(Sr,Lh),e(Pn,qh),e(Ne,Ch),e(Ne,Ve),w(An,Ve,null),e(Ve,xh),e(Ve,Qt),e(Qt,Ph),e(Qt,Or),e(Or,Ah),e(Qt,jh),e(Qt,hi),e(hi,Xh),e(Qt,Ih),e(Ve,Nh),w($o,Ve,null),e(Ve,Dh),e(Ve,ui),e(ui,Sh),e(Ve,Oh),w(jn,Ve,null),u(a,Sl,f),u(a,Ht,f),e(Ht,Fo),e(Fo,fi),w(Xn,fi,null),e(Ht,Wh),e(Ht,mi),e(mi,Bh),u(a,Ol,f),u(a,De,f),w(In,De,null),e(De,Qh),e(De,Nn),e(Nn,Hh),e(Nn,gi),e(gi,Uh),e(Nn,Vh),e(De,Gh),e(De,Dn),e(Dn,Kh),e(Dn,Wr),e(Wr,Jh),e(Dn,Zh),e(De,Yh),e(De,Sn),e(Sn,eu),e(Sn,On),e(On,tu),e(Sn,ou),e(De,nu),e(De,Wn),e(Wn,su),e(Wn,Br),e(Br,ru),e(Wn,au),e(De,iu),e(De,Ge),w(Bn,Ge,null),e(Ge,lu),e(Ge,Ut),e(Ut,du),e(Ut,Qr),e(Qr,cu),e(Ut,pu),e(Ut,_i),e(_i,hu),e(Ut,uu),e(Ge,fu),w(Ro,Ge,null),e(Ge,mu),e(Ge,bi),e(bi,gu),e(Ge,_u),w(Qn,Ge,null),u(a,Wl,f),u(a,Vt,f),e(Vt,Eo),e(Eo,ki),w(Hn,ki,null),e(Vt,bu),e(Vt,vi),e(vi,ku),u(a,Bl,f),u(a,Se,f),w(Un,Se,null),e(Se,vu),e(Se,Ti),e(Ti,Tu),e(Se,wu),e(Se,Vn),e(Vn,yu),e(Vn,Hr),e(Hr,Mu),e(Vn,$u),e(Se,Fu),e(Se,Gn),e(Gn,Ru),e(Gn,Kn),e(Kn,Eu),e(Gn,zu),e(Se,Lu),e(Se,Jn),e(Jn,qu),e(Jn,Ur),e(Ur,Cu),e(Jn,xu),e(Se,Pu),e(Se,je),w(Zn,je,null),e(je,Au),e(je,Gt),e(Gt,ju),e(Gt,Vr),e(Vr,Xu),e(Gt,Iu),e(Gt,wi),e(wi,Nu),e(Gt,Du),e(je,Su),w(zo,je,null),e(je,Ou),e(je,yi),e(yi,Wu),e(je,Bu),w(Yn,je,null),e(je,Qu),e(je,Mi),e(Mi,Hu),e(je,Uu),w(es,je,null),u(a,Ql,f),u(a,Kt,f),e(Kt,Lo),e(Lo,$i),w(ts,$i,null),e(Kt,Vu),e(Kt,Fi),e(Fi,Gu),u(a,Hl,f),u(a,Oe,f),w(os,Oe,null),e(Oe,Ku),e(Oe,Ri),e(Ri,Ju),e(Oe,Zu),e(Oe,ns),e(ns,Yu),e(ns,Gr),e(Gr,ef),e(ns,tf),e(Oe,of),e(Oe,ss),e(ss,nf),e(ss,rs),e(rs,sf),e(ss,rf),e(Oe,af),e(Oe,as),e(as,lf),e(as,Kr),e(Kr,df),e(as,cf),e(Oe,pf),e(Oe,Ke),w(is,Ke,null),e(Ke,hf),e(Ke,Jt),e(Jt,uf),e(Jt,Jr),e(Jr,ff),e(Jt,mf),e(Jt,Ei),e(Ei,gf),e(Jt,_f),e(Ke,bf),w(qo,Ke,null),e(Ke,kf),e(Ke,zi),e(zi,vf),e(Ke,Tf),w(ls,Ke,null),u(a,Ul,f),u(a,Zt,f),e(Zt,Co),e(Co,Li),w(ds,Li,null),e(Zt,wf),e(Zt,qi),e(qi,yf),u(a,Vl,f),u(a,We,f),w(cs,We,null),e(We,Mf),e(We,Ci),e(Ci,$f),e(We,Ff),e(We,ps),e(ps,Rf),e(ps,Zr),e(Zr,Ef),e(ps,zf),e(We,Lf),e(We,hs),e(hs,qf),e(hs,us),e(us,Cf),e(hs,xf),e(We,Pf),e(We,fs),e(fs,Af),e(fs,Yr),e(Yr,jf),e(fs,Xf),e(We,If),e(We,Je),w(ms,Je,null),e(Je,Nf),e(Je,Yt),e(Yt,Df),e(Yt,ea),e(ea,Sf),e(Yt,Of),e(Yt,xi),e(xi,Wf),e(Yt,Bf),e(Je,Qf),w(xo,Je,null),e(Je,Hf),e(Je,Pi),e(Pi,Uf),e(Je,Vf),w(gs,Je,null),u(a,Gl,f),u(a,eo,f),e(eo,Po),e(Po,Ai),w(_s,Ai,null),e(eo,Gf),e(eo,ji),e(ji,Kf),u(a,Kl,f),u(a,Be,f),w(bs,Be,null),e(Be,Jf),e(Be,to),e(to,Zf),e(to,Xi),e(Xi,Yf),e(to,em),e(to,Ii),e(Ii,tm),e(to,om),e(Be,nm),e(Be,ks),e(ks,sm),e(ks,ta),e(ta,rm),e(ks,am),e(Be,im),e(Be,vs),e(vs,lm),e(vs,Ts),e(Ts,dm),e(vs,cm),e(Be,pm),e(Be,ws),e(ws,hm),e(ws,oa),e(oa,um),e(ws,fm),e(Be,mm),e(Be,Ze),w(ys,Ze,null),e(Ze,gm),e(Ze,oo),e(oo,_m),e(oo,na),e(na,bm),e(oo,km),e(oo,Ni),e(Ni,vm),e(oo,Tm),e(Ze,wm),w(Ao,Ze,null),e(Ze,ym),e(Ze,Di),e(Di,Mm),e(Ze,$m),w(Ms,Ze,null),u(a,Jl,f),u(a,no,f),e(no,jo),e(jo,Si),w($s,Si,null),e(no,Fm),e(no,Oi),e(Oi,Rm),u(a,Zl,f),u(a,ze,f),w(Fs,ze,null),e(ze,Em),e(ze,Wi),e(Wi,zm),e(ze,Lm),e(ze,Rs),e(Rs,qm),e(Rs,sa),e(sa,Cm),e(Rs,xm),e(ze,Pm),e(ze,Es),e(Es,Am),e(Es,zs),e(zs,jm),e(Es,Xm),e(ze,Im),w(Xo,ze,null),e(ze,Nm),e(ze,Ls),e(Ls,Dm),e(Ls,ra),e(ra,Sm),e(Ls,Om),e(ze,Wm),e(ze,Ye),w(qs,Ye,null),e(Ye,Bm),e(Ye,so),e(so,Qm),e(so,aa),e(aa,Hm),e(so,Um),e(so,Bi),e(Bi,Vm),e(so,Gm),e(Ye,Km),w(Io,Ye,null),e(Ye,Jm),e(Ye,Qi),e(Qi,Zm),e(Ye,Ym),w(Cs,Ye,null),u(a,Yl,f),u(a,ro,f),e(ro,No),e(No,H
i),w(xs,Hi,null),e(ro,eg),e(ro,Ui),e(Ui,tg),u(a,ed,f),u(a,Le,f),w(Ps,Le,null),e(Le,og),e(Le,As),e(As,ng),e(As,Vi),e(Vi,sg),e(As,rg),e(Le,ag),e(Le,js),e(js,ig),e(js,ia),e(ia,lg),e(js,dg),e(Le,cg),e(Le,Xs),e(Xs,pg),e(Xs,Is),e(Is,hg),e(Xs,ug),e(Le,fg),w(Do,Le,null),e(Le,mg),e(Le,Ns),e(Ns,gg),e(Ns,la),e(la,_g),e(Ns,bg),e(Le,kg),e(Le,et),w(Ds,et,null),e(et,vg),e(et,ao),e(ao,Tg),e(ao,da),e(da,wg),e(ao,yg),e(ao,Gi),e(Gi,Mg),e(ao,$g),e(et,Fg),w(So,et,null),e(et,Rg),e(et,Ki),e(Ki,Eg),e(et,zg),w(Ss,et,null),u(a,td,f),u(a,io,f),e(io,Oo),e(Oo,Ji),w(Os,Ji,null),e(io,Lg),e(io,Zi),e(Zi,qg),u(a,od,f),u(a,qe,f),w(Ws,qe,null),e(qe,Cg),e(qe,Yi),e(Yi,xg),e(qe,Pg),e(qe,Bs),e(Bs,Ag),e(Bs,ca),e(ca,jg),e(Bs,Xg),e(qe,Ig),e(qe,Qs),e(Qs,Ng),e(Qs,Hs),e(Hs,Dg),e(Qs,Sg),e(qe,Og),w(Wo,qe,null),e(qe,Wg),e(qe,Us),e(Us,Bg),e(Us,pa),e(pa,Qg),e(Us,Hg),e(qe,Ug),e(qe,tt),w(Vs,tt,null),e(tt,Vg),e(tt,lo),e(lo,Gg),e(lo,ha),e(ha,Kg),e(lo,Jg),e(lo,el),e(el,Zg),e(lo,Yg),e(tt,e_),w(Bo,tt,null),e(tt,t_),e(tt,tl),e(tl,o_),e(tt,n_),w(Gs,tt,null),u(a,nd,f),u(a,co,f),e(co,Qo),e(Qo,ol),w(Ks,ol,null),e(co,s_),e(co,nl),e(nl,r_),u(a,sd,f),u(a,Ce,f),w(Js,Ce,null),e(Ce,a_),e(Ce,sl),e(sl,i_),e(Ce,l_),e(Ce,Zs),e(Zs,d_),e(Zs,ua),e(ua,c_),e(Zs,p_),e(Ce,h_),e(Ce,Ys),e(Ys,u_),e(Ys,er),e(er,f_),e(Ys,m_),e(Ce,g_),w(Ho,Ce,null),e(Ce,__),e(Ce,tr),e(tr,b_),e(tr,fa),e(fa,k_),e(tr,v_),e(Ce,T_),e(Ce,ot),w(or,ot,null),e(ot,w_),e(ot,po),e(po,y_),e(po,ma),e(ma,M_),e(po,$_),e(po,rl),e(rl,F_),e(po,R_),e(ot,E_),w(Uo,ot,null),e(ot,z_),e(ot,al),e(al,L_),e(ot,q_),w(nr,ot,null),u(a,rd,f),u(a,ho,f),e(ho,Vo),e(Vo,il),w(sr,il,null),e(ho,C_),e(ho,ll),e(ll,x_),u(a,ad,f),u(a,xe,f),w(rr,xe,null),e(xe,P_),e(xe,dl),e(dl,A_),e(xe,j_),e(xe,ar),e(ar,X_),e(ar,ga),e(ga,I_),e(ar,N_),e(xe,D_),e(xe,ir),e(ir,S_),e(ir,lr),e(lr,O_),e(ir,W_),e(xe,B_),w(Go,xe,null),e(xe,Q_),e(xe,dr),e(dr,H_),e(dr,_a),e(_a,U_),e(dr,V_),e(xe,G_),e(xe,nt),w(cr,nt,null),e(nt,K_),e(nt,uo),e(uo,J_),e(uo,ba),e(ba,Z_),e(uo,Y_),e(uo,cl),e(cl,eb),e(uo,tb),e(nt,ob),w(Ko,nt,null),e(nt,nb),e(nt,pl),e(pl,sb),e(nt,rb),w(pr,nt,null),u(a,id,f),u(a,fo,f),e(fo,Jo),e(Jo,hl),w(hr,hl,null),e(fo,ab),e(fo,ul),e(ul,ib),u(a,ld,f),u(a,Pe,f),w(ur,Pe,null),e(Pe,lb),e(Pe,mo),e(mo,db),e(mo,fl),e(fl,cb),e(mo,pb),e(mo,ml),e(ml,hb),e(mo,ub),e(Pe,fb),e(Pe,fr),e(fr,mb),e(fr,ka),e(ka,gb),e(fr,_b),e(Pe,bb),e(Pe,mr),e(mr,kb),e(mr,gr),e(gr,vb),e(mr,Tb),e(Pe,wb),w(Zo,Pe,null),e(Pe,yb),e(Pe,_r),e(_r,Mb),e(_r,gl),e(gl,$b),e(_r,Fb),e(Pe,Rb),e(Pe,st),w(br,st,null),e(st,Eb),e(st,go),e(go,zb),e(go,va),e(va,Lb),e(go,qb),e(go,_l),e(_l,Cb),e(go,xb),e(st,Pb),w(Yo,st,null),e(st,Ab),e(st,bl),e(bl,jb),e(st,Xb),w(kr,st,null),dd=!0},p(a,[f]){const vr={};f&2&&(vr.$$scope={dirty:f,ctx:a}),yo.$set(vr);const kl={};f&2&&(kl.$$scope={dirty:f,ctx:a}),$o.$set(kl);const vl={};f&2&&(vl.$$scope={dirty:f,ctx:a}),Ro.$set(vl);const Tl={};f&2&&(Tl.$$scope={dirty:f,ctx:a}),zo.$set(Tl);const Tr={};f&2&&(Tr.$$scope={dirty:f,ctx:a}),qo.$set(Tr);const wl={};f&2&&(wl.$$scope={dirty:f,ctx:a}),xo.$set(wl);const yl={};f&2&&(yl.$$scope={dirty:f,ctx:a}),Ao.$set(yl);const Ml={};f&2&&(Ml.$$scope={dirty:f,ctx:a}),Xo.$set(Ml);const wr={};f&2&&(wr.$$scope={dirty:f,ctx:a}),Io.$set(wr);const $l={};f&2&&($l.$$scope={dirty:f,ctx:a}),Do.$set($l);const Fl={};f&2&&(Fl.$$scope={dirty:f,ctx:a}),So.$set(Fl);const Rl={};f&2&&(Rl.$$scope={dirty:f,ctx:a}),Wo.$set(Rl);const El={};f&2&&(El.$$scope={dirty:f,ctx:a}),Bo.$set(El);const zl={};f&2&&(zl.$$scope={dirty:f,ctx:a}),Ho.$set(zl);const yr={};f&2&&(yr.$$scope={dirty:f,ctx:a}),Uo.$set(yr);const Mr={};f&2&&(Mr.$$scope={dirty:f,ctx:a}),Go.$set(Mr);const 
Ll={};f&2&&(Ll.$$scope={dirty:f,ctx:a}),Ko.$set(Ll);const $r={};f&2&&($r.$$scope={dirty:f,ctx:a}),Zo.$set($r);const ql={};f&2&&(ql.$$scope={dirty:f,ctx:a}),Yo.$set(ql)},i(a){dd||(y(b.$$.fragment,a),y(ee.$$.fragment,a),y(ne.$$.fragment,a),y(tn.$$.fragment,a),y(nn.$$.fragment,a),y(sn.$$.fragment,a),y(ln.$$.fragment,a),y(cn.$$.fragment,a),y(hn.$$.fragment,a),y(un.$$.fragment,a),y(fn.$$.fragment,a),y(_n.$$.fragment,a),y(kn.$$.fragment,a),y(vn.$$.fragment,a),y(Tn.$$.fragment,a),y(Fn.$$.fragment,a),y(yo.$$.fragment,a),y(Rn.$$.fragment,a),y(En.$$.fragment,a),y(zn.$$.fragment,a),y(An.$$.fragment,a),y($o.$$.fragment,a),y(jn.$$.fragment,a),y(Xn.$$.fragment,a),y(In.$$.fragment,a),y(Bn.$$.fragment,a),y(Ro.$$.fragment,a),y(Qn.$$.fragment,a),y(Hn.$$.fragment,a),y(Un.$$.fragment,a),y(Zn.$$.fragment,a),y(zo.$$.fragment,a),y(Yn.$$.fragment,a),y(es.$$.fragment,a),y(ts.$$.fragment,a),y(os.$$.fragment,a),y(is.$$.fragment,a),y(qo.$$.fragment,a),y(ls.$$.fragment,a),y(ds.$$.fragment,a),y(cs.$$.fragment,a),y(ms.$$.fragment,a),y(xo.$$.fragment,a),y(gs.$$.fragment,a),y(_s.$$.fragment,a),y(bs.$$.fragment,a),y(ys.$$.fragment,a),y(Ao.$$.fragment,a),y(Ms.$$.fragment,a),y($s.$$.fragment,a),y(Fs.$$.fragment,a),y(Xo.$$.fragment,a),y(qs.$$.fragment,a),y(Io.$$.fragment,a),y(Cs.$$.fragment,a),y(xs.$$.fragment,a),y(Ps.$$.fragment,a),y(Do.$$.fragment,a),y(Ds.$$.fragment,a),y(So.$$.fragment,a),y(Ss.$$.fragment,a),y(Os.$$.fragment,a),y(Ws.$$.fragment,a),y(Wo.$$.fragment,a),y(Vs.$$.fragment,a),y(Bo.$$.fragment,a),y(Gs.$$.fragment,a),y(Ks.$$.fragment,a),y(Js.$$.fragment,a),y(Ho.$$.fragment,a),y(or.$$.fragment,a),y(Uo.$$.fragment,a),y(nr.$$.fragment,a),y(sr.$$.fragment,a),y(rr.$$.fragment,a),y(Go.$$.fragment,a),y(cr.$$.fragment,a),y(Ko.$$.fragment,a),y(pr.$$.fragment,a),y(hr.$$.fragment,a),y(ur.$$.fragment,a),y(Zo.$$.fragment,a),y(br.$$.fragment,a),y(Yo.$$.fragment,a),y(kr.$$.fragment,a),dd=!0)},o(a){M(b.$$.fragment,a),M(ee.$$.fragment,a),M(ne.$$.fragment,a),M(tn.$$.fragment,a),M(nn.$$.fragment,a),M(sn.$$.fragment,a),M(ln.$$.fragment,a),M(cn.$$.fragment,a),M(hn.$$.fragment,a),M(un.$$.fragment,a),M(fn.$$.fragment,a),M(_n.$$.fragment,a),M(kn.$$.fragment,a),M(vn.$$.fragment,a),M(Tn.$$.fragment,a),M(Fn.$$.fragment,a),M(yo.$$.fragment,a),M(Rn.$$.fragment,a),M(En.$$.fragment,a),M(zn.$$.fragment,a),M(An.$$.fragment,a),M($o.$$.fragment,a),M(jn.$$.fragment,a),M(Xn.$$.fragment,a),M(In.$$.fragment,a),M(Bn.$$.fragment,a),M(Ro.$$.fragment,a),M(Qn.$$.fragment,a),M(Hn.$$.fragment,a),M(Un.$$.fragment,a),M(Zn.$$.fragment,a),M(zo.$$.fragment,a),M(Yn.$$.fragment,a),M(es.$$.fragment,a),M(ts.$$.fragment,a),M(os.$$.fragment,a),M(is.$$.fragment,a),M(qo.$$.fragment,a),M(ls.$$.fragment,a),M(ds.$$.fragment,a),M(cs.$$.fragment,a),M(ms.$$.fragment,a),M(xo.$$.fragment,a),M(gs.$$.fragment,a),M(_s.$$.fragment,a),M(bs.$$.fragment,a),M(ys.$$.fragment,a),M(Ao.$$.fragment,a),M(Ms.$$.fragment,a),M($s.$$.fragment,a),M(Fs.$$.fragment,a),M(Xo.$$.fragment,a),M(qs.$$.fragment,a),M(Io.$$.fragment,a),M(Cs.$$.fragment,a),M(xs.$$.fragment,a),M(Ps.$$.fragment,a),M(Do.$$.fragment,a),M(Ds.$$.fragment,a),M(So.$$.fragment,a),M(Ss.$$.fragment,a),M(Os.$$.fragment,a),M(Ws.$$.fragment,a),M(Wo.$$.fragment,a),M(Vs.$$.fragment,a),M(Bo.$$.fragment,a),M(Gs.$$.fragment,a),M(Ks.$$.fragment,a),M(Js.$$.fragment,a),M(Ho.$$.fragment,a),M(or.$$.fragment,a),M(Uo.$$.fragment,a),M(nr.$$.fragment,a),M(sr.$$.fragment,a),M(rr.$$.fragment,a),M(Go.$$.fragment,a),M(cr.$$.fragment,a),M(Ko.$$.fragment,a),M(pr.$$.fragment,a),M(hr.$$.fragment,a),M(ur.$$.fragment,a),M(Zo.$$.fragment,a),M(br.$$.fragment,a),M(Yo.
$$.fragment,a),M(kr.$$.fragment,a),dd=!1},d(a){t(h),a&&t(F),a&&t(m),$(b),a&&t(V),a&&t(z),$(ee),a&&t(ie),a&&t(U),a&&t(C),a&&t(se),a&&t(le),a&&t(re),a&&t(de),a&&t(q),a&&t(W),a&&t(Y),a&&t(we),a&&t(ce),a&&t(ye),a&&t(X),$(ne),a&&t(Cl),a&&t(It),$(tn),a&&t(xl),a&&t(Nt),$(nn),a&&t(Pl),a&&t(Ee),$(sn),$(ln),$(cn),$(hn),a&&t(Al),a&&t(St),$(un),a&&t(jl),a&&t(He),$(fn),$(_n),$(kn),a&&t(Xl),a&&t(Ot),$(vn),a&&t(Il),a&&t(Ie),$(Tn),$(Fn),$(yo),$(Rn),a&&t(Nl),a&&t(Bt),$(En),a&&t(Dl),a&&t(Ne),$(zn),$(An),$($o),$(jn),a&&t(Sl),a&&t(Ht),$(Xn),a&&t(Ol),a&&t(De),$(In),$(Bn),$(Ro),$(Qn),a&&t(Wl),a&&t(Vt),$(Hn),a&&t(Bl),a&&t(Se),$(Un),$(Zn),$(zo),$(Yn),$(es),a&&t(Ql),a&&t(Kt),$(ts),a&&t(Hl),a&&t(Oe),$(os),$(is),$(qo),$(ls),a&&t(Ul),a&&t(Zt),$(ds),a&&t(Vl),a&&t(We),$(cs),$(ms),$(xo),$(gs),a&&t(Gl),a&&t(eo),$(_s),a&&t(Kl),a&&t(Be),$(bs),$(ys),$(Ao),$(Ms),a&&t(Jl),a&&t(no),$($s),a&&t(Zl),a&&t(ze),$(Fs),$(Xo),$(qs),$(Io),$(Cs),a&&t(Yl),a&&t(ro),$(xs),a&&t(ed),a&&t(Le),$(Ps),$(Do),$(Ds),$(So),$(Ss),a&&t(td),a&&t(io),$(Os),a&&t(od),a&&t(qe),$(Ws),$(Wo),$(Vs),$(Bo),$(Gs),a&&t(nd),a&&t(co),$(Ks),a&&t(sd),a&&t(Ce),$(Js),$(Ho),$(or),$(Uo),$(nr),a&&t(rd),a&&t(ho),$(sr),a&&t(ad),a&&t(xe),$(rr),$(Go),$(cr),$(Ko),$(pr),a&&t(id),a&&t(fo),$(hr),a&&t(ld),a&&t(Pe),$(ur),$(Zo),$(br),$(Yo),$(kr)}}}const m1={local:"xlmroberta",sections:[{local:"overview",title:"Overview"},{local:"transformers.XLMRobertaConfig",title:"XLMRobertaConfig"},{local:"transformers.XLMRobertaTokenizer",title:"XLMRobertaTokenizer"},{local:"transformers.XLMRobertaTokenizerFast",title:"XLMRobertaTokenizerFast"},{local:"transformers.XLMRobertaModel",title:"XLMRobertaModel"},{local:"transformers.XLMRobertaForCausalLM",title:"XLMRobertaForCausalLM"},{local:"transformers.XLMRobertaForMaskedLM",title:"XLMRobertaForMaskedLM"},{local:"transformers.XLMRobertaForSequenceClassification",title:"XLMRobertaForSequenceClassification"},{local:"transformers.XLMRobertaForMultipleChoice",title:"XLMRobertaForMultipleChoice"},{local:"transformers.XLMRobertaForTokenClassification",title:"XLMRobertaForTokenClassification"},{local:"transformers.XLMRobertaForQuestionAnswering",title:"XLMRobertaForQuestionAnswering"},{local:"transformers.TFXLMRobertaModel",title:"TFXLMRobertaModel"},{local:"transformers.TFXLMRobertaForMaskedLM",title:"TFXLMRobertaForMaskedLM"},{local:"transformers.TFXLMRobertaForSequenceClassification",title:"TFXLMRobertaForSequenceClassification"},{local:"transformers.TFXLMRobertaForMultipleChoice",title:"TFXLMRobertaForMultipleChoice"},{local:"transformers.TFXLMRobertaForTokenClassification",title:"TFXLMRobertaForTokenClassification"},{local:"transformers.TFXLMRobertaForQuestionAnswering",title:"TFXLMRobertaForQuestionAnswering"}],title:"XLM-RoBERTa"};function g1(N,h,F){let{fw:m}=h;return N.$$set=_=>{"fw"in _&&F(0,m=_.fw)},[m]}class y1 extends QT{constructor(h){super();HT(this,h,g1,f1,UT,{fw:0})}}export{y1 as default,m1 as metadata};
9,938
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/pegasus.mdx-40bdb7e7.js
import{S as Pb,i as wb,s as xb,e as o,k as d,w as f,t as r,L as zb,c as n,d as t,m as l,a as s,x as g,h as i,b as c,J as e,g as u,y as _,q as k,o as v,B as b}from"../../chunks/vendor-b1433968.js";import{T as Mo}from"../../chunks/Tip-c3840994.js";import{D as C}from"../../chunks/Docstring-ff504c58.js";import{C as ht}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Q}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Eb(U){let h,z,y,T,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=o("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){h=n(w,"P",{});var P=s(h);z=i(P,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(P,"CODE",{});var E=s(y);T=i(E,"Module"),E.forEach(t),x=i(P,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),P.forEach(t)},m(w,P){u(w,h,P),e(h,z),e(h,y),e(y,T),e(h,x)},d(w){w&&t(h)}}}function $b(U){let h,z,y,T,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=o("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){h=n(w,"P",{});var P=s(h);z=i(P,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(P,"CODE",{});var E=s(y);T=i(E,"Module"),E.forEach(t),x=i(P,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),P.forEach(t)},m(w,P){u(w,h,P),e(h,z),e(h,y),e(y,T),e(h,x)},d(w){w&&t(h)}}}function qb(U){let h,z,y,T,x,w,P,E,Ae,ke,q,ze,le,Ie,ce,ue,Le,je,B,O,Ee,J,j,S,Ne,te,oe,Ge,W,De,Ue,A,ve,pe,Se,ne,X,Be,We,K,Ke,se,R;return{c(){h=o("p"),z=r("TF 2.0 models accepts two formats as inputs:"),y=d(),T=o("ul"),x=o("li"),w=r("having all inputs as keyword arguments (like PyTorch models), or"),P=d(),E=o("li"),Ae=r("having all inputs as a list, tuple or dict in the first positional arguments."),ke=d(),q=o("p"),ze=r("This second option is useful when using "),le=o("code"),Ie=r("tf.keras.Model.fit"),ce=r(` method which currently requires having all the tensors in the first argument of the model call function: `),ue=o("code"),Le=r("model(inputs)"),je=r("."),B=d(),O=o("p"),Ee=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=d(),j=o("ul"),S=o("li"),Ne=r("a single Tensor with "),te=o("code"),oe=r("input_ids"),Ge=r(" only and nothing else: "),W=o("code"),De=r("model(input_ids)"),Ue=d(),A=o("li"),ve=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),pe=o("code"),Se=r("model([input_ids, attention_mask])"),ne=r(" or "),X=o("code"),Be=r("model([input_ids, attention_mask, token_type_ids])"),We=d(),K=o("li"),Ke=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),se=o("code"),R=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){h=n(m,"P",{});var $=s(h);z=i($,"TF 2.0 models accepts two formats as 
inputs:"),$.forEach(t),y=l(m),T=n(m,"UL",{});var $e=s(T);x=n($e,"LI",{});var mt=s(x);w=i(mt,"having all inputs as keyword arguments (like PyTorch models), or"),mt.forEach(t),P=l($e),E=n($e,"LI",{});var qe=s(E);Ae=i(qe,"having all inputs as a list, tuple or dict in the first positional arguments."),qe.forEach(t),$e.forEach(t),ke=l(m),q=n(m,"P",{});var Z=s(q);ze=i(Z,"This second option is useful when using "),le=n(Z,"CODE",{});var ft=s(le);Ie=i(ft,"tf.keras.Model.fit"),ft.forEach(t),ce=i(Z,` method which currently requires having all the tensors in the first argument of the model call function: `),ue=n(Z,"CODE",{});var Ve=s(ue);Le=i(Ve,"model(inputs)"),Ve.forEach(t),je=i(Z,"."),Z.forEach(t),B=l(m),O=n(m,"P",{});var he=s(O);Ee=i(he,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),he.forEach(t),J=l(m),j=n(m,"UL",{});var I=s(j);S=n(I,"LI",{});var Y=s(S);Ne=i(Y,"a single Tensor with "),te=n(Y,"CODE",{});var Fe=s(te);oe=i(Fe,"input_ids"),Fe.forEach(t),Ge=i(Y," only and nothing else: "),W=n(Y,"CODE",{});var gt=s(W);De=i(gt,"model(input_ids)"),gt.forEach(t),Y.forEach(t),Ue=l(I),A=n(I,"LI",{});var ee=s(A);ve=i(ee,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),pe=n(ee,"CODE",{});var _t=s(pe);Se=i(_t,"model([input_ids, attention_mask])"),_t.forEach(t),ne=i(ee," or "),X=n(ee,"CODE",{});var Je=s(X);Be=i(Je,"model([input_ids, attention_mask, token_type_ids])"),Je.forEach(t),ee.forEach(t),We=l(I),K=n(I,"LI",{});var V=s(K);Ke=i(V,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),se=n(V,"CODE",{});var kt=s(se);R=i(kt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),kt.forEach(t),V.forEach(t),I.forEach(t)},m(m,$){u(m,h,$),e(h,z),u(m,y,$),u(m,T,$),e(T,x),e(x,w),e(T,P),e(T,E),e(E,Ae),u(m,ke,$),u(m,q,$),e(q,ze),e(q,le),e(le,Ie),e(q,ce),e(q,ue),e(ue,Le),e(q,je),u(m,B,$),u(m,O,$),e(O,Ee),u(m,J,$),u(m,j,$),e(j,S),e(S,Ne),e(S,te),e(te,oe),e(S,Ge),e(S,W),e(W,De),e(j,Ue),e(j,A),e(A,ve),e(A,pe),e(pe,Se),e(A,ne),e(A,X),e(X,Be),e(j,We),e(j,K),e(K,Ke),e(K,se),e(se,R)},d(m){m&&t(h),m&&t(y),m&&t(T),m&&t(ke),m&&t(q),m&&t(B),m&&t(O),m&&t(J),m&&t(j)}}}function Fb(U){let h,z,y,T,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=o("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){h=n(w,"P",{});var P=s(h);z=i(P,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(P,"CODE",{});var E=s(y);T=i(E,"Module"),E.forEach(t),x=i(P,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),P.forEach(t)},m(w,P){u(w,h,P),e(h,z),e(h,y),e(y,T),e(h,x)},d(w){w&&t(h)}}}function Mb(U){let h,z,y,T,x,w,P,E,Ae,ke,q,ze,le,Ie,ce,ue,Le,je,B,O,Ee,J,j,S,Ne,te,oe,Ge,W,De,Ue,A,ve,pe,Se,ne,X,Be,We,K,Ke,se,R;return{c(){h=o("p"),z=r("TF 2.0 models accepts two formats as inputs:"),y=d(),T=o("ul"),x=o("li"),w=r("having all inputs as keyword arguments (like PyTorch models), or"),P=d(),E=o("li"),Ae=r("having all inputs as a list, tuple or dict in the first positional arguments."),ke=d(),q=o("p"),ze=r("This second option is useful when using 
"),le=o("code"),Ie=r("tf.keras.Model.fit"),ce=r(` method which currently requires having all the tensors in the first argument of the model call function: `),ue=o("code"),Le=r("model(inputs)"),je=r("."),B=d(),O=o("p"),Ee=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=d(),j=o("ul"),S=o("li"),Ne=r("a single Tensor with "),te=o("code"),oe=r("input_ids"),Ge=r(" only and nothing else: "),W=o("code"),De=r("model(input_ids)"),Ue=d(),A=o("li"),ve=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),pe=o("code"),Se=r("model([input_ids, attention_mask])"),ne=r(" or "),X=o("code"),Be=r("model([input_ids, attention_mask, token_type_ids])"),We=d(),K=o("li"),Ke=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),se=o("code"),R=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){h=n(m,"P",{});var $=s(h);z=i($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),y=l(m),T=n(m,"UL",{});var $e=s(T);x=n($e,"LI",{});var mt=s(x);w=i(mt,"having all inputs as keyword arguments (like PyTorch models), or"),mt.forEach(t),P=l($e),E=n($e,"LI",{});var qe=s(E);Ae=i(qe,"having all inputs as a list, tuple or dict in the first positional arguments."),qe.forEach(t),$e.forEach(t),ke=l(m),q=n(m,"P",{});var Z=s(q);ze=i(Z,"This second option is useful when using "),le=n(Z,"CODE",{});var ft=s(le);Ie=i(ft,"tf.keras.Model.fit"),ft.forEach(t),ce=i(Z,` method which currently requires having all the tensors in the first argument of the model call function: `),ue=n(Z,"CODE",{});var Ve=s(ue);Le=i(Ve,"model(inputs)"),Ve.forEach(t),je=i(Z,"."),Z.forEach(t),B=l(m),O=n(m,"P",{});var he=s(O);Ee=i(he,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),he.forEach(t),J=l(m),j=n(m,"UL",{});var I=s(j);S=n(I,"LI",{});var Y=s(S);Ne=i(Y,"a single Tensor with "),te=n(Y,"CODE",{});var Fe=s(te);oe=i(Fe,"input_ids"),Fe.forEach(t),Ge=i(Y," only and nothing else: "),W=n(Y,"CODE",{});var gt=s(W);De=i(gt,"model(input_ids)"),gt.forEach(t),Y.forEach(t),Ue=l(I),A=n(I,"LI",{});var ee=s(A);ve=i(ee,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),pe=n(ee,"CODE",{});var _t=s(pe);Se=i(_t,"model([input_ids, attention_mask])"),_t.forEach(t),ne=i(ee," or "),X=n(ee,"CODE",{});var Je=s(X);Be=i(Je,"model([input_ids, attention_mask, token_type_ids])"),Je.forEach(t),ee.forEach(t),We=l(I),K=n(I,"LI",{});var V=s(K);Ke=i(V,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),se=n(V,"CODE",{});var kt=s(se);R=i(kt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),kt.forEach(t),V.forEach(t),I.forEach(t)},m(m,$){u(m,h,$),e(h,z),u(m,y,$),u(m,T,$),e(T,x),e(x,w),e(T,P),e(T,E),e(E,Ae),u(m,ke,$),u(m,q,$),e(q,ze),e(q,le),e(le,Ie),e(q,ce),e(q,ue),e(ue,Le),e(q,je),u(m,B,$),u(m,O,$),e(O,Ee),u(m,J,$),u(m,j,$),e(j,S),e(S,Ne),e(S,te),e(te,oe),e(S,Ge),e(S,W),e(W,De),e(j,Ue),e(j,A),e(A,ve),e(A,pe),e(pe,Se),e(A,ne),e(A,X),e(X,Be),e(j,We),e(j,K),e(K,Ke),e(K,se),e(se,R)},d(m){m&&t(h),m&&t(y),m&&t(T),m&&t(ke),m&&t(q),m&&t(B),m&&t(O),m&&t(J),m&&t(j)}}}function Cb(U){let h,z,y,T,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=o("code"),T=r("Module"),x=r(` instance 
afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){h=n(w,"P",{});var P=s(h);z=i(P,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(P,"CODE",{});var E=s(y);T=i(E,"Module"),E.forEach(t),x=i(P,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),P.forEach(t)},m(w,P){u(w,h,P),e(h,z),e(h,y),e(y,T),e(h,x)},d(w){w&&t(h)}}}function jb(U){let h,z,y,T,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=o("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){h=n(w,"P",{});var P=s(h);z=i(P,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(P,"CODE",{});var E=s(y);T=i(E,"Module"),E.forEach(t),x=i(P,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),P.forEach(t)},m(w,P){u(w,h,P),e(h,z),e(h,y),e(y,T),e(h,x)},d(w){w&&t(h)}}}function Sb(U){let h,z,y,T,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=o("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){h=n(w,"P",{});var P=s(h);z=i(P,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(P,"CODE",{});var E=s(y);T=i(E,"Module"),E.forEach(t),x=i(P,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),P.forEach(t)},m(w,P){u(w,h,P),e(h,z),e(h,y),e(y,T),e(h,x)},d(w){w&&t(h)}}}function Ob(U){let 
h,z,y,T,x,w,P,E,Ae,ke,q,ze,le,Ie,ce,ue,Le,je,B,O,Ee,J,j,S,Ne,te,oe,Ge,W,De,Ue,A,ve,pe,Se,ne,X,Be,We,K,Ke,se,R,m,$,$e,mt,qe,Z,ft,Ve,he,I,Y,Fe,gt,ee,_t,Je,V,kt,Co,Dl,Ul,fa,Bl,Wl,gd,be,ga,Kl,Hl,_a,Ql,Rl,ka,Vl,Jl,jo,Xl,So,Zl,Yl,ec,Bt,Oo,tc,oc,Ao,nc,sc,_d,vt,Wt,va,Io,ac,ba,rc,kd,Xe,Kt,Lo,ic,dc,No,lc,cc,uc,ya,pc,hc,Ta,mc,vd,bt,Ht,Pa,Go,fc,wa,gc,bd,ye,xa,za,_c,kc,Ea,Fs,vc,Ms,bc,yc,Do,$a,Tc,Pc,yt,qa,wc,xc,Fa,zc,Ec,Uo,$c,Ma,qc,Fc,Mc,Ca,He,Cc,ja,jc,Sc,Sa,Oc,Ac,Oa,Ic,Lc,Nc,Aa,Tt,Gc,Bo,Dc,Uc,Ia,Bc,Wc,yd,Pt,Qt,La,Wo,Kc,Na,Hc,Td,Ko,Pd,wt,Rt,Ga,Ho,Qc,Da,Rc,wd,me,Qo,Vc,xt,Jc,Cs,Xc,Zc,Ro,Yc,eu,tu,zt,ou,js,nu,su,Ss,au,ru,iu,Ua,du,lu,Vo,xd,Et,Vt,Ba,Jo,cu,Wa,uu,zd,Jt,pu,Ka,hu,mu,Ed,H,Xo,fu,Zo,gu,Yo,_u,ku,vu,en,bu,Os,yu,Tu,Pu,Oe,tn,wu,on,xu,Ha,zu,Eu,$u,nn,As,qu,Qa,Fu,Mu,sn,Cu,Ra,ju,Su,Ou,Va,Au,Iu,Xt,an,Lu,Ja,Nu,Gu,Zt,rn,Du,Xa,Uu,Bu,Yt,dn,Wu,Za,Ku,$d,$t,eo,Ya,ln,Hu,er,Qu,qd,fe,cn,Ru,qt,Vu,tr,Ju,Xu,un,Zu,Yu,ep,pn,tp,Is,op,np,sp,Ze,hn,ap,or,rp,ip,mn,Ls,dp,nr,lp,cp,fn,up,sr,pp,hp,mp,to,gn,fp,ar,gp,Fd,Ft,oo,rr,_n,_p,ir,kp,Md,Me,kn,vp,vn,bp,Ns,yp,Tp,Pp,bn,wp,yn,xp,zp,Ep,Te,Tn,$p,Mt,qp,Gs,Fp,Mp,dr,Cp,jp,Sp,no,Op,lr,Ap,Ip,Pn,Cd,Ct,so,cr,wn,Lp,ur,Np,jd,Ce,xn,Gp,zn,Dp,Ds,Up,Bp,Wp,En,Kp,$n,Hp,Qp,Rp,G,qn,Vp,jt,Jp,Us,Xp,Zp,pr,Yp,eh,th,ao,oh,hr,nh,sh,mr,fr,gr,_r,ah,rh,kr,vr,br,yr,ih,dh,Tr,Pr,wr,xr,lh,ch,zr,Er,Fn,ro,io,$r,Mn,uh,qr,ph,hh,Fr,mh,Sd,St,lo,Mr,Cn,fh,Cr,gh,Od,jn,Ye,Sn,_h,jr,kh,vh,On,Ad,Ot,co,Sr,An,bh,Or,yh,Id,ge,In,Th,Ln,Ph,Bs,wh,xh,zh,Nn,Eh,Gn,$h,qh,Fh,uo,Mh,Pe,Dn,Ch,At,jh,Ws,Sh,Oh,Ar,Ah,Ih,Lh,po,Nh,Ir,Gh,Dh,Un,Ld,It,ho,Lr,Bn,Uh,Nr,Bh,Nd,_e,Wn,Wh,Kn,Kh,Ks,Hh,Qh,Rh,Hn,Vh,Qn,Jh,Xh,Zh,mo,Yh,D,Rn,em,Lt,tm,Hs,om,nm,Gr,sm,am,rm,fo,im,Dr,dm,lm,Ur,Br,Wr,Kr,cm,um,Hr,Qr,Rr,Vr,pm,hm,Jr,Xr,Zr,Yr,mm,fm,ei,ti,Vn,go,_o,oi,Jn,gm,ni,_m,km,si,vm,Gd,Nt,ko,ai,Xn,bm,ri,ym,Dd,L,Zn,Tm,Yn,Pm,Qs,wm,xm,zm,es,Em,ts,$m,qm,Fm,ii,Mm,Cm,Qe,di,os,jm,Sm,li,ns,Om,Am,ci,ss,Im,Lm,ui,as,Nm,Gm,we,rs,Dm,Gt,Um,pi,Bm,Wm,hi,Km,Hm,Qm,vo,Rm,mi,Vm,Jm,is,Xm,et,ds,Zm,fi,Ym,ef,ls,tf,tt,cs,of,gi,nf,sf,us,Ud,Dt,bo,_i,ps,af,ki,rf,Bd,N,hs,df,ms,lf,Rs,cf,uf,pf,fs,hf,gs,mf,ff,gf,vi,_f,kf,Re,bi,_s,vf,bf,yi,ks,yf,Tf,Ti,vs,Pf,wf,Pi,bs,xf,zf,F,ys,Ef,Ut,$f,wi,qf,Ff,xi,Mf,Cf,jf,yo,Sf,zi,Of,Af,Ei,$i,qi,Fi,If,Lf,Mi,Ci,ji,Si,Nf,Gf,Oi,Ai,Ii,Li,Df,Uf,Ni,Gi,Ts,To,Po,Di,Ps,Bf,Ui,Wf,Kf,Bi,Hf,Qf,Wi,Rf,Vf,Ki,Hi,Qi,Ri,Jf,Xf,Vi,Ji,Xi,Zi,Zf,Yf,Yi,ed,td,od,eg,tg,nd,sd,ad,rd,og,ng,ot,ws,sg,id,ag,rg,xs,ig,nt,zs,dg,dd,lg,cg,Es,Wd;return w=new Q({}),J=new Q({}),Fe=new Q({}),Io=new Q({}),Go=new Q({}),Wo=new Q({}),Ko=new ht({props:{code:`from transformers import PegasusForConditionalGeneration, PegasusTokenizer import torch src_text = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""" ] model_name = 'google/pegasus-xsum' device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = PegasusTokenizer.from_pretrained(model_name) model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device) batch = tokenizer(src_text, truncation=True, padding='longest', return_tensors="pt").to(device) translated = model.generate(**batch) tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True) assert tgt_text[0] == "California's largest electricity provider has turned off power to hundreds of thousands of customers.",`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusForConditionalGeneration, PegasusTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; PG&amp;E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.&quot;&quot;&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&#x27;google/pegasus-xsum&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>device = <span class="hljs-string">&#x27;cuda&#x27;</span> <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> <span class="hljs-string">&#x27;cpu&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device) <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer(src_text, truncation=<span class="hljs-literal">True</span>, padding=<span class="hljs-string">&#x27;longest&#x27;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).to(device) <span class="hljs-meta">&gt;&gt;&gt; </span>translated = model.generate(**batch) <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> tgt_text[<span class="hljs-number">0</span>] == <span class="hljs-string">&quot;California&#x27;s largest electricity provider has turned off power to hundreds of thousands of customers.&quot;</span>`}}),Ho=new Q({}),Qo=new C({props:{name:"class transformers.PegasusConfig",anchor:"transformers.PegasusConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 
1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 0"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"eos_token_id",val:" = 1"},{name:"forced_eos_token_id",val:" = 1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/configuration_pegasus.py#L29",parametersDescription:[{anchor:"transformers.PegasusConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusModel">PegasusModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusModel">TFPegasusModel</a>.`,name:"vocab_size"},{anchor:"transformers.PegasusConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.PegasusConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.PegasusConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.PegasusConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.PegasusConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.PegasusConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.PegasusConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.PegasusConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.PegasusConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.PegasusConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.PegasusConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.PegasusConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.PegasusConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.PegasusConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.PegasusConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.PegasusConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.PegasusConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),Vo=new ht({props:{code:`from transformers import PegasusModel, PegasusConfig # Initializing a PEGASUS google/pegasus-large style configuration configuration = PegasusConfig() # Initializing a model from the google/pegasus-large style configuration model = PegasusModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusModel, PegasusConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a PEGASUS google/pegasus-large style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = PegasusConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the google/pegasus-large style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = PegasusModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Jo=new Q({}),Xo=new C({props:{name:"class transformers.PegasusTokenizer",anchor:"transformers.PegasusTokenizer",parameters:[{name:"vocab_file",val:""},{name:"pad_token",val:" = '<pad>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"mask_token",val:" = '<mask_2>'"},{name:"mask_token_sent",val:" = '<mask_1>'"},{name:"additional_special_tokens",val:" = None"},{name:"offset",val:" = 103"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus.py#L41",parametersDescription:[{anchor:"transformers.PegasusTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.PegasusTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.PegasusTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.PegasusTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.PegasusTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask_2&gt;&quot;</code>) &#x2014; The token used for masking single token values. This is the token used when training this model with masked language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining. It corresponds to <em>[MASK2]</em> in <a href="https://arxiv.org/pdf/1912.08777.pdf" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a>.`,name:"mask_token"},{anchor:"transformers.PegasusTokenizer.mask_token_sent",description:`<strong>mask_token_sent</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask_1&gt;&quot;</code>) &#x2014; The token used for masking whole target sentences. This is the token used when training this model with gap sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during pretraining. It corresponds to <em>[MASK1]</em> in <a href="https://arxiv.org/pdf/1912.08777.pdf" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a>.`,name:"mask_token_sent"},{anchor:"transformers.PegasusTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and <unk_2, \u2026, unk_102> are used as additional special tokens corresponding to the <a href="https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66" rel="nofollow">original PEGASUS tokenizer</a> that uses the tokens 2 - 104 only for pretraining</unk_2,></mask_2>`,name:"additional_special_tokens"},{anchor:"transformers.PegasusTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),tn=new C({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.PegasusTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus.py#L253",parametersDescription:[{anchor:"transformers.PegasusTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.PegasusTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),an=new C({props:{name:"convert_tokens_to_string",anchor:"transformers.PegasusTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus.py#L227"}}),rn=new C({props:{name:"get_special_tokens_mask",anchor:"transformers.PegasusTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List"},{name:"token_ids_1",val:": typing.Optional[typing.List] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus.py#L242"}}),dn=new C({props:{name:"num_special_tokens_to_add",anchor:"transformers.PegasusTokenizer.num_special_tokens_to_add",parameters:[{name:"pair",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus.py#L232"}}),ln=new Q({}),cn=new C({props:{name:"class transformers.PegasusTokenizerFast",anchor:"transformers.PegasusTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"pad_token",val:" = '<pad>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"mask_token",val:" = '<mask_2>'"},{name:"mask_token_sent",val:" = '<mask_1>'"},{name:"additional_special_tokens",val:" = None"},{name:"offset",val:" = 103"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus_fast.py#L52",parametersDescription:[{anchor:"transformers.PegasusTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a 
tokenizer.`,name:"vocab_file"},{anchor:"transformers.PegasusTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.PegasusTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.PegasusTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.PegasusTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask_2&gt;&quot;</code>) &#x2014; The token used for masking single token values. This is the token used when training this model with masked language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining. It corresponds to <em>[MASK2]</em> in <a href="https://arxiv.org/pdf/1912.08777.pdf" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a>.`,name:"mask_token"},{anchor:"transformers.PegasusTokenizerFast.mask_token_sent",description:`<strong>mask_token_sent</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask_1&gt;&quot;</code>) &#x2014; The token used for masking whole target sentences. This is the token used when training this model with gap sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during pretraining. It corresponds to <em>[MASK1]</em> in <a href="https://arxiv.org/pdf/1912.08777.pdf" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a>.`,name:"mask_token_sent"},{anchor:"transformers.PegasusTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer. 
If no additional_special_tokens are provided <mask_2> and <unk_2, \u2026, unk_102> are used as additional special tokens corresponding to the <a href="https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66" rel="nofollow">original PEGASUS tokenizer</a> that uses the tokens 2 - 104 only for pretraining</unk_2,></mask_2>`,name:"additional_special_tokens"}]}}),hn=new C({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.PegasusTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus_fast.py#L175",parametersDescription:[{anchor:"transformers.PegasusTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.PegasusTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),gn=new C({props:{name:"get_special_tokens_mask",anchor:"transformers.PegasusTokenizerFast.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List"},{name:"token_ids_1",val:": typing.Optional[typing.List] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/tokenization_pegasus_fast.py#L164"}}),_n=new Q({}),kn=new C({props:{name:"class transformers.PegasusModel",anchor:"transformers.PegasusModel",parameters:[{name:"config",val:": PegasusConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_pegasus.py#L1128",parametersDescription:[{anchor:"transformers.PegasusModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tn=new C({props:{name:"forward",anchor:"transformers.PegasusModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_pegasus.py#L1177",parametersDescription:[{anchor:"transformers.PegasusModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.PegasusModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PegasusModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Pegasus uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.PegasusModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.PegasusModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PegasusModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.PegasusModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.PegasusModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.PegasusModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.PegasusModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.PegasusModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.PegasusModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PegasusModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PegasusModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s 
cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),no=new Mo({props:{$$slots:{default:[Eb]},$$scope:{ctx:U}}}),Pn=new ht({props:{code:`from transformers import PegasusTokenizer, PegasusModel tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large") model = PegasusModel.from_pretrained("google/pegasus-large") input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, PegasusModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&quot;google/pegasus-large&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PegasusModel.from_pretrained(<span class="hljs-string">&quot;google/pegasus-large&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),wn=new 
Q({}),xn=new C({props:{name:"class transformers.PegasusForConditionalGeneration",anchor:"transformers.PegasusForConditionalGeneration",parameters:[{name:"config",val:": PegasusConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_pegasus.py#L1274",parametersDescription:[{anchor:"transformers.PegasusForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qn=new C({props:{name:"forward",anchor:"transformers.PegasusForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_pegasus.py#L1341",parametersDescription:[{anchor:"transformers.PegasusForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.PegasusForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PegasusForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Pegasus uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.PegasusForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.PegasusForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PegasusForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.PegasusForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.PegasusForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.PegasusForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.PegasusForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.PegasusForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.PegasusForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PegasusForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PegasusForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PegasusForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of 
shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ao=new Mo({props:{$$slots:{default:[$b]},$$scope:{ctx:U}}}),Mn=new Q({}),Cn=new Q({}),Sn=new C({props:{name:"forward",anchor:"transformers.PegasusForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_pegasus.py#L1528",parametersDescription:[{anchor:"transformers.PegasusForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.PegasusForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PegasusForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.PegasusForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.PegasusForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PegasusForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.PegasusForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.PegasusForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.PegasusForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.PegasusForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PegasusForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PegasusForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new ht({props:{code:`from transformers import PegasusTokenizer, PegasusForCausalLM tokenizer = PegasusTokenizer.from_pretrained('facebook/bart-large') model = PegasusForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, PegasusForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PegasusForCausalLM.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),An=new Q({}),In=new C({props:{name:"class transformers.TFPegasusModel",anchor:"transformers.TFPegasusModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_tf_pegasus.py#L1220",parametersDescription:[{anchor:"transformers.TFPegasusModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),uo=new Mo({props:{$$slots:{default:[qb]},$$scope:{ctx:U}}}),Dn=new C({props:{name:"call",anchor:"transformers.TFPegasusModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_tf_pegasus.py#L1232",parametersDescription:[{anchor:"transformers.TFPegasusModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFPegasusModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFPegasusModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Pegasus uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFPegasusModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFPegasusModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFPegasusModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFPegasusModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFPegasusModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFPegasusModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFPegasusModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation output_attentions (<code>bool</code>, <em>optional</em>): Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"use_cache"},{anchor:"transformers.TFPegasusModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFPegasusModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFPegasusModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFPegasusModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> 
(<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),po=new Mo({props:{$$slots:{default:[Fb]},$$scope:{ctx:U}}}),Un=new ht({props:{code:`from transformers import PegasusTokenizer, TFPegasusModel import tensorflow as tf tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large') model = TFPegasusModel.from_pretrained('google/pegasus-large') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> PegasusTokenizer, TFPegasusModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFPegasusModel.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Bn=new Q({}),Wn=new C({props:{name:"class transformers.TFPegasusForConditionalGeneration",anchor:"transformers.TFPegasusForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_tf_pegasus.py#L1327",parametersDescription:[{anchor:"transformers.TFPegasusForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mo=new Mo({props:{$$slots:{default:[Mb]},$$scope:{ctx:U}}}),Rn=new C({props:{name:"call",anchor:"transformers.TFPegasusForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_tf_pegasus.py#L1360",parametersDescription:[{anchor:"transformers.TFPegasusForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Pegasus uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation output_attentions (<code>bool</code>, <em>optional</em>): Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"use_cache"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFPegasusForConditionalGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> 
</li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),fo=new Mo({props:{$$slots:{default:[Cb]},$$scope:{ctx:U}}}),Jn=new Q({}),Xn=new Q({}),Zn=new C({props:{name:"class transformers.FlaxPegasusModel",anchor:"transformers.FlaxPegasusModel",parameters:[{name:"config",val:": PegasusConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L1216",parametersDescription:[{anchor:"transformers.FlaxPegasusModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxPegasusModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),rs=new C({props:{name:"__call__",anchor:"transformers.FlaxPegasusPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L1151",parametersDescription:[{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a>`,name:"decoder_input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the 
attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vo=new Mo({props:{$$slots:{default:[jb]},$$scope:{ctx:U}}}),is=new ht({props:{code:`from transformers import PegasusTokenizer, FlaxPegasusModel tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large') model = FlaxPegasusModel.from_pretrained('google/pegasus-large') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, FlaxPegasusModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxPegasusModel.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ds=new C({props:{name:"encode",anchor:"transformers.FlaxPegasusPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L975",parametersDescription:[{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.pegasus.configuration_pegasus.PegasusConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ls=new ht({props:{code:`from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, FlaxPegasusForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxPegasusForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),cs=new C({props:{name:"decode",anchor:"transformers.FlaxPegasusPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L1038",parametersDescription:[{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a>`,name:"decoder_input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxPegasusPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.pegasus.configuration_pegasus.PegasusConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),us=new ht({props:{code:`from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, FlaxPegasusForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxPegasusForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),ps=new Q({}),hs=new C({props:{name:"class transformers.FlaxPegasusForConditionalGeneration",anchor:"transformers.FlaxPegasusForConditionalGeneration",parameters:[{name:"config",val:": PegasusConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L1303",parametersDescription:[{anchor:"transformers.FlaxPegasusForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig">PegasusConfig</a>) &#x2014; Model configuration class with all the parameters 
of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ys=new C({props:{name:"__call__",anchor:"transformers.FlaxPegasusPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L1151",parametersDescription:[{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a>`,name:"decoder_input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxPegasusPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusConfig" >PegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yo=new Mo({props:{$$slots:{default:[Sb]},$$scope:{ctx:U}}}),Ps=new Q({}),ws=new C({props:{name:"encode",anchor:"transformers.FlaxPegasusPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L975",parametersDescription:[{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxPegasusPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.pegasus.configuration_pegasus.PegasusConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" 
>transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xs=new ht({props:{code:`from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, FlaxPegasusForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxPegasusForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),zs=new C({props:{name:"decode",anchor:"transformers.FlaxPegasusForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"deterministic",val:": bool = True"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/pegasus/modeling_flax_pegasus.py#L1307",parametersDescription:[{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a>`,name:"decoder_input_ids"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxPegasusForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.pegasus.configuration_pegasus.PegasusConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Es=new ht({props:{code:`from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, FlaxPegasusForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxPegasusForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/pegasus-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){h=o("meta"),z=d(),y=o("h1"),T=o("a"),x=o("span"),f(w.$$.fragment),P=d(),E=o("span"),Ae=r("Pegasus"),ke=d(),q=o("p"),ze=o("strong"),le=r("DISCLAIMER:"),Ie=r(" If you see something strange, file a "),ce=o("a"),ue=r("Github Issue"),Le=r(` and assign @patrickvonplaten.`),je=d(),B=o("h2"),O=o("a"),Ee=o("span"),f(J.$$.fragment),j=d(),S=o("span"),Ne=r("Overview"),te=d(),oe=o("p"),Ge=r("The Pegasus model was proposed in "),W=o("a"),De=r("PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization"),Ue=r(" by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. 
Liu on Dec 18, 2019."),A=d(),ve=o("p"),pe=r("According to the abstract,"),Se=d(),ne=o("ul"),X=o("li"),Be=r(`Pegasus\u2019 pretraining task is intentionally similar to summarization: important sentences are removed/masked from an input document and are generated together as one output sequence from the remaining sentences, similar to an extractive summary.`),We=d(),K=o("li"),Ke=r("Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval."),se=d(),R=o("p"),m=r("This model was contributed by "),$=o("a"),$e=r("sshleifer"),mt=r(". The Authors\u2019 code can be found "),qe=o("a"),Z=r("here"),ft=r("."),Ve=d(),he=o("h2"),I=o("a"),Y=o("span"),f(Fe.$$.fragment),gt=d(),ee=o("span"),_t=r("Checkpoints"),Je=d(),V=o("p"),kt=r("All the "),Co=o("a"),Dl=r("checkpoints"),Ul=r(` are fine-tuned for summarization, besides `),fa=o("em"),Bl=r("pegasus-large"),Wl=r(", whence the other checkpoints are fine-tuned:"),gd=d(),be=o("ul"),ga=o("li"),Kl=r("Each checkpoint is 2.2 GB on disk and 568M parameters."),Hl=d(),_a=o("li"),Ql=r("FP16 is not supported (help/ideas on this appreciated!)."),Rl=d(),ka=o("li"),Vl=r("Summarizing xsum in fp32 takes about 400ms/sample, with default parameters on a v100 GPU."),Jl=d(),jo=o("li"),Xl=r("Full replication results and correctly pre-processed data can be found in this "),So=o("a"),Zl=r("Issue"),Yl=r("."),ec=d(),Bt=o("li"),Oo=o("a"),tc=r("Distilled checkpoints"),oc=r(" are described in this "),Ao=o("a"),nc=r("paper"),sc=r("."),_d=d(),vt=o("h3"),Wt=o("a"),va=o("span"),f(Io.$$.fragment),ac=d(),ba=o("span"),rc=r("Examples"),kd=d(),Xe=o("ul"),Kt=o("li"),Lo=o("a"),ic=r("Script"),dc=r(` to fine-tune pegasus on the XSUM dataset. Data download instructions at `),No=o("a"),lc=r("examples/pytorch/summarization/"),cc=r("."),uc=d(),ya=o("li"),pc=r("FP16 is not supported (help/ideas on this appreciated!)."),hc=d(),Ta=o("li"),mc=r("The adafactor optimizer is recommended for pegasus fine-tuning."),vd=d(),bt=o("h2"),Ht=o("a"),Pa=o("span"),f(Go.$$.fragment),fc=d(),wa=o("span"),gc=r("Implementation Notes"),bd=d(),ye=o("ul"),xa=o("li"),za=o("p"),_c=r("All models are transformer encoder-decoders with 16 layers in each component."),kc=d(),Ea=o("li"),Fs=o("p"),vc=r("The implementation is completely inherited from "),Ms=o("a"),bc=r("BartForConditionalGeneration"),yc=d(),Do=o("li"),$a=o("p"),Tc=r("Some key configuration differences:"),Pc=d(),yt=o("ul"),qa=o("li"),wc=r("static, sinusoidal position embeddings"),xc=d(),Fa=o("li"),zc=r("the model starts generating with pad_token_id (which has 0 token_embedding) as the prefix."),Ec=d(),Uo=o("li"),$c=r("more beams are used ("),Ma=o("code"),qc=r("num_beams=8"),Fc=r(")"),Mc=d(),Ca=o("li"),He=o("p"),Cc=r("All pretrained pegasus checkpoints are the same besides three attributes: "),ja=o("code"),jc=r("tokenizer.model_max_length"),Sc=r(` (maximum input size), `),Sa=o("code"),Oc=r("max_length"),Ac=r(" (the maximum number of tokens to generate) and "),Oa=o("code"),Ic=r("length_penalty"),Lc=r("."),Nc=d(),Aa=o("li"),Tt=o("p"),Gc=r("The code to convert checkpoints trained in the author\u2019s "),Bo=o("a"),Dc=r("repo"),Uc=r(` can be found in `),Ia=o("code"),Bc=r("convert_pegasus_tf_to_pytorch.py"),Wc=r("."),yd=d(),Pt=o("h2"),Qt=o("a"),La=o("span"),f(Wo.$$.fragment),Kc=d(),Na=o("span"),Hc=r("Usage Example"),Td=d(),f(Ko.$$.fragment),Pd=d(),wt=o("h2"),Rt=o("a"),Ga=o("span"),f(Ho.$$.fragment),Qc=d(),Da=o("span"),Rc=r("PegasusConfig"),wd=d(),me=o("div"),f(Qo.$$.fragment),Vc=d(),xt=o("p"),Jc=r("This is the configuration class 
to store the configuration of a "),Cs=o("a"),Xc=r("PegasusModel"),Zc=r(`. It is used to instantiate an PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PEGASUS `),Ro=o("a"),Yc=r("google/pegasus-large"),eu=r(" architecture."),tu=d(),zt=o("p"),ou=r("Configuration objects inherit from "),js=o("a"),nu=r("PretrainedConfig"),su=r(` and can be used to control the model outputs. Read the documentation from `),Ss=o("a"),au=r("PretrainedConfig"),ru=r(" for more information."),iu=d(),Ua=o("p"),du=r("Example:"),lu=d(),f(Vo.$$.fragment),xd=d(),Et=o("h2"),Vt=o("a"),Ba=o("span"),f(Jo.$$.fragment),cu=d(),Wa=o("span"),uu=r("PegasusTokenizer"),zd=d(),Jt=o("p"),pu=r("warning: "),Ka=o("code"),hu=r("add_tokens"),mu=r(" does not work at the moment."),Ed=d(),H=o("div"),f(Xo.$$.fragment),fu=d(),Zo=o("p"),gu=r("Construct a PEGASUS tokenizer. Based on "),Yo=o("a"),_u=r("SentencePiece"),ku=r("."),vu=d(),en=o("p"),bu=r("This tokenizer inherits from "),Os=o("a"),yu=r("PreTrainedTokenizer"),Tu=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Pu=d(),Oe=o("div"),f(tn.$$.fragment),wu=d(),on=o("p"),xu=r(`Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A PEGASUS sequence has the following format, where `),Ha=o("code"),zu=r("X"),Eu=r(" represents the sequence:"),$u=d(),nn=o("ul"),As=o("li"),qu=r("single sequence: "),Qa=o("code"),Fu=r("X </s>"),Mu=d(),sn=o("li"),Cu=r("pair of sequences: "),Ra=o("code"),ju=r("A B </s>"),Su=r(" (not intended use)"),Ou=d(),Va=o("p"),Au=r(`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),Iu=d(),Xt=o("div"),f(an.$$.fragment),Lu=d(),Ja=o("p"),Nu=r("Converts a sequence of tokens (string) in a single string."),Gu=d(),Zt=o("div"),f(rn.$$.fragment),Du=d(),Xa=o("p"),Uu=r("Get list where entries are [1] if a token is [eos] or [pad] else 0."),Bu=d(),Yt=o("div"),f(dn.$$.fragment),Wu=d(),Za=o("p"),Ku=r("Just EOS"),$d=d(),$t=o("h2"),eo=o("a"),Ya=o("span"),f(ln.$$.fragment),Hu=d(),er=o("span"),Qu=r("PegasusTokenizerFast"),qd=d(),fe=o("div"),f(cn.$$.fragment),Ru=d(),qt=o("p"),Vu=r("Construct a \u201Cfast\u201D PEGASUS tokenizer (backed by HuggingFace\u2019s "),tr=o("em"),Ju=r("tokenizers"),Xu=r(" library). Based on "),un=o("a"),Zu=r("Unigram"),Yu=r("."),ep=d(),pn=o("p"),tp=r("This tokenizer inherits from "),Is=o("a"),op=r("PreTrainedTokenizerFast"),np=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),sp=d(),Ze=o("div"),f(hn.$$.fragment),ap=d(),or=o("p"),rp=r("Build model inputs from a sequence by adding eos to the end. no bos token is added to the front."),ip=d(),mn=o("ul"),Ls=o("li"),dp=r("single sequence: "),nr=o("code"),lp=r("X </s>"),cp=d(),fn=o("li"),up=r("pair of sequences: "),sr=o("code"),pp=r("A B </s>"),hp=r(" (not intended use)"),mp=d(),to=o("div"),f(gn.$$.fragment),fp=d(),ar=o("p"),gp=r("Get list where entries are [1] if a token is [eos] or [pad] else 0."),Fd=d(),Ft=o("h2"),oo=o("a"),rr=o("span"),f(_n.$$.fragment),_p=d(),ir=o("span"),kp=r("PegasusModel"),Md=d(),Me=o("div"),f(kn.$$.fragment),vp=d(),vn=o("p"),bp=r(`The bare PEGASUS Model outputting raw hidden-states without any specific head on top. 
This model inherits from `),Ns=o("a"),yp=r("PreTrainedModel"),Tp=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pp=d(),bn=o("p"),wp=r("This model is also a PyTorch "),yn=o("a"),xp=r("torch.nn.Module"),zp=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ep=d(),Te=o("div"),f(Tn.$$.fragment),$p=d(),Mt=o("p"),qp=r("The "),Gs=o("a"),Fp=r("PegasusModel"),Mp=r(" forward method, overrides the "),dr=o("code"),Cp=r("__call__"),jp=r(" special method."),Sp=d(),f(no.$$.fragment),Op=d(),lr=o("p"),Ap=r("Example:"),Ip=d(),f(Pn.$$.fragment),Cd=d(),Ct=o("h2"),so=o("a"),cr=o("span"),f(wn.$$.fragment),Lp=d(),ur=o("span"),Np=r("PegasusForConditionalGeneration"),jd=d(),Ce=o("div"),f(xn.$$.fragment),Gp=d(),zn=o("p"),Dp=r(`The PEGASUS Model with a language modeling head. Can be used for summarization. This model inherits from `),Ds=o("a"),Up=r("PreTrainedModel"),Bp=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wp=d(),En=o("p"),Kp=r("This model is also a PyTorch "),$n=o("a"),Hp=r("torch.nn.Module"),Qp=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rp=d(),G=o("div"),f(qn.$$.fragment),Vp=d(),jt=o("p"),Jp=r("The "),Us=o("a"),Xp=r("PegasusForConditionalGeneration"),Zp=r(" forward method, overrides the "),pr=o("code"),Yp=r("__call__"),eh=r(" special method."),th=d(),f(ao.$$.fragment),oh=d(),hr=o("p"),nh=r("Summarization example::"),sh=d(),mr=o("blockquote"),fr=o("blockquote"),gr=o("blockquote"),_r=o("p"),ah=r("from transformers import PegasusTokenizer, PegasusForConditionalGeneration"),rh=d(),kr=o("blockquote"),vr=o("blockquote"),br=o("blockquote"),yr=o("p"),ih=r(`model = PegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-xsum\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-xsum\u2019)`),dh=d(),Tr=o("blockquote"),Pr=o("blockquote"),wr=o("blockquote"),xr=o("p"),lh=r(`ARTICLE_TO_SUMMARIZE = ( \u2026 \u201CPG&E stated it scheduled the blackouts in response to forecasts for high winds \u201D \u2026 \u201Camid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were \u201D \u2026 \u201Cscheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.\u201D \u2026 ) inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018pt\u2019)`),ch=d(),zr=o("blockquote"),Er=o("blockquote"),Fn=o("blockquote"),ro=o("h1"),io=o("a"),$r=o("span"),f(Mn.$$.fragment),uh=d(),qr=o("span"),ph=r("Generate Summary"),hh=d(),Fr=o("p"),mh=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019]) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),Sd=d(),St=o("h2"),lo=o("a"),Mr=o("span"),f(Cn.$$.fragment),fh=d(),Cr=o("span"),gh=r("PegasusForCausalLM"),Od=d(),jn=o("div"),Ye=o("div"),f(Sn.$$.fragment),_h=d(),jr=o("p"),kh=r("Example:"),vh=d(),f(On.$$.fragment),Ad=d(),Ot=o("h2"),co=o("a"),Sr=o("span"),f(An.$$.fragment),bh=d(),Or=o("span"),yh=r("TFPegasusModel"),Id=d(),ge=o("div"),f(In.$$.fragment),Th=d(),Ln=o("p"),Ph=r(`The bare PEGASUS Model outputting raw hidden-states without any specific head on top. This model inherits from `),Bs=o("a"),wh=r("TFPreTrainedModel"),xh=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zh=d(),Nn=o("p"),Eh=r("This model is also a "),Gn=o("a"),$h=r("tf.keras.Model"),qh=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Fh=d(),f(uo.$$.fragment),Mh=d(),Pe=o("div"),f(Dn.$$.fragment),Ch=d(),At=o("p"),jh=r("The "),Ws=o("a"),Sh=r("TFPegasusModel"),Oh=r(" forward method, overrides the "),Ar=o("code"),Ah=r("__call__"),Ih=r(" special method."),Lh=d(),f(po.$$.fragment),Nh=d(),Ir=o("p"),Gh=r("Example:"),Dh=d(),f(Un.$$.fragment),Ld=d(),It=o("h2"),ho=o("a"),Lr=o("span"),f(Bn.$$.fragment),Uh=d(),Nr=o("span"),Bh=r("TFPegasusForConditionalGeneration"),Nd=d(),_e=o("div"),f(Wn.$$.fragment),Wh=d(),Kn=o("p"),Kh=r(`The PEGASUS Model with a language modeling head. Can be used for summarization. This model inherits from `),Ks=o("a"),Hh=r("TFPreTrainedModel"),Qh=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rh=d(),Hn=o("p"),Vh=r("This model is also a "),Qn=o("a"),Jh=r("tf.keras.Model"),Xh=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Zh=d(),f(mo.$$.fragment),Yh=d(),D=o("div"),f(Rn.$$.fragment),em=d(),Lt=o("p"),tm=r("The "),Hs=o("a"),om=r("TFPegasusForConditionalGeneration"),nm=r(" forward method, overrides the "),Gr=o("code"),sm=r("__call__"),am=r(" special method."),rm=d(),f(fo.$$.fragment),im=d(),Dr=o("p"),dm=r("Summarization example::"),lm=d(),Ur=o("blockquote"),Br=o("blockquote"),Wr=o("blockquote"),Kr=o("p"),cm=r("from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration"),um=d(),Hr=o("blockquote"),Qr=o("blockquote"),Rr=o("blockquote"),Vr=o("p"),pm=r(`model = TFPegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-xsum\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-xsum\u2019)`),hm=d(),Jr=o("blockquote"),Xr=o("blockquote"),Zr=o("blockquote"),Yr=o("p"),mm=r(`ARTICLE_TO_SUMMARIZE = ( \u2026 \u201CPG&E stated it scheduled the blackouts in response to forecasts for high winds \u201D \u2026 \u201Camid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were \u201D \u2026 \u201Cscheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.\u201D \u2026 ) inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018tf\u2019)`),fm=d(),ei=o("blockquote"),ti=o("blockquote"),Vn=o("blockquote"),go=o("h1"),_o=o("a"),oi=o("span"),f(Jn.$$.fragment),gm=d(),ni=o("span"),_m=r("Generate Summary"),km=d(),si=o("p"),vm=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019]) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),Gd=d(),Nt=o("h2"),ko=o("a"),ai=o("span"),f(Xn.$$.fragment),bm=d(),ri=o("span"),ym=r("FlaxPegasusModel"),Dd=d(),L=o("div"),f(Zn.$$.fragment),Tm=d(),Yn=o("p"),Pm=r(`The bare Pegasus Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Qs=o("a"),wm=r("FlaxPreTrainedModel"),xm=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zm=d(),es=o("p"),Em=r("This model is also a Flax Linen "),ts=o("a"),$m=r("flax.nn.Module"),qm=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Fm=d(),ii=o("p"),Mm=r("Finally, this model supports inherent JAX features such as:"),Cm=d(),Qe=o("ul"),di=o("li"),os=o("a"),jm=r("Just-In-Time (JIT) compilation"),Sm=d(),li=o("li"),ns=o("a"),Om=r("Automatic Differentiation"),Am=d(),ci=o("li"),ss=o("a"),Im=r("Vectorization"),Lm=d(),ui=o("li"),as=o("a"),Nm=r("Parallelization"),Gm=d(),we=o("div"),f(rs.$$.fragment),Dm=d(),Gt=o("p"),Um=r("The "),pi=o("code"),Bm=r("FlaxPegasusPreTrainedModel"),Wm=r(" forward method, overrides the "),hi=o("code"),Km=r("__call__"),Hm=r(" special method."),Qm=d(),f(vo.$$.fragment),Rm=d(),mi=o("p"),Vm=r("Example:"),Jm=d(),f(is.$$.fragment),Xm=d(),et=o("div"),f(ds.$$.fragment),Zm=d(),fi=o("p"),Ym=r("Example:"),ef=d(),f(ls.$$.fragment),tf=d(),tt=o("div"),f(cs.$$.fragment),of=d(),gi=o("p"),nf=r("Example:"),sf=d(),f(us.$$.fragment),Ud=d(),Dt=o("h2"),bo=o("a"),_i=o("span"),f(ps.$$.fragment),af=d(),ki=o("span"),rf=r("FlaxPegasusForConditionalGeneration"),Bd=d(),N=o("div"),f(hs.$$.fragment),df=d(),ms=o("p"),lf=r(`The PEGASUS Model with a language modeling head. Can be used for summarization. This model inherits from `),Rs=o("a"),cf=r("FlaxPreTrainedModel"),uf=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pf=d(),fs=o("p"),hf=r("This model is also a Flax Linen "),gs=o("a"),mf=r("flax.nn.Module"),ff=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),gf=d(),vi=o("p"),_f=r("Finally, this model supports inherent JAX features such as:"),kf=d(),Re=o("ul"),bi=o("li"),_s=o("a"),vf=r("Just-In-Time (JIT) compilation"),bf=d(),yi=o("li"),ks=o("a"),yf=r("Automatic Differentiation"),Tf=d(),Ti=o("li"),vs=o("a"),Pf=r("Vectorization"),wf=d(),Pi=o("li"),bs=o("a"),xf=r("Parallelization"),zf=d(),F=o("div"),f(ys.$$.fragment),Ef=d(),Ut=o("p"),$f=r("The "),wi=o("code"),qf=r("FlaxPegasusPreTrainedModel"),Ff=r(" forward method, overrides the "),xi=o("code"),Mf=r("__call__"),Cf=r(" special method."),jf=d(),f(yo.$$.fragment),Sf=d(),zi=o("p"),Of=r("Summarization example::"),Af=d(),Ei=o("blockquote"),$i=o("blockquote"),qi=o("blockquote"),Fi=o("p"),If=r("from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration"),Lf=d(),Mi=o("blockquote"),Ci=o("blockquote"),ji=o("blockquote"),Si=o("p"),Nf=r(`model = FlaxPegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-large\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-large\u2019)`),Gf=d(),Oi=o("blockquote"),Ai=o("blockquote"),Ii=o("blockquote"),Li=o("p"),Df=r(`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018np\u2019)`),Uf=d(),Ni=o("blockquote"),Gi=o("blockquote"),Ts=o("blockquote"),To=o("h1"),Po=o("a"),Di=o("span"),f(Ps.$$.fragment),Bf=d(),Ui=o("span"),Wf=r("Generate Summary"),Kf=d(),Bi=o("p"),Hf=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019]).sequences print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))`),Qf=d(),Wi=o("p"),Rf=r("Mask filling example::"),Vf=d(),Ki=o("blockquote"),Hi=o("blockquote"),Qi=o("blockquote"),Ri=o("p"),Jf=r(`from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration tokenizer = 
PegasusTokenizer.from_pretrained(\u2018google/pegasus-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),Xf=d(),Vi=o("blockquote"),Ji=o("blockquote"),Xi=o("blockquote"),Zi=o("p"),Zf=r(`model = FlaxPegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018np\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),Yf=d(),Yi=o("blockquote"),ed=o("blockquote"),td=o("blockquote"),od=o("p"),eg=r(`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = jax.nn.softmax(logits[0, masked_index], axis=0) values, predictions = jax.lax.top_k(probs)`),tg=d(),nd=o("blockquote"),sd=o("blockquote"),ad=o("blockquote"),rd=o("p"),og=r("tokenizer.decode(predictions).split()"),ng=d(),ot=o("div"),f(ws.$$.fragment),sg=d(),id=o("p"),ag=r("Example:"),rg=d(),f(xs.$$.fragment),ig=d(),nt=o("div"),f(zs.$$.fragment),dg=d(),dd=o("p"),lg=r("Example:"),cg=d(),f(Es.$$.fragment),this.h()},l(a){const p=zb('[data-svelte="svelte-1phssyn"]',document.head);h=n(p,"META",{name:!0,content:!0}),p.forEach(t),z=l(a),y=n(a,"H1",{class:!0});var $s=s(y);T=n($s,"A",{id:!0,class:!0,href:!0});var ld=s(T);x=n(ld,"SPAN",{});var cd=s(x);g(w.$$.fragment,cd),cd.forEach(t),ld.forEach(t),P=l($s),E=n($s,"SPAN",{});var ud=s(E);Ae=i(ud,"Pegasus"),ud.forEach(t),$s.forEach(t),ke=l(a),q=n(a,"P",{});var wo=s(q);ze=n(wo,"STRONG",{});var pd=s(ze);le=i(pd,"DISCLAIMER:"),pd.forEach(t),Ie=i(wo," If you see something strange, file a "),ce=n(wo,"A",{href:!0,rel:!0});var hd=s(ce);ue=i(hd,"Github Issue"),hd.forEach(t),Le=i(wo,` and assign @patrickvonplaten.`),wo.forEach(t),je=l(a),B=n(a,"H2",{class:!0});var qs=s(B);O=n(qs,"A",{id:!0,class:!0,href:!0});var mg=s(O);Ee=n(mg,"SPAN",{});var fg=s(Ee);g(J.$$.fragment,fg),fg.forEach(t),mg.forEach(t),j=l(qs),S=n(qs,"SPAN",{});var gg=s(S);Ne=i(gg,"Overview"),gg.forEach(t),qs.forEach(t),te=l(a),oe=n(a,"P",{});var Kd=s(oe);Ge=i(Kd,"The Pegasus model was proposed in "),W=n(Kd,"A",{href:!0,rel:!0});var _g=s(W);De=i(_g,"PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization"),_g.forEach(t),Ue=i(Kd," by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019."),Kd.forEach(t),A=l(a),ve=n(a,"P",{});var kg=s(ve);pe=i(kg,"According to the abstract,"),kg.forEach(t),Se=l(a),ne=n(a,"UL",{});var Hd=s(ne);X=n(Hd,"LI",{});var vg=s(X);Be=i(vg,`Pegasus\u2019 pretraining task is intentionally similar to summarization: important sentences are removed/masked from an input document and are generated together as one output sequence from the remaining sentences, similar to an extractive summary.`),vg.forEach(t),We=l(Hd),K=n(Hd,"LI",{});var bg=s(K);Ke=i(bg,"Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval."),bg.forEach(t),Hd.forEach(t),se=l(a),R=n(a,"P",{});var Vs=s(R);m=i(Vs,"This model was contributed by "),$=n(Vs,"A",{href:!0,rel:!0});var yg=s($);$e=i(yg,"sshleifer"),yg.forEach(t),mt=i(Vs,". 
The Authors\u2019 code can be found "),qe=n(Vs,"A",{href:!0,rel:!0});var Tg=s(qe);Z=i(Tg,"here"),Tg.forEach(t),ft=i(Vs,"."),Vs.forEach(t),Ve=l(a),he=n(a,"H2",{class:!0});var Qd=s(he);I=n(Qd,"A",{id:!0,class:!0,href:!0});var Pg=s(I);Y=n(Pg,"SPAN",{});var wg=s(Y);g(Fe.$$.fragment,wg),wg.forEach(t),Pg.forEach(t),gt=l(Qd),ee=n(Qd,"SPAN",{});var xg=s(ee);_t=i(xg,"Checkpoints"),xg.forEach(t),Qd.forEach(t),Je=l(a),V=n(a,"P",{});var Js=s(V);kt=i(Js,"All the "),Co=n(Js,"A",{href:!0,rel:!0});var zg=s(Co);Dl=i(zg,"checkpoints"),zg.forEach(t),Ul=i(Js,` are fine-tuned for summarization, besides `),fa=n(Js,"EM",{});var Eg=s(fa);Bl=i(Eg,"pegasus-large"),Eg.forEach(t),Wl=i(Js,", whence the other checkpoints are fine-tuned:"),Js.forEach(t),gd=l(a),be=n(a,"UL",{});var st=s(be);ga=n(st,"LI",{});var $g=s(ga);Kl=i($g,"Each checkpoint is 2.2 GB on disk and 568M parameters."),$g.forEach(t),Hl=l(st),_a=n(st,"LI",{});var qg=s(_a);Ql=i(qg,"FP16 is not supported (help/ideas on this appreciated!)."),qg.forEach(t),Rl=l(st),ka=n(st,"LI",{});var Fg=s(ka);Vl=i(Fg,"Summarizing xsum in fp32 takes about 400ms/sample, with default parameters on a v100 GPU."),Fg.forEach(t),Jl=l(st),jo=n(st,"LI",{});var Rd=s(jo);Xl=i(Rd,"Full replication results and correctly pre-processed data can be found in this "),So=n(Rd,"A",{href:!0,rel:!0});var Mg=s(So);Zl=i(Mg,"Issue"),Mg.forEach(t),Yl=i(Rd,"."),Rd.forEach(t),ec=l(st),Bt=n(st,"LI",{});var md=s(Bt);Oo=n(md,"A",{href:!0,rel:!0});var Cg=s(Oo);tc=i(Cg,"Distilled checkpoints"),Cg.forEach(t),oc=i(md," are described in this "),Ao=n(md,"A",{href:!0,rel:!0});var jg=s(Ao);nc=i(jg,"paper"),jg.forEach(t),sc=i(md,"."),md.forEach(t),st.forEach(t),_d=l(a),vt=n(a,"H3",{class:!0});var Vd=s(vt);Wt=n(Vd,"A",{id:!0,class:!0,href:!0});var Sg=s(Wt);va=n(Sg,"SPAN",{});var Og=s(va);g(Io.$$.fragment,Og),Og.forEach(t),Sg.forEach(t),ac=l(Vd),ba=n(Vd,"SPAN",{});var Ag=s(ba);rc=i(Ag,"Examples"),Ag.forEach(t),Vd.forEach(t),kd=l(a),Xe=n(a,"UL",{});var Xs=s(Xe);Kt=n(Xs,"LI",{});var fd=s(Kt);Lo=n(fd,"A",{href:!0,rel:!0});var Ig=s(Lo);ic=i(Ig,"Script"),Ig.forEach(t),dc=i(fd,` to fine-tune pegasus on the XSUM dataset. 
Data download instructions at `),No=n(fd,"A",{href:!0,rel:!0});var Lg=s(No);lc=i(Lg,"examples/pytorch/summarization/"),Lg.forEach(t),cc=i(fd,"."),fd.forEach(t),uc=l(Xs),ya=n(Xs,"LI",{});var Ng=s(ya);pc=i(Ng,"FP16 is not supported (help/ideas on this appreciated!)."),Ng.forEach(t),hc=l(Xs),Ta=n(Xs,"LI",{});var Gg=s(Ta);mc=i(Gg,"The adafactor optimizer is recommended for pegasus fine-tuning."),Gg.forEach(t),Xs.forEach(t),vd=l(a),bt=n(a,"H2",{class:!0});var Jd=s(bt);Ht=n(Jd,"A",{id:!0,class:!0,href:!0});var Dg=s(Ht);Pa=n(Dg,"SPAN",{});var Ug=s(Pa);g(Go.$$.fragment,Ug),Ug.forEach(t),Dg.forEach(t),fc=l(Jd),wa=n(Jd,"SPAN",{});var Bg=s(wa);gc=i(Bg,"Implementation Notes"),Bg.forEach(t),Jd.forEach(t),bd=l(a),ye=n(a,"UL",{});var at=s(ye);xa=n(at,"LI",{});var Wg=s(xa);za=n(Wg,"P",{});var Kg=s(za);_c=i(Kg,"All models are transformer encoder-decoders with 16 layers in each component."),Kg.forEach(t),Wg.forEach(t),kc=l(at),Ea=n(at,"LI",{});var Hg=s(Ea);Fs=n(Hg,"P",{});var ug=s(Fs);vc=i(ug,"The implementation is completely inherited from "),Ms=n(ug,"A",{href:!0});var Qg=s(Ms);bc=i(Qg,"BartForConditionalGeneration"),Qg.forEach(t),ug.forEach(t),Hg.forEach(t),yc=l(at),Do=n(at,"LI",{});var Xd=s(Do);$a=n(Xd,"P",{});var Rg=s($a);Tc=i(Rg,"Some key configuration differences:"),Rg.forEach(t),Pc=l(Xd),yt=n(Xd,"UL",{});var Zs=s(yt);qa=n(Zs,"LI",{});var Vg=s(qa);wc=i(Vg,"static, sinusoidal position embeddings"),Vg.forEach(t),xc=l(Zs),Fa=n(Zs,"LI",{});var Jg=s(Fa);zc=i(Jg,"the model starts generating with pad_token_id (which has 0 token_embedding) as the prefix."),Jg.forEach(t),Ec=l(Zs),Uo=n(Zs,"LI",{});var Zd=s(Uo);$c=i(Zd,"more beams are used ("),Ma=n(Zd,"CODE",{});var Xg=s(Ma);qc=i(Xg,"num_beams=8"),Xg.forEach(t),Fc=i(Zd,")"),Zd.forEach(t),Zs.forEach(t),Xd.forEach(t),Mc=l(at),Ca=n(at,"LI",{});var Zg=s(Ca);He=n(Zg,"P",{});var xo=s(He);Cc=i(xo,"All pretrained pegasus checkpoints are the same besides three attributes: "),ja=n(xo,"CODE",{});var Yg=s(ja);jc=i(Yg,"tokenizer.model_max_length"),Yg.forEach(t),Sc=i(xo,` (maximum input size), `),Sa=n(xo,"CODE",{});var e_=s(Sa);Oc=i(e_,"max_length"),e_.forEach(t),Ac=i(xo," (the maximum number of tokens to generate) and "),Oa=n(xo,"CODE",{});var t_=s(Oa);Ic=i(t_,"length_penalty"),t_.forEach(t),Lc=i(xo,"."),xo.forEach(t),Zg.forEach(t),Nc=l(at),Aa=n(at,"LI",{});var o_=s(Aa);Tt=n(o_,"P",{});var Ys=s(Tt);Gc=i(Ys,"The code to convert checkpoints trained in the author\u2019s "),Bo=n(Ys,"A",{href:!0,rel:!0});var n_=s(Bo);Dc=i(n_,"repo"),n_.forEach(t),Uc=i(Ys,` can be found in `),Ia=n(Ys,"CODE",{});var s_=s(Ia);Bc=i(s_,"convert_pegasus_tf_to_pytorch.py"),s_.forEach(t),Wc=i(Ys,"."),Ys.forEach(t),o_.forEach(t),at.forEach(t),yd=l(a),Pt=n(a,"H2",{class:!0});var Yd=s(Pt);Qt=n(Yd,"A",{id:!0,class:!0,href:!0});var a_=s(Qt);La=n(a_,"SPAN",{});var r_=s(La);g(Wo.$$.fragment,r_),r_.forEach(t),a_.forEach(t),Kc=l(Yd),Na=n(Yd,"SPAN",{});var i_=s(Na);Hc=i(i_,"Usage Example"),i_.forEach(t),Yd.forEach(t),Td=l(a),g(Ko.$$.fragment,a),Pd=l(a),wt=n(a,"H2",{class:!0});var el=s(wt);Rt=n(el,"A",{id:!0,class:!0,href:!0});var d_=s(Rt);Ga=n(d_,"SPAN",{});var l_=s(Ga);g(Ho.$$.fragment,l_),l_.forEach(t),d_.forEach(t),Qc=l(el),Da=n(el,"SPAN",{});var c_=s(Da);Rc=i(c_,"PegasusConfig"),c_.forEach(t),el.forEach(t),wd=l(a),me=n(a,"DIV",{class:!0});var rt=s(me);g(Qo.$$.fragment,rt),Vc=l(rt),xt=n(rt,"P",{});var ea=s(xt);Jc=i(ea,"This is the configuration class to store the configuration of a "),Cs=n(ea,"A",{href:!0});var u_=s(Cs);Xc=i(u_,"PegasusModel"),u_.forEach(t),Zc=i(ea,`. 
It is used to instantiate an PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PEGASUS `),Ro=n(ea,"A",{href:!0,rel:!0});var p_=s(Ro);Yc=i(p_,"google/pegasus-large"),p_.forEach(t),eu=i(ea," architecture."),ea.forEach(t),tu=l(rt),zt=n(rt,"P",{});var ta=s(zt);ou=i(ta,"Configuration objects inherit from "),js=n(ta,"A",{href:!0});var h_=s(js);nu=i(h_,"PretrainedConfig"),h_.forEach(t),su=i(ta,` and can be used to control the model outputs. Read the documentation from `),Ss=n(ta,"A",{href:!0});var m_=s(Ss);au=i(m_,"PretrainedConfig"),m_.forEach(t),ru=i(ta," for more information."),ta.forEach(t),iu=l(rt),Ua=n(rt,"P",{});var f_=s(Ua);du=i(f_,"Example:"),f_.forEach(t),lu=l(rt),g(Vo.$$.fragment,rt),rt.forEach(t),xd=l(a),Et=n(a,"H2",{class:!0});var tl=s(Et);Vt=n(tl,"A",{id:!0,class:!0,href:!0});var g_=s(Vt);Ba=n(g_,"SPAN",{});var __=s(Ba);g(Jo.$$.fragment,__),__.forEach(t),g_.forEach(t),cu=l(tl),Wa=n(tl,"SPAN",{});var k_=s(Wa);uu=i(k_,"PegasusTokenizer"),k_.forEach(t),tl.forEach(t),zd=l(a),Jt=n(a,"P",{});var ol=s(Jt);pu=i(ol,"warning: "),Ka=n(ol,"CODE",{});var v_=s(Ka);hu=i(v_,"add_tokens"),v_.forEach(t),mu=i(ol," does not work at the moment."),ol.forEach(t),Ed=l(a),H=n(a,"DIV",{class:!0});var xe=s(H);g(Xo.$$.fragment,xe),fu=l(xe),Zo=n(xe,"P",{});var nl=s(Zo);gu=i(nl,"Construct a PEGASUS tokenizer. Based on "),Yo=n(nl,"A",{href:!0,rel:!0});var b_=s(Yo);_u=i(b_,"SentencePiece"),b_.forEach(t),ku=i(nl,"."),nl.forEach(t),vu=l(xe),en=n(xe,"P",{});var sl=s(en);bu=i(sl,"This tokenizer inherits from "),Os=n(sl,"A",{href:!0});var y_=s(Os);yu=i(y_,"PreTrainedTokenizer"),y_.forEach(t),Tu=i(sl,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),sl.forEach(t),Pu=l(xe),Oe=n(xe,"DIV",{class:!0});var zo=s(Oe);g(tn.$$.fragment,zo),wu=l(zo),on=n(zo,"P",{});var al=s(on);xu=i(al,`Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A PEGASUS sequence has the following format, where `),Ha=n(al,"CODE",{});var T_=s(Ha);zu=i(T_,"X"),T_.forEach(t),Eu=i(al," represents the sequence:"),al.forEach(t),$u=l(zo),nn=n(zo,"UL",{});var rl=s(nn);As=n(rl,"LI",{});var pg=s(As);qu=i(pg,"single sequence: "),Qa=n(pg,"CODE",{});var P_=s(Qa);Fu=i(P_,"X </s>"),P_.forEach(t),pg.forEach(t),Mu=l(rl),sn=n(rl,"LI",{});var il=s(sn);Cu=i(il,"pair of sequences: "),Ra=n(il,"CODE",{});var w_=s(Ra);ju=i(w_,"A B </s>"),w_.forEach(t),Su=i(il," (not intended use)"),il.forEach(t),rl.forEach(t),Ou=l(zo),Va=n(zo,"P",{});var x_=s(Va);Au=i(x_,`BOS is never used. 
Pairs of sequences are not the expected use case, but they will be handled without a separator.`),x_.forEach(t),zo.forEach(t),Iu=l(xe),Xt=n(xe,"DIV",{class:!0});var dl=s(Xt);g(an.$$.fragment,dl),Lu=l(dl),Ja=n(dl,"P",{});var z_=s(Ja);Nu=i(z_,"Converts a sequence of tokens (string) in a single string."),z_.forEach(t),dl.forEach(t),Gu=l(xe),Zt=n(xe,"DIV",{class:!0});var ll=s(Zt);g(rn.$$.fragment,ll),Du=l(ll),Xa=n(ll,"P",{});var E_=s(Xa);Uu=i(E_,"Get list where entries are [1] if a token is [eos] or [pad] else 0."),E_.forEach(t),ll.forEach(t),Bu=l(xe),Yt=n(xe,"DIV",{class:!0});var cl=s(Yt);g(dn.$$.fragment,cl),Wu=l(cl),Za=n(cl,"P",{});var $_=s(Za);Ku=i($_,"Just EOS"),$_.forEach(t),cl.forEach(t),xe.forEach(t),$d=l(a),$t=n(a,"H2",{class:!0});var ul=s($t);eo=n(ul,"A",{id:!0,class:!0,href:!0});var q_=s(eo);Ya=n(q_,"SPAN",{});var F_=s(Ya);g(ln.$$.fragment,F_),F_.forEach(t),q_.forEach(t),Hu=l(ul),er=n(ul,"SPAN",{});var M_=s(er);Qu=i(M_,"PegasusTokenizerFast"),M_.forEach(t),ul.forEach(t),qd=l(a),fe=n(a,"DIV",{class:!0});var it=s(fe);g(cn.$$.fragment,it),Ru=l(it),qt=n(it,"P",{});var oa=s(qt);Vu=i(oa,"Construct a \u201Cfast\u201D PEGASUS tokenizer (backed by HuggingFace\u2019s "),tr=n(oa,"EM",{});var C_=s(tr);Ju=i(C_,"tokenizers"),C_.forEach(t),Xu=i(oa," library). Based on "),un=n(oa,"A",{href:!0,rel:!0});var j_=s(un);Zu=i(j_,"Unigram"),j_.forEach(t),Yu=i(oa,"."),oa.forEach(t),ep=l(it),pn=n(it,"P",{});var pl=s(pn);tp=i(pl,"This tokenizer inherits from "),Is=n(pl,"A",{href:!0});var S_=s(Is);op=i(S_,"PreTrainedTokenizerFast"),S_.forEach(t),np=i(pl,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),pl.forEach(t),sp=l(it),Ze=n(it,"DIV",{class:!0});var na=s(Ze);g(hn.$$.fragment,na),ap=l(na),or=n(na,"P",{});var O_=s(or);rp=i(O_,"Build model inputs from a sequence by adding eos to the end. no bos token is added to the front."),O_.forEach(t),ip=l(na),mn=n(na,"UL",{});var hl=s(mn);Ls=n(hl,"LI",{});var hg=s(Ls);dp=i(hg,"single sequence: "),nr=n(hg,"CODE",{});var A_=s(nr);lp=i(A_,"X </s>"),A_.forEach(t),hg.forEach(t),cp=l(hl),fn=n(hl,"LI",{});var ml=s(fn);up=i(ml,"pair of sequences: "),sr=n(ml,"CODE",{});var I_=s(sr);pp=i(I_,"A B </s>"),I_.forEach(t),hp=i(ml," (not intended use)"),ml.forEach(t),hl.forEach(t),na.forEach(t),mp=l(it),to=n(it,"DIV",{class:!0});var fl=s(to);g(gn.$$.fragment,fl),fp=l(fl),ar=n(fl,"P",{});var L_=s(ar);gp=i(L_,"Get list where entries are [1] if a token is [eos] or [pad] else 0."),L_.forEach(t),fl.forEach(t),it.forEach(t),Fd=l(a),Ft=n(a,"H2",{class:!0});var gl=s(Ft);oo=n(gl,"A",{id:!0,class:!0,href:!0});var N_=s(oo);rr=n(N_,"SPAN",{});var G_=s(rr);g(_n.$$.fragment,G_),G_.forEach(t),N_.forEach(t),_p=l(gl),ir=n(gl,"SPAN",{});var D_=s(ir);kp=i(D_,"PegasusModel"),D_.forEach(t),gl.forEach(t),Md=l(a),Me=n(a,"DIV",{class:!0});var Eo=s(Me);g(kn.$$.fragment,Eo),vp=l(Eo),vn=n(Eo,"P",{});var _l=s(vn);bp=i(_l,`The bare PEGASUS Model outputting raw hidden-states without any specific head on top. This model inherits from `),Ns=n(_l,"A",{href:!0});var U_=s(Ns);yp=i(U_,"PreTrainedModel"),U_.forEach(t),Tp=i(_l,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_l.forEach(t),Pp=l(Eo),bn=n(Eo,"P",{});var kl=s(bn);wp=i(kl,"This model is also a PyTorch "),yn=n(kl,"A",{href:!0,rel:!0});var B_=s(yn);xp=i(B_,"torch.nn.Module"),B_.forEach(t),zp=i(kl,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kl.forEach(t),Ep=l(Eo),Te=n(Eo,"DIV",{class:!0});var dt=s(Te);g(Tn.$$.fragment,dt),$p=l(dt),Mt=n(dt,"P",{});var sa=s(Mt);qp=i(sa,"The "),Gs=n(sa,"A",{href:!0});var W_=s(Gs);Fp=i(W_,"PegasusModel"),W_.forEach(t),Mp=i(sa," forward method, overrides the "),dr=n(sa,"CODE",{});var K_=s(dr);Cp=i(K_,"__call__"),K_.forEach(t),jp=i(sa," special method."),sa.forEach(t),Sp=l(dt),g(no.$$.fragment,dt),Op=l(dt),lr=n(dt,"P",{});var H_=s(lr);Ap=i(H_,"Example:"),H_.forEach(t),Ip=l(dt),g(Pn.$$.fragment,dt),dt.forEach(t),Eo.forEach(t),Cd=l(a),Ct=n(a,"H2",{class:!0});var vl=s(Ct);so=n(vl,"A",{id:!0,class:!0,href:!0});var Q_=s(so);cr=n(Q_,"SPAN",{});var R_=s(cr);g(wn.$$.fragment,R_),R_.forEach(t),Q_.forEach(t),Lp=l(vl),ur=n(vl,"SPAN",{});var V_=s(ur);Np=i(V_,"PegasusForConditionalGeneration"),V_.forEach(t),vl.forEach(t),jd=l(a),Ce=n(a,"DIV",{class:!0});var $o=s(Ce);g(xn.$$.fragment,$o),Gp=l($o),zn=n($o,"P",{});var bl=s(zn);Dp=i(bl,`The PEGASUS Model with a language modeling head. Can be used for summarization. This model inherits from `),Ds=n(bl,"A",{href:!0});var J_=s(Ds);Up=i(J_,"PreTrainedModel"),J_.forEach(t),Bp=i(bl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bl.forEach(t),Wp=l($o),En=n($o,"P",{});var yl=s(En);Kp=i(yl,"This model is also a PyTorch "),$n=n(yl,"A",{href:!0,rel:!0});var X_=s($n);Hp=i(X_,"torch.nn.Module"),X_.forEach(t),Qp=i(yl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yl.forEach(t),Rp=l($o),G=n($o,"DIV",{class:!0});var ae=s(G);g(qn.$$.fragment,ae),Vp=l(ae),jt=n(ae,"P",{});var aa=s(jt);Jp=i(aa,"The "),Us=n(aa,"A",{href:!0});var Z_=s(Us);Xp=i(Z_,"PegasusForConditionalGeneration"),Z_.forEach(t),Zp=i(aa," forward method, overrides the "),pr=n(aa,"CODE",{});var Y_=s(pr);Yp=i(Y_,"__call__"),Y_.forEach(t),eh=i(aa," special method."),aa.forEach(t),th=l(ae),g(ao.$$.fragment,ae),oh=l(ae),hr=n(ae,"P",{});var ek=s(hr);nh=i(ek,"Summarization example::"),ek.forEach(t),sh=l(ae),mr=n(ae,"BLOCKQUOTE",{});var tk=s(mr);fr=n(tk,"BLOCKQUOTE",{});var ok=s(fr);gr=n(ok,"BLOCKQUOTE",{});var nk=s(gr);_r=n(nk,"P",{});var sk=s(_r);ah=i(sk,"from transformers import PegasusTokenizer, PegasusForConditionalGeneration"),sk.forEach(t),nk.forEach(t),ok.forEach(t),tk.forEach(t),rh=l(ae),kr=n(ae,"BLOCKQUOTE",{});var ak=s(kr);vr=n(ak,"BLOCKQUOTE",{});var rk=s(vr);br=n(rk,"BLOCKQUOTE",{});var ik=s(br);yr=n(ik,"P",{});var dk=s(yr);ih=i(dk,`model = PegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-xsum\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-xsum\u2019)`),dk.forEach(t),ik.forEach(t),rk.forEach(t),ak.forEach(t),dh=l(ae),Tr=n(ae,"BLOCKQUOTE",{});var lk=s(Tr);Pr=n(lk,"BLOCKQUOTE",{});var ck=s(Pr);wr=n(ck,"BLOCKQUOTE",{});var uk=s(wr);xr=n(uk,"P",{});var pk=s(xr);lh=i(pk,`ARTICLE_TO_SUMMARIZE = ( \u2026 \u201CPG&E stated it scheduled the blackouts in response to forecasts for high winds \u201D \u2026 \u201Camid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were \u201D \u2026 \u201Cscheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.\u201D \u2026 ) inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018pt\u2019)`),pk.forEach(t),uk.forEach(t),ck.forEach(t),lk.forEach(t),ch=l(ae),zr=n(ae,"BLOCKQUOTE",{});var hk=s(zr);Er=n(hk,"BLOCKQUOTE",{});var mk=s(Er);Fn=n(mk,"BLOCKQUOTE",{});var Tl=s(Fn);ro=n(Tl,"H1",{class:!0});var Pl=s(ro);io=n(Pl,"A",{id:!0,class:!0,href:!0});var fk=s(io);$r=n(fk,"SPAN",{});var gk=s($r);g(Mn.$$.fragment,gk),gk.forEach(t),fk.forEach(t),uh=l(Pl),qr=n(Pl,"SPAN",{});var _k=s(qr);ph=i(_k,"Generate Summary"),_k.forEach(t),Pl.forEach(t),hh=l(Tl),Fr=n(Tl,"P",{});var kk=s(Fr);mh=i(kk,`summary_ids = model.generate(inputs[\u2018input_ids\u2019]) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),kk.forEach(t),Tl.forEach(t),mk.forEach(t),hk.forEach(t),ae.forEach(t),$o.forEach(t),Sd=l(a),St=n(a,"H2",{class:!0});var wl=s(St);lo=n(wl,"A",{id:!0,class:!0,href:!0});var vk=s(lo);Mr=n(vk,"SPAN",{});var bk=s(Mr);g(Cn.$$.fragment,bk),bk.forEach(t),vk.forEach(t),fh=l(wl),Cr=n(wl,"SPAN",{});var yk=s(Cr);gh=i(yk,"PegasusForCausalLM"),yk.forEach(t),wl.forEach(t),Od=l(a),jn=n(a,"DIV",{class:!0});var Tk=s(jn);Ye=n(Tk,"DIV",{class:!0});var ra=s(Ye);g(Sn.$$.fragment,ra),_h=l(ra),jr=n(ra,"P",{});var Pk=s(jr);kh=i(Pk,"Example:"),Pk.forEach(t),vh=l(ra),g(On.$$.fragment,ra),ra.forEach(t),Tk.forEach(t),Ad=l(a),Ot=n(a,"H2",{class:!0});var xl=s(Ot);co=n(xl,"A",{id:!0,class:!0,href:!0});var wk=s(co);Sr=n(wk,"SPAN",{});var xk=s(Sr);g(An.$$.fragment,xk),xk.forEach(t),wk.forEach(t),bh=l(xl),Or=n(xl,"SPAN",{});var zk=s(Or);yh=i(zk,"TFPegasusModel"),zk.forEach(t),xl.forEach(t),Id=l(a),ge=n(a,"DIV",{class:!0});var lt=s(ge);g(In.$$.fragment,lt),Th=l(lt),Ln=n(lt,"P",{});var zl=s(Ln);Ph=i(zl,`The bare PEGASUS Model outputting raw hidden-states without any specific head on top. This model inherits from `),Bs=n(zl,"A",{href:!0});var Ek=s(Bs);wh=i(Ek,"TFPreTrainedModel"),Ek.forEach(t),xh=i(zl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zl.forEach(t),zh=l(lt),Nn=n(lt,"P",{});var El=s(Nn);Eh=i(El,"This model is also a "),Gn=n(El,"A",{href:!0,rel:!0});var $k=s(Gn);$h=i($k,"tf.keras.Model"),$k.forEach(t),qh=i(El,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),El.forEach(t),Fh=l(lt),g(uo.$$.fragment,lt),Mh=l(lt),Pe=n(lt,"DIV",{class:!0});var ct=s(Pe);g(Dn.$$.fragment,ct),Ch=l(ct),At=n(ct,"P",{});var ia=s(At);jh=i(ia,"The "),Ws=n(ia,"A",{href:!0});var qk=s(Ws);Sh=i(qk,"TFPegasusModel"),qk.forEach(t),Oh=i(ia," forward method, overrides the "),Ar=n(ia,"CODE",{});var Fk=s(Ar);Ah=i(Fk,"__call__"),Fk.forEach(t),Ih=i(ia," special method."),ia.forEach(t),Lh=l(ct),g(po.$$.fragment,ct),Nh=l(ct),Ir=n(ct,"P",{});var Mk=s(Ir);Gh=i(Mk,"Example:"),Mk.forEach(t),Dh=l(ct),g(Un.$$.fragment,ct),ct.forEach(t),lt.forEach(t),Ld=l(a),It=n(a,"H2",{class:!0});var $l=s(It);ho=n($l,"A",{id:!0,class:!0,href:!0});var Ck=s(ho);Lr=n(Ck,"SPAN",{});var jk=s(Lr);g(Bn.$$.fragment,jk),jk.forEach(t),Ck.forEach(t),Uh=l($l),Nr=n($l,"SPAN",{});var Sk=s(Nr);Bh=i(Sk,"TFPegasusForConditionalGeneration"),Sk.forEach(t),$l.forEach(t),Nd=l(a),_e=n(a,"DIV",{class:!0});var ut=s(_e);g(Wn.$$.fragment,ut),Wh=l(ut),Kn=n(ut,"P",{});var ql=s(Kn);Kh=i(ql,`The PEGASUS Model with a language modeling head. Can be used for summarization. This model inherits from `),Ks=n(ql,"A",{href:!0});var Ok=s(Ks);Hh=i(Ok,"TFPreTrainedModel"),Ok.forEach(t),Qh=i(ql,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ql.forEach(t),Rh=l(ut),Hn=n(ut,"P",{});var Fl=s(Hn);Vh=i(Fl,"This model is also a "),Qn=n(Fl,"A",{href:!0,rel:!0});var Ak=s(Qn);Jh=i(Ak,"tf.keras.Model"),Ak.forEach(t),Xh=i(Fl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Fl.forEach(t),Zh=l(ut),g(mo.$$.fragment,ut),Yh=l(ut),D=n(ut,"DIV",{class:!0});var re=s(D);g(Rn.$$.fragment,re),em=l(re),Lt=n(re,"P",{});var da=s(Lt);tm=i(da,"The "),Hs=n(da,"A",{href:!0});var Ik=s(Hs);om=i(Ik,"TFPegasusForConditionalGeneration"),Ik.forEach(t),nm=i(da," forward method, overrides the "),Gr=n(da,"CODE",{});var Lk=s(Gr);sm=i(Lk,"__call__"),Lk.forEach(t),am=i(da," special method."),da.forEach(t),rm=l(re),g(fo.$$.fragment,re),im=l(re),Dr=n(re,"P",{});var Nk=s(Dr);dm=i(Nk,"Summarization example::"),Nk.forEach(t),lm=l(re),Ur=n(re,"BLOCKQUOTE",{});var Gk=s(Ur);Br=n(Gk,"BLOCKQUOTE",{});var Dk=s(Br);Wr=n(Dk,"BLOCKQUOTE",{});var Uk=s(Wr);Kr=n(Uk,"P",{});var Bk=s(Kr);cm=i(Bk,"from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration"),Bk.forEach(t),Uk.forEach(t),Dk.forEach(t),Gk.forEach(t),um=l(re),Hr=n(re,"BLOCKQUOTE",{});var Wk=s(Hr);Qr=n(Wk,"BLOCKQUOTE",{});var Kk=s(Qr);Rr=n(Kk,"BLOCKQUOTE",{});var Hk=s(Rr);Vr=n(Hk,"P",{});var Qk=s(Vr);pm=i(Qk,`model = TFPegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-xsum\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-xsum\u2019)`),Qk.forEach(t),Hk.forEach(t),Kk.forEach(t),Wk.forEach(t),hm=l(re),Jr=n(re,"BLOCKQUOTE",{});var Rk=s(Jr);Xr=n(Rk,"BLOCKQUOTE",{});var Vk=s(Xr);Zr=n(Vk,"BLOCKQUOTE",{});var Jk=s(Zr);Yr=n(Jk,"P",{});var Xk=s(Yr);mm=i(Xk,`ARTICLE_TO_SUMMARIZE = ( \u2026 \u201CPG&E stated it scheduled the blackouts in response to forecasts for high winds \u201D \u2026 \u201Camid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were \u201D \u2026 \u201Cscheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.\u201D \u2026 ) inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018tf\u2019)`),Xk.forEach(t),Jk.forEach(t),Vk.forEach(t),Rk.forEach(t),fm=l(re),ei=n(re,"BLOCKQUOTE",{});var Zk=s(ei);ti=n(Zk,"BLOCKQUOTE",{});var Yk=s(ti);Vn=n(Yk,"BLOCKQUOTE",{});var Ml=s(Vn);go=n(Ml,"H1",{class:!0});var Cl=s(go);_o=n(Cl,"A",{id:!0,class:!0,href:!0});var ev=s(_o);oi=n(ev,"SPAN",{});var tv=s(oi);g(Jn.$$.fragment,tv),tv.forEach(t),ev.forEach(t),gm=l(Cl),ni=n(Cl,"SPAN",{});var ov=s(ni);_m=i(ov,"Generate Summary"),ov.forEach(t),Cl.forEach(t),km=l(Ml),si=n(Ml,"P",{});var nv=s(si);vm=i(nv,`summary_ids = model.generate(inputs[\u2018input_ids\u2019]) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),nv.forEach(t),Ml.forEach(t),Yk.forEach(t),Zk.forEach(t),re.forEach(t),ut.forEach(t),Gd=l(a),Nt=n(a,"H2",{class:!0});var jl=s(Nt);ko=n(jl,"A",{id:!0,class:!0,href:!0});var sv=s(ko);ai=n(sv,"SPAN",{});var av=s(ai);g(Xn.$$.fragment,av),av.forEach(t),sv.forEach(t),bm=l(jl),ri=n(jl,"SPAN",{});var rv=s(ri);ym=i(rv,"FlaxPegasusModel"),rv.forEach(t),jl.forEach(t),Dd=l(a),L=n(a,"DIV",{class:!0});var ie=s(L);g(Zn.$$.fragment,ie),Tm=l(ie),Yn=n(ie,"P",{});var Sl=s(Yn);Pm=i(Sl,`The bare Pegasus Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Qs=n(Sl,"A",{href:!0});var iv=s(Qs);wm=i(iv,"FlaxPreTrainedModel"),iv.forEach(t),xm=i(Sl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sl.forEach(t),zm=l(ie),es=n(ie,"P",{});var Ol=s(es);Em=i(Ol,"This model is also a Flax Linen "),ts=n(Ol,"A",{href:!0,rel:!0});var dv=s(ts);$m=i(dv,"flax.nn.Module"),dv.forEach(t),qm=i(Ol,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ol.forEach(t),Fm=l(ie),ii=n(ie,"P",{});var lv=s(ii);Mm=i(lv,"Finally, this model supports inherent JAX features such as:"),lv.forEach(t),Cm=l(ie),Qe=n(ie,"UL",{});var qo=s(Qe);di=n(qo,"LI",{});var cv=s(di);os=n(cv,"A",{href:!0,rel:!0});var uv=s(os);jm=i(uv,"Just-In-Time (JIT) compilation"),uv.forEach(t),cv.forEach(t),Sm=l(qo),li=n(qo,"LI",{});var pv=s(li);ns=n(pv,"A",{href:!0,rel:!0});var hv=s(ns);Om=i(hv,"Automatic Differentiation"),hv.forEach(t),pv.forEach(t),Am=l(qo),ci=n(qo,"LI",{});var mv=s(ci);ss=n(mv,"A",{href:!0,rel:!0});var fv=s(ss);Im=i(fv,"Vectorization"),fv.forEach(t),mv.forEach(t),Lm=l(qo),ui=n(qo,"LI",{});var gv=s(ui);as=n(gv,"A",{href:!0,rel:!0});var _v=s(as);Nm=i(_v,"Parallelization"),_v.forEach(t),gv.forEach(t),qo.forEach(t),Gm=l(ie),we=n(ie,"DIV",{class:!0});var pt=s(we);g(rs.$$.fragment,pt),Dm=l(pt),Gt=n(pt,"P",{});var la=s(Gt);Um=i(la,"The "),pi=n(la,"CODE",{});var kv=s(pi);Bm=i(kv,"FlaxPegasusPreTrainedModel"),kv.forEach(t),Wm=i(la," forward method, overrides the "),hi=n(la,"CODE",{});var vv=s(hi);Km=i(vv,"__call__"),vv.forEach(t),Hm=i(la," special method."),la.forEach(t),Qm=l(pt),g(vo.$$.fragment,pt),Rm=l(pt),mi=n(pt,"P",{});var bv=s(mi);Vm=i(bv,"Example:"),bv.forEach(t),Jm=l(pt),g(is.$$.fragment,pt),pt.forEach(t),Xm=l(ie),et=n(ie,"DIV",{class:!0});var ca=s(et);g(ds.$$.fragment,ca),Zm=l(ca),fi=n(ca,"P",{});var yv=s(fi);Ym=i(yv,"Example:"),yv.forEach(t),ef=l(ca),g(ls.$$.fragment,ca),ca.forEach(t),tf=l(ie),tt=n(ie,"DIV",{class:!0});var ua=s(tt);g(cs.$$.fragment,ua),of=l(ua),gi=n(ua,"P",{});var Tv=s(gi);nf=i(Tv,"Example:"),Tv.forEach(t),sf=l(ua),g(us.$$.fragment,ua),ua.forEach(t),ie.forEach(t),Ud=l(a),Dt=n(a,"H2",{class:!0});var Al=s(Dt);bo=n(Al,"A",{id:!0,class:!0,href:!0});var Pv=s(bo);_i=n(Pv,"SPAN",{});var wv=s(_i);g(ps.$$.fragment,wv),wv.forEach(t),Pv.forEach(t),af=l(Al),ki=n(Al,"SPAN",{});var xv=s(ki);rf=i(xv,"FlaxPegasusForConditionalGeneration"),xv.forEach(t),Al.forEach(t),Bd=l(a),N=n(a,"DIV",{class:!0});var de=s(N);g(hs.$$.fragment,de),df=l(de),ms=n(de,"P",{});var Il=s(ms);lf=i(Il,`The PEGASUS Model with a language modeling head. Can be used for summarization. This model inherits from `),Rs=n(Il,"A",{href:!0});var zv=s(Rs);cf=i(zv,"FlaxPreTrainedModel"),zv.forEach(t),uf=i(Il,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Il.forEach(t),pf=l(de),fs=n(de,"P",{});var Ll=s(fs);hf=i(Ll,"This model is also a Flax Linen "),gs=n(Ll,"A",{href:!0,rel:!0});var Ev=s(gs);mf=i(Ev,"flax.nn.Module"),Ev.forEach(t),ff=i(Ll,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ll.forEach(t),gf=l(de),vi=n(de,"P",{});var $v=s(vi);_f=i($v,"Finally, this model supports inherent JAX features such as:"),$v.forEach(t),kf=l(de),Re=n(de,"UL",{});var Fo=s(Re);bi=n(Fo,"LI",{});var qv=s(bi);_s=n(qv,"A",{href:!0,rel:!0});var Fv=s(_s);vf=i(Fv,"Just-In-Time (JIT) compilation"),Fv.forEach(t),qv.forEach(t),bf=l(Fo),yi=n(Fo,"LI",{});var Mv=s(yi);ks=n(Mv,"A",{href:!0,rel:!0});var Cv=s(ks);yf=i(Cv,"Automatic Differentiation"),Cv.forEach(t),Mv.forEach(t),Tf=l(Fo),Ti=n(Fo,"LI",{});var jv=s(Ti);vs=n(jv,"A",{href:!0,rel:!0});var Sv=s(vs);Pf=i(Sv,"Vectorization"),Sv.forEach(t),jv.forEach(t),wf=l(Fo),Pi=n(Fo,"LI",{});var Ov=s(Pi);bs=n(Ov,"A",{href:!0,rel:!0});var Av=s(bs);xf=i(Av,"Parallelization"),Av.forEach(t),Ov.forEach(t),Fo.forEach(t),zf=l(de),F=n(de,"DIV",{class:!0});var M=s(F);g(ys.$$.fragment,M),Ef=l(M),Ut=n(M,"P",{});var pa=s(Ut);$f=i(pa,"The "),wi=n(pa,"CODE",{});var Iv=s(wi);qf=i(Iv,"FlaxPegasusPreTrainedModel"),Iv.forEach(t),Ff=i(pa," forward method, overrides the "),xi=n(pa,"CODE",{});var Lv=s(xi);Mf=i(Lv,"__call__"),Lv.forEach(t),Cf=i(pa," special method."),pa.forEach(t),jf=l(M),g(yo.$$.fragment,M),Sf=l(M),zi=n(M,"P",{});var Nv=s(zi);Of=i(Nv,"Summarization example::"),Nv.forEach(t),Af=l(M),Ei=n(M,"BLOCKQUOTE",{});var Gv=s(Ei);$i=n(Gv,"BLOCKQUOTE",{});var Dv=s($i);qi=n(Dv,"BLOCKQUOTE",{});var Uv=s(qi);Fi=n(Uv,"P",{});var Bv=s(Fi);If=i(Bv,"from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration"),Bv.forEach(t),Uv.forEach(t),Dv.forEach(t),Gv.forEach(t),Lf=l(M),Mi=n(M,"BLOCKQUOTE",{});var Wv=s(Mi);Ci=n(Wv,"BLOCKQUOTE",{});var Kv=s(Ci);ji=n(Kv,"BLOCKQUOTE",{});var Hv=s(ji);Si=n(Hv,"P",{});var Qv=s(Si);Nf=i(Qv,`model = FlaxPegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-large\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-large\u2019)`),Qv.forEach(t),Hv.forEach(t),Kv.forEach(t),Wv.forEach(t),Gf=l(M),Oi=n(M,"BLOCKQUOTE",{});var Rv=s(Oi);Ai=n(Rv,"BLOCKQUOTE",{});var Vv=s(Ai);Ii=n(Vv,"BLOCKQUOTE",{});var Jv=s(Ii);Li=n(Jv,"P",{});var Xv=s(Li);Df=i(Xv,`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018np\u2019)`),Xv.forEach(t),Jv.forEach(t),Vv.forEach(t),Rv.forEach(t),Uf=l(M),Ni=n(M,"BLOCKQUOTE",{});var Zv=s(Ni);Gi=n(Zv,"BLOCKQUOTE",{});var Yv=s(Gi);Ts=n(Yv,"BLOCKQUOTE",{});var Nl=s(Ts);To=n(Nl,"H1",{class:!0});var Gl=s(To);Po=n(Gl,"A",{id:!0,class:!0,href:!0});var eb=s(Po);Di=n(eb,"SPAN",{});var tb=s(Di);g(Ps.$$.fragment,tb),tb.forEach(t),eb.forEach(t),Bf=l(Gl),Ui=n(Gl,"SPAN",{});var ob=s(Ui);Wf=i(ob,"Generate Summary"),ob.forEach(t),Gl.forEach(t),Kf=l(Nl),Bi=n(Nl,"P",{});var nb=s(Bi);Hf=i(nb,`summary_ids = model.generate(inputs[\u2018input_ids\u2019]).sequences print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))`),nb.forEach(t),Nl.forEach(t),Yv.forEach(t),Zv.forEach(t),Qf=l(M),Wi=n(M,"P",{});var sb=s(Wi);Rf=i(sb,"Mask filling example::"),sb.forEach(t),Vf=l(M),Ki=n(M,"BLOCKQUOTE",{});var ab=s(Ki);Hi=n(ab,"BLOCKQUOTE",{});var rb=s(Hi);Qi=n(rb,"BLOCKQUOTE",{});var ib=s(Qi);Ri=n(ib,"P",{});var db=s(Ri);Jf=i(db,`from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration tokenizer = PegasusTokenizer.from_pretrained(\u2018google/pegasus-large\u2019) TXT = \u201CMy friends are <mask> but they eat too many 
carbs.\u201D`),db.forEach(t),ib.forEach(t),rb.forEach(t),ab.forEach(t),Xf=l(M),Vi=n(M,"BLOCKQUOTE",{});var lb=s(Vi);Ji=n(lb,"BLOCKQUOTE",{});var cb=s(Ji);Xi=n(cb,"BLOCKQUOTE",{});var ub=s(Xi);Zi=n(ub,"P",{});var pb=s(Zi);Zf=i(pb,`model = FlaxPegasusForConditionalGeneration.from_pretrained(\u2018google/pegasus-large\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018np\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),pb.forEach(t),ub.forEach(t),cb.forEach(t),lb.forEach(t),Yf=l(M),Yi=n(M,"BLOCKQUOTE",{});var hb=s(Yi);ed=n(hb,"BLOCKQUOTE",{});var mb=s(ed);td=n(mb,"BLOCKQUOTE",{});var fb=s(td);od=n(fb,"P",{});var gb=s(od);eg=i(gb,`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = jax.nn.softmax(logits[0, masked_index], axis=0) values, predictions = jax.lax.top_k(probs)`),gb.forEach(t),fb.forEach(t),mb.forEach(t),hb.forEach(t),tg=l(M),nd=n(M,"BLOCKQUOTE",{});var _b=s(nd);sd=n(_b,"BLOCKQUOTE",{});var kb=s(sd);ad=n(kb,"BLOCKQUOTE",{});var vb=s(ad);rd=n(vb,"P",{});var bb=s(rd);og=i(bb,"tokenizer.decode(predictions).split()"),bb.forEach(t),vb.forEach(t),kb.forEach(t),_b.forEach(t),M.forEach(t),ng=l(de),ot=n(de,"DIV",{class:!0});var ha=s(ot);g(ws.$$.fragment,ha),sg=l(ha),id=n(ha,"P",{});var yb=s(id);ag=i(yb,"Example:"),yb.forEach(t),rg=l(ha),g(xs.$$.fragment,ha),ha.forEach(t),ig=l(de),nt=n(de,"DIV",{class:!0});var ma=s(nt);g(zs.$$.fragment,ma),dg=l(ma),dd=n(ma,"P",{});var Tb=s(dd);lg=i(Tb,"Example:"),Tb.forEach(t),cg=l(ma),g(Es.$$.fragment,ma),ma.forEach(t),de.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(Ab)),c(T,"id","pegasus"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#pegasus"),c(y,"class","relative group"),c(ce,"href","https://github.com/huggingface/transformers/issues/new?assignees=sshleifer&labels=&template=bug-report.md&title"),c(ce,"rel","nofollow"),c(O,"id","overview"),c(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(O,"href","#overview"),c(B,"class","relative group"),c(W,"href","https://arxiv.org/pdf/1912.08777.pdf"),c(W,"rel","nofollow"),c($,"href","https://huggingface.co/sshleifer"),c($,"rel","nofollow"),c(qe,"href","https://github.com/google-research/pegasus"),c(qe,"rel","nofollow"),c(I,"id","checkpoints"),c(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(I,"href","#checkpoints"),c(he,"class","relative group"),c(Co,"href","https://huggingface.co/models?search=pegasus"),c(Co,"rel","nofollow"),c(So,"href","https://github.com/huggingface/transformers/issues/6844#issue-689259666"),c(So,"rel","nofollow"),c(Oo,"href","https://huggingface.co/models?search=distill-pegasus"),c(Oo,"rel","nofollow"),c(Ao,"href","https://arxiv.org/abs/2010.13002"),c(Ao,"rel","nofollow"),c(Wt,"id","examples"),c(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wt,"href","#examples"),c(vt,"class","relative 
group"),c(Lo,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh"),c(Lo,"rel","nofollow"),c(No,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization/README.md"),c(No,"rel","nofollow"),c(Ht,"id","implementation-notes"),c(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ht,"href","#implementation-notes"),c(bt,"class","relative group"),c(Ms,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(Bo,"href","https://github.com/google-research/pegasus"),c(Bo,"rel","nofollow"),c(Qt,"id","usage-example"),c(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qt,"href","#usage-example"),c(Pt,"class","relative group"),c(Rt,"id","transformers.PegasusConfig"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.PegasusConfig"),c(wt,"class","relative group"),c(Cs,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusModel"),c(Ro,"href","https://huggingface.co/google/pegasus-large"),c(Ro,"rel","nofollow"),c(js,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Ss,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(me,"class","docstring"),c(Vt,"id","transformers.PegasusTokenizer"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.PegasusTokenizer"),c(Et,"class","relative group"),c(Yo,"href","https://github.com/google/sentencepiece"),c(Yo,"rel","nofollow"),c(Os,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Oe,"class","docstring"),c(Xt,"class","docstring"),c(Zt,"class","docstring"),c(Yt,"class","docstring"),c(H,"class","docstring"),c(eo,"id","transformers.PegasusTokenizerFast"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.PegasusTokenizerFast"),c($t,"class","relative group"),c(un,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),c(un,"rel","nofollow"),c(Is,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Ze,"class","docstring"),c(to,"class","docstring"),c(fe,"class","docstring"),c(oo,"id","transformers.PegasusModel"),c(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(oo,"href","#transformers.PegasusModel"),c(Ft,"class","relative 
group"),c(Ns,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(yn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(yn,"rel","nofollow"),c(Gs,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusModel"),c(Te,"class","docstring"),c(Me,"class","docstring"),c(so,"id","transformers.PegasusForConditionalGeneration"),c(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(so,"href","#transformers.PegasusForConditionalGeneration"),c(Ct,"class","relative group"),c(Ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c($n,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c($n,"rel","nofollow"),c(Us,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusForConditionalGeneration"),c(io,"id","generate-summary"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#generate-summary"),c(ro,"class","relative group"),c(G,"class","docstring"),c(Ce,"class","docstring"),c(lo,"id","transformers.PegasusForCausalLM"),c(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(lo,"href","#transformers.PegasusForCausalLM"),c(St,"class","relative group"),c(Ye,"class","docstring"),c(jn,"class","docstring"),c(co,"id","transformers.TFPegasusModel"),c(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(co,"href","#transformers.TFPegasusModel"),c(Ot,"class","relative group"),c(Bs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Gn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Gn,"rel","nofollow"),c(Ws,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusModel"),c(Pe,"class","docstring"),c(ge,"class","docstring"),c(ho,"id","transformers.TFPegasusForConditionalGeneration"),c(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ho,"href","#transformers.TFPegasusForConditionalGeneration"),c(It,"class","relative group"),c(Ks,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Qn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Qn,"rel","nofollow"),c(Hs,"href","/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.TFPegasusForConditionalGeneration"),c(_o,"id","generate-summary"),c(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_o,"href","#generate-summary"),c(go,"class","relative group"),c(D,"class","docstring"),c(_e,"class","docstring"),c(ko,"id","transformers.FlaxPegasusModel"),c(ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ko,"href","#transformers.FlaxPegasusModel"),c(Nt,"class","relative 
group"),c(Qs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ts,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(ts,"rel","nofollow"),c(os,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(os,"rel","nofollow"),c(ns,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(ns,"rel","nofollow"),c(ss,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(ss,"rel","nofollow"),c(as,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(as,"rel","nofollow"),c(we,"class","docstring"),c(et,"class","docstring"),c(tt,"class","docstring"),c(L,"class","docstring"),c(bo,"id","transformers.FlaxPegasusForConditionalGeneration"),c(bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bo,"href","#transformers.FlaxPegasusForConditionalGeneration"),c(Dt,"class","relative group"),c(Rs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(gs,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(gs,"rel","nofollow"),c(_s,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(_s,"rel","nofollow"),c(ks,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(ks,"rel","nofollow"),c(vs,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(vs,"rel","nofollow"),c(bs,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(bs,"rel","nofollow"),c(Po,"id","generate-summary"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#generate-summary"),c(To,"class","relative 
group"),c(F,"class","docstring"),c(ot,"class","docstring"),c(nt,"class","docstring"),c(N,"class","docstring")},m(a,p){e(document.head,h),u(a,z,p),u(a,y,p),e(y,T),e(T,x),_(w,x,null),e(y,P),e(y,E),e(E,Ae),u(a,ke,p),u(a,q,p),e(q,ze),e(ze,le),e(q,Ie),e(q,ce),e(ce,ue),e(q,Le),u(a,je,p),u(a,B,p),e(B,O),e(O,Ee),_(J,Ee,null),e(B,j),e(B,S),e(S,Ne),u(a,te,p),u(a,oe,p),e(oe,Ge),e(oe,W),e(W,De),e(oe,Ue),u(a,A,p),u(a,ve,p),e(ve,pe),u(a,Se,p),u(a,ne,p),e(ne,X),e(X,Be),e(ne,We),e(ne,K),e(K,Ke),u(a,se,p),u(a,R,p),e(R,m),e(R,$),e($,$e),e(R,mt),e(R,qe),e(qe,Z),e(R,ft),u(a,Ve,p),u(a,he,p),e(he,I),e(I,Y),_(Fe,Y,null),e(he,gt),e(he,ee),e(ee,_t),u(a,Je,p),u(a,V,p),e(V,kt),e(V,Co),e(Co,Dl),e(V,Ul),e(V,fa),e(fa,Bl),e(V,Wl),u(a,gd,p),u(a,be,p),e(be,ga),e(ga,Kl),e(be,Hl),e(be,_a),e(_a,Ql),e(be,Rl),e(be,ka),e(ka,Vl),e(be,Jl),e(be,jo),e(jo,Xl),e(jo,So),e(So,Zl),e(jo,Yl),e(be,ec),e(be,Bt),e(Bt,Oo),e(Oo,tc),e(Bt,oc),e(Bt,Ao),e(Ao,nc),e(Bt,sc),u(a,_d,p),u(a,vt,p),e(vt,Wt),e(Wt,va),_(Io,va,null),e(vt,ac),e(vt,ba),e(ba,rc),u(a,kd,p),u(a,Xe,p),e(Xe,Kt),e(Kt,Lo),e(Lo,ic),e(Kt,dc),e(Kt,No),e(No,lc),e(Kt,cc),e(Xe,uc),e(Xe,ya),e(ya,pc),e(Xe,hc),e(Xe,Ta),e(Ta,mc),u(a,vd,p),u(a,bt,p),e(bt,Ht),e(Ht,Pa),_(Go,Pa,null),e(bt,fc),e(bt,wa),e(wa,gc),u(a,bd,p),u(a,ye,p),e(ye,xa),e(xa,za),e(za,_c),e(ye,kc),e(ye,Ea),e(Ea,Fs),e(Fs,vc),e(Fs,Ms),e(Ms,bc),e(ye,yc),e(ye,Do),e(Do,$a),e($a,Tc),e(Do,Pc),e(Do,yt),e(yt,qa),e(qa,wc),e(yt,xc),e(yt,Fa),e(Fa,zc),e(yt,Ec),e(yt,Uo),e(Uo,$c),e(Uo,Ma),e(Ma,qc),e(Uo,Fc),e(ye,Mc),e(ye,Ca),e(Ca,He),e(He,Cc),e(He,ja),e(ja,jc),e(He,Sc),e(He,Sa),e(Sa,Oc),e(He,Ac),e(He,Oa),e(Oa,Ic),e(He,Lc),e(ye,Nc),e(ye,Aa),e(Aa,Tt),e(Tt,Gc),e(Tt,Bo),e(Bo,Dc),e(Tt,Uc),e(Tt,Ia),e(Ia,Bc),e(Tt,Wc),u(a,yd,p),u(a,Pt,p),e(Pt,Qt),e(Qt,La),_(Wo,La,null),e(Pt,Kc),e(Pt,Na),e(Na,Hc),u(a,Td,p),_(Ko,a,p),u(a,Pd,p),u(a,wt,p),e(wt,Rt),e(Rt,Ga),_(Ho,Ga,null),e(wt,Qc),e(wt,Da),e(Da,Rc),u(a,wd,p),u(a,me,p),_(Qo,me,null),e(me,Vc),e(me,xt),e(xt,Jc),e(xt,Cs),e(Cs,Xc),e(xt,Zc),e(xt,Ro),e(Ro,Yc),e(xt,eu),e(me,tu),e(me,zt),e(zt,ou),e(zt,js),e(js,nu),e(zt,su),e(zt,Ss),e(Ss,au),e(zt,ru),e(me,iu),e(me,Ua),e(Ua,du),e(me,lu),_(Vo,me,null),u(a,xd,p),u(a,Et,p),e(Et,Vt),e(Vt,Ba),_(Jo,Ba,null),e(Et,cu),e(Et,Wa),e(Wa,uu),u(a,zd,p),u(a,Jt,p),e(Jt,pu),e(Jt,Ka),e(Ka,hu),e(Jt,mu),u(a,Ed,p),u(a,H,p),_(Xo,H,null),e(H,fu),e(H,Zo),e(Zo,gu),e(Zo,Yo),e(Yo,_u),e(Zo,ku),e(H,vu),e(H,en),e(en,bu),e(en,Os),e(Os,yu),e(en,Tu),e(H,Pu),e(H,Oe),_(tn,Oe,null),e(Oe,wu),e(Oe,on),e(on,xu),e(on,Ha),e(Ha,zu),e(on,Eu),e(Oe,$u),e(Oe,nn),e(nn,As),e(As,qu),e(As,Qa),e(Qa,Fu),e(nn,Mu),e(nn,sn),e(sn,Cu),e(sn,Ra),e(Ra,ju),e(sn,Su),e(Oe,Ou),e(Oe,Va),e(Va,Au),e(H,Iu),e(H,Xt),_(an,Xt,null),e(Xt,Lu),e(Xt,Ja),e(Ja,Nu),e(H,Gu),e(H,Zt),_(rn,Zt,null),e(Zt,Du),e(Zt,Xa),e(Xa,Uu),e(H,Bu),e(H,Yt),_(dn,Yt,null),e(Yt,Wu),e(Yt,Za),e(Za,Ku),u(a,$d,p),u(a,$t,p),e($t,eo),e(eo,Ya),_(ln,Ya,null),e($t,Hu),e($t,er),e(er,Qu),u(a,qd,p),u(a,fe,p),_(cn,fe,null),e(fe,Ru),e(fe,qt),e(qt,Vu),e(qt,tr),e(tr,Ju),e(qt,Xu),e(qt,un),e(un,Zu),e(qt,Yu),e(fe,ep),e(fe,pn),e(pn,tp),e(pn,Is),e(Is,op),e(pn,np),e(fe,sp),e(fe,Ze),_(hn,Ze,null),e(Ze,ap),e(Ze,or),e(or,rp),e(Ze,ip),e(Ze,mn),e(mn,Ls),e(Ls,dp),e(Ls,nr),e(nr,lp),e(mn,cp),e(mn,fn),e(fn,up),e(fn,sr),e(sr,pp),e(fn,hp),e(fe,mp),e(fe,to),_(gn,to,null),e(to,fp),e(to,ar),e(ar,gp),u(a,Fd,p),u(a,Ft,p),e(Ft,oo),e(oo,rr),_(_n,rr,null),e(Ft,_p),e(Ft,ir),e(ir,kp),u(a,Md,p),u(a,Me,p),_(kn,Me,null),e(Me,vp),e(Me,vn),e(vn,bp),e(vn,Ns),e(Ns,yp),e(vn,Tp),e(Me,Pp),e(Me,bn),e(bn,wp),e(bn,yn),e(yn,xp),e(bn,zp),e(Me,Ep),e(Me,Te),_(Tn,Te,null),e(Te,$p),e(Te,Mt),e(Mt,qp),e(Mt,Gs),e(Gs,Fp),e(Mt,Mp),e(Mt,d
r),e(dr,Cp),e(Mt,jp),e(Te,Sp),_(no,Te,null),e(Te,Op),e(Te,lr),e(lr,Ap),e(Te,Ip),_(Pn,Te,null),u(a,Cd,p),u(a,Ct,p),e(Ct,so),e(so,cr),_(wn,cr,null),e(Ct,Lp),e(Ct,ur),e(ur,Np),u(a,jd,p),u(a,Ce,p),_(xn,Ce,null),e(Ce,Gp),e(Ce,zn),e(zn,Dp),e(zn,Ds),e(Ds,Up),e(zn,Bp),e(Ce,Wp),e(Ce,En),e(En,Kp),e(En,$n),e($n,Hp),e(En,Qp),e(Ce,Rp),e(Ce,G),_(qn,G,null),e(G,Vp),e(G,jt),e(jt,Jp),e(jt,Us),e(Us,Xp),e(jt,Zp),e(jt,pr),e(pr,Yp),e(jt,eh),e(G,th),_(ao,G,null),e(G,oh),e(G,hr),e(hr,nh),e(G,sh),e(G,mr),e(mr,fr),e(fr,gr),e(gr,_r),e(_r,ah),e(G,rh),e(G,kr),e(kr,vr),e(vr,br),e(br,yr),e(yr,ih),e(G,dh),e(G,Tr),e(Tr,Pr),e(Pr,wr),e(wr,xr),e(xr,lh),e(G,ch),e(G,zr),e(zr,Er),e(Er,Fn),e(Fn,ro),e(ro,io),e(io,$r),_(Mn,$r,null),e(ro,uh),e(ro,qr),e(qr,ph),e(Fn,hh),e(Fn,Fr),e(Fr,mh),u(a,Sd,p),u(a,St,p),e(St,lo),e(lo,Mr),_(Cn,Mr,null),e(St,fh),e(St,Cr),e(Cr,gh),u(a,Od,p),u(a,jn,p),e(jn,Ye),_(Sn,Ye,null),e(Ye,_h),e(Ye,jr),e(jr,kh),e(Ye,vh),_(On,Ye,null),u(a,Ad,p),u(a,Ot,p),e(Ot,co),e(co,Sr),_(An,Sr,null),e(Ot,bh),e(Ot,Or),e(Or,yh),u(a,Id,p),u(a,ge,p),_(In,ge,null),e(ge,Th),e(ge,Ln),e(Ln,Ph),e(Ln,Bs),e(Bs,wh),e(Ln,xh),e(ge,zh),e(ge,Nn),e(Nn,Eh),e(Nn,Gn),e(Gn,$h),e(Nn,qh),e(ge,Fh),_(uo,ge,null),e(ge,Mh),e(ge,Pe),_(Dn,Pe,null),e(Pe,Ch),e(Pe,At),e(At,jh),e(At,Ws),e(Ws,Sh),e(At,Oh),e(At,Ar),e(Ar,Ah),e(At,Ih),e(Pe,Lh),_(po,Pe,null),e(Pe,Nh),e(Pe,Ir),e(Ir,Gh),e(Pe,Dh),_(Un,Pe,null),u(a,Ld,p),u(a,It,p),e(It,ho),e(ho,Lr),_(Bn,Lr,null),e(It,Uh),e(It,Nr),e(Nr,Bh),u(a,Nd,p),u(a,_e,p),_(Wn,_e,null),e(_e,Wh),e(_e,Kn),e(Kn,Kh),e(Kn,Ks),e(Ks,Hh),e(Kn,Qh),e(_e,Rh),e(_e,Hn),e(Hn,Vh),e(Hn,Qn),e(Qn,Jh),e(Hn,Xh),e(_e,Zh),_(mo,_e,null),e(_e,Yh),e(_e,D),_(Rn,D,null),e(D,em),e(D,Lt),e(Lt,tm),e(Lt,Hs),e(Hs,om),e(Lt,nm),e(Lt,Gr),e(Gr,sm),e(Lt,am),e(D,rm),_(fo,D,null),e(D,im),e(D,Dr),e(Dr,dm),e(D,lm),e(D,Ur),e(Ur,Br),e(Br,Wr),e(Wr,Kr),e(Kr,cm),e(D,um),e(D,Hr),e(Hr,Qr),e(Qr,Rr),e(Rr,Vr),e(Vr,pm),e(D,hm),e(D,Jr),e(Jr,Xr),e(Xr,Zr),e(Zr,Yr),e(Yr,mm),e(D,fm),e(D,ei),e(ei,ti),e(ti,Vn),e(Vn,go),e(go,_o),e(_o,oi),_(Jn,oi,null),e(go,gm),e(go,ni),e(ni,_m),e(Vn,km),e(Vn,si),e(si,vm),u(a,Gd,p),u(a,Nt,p),e(Nt,ko),e(ko,ai),_(Xn,ai,null),e(Nt,bm),e(Nt,ri),e(ri,ym),u(a,Dd,p),u(a,L,p),_(Zn,L,null),e(L,Tm),e(L,Yn),e(Yn,Pm),e(Yn,Qs),e(Qs,wm),e(Yn,xm),e(L,zm),e(L,es),e(es,Em),e(es,ts),e(ts,$m),e(es,qm),e(L,Fm),e(L,ii),e(ii,Mm),e(L,Cm),e(L,Qe),e(Qe,di),e(di,os),e(os,jm),e(Qe,Sm),e(Qe,li),e(li,ns),e(ns,Om),e(Qe,Am),e(Qe,ci),e(ci,ss),e(ss,Im),e(Qe,Lm),e(Qe,ui),e(ui,as),e(as,Nm),e(L,Gm),e(L,we),_(rs,we,null),e(we,Dm),e(we,Gt),e(Gt,Um),e(Gt,pi),e(pi,Bm),e(Gt,Wm),e(Gt,hi),e(hi,Km),e(Gt,Hm),e(we,Qm),_(vo,we,null),e(we,Rm),e(we,mi),e(mi,Vm),e(we,Jm),_(is,we,null),e(L,Xm),e(L,et),_(ds,et,null),e(et,Zm),e(et,fi),e(fi,Ym),e(et,ef),_(ls,et,null),e(L,tf),e(L,tt),_(cs,tt,null),e(tt,of),e(tt,gi),e(gi,nf),e(tt,sf),_(us,tt,null),u(a,Ud,p),u(a,Dt,p),e(Dt,bo),e(bo,_i),_(ps,_i,null),e(Dt,af),e(Dt,ki),e(ki,rf),u(a,Bd,p),u(a,N,p),_(hs,N,null),e(N,df),e(N,ms),e(ms,lf),e(ms,Rs),e(Rs,cf),e(ms,uf),e(N,pf),e(N,fs),e(fs,hf),e(fs,gs),e(gs,mf),e(fs,ff),e(N,gf),e(N,vi),e(vi,_f),e(N,kf),e(N,Re),e(Re,bi),e(bi,_s),e(_s,vf),e(Re,bf),e(Re,yi),e(yi,ks),e(ks,yf),e(Re,Tf),e(Re,Ti),e(Ti,vs),e(vs,Pf),e(Re,wf),e(Re,Pi),e(Pi,bs),e(bs,xf),e(N,zf),e(N,F),_(ys,F,null),e(F,Ef),e(F,Ut),e(Ut,$f),e(Ut,wi),e(wi,qf),e(Ut,Ff),e(Ut,xi),e(xi,Mf),e(Ut,Cf),e(F,jf),_(yo,F,null),e(F,Sf),e(F,zi),e(zi,Of),e(F,Af),e(F,Ei),e(Ei,$i),e($i,qi),e(qi,Fi),e(Fi,If),e(F,Lf),e(F,Mi),e(Mi,Ci),e(Ci,ji),e(ji,Si),e(Si,Nf),e(F,Gf),e(F,Oi),e(Oi,Ai),e(Ai,Ii),e(Ii,Li),e(Li,Df),e(F,Uf),e(F,Ni),e(Ni,Gi),e(Gi,Ts),e(Ts,To),e(To,Po),e(Po,Di),_(Ps,Di,null),e(To
,Bf),e(To,Ui),e(Ui,Wf),e(Ts,Kf),e(Ts,Bi),e(Bi,Hf),e(F,Qf),e(F,Wi),e(Wi,Rf),e(F,Vf),e(F,Ki),e(Ki,Hi),e(Hi,Qi),e(Qi,Ri),e(Ri,Jf),e(F,Xf),e(F,Vi),e(Vi,Ji),e(Ji,Xi),e(Xi,Zi),e(Zi,Zf),e(F,Yf),e(F,Yi),e(Yi,ed),e(ed,td),e(td,od),e(od,eg),e(F,tg),e(F,nd),e(nd,sd),e(sd,ad),e(ad,rd),e(rd,og),e(N,ng),e(N,ot),_(ws,ot,null),e(ot,sg),e(ot,id),e(id,ag),e(ot,rg),_(xs,ot,null),e(N,ig),e(N,nt),_(zs,nt,null),e(nt,dg),e(nt,dd),e(dd,lg),e(nt,cg),_(Es,nt,null),Wd=!0},p(a,[p]){const $s={};p&2&&($s.$$scope={dirty:p,ctx:a}),no.$set($s);const ld={};p&2&&(ld.$$scope={dirty:p,ctx:a}),ao.$set(ld);const cd={};p&2&&(cd.$$scope={dirty:p,ctx:a}),uo.$set(cd);const ud={};p&2&&(ud.$$scope={dirty:p,ctx:a}),po.$set(ud);const wo={};p&2&&(wo.$$scope={dirty:p,ctx:a}),mo.$set(wo);const pd={};p&2&&(pd.$$scope={dirty:p,ctx:a}),fo.$set(pd);const hd={};p&2&&(hd.$$scope={dirty:p,ctx:a}),vo.$set(hd);const qs={};p&2&&(qs.$$scope={dirty:p,ctx:a}),yo.$set(qs)},i(a){Wd||(k(w.$$.fragment,a),k(J.$$.fragment,a),k(Fe.$$.fragment,a),k(Io.$$.fragment,a),k(Go.$$.fragment,a),k(Wo.$$.fragment,a),k(Ko.$$.fragment,a),k(Ho.$$.fragment,a),k(Qo.$$.fragment,a),k(Vo.$$.fragment,a),k(Jo.$$.fragment,a),k(Xo.$$.fragment,a),k(tn.$$.fragment,a),k(an.$$.fragment,a),k(rn.$$.fragment,a),k(dn.$$.fragment,a),k(ln.$$.fragment,a),k(cn.$$.fragment,a),k(hn.$$.fragment,a),k(gn.$$.fragment,a),k(_n.$$.fragment,a),k(kn.$$.fragment,a),k(Tn.$$.fragment,a),k(no.$$.fragment,a),k(Pn.$$.fragment,a),k(wn.$$.fragment,a),k(xn.$$.fragment,a),k(qn.$$.fragment,a),k(ao.$$.fragment,a),k(Mn.$$.fragment,a),k(Cn.$$.fragment,a),k(Sn.$$.fragment,a),k(On.$$.fragment,a),k(An.$$.fragment,a),k(In.$$.fragment,a),k(uo.$$.fragment,a),k(Dn.$$.fragment,a),k(po.$$.fragment,a),k(Un.$$.fragment,a),k(Bn.$$.fragment,a),k(Wn.$$.fragment,a),k(mo.$$.fragment,a),k(Rn.$$.fragment,a),k(fo.$$.fragment,a),k(Jn.$$.fragment,a),k(Xn.$$.fragment,a),k(Zn.$$.fragment,a),k(rs.$$.fragment,a),k(vo.$$.fragment,a),k(is.$$.fragment,a),k(ds.$$.fragment,a),k(ls.$$.fragment,a),k(cs.$$.fragment,a),k(us.$$.fragment,a),k(ps.$$.fragment,a),k(hs.$$.fragment,a),k(ys.$$.fragment,a),k(yo.$$.fragment,a),k(Ps.$$.fragment,a),k(ws.$$.fragment,a),k(xs.$$.fragment,a),k(zs.$$.fragment,a),k(Es.$$.fragment,a),Wd=!0)},o(a){v(w.$$.fragment,a),v(J.$$.fragment,a),v(Fe.$$.fragment,a),v(Io.$$.fragment,a),v(Go.$$.fragment,a),v(Wo.$$.fragment,a),v(Ko.$$.fragment,a),v(Ho.$$.fragment,a),v(Qo.$$.fragment,a),v(Vo.$$.fragment,a),v(Jo.$$.fragment,a),v(Xo.$$.fragment,a),v(tn.$$.fragment,a),v(an.$$.fragment,a),v(rn.$$.fragment,a),v(dn.$$.fragment,a),v(ln.$$.fragment,a),v(cn.$$.fragment,a),v(hn.$$.fragment,a),v(gn.$$.fragment,a),v(_n.$$.fragment,a),v(kn.$$.fragment,a),v(Tn.$$.fragment,a),v(no.$$.fragment,a),v(Pn.$$.fragment,a),v(wn.$$.fragment,a),v(xn.$$.fragment,a),v(qn.$$.fragment,a),v(ao.$$.fragment,a),v(Mn.$$.fragment,a),v(Cn.$$.fragment,a),v(Sn.$$.fragment,a),v(On.$$.fragment,a),v(An.$$.fragment,a),v(In.$$.fragment,a),v(uo.$$.fragment,a),v(Dn.$$.fragment,a),v(po.$$.fragment,a),v(Un.$$.fragment,a),v(Bn.$$.fragment,a),v(Wn.$$.fragment,a),v(mo.$$.fragment,a),v(Rn.$$.fragment,a),v(fo.$$.fragment,a),v(Jn.$$.fragment,a),v(Xn.$$.fragment,a),v(Zn.$$.fragment,a),v(rs.$$.fragment,a),v(vo.$$.fragment,a),v(is.$$.fragment,a),v(ds.$$.fragment,a),v(ls.$$.fragment,a),v(cs.$$.fragment,a),v(us.$$.fragment,a),v(ps.$$.fragment,a),v(hs.$$.fragment,a),v(ys.$$.fragment,a),v(yo.$$.fragment,a),v(Ps.$$.fragment,a),v(ws.$$.fragment,a),v(xs.$$.fragment,a),v(zs.$$.fragment,a),v(Es.$$.fragment,a),Wd=!1},d(a){t(h),a&&t(z),a&&t(y),b(w),a&&t(ke),a&&t(q),a&&t(je),a&&t(B),b(J),a&&t(te),a&&t(o
e),a&&t(A),a&&t(ve),a&&t(Se),a&&t(ne),a&&t(se),a&&t(R),a&&t(Ve),a&&t(he),b(Fe),a&&t(Je),a&&t(V),a&&t(gd),a&&t(be),a&&t(_d),a&&t(vt),b(Io),a&&t(kd),a&&t(Xe),a&&t(vd),a&&t(bt),b(Go),a&&t(bd),a&&t(ye),a&&t(yd),a&&t(Pt),b(Wo),a&&t(Td),b(Ko,a),a&&t(Pd),a&&t(wt),b(Ho),a&&t(wd),a&&t(me),b(Qo),b(Vo),a&&t(xd),a&&t(Et),b(Jo),a&&t(zd),a&&t(Jt),a&&t(Ed),a&&t(H),b(Xo),b(tn),b(an),b(rn),b(dn),a&&t($d),a&&t($t),b(ln),a&&t(qd),a&&t(fe),b(cn),b(hn),b(gn),a&&t(Fd),a&&t(Ft),b(_n),a&&t(Md),a&&t(Me),b(kn),b(Tn),b(no),b(Pn),a&&t(Cd),a&&t(Ct),b(wn),a&&t(jd),a&&t(Ce),b(xn),b(qn),b(ao),b(Mn),a&&t(Sd),a&&t(St),b(Cn),a&&t(Od),a&&t(jn),b(Sn),b(On),a&&t(Ad),a&&t(Ot),b(An),a&&t(Id),a&&t(ge),b(In),b(uo),b(Dn),b(po),b(Un),a&&t(Ld),a&&t(It),b(Bn),a&&t(Nd),a&&t(_e),b(Wn),b(mo),b(Rn),b(fo),b(Jn),a&&t(Gd),a&&t(Nt),b(Xn),a&&t(Dd),a&&t(L),b(Zn),b(rs),b(vo),b(is),b(ds),b(ls),b(cs),b(us),a&&t(Ud),a&&t(Dt),b(ps),a&&t(Bd),a&&t(N),b(hs),b(ys),b(yo),b(Ps),b(ws),b(xs),b(zs),b(Es)}}}const Ab={local:"pegasus",sections:[{local:"overview",title:"Overview"},{local:"checkpoints",sections:[{local:"examples",title:"Examples"}],title:"Checkpoints"},{local:"implementation-notes",title:"Implementation Notes"},{local:"usage-example",title:"Usage Example"},{local:"transformers.PegasusConfig",title:"PegasusConfig"},{local:"transformers.PegasusTokenizer",title:"PegasusTokenizer"},{local:"transformers.PegasusTokenizerFast",title:"PegasusTokenizerFast"},{local:"transformers.PegasusModel",title:"PegasusModel"},{local:"transformers.PegasusForConditionalGeneration",title:"PegasusForConditionalGeneration"},{local:"transformers.PegasusForCausalLM",title:"PegasusForCausalLM"},{local:"transformers.TFPegasusModel",title:"TFPegasusModel"},{local:"transformers.TFPegasusForConditionalGeneration",title:"TFPegasusForConditionalGeneration"},{local:"transformers.FlaxPegasusModel",title:"FlaxPegasusModel"},{local:"transformers.FlaxPegasusForConditionalGeneration",title:"FlaxPegasusForConditionalGeneration"}],title:"Pegasus"};function Ib(U,h,z){let{fw:y}=h;return U.$$set=T=>{"fw"in T&&z(0,y=T.fw)},[y]}class Wb extends Pb{constructor(h){super();wb(this,h,Ib,Ob,xb,{fw:0})}}export{Wb as default,Ab as metadata};
9,939
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/mpnet.mdx-56646040.js
import{S as z_,i as E_,s as q_,e as n,k as l,w as M,t as a,L as C_,c as s,d as t,m as d,a as r,x as b,h as i,b as h,J as e,g as m,y as w,q as P,o as N,B as $}from"../../chunks/vendor-b1433968.js";import{T as Ne}from"../../chunks/Tip-c3840994.js";import{D as ne}from"../../chunks/Docstring-ff504c58.js";import{C as Ye}from"../../chunks/CodeBlock-a320dbd7.js";import{I as xe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function x_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function j_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function L_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function A_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function 
I_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function D_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function S_(Q){let p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me;return{c(){p=n("p"),y=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=n("ul"),k=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=n("li"),le=a("having all inputs as a list, tuple or dict in the first positional arguments."),U=l(),z=n("p"),J=a("This second option is useful when using "),S=n("code"),X=a("tf.keras.Model.fit"),de=a(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),ce=a("model(inputs)"),se=a("."),R=l(),A=n("p"),G=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),V=l(),E=n("ul"),q=n("li"),Y=a("a single Tensor with "),B=n("code"),re=a("input_ids"),Z=a(" only and nothing else: "),H=n("code"),ae=a("model(inputs_ids)"),ee=l(),C=n("li"),pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),te=a("model([input_ids, attention_mask])"),he=l(),j=n("li"),oe=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=n("code"),me=a('model({"input_ids": input_ids, "attention_mask": attention_mask})')},l(c){p=s(c,"P",{});var T=r(p);y=i(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=s(c,"UL",{});var K=r(g);k=s(K,"LI",{});var _e=r(k);v=i(_e,"having all inputs as keyword arguments (like PyTorch models), or"),_e.forEach(t),_=d(K),F=s(K,"LI",{});var ke=r(F);le=i(ke,"having all inputs as a list, tuple or dict in the first positional arguments."),ke.forEach(t),K.forEach(t),U=d(c),z=s(c,"P",{});var I=r(z);J=i(I,"This second option is useful when using "),S=s(I,"CODE",{});var ve=r(S);X=i(ve,"tf.keras.Model.fit"),ve.forEach(t),de=i(I,` method which currently requires having all the tensors in the first argument of the model call function: 
`),O=s(I,"CODE",{});var Te=r(O);ce=i(Te,"model(inputs)"),Te.forEach(t),se=i(I,"."),I.forEach(t),R=d(c),A=s(c,"P",{});var Me=r(A);G=i(Me,`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),Me.forEach(t),V=d(c),E=s(c,"UL",{});var L=r(E);q=s(L,"LI",{});var D=r(q);Y=i(D,"a single Tensor with "),B=s(D,"CODE",{});var be=r(B);re=i(be,"input_ids"),be.forEach(t),Z=i(D," only and nothing else: "),H=s(D,"CODE",{});var ge=r(H);ae=i(ge,"model(inputs_ids)"),ge.forEach(t),D.forEach(t),ee=d(L),C=s(L,"LI",{});var ue=r(C);pe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(ue,"CODE",{});var we=r(W);te=i(we,"model([input_ids, attention_mask])"),we.forEach(t),ue.forEach(t),he=d(L),j=s(L,"LI",{});var ie=r(j);oe=i(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=s(ie,"CODE",{});var fe=r(x);me=i(fe,'model({"input_ids": input_ids, "attention_mask": attention_mask})'),fe.forEach(t),ie.forEach(t),L.forEach(t)},m(c,T){m(c,p,T),e(p,y),m(c,f,T),m(c,g,T),e(g,k),e(k,v),e(g,_),e(g,F),e(F,le),m(c,U,T),m(c,z,T),e(z,J),e(z,S),e(S,X),e(z,de),e(z,O),e(O,ce),e(z,se),m(c,R,T),m(c,A,T),e(A,G),m(c,V,T),m(c,E,T),e(E,q),e(q,Y),e(q,B),e(B,re),e(q,Z),e(q,H),e(H,ae),e(E,ee),e(E,C),e(C,pe),e(C,W),e(W,te),e(E,he),e(E,j),e(j,oe),e(j,x),e(x,me)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(U),c&&t(z),c&&t(R),c&&t(A),c&&t(V),c&&t(E)}}}function O_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function W_(Q){let p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me;return{c(){p=n("p"),y=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=n("ul"),k=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=n("li"),le=a("having all inputs as a list, tuple or dict in the first positional arguments."),U=l(),z=n("p"),J=a("This second option is useful when using "),S=n("code"),X=a("tf.keras.Model.fit"),de=a(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),ce=a("model(inputs)"),se=a("."),R=l(),A=n("p"),G=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),V=l(),E=n("ul"),q=n("li"),Y=a("a single Tensor with "),B=n("code"),re=a("input_ids"),Z=a(" only and nothing else: "),H=n("code"),ae=a("model(inputs_ids)"),ee=l(),C=n("li"),pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),te=a("model([input_ids, attention_mask])"),he=l(),j=n("li"),oe=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: 
`),x=n("code"),me=a('model({"input_ids": input_ids, "attention_mask": attention_mask})')},l(c){p=s(c,"P",{});var T=r(p);y=i(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=s(c,"UL",{});var K=r(g);k=s(K,"LI",{});var _e=r(k);v=i(_e,"having all inputs as keyword arguments (like PyTorch models), or"),_e.forEach(t),_=d(K),F=s(K,"LI",{});var ke=r(F);le=i(ke,"having all inputs as a list, tuple or dict in the first positional arguments."),ke.forEach(t),K.forEach(t),U=d(c),z=s(c,"P",{});var I=r(z);J=i(I,"This second option is useful when using "),S=s(I,"CODE",{});var ve=r(S);X=i(ve,"tf.keras.Model.fit"),ve.forEach(t),de=i(I,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(I,"CODE",{});var Te=r(O);ce=i(Te,"model(inputs)"),Te.forEach(t),se=i(I,"."),I.forEach(t),R=d(c),A=s(c,"P",{});var Me=r(A);G=i(Me,`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),Me.forEach(t),V=d(c),E=s(c,"UL",{});var L=r(E);q=s(L,"LI",{});var D=r(q);Y=i(D,"a single Tensor with "),B=s(D,"CODE",{});var be=r(B);re=i(be,"input_ids"),be.forEach(t),Z=i(D," only and nothing else: "),H=s(D,"CODE",{});var ge=r(H);ae=i(ge,"model(inputs_ids)"),ge.forEach(t),D.forEach(t),ee=d(L),C=s(L,"LI",{});var ue=r(C);pe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(ue,"CODE",{});var we=r(W);te=i(we,"model([input_ids, attention_mask])"),we.forEach(t),ue.forEach(t),he=d(L),j=s(L,"LI",{});var ie=r(j);oe=i(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=s(ie,"CODE",{});var fe=r(x);me=i(fe,'model({"input_ids": input_ids, "attention_mask": attention_mask})'),fe.forEach(t),ie.forEach(t),L.forEach(t)},m(c,T){m(c,p,T),e(p,y),m(c,f,T),m(c,g,T),e(g,k),e(k,v),e(g,_),e(g,F),e(F,le),m(c,U,T),m(c,z,T),e(z,J),e(z,S),e(S,X),e(z,de),e(z,O),e(O,ce),e(z,se),m(c,R,T),m(c,A,T),e(A,G),m(c,V,T),m(c,E,T),e(E,q),e(q,Y),e(q,B),e(B,re),e(q,Z),e(q,H),e(H,ae),e(E,ee),e(E,C),e(C,pe),e(C,W),e(W,te),e(E,he),e(E,j),e(j,oe),e(j,x),e(x,me)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(U),c&&t(z),c&&t(R),c&&t(A),c&&t(V),c&&t(E)}}}function Q_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function B_(Q){let p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me;return{c(){p=n("p"),y=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=n("ul"),k=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=n("li"),le=a("having all inputs as a list, tuple or dict in the first positional arguments."),U=l(),z=n("p"),J=a("This second option is useful when using "),S=n("code"),X=a("tf.keras.Model.fit"),de=a(` method which currently requires 
having all the tensors in the first argument of the model call function: `),O=n("code"),ce=a("model(inputs)"),se=a("."),R=l(),A=n("p"),G=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),V=l(),E=n("ul"),q=n("li"),Y=a("a single Tensor with "),B=n("code"),re=a("input_ids"),Z=a(" only and nothing else: "),H=n("code"),ae=a("model(inputs_ids)"),ee=l(),C=n("li"),pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),te=a("model([input_ids, attention_mask])"),he=l(),j=n("li"),oe=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=n("code"),me=a('model({"input_ids": input_ids, "attention_mask": attention_mask})')},l(c){p=s(c,"P",{});var T=r(p);y=i(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=s(c,"UL",{});var K=r(g);k=s(K,"LI",{});var _e=r(k);v=i(_e,"having all inputs as keyword arguments (like PyTorch models), or"),_e.forEach(t),_=d(K),F=s(K,"LI",{});var ke=r(F);le=i(ke,"having all inputs as a list, tuple or dict in the first positional arguments."),ke.forEach(t),K.forEach(t),U=d(c),z=s(c,"P",{});var I=r(z);J=i(I,"This second option is useful when using "),S=s(I,"CODE",{});var ve=r(S);X=i(ve,"tf.keras.Model.fit"),ve.forEach(t),de=i(I,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(I,"CODE",{});var Te=r(O);ce=i(Te,"model(inputs)"),Te.forEach(t),se=i(I,"."),I.forEach(t),R=d(c),A=s(c,"P",{});var Me=r(A);G=i(Me,`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),Me.forEach(t),V=d(c),E=s(c,"UL",{});var L=r(E);q=s(L,"LI",{});var D=r(q);Y=i(D,"a single Tensor with "),B=s(D,"CODE",{});var be=r(B);re=i(be,"input_ids"),be.forEach(t),Z=i(D," only and nothing else: "),H=s(D,"CODE",{});var ge=r(H);ae=i(ge,"model(inputs_ids)"),ge.forEach(t),D.forEach(t),ee=d(L),C=s(L,"LI",{});var ue=r(C);pe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(ue,"CODE",{});var we=r(W);te=i(we,"model([input_ids, attention_mask])"),we.forEach(t),ue.forEach(t),he=d(L),j=s(L,"LI",{});var ie=r(j);oe=i(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=s(ie,"CODE",{});var fe=r(x);me=i(fe,'model({"input_ids": input_ids, "attention_mask": attention_mask})'),fe.forEach(t),ie.forEach(t),L.forEach(t)},m(c,T){m(c,p,T),e(p,y),m(c,f,T),m(c,g,T),e(g,k),e(k,v),e(g,_),e(g,F),e(F,le),m(c,U,T),m(c,z,T),e(z,J),e(z,S),e(S,X),e(z,de),e(z,O),e(O,ce),e(z,se),m(c,R,T),m(c,A,T),e(A,G),m(c,V,T),m(c,E,T),e(E,q),e(q,Y),e(q,B),e(B,re),e(q,Z),e(q,H),e(H,ae),e(E,ee),e(E,C),e(C,pe),e(C,W),e(W,te),e(E,he),e(E,j),e(j,oe),e(j,x),e(x,me)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(U),c&&t(z),c&&t(R),c&&t(A),c&&t(V),c&&t(E)}}}function H_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` 
instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function R_(Q){let p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me;return{c(){p=n("p"),y=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=n("ul"),k=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=n("li"),le=a("having all inputs as a list, tuple or dict in the first positional arguments."),U=l(),z=n("p"),J=a("This second option is useful when using "),S=n("code"),X=a("tf.keras.Model.fit"),de=a(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),ce=a("model(inputs)"),se=a("."),R=l(),A=n("p"),G=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),V=l(),E=n("ul"),q=n("li"),Y=a("a single Tensor with "),B=n("code"),re=a("input_ids"),Z=a(" only and nothing else: "),H=n("code"),ae=a("model(inputs_ids)"),ee=l(),C=n("li"),pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),te=a("model([input_ids, attention_mask])"),he=l(),j=n("li"),oe=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=n("code"),me=a('model({"input_ids": input_ids, "attention_mask": attention_mask})')},l(c){p=s(c,"P",{});var T=r(p);y=i(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=s(c,"UL",{});var K=r(g);k=s(K,"LI",{});var _e=r(k);v=i(_e,"having all inputs as keyword arguments (like PyTorch models), or"),_e.forEach(t),_=d(K),F=s(K,"LI",{});var ke=r(F);le=i(ke,"having all inputs as a list, tuple or dict in the first positional arguments."),ke.forEach(t),K.forEach(t),U=d(c),z=s(c,"P",{});var I=r(z);J=i(I,"This second option is useful when using "),S=s(I,"CODE",{});var ve=r(S);X=i(ve,"tf.keras.Model.fit"),ve.forEach(t),de=i(I,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(I,"CODE",{});var Te=r(O);ce=i(Te,"model(inputs)"),Te.forEach(t),se=i(I,"."),I.forEach(t),R=d(c),A=s(c,"P",{});var Me=r(A);G=i(Me,`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),Me.forEach(t),V=d(c),E=s(c,"UL",{});var L=r(E);q=s(L,"LI",{});var D=r(q);Y=i(D,"a single Tensor with "),B=s(D,"CODE",{});var be=r(B);re=i(be,"input_ids"),be.forEach(t),Z=i(D," only and nothing else: "),H=s(D,"CODE",{});var ge=r(H);ae=i(ge,"model(inputs_ids)"),ge.forEach(t),D.forEach(t),ee=d(L),C=s(L,"LI",{});var ue=r(C);pe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(ue,"CODE",{});var we=r(W);te=i(we,"model([input_ids, attention_mask])"),we.forEach(t),ue.forEach(t),he=d(L),j=s(L,"LI",{});var ie=r(j);oe=i(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=s(ie,"CODE",{});var fe=r(x);me=i(fe,'model({"input_ids": input_ids, "attention_mask": 
attention_mask})'),fe.forEach(t),ie.forEach(t),L.forEach(t)},m(c,T){m(c,p,T),e(p,y),m(c,f,T),m(c,g,T),e(g,k),e(k,v),e(g,_),e(g,F),e(F,le),m(c,U,T),m(c,z,T),e(z,J),e(z,S),e(S,X),e(z,de),e(z,O),e(O,ce),e(z,se),m(c,R,T),m(c,A,T),e(A,G),m(c,V,T),m(c,E,T),e(E,q),e(q,Y),e(q,B),e(B,re),e(q,Z),e(q,H),e(H,ae),e(E,ee),e(E,C),e(C,pe),e(C,W),e(W,te),e(E,he),e(E,j),e(j,oe),e(j,x),e(x,me)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(U),c&&t(z),c&&t(R),c&&t(A),c&&t(V),c&&t(E)}}}function U_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function V_(Q){let p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me;return{c(){p=n("p"),y=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=n("ul"),k=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=n("li"),le=a("having all inputs as a list, tuple or dict in the first positional arguments."),U=l(),z=n("p"),J=a("This second option is useful when using "),S=n("code"),X=a("tf.keras.Model.fit"),de=a(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),ce=a("model(inputs)"),se=a("."),R=l(),A=n("p"),G=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),V=l(),E=n("ul"),q=n("li"),Y=a("a single Tensor with "),B=n("code"),re=a("input_ids"),Z=a(" only and nothing else: "),H=n("code"),ae=a("model(inputs_ids)"),ee=l(),C=n("li"),pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),te=a("model([input_ids, attention_mask])"),he=l(),j=n("li"),oe=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=n("code"),me=a('model({"input_ids": input_ids, "attention_mask": attention_mask})')},l(c){p=s(c,"P",{});var T=r(p);y=i(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=s(c,"UL",{});var K=r(g);k=s(K,"LI",{});var _e=r(k);v=i(_e,"having all inputs as keyword arguments (like PyTorch models), or"),_e.forEach(t),_=d(K),F=s(K,"LI",{});var ke=r(F);le=i(ke,"having all inputs as a list, tuple or dict in the first positional arguments."),ke.forEach(t),K.forEach(t),U=d(c),z=s(c,"P",{});var I=r(z);J=i(I,"This second option is useful when using "),S=s(I,"CODE",{});var ve=r(S);X=i(ve,"tf.keras.Model.fit"),ve.forEach(t),de=i(I,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(I,"CODE",{});var Te=r(O);ce=i(Te,"model(inputs)"),Te.forEach(t),se=i(I,"."),I.forEach(t),R=d(c),A=s(c,"P",{});var Me=r(A);G=i(Me,`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument 
:`),Me.forEach(t),V=d(c),E=s(c,"UL",{});var L=r(E);q=s(L,"LI",{});var D=r(q);Y=i(D,"a single Tensor with "),B=s(D,"CODE",{});var be=r(B);re=i(be,"input_ids"),be.forEach(t),Z=i(D," only and nothing else: "),H=s(D,"CODE",{});var ge=r(H);ae=i(ge,"model(inputs_ids)"),ge.forEach(t),D.forEach(t),ee=d(L),C=s(L,"LI",{});var ue=r(C);pe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(ue,"CODE",{});var we=r(W);te=i(we,"model([input_ids, attention_mask])"),we.forEach(t),ue.forEach(t),he=d(L),j=s(L,"LI",{});var ie=r(j);oe=i(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=s(ie,"CODE",{});var fe=r(x);me=i(fe,'model({"input_ids": input_ids, "attention_mask": attention_mask})'),fe.forEach(t),ie.forEach(t),L.forEach(t)},m(c,T){m(c,p,T),e(p,y),m(c,f,T),m(c,g,T),e(g,k),e(k,v),e(g,_),e(g,F),e(F,le),m(c,U,T),m(c,z,T),e(z,J),e(z,S),e(S,X),e(z,de),e(z,O),e(O,ce),e(z,se),m(c,R,T),m(c,A,T),e(A,G),m(c,V,T),m(c,E,T),e(E,q),e(q,Y),e(q,B),e(B,re),e(q,Z),e(q,H),e(H,ae),e(E,ee),e(E,C),e(C,pe),e(C,W),e(W,te),e(E,he),e(E,j),e(j,oe),e(j,x),e(x,me)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(U),c&&t(z),c&&t(R),c&&t(A),c&&t(V),c&&t(E)}}}function K_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function J_(Q){let p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me;return{c(){p=n("p"),y=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=n("ul"),k=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=n("li"),le=a("having all inputs as a list, tuple or dict in the first positional arguments."),U=l(),z=n("p"),J=a("This second option is useful when using "),S=n("code"),X=a("tf.keras.Model.fit"),de=a(` method which currently requires having all the tensors in the first argument of the model call function: `),O=n("code"),ce=a("model(inputs)"),se=a("."),R=l(),A=n("p"),G=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),V=l(),E=n("ul"),q=n("li"),Y=a("a single Tensor with "),B=n("code"),re=a("input_ids"),Z=a(" only and nothing else: "),H=n("code"),ae=a("model(inputs_ids)"),ee=l(),C=n("li"),pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),te=a("model([input_ids, attention_mask])"),he=l(),j=n("li"),oe=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=n("code"),me=a('model({"input_ids": input_ids, "attention_mask": attention_mask})')},l(c){p=s(c,"P",{});var T=r(p);y=i(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=s(c,"UL",{});var K=r(g);k=s(K,"LI",{});var _e=r(k);v=i(_e,"having all inputs as keyword 
arguments (like PyTorch models), or"),_e.forEach(t),_=d(K),F=s(K,"LI",{});var ke=r(F);le=i(ke,"having all inputs as a list, tuple or dict in the first positional arguments."),ke.forEach(t),K.forEach(t),U=d(c),z=s(c,"P",{});var I=r(z);J=i(I,"This second option is useful when using "),S=s(I,"CODE",{});var ve=r(S);X=i(ve,"tf.keras.Model.fit"),ve.forEach(t),de=i(I,` method which currently requires having all the tensors in the first argument of the model call function: `),O=s(I,"CODE",{});var Te=r(O);ce=i(Te,"model(inputs)"),Te.forEach(t),se=i(I,"."),I.forEach(t),R=d(c),A=s(c,"P",{});var Me=r(A);G=i(Me,`If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the first positional argument :`),Me.forEach(t),V=d(c),E=s(c,"UL",{});var L=r(E);q=s(L,"LI",{});var D=r(q);Y=i(D,"a single Tensor with "),B=s(D,"CODE",{});var be=r(B);re=i(be,"input_ids"),be.forEach(t),Z=i(D," only and nothing else: "),H=s(D,"CODE",{});var ge=r(H);ae=i(ge,"model(inputs_ids)"),ge.forEach(t),D.forEach(t),ee=d(L),C=s(L,"LI",{});var ue=r(C);pe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(ue,"CODE",{});var we=r(W);te=i(we,"model([input_ids, attention_mask])"),we.forEach(t),ue.forEach(t),he=d(L),j=s(L,"LI",{});var ie=r(j);oe=i(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),x=s(ie,"CODE",{});var fe=r(x);me=i(fe,'model({"input_ids": input_ids, "attention_mask": attention_mask})'),fe.forEach(t),ie.forEach(t),L.forEach(t)},m(c,T){m(c,p,T),e(p,y),m(c,f,T),m(c,g,T),e(g,k),e(k,v),e(g,_),e(g,F),e(F,le),m(c,U,T),m(c,z,T),e(z,J),e(z,S),e(S,X),e(z,de),e(z,O),e(O,ce),e(z,se),m(c,R,T),m(c,A,T),e(A,G),m(c,V,T),m(c,E,T),e(E,q),e(q,Y),e(q,B),e(B,re),e(q,Z),e(q,H),e(H,ae),e(E,ee),e(E,C),e(C,pe),e(C,W),e(W,te),e(E,he),e(E,j),e(j,oe),e(j,x),e(x,me)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(U),c&&t(z),c&&t(R),c&&t(A),c&&t(V),c&&t(E)}}}function X_(Q){let p,y,f,g,k;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=s(v,"P",{});var _=r(p);y=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var F=r(f);g=i(F,"Module"),F.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,p,_),e(p,y),e(p,f),e(f,g),e(p,k)},d(v){v&&t(p)}}}function G_(Q){let 
p,y,f,g,k,v,_,F,le,U,z,J,S,X,de,O,ce,se,R,A,G,V,E,q,Y,B,re,Z,H,ae,ee,C,pe,W,te,he,j,oe,x,me,c,T,K,_e,ke,I,ve,Te,Me,L,D,be,ge,ue,we,ie,fe,oo,Er,Wo,Al,qr,Il,mi,je,Qo,Dl,it,Sl,Is,Ol,Wl,Ds,Ql,Bl,Bo,Hl,Rl,Ul,yt,Vl,Ss,Kl,Jl,Os,Xl,Gl,Yl,Cr,Zl,ed,Ho,ui,Ft,no,xr,Ro,td,jr,od,fi,$e,Uo,nd,Vo,sd,Ws,rd,ad,id,dt,Ko,ld,Lr,dd,cd,Jo,Qs,pd,Ar,hd,md,Bs,ud,Ir,fd,gd,so,Xo,_d,Go,vd,Dr,kd,Td,Md,ro,Yo,bd,Sr,wd,Pd,Or,gi,zt,ao,Wr,Zo,Nd,Qr,$d,_i,Ze,en,yd,tn,Fd,Br,zd,Ed,qd,on,Cd,Hs,xd,jd,Ld,io,nn,Ad,Hr,Id,vi,Et,lo,Rr,sn,Dd,Ur,Sd,ki,Le,rn,Od,Vr,Wd,Qd,an,Bd,Rs,Hd,Rd,Ud,ln,Vd,dn,Kd,Jd,Xd,Oe,cn,Gd,qt,Yd,Us,Zd,ec,Kr,tc,oc,nc,co,sc,Jr,rc,ac,pn,Ti,Ct,po,Xr,hn,ic,Gr,lc,Mi,mn,We,un,dc,xt,cc,Vs,pc,hc,Yr,mc,uc,fc,ho,gc,Zr,_c,vc,fn,bi,jt,mo,ea,gn,kc,ta,Tc,wi,Ae,_n,Mc,oa,bc,wc,vn,Pc,Ks,Nc,$c,yc,kn,Fc,Tn,zc,Ec,qc,Pe,Mn,Cc,Lt,xc,Js,jc,Lc,na,Ac,Ic,Dc,uo,Sc,sa,Oc,Wc,bn,Qc,ra,Bc,Hc,wn,Pi,At,fo,aa,Pn,Rc,ia,Uc,Ni,Ie,Nn,Vc,la,Kc,Jc,$n,Xc,Xs,Gc,Yc,Zc,yn,ep,Fn,tp,op,np,Qe,zn,sp,It,rp,Gs,ap,ip,da,lp,dp,cp,go,pp,ca,hp,mp,En,$i,Dt,_o,pa,qn,up,ha,fp,yi,De,Cn,gp,ma,_p,vp,xn,kp,Ys,Tp,Mp,bp,jn,wp,Ln,Pp,Np,$p,Be,An,yp,St,Fp,Zs,zp,Ep,ua,qp,Cp,xp,vo,jp,fa,Lp,Ap,In,Fi,Ot,ko,ga,Dn,Ip,_a,Dp,zi,Se,Sn,Sp,Wt,Op,va,Wp,Qp,ka,Bp,Hp,Rp,On,Up,er,Vp,Kp,Jp,Wn,Xp,Qn,Gp,Yp,Zp,He,Bn,eh,Qt,th,tr,oh,nh,Ta,sh,rh,ah,To,ih,Ma,lh,dh,Hn,Ei,Bt,Mo,ba,Rn,ch,wa,ph,qi,ye,Un,hh,Pa,mh,uh,Vn,fh,or,gh,_h,vh,Kn,kh,Jn,Th,Mh,bh,bo,wh,Re,Xn,Ph,Ht,Nh,nr,$h,yh,Na,Fh,zh,Eh,wo,qh,$a,Ch,xh,Gn,Ci,Rt,Po,ya,Yn,jh,Fa,Lh,xi,Fe,Zn,Ah,es,Ih,za,Dh,Sh,Oh,ts,Wh,sr,Qh,Bh,Hh,os,Rh,ns,Uh,Vh,Kh,No,Jh,Ue,ss,Xh,Ut,Gh,rr,Yh,Zh,Ea,em,tm,om,$o,nm,qa,sm,rm,rs,ji,Vt,yo,Ca,as,am,xa,im,Li,ze,is,lm,ja,dm,cm,ls,pm,ar,hm,mm,um,ds,fm,cs,gm,_m,vm,Fo,km,Ve,ps,Tm,Kt,Mm,ir,bm,wm,La,Pm,Nm,$m,zo,ym,Aa,Fm,zm,hs,Ai,Jt,Eo,Ia,ms,Em,Da,qm,Ii,Ee,us,Cm,Sa,xm,jm,fs,Lm,lr,Am,Im,Dm,gs,Sm,_s,Om,Wm,Qm,qo,Bm,Ke,vs,Hm,Xt,Rm,dr,Um,Vm,Oa,Km,Jm,Xm,Co,Gm,Wa,Ym,Zm,ks,Di,Gt,xo,Qa,Ts,eu,Ba,tu,Si,qe,Ms,ou,Ha,nu,su,bs,ru,cr,au,iu,lu,ws,du,Ps,cu,pu,hu,jo,mu,Je,Ns,uu,Yt,fu,pr,gu,_u,Ra,vu,ku,Tu,Lo,Mu,Ua,bu,wu,$s,Oi,Zt,Ao,Va,ys,Pu,Ka,Nu,Wi,Ce,Fs,$u,eo,yu,Ja,Fu,zu,Xa,Eu,qu,Cu,zs,xu,hr,ju,Lu,Au,Es,Iu,qs,Du,Su,Ou,Io,Wu,Xe,Cs,Qu,to,Bu,mr,Hu,Ru,Ga,Uu,Vu,Ku,Do,Ju,Ya,Xu,Gu,xs,Qi;return v=new xe({}),X=new xe({}),Wo=new xe({}),Qo=new ne({props:{name:"class transformers.MPNetConfig",anchor:"transformers.MPNetConfig",parameters:[{name:"vocab_size",val:" = 30527"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"relative_attention_num_buckets",val:" = 32"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/configuration_mpnet.py#L29",parametersDescription:[{anchor:"transformers.MPNetConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30527) &#x2014; Vocabulary size of the MPNet model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetModel">MPNetModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetModel">TFMPNetModel</a>.`,name:"vocab_size"},{anchor:"transformers.MPNetConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.MPNetConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.MPNetConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.MPNetConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.MPNetConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.MPNetConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.MPNetConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.MPNetConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.MPNetConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.MPNetConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.MPNetConfig.relative_attention_num_buckets",description:`<strong>relative_attention_num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer.`,name:"relative_attention_num_buckets"}]}}),Ho=new Ye({props:{code:`from transformers import MPNetModel, MPNetConfig # Initializing a MPNet mpnet-base style configuration configuration = MPNetConfig() # Initializing a model from the mpnet-base style configuration model = MPNetModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetModel, MPNetConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a MPNet mpnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MPNetConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the mpnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ro=new xe({}),Uo=new ne({props:{name:"class transformers.MPNetTokenizer",anchor:"transformers.MPNetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '[UNK]'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/tokenization_mpnet.py#L66",parametersDescription:[{anchor:"transformers.MPNetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.MPNetTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.MPNetTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before 
WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.MPNetTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.MPNetTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.MPNetTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.MPNetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.MPNetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.MPNetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MPNetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MPNetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.MPNetTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),Ko=new ne({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.MPNetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/tokenization_mpnet.py#L230",parametersDescription:[{anchor:"transformers.MPNetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.MPNetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Xo=new ne({props:{name:"get_special_tokens_mask",anchor:"transformers.MPNetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/tokenization_mpnet.py#L255",parametersDescription:[{anchor:"transformers.MPNetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.MPNetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.MPNetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Set to True if the token list is already formatted with special tokens for the model`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Yo=new ne({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.MPNetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/tokenization_mpnet.py#L282",parametersDescription:[{anchor:"transformers.MPNetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.MPNetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Zo=new xe({}),en=new ne({props:{name:"class transformers.MPNetTokenizerFast",anchor:"transformers.MPNetTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '[UNK]'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/tokenization_mpnet_fast.py#L51",parametersDescription:[{anchor:"transformers.MPNetTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.MPNetTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.MPNetTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.MPNetTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.MPNetTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.MPNetTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.MPNetTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MPNetTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MPNetTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.MPNetTokenizerFast.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),nn=new ne({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/tokenization_mpnet_fast.py#L190",parametersDescription:[{anchor:"transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),sn=new xe({}),rn=new ne({props:{name:"class transformers.MPNetModel",anchor:"transformers.MPNetModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L483",parametersDescription:[{anchor:"transformers.MPNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),cn=new ne({props:{name:"forward",anchor:"transformers.MPNetModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L512",parametersDescription:[{anchor:"transformers.MPNetModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MPNetModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MPNetModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MPNetModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MPNetModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MPNetModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MPNetModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MPNetModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),co=new Ne({props:{$$slots:{default:[x_]},$$scope:{ctx:Q}}}),pn=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetModel import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetModel.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> MPNetTokenizer, MPNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),hn=new xe({}),un=new ne({props:{name:"forward",anchor:"transformers.MPNetForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L595",parametersDescription:[{anchor:"transformers.MPNetForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MPNetForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MPNetForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MPNetForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MPNetForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MPNetForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MPNetForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MPNetForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MPNetForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ho=new Ne({props:{$$slots:{default:[j_]},$$scope:{ctx:Q}}}),fn=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetForMaskedLM import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetForMaskedLM.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),gn=new xe({}),_n=new ne({props:{name:"class transformers.MPNetForSequenceClassification",anchor:"transformers.MPNetForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L684",parametersDescription:[{anchor:"transformers.MPNetForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mn=new ne({props:{name:"forward",anchor:"transformers.MPNetForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L697",parametersDescription:[{anchor:"transformers.MPNetForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MPNetForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MPNetForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MPNetForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MPNetForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MPNetForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MPNetForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MPNetForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MPNetForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),uo=new Ne({props:{$$slots:{default:[L_]},$$scope:{ctx:Q}}}),bn=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetForSequenceClassification import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetForSequenceClassification.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),wn=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetForSequenceClassification import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetForSequenceClassification.from_pretrained('microsoft/mpnet-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pn=new xe({}),Nn=new ne({props:{name:"class transformers.MPNetForMultipleChoice",anchor:"transformers.MPNetForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L778",parametersDescription:[{anchor:"transformers.MPNetForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zn=new ne({props:{name:"forward",anchor:"transformers.MPNetForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L791",parametersDescription:[{anchor:"transformers.MPNetForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MPNetForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MPNetForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MPNetForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MPNetForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MPNetForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MPNetForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MPNetForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MPNetForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),go=new Ne({props:{$$slots:{default:[A_]},$$scope:{ctx:Q}}}),En=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetForMultipleChoice import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetForMultipleChoice.from_pretrained('microsoft/mpnet-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qn=new xe({}),Cn=new ne({props:{name:"class transformers.MPNetForTokenClassification",anchor:"transformers.MPNetForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L868",parametersDescription:[{anchor:"transformers.MPNetForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),An=new ne({props:{name:"forward",anchor:"transformers.MPNetForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L883",parametersDescription:[{anchor:"transformers.MPNetForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MPNetForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MPNetForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MPNetForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MPNetForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MPNetForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MPNetForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MPNetForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MPNetForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vo=new Ne({props:{$$slots:{default:[I_]},$$scope:{ctx:Q}}}),In=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetForTokenClassification import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetForTokenClassification.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Dn=new xe({}),Sn=new ne({props:{name:"class transformers.MPNetForQuestionAnswering",anchor:"transformers.MPNetForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L977",parametersDescription:[{anchor:"transformers.MPNetForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bn=new ne({props:{name:"forward",anchor:"transformers.MPNetForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_mpnet.py#L991",parametersDescription:[{anchor:"transformers.MPNetForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MPNetForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MPNetForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MPNetForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MPNetForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MPNetForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MPNetForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MPNetForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MPNetForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.MPNetForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),To=new Ne({props:{$$slots:{default:[D_]},$$scope:{ctx:Q}}}),Hn=new Ye({props:{code:`from transformers import MPNetTokenizer, MPNetForQuestionAnswering import torch tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = MPNetForQuestionAnswering.from_pretrained('microsoft/mpnet-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Rn=new xe({}),Un=new ne({props:{name:"class transformers.TFMPNetModel",anchor:"transformers.TFMPNetModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L679",parametersDescription:[{anchor:"transformers.TFMPNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bo=new Ne({props:{$$slots:{default:[S_]},$$scope:{ctx:Q}}}),Xn=new ne({props:{name:"call",anchor:"transformers.TFMPNetModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L684",parametersDescription:[{anchor:"transformers.TFMPNetModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMPNetModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMPNetModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMPNetModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMPNetModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMPNetModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMPNetModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMPNetModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMPNetModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),wo=new Ne({props:{$$slots:{default:[O_]},$$scope:{ctx:Q}}}),Gn=new Ye({props:{code:`from transformers import MPNetTokenizer, TFMPNetModel import tensorflow as tf tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = TFMPNetModel.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span 
class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Yn=new xe({}),Zn=new ne({props:{name:"class transformers.TFMPNetForMaskedLM",anchor:"transformers.TFMPNetForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L796",parametersDescription:[{anchor:"transformers.TFMPNetForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),No=new Ne({props:{$$slots:{default:[W_]},$$scope:{ctx:Q}}}),ss=new ne({props:{name:"call",anchor:"transformers.TFMPNetForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L813",parametersDescription:[{anchor:"transformers.TFMPNetForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMPNetForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMPNetForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMPNetForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMPNetForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMPNetForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMPNetForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMPNetForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMPNetForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMPNetForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),$o=new Ne({props:{$$slots:{default:[Q_]},$$scope:{ctx:Q}}}),rs=new Ye({props:{code:`from transformers import MPNetTokenizer, TFMPNetForMaskedLM import tensorflow as tf tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = TFMPNetForMaskedLM.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") 
inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),as=new xe({}),is=new ne({props:{name:"class transformers.TFMPNetForSequenceClassification",anchor:"transformers.TFMPNetForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L922",parametersDescription:[{anchor:"transformers.TFMPNetForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fo=new Ne({props:{$$slots:{default:[B_]},$$scope:{ctx:Q}}}),ps=new ne({props:{name:"call",anchor:"transformers.TFMPNetForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L933",parametersDescription:[{anchor:"transformers.TFMPNetForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMPNetForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMPNetForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMPNetForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMPNetForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMPNetForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMPNetForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMPNetForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMPNetForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMPNetForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),zo=new Ne({props:{$$slots:{default:[H_]},$$scope:{ctx:Q}}}),hs=new Ye({props:{code:`from transformers import MPNetTokenizer, TFMPNetForSequenceClassification import tensorflow as tf tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = TFMPNetForSequenceClassification.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ms=new xe({}),us=new ne({props:{name:"class transformers.TFMPNetForMultipleChoice",anchor:"transformers.TFMPNetForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1018",parametersDescription:[{anchor:"transformers.TFMPNetForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qo=new Ne({props:{$$slots:{default:[R_]},$$scope:{ctx:Q}}}),vs=new ne({props:{name:"call",anchor:"transformers.TFMPNetForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1038",parametersDescription:[{anchor:"transformers.TFMPNetForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMPNetForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMPNetForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMPNetForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMPNetForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMPNetForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMPNetForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMPNetForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMPNetForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMPNetForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Co=new Ne({props:{$$slots:{default:[U_]},$$scope:{ctx:Q}}}),ks=new Ye({props:{code:`from transformers import MPNetTokenizer, TFMPNetForMultipleChoice import tensorflow as tf tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = TFMPNetForMultipleChoice.from_pretrained('microsoft/mpnet-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." 
choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ts=new xe({}),Ms=new ne({props:{name:"class transformers.TFMPNetForTokenClassification",anchor:"transformers.TFMPNetForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1155",parametersDescription:[{anchor:"transformers.TFMPNetForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),jo=new Ne({props:{$$slots:{default:[V_]},$$scope:{ctx:Q}}}),Ns=new ne({props:{name:"call",anchor:"transformers.TFMPNetForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1169",parametersDescription:[{anchor:"transformers.TFMPNetForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMPNetForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMPNetForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMPNetForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMPNetForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMPNetForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMPNetForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMPNetForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMPNetForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMPNetForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Lo=new Ne({props:{$$slots:{default:[K_]},$$scope:{ctx:Q}}}),$s=new Ye({props:{code:`from transformers import MPNetTokenizer, TFMPNetForTokenClassification import tensorflow as tf tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = TFMPNetForTokenClassification.from_pretrained('microsoft/mpnet-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ys=new xe({}),Fs=new ne({props:{name:"class transformers.TFMPNetForQuestionAnswering",anchor:"transformers.TFMPNetForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1254",parametersDescription:[{anchor:"transformers.TFMPNetForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Io=new Ne({props:{$$slots:{default:[J_]},$$scope:{ctx:Q}}}),Cs=new ne({props:{name:"call",anchor:"transformers.TFMPNetForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1267",parametersDescription:[{anchor:"transformers.TFMPNetForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFMPNetForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted 
average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Do=new Ne({props:{$$slots:{default:[X_]},$$scope:{ctx:Q}}}),xs=new Ye({props:{code:`from transformers import MPNetTokenizer, TFMPNetForQuestionAnswering import tensorflow as tf tokenizer = MPNetTokenizer.from_pretrained('microsoft/mpnet-base') model = TFMPNetForQuestionAnswering.from_pretrained('microsoft/mpnet-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;microsoft/mpnet-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){p=n("meta"),y=l(),f=n("h1"),g=n("a"),k=n("span"),M(v.$$.fragment),_=l(),F=n("span"),le=a("MPNet"),U=l(),z=n("h2"),J=n("a"),S=n("span"),M(X.$$.fragment),de=l(),O=n("span"),ce=a("Overview"),se=l(),R=n("p"),A=a("The MPNet model was proposed in "),G=n("a"),V=a("MPNet: Masked and Permuted Pre-training for Language Understanding"),E=a(" by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu."),q=l(),Y=n("p"),B=a(`MPNet adopts a novel pre-training method, named masked and permuted language modeling, to inherit the advantages of masked language modeling and permuted language modeling for natural language understanding.`),re=l(),Z=n("p"),H=a("The abstract from the paper is the 
following:"),ae=l(),ee=n("p"),C=n("em"),pe=a(`BERT adopts masked language modeling (MLM) for pre-training and is one of the most successful pre-training models. Since BERT neglects dependency among predicted tokens, XLNet introduces permuted language modeling (PLM) for pre-training to address this problem. However, XLNet does not leverage the full position information of a sentence and thus suffers from position discrepancy between pre-training and fine-tuning. In this paper, we propose MPNet, a novel pre-training method that inherits the advantages of BERT and XLNet and avoids their limitations. MPNet leverages the dependency among predicted tokens through permuted language modeling (vs. MLM in BERT), and takes auxiliary position information as input to make the model see a full sentence and thus reducing the position discrepancy (vs. PLM in XLNet). We pre-train MPNet on a large-scale dataset (over 160GB text corpora) and fine-tune on a variety of down-streaming tasks (GLUE, SQuAD, etc). Experimental results show that MPNet outperforms MLM and PLM by a large margin, and achieves better results on these tasks compared with previous state-of-the-art pre-trained methods (e.g., BERT, XLNet, RoBERTa) under the same model setting.`),W=l(),te=n("p"),he=a("Tips:"),j=l(),oe=n("ul"),x=n("li"),me=a("MPNet doesn\u2019t have "),c=n("code"),T=a("token_type_ids"),K=a(`, you don\u2019t need to indicate which token belongs to which segment. just separate your segments with the separation token `),_e=n("code"),ke=a("tokenizer.sep_token"),I=a(" (or "),ve=n("code"),Te=a("[sep]"),Me=a(")."),L=l(),D=n("p"),be=a("The original code can be found "),ge=n("a"),ue=a("here"),we=a("."),ie=l(),fe=n("h2"),oo=n("a"),Er=n("span"),M(Wo.$$.fragment),Al=l(),qr=n("span"),Il=a("MPNetConfig"),mi=l(),je=n("div"),M(Qo.$$.fragment),Dl=l(),it=n("p"),Sl=a("This is the configuration class to store the configuration of a "),Is=n("a"),Ol=a("MPNetModel"),Wl=a(` or a `),Ds=n("a"),Ql=a("TFMPNetModel"),Bl=a(`. It is used to instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPNet `),Bo=n("a"),Hl=a("mpnet-base"),Rl=a(" architecture."),Ul=l(),yt=n("p"),Vl=a("Configuration objects inherit from "),Ss=n("a"),Kl=a("PretrainedConfig"),Jl=a(` and can be used to control the model outputs. Read the documentation from `),Os=n("a"),Xl=a("PretrainedConfig"),Gl=a(" for more information."),Yl=l(),Cr=n("p"),Zl=a("Examples:"),ed=l(),M(Ho.$$.fragment),ui=l(),Ft=n("h2"),no=n("a"),xr=n("span"),M(Ro.$$.fragment),td=l(),jr=n("span"),od=a("MPNetTokenizer"),fi=l(),$e=n("div"),M(Uo.$$.fragment),nd=l(),Vo=n("p"),sd=a("This tokenizer inherits from "),Ws=n("a"),rd=a("BertTokenizer"),ad=a(` which contains most of the methods. Users should refer to the superclass for more information regarding methods.`),id=l(),dt=n("div"),M(Ko.$$.fragment),ld=l(),Lr=n("p"),dd=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MPNet sequence has the following format:`),cd=l(),Jo=n("ul"),Qs=n("li"),pd=a("single sequence: "),Ar=n("code"),hd=a("<s> X </s>"),md=l(),Bs=n("li"),ud=a("pair of sequences: "),Ir=n("code"),fd=a("<s> A </s></s> B </s>"),gd=l(),so=n("div"),M(Xo.$$.fragment),_d=l(),Go=n("p"),vd=a(`Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Dr=n("code"),kd=a("prepare_for_model"),Td=a(" methods."),Md=l(),ro=n("div"),M(Yo.$$.fragment),bd=l(),Sr=n("p"),wd=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned.`),Pd=l(),Or=n("div"),gi=l(),zt=n("h2"),ao=n("a"),Wr=n("span"),M(Zo.$$.fragment),Nd=l(),Qr=n("span"),$d=a("MPNetTokenizerFast"),_i=l(),Ze=n("div"),M(en.$$.fragment),yd=l(),tn=n("p"),Fd=a("Construct a \u201Cfast\u201D MPNet tokenizer (backed by HuggingFace\u2019s "),Br=n("em"),zd=a("tokenizers"),Ed=a(" library). Based on WordPiece."),qd=l(),on=n("p"),Cd=a("This tokenizer inherits from "),Hs=n("a"),xd=a("PreTrainedTokenizerFast"),jd=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ld=l(),io=n("div"),M(nn.$$.fragment),Ad=l(),Hr=n("p"),Id=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned`),vi=l(),Et=n("h2"),lo=n("a"),Rr=n("span"),M(sn.$$.fragment),Dd=l(),Ur=n("span"),Sd=a("MPNetModel"),ki=l(),Le=n("div"),M(rn.$$.fragment),Od=l(),Vr=n("p"),Wd=a("The bare MPNet Model transformer outputting raw hidden-states without any specific head on top."),Qd=l(),an=n("p"),Bd=a("This model inherits from "),Rs=n("a"),Hd=a("PreTrainedModel"),Rd=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ud=l(),ln=n("p"),Vd=a("This model is also a PyTorch "),dn=n("a"),Kd=a("torch.nn.Module"),Jd=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xd=l(),Oe=n("div"),M(cn.$$.fragment),Gd=l(),qt=n("p"),Yd=a("The "),Us=n("a"),Zd=a("MPNetModel"),ec=a(" forward method, overrides the "),Kr=n("code"),tc=a("__call__"),oc=a(" special method."),nc=l(),M(co.$$.fragment),sc=l(),Jr=n("p"),rc=a("Example:"),ac=l(),M(pn.$$.fragment),Ti=l(),Ct=n("h2"),po=n("a"),Xr=n("span"),M(hn.$$.fragment),ic=l(),Gr=n("span"),lc=a("MPNetForMaskedLM"),Mi=l(),mn=n("div"),We=n("div"),M(un.$$.fragment),dc=l(),xt=n("p"),cc=a("The "),Vs=n("a"),pc=a("MPNetForMaskedLM"),hc=a(" forward method, overrides the "),Yr=n("code"),mc=a("__call__"),uc=a(" special method."),fc=l(),M(ho.$$.fragment),gc=l(),Zr=n("p"),_c=a("Example:"),vc=l(),M(fn.$$.fragment),bi=l(),jt=n("h2"),mo=n("a"),ea=n("span"),M(gn.$$.fragment),kc=l(),ta=n("span"),Tc=a("MPNetForSequenceClassification"),wi=l(),Ae=n("div"),M(_n.$$.fragment),Mc=l(),oa=n("p"),bc=a(`MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),wc=l(),vn=n("p"),Pc=a("This model inherits from "),Ks=n("a"),Nc=a("PreTrainedModel"),$c=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yc=l(),kn=n("p"),Fc=a("This model is also a PyTorch "),Tn=n("a"),zc=a("torch.nn.Module"),Ec=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qc=l(),Pe=n("div"),M(Mn.$$.fragment),Cc=l(),Lt=n("p"),xc=a("The "),Js=n("a"),jc=a("MPNetForSequenceClassification"),Lc=a(" forward method, overrides the "),na=n("code"),Ac=a("__call__"),Ic=a(" special method."),Dc=l(),M(uo.$$.fragment),Sc=l(),sa=n("p"),Oc=a("Example of single-label classification:"),Wc=l(),M(bn.$$.fragment),Qc=l(),ra=n("p"),Bc=a("Example of multi-label classification:"),Hc=l(),M(wn.$$.fragment),Pi=l(),At=n("h2"),fo=n("a"),aa=n("span"),M(Pn.$$.fragment),Rc=l(),ia=n("span"),Uc=a("MPNetForMultipleChoice"),Ni=l(),Ie=n("div"),M(Nn.$$.fragment),Vc=l(),la=n("p"),Kc=a(`MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Jc=l(),$n=n("p"),Xc=a("This model inherits from "),Xs=n("a"),Gc=a("PreTrainedModel"),Yc=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zc=l(),yn=n("p"),ep=a("This model is also a PyTorch "),Fn=n("a"),tp=a("torch.nn.Module"),op=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),np=l(),Qe=n("div"),M(zn.$$.fragment),sp=l(),It=n("p"),rp=a("The "),Gs=n("a"),ap=a("MPNetForMultipleChoice"),ip=a(" forward method, overrides the "),da=n("code"),lp=a("__call__"),dp=a(" special method."),cp=l(),M(go.$$.fragment),pp=l(),ca=n("p"),hp=a("Example:"),mp=l(),M(En.$$.fragment),$i=l(),Dt=n("h2"),_o=n("a"),pa=n("span"),M(qn.$$.fragment),up=l(),ha=n("span"),fp=a("MPNetForTokenClassification"),yi=l(),De=n("div"),M(Cn.$$.fragment),gp=l(),ma=n("p"),_p=a(`MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),vp=l(),xn=n("p"),kp=a("This model inherits from "),Ys=n("a"),Tp=a("PreTrainedModel"),Mp=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bp=l(),jn=n("p"),wp=a("This model is also a PyTorch "),Ln=n("a"),Pp=a("torch.nn.Module"),Np=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$p=l(),Be=n("div"),M(An.$$.fragment),yp=l(),St=n("p"),Fp=a("The "),Zs=n("a"),zp=a("MPNetForTokenClassification"),Ep=a(" forward method, overrides the "),ua=n("code"),qp=a("__call__"),Cp=a(" special method."),xp=l(),M(vo.$$.fragment),jp=l(),fa=n("p"),Lp=a("Example:"),Ap=l(),M(In.$$.fragment),Fi=l(),Ot=n("h2"),ko=n("a"),ga=n("span"),M(Dn.$$.fragment),Ip=l(),_a=n("span"),Dp=a("MPNetForQuestionAnswering"),zi=l(),Se=n("div"),M(Sn.$$.fragment),Sp=l(),Wt=n("p"),Op=a(`MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),va=n("code"),Wp=a("span start logits"),Qp=a(" and "),ka=n("code"),Bp=a("span end logits"),Hp=a(")."),Rp=l(),On=n("p"),Up=a("This model inherits from "),er=n("a"),Vp=a("PreTrainedModel"),Kp=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jp=l(),Wn=n("p"),Xp=a("This model is also a PyTorch "),Qn=n("a"),Gp=a("torch.nn.Module"),Yp=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zp=l(),He=n("div"),M(Bn.$$.fragment),eh=l(),Qt=n("p"),th=a("The "),tr=n("a"),oh=a("MPNetForQuestionAnswering"),nh=a(" forward method, overrides the "),Ta=n("code"),sh=a("__call__"),rh=a(" special method."),ah=l(),M(To.$$.fragment),ih=l(),Ma=n("p"),lh=a("Example:"),dh=l(),M(Hn.$$.fragment),Ei=l(),Bt=n("h2"),Mo=n("a"),ba=n("span"),M(Rn.$$.fragment),ch=l(),wa=n("span"),ph=a("TFMPNetModel"),qi=l(),ye=n("div"),M(Un.$$.fragment),hh=l(),Pa=n("p"),mh=a("The bare MPNet Model transformer outputting raw hidden-states without any specific head on top."),uh=l(),Vn=n("p"),fh=a("This model inherits from "),or=n("a"),gh=a("TFPreTrainedModel"),_h=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vh=l(),Kn=n("p"),kh=a("This model is also a "),Jn=n("a"),Th=a("tf.keras.Model"),Mh=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bh=l(),M(bo.$$.fragment),wh=l(),Re=n("div"),M(Xn.$$.fragment),Ph=l(),Ht=n("p"),Nh=a("The "),nr=n("a"),$h=a("TFMPNetModel"),yh=a(" forward method, overrides the "),Na=n("code"),Fh=a("__call__"),zh=a(" special method."),Eh=l(),M(wo.$$.fragment),qh=l(),$a=n("p"),Ch=a("Example:"),xh=l(),M(Gn.$$.fragment),Ci=l(),Rt=n("h2"),Po=n("a"),ya=n("span"),M(Yn.$$.fragment),jh=l(),Fa=n("span"),Lh=a("TFMPNetForMaskedLM"),xi=l(),Fe=n("div"),M(Zn.$$.fragment),Ah=l(),es=n("p"),Ih=a("MPNet Model with a "),za=n("code"),Dh=a("language modeling"),Sh=a(" head on top."),Oh=l(),ts=n("p"),Wh=a("This model inherits from "),sr=n("a"),Qh=a("TFPreTrainedModel"),Bh=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hh=l(),os=n("p"),Rh=a("This model is also a "),ns=n("a"),Uh=a("tf.keras.Model"),Vh=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Kh=l(),M(No.$$.fragment),Jh=l(),Ue=n("div"),M(ss.$$.fragment),Xh=l(),Ut=n("p"),Gh=a("The "),rr=n("a"),Yh=a("TFMPNetForMaskedLM"),Zh=a(" forward method, overrides the "),Ea=n("code"),em=a("__call__"),tm=a(" special method."),om=l(),M($o.$$.fragment),nm=l(),qa=n("p"),sm=a("Example:"),rm=l(),M(rs.$$.fragment),ji=l(),Vt=n("h2"),yo=n("a"),Ca=n("span"),M(as.$$.fragment),am=l(),xa=n("span"),im=a("TFMPNetForSequenceClassification"),Li=l(),ze=n("div"),M(is.$$.fragment),lm=l(),ja=n("p"),dm=a(`MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),cm=l(),ls=n("p"),pm=a("This model inherits from "),ar=n("a"),hm=a("TFPreTrainedModel"),mm=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),um=l(),ds=n("p"),fm=a("This model is also a "),cs=n("a"),gm=a("tf.keras.Model"),_m=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vm=l(),M(Fo.$$.fragment),km=l(),Ve=n("div"),M(ps.$$.fragment),Tm=l(),Kt=n("p"),Mm=a("The "),ir=n("a"),bm=a("TFMPNetForSequenceClassification"),wm=a(" forward method, overrides the "),La=n("code"),Pm=a("__call__"),Nm=a(" special method."),$m=l(),M(zo.$$.fragment),ym=l(),Aa=n("p"),Fm=a("Example:"),zm=l(),M(hs.$$.fragment),Ai=l(),Jt=n("h2"),Eo=n("a"),Ia=n("span"),M(ms.$$.fragment),Em=l(),Da=n("span"),qm=a("TFMPNetForMultipleChoice"),Ii=l(),Ee=n("div"),M(us.$$.fragment),Cm=l(),Sa=n("p"),xm=a(`MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),jm=l(),fs=n("p"),Lm=a("This model inherits from "),lr=n("a"),Am=a("TFPreTrainedModel"),Im=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Dm=l(),gs=n("p"),Sm=a("This model is also a "),_s=n("a"),Om=a("tf.keras.Model"),Wm=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Qm=l(),M(qo.$$.fragment),Bm=l(),Ke=n("div"),M(vs.$$.fragment),Hm=l(),Xt=n("p"),Rm=a("The "),dr=n("a"),Um=a("TFMPNetForMultipleChoice"),Vm=a(" forward method, overrides the "),Oa=n("code"),Km=a("__call__"),Jm=a(" special method."),Xm=l(),M(Co.$$.fragment),Gm=l(),Wa=n("p"),Ym=a("Example:"),Zm=l(),M(ks.$$.fragment),Di=l(),Gt=n("h2"),xo=n("a"),Qa=n("span"),M(Ts.$$.fragment),eu=l(),Ba=n("span"),tu=a("TFMPNetForTokenClassification"),Si=l(),qe=n("div"),M(Ms.$$.fragment),ou=l(),Ha=n("p"),nu=a(`MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),su=l(),bs=n("p"),ru=a("This model inherits from "),cr=n("a"),au=a("TFPreTrainedModel"),iu=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lu=l(),ws=n("p"),du=a("This model is also a "),Ps=n("a"),cu=a("tf.keras.Model"),pu=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hu=l(),M(jo.$$.fragment),mu=l(),Je=n("div"),M(Ns.$$.fragment),uu=l(),Yt=n("p"),fu=a("The "),pr=n("a"),gu=a("TFMPNetForTokenClassification"),_u=a(" forward method, overrides the "),Ra=n("code"),vu=a("__call__"),ku=a(" special method."),Tu=l(),M(Lo.$$.fragment),Mu=l(),Ua=n("p"),bu=a("Example:"),wu=l(),M($s.$$.fragment),Oi=l(),Zt=n("h2"),Ao=n("a"),Va=n("span"),M(ys.$$.fragment),Pu=l(),Ka=n("span"),Nu=a("TFMPNetForQuestionAnswering"),Wi=l(),Ce=n("div"),M(Fs.$$.fragment),$u=l(),eo=n("p"),yu=a(`MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ja=n("code"),Fu=a("span start logits"),zu=a(" and "),Xa=n("code"),Eu=a("span end logits"),qu=a(")."),Cu=l(),zs=n("p"),xu=a("This model inherits from "),hr=n("a"),ju=a("TFPreTrainedModel"),Lu=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Au=l(),Es=n("p"),Iu=a("This model is also a "),qs=n("a"),Du=a("tf.keras.Model"),Su=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ou=l(),M(Io.$$.fragment),Wu=l(),Xe=n("div"),M(Cs.$$.fragment),Qu=l(),to=n("p"),Bu=a("The "),mr=n("a"),Hu=a("TFMPNetForQuestionAnswering"),Ru=a(" forward method, overrides the "),Ga=n("code"),Uu=a("__call__"),Vu=a(" special method."),Ku=l(),M(Do.$$.fragment),Ju=l(),Ya=n("p"),Xu=a("Example:"),Gu=l(),M(xs.$$.fragment),this.h()},l(o){const u=C_('[data-svelte="svelte-1phssyn"]',document.head);p=s(u,"META",{name:!0,content:!0}),u.forEach(t),y=d(o),f=s(o,"H1",{class:!0});var js=r(f);g=s(js,"A",{id:!0,class:!0,href:!0});var Za=r(g);k=s(Za,"SPAN",{});var ei=r(k);b(v.$$.fragment,ei),ei.forEach(t),Za.forEach(t),_=d(js),F=s(js,"SPAN",{});var ti=r(F);le=i(ti,"MPNet"),ti.forEach(t),js.forEach(t),U=d(o),z=s(o,"H2",{class:!0});var Ls=r(z);J=s(Ls,"A",{id:!0,class:!0,href:!0});var oi=r(J);S=s(oi,"SPAN",{});var ni=r(S);b(X.$$.fragment,ni),ni.forEach(t),oi.forEach(t),de=d(Ls),O=s(Ls,"SPAN",{});var si=r(O);ce=i(si,"Overview"),si.forEach(t),Ls.forEach(t),se=d(o),R=s(o,"P",{});var As=r(R);A=i(As,"The MPNet model was proposed in "),G=s(As,"A",{href:!0,rel:!0});var ri=r(G);V=i(ri,"MPNet: Masked and Permuted Pre-training for Language Understanding"),ri.forEach(t),E=i(As," by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu."),As.forEach(t),q=d(o),Y=s(o,"P",{});var ai=r(Y);B=i(ai,`MPNet adopts a novel pre-training method, named masked and permuted language modeling, to inherit the advantages of masked language modeling and permuted language modeling for natural language understanding.`),ai.forEach(t),re=d(o),Z=s(o,"P",{});var ii=r(Z);H=i(ii,"The abstract from the paper is the following:"),ii.forEach(t),ae=d(o),ee=s(o,"P",{});var li=r(ee);C=s(li,"EM",{});var di=r(C);pe=i(di,`BERT adopts masked language modeling (MLM) for pre-training and is one of the most successful pre-training models. Since BERT neglects dependency among predicted tokens, XLNet introduces permuted language modeling (PLM) for pre-training to address this problem. However, XLNet does not leverage the full position information of a sentence and thus suffers from position discrepancy between pre-training and fine-tuning. In this paper, we propose MPNet, a novel pre-training method that inherits the advantages of BERT and XLNet and avoids their limitations. MPNet leverages the dependency among predicted tokens through permuted language modeling (vs. MLM in BERT), and takes auxiliary position information as input to make the model see a full sentence and thus reducing the position discrepancy (vs. PLM in XLNet). We pre-train MPNet on a large-scale dataset (over 160GB text corpora) and fine-tune on a variety of down-streaming tasks (GLUE, SQuAD, etc). 
Experimental results show that MPNet outperforms MLM and PLM by a large margin, and achieves better results on these tasks compared with previous state-of-the-art pre-trained methods (e.g., BERT, XLNet, RoBERTa) under the same model setting.`),di.forEach(t),li.forEach(t),W=d(o),te=s(o,"P",{});var ci=r(te);he=i(ci,"Tips:"),ci.forEach(t),j=d(o),oe=s(o,"UL",{});var pi=r(oe);x=s(pi,"LI",{});var lt=r(x);me=i(lt,"MPNet doesn\u2019t have "),c=s(lt,"CODE",{});var hi=r(c);T=i(hi,"token_type_ids"),hi.forEach(t),K=i(lt,`, you don\u2019t need to indicate which token belongs to which segment. just separate your segments with the separation token `),_e=s(lt,"CODE",{});var ef=r(_e);ke=i(ef,"tokenizer.sep_token"),ef.forEach(t),I=i(lt," (or "),ve=s(lt,"CODE",{});var tf=r(ve);Te=i(tf,"[sep]"),tf.forEach(t),Me=i(lt,")."),lt.forEach(t),pi.forEach(t),L=d(o),D=s(o,"P",{});var Bi=r(D);be=i(Bi,"The original code can be found "),ge=s(Bi,"A",{href:!0,rel:!0});var of=r(ge);ue=i(of,"here"),of.forEach(t),we=i(Bi,"."),Bi.forEach(t),ie=d(o),fe=s(o,"H2",{class:!0});var Hi=r(fe);oo=s(Hi,"A",{id:!0,class:!0,href:!0});var nf=r(oo);Er=s(nf,"SPAN",{});var sf=r(Er);b(Wo.$$.fragment,sf),sf.forEach(t),nf.forEach(t),Al=d(Hi),qr=s(Hi,"SPAN",{});var rf=r(qr);Il=i(rf,"MPNetConfig"),rf.forEach(t),Hi.forEach(t),mi=d(o),je=s(o,"DIV",{class:!0});var ct=r(je);b(Qo.$$.fragment,ct),Dl=d(ct),it=s(ct,"P",{});var So=r(it);Sl=i(So,"This is the configuration class to store the configuration of a "),Is=s(So,"A",{href:!0});var af=r(Is);Ol=i(af,"MPNetModel"),af.forEach(t),Wl=i(So,` or a `),Ds=s(So,"A",{href:!0});var lf=r(Ds);Ql=i(lf,"TFMPNetModel"),lf.forEach(t),Bl=i(So,`. It is used to instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPNet `),Bo=s(So,"A",{href:!0,rel:!0});var df=r(Bo);Hl=i(df,"mpnet-base"),df.forEach(t),Rl=i(So," architecture."),So.forEach(t),Ul=d(ct),yt=s(ct,"P",{});var ur=r(yt);Vl=i(ur,"Configuration objects inherit from "),Ss=s(ur,"A",{href:!0});var cf=r(Ss);Kl=i(cf,"PretrainedConfig"),cf.forEach(t),Jl=i(ur,` and can be used to control the model outputs. Read the documentation from `),Os=s(ur,"A",{href:!0});var pf=r(Os);Xl=i(pf,"PretrainedConfig"),pf.forEach(t),Gl=i(ur," for more information."),ur.forEach(t),Yl=d(ct),Cr=s(ct,"P",{});var hf=r(Cr);Zl=i(hf,"Examples:"),hf.forEach(t),ed=d(ct),b(Ho.$$.fragment,ct),ct.forEach(t),ui=d(o),Ft=s(o,"H2",{class:!0});var Ri=r(Ft);no=s(Ri,"A",{id:!0,class:!0,href:!0});var mf=r(no);xr=s(mf,"SPAN",{});var uf=r(xr);b(Ro.$$.fragment,uf),uf.forEach(t),mf.forEach(t),td=d(Ri),jr=s(Ri,"SPAN",{});var ff=r(jr);od=i(ff,"MPNetTokenizer"),ff.forEach(t),Ri.forEach(t),fi=d(o),$e=s(o,"DIV",{class:!0});var et=r($e);b(Uo.$$.fragment,et),nd=d(et),Vo=s(et,"P",{});var Ui=r(Vo);sd=i(Ui,"This tokenizer inherits from "),Ws=s(Ui,"A",{href:!0});var gf=r(Ws);rd=i(gf,"BertTokenizer"),gf.forEach(t),ad=i(Ui,` which contains most of the methods. Users should refer to the superclass for more information regarding methods.`),Ui.forEach(t),id=d(et),dt=s(et,"DIV",{class:!0});var fr=r(dt);b(Ko.$$.fragment,fr),ld=d(fr),Lr=s(fr,"P",{});var _f=r(Lr);dd=i(_f,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A MPNet sequence has the following format:`),_f.forEach(t),cd=d(fr),Jo=s(fr,"UL",{});var Vi=r(Jo);Qs=s(Vi,"LI",{});var Yu=r(Qs);pd=i(Yu,"single sequence: "),Ar=s(Yu,"CODE",{});var vf=r(Ar);hd=i(vf,"<s> X </s>"),vf.forEach(t),Yu.forEach(t),md=d(Vi),Bs=s(Vi,"LI",{});var Zu=r(Bs);ud=i(Zu,"pair of sequences: "),Ir=s(Zu,"CODE",{});var kf=r(Ir);fd=i(kf,"<s> A </s></s> B </s>"),kf.forEach(t),Zu.forEach(t),Vi.forEach(t),fr.forEach(t),gd=d(et),so=s(et,"DIV",{class:!0});var Ki=r(so);b(Xo.$$.fragment,Ki),_d=d(Ki),Go=s(Ki,"P",{});var Ji=r(Go);vd=i(Ji,`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Dr=s(Ji,"CODE",{});var Tf=r(Dr);kd=i(Tf,"prepare_for_model"),Tf.forEach(t),Td=i(Ji," methods."),Ji.forEach(t),Ki.forEach(t),Md=d(et),ro=s(et,"DIV",{class:!0});var Xi=r(ro);b(Yo.$$.fragment,Xi),bd=d(Xi),Sr=s(Xi,"P",{});var Mf=r(Sr);wd=i(Mf,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned.`),Mf.forEach(t),Xi.forEach(t),Pd=d(et),Or=s(et,"DIV",{class:!0}),r(Or).forEach(t),et.forEach(t),gi=d(o),zt=s(o,"H2",{class:!0});var Gi=r(zt);ao=s(Gi,"A",{id:!0,class:!0,href:!0});var bf=r(ao);Wr=s(bf,"SPAN",{});var wf=r(Wr);b(Zo.$$.fragment,wf),wf.forEach(t),bf.forEach(t),Nd=d(Gi),Qr=s(Gi,"SPAN",{});var Pf=r(Qr);$d=i(Pf,"MPNetTokenizerFast"),Pf.forEach(t),Gi.forEach(t),_i=d(o),Ze=s(o,"DIV",{class:!0});var Oo=r(Ze);b(en.$$.fragment,Oo),yd=d(Oo),tn=s(Oo,"P",{});var Yi=r(tn);Fd=i(Yi,"Construct a \u201Cfast\u201D MPNet tokenizer (backed by HuggingFace\u2019s "),Br=s(Yi,"EM",{});var Nf=r(Br);zd=i(Nf,"tokenizers"),Nf.forEach(t),Ed=i(Yi," library). Based on WordPiece."),Yi.forEach(t),qd=d(Oo),on=s(Oo,"P",{});var Zi=r(on);Cd=i(Zi,"This tokenizer inherits from "),Hs=s(Zi,"A",{href:!0});var $f=r(Hs);xd=i($f,"PreTrainedTokenizerFast"),$f.forEach(t),jd=i(Zi,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Zi.forEach(t),Ld=d(Oo),io=s(Oo,"DIV",{class:!0});var el=r(io);b(nn.$$.fragment,el),Ad=d(el),Hr=s(el,"P",{});var yf=r(Hr);Id=i(yf,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned`),yf.forEach(t),el.forEach(t),Oo.forEach(t),vi=d(o),Et=s(o,"H2",{class:!0});var tl=r(Et);lo=s(tl,"A",{id:!0,class:!0,href:!0});var Ff=r(lo);Rr=s(Ff,"SPAN",{});var zf=r(Rr);b(sn.$$.fragment,zf),zf.forEach(t),Ff.forEach(t),Dd=d(tl),Ur=s(tl,"SPAN",{});var Ef=r(Ur);Sd=i(Ef,"MPNetModel"),Ef.forEach(t),tl.forEach(t),ki=d(o),Le=s(o,"DIV",{class:!0});var pt=r(Le);b(rn.$$.fragment,pt),Od=d(pt),Vr=s(pt,"P",{});var qf=r(Vr);Wd=i(qf,"The bare MPNet Model transformer outputting raw hidden-states without any specific head on top."),qf.forEach(t),Qd=d(pt),an=s(pt,"P",{});var ol=r(an);Bd=i(ol,"This model inherits from "),Rs=s(ol,"A",{href:!0});var Cf=r(Rs);Hd=i(Cf,"PreTrainedModel"),Cf.forEach(t),Rd=i(ol,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ol.forEach(t),Ud=d(pt),ln=s(pt,"P",{});var nl=r(ln);Vd=i(nl,"This model is also a PyTorch "),dn=s(nl,"A",{href:!0,rel:!0});var xf=r(dn);Kd=i(xf,"torch.nn.Module"),xf.forEach(t),Jd=i(nl,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nl.forEach(t),Xd=d(pt),Oe=s(pt,"DIV",{class:!0});var ht=r(Oe);b(cn.$$.fragment,ht),Gd=d(ht),qt=s(ht,"P",{});var gr=r(qt);Yd=i(gr,"The "),Us=s(gr,"A",{href:!0});var jf=r(Us);Zd=i(jf,"MPNetModel"),jf.forEach(t),ec=i(gr," forward method, overrides the "),Kr=s(gr,"CODE",{});var Lf=r(Kr);tc=i(Lf,"__call__"),Lf.forEach(t),oc=i(gr," special method."),gr.forEach(t),nc=d(ht),b(co.$$.fragment,ht),sc=d(ht),Jr=s(ht,"P",{});var Af=r(Jr);rc=i(Af,"Example:"),Af.forEach(t),ac=d(ht),b(pn.$$.fragment,ht),ht.forEach(t),pt.forEach(t),Ti=d(o),Ct=s(o,"H2",{class:!0});var sl=r(Ct);po=s(sl,"A",{id:!0,class:!0,href:!0});var If=r(po);Xr=s(If,"SPAN",{});var Df=r(Xr);b(hn.$$.fragment,Df),Df.forEach(t),If.forEach(t),ic=d(sl),Gr=s(sl,"SPAN",{});var Sf=r(Gr);lc=i(Sf,"MPNetForMaskedLM"),Sf.forEach(t),sl.forEach(t),Mi=d(o),mn=s(o,"DIV",{class:!0});var Of=r(mn);We=s(Of,"DIV",{class:!0});var mt=r(We);b(un.$$.fragment,mt),dc=d(mt),xt=s(mt,"P",{});var _r=r(xt);cc=i(_r,"The "),Vs=s(_r,"A",{href:!0});var Wf=r(Vs);pc=i(Wf,"MPNetForMaskedLM"),Wf.forEach(t),hc=i(_r," forward method, overrides the "),Yr=s(_r,"CODE",{});var Qf=r(Yr);mc=i(Qf,"__call__"),Qf.forEach(t),uc=i(_r," special method."),_r.forEach(t),fc=d(mt),b(ho.$$.fragment,mt),gc=d(mt),Zr=s(mt,"P",{});var Bf=r(Zr);_c=i(Bf,"Example:"),Bf.forEach(t),vc=d(mt),b(fn.$$.fragment,mt),mt.forEach(t),Of.forEach(t),bi=d(o),jt=s(o,"H2",{class:!0});var rl=r(jt);mo=s(rl,"A",{id:!0,class:!0,href:!0});var Hf=r(mo);ea=s(Hf,"SPAN",{});var Rf=r(ea);b(gn.$$.fragment,Rf),Rf.forEach(t),Hf.forEach(t),kc=d(rl),ta=s(rl,"SPAN",{});var Uf=r(ta);Tc=i(Uf,"MPNetForSequenceClassification"),Uf.forEach(t),rl.forEach(t),wi=d(o),Ae=s(o,"DIV",{class:!0});var ut=r(Ae);b(_n.$$.fragment,ut),Mc=d(ut),oa=s(ut,"P",{});var Vf=r(oa);bc=i(Vf,`MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Vf.forEach(t),wc=d(ut),vn=s(ut,"P",{});var al=r(vn);Pc=i(al,"This model inherits from "),Ks=s(al,"A",{href:!0});var Kf=r(Ks);Nc=i(Kf,"PreTrainedModel"),Kf.forEach(t),$c=i(al,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),al.forEach(t),yc=d(ut),kn=s(ut,"P",{});var il=r(kn);Fc=i(il,"This model is also a PyTorch "),Tn=s(il,"A",{href:!0,rel:!0});var Jf=r(Tn);zc=i(Jf,"torch.nn.Module"),Jf.forEach(t),Ec=i(il,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),il.forEach(t),qc=d(ut),Pe=s(ut,"DIV",{class:!0});var Ge=r(Pe);b(Mn.$$.fragment,Ge),Cc=d(Ge),Lt=s(Ge,"P",{});var vr=r(Lt);xc=i(vr,"The "),Js=s(vr,"A",{href:!0});var Xf=r(Js);jc=i(Xf,"MPNetForSequenceClassification"),Xf.forEach(t),Lc=i(vr," forward method, overrides the "),na=s(vr,"CODE",{});var Gf=r(na);Ac=i(Gf,"__call__"),Gf.forEach(t),Ic=i(vr," special method."),vr.forEach(t),Dc=d(Ge),b(uo.$$.fragment,Ge),Sc=d(Ge),sa=s(Ge,"P",{});var Yf=r(sa);Oc=i(Yf,"Example of single-label classification:"),Yf.forEach(t),Wc=d(Ge),b(bn.$$.fragment,Ge),Qc=d(Ge),ra=s(Ge,"P",{});var Zf=r(ra);Bc=i(Zf,"Example of multi-label classification:"),Zf.forEach(t),Hc=d(Ge),b(wn.$$.fragment,Ge),Ge.forEach(t),ut.forEach(t),Pi=d(o),At=s(o,"H2",{class:!0});var ll=r(At);fo=s(ll,"A",{id:!0,class:!0,href:!0});var eg=r(fo);aa=s(eg,"SPAN",{});var tg=r(aa);b(Pn.$$.fragment,tg),tg.forEach(t),eg.forEach(t),Rc=d(ll),ia=s(ll,"SPAN",{});var og=r(ia);Uc=i(og,"MPNetForMultipleChoice"),og.forEach(t),ll.forEach(t),Ni=d(o),Ie=s(o,"DIV",{class:!0});var ft=r(Ie);b(Nn.$$.fragment,ft),Vc=d(ft),la=s(ft,"P",{});var ng=r(la);Kc=i(ng,`MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ng.forEach(t),Jc=d(ft),$n=s(ft,"P",{});var dl=r($n);Xc=i(dl,"This model inherits from "),Xs=s(dl,"A",{href:!0});var sg=r(Xs);Gc=i(sg,"PreTrainedModel"),sg.forEach(t),Yc=i(dl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dl.forEach(t),Zc=d(ft),yn=s(ft,"P",{});var cl=r(yn);ep=i(cl,"This model is also a PyTorch "),Fn=s(cl,"A",{href:!0,rel:!0});var rg=r(Fn);tp=i(rg,"torch.nn.Module"),rg.forEach(t),op=i(cl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cl.forEach(t),np=d(ft),Qe=s(ft,"DIV",{class:!0});var gt=r(Qe);b(zn.$$.fragment,gt),sp=d(gt),It=s(gt,"P",{});var kr=r(It);rp=i(kr,"The "),Gs=s(kr,"A",{href:!0});var ag=r(Gs);ap=i(ag,"MPNetForMultipleChoice"),ag.forEach(t),ip=i(kr," forward method, overrides the "),da=s(kr,"CODE",{});var ig=r(da);lp=i(ig,"__call__"),ig.forEach(t),dp=i(kr," special method."),kr.forEach(t),cp=d(gt),b(go.$$.fragment,gt),pp=d(gt),ca=s(gt,"P",{});var lg=r(ca);hp=i(lg,"Example:"),lg.forEach(t),mp=d(gt),b(En.$$.fragment,gt),gt.forEach(t),ft.forEach(t),$i=d(o),Dt=s(o,"H2",{class:!0});var pl=r(Dt);_o=s(pl,"A",{id:!0,class:!0,href:!0});var dg=r(_o);pa=s(dg,"SPAN",{});var cg=r(pa);b(qn.$$.fragment,cg),cg.forEach(t),dg.forEach(t),up=d(pl),ha=s(pl,"SPAN",{});var pg=r(ha);fp=i(pg,"MPNetForTokenClassification"),pg.forEach(t),pl.forEach(t),yi=d(o),De=s(o,"DIV",{class:!0});var _t=r(De);b(Cn.$$.fragment,_t),gp=d(_t),ma=s(_t,"P",{});var hg=r(ma);_p=i(hg,`MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),hg.forEach(t),vp=d(_t),xn=s(_t,"P",{});var hl=r(xn);kp=i(hl,"This model inherits from "),Ys=s(hl,"A",{href:!0});var mg=r(Ys);Tp=i(mg,"PreTrainedModel"),mg.forEach(t),Mp=i(hl,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hl.forEach(t),bp=d(_t),jn=s(_t,"P",{});var ml=r(jn);wp=i(ml,"This model is also a PyTorch "),Ln=s(ml,"A",{href:!0,rel:!0});var ug=r(Ln);Pp=i(ug,"torch.nn.Module"),ug.forEach(t),Np=i(ml,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ml.forEach(t),$p=d(_t),Be=s(_t,"DIV",{class:!0});var vt=r(Be);b(An.$$.fragment,vt),yp=d(vt),St=s(vt,"P",{});var Tr=r(St);Fp=i(Tr,"The "),Zs=s(Tr,"A",{href:!0});var fg=r(Zs);zp=i(fg,"MPNetForTokenClassification"),fg.forEach(t),Ep=i(Tr," forward method, overrides the "),ua=s(Tr,"CODE",{});var gg=r(ua);qp=i(gg,"__call__"),gg.forEach(t),Cp=i(Tr," special method."),Tr.forEach(t),xp=d(vt),b(vo.$$.fragment,vt),jp=d(vt),fa=s(vt,"P",{});var _g=r(fa);Lp=i(_g,"Example:"),_g.forEach(t),Ap=d(vt),b(In.$$.fragment,vt),vt.forEach(t),_t.forEach(t),Fi=d(o),Ot=s(o,"H2",{class:!0});var ul=r(Ot);ko=s(ul,"A",{id:!0,class:!0,href:!0});var vg=r(ko);ga=s(vg,"SPAN",{});var kg=r(ga);b(Dn.$$.fragment,kg),kg.forEach(t),vg.forEach(t),Ip=d(ul),_a=s(ul,"SPAN",{});var Tg=r(_a);Dp=i(Tg,"MPNetForQuestionAnswering"),Tg.forEach(t),ul.forEach(t),zi=d(o),Se=s(o,"DIV",{class:!0});var kt=r(Se);b(Sn.$$.fragment,kt),Sp=d(kt),Wt=s(kt,"P",{});var Mr=r(Wt);Op=i(Mr,`MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),va=s(Mr,"CODE",{});var Mg=r(va);Wp=i(Mg,"span start logits"),Mg.forEach(t),Qp=i(Mr," and "),ka=s(Mr,"CODE",{});var bg=r(ka);Bp=i(bg,"span end logits"),bg.forEach(t),Hp=i(Mr,")."),Mr.forEach(t),Rp=d(kt),On=s(kt,"P",{});var fl=r(On);Up=i(fl,"This model inherits from "),er=s(fl,"A",{href:!0});var wg=r(er);Vp=i(wg,"PreTrainedModel"),wg.forEach(t),Kp=i(fl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fl.forEach(t),Jp=d(kt),Wn=s(kt,"P",{});var gl=r(Wn);Xp=i(gl,"This model is also a PyTorch "),Qn=s(gl,"A",{href:!0,rel:!0});var Pg=r(Qn);Gp=i(Pg,"torch.nn.Module"),Pg.forEach(t),Yp=i(gl,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gl.forEach(t),Zp=d(kt),He=s(kt,"DIV",{class:!0});var Tt=r(He);b(Bn.$$.fragment,Tt),eh=d(Tt),Qt=s(Tt,"P",{});var br=r(Qt);th=i(br,"The "),tr=s(br,"A",{href:!0});var Ng=r(tr);oh=i(Ng,"MPNetForQuestionAnswering"),Ng.forEach(t),nh=i(br," forward method, overrides the "),Ta=s(br,"CODE",{});var $g=r(Ta);sh=i($g,"__call__"),$g.forEach(t),rh=i(br," special method."),br.forEach(t),ah=d(Tt),b(To.$$.fragment,Tt),ih=d(Tt),Ma=s(Tt,"P",{});var yg=r(Ma);lh=i(yg,"Example:"),yg.forEach(t),dh=d(Tt),b(Hn.$$.fragment,Tt),Tt.forEach(t),kt.forEach(t),Ei=d(o),Bt=s(o,"H2",{class:!0});var _l=r(Bt);Mo=s(_l,"A",{id:!0,class:!0,href:!0});var Fg=r(Mo);ba=s(Fg,"SPAN",{});var zg=r(ba);b(Rn.$$.fragment,zg),zg.forEach(t),Fg.forEach(t),ch=d(_l),wa=s(_l,"SPAN",{});var Eg=r(wa);ph=i(Eg,"TFMPNetModel"),Eg.forEach(t),_l.forEach(t),qi=d(o),ye=s(o,"DIV",{class:!0});var tt=r(ye);b(Un.$$.fragment,tt),hh=d(tt),Pa=s(tt,"P",{});var qg=r(Pa);mh=i(qg,"The bare MPNet Model transformer outputting raw hidden-states without any specific head on top."),qg.forEach(t),uh=d(tt),Vn=s(tt,"P",{});var vl=r(Vn);fh=i(vl,"This model inherits from "),or=s(vl,"A",{href:!0});var Cg=r(or);gh=i(Cg,"TFPreTrainedModel"),Cg.forEach(t),_h=i(vl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vl.forEach(t),vh=d(tt),Kn=s(tt,"P",{});var kl=r(Kn);kh=i(kl,"This model is also a "),Jn=s(kl,"A",{href:!0,rel:!0});var xg=r(Jn);Th=i(xg,"tf.keras.Model"),xg.forEach(t),Mh=i(kl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),kl.forEach(t),bh=d(tt),b(bo.$$.fragment,tt),wh=d(tt),Re=s(tt,"DIV",{class:!0});var Mt=r(Re);b(Xn.$$.fragment,Mt),Ph=d(Mt),Ht=s(Mt,"P",{});var wr=r(Ht);Nh=i(wr,"The "),nr=s(wr,"A",{href:!0});var jg=r(nr);$h=i(jg,"TFMPNetModel"),jg.forEach(t),yh=i(wr," forward method, overrides the "),Na=s(wr,"CODE",{});var Lg=r(Na);Fh=i(Lg,"__call__"),Lg.forEach(t),zh=i(wr," special method."),wr.forEach(t),Eh=d(Mt),b(wo.$$.fragment,Mt),qh=d(Mt),$a=s(Mt,"P",{});var Ag=r($a);Ch=i(Ag,"Example:"),Ag.forEach(t),xh=d(Mt),b(Gn.$$.fragment,Mt),Mt.forEach(t),tt.forEach(t),Ci=d(o),Rt=s(o,"H2",{class:!0});var Tl=r(Rt);Po=s(Tl,"A",{id:!0,class:!0,href:!0});var Ig=r(Po);ya=s(Ig,"SPAN",{});var Dg=r(ya);b(Yn.$$.fragment,Dg),Dg.forEach(t),Ig.forEach(t),jh=d(Tl),Fa=s(Tl,"SPAN",{});var Sg=r(Fa);Lh=i(Sg,"TFMPNetForMaskedLM"),Sg.forEach(t),Tl.forEach(t),xi=d(o),Fe=s(o,"DIV",{class:!0});var ot=r(Fe);b(Zn.$$.fragment,ot),Ah=d(ot),es=s(ot,"P",{});var Ml=r(es);Ih=i(Ml,"MPNet Model with a "),za=s(Ml,"CODE",{});var Og=r(za);Dh=i(Og,"language modeling"),Og.forEach(t),Sh=i(Ml," head on top."),Ml.forEach(t),Oh=d(ot),ts=s(ot,"P",{});var bl=r(ts);Wh=i(bl,"This model inherits from "),sr=s(bl,"A",{href:!0});var Wg=r(sr);Qh=i(Wg,"TFPreTrainedModel"),Wg.forEach(t),Bh=i(bl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bl.forEach(t),Hh=d(ot),os=s(ot,"P",{});var wl=r(os);Rh=i(wl,"This model is also a "),ns=s(wl,"A",{href:!0,rel:!0});var Qg=r(ns);Uh=i(Qg,"tf.keras.Model"),Qg.forEach(t),Vh=i(wl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),wl.forEach(t),Kh=d(ot),b(No.$$.fragment,ot),Jh=d(ot),Ue=s(ot,"DIV",{class:!0});var bt=r(Ue);b(ss.$$.fragment,bt),Xh=d(bt),Ut=s(bt,"P",{});var Pr=r(Ut);Gh=i(Pr,"The "),rr=s(Pr,"A",{href:!0});var Bg=r(rr);Yh=i(Bg,"TFMPNetForMaskedLM"),Bg.forEach(t),Zh=i(Pr," forward method, overrides the "),Ea=s(Pr,"CODE",{});var Hg=r(Ea);em=i(Hg,"__call__"),Hg.forEach(t),tm=i(Pr," special method."),Pr.forEach(t),om=d(bt),b($o.$$.fragment,bt),nm=d(bt),qa=s(bt,"P",{});var Rg=r(qa);sm=i(Rg,"Example:"),Rg.forEach(t),rm=d(bt),b(rs.$$.fragment,bt),bt.forEach(t),ot.forEach(t),ji=d(o),Vt=s(o,"H2",{class:!0});var Pl=r(Vt);yo=s(Pl,"A",{id:!0,class:!0,href:!0});var Ug=r(yo);Ca=s(Ug,"SPAN",{});var Vg=r(Ca);b(as.$$.fragment,Vg),Vg.forEach(t),Ug.forEach(t),am=d(Pl),xa=s(Pl,"SPAN",{});var Kg=r(xa);im=i(Kg,"TFMPNetForSequenceClassification"),Kg.forEach(t),Pl.forEach(t),Li=d(o),ze=s(o,"DIV",{class:!0});var nt=r(ze);b(is.$$.fragment,nt),lm=d(nt),ja=s(nt,"P",{});var Jg=r(ja);dm=i(Jg,`MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Jg.forEach(t),cm=d(nt),ls=s(nt,"P",{});var Nl=r(ls);pm=i(Nl,"This model inherits from "),ar=s(Nl,"A",{href:!0});var Xg=r(ar);hm=i(Xg,"TFPreTrainedModel"),Xg.forEach(t),mm=i(Nl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nl.forEach(t),um=d(nt),ds=s(nt,"P",{});var $l=r(ds);fm=i($l,"This model is also a "),cs=s($l,"A",{href:!0,rel:!0});var Gg=r(cs);gm=i(Gg,"tf.keras.Model"),Gg.forEach(t),_m=i($l,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),$l.forEach(t),vm=d(nt),b(Fo.$$.fragment,nt),km=d(nt),Ve=s(nt,"DIV",{class:!0});var wt=r(Ve);b(ps.$$.fragment,wt),Tm=d(wt),Kt=s(wt,"P",{});var Nr=r(Kt);Mm=i(Nr,"The "),ir=s(Nr,"A",{href:!0});var Yg=r(ir);bm=i(Yg,"TFMPNetForSequenceClassification"),Yg.forEach(t),wm=i(Nr," forward method, overrides the "),La=s(Nr,"CODE",{});var Zg=r(La);Pm=i(Zg,"__call__"),Zg.forEach(t),Nm=i(Nr," special method."),Nr.forEach(t),$m=d(wt),b(zo.$$.fragment,wt),ym=d(wt),Aa=s(wt,"P",{});var e_=r(Aa);Fm=i(e_,"Example:"),e_.forEach(t),zm=d(wt),b(hs.$$.fragment,wt),wt.forEach(t),nt.forEach(t),Ai=d(o),Jt=s(o,"H2",{class:!0});var yl=r(Jt);Eo=s(yl,"A",{id:!0,class:!0,href:!0});var t_=r(Eo);Ia=s(t_,"SPAN",{});var o_=r(Ia);b(ms.$$.fragment,o_),o_.forEach(t),t_.forEach(t),Em=d(yl),Da=s(yl,"SPAN",{});var n_=r(Da);qm=i(n_,"TFMPNetForMultipleChoice"),n_.forEach(t),yl.forEach(t),Ii=d(o),Ee=s(o,"DIV",{class:!0});var st=r(Ee);b(us.$$.fragment,st),Cm=d(st),Sa=s(st,"P",{});var s_=r(Sa);xm=i(s_,`MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),s_.forEach(t),jm=d(st),fs=s(st,"P",{});var Fl=r(fs);Lm=i(Fl,"This model inherits from "),lr=s(Fl,"A",{href:!0});var r_=r(lr);Am=i(r_,"TFPreTrainedModel"),r_.forEach(t),Im=i(Fl,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fl.forEach(t),Dm=d(st),gs=s(st,"P",{});var zl=r(gs);Sm=i(zl,"This model is also a "),_s=s(zl,"A",{href:!0,rel:!0});var a_=r(_s);Om=i(a_,"tf.keras.Model"),a_.forEach(t),Wm=i(zl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),zl.forEach(t),Qm=d(st),b(qo.$$.fragment,st),Bm=d(st),Ke=s(st,"DIV",{class:!0});var Pt=r(Ke);b(vs.$$.fragment,Pt),Hm=d(Pt),Xt=s(Pt,"P",{});var $r=r(Xt);Rm=i($r,"The "),dr=s($r,"A",{href:!0});var i_=r(dr);Um=i(i_,"TFMPNetForMultipleChoice"),i_.forEach(t),Vm=i($r," forward method, overrides the "),Oa=s($r,"CODE",{});var l_=r(Oa);Km=i(l_,"__call__"),l_.forEach(t),Jm=i($r," special method."),$r.forEach(t),Xm=d(Pt),b(Co.$$.fragment,Pt),Gm=d(Pt),Wa=s(Pt,"P",{});var d_=r(Wa);Ym=i(d_,"Example:"),d_.forEach(t),Zm=d(Pt),b(ks.$$.fragment,Pt),Pt.forEach(t),st.forEach(t),Di=d(o),Gt=s(o,"H2",{class:!0});var El=r(Gt);xo=s(El,"A",{id:!0,class:!0,href:!0});var c_=r(xo);Qa=s(c_,"SPAN",{});var p_=r(Qa);b(Ts.$$.fragment,p_),p_.forEach(t),c_.forEach(t),eu=d(El),Ba=s(El,"SPAN",{});var h_=r(Ba);tu=i(h_,"TFMPNetForTokenClassification"),h_.forEach(t),El.forEach(t),Si=d(o),qe=s(o,"DIV",{class:!0});var rt=r(qe);b(Ms.$$.fragment,rt),ou=d(rt),Ha=s(rt,"P",{});var m_=r(Ha);nu=i(m_,`MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),m_.forEach(t),su=d(rt),bs=s(rt,"P",{});var ql=r(bs);ru=i(ql,"This model inherits from "),cr=s(ql,"A",{href:!0});var u_=r(cr);au=i(u_,"TFPreTrainedModel"),u_.forEach(t),iu=i(ql,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ql.forEach(t),lu=d(rt),ws=s(rt,"P",{});var Cl=r(ws);du=i(Cl,"This model is also a "),Ps=s(Cl,"A",{href:!0,rel:!0});var f_=r(Ps);cu=i(f_,"tf.keras.Model"),f_.forEach(t),pu=i(Cl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cl.forEach(t),hu=d(rt),b(jo.$$.fragment,rt),mu=d(rt),Je=s(rt,"DIV",{class:!0});var Nt=r(Je);b(Ns.$$.fragment,Nt),uu=d(Nt),Yt=s(Nt,"P",{});var yr=r(Yt);fu=i(yr,"The "),pr=s(yr,"A",{href:!0});var g_=r(pr);gu=i(g_,"TFMPNetForTokenClassification"),g_.forEach(t),_u=i(yr," forward method, overrides the "),Ra=s(yr,"CODE",{});var __=r(Ra);vu=i(__,"__call__"),__.forEach(t),ku=i(yr," special method."),yr.forEach(t),Tu=d(Nt),b(Lo.$$.fragment,Nt),Mu=d(Nt),Ua=s(Nt,"P",{});var v_=r(Ua);bu=i(v_,"Example:"),v_.forEach(t),wu=d(Nt),b($s.$$.fragment,Nt),Nt.forEach(t),rt.forEach(t),Oi=d(o),Zt=s(o,"H2",{class:!0});var xl=r(Zt);Ao=s(xl,"A",{id:!0,class:!0,href:!0});var k_=r(Ao);Va=s(k_,"SPAN",{});var T_=r(Va);b(ys.$$.fragment,T_),T_.forEach(t),k_.forEach(t),Pu=d(xl),Ka=s(xl,"SPAN",{});var M_=r(Ka);Nu=i(M_,"TFMPNetForQuestionAnswering"),M_.forEach(t),xl.forEach(t),Wi=d(o),Ce=s(o,"DIV",{class:!0});var at=r(Ce);b(Fs.$$.fragment,at),$u=d(at),eo=s(at,"P",{});var Fr=r(eo);yu=i(Fr,`MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ja=s(Fr,"CODE",{});var b_=r(Ja);Fu=i(b_,"span start logits"),b_.forEach(t),zu=i(Fr," and "),Xa=s(Fr,"CODE",{});var w_=r(Xa);Eu=i(w_,"span end logits"),w_.forEach(t),qu=i(Fr,")."),Fr.forEach(t),Cu=d(at),zs=s(at,"P",{});var jl=r(zs);xu=i(jl,"This model inherits from "),hr=s(jl,"A",{href:!0});var P_=r(hr);ju=i(P_,"TFPreTrainedModel"),P_.forEach(t),Lu=i(jl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jl.forEach(t),Au=d(at),Es=s(at,"P",{});var Ll=r(Es);Iu=i(Ll,"This model is also a "),qs=s(Ll,"A",{href:!0,rel:!0});var N_=r(qs);Du=i(N_,"tf.keras.Model"),N_.forEach(t),Su=i(Ll,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ll.forEach(t),Ou=d(at),b(Io.$$.fragment,at),Wu=d(at),Xe=s(at,"DIV",{class:!0});var $t=r(Xe);b(Cs.$$.fragment,$t),Qu=d($t),to=s($t,"P",{});var zr=r(to);Bu=i(zr,"The "),mr=s(zr,"A",{href:!0});var $_=r(mr);Hu=i($_,"TFMPNetForQuestionAnswering"),$_.forEach(t),Ru=i(zr," forward method, overrides the "),Ga=s(zr,"CODE",{});var y_=r(Ga);Uu=i(y_,"__call__"),y_.forEach(t),Vu=i(zr," special method."),zr.forEach(t),Ku=d($t),b(Do.$$.fragment,$t),Ju=d($t),Ya=s($t,"P",{});var F_=r(Ya);Xu=i(F_,"Example:"),F_.forEach(t),Gu=d($t),b(xs.$$.fragment,$t),$t.forEach(t),at.forEach(t),this.h()},h(){h(p,"name","hf:doc:metadata"),h(p,"content",JSON.stringify(Y_)),h(g,"id","mpnet"),h(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(g,"href","#mpnet"),h(f,"class","relative group"),h(J,"id","overview"),h(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(J,"href","#overview"),h(z,"class","relative group"),h(G,"href","https://arxiv.org/abs/2004.09297"),h(G,"rel","nofollow"),h(ge,"href","https://github.com/microsoft/MPNet"),h(ge,"rel","nofollow"),h(oo,"id","transformers.MPNetConfig"),h(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(oo,"href","#transformers.MPNetConfig"),h(fe,"class","relative group"),h(Is,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetModel"),h(Ds,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetModel"),h(Bo,"href","https://huggingface.co/mpnet-base"),h(Bo,"rel","nofollow"),h(Ss,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(Os,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(je,"class","docstring"),h(no,"id","transformers.MPNetTokenizer"),h(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(no,"href","#transformers.MPNetTokenizer"),h(Ft,"class","relative group"),h(Ws,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),h(dt,"class","docstring"),h(so,"class","docstring"),h(ro,"class","docstring"),h(Or,"class","docstring"),h($e,"class","docstring"),h(ao,"id","transformers.MPNetTokenizerFast"),h(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ao,"href","#transformers.MPNetTokenizerFast"),h(zt,"class","relative group"),h(Hs,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),h(io,"class","docstring"),h(Ze,"class","docstring"),h(lo,"id","transformers.MPNetModel"),h(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(lo,"href","#transformers.MPNetModel"),h(Et,"class","relative 
group"),h(Rs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(dn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(dn,"rel","nofollow"),h(Us,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetModel"),h(Oe,"class","docstring"),h(Le,"class","docstring"),h(po,"id","transformers.MPNetForMaskedLM"),h(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(po,"href","#transformers.MPNetForMaskedLM"),h(Ct,"class","relative group"),h(Vs,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMaskedLM"),h(We,"class","docstring"),h(mn,"class","docstring"),h(mo,"id","transformers.MPNetForSequenceClassification"),h(mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(mo,"href","#transformers.MPNetForSequenceClassification"),h(jt,"class","relative group"),h(Ks,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Tn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Tn,"rel","nofollow"),h(Js,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForSequenceClassification"),h(Pe,"class","docstring"),h(Ae,"class","docstring"),h(fo,"id","transformers.MPNetForMultipleChoice"),h(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(fo,"href","#transformers.MPNetForMultipleChoice"),h(At,"class","relative group"),h(Xs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Fn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Fn,"rel","nofollow"),h(Gs,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForMultipleChoice"),h(Qe,"class","docstring"),h(Ie,"class","docstring"),h(_o,"id","transformers.MPNetForTokenClassification"),h(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(_o,"href","#transformers.MPNetForTokenClassification"),h(Dt,"class","relative group"),h(Ys,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Ln,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Ln,"rel","nofollow"),h(Zs,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForTokenClassification"),h(Be,"class","docstring"),h(De,"class","docstring"),h(ko,"id","transformers.MPNetForQuestionAnswering"),h(ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ko,"href","#transformers.MPNetForQuestionAnswering"),h(Ot,"class","relative group"),h(er,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Qn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Qn,"rel","nofollow"),h(tr,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.MPNetForQuestionAnswering"),h(He,"class","docstring"),h(Se,"class","docstring"),h(Mo,"id","transformers.TFMPNetModel"),h(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Mo,"href","#transformers.TFMPNetModel"),h(Bt,"class","relative group"),h(or,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Jn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Jn,"rel","nofollow"),h(nr,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetModel"),h(Re,"class","docstring"),h(ye,"class","docstring"),h(Po,"id","transformers.TFMPNetForMaskedLM"),h(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Po,"href","#transformers.TFMPNetForMaskedLM"),h(Rt,"class","relative group"),h(sr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(ns,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(ns,"rel","nofollow"),h(rr,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMaskedLM"),h(Ue,"class","docstring"),h(Fe,"class","docstring"),h(yo,"id","transformers.TFMPNetForSequenceClassification"),h(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(yo,"href","#transformers.TFMPNetForSequenceClassification"),h(Vt,"class","relative group"),h(ar,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(cs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(cs,"rel","nofollow"),h(ir,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForSequenceClassification"),h(Ve,"class","docstring"),h(ze,"class","docstring"),h(Eo,"id","transformers.TFMPNetForMultipleChoice"),h(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Eo,"href","#transformers.TFMPNetForMultipleChoice"),h(Jt,"class","relative group"),h(lr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(_s,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(_s,"rel","nofollow"),h(dr,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForMultipleChoice"),h(Ke,"class","docstring"),h(Ee,"class","docstring"),h(xo,"id","transformers.TFMPNetForTokenClassification"),h(xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(xo,"href","#transformers.TFMPNetForTokenClassification"),h(Gt,"class","relative group"),h(cr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Ps,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Ps,"rel","nofollow"),h(pr,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForTokenClassification"),h(Je,"class","docstring"),h(qe,"class","docstring"),h(Ao,"id","transformers.TFMPNetForQuestionAnswering"),h(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ao,"href","#transformers.TFMPNetForQuestionAnswering"),h(Zt,"class","relative 
group"),h(hr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(qs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(qs,"rel","nofollow"),h(mr,"href","/docs/transformers/v4.15.0/en/model_doc/mpnet#transformers.TFMPNetForQuestionAnswering"),h(Xe,"class","docstring"),h(Ce,"class","docstring")},m(o,u){e(document.head,p),m(o,y,u),m(o,f,u),e(f,g),e(g,k),w(v,k,null),e(f,_),e(f,F),e(F,le),m(o,U,u),m(o,z,u),e(z,J),e(J,S),w(X,S,null),e(z,de),e(z,O),e(O,ce),m(o,se,u),m(o,R,u),e(R,A),e(R,G),e(G,V),e(R,E),m(o,q,u),m(o,Y,u),e(Y,B),m(o,re,u),m(o,Z,u),e(Z,H),m(o,ae,u),m(o,ee,u),e(ee,C),e(C,pe),m(o,W,u),m(o,te,u),e(te,he),m(o,j,u),m(o,oe,u),e(oe,x),e(x,me),e(x,c),e(c,T),e(x,K),e(x,_e),e(_e,ke),e(x,I),e(x,ve),e(ve,Te),e(x,Me),m(o,L,u),m(o,D,u),e(D,be),e(D,ge),e(ge,ue),e(D,we),m(o,ie,u),m(o,fe,u),e(fe,oo),e(oo,Er),w(Wo,Er,null),e(fe,Al),e(fe,qr),e(qr,Il),m(o,mi,u),m(o,je,u),w(Qo,je,null),e(je,Dl),e(je,it),e(it,Sl),e(it,Is),e(Is,Ol),e(it,Wl),e(it,Ds),e(Ds,Ql),e(it,Bl),e(it,Bo),e(Bo,Hl),e(it,Rl),e(je,Ul),e(je,yt),e(yt,Vl),e(yt,Ss),e(Ss,Kl),e(yt,Jl),e(yt,Os),e(Os,Xl),e(yt,Gl),e(je,Yl),e(je,Cr),e(Cr,Zl),e(je,ed),w(Ho,je,null),m(o,ui,u),m(o,Ft,u),e(Ft,no),e(no,xr),w(Ro,xr,null),e(Ft,td),e(Ft,jr),e(jr,od),m(o,fi,u),m(o,$e,u),w(Uo,$e,null),e($e,nd),e($e,Vo),e(Vo,sd),e(Vo,Ws),e(Ws,rd),e(Vo,ad),e($e,id),e($e,dt),w(Ko,dt,null),e(dt,ld),e(dt,Lr),e(Lr,dd),e(dt,cd),e(dt,Jo),e(Jo,Qs),e(Qs,pd),e(Qs,Ar),e(Ar,hd),e(Jo,md),e(Jo,Bs),e(Bs,ud),e(Bs,Ir),e(Ir,fd),e($e,gd),e($e,so),w(Xo,so,null),e(so,_d),e(so,Go),e(Go,vd),e(Go,Dr),e(Dr,kd),e(Go,Td),e($e,Md),e($e,ro),w(Yo,ro,null),e(ro,bd),e(ro,Sr),e(Sr,wd),e($e,Pd),e($e,Or),m(o,gi,u),m(o,zt,u),e(zt,ao),e(ao,Wr),w(Zo,Wr,null),e(zt,Nd),e(zt,Qr),e(Qr,$d),m(o,_i,u),m(o,Ze,u),w(en,Ze,null),e(Ze,yd),e(Ze,tn),e(tn,Fd),e(tn,Br),e(Br,zd),e(tn,Ed),e(Ze,qd),e(Ze,on),e(on,Cd),e(on,Hs),e(Hs,xd),e(on,jd),e(Ze,Ld),e(Ze,io),w(nn,io,null),e(io,Ad),e(io,Hr),e(Hr,Id),m(o,vi,u),m(o,Et,u),e(Et,lo),e(lo,Rr),w(sn,Rr,null),e(Et,Dd),e(Et,Ur),e(Ur,Sd),m(o,ki,u),m(o,Le,u),w(rn,Le,null),e(Le,Od),e(Le,Vr),e(Vr,Wd),e(Le,Qd),e(Le,an),e(an,Bd),e(an,Rs),e(Rs,Hd),e(an,Rd),e(Le,Ud),e(Le,ln),e(ln,Vd),e(ln,dn),e(dn,Kd),e(ln,Jd),e(Le,Xd),e(Le,Oe),w(cn,Oe,null),e(Oe,Gd),e(Oe,qt),e(qt,Yd),e(qt,Us),e(Us,Zd),e(qt,ec),e(qt,Kr),e(Kr,tc),e(qt,oc),e(Oe,nc),w(co,Oe,null),e(Oe,sc),e(Oe,Jr),e(Jr,rc),e(Oe,ac),w(pn,Oe,null),m(o,Ti,u),m(o,Ct,u),e(Ct,po),e(po,Xr),w(hn,Xr,null),e(Ct,ic),e(Ct,Gr),e(Gr,lc),m(o,Mi,u),m(o,mn,u),e(mn,We),w(un,We,null),e(We,dc),e(We,xt),e(xt,cc),e(xt,Vs),e(Vs,pc),e(xt,hc),e(xt,Yr),e(Yr,mc),e(xt,uc),e(We,fc),w(ho,We,null),e(We,gc),e(We,Zr),e(Zr,_c),e(We,vc),w(fn,We,null),m(o,bi,u),m(o,jt,u),e(jt,mo),e(mo,ea),w(gn,ea,null),e(jt,kc),e(jt,ta),e(ta,Tc),m(o,wi,u),m(o,Ae,u),w(_n,Ae,null),e(Ae,Mc),e(Ae,oa),e(oa,bc),e(Ae,wc),e(Ae,vn),e(vn,Pc),e(vn,Ks),e(Ks,Nc),e(vn,$c),e(Ae,yc),e(Ae,kn),e(kn,Fc),e(kn,Tn),e(Tn,zc),e(kn,Ec),e(Ae,qc),e(Ae,Pe),w(Mn,Pe,null),e(Pe,Cc),e(Pe,Lt),e(Lt,xc),e(Lt,Js),e(Js,jc),e(Lt,Lc),e(Lt,na),e(na,Ac),e(Lt,Ic),e(Pe,Dc),w(uo,Pe,null),e(Pe,Sc),e(Pe,sa),e(sa,Oc),e(Pe,Wc),w(bn,Pe,null),e(Pe,Qc),e(Pe,ra),e(ra,Bc),e(Pe,Hc),w(wn,Pe,null),m(o,Pi,u),m(o,At,u),e(At,fo),e(fo,aa),w(Pn,aa,null),e(At,Rc),e(At,ia),e(ia,Uc),m(o,Ni,u),m(o,Ie,u),w(Nn,Ie,null),e(Ie,Vc),e(Ie,la),e(la,Kc),e(Ie,Jc),e(Ie,$n),e($n,Xc),e($n,Xs),e(Xs,Gc),e($n,Yc),e(Ie,Zc),e(Ie,yn),e(yn,ep),e(yn,Fn),e(Fn,tp),e(yn,op),e(Ie,np),e(Ie,Qe),w(zn,Qe,null),e(Qe,sp),e(Qe,It),e(It,rp),e(It,Gs),e(Gs,ap),e(It,ip),e(It,da),e(da,lp),e(It,dp),e(Qe,cp),w(go,Qe,null),e(Qe,pp),e(Qe,ca),e(ca,hp)
,e(Qe,mp),w(En,Qe,null),m(o,$i,u),m(o,Dt,u),e(Dt,_o),e(_o,pa),w(qn,pa,null),e(Dt,up),e(Dt,ha),e(ha,fp),m(o,yi,u),m(o,De,u),w(Cn,De,null),e(De,gp),e(De,ma),e(ma,_p),e(De,vp),e(De,xn),e(xn,kp),e(xn,Ys),e(Ys,Tp),e(xn,Mp),e(De,bp),e(De,jn),e(jn,wp),e(jn,Ln),e(Ln,Pp),e(jn,Np),e(De,$p),e(De,Be),w(An,Be,null),e(Be,yp),e(Be,St),e(St,Fp),e(St,Zs),e(Zs,zp),e(St,Ep),e(St,ua),e(ua,qp),e(St,Cp),e(Be,xp),w(vo,Be,null),e(Be,jp),e(Be,fa),e(fa,Lp),e(Be,Ap),w(In,Be,null),m(o,Fi,u),m(o,Ot,u),e(Ot,ko),e(ko,ga),w(Dn,ga,null),e(Ot,Ip),e(Ot,_a),e(_a,Dp),m(o,zi,u),m(o,Se,u),w(Sn,Se,null),e(Se,Sp),e(Se,Wt),e(Wt,Op),e(Wt,va),e(va,Wp),e(Wt,Qp),e(Wt,ka),e(ka,Bp),e(Wt,Hp),e(Se,Rp),e(Se,On),e(On,Up),e(On,er),e(er,Vp),e(On,Kp),e(Se,Jp),e(Se,Wn),e(Wn,Xp),e(Wn,Qn),e(Qn,Gp),e(Wn,Yp),e(Se,Zp),e(Se,He),w(Bn,He,null),e(He,eh),e(He,Qt),e(Qt,th),e(Qt,tr),e(tr,oh),e(Qt,nh),e(Qt,Ta),e(Ta,sh),e(Qt,rh),e(He,ah),w(To,He,null),e(He,ih),e(He,Ma),e(Ma,lh),e(He,dh),w(Hn,He,null),m(o,Ei,u),m(o,Bt,u),e(Bt,Mo),e(Mo,ba),w(Rn,ba,null),e(Bt,ch),e(Bt,wa),e(wa,ph),m(o,qi,u),m(o,ye,u),w(Un,ye,null),e(ye,hh),e(ye,Pa),e(Pa,mh),e(ye,uh),e(ye,Vn),e(Vn,fh),e(Vn,or),e(or,gh),e(Vn,_h),e(ye,vh),e(ye,Kn),e(Kn,kh),e(Kn,Jn),e(Jn,Th),e(Kn,Mh),e(ye,bh),w(bo,ye,null),e(ye,wh),e(ye,Re),w(Xn,Re,null),e(Re,Ph),e(Re,Ht),e(Ht,Nh),e(Ht,nr),e(nr,$h),e(Ht,yh),e(Ht,Na),e(Na,Fh),e(Ht,zh),e(Re,Eh),w(wo,Re,null),e(Re,qh),e(Re,$a),e($a,Ch),e(Re,xh),w(Gn,Re,null),m(o,Ci,u),m(o,Rt,u),e(Rt,Po),e(Po,ya),w(Yn,ya,null),e(Rt,jh),e(Rt,Fa),e(Fa,Lh),m(o,xi,u),m(o,Fe,u),w(Zn,Fe,null),e(Fe,Ah),e(Fe,es),e(es,Ih),e(es,za),e(za,Dh),e(es,Sh),e(Fe,Oh),e(Fe,ts),e(ts,Wh),e(ts,sr),e(sr,Qh),e(ts,Bh),e(Fe,Hh),e(Fe,os),e(os,Rh),e(os,ns),e(ns,Uh),e(os,Vh),e(Fe,Kh),w(No,Fe,null),e(Fe,Jh),e(Fe,Ue),w(ss,Ue,null),e(Ue,Xh),e(Ue,Ut),e(Ut,Gh),e(Ut,rr),e(rr,Yh),e(Ut,Zh),e(Ut,Ea),e(Ea,em),e(Ut,tm),e(Ue,om),w($o,Ue,null),e(Ue,nm),e(Ue,qa),e(qa,sm),e(Ue,rm),w(rs,Ue,null),m(o,ji,u),m(o,Vt,u),e(Vt,yo),e(yo,Ca),w(as,Ca,null),e(Vt,am),e(Vt,xa),e(xa,im),m(o,Li,u),m(o,ze,u),w(is,ze,null),e(ze,lm),e(ze,ja),e(ja,dm),e(ze,cm),e(ze,ls),e(ls,pm),e(ls,ar),e(ar,hm),e(ls,mm),e(ze,um),e(ze,ds),e(ds,fm),e(ds,cs),e(cs,gm),e(ds,_m),e(ze,vm),w(Fo,ze,null),e(ze,km),e(ze,Ve),w(ps,Ve,null),e(Ve,Tm),e(Ve,Kt),e(Kt,Mm),e(Kt,ir),e(ir,bm),e(Kt,wm),e(Kt,La),e(La,Pm),e(Kt,Nm),e(Ve,$m),w(zo,Ve,null),e(Ve,ym),e(Ve,Aa),e(Aa,Fm),e(Ve,zm),w(hs,Ve,null),m(o,Ai,u),m(o,Jt,u),e(Jt,Eo),e(Eo,Ia),w(ms,Ia,null),e(Jt,Em),e(Jt,Da),e(Da,qm),m(o,Ii,u),m(o,Ee,u),w(us,Ee,null),e(Ee,Cm),e(Ee,Sa),e(Sa,xm),e(Ee,jm),e(Ee,fs),e(fs,Lm),e(fs,lr),e(lr,Am),e(fs,Im),e(Ee,Dm),e(Ee,gs),e(gs,Sm),e(gs,_s),e(_s,Om),e(gs,Wm),e(Ee,Qm),w(qo,Ee,null),e(Ee,Bm),e(Ee,Ke),w(vs,Ke,null),e(Ke,Hm),e(Ke,Xt),e(Xt,Rm),e(Xt,dr),e(dr,Um),e(Xt,Vm),e(Xt,Oa),e(Oa,Km),e(Xt,Jm),e(Ke,Xm),w(Co,Ke,null),e(Ke,Gm),e(Ke,Wa),e(Wa,Ym),e(Ke,Zm),w(ks,Ke,null),m(o,Di,u),m(o,Gt,u),e(Gt,xo),e(xo,Qa),w(Ts,Qa,null),e(Gt,eu),e(Gt,Ba),e(Ba,tu),m(o,Si,u),m(o,qe,u),w(Ms,qe,null),e(qe,ou),e(qe,Ha),e(Ha,nu),e(qe,su),e(qe,bs),e(bs,ru),e(bs,cr),e(cr,au),e(bs,iu),e(qe,lu),e(qe,ws),e(ws,du),e(ws,Ps),e(Ps,cu),e(ws,pu),e(qe,hu),w(jo,qe,null),e(qe,mu),e(qe,Je),w(Ns,Je,null),e(Je,uu),e(Je,Yt),e(Yt,fu),e(Yt,pr),e(pr,gu),e(Yt,_u),e(Yt,Ra),e(Ra,vu),e(Yt,ku),e(Je,Tu),w(Lo,Je,null),e(Je,Mu),e(Je,Ua),e(Ua,bu),e(Je,wu),w($s,Je,null),m(o,Oi,u),m(o,Zt,u),e(Zt,Ao),e(Ao,Va),w(ys,Va,null),e(Zt,Pu),e(Zt,Ka),e(Ka,Nu),m(o,Wi,u),m(o,Ce,u),w(Fs,Ce,null),e(Ce,$u),e(Ce,eo),e(eo,yu),e(eo,Ja),e(Ja,Fu),e(eo,zu),e(eo,Xa),e(Xa,Eu),e(eo,qu),e(Ce,Cu),e(Ce,zs),e(zs,xu),e(zs,hr),e(hr,ju),e(zs,Lu),e(Ce,Au),e(Ce,Es),e(Es,Iu),e(Es,qs),e(qs,Du),e(Es
,Su),e(Ce,Ou),w(Io,Ce,null),e(Ce,Wu),e(Ce,Xe),w(Cs,Xe,null),e(Xe,Qu),e(Xe,to),e(to,Bu),e(to,mr),e(mr,Hu),e(to,Ru),e(to,Ga),e(Ga,Uu),e(to,Vu),e(Xe,Ku),w(Do,Xe,null),e(Xe,Ju),e(Xe,Ya),e(Ya,Xu),e(Xe,Gu),w(xs,Xe,null),Qi=!0},p(o,[u]){const js={};u&2&&(js.$$scope={dirty:u,ctx:o}),co.$set(js);const Za={};u&2&&(Za.$$scope={dirty:u,ctx:o}),ho.$set(Za);const ei={};u&2&&(ei.$$scope={dirty:u,ctx:o}),uo.$set(ei);const ti={};u&2&&(ti.$$scope={dirty:u,ctx:o}),go.$set(ti);const Ls={};u&2&&(Ls.$$scope={dirty:u,ctx:o}),vo.$set(Ls);const oi={};u&2&&(oi.$$scope={dirty:u,ctx:o}),To.$set(oi);const ni={};u&2&&(ni.$$scope={dirty:u,ctx:o}),bo.$set(ni);const si={};u&2&&(si.$$scope={dirty:u,ctx:o}),wo.$set(si);const As={};u&2&&(As.$$scope={dirty:u,ctx:o}),No.$set(As);const ri={};u&2&&(ri.$$scope={dirty:u,ctx:o}),$o.$set(ri);const ai={};u&2&&(ai.$$scope={dirty:u,ctx:o}),Fo.$set(ai);const ii={};u&2&&(ii.$$scope={dirty:u,ctx:o}),zo.$set(ii);const li={};u&2&&(li.$$scope={dirty:u,ctx:o}),qo.$set(li);const di={};u&2&&(di.$$scope={dirty:u,ctx:o}),Co.$set(di);const ci={};u&2&&(ci.$$scope={dirty:u,ctx:o}),jo.$set(ci);const pi={};u&2&&(pi.$$scope={dirty:u,ctx:o}),Lo.$set(pi);const lt={};u&2&&(lt.$$scope={dirty:u,ctx:o}),Io.$set(lt);const hi={};u&2&&(hi.$$scope={dirty:u,ctx:o}),Do.$set(hi)},i(o){Qi||(P(v.$$.fragment,o),P(X.$$.fragment,o),P(Wo.$$.fragment,o),P(Qo.$$.fragment,o),P(Ho.$$.fragment,o),P(Ro.$$.fragment,o),P(Uo.$$.fragment,o),P(Ko.$$.fragment,o),P(Xo.$$.fragment,o),P(Yo.$$.fragment,o),P(Zo.$$.fragment,o),P(en.$$.fragment,o),P(nn.$$.fragment,o),P(sn.$$.fragment,o),P(rn.$$.fragment,o),P(cn.$$.fragment,o),P(co.$$.fragment,o),P(pn.$$.fragment,o),P(hn.$$.fragment,o),P(un.$$.fragment,o),P(ho.$$.fragment,o),P(fn.$$.fragment,o),P(gn.$$.fragment,o),P(_n.$$.fragment,o),P(Mn.$$.fragment,o),P(uo.$$.fragment,o),P(bn.$$.fragment,o),P(wn.$$.fragment,o),P(Pn.$$.fragment,o),P(Nn.$$.fragment,o),P(zn.$$.fragment,o),P(go.$$.fragment,o),P(En.$$.fragment,o),P(qn.$$.fragment,o),P(Cn.$$.fragment,o),P(An.$$.fragment,o),P(vo.$$.fragment,o),P(In.$$.fragment,o),P(Dn.$$.fragment,o),P(Sn.$$.fragment,o),P(Bn.$$.fragment,o),P(To.$$.fragment,o),P(Hn.$$.fragment,o),P(Rn.$$.fragment,o),P(Un.$$.fragment,o),P(bo.$$.fragment,o),P(Xn.$$.fragment,o),P(wo.$$.fragment,o),P(Gn.$$.fragment,o),P(Yn.$$.fragment,o),P(Zn.$$.fragment,o),P(No.$$.fragment,o),P(ss.$$.fragment,o),P($o.$$.fragment,o),P(rs.$$.fragment,o),P(as.$$.fragment,o),P(is.$$.fragment,o),P(Fo.$$.fragment,o),P(ps.$$.fragment,o),P(zo.$$.fragment,o),P(hs.$$.fragment,o),P(ms.$$.fragment,o),P(us.$$.fragment,o),P(qo.$$.fragment,o),P(vs.$$.fragment,o),P(Co.$$.fragment,o),P(ks.$$.fragment,o),P(Ts.$$.fragment,o),P(Ms.$$.fragment,o),P(jo.$$.fragment,o),P(Ns.$$.fragment,o),P(Lo.$$.fragment,o),P($s.$$.fragment,o),P(ys.$$.fragment,o),P(Fs.$$.fragment,o),P(Io.$$.fragment,o),P(Cs.$$.fragment,o),P(Do.$$.fragment,o),P(xs.$$.fragment,o),Qi=!0)},o(o){N(v.$$.fragment,o),N(X.$$.fragment,o),N(Wo.$$.fragment,o),N(Qo.$$.fragment,o),N(Ho.$$.fragment,o),N(Ro.$$.fragment,o),N(Uo.$$.fragment,o),N(Ko.$$.fragment,o),N(Xo.$$.fragment,o),N(Yo.$$.fragment,o),N(Zo.$$.fragment,o),N(en.$$.fragment,o),N(nn.$$.fragment,o),N(sn.$$.fragment,o),N(rn.$$.fragment,o),N(cn.$$.fragment,o),N(co.$$.fragment,o),N(pn.$$.fragment,o),N(hn.$$.fragment,o),N(un.$$.fragment,o),N(ho.$$.fragment,o),N(fn.$$.fragment,o),N(gn.$$.fragment,o),N(_n.$$.fragment,o),N(Mn.$$.fragment,o),N(uo.$$.fragment,o),N(bn.$$.fragment,o),N(wn.$$.fragment,o),N(Pn.$$.fragment,o),N(Nn.$$.fragment,o),N(zn.$$.fragment,o),N(go.$$.fragment,o),N(En.$$.fragment,o),N(qn.$$.fragment,o),N
(Cn.$$.fragment,o),N(An.$$.fragment,o),N(vo.$$.fragment,o),N(In.$$.fragment,o),N(Dn.$$.fragment,o),N(Sn.$$.fragment,o),N(Bn.$$.fragment,o),N(To.$$.fragment,o),N(Hn.$$.fragment,o),N(Rn.$$.fragment,o),N(Un.$$.fragment,o),N(bo.$$.fragment,o),N(Xn.$$.fragment,o),N(wo.$$.fragment,o),N(Gn.$$.fragment,o),N(Yn.$$.fragment,o),N(Zn.$$.fragment,o),N(No.$$.fragment,o),N(ss.$$.fragment,o),N($o.$$.fragment,o),N(rs.$$.fragment,o),N(as.$$.fragment,o),N(is.$$.fragment,o),N(Fo.$$.fragment,o),N(ps.$$.fragment,o),N(zo.$$.fragment,o),N(hs.$$.fragment,o),N(ms.$$.fragment,o),N(us.$$.fragment,o),N(qo.$$.fragment,o),N(vs.$$.fragment,o),N(Co.$$.fragment,o),N(ks.$$.fragment,o),N(Ts.$$.fragment,o),N(Ms.$$.fragment,o),N(jo.$$.fragment,o),N(Ns.$$.fragment,o),N(Lo.$$.fragment,o),N($s.$$.fragment,o),N(ys.$$.fragment,o),N(Fs.$$.fragment,o),N(Io.$$.fragment,o),N(Cs.$$.fragment,o),N(Do.$$.fragment,o),N(xs.$$.fragment,o),Qi=!1},d(o){t(p),o&&t(y),o&&t(f),$(v),o&&t(U),o&&t(z),$(X),o&&t(se),o&&t(R),o&&t(q),o&&t(Y),o&&t(re),o&&t(Z),o&&t(ae),o&&t(ee),o&&t(W),o&&t(te),o&&t(j),o&&t(oe),o&&t(L),o&&t(D),o&&t(ie),o&&t(fe),$(Wo),o&&t(mi),o&&t(je),$(Qo),$(Ho),o&&t(ui),o&&t(Ft),$(Ro),o&&t(fi),o&&t($e),$(Uo),$(Ko),$(Xo),$(Yo),o&&t(gi),o&&t(zt),$(Zo),o&&t(_i),o&&t(Ze),$(en),$(nn),o&&t(vi),o&&t(Et),$(sn),o&&t(ki),o&&t(Le),$(rn),$(cn),$(co),$(pn),o&&t(Ti),o&&t(Ct),$(hn),o&&t(Mi),o&&t(mn),$(un),$(ho),$(fn),o&&t(bi),o&&t(jt),$(gn),o&&t(wi),o&&t(Ae),$(_n),$(Mn),$(uo),$(bn),$(wn),o&&t(Pi),o&&t(At),$(Pn),o&&t(Ni),o&&t(Ie),$(Nn),$(zn),$(go),$(En),o&&t($i),o&&t(Dt),$(qn),o&&t(yi),o&&t(De),$(Cn),$(An),$(vo),$(In),o&&t(Fi),o&&t(Ot),$(Dn),o&&t(zi),o&&t(Se),$(Sn),$(Bn),$(To),$(Hn),o&&t(Ei),o&&t(Bt),$(Rn),o&&t(qi),o&&t(ye),$(Un),$(bo),$(Xn),$(wo),$(Gn),o&&t(Ci),o&&t(Rt),$(Yn),o&&t(xi),o&&t(Fe),$(Zn),$(No),$(ss),$($o),$(rs),o&&t(ji),o&&t(Vt),$(as),o&&t(Li),o&&t(ze),$(is),$(Fo),$(ps),$(zo),$(hs),o&&t(Ai),o&&t(Jt),$(ms),o&&t(Ii),o&&t(Ee),$(us),$(qo),$(vs),$(Co),$(ks),o&&t(Di),o&&t(Gt),$(Ts),o&&t(Si),o&&t(qe),$(Ms),$(jo),$(Ns),$(Lo),$($s),o&&t(Oi),o&&t(Zt),$(ys),o&&t(Wi),o&&t(Ce),$(Fs),$(Io),$(Cs),$(Do),$(xs)}}}const Y_={local:"mpnet",sections:[{local:"overview",title:"Overview"},{local:"transformers.MPNetConfig",title:"MPNetConfig"},{local:"transformers.MPNetTokenizer",title:"MPNetTokenizer"},{local:"transformers.MPNetTokenizerFast",title:"MPNetTokenizerFast"},{local:"transformers.MPNetModel",title:"MPNetModel"},{local:"transformers.MPNetForMaskedLM",title:"MPNetForMaskedLM"},{local:"transformers.MPNetForSequenceClassification",title:"MPNetForSequenceClassification"},{local:"transformers.MPNetForMultipleChoice",title:"MPNetForMultipleChoice"},{local:"transformers.MPNetForTokenClassification",title:"MPNetForTokenClassification"},{local:"transformers.MPNetForQuestionAnswering",title:"MPNetForQuestionAnswering"},{local:"transformers.TFMPNetModel",title:"TFMPNetModel"},{local:"transformers.TFMPNetForMaskedLM",title:"TFMPNetForMaskedLM"},{local:"transformers.TFMPNetForSequenceClassification",title:"TFMPNetForSequenceClassification"},{local:"transformers.TFMPNetForMultipleChoice",title:"TFMPNetForMultipleChoice"},{local:"transformers.TFMPNetForTokenClassification",title:"TFMPNetForTokenClassification"},{local:"transformers.TFMPNetForQuestionAnswering",title:"TFMPNetForQuestionAnswering"}],title:"MPNet"};function Z_(Q,p,y){let{fw:f}=p;return Q.$$set=g=>{"fw"in g&&y(0,f=g.fw)},[f]}class av extends z_{constructor(p){super();E_(this,p,Z_,G_,q_,{fw:0})}}export{av as default,Y_ as metadata};
9,940
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/splinter.mdx-575e7a5f.js
import{S as di,i as ci,s as pi,e as o,k as d,w as m,t as a,L as hi,c as s,d as n,m as c,a as r,x as g,h as i,b as l,J as e,g as h,y as _,q as k,o as v,B as w}from"../../chunks/vendor-b1433968.js";import{T as li}from"../../chunks/Tip-c3840994.js";import{D as Q}from"../../chunks/Docstring-ff504c58.js";import{C as Gr}from"../../chunks/CodeBlock-a320dbd7.js";import{I as at}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ui(we){let u,q,f,b,A;return{c(){u=o("p"),q=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),b=a("Module"),A=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=s(S,"P",{});var T=r(u);q=i(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var L=r(f);b=i(L,"Module"),L.forEach(n),A=i(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(n)},m(S,T){h(S,u,T),e(u,q),e(u,f),e(f,b),e(u,A)},d(S){S&&n(u)}}}function fi(we){let u,q,f,b,A;return{c(){u=o("p"),q=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),b=a("Module"),A=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=s(S,"P",{});var T=r(u);q=i(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var L=r(f);b=i(L,"Module"),L.forEach(n),A=i(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(n)},m(S,T){h(S,u,T),e(u,q),e(u,f),e(f,b),e(u,A)},d(S){S&&n(u)}}}function mi(we){let u,q,f,b,A,S,T,L,no,vn,J,se,It,be,oo,Ct,so,wn,re,ro,Se,ao,io,bn,it,lo,Sn,lt,co,yn,dt,po,Tn,C,ye,ho,ct,uo,fo,mo,N,go,pt,_o,ko,ht,vo,wo,Dt,bo,So,yo,Te,To,Lt,zo,qo,$o,P,Eo,Nt,xo,Ao,Qt,Po,Mo,jt,Fo,Io,Ot,Co,Do,zn,D,Lo,ze,No,Qo,qe,jo,Oo,$e,Wo,Bo,qn,K,ae,Wt,Ee,Uo,Bt,Ho,$n,z,xe,Vo,R,Jo,ut,Ko,Ro,Ae,Go,Xo,Yo,G,Zo,ft,es,ts,mt,ns,os,ss,Ut,rs,as,Pe,En,X,ie,Ht,Me,is,Vt,ls,xn,y,Fe,ds,Jt,cs,ps,Ie,hs,gt,us,fs,ms,j,Ce,gs,Kt,_s,ks,De,_t,vs,Rt,ws,bs,kt,Ss,Gt,ys,Ts,le,Le,zs,Ne,qs,Xt,$s,Es,xs,O,Qe,As,vt,Ps,wt,Ms,Fs,Yt,Is,Cs,Zt,An,Y,de,en,je,Ds,tn,Ls,Pn,M,Oe,Ns,We,Qs,nn,js,Os,Ws,Be,Bs,bt,Us,Hs,Vs,W,Ue,Js,on,Ks,Rs,He,St,Gs,sn,Xs,Ys,yt,Zs,rn,er,Mn,Z,ce,an,Ve,tr,ln,nr,Fn,F,Je,or,Ke,sr,Re,rr,ar,ir,Ge,lr,dn,dr,cr,pr,$,Xe,hr,ee,ur,Tt,fr,mr,cn,gr,_r,kr,pe,vr,pn,wr,br,Ye,In,te,he,hn,Ze,Sr,un,yr,Cn,I,et,Tr,ne,zr,fn,qr,$r,mn,Er,xr,Ar,tt,Pr,nt,Mr,Fr,Ir,E,ot,Cr,oe,Dr,zt,Lr,Nr,gn,Qr,jr,Or,ue,Wr,_n,Br,Ur,st,Dn;return S=new at({}),be=new at({}),Ee=new at({}),xe=new Q({props:{name:"class transformers.SplinterConfig",anchor:"transformers.SplinterConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 
1e-12"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 0"},{name:"question_token_id",val:" = 104"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/configuration_splinter.py#L32",parametersDescription:[{anchor:"transformers.SplinterConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the Splinter model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterModel">SplinterModel</a>.`,name:"vocab_size"},{anchor:"transformers.SplinterConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.SplinterConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.SplinterConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SplinterConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.SplinterConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SplinterConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.SplinterConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.SplinterConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.SplinterConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterModel">SplinterModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.SplinterConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SplinterConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.SplinterConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"},{anchor:"transformers.SplinterConfig.question_token_id",description:`<strong>question_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 104) &#x2014; The id of the <code>[QUESTION]</code> token.`,name:"question_token_id"}]}}),Pe=new Gr({props:{code:`from transformers import SplinterModel, SplinterConfig # Initializing a Splinter tau/splinter-base style configuration configuration = SplinterConfig() # Initializing a model from the tau/splinter-base style configuration model = SplinterModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SplinterModel, SplinterConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Splinter tau/splinter-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SplinterConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the tau/splinter-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SplinterModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Me=new at({}),Fe=new Q({props:{name:"class transformers.SplinterTokenizer",anchor:"transformers.SplinterTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"question_token",val:" = '[QUESTION]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/tokenization_splinter.py#L75",parametersDescription:[{anchor:"transformers.SplinterTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.SplinterTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.SplinterTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.SplinterTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.SplinterTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.SplinterTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.SplinterTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.SplinterTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.SplinterTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.SplinterTokenizer.question_token",description:`<strong>question_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[QUESTION]&quot;</code>) &#x2014; The token used for constructing question representations.`,name:"question_token"},{anchor:"transformers.SplinterTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),Ce=new Q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.SplinterTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/tokenization_splinter.py#L217",parametersDescription:[{anchor:"transformers.SplinterTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The question token IDs if pad_on_right, else context tokens IDs`,name:"token_ids_0"},{anchor:"transformers.SplinterTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The context token IDs if pad_on_right, else question token IDs`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Le=new Q({props:{name:"get_special_tokens_mask",anchor:"transformers.SplinterTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/tokenization_splinter.py#L249",parametersDescription:[{anchor:"transformers.SplinterTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.SplinterTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.SplinterTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Qe=new 
Q({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.SplinterTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/tokenization_splinter.py#L277",parametersDescription:[{anchor:"transformers.SplinterTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.SplinterTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The token type ids.</p> `,returnType:` <p><code>List[int]</code></p> `}}),je=new at({}),Oe=new Q({props:{name:"class transformers.SplinterTokenizerFast",anchor:"transformers.SplinterTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"question_token",val:" = '[QUESTION]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/tokenization_splinter_fast.py#L55",parametersDescription:[{anchor:"transformers.SplinterTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.SplinterTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.SplinterTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.SplinterTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.SplinterTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.SplinterTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.SplinterTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.SplinterTokenizerFast.question_token",description:`<strong>question_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[QUESTION]&quot;</code>) &#x2014; The token used for constructing question representations.`,name:"question_token"},{anchor:"transformers.SplinterTokenizerFast.clean_text",description:`<strong>clean_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one.`,name:"clean_text"},{anchor:"transformers.SplinterTokenizerFast.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT). wordpieces_prefix &#x2014; (<code>str</code>, <em>optional</em>, defaults to <code>&quot;##&quot;</code>): The prefix for subwords.`,name:"tokenize_chinese_chars"}]}}),Ue=new Q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.SplinterTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/tokenization_splinter_fast.py#L153",parametersDescription:[{anchor:"transformers.SplinterTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The question token IDs if pad_on_right, else context tokens IDs`,name:"token_ids_0"},{anchor:"transformers.SplinterTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The context token IDs if pad_on_right, else question token IDs`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ve=new at({}),Je=new Q({props:{name:"class transformers.SplinterModel",anchor:"transformers.SplinterModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/modeling_splinter.py#L606",parametersDescription:[{anchor:"transformers.SplinterModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig">SplinterConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xe=new Q({props:{name:"forward",anchor:"transformers.SplinterModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/modeling_splinter.py#L637",parametersDescription:[{anchor:"transformers.SplinterModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterTokenizer">SplinterTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SplinterModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SplinterModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SplinterModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SplinterModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SplinterModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SplinterModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SplinterModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SplinterModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SplinterModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.SplinterModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.SplinterModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.SplinterModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig" >SplinterConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> 
<p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),pe=new li({props:{$$slots:{default:[ui]},$$scope:{ctx:we}}}),Ye=new Gr({props:{code:`from transformers import SplinterTokenizer, SplinterModel import torch tokenizer = SplinterTokenizer.from_pretrained('tau/splinter-base') model = SplinterModel.from_pretrained('tau/splinter-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SplinterTokenizer, SplinterModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SplinterTokenizer.from_pretrained(<span class="hljs-string">&#x27;tau/splinter-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SplinterModel.from_pretrained(<span class="hljs-string">&#x27;tau/splinter-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ze=new at({}),et=new Q({props:{name:"class transformers.SplinterForQuestionAnswering",anchor:"transformers.SplinterForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/modeling_splinter.py#L828",parametersDescription:[{anchor:"transformers.SplinterForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig">SplinterConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ot=new Q({props:{name:"forward",anchor:"transformers.SplinterForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"question_positions",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/splinter/modeling_splinter.py#L839",parametersDescription:[{anchor:"transformers.SplinterForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterTokenizer">SplinterTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SplinterForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SplinterForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SplinterForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SplinterForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SplinterForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SplinterForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SplinterForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SplinterForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SplinterForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.SplinterForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"},{anchor:"transformers.SplinterForQuestionAnswering.forward.question_positions",description:`<strong>question_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_questions)</code>, <em>optional</em>) &#x2014; The positions of all question tokens. If given, start_logits and end_logits will be of shape <code>(batch_size, num_questions, sequence_length)</code>. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape <code>(batch_size, sequence_length)</code>.`,name:"question_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterConfig" >SplinterConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ue=new li({props:{$$slots:{default:[fi]},$$scope:{ctx:we}}}),st=new Gr({props:{code:`from transformers import SplinterTokenizer, SplinterForQuestionAnswering import torch tokenizer = SplinterTokenizer.from_pretrained('tau/splinter-base') model = SplinterForQuestionAnswering.from_pretrained('tau/splinter-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, 
return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SplinterTokenizer, SplinterForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SplinterTokenizer.from_pretrained(<span class="hljs-string">&#x27;tau/splinter-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SplinterForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;tau/splinter-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){u=o("meta"),q=d(),f=o("h1"),b=o("a"),A=o("span"),m(S.$$.fragment),T=d(),L=o("span"),no=a("Splinter"),vn=d(),J=o("h2"),se=o("a"),It=o("span"),m(be.$$.fragment),oo=d(),Ct=o("span"),so=a("Overview"),wn=d(),re=o("p"),ro=a("The Splinter model was proposed in "),Se=o("a"),ao=a("Few-Shot Question Answering by Pretraining Span Selection"),io=a(` by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy. Splinter is an encoder-only transformer (similar to BERT) pretrained using the recurring span selection task on a large corpus comprising Wikipedia and the Toronto Book Corpus.`),bn=d(),it=o("p"),lo=a("The abstract from the paper is the following:"),Sn=d(),lt=o("p"),co=a(`In several question answering benchmarks, pretrained models have reached human parity through fine-tuning on an order of 100,000 annotated questions and answers. We explore the more realistic few-shot setting, where only a few hundred training examples are available, and observe that standard models perform poorly, highlighting the discrepancy between current pretraining objectives and question answering. We propose a new pretraining scheme tailored for question answering: recurring span selection. Given a passage with multiple sets of recurring spans, we mask in each set all recurring spans but one, and ask the model to select the correct span in the passage for each masked span. Masked spans are replaced with a special token, viewed as a question representation, that is later used during fine-tuning to select the answer span. 
The resulting model obtains surprisingly good results on multiple benchmarks (e.g., 72.7 F1 on SQuAD with only 128 training examples), while maintaining competitive performance in the high-resource setting.`),yn=d(),dt=o("p"),po=a("Tips:"),Tn=d(),C=o("ul"),ye=o("li"),ho=a(`Splinter was trained to predict answers spans conditioned on a special [QUESTION] token. These tokens contextualize to question representations which are used to predict the answers. This layer is called QASS, and is the default behaviour in the `),ct=o("a"),uo=a("SplinterForQuestionAnswering"),fo=a(" class. Therefore:"),mo=d(),N=o("li"),go=a("Use "),pt=o("a"),_o=a("SplinterTokenizer"),ko=a(" (rather than "),ht=o("a"),vo=a("BertTokenizer"),wo=a(`), as it already contains this special token. Also, its default behavior is to use this token when two sequences are given (for example, in the `),Dt=o("em"),bo=a("run_qa.py"),So=a(" script)."),yo=d(),Te=o("li"),To=a("If you plan on using Splinter outside "),Lt=o("em"),zo=a("run_qa.py"),qo=a(`, please keep in mind the question token - it might be important for the success of your model, especially in a few-shot setting.`),$o=d(),P=o("li"),Eo=a(`Please note there are two different checkpoints for each size of Splinter. Both are basically the same, except that one also has the pretrained wights of the QASS layer (`),Nt=o("em"),xo=a("tau/splinter-base-qass"),Ao=a(" and "),Qt=o("em"),Po=a("tau/splinter-large-qass"),Mo=a(`) and one doesn\u2019t (`),jt=o("em"),Fo=a("tau/splinter-base"),Io=a(" and "),Ot=o("em"),Co=a("tau/splinter-large"),Do=a(`). This is done to support randomly initializing this layer at fine-tuning, as it is shown to yield better results for some cases in the paper.`),zn=d(),D=o("p"),Lo=a("This model was contributed by "),ze=o("a"),No=a("yuvalkirstain"),Qo=a(" and "),qe=o("a"),jo=a("oriram"),Oo=a(". The original code can be found "),$e=o("a"),Wo=a("here"),Bo=a("."),qn=d(),K=o("h2"),ae=o("a"),Wt=o("span"),m(Ee.$$.fragment),Uo=d(),Bt=o("span"),Ho=a("SplinterConfig"),$n=d(),z=o("div"),m(xe.$$.fragment),Vo=d(),R=o("p"),Jo=a("This is the configuration class to store the configuration of a "),ut=o("a"),Ko=a("SplinterModel"),Ro=a(`. It is used to instantiate an Splinter model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Splinter `),Ae=o("a"),Go=a("tau/splinter-base"),Xo=a(" architecture."),Yo=d(),G=o("p"),Zo=a("Configuration objects inherit from "),ft=o("a"),es=a("PretrainedConfig"),ts=a(` and can be used to control the model outputs. Read the documentation from `),mt=o("a"),ns=a("PretrainedConfig"),os=a(" for more information."),ss=d(),Ut=o("p"),rs=a("Example:"),as=d(),m(Pe.$$.fragment),En=d(),X=o("h2"),ie=o("a"),Ht=o("span"),m(Me.$$.fragment),is=d(),Vt=o("span"),ls=a("SplinterTokenizer"),xn=d(),y=o("div"),m(Fe.$$.fragment),ds=d(),Jt=o("p"),cs=a("Construct a Splinter tokenizer. Based on WordPiece."),ps=d(),Ie=o("p"),hs=a("This tokenizer inherits from "),gt=o("a"),us=a("PreTrainedTokenizer"),fs=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ms=d(),j=o("div"),m(Ce.$$.fragment),gs=d(),Kt=o("p"),_s=a(`Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special tokens. 
A Splinter sequence has the following format:`),ks=d(),De=o("ul"),_t=o("li"),vs=a("single sequence: "),Rt=o("code"),ws=a("[CLS] X [SEP]"),bs=d(),kt=o("li"),Ss=a("pair of sequences for question answering: "),Gt=o("code"),ys=a("[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]"),Ts=d(),le=o("div"),m(Le.$$.fragment),zs=d(),Ne=o("p"),qs=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Xt=o("code"),$s=a("prepare_for_model"),Es=a(" method."),xs=d(),O=o("div"),m(Qe.$$.fragment),As=d(),vt=o("p"),Ps=a("Create the token type IDs corresponding to the sequences passed. "),wt=o("a"),Ms=a("What are token type IDs?"),Fs=d(),Yt=o("p"),Is=a("Should be overridden in a subclass if the model has a special way of building those."),Cs=d(),Zt=o("div"),An=d(),Y=o("h2"),de=o("a"),en=o("span"),m(je.$$.fragment),Ds=d(),tn=o("span"),Ls=a("SplinterTokenizerFast"),Pn=d(),M=o("div"),m(Oe.$$.fragment),Ns=d(),We=o("p"),Qs=a("Construct a \u201Cfast\u201D Splinter tokenizer (backed by HuggingFace\u2019s "),nn=o("em"),js=a("tokenizers"),Os=a(" library). Based on WordPiece."),Ws=d(),Be=o("p"),Bs=a("This tokenizer inherits from "),bt=o("a"),Us=a("PreTrainedTokenizerFast"),Hs=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Vs=d(),W=o("div"),m(Ue.$$.fragment),Js=d(),on=o("p"),Ks=a(`Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special tokens. A Splinter sequence has the following format:`),Rs=d(),He=o("ul"),St=o("li"),Gs=a("single sequence: "),sn=o("code"),Xs=a("[CLS] X [SEP]"),Ys=d(),yt=o("li"),Zs=a("pair of sequences for question answering: "),rn=o("code"),er=a("[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]"),Mn=d(),Z=o("h2"),ce=o("a"),an=o("span"),m(Ve.$$.fragment),tr=d(),ln=o("span"),nr=a("SplinterModel"),Fn=d(),F=o("div"),m(Je.$$.fragment),or=d(),Ke=o("p"),sr=a(`The bare Splinter Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Re=o("a"),rr=a("torch.nn.Module"),ar=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ir=d(),Ge=o("p"),lr=a("The model is an encoder (with only self-attention) following the architecture described in "),dn=o("code"),dr=a("Attention is all you need <https://arxiv.org/abs/1706.03762>"),cr=a(`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),pr=d(),$=o("div"),m(Xe.$$.fragment),hr=d(),ee=o("p"),ur=a("The "),Tt=o("a"),fr=a("SplinterModel"),mr=a(" forward method, overrides the "),cn=o("code"),gr=a("__call__"),_r=a(" special method."),kr=d(),m(pe.$$.fragment),vr=d(),pn=o("p"),wr=a("Example:"),br=d(),m(Ye.$$.fragment),In=d(),te=o("h2"),he=o("a"),hn=o("span"),m(Ze.$$.fragment),Sr=d(),un=o("span"),yr=a("SplinterForQuestionAnswering"),Cn=d(),I=o("div"),m(et.$$.fragment),Tr=d(),ne=o("p"),zr=a(`Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),fn=o("code"),qr=a("span start logits"),$r=a(" and "),mn=o("code"),Er=a("span end logits"),xr=a(")."),Ar=d(),tt=o("p"),Pr=a("This model is a PyTorch "),nt=o("a"),Mr=a("torch.nn.Module"),Fr=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ir=d(),E=o("div"),m(ot.$$.fragment),Cr=d(),oe=o("p"),Dr=a("The "),zt=o("a"),Lr=a("SplinterForQuestionAnswering"),Nr=a(" forward method, overrides the "),gn=o("code"),Qr=a("__call__"),jr=a(" special method."),Or=d(),m(ue.$$.fragment),Wr=d(),_n=o("p"),Br=a("Example:"),Ur=d(),m(st.$$.fragment),this.h()},l(t){const p=hi('[data-svelte="svelte-1phssyn"]',document.head);u=s(p,"META",{name:!0,content:!0}),p.forEach(n),q=c(t),f=s(t,"H1",{class:!0});var rt=r(f);b=s(rt,"A",{id:!0,class:!0,href:!0});var kn=r(b);A=s(kn,"SPAN",{});var Xr=r(A);g(S.$$.fragment,Xr),Xr.forEach(n),kn.forEach(n),T=c(rt),L=s(rt,"SPAN",{});var Yr=r(L);no=i(Yr,"Splinter"),Yr.forEach(n),rt.forEach(n),vn=c(t),J=s(t,"H2",{class:!0});var Ln=r(J);se=s(Ln,"A",{id:!0,class:!0,href:!0});var Zr=r(se);It=s(Zr,"SPAN",{});var ea=r(It);g(be.$$.fragment,ea),ea.forEach(n),Zr.forEach(n),oo=c(Ln),Ct=s(Ln,"SPAN",{});var ta=r(Ct);so=i(ta,"Overview"),ta.forEach(n),Ln.forEach(n),wn=c(t),re=s(t,"P",{});var Nn=r(re);ro=i(Nn,"The Splinter model was proposed in "),Se=s(Nn,"A",{href:!0,rel:!0});var na=r(Se);ao=i(na,"Few-Shot Question Answering by Pretraining Span Selection"),na.forEach(n),io=i(Nn,` by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy. Splinter is an encoder-only transformer (similar to BERT) pretrained using the recurring span selection task on a large corpus comprising Wikipedia and the Toronto Book Corpus.`),Nn.forEach(n),bn=c(t),it=s(t,"P",{});var oa=r(it);lo=i(oa,"The abstract from the paper is the following:"),oa.forEach(n),Sn=c(t),lt=s(t,"P",{});var sa=r(lt);co=i(sa,`In several question answering benchmarks, pretrained models have reached human parity through fine-tuning on an order of 100,000 annotated questions and answers. We explore the more realistic few-shot setting, where only a few hundred training examples are available, and observe that standard models perform poorly, highlighting the discrepancy between current pretraining objectives and question answering. We propose a new pretraining scheme tailored for question answering: recurring span selection. Given a passage with multiple sets of recurring spans, we mask in each set all recurring spans but one, and ask the model to select the correct span in the passage for each masked span. Masked spans are replaced with a special token, viewed as a question representation, that is later used during fine-tuning to select the answer span. The resulting model obtains surprisingly good results on multiple benchmarks (e.g., 72.7 F1 on SQuAD with only 128 training examples), while maintaining competitive performance in the high-resource setting.`),sa.forEach(n),yn=c(t),dt=s(t,"P",{});var ra=r(dt);po=i(ra,"Tips:"),ra.forEach(n),Tn=c(t),C=s(t,"UL",{});var fe=r(C);ye=s(fe,"LI",{});var Qn=r(ye);ho=i(Qn,`Splinter was trained to predict answers spans conditioned on a special [QUESTION] token. These tokens contextualize to question representations which are used to predict the answers. This layer is called QASS, and is the default behaviour in the `),ct=s(Qn,"A",{href:!0});var aa=r(ct);uo=i(aa,"SplinterForQuestionAnswering"),aa.forEach(n),fo=i(Qn," class. 
Therefore:"),Qn.forEach(n),mo=c(fe),N=s(fe,"LI",{});var me=r(N);go=i(me,"Use "),pt=s(me,"A",{href:!0});var ia=r(pt);_o=i(ia,"SplinterTokenizer"),ia.forEach(n),ko=i(me," (rather than "),ht=s(me,"A",{href:!0});var la=r(ht);vo=i(la,"BertTokenizer"),la.forEach(n),wo=i(me,`), as it already contains this special token. Also, its default behavior is to use this token when two sequences are given (for example, in the `),Dt=s(me,"EM",{});var da=r(Dt);bo=i(da,"run_qa.py"),da.forEach(n),So=i(me," script)."),me.forEach(n),yo=c(fe),Te=s(fe,"LI",{});var jn=r(Te);To=i(jn,"If you plan on using Splinter outside "),Lt=s(jn,"EM",{});var ca=r(Lt);zo=i(ca,"run_qa.py"),ca.forEach(n),qo=i(jn,`, please keep in mind the question token - it might be important for the success of your model, especially in a few-shot setting.`),jn.forEach(n),$o=c(fe),P=s(fe,"LI",{});var B=r(P);Eo=i(B,`Please note there are two different checkpoints for each size of Splinter. Both are basically the same, except that one also has the pretrained wights of the QASS layer (`),Nt=s(B,"EM",{});var pa=r(Nt);xo=i(pa,"tau/splinter-base-qass"),pa.forEach(n),Ao=i(B," and "),Qt=s(B,"EM",{});var ha=r(Qt);Po=i(ha,"tau/splinter-large-qass"),ha.forEach(n),Mo=i(B,`) and one doesn\u2019t (`),jt=s(B,"EM",{});var ua=r(jt);Fo=i(ua,"tau/splinter-base"),ua.forEach(n),Io=i(B," and "),Ot=s(B,"EM",{});var fa=r(Ot);Co=i(fa,"tau/splinter-large"),fa.forEach(n),Do=i(B,`). This is done to support randomly initializing this layer at fine-tuning, as it is shown to yield better results for some cases in the paper.`),B.forEach(n),fe.forEach(n),zn=c(t),D=s(t,"P",{});var ge=r(D);Lo=i(ge,"This model was contributed by "),ze=s(ge,"A",{href:!0,rel:!0});var ma=r(ze);No=i(ma,"yuvalkirstain"),ma.forEach(n),Qo=i(ge," and "),qe=s(ge,"A",{href:!0,rel:!0});var ga=r(qe);jo=i(ga,"oriram"),ga.forEach(n),Oo=i(ge,". The original code can be found "),$e=s(ge,"A",{href:!0,rel:!0});var _a=r($e);Wo=i(_a,"here"),_a.forEach(n),Bo=i(ge,"."),ge.forEach(n),qn=c(t),K=s(t,"H2",{class:!0});var On=r(K);ae=s(On,"A",{id:!0,class:!0,href:!0});var ka=r(ae);Wt=s(ka,"SPAN",{});var va=r(Wt);g(Ee.$$.fragment,va),va.forEach(n),ka.forEach(n),Uo=c(On),Bt=s(On,"SPAN",{});var wa=r(Bt);Ho=i(wa,"SplinterConfig"),wa.forEach(n),On.forEach(n),$n=c(t),z=s(t,"DIV",{class:!0});var U=r(z);g(xe.$$.fragment,U),Vo=c(U),R=s(U,"P",{});var qt=r(R);Jo=i(qt,"This is the configuration class to store the configuration of a "),ut=s(qt,"A",{href:!0});var ba=r(ut);Ko=i(ba,"SplinterModel"),ba.forEach(n),Ro=i(qt,`. It is used to instantiate an Splinter model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Splinter `),Ae=s(qt,"A",{href:!0,rel:!0});var Sa=r(Ae);Go=i(Sa,"tau/splinter-base"),Sa.forEach(n),Xo=i(qt," architecture."),qt.forEach(n),Yo=c(U),G=s(U,"P",{});var $t=r(G);Zo=i($t,"Configuration objects inherit from "),ft=s($t,"A",{href:!0});var ya=r(ft);es=i(ya,"PretrainedConfig"),ya.forEach(n),ts=i($t,` and can be used to control the model outputs. 
Read the documentation from `),mt=s($t,"A",{href:!0});var Ta=r(mt);ns=i(Ta,"PretrainedConfig"),Ta.forEach(n),os=i($t," for more information."),$t.forEach(n),ss=c(U),Ut=s(U,"P",{});var za=r(Ut);rs=i(za,"Example:"),za.forEach(n),as=c(U),g(Pe.$$.fragment,U),U.forEach(n),En=c(t),X=s(t,"H2",{class:!0});var Wn=r(X);ie=s(Wn,"A",{id:!0,class:!0,href:!0});var qa=r(ie);Ht=s(qa,"SPAN",{});var $a=r(Ht);g(Me.$$.fragment,$a),$a.forEach(n),qa.forEach(n),is=c(Wn),Vt=s(Wn,"SPAN",{});var Ea=r(Vt);ls=i(Ea,"SplinterTokenizer"),Ea.forEach(n),Wn.forEach(n),xn=c(t),y=s(t,"DIV",{class:!0});var x=r(y);g(Fe.$$.fragment,x),ds=c(x),Jt=s(x,"P",{});var xa=r(Jt);cs=i(xa,"Construct a Splinter tokenizer. Based on WordPiece."),xa.forEach(n),ps=c(x),Ie=s(x,"P",{});var Bn=r(Ie);hs=i(Bn,"This tokenizer inherits from "),gt=s(Bn,"A",{href:!0});var Aa=r(gt);us=i(Aa,"PreTrainedTokenizer"),Aa.forEach(n),fs=i(Bn,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Bn.forEach(n),ms=c(x),j=s(x,"DIV",{class:!0});var Et=r(j);g(Ce.$$.fragment,Et),gs=c(Et),Kt=s(Et,"P",{});var Pa=r(Kt);_s=i(Pa,`Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special tokens. A Splinter sequence has the following format:`),Pa.forEach(n),ks=c(Et),De=s(Et,"UL",{});var Un=r(De);_t=s(Un,"LI",{});var Hr=r(_t);vs=i(Hr,"single sequence: "),Rt=s(Hr,"CODE",{});var Ma=r(Rt);ws=i(Ma,"[CLS] X [SEP]"),Ma.forEach(n),Hr.forEach(n),bs=c(Un),kt=s(Un,"LI",{});var Vr=r(kt);Ss=i(Vr,"pair of sequences for question answering: "),Gt=s(Vr,"CODE",{});var Fa=r(Gt);ys=i(Fa,"[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]"),Fa.forEach(n),Vr.forEach(n),Un.forEach(n),Et.forEach(n),Ts=c(x),le=s(x,"DIV",{class:!0});var Hn=r(le);g(Le.$$.fragment,Hn),zs=c(Hn),Ne=s(Hn,"P",{});var Vn=r(Ne);qs=i(Vn,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Xt=s(Vn,"CODE",{});var Ia=r(Xt);$s=i(Ia,"prepare_for_model"),Ia.forEach(n),Es=i(Vn," method."),Vn.forEach(n),Hn.forEach(n),xs=c(x),O=s(x,"DIV",{class:!0});var xt=r(O);g(Qe.$$.fragment,xt),As=c(xt),vt=s(xt,"P",{});var Jr=r(vt);Ps=i(Jr,"Create the token type IDs corresponding to the sequences passed. "),wt=s(Jr,"A",{href:!0});var Ca=r(wt);Ms=i(Ca,"What are token type IDs?"),Ca.forEach(n),Jr.forEach(n),Fs=c(xt),Yt=s(xt,"P",{});var Da=r(Yt);Is=i(Da,"Should be overridden in a subclass if the model has a special way of building those."),Da.forEach(n),xt.forEach(n),Cs=c(x),Zt=s(x,"DIV",{class:!0}),r(Zt).forEach(n),x.forEach(n),An=c(t),Y=s(t,"H2",{class:!0});var Jn=r(Y);de=s(Jn,"A",{id:!0,class:!0,href:!0});var La=r(de);en=s(La,"SPAN",{});var Na=r(en);g(je.$$.fragment,Na),Na.forEach(n),La.forEach(n),Ds=c(Jn),tn=s(Jn,"SPAN",{});var Qa=r(tn);Ls=i(Qa,"SplinterTokenizerFast"),Qa.forEach(n),Jn.forEach(n),Pn=c(t),M=s(t,"DIV",{class:!0});var _e=r(M);g(Oe.$$.fragment,_e),Ns=c(_e),We=s(_e,"P",{});var Kn=r(We);Qs=i(Kn,"Construct a \u201Cfast\u201D Splinter tokenizer (backed by HuggingFace\u2019s "),nn=s(Kn,"EM",{});var ja=r(nn);js=i(ja,"tokenizers"),ja.forEach(n),Os=i(Kn," library). Based on WordPiece."),Kn.forEach(n),Ws=c(_e),Be=s(_e,"P",{});var Rn=r(Be);Bs=i(Rn,"This tokenizer inherits from "),bt=s(Rn,"A",{href:!0});var Oa=r(bt);Us=i(Oa,"PreTrainedTokenizerFast"),Oa.forEach(n),Hs=i(Rn,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Rn.forEach(n),Vs=c(_e),W=s(_e,"DIV",{class:!0});var At=r(W);g(Ue.$$.fragment,At),Js=c(At),on=s(At,"P",{});var Wa=r(on);Ks=i(Wa,`Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special tokens. A Splinter sequence has the following format:`),Wa.forEach(n),Rs=c(At),He=s(At,"UL",{});var Gn=r(He);St=s(Gn,"LI",{});var Kr=r(St);Gs=i(Kr,"single sequence: "),sn=s(Kr,"CODE",{});var Ba=r(sn);Xs=i(Ba,"[CLS] X [SEP]"),Ba.forEach(n),Kr.forEach(n),Ys=c(Gn),yt=s(Gn,"LI",{});var Rr=r(yt);Zs=i(Rr,"pair of sequences for question answering: "),rn=s(Rr,"CODE",{});var Ua=r(rn);er=i(Ua,"[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]"),Ua.forEach(n),Rr.forEach(n),Gn.forEach(n),At.forEach(n),_e.forEach(n),Mn=c(t),Z=s(t,"H2",{class:!0});var Xn=r(Z);ce=s(Xn,"A",{id:!0,class:!0,href:!0});var Ha=r(ce);an=s(Ha,"SPAN",{});var Va=r(an);g(Ve.$$.fragment,Va),Va.forEach(n),Ha.forEach(n),tr=c(Xn),ln=s(Xn,"SPAN",{});var Ja=r(ln);nr=i(Ja,"SplinterModel"),Ja.forEach(n),Xn.forEach(n),Fn=c(t),F=s(t,"DIV",{class:!0});var ke=r(F);g(Je.$$.fragment,ke),or=c(ke),Ke=s(ke,"P",{});var Yn=r(Ke);sr=i(Yn,`The bare Splinter Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Re=s(Yn,"A",{href:!0,rel:!0});var Ka=r(Re);rr=i(Ka,"torch.nn.Module"),Ka.forEach(n),ar=i(Yn,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yn.forEach(n),ir=c(ke),Ge=s(ke,"P",{});var Zn=r(Ge);lr=i(Zn,"The model is an encoder (with only self-attention) following the architecture described in "),dn=s(Zn,"CODE",{});var Ra=r(dn);dr=i(Ra,"Attention is all you need <https://arxiv.org/abs/1706.03762>"),Ra.forEach(n),cr=i(Zn,`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Zn.forEach(n),pr=c(ke),$=s(ke,"DIV",{class:!0});var H=r($);g(Xe.$$.fragment,H),hr=c(H),ee=s(H,"P",{});var Pt=r(ee);ur=i(Pt,"The "),Tt=s(Pt,"A",{href:!0});var Ga=r(Tt);fr=i(Ga,"SplinterModel"),Ga.forEach(n),mr=i(Pt," forward method, overrides the "),cn=s(Pt,"CODE",{});var Xa=r(cn);gr=i(Xa,"__call__"),Xa.forEach(n),_r=i(Pt," special method."),Pt.forEach(n),kr=c(H),g(pe.$$.fragment,H),vr=c(H),pn=s(H,"P",{});var Ya=r(pn);wr=i(Ya,"Example:"),Ya.forEach(n),br=c(H),g(Ye.$$.fragment,H),H.forEach(n),ke.forEach(n),In=c(t),te=s(t,"H2",{class:!0});var eo=r(te);he=s(eo,"A",{id:!0,class:!0,href:!0});var Za=r(he);hn=s(Za,"SPAN",{});var ei=r(hn);g(Ze.$$.fragment,ei),ei.forEach(n),Za.forEach(n),Sr=c(eo),un=s(eo,"SPAN",{});var ti=r(un);yr=i(ti,"SplinterForQuestionAnswering"),ti.forEach(n),eo.forEach(n),Cn=c(t),I=s(t,"DIV",{class:!0});var ve=r(I);g(et.$$.fragment,ve),Tr=c(ve),ne=s(ve,"P",{});var Mt=r(ne);zr=i(Mt,`Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),fn=s(Mt,"CODE",{});var ni=r(fn);qr=i(ni,"span start logits"),ni.forEach(n),$r=i(Mt," and "),mn=s(Mt,"CODE",{});var oi=r(mn);Er=i(oi,"span end logits"),oi.forEach(n),xr=i(Mt,")."),Mt.forEach(n),Ar=c(ve),tt=s(ve,"P",{});var to=r(tt);Pr=i(to,"This model is a PyTorch "),nt=s(to,"A",{href:!0,rel:!0});var si=r(nt);Mr=i(si,"torch.nn.Module"),si.forEach(n),Fr=i(to,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),to.forEach(n),Ir=c(ve),E=s(ve,"DIV",{class:!0});var V=r(E);g(ot.$$.fragment,V),Cr=c(V),oe=s(V,"P",{});var Ft=r(oe);Dr=i(Ft,"The "),zt=s(Ft,"A",{href:!0});var ri=r(zt);Lr=i(ri,"SplinterForQuestionAnswering"),ri.forEach(n),Nr=i(Ft," forward method, overrides the "),gn=s(Ft,"CODE",{});var ai=r(gn);Qr=i(ai,"__call__"),ai.forEach(n),jr=i(Ft," special method."),Ft.forEach(n),Or=c(V),g(ue.$$.fragment,V),Wr=c(V),_n=s(V,"P",{});var ii=r(_n);Br=i(ii,"Example:"),ii.forEach(n),Ur=c(V),g(st.$$.fragment,V),V.forEach(n),ve.forEach(n),this.h()},h(){l(u,"name","hf:doc:metadata"),l(u,"content",JSON.stringify(gi)),l(b,"id","splinter"),l(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(b,"href","#splinter"),l(f,"class","relative group"),l(se,"id","overview"),l(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(se,"href","#overview"),l(J,"class","relative group"),l(Se,"href","https://arxiv.org/abs/2101.00438"),l(Se,"rel","nofollow"),l(ct,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterForQuestionAnswering"),l(pt,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterTokenizer"),l(ht,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),l(ze,"href","https://huggingface.co/yuvalkirstain"),l(ze,"rel","nofollow"),l(qe,"href","https://huggingface.co/oriram"),l(qe,"rel","nofollow"),l($e,"href","https://github.com/oriram/splinter"),l($e,"rel","nofollow"),l(ae,"id","transformers.SplinterConfig"),l(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ae,"href","#transformers.SplinterConfig"),l(K,"class","relative group"),l(ut,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterModel"),l(Ae,"href","https://huggingface.co/tau/splinter-base"),l(Ae,"rel","nofollow"),l(ft,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(mt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(z,"class","docstring"),l(ie,"id","transformers.SplinterTokenizer"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#transformers.SplinterTokenizer"),l(X,"class","relative group"),l(gt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(j,"class","docstring"),l(le,"class","docstring"),l(wt,"href","../glossary#token-type-ids"),l(O,"class","docstring"),l(Zt,"class","docstring"),l(y,"class","docstring"),l(de,"id","transformers.SplinterTokenizerFast"),l(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(de,"href","#transformers.SplinterTokenizerFast"),l(Y,"class","relative 
group"),l(bt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(W,"class","docstring"),l(M,"class","docstring"),l(ce,"id","transformers.SplinterModel"),l(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ce,"href","#transformers.SplinterModel"),l(Z,"class","relative group"),l(Re,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Re,"rel","nofollow"),l(Tt,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterModel"),l($,"class","docstring"),l(F,"class","docstring"),l(he,"id","transformers.SplinterForQuestionAnswering"),l(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(he,"href","#transformers.SplinterForQuestionAnswering"),l(te,"class","relative group"),l(nt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(nt,"rel","nofollow"),l(zt,"href","/docs/transformers/v4.15.0/en/model_doc/splinter#transformers.SplinterForQuestionAnswering"),l(E,"class","docstring"),l(I,"class","docstring")},m(t,p){e(document.head,u),h(t,q,p),h(t,f,p),e(f,b),e(b,A),_(S,A,null),e(f,T),e(f,L),e(L,no),h(t,vn,p),h(t,J,p),e(J,se),e(se,It),_(be,It,null),e(J,oo),e(J,Ct),e(Ct,so),h(t,wn,p),h(t,re,p),e(re,ro),e(re,Se),e(Se,ao),e(re,io),h(t,bn,p),h(t,it,p),e(it,lo),h(t,Sn,p),h(t,lt,p),e(lt,co),h(t,yn,p),h(t,dt,p),e(dt,po),h(t,Tn,p),h(t,C,p),e(C,ye),e(ye,ho),e(ye,ct),e(ct,uo),e(ye,fo),e(C,mo),e(C,N),e(N,go),e(N,pt),e(pt,_o),e(N,ko),e(N,ht),e(ht,vo),e(N,wo),e(N,Dt),e(Dt,bo),e(N,So),e(C,yo),e(C,Te),e(Te,To),e(Te,Lt),e(Lt,zo),e(Te,qo),e(C,$o),e(C,P),e(P,Eo),e(P,Nt),e(Nt,xo),e(P,Ao),e(P,Qt),e(Qt,Po),e(P,Mo),e(P,jt),e(jt,Fo),e(P,Io),e(P,Ot),e(Ot,Co),e(P,Do),h(t,zn,p),h(t,D,p),e(D,Lo),e(D,ze),e(ze,No),e(D,Qo),e(D,qe),e(qe,jo),e(D,Oo),e(D,$e),e($e,Wo),e(D,Bo),h(t,qn,p),h(t,K,p),e(K,ae),e(ae,Wt),_(Ee,Wt,null),e(K,Uo),e(K,Bt),e(Bt,Ho),h(t,$n,p),h(t,z,p),_(xe,z,null),e(z,Vo),e(z,R),e(R,Jo),e(R,ut),e(ut,Ko),e(R,Ro),e(R,Ae),e(Ae,Go),e(R,Xo),e(z,Yo),e(z,G),e(G,Zo),e(G,ft),e(ft,es),e(G,ts),e(G,mt),e(mt,ns),e(G,os),e(z,ss),e(z,Ut),e(Ut,rs),e(z,as),_(Pe,z,null),h(t,En,p),h(t,X,p),e(X,ie),e(ie,Ht),_(Me,Ht,null),e(X,is),e(X,Vt),e(Vt,ls),h(t,xn,p),h(t,y,p),_(Fe,y,null),e(y,ds),e(y,Jt),e(Jt,cs),e(y,ps),e(y,Ie),e(Ie,hs),e(Ie,gt),e(gt,us),e(Ie,fs),e(y,ms),e(y,j),_(Ce,j,null),e(j,gs),e(j,Kt),e(Kt,_s),e(j,ks),e(j,De),e(De,_t),e(_t,vs),e(_t,Rt),e(Rt,ws),e(De,bs),e(De,kt),e(kt,Ss),e(kt,Gt),e(Gt,ys),e(y,Ts),e(y,le),_(Le,le,null),e(le,zs),e(le,Ne),e(Ne,qs),e(Ne,Xt),e(Xt,$s),e(Ne,Es),e(y,xs),e(y,O),_(Qe,O,null),e(O,As),e(O,vt),e(vt,Ps),e(vt,wt),e(wt,Ms),e(O,Fs),e(O,Yt),e(Yt,Is),e(y,Cs),e(y,Zt),h(t,An,p),h(t,Y,p),e(Y,de),e(de,en),_(je,en,null),e(Y,Ds),e(Y,tn),e(tn,Ls),h(t,Pn,p),h(t,M,p),_(Oe,M,null),e(M,Ns),e(M,We),e(We,Qs),e(We,nn),e(nn,js),e(We,Os),e(M,Ws),e(M,Be),e(Be,Bs),e(Be,bt),e(bt,Us),e(Be,Hs),e(M,Vs),e(M,W),_(Ue,W,null),e(W,Js),e(W,on),e(on,Ks),e(W,Rs),e(W,He),e(He,St),e(St,Gs),e(St,sn),e(sn,Xs),e(He,Ys),e(He,yt),e(yt,Zs),e(yt,rn),e(rn,er),h(t,Mn,p),h(t,Z,p),e(Z,ce),e(ce,an),_(Ve,an,null),e(Z,tr),e(Z,ln),e(ln,nr),h(t,Fn,p),h(t,F,p),_(Je,F,null),e(F,or),e(F,Ke),e(Ke,sr),e(Ke,Re),e(Re,rr),e(Ke,ar),e(F,ir),e(F,Ge),e(Ge,lr),e(Ge,dn),e(dn,dr),e(Ge,cr),e(F,pr),e(F,$),_(Xe,$,null),e($,hr),e($,ee),e(ee,ur),e(ee,Tt),e(Tt,fr),e(ee,mr),e(ee,cn),e(cn,gr),e(ee,_r),e($,kr),_(pe,$,null),e($,vr),e($,
pn),e(pn,wr),e($,br),_(Ye,$,null),h(t,In,p),h(t,te,p),e(te,he),e(he,hn),_(Ze,hn,null),e(te,Sr),e(te,un),e(un,yr),h(t,Cn,p),h(t,I,p),_(et,I,null),e(I,Tr),e(I,ne),e(ne,zr),e(ne,fn),e(fn,qr),e(ne,$r),e(ne,mn),e(mn,Er),e(ne,xr),e(I,Ar),e(I,tt),e(tt,Pr),e(tt,nt),e(nt,Mr),e(tt,Fr),e(I,Ir),e(I,E),_(ot,E,null),e(E,Cr),e(E,oe),e(oe,Dr),e(oe,zt),e(zt,Lr),e(oe,Nr),e(oe,gn),e(gn,Qr),e(oe,jr),e(E,Or),_(ue,E,null),e(E,Wr),e(E,_n),e(_n,Br),e(E,Ur),_(st,E,null),Dn=!0},p(t,[p]){const rt={};p&2&&(rt.$$scope={dirty:p,ctx:t}),pe.$set(rt);const kn={};p&2&&(kn.$$scope={dirty:p,ctx:t}),ue.$set(kn)},i(t){Dn||(k(S.$$.fragment,t),k(be.$$.fragment,t),k(Ee.$$.fragment,t),k(xe.$$.fragment,t),k(Pe.$$.fragment,t),k(Me.$$.fragment,t),k(Fe.$$.fragment,t),k(Ce.$$.fragment,t),k(Le.$$.fragment,t),k(Qe.$$.fragment,t),k(je.$$.fragment,t),k(Oe.$$.fragment,t),k(Ue.$$.fragment,t),k(Ve.$$.fragment,t),k(Je.$$.fragment,t),k(Xe.$$.fragment,t),k(pe.$$.fragment,t),k(Ye.$$.fragment,t),k(Ze.$$.fragment,t),k(et.$$.fragment,t),k(ot.$$.fragment,t),k(ue.$$.fragment,t),k(st.$$.fragment,t),Dn=!0)},o(t){v(S.$$.fragment,t),v(be.$$.fragment,t),v(Ee.$$.fragment,t),v(xe.$$.fragment,t),v(Pe.$$.fragment,t),v(Me.$$.fragment,t),v(Fe.$$.fragment,t),v(Ce.$$.fragment,t),v(Le.$$.fragment,t),v(Qe.$$.fragment,t),v(je.$$.fragment,t),v(Oe.$$.fragment,t),v(Ue.$$.fragment,t),v(Ve.$$.fragment,t),v(Je.$$.fragment,t),v(Xe.$$.fragment,t),v(pe.$$.fragment,t),v(Ye.$$.fragment,t),v(Ze.$$.fragment,t),v(et.$$.fragment,t),v(ot.$$.fragment,t),v(ue.$$.fragment,t),v(st.$$.fragment,t),Dn=!1},d(t){n(u),t&&n(q),t&&n(f),w(S),t&&n(vn),t&&n(J),w(be),t&&n(wn),t&&n(re),t&&n(bn),t&&n(it),t&&n(Sn),t&&n(lt),t&&n(yn),t&&n(dt),t&&n(Tn),t&&n(C),t&&n(zn),t&&n(D),t&&n(qn),t&&n(K),w(Ee),t&&n($n),t&&n(z),w(xe),w(Pe),t&&n(En),t&&n(X),w(Me),t&&n(xn),t&&n(y),w(Fe),w(Ce),w(Le),w(Qe),t&&n(An),t&&n(Y),w(je),t&&n(Pn),t&&n(M),w(Oe),w(Ue),t&&n(Mn),t&&n(Z),w(Ve),t&&n(Fn),t&&n(F),w(Je),w(Xe),w(pe),w(Ye),t&&n(In),t&&n(te),w(Ze),t&&n(Cn),t&&n(I),w(et),w(ot),w(ue),w(st)}}}const gi={local:"splinter",sections:[{local:"overview",title:"Overview"},{local:"transformers.SplinterConfig",title:"SplinterConfig"},{local:"transformers.SplinterTokenizer",title:"SplinterTokenizer"},{local:"transformers.SplinterTokenizerFast",title:"SplinterTokenizerFast"},{local:"transformers.SplinterModel",title:"SplinterModel"},{local:"transformers.SplinterForQuestionAnswering",title:"SplinterForQuestionAnswering"}],title:"Splinter"};function _i(we,u,q){let{fw:f}=u;return we.$$set=b=>{"fw"in b&&q(0,f=b.fw)},[f]}class Ti extends di{constructor(u){super();ci(this,u,_i,mi,pi,{fw:0})}}export{Ti as default,gi as metadata};
9,941
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/t5v1.1.mdx-c0979b77.js
import{S as $a,i as Pa,s as Aa,e as o,k as h,w as St,t as n,L as La,c as r,d as a,m as d,a as l,x as Gt,h as s,b as i,J as e,g as p,y as jt,K as Ca,q as Nt,o as Dt,B as Bt}from"../../chunks/vendor-b1433968.js";import{I as ka}from"../../chunks/IconCopyLink-7029626d.js";import{C as Ia}from"../../chunks/CodeBlock-a320dbd7.js";import"../../chunks/CopyButton-f65cb278.js";function Oa(De){let _,R,v,m,Q,A,Be,W,Ue,we,x,y,X,L,qe,Y,Fe,Ee,T,Re,C,Ve,He,be,V,Je,xe,I,ye,H,Ke,Te,c,Z,O,Me,S,ze,Qe,We,ee,te,Xe,Ye,ae,oe,Ze,et,re,le,tt,at,ne,g,ot,se,rt,lt,ie,nt,st,fe,it,ft,ke,k,pt,G,ht,dt,$e,J,ct,Pe,u,pe,he,j,ut,vt,de,ce,N,mt,gt,ue,ve,D,_t,wt,me,ge,B,Et,bt,_e,K,U,xt,yt,Ae,$,Tt,M,kt,$t,Le,w,Pt,q,At,Lt,F,Ct,It,Ce;return A=new ka({}),L=new ka({}),I=new Ia({props:{code:`from transformers import T5ForConditionalGeneration model = T5ForConditionalGeneration.from_pretrained('google/t5-v1_1-base'),`,highlighted:`from transformers import T5ForConditionalGeneration model = <span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">T5ForConditionalGeneration</span>.</span></span>from<span class="hljs-constructor">_pretrained(&#x27;<span class="hljs-params">google</span><span class="hljs-operator">/</span><span class="hljs-params">t5</span>-<span class="hljs-params">v1_1</span>-<span class="hljs-params">base</span>&#x27;)</span>`}}),{c(){_=o("meta"),R=h(),v=o("h1"),m=o("a"),Q=o("span"),St(A.$$.fragment),Be=h(),W=o("span"),Ue=n("T5v1.1"),we=h(),x=o("h2"),y=o("a"),X=o("span"),St(L.$$.fragment),qe=h(),Y=o("span"),Fe=n("Overview"),Ee=h(),T=o("p"),Re=n("T5v1.1 was released in the "),C=o("a"),Ve=n("google-research/text-to-text-transfer-transformer"),He=n(` repository by Colin Raffel et al. It\u2019s an improved version of the original T5 model.`),be=h(),V=o("p"),Je=n("One can directly plug in the weights of T5v1.1 into a T5 model, like so:"),xe=h(),St(I.$$.fragment),ye=h(),H=o("p"),Ke=n("T5 Version 1.1 includes the following improvements compared to the original T5 model:"),Te=h(),c=o("ul"),Z=o("li"),O=o("p"),Me=n("GEGLU activation in the feed-forward hidden layer, rather than ReLU. See "),S=o("a"),ze=n("this paper"),Qe=n("."),We=h(),ee=o("li"),te=o("p"),Xe=n("Dropout was turned off in pre-training (quality win). Dropout should be re-enabled during fine-tuning."),Ye=h(),ae=o("li"),oe=o("p"),Ze=n("Pre-trained on C4 only without mixing in the downstream tasks."),et=h(),re=o("li"),le=o("p"),tt=n("No parameter sharing between the embedding and classifier layer."),at=h(),ne=o("li"),g=o("p"),ot=n("\u201Cxl\u201D and \u201Cxxl\u201D replace \u201C3B\u201D and \u201C11B\u201D. The model shapes are a bit different - larger "),se=o("code"),rt=n("d_model"),lt=n(` and smaller `),ie=o("code"),nt=n("num_heads"),st=n(" and "),fe=o("code"),it=n("d_ff"),ft=n("."),ke=h(),k=o("p"),pt=n("Note: T5 Version 1.1 was only pre-trained on "),G=o("a"),ht=n("C4"),dt=n(` excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task, unlike the original T5 model. Since t5v1.1 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. 
If you are doing multi-task fine-tuning, you should use a prefix.`),$e=h(),J=o("p"),ct=n("Google has released the following variants:"),Pe=h(),u=o("ul"),pe=o("li"),he=o("p"),j=o("a"),ut=n("google/t5-v1_1-small"),vt=h(),de=o("li"),ce=o("p"),N=o("a"),mt=n("google/t5-v1_1-base"),gt=h(),ue=o("li"),ve=o("p"),D=o("a"),_t=n("google/t5-v1_1-large"),wt=h(),me=o("li"),ge=o("p"),B=o("a"),Et=n("google/t5-v1_1-xl"),bt=h(),_e=o("li"),K=o("p"),U=o("a"),xt=n("google/t5-v1_1-xxl"),yt=n("."),Ae=h(),$=o("p"),Tt=n("One can refer to "),M=o("a"),kt=n("T5\u2019s documentation page"),$t=n(" for all tips, code examples and notebooks."),Le=h(),w=o("p"),Pt=n("This model was contributed by "),q=o("a"),At=n("patrickvonplaten"),Lt=n(`. The original code can be found `),F=o("a"),Ct=n("here"),It=n("."),this.h()},l(t){const f=La('[data-svelte="svelte-1phssyn"]',document.head);_=r(f,"META",{name:!0,content:!0}),f.forEach(a),R=d(t),v=r(t,"H1",{class:!0});var Ie=l(v);m=r(Ie,"A",{id:!0,class:!0,href:!0});var Ut=l(m);Q=r(Ut,"SPAN",{});var qt=l(Q);Gt(A.$$.fragment,qt),qt.forEach(a),Ut.forEach(a),Be=d(Ie),W=r(Ie,"SPAN",{});var Ft=l(W);Ue=s(Ft,"T5v1.1"),Ft.forEach(a),Ie.forEach(a),we=d(t),x=r(t,"H2",{class:!0});var Oe=l(x);y=r(Oe,"A",{id:!0,class:!0,href:!0});var Rt=l(y);X=r(Rt,"SPAN",{});var Vt=l(X);Gt(L.$$.fragment,Vt),Vt.forEach(a),Rt.forEach(a),qe=d(Oe),Y=r(Oe,"SPAN",{});var Ht=l(Y);Fe=s(Ht,"Overview"),Ht.forEach(a),Oe.forEach(a),Ee=d(t),T=r(t,"P",{});var Se=l(T);Re=s(Se,"T5v1.1 was released in the "),C=r(Se,"A",{href:!0,rel:!0});var Jt=l(C);Ve=s(Jt,"google-research/text-to-text-transfer-transformer"),Jt.forEach(a),He=s(Se,` repository by Colin Raffel et al. It\u2019s an improved version of the original T5 model.`),Se.forEach(a),be=d(t),V=r(t,"P",{});var Kt=l(V);Je=s(Kt,"One can directly plug in the weights of T5v1.1 into a T5 model, like so:"),Kt.forEach(a),xe=d(t),Gt(I.$$.fragment,t),ye=d(t),H=r(t,"P",{});var Mt=l(H);Ke=s(Mt,"T5 Version 1.1 includes the following improvements compared to the original T5 model:"),Mt.forEach(a),Te=d(t),c=r(t,"UL",{});var E=l(c);Z=r(E,"LI",{});var zt=l(Z);O=r(zt,"P",{});var Ge=l(O);Me=s(Ge,"GEGLU activation in the feed-forward hidden layer, rather than ReLU. See "),S=r(Ge,"A",{href:!0,rel:!0});var Qt=l(S);ze=s(Qt,"this paper"),Qt.forEach(a),Qe=s(Ge,"."),Ge.forEach(a),zt.forEach(a),We=d(E),ee=r(E,"LI",{});var Wt=l(ee);te=r(Wt,"P",{});var Xt=l(te);Xe=s(Xt,"Dropout was turned off in pre-training (quality win). Dropout should be re-enabled during fine-tuning."),Xt.forEach(a),Wt.forEach(a),Ye=d(E),ae=r(E,"LI",{});var Yt=l(ae);oe=r(Yt,"P",{});var Zt=l(oe);Ze=s(Zt,"Pre-trained on C4 only without mixing in the downstream tasks."),Zt.forEach(a),Yt.forEach(a),et=d(E),re=r(E,"LI",{});var ea=l(re);le=r(ea,"P",{});var ta=l(le);tt=s(ta,"No parameter sharing between the embedding and classifier layer."),ta.forEach(a),ea.forEach(a),at=d(E),ne=r(E,"LI",{});var aa=l(ne);g=r(aa,"P",{});var P=l(g);ot=s(P,"\u201Cxl\u201D and \u201Cxxl\u201D replace \u201C3B\u201D and \u201C11B\u201D. The model shapes are a bit different - larger "),se=r(P,"CODE",{});var oa=l(se);rt=s(oa,"d_model"),oa.forEach(a),lt=s(P,` and smaller `),ie=r(P,"CODE",{});var ra=l(ie);nt=s(ra,"num_heads"),ra.forEach(a),st=s(P," and "),fe=r(P,"CODE",{});var la=l(fe);it=s(la,"d_ff"),la.forEach(a),ft=s(P,"."),P.forEach(a),aa.forEach(a),E.forEach(a),ke=d(t),k=r(t,"P",{});var je=l(k);pt=s(je,"Note: T5 Version 1.1 was only pre-trained on "),G=r(je,"A",{href:!0,rel:!0});var na=l(G);ht=s(na,"C4"),na.forEach(a),dt=s(je,` excluding any supervised training. 
Therefore, this model has to be fine-tuned before it is useable on a downstream task, unlike the original T5 model. Since t5v1.1 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix.`),je.forEach(a),$e=d(t),J=r(t,"P",{});var sa=l(J);ct=s(sa,"Google has released the following variants:"),sa.forEach(a),Pe=d(t),u=r(t,"UL",{});var b=l(u);pe=r(b,"LI",{});var ia=l(pe);he=r(ia,"P",{});var fa=l(he);j=r(fa,"A",{href:!0,rel:!0});var pa=l(j);ut=s(pa,"google/t5-v1_1-small"),pa.forEach(a),fa.forEach(a),ia.forEach(a),vt=d(b),de=r(b,"LI",{});var ha=l(de);ce=r(ha,"P",{});var da=l(ce);N=r(da,"A",{href:!0,rel:!0});var ca=l(N);mt=s(ca,"google/t5-v1_1-base"),ca.forEach(a),da.forEach(a),ha.forEach(a),gt=d(b),ue=r(b,"LI",{});var ua=l(ue);ve=r(ua,"P",{});var va=l(ve);D=r(va,"A",{href:!0,rel:!0});var ma=l(D);_t=s(ma,"google/t5-v1_1-large"),ma.forEach(a),va.forEach(a),ua.forEach(a),wt=d(b),me=r(b,"LI",{});var ga=l(me);ge=r(ga,"P",{});var _a=l(ge);B=r(_a,"A",{href:!0,rel:!0});var wa=l(B);Et=s(wa,"google/t5-v1_1-xl"),wa.forEach(a),_a.forEach(a),ga.forEach(a),bt=d(b),_e=r(b,"LI",{});var Ea=l(_e);K=r(Ea,"P",{});var Ot=l(K);U=r(Ot,"A",{href:!0,rel:!0});var ba=l(U);xt=s(ba,"google/t5-v1_1-xxl"),ba.forEach(a),yt=s(Ot,"."),Ot.forEach(a),Ea.forEach(a),b.forEach(a),Ae=d(t),$=r(t,"P",{});var Ne=l($);Tt=s(Ne,"One can refer to "),M=r(Ne,"A",{href:!0});var xa=l(M);kt=s(xa,"T5\u2019s documentation page"),xa.forEach(a),$t=s(Ne," for all tips, code examples and notebooks."),Ne.forEach(a),Le=d(t),w=r(t,"P",{});var z=l(w);Pt=s(z,"This model was contributed by "),q=r(z,"A",{href:!0,rel:!0});var ya=l(q);At=s(ya,"patrickvonplaten"),ya.forEach(a),Lt=s(z,`. The original code can be found `),F=r(z,"A",{href:!0,rel:!0});var Ta=l(F);Ct=s(Ta,"here"),Ta.forEach(a),It=s(z,"."),z.forEach(a),this.h()},h(){i(_,"name","hf:doc:metadata"),i(_,"content",JSON.stringify(Sa)),i(m,"id","t5v11"),i(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(m,"href","#t5v11"),i(v,"class","relative group"),i(y,"id","overview"),i(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(y,"href","#overview"),i(x,"class","relative 
group"),i(C,"href","https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511"),i(C,"rel","nofollow"),i(S,"href","https://arxiv.org/abs/2002.05202"),i(S,"rel","nofollow"),i(G,"href","https://huggingface.co/datasets/c4"),i(G,"rel","nofollow"),i(j,"href","https://huggingface.co/google/t5-v1_1-small"),i(j,"rel","nofollow"),i(N,"href","https://huggingface.co/google/t5-v1_1-base"),i(N,"rel","nofollow"),i(D,"href","https://huggingface.co/google/t5-v1_1-large"),i(D,"rel","nofollow"),i(B,"href","https://huggingface.co/google/t5-v1_1-xl"),i(B,"rel","nofollow"),i(U,"href","https://huggingface.co/google/t5-v1_1-xxl"),i(U,"rel","nofollow"),i(M,"href","/docs/transformers/v4.15.0/en/t5"),i(q,"href","https://huggingface.co/patrickvonplaten"),i(q,"rel","nofollow"),i(F,"href","https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511"),i(F,"rel","nofollow")},m(t,f){e(document.head,_),p(t,R,f),p(t,v,f),e(v,m),e(m,Q),jt(A,Q,null),e(v,Be),e(v,W),e(W,Ue),p(t,we,f),p(t,x,f),e(x,y),e(y,X),jt(L,X,null),e(x,qe),e(x,Y),e(Y,Fe),p(t,Ee,f),p(t,T,f),e(T,Re),e(T,C),e(C,Ve),e(T,He),p(t,be,f),p(t,V,f),e(V,Je),p(t,xe,f),jt(I,t,f),p(t,ye,f),p(t,H,f),e(H,Ke),p(t,Te,f),p(t,c,f),e(c,Z),e(Z,O),e(O,Me),e(O,S),e(S,ze),e(O,Qe),e(c,We),e(c,ee),e(ee,te),e(te,Xe),e(c,Ye),e(c,ae),e(ae,oe),e(oe,Ze),e(c,et),e(c,re),e(re,le),e(le,tt),e(c,at),e(c,ne),e(ne,g),e(g,ot),e(g,se),e(se,rt),e(g,lt),e(g,ie),e(ie,nt),e(g,st),e(g,fe),e(fe,it),e(g,ft),p(t,ke,f),p(t,k,f),e(k,pt),e(k,G),e(G,ht),e(k,dt),p(t,$e,f),p(t,J,f),e(J,ct),p(t,Pe,f),p(t,u,f),e(u,pe),e(pe,he),e(he,j),e(j,ut),e(u,vt),e(u,de),e(de,ce),e(ce,N),e(N,mt),e(u,gt),e(u,ue),e(ue,ve),e(ve,D),e(D,_t),e(u,wt),e(u,me),e(me,ge),e(ge,B),e(B,Et),e(u,bt),e(u,_e),e(_e,K),e(K,U),e(U,xt),e(K,yt),p(t,Ae,f),p(t,$,f),e($,Tt),e($,M),e(M,kt),e($,$t),p(t,Le,f),p(t,w,f),e(w,Pt),e(w,q),e(q,At),e(w,Lt),e(w,F),e(F,Ct),e(w,It),Ce=!0},p:Ca,i(t){Ce||(Nt(A.$$.fragment,t),Nt(L.$$.fragment,t),Nt(I.$$.fragment,t),Ce=!0)},o(t){Dt(A.$$.fragment,t),Dt(L.$$.fragment,t),Dt(I.$$.fragment,t),Ce=!1},d(t){a(_),t&&a(R),t&&a(v),Bt(A),t&&a(we),t&&a(x),Bt(L),t&&a(Ee),t&&a(T),t&&a(be),t&&a(V),t&&a(xe),Bt(I,t),t&&a(ye),t&&a(H),t&&a(Te),t&&a(c),t&&a(ke),t&&a(k),t&&a($e),t&&a(J),t&&a(Pe),t&&a(u),t&&a(Ae),t&&a($),t&&a(Le),t&&a(w)}}}const Sa={local:"t5v11",sections:[{local:"overview",title:"Overview"}],title:"T5v1.1"};function Ga(De,_,R){let{fw:v}=_;return De.$$set=m=>{"fw"in m&&R(0,v=m.fw)},[v]}class Ua extends $a{constructor(_){super();Pa(this,_,Ga,Oa,Aa,{fw:0})}}export{Ua as default,Sa as metadata};
9,942
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/imagegpt.mdx-f9d3532a.js
import{S as Ic,i as yc,s as wc,e as a,k as l,w as v,t as r,L as Pc,c as n,d as t,m as d,a as s,x as T,h as i,b as c,M as kc,J as e,g as h,y as b,q as I,o as y,B as w}from"../../chunks/vendor-b1433968.js";import{T as cn}from"../../chunks/Tip-c3840994.js";import{D as be}from"../../chunks/Docstring-ff504c58.js";import{C as mn}from"../../chunks/CodeBlock-a320dbd7.js";import{I as kt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function xc(ee){let p,P;return{c(){p=a("p"),P=r(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(g){p=n(g,"P",{});var u=s(p);P=i(u,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),u.forEach(t)},m(g,u){h(g,p,u),e(p,P)},d(g){g&&t(p)}}}function Gc(ee){let p,P,g,u,k;return{c(){p=a("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){p=n(f,"P",{});var _=s(p);P=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n(_,"CODE",{});var L=s(g);u=i(L,"Module"),L.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(f,_){h(f,p,_),e(p,P),e(p,g),e(g,u),e(p,k)},d(f){f&&t(p)}}}function Ec(ee){let p,P,g,u,k;return{c(){p=a("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){p=n(f,"P",{});var _=s(p);P=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n(_,"CODE",{});var L=s(g);u=i(L,"Module"),L.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(f,_){h(f,p,_),e(p,P),e(p,g),e(g,u),e(p,k)},d(f){f&&t(p)}}}function $c(ee){let p,P,g,u,k;return{c(){p=a("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),u=r("Module"),k=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(f){p=n(f,"P",{});var _=s(p);P=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n(_,"CODE",{});var L=s(g);u=i(L,"Module"),L.forEach(t),k=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(f,_){h(f,p,_),e(p,P),e(p,g),e(g,u),e(p,k)},d(f){f&&t(p)}}}function Fc(ee){let 
p,P,g,u,k,f,_,L,hn,Ia,ce,Ie,Jt,De,pn,Xt,gn,ya,ye,un,Ne,fn,_n,wa,xt,vn,Pa,Gt,Qt,Tn,ka,we,Tl,xa,Et,bn,Ga,H,In,Le,yn,wn,Se,Pn,kn,Oe,xn,Gn,Ea,$t,En,$a,x,We,$n,Re,Fn,Cn,Mn,He,zn,Ft,jn,qn,An,Be,Dn,Ct,Nn,Ln,Sn,Ve,On,Kt,Wn,Rn,Hn,Ue,Bn,Mt,Vn,Un,Jn,Yt,Xn,Fa,Pe,Zt,G,eo,to,Qn,Kn,oo,ao,Yn,Zn,no,so,es,ts,ro,io,os,as,lo,co,ns,ss,mo,ho,rs,is,E,$,po,ls,ds,go,cs,ms,uo,hs,ps,fo,gs,us,_o,fs,_s,vo,vs,Ts,F,To,bs,Is,bo,ys,ws,Io,Ps,ks,yo,xs,Gs,wo,Es,$s,Po,Fs,Cs,C,ko,Ms,zs,xo,js,qs,Go,As,Ds,Eo,Ns,Ls,$o,Ss,Os,Fo,Ws,Rs,M,Co,Hs,Bs,Mo,Vs,Us,zo,Js,Xs,jo,Qs,Ks,qo,Ys,Zs,Ao,er,tr,z,Do,or,ar,No,nr,sr,Lo,rr,ir,So,lr,dr,Oo,cr,mr,Wo,hr,pr,j,Ro,gr,ur,Ho,fr,_r,Bo,vr,Tr,Vo,br,Ir,Uo,yr,wr,Jo,Pr,Ca,me,ke,Xo,Je,kr,Qo,xr,Ma,q,Xe,Gr,te,Er,zt,$r,Fr,Ko,Cr,Mr,Qe,zr,jr,qr,he,Ar,jt,Dr,Nr,qt,Lr,Sr,Or,Yo,Wr,Rr,Ke,za,pe,xe,Zo,Ye,Hr,ea,Br,ja,R,Ze,Vr,ta,Ur,Jr,et,Xr,oa,Qr,Kr,Yr,oe,tt,Zr,aa,ei,ti,Ge,qa,ge,Ee,na,ot,oi,sa,ai,Aa,A,at,ni,ra,si,ri,nt,ii,At,li,di,ci,st,mi,rt,hi,pi,gi,S,it,ui,ue,fi,Dt,_i,vi,ia,Ti,bi,Ii,$e,yi,la,wi,Pi,lt,Da,fe,Fe,da,dt,ki,ca,xi,Na,D,ct,Gi,ma,Ei,$i,mt,Fi,Nt,Ci,Mi,zi,ht,ji,pt,qi,Ai,Di,O,gt,Ni,_e,Li,Lt,Si,Oi,ha,Wi,Ri,Hi,Ce,Bi,pa,Vi,Ui,ut,La,ve,Me,ga,ft,Ji,ua,Xi,Sa,N,_t,Qi,vt,Ki,St,Yi,Zi,el,Tt,tl,Ot,ol,al,nl,bt,sl,It,rl,il,ll,W,yt,dl,Te,cl,Wt,ml,hl,fa,pl,gl,ul,ze,fl,_a,_l,vl,wt,Oa;return f=new kt({}),De=new kt({}),Je=new kt({}),Xe=new be({props:{name:"class transformers.ImageGPTConfig",anchor:"transformers.ImageGPTConfig",parameters:[{name:"vocab_size",val:" = 513"},{name:"n_positions",val:" = 1024"},{name:"n_embd",val:" = 512"},{name:"n_layer",val:" = 24"},{name:"n_head",val:" = 8"},{name:"n_inner",val:" = None"},{name:"activation_function",val:" = 'quick_gelu'"},{name:"resid_pdrop",val:" = 0.1"},{name:"embd_pdrop",val:" = 0.1"},{name:"attn_pdrop",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 1e-05"},{name:"initializer_range",val:" = 0.02"},{name:"scale_attn_weights",val:" = True"},{name:"use_cache",val:" = True"},{name:"tie_word_embeddings",val:" = False"},{name:"scale_attn_by_inverse_layer_idx",val:" = False"},{name:"reorder_and_upcast_attn",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/configuration_imagegpt.py#L30",parametersDescription:[{anchor:"transformers.ImageGPTConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTModel">ImageGPTModel</a> or <code>TFImageGPTModel</code>.`,name:"vocab_size"},{anchor:"transformers.ImageGPTConfig.n_positions",description:`<strong>n_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 32*32) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"n_positions"},{anchor:"transformers.ImageGPTConfig.n_embd",description:`<strong>n_embd</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the embeddings and hidden states.`,name:"n_embd"},{anchor:"transformers.ImageGPTConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.ImageGPTConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.ImageGPTConfig.n_inner",description:`<strong>n_inner</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; Dimensionality of the inner feed-forward layers. <code>None</code> will set it to 4 times n_embd`,name:"n_inner"},{anchor:"transformers.ImageGPTConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;quick_gelu&quot;</code>) &#x2014; Activation function (can be one of the activation functions defined in src/transformers/activations.py). Defaults to &#x201C;quick_gelu&#x201D;.`,name:"activation_function"},{anchor:"transformers.ImageGPTConfig.resid_pdrop",description:`<strong>resid_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"resid_pdrop"},{anchor:"transformers.ImageGPTConfig.embd_pdrop",description:`<strong>embd_pdrop</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the embeddings.`,name:"embd_pdrop"},{anchor:"transformers.ImageGPTConfig.attn_pdrop",description:`<strong>attn_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention.`,name:"attn_pdrop"},{anchor:"transformers.ImageGPTConfig.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon to use in the layer normalization layers.`,name:"layer_norm_epsilon"},{anchor:"transformers.ImageGPTConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.ImageGPTConfig.scale_attn_weights",description:`<strong>scale_attn_weights</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Scale attention weights by dividing by sqrt(hidden_size)..`,name:"scale_attn_weights"},{anchor:"transformers.ImageGPTConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.ImageGPTConfig.scale_attn_by_inverse_layer_idx",description:`<strong>scale_attn_by_inverse_layer_idx</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to additionally scale attention weights by <code>1 / layer_idx + 
1</code>.`,name:"scale_attn_by_inverse_layer_idx"},{anchor:"transformers.ImageGPTConfig.reorder_and_upcast_attn",description:`<strong>reorder_and_upcast_attn</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention dot-product/softmax to float() when training with mixed precision.`,name:"reorder_and_upcast_attn"}]}}),Ke=new mn({props:{code:`from transformers import ImageGPTModel, ImageGPTConfig # Initializing a ImageGPT configuration configuration = ImageGPTConfig() # Initializing a model from the configuration model = ImageGPTModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTModel, ImageGPTConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ImageGPT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ImageGPTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ye=new kt({}),Ze=new be({props:{name:"class transformers.ImageGPTFeatureExtractor",anchor:"transformers.ImageGPTFeatureExtractor",parameters:[{name:"clusters",val:""},{name:"do_resize",val:" = True"},{name:"size",val:" = 32"},{name:"resample",val:" = 2"},{name:"do_normalize",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/feature_extraction_imagegpt.py#L46",parametersDescription:[{anchor:"transformers.ImageGPTFeatureExtractor.clusters",description:`<strong>clusters</strong> (<code>np.ndarray</code>) &#x2014; The color clusters to use, as a <code>np.ndarray</code> of shape <code>(n_clusters, 3)</code>.`,name:"clusters"},{anchor:"transformers.ImageGPTFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.ImageGPTFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 32) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.ImageGPTFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.ImageGPTFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input to the range between -1 and +1.`,name:"do_normalize"}]}}),tt=new be({props:{name:"__call__",anchor:"transformers.ImageGPTFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/feature_extraction_imagegpt.py#L97",parametersDescription:[{anchor:"transformers.ImageGPTFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.ImageGPTFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),Ge=new cn({props:{warning:"&lcub;true}",$$slots:{default:[xc]},$$scope:{ctx:ee}}}),ot=new kt({}),at=new be({props:{name:"class transformers.ImageGPTModel",anchor:"transformers.ImageGPTModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/modeling_imagegpt.py#L620",parametersDescription:[{anchor:"transformers.ImageGPTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),it=new be({props:{name:"forward",anchor:"transformers.ImageGPTModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/modeling_imagegpt.py#L655",parametersDescription:[{anchor:"transformers.ImageGPTModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor.__call__">ImageGPTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"input_ids"},{anchor:"transformers.ImageGPTModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.ImageGPTModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ImageGPTModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ImageGPTModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ImageGPTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ImageGPTModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.ImageGPTModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ImageGPTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ImageGPTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ImageGPTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ImageGPTModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig" >ImageGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s 
cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$e=new cn({props:{$$slots:{default:[Gc]},$$scope:{ctx:ee}}}),lt=new mn({props:{code:`from transformers import ImageGPTFeatureExtractor, ImageGPTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ImageGPTFeatureExtractor.from_pretrained('openai/imagegpt-small') model = ImageGPTModel.from_pretrained('openai/imagegpt-small') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTFeatureExtractor, ImageGPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ImageGPTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;openai/imagegpt-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTModel.from_pretrained(<span class="hljs-string">&#x27;openai/imagegpt-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),dt=new kt({}),ct=new be({props:{name:"class transformers.ImageGPTForCausalImageModeling",anchor:"transformers.ImageGPTForCausalImageModeling",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/modeling_imagegpt.py#L900",parametersDescription:[{anchor:"transformers.ImageGPTForCausalImageModeling.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gt=new be({props:{name:"forward",anchor:"transformers.ImageGPTForCausalImageModeling.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/modeling_imagegpt.py#L948",parametersDescription:[{anchor:"transformers.ImageGPTForCausalImageModeling.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor.__call__">ImageGPTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"input_ids"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ImageGPTForCausalImageModeling.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig" >ImageGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention 
heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ce=new cn({props:{$$slots:{default:[Ec]},$$scope:{ctx:ee}}}),ut=new mn({props:{code:`from transformers import ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling import torch import matplotlib.pyplot as plt import numpy as np feature_extractor = ImageGPTFeatureExtractor.from_pretrained('openai/imagegpt-small') model = ImageGPTForCausalImageModeling.from_pretrained('openai/imagegpt-small') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) # unconditional generation of 8 images batch_size = 8 context = torch.full((batch_size, 1), model.config.vocab_size - 1) #initialize with SOS token context = torch.tensor(context).to(device) output = model.generate(input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40) clusters = feature_extractor.clusters n_px = feature_extractor.size samples = output[:,1:].cpu().detach().numpy() samples_img = [np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples] # convert color cluster tokens back to pixels f, axes = plt.subplots(1, batch_size, dpi=300) for img, ax in zip(samples_img, axes): ax.axis('off') ax.imshow(img),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> matplotlib.pyplot <span class="hljs-keyword">as</span> plt <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ImageGPTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;openai/imagegpt-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTForCausalImageModeling.from_pretrained(<span class="hljs-string">&#x27;openai/imagegpt-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>device = torch.device(<span class="hljs-string">&quot;cuda&quot;</span> <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;cpu&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.to(device) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># unconditional generation of 8 images</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">8</span> <span class="hljs-meta">&gt;&gt;&gt; </span>context = torch.full((batch_size, <span class="hljs-number">1</span>), model.config.vocab_size - <span class="hljs-number">1</span>) <span class="hljs-comment">#initialize with SOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>context = torch.tensor(context).to(device) <span class="hljs-meta">&gt;&gt;&gt; </span>output = model.generate(input_ids=context, max_length=model.config.n_positions + <span class="hljs-number">1</span>, temperature=<span class="hljs-number">1.0</span>, do_sample=<span class="hljs-literal">True</span>, top_k=<span class="hljs-number">40</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>clusters = feature_extractor.clusters <span class="hljs-meta">&gt;&gt;&gt; </span>n_px = feature_extractor.size <span class="hljs-meta">&gt;&gt;&gt; </span>samples = output[:,<span class="hljs-number">1</span>:].cpu().detach().numpy() <span class="hljs-meta">&gt;&gt;&gt; </span>samples_img = [np.reshape(np.rint(<span class="hljs-number">127.5</span> * (clusters[s] + <span class="hljs-number">1.0</span>)), [n_px, n_px, <span class="hljs-number">3</span>]).astype(np.uint8) <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> samples] <span class="hljs-comment"># convert color cluster tokens back to pixels</span> <span class="hljs-meta">&gt;&gt;&gt; </span>f, axes = plt.subplots(<span class="hljs-number">1</span>, batch_size, dpi=<span class="hljs-number">300</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> img, ax <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(samples_img, axes): <span class="hljs-meta">... </span> ax.axis(<span class="hljs-string">&#x27;off&#x27;</span>) <span class="hljs-meta">... </span> ax.imshow(img)`}}),ft=new kt({}),_t=new be({props:{name:"class transformers.ImageGPTForImageClassification",anchor:"transformers.ImageGPTForImageClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/modeling_imagegpt.py#L1085",parametersDescription:[{anchor:"transformers.ImageGPTForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yt=new be({props:{name:"forward",anchor:"transformers.ImageGPTForImageClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/imagegpt/modeling_imagegpt.py#L1097",parametersDescription:[{anchor:"transformers.ImageGPTForImageClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor.__call__">ImageGPTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"input_ids"},{anchor:"transformers.ImageGPTForImageClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.ImageGPTForImageClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ImageGPTForImageClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.ImageGPTForImageClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ImageGPTForImageClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ImageGPTForImageClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.ImageGPTForImageClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ImageGPTForImageClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ImageGPTForImageClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ImageGPTForImageClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ImageGPTForImageClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTConfig" >ImageGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` 
<p><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),ze=new cn({props:{$$slots:{default:[$c]},$$scope:{ctx:ee}}}),wt=new mn({props:{code:`from transformers import ImageGPTFeatureExtractor, ImageGPTForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ImageGPTFeatureExtractor.from_pretrained('openai/imagegpt-small') model = ImageGPTForImageClassification.from_pretrained('openai/imagegpt-small') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTFeatureExtractor, ImageGPTForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ImageGPTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;openai/imagegpt-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTForImageClassification.from_pretrained(<span class="hljs-string">&#x27;openai/imagegpt-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){p=a("meta"),P=l(),g=a("h1"),u=a("a"),k=a("span"),v(f.$$.fragment),_=l(),L=a("span"),hn=r("ImageGPT"),Ia=l(),ce=a("h2"),Ie=a("a"),Jt=a("span"),v(De.$$.fragment),pn=l(),Xt=a("span"),gn=r("Overview"),ya=l(),ye=a("p"),un=r("The ImageGPT model was proposed in "),Ne=a("a"),fn=r("Generative Pretraining from Pixels"),_n=r(` by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation.`),wa=l(),xt=a("p"),vn=r("The abstract from the paper is the following:"),Pa=l(),Gt=a("p"),Qt=a("em"),Tn=r(`Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. 
We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.`),ka=l(),we=a("img"),xa=l(),Et=a("small"),bn=r("Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf)."),Ga=l(),H=a("p"),In=r("This model was contributed by "),Le=a("a"),yn=r("nielsr"),wn=r(", based on "),Se=a("a"),Pn=r("this issue"),kn=r(`. The original code can be found `),Oe=a("a"),xn=r("here"),Gn=r("."),Ea=l(),$t=a("p"),En=r("Tips:"),$a=l(),x=a("ul"),We=a("li"),$n=r(`Demo notebooks for ImageGPT can be found `),Re=a("a"),Fn=r("here"),Cn=r("."),Mn=l(),He=a("li"),zn=r("ImageGPT is almost exactly the same as "),Ft=a("a"),jn=r("GPT-2"),qn=r(`, with the exception that a different activation function is used (namely \u201Cquick gelu\u201D), and the layer normalization layers don\u2019t mean center the inputs. ImageGPT also doesn\u2019t have tied input- and output embeddings.`),An=l(),Be=a("li"),Dn=r(`As the time- and memory requirements of the attention mechanism of Transformers scales quadratically in the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 tokens from 0..255 into a Transformer is still prohibitively large. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long sequence, but now of integers in the range 0..511. So we are shrinking the sequence length at the cost of a bigger embedding matrix. In other words, the vocabulary size of ImageGPT is 512, + 1 for a special \u201Cstart of sentence\u201D (SOS) token, used at the beginning of every sequence. One can use `),Ct=a("a"),Nn=r("ImageGPTFeatureExtractor"),Ln=r(` to prepare images for the model.`),Sn=l(),Ve=a("li"),On=r(`Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a sklearn logistic regression model for example). This is also referred to as \u201Clinear probing\u201D. Features can be easily obtained by first forwarding the image through the model, then specifying `),Kt=a("code"),Wn=r("output_hidden_states=True"),Rn=r(`, and then average-pool the hidden states at whatever layer you like.`),Hn=l(),Ue=a("li"),Bn=r(`Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can use `),Mt=a("a"),Vn=r("ImageGPTForImageClassification"),Un=r("."),Jn=l(),Yt=a("li"),Xn=r(`ImageGPT comes in different sizes: there\u2019s ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors did also train an XL variant, which they didn\u2019t release. 
The differences in size are summarized in the following table:`),Fa=l(),Pe=a("table"),Zt=a("thead"),G=a("tr"),eo=a("th"),to=a("strong"),Qn=r("Model variant"),Kn=l(),oo=a("th"),ao=a("strong"),Yn=r("Depths"),Zn=l(),no=a("th"),so=a("strong"),es=r("Hidden sizes"),ts=l(),ro=a("th"),io=a("strong"),os=r("Decoder hidden size"),as=l(),lo=a("th"),co=a("strong"),ns=r("Params (M)"),ss=l(),mo=a("th"),ho=a("strong"),rs=r("ImageNet-1k Top 1"),is=l(),E=a("tbody"),$=a("tr"),po=a("td"),ls=r("MiT-b0"),ds=l(),go=a("td"),cs=r("[2, 2, 2, 2]"),ms=l(),uo=a("td"),hs=r("[32, 64, 160, 256]"),ps=l(),fo=a("td"),gs=r("256"),us=l(),_o=a("td"),fs=r("3.7"),_s=l(),vo=a("td"),vs=r("70.5"),Ts=l(),F=a("tr"),To=a("td"),bs=r("MiT-b1"),Is=l(),bo=a("td"),ys=r("[2, 2, 2, 2]"),ws=l(),Io=a("td"),Ps=r("[64, 128, 320, 512]"),ks=l(),yo=a("td"),xs=r("256"),Gs=l(),wo=a("td"),Es=r("14.0"),$s=l(),Po=a("td"),Fs=r("78.7"),Cs=l(),C=a("tr"),ko=a("td"),Ms=r("MiT-b2"),zs=l(),xo=a("td"),js=r("[3, 4, 6, 3]"),qs=l(),Go=a("td"),As=r("[64, 128, 320, 512]"),Ds=l(),Eo=a("td"),Ns=r("768"),Ls=l(),$o=a("td"),Ss=r("25.4"),Os=l(),Fo=a("td"),Ws=r("81.6"),Rs=l(),M=a("tr"),Co=a("td"),Hs=r("MiT-b3"),Bs=l(),Mo=a("td"),Vs=r("[3, 4, 18, 3]"),Us=l(),zo=a("td"),Js=r("[64, 128, 320, 512]"),Xs=l(),jo=a("td"),Qs=r("768"),Ks=l(),qo=a("td"),Ys=r("45.2"),Zs=l(),Ao=a("td"),er=r("83.1"),tr=l(),z=a("tr"),Do=a("td"),or=r("MiT-b4"),ar=l(),No=a("td"),nr=r("[3, 8, 27, 3]"),sr=l(),Lo=a("td"),rr=r("[64, 128, 320, 512]"),ir=l(),So=a("td"),lr=r("768"),dr=l(),Oo=a("td"),cr=r("62.6"),mr=l(),Wo=a("td"),hr=r("83.6"),pr=l(),j=a("tr"),Ro=a("td"),gr=r("MiT-b5"),ur=l(),Ho=a("td"),fr=r("[3, 6, 40, 3]"),_r=l(),Bo=a("td"),vr=r("[64, 128, 320, 512]"),Tr=l(),Vo=a("td"),br=r("768"),Ir=l(),Uo=a("td"),yr=r("82.0"),wr=l(),Jo=a("td"),Pr=r("83.8"),Ca=l(),me=a("h2"),ke=a("a"),Xo=a("span"),v(Je.$$.fragment),kr=l(),Qo=a("span"),xr=r("ImageGPTConfig"),Ma=l(),q=a("div"),v(Xe.$$.fragment),Gr=l(),te=a("p"),Er=r("This is the configuration class to store the configuration of a "),zt=a("a"),$r=r("ImageGPTModel"),Fr=r(` or a `),Ko=a("code"),Cr=r("TFImageGPTModel"),Mr=r(`. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT `),Qe=a("a"),zr=r("small"),jr=r(" architecture."),qr=l(),he=a("p"),Ar=r("Configuration objects inherit from "),jt=a("a"),Dr=r("PretrainedConfig"),Nr=r(` and can be used to control the model outputs. Read the documentation from `),qt=a("a"),Lr=r("PretrainedConfig"),Sr=r(" for more information."),Or=l(),Yo=a("p"),Wr=r("Example:"),Rr=l(),v(Ke.$$.fragment),za=l(),pe=a("h2"),xe=a("a"),Zo=a("span"),v(Ye.$$.fragment),Hr=l(),ea=a("span"),Br=r("ImageGPTFeatureExtractor"),ja=l(),R=a("div"),v(Ze.$$.fragment),Vr=l(),ta=a("p"),Ur=r(`Constructs an ImageGPT feature extractor. This feature extractor can be used to resize images to a smaller resolution (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of \u201Cpixel values\u201D (color clusters).`),Jr=l(),et=a("p"),Xr=r("This feature extractor inherits from "),oa=a("code"),Qr=r("FeatureExtractionMixin"),Kr=r(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Yr=l(),oe=a("div"),v(tt.$$.fragment),Zr=l(),aa=a("p"),ei=r("Main method to prepare for the model one or several image(s)."),ti=l(),v(Ge.$$.fragment),qa=l(),ge=a("h2"),Ee=a("a"),na=a("span"),v(ot.$$.fragment),oi=l(),sa=a("span"),ai=r("ImageGPTModel"),Aa=l(),A=a("div"),v(at.$$.fragment),ni=l(),ra=a("p"),si=r("The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top."),ri=l(),nt=a("p"),ii=r("This model inherits from "),At=a("a"),li=r("PreTrainedModel"),di=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ci=l(),st=a("p"),mi=r("This model is also a PyTorch "),rt=a("a"),hi=r("torch.nn.Module"),pi=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gi=l(),S=a("div"),v(it.$$.fragment),ui=l(),ue=a("p"),fi=r("The "),Dt=a("a"),_i=r("ImageGPTModel"),vi=r(" forward method, overrides the "),ia=a("code"),Ti=r("__call__"),bi=r(" special method."),Ii=l(),v($e.$$.fragment),yi=l(),la=a("p"),wi=r("Examples:"),Pi=l(),v(lt.$$.fragment),Da=l(),fe=a("h2"),Fe=a("a"),da=a("span"),v(dt.$$.fragment),ki=l(),ca=a("span"),xi=r("ImageGPTForCausalImageModeling"),Na=l(),D=a("div"),v(ct.$$.fragment),Gi=l(),ma=a("p"),Ei=r(`The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),$i=l(),mt=a("p"),Fi=r("This model inherits from "),Nt=a("a"),Ci=r("PreTrainedModel"),Mi=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zi=l(),ht=a("p"),ji=r("This model is also a PyTorch "),pt=a("a"),qi=r("torch.nn.Module"),Ai=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Di=l(),O=a("div"),v(gt.$$.fragment),Ni=l(),_e=a("p"),Li=r("The "),Lt=a("a"),Si=r("ImageGPTForCausalImageModeling"),Oi=r(" forward method, overrides the "),ha=a("code"),Wi=r("__call__"),Ri=r(" special method."),Hi=l(),v(Ce.$$.fragment),Bi=l(),pa=a("p"),Vi=r("Examples:"),Ui=l(),v(ut.$$.fragment),La=l(),ve=a("h2"),Me=a("a"),ga=a("span"),v(ft.$$.fragment),Ji=l(),ua=a("span"),Xi=r("ImageGPTForImageClassification"),Sa=l(),N=a("div"),v(_t.$$.fragment),Qi=l(),vt=a("p"),Ki=r(`The ImageGPT Model transformer with an image classification head on top (linear layer). `),St=a("a"),Yi=r("ImageGPTForImageClassification"),Zi=r(` average-pools the hidden states in order to do the classification.`),el=l(),Tt=a("p"),tl=r("This model inherits from "),Ot=a("a"),ol=r("PreTrainedModel"),al=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nl=l(),bt=a("p"),sl=r("This model is also a PyTorch "),It=a("a"),rl=r("torch.nn.Module"),il=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ll=l(),W=a("div"),v(yt.$$.fragment),dl=l(),Te=a("p"),cl=r("The "),Wt=a("a"),ml=r("ImageGPTForImageClassification"),hl=r(" forward method, overrides the "),fa=a("code"),pl=r("__call__"),gl=r(" special method."),ul=l(),v(ze.$$.fragment),fl=l(),_a=a("p"),_l=r("Examples:"),vl=l(),v(wt.$$.fragment),this.h()},l(o){const m=Pc('[data-svelte="svelte-1phssyn"]',document.head);p=n(m,"META",{name:!0,content:!0}),m.forEach(t),P=d(o),g=n(o,"H1",{class:!0});var Pt=s(g);u=n(Pt,"A",{id:!0,class:!0,href:!0});var va=s(u);k=n(va,"SPAN",{});var Ta=s(k);T(f.$$.fragment,Ta),Ta.forEach(t),va.forEach(t),_=d(Pt),L=n(Pt,"SPAN",{});var ba=s(L);hn=i(ba,"ImageGPT"),ba.forEach(t),Pt.forEach(t),Ia=d(o),ce=n(o,"H2",{class:!0});var Wa=s(ce);Ie=n(Wa,"A",{id:!0,class:!0,href:!0});var bl=s(Ie);Jt=n(bl,"SPAN",{});var Il=s(Jt);T(De.$$.fragment,Il),Il.forEach(t),bl.forEach(t),pn=d(Wa),Xt=n(Wa,"SPAN",{});var yl=s(Xt);gn=i(yl,"Overview"),yl.forEach(t),Wa.forEach(t),ya=d(o),ye=n(o,"P",{});var Ra=s(ye);un=i(Ra,"The ImageGPT model was proposed in "),Ne=n(Ra,"A",{href:!0,rel:!0});var wl=s(Ne);fn=i(wl,"Generative Pretraining from Pixels"),wl.forEach(t),_n=i(Ra,` by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation.`),Ra.forEach(t),wa=d(o),xt=n(o,"P",{});var Pl=s(xt);vn=i(Pl,"The abstract from the paper is the following:"),Pl.forEach(t),Pa=d(o),Gt=n(o,"P",{});var kl=s(Gt);Qt=n(kl,"EM",{});var xl=s(Qt);Tn=i(xl,`Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.`),xl.forEach(t),kl.forEach(t),ka=d(o),we=n(o,"IMG",{src:!0,alt:!0,width:!0}),xa=d(o),Et=n(o,"SMALL",{});var Gl=s(Et);bn=i(Gl,"Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf)."),Gl.forEach(t),Ga=d(o),H=n(o,"P",{});var je=s(H);In=i(je,"This model was contributed by "),Le=n(je,"A",{href:!0,rel:!0});var El=s(Le);yn=i(El,"nielsr"),El.forEach(t),wn=i(je,", based on "),Se=n(je,"A",{href:!0,rel:!0});var $l=s(Se);Pn=i($l,"this issue"),$l.forEach(t),kn=i(je,`. 
The original code can be found `),Oe=n(je,"A",{href:!0,rel:!0});var Fl=s(Oe);xn=i(Fl,"here"),Fl.forEach(t),Gn=i(je,"."),je.forEach(t),Ea=d(o),$t=n(o,"P",{});var Cl=s($t);En=i(Cl,"Tips:"),Cl.forEach(t),$a=d(o),x=n(o,"UL",{});var B=s(x);We=n(B,"LI",{});var Ha=s(We);$n=i(Ha,`Demo notebooks for ImageGPT can be found `),Re=n(Ha,"A",{href:!0,rel:!0});var Ml=s(Re);Fn=i(Ml,"here"),Ml.forEach(t),Cn=i(Ha,"."),Ha.forEach(t),Mn=d(B),He=n(B,"LI",{});var Ba=s(He);zn=i(Ba,"ImageGPT is almost exactly the same as "),Ft=n(Ba,"A",{href:!0});var zl=s(Ft);jn=i(zl,"GPT-2"),zl.forEach(t),qn=i(Ba,`, with the exception that a different activation function is used (namely \u201Cquick gelu\u201D), and the layer normalization layers don\u2019t mean center the inputs. ImageGPT also doesn\u2019t have tied input- and output embeddings.`),Ba.forEach(t),An=d(B),Be=n(B,"LI",{});var Va=s(Be);Dn=i(Va,`As the time- and memory requirements of the attention mechanism of Transformers scales quadratically in the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 tokens from 0..255 into a Transformer is still prohibitively large. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long sequence, but now of integers in the range 0..511. So we are shrinking the sequence length at the cost of a bigger embedding matrix. In other words, the vocabulary size of ImageGPT is 512, + 1 for a special \u201Cstart of sentence\u201D (SOS) token, used at the beginning of every sequence. One can use `),Ct=n(Va,"A",{href:!0});var jl=s(Ct);Nn=i(jl,"ImageGPTFeatureExtractor"),jl.forEach(t),Ln=i(Va,` to prepare images for the model.`),Va.forEach(t),Sn=d(B),Ve=n(B,"LI",{});var Ua=s(Ve);On=i(Ua,`Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a sklearn logistic regression model for example). This is also referred to as \u201Clinear probing\u201D. Features can be easily obtained by first forwarding the image through the model, then specifying `),Kt=n(Ua,"CODE",{});var ql=s(Kt);Wn=i(ql,"output_hidden_states=True"),ql.forEach(t),Rn=i(Ua,`, and then average-pool the hidden states at whatever layer you like.`),Ua.forEach(t),Hn=d(B),Ue=n(B,"LI",{});var Ja=s(Ue);Bn=i(Ja,`Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can use `),Mt=n(Ja,"A",{href:!0});var Al=s(Mt);Vn=i(Al,"ImageGPTForImageClassification"),Al.forEach(t),Un=i(Ja,"."),Ja.forEach(t),Jn=d(B),Yt=n(B,"LI",{});var Dl=s(Yt);Xn=i(Dl,`ImageGPT comes in different sizes: there\u2019s ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors did also train an XL variant, which they didn\u2019t release. 
The differences in size are summarized in the following table:`),Dl.forEach(t),B.forEach(t),Fa=d(o),Pe=n(o,"TABLE",{});var Xa=s(Pe);Zt=n(Xa,"THEAD",{});var Nl=s(Zt);G=n(Nl,"TR",{});var V=s(G);eo=n(V,"TH",{});var Ll=s(eo);to=n(Ll,"STRONG",{});var Sl=s(to);Qn=i(Sl,"Model variant"),Sl.forEach(t),Ll.forEach(t),Kn=d(V),oo=n(V,"TH",{});var Ol=s(oo);ao=n(Ol,"STRONG",{});var Wl=s(ao);Yn=i(Wl,"Depths"),Wl.forEach(t),Ol.forEach(t),Zn=d(V),no=n(V,"TH",{});var Rl=s(no);so=n(Rl,"STRONG",{});var Hl=s(so);es=i(Hl,"Hidden sizes"),Hl.forEach(t),Rl.forEach(t),ts=d(V),ro=n(V,"TH",{});var Bl=s(ro);io=n(Bl,"STRONG",{});var Vl=s(io);os=i(Vl,"Decoder hidden size"),Vl.forEach(t),Bl.forEach(t),as=d(V),lo=n(V,"TH",{});var Ul=s(lo);co=n(Ul,"STRONG",{});var Jl=s(co);ns=i(Jl,"Params (M)"),Jl.forEach(t),Ul.forEach(t),ss=d(V),mo=n(V,"TH",{});var Xl=s(mo);ho=n(Xl,"STRONG",{});var Ql=s(ho);rs=i(Ql,"ImageNet-1k Top 1"),Ql.forEach(t),Xl.forEach(t),V.forEach(t),Nl.forEach(t),is=d(Xa),E=n(Xa,"TBODY",{});var U=s(E);$=n(U,"TR",{});var J=s($);po=n(J,"TD",{});var Kl=s(po);ls=i(Kl,"MiT-b0"),Kl.forEach(t),ds=d(J),go=n(J,"TD",{});var Yl=s(go);cs=i(Yl,"[2, 2, 2, 2]"),Yl.forEach(t),ms=d(J),uo=n(J,"TD",{});var Zl=s(uo);hs=i(Zl,"[32, 64, 160, 256]"),Zl.forEach(t),ps=d(J),fo=n(J,"TD",{});var ed=s(fo);gs=i(ed,"256"),ed.forEach(t),us=d(J),_o=n(J,"TD",{});var td=s(_o);fs=i(td,"3.7"),td.forEach(t),_s=d(J),vo=n(J,"TD",{});var od=s(vo);vs=i(od,"70.5"),od.forEach(t),J.forEach(t),Ts=d(U),F=n(U,"TR",{});var X=s(F);To=n(X,"TD",{});var ad=s(To);bs=i(ad,"MiT-b1"),ad.forEach(t),Is=d(X),bo=n(X,"TD",{});var nd=s(bo);ys=i(nd,"[2, 2, 2, 2]"),nd.forEach(t),ws=d(X),Io=n(X,"TD",{});var sd=s(Io);Ps=i(sd,"[64, 128, 320, 512]"),sd.forEach(t),ks=d(X),yo=n(X,"TD",{});var rd=s(yo);xs=i(rd,"256"),rd.forEach(t),Gs=d(X),wo=n(X,"TD",{});var id=s(wo);Es=i(id,"14.0"),id.forEach(t),$s=d(X),Po=n(X,"TD",{});var ld=s(Po);Fs=i(ld,"78.7"),ld.forEach(t),X.forEach(t),Cs=d(U),C=n(U,"TR",{});var Q=s(C);ko=n(Q,"TD",{});var dd=s(ko);Ms=i(dd,"MiT-b2"),dd.forEach(t),zs=d(Q),xo=n(Q,"TD",{});var cd=s(xo);js=i(cd,"[3, 4, 6, 3]"),cd.forEach(t),qs=d(Q),Go=n(Q,"TD",{});var md=s(Go);As=i(md,"[64, 128, 320, 512]"),md.forEach(t),Ds=d(Q),Eo=n(Q,"TD",{});var hd=s(Eo);Ns=i(hd,"768"),hd.forEach(t),Ls=d(Q),$o=n(Q,"TD",{});var pd=s($o);Ss=i(pd,"25.4"),pd.forEach(t),Os=d(Q),Fo=n(Q,"TD",{});var gd=s(Fo);Ws=i(gd,"81.6"),gd.forEach(t),Q.forEach(t),Rs=d(U),M=n(U,"TR",{});var K=s(M);Co=n(K,"TD",{});var ud=s(Co);Hs=i(ud,"MiT-b3"),ud.forEach(t),Bs=d(K),Mo=n(K,"TD",{});var fd=s(Mo);Vs=i(fd,"[3, 4, 18, 3]"),fd.forEach(t),Us=d(K),zo=n(K,"TD",{});var _d=s(zo);Js=i(_d,"[64, 128, 320, 512]"),_d.forEach(t),Xs=d(K),jo=n(K,"TD",{});var vd=s(jo);Qs=i(vd,"768"),vd.forEach(t),Ks=d(K),qo=n(K,"TD",{});var Td=s(qo);Ys=i(Td,"45.2"),Td.forEach(t),Zs=d(K),Ao=n(K,"TD",{});var bd=s(Ao);er=i(bd,"83.1"),bd.forEach(t),K.forEach(t),tr=d(U),z=n(U,"TR",{});var Y=s(z);Do=n(Y,"TD",{});var Id=s(Do);or=i(Id,"MiT-b4"),Id.forEach(t),ar=d(Y),No=n(Y,"TD",{});var yd=s(No);nr=i(yd,"[3, 8, 27, 3]"),yd.forEach(t),sr=d(Y),Lo=n(Y,"TD",{});var wd=s(Lo);rr=i(wd,"[64, 128, 320, 512]"),wd.forEach(t),ir=d(Y),So=n(Y,"TD",{});var Pd=s(So);lr=i(Pd,"768"),Pd.forEach(t),dr=d(Y),Oo=n(Y,"TD",{});var kd=s(Oo);cr=i(kd,"62.6"),kd.forEach(t),mr=d(Y),Wo=n(Y,"TD",{});var xd=s(Wo);hr=i(xd,"83.6"),xd.forEach(t),Y.forEach(t),pr=d(U),j=n(U,"TR",{});var Z=s(j);Ro=n(Z,"TD",{});var Gd=s(Ro);gr=i(Gd,"MiT-b5"),Gd.forEach(t),ur=d(Z),Ho=n(Z,"TD",{});var Ed=s(Ho);fr=i(Ed,"[3, 6, 40, 3]"),Ed.forEach(t),_r=d(Z),Bo=n(Z,"TD",{});var $d=s(Bo);vr=i($d,"[64, 128, 320, 
512]"),$d.forEach(t),Tr=d(Z),Vo=n(Z,"TD",{});var Fd=s(Vo);br=i(Fd,"768"),Fd.forEach(t),Ir=d(Z),Uo=n(Z,"TD",{});var Cd=s(Uo);yr=i(Cd,"82.0"),Cd.forEach(t),wr=d(Z),Jo=n(Z,"TD",{});var Md=s(Jo);Pr=i(Md,"83.8"),Md.forEach(t),Z.forEach(t),U.forEach(t),Xa.forEach(t),Ca=d(o),me=n(o,"H2",{class:!0});var Qa=s(me);ke=n(Qa,"A",{id:!0,class:!0,href:!0});var zd=s(ke);Xo=n(zd,"SPAN",{});var jd=s(Xo);T(Je.$$.fragment,jd),jd.forEach(t),zd.forEach(t),kr=d(Qa),Qo=n(Qa,"SPAN",{});var qd=s(Qo);xr=i(qd,"ImageGPTConfig"),qd.forEach(t),Qa.forEach(t),Ma=d(o),q=n(o,"DIV",{class:!0});var ae=s(q);T(Xe.$$.fragment,ae),Gr=d(ae),te=n(ae,"P",{});var qe=s(te);Er=i(qe,"This is the configuration class to store the configuration of a "),zt=n(qe,"A",{href:!0});var Ad=s(zt);$r=i(Ad,"ImageGPTModel"),Ad.forEach(t),Fr=i(qe,` or a `),Ko=n(qe,"CODE",{});var Dd=s(Ko);Cr=i(Dd,"TFImageGPTModel"),Dd.forEach(t),Mr=i(qe,`. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT `),Qe=n(qe,"A",{href:!0,rel:!0});var Nd=s(Qe);zr=i(Nd,"small"),Nd.forEach(t),jr=i(qe," architecture."),qe.forEach(t),qr=d(ae),he=n(ae,"P",{});var Rt=s(he);Ar=i(Rt,"Configuration objects inherit from "),jt=n(Rt,"A",{href:!0});var Ld=s(jt);Dr=i(Ld,"PretrainedConfig"),Ld.forEach(t),Nr=i(Rt,` and can be used to control the model outputs. Read the documentation from `),qt=n(Rt,"A",{href:!0});var Sd=s(qt);Lr=i(Sd,"PretrainedConfig"),Sd.forEach(t),Sr=i(Rt," for more information."),Rt.forEach(t),Or=d(ae),Yo=n(ae,"P",{});var Od=s(Yo);Wr=i(Od,"Example:"),Od.forEach(t),Rr=d(ae),T(Ke.$$.fragment,ae),ae.forEach(t),za=d(o),pe=n(o,"H2",{class:!0});var Ka=s(pe);xe=n(Ka,"A",{id:!0,class:!0,href:!0});var Wd=s(xe);Zo=n(Wd,"SPAN",{});var Rd=s(Zo);T(Ye.$$.fragment,Rd),Rd.forEach(t),Wd.forEach(t),Hr=d(Ka),ea=n(Ka,"SPAN",{});var Hd=s(ea);Br=i(Hd,"ImageGPTFeatureExtractor"),Hd.forEach(t),Ka.forEach(t),ja=d(o),R=n(o,"DIV",{class:!0});var Ae=s(R);T(Ze.$$.fragment,Ae),Vr=d(Ae),ta=n(Ae,"P",{});var Bd=s(ta);Ur=i(Bd,`Constructs an ImageGPT feature extractor. This feature extractor can be used to resize images to a smaller resolution (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of \u201Cpixel values\u201D (color clusters).`),Bd.forEach(t),Jr=d(Ae),et=n(Ae,"P",{});var Ya=s(et);Xr=i(Ya,"This feature extractor inherits from "),oa=n(Ya,"CODE",{});var Vd=s(oa);Qr=i(Vd,"FeatureExtractionMixin"),Vd.forEach(t),Kr=i(Ya,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ya.forEach(t),Yr=d(Ae),oe=n(Ae,"DIV",{class:!0});var Ht=s(oe);T(tt.$$.fragment,Ht),Zr=d(Ht),aa=n(Ht,"P",{});var Ud=s(aa);ei=i(Ud,"Main method to prepare for the model one or several image(s)."),Ud.forEach(t),ti=d(Ht),T(Ge.$$.fragment,Ht),Ht.forEach(t),Ae.forEach(t),qa=d(o),ge=n(o,"H2",{class:!0});var Za=s(ge);Ee=n(Za,"A",{id:!0,class:!0,href:!0});var Jd=s(Ee);na=n(Jd,"SPAN",{});var Xd=s(na);T(ot.$$.fragment,Xd),Xd.forEach(t),Jd.forEach(t),oi=d(Za),sa=n(Za,"SPAN",{});var Qd=s(sa);ai=i(Qd,"ImageGPTModel"),Qd.forEach(t),Za.forEach(t),Aa=d(o),A=n(o,"DIV",{class:!0});var ne=s(A);T(at.$$.fragment,ne),ni=d(ne),ra=n(ne,"P",{});var Kd=s(ra);si=i(Kd,"The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top."),Kd.forEach(t),ri=d(ne),nt=n(ne,"P",{});var en=s(nt);ii=i(en,"This model inherits from "),At=n(en,"A",{href:!0});var Yd=s(At);li=i(Yd,"PreTrainedModel"),Yd.forEach(t),di=i(en,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),en.forEach(t),ci=d(ne),st=n(ne,"P",{});var tn=s(st);mi=i(tn,"This model is also a PyTorch "),rt=n(tn,"A",{href:!0,rel:!0});var Zd=s(rt);hi=i(Zd,"torch.nn.Module"),Zd.forEach(t),pi=i(tn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),tn.forEach(t),gi=d(ne),S=n(ne,"DIV",{class:!0});var se=s(S);T(it.$$.fragment,se),ui=d(se),ue=n(se,"P",{});var Bt=s(ue);fi=i(Bt,"The "),Dt=n(Bt,"A",{href:!0});var ec=s(Dt);_i=i(ec,"ImageGPTModel"),ec.forEach(t),vi=i(Bt," forward method, overrides the "),ia=n(Bt,"CODE",{});var tc=s(ia);Ti=i(tc,"__call__"),tc.forEach(t),bi=i(Bt," special method."),Bt.forEach(t),Ii=d(se),T($e.$$.fragment,se),yi=d(se),la=n(se,"P",{});var oc=s(la);wi=i(oc,"Examples:"),oc.forEach(t),Pi=d(se),T(lt.$$.fragment,se),se.forEach(t),ne.forEach(t),Da=d(o),fe=n(o,"H2",{class:!0});var on=s(fe);Fe=n(on,"A",{id:!0,class:!0,href:!0});var ac=s(Fe);da=n(ac,"SPAN",{});var nc=s(da);T(dt.$$.fragment,nc),nc.forEach(t),ac.forEach(t),ki=d(on),ca=n(on,"SPAN",{});var sc=s(ca);xi=i(sc,"ImageGPTForCausalImageModeling"),sc.forEach(t),on.forEach(t),Na=d(o),D=n(o,"DIV",{class:!0});var re=s(D);T(ct.$$.fragment,re),Gi=d(re),ma=n(re,"P",{});var rc=s(ma);Ei=i(rc,`The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),rc.forEach(t),$i=d(re),mt=n(re,"P",{});var an=s(mt);Fi=i(an,"This model inherits from "),Nt=n(an,"A",{href:!0});var ic=s(Nt);Ci=i(ic,"PreTrainedModel"),ic.forEach(t),Mi=i(an,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),an.forEach(t),zi=d(re),ht=n(re,"P",{});var nn=s(ht);ji=i(nn,"This model is also a PyTorch "),pt=n(nn,"A",{href:!0,rel:!0});var lc=s(pt);qi=i(lc,"torch.nn.Module"),lc.forEach(t),Ai=i(nn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nn.forEach(t),Di=d(re),O=n(re,"DIV",{class:!0});var ie=s(O);T(gt.$$.fragment,ie),Ni=d(ie),_e=n(ie,"P",{});var Vt=s(_e);Li=i(Vt,"The "),Lt=n(Vt,"A",{href:!0});var dc=s(Lt);Si=i(dc,"ImageGPTForCausalImageModeling"),dc.forEach(t),Oi=i(Vt," forward method, overrides the "),ha=n(Vt,"CODE",{});var cc=s(ha);Wi=i(cc,"__call__"),cc.forEach(t),Ri=i(Vt," special method."),Vt.forEach(t),Hi=d(ie),T(Ce.$$.fragment,ie),Bi=d(ie),pa=n(ie,"P",{});var mc=s(pa);Vi=i(mc,"Examples:"),mc.forEach(t),Ui=d(ie),T(ut.$$.fragment,ie),ie.forEach(t),re.forEach(t),La=d(o),ve=n(o,"H2",{class:!0});var sn=s(ve);Me=n(sn,"A",{id:!0,class:!0,href:!0});var hc=s(Me);ga=n(hc,"SPAN",{});var pc=s(ga);T(ft.$$.fragment,pc),pc.forEach(t),hc.forEach(t),Ji=d(sn),ua=n(sn,"SPAN",{});var gc=s(ua);Xi=i(gc,"ImageGPTForImageClassification"),gc.forEach(t),sn.forEach(t),Sa=d(o),N=n(o,"DIV",{class:!0});var le=s(N);T(_t.$$.fragment,le),Qi=d(le),vt=n(le,"P",{});var rn=s(vt);Ki=i(rn,`The ImageGPT Model transformer with an image classification head on top (linear layer). `),St=n(rn,"A",{href:!0});var uc=s(St);Yi=i(uc,"ImageGPTForImageClassification"),uc.forEach(t),Zi=i(rn,` average-pools the hidden states in order to do the classification.`),rn.forEach(t),el=d(le),Tt=n(le,"P",{});var ln=s(Tt);tl=i(ln,"This model inherits from "),Ot=n(ln,"A",{href:!0});var fc=s(Ot);ol=i(fc,"PreTrainedModel"),fc.forEach(t),al=i(ln,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ln.forEach(t),nl=d(le),bt=n(le,"P",{});var dn=s(bt);sl=i(dn,"This model is also a PyTorch "),It=n(dn,"A",{href:!0,rel:!0});var _c=s(It);rl=i(_c,"torch.nn.Module"),_c.forEach(t),il=i(dn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dn.forEach(t),ll=d(le),W=n(le,"DIV",{class:!0});var de=s(W);T(yt.$$.fragment,de),dl=d(de),Te=n(de,"P",{});var Ut=s(Te);cl=i(Ut,"The "),Wt=n(Ut,"A",{href:!0});var vc=s(Wt);ml=i(vc,"ImageGPTForImageClassification"),vc.forEach(t),hl=i(Ut," forward method, overrides the "),fa=n(Ut,"CODE",{});var Tc=s(fa);pl=i(Tc,"__call__"),Tc.forEach(t),gl=i(Ut," special method."),Ut.forEach(t),ul=d(de),T(ze.$$.fragment,de),fl=d(de),_a=n(de,"P",{});var bc=s(_a);_l=i(bc,"Examples:"),bc.forEach(t),vl=d(de),T(wt.$$.fragment,de),de.forEach(t),le.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(Cc)),c(u,"id","imagegpt"),c(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(u,"href","#imagegpt"),c(g,"class","relative group"),c(Ie,"id","overview"),c(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ie,"href","#overview"),c(ce,"class","relative group"),c(Ne,"href","https://openai.com/blog/image-gpt"),c(Ne,"rel","nofollow"),kc(we.src,Tl="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png")||c(we,"src",Tl),c(we,"alt","drawing"),c(we,"width","600"),c(Le,"href","https://huggingface.co/nielsr"),c(Le,"rel","nofollow"),c(Se,"href","https://github.com/openai/image-gpt/issues/7"),c(Se,"rel","nofollow"),c(Oe,"href","https://github.com/openai/image-gpt"),c(Oe,"rel","nofollow"),c(Re,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT"),c(Re,"rel","nofollow"),c(Ft,"href","./model_doc/gpt2"),c(Ct,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor"),c(Mt,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification"),c(ke,"id","transformers.ImageGPTConfig"),c(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ke,"href","#transformers.ImageGPTConfig"),c(me,"class","relative group"),c(zt,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTModel"),c(Qe,"href","https://huggingface.co/imagegpt"),c(Qe,"rel","nofollow"),c(jt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(qt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(q,"class","docstring"),c(xe,"id","transformers.ImageGPTFeatureExtractor"),c(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xe,"href","#transformers.ImageGPTFeatureExtractor"),c(pe,"class","relative group"),c(oe,"class","docstring"),c(R,"class","docstring"),c(Ee,"id","transformers.ImageGPTModel"),c(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ee,"href","#transformers.ImageGPTModel"),c(ge,"class","relative 
group"),c(At,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(rt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(rt,"rel","nofollow"),c(Dt,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTModel"),c(S,"class","docstring"),c(A,"class","docstring"),c(Fe,"id","transformers.ImageGPTForCausalImageModeling"),c(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fe,"href","#transformers.ImageGPTForCausalImageModeling"),c(fe,"class","relative group"),c(Nt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(pt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(pt,"rel","nofollow"),c(Lt,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTForCausalImageModeling"),c(O,"class","docstring"),c(D,"class","docstring"),c(Me,"id","transformers.ImageGPTForImageClassification"),c(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Me,"href","#transformers.ImageGPTForImageClassification"),c(ve,"class","relative group"),c(St,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification"),c(Ot,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(It,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(It,"rel","nofollow"),c(Wt,"href","/docs/transformers/v4.15.0/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification"),c(W,"class","docstring"),c(N,"class","docstring")},m(o,m){e(document.head,p),h(o,P,m),h(o,g,m),e(g,u),e(u,k),b(f,k,null),e(g,_),e(g,L),e(L,hn),h(o,Ia,m),h(o,ce,m),e(ce,Ie),e(Ie,Jt),b(De,Jt,null),e(ce,pn),e(ce,Xt),e(Xt,gn),h(o,ya,m),h(o,ye,m),e(ye,un),e(ye,Ne),e(Ne,fn),e(ye,_n),h(o,wa,m),h(o,xt,m),e(xt,vn),h(o,Pa,m),h(o,Gt,m),e(Gt,Qt),e(Qt,Tn),h(o,ka,m),h(o,we,m),h(o,xa,m),h(o,Et,m),e(Et,bn),h(o,Ga,m),h(o,H,m),e(H,In),e(H,Le),e(Le,yn),e(H,wn),e(H,Se),e(Se,Pn),e(H,kn),e(H,Oe),e(Oe,xn),e(H,Gn),h(o,Ea,m),h(o,$t,m),e($t,En),h(o,$a,m),h(o,x,m),e(x,We),e(We,$n),e(We,Re),e(Re,Fn),e(We,Cn),e(x,Mn),e(x,He),e(He,zn),e(He,Ft),e(Ft,jn),e(He,qn),e(x,An),e(x,Be),e(Be,Dn),e(Be,Ct),e(Ct,Nn),e(Be,Ln),e(x,Sn),e(x,Ve),e(Ve,On),e(Ve,Kt),e(Kt,Wn),e(Ve,Rn),e(x,Hn),e(x,Ue),e(Ue,Bn),e(Ue,Mt),e(Mt,Vn),e(Ue,Un),e(x,Jn),e(x,Yt),e(Yt,Xn),h(o,Fa,m),h(o,Pe,m),e(Pe,Zt),e(Zt,G),e(G,eo),e(eo,to),e(to,Qn),e(G,Kn),e(G,oo),e(oo,ao),e(ao,Yn),e(G,Zn),e(G,no),e(no,so),e(so,es),e(G,ts),e(G,ro),e(ro,io),e(io,os),e(G,as),e(G,lo),e(lo,co),e(co,ns),e(G,ss),e(G,mo),e(mo,ho),e(ho,rs),e(Pe,is),e(Pe,E),e(E,$),e($,po),e(po,ls),e($,ds),e($,go),e(go,cs),e($,ms),e($,uo),e(uo,hs),e($,ps),e($,fo),e(fo,gs),e($,us),e($,_o),e(_o,fs),e($,_s),e($,vo),e(vo,vs),e(E,Ts),e(E,F),e(F,To),e(To,bs),e(F,Is),e(F,bo),e(bo,ys),e(F,ws),e(F,Io),e(Io,Ps),e(F,ks),e(F,yo),e(yo,xs),e(F,Gs),e(F,wo),e(wo,Es),e(F,$s),e(F,Po),e(Po,Fs),e(E,Cs),e(E,C),e(C,ko),e(ko,Ms),e(C,zs),e(C,xo),e(xo,js),e(C,qs),e(C,Go),e(Go,As),e(C,Ds),e(C,Eo),e(Eo,Ns),e(C,Ls),e(C,$o),e($o,Ss),e(C,Os),e(C,Fo),e(Fo,Ws),e(E,Rs),e(E,M),e(M,Co),e(Co,Hs),e(M,Bs),e(M,Mo),e(Mo,Vs),e(M,Us),e(M,zo),e(zo,Js),e(M,Xs),e(M,jo),e(jo,Qs),e(M,Ks),e(M,qo),e(qo,Ys),e(M,Zs),e(M,Ao),e(Ao,er),e(E,tr),e(E,z),e(z,Do),e(Do,or),e(z,ar),e(z,No),e(No,nr),e(z,sr),e(z,Lo),e(Lo,rr),e(z,ir),e(z,So),e(So,lr),e(z,dr),e(z,Oo),e
(Oo,cr),e(z,mr),e(z,Wo),e(Wo,hr),e(E,pr),e(E,j),e(j,Ro),e(Ro,gr),e(j,ur),e(j,Ho),e(Ho,fr),e(j,_r),e(j,Bo),e(Bo,vr),e(j,Tr),e(j,Vo),e(Vo,br),e(j,Ir),e(j,Uo),e(Uo,yr),e(j,wr),e(j,Jo),e(Jo,Pr),h(o,Ca,m),h(o,me,m),e(me,ke),e(ke,Xo),b(Je,Xo,null),e(me,kr),e(me,Qo),e(Qo,xr),h(o,Ma,m),h(o,q,m),b(Xe,q,null),e(q,Gr),e(q,te),e(te,Er),e(te,zt),e(zt,$r),e(te,Fr),e(te,Ko),e(Ko,Cr),e(te,Mr),e(te,Qe),e(Qe,zr),e(te,jr),e(q,qr),e(q,he),e(he,Ar),e(he,jt),e(jt,Dr),e(he,Nr),e(he,qt),e(qt,Lr),e(he,Sr),e(q,Or),e(q,Yo),e(Yo,Wr),e(q,Rr),b(Ke,q,null),h(o,za,m),h(o,pe,m),e(pe,xe),e(xe,Zo),b(Ye,Zo,null),e(pe,Hr),e(pe,ea),e(ea,Br),h(o,ja,m),h(o,R,m),b(Ze,R,null),e(R,Vr),e(R,ta),e(ta,Ur),e(R,Jr),e(R,et),e(et,Xr),e(et,oa),e(oa,Qr),e(et,Kr),e(R,Yr),e(R,oe),b(tt,oe,null),e(oe,Zr),e(oe,aa),e(aa,ei),e(oe,ti),b(Ge,oe,null),h(o,qa,m),h(o,ge,m),e(ge,Ee),e(Ee,na),b(ot,na,null),e(ge,oi),e(ge,sa),e(sa,ai),h(o,Aa,m),h(o,A,m),b(at,A,null),e(A,ni),e(A,ra),e(ra,si),e(A,ri),e(A,nt),e(nt,ii),e(nt,At),e(At,li),e(nt,di),e(A,ci),e(A,st),e(st,mi),e(st,rt),e(rt,hi),e(st,pi),e(A,gi),e(A,S),b(it,S,null),e(S,ui),e(S,ue),e(ue,fi),e(ue,Dt),e(Dt,_i),e(ue,vi),e(ue,ia),e(ia,Ti),e(ue,bi),e(S,Ii),b($e,S,null),e(S,yi),e(S,la),e(la,wi),e(S,Pi),b(lt,S,null),h(o,Da,m),h(o,fe,m),e(fe,Fe),e(Fe,da),b(dt,da,null),e(fe,ki),e(fe,ca),e(ca,xi),h(o,Na,m),h(o,D,m),b(ct,D,null),e(D,Gi),e(D,ma),e(ma,Ei),e(D,$i),e(D,mt),e(mt,Fi),e(mt,Nt),e(Nt,Ci),e(mt,Mi),e(D,zi),e(D,ht),e(ht,ji),e(ht,pt),e(pt,qi),e(ht,Ai),e(D,Di),e(D,O),b(gt,O,null),e(O,Ni),e(O,_e),e(_e,Li),e(_e,Lt),e(Lt,Si),e(_e,Oi),e(_e,ha),e(ha,Wi),e(_e,Ri),e(O,Hi),b(Ce,O,null),e(O,Bi),e(O,pa),e(pa,Vi),e(O,Ui),b(ut,O,null),h(o,La,m),h(o,ve,m),e(ve,Me),e(Me,ga),b(ft,ga,null),e(ve,Ji),e(ve,ua),e(ua,Xi),h(o,Sa,m),h(o,N,m),b(_t,N,null),e(N,Qi),e(N,vt),e(vt,Ki),e(vt,St),e(St,Yi),e(vt,Zi),e(N,el),e(N,Tt),e(Tt,tl),e(Tt,Ot),e(Ot,ol),e(Tt,al),e(N,nl),e(N,bt),e(bt,sl),e(bt,It),e(It,rl),e(bt,il),e(N,ll),e(N,W),b(yt,W,null),e(W,dl),e(W,Te),e(Te,cl),e(Te,Wt),e(Wt,ml),e(Te,hl),e(Te,fa),e(fa,pl),e(Te,gl),e(W,ul),b(ze,W,null),e(W,fl),e(W,_a),e(_a,_l),e(W,vl),b(wt,W,null),Oa=!0},p(o,[m]){const Pt={};m&2&&(Pt.$$scope={dirty:m,ctx:o}),Ge.$set(Pt);const va={};m&2&&(va.$$scope={dirty:m,ctx:o}),$e.$set(va);const Ta={};m&2&&(Ta.$$scope={dirty:m,ctx:o}),Ce.$set(Ta);const 
ba={};m&2&&(ba.$$scope={dirty:m,ctx:o}),ze.$set(ba)},i(o){Oa||(I(f.$$.fragment,o),I(De.$$.fragment,o),I(Je.$$.fragment,o),I(Xe.$$.fragment,o),I(Ke.$$.fragment,o),I(Ye.$$.fragment,o),I(Ze.$$.fragment,o),I(tt.$$.fragment,o),I(Ge.$$.fragment,o),I(ot.$$.fragment,o),I(at.$$.fragment,o),I(it.$$.fragment,o),I($e.$$.fragment,o),I(lt.$$.fragment,o),I(dt.$$.fragment,o),I(ct.$$.fragment,o),I(gt.$$.fragment,o),I(Ce.$$.fragment,o),I(ut.$$.fragment,o),I(ft.$$.fragment,o),I(_t.$$.fragment,o),I(yt.$$.fragment,o),I(ze.$$.fragment,o),I(wt.$$.fragment,o),Oa=!0)},o(o){y(f.$$.fragment,o),y(De.$$.fragment,o),y(Je.$$.fragment,o),y(Xe.$$.fragment,o),y(Ke.$$.fragment,o),y(Ye.$$.fragment,o),y(Ze.$$.fragment,o),y(tt.$$.fragment,o),y(Ge.$$.fragment,o),y(ot.$$.fragment,o),y(at.$$.fragment,o),y(it.$$.fragment,o),y($e.$$.fragment,o),y(lt.$$.fragment,o),y(dt.$$.fragment,o),y(ct.$$.fragment,o),y(gt.$$.fragment,o),y(Ce.$$.fragment,o),y(ut.$$.fragment,o),y(ft.$$.fragment,o),y(_t.$$.fragment,o),y(yt.$$.fragment,o),y(ze.$$.fragment,o),y(wt.$$.fragment,o),Oa=!1},d(o){t(p),o&&t(P),o&&t(g),w(f),o&&t(Ia),o&&t(ce),w(De),o&&t(ya),o&&t(ye),o&&t(wa),o&&t(xt),o&&t(Pa),o&&t(Gt),o&&t(ka),o&&t(we),o&&t(xa),o&&t(Et),o&&t(Ga),o&&t(H),o&&t(Ea),o&&t($t),o&&t($a),o&&t(x),o&&t(Fa),o&&t(Pe),o&&t(Ca),o&&t(me),w(Je),o&&t(Ma),o&&t(q),w(Xe),w(Ke),o&&t(za),o&&t(pe),w(Ye),o&&t(ja),o&&t(R),w(Ze),w(tt),w(Ge),o&&t(qa),o&&t(ge),w(ot),o&&t(Aa),o&&t(A),w(at),w(it),w($e),w(lt),o&&t(Da),o&&t(fe),w(dt),o&&t(Na),o&&t(D),w(ct),w(gt),w(Ce),w(ut),o&&t(La),o&&t(ve),w(ft),o&&t(Sa),o&&t(N),w(_t),w(yt),w(ze),w(wt)}}}const Cc={local:"imagegpt",sections:[{local:"overview",title:"Overview"},{local:"transformers.ImageGPTConfig",title:"ImageGPTConfig"},{local:"transformers.ImageGPTFeatureExtractor",title:"ImageGPTFeatureExtractor"},{local:"transformers.ImageGPTModel",title:"ImageGPTModel"},{local:"transformers.ImageGPTForCausalImageModeling",title:"ImageGPTForCausalImageModeling"},{local:"transformers.ImageGPTForImageClassification",title:"ImageGPTForImageClassification"}],title:"ImageGPT"};function Mc(ee,p,P){let{fw:g}=p;return ee.$$set=u=>{"fw"in u&&P(0,g=u.fw)},[g]}class Lc extends Ic{constructor(p){super();yc(this,p,Mc,Fc,wc,{fw:0})}}export{Lc as default,Cc as metadata};
9,943
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/retribert.mdx-442b273b.js
import{S as mo,i as fo,s as ho,e as o,k as d,w as b,t as s,L as po,c as n,d as r,m as c,a,x as k,h as l,b as i,J as e,g as f,y,K as uo,q as B,o as T,B as R}from"../../chunks/vendor-b1433968.js";import{D as Ve}from"../../chunks/Docstring-ff504c58.js";import{I as we}from"../../chunks/IconCopyLink-7029626d.js";function _o(_t){let w,me,_,g,ze,V,gt,$e,vt,Ke,E,q,Ee,K,bt,xe,kt,Ue,F,yt,U,Bt,Tt,We,z,Rt,W,wt,zt,J,$t,Et,Je,x,N,Pe,Q,xt,Ce,Pt,Qe,v,G,Ct,X,At,fe,Mt,qt,Ft,P,Nt,he,St,Dt,pe,It,jt,Ge,C,S,Ae,Y,Lt,Me,Ht,Xe,p,Z,Ot,qe,Vt,Kt,D,Fe,Ut,Wt,ue,Jt,Qt,Gt,ee,Xt,_e,Yt,Zt,Ye,A,I,Ne,te,er,Se,tr,Ze,u,re,rr,oe,or,De,nr,ar,ir,j,ge,sr,lr,ve,dr,cr,mr,ne,fr,be,hr,pr,et,M,L,Ie,ae,ur,je,_r,tt,h,ie,gr,Le,vr,br,se,kr,ke,yr,Br,Tr,le,Rr,de,wr,zr,$r,ye,ce,rt;return V=new we({}),K=new we({}),Q=new we({}),G=new Ve({props:{name:"class transformers.RetriBertConfig",anchor:"transformers.RetriBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 8"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"share_encoders",val:" = True"},{name:"projection_dim",val:" = 128"},{name:"pad_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/retribert/configuration_retribert.py#L29",parametersDescription:[{anchor:"transformers.RetriBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertModel">RetriBertModel</a>`,name:"vocab_size"},{anchor:"transformers.RetriBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.RetriBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.RetriBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.RetriBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.RetriBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.RetriBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.RetriBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.RetriBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.RetriBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <em>token_type_ids</em> passed into <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel">BertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.RetriBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.RetriBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.RetriBertConfig.share_encoders",description:`<strong>share_encoders</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use the same Bert-type encoder for the queries and document`,name:"share_encoders"},{anchor:"transformers.RetriBertConfig.projection_dim",description:`<strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Final dimension of the query and document representation after projection`,name:"projection_dim"}]}}),Y=new we({}),Z=new Ve({props:{name:"class transformers.RetriBertTokenizer",anchor:"transformers.RetriBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/retribert/tokenization_retribert.py#L41"}}),te=new we({}),re=new Ve({props:{name:"class transformers.RetriBertTokenizerFast",anchor:"transformers.RetriBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = 
'[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/retribert/tokenization_retribert_fast.py#L45"}}),ae=new we({}),ie=new Ve({props:{name:"class transformers.RetriBertModel",anchor:"transformers.RetriBertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/retribert/modeling_retribert.py#L89",parametersDescription:[{anchor:"transformers.RetriBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertConfig">RetriBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ce=new Ve({props:{name:"forward",anchor:"transformers.RetriBertModel.forward",parameters:[{name:"input_ids_query",val:""},{name:"attention_mask_query",val:""},{name:"input_ids_doc",val:""},{name:"attention_mask_doc",val:""},{name:"checkpoint_batch_size",val:" = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/retribert/modeling_retribert.py#L177",parametersDescription:[{anchor:"transformers.RetriBertModel.forward.input_ids_query",description:`<strong>input_ids_query</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary for the queries in a batch.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertTokenizer">RetriBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids_query"},{anchor:"transformers.RetriBertModel.forward.attention_mask_query",description:`<strong>attention_mask_query</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask_query"},{anchor:"transformers.RetriBertModel.forward.input_ids_doc",description:`<strong>input_ids_doc</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary for the documents in a batch.`,name:"input_ids_doc"},{anchor:"transformers.RetriBertModel.forward.attention_mask_doc",description:`<strong>attention_mask_doc</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on documents padding token indices.`,name:"attention_mask_doc"},{anchor:"transformers.RetriBertModel.forward.checkpoint_batch_size",description:`<strong>checkpoint_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to &#x201C;<code>-1</code>) &#x2014; If greater than 0, uses gradient checkpointing to only compute sequence representation on <code>checkpoint_batch_size</code> examples at a time on the GPU. All query representations are still compared to all document representations in the batch.`,name:"checkpoint_batch_size"}],returnDescription:` <p>The bidirectional cross-entropy loss obtained while trying to match each query to its corresponding document and each document to its corresponding query in the batch</p> `,returnType:` <p>\`torch.FloatTensor\u201C</p> `}}),{c(){w=o("meta"),me=d(),_=o("h1"),g=o("a"),ze=o("span"),b(V.$$.fragment),gt=d(),$e=o("span"),vt=s("RetriBERT"),Ke=d(),E=o("h2"),q=o("a"),Ee=o("span"),b(K.$$.fragment),bt=d(),xe=o("span"),kt=s("Overview"),Ue=d(),F=o("p"),yt=s("The RetriBERT model was proposed in the blog post "),U=o("a"),Bt=s(`Explain Anything Like I\u2019m Five: A Model for Open Domain Long Form Question Answering`),Tt=s(`. RetriBERT is a small model that uses either a single or pair of BERT encoders with lower-dimension projection for dense semantic indexing of text.`),We=d(),z=o("p"),Rt=s("This model was contributed by "),W=o("a"),wt=s("yjernite"),zt=s(`. Code to train and use the model can be found `),J=o("a"),$t=s("here"),Et=s("."),Je=d(),x=o("h2"),N=o("a"),Pe=o("span"),b(Q.$$.fragment),xt=d(),Ce=o("span"),Pt=s("RetriBertConfig"),Qe=d(),v=o("div"),b(G.$$.fragment),Ct=d(),X=o("p"),At=s("This is the configuration class to store the configuration of a "),fe=o("a"),Mt=s("RetriBertModel"),qt=s(`. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture.`),Ft=d(),P=o("p"),Nt=s("Configuration objects inherit from "),he=o("a"),St=s("PretrainedConfig"),Dt=s(` and can be used to control the model outputs. 
Read the documentation from `),pe=o("a"),It=s("PretrainedConfig"),jt=s(" for more information."),Ge=d(),C=o("h2"),S=o("a"),Ae=o("span"),b(Y.$$.fragment),Lt=d(),Me=o("span"),Ht=s("RetriBertTokenizer"),Xe=d(),p=o("div"),b(Z.$$.fragment),Ot=d(),qe=o("p"),Vt=s("Constructs a RetriBERT tokenizer."),Kt=d(),D=o("p"),Fe=o("code"),Ut=s("RetroBertTokenizer"),Wt=s(" is identical to "),ue=o("a"),Jt=s("BertTokenizer"),Qt=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Gt=d(),ee=o("p"),Xt=s("Refer to superclass "),_e=o("a"),Yt=s("BertTokenizer"),Zt=s(` for usage examples and documentation concerning parameters.`),Ye=d(),A=o("h2"),I=o("a"),Ne=o("span"),b(te.$$.fragment),er=d(),Se=o("span"),tr=s("RetriBertTokenizerFast"),Ze=d(),u=o("div"),b(re.$$.fragment),rr=d(),oe=o("p"),or=s("Construct a \u201Cfast\u201D RetriBERT tokenizer (backed by HuggingFace\u2019s "),De=o("em"),nr=s("tokenizers"),ar=s(" library)."),ir=d(),j=o("p"),ge=o("a"),sr=s("RetriBertTokenizerFast"),lr=s(" is identical to "),ve=o("a"),dr=s("BertTokenizerFast"),cr=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),mr=d(),ne=o("p"),fr=s("Refer to superclass "),be=o("a"),hr=s("BertTokenizerFast"),pr=s(` for usage examples and documentation concerning parameters.`),et=d(),M=o("h2"),L=o("a"),Ie=o("span"),b(ae.$$.fragment),ur=d(),je=o("span"),_r=s("RetriBertModel"),tt=d(),h=o("div"),b(ie.$$.fragment),gr=d(),Le=o("p"),vr=s("Bert Based model to embed queries or document for document retrieval."),br=d(),se=o("p"),kr=s("This model inherits from "),ke=o("a"),yr=s("PreTrainedModel"),Br=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tr=d(),le=o("p"),Rr=s("This model is also a PyTorch "),de=o("a"),wr=s("torch.nn.Module"),zr=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$r=d(),ye=o("div"),b(ce.$$.fragment),this.h()},l(t){const m=po('[data-svelte="svelte-1phssyn"]',document.head);w=n(m,"META",{name:!0,content:!0}),m.forEach(r),me=c(t),_=n(t,"H1",{class:!0});var ot=a(_);g=n(ot,"A",{id:!0,class:!0,href:!0});var Er=a(g);ze=n(Er,"SPAN",{});var xr=a(ze);k(V.$$.fragment,xr),xr.forEach(r),Er.forEach(r),gt=c(ot),$e=n(ot,"SPAN",{});var Pr=a($e);vt=l(Pr,"RetriBERT"),Pr.forEach(r),ot.forEach(r),Ke=c(t),E=n(t,"H2",{class:!0});var nt=a(E);q=n(nt,"A",{id:!0,class:!0,href:!0});var Cr=a(q);Ee=n(Cr,"SPAN",{});var Ar=a(Ee);k(K.$$.fragment,Ar),Ar.forEach(r),Cr.forEach(r),bt=c(nt),xe=n(nt,"SPAN",{});var Mr=a(xe);kt=l(Mr,"Overview"),Mr.forEach(r),nt.forEach(r),Ue=c(t),F=n(t,"P",{});var at=a(F);yt=l(at,"The RetriBERT model was proposed in the blog post "),U=n(at,"A",{href:!0,rel:!0});var qr=a(U);Bt=l(qr,`Explain Anything Like I\u2019m Five: A Model for Open Domain Long Form Question Answering`),qr.forEach(r),Tt=l(at,`. RetriBERT is a small model that uses either a single or pair of BERT encoders with lower-dimension projection for dense semantic indexing of text.`),at.forEach(r),We=c(t),z=n(t,"P",{});var Be=a(z);Rt=l(Be,"This model was contributed by "),W=n(Be,"A",{href:!0,rel:!0});var Fr=a(W);wt=l(Fr,"yjernite"),Fr.forEach(r),zt=l(Be,`. 
Code to train and use the model can be found `),J=n(Be,"A",{href:!0,rel:!0});var Nr=a(J);$t=l(Nr,"here"),Nr.forEach(r),Et=l(Be,"."),Be.forEach(r),Je=c(t),x=n(t,"H2",{class:!0});var it=a(x);N=n(it,"A",{id:!0,class:!0,href:!0});var Sr=a(N);Pe=n(Sr,"SPAN",{});var Dr=a(Pe);k(Q.$$.fragment,Dr),Dr.forEach(r),Sr.forEach(r),xt=c(it),Ce=n(it,"SPAN",{});var Ir=a(Ce);Pt=l(Ir,"RetriBertConfig"),Ir.forEach(r),it.forEach(r),Qe=c(t),v=n(t,"DIV",{class:!0});var Te=a(v);k(G.$$.fragment,Te),Ct=c(Te),X=n(Te,"P",{});var st=a(X);At=l(st,"This is the configuration class to store the configuration of a "),fe=n(st,"A",{href:!0});var jr=a(fe);Mt=l(jr,"RetriBertModel"),jr.forEach(r),qt=l(st,`. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture.`),st.forEach(r),Ft=c(Te),P=n(Te,"P",{});var Re=a(P);Nt=l(Re,"Configuration objects inherit from "),he=n(Re,"A",{href:!0});var Lr=a(he);St=l(Lr,"PretrainedConfig"),Lr.forEach(r),Dt=l(Re,` and can be used to control the model outputs. Read the documentation from `),pe=n(Re,"A",{href:!0});var Hr=a(pe);It=l(Hr,"PretrainedConfig"),Hr.forEach(r),jt=l(Re," for more information."),Re.forEach(r),Te.forEach(r),Ge=c(t),C=n(t,"H2",{class:!0});var lt=a(C);S=n(lt,"A",{id:!0,class:!0,href:!0});var Or=a(S);Ae=n(Or,"SPAN",{});var Vr=a(Ae);k(Y.$$.fragment,Vr),Vr.forEach(r),Or.forEach(r),Lt=c(lt),Me=n(lt,"SPAN",{});var Kr=a(Me);Ht=l(Kr,"RetriBertTokenizer"),Kr.forEach(r),lt.forEach(r),Xe=c(t),p=n(t,"DIV",{class:!0});var H=a(p);k(Z.$$.fragment,H),Ot=c(H),qe=n(H,"P",{});var Ur=a(qe);Vt=l(Ur,"Constructs a RetriBERT tokenizer."),Ur.forEach(r),Kt=c(H),D=n(H,"P",{});var He=a(D);Fe=n(He,"CODE",{});var Wr=a(Fe);Ut=l(Wr,"RetroBertTokenizer"),Wr.forEach(r),Wt=l(He," is identical to "),ue=n(He,"A",{href:!0});var Jr=a(ue);Jt=l(Jr,"BertTokenizer"),Jr.forEach(r),Qt=l(He,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),He.forEach(r),Gt=c(H),ee=n(H,"P",{});var dt=a(ee);Xt=l(dt,"Refer to superclass "),_e=n(dt,"A",{href:!0});var Qr=a(_e);Yt=l(Qr,"BertTokenizer"),Qr.forEach(r),Zt=l(dt,` for usage examples and documentation concerning parameters.`),dt.forEach(r),H.forEach(r),Ye=c(t),A=n(t,"H2",{class:!0});var ct=a(A);I=n(ct,"A",{id:!0,class:!0,href:!0});var Gr=a(I);Ne=n(Gr,"SPAN",{});var Xr=a(Ne);k(te.$$.fragment,Xr),Xr.forEach(r),Gr.forEach(r),er=c(ct),Se=n(ct,"SPAN",{});var Yr=a(Se);tr=l(Yr,"RetriBertTokenizerFast"),Yr.forEach(r),ct.forEach(r),Ze=c(t),u=n(t,"DIV",{class:!0});var O=a(u);k(re.$$.fragment,O),rr=c(O),oe=n(O,"P",{});var mt=a(oe);or=l(mt,"Construct a \u201Cfast\u201D RetriBERT tokenizer (backed by HuggingFace\u2019s "),De=n(mt,"EM",{});var Zr=a(De);nr=l(Zr,"tokenizers"),Zr.forEach(r),ar=l(mt," library)."),mt.forEach(r),ir=c(O),j=n(O,"P",{});var Oe=a(j);ge=n(Oe,"A",{href:!0});var eo=a(ge);sr=l(eo,"RetriBertTokenizerFast"),eo.forEach(r),lr=l(Oe," is identical to "),ve=n(Oe,"A",{href:!0});var to=a(ve);dr=l(to,"BertTokenizerFast"),to.forEach(r),cr=l(Oe,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Oe.forEach(r),mr=c(O),ne=n(O,"P",{});var ft=a(ne);fr=l(ft,"Refer to superclass "),be=n(ft,"A",{href:!0});var ro=a(be);hr=l(ro,"BertTokenizerFast"),ro.forEach(r),pr=l(ft,` for usage examples and documentation concerning parameters.`),ft.forEach(r),O.forEach(r),et=c(t),M=n(t,"H2",{class:!0});var ht=a(M);L=n(ht,"A",{id:!0,class:!0,href:!0});var oo=a(L);Ie=n(oo,"SPAN",{});var no=a(Ie);k(ae.$$.fragment,no),no.forEach(r),oo.forEach(r),ur=c(ht),je=n(ht,"SPAN",{});var 
ao=a(je);_r=l(ao,"RetriBertModel"),ao.forEach(r),ht.forEach(r),tt=c(t),h=n(t,"DIV",{class:!0});var $=a(h);k(ie.$$.fragment,$),gr=c($),Le=n($,"P",{});var io=a(Le);vr=l(io,"Bert Based model to embed queries or document for document retrieval."),io.forEach(r),br=c($),se=n($,"P",{});var pt=a(se);kr=l(pt,"This model inherits from "),ke=n(pt,"A",{href:!0});var so=a(ke);yr=l(so,"PreTrainedModel"),so.forEach(r),Br=l(pt,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pt.forEach(r),Tr=c($),le=n($,"P",{});var ut=a(le);Rr=l(ut,"This model is also a PyTorch "),de=n(ut,"A",{href:!0,rel:!0});var lo=a(de);wr=l(lo,"torch.nn.Module"),lo.forEach(r),zr=l(ut,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ut.forEach(r),$r=c($),ye=n($,"DIV",{class:!0});var co=a(ye);k(ce.$$.fragment,co),co.forEach(r),$.forEach(r),this.h()},h(){i(w,"name","hf:doc:metadata"),i(w,"content",JSON.stringify(go)),i(g,"id","retribert"),i(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(g,"href","#retribert"),i(_,"class","relative group"),i(q,"id","overview"),i(q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(q,"href","#overview"),i(E,"class","relative group"),i(U,"href","https://yjernite.github.io/lfqa.html"),i(U,"rel","nofollow"),i(W,"href","https://huggingface.co/yjernite"),i(W,"rel","nofollow"),i(J,"href","https://github.com/huggingface/transformers/tree/master/examples/research-projects/distillation"),i(J,"rel","nofollow"),i(N,"id","transformers.RetriBertConfig"),i(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(N,"href","#transformers.RetriBertConfig"),i(x,"class","relative group"),i(fe,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertModel"),i(he,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(pe,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(v,"class","docstring"),i(S,"id","transformers.RetriBertTokenizer"),i(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(S,"href","#transformers.RetriBertTokenizer"),i(C,"class","relative group"),i(ue,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),i(_e,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),i(p,"class","docstring"),i(I,"id","transformers.RetriBertTokenizerFast"),i(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(I,"href","#transformers.RetriBertTokenizerFast"),i(A,"class","relative 
group"),i(ge,"href","/docs/transformers/v4.15.0/en/model_doc/retribert#transformers.RetriBertTokenizerFast"),i(ve,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),i(be,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),i(u,"class","docstring"),i(L,"id","transformers.RetriBertModel"),i(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(L,"href","#transformers.RetriBertModel"),i(M,"class","relative group"),i(ke,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(de,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(de,"rel","nofollow"),i(ye,"class","docstring"),i(h,"class","docstring")},m(t,m){e(document.head,w),f(t,me,m),f(t,_,m),e(_,g),e(g,ze),y(V,ze,null),e(_,gt),e(_,$e),e($e,vt),f(t,Ke,m),f(t,E,m),e(E,q),e(q,Ee),y(K,Ee,null),e(E,bt),e(E,xe),e(xe,kt),f(t,Ue,m),f(t,F,m),e(F,yt),e(F,U),e(U,Bt),e(F,Tt),f(t,We,m),f(t,z,m),e(z,Rt),e(z,W),e(W,wt),e(z,zt),e(z,J),e(J,$t),e(z,Et),f(t,Je,m),f(t,x,m),e(x,N),e(N,Pe),y(Q,Pe,null),e(x,xt),e(x,Ce),e(Ce,Pt),f(t,Qe,m),f(t,v,m),y(G,v,null),e(v,Ct),e(v,X),e(X,At),e(X,fe),e(fe,Mt),e(X,qt),e(v,Ft),e(v,P),e(P,Nt),e(P,he),e(he,St),e(P,Dt),e(P,pe),e(pe,It),e(P,jt),f(t,Ge,m),f(t,C,m),e(C,S),e(S,Ae),y(Y,Ae,null),e(C,Lt),e(C,Me),e(Me,Ht),f(t,Xe,m),f(t,p,m),y(Z,p,null),e(p,Ot),e(p,qe),e(qe,Vt),e(p,Kt),e(p,D),e(D,Fe),e(Fe,Ut),e(D,Wt),e(D,ue),e(ue,Jt),e(D,Qt),e(p,Gt),e(p,ee),e(ee,Xt),e(ee,_e),e(_e,Yt),e(ee,Zt),f(t,Ye,m),f(t,A,m),e(A,I),e(I,Ne),y(te,Ne,null),e(A,er),e(A,Se),e(Se,tr),f(t,Ze,m),f(t,u,m),y(re,u,null),e(u,rr),e(u,oe),e(oe,or),e(oe,De),e(De,nr),e(oe,ar),e(u,ir),e(u,j),e(j,ge),e(ge,sr),e(j,lr),e(j,ve),e(ve,dr),e(j,cr),e(u,mr),e(u,ne),e(ne,fr),e(ne,be),e(be,hr),e(ne,pr),f(t,et,m),f(t,M,m),e(M,L),e(L,Ie),y(ae,Ie,null),e(M,ur),e(M,je),e(je,_r),f(t,tt,m),f(t,h,m),y(ie,h,null),e(h,gr),e(h,Le),e(Le,vr),e(h,br),e(h,se),e(se,kr),e(se,ke),e(ke,yr),e(se,Br),e(h,Tr),e(h,le),e(le,Rr),e(le,de),e(de,wr),e(le,zr),e(h,$r),e(h,ye),y(ce,ye,null),rt=!0},p:uo,i(t){rt||(B(V.$$.fragment,t),B(K.$$.fragment,t),B(Q.$$.fragment,t),B(G.$$.fragment,t),B(Y.$$.fragment,t),B(Z.$$.fragment,t),B(te.$$.fragment,t),B(re.$$.fragment,t),B(ae.$$.fragment,t),B(ie.$$.fragment,t),B(ce.$$.fragment,t),rt=!0)},o(t){T(V.$$.fragment,t),T(K.$$.fragment,t),T(Q.$$.fragment,t),T(G.$$.fragment,t),T(Y.$$.fragment,t),T(Z.$$.fragment,t),T(te.$$.fragment,t),T(re.$$.fragment,t),T(ae.$$.fragment,t),T(ie.$$.fragment,t),T(ce.$$.fragment,t),rt=!1},d(t){r(w),t&&r(me),t&&r(_),R(V),t&&r(Ke),t&&r(E),R(K),t&&r(Ue),t&&r(F),t&&r(We),t&&r(z),t&&r(Je),t&&r(x),R(Q),t&&r(Qe),t&&r(v),R(G),t&&r(Ge),t&&r(C),R(Y),t&&r(Xe),t&&r(p),R(Z),t&&r(Ye),t&&r(A),R(te),t&&r(Ze),t&&r(u),R(re),t&&r(et),t&&r(M),R(ae),t&&r(tt),t&&r(h),R(ie),R(ce)}}}const go={local:"retribert",sections:[{local:"overview",title:"Overview"},{local:"transformers.RetriBertConfig",title:"RetriBertConfig"},{local:"transformers.RetriBertTokenizer",title:"RetriBertTokenizer"},{local:"transformers.RetriBertTokenizerFast",title:"RetriBertTokenizerFast"},{local:"transformers.RetriBertModel",title:"RetriBertModel"}],title:"RetriBERT"};function vo(_t,w,me){let{fw:_}=w;return _t.$$set=g=>{"fw"in g&&me(0,_=g.fw)},[_]}class Bo extends mo{constructor(w){super();fo(this,w,vo,_o,ho,{fw:0})}}export{Bo as default,go as metadata};
9,944
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/dialogpt.mdx-44319d9c.js
import{S as rt,i as st,s as ht,e as n,k as p,w as et,t as r,L as ft,c as o,d as t,m as c,a as i,x as tt,h as s,b as f,J as a,g as h,y as at,K as pt,q as nt,o as ot,B as it}from"../../chunks/vendor-b1433968.js";import{I as lt}from"../../chunks/IconCopyLink-7029626d.js";function ct(se){let m,$,u,d,N,b,he,W,fe,F,v,T,q,y,pe,j,ce,H,P,ue,x,de,me,Y,A,ge,z,M,J,ve,X,S,Te,Z,g,O,Pe,_e,R,we,Ge,D,be,E,ye,xe,K,C,De,U,_,Ee,B,ke,$e,Q,w,Ae,L,Me,Se,V,G,Ce,k,Le,Ie,ee;return b=new lt({}),y=new lt({}),{c(){m=n("meta"),$=p(),u=n("h1"),d=n("a"),N=n("span"),et(b.$$.fragment),he=p(),W=n("span"),fe=r("DialoGPT"),F=p(),v=n("h2"),T=n("a"),q=n("span"),et(y.$$.fragment),pe=p(),j=n("span"),ce=r("Overview"),H=p(),P=n("p"),ue=r("DialoGPT was proposed in "),x=n("a"),de=r("DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation"),me=r(` by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. It\u2019s a GPT2 Model trained on 147M conversation-like exchanges extracted from Reddit.`),Y=p(),A=n("p"),ge=r("The abstract from the paper is the following:"),z=p(),M=n("p"),J=n("em"),ve=r(`We present a large, tunable neural conversational response generation model, DialoGPT (dialogue generative pre-trained transformer). Trained on 147M conversation-like exchanges extracted from Reddit comment chains over a period spanning from 2005 through 2017, DialoGPT extends the Hugging Face PyTorch transformer to attain a performance close to human both in terms of automatic and human evaluation in single-turn dialogue settings. We show that conversational systems that leverage DialoGPT generate more relevant, contentful and context-consistent responses than strong baseline systems. The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.`),X=p(),S=n("p"),Te=r("Tips:"),Z=p(),g=n("ul"),O=n("li"),Pe=r(`DialoGPT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),_e=p(),R=n("li"),we=r(`DialoGPT was trained with a causal language modeling (CLM) objective on conversational data and is therefore powerful at response generation in open-domain dialogue systems.`),Ge=p(),D=n("li"),be=r("DialoGPT enables the user to create a chat bot in just 10 lines of code as shown on "),E=n("a"),ye=r("DialoGPT\u2019s model card"),xe=r("."),K=p(),C=n("p"),De=r("Training:"),U=p(),_=n("p"),Ee=r("In order to train or fine-tune DialoGPT, one can use causal language modeling training. To cite the official paper: "),B=n("em"),ke=r(`We follow the OpenAI GPT-2 to model a multiturn dialogue session as a long text and frame the generation task as language modeling. 
We first concatenate all dialog turns within a dialogue session into a long text x_1,\u2026, x_N (N is the sequence length), ended by the end-of-text token.`),$e=r(" For more information please confer to the original paper."),Q=p(),w=n("p"),Ae=r("DialoGPT\u2019s architecture is based on the GPT2 model, so one can refer to "),L=n("a"),Me=r("GPT2\u2019s documentation page"),Se=r("."),V=p(),G=n("p"),Ce=r("The original code can be found "),k=n("a"),Le=r("here"),Ie=r("."),this.h()},l(e){const l=ft('[data-svelte="svelte-1phssyn"]',document.head);m=o(l,"META",{name:!0,content:!0}),l.forEach(t),$=c(e),u=o(e,"H1",{class:!0});var te=i(u);d=o(te,"A",{id:!0,class:!0,href:!0});var Ne=i(d);N=o(Ne,"SPAN",{});var We=i(N);tt(b.$$.fragment,We),We.forEach(t),Ne.forEach(t),he=c(te),W=o(te,"SPAN",{});var qe=i(W);fe=s(qe,"DialoGPT"),qe.forEach(t),te.forEach(t),F=c(e),v=o(e,"H2",{class:!0});var ae=i(v);T=o(ae,"A",{id:!0,class:!0,href:!0});var je=i(T);q=o(je,"SPAN",{});var Je=i(q);tt(y.$$.fragment,Je),Je.forEach(t),je.forEach(t),pe=c(ae),j=o(ae,"SPAN",{});var Oe=i(j);ce=s(Oe,"Overview"),Oe.forEach(t),ae.forEach(t),H=c(e),P=o(e,"P",{});var ne=i(P);ue=s(ne,"DialoGPT was proposed in "),x=o(ne,"A",{href:!0,rel:!0});var Re=i(x);de=s(Re,"DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation"),Re.forEach(t),me=s(ne,` by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. It\u2019s a GPT2 Model trained on 147M conversation-like exchanges extracted from Reddit.`),ne.forEach(t),Y=c(e),A=o(e,"P",{});var Be=i(A);ge=s(Be,"The abstract from the paper is the following:"),Be.forEach(t),z=c(e),M=o(e,"P",{});var Fe=i(M);J=o(Fe,"EM",{});var He=i(J);ve=s(He,`We present a large, tunable neural conversational response generation model, DialoGPT (dialogue generative pre-trained transformer). Trained on 147M conversation-like exchanges extracted from Reddit comment chains over a period spanning from 2005 through 2017, DialoGPT extends the Hugging Face PyTorch transformer to attain a performance close to human both in terms of automatic and human evaluation in single-turn dialogue settings. We show that conversational systems that leverage DialoGPT generate more relevant, contentful and context-consistent responses than strong baseline systems. The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.`),He.forEach(t),Fe.forEach(t),X=c(e),S=o(e,"P",{});var Ye=i(S);Te=s(Ye,"Tips:"),Ye.forEach(t),Z=c(e),g=o(e,"UL",{});var I=i(g);O=o(I,"LI",{});var ze=i(O);Pe=s(ze,`DialoGPT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),ze.forEach(t),_e=c(I),R=o(I,"LI",{});var Xe=i(R);we=s(Xe,`DialoGPT was trained with a causal language modeling (CLM) objective on conversational data and is therefore powerful at response generation in open-domain dialogue systems.`),Xe.forEach(t),Ge=c(I),D=o(I,"LI",{});var oe=i(D);be=s(oe,"DialoGPT enables the user to create a chat bot in just 10 lines of code as shown on "),E=o(oe,"A",{href:!0,rel:!0});var Ze=i(E);ye=s(Ze,"DialoGPT\u2019s model card"),Ze.forEach(t),xe=s(oe,"."),oe.forEach(t),I.forEach(t),K=c(e),C=o(e,"P",{});var Ke=i(C);De=s(Ke,"Training:"),Ke.forEach(t),U=c(e),_=o(e,"P",{});var ie=i(_);Ee=s(ie,"In order to train or fine-tune DialoGPT, one can use causal language modeling training. 
To cite the official paper: "),B=o(ie,"EM",{});var Ue=i(B);ke=s(Ue,`We follow the OpenAI GPT-2 to model a multiturn dialogue session as a long text and frame the generation task as language modeling. We first concatenate all dialog turns within a dialogue session into a long text x_1,\u2026, x_N (N is the sequence length), ended by the end-of-text token.`),Ue.forEach(t),$e=s(ie," For more information please confer to the original paper."),ie.forEach(t),Q=c(e),w=o(e,"P",{});var le=i(w);Ae=s(le,"DialoGPT\u2019s architecture is based on the GPT2 model, so one can refer to "),L=o(le,"A",{href:!0});var Qe=i(L);Me=s(Qe,"GPT2\u2019s documentation page"),Qe.forEach(t),Se=s(le,"."),le.forEach(t),V=c(e),G=o(e,"P",{});var re=i(G);Ce=s(re,"The original code can be found "),k=o(re,"A",{href:!0,rel:!0});var Ve=i(k);Le=s(Ve,"here"),Ve.forEach(t),Ie=s(re,"."),re.forEach(t),this.h()},h(){f(m,"name","hf:doc:metadata"),f(m,"content",JSON.stringify(ut)),f(d,"id","dialogpt"),f(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(d,"href","#dialogpt"),f(u,"class","relative group"),f(T,"id","overview"),f(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(T,"href","#overview"),f(v,"class","relative group"),f(x,"href","https://arxiv.org/abs/1911.00536"),f(x,"rel","nofollow"),f(E,"href","https://huggingface.co/microsoft/DialoGPT-medium"),f(E,"rel","nofollow"),f(L,"href","gpt2"),f(k,"href","https://github.com/microsoft/DialoGPT"),f(k,"rel","nofollow")},m(e,l){a(document.head,m),h(e,$,l),h(e,u,l),a(u,d),a(d,N),at(b,N,null),a(u,he),a(u,W),a(W,fe),h(e,F,l),h(e,v,l),a(v,T),a(T,q),at(y,q,null),a(v,pe),a(v,j),a(j,ce),h(e,H,l),h(e,P,l),a(P,ue),a(P,x),a(x,de),a(P,me),h(e,Y,l),h(e,A,l),a(A,ge),h(e,z,l),h(e,M,l),a(M,J),a(J,ve),h(e,X,l),h(e,S,l),a(S,Te),h(e,Z,l),h(e,g,l),a(g,O),a(O,Pe),a(g,_e),a(g,R),a(R,we),a(g,Ge),a(g,D),a(D,be),a(D,E),a(E,ye),a(D,xe),h(e,K,l),h(e,C,l),a(C,De),h(e,U,l),h(e,_,l),a(_,Ee),a(_,B),a(B,ke),a(_,$e),h(e,Q,l),h(e,w,l),a(w,Ae),a(w,L),a(L,Me),a(w,Se),h(e,V,l),h(e,G,l),a(G,Ce),a(G,k),a(k,Le),a(G,Ie),ee=!0},p:pt,i(e){ee||(nt(b.$$.fragment,e),nt(y.$$.fragment,e),ee=!0)},o(e){ot(b.$$.fragment,e),ot(y.$$.fragment,e),ee=!1},d(e){t(m),e&&t($),e&&t(u),it(b),e&&t(F),e&&t(v),it(y),e&&t(H),e&&t(P),e&&t(Y),e&&t(A),e&&t(z),e&&t(M),e&&t(X),e&&t(S),e&&t(Z),e&&t(g),e&&t(K),e&&t(C),e&&t(U),e&&t(_),e&&t(Q),e&&t(w),e&&t(V),e&&t(G)}}}const ut={local:"dialogpt",sections:[{local:"overview",title:"Overview"}],title:"DialoGPT"};function dt(se,m,$){let{fw:u}=m;return se.$$set=d=>{"fw"in d&&$(0,u=d.fw)},[u]}class vt extends rt{constructor(m){super();st(this,m,dt,ct,ht,{fw:0})}}export{vt as default,ut as metadata};
9,945
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/herbert.mdx-c7984b22.js
import{S as ss,i as as,s as ns,e as s,k as l,w as _,t as o,L as os,c as a,d as r,m as c,a as n,x as v,h as i,b as p,J as e,g as m,y as b,K as is,q as w,o as T,B as z}from"../../chunks/vendor-b1433968.js";import{D as De}from"../../chunks/Docstring-ff504c58.js";import{C as rs}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ct}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ls(pt){let E,se,k,g,fe,I,dt,ue,mt,Me,P,j,ke,D,ht,ge,ft,Ne,R,ut,M,kt,gt,Ce,ae,_t,Se,ne,_e,vt,Ue,oe,bt,Oe,N,Je,y,wt,C,Tt,zt,S,Et,yt,Ke,q,A,ve,U,$t,be,Ht,Ve,u,O,Lt,we,Pt,qt,Te,Bt,jt,J,ze,Ee,Rt,At,ye,$e,xt,Ft,K,It,ie,Dt,Mt,Xe,B,x,He,V,Nt,Le,Ct,We,h,X,St,W,Ut,Pe,Ot,Jt,Kt,qe,Vt,Xt,Be,je,Wt,Gt,G,Qt,le,Yt,Zt,er,$,Q,tr,Re,rr,sr,Y,ce,ar,Ae,nr,or,pe,ir,xe,lr,cr,H,Z,pr,Fe,dr,mr,ee,hr,F,te,fr,re,ur,Ie,kr,gr,Ge;return I=new ct({}),D=new ct({}),N=new rs({props:{code:`from transformers import HerbertTokenizer, RobertaModel tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1") model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1") encoded_input = tokenizer.encode("Kto ma lepsz\u0105 sztuk\u0119, ma lepszy rz\u0105d \u2013 to jasne.", return_tensors='pt') outputs = model(encoded_input) # HerBERT can also be loaded using AutoTokenizer and AutoModel: import torch from transformers import AutoModel, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1") model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> HerbertTokenizer, RobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = HerbertTokenizer.from_pretrained(<span class="hljs-string">&quot;allegro/herbert-klej-cased-tokenizer-v1&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaModel.from_pretrained(<span class="hljs-string">&quot;allegro/herbert-klej-cased-v1&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_input = tokenizer.encode(<span class="hljs-string">&quot;Kto ma lepsz\u0105 sztuk\u0119, ma lepszy rz\u0105d \u2013 to jasne.&quot;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(encoded_input) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># HerBERT can also be loaded using AutoTokenizer and AutoModel:</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;allegro/herbert-klej-cased-tokenizer-v1&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;allegro/herbert-klej-cased-v1&quot;</span>)`}}),U=new ct({}),O=new De({props:{name:"class transformers.HerbertTokenizer",anchor:"transformers.HerbertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"tokenizer_file",val:" = None"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"sep_token",val:" = '</s>'"},{name:"do_lowercase_and_remove_accent",val:" = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/herbert/tokenization_herbert.py#L41"}}),V=new ct({}),X=new De({props:{name:"class transformers.HerbertTokenizerFast",anchor:"transformers.HerbertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"sep_token",val:" = '</s>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/herbert/tokenization_herbert_fast.py#L40",parametersDescription:[{anchor:"transformers.HerbertTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.HerbertTokenizerFast.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"}]}}),Q=new De({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.HerbertTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/herbert/tokenization_herbert_fast.py#L90",parametersDescription:[{anchor:"transformers.HerbertTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.HerbertTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Z=new De({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.HerbertTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/herbert/tokenization_herbert_fast.py#L144",parametersDescription:[{anchor:"transformers.HerbertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.HerbertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),ee=new rs({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 
</span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),te=new De({props:{name:"get_special_tokens_mask",anchor:"transformers.HerbertTokenizerFast.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/herbert/tokenization_herbert_fast.py#L117",parametersDescription:[{anchor:"transformers.HerbertTokenizerFast.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.HerbertTokenizerFast.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.HerbertTokenizerFast.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){E=s("meta"),se=l(),k=s("h1"),g=s("a"),fe=s("span"),_(I.$$.fragment),dt=l(),ue=s("span"),mt=o("HerBERT"),Me=l(),P=s("h2"),j=s("a"),ke=s("span"),_(D.$$.fragment),ht=l(),ge=s("span"),ft=o("Overview"),Ne=l(),R=s("p"),ut=o("The HerBERT model was proposed in "),M=s("a"),kt=o("KLEJ: Comprehensive Benchmark for Polish Language Understanding"),gt=o(` by Piotr Rybak, Robert Mroczkowski, Janusz Tracz, and Ireneusz Gawlik. It is a BERT-based Language Model trained on Polish Corpora using only MLM objective with dynamic masking of whole words.`),Ce=l(),ae=s("p"),_t=o("The abstract from the paper is the following:"),Se=l(),ne=s("p"),_e=s("em"),vt=o(`In recent years, a series of Transformer-based models unlocked major improvements in general natural language understanding (NLU) tasks. Such a fast pace of research would not be possible without general NLU benchmarks, which allow for a fair comparison of the proposed methods. However, such benchmarks are available only for a handful of languages. To alleviate this issue, we introduce a comprehensive multi-task benchmark for the Polish language understanding, accompanied by an online leaderboard. It consists of a diverse set of tasks, adopted from existing datasets for named entity recognition, question-answering, textual entailment, and others. We also introduce a new sentiment analysis task for the e-commerce domain, named Allegro Reviews (AR). To ensure a common evaluation scheme and promote models that generalize to different NLU tasks, the benchmark includes datasets from varying domains and applications. Additionally, we release HerBERT, a Transformer-based model trained specifically for the Polish language, which has the best average performance and obtains the best results for three out of nine tasks. 
Finally, we provide an extensive evaluation, including several standard baselines and recently proposed, multilingual Transformer-based models.`),Ue=l(),oe=s("p"),bt=o("Examples of use:"),Oe=l(),_(N.$$.fragment),Je=l(),y=s("p"),wt=o("This model was contributed by "),C=s("a"),Tt=o("rmroczkowski"),zt=o(`. The original code can be found `),S=s("a"),Et=o("here"),yt=o("."),Ke=l(),q=s("h2"),A=s("a"),ve=s("span"),_(U.$$.fragment),$t=l(),be=s("span"),Ht=o("HerbertTokenizer"),Ve=l(),u=s("div"),_(O.$$.fragment),Lt=l(),we=s("p"),Pt=o("Construct a BPE tokenizer for HerBERT."),qt=l(),Te=s("p"),Bt=o("Peculiarities:"),jt=l(),J=s("ul"),ze=s("li"),Ee=s("p"),Rt=o(`uses BERT\u2019s pre-tokenizer: BaseTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately.`),At=l(),ye=s("li"),$e=s("p"),xt=o("Such pretokenized input is BPE subtokenized"),Ft=l(),K=s("p"),It=o("This tokenizer inherits from "),ie=s("a"),Dt=o("XLMTokenizer"),Mt=o(` which contains most of the methods. Users should refer to the superclass for more information regarding methods.`),Xe=l(),B=s("h2"),x=s("a"),He=s("span"),_(V.$$.fragment),Nt=l(),Le=s("span"),Ct=o("HerbertTokenizerFast"),We=l(),h=s("div"),_(X.$$.fragment),St=l(),W=s("p"),Ut=o("Construct a \u201CFast\u201D BPE tokenizer for HerBERT (backed by HuggingFace\u2019s "),Pe=s("em"),Ot=o("tokenizers"),Jt=o(" library)."),Kt=l(),qe=s("p"),Vt=o("Peculiarities:"),Xt=l(),Be=s("ul"),je=s("li"),Wt=o(`uses BERT\u2019s pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately.`),Gt=l(),G=s("p"),Qt=o("This tokenizer inherits from "),le=s("a"),Yt=o("PreTrainedTokenizer"),Zt=o(` which contains most of the methods. Users should refer to the superclass for more information regarding methods.`),er=l(),$=s("div"),_(Q.$$.fragment),tr=l(),Re=s("p"),rr=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An HerBERT, like BERT sequence has the following format:`),sr=l(),Y=s("ul"),ce=s("li"),ar=o("single sequence: "),Ae=s("code"),nr=o("<s> X </s>"),or=l(),pe=s("li"),ir=o("pair of sequences: "),xe=s("code"),lr=o("<s> A </s> B </s>"),cr=l(),H=s("div"),_(Z.$$.fragment),pr=l(),Fe=s("p"),dr=o(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. HerBERT, like BERT sequence pair mask has the following format:`),mr=l(),_(ee.$$.fragment),hr=l(),F=s("div"),_(te.$$.fragment),fr=l(),re=s("p"),ur=o(`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Ie=s("code"),kr=o("prepare_for_model"),gr=o(" method."),this.h()},l(t){const d=os('[data-svelte="svelte-1phssyn"]',document.head);E=a(d,"META",{name:!0,content:!0}),d.forEach(r),se=c(t),k=a(t,"H1",{class:!0});var Qe=n(k);g=a(Qe,"A",{id:!0,class:!0,href:!0});var br=n(g);fe=a(br,"SPAN",{});var wr=n(fe);v(I.$$.fragment,wr),wr.forEach(r),br.forEach(r),dt=c(Qe),ue=a(Qe,"SPAN",{});var Tr=n(ue);mt=i(Tr,"HerBERT"),Tr.forEach(r),Qe.forEach(r),Me=c(t),P=a(t,"H2",{class:!0});var Ye=n(P);j=a(Ye,"A",{id:!0,class:!0,href:!0});var zr=n(j);ke=a(zr,"SPAN",{});var Er=n(ke);v(D.$$.fragment,Er),Er.forEach(r),zr.forEach(r),ht=c(Ye),ge=a(Ye,"SPAN",{});var yr=n(ge);ft=i(yr,"Overview"),yr.forEach(r),Ye.forEach(r),Ne=c(t),R=a(t,"P",{});var Ze=n(R);ut=i(Ze,"The HerBERT model was proposed in "),M=a(Ze,"A",{href:!0,rel:!0});var $r=n(M);kt=i($r,"KLEJ: Comprehensive Benchmark for Polish Language Understanding"),$r.forEach(r),gt=i(Ze,` by Piotr Rybak, Robert Mroczkowski, Janusz Tracz, and Ireneusz Gawlik. It is a BERT-based Language Model trained on Polish Corpora using only MLM objective with dynamic masking of whole words.`),Ze.forEach(r),Ce=c(t),ae=a(t,"P",{});var Hr=n(ae);_t=i(Hr,"The abstract from the paper is the following:"),Hr.forEach(r),Se=c(t),ne=a(t,"P",{});var Lr=n(ne);_e=a(Lr,"EM",{});var Pr=n(_e);vt=i(Pr,`In recent years, a series of Transformer-based models unlocked major improvements in general natural language understanding (NLU) tasks. Such a fast pace of research would not be possible without general NLU benchmarks, which allow for a fair comparison of the proposed methods. However, such benchmarks are available only for a handful of languages. To alleviate this issue, we introduce a comprehensive multi-task benchmark for the Polish language understanding, accompanied by an online leaderboard. It consists of a diverse set of tasks, adopted from existing datasets for named entity recognition, question-answering, textual entailment, and others. We also introduce a new sentiment analysis task for the e-commerce domain, named Allegro Reviews (AR). To ensure a common evaluation scheme and promote models that generalize to different NLU tasks, the benchmark includes datasets from varying domains and applications. Additionally, we release HerBERT, a Transformer-based model trained specifically for the Polish language, which has the best average performance and obtains the best results for three out of nine tasks. Finally, we provide an extensive evaluation, including several standard baselines and recently proposed, multilingual Transformer-based models.`),Pr.forEach(r),Lr.forEach(r),Ue=c(t),oe=a(t,"P",{});var qr=n(oe);bt=i(qr,"Examples of use:"),qr.forEach(r),Oe=c(t),v(N.$$.fragment,t),Je=c(t),y=a(t,"P",{});var de=n(y);wt=i(de,"This model was contributed by "),C=a(de,"A",{href:!0,rel:!0});var Br=n(C);Tt=i(Br,"rmroczkowski"),Br.forEach(r),zt=i(de,`. 
The original code can be found `),S=a(de,"A",{href:!0,rel:!0});var jr=n(S);Et=i(jr,"here"),jr.forEach(r),yt=i(de,"."),de.forEach(r),Ke=c(t),q=a(t,"H2",{class:!0});var et=n(q);A=a(et,"A",{id:!0,class:!0,href:!0});var Rr=n(A);ve=a(Rr,"SPAN",{});var Ar=n(ve);v(U.$$.fragment,Ar),Ar.forEach(r),Rr.forEach(r),$t=c(et),be=a(et,"SPAN",{});var xr=n(be);Ht=i(xr,"HerbertTokenizer"),xr.forEach(r),et.forEach(r),Ve=c(t),u=a(t,"DIV",{class:!0});var L=n(u);v(O.$$.fragment,L),Lt=c(L),we=a(L,"P",{});var Fr=n(we);Pt=i(Fr,"Construct a BPE tokenizer for HerBERT."),Fr.forEach(r),qt=c(L),Te=a(L,"P",{});var Ir=n(Te);Bt=i(Ir,"Peculiarities:"),Ir.forEach(r),jt=c(L),J=a(L,"UL",{});var tt=n(J);ze=a(tt,"LI",{});var Dr=n(ze);Ee=a(Dr,"P",{});var Mr=n(Ee);Rt=i(Mr,`uses BERT\u2019s pre-tokenizer: BaseTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately.`),Mr.forEach(r),Dr.forEach(r),At=c(tt),ye=a(tt,"LI",{});var Nr=n(ye);$e=a(Nr,"P",{});var Cr=n($e);xt=i(Cr,"Such pretokenized input is BPE subtokenized"),Cr.forEach(r),Nr.forEach(r),tt.forEach(r),Ft=c(L),K=a(L,"P",{});var rt=n(K);It=i(rt,"This tokenizer inherits from "),ie=a(rt,"A",{href:!0});var Sr=n(ie);Dt=i(Sr,"XLMTokenizer"),Sr.forEach(r),Mt=i(rt,` which contains most of the methods. Users should refer to the superclass for more information regarding methods.`),rt.forEach(r),L.forEach(r),Xe=c(t),B=a(t,"H2",{class:!0});var st=n(B);x=a(st,"A",{id:!0,class:!0,href:!0});var Ur=n(x);He=a(Ur,"SPAN",{});var Or=n(He);v(V.$$.fragment,Or),Or.forEach(r),Ur.forEach(r),Nt=c(st),Le=a(st,"SPAN",{});var Jr=n(Le);Ct=i(Jr,"HerbertTokenizerFast"),Jr.forEach(r),st.forEach(r),We=c(t),h=a(t,"DIV",{class:!0});var f=n(h);v(X.$$.fragment,f),St=c(f),W=a(f,"P",{});var at=n(W);Ut=i(at,"Construct a \u201CFast\u201D BPE tokenizer for HerBERT (backed by HuggingFace\u2019s "),Pe=a(at,"EM",{});var Kr=n(Pe);Ot=i(Kr,"tokenizers"),Kr.forEach(r),Jt=i(at," library)."),at.forEach(r),Kt=c(f),qe=a(f,"P",{});var Vr=n(qe);Vt=i(Vr,"Peculiarities:"),Vr.forEach(r),Xt=c(f),Be=a(f,"UL",{});var Xr=n(Be);je=a(Xr,"LI",{});var Wr=n(je);Wt=i(Wr,`uses BERT\u2019s pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately.`),Wr.forEach(r),Xr.forEach(r),Gt=c(f),G=a(f,"P",{});var nt=n(G);Qt=i(nt,"This tokenizer inherits from "),le=a(nt,"A",{href:!0});var Gr=n(le);Yt=i(Gr,"PreTrainedTokenizer"),Gr.forEach(r),Zt=i(nt,` which contains most of the methods. Users should refer to the superclass for more information regarding methods.`),nt.forEach(r),er=c(f),$=a(f,"DIV",{class:!0});var me=n($);v(Q.$$.fragment,me),tr=c(me),Re=a(me,"P",{});var Qr=n(Re);rr=i(Qr,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An HerBERT, like BERT sequence has the following format:`),Qr.forEach(r),sr=c(me),Y=a(me,"UL",{});var ot=n(Y);ce=a(ot,"LI",{});var _r=n(ce);ar=i(_r,"single sequence: "),Ae=a(_r,"CODE",{});var Yr=n(Ae);nr=i(Yr,"<s> X </s>"),Yr.forEach(r),_r.forEach(r),or=c(ot),pe=a(ot,"LI",{});var vr=n(pe);ir=i(vr,"pair of sequences: "),xe=a(vr,"CODE",{});var Zr=n(xe);lr=i(Zr,"<s> A </s> B </s>"),Zr.forEach(r),vr.forEach(r),ot.forEach(r),me.forEach(r),cr=c(f),H=a(f,"DIV",{class:!0});var he=n(H);v(Z.$$.fragment,he),pr=c(he),Fe=a(he,"P",{});var es=n(Fe);dr=i(es,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
HerBERT, like BERT sequence pair mask has the following format:`),es.forEach(r),mr=c(he),v(ee.$$.fragment,he),he.forEach(r),hr=c(f),F=a(f,"DIV",{class:!0});var it=n(F);v(te.$$.fragment,it),fr=c(it),re=a(it,"P",{});var lt=n(re);ur=i(lt,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Ie=a(lt,"CODE",{});var ts=n(Ie);kr=i(ts,"prepare_for_model"),ts.forEach(r),gr=i(lt," method."),lt.forEach(r),it.forEach(r),f.forEach(r),this.h()},h(){p(E,"name","hf:doc:metadata"),p(E,"content",JSON.stringify(cs)),p(g,"id","herbert"),p(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(g,"href","#herbert"),p(k,"class","relative group"),p(j,"id","overview"),p(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(j,"href","#overview"),p(P,"class","relative group"),p(M,"href","https://www.aclweb.org/anthology/2020.acl-main.111.pdf"),p(M,"rel","nofollow"),p(C,"href","https://huggingface.co/rmroczkowski"),p(C,"rel","nofollow"),p(S,"href","https://github.com/allegro/HerBERT"),p(S,"rel","nofollow"),p(A,"id","transformers.HerbertTokenizer"),p(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(A,"href","#transformers.HerbertTokenizer"),p(q,"class","relative group"),p(ie,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer"),p(u,"class","docstring"),p(x,"id","transformers.HerbertTokenizerFast"),p(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(x,"href","#transformers.HerbertTokenizerFast"),p(B,"class","relative 
group"),p(le,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p($,"class","docstring"),p(H,"class","docstring"),p(F,"class","docstring"),p(h,"class","docstring")},m(t,d){e(document.head,E),m(t,se,d),m(t,k,d),e(k,g),e(g,fe),b(I,fe,null),e(k,dt),e(k,ue),e(ue,mt),m(t,Me,d),m(t,P,d),e(P,j),e(j,ke),b(D,ke,null),e(P,ht),e(P,ge),e(ge,ft),m(t,Ne,d),m(t,R,d),e(R,ut),e(R,M),e(M,kt),e(R,gt),m(t,Ce,d),m(t,ae,d),e(ae,_t),m(t,Se,d),m(t,ne,d),e(ne,_e),e(_e,vt),m(t,Ue,d),m(t,oe,d),e(oe,bt),m(t,Oe,d),b(N,t,d),m(t,Je,d),m(t,y,d),e(y,wt),e(y,C),e(C,Tt),e(y,zt),e(y,S),e(S,Et),e(y,yt),m(t,Ke,d),m(t,q,d),e(q,A),e(A,ve),b(U,ve,null),e(q,$t),e(q,be),e(be,Ht),m(t,Ve,d),m(t,u,d),b(O,u,null),e(u,Lt),e(u,we),e(we,Pt),e(u,qt),e(u,Te),e(Te,Bt),e(u,jt),e(u,J),e(J,ze),e(ze,Ee),e(Ee,Rt),e(J,At),e(J,ye),e(ye,$e),e($e,xt),e(u,Ft),e(u,K),e(K,It),e(K,ie),e(ie,Dt),e(K,Mt),m(t,Xe,d),m(t,B,d),e(B,x),e(x,He),b(V,He,null),e(B,Nt),e(B,Le),e(Le,Ct),m(t,We,d),m(t,h,d),b(X,h,null),e(h,St),e(h,W),e(W,Ut),e(W,Pe),e(Pe,Ot),e(W,Jt),e(h,Kt),e(h,qe),e(qe,Vt),e(h,Xt),e(h,Be),e(Be,je),e(je,Wt),e(h,Gt),e(h,G),e(G,Qt),e(G,le),e(le,Yt),e(G,Zt),e(h,er),e(h,$),b(Q,$,null),e($,tr),e($,Re),e(Re,rr),e($,sr),e($,Y),e(Y,ce),e(ce,ar),e(ce,Ae),e(Ae,nr),e(Y,or),e(Y,pe),e(pe,ir),e(pe,xe),e(xe,lr),e(h,cr),e(h,H),b(Z,H,null),e(H,pr),e(H,Fe),e(Fe,dr),e(H,mr),b(ee,H,null),e(h,hr),e(h,F),b(te,F,null),e(F,fr),e(F,re),e(re,ur),e(re,Ie),e(Ie,kr),e(re,gr),Ge=!0},p:is,i(t){Ge||(w(I.$$.fragment,t),w(D.$$.fragment,t),w(N.$$.fragment,t),w(U.$$.fragment,t),w(O.$$.fragment,t),w(V.$$.fragment,t),w(X.$$.fragment,t),w(Q.$$.fragment,t),w(Z.$$.fragment,t),w(ee.$$.fragment,t),w(te.$$.fragment,t),Ge=!0)},o(t){T(I.$$.fragment,t),T(D.$$.fragment,t),T(N.$$.fragment,t),T(U.$$.fragment,t),T(O.$$.fragment,t),T(V.$$.fragment,t),T(X.$$.fragment,t),T(Q.$$.fragment,t),T(Z.$$.fragment,t),T(ee.$$.fragment,t),T(te.$$.fragment,t),Ge=!1},d(t){r(E),t&&r(se),t&&r(k),z(I),t&&r(Me),t&&r(P),z(D),t&&r(Ne),t&&r(R),t&&r(Ce),t&&r(ae),t&&r(Se),t&&r(ne),t&&r(Ue),t&&r(oe),t&&r(Oe),z(N,t),t&&r(Je),t&&r(y),t&&r(Ke),t&&r(q),z(U),t&&r(Ve),t&&r(u),z(O),t&&r(Xe),t&&r(B),z(V),t&&r(We),t&&r(h),z(X),z(Q),z(Z),z(ee),z(te)}}}const cs={local:"herbert",sections:[{local:"overview",title:"Overview"},{local:"transformers.HerbertTokenizer",title:"HerbertTokenizer"},{local:"transformers.HerbertTokenizerFast",title:"HerbertTokenizerFast"}],title:"HerBERT"};function ps(pt,E,se){let{fw:k}=E;return pt.$$set=g=>{"fw"in g&&se(0,k=g.fw)},[k]}class ks extends ss{constructor(E){super();as(this,E,ps,ls,ns,{fw:0})}}export{ks as default,cs as metadata};
9,946
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/gpt.mdx-4854e0b3.js
import{S as Zf,i as em,s as tm,e as a,k as l,w as b,t as n,L as om,c as r,d as t,m as d,a as i,x as w,h as s,b as c,J as e,g as h,y as P,q as y,o as I,B as O}from"../../chunks/vendor-b1433968.js";import{T as st}from"../../chunks/Tip-c3840994.js";import{D as le}from"../../chunks/Docstring-ff504c58.js";import{C as it}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Xe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function nm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function sm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function am(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function rm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function 
im(N){let u,A,m,g,v,T,_,$,he,J,G,X,D,Y,ue,j,fe,de,S,q,Z,ee,M,z,oe,B,ce,ne,H,me,pe,E,ge,L,Q,ae,W,_e,te,C,re,U,Te;return{c(){u=a("p"),A=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),$=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),J=l(),G=a("p"),X=n("This second option is useful when using "),D=a("code"),Y=n("tf.keras.Model.fit"),ue=n(` method which currently requires having all the tensors in the first argument of the model call function: `),j=a("code"),fe=n("model(inputs)"),de=n("."),S=l(),q=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ee=l(),M=a("ul"),z=a("li"),oe=n("a single Tensor with "),B=a("code"),ce=n("input_ids"),ne=n(" only and nothing else: "),H=a("code"),me=n("model(inputs_ids)"),pe=l(),E=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),Q=n("model([input_ids, attention_mask])"),ae=n(" or "),W=a("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),te=l(),C=a("li"),re=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),Te=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){u=r(p,"P",{});var k=i(u);A=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(p),g=r(p,"UL",{});var R=i(g);v=r(R,"LI",{});var Me=i(v);T=s(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),_=d(R),$=r(R,"LI",{});var Ee=i($);he=s(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),R.forEach(t),J=d(p),G=r(p,"P",{});var F=i(G);X=s(F,"This second option is useful when using "),D=r(F,"CODE",{});var se=i(D);Y=s(se,"tf.keras.Model.fit"),se.forEach(t),ue=s(F,` method which currently requires having all the tensors in the first argument of the model call function: `),j=r(F,"CODE",{});var ze=i(j);fe=s(ze,"model(inputs)"),ze.forEach(t),de=s(F,"."),F.forEach(t),S=d(p),q=r(p,"P",{});var ve=i(q);Z=s(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),ee=d(p),M=r(p,"UL",{});var x=i(M);z=r(x,"LI",{});var V=i(z);oe=s(V,"a single Tensor with "),B=r(V,"CODE",{});var ke=i(B);ce=s(ke,"input_ids"),ke.forEach(t),ne=s(V," only and nothing else: "),H=r(V,"CODE",{});var Fe=i(H);me=s(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),pe=d(x),E=r(x,"LI",{});var K=i(E);ge=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(K,"CODE",{});var Ae=i(L);Q=s(Ae,"model([input_ids, attention_mask])"),Ae.forEach(t),ae=s(K," or "),W=r(K,"CODE",{});var we=i(W);_e=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),K.forEach(t),te=d(x),C=r(x,"LI",{});var be=i(C);re=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var $e=i(U);Te=s($e,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),$e.forEach(t),be.forEach(t),x.forEach(t)},m(p,k){h(p,u,k),e(u,A),h(p,m,k),h(p,g,k),e(g,v),e(v,T),e(g,_),e(g,$),e($,he),h(p,J,k),h(p,G,k),e(G,X),e(G,D),e(D,Y),e(G,ue),e(G,j),e(j,fe),e(G,de),h(p,S,k),h(p,q,k),e(q,Z),h(p,ee,k),h(p,M,k),e(M,z),e(z,oe),e(z,B),e(B,ce),e(z,ne),e(z,H),e(H,me),e(M,pe),e(M,E),e(E,ge),e(E,L),e(L,Q),e(E,ae),e(E,W),e(W,_e),e(M,te),e(M,C),e(C,re),e(C,U),e(U,Te)},d(p){p&&t(u),p&&t(m),p&&t(g),p&&t(J),p&&t(G),p&&t(S),p&&t(q),p&&t(ee),p&&t(M)}}}function lm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function dm(N){let u,A,m,g,v,T,_,$,he,J,G,X,D,Y,ue,j,fe,de,S,q,Z,ee,M,z,oe,B,ce,ne,H,me,pe,E,ge,L,Q,ae,W,_e,te,C,re,U,Te;return{c(){u=a("p"),A=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),$=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),J=l(),G=a("p"),X=n("This second option is useful when using "),D=a("code"),Y=n("tf.keras.Model.fit"),ue=n(` method which currently requires having all the tensors in the first argument of the model call function: `),j=a("code"),fe=n("model(inputs)"),de=n("."),S=l(),q=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ee=l(),M=a("ul"),z=a("li"),oe=n("a single Tensor with "),B=a("code"),ce=n("input_ids"),ne=n(" only and nothing else: "),H=a("code"),me=n("model(inputs_ids)"),pe=l(),E=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),Q=n("model([input_ids, attention_mask])"),ae=n(" or "),W=a("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),te=l(),C=a("li"),re=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),Te=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){u=r(p,"P",{});var k=i(u);A=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(p),g=r(p,"UL",{});var R=i(g);v=r(R,"LI",{});var Me=i(v);T=s(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),_=d(R),$=r(R,"LI",{});var Ee=i($);he=s(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),R.forEach(t),J=d(p),G=r(p,"P",{});var F=i(G);X=s(F,"This second option is useful when using "),D=r(F,"CODE",{});var se=i(D);Y=s(se,"tf.keras.Model.fit"),se.forEach(t),ue=s(F,` method which currently requires having all the tensors in the first argument of the model call function: `),j=r(F,"CODE",{});var ze=i(j);fe=s(ze,"model(inputs)"),ze.forEach(t),de=s(F,"."),F.forEach(t),S=d(p),q=r(p,"P",{});var ve=i(q);Z=s(ve,`If you choose this second option, there are three 
possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),ee=d(p),M=r(p,"UL",{});var x=i(M);z=r(x,"LI",{});var V=i(z);oe=s(V,"a single Tensor with "),B=r(V,"CODE",{});var ke=i(B);ce=s(ke,"input_ids"),ke.forEach(t),ne=s(V," only and nothing else: "),H=r(V,"CODE",{});var Fe=i(H);me=s(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),pe=d(x),E=r(x,"LI",{});var K=i(E);ge=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(K,"CODE",{});var Ae=i(L);Q=s(Ae,"model([input_ids, attention_mask])"),Ae.forEach(t),ae=s(K," or "),W=r(K,"CODE",{});var we=i(W);_e=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),K.forEach(t),te=d(x),C=r(x,"LI",{});var be=i(C);re=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var $e=i(U);Te=s($e,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),$e.forEach(t),be.forEach(t),x.forEach(t)},m(p,k){h(p,u,k),e(u,A),h(p,m,k),h(p,g,k),e(g,v),e(v,T),e(g,_),e(g,$),e($,he),h(p,J,k),h(p,G,k),e(G,X),e(G,D),e(D,Y),e(G,ue),e(G,j),e(j,fe),e(G,de),h(p,S,k),h(p,q,k),e(q,Z),h(p,ee,k),h(p,M,k),e(M,z),e(z,oe),e(z,B),e(B,ce),e(z,ne),e(z,H),e(H,me),e(M,pe),e(M,E),e(E,ge),e(E,L),e(L,Q),e(E,ae),e(E,W),e(W,_e),e(M,te),e(M,C),e(C,re),e(C,U),e(U,Te)},d(p){p&&t(u),p&&t(m),p&&t(g),p&&t(J),p&&t(G),p&&t(S),p&&t(q),p&&t(ee),p&&t(M)}}}function cm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function pm(N){let u,A,m,g,v,T,_,$,he,J,G,X,D,Y,ue,j,fe,de,S,q,Z,ee,M,z,oe,B,ce,ne,H,me,pe,E,ge,L,Q,ae,W,_e,te,C,re,U,Te;return{c(){u=a("p"),A=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),$=a("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),J=l(),G=a("p"),X=n("This second option is useful when using "),D=a("code"),Y=n("tf.keras.Model.fit"),ue=n(` method which currently requires having all the tensors in the first argument of the model call function: `),j=a("code"),fe=n("model(inputs)"),de=n("."),S=l(),q=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ee=l(),M=a("ul"),z=a("li"),oe=n("a single Tensor with "),B=a("code"),ce=n("input_ids"),ne=n(" only and nothing else: "),H=a("code"),me=n("model(inputs_ids)"),pe=l(),E=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),Q=n("model([input_ids, attention_mask])"),ae=n(" or "),W=a("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),te=l(),C=a("li"),re=n(`a dictionary with one or several input Tensors associated 
to the input names given in the docstring: `),U=a("code"),Te=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){u=r(p,"P",{});var k=i(u);A=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(p),g=r(p,"UL",{});var R=i(g);v=r(R,"LI",{});var Me=i(v);T=s(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),_=d(R),$=r(R,"LI",{});var Ee=i($);he=s(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),R.forEach(t),J=d(p),G=r(p,"P",{});var F=i(G);X=s(F,"This second option is useful when using "),D=r(F,"CODE",{});var se=i(D);Y=s(se,"tf.keras.Model.fit"),se.forEach(t),ue=s(F,` method which currently requires having all the tensors in the first argument of the model call function: `),j=r(F,"CODE",{});var ze=i(j);fe=s(ze,"model(inputs)"),ze.forEach(t),de=s(F,"."),F.forEach(t),S=d(p),q=r(p,"P",{});var ve=i(q);Z=s(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),ee=d(p),M=r(p,"UL",{});var x=i(M);z=r(x,"LI",{});var V=i(z);oe=s(V,"a single Tensor with "),B=r(V,"CODE",{});var ke=i(B);ce=s(ke,"input_ids"),ke.forEach(t),ne=s(V," only and nothing else: "),H=r(V,"CODE",{});var Fe=i(H);me=s(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),pe=d(x),E=r(x,"LI",{});var K=i(E);ge=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(K,"CODE",{});var Ae=i(L);Q=s(Ae,"model([input_ids, attention_mask])"),Ae.forEach(t),ae=s(K," or "),W=r(K,"CODE",{});var we=i(W);_e=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),K.forEach(t),te=d(x),C=r(x,"LI",{});var be=i(C);re=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var $e=i(U);Te=s($e,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),$e.forEach(t),be.forEach(t),x.forEach(t)},m(p,k){h(p,u,k),e(u,A),h(p,m,k),h(p,g,k),e(g,v),e(v,T),e(g,_),e(g,$),e($,he),h(p,J,k),h(p,G,k),e(G,X),e(G,D),e(D,Y),e(G,ue),e(G,j),e(j,fe),e(G,de),h(p,S,k),h(p,q,k),e(q,Z),h(p,ee,k),h(p,M,k),e(M,z),e(z,oe),e(z,B),e(B,ce),e(z,ne),e(z,H),e(H,me),e(M,pe),e(M,E),e(E,ge),e(E,L),e(L,Q),e(E,ae),e(E,W),e(W,_e),e(M,te),e(M,C),e(C,re),e(C,U),e(U,Te)},d(p){p&&t(u),p&&t(m),p&&t(g),p&&t(J),p&&t(G),p&&t(S),p&&t(q),p&&t(ee),p&&t(M)}}}function hm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function um(N){let u,A,m,g,v,T,_,$,he,J,G,X,D,Y,ue,j,fe,de,S,q,Z,ee,M,z,oe,B,ce,ne,H,me,pe,E,ge,L,Q,ae,W,_e,te,C,re,U,Te;return{c(){u=a("p"),A=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),$=a("li"),he=n("having all inputs as 
a list, tuple or dict in the first positional arguments."),J=l(),G=a("p"),X=n("This second option is useful when using "),D=a("code"),Y=n("tf.keras.Model.fit"),ue=n(` method which currently requires having all the tensors in the first argument of the model call function: `),j=a("code"),fe=n("model(inputs)"),de=n("."),S=l(),q=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ee=l(),M=a("ul"),z=a("li"),oe=n("a single Tensor with "),B=a("code"),ce=n("input_ids"),ne=n(" only and nothing else: "),H=a("code"),me=n("model(inputs_ids)"),pe=l(),E=a("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=a("code"),Q=n("model([input_ids, attention_mask])"),ae=n(" or "),W=a("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),te=l(),C=a("li"),re=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),Te=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){u=r(p,"P",{});var k=i(u);A=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(p),g=r(p,"UL",{});var R=i(g);v=r(R,"LI",{});var Me=i(v);T=s(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),_=d(R),$=r(R,"LI",{});var Ee=i($);he=s(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),R.forEach(t),J=d(p),G=r(p,"P",{});var F=i(G);X=s(F,"This second option is useful when using "),D=r(F,"CODE",{});var se=i(D);Y=s(se,"tf.keras.Model.fit"),se.forEach(t),ue=s(F,` method which currently requires having all the tensors in the first argument of the model call function: `),j=r(F,"CODE",{});var ze=i(j);fe=s(ze,"model(inputs)"),ze.forEach(t),de=s(F,"."),F.forEach(t),S=d(p),q=r(p,"P",{});var ve=i(q);Z=s(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),ee=d(p),M=r(p,"UL",{});var x=i(M);z=r(x,"LI",{});var V=i(z);oe=s(V,"a single Tensor with "),B=r(V,"CODE",{});var ke=i(B);ce=s(ke,"input_ids"),ke.forEach(t),ne=s(V," only and nothing else: "),H=r(V,"CODE",{});var Fe=i(H);me=s(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),pe=d(x),E=r(x,"LI",{});var K=i(E);ge=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),L=r(K,"CODE",{});var Ae=i(L);Q=s(Ae,"model([input_ids, attention_mask])"),Ae.forEach(t),ae=s(K," or "),W=r(K,"CODE",{});var we=i(W);_e=s(we,"model([input_ids, attention_mask, token_type_ids])"),we.forEach(t),K.forEach(t),te=d(x),C=r(x,"LI",{});var be=i(C);re=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(be,"CODE",{});var $e=i(U);Te=s($e,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),$e.forEach(t),be.forEach(t),x.forEach(t)},m(p,k){h(p,u,k),e(u,A),h(p,m,k),h(p,g,k),e(g,v),e(v,T),e(g,_),e(g,$),e($,he),h(p,J,k),h(p,G,k),e(G,X),e(G,D),e(D,Y),e(G,ue),e(G,j),e(j,fe),e(G,de),h(p,S,k),h(p,q,k),e(q,Z),h(p,ee,k),h(p,M,k),e(M,z),e(z,oe),e(z,B),e(B,ce),e(z,ne),e(z,H),e(H,me),e(M,pe),e(M,E),e(E,ge),e(E,L),e(L,Q),e(E,ae),e(E,W),e(W,_e),e(M,te),e(M,C),e(C,re),e(C,U),e(U,Te)},d(p){p&&t(u),p&&t(m),p&&t(g),p&&t(J),p&&t(G),p&&t(S),p&&t(q),p&&t(ee),p&&t(M)}}}function fm(N){let u,A,m,g,v;return{c(){u=a("p"),A=n("Although the recipe for forward pass needs to be defined within 
this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){u=r(T,"P",{});var _=i(u);A=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var $=i(m);g=s($,"Module"),$.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){h(T,u,_),e(u,A),e(u,m),e(m,g),e(u,v)},d(T){T&&t(u)}}}function mm(N){let u,A,m,g,v,T,_,$,he,J,G,X,D,Y,ue,j,fe,de,S,q,Z,ee,M,z,oe,B,ce,ne,H,me,pe,E,ge,L,Q,ae,W,_e,te,C,re,U,Te,p,k,R,Me,Ee,F,se,ze,ve,x,V,ke,Fe,K,Ae,we,be,$e,Ze,ui,As,fi,mi,$s,gi,_i,Gs,Ti,vi,or,bo,nr,Le,ki,Ms,bi,wi,Es,Pi,yi,Un,Ii,Oi,zs,Ai,$i,sr,Pt,Rt,Fs,wo,Gi,qs,Mi,ar,qe,Po,Ei,at,zi,Rn,Fi,qi,Vn,Ci,xi,yo,Di,ji,Hi,yt,Si,Kn,Li,Ni,Jn,Bi,Wi,Ui,Cs,Ri,Vi,Io,rr,It,Vt,xs,Oo,Ki,Ds,Ji,ir,Ce,Ao,Xi,js,Qi,Yi,$o,Hs,Zi,el,rt,tl,Ss,ol,nl,Ls,sl,al,Ns,rl,il,ll,Go,dl,Xn,cl,pl,hl,Bs,lr,Ot,Kt,Ws,Mo,ul,Us,fl,dr,Qe,Eo,ml,zo,gl,Rs,_l,Tl,vl,Fo,Vs,kl,bl,Ks,wl,Pl,qo,yl,Qn,Il,Ol,cr,At,Jt,Js,Co,Al,Xs,$l,pr,$t,xo,Gl,Qs,Ml,hr,Gt,Do,El,Ys,zl,ur,Mt,Xt,Zs,jo,Fl,ea,ql,fr,xe,Ho,Cl,ta,xl,Dl,So,jl,Yn,Hl,Sl,Ll,Lo,Nl,No,Bl,Wl,Ul,Ne,Bo,Rl,Et,Vl,Zn,Kl,Jl,oa,Xl,Ql,Yl,Qt,Zl,na,ed,td,Wo,mr,zt,Yt,sa,Uo,od,aa,nd,gr,De,Ro,sd,ra,ad,rd,Vo,id,es,ld,dd,cd,Ko,pd,Jo,hd,ud,fd,Be,Xo,md,Ft,gd,ts,_d,Td,ia,vd,kd,bd,Zt,wd,la,Pd,yd,Qo,_r,qt,eo,da,Yo,Id,ca,Od,Tr,je,Zo,Ad,pa,$d,Gd,en,Md,os,Ed,zd,Fd,tn,qd,on,Cd,xd,Dd,We,nn,jd,Ct,Hd,ns,Sd,Ld,ha,Nd,Bd,Wd,to,Ud,ua,Rd,Vd,sn,vr,xt,oo,fa,an,Kd,ma,Jd,kr,He,rn,Xd,Se,Qd,ss,Yd,Zd,ga,ec,tc,_a,oc,nc,Ta,sc,ac,va,rc,ic,lc,ln,dc,as,cc,pc,hc,dn,uc,cn,fc,mc,gc,Pe,pn,_c,Dt,Tc,rs,vc,kc,ka,bc,wc,Pc,no,yc,ba,Ic,Oc,hn,Ac,wa,$c,Gc,un,br,jt,so,Pa,fn,Mc,ya,Ec,wr,ye,mn,zc,Ia,Fc,qc,gn,Cc,is,xc,Dc,jc,_n,Hc,Tn,Sc,Lc,Nc,ao,Bc,Ue,vn,Wc,Ht,Uc,ls,Rc,Vc,Oa,Kc,Jc,Xc,ro,Qc,Aa,Yc,Zc,kn,Pr,St,io,$a,bn,ep,Ga,tp,yr,Ie,wn,op,Ma,np,sp,Pn,ap,ds,rp,ip,lp,yn,dp,In,cp,pp,hp,lo,up,Re,On,fp,Lt,mp,cs,gp,_p,Ea,Tp,vp,kp,co,bp,za,wp,Pp,An,Ir,Nt,po,Fa,$n,yp,qa,Ip,Or,Oe,Gn,Op,Ca,Ap,$p,Mn,Gp,ps,Mp,Ep,zp,En,Fp,zn,qp,Cp,xp,ho,Dp,Ve,Fn,jp,Bt,Hp,hs,Sp,Lp,xa,Np,Bp,Wp,uo,Up,Da,Rp,Vp,qn,Ar,Wt,fo,ja,Cn,Kp,Ha,Jp,$r,ie,xn,Xp,Sa,Qp,Yp,us,fs,Zp,eh,th,Ye,oh,La,nh,sh,Na,ah,rh,Ba,ih,lh,Wa,dh,ch,ph,Dn,hh,ms,uh,fh,mh,jn,gh,Hn,_h,Th,vh,mo,kh,Ke,Sn,bh,Ut,wh,gs,Ph,yh,Ua,Ih,Oh,Ah,go,$h,Ra,Gh,Mh,Ln,Gr;return T=new Xe({}),Y=new Xe({}),bo=new it({props:{code:`pip install spacy ftfy==4.4.3 python -m spacy download en,`,highlighted:`pip install spacy ftfy==4.4.3 python -m spacy download en`}}),wo=new Xe({}),Po=new le({props:{name:"class transformers.OpenAIGPTConfig",anchor:"transformers.OpenAIGPTConfig",parameters:[{name:"vocab_size",val:" = 40478"},{name:"n_positions",val:" = 512"},{name:"n_embd",val:" = 768"},{name:"n_layer",val:" = 12"},{name:"n_head",val:" = 12"},{name:"afn",val:" = 'gelu'"},{name:"resid_pdrop",val:" = 0.1"},{name:"embd_pdrop",val:" = 0.1"},{name:"attn_pdrop",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 1e-05"},{name:"initializer_range",val:" = 0.02"},{name:"predict_special_tokens",val:" = True"},{name:"summary_type",val:" = 'cls_index'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = None"},{name:"summary_proj_to_labels",val:" = True"},{name:"summary_first_dropout",val:" = 
0.1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/configuration_openai.py#L27",parametersDescription:[{anchor:"transformers.OpenAIGPTConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 40478) &#x2014; Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTModel">OpenAIGPTModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTModel">TFOpenAIGPTModel</a>.`,name:"vocab_size"},{anchor:"transformers.OpenAIGPTConfig.n_positions",description:`<strong>n_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"n_positions"},{anchor:"transformers.OpenAIGPTConfig.n_embd",description:`<strong>n_embd</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the embeddings and hidden states.`,name:"n_embd"},{anchor:"transformers.OpenAIGPTConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.OpenAIGPTConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.OpenAIGPTConfig.afn",description:`<strong>afn</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"afn"},{anchor:"transformers.OpenAIGPTConfig.resid_pdrop",description:`<strong>resid_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"resid_pdrop"},{anchor:"transformers.OpenAIGPTConfig.embd_pdrop",description:`<strong>embd_pdrop</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the embeddings.`,name:"embd_pdrop"},{anchor:"transformers.OpenAIGPTConfig.attn_pdrop",description:`<strong>attn_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention.`,name:"attn_pdrop"},{anchor:"transformers.OpenAIGPTConfig.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon to use in the layer normalization layers`,name:"layer_norm_epsilon"},{anchor:"transformers.OpenAIGPTConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.OpenAIGPTConfig.predict_special_tokens",description:`<strong>predict_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not special tokens should be predicted when the model has a language modeling head.`,name:"predict_special_tokens"},{anchor:"transformers.OpenAIGPTConfig.summary_type",description:`<strong>summary_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;cls_index&quot;</code>) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a>.</p> <p>Has to be one of the following options:</p> <ul> <li><code>&quot;last&quot;</code>: Take the last token hidden state (like XLNet).</li> <li><code>&quot;first&quot;</code>: Take the first token hidden state (like BERT).</li> <li><code>&quot;mean&quot;</code>: Take the mean of all tokens hidden states.</li> <li><code>&quot;cls_index&quot;</code>: Supply a Tensor of classification token position (like GPT/GPT-2).</li> <li><code>&quot;attn&quot;</code>: Not implemented now, use multi-head attention.</li> </ul>`,name:"summary_type"},{anchor:"transformers.OpenAIGPTConfig.summary_use_proj",description:`<strong>summary_use_proj</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a>.</p> <p>Whether or not to add a projection after the vector extraction.`,name:"summary_use_proj"},{anchor:"transformers.OpenAIGPTConfig.summary_activation",description:`<strong>summary_activation</strong> (<code>str</code>, <em>optional</em>) &#x2014; Argument used when 
doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a>.</p> <p>Pass <code>&quot;tanh&quot;</code> for a tanh activation to the output, any other value will result in no activation.`,name:"summary_activation"},{anchor:"transformers.OpenAIGPTConfig.summary_proj_to_labels",description:`<strong>summary_proj_to_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a>.</p> <p>Whether the projection outputs should have <code>config.num_labels</code> or <code>config.hidden_size</code> classes.`,name:"summary_proj_to_labels"},{anchor:"transformers.OpenAIGPTConfig.summary_first_dropout",description:`<strong>summary_first_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel">OpenAIGPTDoubleHeadsModel</a>.</p> <p>The dropout ratio to be used after the projection and activation.`,name:"summary_first_dropout"},{anchor:"transformers.OpenAIGPTConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),Io=new it({props:{code:`from transformers import OpenAIGPTConfig, OpenAIGPTModel # Initializing a GPT configuration configuration = OpenAIGPTConfig() # Initializing a model from the configuration model = OpenAIGPTModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTConfig, OpenAIGPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a GPT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = OpenAIGPTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = OpenAIGPTModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Oo=new Xe({}),Ao=new le({props:{name:"class transformers.OpenAIGPTTokenizer",anchor:"transformers.OpenAIGPTTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"unk_token",val:" = '<unk>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/tokenization_openai.py#L73",parametersDescription:[{anchor:"transformers.OpenAIGPTTokenizer.vocab_file",description:`<strong>vocab_file</strong> 
(<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.OpenAIGPTTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.OpenAIGPTTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"}]}}),Mo=new Xe({}),Eo=new le({props:{name:"class transformers.OpenAIGPTTokenizerFast",anchor:"transformers.OpenAIGPTTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"unk_token",val:" = '<unk>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/tokenization_openai_fast.py#L40",parametersDescription:[{anchor:"transformers.OpenAIGPTTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.OpenAIGPTTokenizerFast.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.OpenAIGPTTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"}]}}),Co=new Xe({}),xo=new le({props:{name:"class transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput",anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"mc_loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"mc_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L301",parametersDescription:[{anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput.mc_loss",description:`<strong>mc_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>mc_labels</code> is provided) &#x2014; Multiple choice classification loss.`,name:"mc_loss"},{anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before 
SoftMax).`,name:"logits"},{anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput.mc_logits",description:`<strong>mc_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).`,name:"mc_logits"},{anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Do=new le({props:{name:"class transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput",anchor:"transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput",parameters:[{name:"logits",val:": Tensor = None"},{name:"mc_logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L391",parametersDescription:[{anchor:"transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput.mc_logits",description:`<strong>mc_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).`,name:"mc_logits"},{anchor:"transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding 
outputs.`,name:"hidden_states"},{anchor:"transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),jo=new Xe({}),Ho=new le({props:{name:"class transformers.OpenAIGPTModel",anchor:"transformers.OpenAIGPTModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L404",parametersDescription:[{anchor:"transformers.OpenAIGPTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bo=new le({props:{name:"forward",anchor:"transformers.OpenAIGPTModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L430",parametersDescription:[{anchor:"transformers.OpenAIGPTModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OpenAIGPTModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.OpenAIGPTModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.OpenAIGPTModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.OpenAIGPTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OpenAIGPTModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OpenAIGPTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OpenAIGPTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OpenAIGPTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qt=new st({props:{$$slots:{default:[nm]},$$scope:{ctx:N}}}),Wo=new it({props:{code:`from transformers import OpenAIGPTTokenizer, OpenAIGPTModel import torch tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = OpenAIGPTModel.from_pretrained('openai-gpt') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, OpenAIGPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OpenAIGPTModel.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Uo=new Xe({}),Ro=new le({props:{name:"class transformers.OpenAIGPTLMHeadModel",anchor:"transformers.OpenAIGPTLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L535",parametersDescription:[{anchor:"transformers.OpenAIGPTLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xo=new le({props:{name:"forward",anchor:"transformers.OpenAIGPTLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L550",parametersDescription:[{anchor:"transformers.OpenAIGPTLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.OpenAIGPTLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Zt=new st({props:{$$slots:{default:[sm]},$$scope:{ctx:N}}}),Qo=new it({props:{code:`import torch from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, OpenAIGPTLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OpenAIGPTLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = 
tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Yo=new Xe({}),Zo=new le({props:{name:"class transformers.OpenAIGPTDoubleHeadsModel",anchor:"transformers.OpenAIGPTDoubleHeadsModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L622",parametersDescription:[{anchor:"transformers.OpenAIGPTDoubleHeadsModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),nn=new le({props:{name:"forward",anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"mc_token_ids",val:" = None"},{name:"labels",val:" = None"},{name:"mc_labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L640",parametersDescription:[{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.mc_token_ids",description:`<strong>mc_token_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices)</code>, <em>optional</em>, default to index of the last token of the input) &#x2014; Index of the classification token in each input sequence. Selected in the range <code>[0, input_ids.size(-1) - 1]</code>.`,name:"mc_token_ids"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. you can set <code>labels = input_ids</code> Indices are selected in <code>[-1, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.OpenAIGPTDoubleHeadsModel.forward.mc_labels",description:`<strong>mc_labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <em>num_choices</em> is the size of the second dimension of the input tensors. 
(see <em>input_ids</em> above)`,name:"mc_labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput" >transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>mc_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>mc_labels</code> is provided) \u2014 Multiple choice classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>mc_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput" >transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),to=new st({props:{$$slots:{default:[am]},$$scope:{ctx:N}}}),sn=new it({props:{code:`from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel import torch tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt') tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!) 
model.resize_token_embeddings(len(tokenizer)) choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_logits = outputs.lm_logits mc_logits = outputs.mc_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OpenAIGPTDoubleHeadsModel.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.add_special_tokens({<span class="hljs-string">&#x27;cls_token&#x27;</span>: <span class="hljs-string">&#x27;[CLS]&#x27;</span>}) <span class="hljs-comment"># Add a [CLS] to the vocabulary (we should train it also!)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-meta">&gt;&gt;&gt; </span>choices = [<span class="hljs-string">&quot;Hello, my dog is cute [CLS]&quot;</span>, <span class="hljs-string">&quot;Hello, my cat is cute [CLS]&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([tokenizer.encode(s) <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> choices]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1, 2 choices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>mc_token_ids = torch.tensor([input_ids.size(-<span class="hljs-number">1</span>)-<span class="hljs-number">1</span>, input_ids.size(-<span class="hljs-number">1</span>)-<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, mc_token_ids=mc_token_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>lm_logits = outputs.lm_logits <span class="hljs-meta">&gt;&gt;&gt; </span>mc_logits = outputs.mc_logits`}}),an=new Xe({}),rn=new le({props:{name:"class transformers.OpenAIGPTForSequenceClassification",anchor:"transformers.OpenAIGPTForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L745",parametersDescription:[{anchor:"transformers.OpenAIGPTForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),pn=new le({props:{name:"forward",anchor:"transformers.OpenAIGPTForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_openai.py#L755",parametersDescription:[{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.OpenAIGPTForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),no=new st({props:{$$slots:{default:[rm]},$$scope:{ctx:N}}}),hn=new it({props:{code:`from transformers import OpenAIGPTTokenizer, OpenAIGPTForSequenceClassification import torch tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = OpenAIGPTForSequenceClassification.from_pretrained('openai-gpt') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, OpenAIGPTForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OpenAIGPTForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),un=new it({props:{code:`from transformers import OpenAIGPTTokenizer, OpenAIGPTForSequenceClassification import torch tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = OpenAIGPTForSequenceClassification.from_pretrained('openai-gpt', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, OpenAIGPTForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OpenAIGPTForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),fn=new Xe({}),mn=new le({props:{name:"class transformers.TFOpenAIGPTModel",anchor:"transformers.TFOpenAIGPTModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L515",parametersDescription:[{anchor:"transformers.TFOpenAIGPTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ao=new st({props:{$$slots:{default:[im]},$$scope:{ctx:N}}}),vn=new le({props:{name:"call",anchor:"transformers.TFOpenAIGPTModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L520",parametersDescription:[{anchor:"transformers.TFOpenAIGPTModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFOpenAIGPTModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFOpenAIGPTModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFOpenAIGPTModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFOpenAIGPTModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFOpenAIGPTModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFOpenAIGPTModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFOpenAIGPTModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFOpenAIGPTModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFOpenAIGPTModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ro=new st({props:{$$slots:{default:[lm]},$$scope:{ctx:N}}}),kn=new it({props:{code:`from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel import tensorflow as tf tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTModel.from_pretrained('openai-gpt') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, TFOpenAIGPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFOpenAIGPTModel.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, 
return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),bn=new Xe({}),wn=new le({props:{name:"class transformers.TFOpenAIGPTLMHeadModel",anchor:"transformers.TFOpenAIGPTLMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L585",parametersDescription:[{anchor:"transformers.TFOpenAIGPTLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lo=new st({props:{$$slots:{default:[dm]},$$scope:{ctx:N}}}),On=new le({props:{name:"call",anchor:"transformers.TFOpenAIGPTLMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L596",parametersDescription:[{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFOpenAIGPTLMHeadModel.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),co=new st({props:{$$slots:{default:[cm]},$$scope:{ctx:N}}}),An=new it({props:{code:`from 
transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel import tensorflow as tf tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFOpenAIGPTLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$n=new Xe({}),Gn=new le({props:{name:"class transformers.TFOpenAIGPTDoubleHeadsModel",anchor:"transformers.TFOpenAIGPTDoubleHeadsModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L688",parametersDescription:[{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ho=new st({props:{$$slots:{default:[pm]},$$scope:{ctx:N}}}),Fn=new le({props:{name:"call",anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"mc_token_ids",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L697",parametersDescription:[{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFOpenAIGPTDoubleHeadsModel.call.mc_token_ids",description:`<strong>mc_token_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, num_choices)</code>, <em>optional</em>, default to index of the last token of the input) &#x2014; Index of the classification token in each input sequence. Selected in the range <code>[0, input_ids.size(-1) - 1]</code>.`,name:"mc_token_ids"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput" >transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>mc_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of 
<code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput" >transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),uo=new st({props:{$$slots:{default:[hm]},$$scope:{ctx:N}}}),qn=new it({props:{code:`import tensorflow as tf from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt') # Add a [CLS] to the vocabulary (we should train it also!) tokenizer.add_special_tokens({'cls_token': '[CLS]'}) model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size print(tokenizer.cls_token_id, len(tokenizer)) # The newly token the last token of the vocabulary choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] encoding = tokenizer(choices, return_tensors="tf") inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} inputs["mc_token_ids"]= tf.constant([inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1])[None, :] # Batch size 1 outputs = model(inputs) lm_prediction_scores, mc_prediction_scores = outputs[:2],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFOpenAIGPTDoubleHeadsModel.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Add a [CLS] to the vocabulary (we should train it also!)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.add_special_tokens({<span class="hljs-string">&#x27;cls_token&#x27;</span>: <span class="hljs-string">&#x27;[CLS]&#x27;</span>}) <span class="hljs-meta">&gt;&gt;&gt; </span>model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-comment"># Update the model embeddings with the new vocabulary size</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.cls_token_id, <span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-comment"># The newly token the last token of the vocabulary</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choices = [<span class="hljs-string">&quot;Hello, my dog is cute [CLS]&quot;</span>, <span class="hljs-string">&quot;Hello, my cat is cute [CLS]&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(choices, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;mc_token_ids&quot;</span>]= tf.constant([inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].shape[-<span class="hljs-number">1</span>] - <span class="hljs-number">1</span>, inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].shape[-<span class="hljs-number">1</span>] - <span class="hljs-number">1</span>])[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>lm_prediction_scores, mc_prediction_scores = outputs[:<span class="hljs-number">2</span>]`}}),Cn=new Xe({}),xn=new le({props:{name:"class transformers.TFOpenAIGPTForSequenceClassification",anchor:"transformers.TFOpenAIGPTForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L841",parametersDescription:[{anchor:"transformers.TFOpenAIGPTForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig">OpenAIGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mo=new st({props:{$$slots:{default:[um]},$$scope:{ctx:N}}}),Sn=new le({props:{name:"call",anchor:"transformers.TFOpenAIGPTForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/openai/modeling_tf_openai.py#L853",parametersDescription:[{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer">OpenAIGPTTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFOpenAIGPTForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTConfig" >OpenAIGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),go=new st({props:{$$slots:{default:[fm]},$$scope:{ctx:N}}}),Ln=new 
it({props:{code:`from transformers import OpenAIGPTTokenizer, TFOpenAIGPTForSequenceClassification import tensorflow as tf tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTForSequenceClassification.from_pretrained('openai-gpt') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OpenAIGPTTokenizer, TFOpenAIGPTForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = OpenAIGPTTokenizer.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFOpenAIGPTForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;openai-gpt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){u=a("meta"),A=l(),m=a("h1"),g=a("a"),v=a("span"),b(T.$$.fragment),_=l(),$=a("span"),he=n("OpenAI GPT"),J=l(),G=a("h2"),X=a("a"),D=a("span"),b(Y.$$.fragment),ue=l(),j=a("span"),fe=n("Overview"),de=l(),S=a("p"),q=n("OpenAI GPT model was proposed in "),Z=a("a"),ee=n("Improving Language Understanding by Generative Pre-Training"),M=n(` by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. It\u2019s a causal (unidirectional) transformer pre-trained using language modeling on a large corpus will long range dependencies, the Toronto Book Corpus.`),z=l(),oe=a("p"),B=n("The abstract from the paper is the following:"),ce=l(),ne=a("p"),H=a("em"),me=n(`Natural language understanding comprises a wide range of diverse tasks such as textual entailment, question answering, semantic similarity assessment, and document classification. Although large unlabeled text corpora are abundant, labeled data for learning these specific tasks is scarce, making it challenging for discriminatively trained models to perform adequately. We demonstrate that large gains on these tasks can be realized by generative pretraining of a language model on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each specific task. In contrast to previous approaches, we make use of task-aware input transformations during fine-tuning to achieve effective transfer while requiring minimal changes to the model architecture. We demonstrate the effectiveness of our approach on a wide range of benchmarks for natural language understanding. 
Our general task-agnostic model outperforms discriminatively trained models that use architectures specifically crafted for each task, significantly improving upon the state of the art in 9 out of the 12 tasks studied.`),pe=l(),E=a("p"),ge=n("Tips:"),L=l(),Q=a("ul"),ae=a("li"),W=n(`GPT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),_e=l(),te=a("li"),C=n(`GPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the `),re=a("em"),U=n("run_generation.py"),Te=n(" example script."),p=l(),k=a("p"),R=a("a"),Me=n("Write With Transformer"),Ee=n(` is a webapp created and hosted by Hugging Face showcasing the generative capabilities of several models. GPT is one of them.`),F=l(),se=a("p"),ze=n("This model was contributed by "),ve=a("a"),x=n("thomwolf"),V=n(". The original code can be found "),ke=a("a"),Fe=n("here"),K=n("."),Ae=l(),we=a("p"),be=n("Note:"),$e=l(),Ze=a("p"),ui=n("If you want to reproduce the original tokenization process of the "),As=a("em"),fi=n("OpenAI GPT"),mi=n(" paper, you will need to install "),$s=a("code"),gi=n("ftfy"),_i=n(` and `),Gs=a("code"),Ti=n("SpaCy"),vi=n(":"),or=l(),b(bo.$$.fragment),nr=l(),Le=a("p"),ki=n("If you don\u2019t install "),Ms=a("code"),bi=n("ftfy"),wi=n(" and "),Es=a("code"),Pi=n("SpaCy"),yi=n(", the "),Un=a("a"),Ii=n("OpenAIGPTTokenizer"),Oi=n(` will default to tokenize using BERT\u2019s `),zs=a("code"),Ai=n("BasicTokenizer"),$i=n(" followed by Byte-Pair Encoding (which should be fine for most usage, don\u2019t worry)."),sr=l(),Pt=a("h2"),Rt=a("a"),Fs=a("span"),b(wo.$$.fragment),Gi=l(),qs=a("span"),Mi=n("OpenAIGPTConfig"),ar=l(),qe=a("div"),b(Po.$$.fragment),Ei=l(),at=a("p"),zi=n("This is the configuration class to store the configuration of a "),Rn=a("a"),Fi=n("OpenAIGPTModel"),qi=n(` or a `),Vn=a("a"),Ci=n("TFOpenAIGPTModel"),xi=n(`. It is used to instantiate a GPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),yo=a("a"),Di=n("GPT"),ji=n(" architecture from OpenAI."),Hi=l(),yt=a("p"),Si=n("Configuration objects inherit from "),Kn=a("a"),Li=n("PretrainedConfig"),Ni=n(` and can be used to control the model outputs. Read the documentation from `),Jn=a("a"),Bi=n("PretrainedConfig"),Wi=n(" for more information."),Ui=l(),Cs=a("p"),Ri=n("Examples:"),Vi=l(),b(Io.$$.fragment),rr=l(),It=a("h2"),Vt=a("a"),xs=a("span"),b(Oo.$$.fragment),Ki=l(),Ds=a("span"),Ji=n("OpenAIGPTTokenizer"),ir=l(),Ce=a("div"),b(Ao.$$.fragment),Xi=l(),js=a("p"),Qi=n("Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities:"),Yi=l(),$o=a("ul"),Hs=a("li"),Zi=n("lowercases all inputs,"),el=l(),rt=a("li"),tl=n("uses "),Ss=a("code"),ol=n("SpaCy"),nl=n(" tokenizer and "),Ls=a("code"),sl=n("ftfy"),al=n(` for pre-BPE tokenization if they are installed, fallback to BERT\u2019s `),Ns=a("code"),rl=n("BasicTokenizer"),il=n(" if not."),ll=l(),Go=a("p"),dl=n("This tokenizer inherits from "),Xn=a("a"),cl=n("PreTrainedTokenizer"),pl=n(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),hl=l(),Bs=a("div"),lr=l(),Ot=a("h2"),Kt=a("a"),Ws=a("span"),b(Mo.$$.fragment),ul=l(),Us=a("span"),fl=n("OpenAIGPTTokenizerFast"),dr=l(),Qe=a("div"),b(Eo.$$.fragment),ml=l(),zo=a("p"),gl=n("Construct a \u201Cfast\u201D GPT Tokenizer (backed by HuggingFace\u2019s "),Rs=a("em"),_l=n("tokenizers"),Tl=n(` library). Based on Byte-Pair-Encoding with the following peculiarities:`),vl=l(),Fo=a("ul"),Vs=a("li"),kl=n("lower case all inputs"),bl=l(),Ks=a("li"),wl=n("uses BERT\u2019s BasicTokenizer for pre-BPE tokenization"),Pl=l(),qo=a("p"),yl=n("This tokenizer inherits from "),Qn=a("a"),Il=n("PreTrainedTokenizerFast"),Ol=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),cr=l(),At=a("h2"),Jt=a("a"),Js=a("span"),b(Co.$$.fragment),Al=l(),Xs=a("span"),$l=n("OpenAI specific outputs"),pr=l(),$t=a("div"),b(xo.$$.fragment),Gl=l(),Qs=a("p"),Ml=n("Base class for outputs of models predicting if two sentences are consecutive or not."),hr=l(),Gt=a("div"),b(Do.$$.fragment),El=l(),Ys=a("p"),zl=n("Base class for outputs of models predicting if two sentences are consecutive or not."),ur=l(),Mt=a("h2"),Xt=a("a"),Zs=a("span"),b(jo.$$.fragment),Fl=l(),ea=a("span"),ql=n("OpenAIGPTModel"),fr=l(),xe=a("div"),b(Ho.$$.fragment),Cl=l(),ta=a("p"),xl=n("The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top."),Dl=l(),So=a("p"),jl=n("This model inherits from "),Yn=a("a"),Hl=n("PreTrainedModel"),Sl=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ll=l(),Lo=a("p"),Nl=n("This model is also a PyTorch "),No=a("a"),Bl=n("torch.nn.Module"),Wl=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ul=l(),Ne=a("div"),b(Bo.$$.fragment),Rl=l(),Et=a("p"),Vl=n("The "),Zn=a("a"),Kl=n("OpenAIGPTModel"),Jl=n(" forward method, overrides the "),oa=a("code"),Xl=n("__call__"),Ql=n(" special method."),Yl=l(),b(Qt.$$.fragment),Zl=l(),na=a("p"),ed=n("Example:"),td=l(),b(Wo.$$.fragment),mr=l(),zt=a("h2"),Yt=a("a"),sa=a("span"),b(Uo.$$.fragment),od=l(),aa=a("span"),nd=n("OpenAIGPTLMHeadModel"),gr=l(),De=a("div"),b(Ro.$$.fragment),sd=l(),ra=a("p"),ad=n(`OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),rd=l(),Vo=a("p"),id=n("This model inherits from "),es=a("a"),ld=n("PreTrainedModel"),dd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cd=l(),Ko=a("p"),pd=n("This model is also a PyTorch "),Jo=a("a"),hd=n("torch.nn.Module"),ud=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fd=l(),Be=a("div"),b(Xo.$$.fragment),md=l(),Ft=a("p"),gd=n("The "),ts=a("a"),_d=n("OpenAIGPTLMHeadModel"),Td=n(" forward method, overrides the "),ia=a("code"),vd=n("__call__"),kd=n(" special method."),bd=l(),b(Zt.$$.fragment),wd=l(),la=a("p"),Pd=n("Example:"),yd=l(),b(Qo.$$.fragment),_r=l(),qt=a("h2"),eo=a("a"),da=a("span"),b(Yo.$$.fragment),Id=l(),ca=a("span"),Od=n("OpenAIGPTDoubleHeadsModel"),Tr=l(),je=a("div"),b(Zo.$$.fragment),Ad=l(),pa=a("p"),$d=n(`OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),Gd=l(),en=a("p"),Md=n("This model inherits from "),os=a("a"),Ed=n("PreTrainedModel"),zd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fd=l(),tn=a("p"),qd=n("This model is also a PyTorch "),on=a("a"),Cd=n("torch.nn.Module"),xd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dd=l(),We=a("div"),b(nn.$$.fragment),jd=l(),Ct=a("p"),Hd=n("The "),ns=a("a"),Sd=n("OpenAIGPTDoubleHeadsModel"),Ld=n(" forward method, overrides the "),ha=a("code"),Nd=n("__call__"),Bd=n(" special method."),Wd=l(),b(to.$$.fragment),Ud=l(),ua=a("p"),Rd=n("Examples:"),Vd=l(),b(sn.$$.fragment),vr=l(),xt=a("h2"),oo=a("a"),fa=a("span"),b(an.$$.fragment),Kd=l(),ma=a("span"),Jd=n("OpenAIGPTForSequenceClassification"),kr=l(),He=a("div"),b(rn.$$.fragment),Xd=l(),Se=a("p"),Qd=n(`The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer). `),ss=a("a"),Yd=n("OpenAIGPTForSequenceClassification"),Zd=n(` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `),ga=a("code"),ec=n("pad_token_id"),tc=n(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),_a=a("code"),oc=n("pad_token_id"),nc=n(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Ta=a("code"),sc=n("inputs_embeds"),ac=n(` are passed instead of `),va=a("code"),rc=n("input_ids"),ic=n(", it does the same (take the last value in each row of the batch)."),lc=l(),ln=a("p"),dc=n("This model inherits from "),as=a("a"),cc=n("PreTrainedModel"),pc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hc=l(),dn=a("p"),uc=n("This model is also a PyTorch "),cn=a("a"),fc=n("torch.nn.Module"),mc=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gc=l(),Pe=a("div"),b(pn.$$.fragment),_c=l(),Dt=a("p"),Tc=n("The "),rs=a("a"),vc=n("OpenAIGPTForSequenceClassification"),kc=n(" forward method, overrides the "),ka=a("code"),bc=n("__call__"),wc=n(" special method."),Pc=l(),b(no.$$.fragment),yc=l(),ba=a("p"),Ic=n("Example of single-label classification:"),Oc=l(),b(hn.$$.fragment),Ac=l(),wa=a("p"),$c=n("Example of multi-label classification:"),Gc=l(),b(un.$$.fragment),br=l(),jt=a("h2"),so=a("a"),Pa=a("span"),b(fn.$$.fragment),Mc=l(),ya=a("span"),Ec=n("TFOpenAIGPTModel"),wr=l(),ye=a("div"),b(mn.$$.fragment),zc=l(),Ia=a("p"),Fc=n("The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top."),qc=l(),gn=a("p"),Cc=n("This model inherits from "),is=a("a"),xc=n("TFPreTrainedModel"),Dc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jc=l(),_n=a("p"),Hc=n("This model is also a "),Tn=a("a"),Sc=n("tf.keras.Model"),Lc=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Nc=l(),b(ao.$$.fragment),Bc=l(),Ue=a("div"),b(vn.$$.fragment),Wc=l(),Ht=a("p"),Uc=n("The "),ls=a("a"),Rc=n("TFOpenAIGPTModel"),Vc=n(" forward method, overrides the "),Oa=a("code"),Kc=n("__call__"),Jc=n(" special method."),Xc=l(),b(ro.$$.fragment),Qc=l(),Aa=a("p"),Yc=n("Example:"),Zc=l(),b(kn.$$.fragment),Pr=l(),St=a("h2"),io=a("a"),$a=a("span"),b(bn.$$.fragment),ep=l(),Ga=a("span"),tp=n("TFOpenAIGPTLMHeadModel"),yr=l(),Ie=a("div"),b(wn.$$.fragment),op=l(),Ma=a("p"),np=n(`OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),sp=l(),Pn=a("p"),ap=n("This model inherits from "),ds=a("a"),rp=n("TFPreTrainedModel"),ip=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lp=l(),yn=a("p"),dp=n("This model is also a "),In=a("a"),cp=n("tf.keras.Model"),pp=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hp=l(),b(lo.$$.fragment),up=l(),Re=a("div"),b(On.$$.fragment),fp=l(),Lt=a("p"),mp=n("The "),cs=a("a"),gp=n("TFOpenAIGPTLMHeadModel"),_p=n(" forward method, overrides the "),Ea=a("code"),Tp=n("__call__"),vp=n(" special method."),kp=l(),b(co.$$.fragment),bp=l(),za=a("p"),wp=n("Example:"),Pp=l(),b(An.$$.fragment),Ir=l(),Nt=a("h2"),po=a("a"),Fa=a("span"),b($n.$$.fragment),yp=l(),qa=a("span"),Ip=n("TFOpenAIGPTDoubleHeadsModel"),Or=l(),Oe=a("div"),b(Gn.$$.fragment),Op=l(),Ca=a("p"),Ap=n(`OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),$p=l(),Mn=a("p"),Gp=n("This model inherits from "),ps=a("a"),Mp=n("TFPreTrainedModel"),Ep=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zp=l(),En=a("p"),Fp=n("This model is also a "),zn=a("a"),qp=n("tf.keras.Model"),Cp=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),xp=l(),b(ho.$$.fragment),Dp=l(),Ve=a("div"),b(Fn.$$.fragment),jp=l(),Bt=a("p"),Hp=n("The "),hs=a("a"),Sp=n("TFOpenAIGPTDoubleHeadsModel"),Lp=n(" forward method, overrides the "),xa=a("code"),Np=n("__call__"),Bp=n(" special method."),Wp=l(),b(uo.$$.fragment),Up=l(),Da=a("p"),Rp=n("Examples:"),Vp=l(),b(qn.$$.fragment),Ar=l(),Wt=a("h2"),fo=a("a"),ja=a("span"),b(Cn.$$.fragment),Kp=l(),Ha=a("span"),Jp=n("TFOpenAIGPTForSequenceClassification"),$r=l(),ie=a("div"),b(xn.$$.fragment),Xp=l(),Sa=a("p"),Qp=n("The OpenAI GPT Model transformer with a sequence classification head on top (linear layer)."),Yp=l(),us=a("p"),fs=a("a"),Zp=n("TFOpenAIGPTForSequenceClassification"),eh=n(` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.`),th=l(),Ye=a("p"),oh=n(`Since it does classification on the last token, it requires to know the position of the last token. If a `),La=a("code"),nh=n("pad_token_id"),sh=n(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Na=a("code"),ah=n("pad_token_id"),rh=n(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Ba=a("code"),ih=n("inputs_embeds"),lh=n(" are passed instead of "),Wa=a("code"),dh=n("input_ids"),ch=n(`, it does the same (take the last value in each row of the batch).`),ph=l(),Dn=a("p"),hh=n("This model inherits from "),ms=a("a"),uh=n("TFPreTrainedModel"),fh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mh=l(),jn=a("p"),gh=n("This model is also a "),Hn=a("a"),_h=n("tf.keras.Model"),Th=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vh=l(),b(mo.$$.fragment),kh=l(),Ke=a("div"),b(Sn.$$.fragment),bh=l(),Ut=a("p"),wh=n("The "),gs=a("a"),Ph=n("TFOpenAIGPTForSequenceClassification"),yh=n(" forward method, overrides the "),Ua=a("code"),Ih=n("__call__"),Oh=n(" special method."),Ah=l(),b(go.$$.fragment),$h=l(),Ra=a("p"),Gh=n("Example:"),Mh=l(),b(Ln.$$.fragment),this.h()},l(o){const f=om('[data-svelte="svelte-1phssyn"]',document.head);u=r(f,"META",{name:!0,content:!0}),f.forEach(t),A=d(o),m=r(o,"H1",{class:!0});var Nn=i(m);g=r(Nn,"A",{id:!0,class:!0,href:!0});var Va=i(g);v=r(Va,"SPAN",{});var Ka=i(v);w(T.$$.fragment,Ka),Ka.forEach(t),Va.forEach(t),_=d(Nn),$=r(Nn,"SPAN",{});var Ja=i($);he=s(Ja,"OpenAI GPT"),Ja.forEach(t),Nn.forEach(t),J=d(o),G=r(o,"H2",{class:!0});var Bn=i(G);X=r(Bn,"A",{id:!0,class:!0,href:!0});var Xa=i(X);D=r(Xa,"SPAN",{});var Qa=i(D);w(Y.$$.fragment,Qa),Qa.forEach(t),Xa.forEach(t),ue=d(Bn),j=r(Bn,"SPAN",{});var Ya=i(j);fe=s(Ya,"Overview"),Ya.forEach(t),Bn.forEach(t),de=d(o),S=r(o,"P",{});var Wn=i(S);q=s(Wn,"OpenAI GPT model was proposed in "),Z=r(Wn,"A",{href:!0,rel:!0});var Za=i(Z);ee=s(Za,"Improving Language Understanding by Generative Pre-Training"),Za.forEach(t),M=s(Wn,` by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 
It\u2019s a causal (unidirectional) transformer pre-trained using language modeling on a large corpus will long range dependencies, the Toronto Book Corpus.`),Wn.forEach(t),z=d(o),oe=r(o,"P",{});var er=i(oe);B=s(er,"The abstract from the paper is the following:"),er.forEach(t),ce=d(o),ne=r(o,"P",{});var tr=i(ne);H=r(tr,"EM",{});var Fh=i(H);me=s(Fh,`Natural language understanding comprises a wide range of diverse tasks such as textual entailment, question answering, semantic similarity assessment, and document classification. Although large unlabeled text corpora are abundant, labeled data for learning these specific tasks is scarce, making it challenging for discriminatively trained models to perform adequately. We demonstrate that large gains on these tasks can be realized by generative pretraining of a language model on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each specific task. In contrast to previous approaches, we make use of task-aware input transformations during fine-tuning to achieve effective transfer while requiring minimal changes to the model architecture. We demonstrate the effectiveness of our approach on a wide range of benchmarks for natural language understanding. Our general task-agnostic model outperforms discriminatively trained models that use architectures specifically crafted for each task, significantly improving upon the state of the art in 9 out of the 12 tasks studied.`),Fh.forEach(t),tr.forEach(t),pe=d(o),E=r(o,"P",{});var qh=i(E);ge=s(qh,"Tips:"),qh.forEach(t),L=d(o),Q=r(o,"UL",{});var Mr=i(Q);ae=r(Mr,"LI",{});var Ch=i(ae);W=s(Ch,`GPT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),Ch.forEach(t),_e=d(Mr),te=r(Mr,"LI",{});var Er=i(te);C=s(Er,`GPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the `),re=r(Er,"EM",{});var xh=i(re);U=s(xh,"run_generation.py"),xh.forEach(t),Te=s(Er," example script."),Er.forEach(t),Mr.forEach(t),p=d(o),k=r(o,"P",{});var Eh=i(k);R=r(Eh,"A",{href:!0,rel:!0});var Dh=i(R);Me=s(Dh,"Write With Transformer"),Dh.forEach(t),Ee=s(Eh,` is a webapp created and hosted by Hugging Face showcasing the generative capabilities of several models. GPT is one of them.`),Eh.forEach(t),F=d(o),se=r(o,"P",{});var _s=i(se);ze=s(_s,"This model was contributed by "),ve=r(_s,"A",{href:!0,rel:!0});var jh=i(ve);x=s(jh,"thomwolf"),jh.forEach(t),V=s(_s,". 
The original code can be found "),ke=r(_s,"A",{href:!0,rel:!0});var Hh=i(ke);Fe=s(Hh,"here"),Hh.forEach(t),K=s(_s,"."),_s.forEach(t),Ae=d(o),we=r(o,"P",{});var Sh=i(we);be=s(Sh,"Note:"),Sh.forEach(t),$e=d(o),Ze=r(o,"P",{});var _o=i(Ze);ui=s(_o,"If you want to reproduce the original tokenization process of the "),As=r(_o,"EM",{});var Lh=i(As);fi=s(Lh,"OpenAI GPT"),Lh.forEach(t),mi=s(_o," paper, you will need to install "),$s=r(_o,"CODE",{});var Nh=i($s);gi=s(Nh,"ftfy"),Nh.forEach(t),_i=s(_o,` and `),Gs=r(_o,"CODE",{});var Bh=i(Gs);Ti=s(Bh,"SpaCy"),Bh.forEach(t),vi=s(_o,":"),_o.forEach(t),or=d(o),w(bo.$$.fragment,o),nr=d(o),Le=r(o,"P",{});var lt=i(Le);ki=s(lt,"If you don\u2019t install "),Ms=r(lt,"CODE",{});var Wh=i(Ms);bi=s(Wh,"ftfy"),Wh.forEach(t),wi=s(lt," and "),Es=r(lt,"CODE",{});var Uh=i(Es);Pi=s(Uh,"SpaCy"),Uh.forEach(t),yi=s(lt,", the "),Un=r(lt,"A",{href:!0});var Rh=i(Un);Ii=s(Rh,"OpenAIGPTTokenizer"),Rh.forEach(t),Oi=s(lt,` will default to tokenize using BERT\u2019s `),zs=r(lt,"CODE",{});var Vh=i(zs);Ai=s(Vh,"BasicTokenizer"),Vh.forEach(t),$i=s(lt," followed by Byte-Pair Encoding (which should be fine for most usage, don\u2019t worry)."),lt.forEach(t),sr=d(o),Pt=r(o,"H2",{class:!0});var zr=i(Pt);Rt=r(zr,"A",{id:!0,class:!0,href:!0});var Kh=i(Rt);Fs=r(Kh,"SPAN",{});var Jh=i(Fs);w(wo.$$.fragment,Jh),Jh.forEach(t),Kh.forEach(t),Gi=d(zr),qs=r(zr,"SPAN",{});var Xh=i(qs);Mi=s(Xh,"OpenAIGPTConfig"),Xh.forEach(t),zr.forEach(t),ar=d(o),qe=r(o,"DIV",{class:!0});var dt=i(qe);w(Po.$$.fragment,dt),Ei=d(dt),at=r(dt,"P",{});var To=i(at);zi=s(To,"This is the configuration class to store the configuration of a "),Rn=r(To,"A",{href:!0});var Qh=i(Rn);Fi=s(Qh,"OpenAIGPTModel"),Qh.forEach(t),qi=s(To,` or a `),Vn=r(To,"A",{href:!0});var Yh=i(Vn);Ci=s(Yh,"TFOpenAIGPTModel"),Yh.forEach(t),xi=s(To,`. It is used to instantiate a GPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),yo=r(To,"A",{href:!0,rel:!0});var Zh=i(yo);Di=s(Zh,"GPT"),Zh.forEach(t),ji=s(To," architecture from OpenAI."),To.forEach(t),Hi=d(dt),yt=r(dt,"P",{});var Ts=i(yt);Si=s(Ts,"Configuration objects inherit from "),Kn=r(Ts,"A",{href:!0});var eu=i(Kn);Li=s(eu,"PretrainedConfig"),eu.forEach(t),Ni=s(Ts,` and can be used to control the model outputs. Read the documentation from `),Jn=r(Ts,"A",{href:!0});var tu=i(Jn);Bi=s(tu,"PretrainedConfig"),tu.forEach(t),Wi=s(Ts," for more information."),Ts.forEach(t),Ui=d(dt),Cs=r(dt,"P",{});var ou=i(Cs);Ri=s(ou,"Examples:"),ou.forEach(t),Vi=d(dt),w(Io.$$.fragment,dt),dt.forEach(t),rr=d(o),It=r(o,"H2",{class:!0});var Fr=i(It);Vt=r(Fr,"A",{id:!0,class:!0,href:!0});var nu=i(Vt);xs=r(nu,"SPAN",{});var su=i(xs);w(Oo.$$.fragment,su),su.forEach(t),nu.forEach(t),Ki=d(Fr),Ds=r(Fr,"SPAN",{});var au=i(Ds);Ji=s(au,"OpenAIGPTTokenizer"),au.forEach(t),Fr.forEach(t),ir=d(o),Ce=r(o,"DIV",{class:!0});var ct=i(Ce);w(Ao.$$.fragment,ct),Xi=d(ct),js=r(ct,"P",{});var ru=i(js);Qi=s(ru,"Construct a GPT Tokenizer. 
Based on Byte-Pair-Encoding with the following peculiarities:"),ru.forEach(t),Yi=d(ct),$o=r(ct,"UL",{});var qr=i($o);Hs=r(qr,"LI",{});var iu=i(Hs);Zi=s(iu,"lowercases all inputs,"),iu.forEach(t),el=d(qr),rt=r(qr,"LI",{});var vo=i(rt);tl=s(vo,"uses "),Ss=r(vo,"CODE",{});var lu=i(Ss);ol=s(lu,"SpaCy"),lu.forEach(t),nl=s(vo," tokenizer and "),Ls=r(vo,"CODE",{});var du=i(Ls);sl=s(du,"ftfy"),du.forEach(t),al=s(vo,` for pre-BPE tokenization if they are installed, fallback to BERT\u2019s `),Ns=r(vo,"CODE",{});var cu=i(Ns);rl=s(cu,"BasicTokenizer"),cu.forEach(t),il=s(vo," if not."),vo.forEach(t),qr.forEach(t),ll=d(ct),Go=r(ct,"P",{});var Cr=i(Go);dl=s(Cr,"This tokenizer inherits from "),Xn=r(Cr,"A",{href:!0});var pu=i(Xn);cl=s(pu,"PreTrainedTokenizer"),pu.forEach(t),pl=s(Cr,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Cr.forEach(t),hl=d(ct),Bs=r(ct,"DIV",{class:!0}),i(Bs).forEach(t),ct.forEach(t),lr=d(o),Ot=r(o,"H2",{class:!0});var xr=i(Ot);Kt=r(xr,"A",{id:!0,class:!0,href:!0});var hu=i(Kt);Ws=r(hu,"SPAN",{});var uu=i(Ws);w(Mo.$$.fragment,uu),uu.forEach(t),hu.forEach(t),ul=d(xr),Us=r(xr,"SPAN",{});var fu=i(Us);fl=s(fu,"OpenAIGPTTokenizerFast"),fu.forEach(t),xr.forEach(t),dr=d(o),Qe=r(o,"DIV",{class:!0});var ko=i(Qe);w(Eo.$$.fragment,ko),ml=d(ko),zo=r(ko,"P",{});var Dr=i(zo);gl=s(Dr,"Construct a \u201Cfast\u201D GPT Tokenizer (backed by HuggingFace\u2019s "),Rs=r(Dr,"EM",{});var mu=i(Rs);_l=s(mu,"tokenizers"),mu.forEach(t),Tl=s(Dr,` library). Based on Byte-Pair-Encoding with the following peculiarities:`),Dr.forEach(t),vl=d(ko),Fo=r(ko,"UL",{});var jr=i(Fo);Vs=r(jr,"LI",{});var gu=i(Vs);kl=s(gu,"lower case all inputs"),gu.forEach(t),bl=d(jr),Ks=r(jr,"LI",{});var _u=i(Ks);wl=s(_u,"uses BERT\u2019s BasicTokenizer for pre-BPE tokenization"),_u.forEach(t),jr.forEach(t),Pl=d(ko),qo=r(ko,"P",{});var Hr=i(qo);yl=s(Hr,"This tokenizer inherits from "),Qn=r(Hr,"A",{href:!0});var Tu=i(Qn);Il=s(Tu,"PreTrainedTokenizerFast"),Tu.forEach(t),Ol=s(Hr,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Hr.forEach(t),ko.forEach(t),cr=d(o),At=r(o,"H2",{class:!0});var Sr=i(At);Jt=r(Sr,"A",{id:!0,class:!0,href:!0});var vu=i(Jt);Js=r(vu,"SPAN",{});var ku=i(Js);w(Co.$$.fragment,ku),ku.forEach(t),vu.forEach(t),Al=d(Sr),Xs=r(Sr,"SPAN",{});var bu=i(Xs);$l=s(bu,"OpenAI specific outputs"),bu.forEach(t),Sr.forEach(t),pr=d(o),$t=r(o,"DIV",{class:!0});var Lr=i($t);w(xo.$$.fragment,Lr),Gl=d(Lr),Qs=r(Lr,"P",{});var wu=i(Qs);Ml=s(wu,"Base class for outputs of models predicting if two sentences are consecutive or not."),wu.forEach(t),Lr.forEach(t),hr=d(o),Gt=r(o,"DIV",{class:!0});var Nr=i(Gt);w(Do.$$.fragment,Nr),El=d(Nr),Ys=r(Nr,"P",{});var Pu=i(Ys);zl=s(Pu,"Base class for outputs of models predicting if two sentences are consecutive or not."),Pu.forEach(t),Nr.forEach(t),ur=d(o),Mt=r(o,"H2",{class:!0});var Br=i(Mt);Xt=r(Br,"A",{id:!0,class:!0,href:!0});var yu=i(Xt);Zs=r(yu,"SPAN",{});var Iu=i(Zs);w(jo.$$.fragment,Iu),Iu.forEach(t),yu.forEach(t),Fl=d(Br),ea=r(Br,"SPAN",{});var Ou=i(ea);ql=s(Ou,"OpenAIGPTModel"),Ou.forEach(t),Br.forEach(t),fr=d(o),xe=r(o,"DIV",{class:!0});var pt=i(xe);w(Ho.$$.fragment,pt),Cl=d(pt),ta=r(pt,"P",{});var Au=i(ta);xl=s(Au,"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top."),Au.forEach(t),Dl=d(pt),So=r(pt,"P",{});var Wr=i(So);jl=s(Wr,"This model inherits from "),Yn=r(Wr,"A",{href:!0});var $u=i(Yn);Hl=s($u,"PreTrainedModel"),$u.forEach(t),Sl=s(Wr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wr.forEach(t),Ll=d(pt),Lo=r(pt,"P",{});var Ur=i(Lo);Nl=s(Ur,"This model is also a PyTorch "),No=r(Ur,"A",{href:!0,rel:!0});var Gu=i(No);Bl=s(Gu,"torch.nn.Module"),Gu.forEach(t),Wl=s(Ur,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ur.forEach(t),Ul=d(pt),Ne=r(pt,"DIV",{class:!0});var ht=i(Ne);w(Bo.$$.fragment,ht),Rl=d(ht),Et=r(ht,"P",{});var vs=i(Et);Vl=s(vs,"The "),Zn=r(vs,"A",{href:!0});var Mu=i(Zn);Kl=s(Mu,"OpenAIGPTModel"),Mu.forEach(t),Jl=s(vs," forward method, overrides the "),oa=r(vs,"CODE",{});var Eu=i(oa);Xl=s(Eu,"__call__"),Eu.forEach(t),Ql=s(vs," special method."),vs.forEach(t),Yl=d(ht),w(Qt.$$.fragment,ht),Zl=d(ht),na=r(ht,"P",{});var zu=i(na);ed=s(zu,"Example:"),zu.forEach(t),td=d(ht),w(Wo.$$.fragment,ht),ht.forEach(t),pt.forEach(t),mr=d(o),zt=r(o,"H2",{class:!0});var Rr=i(zt);Yt=r(Rr,"A",{id:!0,class:!0,href:!0});var Fu=i(Yt);sa=r(Fu,"SPAN",{});var qu=i(sa);w(Uo.$$.fragment,qu),qu.forEach(t),Fu.forEach(t),od=d(Rr),aa=r(Rr,"SPAN",{});var Cu=i(aa);nd=s(Cu,"OpenAIGPTLMHeadModel"),Cu.forEach(t),Rr.forEach(t),gr=d(o),De=r(o,"DIV",{class:!0});var ut=i(De);w(Ro.$$.fragment,ut),sd=d(ut),ra=r(ut,"P",{});var xu=i(ra);ad=s(xu,`OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),xu.forEach(t),rd=d(ut),Vo=r(ut,"P",{});var Vr=i(Vo);id=s(Vr,"This model inherits from "),es=r(Vr,"A",{href:!0});var Du=i(es);ld=s(Du,"PreTrainedModel"),Du.forEach(t),dd=s(Vr,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Vr.forEach(t),cd=d(ut),Ko=r(ut,"P",{});var Kr=i(Ko);pd=s(Kr,"This model is also a PyTorch "),Jo=r(Kr,"A",{href:!0,rel:!0});var ju=i(Jo);hd=s(ju,"torch.nn.Module"),ju.forEach(t),ud=s(Kr,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kr.forEach(t),fd=d(ut),Be=r(ut,"DIV",{class:!0});var ft=i(Be);w(Xo.$$.fragment,ft),md=d(ft),Ft=r(ft,"P",{});var ks=i(Ft);gd=s(ks,"The "),ts=r(ks,"A",{href:!0});var Hu=i(ts);_d=s(Hu,"OpenAIGPTLMHeadModel"),Hu.forEach(t),Td=s(ks," forward method, overrides the "),ia=r(ks,"CODE",{});var Su=i(ia);vd=s(Su,"__call__"),Su.forEach(t),kd=s(ks," special method."),ks.forEach(t),bd=d(ft),w(Zt.$$.fragment,ft),wd=d(ft),la=r(ft,"P",{});var Lu=i(la);Pd=s(Lu,"Example:"),Lu.forEach(t),yd=d(ft),w(Qo.$$.fragment,ft),ft.forEach(t),ut.forEach(t),_r=d(o),qt=r(o,"H2",{class:!0});var Jr=i(qt);eo=r(Jr,"A",{id:!0,class:!0,href:!0});var Nu=i(eo);da=r(Nu,"SPAN",{});var Bu=i(da);w(Yo.$$.fragment,Bu),Bu.forEach(t),Nu.forEach(t),Id=d(Jr),ca=r(Jr,"SPAN",{});var Wu=i(ca);Od=s(Wu,"OpenAIGPTDoubleHeadsModel"),Wu.forEach(t),Jr.forEach(t),Tr=d(o),je=r(o,"DIV",{class:!0});var mt=i(je);w(Zo.$$.fragment,mt),Ad=d(mt),pa=r(mt,"P",{});var Uu=i(pa);$d=s(Uu,`OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),Uu.forEach(t),Gd=d(mt),en=r(mt,"P",{});var Xr=i(en);Md=s(Xr,"This model inherits from "),os=r(Xr,"A",{href:!0});var Ru=i(os);Ed=s(Ru,"PreTrainedModel"),Ru.forEach(t),zd=s(Xr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xr.forEach(t),Fd=d(mt),tn=r(mt,"P",{});var Qr=i(tn);qd=s(Qr,"This model is also a PyTorch "),on=r(Qr,"A",{href:!0,rel:!0});var Vu=i(on);Cd=s(Vu,"torch.nn.Module"),Vu.forEach(t),xd=s(Qr,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qr.forEach(t),Dd=d(mt),We=r(mt,"DIV",{class:!0});var gt=i(We);w(nn.$$.fragment,gt),jd=d(gt),Ct=r(gt,"P",{});var bs=i(Ct);Hd=s(bs,"The "),ns=r(bs,"A",{href:!0});var Ku=i(ns);Sd=s(Ku,"OpenAIGPTDoubleHeadsModel"),Ku.forEach(t),Ld=s(bs," forward method, overrides the "),ha=r(bs,"CODE",{});var Ju=i(ha);Nd=s(Ju,"__call__"),Ju.forEach(t),Bd=s(bs," special method."),bs.forEach(t),Wd=d(gt),w(to.$$.fragment,gt),Ud=d(gt),ua=r(gt,"P",{});var Xu=i(ua);Rd=s(Xu,"Examples:"),Xu.forEach(t),Vd=d(gt),w(sn.$$.fragment,gt),gt.forEach(t),mt.forEach(t),vr=d(o),xt=r(o,"H2",{class:!0});var Yr=i(xt);oo=r(Yr,"A",{id:!0,class:!0,href:!0});var Qu=i(oo);fa=r(Qu,"SPAN",{});var Yu=i(fa);w(an.$$.fragment,Yu),Yu.forEach(t),Qu.forEach(t),Kd=d(Yr),ma=r(Yr,"SPAN",{});var Zu=i(ma);Jd=s(Zu,"OpenAIGPTForSequenceClassification"),Zu.forEach(t),Yr.forEach(t),kr=d(o),He=r(o,"DIV",{class:!0});var _t=i(He);w(rn.$$.fragment,_t),Xd=d(_t),Se=r(_t,"P",{});var et=i(Se);Qd=s(et,`The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer). 
`),ss=r(et,"A",{href:!0});var ef=i(ss);Yd=s(ef,"OpenAIGPTForSequenceClassification"),ef.forEach(t),Zd=s(et,` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `),ga=r(et,"CODE",{});var tf=i(ga);ec=s(tf,"pad_token_id"),tf.forEach(t),tc=s(et,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),_a=r(et,"CODE",{});var of=i(_a);oc=s(of,"pad_token_id"),of.forEach(t),nc=s(et,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Ta=r(et,"CODE",{});var nf=i(Ta);sc=s(nf,"inputs_embeds"),nf.forEach(t),ac=s(et,` are passed instead of `),va=r(et,"CODE",{});var sf=i(va);rc=s(sf,"input_ids"),sf.forEach(t),ic=s(et,", it does the same (take the last value in each row of the batch)."),et.forEach(t),lc=d(_t),ln=r(_t,"P",{});var Zr=i(ln);dc=s(Zr,"This model inherits from "),as=r(Zr,"A",{href:!0});var af=i(as);cc=s(af,"PreTrainedModel"),af.forEach(t),pc=s(Zr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zr.forEach(t),hc=d(_t),dn=r(_t,"P",{});var ei=i(dn);uc=s(ei,"This model is also a PyTorch "),cn=r(ei,"A",{href:!0,rel:!0});var rf=i(cn);fc=s(rf,"torch.nn.Module"),rf.forEach(t),mc=s(ei,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ei.forEach(t),gc=d(_t),Pe=r(_t,"DIV",{class:!0});var Je=i(Pe);w(pn.$$.fragment,Je),_c=d(Je),Dt=r(Je,"P",{});var ws=i(Dt);Tc=s(ws,"The "),rs=r(ws,"A",{href:!0});var lf=i(rs);vc=s(lf,"OpenAIGPTForSequenceClassification"),lf.forEach(t),kc=s(ws," forward method, overrides the "),ka=r(ws,"CODE",{});var df=i(ka);bc=s(df,"__call__"),df.forEach(t),wc=s(ws," special method."),ws.forEach(t),Pc=d(Je),w(no.$$.fragment,Je),yc=d(Je),ba=r(Je,"P",{});var cf=i(ba);Ic=s(cf,"Example of single-label classification:"),cf.forEach(t),Oc=d(Je),w(hn.$$.fragment,Je),Ac=d(Je),wa=r(Je,"P",{});var pf=i(wa);$c=s(pf,"Example of multi-label classification:"),pf.forEach(t),Gc=d(Je),w(un.$$.fragment,Je),Je.forEach(t),_t.forEach(t),br=d(o),jt=r(o,"H2",{class:!0});var ti=i(jt);so=r(ti,"A",{id:!0,class:!0,href:!0});var hf=i(so);Pa=r(hf,"SPAN",{});var uf=i(Pa);w(fn.$$.fragment,uf),uf.forEach(t),hf.forEach(t),Mc=d(ti),ya=r(ti,"SPAN",{});var ff=i(ya);Ec=s(ff,"TFOpenAIGPTModel"),ff.forEach(t),ti.forEach(t),wr=d(o),ye=r(o,"DIV",{class:!0});var tt=i(ye);w(mn.$$.fragment,tt),zc=d(tt),Ia=r(tt,"P",{});var mf=i(Ia);Fc=s(mf,"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top."),mf.forEach(t),qc=d(tt),gn=r(tt,"P",{});var oi=i(gn);Cc=s(oi,"This model inherits from "),is=r(oi,"A",{href:!0});var gf=i(is);xc=s(gf,"TFPreTrainedModel"),gf.forEach(t),Dc=s(oi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oi.forEach(t),jc=d(tt),_n=r(tt,"P",{});var ni=i(_n);Hc=s(ni,"This model is also a "),Tn=r(ni,"A",{href:!0,rel:!0});var _f=i(Tn);Sc=s(_f,"tf.keras.Model"),_f.forEach(t),Lc=s(ni,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ni.forEach(t),Nc=d(tt),w(ao.$$.fragment,tt),Bc=d(tt),Ue=r(tt,"DIV",{class:!0});var Tt=i(Ue);w(vn.$$.fragment,Tt),Wc=d(Tt),Ht=r(Tt,"P",{});var Ps=i(Ht);Uc=s(Ps,"The "),ls=r(Ps,"A",{href:!0});var Tf=i(ls);Rc=s(Tf,"TFOpenAIGPTModel"),Tf.forEach(t),Vc=s(Ps," forward method, overrides the "),Oa=r(Ps,"CODE",{});var vf=i(Oa);Kc=s(vf,"__call__"),vf.forEach(t),Jc=s(Ps," special method."),Ps.forEach(t),Xc=d(Tt),w(ro.$$.fragment,Tt),Qc=d(Tt),Aa=r(Tt,"P",{});var kf=i(Aa);Yc=s(kf,"Example:"),kf.forEach(t),Zc=d(Tt),w(kn.$$.fragment,Tt),Tt.forEach(t),tt.forEach(t),Pr=d(o),St=r(o,"H2",{class:!0});var si=i(St);io=r(si,"A",{id:!0,class:!0,href:!0});var bf=i(io);$a=r(bf,"SPAN",{});var wf=i($a);w(bn.$$.fragment,wf),wf.forEach(t),bf.forEach(t),ep=d(si),Ga=r(si,"SPAN",{});var Pf=i(Ga);tp=s(Pf,"TFOpenAIGPTLMHeadModel"),Pf.forEach(t),si.forEach(t),yr=d(o),Ie=r(o,"DIV",{class:!0});var ot=i(Ie);w(wn.$$.fragment,ot),op=d(ot),Ma=r(ot,"P",{});var yf=i(Ma);np=s(yf,`OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),yf.forEach(t),sp=d(ot),Pn=r(ot,"P",{});var ai=i(Pn);ap=s(ai,"This model inherits from "),ds=r(ai,"A",{href:!0});var If=i(ds);rp=s(If,"TFPreTrainedModel"),If.forEach(t),ip=s(ai,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ai.forEach(t),lp=d(ot),yn=r(ot,"P",{});var ri=i(yn);dp=s(ri,"This model is also a "),In=r(ri,"A",{href:!0,rel:!0});var Of=i(In);cp=s(Of,"tf.keras.Model"),Of.forEach(t),pp=s(ri,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ri.forEach(t),hp=d(ot),w(lo.$$.fragment,ot),up=d(ot),Re=r(ot,"DIV",{class:!0});var vt=i(Re);w(On.$$.fragment,vt),fp=d(vt),Lt=r(vt,"P",{});var ys=i(Lt);mp=s(ys,"The "),cs=r(ys,"A",{href:!0});var Af=i(cs);gp=s(Af,"TFOpenAIGPTLMHeadModel"),Af.forEach(t),_p=s(ys," forward method, overrides the "),Ea=r(ys,"CODE",{});var $f=i(Ea);Tp=s($f,"__call__"),$f.forEach(t),vp=s(ys," special method."),ys.forEach(t),kp=d(vt),w(co.$$.fragment,vt),bp=d(vt),za=r(vt,"P",{});var Gf=i(za);wp=s(Gf,"Example:"),Gf.forEach(t),Pp=d(vt),w(An.$$.fragment,vt),vt.forEach(t),ot.forEach(t),Ir=d(o),Nt=r(o,"H2",{class:!0});var ii=i(Nt);po=r(ii,"A",{id:!0,class:!0,href:!0});var Mf=i(po);Fa=r(Mf,"SPAN",{});var Ef=i(Fa);w($n.$$.fragment,Ef),Ef.forEach(t),Mf.forEach(t),yp=d(ii),qa=r(ii,"SPAN",{});var zf=i(qa);Ip=s(zf,"TFOpenAIGPTDoubleHeadsModel"),zf.forEach(t),ii.forEach(t),Or=d(o),Oe=r(o,"DIV",{class:!0});var nt=i(Oe);w(Gn.$$.fragment,nt),Op=d(nt),Ca=r(nt,"P",{});var Ff=i(Ca);Ap=s(Ff,`OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),Ff.forEach(t),$p=d(nt),Mn=r(nt,"P",{});var li=i(Mn);Gp=s(li,"This model inherits from "),ps=r(li,"A",{href:!0});var qf=i(ps);Mp=s(qf,"TFPreTrainedModel"),qf.forEach(t),Ep=s(li,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),li.forEach(t),zp=d(nt),En=r(nt,"P",{});var di=i(En);Fp=s(di,"This model is also a "),zn=r(di,"A",{href:!0,rel:!0});var Cf=i(zn);qp=s(Cf,"tf.keras.Model"),Cf.forEach(t),Cp=s(di,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),di.forEach(t),xp=d(nt),w(ho.$$.fragment,nt),Dp=d(nt),Ve=r(nt,"DIV",{class:!0});var kt=i(Ve);w(Fn.$$.fragment,kt),jp=d(kt),Bt=r(kt,"P",{});var Is=i(Bt);Hp=s(Is,"The "),hs=r(Is,"A",{href:!0});var xf=i(hs);Sp=s(xf,"TFOpenAIGPTDoubleHeadsModel"),xf.forEach(t),Lp=s(Is," forward method, overrides the "),xa=r(Is,"CODE",{});var Df=i(xa);Np=s(Df,"__call__"),Df.forEach(t),Bp=s(Is," special method."),Is.forEach(t),Wp=d(kt),w(uo.$$.fragment,kt),Up=d(kt),Da=r(kt,"P",{});var jf=i(Da);Rp=s(jf,"Examples:"),jf.forEach(t),Vp=d(kt),w(qn.$$.fragment,kt),kt.forEach(t),nt.forEach(t),Ar=d(o),Wt=r(o,"H2",{class:!0});var ci=i(Wt);fo=r(ci,"A",{id:!0,class:!0,href:!0});var Hf=i(fo);ja=r(Hf,"SPAN",{});var Sf=i(ja);w(Cn.$$.fragment,Sf),Sf.forEach(t),Hf.forEach(t),Kp=d(ci),Ha=r(ci,"SPAN",{});var Lf=i(Ha);Jp=s(Lf,"TFOpenAIGPTForSequenceClassification"),Lf.forEach(t),ci.forEach(t),$r=d(o),ie=r(o,"DIV",{class:!0});var Ge=i(ie);w(xn.$$.fragment,Ge),Xp=d(Ge),Sa=r(Ge,"P",{});var Nf=i(Sa);Qp=s(Nf,"The OpenAI GPT Model transformer with a sequence classification head on top (linear layer)."),Nf.forEach(t),Yp=d(Ge),us=r(Ge,"P",{});var zh=i(us);fs=r(zh,"A",{href:!0});var Bf=i(fs);Zp=s(Bf,"TFOpenAIGPTForSequenceClassification"),Bf.forEach(t),eh=s(zh,` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.`),zh.forEach(t),th=d(Ge),Ye=r(Ge,"P",{});var bt=i(Ye);oh=s(bt,`Since it does classification on the last token, it requires to know the position of the last token. If a `),La=r(bt,"CODE",{});var Wf=i(La);nh=s(Wf,"pad_token_id"),Wf.forEach(t),sh=s(bt,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Na=r(bt,"CODE",{});var Uf=i(Na);ah=s(Uf,"pad_token_id"),Uf.forEach(t),rh=s(bt,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Ba=r(bt,"CODE",{});var Rf=i(Ba);ih=s(Rf,"inputs_embeds"),Rf.forEach(t),lh=s(bt," are passed instead of "),Wa=r(bt,"CODE",{});var Vf=i(Wa);dh=s(Vf,"input_ids"),Vf.forEach(t),ch=s(bt,`, it does the same (take the last value in each row of the batch).`),bt.forEach(t),ph=d(Ge),Dn=r(Ge,"P",{});var pi=i(Dn);hh=s(pi,"This model inherits from "),ms=r(pi,"A",{href:!0});var Kf=i(ms);uh=s(Kf,"TFPreTrainedModel"),Kf.forEach(t),fh=s(pi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pi.forEach(t),mh=d(Ge),jn=r(Ge,"P",{});var hi=i(jn);gh=s(hi,"This model is also a "),Hn=r(hi,"A",{href:!0,rel:!0});var Jf=i(Hn);_h=s(Jf,"tf.keras.Model"),Jf.forEach(t),Th=s(hi,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hi.forEach(t),vh=d(Ge),w(mo.$$.fragment,Ge),kh=d(Ge),Ke=r(Ge,"DIV",{class:!0});var wt=i(Ke);w(Sn.$$.fragment,wt),bh=d(wt),Ut=r(wt,"P",{});var Os=i(Ut);wh=s(Os,"The "),gs=r(Os,"A",{href:!0});var Xf=i(gs);Ph=s(Xf,"TFOpenAIGPTForSequenceClassification"),Xf.forEach(t),yh=s(Os," forward method, overrides the "),Ua=r(Os,"CODE",{});var Qf=i(Ua);Ih=s(Qf,"__call__"),Qf.forEach(t),Oh=s(Os," special method."),Os.forEach(t),Ah=d(wt),w(go.$$.fragment,wt),$h=d(wt),Ra=r(wt,"P",{});var Yf=i(Ra);Gh=s(Yf,"Example:"),Yf.forEach(t),Mh=d(wt),w(Ln.$$.fragment,wt),wt.forEach(t),Ge.forEach(t),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(gm)),c(g,"id","openai-gpt"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#openai-gpt"),c(m,"class","relative group"),c(X,"id","overview"),c(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(X,"href","#overview"),c(G,"class","relative group"),c(Z,"href","https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf"),c(Z,"rel","nofollow"),c(R,"href","https://transformer.huggingface.co/doc/gpt"),c(R,"rel","nofollow"),c(ve,"href","https://huggingface.co/thomwolf"),c(ve,"rel","nofollow"),c(ke,"href","https://github.com/openai/finetune-transformer-lm"),c(ke,"rel","nofollow"),c(Un,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTTokenizer"),c(Rt,"id","transformers.OpenAIGPTConfig"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.OpenAIGPTConfig"),c(Pt,"class","relative group"),c(Rn,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTModel"),c(Vn,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTModel"),c(yo,"href","https://huggingface.co/openai-gpt"),c(yo,"rel","nofollow"),c(Kn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Jn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(qe,"class","docstring"),c(Vt,"id","transformers.OpenAIGPTTokenizer"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.OpenAIGPTTokenizer"),c(It,"class","relative group"),c(Xn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Bs,"class","docstring"),c(Ce,"class","docstring"),c(Kt,"id","transformers.OpenAIGPTTokenizerFast"),c(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kt,"href","#transformers.OpenAIGPTTokenizerFast"),c(Ot,"class","relative 
group"),c(Qn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Qe,"class","docstring"),c(Jt,"id","transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput"),c(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput"),c(At,"class","relative group"),c($t,"class","docstring"),c(Gt,"class","docstring"),c(Xt,"id","transformers.OpenAIGPTModel"),c(Xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xt,"href","#transformers.OpenAIGPTModel"),c(Mt,"class","relative group"),c(Yn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(No,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(No,"rel","nofollow"),c(Zn,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTModel"),c(Ne,"class","docstring"),c(xe,"class","docstring"),c(Yt,"id","transformers.OpenAIGPTLMHeadModel"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#transformers.OpenAIGPTLMHeadModel"),c(zt,"class","relative group"),c(es,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Jo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Jo,"rel","nofollow"),c(ts,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTLMHeadModel"),c(Be,"class","docstring"),c(De,"class","docstring"),c(eo,"id","transformers.OpenAIGPTDoubleHeadsModel"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.OpenAIGPTDoubleHeadsModel"),c(qt,"class","relative group"),c(os,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(on,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(on,"rel","nofollow"),c(ns,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTDoubleHeadsModel"),c(We,"class","docstring"),c(je,"class","docstring"),c(oo,"id","transformers.OpenAIGPTForSequenceClassification"),c(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(oo,"href","#transformers.OpenAIGPTForSequenceClassification"),c(xt,"class","relative group"),c(ss,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTForSequenceClassification"),c(as,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(cn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(cn,"rel","nofollow"),c(rs,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.OpenAIGPTForSequenceClassification"),c(Pe,"class","docstring"),c(He,"class","docstring"),c(so,"id","transformers.TFOpenAIGPTModel"),c(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(so,"href","#transformers.TFOpenAIGPTModel"),c(jt,"class","relative group"),c(is,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Tn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Tn,"rel","nofollow"),c(ls,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTModel"),c(Ue,"class","docstring"),c(ye,"class","docstring"),c(io,"id","transformers.TFOpenAIGPTLMHeadModel"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.TFOpenAIGPTLMHeadModel"),c(St,"class","relative group"),c(ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(In,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(In,"rel","nofollow"),c(cs,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTLMHeadModel"),c(Re,"class","docstring"),c(Ie,"class","docstring"),c(po,"id","transformers.TFOpenAIGPTDoubleHeadsModel"),c(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(po,"href","#transformers.TFOpenAIGPTDoubleHeadsModel"),c(Nt,"class","relative group"),c(ps,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(zn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(zn,"rel","nofollow"),c(hs,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTDoubleHeadsModel"),c(Ve,"class","docstring"),c(Oe,"class","docstring"),c(fo,"id","transformers.TFOpenAIGPTForSequenceClassification"),c(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fo,"href","#transformers.TFOpenAIGPTForSequenceClassification"),c(Wt,"class","relative 
group"),c(fs,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTForSequenceClassification"),c(ms,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Hn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Hn,"rel","nofollow"),c(gs,"href","/docs/transformers/v4.15.0/en/model_doc/gpt#transformers.TFOpenAIGPTForSequenceClassification"),c(Ke,"class","docstring"),c(ie,"class","docstring")},m(o,f){e(document.head,u),h(o,A,f),h(o,m,f),e(m,g),e(g,v),P(T,v,null),e(m,_),e(m,$),e($,he),h(o,J,f),h(o,G,f),e(G,X),e(X,D),P(Y,D,null),e(G,ue),e(G,j),e(j,fe),h(o,de,f),h(o,S,f),e(S,q),e(S,Z),e(Z,ee),e(S,M),h(o,z,f),h(o,oe,f),e(oe,B),h(o,ce,f),h(o,ne,f),e(ne,H),e(H,me),h(o,pe,f),h(o,E,f),e(E,ge),h(o,L,f),h(o,Q,f),e(Q,ae),e(ae,W),e(Q,_e),e(Q,te),e(te,C),e(te,re),e(re,U),e(te,Te),h(o,p,f),h(o,k,f),e(k,R),e(R,Me),e(k,Ee),h(o,F,f),h(o,se,f),e(se,ze),e(se,ve),e(ve,x),e(se,V),e(se,ke),e(ke,Fe),e(se,K),h(o,Ae,f),h(o,we,f),e(we,be),h(o,$e,f),h(o,Ze,f),e(Ze,ui),e(Ze,As),e(As,fi),e(Ze,mi),e(Ze,$s),e($s,gi),e(Ze,_i),e(Ze,Gs),e(Gs,Ti),e(Ze,vi),h(o,or,f),P(bo,o,f),h(o,nr,f),h(o,Le,f),e(Le,ki),e(Le,Ms),e(Ms,bi),e(Le,wi),e(Le,Es),e(Es,Pi),e(Le,yi),e(Le,Un),e(Un,Ii),e(Le,Oi),e(Le,zs),e(zs,Ai),e(Le,$i),h(o,sr,f),h(o,Pt,f),e(Pt,Rt),e(Rt,Fs),P(wo,Fs,null),e(Pt,Gi),e(Pt,qs),e(qs,Mi),h(o,ar,f),h(o,qe,f),P(Po,qe,null),e(qe,Ei),e(qe,at),e(at,zi),e(at,Rn),e(Rn,Fi),e(at,qi),e(at,Vn),e(Vn,Ci),e(at,xi),e(at,yo),e(yo,Di),e(at,ji),e(qe,Hi),e(qe,yt),e(yt,Si),e(yt,Kn),e(Kn,Li),e(yt,Ni),e(yt,Jn),e(Jn,Bi),e(yt,Wi),e(qe,Ui),e(qe,Cs),e(Cs,Ri),e(qe,Vi),P(Io,qe,null),h(o,rr,f),h(o,It,f),e(It,Vt),e(Vt,xs),P(Oo,xs,null),e(It,Ki),e(It,Ds),e(Ds,Ji),h(o,ir,f),h(o,Ce,f),P(Ao,Ce,null),e(Ce,Xi),e(Ce,js),e(js,Qi),e(Ce,Yi),e(Ce,$o),e($o,Hs),e(Hs,Zi),e($o,el),e($o,rt),e(rt,tl),e(rt,Ss),e(Ss,ol),e(rt,nl),e(rt,Ls),e(Ls,sl),e(rt,al),e(rt,Ns),e(Ns,rl),e(rt,il),e(Ce,ll),e(Ce,Go),e(Go,dl),e(Go,Xn),e(Xn,cl),e(Go,pl),e(Ce,hl),e(Ce,Bs),h(o,lr,f),h(o,Ot,f),e(Ot,Kt),e(Kt,Ws),P(Mo,Ws,null),e(Ot,ul),e(Ot,Us),e(Us,fl),h(o,dr,f),h(o,Qe,f),P(Eo,Qe,null),e(Qe,ml),e(Qe,zo),e(zo,gl),e(zo,Rs),e(Rs,_l),e(zo,Tl),e(Qe,vl),e(Qe,Fo),e(Fo,Vs),e(Vs,kl),e(Fo,bl),e(Fo,Ks),e(Ks,wl),e(Qe,Pl),e(Qe,qo),e(qo,yl),e(qo,Qn),e(Qn,Il),e(qo,Ol),h(o,cr,f),h(o,At,f),e(At,Jt),e(Jt,Js),P(Co,Js,null),e(At,Al),e(At,Xs),e(Xs,$l),h(o,pr,f),h(o,$t,f),P(xo,$t,null),e($t,Gl),e($t,Qs),e(Qs,Ml),h(o,hr,f),h(o,Gt,f),P(Do,Gt,null),e(Gt,El),e(Gt,Ys),e(Ys,zl),h(o,ur,f),h(o,Mt,f),e(Mt,Xt),e(Xt,Zs),P(jo,Zs,null),e(Mt,Fl),e(Mt,ea),e(ea,ql),h(o,fr,f),h(o,xe,f),P(Ho,xe,null),e(xe,Cl),e(xe,ta),e(ta,xl),e(xe,Dl),e(xe,So),e(So,jl),e(So,Yn),e(Yn,Hl),e(So,Sl),e(xe,Ll),e(xe,Lo),e(Lo,Nl),e(Lo,No),e(No,Bl),e(Lo,Wl),e(xe,Ul),e(xe,Ne),P(Bo,Ne,null),e(Ne,Rl),e(Ne,Et),e(Et,Vl),e(Et,Zn),e(Zn,Kl),e(Et,Jl),e(Et,oa),e(oa,Xl),e(Et,Ql),e(Ne,Yl),P(Qt,Ne,null),e(Ne,Zl),e(Ne,na),e(na,ed),e(Ne,td),P(Wo,Ne,null),h(o,mr,f),h(o,zt,f),e(zt,Yt),e(Yt,sa),P(Uo,sa,null),e(zt,od),e(zt,aa),e(aa,nd),h(o,gr,f),h(o,De,f),P(Ro,De,null),e(De,sd),e(De,ra),e(ra,ad),e(De,rd),e(De,Vo),e(Vo,id),e(Vo,es),e(es,ld),e(Vo,dd),e(De,cd),e(De,Ko),e(Ko,pd),e(Ko,Jo),e(Jo,hd),e(Ko,ud),e(De,fd),e(De,Be),P(Xo,Be,null),e(Be,md),e(Be,Ft),e(Ft,gd),e(Ft,ts),e(ts,_d),e(Ft,Td),e(Ft,ia),e(ia,vd),e(Ft,kd),e(Be,bd),P(Zt,Be,null),e(Be,wd),e(Be,la),e(la,Pd),e(Be,yd),P(Qo,Be,null),h(o,_r,f),h(o,qt,f),e(qt,eo),e(eo,da),P(Yo,da,null),e(qt,Id),e(qt,ca),e(ca,Od),h(o,Tr,f),h(o,je,f),P(Zo,je,null),e(je,Ad),e(je,pa),e(pa,$d),e(je,Gd),e(je,en),e(en,Md),e(en,os),e(os,Ed),e(en,zd),e(je,Fd),e(je,tn),e(tn,qd),e(tn,o
n),e(on,Cd),e(tn,xd),e(je,Dd),e(je,We),P(nn,We,null),e(We,jd),e(We,Ct),e(Ct,Hd),e(Ct,ns),e(ns,Sd),e(Ct,Ld),e(Ct,ha),e(ha,Nd),e(Ct,Bd),e(We,Wd),P(to,We,null),e(We,Ud),e(We,ua),e(ua,Rd),e(We,Vd),P(sn,We,null),h(o,vr,f),h(o,xt,f),e(xt,oo),e(oo,fa),P(an,fa,null),e(xt,Kd),e(xt,ma),e(ma,Jd),h(o,kr,f),h(o,He,f),P(rn,He,null),e(He,Xd),e(He,Se),e(Se,Qd),e(Se,ss),e(ss,Yd),e(Se,Zd),e(Se,ga),e(ga,ec),e(Se,tc),e(Se,_a),e(_a,oc),e(Se,nc),e(Se,Ta),e(Ta,sc),e(Se,ac),e(Se,va),e(va,rc),e(Se,ic),e(He,lc),e(He,ln),e(ln,dc),e(ln,as),e(as,cc),e(ln,pc),e(He,hc),e(He,dn),e(dn,uc),e(dn,cn),e(cn,fc),e(dn,mc),e(He,gc),e(He,Pe),P(pn,Pe,null),e(Pe,_c),e(Pe,Dt),e(Dt,Tc),e(Dt,rs),e(rs,vc),e(Dt,kc),e(Dt,ka),e(ka,bc),e(Dt,wc),e(Pe,Pc),P(no,Pe,null),e(Pe,yc),e(Pe,ba),e(ba,Ic),e(Pe,Oc),P(hn,Pe,null),e(Pe,Ac),e(Pe,wa),e(wa,$c),e(Pe,Gc),P(un,Pe,null),h(o,br,f),h(o,jt,f),e(jt,so),e(so,Pa),P(fn,Pa,null),e(jt,Mc),e(jt,ya),e(ya,Ec),h(o,wr,f),h(o,ye,f),P(mn,ye,null),e(ye,zc),e(ye,Ia),e(Ia,Fc),e(ye,qc),e(ye,gn),e(gn,Cc),e(gn,is),e(is,xc),e(gn,Dc),e(ye,jc),e(ye,_n),e(_n,Hc),e(_n,Tn),e(Tn,Sc),e(_n,Lc),e(ye,Nc),P(ao,ye,null),e(ye,Bc),e(ye,Ue),P(vn,Ue,null),e(Ue,Wc),e(Ue,Ht),e(Ht,Uc),e(Ht,ls),e(ls,Rc),e(Ht,Vc),e(Ht,Oa),e(Oa,Kc),e(Ht,Jc),e(Ue,Xc),P(ro,Ue,null),e(Ue,Qc),e(Ue,Aa),e(Aa,Yc),e(Ue,Zc),P(kn,Ue,null),h(o,Pr,f),h(o,St,f),e(St,io),e(io,$a),P(bn,$a,null),e(St,ep),e(St,Ga),e(Ga,tp),h(o,yr,f),h(o,Ie,f),P(wn,Ie,null),e(Ie,op),e(Ie,Ma),e(Ma,np),e(Ie,sp),e(Ie,Pn),e(Pn,ap),e(Pn,ds),e(ds,rp),e(Pn,ip),e(Ie,lp),e(Ie,yn),e(yn,dp),e(yn,In),e(In,cp),e(yn,pp),e(Ie,hp),P(lo,Ie,null),e(Ie,up),e(Ie,Re),P(On,Re,null),e(Re,fp),e(Re,Lt),e(Lt,mp),e(Lt,cs),e(cs,gp),e(Lt,_p),e(Lt,Ea),e(Ea,Tp),e(Lt,vp),e(Re,kp),P(co,Re,null),e(Re,bp),e(Re,za),e(za,wp),e(Re,Pp),P(An,Re,null),h(o,Ir,f),h(o,Nt,f),e(Nt,po),e(po,Fa),P($n,Fa,null),e(Nt,yp),e(Nt,qa),e(qa,Ip),h(o,Or,f),h(o,Oe,f),P(Gn,Oe,null),e(Oe,Op),e(Oe,Ca),e(Ca,Ap),e(Oe,$p),e(Oe,Mn),e(Mn,Gp),e(Mn,ps),e(ps,Mp),e(Mn,Ep),e(Oe,zp),e(Oe,En),e(En,Fp),e(En,zn),e(zn,qp),e(En,Cp),e(Oe,xp),P(ho,Oe,null),e(Oe,Dp),e(Oe,Ve),P(Fn,Ve,null),e(Ve,jp),e(Ve,Bt),e(Bt,Hp),e(Bt,hs),e(hs,Sp),e(Bt,Lp),e(Bt,xa),e(xa,Np),e(Bt,Bp),e(Ve,Wp),P(uo,Ve,null),e(Ve,Up),e(Ve,Da),e(Da,Rp),e(Ve,Vp),P(qn,Ve,null),h(o,Ar,f),h(o,Wt,f),e(Wt,fo),e(fo,ja),P(Cn,ja,null),e(Wt,Kp),e(Wt,Ha),e(Ha,Jp),h(o,$r,f),h(o,ie,f),P(xn,ie,null),e(ie,Xp),e(ie,Sa),e(Sa,Qp),e(ie,Yp),e(ie,us),e(us,fs),e(fs,Zp),e(us,eh),e(ie,th),e(ie,Ye),e(Ye,oh),e(Ye,La),e(La,nh),e(Ye,sh),e(Ye,Na),e(Na,ah),e(Ye,rh),e(Ye,Ba),e(Ba,ih),e(Ye,lh),e(Ye,Wa),e(Wa,dh),e(Ye,ch),e(ie,ph),e(ie,Dn),e(Dn,hh),e(Dn,ms),e(ms,uh),e(Dn,fh),e(ie,mh),e(ie,jn),e(jn,gh),e(jn,Hn),e(Hn,_h),e(jn,Th),e(ie,vh),P(mo,ie,null),e(ie,kh),e(ie,Ke),P(Sn,Ke,null),e(Ke,bh),e(Ke,Ut),e(Ut,wh),e(Ut,gs),e(gs,Ph),e(Ut,yh),e(Ut,Ua),e(Ua,Ih),e(Ut,Oh),e(Ke,Ah),P(go,Ke,null),e(Ke,$h),e(Ke,Ra),e(Ra,Gh),e(Ke,Mh),P(Ln,Ke,null),Gr=!0},p(o,[f]){const Nn={};f&2&&(Nn.$$scope={dirty:f,ctx:o}),Qt.$set(Nn);const Va={};f&2&&(Va.$$scope={dirty:f,ctx:o}),Zt.$set(Va);const Ka={};f&2&&(Ka.$$scope={dirty:f,ctx:o}),to.$set(Ka);const Ja={};f&2&&(Ja.$$scope={dirty:f,ctx:o}),no.$set(Ja);const Bn={};f&2&&(Bn.$$scope={dirty:f,ctx:o}),ao.$set(Bn);const Xa={};f&2&&(Xa.$$scope={dirty:f,ctx:o}),ro.$set(Xa);const Qa={};f&2&&(Qa.$$scope={dirty:f,ctx:o}),lo.$set(Qa);const Ya={};f&2&&(Ya.$$scope={dirty:f,ctx:o}),co.$set(Ya);const Wn={};f&2&&(Wn.$$scope={dirty:f,ctx:o}),ho.$set(Wn);const Za={};f&2&&(Za.$$scope={dirty:f,ctx:o}),uo.$set(Za);const er={};f&2&&(er.$$scope={dirty:f,ctx:o}),mo.$set(er);const 
tr={};f&2&&(tr.$$scope={dirty:f,ctx:o}),go.$set(tr)},i(o){Gr||(y(T.$$.fragment,o),y(Y.$$.fragment,o),y(bo.$$.fragment,o),y(wo.$$.fragment,o),y(Po.$$.fragment,o),y(Io.$$.fragment,o),y(Oo.$$.fragment,o),y(Ao.$$.fragment,o),y(Mo.$$.fragment,o),y(Eo.$$.fragment,o),y(Co.$$.fragment,o),y(xo.$$.fragment,o),y(Do.$$.fragment,o),y(jo.$$.fragment,o),y(Ho.$$.fragment,o),y(Bo.$$.fragment,o),y(Qt.$$.fragment,o),y(Wo.$$.fragment,o),y(Uo.$$.fragment,o),y(Ro.$$.fragment,o),y(Xo.$$.fragment,o),y(Zt.$$.fragment,o),y(Qo.$$.fragment,o),y(Yo.$$.fragment,o),y(Zo.$$.fragment,o),y(nn.$$.fragment,o),y(to.$$.fragment,o),y(sn.$$.fragment,o),y(an.$$.fragment,o),y(rn.$$.fragment,o),y(pn.$$.fragment,o),y(no.$$.fragment,o),y(hn.$$.fragment,o),y(un.$$.fragment,o),y(fn.$$.fragment,o),y(mn.$$.fragment,o),y(ao.$$.fragment,o),y(vn.$$.fragment,o),y(ro.$$.fragment,o),y(kn.$$.fragment,o),y(bn.$$.fragment,o),y(wn.$$.fragment,o),y(lo.$$.fragment,o),y(On.$$.fragment,o),y(co.$$.fragment,o),y(An.$$.fragment,o),y($n.$$.fragment,o),y(Gn.$$.fragment,o),y(ho.$$.fragment,o),y(Fn.$$.fragment,o),y(uo.$$.fragment,o),y(qn.$$.fragment,o),y(Cn.$$.fragment,o),y(xn.$$.fragment,o),y(mo.$$.fragment,o),y(Sn.$$.fragment,o),y(go.$$.fragment,o),y(Ln.$$.fragment,o),Gr=!0)},o(o){I(T.$$.fragment,o),I(Y.$$.fragment,o),I(bo.$$.fragment,o),I(wo.$$.fragment,o),I(Po.$$.fragment,o),I(Io.$$.fragment,o),I(Oo.$$.fragment,o),I(Ao.$$.fragment,o),I(Mo.$$.fragment,o),I(Eo.$$.fragment,o),I(Co.$$.fragment,o),I(xo.$$.fragment,o),I(Do.$$.fragment,o),I(jo.$$.fragment,o),I(Ho.$$.fragment,o),I(Bo.$$.fragment,o),I(Qt.$$.fragment,o),I(Wo.$$.fragment,o),I(Uo.$$.fragment,o),I(Ro.$$.fragment,o),I(Xo.$$.fragment,o),I(Zt.$$.fragment,o),I(Qo.$$.fragment,o),I(Yo.$$.fragment,o),I(Zo.$$.fragment,o),I(nn.$$.fragment,o),I(to.$$.fragment,o),I(sn.$$.fragment,o),I(an.$$.fragment,o),I(rn.$$.fragment,o),I(pn.$$.fragment,o),I(no.$$.fragment,o),I(hn.$$.fragment,o),I(un.$$.fragment,o),I(fn.$$.fragment,o),I(mn.$$.fragment,o),I(ao.$$.fragment,o),I(vn.$$.fragment,o),I(ro.$$.fragment,o),I(kn.$$.fragment,o),I(bn.$$.fragment,o),I(wn.$$.fragment,o),I(lo.$$.fragment,o),I(On.$$.fragment,o),I(co.$$.fragment,o),I(An.$$.fragment,o),I($n.$$.fragment,o),I(Gn.$$.fragment,o),I(ho.$$.fragment,o),I(Fn.$$.fragment,o),I(uo.$$.fragment,o),I(qn.$$.fragment,o),I(Cn.$$.fragment,o),I(xn.$$.fragment,o),I(mo.$$.fragment,o),I(Sn.$$.fragment,o),I(go.$$.fragment,o),I(Ln.$$.fragment,o),Gr=!1},d(o){t(u),o&&t(A),o&&t(m),O(T),o&&t(J),o&&t(G),O(Y),o&&t(de),o&&t(S),o&&t(z),o&&t(oe),o&&t(ce),o&&t(ne),o&&t(pe),o&&t(E),o&&t(L),o&&t(Q),o&&t(p),o&&t(k),o&&t(F),o&&t(se),o&&t(Ae),o&&t(we),o&&t($e),o&&t(Ze),o&&t(or),O(bo,o),o&&t(nr),o&&t(Le),o&&t(sr),o&&t(Pt),O(wo),o&&t(ar),o&&t(qe),O(Po),O(Io),o&&t(rr),o&&t(It),O(Oo),o&&t(ir),o&&t(Ce),O(Ao),o&&t(lr),o&&t(Ot),O(Mo),o&&t(dr),o&&t(Qe),O(Eo),o&&t(cr),o&&t(At),O(Co),o&&t(pr),o&&t($t),O(xo),o&&t(hr),o&&t(Gt),O(Do),o&&t(ur),o&&t(Mt),O(jo),o&&t(fr),o&&t(xe),O(Ho),O(Bo),O(Qt),O(Wo),o&&t(mr),o&&t(zt),O(Uo),o&&t(gr),o&&t(De),O(Ro),O(Xo),O(Zt),O(Qo),o&&t(_r),o&&t(qt),O(Yo),o&&t(Tr),o&&t(je),O(Zo),O(nn),O(to),O(sn),o&&t(vr),o&&t(xt),O(an),o&&t(kr),o&&t(He),O(rn),O(pn),O(no),O(hn),O(un),o&&t(br),o&&t(jt),O(fn),o&&t(wr),o&&t(ye),O(mn),O(ao),O(vn),O(ro),O(kn),o&&t(Pr),o&&t(St),O(bn),o&&t(yr),o&&t(Ie),O(wn),O(lo),O(On),O(co),O(An),o&&t(Ir),o&&t(Nt),O($n),o&&t(Or),o&&t(Oe),O(Gn),O(ho),O(Fn),O(uo),O(qn),o&&t(Ar),o&&t(Wt),O(Cn),o&&t($r),o&&t(ie),O(xn),O(mo),O(Sn),O(go),O(Ln)}}}const 
gm={local:"openai-gpt",sections:[{local:"overview",title:"Overview"},{local:"transformers.OpenAIGPTConfig",title:"OpenAIGPTConfig"},{local:"transformers.OpenAIGPTTokenizer",title:"OpenAIGPTTokenizer"},{local:"transformers.OpenAIGPTTokenizerFast",title:"OpenAIGPTTokenizerFast"},{local:"transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput",title:"OpenAI specific outputs"},{local:"transformers.OpenAIGPTModel",title:"OpenAIGPTModel"},{local:"transformers.OpenAIGPTLMHeadModel",title:"OpenAIGPTLMHeadModel"},{local:"transformers.OpenAIGPTDoubleHeadsModel",title:"OpenAIGPTDoubleHeadsModel"},{local:"transformers.OpenAIGPTForSequenceClassification",title:"OpenAIGPTForSequenceClassification"},{local:"transformers.TFOpenAIGPTModel",title:"TFOpenAIGPTModel"},{local:"transformers.TFOpenAIGPTLMHeadModel",title:"TFOpenAIGPTLMHeadModel"},{local:"transformers.TFOpenAIGPTDoubleHeadsModel",title:"TFOpenAIGPTDoubleHeadsModel"},{local:"transformers.TFOpenAIGPTForSequenceClassification",title:"TFOpenAIGPTForSequenceClassification"}],title:"OpenAI GPT"};function _m(N,u,A){let{fw:m}=u;return N.$$set=g=>{"fw"in g&&A(0,m=g.fw)},[m]}class ym extends Zf{constructor(u){super();em(this,u,_m,mm,tm,{fw:0})}}export{ym as default,gm as metadata};
9,947
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/rag.mdx-7a1a71af.js
import{S as eT,i as tT,s as oT,e as r,k as c,w as v,t,L as nT,c as s,d as n,m as l,a,x as b,h as o,b as i,J as e,g as _,y as T,q as w,o as k,B as q}from"../../chunks/vendor-b1433968.js";import{T as ge}from"../../chunks/Tip-c3840994.js";import{D as M}from"../../chunks/Docstring-ff504c58.js";import{C as rn}from"../../chunks/CodeBlock-a320dbd7.js";import{I as _e}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function rT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function sT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function aT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function dT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function iT($){let 
h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function cT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function lT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function hT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function pT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function uT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function mT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function gT($){let h,y,u,f,x;return{c(){h=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var p=a(h);y=o(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(p,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(n)},m(g,p){_(g,h,p),e(h,y),e(h,u),e(u,f),e(h,x)},d(g){g&&n(h)}}}function _T($){let 
h,y,u,f,x,g,p,R,Zd,La,$e,tt,Rr,Ct,ei,Fr,ti,Ia,sn,oi,Oa,ot,ni,St,ri,si,Wa,an,ai,Ha,dn,zr,di,Ba,nt,ii,Dt,ci,li,Qa,Ee,rt,$r,Lt,hi,Er,pi,Va,de,It,ui,me,cn,mi,gi,Gr,_i,fi,ln,vi,bi,hn,Ti,wi,ki,st,Ot,qi,Wt,yi,pn,xi,Ri,Fi,at,Ht,zi,Bt,$i,un,Ei,Gi,Ua,Ge,dt,Mr,Qt,Mi,Ar,Ai,Ka,Vt,it,Ut,ji,jr,Pi,Ya,Me,ct,Pr,Kt,Ni,Nr,Ci,Ja,Ae,Yt,Si,Cr,Di,Xa,Jt,Xt,Za,je,lt,Sr,Zt,Li,Dr,Ii,ed,X,eo,Oi,Lr,Wi,Hi,Ir,Bi,Qi,to,Vi,ht,oo,Ui,Or,Ki,Yi,pt,no,Ji,Pe,Xi,Wr,Zi,ec,Hr,tc,oc,nc,ut,ro,rc,so,sc,Br,ac,dc,td,Ne,mt,Qr,ao,ic,Vr,cc,od,D,io,lc,Ce,hc,mn,pc,uc,Ur,mc,gc,_c,gt,fc,Kr,vc,bc,ie,Tc,Yr,wc,kc,gn,qc,yc,Jr,xc,Rc,_n,Fc,zc,$c,A,Ec,fn,Gc,Mc,Xr,Ac,jc,Zr,Pc,Nc,es,Cc,Sc,ts,Dc,Lc,vn,Ic,Oc,os,Wc,Hc,bn,Bc,Qc,Tn,Vc,Uc,ns,Kc,Yc,Jc,co,Xc,wn,Zc,el,tl,lo,ol,ho,nl,rl,sl,ee,po,al,Se,dl,kn,il,cl,rs,ll,hl,pl,_t,ul,ss,ml,gl,uo,nd,De,ft,as,mo,_l,ds,fl,rd,E,go,vl,Le,bl,qn,Tl,wl,is,kl,ql,yl,vt,xl,cs,Rl,Fl,ls,zl,$l,ce,El,hs,Gl,Ml,yn,Al,jl,ps,Pl,Nl,xn,Cl,Sl,Dl,j,Ll,Rn,Il,Ol,us,Wl,Hl,ms,Bl,Ql,gs,Vl,Ul,_s,Kl,Yl,Fn,Jl,Xl,fs,Zl,eh,zn,th,oh,$n,nh,rh,vs,sh,ah,dh,_o,ih,En,ch,lh,hh,fo,ph,vo,uh,mh,gh,te,bo,_h,Ie,fh,Gn,vh,bh,bs,Th,wh,kh,bt,qh,Ts,yh,xh,To,Rh,Tt,wo,Fh,ko,zh,Mn,$h,Eh,sd,Oe,wt,ws,qo,Gh,ks,Mh,ad,G,yo,Ah,We,jh,An,Ph,Nh,qs,Ch,Sh,Dh,kt,Lh,ys,Ih,Oh,xs,Wh,Hh,le,Bh,Rs,Qh,Vh,jn,Uh,Kh,Fs,Yh,Jh,Pn,Xh,Zh,ep,P,tp,Nn,op,np,zs,rp,sp,$s,ap,dp,Es,ip,cp,Gs,lp,hp,Cn,pp,up,Ms,mp,gp,Sn,_p,fp,Dn,vp,bp,As,Tp,wp,kp,xo,qp,Ln,yp,xp,Rp,Ro,Fp,Fo,zp,$p,Ep,oe,zo,Gp,He,Mp,In,Ap,jp,js,Pp,Np,Cp,qt,Sp,Ps,Dp,Lp,$o,Ip,yt,Eo,Op,Ns,Wp,dd,Be,xt,Cs,Go,Hp,Ss,Bp,id,N,Mo,Qp,Qe,Vp,On,Up,Kp,Ds,Yp,Jp,Xp,Rt,Zp,Ls,eu,tu,he,ou,Is,nu,ru,Wn,su,au,Os,du,iu,Hn,cu,lu,hu,L,pu,Bn,uu,mu,Ws,gu,_u,Hs,fu,vu,Bs,bu,Tu,Qs,wu,ku,Qn,qu,yu,Vs,xu,Ru,Vn,Fu,zu,Us,$u,Eu,Gu,Ao,Mu,Un,Au,ju,Pu,jo,Nu,Po,Cu,Su,Du,Ks,Lu,Iu,ne,No,Ou,Ve,Wu,Kn,Hu,Bu,Ys,Qu,Vu,Uu,Ft,Ku,Js,Yu,Ju,Co,cd,Ue,zt,Xs,So,Xu,Zs,Zu,ld,F,Do,em,Ke,tm,Yn,om,nm,ea,rm,sm,am,$t,dm,ta,im,cm,oa,lm,hm,pe,pm,na,um,mm,Jn,gm,_m,ra,fm,vm,Xn,bm,Tm,wm,I,km,Zn,qm,ym,sa,xm,Rm,aa,Fm,zm,da,$m,Em,ia,Gm,Mm,er,Am,jm,ca,Pm,Nm,tr,Cm,Sm,la,Dm,Lm,Im,Lo,Om,or,Wm,Hm,Bm,Io,Qm,Oo,Vm,Um,Km,ha,Ym,Jm,re,Wo,Xm,Ye,Zm,nr,eg,tg,pa,og,ng,rg,Et,sg,ua,ag,dg,Ho,ig,Gt,Bo,cg,Qo,lg,rr,hg,pg,hd,Je,Mt,ma,Vo,ug,ga,mg,pd,z,Uo,gg,Xe,_g,sr,fg,vg,_a,bg,Tg,wg,At,kg,fa,qg,yg,va,xg,Rg,ue,Fg,ba,zg,$g,ar,Eg,Gg,Ta,Mg,Ag,dr,jg,Pg,Ng,O,Cg,ir,Sg,Dg,wa,Lg,Ig,ka,Og,Wg,qa,Hg,Bg,ya,Qg,Vg,cr,Ug,Kg,xa,Yg,Jg,lr,Xg,Zg,Ra,e_,t_,o_,Ko,n_,hr,r_,s_,a_,Yo,d_,Jo,i_,c_,l_,Fa,h_,p_,se,Xo,u_,Ze,m_,pr,g_,__,za,f_,v_,b_,jt,T_,$a,w_,k_,Zo,q_,Pt,en,y_,Ea,x_,ud;return g=new _e({}),Ct=new _e({}),Lt=new _e({}),It=new M({props:{name:"class transformers.RagConfig",anchor:"transformers.RagConfig",parameters:[{name:"vocab_size",val:" = None"},{name:"is_encoder_decoder",val:" = True"},{name:"prefix",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"title_sep",val:" = ' / '"},{name:"doc_sep",val:" = ' // '"},{name:"n_docs",val:" = 5"},{name:"max_combined_length",val:" = 300"},{name:"retrieval_vector_size",val:" = 768"},{name:"retrieval_batch_size",val:" = 8"},{name:"dataset",val:" = 'wiki_dpr'"},{name:"dataset_split",val:" = 'train'"},{name:"index_name",val:" = 'compressed'"},{name:"index_path",val:" = None"},{name:"passages_path",val:" = None"},{name:"use_dummy_dataset",val:" = False"},{name:"reduce_loss",val:" = False"},{name:"label_smoothing",val:" = 0.0"},{name:"do_deduplication",val:" = True"},{name:"exclude_bos_score",val:" = False"},{name:"do_marginalize",val:" = False"},{name:"output_retrieved",val:" = 
False"},{name:"use_cache",val:" = True"},{name:"forced_eos_token_id",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/configuration_rag.py#L84",parametersDescription:[{anchor:"transformers.RagConfig.title_sep",description:`<strong>title_sep</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot; / &quot;</code>) &#x2014; Separator inserted between the title and the text of the retrieved document when calling <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"title_sep"},{anchor:"transformers.RagConfig.doc_sep",description:`<strong>doc_sep</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot; // &quot;</code>) &#x2014; Separator inserted between the the text of the retrieved document and the original input when calling <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"doc_sep"},{anchor:"transformers.RagConfig.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of documents to retrieve.`,name:"n_docs"},{anchor:"transformers.RagConfig.max_combined_length",description:`<strong>max_combined_length</strong> (<code>int</code>, <em>optional</em>, defaults to 300) &#x2014; Max length of contextualized input returned by <code>__call__()</code>.`,name:"max_combined_length"},{anchor:"transformers.RagConfig.retrieval_vector_size",description:`<strong>retrieval_vector_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the document embeddings indexed by <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"retrieval_vector_size"},{anchor:"transformers.RagConfig.retrieval_batch_size",description:`<strong>retrieval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"retrieval_batch_size"},{anchor:"transformers.RagConfig.dataset",description:`<strong>dataset</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;wiki_dpr&quot;</code>) &#x2014; A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using <code>datasets.list_datasets()</code>).`,name:"dataset"},{anchor:"transformers.RagConfig.dataset_split",description:`<strong>dataset_split</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;train&quot;</code>) &#x2014; Which split of the <code>dataset</code> to load.`,name:"dataset_split"},{anchor:"transformers.RagConfig.index_name",description:`<strong>index_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;compressed&quot;</code>) &#x2014; The index name of the index associated with the <code>dataset</code>. One can choose between <code>&quot;legacy&quot;</code>, <code>&quot;exact&quot;</code> and <code>&quot;compressed&quot;</code>.`,name:"index_name"},{anchor:"transformers.RagConfig.index_path",description:`<strong>index_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to the serialized faiss index on disk. passages_path &#x2014; (<code>str</code>, <em>optional</em>): A path to text passages compatible with the faiss index. 
Required if using <code>LegacyIndex</code>`,name:"index_path"},{anchor:"transformers.RagConfig.use_dummy_dataset",description:`<strong>use_dummy_dataset</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to load a &#x201C;dummy&#x201D; variant of the dataset specified by <code>dataset</code>.`,name:"use_dummy_dataset"},{anchor:"transformers.RagConfig.label_smoothing",description:`<strong>label_smoothing</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Only relevant if <code>return_loss</code> is set to <code>True</code>. Controls the <code>epsilon</code> parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed.`,name:"label_smoothing"},{anchor:"transformers.RagConfig.do_marginalize",description:`<strong>do_marginalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, the logits are marginalized over all documents by making use of <code>torch.nn.functional.log_softmax</code>.`,name:"do_marginalize"},{anchor:"transformers.RagConfig.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to reduce the NLL loss using the <code>torch.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.RagConfig.do_deduplication",description:`<strong>do_deduplication</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to <code>False</code> if used while training with distributed backend.`,name:"do_deduplication"},{anchor:"transformers.RagConfig.exclude_bos_score",description:`<strong>exclude_bos_score</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to disregard the BOS token when computing the loss.`,name:"exclude_bos_score"},{anchor:"transformers.RagConfig.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; If set to <code>True</code>, <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code> are returned. See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.RagConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),Ot=new M({props:{name:"from_question_encoder_generator_configs",anchor:"transformers.RagConfig.from_question_encoder_generator_configs",parameters:[{name:"question_encoder_config",val:": PretrainedConfig"},{name:"generator_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/configuration_rag.py#L172",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig" >EncoderDecoderConfig</a></p> `}}),Ht=new M({props:{name:"to_dict",anchor:"transformers.RagConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/configuration_rag.py#L185",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),Qt=new _e({}),Ut=new M({props:{name:"as_target_tokenizer",anchor:"transformers.RagTokenizer.as_target_tokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/tokenization_rag.py#L71"}}),Kt=new _e({}),Yt=new M({props:{name:"class transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput",anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"doc_scores",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"retrieved_doc_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"retrieved_doc_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"question_encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"question_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"question_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"generator_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L41",parametersDescription:[{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, 
config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.`,name:"logits"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.`,name:"doc_scores"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.retrieved_doc_embeds",description:`<strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.`,name:"retrieved_doc_embeds"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.retrieved_doc_ids",description:`<strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; The indexes of the embedded documents retrieved by the retriever.`,name:"retrieved_doc_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.`,name:"context_attention_mask"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.question_encoder_last_hidden_state",description:`<strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden states at the output of the last layer of the question encoder pooled output of the 
model.`,name:"question_encoder_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.question_enc_hidden_states",description:`<strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.`,name:"question_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.question_enc_attentions",description:`<strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"question_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_enc_last_hidden_state",description:`<strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the generator encoder of the model.`,name:"generator_enc_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_enc_hidden_states",description:`<strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.`,name:"generator_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_enc_attentions",description:`<strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_dec_hidden_states",description:`<strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and 
one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.`,name:"generator_dec_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_dec_attentions",description:`<strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_dec_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_cross_attentions",description:`<strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"generator_cross_attentions"}]}}),Xt=new M({props:{name:"class transformers.models.rag.modeling_rag.RetrievAugLMOutput",anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput",parameters:[{name:"logits",val:": FloatTensor = None"},{name:"doc_scores",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"retrieved_doc_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"retrieved_doc_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"question_encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"question_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"question_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"generator_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L131",parametersDescription:[{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.`,name:"logits"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.`,name:"doc_scores"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.retrieved_doc_embeds",description:`<strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.`,name:"retrieved_doc_embeds"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.retrieved_doc_ids",description:`<strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; The indexes of the embedded documents retrieved by the retriever.`,name:"retrieved_doc_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.`,name:"context_attention_mask"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.question_encoder_last_hidden_state",description:`<strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden states at the output of the last layer of the question encoder pooled output of the 
model.`,name:"question_encoder_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.question_enc_hidden_states",description:`<strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.`,name:"question_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.question_enc_attentions",description:`<strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"question_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_enc_last_hidden_state",description:`<strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the generator encoder of the model.`,name:"generator_enc_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_enc_hidden_states",description:`<strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.`,name:"generator_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_enc_attentions",description:`<strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_dec_hidden_states",description:`<strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) 
of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.`,name:"generator_dec_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_dec_attentions",description:`<strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_dec_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_cross_attentions",description:`<strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"generator_cross_attentions"}]}}),Zt=new _e({}),eo=new M({props:{name:"class transformers.RagRetriever",anchor:"transformers.RagRetriever",parameters:[{name:"config",val:""},{name:"question_encoder_tokenizer",val:""},{name:"generator_tokenizer",val:""},{name:"index",val:" = None"},{name:"init_retrieval",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/retrieval_rag.py#L326",parametersDescription:[{anchor:"transformers.RagRetriever.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; The configuration of the RAG model this Retriever is used with. Contains parameters indicating which <code>Index</code> to build. You can load your own custom dataset with <code>config.index_name=&quot;custom&quot;</code> or use a canonical one (default) from the datasets library with <code>config.index_name=&quot;wiki_dpr&quot;</code> for example.`,name:"config"},{anchor:"transformers.RagRetriever.question_encoder_tokenizer",description:`<strong>question_encoder_tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that was used to tokenize the question. 
It is used to decode the question and then use the generator_tokenizer.`,name:"question_encoder_tokenizer"},{anchor:"transformers.RagRetriever.generator_tokenizer",description:`<strong>generator_tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for the generator part of the RagModel.`,name:"generator_tokenizer"},{anchor:"transformers.RagRetriever.index",description:`<strong>index</strong> (<code>Index</code>, optional, defaults to the one defined by the configuration) &#x2014; If specified, use this index instead of the one built using the configuration`,name:"index"}]}}),to=new rn({props:{code:`# To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') from transformers import RagRetriever retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', dataset="wiki_dpr", index_name='compressed') # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py from transformers import RagRetriever dataset = ... # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', indexed_dataset=dataset) # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py from transformers import RagRetriever dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', index_name='custom', passages_path=dataset_path, index_path=index_path) # To load the legacy index built originally for Rag's paper from transformers import RagRetriever retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', index_name='legacy'),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load the default &quot;wiki_dpr&quot; dataset with 21M passages from wikipedia (index name is &#x27;compressed&#x27; or &#x27;exact&#x27;)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&#x27;facebook/dpr-ctx_encoder-single-nq-base&#x27;</span>, dataset=<span class="hljs-string">&quot;wiki_dpr&quot;</span>, index_name=<span class="hljs-string">&#x27;compressed&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = ... 
<span class="hljs-comment"># dataset must be a datasets.Datasets object with columns &quot;title&quot;, &quot;text&quot; and &quot;embeddings&quot;, and it must have a faiss index</span> <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&#x27;facebook/dpr-ctx_encoder-single-nq-base&#x27;</span>, indexed_dataset=dataset) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>dataset_path = <span class="hljs-string">&quot;path/to/my/dataset&quot;</span> <span class="hljs-comment"># dataset saved via *dataset.save_to_disk(...)*</span> <span class="hljs-meta">&gt;&gt;&gt; </span>index_path = <span class="hljs-string">&quot;path/to/my/index.faiss&quot;</span> <span class="hljs-comment"># faiss index saved via *dataset.get_index(&quot;embeddings&quot;).save(...)*</span> <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&#x27;facebook/dpr-ctx_encoder-single-nq-base&#x27;</span>, index_name=<span class="hljs-string">&#x27;custom&#x27;</span>, passages_path=dataset_path, index_path=index_path) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load the legacy index built originally for Rag&#x27;s paper</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&#x27;facebook/dpr-ctx_encoder-single-nq-base&#x27;</span>, index_name=<span class="hljs-string">&#x27;legacy&#x27;</span>)`}}),oo=new M({props:{name:"init_retrieval",anchor:"transformers.RagRetriever.init_retrieval",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/retrieval_rag.py#L447"}}),no=new M({props:{name:"postprocess_docs",anchor:"transformers.RagRetriever.postprocess_docs",parameters:[{name:"docs",val:""},{name:"input_strings",val:""},{name:"prefix",val:""},{name:"n_docs",val:""},{name:"return_tensors",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/retrieval_rag.py#L455",parametersDescription:[{anchor:"transformers.RagRetriever.postprocess_docs.docs",description:`<strong>docs</strong> (<code>dict</code>) &#x2014; Retrieved documents.`,name:"docs"},{anchor:"transformers.RagRetriever.postprocess_docs.input_strings",description:`<strong>input_strings</strong> (<code>str</code>) &#x2014; Input strings decoded by <code>preprocess_query</code>.`,name:"input_strings"},{anchor:"transformers.RagRetriever.postprocess_docs.prefix",description:`<strong>prefix</strong> (<code>str</code>) &#x2014; Prefix added at the beginning of each input, typically used with T5-based models.`,name:"prefix"}],returnDescription:` <p>a tuple consisting of two elements: contextualized <code>input_ids</code> and a compatible <code>attention_mask</code>.</p> `,returnType:` <p><code>tuple(tensors)</code></p> `}}),ro=new M({props:{name:"retrieve",anchor:"transformers.RagRetriever.retrieve",parameters:[{name:"question_hidden_states",val:": 
ndarray"},{name:"n_docs",val:": int"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/retrieval_rag.py#L527",parametersDescription:[{anchor:"transformers.RagRetriever.retrieve.question_hidden_states",description:`<strong>question_hidden_states</strong> (<code>np.ndarray</code> of shape <code>(batch_size, vector_size)</code>) &#x2014; A batch of query vectors to retrieve with.`,name:"question_hidden_states"},{anchor:"transformers.RagRetriever.retrieve.n_docs",description:`<strong>n_docs</strong> (<code>int</code>) &#x2014; The number of docs retrieved per query.`,name:"n_docs"}],returnDescription:` <p>A tuple with the following objects:</p> <ul> <li><strong>retrieved_doc_embeds</strong> (<code>np.ndarray</code> of shape <code>(batch_size, n_docs, dim)</code>) \u2014 The retrieval embeddings of the retrieved docs per query.</li> <li><strong>doc_ids</strong> (<code>np.ndarray</code> of shape <code>(batch_size, n_docs)</code>) \u2014 The ids of the documents in the index</li> <li><strong>doc_dicts</strong> (<code>List[dict]</code>): The <code>retrieved_doc_embeds</code> examples per query.</li> </ul> `,returnType:` <p><code>Tuple[np.ndarray, np.ndarray, List[dict]]</code></p> `}}),ao=new _e({}),io=new M({props:{name:"class transformers.RagModel",anchor:"transformers.RagModel",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"question_encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"generator",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"retriever",val:": typing.Optional = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L482",parametersDescription:[{anchor:"transformers.RagModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RagModel.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.RagModel.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.RagModel.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),gt=new ge({props:{$$slots:{default:[rT]},$$scope:{ctx:$}}}),po=new M({props:{name:"forward",anchor:"transformers.RagModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L525",parametersDescription:[{anchor:"transformers.RagModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RagModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). 
<code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagModel">RagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.RagModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <em>None</em> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.RagModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.RagModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.RagModel.forward.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model has is not initialized with a <code>retriever</code> <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.RagModel.forward.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> \`<code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>. context_attention_mask (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> <code>context_attention_mask</code> has to be provided to the forward pass. 
<code>context_attention_mask</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.RagModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.RagModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RagModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RagModel.forward.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagModel.forward.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMOutput" >transformers.models.rag.modeling_rag.RetrievAugLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the 
question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMOutput" >transformers.models.rag.modeling_rag.RetrievAugLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_t=new ge({props:{$$slots:{default:[sT]},$$scope:{ctx:$}}}),uo=new rn({props:{code:`from transformers import RagTokenizer, RagRetriever, RagModel import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base") 
retriever = RagRetriever.from_pretrained("facebook/rag-token-base", index_name="exact", use_dummy_dataset=True) # initialize with RagRetriever to do everything in one forward call model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever) inputs = tokenizer("How many people live in Paris?", return_tensors="pt") outputs = model(input_ids=inputs["input_ids"]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, RagModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagModel.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>])`}}),mo=new _e({}),go=new M({props:{name:"class transformers.RagSequenceForGeneration",anchor:"transformers.RagSequenceForGeneration",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"question_encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"generator",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"retriever",val:": typing.Optional = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L724",parametersDescription:[{anchor:"transformers.RagSequenceForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RagSequenceForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.RagSequenceForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.RagSequenceForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),vt=new ge({props:{$$slots:{default:[aT]},$$scope:{ctx:$}}}),bo=new M({props:{name:"forward",anchor:"transformers.RagSequenceForGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"exclude_bos_score",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"labels",val:" = None"},{name:"n_docs",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L753",parametersDescription:[{anchor:"transformers.RagSequenceForGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RagSequenceForGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagSequenceForGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagModel">RagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.RagSequenceForGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <em>None</em> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.RagSequenceForGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.RagSequenceForGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.RagSequenceForGeneration.forward.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model has is not initialized with a <code>retriever</code> <code>doc_scores</code> has to be provided to the forward pass. 
<code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.RagSequenceForGeneration.forward.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> \`<code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>. context_attention_mask (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.RagSequenceForGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.RagSequenceForGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RagSequenceForGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RagSequenceForGeneration.forward.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagSequenceForGeneration.forward.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.RagSequenceForGeneration.forward.exclude_bos_score",description:`<strong>exclude_bos_score</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. 
If <code>True</code>, the score of the BOS token is disregarded when computing the loss.`,name:"exclude_bos_score"},{anchor:"transformers.RagSequenceForGeneration.forward.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>torch.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.RagSequenceForGeneration.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),bt=new ge({props:{$$slots:{default:[dT]},$$scope:{ctx:$}}}),To=new rn({props:{code:`from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True) # initialize with RagRetriever to do everything in one forward call model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) inputs = tokenizer("How many people live in Paris?", return_tensors="pt") with tokenizer.as_target_tokenizer(): targets = tokenizer("In Paris, there are 10 million people.", return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] outputs = model(input_ids=input_ids, labels=labels) # or use retriever separately model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True) # 1. Encode question_hidden_states = model.question_encoder(input_ids)[0] # 2. Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)).squeeze(1) # 3. 
Forward to generator outputs = model(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=labels),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, RagSequenceForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagSequenceForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> targets = tokenizer(<span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>labels = targets[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagSequenceForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = torch.bmm(question_hidden_states.unsqueeze(<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>].<span class="hljs-built_in">float</span>().transpose(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>)).squeeze(<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. 
Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores, decoder_input_ids=labels)`}}),wo=new M({props:{name:"generate",anchor:"transformers.RagSequenceForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"do_deduplication",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"num_beams",val:" = None"},{name:"n_docs",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L886",parametersDescription:[{anchor:"transformers.RagSequenceForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.RagSequenceForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagSequenceForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.RagSequenceForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>context_input_ids</code> and <code>context_attention_mask</code> have to be provided to the forward pass. 
They are returned by <code>__call__()</code>.`,name:"context_attention_mask"},{anchor:"transformers.RagSequenceForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> <p>If the model is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> are returned by <code>__call__()</code>.`,name:"doc_scores"},{anchor:"transformers.RagSequenceForGeneration.generate.do_deduplication",description:`<strong>do_deduplication</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to <code>False</code> if used while training with distributed backend.`,name:"do_deduplication"},{anchor:"transformers.RagSequenceForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate)</code> function, where we set <code>num_return_sequences</code> to <code>num_beams</code>.`,name:"num_return_sequences(int,"},{anchor:"transformers.RagSequenceForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.RagSequenceForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs &#x2014; Additional kwargs will be passed to <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate">generate()</a>.`,name:"n_docs"}],returnDescription:` <p>The generated sequences. 
The second dimension (sequence length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),qo=new _e({}),yo=new M({props:{name:"class transformers.RagTokenForGeneration",anchor:"transformers.RagTokenForGeneration",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"question_encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"generator",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"retriever",val:": typing.Optional = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L1115",parametersDescription:[{anchor:"transformers.RagTokenForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RagTokenForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.RagTokenForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.RagTokenForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),kt=new ge({props:{$$slots:{default:[iT]},$$scope:{ctx:$}}}),zo=new M({props:{name:"forward",anchor:"transformers.RagTokenForGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"do_marginalize",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"labels",val:" = None"},{name:"n_docs",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L1214",parametersDescription:[{anchor:"transformers.RagTokenForGeneration.forward.input_ids",description:`<strong>input_ids</strong> 
(<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RagTokenForGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagTokenForGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagModel">RagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.RagTokenForGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <em>None</em> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.RagTokenForGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.RagTokenForGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.RagTokenForGeneration.forward.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. 
If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.RagTokenForGeneration.forward.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>. context_attention_mask (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.RagTokenForGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.RagTokenForGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RagTokenForGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RagTokenForGeneration.forward.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>.
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagTokenForGeneration.forward.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.RagTokenForGeneration.forward.do_marginalize",description:`<strong>do_marginalize</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If <code>True</code>, the logits are marginalized over all documents by making use of <code>torch.nn.functional.log_softmax</code>.`,name:"do_marginalize"},{anchor:"transformers.RagTokenForGeneration.forward.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>torch.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.RagTokenForGeneration.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qt=new ge({props:{$$slots:{default:[cT]},$$scope:{ctx:$}}}),$o=new rn({props:{code:`from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) # initialize with RagRetriever to do everything in one forward call model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) inputs = tokenizer("How many people live in Paris?", return_tensors="pt") with tokenizer.as_target_tokenizer(): targets = tokenizer("In Paris, there are 10 million people.", return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] outputs = model(input_ids=input_ids, labels=labels) # or use retriever separately model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True) # 1. Encode question_hidden_states = model.question_encoder(input_ids)[0] # 2. Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)).squeeze(1) # 3. 
Forward to generator outputs = model(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=labels) # or directly generate generated = model.generate(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, RagTokenForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagTokenForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> targets = tokenizer(<span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>labels = targets[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagTokenForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. 
Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = torch.bmm(question_hidden_states.unsqueeze(<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>].<span class="hljs-built_in">float</span>().transpose(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>)).squeeze(<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores, decoder_input_ids=labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or directly generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_string = tokenizer.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),Eo=new M({props:{name:"generate",anchor:"transformers.RagTokenForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"max_length",val:" = None"},{name:"min_length",val:" = None"},{name:"early_stopping",val:" = None"},{name:"use_cache",val:" = None"},{name:"num_beams",val:" = None"},{name:"num_beam_groups",val:" = None"},{name:"diversity_penalty",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"length_penalty",val:" = None"},{name:"no_repeat_ngram_size",val:" = None"},{name:"encoder_no_repeat_ngram_size",val:" = None"},{name:"repetition_penalty",val:" = None"},{name:"bad_words_ids",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"n_docs",val:" = None"},{name:"prefix_allowed_tokens_fn",val:": typing.Callable[[int, torch.Tensor], typing.List[int]] = None"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = []"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = []"},{name:"forced_bos_token_id",val:": typing.Optional[int] = None"},{name:"forced_eos_token_id",val:": typing.Optional[int] = None"},{name:"remove_invalid_values",val:": typing.Optional[bool] = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_rag.py#L1343",parametersDescription:[{anchor:"transformers.RagTokenForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a 
prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.RagTokenForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagTokenForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.RagTokenForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>.`,name:"context_attention_mask"},{anchor:"transformers.RagTokenForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>.`,name:"doc_scores"},{anchor:"transformers.RagTokenForGeneration.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.RagTokenForGeneration.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.RagTokenForGeneration.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to stop the beam search when at least <code>num_beams</code> sentences are finished per batch.
use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"early_stopping"},{anchor:"transformers.RagTokenForGeneration.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty.</p> <p>Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.RagTokenForGeneration.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.RagTokenForGeneration.generate.encoder_no_repeat_ngram_size",description:`<strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.`,name:"encoder_no_repeat_ngram_size"},{anchor:"transformers.RagTokenForGeneration.generate.bad_words_ids(List[int],",description:`<strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids(List[int],"},{anchor:"transformers.RagTokenForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.RagTokenForGeneration.generate.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"},{anchor:"transformers.RagTokenForGeneration.generate.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. 
Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.`,name:"diversity_penalty"},{anchor:"transformers.RagTokenForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate)</code> function, where we set <code>num_return_sequences</code> to <code>num_beams</code>. decoder_start_token_id (<code>int</code>, <em>optional</em>): If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"num_return_sequences(int,"},{anchor:"transformers.RagTokenForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer. prefix_allowed_tokens_fn &#x2014; (<code>Callable[[int, torch.Tensor], List[int]]</code>, <em>optional</em>): If provided, this function constrains the beam search to allowed tokens only at each step. If not provided, no constraint is applied. This function takes 2 arguments <code>inputs_ids</code> and the batch ID <code>batch_id</code>. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens <code>inputs_ids</code> and the batch ID <code>batch_id</code>. This argument is useful for constrained generation conditioned on the prefix, as described in <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a>.`,name:"n_docs"},{anchor:"transformers.RagTokenForGeneration.generate.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; Custom logits processors that complement the default logits processors built from arguments and a model&#x2019;s config. If a logit processor is passed that is already created with the arguments or a model&#x2019;s config an error is thrown.`,name:"logits_processor"},{anchor:"transformers.RagTokenForGeneration.generate.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; Custom stopping criteria that complement the default stopping criteria built from arguments and a model&#x2019;s config. If a stopping criteria is passed that is already created with the arguments or a model&#x2019;s config an error is thrown.`,name:"stopping_criteria"},{anchor:"transformers.RagTokenForGeneration.generate.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>.
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"forced_eos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.remove_invalid_values",description:`<strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. Note that using <code>remove_invalid_values</code> can slow down generation.`,name:"remove_invalid_values"}],returnDescription:` <p>The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),Go=new _e({}),Mo=new M({props:{name:"class transformers.TFRagModel",anchor:"transformers.TFRagModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L467",parametersDescription:[{anchor:"transformers.TFRagModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.TFRagModel.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.TFRagModel.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.TFRagModel.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),Rt=new ge({props:{$$slots:{default:[lT]},$$scope:{ctx:$}}}),No=new M({props:{name:"call",anchor:"transformers.TFRagModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L518",parametersDescription:[{anchor:"transformers.TFRagModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.`,name:"input_ids"},{anchor:"transformers.TFRagModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). 
<code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagModel">TFRagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.TFRagModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <em>None</em> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.TFRagModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFRagModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.TFRagModel.call.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.TFRagModel.call.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>. context_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass.
<code>context_attention_mask</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.TFRagModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFRagModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFRagModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFRagModel.call.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.TFRagModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>TFRetrievAugLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFRagModel.call.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"}],returnDescription:` <p>A <code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the 
self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),Ft=new ge({props:{$$slots:{default:[hT]},$$scope:{ctx:$}}}),Co=new rn({props:{code:`from transformers import RagTokenizer, RagRetriever, TFRagModel import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base") retriever = RagRetriever.from_pretrained("facebook/rag-token-base", index_name="exact", use_dummy_dataset=True) # initialize with RagRetriever to do everything in one forward call model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) input_dict = tokenizer.prepare_seq2seq_batch("How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf") input_ids = input_dict["input_ids"] outputs = model(input_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, TFRagModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRagModel.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, retriever=retriever, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer.prepare_seq2seq_batch(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, <span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids)`}}),So=new _e({}),Do=new M({props:{name:"class transformers.TFRagSequenceForGeneration",anchor:"transformers.TFRagSequenceForGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L1421",parametersDescription:[{anchor:"transformers.TFRagSequenceForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.TFRagSequenceForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.TFRagSequenceForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.TFRagSequenceForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),$t=new ge({props:{$$slots:{default:[pT]},$$scope:{ctx:$}}}),Wo=new M({props:{name:"call",anchor:"transformers.TFRagSequenceForGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"},{name:"exclude_bos_score",val:" = None"},{name:"labels",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L1469",parametersDescription:[{anchor:"transformers.TFRagSequenceForGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.`,name:"input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagModel">TFRagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.TFRagSequenceForGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <em>None</em> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.TFRagSequenceForGeneration.call.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. 
<code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.TFRagSequenceForGeneration.call.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>. context_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFRagSequenceForGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFRagSequenceForGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFRagSequenceForGeneration.call.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. 
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.TFRagSequenceForGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>TFRetrievAugLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFRagSequenceForGeneration.call.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.TFRagSequenceForGeneration.call.exclude_bos_score",description:`<strong>exclude_bos_score</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the score of the BOS token is disregarded when computing the loss.`,name:"exclude_bos_score"},{anchor:"transformers.TFRagSequenceForGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See <a href="https://arxiv.org/pdf/2005.11401.pdf" rel="nofollow">https://arxiv.org/pdf/2005.11401.pdf</a> Section 2.1 for details about Rag-Sequence formulation. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"},{anchor:"transformers.TFRagSequenceForGeneration.call.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>tf.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.TFRagSequenceForGeneration.call.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>tf.Tensor</code>(int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),Et=new ge({props:{$$slots:{default:[uT]},$$scope:{ctx:$}}}),Ho=new rn({props:{code:`from transformers import RagTokenizer, RagRetriever, TFRagSequenceForGeneration tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True) # initialize with RagRetriever to do everything in one forward call model = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever, from_pt=True) input_dict = tokenizer.prepare_seq2seq_batch("How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf") outputs = model(input_dict, output_retrieved=True) # or use retriever separately # 1. Encode input_ids = input_dict["input_ids"] question_hidden_states = model.question_encoder(input_ids)[0] # 2. 
Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True), axis=1) # 3. Forward to generator outputs = model(inputs=None, context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=input_dict["labels"]) # or directly generate generated = model.generate(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, TFRagSequenceForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRagSequenceForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, retriever=retriever, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer.prepare_seq2seq_batch(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, <span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict, output_retrieved=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>], transpose_b=<span class="hljs-literal">True</span>), axis=<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. 
Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=<span class="hljs-literal">None</span>, context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores, decoder_input_ids=input_dict[<span class="hljs-string">&quot;labels&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or directly generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_string = tokenizer.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),Bo=new M({props:{name:"generate",anchor:"transformers.TFRagSequenceForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"do_deduplication",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"num_beams",val:" = None"},{name:"n_docs",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L1706",parametersDescription:[{anchor:"transformers.TFRagSequenceForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>: - 1 for tokens that are <strong>not masked</strong>, - 0 for tokens that are <strong>masked</strong>. <a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever. 
If the model is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>context_input_ids</code> and <code>context_attention_mask</code> have to be provided to the forward pass. They are returned by <code>__call__()</code>.`,name:"context_attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> are returned by <code>__call__()</code>.`,name:"doc_scores"},{anchor:"transformers.TFRagSequenceForGeneration.generate.do_deduplication",description:`<strong>do_deduplication</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to <code>False</code> if used while training with distributed backend.`,name:"do_deduplication"},{anchor:"transformers.TFRagSequenceForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate)</code> function, where we set <code>num_return_sequences</code> to <code>num_beams</code>.`,name:"num_return_sequences(int,"},{anchor:"transformers.TFRagSequenceForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.TFRagSequenceForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs &#x2014; Additional kwargs will be passed to <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate">generate()</a>`,name:"n_docs"}],returnDescription:` <p>The generated sequences. The second dimension (sequence length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),Vo=new _e({}),Uo=new M({props:{name:"class transformers.TFRagTokenForGeneration",anchor:"transformers.TFRagTokenForGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L731",parametersDescription:[{anchor:"transformers.TFRagTokenForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.TFRagTokenForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.TFRagTokenForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.TFRagTokenForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),At=new ge({props:{$$slots:{default:[mT]},$$scope:{ctx:$}}}),Xo=new M({props:{name:"call",anchor:"transformers.TFRagTokenForGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"},{name:"do_marginalize",val:" = None"},{name:"labels",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L877",parametersDescription:[{anchor:"transformers.TFRagTokenForGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.`,name:"input_ids"},{anchor:"transformers.TFRagTokenForGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagModel">TFRagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.TFRagTokenForGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <em>None</em> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.TFRagTokenForGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.TFRagTokenForGeneration.call.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. 
<code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.TFRagTokenForGeneration.call.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>. context_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.TFRagTokenForGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFRagTokenForGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFRagTokenForGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFRagTokenForGeneration.call.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. 
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.TFRagTokenForGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>TFRetrievAugLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFRagTokenForGeneration.call.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.TFRagTokenForGeneration.call.do_marginalize",description:`<strong>do_marginalize</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If <code>True</code>, the logits are marginalized over all documents by making use of <code>torch.nn.functional.log_softmax</code>.`,name:"do_marginalize"},{anchor:"transformers.TFRagTokenForGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss according to Rag-Token model formulation See <a href="https://arxiv.org/pdf/2005.11401.pdf" rel="nofollow">https://arxiv.org/pdf/2005.11401.pdf</a> Section 2.1 for details about Rag-Token formulation. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"},{anchor:"transformers.TFRagTokenForGeneration.call.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>tf.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.TFRagTokenForGeneration.call.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>tf.Tensor</code>(int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),jt=new ge({props:{$$slots:{default:[gT]},$$scope:{ctx:$}}}),Zo=new rn({props:{code:`from transformers import RagTokenizer, RagRetriever, TFRagTokenForGeneration tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) # initialize with RagRetriever to do everything in one forward call model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) input_dict = tokenizer.prepare_seq2seq_batch("How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf") outputs = model(input_dict, output_retrieved=True) # or use retriever separately # 1. Encode input_ids = input_dict["input_ids"] question_hidden_states = model.question_encoder(input_ids)[0] # 2. 
Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True), axis=1) # 3. Forward to generator outputs = model(inputs=None, context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=input_dict["labels"]) # or directly generate generated = model.generate(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, TFRagTokenForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRagTokenForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, retriever=retriever, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer.prepare_seq2seq_batch(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, <span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict, output_retrieved=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>], transpose_b=<span class="hljs-literal">True</span>), axis=<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. 
Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=<span class="hljs-literal">None</span>, context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores, decoder_input_ids=input_dict[<span class="hljs-string">&quot;labels&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or directly generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], doc_scores=doc_scores) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_string = tokenizer.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),en=new M({props:{name:"generate",anchor:"transformers.TFRagTokenForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"max_length",val:" = None"},{name:"min_length",val:" = None"},{name:"early_stopping",val:" = None"},{name:"use_cache",val:" = None"},{name:"num_beams",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"length_penalty",val:" = None"},{name:"no_repeat_ngram_size",val:" = None"},{name:"bad_words_ids",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"n_docs",val:" = None"},{name:"output_scores",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict_in_generate",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/rag/modeling_tf_rag.py#L1036",parametersDescription:[{anchor:"transformers.TFRagTokenForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.TFRagTokenForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>.`,name:"context_input_ids"},{anchor:"transformers.TFRagTokenForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>.`,name:"context_attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> <p>If the model has is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>.`,name:"doc_scores"},{anchor:"transformers.TFRagTokenForGeneration.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.TFRagTokenForGeneration.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.TFRagTokenForGeneration.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not. 
use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"early_stopping"},{anchor:"transformers.TFRagTokenForGeneration.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.TFRagTokenForGeneration.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.TFRagTokenForGeneration.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.TFRagTokenForGeneration.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty.</p> <p>Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.TFRagTokenForGeneration.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.TFRagTokenForGeneration.generate.bad_words_ids(List[int],",description:`<strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids(List[int],"},{anchor:"transformers.TFRagTokenForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.TFRagTokenForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate) function, where we set </code>num_return_sequences<code>to</code>num_beams<code>. 
decoder_start_token_id (</code>int\`, <em>optional</em>): If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"num_return_sequences(int,"},{anchor:"transformers.TFRagTokenForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer.`,name:"n_docs"},{anchor:"transformers.TFRagTokenForGeneration.generate.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.TFRagTokenForGeneration.generate.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.TFRagTokenForGeneration.generate.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.TFRagTokenForGeneration.generate.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <em>False</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. model_specific_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.`,name:"return_dict_in_generate"}],returnDescription:` <p>The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),{c(){h=r("meta"),y=c(),u=r("h1"),f=r("a"),x=r("span"),v(g.$$.fragment),p=c(),R=r("span"),Zd=t("RAG"),La=c(),$e=r("h2"),tt=r("a"),Rr=r("span"),v(Ct.$$.fragment),ei=c(),Fr=r("span"),ti=t("Overview"),Ia=c(),sn=r("p"),oi=t(`Retrieval-augmented generation (\u201CRAG\u201D) models combine the powers of pretrained dense retrieval (DPR) and sequence-to-sequence models. RAG models retrieve documents, pass them to a seq2seq model, then marginalize to generate outputs. 
The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.`),Oa=c(),ot=r("p"),ni=t("It is based on the paper "),St=r("a"),ri=t("Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"),si=t(` by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\xFCttler, Mike Lewis, Wen-tau Yih, Tim Rockt\xE4schel, Sebastian Riedel, Douwe Kiela.`),Wa=c(),an=r("p"),ai=t("The abstract from the paper is the following:"),Ha=c(),dn=r("p"),zr=r("em"),di=t(`Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. Pre-trained models with a differentiable access mechanism to explicit nonparametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) \u2014 models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.`),Ba=c(),nt=r("p"),ii=t("This model was contributed by "),Dt=r("a"),ci=t("ola13"),li=t("."),Qa=c(),Ee=r("h2"),rt=r("a"),$r=r("span"),v(Lt.$$.fragment),hi=c(),Er=r("span"),pi=t("RagConfig"),Va=c(),de=r("div"),v(It.$$.fragment),ui=c(),me=r("p"),cn=r("a"),mi=t("RagConfig"),gi=t(" stores the configuration of a "),Gr=r("em"),_i=t("RagModel"),fi=t(`. Configuration objects inherit from `),ln=r("a"),vi=t("PretrainedConfig"),bi=t(` and can be used to control the model outputs. Read the documentation from `),hn=r("a"),Ti=t("PretrainedConfig"),wi=t(" for more information."),ki=c(),st=r("div"),v(Ot.$$.fragment),qi=c(),Wt=r("p"),yi=t("Instantiate a "),pn=r("a"),xi=t("EncoderDecoderConfig"),Ri=t(` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),Fi=c(),at=r("div"),v(Ht.$$.fragment),zi=c(),Bt=r("p"),$i=t(`Serializes this instance to a Python dictionary. Override the default `),un=r("a"),Ei=t("to_dict()"),Gi=t("."),Ua=c(),Ge=r("h2"),dt=r("a"),Mr=r("span"),v(Qt.$$.fragment),Mi=c(),Ar=r("span"),Ai=t("RagTokenizer"),Ka=c(),Vt=r("div"),it=r("div"),v(Ut.$$.fragment),ji=c(),jr=r("p"),Pi=t(`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Ya=c(),Me=r("h2"),ct=r("a"),Pr=r("span"),v(Kt.$$.fragment),Ni=c(),Nr=r("span"),Ci=t("Rag specific outputs"),Ja=c(),Ae=r("div"),v(Yt.$$.fragment),Si=c(),Cr=r("p"),Di=t("Base class for retriever augmented marginalized models outputs."),Xa=c(),Jt=r("div"),v(Xt.$$.fragment),Za=c(),je=r("h2"),lt=r("a"),Sr=r("span"),v(Zt.$$.fragment),Li=c(),Dr=r("span"),Ii=t("RagRetriever"),ed=c(),X=r("div"),v(eo.$$.fragment),Oi=c(),Lr=r("p"),Wi=t(`Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel.`),Hi=c(),Ir=r("p"),Bi=t("Examples:"),Qi=c(),v(to.$$.fragment),Vi=c(),ht=r("div"),v(oo.$$.fragment),Ui=c(),Or=r("p"),Ki=t("Retriever initialization function. It loads the index into memory."),Yi=c(),pt=r("div"),v(no.$$.fragment),Ji=c(),Pe=r("p"),Xi=t("Postprocessing retrieved "),Wr=r("code"),Zi=t("docs"),ec=t(" and combining them with "),Hr=r("code"),tc=t("input_strings"),oc=t("."),nc=c(),ut=r("div"),v(ro.$$.fragment),rc=c(),so=r("p"),sc=t("Retrieves documents for specified "),Br=r("code"),ac=t("question_hidden_states"),dc=t("."),td=c(),Ne=r("h2"),mt=r("a"),Qr=r("span"),v(ao.$$.fragment),ic=c(),Vr=r("span"),cc=t("RagModel"),od=c(),D=r("div"),v(io.$$.fragment),lc=c(),Ce=r("p"),hc=t("The "),mn=r("a"),pc=t("RagModel"),uc=t(" forward method, overrides the "),Ur=r("code"),mc=t("__call__"),gc=t(" special method."),_c=c(),v(gt.$$.fragment),fc=c(),Kr=r("p"),vc=t(`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),bc=c(),ie=r("p"),Tc=t("The question encoder can be any "),Yr=r("em"),wc=t("autoencoding"),kc=t(" model, preferably "),gn=r("a"),qc=t("DPRQuestionEncoder"),yc=t(`, and the generator can be any `),Jr=r("em"),xc=t("seq2seq"),Rc=t(" model, preferably "),_n=r("a"),Fc=t("BartForConditionalGeneration"),zc=t("."),$c=c(),A=r("p"),Ec=t("The model can be initialized with a "),fn=r("a"),Gc=t("RagRetriever"),Mc=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Xr=r("em"),Ac=t("autoencoding"),jc=t(" model as the "),Zr=r("code"),Pc=t("question_encoder"),Nc=t(" and any "),es=r("em"),Cc=t("seq2seq"),Sc=t(` model with language model head as the `),ts=r("code"),Dc=t("generator"),Lc=t(". It has been tested with "),vn=r("a"),Ic=t("DPRQuestionEncoder"),Oc=t(" as the "),os=r("code"),Wc=t("question_encoder"),Hc=t(` and `),bn=r("a"),Bc=t("BartForConditionalGeneration"),Qc=t(" or "),Tn=r("a"),Vc=t("T5ForConditionalGeneration"),Uc=t(` as the `),ns=r("code"),Kc=t("generator"),Yc=t("."),Jc=c(),co=r("p"),Xc=t("This model inherits from "),wn=r("a"),Zc=t("PreTrainedModel"),el=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tl=c(),lo=r("p"),ol=t("This model is also a PyTorch "),ho=r("a"),nl=t("torch.nn.Module"),rl=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sl=c(),ee=r("div"),v(po.$$.fragment),al=c(),Se=r("p"),dl=t("The "),kn=r("a"),il=t("RagModel"),cl=t(" forward method, overrides the "),rs=r("code"),ll=t("__call__"),hl=t(" special method."),pl=c(),v(_t.$$.fragment),ul=c(),ss=r("p"),ml=t("Example:"),gl=c(),v(uo.$$.fragment),nd=c(),De=r("h2"),ft=r("a"),as=r("span"),v(mo.$$.fragment),_l=c(),ds=r("span"),fl=t("RagSequenceForGeneration"),rd=c(),E=r("div"),v(go.$$.fragment),vl=c(),Le=r("p"),bl=t("The "),qn=r("a"),Tl=t("RagSequenceForGeneration"),wl=t(" forward method, overrides the "),is=r("code"),kl=t("__call__"),ql=t(" special method."),yl=c(),v(vt.$$.fragment),xl=c(),cs=r("p"),Rl=t("A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),Fl=c(),ls=r("p"),zl=t(`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),$l=c(),ce=r("p"),El=t("The question encoder can be any "),hs=r("em"),Gl=t("autoencoding"),Ml=t(" model, preferably "),yn=r("a"),Al=t("DPRQuestionEncoder"),jl=t(`, and the generator can be any `),ps=r("em"),Pl=t("seq2seq"),Nl=t(" model, preferably "),xn=r("a"),Cl=t("BartForConditionalGeneration"),Sl=t("."),Dl=c(),j=r("p"),Ll=t("The model can be initialized with a "),Rn=r("a"),Il=t("RagRetriever"),Ol=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),us=r("em"),Wl=t("autoencoding"),Hl=t(" model as the "),ms=r("code"),Bl=t("question_encoder"),Ql=t(" and any "),gs=r("em"),Vl=t("seq2seq"),Ul=t(` model with language model head as the `),_s=r("code"),Kl=t("generator"),Yl=t(". It has been tested with "),Fn=r("a"),Jl=t("DPRQuestionEncoder"),Xl=t(" as the "),fs=r("code"),Zl=t("question_encoder"),eh=t(` and `),zn=r("a"),th=t("BartForConditionalGeneration"),oh=t(" or "),$n=r("a"),nh=t("T5ForConditionalGeneration"),rh=t(` as the `),vs=r("code"),sh=t("generator"),ah=t("."),dh=c(),_o=r("p"),ih=t("This model inherits from "),En=r("a"),ch=t("PreTrainedModel"),lh=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hh=c(),fo=r("p"),ph=t("This model is also a PyTorch "),vo=r("a"),uh=t("torch.nn.Module"),mh=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gh=c(),te=r("div"),v(bo.$$.fragment),_h=c(),Ie=r("p"),fh=t("The "),Gn=r("a"),vh=t("RagSequenceForGeneration"),bh=t(" forward method, overrides the "),bs=r("code"),Th=t("__call__"),wh=t(" special method."),kh=c(),v(bt.$$.fragment),qh=c(),Ts=r("p"),yh=t("Example:"),xh=c(),v(To.$$.fragment),Rh=c(),Tt=r("div"),v(wo.$$.fragment),Fh=c(),ko=r("p"),zh=t(`Implements RAG sequence \u201Cthorough\u201D decoding. 
Read the `),Mn=r("a"),$h=t("generate()"),Eh=t("` documentation for more information on how to\nset other generate input parameters."),sd=c(),Oe=r("h2"),wt=r("a"),ws=r("span"),v(qo.$$.fragment),Gh=c(),ks=r("span"),Mh=t("RagTokenForGeneration"),ad=c(),G=r("div"),v(yo.$$.fragment),Ah=c(),We=r("p"),jh=t("The "),An=r("a"),Ph=t("RagTokenForGeneration"),Nh=t(" forward method, overrides the "),qs=r("code"),Ch=t("__call__"),Sh=t(" special method."),Dh=c(),v(kt.$$.fragment),Lh=c(),ys=r("p"),Ih=t("A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),Oh=c(),xs=r("p"),Wh=t(`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Hh=c(),le=r("p"),Bh=t("The question encoder can be any "),Rs=r("em"),Qh=t("autoencoding"),Vh=t(" model, preferably "),jn=r("a"),Uh=t("DPRQuestionEncoder"),Kh=t(`, and the generator can be any `),Fs=r("em"),Yh=t("seq2seq"),Jh=t(" model, preferably "),Pn=r("a"),Xh=t("BartForConditionalGeneration"),Zh=t("."),ep=c(),P=r("p"),tp=t("The model can be initialized with a "),Nn=r("a"),op=t("RagRetriever"),np=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),zs=r("em"),rp=t("autoencoding"),sp=t(" model as the "),$s=r("code"),ap=t("question_encoder"),dp=t(" and any "),Es=r("em"),ip=t("seq2seq"),cp=t(` model with language model head as the `),Gs=r("code"),lp=t("generator"),hp=t(". It has been tested with "),Cn=r("a"),pp=t("DPRQuestionEncoder"),up=t(" as the "),Ms=r("code"),mp=t("question_encoder"),gp=t(` and `),Sn=r("a"),_p=t("BartForConditionalGeneration"),fp=t(" or "),Dn=r("a"),vp=t("T5ForConditionalGeneration"),bp=t(` as the `),As=r("code"),Tp=t("generator"),wp=t("."),kp=c(),xo=r("p"),qp=t("This model inherits from "),Ln=r("a"),yp=t("PreTrainedModel"),xp=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rp=c(),Ro=r("p"),Fp=t("This model is also a PyTorch "),Fo=r("a"),zp=t("torch.nn.Module"),$p=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ep=c(),oe=r("div"),v(zo.$$.fragment),Gp=c(),He=r("p"),Mp=t("The "),In=r("a"),Ap=t("RagTokenForGeneration"),jp=t(" forward method, overrides the "),js=r("code"),Pp=t("__call__"),Np=t(" special method."),Cp=c(),v(qt.$$.fragment),Sp=c(),Ps=r("p"),Dp=t("Example:"),Lp=c(),v($o.$$.fragment),Ip=c(),yt=r("div"),v(Eo.$$.fragment),Op=c(),Ns=r("p"),Wp=t("Implements RAG token decoding."),dd=c(),Be=r("h2"),xt=r("a"),Cs=r("span"),v(Go.$$.fragment),Hp=c(),Ss=r("span"),Bp=t("TFRagModel"),id=c(),N=r("div"),v(Mo.$$.fragment),Qp=c(),Qe=r("p"),Vp=t("The "),On=r("a"),Up=t("TFRagModel"),Kp=t(" forward method, overrides the "),Ds=r("code"),Yp=t("__call__"),Jp=t(" special method."),Xp=c(),v(Rt.$$.fragment),Zp=c(),Ls=r("p"),eu=t(`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. 
The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),tu=c(),he=r("p"),ou=t("The question encoder can be any "),Is=r("em"),nu=t("autoencoding"),ru=t(" model, preferably "),Wn=r("a"),su=t("TFDPRQuestionEncoder"),au=t(`, and the generator can be any `),Os=r("em"),du=t("seq2seq"),iu=t(" model, preferably "),Hn=r("a"),cu=t("TFBartForConditionalGeneration"),lu=t("."),hu=c(),L=r("p"),pu=t("The model can be initialized with a "),Bn=r("a"),uu=t("RagRetriever"),mu=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Ws=r("em"),gu=t("autoencoding"),_u=t(" model as the "),Hs=r("code"),fu=t("question_encoder"),vu=t(" and any "),Bs=r("em"),bu=t("seq2seq"),Tu=t(` model with language model head as the `),Qs=r("code"),wu=t("generator"),ku=t(". It has been tested with "),Qn=r("a"),qu=t("TFDPRQuestionEncoder"),yu=t(" as the "),Vs=r("code"),xu=t("question_encoder"),Ru=t(` and `),Vn=r("a"),Fu=t("TFBartForConditionalGeneration"),zu=t(" as the "),Us=r("code"),$u=t("generator"),Eu=t("."),Gu=c(),Ao=r("p"),Mu=t("This model inherits from "),Un=r("a"),Au=t("TFPreTrainedModel"),ju=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pu=c(),jo=r("p"),Nu=t("This model is also a Tensorflow "),Po=r("a"),Cu=t("tf.keras.Model"),Su=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Du=c(),Ks=r("p"),Lu=t(`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Iu=c(),ne=r("div"),v(No.$$.fragment),Ou=c(),Ve=r("p"),Wu=t("The "),Kn=r("a"),Hu=t("TFRagModel"),Bu=t(" forward method, overrides the "),Ys=r("code"),Qu=t("__call__"),Vu=t(" special method."),Uu=c(),v(Ft.$$.fragment),Ku=c(),Js=r("p"),Yu=t("Example:"),Ju=c(),v(Co.$$.fragment),cd=c(),Ue=r("h2"),zt=r("a"),Xs=r("span"),v(So.$$.fragment),Xu=c(),Zs=r("span"),Zu=t("TFRagSequenceForGeneration"),ld=c(),F=r("div"),v(Do.$$.fragment),em=c(),Ke=r("p"),tm=t("The "),Yn=r("a"),om=t("TFRagSequenceForGeneration"),nm=t(" forward method, overrides the "),ea=r("code"),rm=t("__call__"),sm=t(" special method."),am=c(),v($t.$$.fragment),dm=c(),ta=r("p"),im=t("A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),cm=c(),oa=r("p"),lm=t(`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),hm=c(),pe=r("p"),pm=t("The question encoder can be any "),na=r("em"),um=t("autoencoding"),mm=t(" model, preferably "),Jn=r("a"),gm=t("TFDPRQuestionEncoder"),_m=t(`, and the generator can be any `),ra=r("em"),fm=t("seq2seq"),vm=t(" model, preferably "),Xn=r("a"),bm=t("TFBartForConditionalGeneration"),Tm=t("."),wm=c(),I=r("p"),km=t("The model can be initialized with a "),Zn=r("a"),qm=t("RagRetriever"),ym=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. 
The model is compatible any `),sa=r("em"),xm=t("autoencoding"),Rm=t(" model as the "),aa=r("code"),Fm=t("question_encoder"),zm=t(" and any "),da=r("em"),$m=t("seq2seq"),Em=t(` model with language model head as the `),ia=r("code"),Gm=t("generator"),Mm=t(". It has been tested with "),er=r("a"),Am=t("TFDPRQuestionEncoder"),jm=t(" as the "),ca=r("code"),Pm=t("question_encoder"),Nm=t(` and `),tr=r("a"),Cm=t("TFBartForConditionalGeneration"),Sm=t(" as the "),la=r("code"),Dm=t("generator"),Lm=t("."),Im=c(),Lo=r("p"),Om=t("This model inherits from "),or=r("a"),Wm=t("TFPreTrainedModel"),Hm=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bm=c(),Io=r("p"),Qm=t("This model is also a Tensorflow "),Oo=r("a"),Vm=t("tf.keras.Model"),Um=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Km=c(),ha=r("p"),Ym=t(`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Jm=c(),re=r("div"),v(Wo.$$.fragment),Xm=c(),Ye=r("p"),Zm=t("The "),nr=r("a"),eg=t("TFRagSequenceForGeneration"),tg=t(" forward method, overrides the "),pa=r("code"),og=t("__call__"),ng=t(" special method."),rg=c(),v(Et.$$.fragment),sg=c(),ua=r("p"),ag=t("Example:"),dg=c(),v(Ho.$$.fragment),ig=c(),Gt=r("div"),v(Bo.$$.fragment),cg=c(),Qo=r("p"),lg=t(`Implements RAG sequence \u201Cthorough\u201D decoding. Read the `),rr=r("a"),hg=t("generate()"),pg=t("` documentation for more information on how to\nset other generate input parameters"),hd=c(),Je=r("h2"),Mt=r("a"),ma=r("span"),v(Vo.$$.fragment),ug=c(),ga=r("span"),mg=t("TFRagTokenForGeneration"),pd=c(),z=r("div"),v(Uo.$$.fragment),gg=c(),Xe=r("p"),_g=t("The "),sr=r("a"),fg=t("TFRagTokenForGeneration"),vg=t(" forward method, overrides the "),_a=r("code"),bg=t("__call__"),Tg=t(" special method."),wg=c(),v(At.$$.fragment),kg=c(),fa=r("p"),qg=t("A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),yg=c(),va=r("p"),xg=t(`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Rg=c(),ue=r("p"),Fg=t("The question encoder can be any "),ba=r("em"),zg=t("autoencoding"),$g=t(" model, preferably "),ar=r("a"),Eg=t("TFDPRQuestionEncoder"),Gg=t(`, and the generator can be any `),Ta=r("em"),Mg=t("seq2seq"),Ag=t(" model, preferably "),dr=r("a"),jg=t("TFBartForConditionalGeneration"),Pg=t("."),Ng=c(),O=r("p"),Cg=t("The model can be initialized with a "),ir=r("a"),Sg=t("RagRetriever"),Dg=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),wa=r("em"),Lg=t("autoencoding"),Ig=t(" model as the "),ka=r("code"),Og=t("question_encoder"),Wg=t(" and any "),qa=r("em"),Hg=t("seq2seq"),Bg=t(` model with language model head as the `),ya=r("code"),Qg=t("generator"),Vg=t(". 
It has been tested with "),cr=r("a"),Ug=t("TFDPRQuestionEncoder"),Kg=t(" as the "),xa=r("code"),Yg=t("question_encoder"),Jg=t(` and `),lr=r("a"),Xg=t("TFBartForConditionalGeneration"),Zg=t(" as the "),Ra=r("code"),e_=t("generator"),t_=t("."),o_=c(),Ko=r("p"),n_=t("This model inherits from "),hr=r("a"),r_=t("TFPreTrainedModel"),s_=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),a_=c(),Yo=r("p"),d_=t("This model is also a Tensorflow "),Jo=r("a"),i_=t("tf.keras.Model"),c_=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),l_=c(),Fa=r("p"),h_=t(`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),p_=c(),se=r("div"),v(Xo.$$.fragment),u_=c(),Ze=r("p"),m_=t("The "),pr=r("a"),g_=t("TFRagTokenForGeneration"),__=t(" forward method, overrides the "),za=r("code"),f_=t("__call__"),v_=t(" special method."),b_=c(),v(jt.$$.fragment),T_=c(),$a=r("p"),w_=t("Example:"),k_=c(),v(Zo.$$.fragment),q_=c(),Pt=r("div"),v(en.$$.fragment),y_=c(),Ea=r("p"),x_=t("Implements TFRAG token decoding."),this.h()},l(d){const m=nT('[data-svelte="svelte-1phssyn"]',document.head);h=s(m,"META",{name:!0,content:!0}),m.forEach(n),y=l(d),u=s(d,"H1",{class:!0});var tn=a(u);f=s(tn,"A",{id:!0,class:!0,href:!0});var Ga=a(f);x=s(Ga,"SPAN",{});var Ma=a(x);b(g.$$.fragment,Ma),Ma.forEach(n),Ga.forEach(n),p=l(tn),R=s(tn,"SPAN",{});var Aa=a(R);Zd=o(Aa,"RAG"),Aa.forEach(n),tn.forEach(n),La=l(d),$e=s(d,"H2",{class:!0});var on=a($e);tt=s(on,"A",{id:!0,class:!0,href:!0});var ja=a(tt);Rr=s(ja,"SPAN",{});var Pa=a(Rr);b(Ct.$$.fragment,Pa),Pa.forEach(n),ja.forEach(n),ei=l(on),Fr=s(on,"SPAN",{});var Na=a(Fr);ti=o(Na,"Overview"),Na.forEach(n),on.forEach(n),Ia=l(d),sn=s(d,"P",{});var Ca=a(sn);oi=o(Ca,`Retrieval-augmented generation (\u201CRAG\u201D) models combine the powers of pretrained dense retrieval (DPR) and sequence-to-sequence models. RAG models retrieve documents, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.`),Ca.forEach(n),Oa=l(d),ot=s(d,"P",{});var nn=a(ot);ni=o(nn,"It is based on the paper "),St=s(nn,"A",{href:!0,rel:!0});var Sa=a(St);ri=o(Sa,"Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"),Sa.forEach(n),si=o(nn,` by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\xFCttler, Mike Lewis, Wen-tau Yih, Tim Rockt\xE4schel, Sebastian Riedel, Douwe Kiela.`),nn.forEach(n),Wa=l(d),an=s(d,"P",{});var Da=a(an);ai=o(Da,"The abstract from the paper is the following:"),Da.forEach(n),Ha=l(d),dn=s(d,"P",{});var R_=a(dn);zr=s(R_,"EM",{});var F_=a(zr);di=o(F_,`Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. 
Pre-trained models with a differentiable access mechanism to explicit nonparametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) \u2014 models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.`),F_.forEach(n),R_.forEach(n),Ba=l(d),nt=s(d,"P",{});var md=a(nt);ii=o(md,"This model was contributed by "),Dt=s(md,"A",{href:!0,rel:!0});var z_=a(Dt);ci=o(z_,"ola13"),z_.forEach(n),li=o(md,"."),md.forEach(n),Qa=l(d),Ee=s(d,"H2",{class:!0});var gd=a(Ee);rt=s(gd,"A",{id:!0,class:!0,href:!0});var $_=a(rt);$r=s($_,"SPAN",{});var E_=a($r);b(Lt.$$.fragment,E_),E_.forEach(n),$_.forEach(n),hi=l(gd),Er=s(gd,"SPAN",{});var G_=a(Er);pi=o(G_,"RagConfig"),G_.forEach(n),gd.forEach(n),Va=l(d),de=s(d,"DIV",{class:!0});var Nt=a(de);b(It.$$.fragment,Nt),ui=l(Nt),me=s(Nt,"P",{});var et=a(me);cn=s(et,"A",{href:!0});var M_=a(cn);mi=o(M_,"RagConfig"),M_.forEach(n),gi=o(et," stores the configuration of a "),Gr=s(et,"EM",{});var A_=a(Gr);_i=o(A_,"RagModel"),A_.forEach(n),fi=o(et,`. Configuration objects inherit from `),ln=s(et,"A",{href:!0});var j_=a(ln);vi=o(j_,"PretrainedConfig"),j_.forEach(n),bi=o(et,` and can be used to control the model outputs. Read the documentation from `),hn=s(et,"A",{href:!0});var P_=a(hn);Ti=o(P_,"PretrainedConfig"),P_.forEach(n),wi=o(et," for more information."),et.forEach(n),ki=l(Nt),st=s(Nt,"DIV",{class:!0});var _d=a(st);b(Ot.$$.fragment,_d),qi=l(_d),Wt=s(_d,"P",{});var fd=a(Wt);yi=o(fd,"Instantiate a "),pn=s(fd,"A",{href:!0});var N_=a(pn);xi=o(N_,"EncoderDecoderConfig"),N_.forEach(n),Ri=o(fd,` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),fd.forEach(n),_d.forEach(n),Fi=l(Nt),at=s(Nt,"DIV",{class:!0});var vd=a(at);b(Ht.$$.fragment,vd),zi=l(vd),Bt=s(vd,"P",{});var bd=a(Bt);$i=o(bd,`Serializes this instance to a Python dictionary. Override the default `),un=s(bd,"A",{href:!0});var C_=a(un);Ei=o(C_,"to_dict()"),C_.forEach(n),Gi=o(bd,"."),bd.forEach(n),vd.forEach(n),Nt.forEach(n),Ua=l(d),Ge=s(d,"H2",{class:!0});var Td=a(Ge);dt=s(Td,"A",{id:!0,class:!0,href:!0});var S_=a(dt);Mr=s(S_,"SPAN",{});var D_=a(Mr);b(Qt.$$.fragment,D_),D_.forEach(n),S_.forEach(n),Mi=l(Td),Ar=s(Td,"SPAN",{});var L_=a(Ar);Ai=o(L_,"RagTokenizer"),L_.forEach(n),Td.forEach(n),Ka=l(d),Vt=s(d,"DIV",{class:!0});var I_=a(Vt);it=s(I_,"DIV",{class:!0});var wd=a(it);b(Ut.$$.fragment,wd),ji=l(wd),jr=s(wd,"P",{});var O_=a(jr);Pi=o(O_,`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),O_.forEach(n),wd.forEach(n),I_.forEach(n),Ya=l(d),Me=s(d,"H2",{class:!0});var kd=a(Me);ct=s(kd,"A",{id:!0,class:!0,href:!0});var W_=a(ct);Pr=s(W_,"SPAN",{});var H_=a(Pr);b(Kt.$$.fragment,H_),H_.forEach(n),W_.forEach(n),Ni=l(kd),Nr=s(kd,"SPAN",{});var B_=a(Nr);Ci=o(B_,"Rag specific outputs"),B_.forEach(n),kd.forEach(n),Ja=l(d),Ae=s(d,"DIV",{class:!0});var qd=a(Ae);b(Yt.$$.fragment,qd),Si=l(qd),Cr=s(qd,"P",{});var Q_=a(Cr);Di=o(Q_,"Base class for retriever augmented marginalized models outputs."),Q_.forEach(n),qd.forEach(n),Xa=l(d),Jt=s(d,"DIV",{class:!0});var V_=a(Jt);b(Xt.$$.fragment,V_),V_.forEach(n),Za=l(d),je=s(d,"H2",{class:!0});var yd=a(je);lt=s(yd,"A",{id:!0,class:!0,href:!0});var U_=a(lt);Sr=s(U_,"SPAN",{});var K_=a(Sr);b(Zt.$$.fragment,K_),K_.forEach(n),U_.forEach(n),Li=l(yd),Dr=s(yd,"SPAN",{});var Y_=a(Dr);Ii=o(Y_,"RagRetriever"),Y_.forEach(n),yd.forEach(n),ed=l(d),X=s(d,"DIV",{class:!0});var ae=a(X);b(eo.$$.fragment,ae),Oi=l(ae),Lr=s(ae,"P",{});var J_=a(Lr);Wi=o(J_,`Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel.`),J_.forEach(n),Hi=l(ae),Ir=s(ae,"P",{});var X_=a(Ir);Bi=o(X_,"Examples:"),X_.forEach(n),Qi=l(ae),b(to.$$.fragment,ae),Vi=l(ae),ht=s(ae,"DIV",{class:!0});var xd=a(ht);b(oo.$$.fragment,xd),Ui=l(xd),Or=s(xd,"P",{});var Z_=a(Or);Ki=o(Z_,"Retriever initialization function. It loads the index into memory."),Z_.forEach(n),xd.forEach(n),Yi=l(ae),pt=s(ae,"DIV",{class:!0});var Rd=a(pt);b(no.$$.fragment,Rd),Ji=l(Rd),Pe=s(Rd,"P",{});var ur=a(Pe);Xi=o(ur,"Postprocessing retrieved "),Wr=s(ur,"CODE",{});var ef=a(Wr);Zi=o(ef,"docs"),ef.forEach(n),ec=o(ur," and combining them with "),Hr=s(ur,"CODE",{});var tf=a(Hr);tc=o(tf,"input_strings"),tf.forEach(n),oc=o(ur,"."),ur.forEach(n),Rd.forEach(n),nc=l(ae),ut=s(ae,"DIV",{class:!0});var Fd=a(ut);b(ro.$$.fragment,Fd),rc=l(Fd),so=s(Fd,"P",{});var zd=a(so);sc=o(zd,"Retrieves documents for specified "),Br=s(zd,"CODE",{});var of=a(Br);ac=o(of,"question_hidden_states"),of.forEach(n),dc=o(zd,"."),zd.forEach(n),Fd.forEach(n),ae.forEach(n),td=l(d),Ne=s(d,"H2",{class:!0});var $d=a(Ne);mt=s($d,"A",{id:!0,class:!0,href:!0});var nf=a(mt);Qr=s(nf,"SPAN",{});var rf=a(Qr);b(ao.$$.fragment,rf),rf.forEach(n),nf.forEach(n),ic=l($d),Vr=s($d,"SPAN",{});var sf=a(Vr);cc=o(sf,"RagModel"),sf.forEach(n),$d.forEach(n),od=l(d),D=s(d,"DIV",{class:!0});var Z=a(D);b(io.$$.fragment,Z),lc=l(Z),Ce=s(Z,"P",{});var mr=a(Ce);hc=o(mr,"The "),mn=s(mr,"A",{href:!0});var af=a(mn);pc=o(af,"RagModel"),af.forEach(n),uc=o(mr," forward method, overrides the "),Ur=s(mr,"CODE",{});var df=a(Ur);mc=o(df,"__call__"),df.forEach(n),gc=o(mr," special method."),mr.forEach(n),_c=l(Z),b(gt.$$.fragment,Z),fc=l(Z),Kr=s(Z,"P",{});var cf=a(Kr);vc=o(cf,`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs is passed to the generator.`),cf.forEach(n),bc=l(Z),ie=s(Z,"P",{});var fe=a(ie);Tc=o(fe,"The question encoder can be any "),Yr=s(fe,"EM",{});var lf=a(Yr);wc=o(lf,"autoencoding"),lf.forEach(n),kc=o(fe," model, preferably "),gn=s(fe,"A",{href:!0});var hf=a(gn);qc=o(hf,"DPRQuestionEncoder"),hf.forEach(n),yc=o(fe,`, and the generator can be any `),Jr=s(fe,"EM",{});var pf=a(Jr);xc=o(pf,"seq2seq"),pf.forEach(n),Rc=o(fe," model, preferably "),_n=s(fe,"A",{href:!0});var uf=a(_n);Fc=o(uf,"BartForConditionalGeneration"),uf.forEach(n),zc=o(fe,"."),fe.forEach(n),$c=l(Z),A=s(Z,"P",{});var W=a(A);Ec=o(W,"The model can be initialized with a "),fn=s(W,"A",{href:!0});var mf=a(fn);Gc=o(mf,"RagRetriever"),mf.forEach(n),Mc=o(W,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Xr=s(W,"EM",{});var gf=a(Xr);Ac=o(gf,"autoencoding"),gf.forEach(n),jc=o(W," model as the "),Zr=s(W,"CODE",{});var _f=a(Zr);Pc=o(_f,"question_encoder"),_f.forEach(n),Nc=o(W," and any "),es=s(W,"EM",{});var ff=a(es);Cc=o(ff,"seq2seq"),ff.forEach(n),Sc=o(W,` model with language model head as the `),ts=s(W,"CODE",{});var vf=a(ts);Dc=o(vf,"generator"),vf.forEach(n),Lc=o(W,". It has been tested with "),vn=s(W,"A",{href:!0});var bf=a(vn);Ic=o(bf,"DPRQuestionEncoder"),bf.forEach(n),Oc=o(W," as the "),os=s(W,"CODE",{});var Tf=a(os);Wc=o(Tf,"question_encoder"),Tf.forEach(n),Hc=o(W,` and `),bn=s(W,"A",{href:!0});var wf=a(bn);Bc=o(wf,"BartForConditionalGeneration"),wf.forEach(n),Qc=o(W," or "),Tn=s(W,"A",{href:!0});var kf=a(Tn);Vc=o(kf,"T5ForConditionalGeneration"),kf.forEach(n),Uc=o(W,` as the `),ns=s(W,"CODE",{});var qf=a(ns);Kc=o(qf,"generator"),qf.forEach(n),Yc=o(W,"."),W.forEach(n),Jc=l(Z),co=s(Z,"P",{});var Ed=a(co);Xc=o(Ed,"This model inherits from "),wn=s(Ed,"A",{href:!0});var yf=a(wn);Zc=o(yf,"PreTrainedModel"),yf.forEach(n),el=o(Ed,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ed.forEach(n),tl=l(Z),lo=s(Z,"P",{});var Gd=a(lo);ol=o(Gd,"This model is also a PyTorch "),ho=s(Gd,"A",{href:!0,rel:!0});var xf=a(ho);nl=o(xf,"torch.nn.Module"),xf.forEach(n),rl=o(Gd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gd.forEach(n),sl=l(Z),ee=s(Z,"DIV",{class:!0});var ve=a(ee);b(po.$$.fragment,ve),al=l(ve),Se=s(ve,"P",{});var gr=a(Se);dl=o(gr,"The "),kn=s(gr,"A",{href:!0});var Rf=a(kn);il=o(Rf,"RagModel"),Rf.forEach(n),cl=o(gr," forward method, overrides the "),rs=s(gr,"CODE",{});var Ff=a(rs);ll=o(Ff,"__call__"),Ff.forEach(n),hl=o(gr," special method."),gr.forEach(n),pl=l(ve),b(_t.$$.fragment,ve),ul=l(ve),ss=s(ve,"P",{});var zf=a(ss);ml=o(zf,"Example:"),zf.forEach(n),gl=l(ve),b(uo.$$.fragment,ve),ve.forEach(n),Z.forEach(n),nd=l(d),De=s(d,"H2",{class:!0});var Md=a(De);ft=s(Md,"A",{id:!0,class:!0,href:!0});var $f=a(ft);as=s($f,"SPAN",{});var Ef=a(as);b(mo.$$.fragment,Ef),Ef.forEach(n),$f.forEach(n),_l=l(Md),ds=s(Md,"SPAN",{});var Gf=a(ds);fl=o(Gf,"RagSequenceForGeneration"),Gf.forEach(n),Md.forEach(n),rd=l(d),E=s(d,"DIV",{class:!0});var H=a(E);b(go.$$.fragment,H),vl=l(H),Le=s(H,"P",{});var _r=a(Le);bl=o(_r,"The "),qn=s(_r,"A",{href:!0});var Mf=a(qn);Tl=o(Mf,"RagSequenceForGeneration"),Mf.forEach(n),wl=o(_r," forward method, overrides the "),is=s(_r,"CODE",{});var Af=a(is);kl=o(Af,"__call__"),Af.forEach(n),ql=o(_r," special method."),_r.forEach(n),yl=l(H),b(vt.$$.fragment,H),xl=l(H),cs=s(H,"P",{});var jf=a(cs);Rl=o(jf,"A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),jf.forEach(n),Fl=l(H),ls=s(H,"P",{});var Pf=a(ls);zl=o(Pf,`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Pf.forEach(n),$l=l(H),ce=s(H,"P",{});var be=a(ce);El=o(be,"The question encoder can be any "),hs=s(be,"EM",{});var Nf=a(hs);Gl=o(Nf,"autoencoding"),Nf.forEach(n),Ml=o(be," model, preferably "),yn=s(be,"A",{href:!0});var Cf=a(yn);Al=o(Cf,"DPRQuestionEncoder"),Cf.forEach(n),jl=o(be,`, and the generator can be any `),ps=s(be,"EM",{});var Sf=a(ps);Pl=o(Sf,"seq2seq"),Sf.forEach(n),Nl=o(be," model, preferably "),xn=s(be,"A",{href:!0});var Df=a(xn);Cl=o(Df,"BartForConditionalGeneration"),Df.forEach(n),Sl=o(be,"."),be.forEach(n),Dl=l(H),j=s(H,"P",{});var B=a(j);Ll=o(B,"The model can be initialized with a "),Rn=s(B,"A",{href:!0});var Lf=a(Rn);Il=o(Lf,"RagRetriever"),Lf.forEach(n),Ol=o(B,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),us=s(B,"EM",{});var If=a(us);Wl=o(If,"autoencoding"),If.forEach(n),Hl=o(B," model as the "),ms=s(B,"CODE",{});var Of=a(ms);Bl=o(Of,"question_encoder"),Of.forEach(n),Ql=o(B," and any "),gs=s(B,"EM",{});var Wf=a(gs);Vl=o(Wf,"seq2seq"),Wf.forEach(n),Ul=o(B,` model with language model head as the `),_s=s(B,"CODE",{});var Hf=a(_s);Kl=o(Hf,"generator"),Hf.forEach(n),Yl=o(B,". 
It has been tested with "),Fn=s(B,"A",{href:!0});var Bf=a(Fn);Jl=o(Bf,"DPRQuestionEncoder"),Bf.forEach(n),Xl=o(B," as the "),fs=s(B,"CODE",{});var Qf=a(fs);Zl=o(Qf,"question_encoder"),Qf.forEach(n),eh=o(B,` and `),zn=s(B,"A",{href:!0});var Vf=a(zn);th=o(Vf,"BartForConditionalGeneration"),Vf.forEach(n),oh=o(B," or "),$n=s(B,"A",{href:!0});var Uf=a($n);nh=o(Uf,"T5ForConditionalGeneration"),Uf.forEach(n),rh=o(B,` as the `),vs=s(B,"CODE",{});var Kf=a(vs);sh=o(Kf,"generator"),Kf.forEach(n),ah=o(B,"."),B.forEach(n),dh=l(H),_o=s(H,"P",{});var Ad=a(_o);ih=o(Ad,"This model inherits from "),En=s(Ad,"A",{href:!0});var Yf=a(En);ch=o(Yf,"PreTrainedModel"),Yf.forEach(n),lh=o(Ad,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ad.forEach(n),hh=l(H),fo=s(H,"P",{});var jd=a(fo);ph=o(jd,"This model is also a PyTorch "),vo=s(jd,"A",{href:!0,rel:!0});var Jf=a(vo);uh=o(Jf,"torch.nn.Module"),Jf.forEach(n),mh=o(jd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jd.forEach(n),gh=l(H),te=s(H,"DIV",{class:!0});var Te=a(te);b(bo.$$.fragment,Te),_h=l(Te),Ie=s(Te,"P",{});var fr=a(Ie);fh=o(fr,"The "),Gn=s(fr,"A",{href:!0});var Xf=a(Gn);vh=o(Xf,"RagSequenceForGeneration"),Xf.forEach(n),bh=o(fr," forward method, overrides the "),bs=s(fr,"CODE",{});var Zf=a(bs);Th=o(Zf,"__call__"),Zf.forEach(n),wh=o(fr," special method."),fr.forEach(n),kh=l(Te),b(bt.$$.fragment,Te),qh=l(Te),Ts=s(Te,"P",{});var ev=a(Ts);yh=o(ev,"Example:"),ev.forEach(n),xh=l(Te),b(To.$$.fragment,Te),Te.forEach(n),Rh=l(H),Tt=s(H,"DIV",{class:!0});var Pd=a(Tt);b(wo.$$.fragment,Pd),Fh=l(Pd),ko=s(Pd,"P",{});var Nd=a(ko);zh=o(Nd,`Implements RAG sequence \u201Cthorough\u201D decoding. Read the `),Mn=s(Nd,"A",{href:!0});var tv=a(Mn);$h=o(tv,"generate()"),tv.forEach(n),Eh=o(Nd,"` documentation for more information on how to\nset other generate input parameters."),Nd.forEach(n),Pd.forEach(n),H.forEach(n),sd=l(d),Oe=s(d,"H2",{class:!0});var Cd=a(Oe);wt=s(Cd,"A",{id:!0,class:!0,href:!0});var ov=a(wt);ws=s(ov,"SPAN",{});var nv=a(ws);b(qo.$$.fragment,nv),nv.forEach(n),ov.forEach(n),Gh=l(Cd),ks=s(Cd,"SPAN",{});var rv=a(ks);Mh=o(rv,"RagTokenForGeneration"),rv.forEach(n),Cd.forEach(n),ad=l(d),G=s(d,"DIV",{class:!0});var Q=a(G);b(yo.$$.fragment,Q),Ah=l(Q),We=s(Q,"P",{});var vr=a(We);jh=o(vr,"The "),An=s(vr,"A",{href:!0});var sv=a(An);Ph=o(sv,"RagTokenForGeneration"),sv.forEach(n),Nh=o(vr," forward method, overrides the "),qs=s(vr,"CODE",{});var av=a(qs);Ch=o(av,"__call__"),av.forEach(n),Sh=o(vr," special method."),vr.forEach(n),Dh=l(Q),b(kt.$$.fragment,Q),Lh=l(Q),ys=s(Q,"P",{});var dv=a(ys);Ih=o(dv,"A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),dv.forEach(n),Oh=l(Q),xs=s(Q,"P",{});var iv=a(xs);Wh=o(iv,`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs is passed to the generator.`),iv.forEach(n),Hh=l(Q),le=s(Q,"P",{});var we=a(le);Bh=o(we,"The question encoder can be any "),Rs=s(we,"EM",{});var cv=a(Rs);Qh=o(cv,"autoencoding"),cv.forEach(n),Vh=o(we," model, preferably "),jn=s(we,"A",{href:!0});var lv=a(jn);Uh=o(lv,"DPRQuestionEncoder"),lv.forEach(n),Kh=o(we,`, and the generator can be any `),Fs=s(we,"EM",{});var hv=a(Fs);Yh=o(hv,"seq2seq"),hv.forEach(n),Jh=o(we," model, preferably "),Pn=s(we,"A",{href:!0});var pv=a(Pn);Xh=o(pv,"BartForConditionalGeneration"),pv.forEach(n),Zh=o(we,"."),we.forEach(n),ep=l(Q),P=s(Q,"P",{});var V=a(P);tp=o(V,"The model can be initialized with a "),Nn=s(V,"A",{href:!0});var uv=a(Nn);op=o(uv,"RagRetriever"),uv.forEach(n),np=o(V,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),zs=s(V,"EM",{});var mv=a(zs);rp=o(mv,"autoencoding"),mv.forEach(n),sp=o(V," model as the "),$s=s(V,"CODE",{});var gv=a($s);ap=o(gv,"question_encoder"),gv.forEach(n),dp=o(V," and any "),Es=s(V,"EM",{});var _v=a(Es);ip=o(_v,"seq2seq"),_v.forEach(n),cp=o(V,` model with language model head as the `),Gs=s(V,"CODE",{});var fv=a(Gs);lp=o(fv,"generator"),fv.forEach(n),hp=o(V,". It has been tested with "),Cn=s(V,"A",{href:!0});var vv=a(Cn);pp=o(vv,"DPRQuestionEncoder"),vv.forEach(n),up=o(V," as the "),Ms=s(V,"CODE",{});var bv=a(Ms);mp=o(bv,"question_encoder"),bv.forEach(n),gp=o(V,` and `),Sn=s(V,"A",{href:!0});var Tv=a(Sn);_p=o(Tv,"BartForConditionalGeneration"),Tv.forEach(n),fp=o(V," or "),Dn=s(V,"A",{href:!0});var wv=a(Dn);vp=o(wv,"T5ForConditionalGeneration"),wv.forEach(n),bp=o(V,` as the `),As=s(V,"CODE",{});var kv=a(As);Tp=o(kv,"generator"),kv.forEach(n),wp=o(V,"."),V.forEach(n),kp=l(Q),xo=s(Q,"P",{});var Sd=a(xo);qp=o(Sd,"This model inherits from "),Ln=s(Sd,"A",{href:!0});var qv=a(Ln);yp=o(qv,"PreTrainedModel"),qv.forEach(n),xp=o(Sd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sd.forEach(n),Rp=l(Q),Ro=s(Q,"P",{});var Dd=a(Ro);Fp=o(Dd,"This model is also a PyTorch "),Fo=s(Dd,"A",{href:!0,rel:!0});var yv=a(Fo);zp=o(yv,"torch.nn.Module"),yv.forEach(n),$p=o(Dd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dd.forEach(n),Ep=l(Q),oe=s(Q,"DIV",{class:!0});var ke=a(oe);b(zo.$$.fragment,ke),Gp=l(ke),He=s(ke,"P",{});var br=a(He);Mp=o(br,"The "),In=s(br,"A",{href:!0});var xv=a(In);Ap=o(xv,"RagTokenForGeneration"),xv.forEach(n),jp=o(br," forward method, overrides the "),js=s(br,"CODE",{});var Rv=a(js);Pp=o(Rv,"__call__"),Rv.forEach(n),Np=o(br," special method."),br.forEach(n),Cp=l(ke),b(qt.$$.fragment,ke),Sp=l(ke),Ps=s(ke,"P",{});var Fv=a(Ps);Dp=o(Fv,"Example:"),Fv.forEach(n),Lp=l(ke),b($o.$$.fragment,ke),ke.forEach(n),Ip=l(Q),yt=s(Q,"DIV",{class:!0});var Ld=a(yt);b(Eo.$$.fragment,Ld),Op=l(Ld),Ns=s(Ld,"P",{});var zv=a(Ns);Wp=o(zv,"Implements RAG token decoding."),zv.forEach(n),Ld.forEach(n),Q.forEach(n),dd=l(d),Be=s(d,"H2",{class:!0});var Id=a(Be);xt=s(Id,"A",{id:!0,class:!0,href:!0});var $v=a(xt);Cs=s($v,"SPAN",{});var Ev=a(Cs);b(Go.$$.fragment,Ev),Ev.forEach(n),$v.forEach(n),Hp=l(Id),Ss=s(Id,"SPAN",{});var Gv=a(Ss);Bp=o(Gv,"TFRagModel"),Gv.forEach(n),Id.forEach(n),id=l(d),N=s(d,"DIV",{class:!0});var U=a(N);b(Mo.$$.fragment,U),Qp=l(U),Qe=s(U,"P",{});var Tr=a(Qe);Vp=o(Tr,"The "),On=s(Tr,"A",{href:!0});var Mv=a(On);Up=o(Mv,"TFRagModel"),Mv.forEach(n),Kp=o(Tr," forward method, overrides the "),Ds=s(Tr,"CODE",{});var Av=a(Ds);Yp=o(Av,"__call__"),Av.forEach(n),Jp=o(Tr," special method."),Tr.forEach(n),Xp=l(U),b(Rt.$$.fragment,U),Zp=l(U),Ls=s(U,"P",{});var jv=a(Ls);eu=o(jv,`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),jv.forEach(n),tu=l(U),he=s(U,"P",{});var qe=a(he);ou=o(qe,"The question encoder can be any "),Is=s(qe,"EM",{});var Pv=a(Is);nu=o(Pv,"autoencoding"),Pv.forEach(n),ru=o(qe," model, preferably "),Wn=s(qe,"A",{href:!0});var Nv=a(Wn);su=o(Nv,"TFDPRQuestionEncoder"),Nv.forEach(n),au=o(qe,`, and the generator can be any `),Os=s(qe,"EM",{});var Cv=a(Os);du=o(Cv,"seq2seq"),Cv.forEach(n),iu=o(qe," model, preferably "),Hn=s(qe,"A",{href:!0});var Sv=a(Hn);cu=o(Sv,"TFBartForConditionalGeneration"),Sv.forEach(n),lu=o(qe,"."),qe.forEach(n),hu=l(U),L=s(U,"P",{});var K=a(L);pu=o(K,"The model can be initialized with a "),Bn=s(K,"A",{href:!0});var Dv=a(Bn);uu=o(Dv,"RagRetriever"),Dv.forEach(n),mu=o(K,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Ws=s(K,"EM",{});var Lv=a(Ws);gu=o(Lv,"autoencoding"),Lv.forEach(n),_u=o(K," model as the "),Hs=s(K,"CODE",{});var Iv=a(Hs);fu=o(Iv,"question_encoder"),Iv.forEach(n),vu=o(K," and any "),Bs=s(K,"EM",{});var Ov=a(Bs);bu=o(Ov,"seq2seq"),Ov.forEach(n),Tu=o(K,` model with language model head as the `),Qs=s(K,"CODE",{});var Wv=a(Qs);wu=o(Wv,"generator"),Wv.forEach(n),ku=o(K,". 
It has been tested with "),Qn=s(K,"A",{href:!0});var Hv=a(Qn);qu=o(Hv,"TFDPRQuestionEncoder"),Hv.forEach(n),yu=o(K," as the "),Vs=s(K,"CODE",{});var Bv=a(Vs);xu=o(Bv,"question_encoder"),Bv.forEach(n),Ru=o(K,` and `),Vn=s(K,"A",{href:!0});var Qv=a(Vn);Fu=o(Qv,"TFBartForConditionalGeneration"),Qv.forEach(n),zu=o(K," as the "),Us=s(K,"CODE",{});var Vv=a(Us);$u=o(Vv,"generator"),Vv.forEach(n),Eu=o(K,"."),K.forEach(n),Gu=l(U),Ao=s(U,"P",{});var Od=a(Ao);Mu=o(Od,"This model inherits from "),Un=s(Od,"A",{href:!0});var Uv=a(Un);Au=o(Uv,"TFPreTrainedModel"),Uv.forEach(n),ju=o(Od,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Od.forEach(n),Pu=l(U),jo=s(U,"P",{});var Wd=a(jo);Nu=o(Wd,"This model is also a Tensorflow "),Po=s(Wd,"A",{href:!0,rel:!0});var Kv=a(Po);Cu=o(Kv,"tf.keras.Model"),Kv.forEach(n),Su=o(Wd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Wd.forEach(n),Du=l(U),Ks=s(U,"P",{});var Yv=a(Ks);Lu=o(Yv,`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Yv.forEach(n),Iu=l(U),ne=s(U,"DIV",{class:!0});var ye=a(ne);b(No.$$.fragment,ye),Ou=l(ye),Ve=s(ye,"P",{});var wr=a(Ve);Wu=o(wr,"The "),Kn=s(wr,"A",{href:!0});var Jv=a(Kn);Hu=o(Jv,"TFRagModel"),Jv.forEach(n),Bu=o(wr," forward method, overrides the "),Ys=s(wr,"CODE",{});var Xv=a(Ys);Qu=o(Xv,"__call__"),Xv.forEach(n),Vu=o(wr," special method."),wr.forEach(n),Uu=l(ye),b(Ft.$$.fragment,ye),Ku=l(ye),Js=s(ye,"P",{});var Zv=a(Js);Yu=o(Zv,"Example:"),Zv.forEach(n),Ju=l(ye),b(Co.$$.fragment,ye),ye.forEach(n),U.forEach(n),cd=l(d),Ue=s(d,"H2",{class:!0});var Hd=a(Ue);zt=s(Hd,"A",{id:!0,class:!0,href:!0});var eb=a(zt);Xs=s(eb,"SPAN",{});var tb=a(Xs);b(So.$$.fragment,tb),tb.forEach(n),eb.forEach(n),Xu=l(Hd),Zs=s(Hd,"SPAN",{});var ob=a(Zs);Zu=o(ob,"TFRagSequenceForGeneration"),ob.forEach(n),Hd.forEach(n),ld=l(d),F=s(d,"DIV",{class:!0});var C=a(F);b(Do.$$.fragment,C),em=l(C),Ke=s(C,"P",{});var kr=a(Ke);tm=o(kr,"The "),Yn=s(kr,"A",{href:!0});var nb=a(Yn);om=o(nb,"TFRagSequenceForGeneration"),nb.forEach(n),nm=o(kr," forward method, overrides the "),ea=s(kr,"CODE",{});var rb=a(ea);rm=o(rb,"__call__"),rb.forEach(n),sm=o(kr," special method."),kr.forEach(n),am=l(C),b($t.$$.fragment,C),dm=l(C),ta=s(C,"P",{});var sb=a(ta);im=o(sb,"A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),sb.forEach(n),cm=l(C),oa=s(C,"P",{});var ab=a(oa);lm=o(ab,`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs is passed to the generator.`),ab.forEach(n),hm=l(C),pe=s(C,"P",{});var xe=a(pe);pm=o(xe,"The question encoder can be any "),na=s(xe,"EM",{});var db=a(na);um=o(db,"autoencoding"),db.forEach(n),mm=o(xe," model, preferably "),Jn=s(xe,"A",{href:!0});var ib=a(Jn);gm=o(ib,"TFDPRQuestionEncoder"),ib.forEach(n),_m=o(xe,`, and the generator can be any `),ra=s(xe,"EM",{});var cb=a(ra);fm=o(cb,"seq2seq"),cb.forEach(n),vm=o(xe," model, preferably "),Xn=s(xe,"A",{href:!0});var lb=a(Xn);bm=o(lb,"TFBartForConditionalGeneration"),lb.forEach(n),Tm=o(xe,"."),xe.forEach(n),wm=l(C),I=s(C,"P",{});var Y=a(I);km=o(Y,"The model can be initialized with a "),Zn=s(Y,"A",{href:!0});var hb=a(Zn);qm=o(hb,"RagRetriever"),hb.forEach(n),ym=o(Y,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),sa=s(Y,"EM",{});var pb=a(sa);xm=o(pb,"autoencoding"),pb.forEach(n),Rm=o(Y," model as the "),aa=s(Y,"CODE",{});var ub=a(aa);Fm=o(ub,"question_encoder"),ub.forEach(n),zm=o(Y," and any "),da=s(Y,"EM",{});var mb=a(da);$m=o(mb,"seq2seq"),mb.forEach(n),Em=o(Y,` model with language model head as the `),ia=s(Y,"CODE",{});var gb=a(ia);Gm=o(gb,"generator"),gb.forEach(n),Mm=o(Y,". It has been tested with "),er=s(Y,"A",{href:!0});var _b=a(er);Am=o(_b,"TFDPRQuestionEncoder"),_b.forEach(n),jm=o(Y," as the "),ca=s(Y,"CODE",{});var fb=a(ca);Pm=o(fb,"question_encoder"),fb.forEach(n),Nm=o(Y,` and `),tr=s(Y,"A",{href:!0});var vb=a(tr);Cm=o(vb,"TFBartForConditionalGeneration"),vb.forEach(n),Sm=o(Y," as the "),la=s(Y,"CODE",{});var bb=a(la);Dm=o(bb,"generator"),bb.forEach(n),Lm=o(Y,"."),Y.forEach(n),Im=l(C),Lo=s(C,"P",{});var Bd=a(Lo);Om=o(Bd,"This model inherits from "),or=s(Bd,"A",{href:!0});var Tb=a(or);Wm=o(Tb,"TFPreTrainedModel"),Tb.forEach(n),Hm=o(Bd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bd.forEach(n),Bm=l(C),Io=s(C,"P",{});var Qd=a(Io);Qm=o(Qd,"This model is also a Tensorflow "),Oo=s(Qd,"A",{href:!0,rel:!0});var wb=a(Oo);Vm=o(wb,"tf.keras.Model"),wb.forEach(n),Um=o(Qd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Qd.forEach(n),Km=l(C),ha=s(C,"P",{});var kb=a(ha);Ym=o(kb,`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),kb.forEach(n),Jm=l(C),re=s(C,"DIV",{class:!0});var Re=a(re);b(Wo.$$.fragment,Re),Xm=l(Re),Ye=s(Re,"P",{});var qr=a(Ye);Zm=o(qr,"The "),nr=s(qr,"A",{href:!0});var qb=a(nr);eg=o(qb,"TFRagSequenceForGeneration"),qb.forEach(n),tg=o(qr," forward method, overrides the "),pa=s(qr,"CODE",{});var yb=a(pa);og=o(yb,"__call__"),yb.forEach(n),ng=o(qr," special method."),qr.forEach(n),rg=l(Re),b(Et.$$.fragment,Re),sg=l(Re),ua=s(Re,"P",{});var xb=a(ua);ag=o(xb,"Example:"),xb.forEach(n),dg=l(Re),b(Ho.$$.fragment,Re),Re.forEach(n),ig=l(C),Gt=s(C,"DIV",{class:!0});var Vd=a(Gt);b(Bo.$$.fragment,Vd),cg=l(Vd),Qo=s(Vd,"P",{});var Ud=a(Qo);lg=o(Ud,`Implements RAG sequence \u201Cthorough\u201D decoding. 
Read the `),rr=s(Ud,"A",{href:!0});var Rb=a(rr);hg=o(Rb,"generate()"),Rb.forEach(n),pg=o(Ud,"` documentation for more information on how to\nset other generate input parameters"),Ud.forEach(n),Vd.forEach(n),C.forEach(n),hd=l(d),Je=s(d,"H2",{class:!0});var Kd=a(Je);Mt=s(Kd,"A",{id:!0,class:!0,href:!0});var Fb=a(Mt);ma=s(Fb,"SPAN",{});var zb=a(ma);b(Vo.$$.fragment,zb),zb.forEach(n),Fb.forEach(n),ug=l(Kd),ga=s(Kd,"SPAN",{});var $b=a(ga);mg=o($b,"TFRagTokenForGeneration"),$b.forEach(n),Kd.forEach(n),pd=l(d),z=s(d,"DIV",{class:!0});var S=a(z);b(Uo.$$.fragment,S),gg=l(S),Xe=s(S,"P",{});var yr=a(Xe);_g=o(yr,"The "),sr=s(yr,"A",{href:!0});var Eb=a(sr);fg=o(Eb,"TFRagTokenForGeneration"),Eb.forEach(n),vg=o(yr," forward method, overrides the "),_a=s(yr,"CODE",{});var Gb=a(_a);bg=o(Gb,"__call__"),Gb.forEach(n),Tg=o(yr," special method."),yr.forEach(n),wg=l(S),b(At.$$.fragment,S),kg=l(S),fa=s(S,"P",{});var Mb=a(fa);qg=o(Mb,"A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),Mb.forEach(n),yg=l(S),va=s(S,"P",{});var Ab=a(va);xg=o(Ab,`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Ab.forEach(n),Rg=l(S),ue=s(S,"P",{});var Fe=a(ue);Fg=o(Fe,"The question encoder can be any "),ba=s(Fe,"EM",{});var jb=a(ba);zg=o(jb,"autoencoding"),jb.forEach(n),$g=o(Fe," model, preferably "),ar=s(Fe,"A",{href:!0});var Pb=a(ar);Eg=o(Pb,"TFDPRQuestionEncoder"),Pb.forEach(n),Gg=o(Fe,`, and the generator can be any `),Ta=s(Fe,"EM",{});var Nb=a(Ta);Mg=o(Nb,"seq2seq"),Nb.forEach(n),Ag=o(Fe," model, preferably "),dr=s(Fe,"A",{href:!0});var Cb=a(dr);jg=o(Cb,"TFBartForConditionalGeneration"),Cb.forEach(n),Pg=o(Fe,"."),Fe.forEach(n),Ng=l(S),O=s(S,"P",{});var J=a(O);Cg=o(J,"The model can be initialized with a "),ir=s(J,"A",{href:!0});var Sb=a(ir);Sg=o(Sb,"RagRetriever"),Sb.forEach(n),Dg=o(J,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),wa=s(J,"EM",{});var Db=a(wa);Lg=o(Db,"autoencoding"),Db.forEach(n),Ig=o(J," model as the "),ka=s(J,"CODE",{});var Lb=a(ka);Og=o(Lb,"question_encoder"),Lb.forEach(n),Wg=o(J," and any "),qa=s(J,"EM",{});var Ib=a(qa);Hg=o(Ib,"seq2seq"),Ib.forEach(n),Bg=o(J,` model with language model head as the `),ya=s(J,"CODE",{});var Ob=a(ya);Qg=o(Ob,"generator"),Ob.forEach(n),Vg=o(J,". It has been tested with "),cr=s(J,"A",{href:!0});var Wb=a(cr);Ug=o(Wb,"TFDPRQuestionEncoder"),Wb.forEach(n),Kg=o(J," as the "),xa=s(J,"CODE",{});var Hb=a(xa);Yg=o(Hb,"question_encoder"),Hb.forEach(n),Jg=o(J,` and `),lr=s(J,"A",{href:!0});var Bb=a(lr);Xg=o(Bb,"TFBartForConditionalGeneration"),Bb.forEach(n),Zg=o(J," as the "),Ra=s(J,"CODE",{});var Qb=a(Ra);e_=o(Qb,"generator"),Qb.forEach(n),t_=o(J,"."),J.forEach(n),o_=l(S),Ko=s(S,"P",{});var Yd=a(Ko);n_=o(Yd,"This model inherits from "),hr=s(Yd,"A",{href:!0});var Vb=a(hr);r_=o(Vb,"TFPreTrainedModel"),Vb.forEach(n),s_=o(Yd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yd.forEach(n),a_=l(S),Yo=s(S,"P",{});var Jd=a(Yo);d_=o(Jd,"This model is also a Tensorflow "),Jo=s(Jd,"A",{href:!0,rel:!0});var Ub=a(Jo);i_=o(Ub,"tf.keras.Model"),Ub.forEach(n),c_=o(Jd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Jd.forEach(n),l_=l(S),Fa=s(S,"P",{});var Kb=a(Fa);h_=o(Kb,`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Kb.forEach(n),p_=l(S),se=s(S,"DIV",{class:!0});var ze=a(se);b(Xo.$$.fragment,ze),u_=l(ze),Ze=s(ze,"P",{});var xr=a(Ze);m_=o(xr,"The "),pr=s(xr,"A",{href:!0});var Yb=a(pr);g_=o(Yb,"TFRagTokenForGeneration"),Yb.forEach(n),__=o(xr," forward method, overrides the "),za=s(xr,"CODE",{});var Jb=a(za);f_=o(Jb,"__call__"),Jb.forEach(n),v_=o(xr," special method."),xr.forEach(n),b_=l(ze),b(jt.$$.fragment,ze),T_=l(ze),$a=s(ze,"P",{});var Xb=a($a);w_=o(Xb,"Example:"),Xb.forEach(n),k_=l(ze),b(Zo.$$.fragment,ze),ze.forEach(n),q_=l(S),Pt=s(S,"DIV",{class:!0});var Xd=a(Pt);b(en.$$.fragment,Xd),y_=l(Xd),Ea=s(Xd,"P",{});var Zb=a(Ea);x_=o(Zb,"Implements TFRAG token decoding."),Zb.forEach(n),Xd.forEach(n),S.forEach(n),this.h()},h(){i(h,"name","hf:doc:metadata"),i(h,"content",JSON.stringify(fT)),i(f,"id","rag"),i(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(f,"href","#rag"),i(u,"class","relative group"),i(tt,"id","overview"),i(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(tt,"href","#overview"),i($e,"class","relative group"),i(St,"href","https://arxiv.org/abs/2005.11401"),i(St,"rel","nofollow"),i(Dt,"href","https://huggingface.co/ola13"),i(Dt,"rel","nofollow"),i(rt,"id","transformers.RagConfig"),i(rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(rt,"href","#transformers.RagConfig"),i(Ee,"class","relative group"),i(cn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig"),i(ln,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(hn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(pn,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig"),i(st,"class","docstring"),i(un,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig.to_dict"),i(at,"class","docstring"),i(de,"class","docstring"),i(dt,"id","transformers.RagTokenizer"),i(dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(dt,"href","#transformers.RagTokenizer"),i(Ge,"class","relative group"),i(it,"class","docstring"),i(Vt,"class","docstring"),i(ct,"id","transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput"),i(ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),i(ct,"href","#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput"),i(Me,"class","relative group"),i(Ae,"class","docstring"),i(Jt,"class","docstring"),i(lt,"id","transformers.RagRetriever"),i(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(lt,"href","#transformers.RagRetriever"),i(je,"class","relative group"),i(ht,"class","docstring"),i(pt,"class","docstring"),i(ut,"class","docstring"),i(X,"class","docstring"),i(mt,"id","transformers.RagModel"),i(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(mt,"href","#transformers.RagModel"),i(Ne,"class","relative group"),i(mn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagModel"),i(gn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(_n,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(fn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever"),i(vn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(bn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Tn,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),i(wn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(ho,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(ho,"rel","nofollow"),i(kn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagModel"),i(ee,"class","docstring"),i(D,"class","docstring"),i(ft,"id","transformers.RagSequenceForGeneration"),i(ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(ft,"href","#transformers.RagSequenceForGeneration"),i(De,"class","relative group"),i(qn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagSequenceForGeneration"),i(yn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(xn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Rn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever"),i(Fn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(zn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),i($n,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),i(En,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(vo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(vo,"rel","nofollow"),i(Gn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagSequenceForGeneration"),i(te,"class","docstring"),i(Mn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),i(Tt,"class","docstring"),i(E,"class","docstring"),i(wt,"id","transformers.RagTokenForGeneration"),i(wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),i(wt,"href","#transformers.RagTokenForGeneration"),i(Oe,"class","relative group"),i(An,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration"),i(jn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(Pn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Nn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever"),i(Cn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(Sn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Dn,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),i(Ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(Fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(Fo,"rel","nofollow"),i(In,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagTokenForGeneration"),i(oe,"class","docstring"),i(yt,"class","docstring"),i(G,"class","docstring"),i(xt,"id","transformers.TFRagModel"),i(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(xt,"href","#transformers.TFRagModel"),i(Be,"class","relative group"),i(On,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagModel"),i(Wn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(Hn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(Bn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever"),i(Qn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(Vn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(Un,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),i(Po,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Po,"rel","nofollow"),i(Kn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagModel"),i(ne,"class","docstring"),i(N,"class","docstring"),i(zt,"id","transformers.TFRagSequenceForGeneration"),i(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(zt,"href","#transformers.TFRagSequenceForGeneration"),i(Ue,"class","relative 
group"),i(Yn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagSequenceForGeneration"),i(Jn,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(Xn,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(Zn,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever"),i(er,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(tr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(or,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),i(Oo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Oo,"rel","nofollow"),i(nr,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagSequenceForGeneration"),i(re,"class","docstring"),i(rr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),i(Gt,"class","docstring"),i(F,"class","docstring"),i(Mt,"id","transformers.TFRagTokenForGeneration"),i(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Mt,"href","#transformers.TFRagTokenForGeneration"),i(Je,"class","relative group"),i(sr,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagTokenForGeneration"),i(ar,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(dr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(ir,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagRetriever"),i(cr,"href","/docs/transformers/v4.15.0/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(lr,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(hr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),i(Jo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Jo,"rel","nofollow"),i(pr,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.TFRagTokenForGeneration"),i(se,"class","docstring"),i(Pt,"class","docstring"),i(z,"class","docstring")},m(d,m){e(document.head,h),_(d,y,m),_(d,u,m),e(u,f),e(f,x),T(g,x,null),e(u,p),e(u,R),e(R,Zd),_(d,La,m),_(d,$e,m),e($e,tt),e(tt,Rr),T(Ct,Rr,null),e($e,ei),e($e,Fr),e(Fr,ti),_(d,Ia,m),_(d,sn,m),e(sn,oi),_(d,Oa,m),_(d,ot,m),e(ot,ni),e(ot,St),e(St,ri),e(ot,si),_(d,Wa,m),_(d,an,m),e(an,ai),_(d,Ha,m),_(d,dn,m),e(dn,zr),e(zr,di),_(d,Ba,m),_(d,nt,m),e(nt,ii),e(nt,Dt),e(Dt,ci),e(nt,li),_(d,Qa,m),_(d,Ee,m),e(Ee,rt),e(rt,$r),T(Lt,$r,null),e(Ee,hi),e(Ee,Er),e(Er,pi),_(d,Va,m),_(d,de,m),T(It,de,null),e(de,ui),e(de,me),e(me,cn),e(cn,mi),e(me,gi),e(me,Gr),e(Gr,_i),e(me,fi),e(me,ln),e(ln,vi),e(me,bi),e(me,hn),e(hn,Ti),e(me,wi),e(de,ki),e(de,st),T(Ot,st,null),e(st,qi),e(st,Wt),e(Wt,yi),e(Wt,pn),e(pn,xi),e(Wt,Ri),e(de,Fi),e(de,at),T(Ht,at,null),e(at,zi),e(at,Bt),e(Bt,$i),e(Bt,un),e(un,Ei),e(Bt,Gi),_(d,Ua,m),_(d,Ge,m),e(Ge,dt),e(dt,Mr),T(Qt,Mr,null),e(Ge,Mi),e(Ge,Ar),e(Ar,Ai),_(d,Ka,m),_(d,Vt,m),e(Vt,it),T(Ut,it,null),e(it,ji),e(it,jr),e(jr,Pi),_(d,Ya,m),_(d,Me,m),e(Me,ct),e(ct,Pr),T(Kt,Pr,null),e(Me,Ni),e(Me,Nr),e(Nr,Ci),_(d,Ja,m),_(d,Ae,m),T(Yt,Ae,null),e(Ae,Si),e(Ae,Cr),e(Cr,Di),_(d,Xa,m),_(d,Jt,m),T(Xt,Jt,null),_(d,Za,m),_(d,je,m),e(je,lt),e(lt,Sr),T(Zt,Sr,null),e(je,Li),e(je,Dr),e(Dr,Ii),_(d,ed,m),_(d,X,m),T(eo,X,null),e(X,O
i),e(X,Lr),e(Lr,Wi),e(X,Hi),e(X,Ir),e(Ir,Bi),e(X,Qi),T(to,X,null),e(X,Vi),e(X,ht),T(oo,ht,null),e(ht,Ui),e(ht,Or),e(Or,Ki),e(X,Yi),e(X,pt),T(no,pt,null),e(pt,Ji),e(pt,Pe),e(Pe,Xi),e(Pe,Wr),e(Wr,Zi),e(Pe,ec),e(Pe,Hr),e(Hr,tc),e(Pe,oc),e(X,nc),e(X,ut),T(ro,ut,null),e(ut,rc),e(ut,so),e(so,sc),e(so,Br),e(Br,ac),e(so,dc),_(d,td,m),_(d,Ne,m),e(Ne,mt),e(mt,Qr),T(ao,Qr,null),e(Ne,ic),e(Ne,Vr),e(Vr,cc),_(d,od,m),_(d,D,m),T(io,D,null),e(D,lc),e(D,Ce),e(Ce,hc),e(Ce,mn),e(mn,pc),e(Ce,uc),e(Ce,Ur),e(Ur,mc),e(Ce,gc),e(D,_c),T(gt,D,null),e(D,fc),e(D,Kr),e(Kr,vc),e(D,bc),e(D,ie),e(ie,Tc),e(ie,Yr),e(Yr,wc),e(ie,kc),e(ie,gn),e(gn,qc),e(ie,yc),e(ie,Jr),e(Jr,xc),e(ie,Rc),e(ie,_n),e(_n,Fc),e(ie,zc),e(D,$c),e(D,A),e(A,Ec),e(A,fn),e(fn,Gc),e(A,Mc),e(A,Xr),e(Xr,Ac),e(A,jc),e(A,Zr),e(Zr,Pc),e(A,Nc),e(A,es),e(es,Cc),e(A,Sc),e(A,ts),e(ts,Dc),e(A,Lc),e(A,vn),e(vn,Ic),e(A,Oc),e(A,os),e(os,Wc),e(A,Hc),e(A,bn),e(bn,Bc),e(A,Qc),e(A,Tn),e(Tn,Vc),e(A,Uc),e(A,ns),e(ns,Kc),e(A,Yc),e(D,Jc),e(D,co),e(co,Xc),e(co,wn),e(wn,Zc),e(co,el),e(D,tl),e(D,lo),e(lo,ol),e(lo,ho),e(ho,nl),e(lo,rl),e(D,sl),e(D,ee),T(po,ee,null),e(ee,al),e(ee,Se),e(Se,dl),e(Se,kn),e(kn,il),e(Se,cl),e(Se,rs),e(rs,ll),e(Se,hl),e(ee,pl),T(_t,ee,null),e(ee,ul),e(ee,ss),e(ss,ml),e(ee,gl),T(uo,ee,null),_(d,nd,m),_(d,De,m),e(De,ft),e(ft,as),T(mo,as,null),e(De,_l),e(De,ds),e(ds,fl),_(d,rd,m),_(d,E,m),T(go,E,null),e(E,vl),e(E,Le),e(Le,bl),e(Le,qn),e(qn,Tl),e(Le,wl),e(Le,is),e(is,kl),e(Le,ql),e(E,yl),T(vt,E,null),e(E,xl),e(E,cs),e(cs,Rl),e(E,Fl),e(E,ls),e(ls,zl),e(E,$l),e(E,ce),e(ce,El),e(ce,hs),e(hs,Gl),e(ce,Ml),e(ce,yn),e(yn,Al),e(ce,jl),e(ce,ps),e(ps,Pl),e(ce,Nl),e(ce,xn),e(xn,Cl),e(ce,Sl),e(E,Dl),e(E,j),e(j,Ll),e(j,Rn),e(Rn,Il),e(j,Ol),e(j,us),e(us,Wl),e(j,Hl),e(j,ms),e(ms,Bl),e(j,Ql),e(j,gs),e(gs,Vl),e(j,Ul),e(j,_s),e(_s,Kl),e(j,Yl),e(j,Fn),e(Fn,Jl),e(j,Xl),e(j,fs),e(fs,Zl),e(j,eh),e(j,zn),e(zn,th),e(j,oh),e(j,$n),e($n,nh),e(j,rh),e(j,vs),e(vs,sh),e(j,ah),e(E,dh),e(E,_o),e(_o,ih),e(_o,En),e(En,ch),e(_o,lh),e(E,hh),e(E,fo),e(fo,ph),e(fo,vo),e(vo,uh),e(fo,mh),e(E,gh),e(E,te),T(bo,te,null),e(te,_h),e(te,Ie),e(Ie,fh),e(Ie,Gn),e(Gn,vh),e(Ie,bh),e(Ie,bs),e(bs,Th),e(Ie,wh),e(te,kh),T(bt,te,null),e(te,qh),e(te,Ts),e(Ts,yh),e(te,xh),T(To,te,null),e(E,Rh),e(E,Tt),T(wo,Tt,null),e(Tt,Fh),e(Tt,ko),e(ko,zh),e(ko,Mn),e(Mn,$h),e(ko,Eh),_(d,sd,m),_(d,Oe,m),e(Oe,wt),e(wt,ws),T(qo,ws,null),e(Oe,Gh),e(Oe,ks),e(ks,Mh),_(d,ad,m),_(d,G,m),T(yo,G,null),e(G,Ah),e(G,We),e(We,jh),e(We,An),e(An,Ph),e(We,Nh),e(We,qs),e(qs,Ch),e(We,Sh),e(G,Dh),T(kt,G,null),e(G,Lh),e(G,ys),e(ys,Ih),e(G,Oh),e(G,xs),e(xs,Wh),e(G,Hh),e(G,le),e(le,Bh),e(le,Rs),e(Rs,Qh),e(le,Vh),e(le,jn),e(jn,Uh),e(le,Kh),e(le,Fs),e(Fs,Yh),e(le,Jh),e(le,Pn),e(Pn,Xh),e(le,Zh),e(G,ep),e(G,P),e(P,tp),e(P,Nn),e(Nn,op),e(P,np),e(P,zs),e(zs,rp),e(P,sp),e(P,$s),e($s,ap),e(P,dp),e(P,Es),e(Es,ip),e(P,cp),e(P,Gs),e(Gs,lp),e(P,hp),e(P,Cn),e(Cn,pp),e(P,up),e(P,Ms),e(Ms,mp),e(P,gp),e(P,Sn),e(Sn,_p),e(P,fp),e(P,Dn),e(Dn,vp),e(P,bp),e(P,As),e(As,Tp),e(P,wp),e(G,kp),e(G,xo),e(xo,qp),e(xo,Ln),e(Ln,yp),e(xo,xp),e(G,Rp),e(G,Ro),e(Ro,Fp),e(Ro,Fo),e(Fo,zp),e(Ro,$p),e(G,Ep),e(G,oe),T(zo,oe,null),e(oe,Gp),e(oe,He),e(He,Mp),e(He,In),e(In,Ap),e(He,jp),e(He,js),e(js,Pp),e(He,Np),e(oe,Cp),T(qt,oe,null),e(oe,Sp),e(oe,Ps),e(Ps,Dp),e(oe,Lp),T($o,oe,null),e(G,Ip),e(G,yt),T(Eo,yt,null),e(yt,Op),e(yt,Ns),e(Ns,Wp),_(d,dd,m),_(d,Be,m),e(Be,xt),e(xt,Cs),T(Go,Cs,null),e(Be,Hp),e(Be,Ss),e(Ss,Bp),_(d,id,m),_(d,N,m),T(Mo,N,null),e(N,Qp),e(N,Qe),e(Qe,Vp),e(Qe,On),e(On,Up),e(Qe,Kp),e(Qe,Ds),e(Ds,Yp),e(Qe,Jp),e(N,Xp),T(Rt,N,null),e(N,Zp),e(N,Ls),e(Ls,eu),e(N,tu),e(N,he),e(he
,ou),e(he,Is),e(Is,nu),e(he,ru),e(he,Wn),e(Wn,su),e(he,au),e(he,Os),e(Os,du),e(he,iu),e(he,Hn),e(Hn,cu),e(he,lu),e(N,hu),e(N,L),e(L,pu),e(L,Bn),e(Bn,uu),e(L,mu),e(L,Ws),e(Ws,gu),e(L,_u),e(L,Hs),e(Hs,fu),e(L,vu),e(L,Bs),e(Bs,bu),e(L,Tu),e(L,Qs),e(Qs,wu),e(L,ku),e(L,Qn),e(Qn,qu),e(L,yu),e(L,Vs),e(Vs,xu),e(L,Ru),e(L,Vn),e(Vn,Fu),e(L,zu),e(L,Us),e(Us,$u),e(L,Eu),e(N,Gu),e(N,Ao),e(Ao,Mu),e(Ao,Un),e(Un,Au),e(Ao,ju),e(N,Pu),e(N,jo),e(jo,Nu),e(jo,Po),e(Po,Cu),e(jo,Su),e(N,Du),e(N,Ks),e(Ks,Lu),e(N,Iu),e(N,ne),T(No,ne,null),e(ne,Ou),e(ne,Ve),e(Ve,Wu),e(Ve,Kn),e(Kn,Hu),e(Ve,Bu),e(Ve,Ys),e(Ys,Qu),e(Ve,Vu),e(ne,Uu),T(Ft,ne,null),e(ne,Ku),e(ne,Js),e(Js,Yu),e(ne,Ju),T(Co,ne,null),_(d,cd,m),_(d,Ue,m),e(Ue,zt),e(zt,Xs),T(So,Xs,null),e(Ue,Xu),e(Ue,Zs),e(Zs,Zu),_(d,ld,m),_(d,F,m),T(Do,F,null),e(F,em),e(F,Ke),e(Ke,tm),e(Ke,Yn),e(Yn,om),e(Ke,nm),e(Ke,ea),e(ea,rm),e(Ke,sm),e(F,am),T($t,F,null),e(F,dm),e(F,ta),e(ta,im),e(F,cm),e(F,oa),e(oa,lm),e(F,hm),e(F,pe),e(pe,pm),e(pe,na),e(na,um),e(pe,mm),e(pe,Jn),e(Jn,gm),e(pe,_m),e(pe,ra),e(ra,fm),e(pe,vm),e(pe,Xn),e(Xn,bm),e(pe,Tm),e(F,wm),e(F,I),e(I,km),e(I,Zn),e(Zn,qm),e(I,ym),e(I,sa),e(sa,xm),e(I,Rm),e(I,aa),e(aa,Fm),e(I,zm),e(I,da),e(da,$m),e(I,Em),e(I,ia),e(ia,Gm),e(I,Mm),e(I,er),e(er,Am),e(I,jm),e(I,ca),e(ca,Pm),e(I,Nm),e(I,tr),e(tr,Cm),e(I,Sm),e(I,la),e(la,Dm),e(I,Lm),e(F,Im),e(F,Lo),e(Lo,Om),e(Lo,or),e(or,Wm),e(Lo,Hm),e(F,Bm),e(F,Io),e(Io,Qm),e(Io,Oo),e(Oo,Vm),e(Io,Um),e(F,Km),e(F,ha),e(ha,Ym),e(F,Jm),e(F,re),T(Wo,re,null),e(re,Xm),e(re,Ye),e(Ye,Zm),e(Ye,nr),e(nr,eg),e(Ye,tg),e(Ye,pa),e(pa,og),e(Ye,ng),e(re,rg),T(Et,re,null),e(re,sg),e(re,ua),e(ua,ag),e(re,dg),T(Ho,re,null),e(F,ig),e(F,Gt),T(Bo,Gt,null),e(Gt,cg),e(Gt,Qo),e(Qo,lg),e(Qo,rr),e(rr,hg),e(Qo,pg),_(d,hd,m),_(d,Je,m),e(Je,Mt),e(Mt,ma),T(Vo,ma,null),e(Je,ug),e(Je,ga),e(ga,mg),_(d,pd,m),_(d,z,m),T(Uo,z,null),e(z,gg),e(z,Xe),e(Xe,_g),e(Xe,sr),e(sr,fg),e(Xe,vg),e(Xe,_a),e(_a,bg),e(Xe,Tg),e(z,wg),T(At,z,null),e(z,kg),e(z,fa),e(fa,qg),e(z,yg),e(z,va),e(va,xg),e(z,Rg),e(z,ue),e(ue,Fg),e(ue,ba),e(ba,zg),e(ue,$g),e(ue,ar),e(ar,Eg),e(ue,Gg),e(ue,Ta),e(Ta,Mg),e(ue,Ag),e(ue,dr),e(dr,jg),e(ue,Pg),e(z,Ng),e(z,O),e(O,Cg),e(O,ir),e(ir,Sg),e(O,Dg),e(O,wa),e(wa,Lg),e(O,Ig),e(O,ka),e(ka,Og),e(O,Wg),e(O,qa),e(qa,Hg),e(O,Bg),e(O,ya),e(ya,Qg),e(O,Vg),e(O,cr),e(cr,Ug),e(O,Kg),e(O,xa),e(xa,Yg),e(O,Jg),e(O,lr),e(lr,Xg),e(O,Zg),e(O,Ra),e(Ra,e_),e(O,t_),e(z,o_),e(z,Ko),e(Ko,n_),e(Ko,hr),e(hr,r_),e(Ko,s_),e(z,a_),e(z,Yo),e(Yo,d_),e(Yo,Jo),e(Jo,i_),e(Yo,c_),e(z,l_),e(z,Fa),e(Fa,h_),e(z,p_),e(z,se),T(Xo,se,null),e(se,u_),e(se,Ze),e(Ze,m_),e(Ze,pr),e(pr,g_),e(Ze,__),e(Ze,za),e(za,f_),e(Ze,v_),e(se,b_),T(jt,se,null),e(se,T_),e(se,$a),e($a,w_),e(se,k_),T(Zo,se,null),e(z,q_),e(z,Pt),T(en,Pt,null),e(Pt,y_),e(Pt,Ea),e(Ea,x_),ud=!0},p(d,[m]){const tn={};m&2&&(tn.$$scope={dirty:m,ctx:d}),gt.$set(tn);const Ga={};m&2&&(Ga.$$scope={dirty:m,ctx:d}),_t.$set(Ga);const Ma={};m&2&&(Ma.$$scope={dirty:m,ctx:d}),vt.$set(Ma);const Aa={};m&2&&(Aa.$$scope={dirty:m,ctx:d}),bt.$set(Aa);const on={};m&2&&(on.$$scope={dirty:m,ctx:d}),kt.$set(on);const ja={};m&2&&(ja.$$scope={dirty:m,ctx:d}),qt.$set(ja);const Pa={};m&2&&(Pa.$$scope={dirty:m,ctx:d}),Rt.$set(Pa);const Na={};m&2&&(Na.$$scope={dirty:m,ctx:d}),Ft.$set(Na);const Ca={};m&2&&(Ca.$$scope={dirty:m,ctx:d}),$t.$set(Ca);const nn={};m&2&&(nn.$$scope={dirty:m,ctx:d}),Et.$set(nn);const Sa={};m&2&&(Sa.$$scope={dirty:m,ctx:d}),At.$set(Sa);const 
Da={};m&2&&(Da.$$scope={dirty:m,ctx:d}),jt.$set(Da)},i(d){ud||(w(g.$$.fragment,d),w(Ct.$$.fragment,d),w(Lt.$$.fragment,d),w(It.$$.fragment,d),w(Ot.$$.fragment,d),w(Ht.$$.fragment,d),w(Qt.$$.fragment,d),w(Ut.$$.fragment,d),w(Kt.$$.fragment,d),w(Yt.$$.fragment,d),w(Xt.$$.fragment,d),w(Zt.$$.fragment,d),w(eo.$$.fragment,d),w(to.$$.fragment,d),w(oo.$$.fragment,d),w(no.$$.fragment,d),w(ro.$$.fragment,d),w(ao.$$.fragment,d),w(io.$$.fragment,d),w(gt.$$.fragment,d),w(po.$$.fragment,d),w(_t.$$.fragment,d),w(uo.$$.fragment,d),w(mo.$$.fragment,d),w(go.$$.fragment,d),w(vt.$$.fragment,d),w(bo.$$.fragment,d),w(bt.$$.fragment,d),w(To.$$.fragment,d),w(wo.$$.fragment,d),w(qo.$$.fragment,d),w(yo.$$.fragment,d),w(kt.$$.fragment,d),w(zo.$$.fragment,d),w(qt.$$.fragment,d),w($o.$$.fragment,d),w(Eo.$$.fragment,d),w(Go.$$.fragment,d),w(Mo.$$.fragment,d),w(Rt.$$.fragment,d),w(No.$$.fragment,d),w(Ft.$$.fragment,d),w(Co.$$.fragment,d),w(So.$$.fragment,d),w(Do.$$.fragment,d),w($t.$$.fragment,d),w(Wo.$$.fragment,d),w(Et.$$.fragment,d),w(Ho.$$.fragment,d),w(Bo.$$.fragment,d),w(Vo.$$.fragment,d),w(Uo.$$.fragment,d),w(At.$$.fragment,d),w(Xo.$$.fragment,d),w(jt.$$.fragment,d),w(Zo.$$.fragment,d),w(en.$$.fragment,d),ud=!0)},o(d){k(g.$$.fragment,d),k(Ct.$$.fragment,d),k(Lt.$$.fragment,d),k(It.$$.fragment,d),k(Ot.$$.fragment,d),k(Ht.$$.fragment,d),k(Qt.$$.fragment,d),k(Ut.$$.fragment,d),k(Kt.$$.fragment,d),k(Yt.$$.fragment,d),k(Xt.$$.fragment,d),k(Zt.$$.fragment,d),k(eo.$$.fragment,d),k(to.$$.fragment,d),k(oo.$$.fragment,d),k(no.$$.fragment,d),k(ro.$$.fragment,d),k(ao.$$.fragment,d),k(io.$$.fragment,d),k(gt.$$.fragment,d),k(po.$$.fragment,d),k(_t.$$.fragment,d),k(uo.$$.fragment,d),k(mo.$$.fragment,d),k(go.$$.fragment,d),k(vt.$$.fragment,d),k(bo.$$.fragment,d),k(bt.$$.fragment,d),k(To.$$.fragment,d),k(wo.$$.fragment,d),k(qo.$$.fragment,d),k(yo.$$.fragment,d),k(kt.$$.fragment,d),k(zo.$$.fragment,d),k(qt.$$.fragment,d),k($o.$$.fragment,d),k(Eo.$$.fragment,d),k(Go.$$.fragment,d),k(Mo.$$.fragment,d),k(Rt.$$.fragment,d),k(No.$$.fragment,d),k(Ft.$$.fragment,d),k(Co.$$.fragment,d),k(So.$$.fragment,d),k(Do.$$.fragment,d),k($t.$$.fragment,d),k(Wo.$$.fragment,d),k(Et.$$.fragment,d),k(Ho.$$.fragment,d),k(Bo.$$.fragment,d),k(Vo.$$.fragment,d),k(Uo.$$.fragment,d),k(At.$$.fragment,d),k(Xo.$$.fragment,d),k(jt.$$.fragment,d),k(Zo.$$.fragment,d),k(en.$$.fragment,d),ud=!1},d(d){n(h),d&&n(y),d&&n(u),q(g),d&&n(La),d&&n($e),q(Ct),d&&n(Ia),d&&n(sn),d&&n(Oa),d&&n(ot),d&&n(Wa),d&&n(an),d&&n(Ha),d&&n(dn),d&&n(Ba),d&&n(nt),d&&n(Qa),d&&n(Ee),q(Lt),d&&n(Va),d&&n(de),q(It),q(Ot),q(Ht),d&&n(Ua),d&&n(Ge),q(Qt),d&&n(Ka),d&&n(Vt),q(Ut),d&&n(Ya),d&&n(Me),q(Kt),d&&n(Ja),d&&n(Ae),q(Yt),d&&n(Xa),d&&n(Jt),q(Xt),d&&n(Za),d&&n(je),q(Zt),d&&n(ed),d&&n(X),q(eo),q(to),q(oo),q(no),q(ro),d&&n(td),d&&n(Ne),q(ao),d&&n(od),d&&n(D),q(io),q(gt),q(po),q(_t),q(uo),d&&n(nd),d&&n(De),q(mo),d&&n(rd),d&&n(E),q(go),q(vt),q(bo),q(bt),q(To),q(wo),d&&n(sd),d&&n(Oe),q(qo),d&&n(ad),d&&n(G),q(yo),q(kt),q(zo),q(qt),q($o),q(Eo),d&&n(dd),d&&n(Be),q(Go),d&&n(id),d&&n(N),q(Mo),q(Rt),q(No),q(Ft),q(Co),d&&n(cd),d&&n(Ue),q(So),d&&n(ld),d&&n(F),q(Do),q($t),q(Wo),q(Et),q(Ho),q(Bo),d&&n(hd),d&&n(Je),q(Vo),d&&n(pd),d&&n(z),q(Uo),q(At),q(Xo),q(jt),q(Zo),q(en)}}}const fT={local:"rag",sections:[{local:"overview",title:"Overview"},{local:"transformers.RagConfig",title:"RagConfig"},{local:"transformers.RagTokenizer",title:"RagTokenizer"},{local:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput",title:"Rag specific 
outputs"},{local:"transformers.RagRetriever",title:"RagRetriever"},{local:"transformers.RagModel",title:"RagModel"},{local:"transformers.RagSequenceForGeneration",title:"RagSequenceForGeneration"},{local:"transformers.RagTokenForGeneration",title:"RagTokenForGeneration"},{local:"transformers.TFRagModel",title:"TFRagModel"},{local:"transformers.TFRagSequenceForGeneration",title:"TFRagSequenceForGeneration"},{local:"transformers.TFRagTokenForGeneration",title:"TFRagTokenForGeneration"}],title:"RAG"};function vT($,h,y){let{fw:u}=h;return $.$$set=f=>{"fw"in f&&y(0,u=f.fw)},[u]}class xT extends eT{constructor(h){super();tT(this,h,vT,_T,oT,{fw:0})}}export{xT as default,fT as metadata};
9,948
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/reformer.mdx-94fe346c.js
import{S as Ou,i as Iu,s as Wu,e as o,k as m,w as u,t,T as x,L as Qu,c as r,d as a,m as p,a as i,x as f,h as n,U as T,b as l,J as s,g as h,y as _,q as v,o as y,B as b}from"../../chunks/vendor-b1433968.js";import{T as Jn}from"../../chunks/Tip-c3840994.js";import{D as Y}from"../../chunks/Docstring-ff504c58.js";import{C as ye}from"../../chunks/CodeBlock-a320dbd7.js";import{I as V}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Uu(G){let d,M,g,z,R;return{c(){d=o("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),z=t("Module"),R=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){d=r(w,"P",{});var k=i(d);M=n(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(k,"CODE",{});var L=i(g);z=n(L,"Module"),L.forEach(a),R=n(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(a)},m(w,k){h(w,d,k),s(d,M),s(d,g),s(g,z),s(d,R)},d(w){w&&a(d)}}}function Ku(G){let d,M,g,z,R;return{c(){d=o("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),z=t("Module"),R=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){d=r(w,"P",{});var k=i(d);M=n(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(k,"CODE",{});var L=i(g);z=n(L,"Module"),L.forEach(a),R=n(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(a)},m(w,k){h(w,d,k),s(d,M),s(d,g),s(g,z),s(d,R)},d(w){w&&a(d)}}}function Bu(G){let d,M,g,z,R;return{c(){d=o("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),z=t("Module"),R=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){d=r(w,"P",{});var k=i(d);M=n(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(k,"CODE",{});var L=i(g);z=n(L,"Module"),L.forEach(a),R=n(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(a)},m(w,k){h(w,d,k),s(d,M),s(d,g),s(g,z),s(d,R)},d(w){w&&a(d)}}}function Xu(G){let d,M,g,z,R;return{c(){d=o("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),z=t("Module"),R=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){d=r(w,"P",{});var k=i(d);M=n(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(k,"CODE",{});var L=i(g);z=n(L,"Module"),L.forEach(a),R=n(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(a)},m(w,k){h(w,d,k),s(d,M),s(d,g),s(g,z),s(d,R)},d(w){w&&a(d)}}}function 
Vu(G){let d,M,g,z,R;return{c(){d=o("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),z=t("Module"),R=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){d=r(w,"P",{});var k=i(d);M=n(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(k,"CODE",{});var L=i(g);z=n(L,"Module"),L.forEach(a),R=n(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(a)},m(w,k){h(w,d,k),s(d,M),s(d,g),s(g,z),s(d,R)},d(w){w&&a(d)}}}function Gu(G){let d,M,g,z,R,w,k,L,xi,Yn,ws,Tt,Ti,Mi,be,Ri,$i,Zn,zs,Bs,Mt,ke,Ei,Rt,qi,so,Xs,Li,we,Fi,Pi,eo,Qa,Ci,ao,Ua,$t,Ai,to,is,Si,ze,ji,Ni,xe,Di,Hi,no,Te,Et,Oi,Ii,oo,Ka,ls,Wi,qt,Qi,Ui,Lt,Ki,Bi,Me,Xi,ro,xs,Vs,Ft,Re,Vi,Pt,Gi,io,$,Ji,$e,Yi,Zi,lo,hu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>d</mi></mrow><annotation encoding="application/x-tex">d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span></span></span></span>',mo,Ct,sl,el,po,du='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>i</mi><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msub><mi>n</mi><mi>s</mi></msub></mrow><annotation encoding="application/x-tex">i, \\ldots, n_s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.854em;vertical-align:-0.1944em;"></span><span class="mord mathnormal">i</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',co,ho,gu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>n</mi><mi>s</mi></msub></mrow><annotation encoding="application/x-tex">n_s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal 
mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',go,At,al,tl,uo,uu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>n</mi><mi>s</mi></msub><mo>=</mo><msup><mn>2</mn><mn>19</mn></msup><mo>\u2248</mo><mn>0.5</mn><mi>M</mi></mrow><annotation encoding="application/x-tex">n_s = 2^{19} \\approx 0.5M</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.8141em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">19</span></span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2248</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.6833em;"></span><span class="mord">0.5</span><span class="mord mathnormal" style="margin-right:0.10903em;">M</span></span></span></span>',fo,St,nl,ol,_o,fu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>d</mi><mo>=</mo><msup><mn>2</mn><mn>10</mn></msup><mo>\u2248</mo><mn>1000</mn></mrow><annotation encoding="application/x-tex">d = 2^{10} \\approx 1000</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.8141em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">10</span></span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2248</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span 
class="base"><span class="strut" style="height:0.6444em;"></span><span class="mord">1000</span></span></span></span>',vo,yo,_u='<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><msub><mi>X</mi><mrow><mi>i</mi><mo separator="true">,</mo><mi>j</mi></mrow></msub><mo separator="true">,</mo><mtext>\xA0with\xA0</mtext><mi>i</mi><mo>\u2208</mo><mrow><mo fence="true">[</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><mi>d</mi><mo fence="true">]</mo></mrow><mtext>\xA0and\xA0</mtext><mi>j</mi><mo>\u2208</mo><mrow><mo fence="true">[</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msub><mi>n</mi><mi>s</mi></msub><mo fence="true">]</mo></mrow></mrow><annotation encoding="application/x-tex">X_{i,j}, \\text{ with } i \\in \\left[1,\\ldots, d\\right] \\text{ and } j \\in \\left[1,\\ldots, n_s\\right]</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.9805em;vertical-align:-0.2861em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2861em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord text"><span class="mord">\xA0with\xA0</span></span><span class="mord mathnormal">i</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">[</span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord mathnormal">d</span><span class="mclose delimcenter" style="top:0em;">]</span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord text"><span class="mord">\xA0and\xA0</span></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">[</span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span 
class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;">]</span></span></span></span></span></span>',bo,Ts,rl,ko,vu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>X</mi><mrow><mi>i</mi><mo separator="true">,</mo><mi>j</mi></mrow></msub></mrow><annotation encoding="application/x-tex">X_{i,j}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.9694em;vertical-align:-0.2861em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2861em;"><span></span></span></span></span></span></span></span></span></span>',wo,zo,yu='<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><msubsup><mi>X</mi><mrow><mi>i</mi><mo separator="true">,</mo><mi>j</mi></mrow><mn>1</mn></msubsup><mo separator="true">,</mo><mtext>\xA0with\xA0</mtext><mi>i</mi><mo>\u2208</mo><mrow><mo fence="true">[</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msup><mi>d</mi><mn>1</mn></msup><mo fence="true">]</mo></mrow><mtext>\xA0and\xA0</mtext><mi>j</mi><mo>\u2208</mo><mrow><mo fence="true">[</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msubsup><mi>n</mi><mi>s</mi><mn>1</mn></msubsup><mo fence="true">]</mo></mrow></mrow><annotation encoding="application/x-tex">X^{1}_{i,j}, \\text{ with } i \\in \\left[1,\\ldots, d^1\\right] \\text{ and } j \\in \\left[1,\\ldots, n_s^1\\right]</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.2472em;vertical-align:-0.3831em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-2.453em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mpunct mtight">,</span><span class="mord mathnormal 
mtight" style="margin-right:0.05724em;">j</span></span></span></span><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.3831em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord text"><span class="mord">\xA0with\xA0</span></span><span class="mord mathnormal">i</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.2141em;vertical-align:-0.35em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size1">[</span></span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;"><span class="delimsizing size1">]</span></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord text"><span class="mord">\xA0and\xA0</span></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.2141em;vertical-align:-0.35em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size1">[</span></span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;"><span class="delimsizing 
size1">]</span></span></span></span></span></span></span>',xo,Ee,il,To,bu='<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><msubsup><mi>X</mi><mrow><mi>i</mi><mo separator="true">,</mo><mi>j</mi></mrow><mn>2</mn></msubsup><mo separator="true">,</mo><mtext>\xA0with\xA0</mtext><mi>i</mi><mo>\u2208</mo><mrow><mo fence="true">[</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msup><mi>d</mi><mn>2</mn></msup><mo fence="true">]</mo></mrow><mtext>\xA0and\xA0</mtext><mi>j</mi><mo>\u2208</mo><mrow><mo fence="true">[</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msubsup><mi>n</mi><mi>s</mi><mn>2</mn></msubsup><mo fence="true">]</mo></mrow></mrow><annotation encoding="application/x-tex">X^{2}_{i,j}, \\text{ with } i \\in \\left[1,\\ldots, d^2\\right] \\text{ and } j \\in \\left[1,\\ldots, n_s^2\\right]</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.2472em;vertical-align:-0.3831em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-2.453em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.3831em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord text"><span class="mord">\xA0with\xA0</span></span><span class="mord mathnormal">i</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.2141em;vertical-align:-0.35em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size1">[</span></span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;"><span class="delimsizing size1">]</span></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span 
class="mord text"><span class="mord">\xA0and\xA0</span></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.2141em;vertical-align:-0.35em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size1">[</span></span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;"><span class="delimsizing size1">]</span></span></span></span></span></span></span>',Mo,qe,ll,Ro,ku='<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi>d</mi><mo>=</mo><msup><mi>d</mi><mn>1</mn></msup><mo>+</mo><msup><mi>d</mi><mn>2</mn></msup><mtext>\xA0and\xA0</mtext><msub><mi>n</mi><mi>s</mi></msub><mo>=</mo><msubsup><mi>n</mi><mi>s</mi><mn>1</mn></msubsup><mo>\xD7</mo><msubsup><mi>n</mi><mi>s</mi><mn>2</mn></msubsup><mi mathvariant="normal">.</mi></mrow><annotation encoding="application/x-tex">d = d^1 + d^2 \\text{ and } n_s = n_s^1 \\times n_s^2 .</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.9474em;vertical-align:-0.0833em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1.0141em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span 
style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span></span></span></span></span><span class="mord text"><span class="mord">\xA0and\xA0</span></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.1111em;vertical-align:-0.247em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1.1111em;vertical-align:-0.247em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8641em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.113em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mord">.</span></span></span></span></span>',$o,Le,ml,Eo,wu=`<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><msub><mi>X</mi><mrow><mi>i</mi><mo separator="true">,</mo><mi>j</mi></mrow></msub><mo>=</mo><mrow><mo fence="true">{</mo><mtable rowspacing="0.36em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><msubsup><mi>X</mi><mrow><mi>i</mi><mo separator="true">,</mo><mi>k</mi></mrow><mn>1</mn></msubsup><mo separator="true">,</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" 
displaystyle="false"><mrow><mtext>if\xA0\xA0</mtext><mi>i</mi><mo>&lt;</mo><msup><mi>d</mi><mn>1</mn></msup><mtext>\xA0with\xA0</mtext><mi>k</mi><mo>=</mo><mi>j</mi><mspace></mspace><mspace width="0.6667em"/><mrow><mi mathvariant="normal">m</mi><mi mathvariant="normal">o</mi><mi mathvariant="normal">d</mi></mrow><mtext>\u2009</mtext><mtext>\u2009</mtext><msubsup><mi>n</mi><mi>s</mi><mn>1</mn></msubsup></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><msubsup><mi>X</mi><mrow><mi>i</mi><mo>\u2212</mo><msup><mi>d</mi><mn>1</mn></msup><mo separator="true">,</mo><mi>l</mi></mrow><mn>2</mn></msubsup><mo separator="true">,</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mtext>if\xA0</mtext><mi>i</mi><mo>\u2265</mo><msup><mi>d</mi><mn>1</mn></msup><mtext>\xA0with\xA0</mtext><mi>l</mi><mo>=</mo><mo stretchy="false">\u230A</mo><mfrac><mi>j</mi><msubsup><mi>n</mi><mi>s</mi><mn>1</mn></msubsup></mfrac><mo stretchy="false">\u230B</mo></mrow></mstyle></mtd></mtr></mtable></mrow></mrow><annotation encoding="application/x-tex">X_{i,j} = \\begin{cases} X^{1}_{i, k}, &amp; \\text{if }\\ i &lt; d^1 \\text{ with } k = j \\mod n_s^1 \\\\ X^{2}_{i - d^1, l}, &amp; \\text{if } i \\ge d^1 \\text{ with } l = \\lfloor\\frac{j}{n_s^1}\\rfloor \\end{cases}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.9694em;vertical-align:-0.2861em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2861em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:3em;vertical-align:-1.25em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mtable"><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.7466em;"><span style="top:-3.7466em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.03148em;">k</span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span 
class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.4192em;"><span></span></span></span></span></span></span><span class="mpunct">,</span></span></span><span style="top:-2.3066em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord"><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.3806em;margin-left:-0.0785em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mbin mtight">\u2212</span><span class="mord mtight"><span class="mord mathnormal mtight">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.7463em;"><span style="top:-2.786em;margin-right:0.0714em;"><span class="pstrut" style="height:2.5em;"></span><span class="sizing reset-size3 size1 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.01968em;">l</span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.4555em;"><span></span></span></span></span></span></span><span class="mpunct">,</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.2466em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:1em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.7466em;"><span style="top:-3.7466em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if\xA0</span></span><span class="mspace">\xA0</span><span class="mord mathnormal">i</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">&lt;</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mord text"><span class="mord">\xA0with\xA0</span></span><span class="mord mathnormal" style="margin-right:0.03148em;">k</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mspace allowbreak"></span><span class="mspace" style="margin-right:0.6667em;"></span><span class="mord"><span class="mord"><span class="mord mathrm">mod</span></span></span><span 
class="mspace" style="margin-right:0.1667em;"></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span></span></span><span style="top:-2.3066em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if\xA0</span></span><span class="mord mathnormal">i</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2265</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mord text"><span class="mord">\xA0with\xA0</span></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mopen">\u230A</span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.9078em;"><span style="top:-2.655em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight"><span class="mord mathnormal mtight">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.7463em;"><span style="top:-2.214em;margin-left:0em;margin-right:0.0714em;"><span class="pstrut" style="height:2.5em;"></span><span class="sizing reset-size3 size1 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-2.786em;margin-right:0.0714em;"><span class="pstrut" style="height:2.5em;"></span><span class="sizing reset-size3 size1 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.286em;"><span></span></span></span></span></span></span></span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.4461em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" 
style="height:0.5452em;"><span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span><span class="mclose">\u230B</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.2466em;"><span></span></span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span></span></span></span></span>`,qo,F,pl,Lo,zu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mi>j</mi></msub><mo>\u2208</mo><msup><mi mathvariant="double-struck">R</mi><mi>d</mi></msup></mrow><annotation encoding="application/x-tex">x_j \\in \\mathbb{R}^{d}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8252em;vertical-align:-0.2861em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2861em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2208</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.8491em;"></span><span class="mord"><span class="mord mathbb">R</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8491em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">d</span></span></span></span></span></span></span></span></span></span></span></span>',Fo,Po,xu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msubsup><mi>x</mi><mrow><mi>k</mi><mo separator="true">,</mo><mi>l</mi></mrow><mn>1</mn></msubsup><mo>+</mo><msubsup><mi>x</mi><mrow><mi>l</mi><mo separator="true">,</mo><mi>k</mi></mrow><mn>2</mn></msubsup></mrow><annotation encoding="application/x-tex">x^1_{k, l} + x^2_{l, k}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.2333em;vertical-align:-0.4192em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight" style="margin-right:0.03148em;">k</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.01968em;">l</span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span 
class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.4192em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1.2333em;vertical-align:-0.4192em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight" style="margin-right:0.01968em;">l</span><span class="mpunct mtight">,</span><span class="mord mathnormal mtight" style="margin-right:0.03148em;">k</span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.4192em;"><span></span></span></span></span></span></span></span></span></span>',Co,jt,cl,hl,Ao,Tu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi></mrow><annotation encoding="application/x-tex">j</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.854em;vertical-align:-0.1944em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span></span></span></span>',So,jo,Mu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>k</mi><mtext>\xA0and\xA0</mtext><mi>l</mi></mrow><annotation encoding="application/x-tex">k \\text{ and } l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.03148em;">k</span><span class="mord text"><span class="mord">\xA0and\xA0</span></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span>',No,Do,Ru='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mi>j</mi></msub></mrow><annotation encoding="application/x-tex">x_j</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.7167em;vertical-align:-0.2861em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.05724em;">j</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2861em;"><span></span></span></span></span></span></span></span></span></span>',Ho,Oo,ms,dl,Io,$u='<span class="katex"><span class="katex-mathml"><math 
xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msup><mi>d</mi><mn>1</mn></msup><mo>=</mo><msup><mn>2</mn><mn>5</mn></msup><mo separator="true">,</mo><msup><mi>d</mi><mn>2</mn></msup><mo>=</mo><msup><mn>2</mn><mn>5</mn></msup><mo separator="true">,</mo><msubsup><mi>n</mi><mi>s</mi><mn>1</mn></msubsup><mo>=</mo><msup><mn>2</mn><mn>9</mn></msup><mo separator="true">,</mo><msubsup><mi>n</mi><mi>s</mi><mn>2</mn></msubsup><mo>=</mo><msup><mn>2</mn><mn>10</mn></msup></mrow><annotation encoding="application/x-tex">d^1 = 2^5, d^2 = 2^5, n_s^1 = 2^9, n_s^2 = 2^{10}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8141em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.0085em;vertical-align:-0.1944em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">5</span></span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.0611em;vertical-align:-0.247em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">5</span></span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord 
mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1.0611em;vertical-align:-0.247em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">9</span></span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.8141em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">10</span></span></span></span></span></span></span></span></span></span></span></span>',Wo,Qo,Eu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msup><mn>2</mn><mn>14</mn></msup><mo>+</mo><msup><mn>2</mn><mn>15</mn></msup><mo>\u2248</mo><mn>49000</mn></mrow><annotation encoding="application/x-tex">2^{14} + 2^{15} \\approx 49000</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8974em;vertical-align:-0.0833em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">14</span></span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8141em;"></span><span class="mord"><span class="mord">2</span><span class="msupsub"><span class="vlist-t"><span 
class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">15</span></span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">\u2248</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.6444em;"></span><span class="mord">49000</span></span></span></span>',Uo,Ko,E,gl,Nt,ul,fl,Bo,qu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><msup><mi>d</mi><mn>1</mn></msup><mo separator="true">,</mo><msup><mi>d</mi><mn>2</mn></msup><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(d^1, d^2)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.0641em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',Xo,Dt,_l,vl,Ht,yl,bl,Vo,Lu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><msubsup><mi>n</mi><mi>s</mi><mn>1</mn></msubsup><mo separator="true">,</mo><msubsup><mi>n</mi><mi>s</mi><mn>2</mn></msubsup><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(n_s^1, n_s^2)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.0641em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord 
mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.453em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.247em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',Go,Ot,kl,wl,It,zl,xl,Wt,Tl,Ml,Jo,Ms,Gs,Qt,Fe,Rl,Ut,$l,Yo,Z,El,Pe,ql,Ll,Kt,Fl,Pl,Bt,Cl,Al,Zo,j,Sl,Xt,jl,Nl,Vt,Dl,Hl,Gt,Ol,Il,Jt,Wl,Ql,Yt,Ul,Kl,sr,ps,Bl,Ce,Xl,Vl,Ae,Gl,Jl,er,I,Yl,Zt,Zl,sm,ar,Fu=`<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><msubsup><mi>n</mi><mtext>buckets</mtext><mn>1</mn></msubsup><mo separator="true">,</mo><msubsup><mi>n</mi><mtext>buckets</mtext><mn>2</mn></msubsup><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(n_{\\text{buckets}}^1, n_{\\text{buckets}}^2)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.0972em;vertical-align:-0.2831em;"></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2831em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2831em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>`,tr,nr,Pu=`<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mn>1</mn><mo 
separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msub><mi>n</mi><mtext>buckets</mtext></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(1,\\ldots, n_{\\text{buckets}})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>`,or,rr,Cu=`<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mn>1</mn><mo>\u2212</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msubsup><mi>n</mi><mtext>buckets</mtext><mn>1</mn></msubsup><mo>\u2212</mo><mn>1</mn><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><mn>1</mn><mo>\u2212</mo><msubsup><mi>n</mi><mtext>buckets</mtext><mn>2</mn></msubsup><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msubsup><mi>n</mi><mtext>buckets</mtext><mn>1</mn></msubsup><mo>\u2212</mo><msubsup><mi>n</mi><mtext>buckets</mtext><mn>2</mn></msubsup><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(1-1,\\ldots, n_{\\text{buckets}}^1-1, \\ldots, 1-n_{\\text{buckets}}^2, \\ldots, n_{\\text{buckets}}^1-n_{\\text{buckets}}^2)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord">1</span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\u2212</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1.0972em;vertical-align:-0.2831em;"></span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span><span 
style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2831em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\u2212</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8389em;vertical-align:-0.1944em;"></span><span class="mord">1</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord">1</span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\u2212</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1.0972em;vertical-align:-0.2831em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2831em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2831em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\u2212</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1.0972em;vertical-align:-0.2831em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.8141em;"><span 
style="top:-2.4169em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">buckets</span></span></span></span></span><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">2</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.2831em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>`,ir,lr,cs,em,sn,am,tm,en,nm,om,mr,ss,rm,pr,Au='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi mathvariant="script">O</mi><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo>\xD7</mo><msub><mi>n</mi><mi>s</mi></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\mathcal{O}(n_s \\times n_s)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathcal" style="margin-right:0.02778em;">O</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',cr,hr,Su='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi mathvariant="script">O</mi><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo>\xD7</mo><mi>log</mi><mo>\u2061</mo><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo stretchy="false">)</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\mathcal{O}(n_s \\times \\log(n_s))</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathcal" style="margin-right:0.02778em;">O</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span 
class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">))</span></span></span></span>',dr,gr,ju='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>n</mi><mi>s</mi></msub></mrow><annotation encoding="application/x-tex">n_s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',ur,fr,Rs,Js,an,Se,im,tn,lm,_r,es,mm,nn,pm,cm,on,hm,dm,rn,gm,um,vr,as,fm,yr,Nu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi mathvariant="script">O</mi><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo>\xD7</mo><msub><mi>n</mi><mi>s</mi></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\mathcal{O}(n_s \\times n_s)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathcal" style="margin-right:0.02778em;">O</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span 
class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',br,kr,Du='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi mathvariant="script">O</mi><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo>\xD7</mo><mi>log</mi><mo>\u2061</mo><mo stretchy="false">(</mo><msub><mi>n</mi><mi>s</mi></msub><mo stretchy="false">)</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\mathcal{O}(n_s \\times \\log(n_s))</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathcal" style="margin-right:0.02778em;">O</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">))</span></span></span></span>',wr,zr,Hu='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>n</mi><mi>s</mi></msub></mrow><annotation encoding="application/x-tex">n_s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span 
class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">n</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.1514em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">s</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',xr,Tr,$s,Ys,ln,je,_m,mn,vm,Mr,hs,ym,pn,bm,km,cn,wm,zm,Rr,Zs,xm,Ba,Tm,Mm,$r,Ne,Er,Es,se,hn,De,Rm,dn,$m,qr,N,He,Em,Oe,qm,Xa,Lm,Fm,Pm,qs,Cm,Va,Am,Sm,Ga,jm,Nm,Dm,gn,Hm,Om,Ie,Lr,Ls,ee,un,We,Im,fn,Wm,Fr,J,Qe,Qm,Ue,Um,Ke,Km,Bm,Xm,Be,Vm,Ja,Gm,Jm,Ym,_n,Pr,Fs,ae,vn,Xe,Zm,yn,sp,Cr,rs,Ve,ep,Ps,ap,bn,tp,np,Ge,op,rp,ip,Je,lp,Ya,mp,pp,Ar,Cs,te,kn,Ye,cp,wn,hp,Sr,D,Ze,dp,sa,gp,ea,up,fp,_p,aa,vp,Za,yp,bp,kp,ta,wp,na,zp,xp,Tp,W,oa,Mp,As,Rp,st,$p,Ep,zn,qp,Lp,Fp,ne,Pp,xn,Cp,Ap,ra,jr,Ss,oe,Tn,ia,Sp,Mn,jp,Nr,H,la,Np,js,Dp,Rn,Hp,Op,ma,Ip,Wp,Qp,pa,Up,et,Kp,Bp,Xp,ca,Vp,ha,Gp,Jp,Yp,Q,da,Zp,Ns,sc,at,ec,ac,$n,tc,nc,oc,re,rc,En,ic,lc,ga,Dr,Ds,ie,qn,ua,mc,Ln,pc,Hr,O,fa,cc,Hs,hc,Fn,dc,gc,_a,uc,fc,_c,va,vc,tt,yc,bc,kc,ya,wc,ba,zc,xc,Tc,U,ka,Mc,Os,Rc,nt,$c,Ec,Pn,qc,Lc,Fc,le,Pc,Cn,Cc,Ac,wa,Or,Is,me,An,za,Sc,Sn,jc,Ir,A,xa,Nc,jn,Dc,Hc,Ta,Oc,Ma,Ic,Wc,Qc,Ra,Uc,ot,Kc,Bc,Xc,$a,Vc,Ea,Gc,Jc,Yc,P,qa,Zc,Ws,sh,rt,eh,ah,Nn,th,nh,oh,pe,rh,Dn,ih,lh,La,mh,Hn,ph,ch,Fa,Wr,Qs,ce,On,Pa,hh,In,dh,Qr,S,Ca,gh,Us,uh,Wn,fh,_h,Qn,vh,yh,bh,Aa,kh,Sa,wh,zh,xh,ja,Th,it,Mh,Rh,$h,Na,Eh,Da,qh,Lh,Fh,K,Ha,Ph,Ks,Ch,lt,Ah,Sh,Un,jh,Nh,Dh,he,Hh,Kn,Oh,Ih,Oa,Ur;return w=new V({}),ke=new V({}),Re=new V({}),Fe=new V({}),Se=new V({}),je=new V({}),Ne=new ye({props:{code:`input_ids = tokenizer.encode('This is a sentence from the training data', return_tensors='pt') loss = model(input_ids, labels=input_ids)[0],`,highlighted:`<span class="hljs-attr">input_ids</span> = tokenizer.encode(<span class="hljs-string">&#x27;This is a sentence from the training data&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-attr">loss</span> = model(input_ids, labels=input_ids)[<span class="hljs-number">0</span>]`}}),De=new V({}),He=new Y({props:{name:"class transformers.ReformerConfig",anchor:"transformers.ReformerConfig",parameters:[{name:"attention_head_size",val:" = 64"},{name:"attn_layers",val:" = ['local', 'lsh', 'local', 'lsh', 'local', 'lsh']"},{name:"axial_norm_std",val:" = 1.0"},{name:"axial_pos_embds",val:" = True"},{name:"axial_pos_shape",val:" = [64, 64]"},{name:"axial_pos_embds_dim",val:" = [64, 192]"},{name:"chunk_size_lm_head",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"feed_forward_size",val:" = 512"},{name:"hash_seed",val:" = None"},{name:"hidden_act",val:" = 'relu'"},{name:"hidden_dropout_prob",val:" = 0.05"},{name:"hidden_size",val:" = 256"},{name:"initializer_range",val:" = 0.02"},{name:"is_decoder",val:" = False"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"local_num_chunks_before",val:" = 1"},{name:"local_num_chunks_after",val:" = 0"},{name:"local_attention_probs_dropout_prob",val:" = 0.05"},{name:"local_attn_chunk_length",val:" = 64"},{name:"lsh_attn_chunk_length",val:" = 64"},{name:"lsh_attention_probs_dropout_prob",val:" = 0.0"},{name:"lsh_num_chunks_before",val:" = 1"},{name:"lsh_num_chunks_after",val:" = 0"},{name:"max_position_embeddings",val:" = 
4096"},{name:"num_attention_heads",val:" = 12"},{name:"num_buckets",val:" = None"},{name:"num_hashes",val:" = 1"},{name:"pad_token_id",val:" = 0"},{name:"vocab_size",val:" = 320"},{name:"tie_word_embeddings",val:" = False"},{name:"use_cache",val:" = True"},{name:"classifier_dropout",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/configuration_reformer.py#L30",parametersDescription:[{anchor:"transformers.ReformerConfig.attention_head_size",description:`<strong>attention_head_size</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Dimensionality of the projected key, query and value vectors`,name:"attention_head_size"},{anchor:"transformers.ReformerConfig.attn_layers",description:`<strong>attn_layers</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;local&quot;, &quot;lsh&quot;, &quot;local&quot;, &quot;lsh&quot;, &quot;local&quot;, &quot;lsh&quot;]</code>) &#x2014; List of attention layer types in ascending order. It can be chosen between a LSHSelfAttention layer (<code>&quot;lsh&quot;</code>) and a LocalSelfAttention layer (<code>&quot;local&quot;</code>).</p> <p>For more information on LSHSelfAttention layer, see <a href="reformer#lsh-self-attention">LSH Self Attention</a>. For more information on LocalSelfAttention layer, see <a href="reformer#local-self-attention">Local Self Attention</a>.`,name:"attn_layers"},{anchor:"transformers.ReformerConfig.axial_pos_embds",description:`<strong>axial_pos_embds</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use axial position embeddings. For more information on how axial position embeddings work, see <a href="reformer#axial-positional-encodings">Axial Position Encodings</a>.`,name:"axial_pos_embds"},{anchor:"transformers.ReformerConfig.axial_norm_std",description:`<strong>axial_norm_std</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The standard deviation of the normal_initializer for initializing the weight matrices of the axial positional encodings.`,name:"axial_norm_std"},{anchor:"transformers.ReformerConfig.axial_pos_shape",description:`<strong>axial_pos_shape</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[64, 64]</code>) &#x2014; The position dims of the axial position encodings. During training, the product of the position dims has to be equal to the sequence length.</p> <p>For more information on how axial position embeddings work, see <a href="reformer#axial-positional-encodings">Axial Position Encodings</a>.`,name:"axial_pos_shape"},{anchor:"transformers.ReformerConfig.axial_pos_embds_dim",description:`<strong>axial_pos_embds_dim</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[64, 192]</code>) &#x2014; The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the hidden size.</p> <p>For more information on how axial position embeddings work, see <a href="reformer#axial-positional-encodings">Axial Position Encodings</a>.`,name:"axial_pos_embds_dim"},{anchor:"transformers.ReformerConfig.chunk_size_lm_head",description:`<strong>chunk_size_lm_head</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed forward layer is not chunked. 
A chunk size of n means that the feed forward layer processes n &lt; sequence_length embeddings at a time.</p> <p>For more information on feed forward chunking, see <a href="../glossary#feed-forward-chunking">How does Feed Forward Chunking work?</a>.`,name:"chunk_size_lm_head"},{anchor:"transformers.ReformerConfig.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The token id for the end-of-sentence token.`,name:"eos_token_id"},{anchor:"transformers.ReformerConfig.feed_forward_size",description:`<strong>feed_forward_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the feed_forward layer in the residual attention block.`,name:"feed_forward_size"},{anchor:"transformers.ReformerConfig.hash_seed",description:`<strong>hash_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Seed that can be used to make locality sensitive hashing in <code>LSHSelfAttention</code> deterministic. This should only be set for testing purposes. For evaluation and training purposes <code>hash_seed</code> should be left as <code>None</code> to ensure fully random rotations in the locality sensitive hashing scheme.`,name:"hash_seed"},{anchor:"transformers.ReformerConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the feed forward layer in the residual attention block. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.ReformerConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.ReformerConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the output hidden states of the residual attention blocks.`,name:"hidden_size"},{anchor:"transformers.ReformerConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.ReformerConfig.is_decoder",description:`<strong>is_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use a causal mask in addition to the <code>attention_mask</code> passed to <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModel">ReformerModel</a>. 
When using the Reformer for causal language modeling, this argument should be set to <code>True</code>.`,name:"is_decoder"},{anchor:"transformers.ReformerConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.ReformerConfig.local_chunk_length",description:`<strong>local_chunk_length</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Length of chunk which attends to itself in <code>LocalSelfAttention</code>. Chunking reduces memory complexity from sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk length (chunked self attention).`,name:"local_chunk_length"},{anchor:"transformers.ReformerConfig.local_num_chunks_before",description:`<strong>local_num_chunks_before</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of previous neighbouring chunks to attend to in <code>LocalSelfAttention</code> layer to itself.`,name:"local_num_chunks_before"},{anchor:"transformers.ReformerConfig.local_num_chunks_after",description:`<strong>local_num_chunks_after</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of following neighbouring chunks to attend to in <code>LocalSelfAttention</code> layer in addition to itself.`,name:"local_num_chunks_after"},{anchor:"transformers.ReformerConfig.local_attention_probs_dropout_prob",description:`<strong>local_attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; The dropout ratio for the attention probabilities in <code>LocalSelfAttention</code>.`,name:"local_attention_probs_dropout_prob"},{anchor:"transformers.ReformerConfig.lsh_attn_chunk_length",description:`<strong>lsh_attn_chunk_length</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Length of chunk which attends to itself in <code>LSHSelfAttention</code>. Chunking reduces memory complexity from sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk length (chunked self attention).`,name:"lsh_attn_chunk_length"},{anchor:"transformers.ReformerConfig.lsh_num_chunks_before",description:`<strong>lsh_num_chunks_before</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of previous neighbouring chunks to attend to in <code>LSHSelfAttention</code> layer to itself.`,name:"lsh_num_chunks_before"},{anchor:"transformers.ReformerConfig.lsh_num_chunks_after",description:`<strong>lsh_num_chunks_after</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of following neighbouring chunks to attend to in <code>LSHSelfAttention</code> layer to itself.`,name:"lsh_num_chunks_after"},{anchor:"transformers.ReformerConfig.lsh_attention_probs_dropout_prob",description:`<strong>lsh_attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities in <code>LSHSelfAttention</code>.`,name:"lsh_attention_probs_dropout_prob"},{anchor:"transformers.ReformerConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.ReformerConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.ReformerConfig.num_buckets",description:`<strong>num_buckets</strong> (<code>int</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Number of buckets that the key query vectors can be &#x201C;hashed into&#x201D; using the locality sensitive hashing scheme. Each query key vector is hashed into a hash in <code>1, ..., num_buckets</code>. The number of buckets can also be factorized into a list for improved memory complexity. In this case, each query key vector is hashed into a hash in <code>1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]</code> if <code>num_buckets</code> is factorized into two factors. The number of buckets (or the product of the factors) should approximately equal sequence length / lsh_attn_chunk_length. If <code>num_buckets</code> is not set, a good value is calculated on the fly.`,name:"num_buckets"},{anchor:"transformers.ReformerConfig.num_hashes",description:`<strong>num_hashes</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of hashing rounds (e.g., number of random rotations) in the Locality Sensitive Hashing scheme. The higher <code>num_hashes</code>, the more accurate the <code>LSHSelfAttention</code> becomes, but also the more memory and time intensive the hashing becomes.`,name:"num_hashes"},{anchor:"transformers.ReformerConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The token id for the padding token.`,name:"pad_token_id"},{anchor:"transformers.ReformerConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 320) &#x2014; Vocabulary size of the Reformer model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModel">ReformerModel</a>.`,name:"vocab_size"},{anchor:"transformers.ReformerConfig.tie_word_embeddings",description:`<strong>tie_word_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to tie input and output embeddings.`,name:"tie_word_embeddings"},{anchor:"transformers.ReformerConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.ReformerConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),Ie=new ye({props:{code:`from transformers import ReformerModel, ReformerConfig # Initializing a Reformer configuration configuration = ReformerConfig() # Initializing a Reformer model model = ReformerModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerModel, ReformerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Reformer configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ReformerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Reformer model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ReformerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),We=new V({}),Qe=new Y({props:{name:"class transformers.ReformerTokenizer",anchor:"transformers.ReformerTokenizer",parameters:[{name:"vocab_file",val:""},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"additional_special_tokens",val:" = []"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/tokenization_reformer.py#L46",parametersDescription:[{anchor:"transformers.ReformerTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.ReformerTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.ReformerTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.ReformerTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.ReformerTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.ReformerTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Xe=new V({}),Ve=new Y({props:{name:"class transformers.ReformerTokenizerFast",anchor:"transformers.ReformerTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"additional_special_tokens",val:" = []"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/tokenization_reformer_fast.py#L54",parametersDescription:[{anchor:"transformers.ReformerTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.ReformerTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.ReformerTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.ReformerTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.ReformerTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),Ye=new V({}),Ze=new Y({props:{name:"class transformers.ReformerModel",anchor:"transformers.ReformerModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L1960",parametersDescription:[{anchor:"transformers.ReformerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),oa=new Y({props:{name:"forward",anchor:"transformers.ReformerModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"num_hashes",val:" = None"},{name:"past_buckets_states",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_attentions",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L1988",parametersDescription:[{anchor:"transformers.ReformerModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model&#x2019;s chunk lengths (lsh&#x2019;s, local&#x2019;s or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizer">ReformerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ReformerModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ReformerModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ReformerModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ReformerModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ReformerModel.forward.num_hashes",description:`<strong>num_hashes</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of hashing rounds that should be performed during bucketing. 
Setting this argument overwrites the default defined in <code>config.num_hashes</code>.</p> <p>For more information, see <code>num_hashes</code> in <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>.`,name:"num_hashes"},{anchor:"transformers.ReformerModel.forward.past_buckets_states",description:`<strong>past_buckets_states</strong> (<code>List[Tuple(torch.LongTensor, torch.FloatTensor)]</code>, <em>optional</em>) &#x2014; List of <code>Tuple(torch.LongTensor, torch.FloatTensor</code> of length <code>config.n_layers</code>, with the first element being the previous <em>buckets</em> of shape <code>(batch_size, num_heads, num_hashes, sequence_length)</code>) and the second being the previous <em>hidden_states</em> of shape <code>(batch_size, sequence_length, hidden_size)</code>).</p> <p>Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding.`,name:"past_buckets_states"},{anchor:"transformers.ReformerModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ReformerModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ReformerModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ReformerModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.reformer.modeling_reformer.ReformerModelOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig" >ReformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, hidden_size)</code>) \u2014 Sequence of hidden-states at the last layer of the model.</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. 
If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.</p> </li> <li> <p><strong>past_buckets_states</strong> (<code>List[Tuple(torch.LongTensor, torch.FloatTensor)]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>Tuple(torch.LongTensor, torch.FloatTensor</code> of length <code>config.n_layers</code>, with the first element being the previous <em>buckets</em> of shape <code>(batch_size, num_heads, num_hashes, sequence_length)</code>) and the second being the previous <em>hidden_states</em> of shape <code>(batch_size, sequence_length, hidden_size)</code>).</p> <p>Contains precomputed buckets and hidden-states that can be used (see <code>past_buckets_states</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.reformer.modeling_reformer.ReformerModelOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),ne=new Jn({props:{$$slots:{default:[Uu]},$$scope:{ctx:G}}}),ra=new ye({props:{code:`from transformers import ReformerTokenizer, ReformerModel import torch tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') model = ReformerModel.from_pretrained('google/reformer-crime-and-punishment') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerTokenizer, ReformerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ReformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ReformerModel.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ia=new V({}),la=new Y({props:{name:"class 
transformers.ReformerModelWithLMHead",anchor:"transformers.ReformerModelWithLMHead",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2172",parametersDescription:[{anchor:"transformers.ReformerModelWithLMHead.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),da=new Y({props:{name:"forward",anchor:"transformers.ReformerModelWithLMHead.forward",parameters:[{name:"input_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"num_hashes",val:" = None"},{name:"past_buckets_states",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_attentions",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2195",parametersDescription:[{anchor:"transformers.ReformerModelWithLMHead.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model&#x2019;s chunk lengths (lsh&#x2019;s, local&#x2019;s or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizer">ReformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ReformerModelWithLMHead.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ReformerModelWithLMHead.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ReformerModelWithLMHead.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ReformerModelWithLMHead.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ReformerModelWithLMHead.forward.num_hashes",description:`<strong>num_hashes</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites the default defined in <code>config.num_hashes</code>.</p> <p>For more information, see <code>num_hashes</code> in <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>.`,name:"num_hashes"},{anchor:"transformers.ReformerModelWithLMHead.forward.past_buckets_states",description:`<strong>past_buckets_states</strong> (<code>List[Tuple(torch.LongTensor, torch.FloatTensor)]</code>, <em>optional</em>) &#x2014; List of <code>Tuple(torch.LongTensor, torch.FloatTensor</code> of length <code>config.n_layers</code>, with the first element being the previous <em>buckets</em> of shape <code>(batch_size, num_heads, num_hashes, sequence_length)</code>) and the second being the previous <em>hidden_states</em> of shape <code>(batch_size, sequence_length, hidden_size)</code>).</p> <p>Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding.`,name:"past_buckets_states"},{anchor:"transformers.ReformerModelWithLMHead.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ReformerModelWithLMHead.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ReformerModelWithLMHead.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ReformerModelWithLMHead.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ReformerModelWithLMHead.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig" >ReformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),re=new Jn({props:{$$slots:{default:[Ku]},$$scope:{ctx:G}}}),ga=new ye({props:{code:`import torch from transformers import ReformerTokenizer, ReformerModelWithLMHead tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') model = ReformerModelWithLMHead.from_pretrained('google/reformer-crime-and-punishment') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") 
outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerTokenizer, ReformerModelWithLMHead <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ReformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ReformerModelWithLMHead.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ua=new V({}),fa=new Y({props:{name:"class transformers.ReformerForMaskedLM",anchor:"transformers.ReformerForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2292",parametersDescription:[{anchor:"transformers.ReformerForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ka=new Y({props:{name:"forward",anchor:"transformers.ReformerForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"num_hashes",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_attentions",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2310",parametersDescription:[{anchor:"transformers.ReformerForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model&#x2019;s chunk lengths (lsh&#x2019;s, local&#x2019;s or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizer">ReformerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ReformerForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ReformerForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ReformerForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ReformerForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ReformerForMaskedLM.forward.num_hashes",description:`<strong>num_hashes</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of hashing rounds that should be performed during bucketing. 
Setting this argument overwrites the default defined in <code>config.num_hashes</code>.</p> <p>For more information, see <code>num_hashes</code> in <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>.`,name:"num_hashes"},{anchor:"transformers.ReformerForMaskedLM.forward.past_buckets_states",description:`<strong>past_buckets_states</strong> (<code>List[Tuple(torch.LongTensor, torch.FloatTensor)]</code>, <em>optional</em>) &#x2014; List of <code>Tuple(torch.LongTensor, torch.FloatTensor</code> of length <code>config.n_layers</code>, with the first element being the previous <em>buckets</em> of shape <code>(batch_size, num_heads, num_hashes, sequence_length)</code>) and the second being the previous <em>hidden_states</em> of shape <code>(batch_size, sequence_length, hidden_size)</code>).</p> <p>Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding.`,name:"past_buckets_states"},{anchor:"transformers.ReformerForMaskedLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ReformerForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ReformerForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ReformerForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ReformerForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig" >ReformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),le=new Jn({props:{$$slots:{default:[Bu]},$$scope:{ctx:G}}}),wa=new ye({props:{code:`from transformers import ReformerTokenizer, ReformerForMaskedLM import torch tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') model = ReformerForMaskedLM.from_pretrained('google/reformer-crime-and-punishment') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerTokenizer, ReformerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ReformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
ReformerForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),za=new V({}),xa=new Y({props:{name:"class transformers.ReformerForSequenceClassification",anchor:"transformers.ReformerForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2377",parametersDescription:[{anchor:"transformers.ReformerForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qa=new Y({props:{name:"forward",anchor:"transformers.ReformerForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"num_hashes",val:" = None"},{name:"labels",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_attentions",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2391",parametersDescription:[{anchor:"transformers.ReformerForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model&#x2019;s chunk lengths (lsh&#x2019;s, local&#x2019;s or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizer">ReformerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ReformerForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ReformerForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ReformerForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ReformerForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ReformerForSequenceClassification.forward.num_hashes",description:`<strong>num_hashes</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of hashing rounds that should be performed during bucketing. 
Setting this argument overwrites the default defined in <code>config.num_hashes</code>.</p> <p>For more information, see <code>num_hashes</code> in <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>.`,name:"num_hashes"},{anchor:"transformers.ReformerForSequenceClassification.forward.past_buckets_states",description:`<strong>past_buckets_states</strong> (<code>List[Tuple(torch.LongTensor, torch.FloatTensor)]</code>, <em>optional</em>) &#x2014; List of <code>Tuple(torch.LongTensor, torch.FloatTensor</code> of length <code>config.n_layers</code>, with the first element being the previous <em>buckets</em> of shape <code>(batch_size, num_heads, num_hashes, sequence_length)</code>) and the second being the previous <em>hidden_states</em> of shape <code>(batch_size, sequence_length, hidden_size)</code>).</p> <p>Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding.`,name:"past_buckets_states"},{anchor:"transformers.ReformerForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ReformerForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ReformerForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ReformerForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ReformerForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig" >ReformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),pe=new Jn({props:{$$slots:{default:[Xu]},$$scope:{ctx:G}}}),La=new ye({props:{code:`from transformers import ReformerTokenizer, ReformerForSequenceClassification import torch tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') model = ReformerForSequenceClassification.from_pretrained('google/reformer-crime-and-punishment') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerTokenizer, ReformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ReformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
ReformerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Fa=new ye({props:{code:`from transformers import ReformerTokenizer, ReformerForSequenceClassification import torch tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') model = ReformerForSequenceClassification.from_pretrained('google/reformer-crime-and-punishment', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerTokenizer, ReformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ReformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ReformerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pa=new V({}),Ca=new Y({props:{name:"class transformers.ReformerForQuestionAnswering",anchor:"transformers.ReformerForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2497",parametersDescription:[{anchor:"transformers.ReformerForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ha=new Y({props:{name:"forward",anchor:"transformers.ReformerForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"num_hashes",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_attentions",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/reformer/modeling_reformer.py#L2509",parametersDescription:[{anchor:"transformers.ReformerForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model&#x2019;s chunk lengths (lsh&#x2019;s, local&#x2019;s or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerTokenizer">ReformerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ReformerForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.ReformerForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.ReformerForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ReformerForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.ReformerForQuestionAnswering.forward.num_hashes",description:`<strong>num_hashes</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites the default defined in <code>config.num_hashes</code>.</p> <p>For more information, see <code>num_hashes</code> in <a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig">ReformerConfig</a>.`,name:"num_hashes"},{anchor:"transformers.ReformerForQuestionAnswering.forward.past_buckets_states",description:`<strong>past_buckets_states</strong> (<code>List[Tuple(torch.LongTensor, torch.FloatTensor)]</code>, <em>optional</em>) &#x2014; List of <code>Tuple(torch.LongTensor, torch.FloatTensor</code> of length <code>config.n_layers</code>, with the first element being the previous <em>buckets</em> of shape <code>(batch_size, num_heads, num_hashes, sequence_length)</code>) and the second being the previous <em>hidden_states</em> of shape <code>(batch_size, sequence_length, hidden_size)</code>).</p> <p>Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding.`,name:"past_buckets_states"},{anchor:"transformers.ReformerForQuestionAnswering.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.ReformerForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ReformerForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ReformerForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ReformerForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.ReformerForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerConfig" >ReformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to 
compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),he=new Jn({props:{$$slots:{default:[Vu]},$$scope:{ctx:G}}}),Oa=new ye({props:{code:`from transformers import ReformerTokenizer, ReformerForQuestionAnswering import torch tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') model = ReformerForQuestionAnswering.from_pretrained('google/reformer-crime-and-punishment') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ReformerTokenizer, ReformerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ReformerTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ReformerForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/reformer-crime-and-punishment&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){d=o("meta"),M=m(),g=o("h1"),z=o("a"),R=o("span"),u(w.$$.fragment),k=m(),L=o("span"),xi=t("Reformer"),Yn=m(),ws=o("p"),Tt=o("strong"),Ti=t("DISCLAIMER:"),Mi=t(" This model is still a work in progress, if you see something strange, file a "),be=o("a"),Ri=t("Github Issue"),$i=t("."),Zn=m(),zs=o("h2"),Bs=o("a"),Mt=o("span"),u(ke.$$.fragment),Ei=m(),Rt=o("span"),qi=t("Overview"),so=m(),Xs=o("p"),Li=t("The Reformer model was proposed in the paper "),we=o("a"),Fi=t("Reformer: The Efficient Transformer"),Pi=t(" by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya."),eo=m(),Qa=o("p"),Ci=t("The abstract from the paper is the following:"),ao=m(),Ua=o("p"),$t=o("em"),Ai=t(`Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. 
For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O(L^2) to O(Llog(L)), where L is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of N times, where N is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.`),to=m(),is=o("p"),Si=t("This model was contributed by "),ze=o("a"),ji=t("patrickvonplaten"),Ni=t(`. The Authors\u2019 code can be found `),xe=o("a"),Di=t("here"),Hi=t("."),no=m(),Te=o("p"),Et=o("strong"),Oi=t("Note"),Ii=t(":"),oo=m(),Ka=o("ul"),ls=o("li"),Wi=t("Reformer does "),qt=o("strong"),Qi=t("not"),Ui=t(" work with "),Lt=o("em"),Ki=t("torch.nn.DataParallel"),Bi=t(" due to a bug in PyTorch, see "),Me=o("a"),Xi=t("issue #36035"),ro=m(),xs=o("h2"),Vs=o("a"),Ft=o("span"),u(Re.$$.fragment),Vi=m(),Pt=o("span"),Gi=t("Axial Positional Encodings"),io=m(),$=o("p"),Ji=t("Axial Positional Encodings were first implemented in Google\u2019s "),$e=o("a"),Yi=t("trax library"),Zi=t(` and developed by the authors of this model\u2019s paper. In models that are treating very long input sequences, the conventional position id encodings store an embedings vector of size `),lo=new x,mo=t(" being the "),Ct=o("code"),sl=t("config.hidden_size"),el=t(` for every position `),po=new x,co=t(", with "),ho=new x,go=t(" being "),At=o("code"),al=t("config.max_embedding_size"),tl=t(`. This means that having a sequence length of `),uo=new x,fo=t(" and a "),St=o("code"),nl=t("config.hidden_size"),ol=t(" of "),_o=new x,vo=t(` would result in a position encoding matrix: `),yo=new x,bo=m(),Ts=o("p"),rl=t("which alone has over 500M parameters to store. Axial positional encodings factorize "),ko=new x,wo=t(` into two matrices: `),zo=new x,xo=m(),Ee=o("p"),il=t(`and `),To=new x,Mo=m(),qe=o("p"),ll=t(`with: `),Ro=new x,$o=m(),Le=o("p"),ml=t(`Therefore the following holds: `),Eo=new x,qo=m(),F=o("p"),pl=t("Intuitively, this means that a position embedding vector "),Lo=new x,Fo=t(` is now the composition of two factorized embedding vectors: `),Po=new x,Co=t(", where as the "),jt=o("code"),cl=t("config.max_embedding_size"),hl=t(` dimension `),Ao=new x,So=t(" is factorized into "),jo=new x,No=t(`. This design ensures that each position embedding vector `),Do=new x,Ho=t(" is unique."),Oo=m(),ms=o("p"),dl=t("Using the above example again, axial position encoding with "),Io=new x,Wo=t(` can drastically reduced the number of parameters to `),Qo=new x,Uo=t(" parameters."),Ko=m(),E=o("p"),gl=t("In practice, the parameter "),Nt=o("code"),ul=t("config.axial_pos_embds_dim"),fl=t(" is set to a tuple "),Bo=new x,Xo=t(` which sum has to be equal to `),Dt=o("code"),_l=t("config.hidden_size"),vl=t(" and "),Ht=o("code"),yl=t("config.axial_pos_shape"),bl=t(" is set to a tuple "),Vo=new x,Go=t(` which product has to be equal to `),Ot=o("code"),kl=t("config.max_embedding_size"),wl=t(", which during training has to be equal to the "),It=o("em"),zl=t(`sequence length`),xl=t(" of the "),Wt=o("code"),Tl=t("input_ids"),Ml=t("."),Jo=m(),Ms=o("h2"),Gs=o("a"),Qt=o("span"),u(Fe.$$.fragment),Rl=m(),Ut=o("span"),$l=t("LSH Self Attention"),Yo=m(),Z=o("p"),El=t(`In Locality sensitive hashing (LSH) self attention the key and query projection weights are tied. Therefore, the key query embedding vectors are also tied. 
LSH self attention uses the locality sensitive hashing mechanism proposed in `),Pe=o("a"),ql=t("Practical and Optimal LSH for Angular Distance"),Ll=t(` to assign each of the tied key query embedding vectors to one of `),Kt=o("code"),Fl=t("config.num_buckets"),Pl=t(` possible buckets. The premise is that the more \u201Csimilar\u201D key query embedding vectors (in terms of `),Bt=o("em"),Cl=t("cosine similarity"),Al=t(`) are to each other, the more likely they are assigned to the same bucket.`),Zo=m(),j=o("p"),Sl=t("The accuracy of the LSH mechanism can be improved by increasing "),Xt=o("code"),jl=t("config.num_hashes"),Nl=t(` or directly the argument `),Vt=o("code"),Dl=t("num_hashes"),Hl=t(` of the forward function so that the output of the LSH self attention better approximates the output of the \u201Cnormal\u201D full self attention. The buckets are then sorted and chunked into query key embedding vector chunks each of length `),Gt=o("code"),Ol=t("config.lsh_chunk_length"),Il=t(`. For each chunk, the query embedding vectors attend to its key vectors (which are tied to themselves) and to the key embedding vectors of `),Jt=o("code"),Wl=t("config.lsh_num_chunks_before"),Ql=t(` previous neighboring chunks and `),Yt=o("code"),Ul=t("config.lsh_num_chunks_after"),Kl=t(" following neighboring chunks."),sr=m(),ps=o("p"),Bl=t("For more information, see the "),Ce=o("a"),Xl=t("original Paper"),Vl=t(" or this great "),Ae=o("a"),Gl=t("blog post"),Jl=t("."),er=m(),I=o("p"),Yl=t("Note that "),Zt=o("code"),Zl=t("config.num_buckets"),sm=t(" can also be factorized into a list "),ar=new x,tr=t(". This way instead of assigning the query key embedding vectors to one of "),nr=new x,or=t(" they are assigned to one of "),rr=new x,ir=t(`. This is crucial for very long sequences to save memory.`),lr=m(),cs=o("p"),em=t("When training a model from scratch, it is recommended to leave "),sn=o("code"),am=t("config.num_buckets=None"),tm=t(`, so that depending on the sequence length a good value for `),en=o("code"),nm=t("num_buckets"),om=t(` is calculated on the fly. 
This value will then automatically be saved in the config and should be reused for inference.`),mr=m(),ss=o("p"),rm=t(`Using LSH self attention, the memory and time complexity of the query-key matmul operation can be reduced from `),pr=new x,cr=t(" to "),hr=new x,dr=t(`, which usually represents the memory and time bottleneck in a transformer model, with `),gr=new x,ur=t(" being the sequence length."),fr=m(),Rs=o("h2"),Js=o("a"),an=o("span"),u(Se.$$.fragment),im=m(),tn=o("span"),lm=t("Local Self Attention"),_r=m(),es=o("p"),mm=t(`Local self attention is essentially a \u201Cnormal\u201D self attention layer with key, query and value projections, but is chunked so that in each chunk of length `),nn=o("code"),pm=t("config.local_chunk_length"),cm=t(` the query embedding vectors only attends to the key embedding vectors in its chunk and to the key embedding vectors of `),on=o("code"),hm=t("config.local_num_chunks_before"),dm=t(` previous neighboring chunks and `),rn=o("code"),gm=t("config.local_num_chunks_after"),um=t(" following neighboring chunks."),vr=m(),as=o("p"),fm=t(`Using Local self attention, the memory and time complexity of the query-key matmul operation can be reduced from `),yr=new x,br=t(" to "),kr=new x,wr=t(`, which usually represents the memory and time bottleneck in a transformer model, with `),zr=new x,xr=t(" being the sequence length."),Tr=m(),$s=o("h2"),Ys=o("a"),ln=o("span"),u(je.$$.fragment),_m=m(),mn=o("span"),vm=t("Training"),Mr=m(),hs=o("p"),ym=t(`During training, we must ensure that the sequence length is set to a value that can be divided by the least common multiple of `),pn=o("code"),bm=t("config.lsh_chunk_length"),km=t(" and "),cn=o("code"),wm=t("config.local_chunk_length"),zm=t(` and that the parameters of the Axial Positional Encodings are correctly set as described above. Reformer is very memory efficient so that the model can easily be trained on sequences as long as 64000 tokens.`),Rr=m(),Zs=o("p"),xm=t("For training, the "),Ba=o("a"),Tm=t("ReformerModelWithLMHead"),Mm=t(" should be used as follows:"),$r=m(),u(Ne.$$.fragment),Er=m(),Es=o("h2"),se=o("a"),hn=o("span"),u(De.$$.fragment),Rm=m(),dn=o("span"),$m=t("ReformerConfig"),qr=m(),N=o("div"),u(He.$$.fragment),Em=m(),Oe=o("p"),qm=t("This is the configuration class to store the configuration of a "),Xa=o("a"),Lm=t("ReformerModel"),Fm=t(`. It is used to instantiate a Reformer model according to the specified arguments, defining the model architecture.`),Pm=m(),qs=o("p"),Cm=t("Configuration objects inherit from "),Va=o("a"),Am=t("PretrainedConfig"),Sm=t(` and can be used to control the model outputs. Read the documentation from `),Ga=o("a"),jm=t("PretrainedConfig"),Nm=t(" for more information."),Dm=m(),gn=o("p"),Hm=t("Examples:"),Om=m(),u(Ie.$$.fragment),Lr=m(),Ls=o("h2"),ee=o("a"),un=o("span"),u(We.$$.fragment),Im=m(),fn=o("span"),Wm=t("ReformerTokenizer"),Fr=m(),J=o("div"),u(Qe.$$.fragment),Qm=m(),Ue=o("p"),Um=t("Construct a Reformer tokenizer. Based on "),Ke=o("a"),Km=t("SentencePiece"),Bm=t(" ."),Xm=m(),Be=o("p"),Vm=t("This tokenizer inherits from "),Ja=o("a"),Gm=t("PreTrainedTokenizer"),Jm=t(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ym=m(),_n=o("div"),Pr=m(),Fs=o("h2"),ae=o("a"),vn=o("span"),u(Xe.$$.fragment),Zm=m(),yn=o("span"),sp=t("ReformerTokenizerFast"),Cr=m(),rs=o("div"),u(Ve.$$.fragment),ep=m(),Ps=o("p"),ap=t("Construct a \u201Cfast\u201D Reformer tokenizer (backed by HuggingFace\u2019s "),bn=o("em"),tp=t("tokenizers"),np=t(" library). Based on "),Ge=o("a"),op=t("Unigram"),rp=t("."),ip=m(),Je=o("p"),lp=t("This tokenizer inherits from "),Ya=o("a"),mp=t("PreTrainedTokenizerFast"),pp=t(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ar=m(),Cs=o("h2"),te=o("a"),kn=o("span"),u(Ye.$$.fragment),cp=m(),wn=o("span"),hp=t("ReformerModel"),Sr=m(),D=o("div"),u(Ze.$$.fragment),dp=m(),sa=o("p"),gp=t(`The bare Reformer Model transformer outputting raw hidden-stateswithout any specific head on top. Reformer was proposed in `),ea=o("a"),up=t("Reformer: The Efficient Transformer"),fp=t(` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),_p=m(),aa=o("p"),vp=t("This model inherits from "),Za=o("a"),yp=t("PreTrainedModel"),bp=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kp=m(),ta=o("p"),wp=t("This model is also a PyTorch "),na=o("a"),zp=t("torch.nn.Module"),xp=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tp=m(),W=o("div"),u(oa.$$.fragment),Mp=m(),As=o("p"),Rp=t("The "),st=o("a"),$p=t("ReformerModel"),Ep=t(" forward method, overrides the "),zn=o("code"),qp=t("__call__"),Lp=t(" special method."),Fp=m(),u(ne.$$.fragment),Pp=m(),xn=o("p"),Cp=t("Example:"),Ap=m(),u(ra.$$.fragment),jr=m(),Ss=o("h2"),oe=o("a"),Tn=o("span"),u(ia.$$.fragment),Sp=m(),Mn=o("span"),jp=t("ReformerModelWithLMHead"),Nr=m(),H=o("div"),u(la.$$.fragment),Np=m(),js=o("p"),Dp=t("Reformer Model with a "),Rn=o("code"),Hp=t("language modeling"),Op=t(` head on top. Reformer was proposed in `),ma=o("a"),Ip=t("Reformer: The Efficient Transformer"),Wp=t(` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),Qp=m(),pa=o("p"),Up=t("This model inherits from "),et=o("a"),Kp=t("PreTrainedModel"),Bp=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xp=m(),ca=o("p"),Vp=t("This model is also a PyTorch "),ha=o("a"),Gp=t("torch.nn.Module"),Jp=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yp=m(),Q=o("div"),u(da.$$.fragment),Zp=m(),Ns=o("p"),sc=t("The "),at=o("a"),ec=t("ReformerModelWithLMHead"),ac=t(" forward method, overrides the "),$n=o("code"),tc=t("__call__"),nc=t(" special method."),oc=m(),u(re.$$.fragment),rc=m(),En=o("p"),ic=t("Example:"),lc=m(),u(ga.$$.fragment),Dr=m(),Ds=o("h2"),ie=o("a"),qn=o("span"),u(ua.$$.fragment),mc=m(),Ln=o("span"),pc=t("ReformerForMaskedLM"),Hr=m(),O=o("div"),u(fa.$$.fragment),cc=m(),Hs=o("p"),hc=t("Reformer Model with a "),Fn=o("code"),dc=t("language modeling"),gc=t(` head on top. 
Reformer was proposed in `),_a=o("a"),uc=t("Reformer: The Efficient Transformer"),fc=t(` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),_c=m(),va=o("p"),vc=t("This model inherits from "),tt=o("a"),yc=t("PreTrainedModel"),bc=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kc=m(),ya=o("p"),wc=t("This model is also a PyTorch "),ba=o("a"),zc=t("torch.nn.Module"),xc=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tc=m(),U=o("div"),u(ka.$$.fragment),Mc=m(),Os=o("p"),Rc=t("The "),nt=o("a"),$c=t("ReformerForMaskedLM"),Ec=t(" forward method, overrides the "),Pn=o("code"),qc=t("__call__"),Lc=t(" special method."),Fc=m(),u(le.$$.fragment),Pc=m(),Cn=o("p"),Cc=t("Example:"),Ac=m(),u(wa.$$.fragment),Or=m(),Is=o("h2"),me=o("a"),An=o("span"),u(za.$$.fragment),Sc=m(),Sn=o("span"),jc=t("ReformerForSequenceClassification"),Ir=m(),A=o("div"),u(xa.$$.fragment),Nc=m(),jn=o("p"),Dc=t(`Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Hc=m(),Ta=o("p"),Oc=t("Reformer was proposed in "),Ma=o("a"),Ic=t("Reformer: The Efficient Transformer"),Wc=t(` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),Qc=m(),Ra=o("p"),Uc=t("This model inherits from "),ot=o("a"),Kc=t("PreTrainedModel"),Bc=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xc=m(),$a=o("p"),Vc=t("This model is also a PyTorch "),Ea=o("a"),Gc=t("torch.nn.Module"),Jc=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yc=m(),P=o("div"),u(qa.$$.fragment),Zc=m(),Ws=o("p"),sh=t("The "),rt=o("a"),eh=t("ReformerForSequenceClassification"),ah=t(" forward method, overrides the "),Nn=o("code"),th=t("__call__"),nh=t(" special method."),oh=m(),u(pe.$$.fragment),rh=m(),Dn=o("p"),ih=t("Example of single-label classification:"),lh=m(),u(La.$$.fragment),mh=m(),Hn=o("p"),ph=t("Example of multi-label classification:"),ch=m(),u(Fa.$$.fragment),Wr=m(),Qs=o("h2"),ce=o("a"),On=o("span"),u(Pa.$$.fragment),hh=m(),In=o("span"),dh=t("ReformerForQuestionAnswering"),Qr=m(),S=o("div"),u(Ca.$$.fragment),gh=m(),Us=o("p"),uh=t(`Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on top of hidden-states output to compute `),Wn=o("code"),fh=t("span start logits"),_h=t(" and "),Qn=o("code"),vh=t("span end logits"),yh=t("."),bh=m(),Aa=o("p"),kh=t("Reformer was proposed in "),Sa=o("a"),wh=t("Reformer: The Efficient Transformer"),zh=t(` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),xh=m(),ja=o("p"),Th=t("This model inherits from "),it=o("a"),Mh=t("PreTrainedModel"),Rh=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$h=m(),Na=o("p"),Eh=t("This model is also a PyTorch "),Da=o("a"),qh=t("torch.nn.Module"),Lh=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fh=m(),K=o("div"),u(Ha.$$.fragment),Ph=m(),Ks=o("p"),Ch=t("The "),lt=o("a"),Ah=t("ReformerForQuestionAnswering"),Sh=t(" forward method, overrides the "),Un=o("code"),jh=t("__call__"),Nh=t(" special method."),Dh=m(),u(he.$$.fragment),Hh=m(),Kn=o("p"),Oh=t("Example:"),Ih=m(),u(Oa.$$.fragment),this.h()},l(e){const c=Qu('[data-svelte="svelte-1phssyn"]',document.head);d=r(c,"META",{name:!0,content:!0}),c.forEach(a),M=p(e),g=r(e,"H1",{class:!0});var Ia=i(g);z=r(Ia,"A",{id:!0,class:!0,href:!0});var Bn=i(z);R=r(Bn,"SPAN",{});var Xn=i(R);f(w.$$.fragment,Xn),Xn.forEach(a),Bn.forEach(a),k=p(Ia),L=r(Ia,"SPAN",{});var Vn=i(L);xi=n(Vn,"Reformer"),Vn.forEach(a),Ia.forEach(a),Yn=p(e),ws=r(e,"P",{});var de=i(ws);Tt=r(de,"STRONG",{});var Bh=i(Tt);Ti=n(Bh,"DISCLAIMER:"),Bh.forEach(a),Mi=n(de," This model is still a work in progress, if you see something strange, file a "),be=r(de,"A",{href:!0,rel:!0});var Xh=i(be);Ri=n(Xh,"Github Issue"),Xh.forEach(a),$i=n(de,"."),de.forEach(a),Zn=p(e),zs=r(e,"H2",{class:!0});var Kr=i(zs);Bs=r(Kr,"A",{id:!0,class:!0,href:!0});var Vh=i(Bs);Mt=r(Vh,"SPAN",{});var Gh=i(Mt);f(ke.$$.fragment,Gh),Gh.forEach(a),Vh.forEach(a),Ei=p(Kr),Rt=r(Kr,"SPAN",{});var Jh=i(Rt);qi=n(Jh,"Overview"),Jh.forEach(a),Kr.forEach(a),so=p(e),Xs=r(e,"P",{});var Br=i(Xs);Li=n(Br,"The Reformer model was proposed in the paper "),we=r(Br,"A",{href:!0,rel:!0});var Yh=i(we);Fi=n(Yh,"Reformer: The Efficient Transformer"),Yh.forEach(a),Pi=n(Br," by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya."),Br.forEach(a),eo=p(e),Qa=r(e,"P",{});var Zh=i(Qa);Ci=n(Zh,"The abstract from the paper is the following:"),Zh.forEach(a),ao=p(e),Ua=r(e,"P",{});var sd=i(Ua);$t=r(sd,"EM",{});var ed=i($t);Ai=n(ed,`Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O(L^2) to O(Llog(L)), where L is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of N times, where N is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.`),ed.forEach(a),sd.forEach(a),to=p(e),is=r(e,"P",{});var mt=i(is);Si=n(mt,"This model was contributed by "),ze=r(mt,"A",{href:!0,rel:!0});var ad=i(ze);ji=n(ad,"patrickvonplaten"),ad.forEach(a),Ni=n(mt,`. 
The Authors\u2019 code can be found `),xe=r(mt,"A",{href:!0,rel:!0});var td=i(xe);Di=n(td,"here"),td.forEach(a),Hi=n(mt,"."),mt.forEach(a),no=p(e),Te=r(e,"P",{});var Wh=i(Te);Et=r(Wh,"STRONG",{});var nd=i(Et);Oi=n(nd,"Note"),nd.forEach(a),Ii=n(Wh,":"),Wh.forEach(a),oo=p(e),Ka=r(e,"UL",{});var od=i(Ka);ls=r(od,"LI",{});var Wa=i(ls);Wi=n(Wa,"Reformer does "),qt=r(Wa,"STRONG",{});var rd=i(qt);Qi=n(rd,"not"),rd.forEach(a),Ui=n(Wa," work with "),Lt=r(Wa,"EM",{});var id=i(Lt);Ki=n(id,"torch.nn.DataParallel"),id.forEach(a),Bi=n(Wa," due to a bug in PyTorch, see "),Me=r(Wa,"A",{href:!0,rel:!0});var ld=i(Me);Xi=n(ld,"issue #36035"),ld.forEach(a),Wa.forEach(a),od.forEach(a),ro=p(e),xs=r(e,"H2",{class:!0});var Xr=i(xs);Vs=r(Xr,"A",{id:!0,class:!0,href:!0});var md=i(Vs);Ft=r(md,"SPAN",{});var pd=i(Ft);f(Re.$$.fragment,pd),pd.forEach(a),md.forEach(a),Vi=p(Xr),Pt=r(Xr,"SPAN",{});var cd=i(Pt);Gi=n(cd,"Axial Positional Encodings"),cd.forEach(a),Xr.forEach(a),io=p(e),$=r(e,"P",{});var q=i($);Ji=n(q,"Axial Positional Encodings were first implemented in Google\u2019s "),$e=r(q,"A",{href:!0,rel:!0});var hd=i($e);Yi=n(hd,"trax library"),hd.forEach(a),Zi=n(q,` and developed by the authors of this model\u2019s paper. In models that are treating very long input sequences, the conventional position id encodings store an embedings vector of size `),lo=T(q),mo=n(q," being the "),Ct=r(q,"CODE",{});var dd=i(Ct);sl=n(dd,"config.hidden_size"),dd.forEach(a),el=n(q,` for every position `),po=T(q),co=n(q,", with "),ho=T(q),go=n(q," being "),At=r(q,"CODE",{});var gd=i(At);al=n(gd,"config.max_embedding_size"),gd.forEach(a),tl=n(q,`. This means that having a sequence length of `),uo=T(q),fo=n(q," and a "),St=r(q,"CODE",{});var ud=i(St);nl=n(ud,"config.hidden_size"),ud.forEach(a),ol=n(q," of "),_o=T(q),vo=n(q,` would result in a position encoding matrix: `),yo=T(q),q.forEach(a),bo=p(e),Ts=r(e,"P",{});var Gn=i(Ts);rl=n(Gn,"which alone has over 500M parameters to store. Axial positional encodings factorize "),ko=T(Gn),wo=n(Gn,` into two matrices: `),zo=T(Gn),Gn.forEach(a),xo=p(e),Ee=r(e,"P",{});var Qh=i(Ee);il=n(Qh,`and `),To=T(Qh),Qh.forEach(a),Mo=p(e),qe=r(e,"P",{});var Uh=i(qe);ll=n(Uh,`with: `),Ro=T(Uh),Uh.forEach(a),$o=p(e),Le=r(e,"P",{});var Kh=i(Le);ml=n(Kh,`Therefore the following holds: `),Eo=T(Kh),Kh.forEach(a),qo=p(e),F=r(e,"P",{});var B=i(F);pl=n(B,"Intuitively, this means that a position embedding vector "),Lo=T(B),Fo=n(B,` is now the composition of two factorized embedding vectors: `),Po=T(B),Co=n(B,", where as the "),jt=r(B,"CODE",{});var fd=i(jt);cl=n(fd,"config.max_embedding_size"),fd.forEach(a),hl=n(B,` dimension `),Ao=T(B),So=n(B," is factorized into "),jo=T(B),No=n(B,`. 
This design ensures that each position embedding vector `),Do=T(B),Ho=n(B," is unique."),B.forEach(a),Oo=p(e),ms=r(e,"P",{});var pt=i(ms);dl=n(pt,"Using the above example again, axial position encoding with "),Io=T(pt),Wo=n(pt,` can drastically reduced the number of parameters to `),Qo=T(pt),Uo=n(pt," parameters."),pt.forEach(a),Ko=p(e),E=r(e,"P",{});var C=i(E);gl=n(C,"In practice, the parameter "),Nt=r(C,"CODE",{});var _d=i(Nt);ul=n(_d,"config.axial_pos_embds_dim"),_d.forEach(a),fl=n(C," is set to a tuple "),Bo=T(C),Xo=n(C,` which sum has to be equal to `),Dt=r(C,"CODE",{});var vd=i(Dt);_l=n(vd,"config.hidden_size"),vd.forEach(a),vl=n(C," and "),Ht=r(C,"CODE",{});var yd=i(Ht);yl=n(yd,"config.axial_pos_shape"),yd.forEach(a),bl=n(C," is set to a tuple "),Vo=T(C),Go=n(C,` which product has to be equal to `),Ot=r(C,"CODE",{});var bd=i(Ot);kl=n(bd,"config.max_embedding_size"),bd.forEach(a),wl=n(C,", which during training has to be equal to the "),It=r(C,"EM",{});var kd=i(It);zl=n(kd,`sequence length`),kd.forEach(a),xl=n(C," of the "),Wt=r(C,"CODE",{});var wd=i(Wt);Tl=n(wd,"input_ids"),wd.forEach(a),Ml=n(C,"."),C.forEach(a),Jo=p(e),Ms=r(e,"H2",{class:!0});var Vr=i(Ms);Gs=r(Vr,"A",{id:!0,class:!0,href:!0});var zd=i(Gs);Qt=r(zd,"SPAN",{});var xd=i(Qt);f(Fe.$$.fragment,xd),xd.forEach(a),zd.forEach(a),Rl=p(Vr),Ut=r(Vr,"SPAN",{});var Td=i(Ut);$l=n(Td,"LSH Self Attention"),Td.forEach(a),Vr.forEach(a),Yo=p(e),Z=r(e,"P",{});var ge=i(Z);El=n(ge,`In Locality sensitive hashing (LSH) self attention the key and query projection weights are tied. Therefore, the key query embedding vectors are also tied. LSH self attention uses the locality sensitive hashing mechanism proposed in `),Pe=r(ge,"A",{href:!0,rel:!0});var Md=i(Pe);ql=n(Md,"Practical and Optimal LSH for Angular Distance"),Md.forEach(a),Ll=n(ge,` to assign each of the tied key query embedding vectors to one of `),Kt=r(ge,"CODE",{});var Rd=i(Kt);Fl=n(Rd,"config.num_buckets"),Rd.forEach(a),Pl=n(ge,` possible buckets. The premise is that the more \u201Csimilar\u201D key query embedding vectors (in terms of `),Bt=r(ge,"EM",{});var $d=i(Bt);Cl=n($d,"cosine similarity"),$d.forEach(a),Al=n(ge,`) are to each other, the more likely they are assigned to the same bucket.`),ge.forEach(a),Zo=p(e),j=r(e,"P",{});var ts=i(j);Sl=n(ts,"The accuracy of the LSH mechanism can be improved by increasing "),Xt=r(ts,"CODE",{});var Ed=i(Xt);jl=n(Ed,"config.num_hashes"),Ed.forEach(a),Nl=n(ts,` or directly the argument `),Vt=r(ts,"CODE",{});var qd=i(Vt);Dl=n(qd,"num_hashes"),qd.forEach(a),Hl=n(ts,` of the forward function so that the output of the LSH self attention better approximates the output of the \u201Cnormal\u201D full self attention. The buckets are then sorted and chunked into query key embedding vector chunks each of length `),Gt=r(ts,"CODE",{});var Ld=i(Gt);Ol=n(Ld,"config.lsh_chunk_length"),Ld.forEach(a),Il=n(ts,`. 
For each chunk, the query embedding vectors attend to its key vectors (which are tied to themselves) and to the key embedding vectors of `),Jt=r(ts,"CODE",{});var Fd=i(Jt);Wl=n(Fd,"config.lsh_num_chunks_before"),Fd.forEach(a),Ql=n(ts,` previous neighboring chunks and `),Yt=r(ts,"CODE",{});var Pd=i(Yt);Ul=n(Pd,"config.lsh_num_chunks_after"),Pd.forEach(a),Kl=n(ts," following neighboring chunks."),ts.forEach(a),sr=p(e),ps=r(e,"P",{});var ct=i(ps);Bl=n(ct,"For more information, see the "),Ce=r(ct,"A",{href:!0,rel:!0});var Cd=i(Ce);Xl=n(Cd,"original Paper"),Cd.forEach(a),Vl=n(ct," or this great "),Ae=r(ct,"A",{href:!0,rel:!0});var Ad=i(Ae);Gl=n(Ad,"blog post"),Ad.forEach(a),Jl=n(ct,"."),ct.forEach(a),er=p(e),I=r(e,"P",{});var ds=i(I);Yl=n(ds,"Note that "),Zt=r(ds,"CODE",{});var Sd=i(Zt);Zl=n(Sd,"config.num_buckets"),Sd.forEach(a),sm=n(ds," can also be factorized into a list "),ar=T(ds),tr=n(ds,". This way instead of assigning the query key embedding vectors to one of "),nr=T(ds),or=n(ds," they are assigned to one of "),rr=T(ds),ir=n(ds,`. This is crucial for very long sequences to save memory.`),ds.forEach(a),lr=p(e),cs=r(e,"P",{});var ht=i(cs);em=n(ht,"When training a model from scratch, it is recommended to leave "),sn=r(ht,"CODE",{});var jd=i(sn);am=n(jd,"config.num_buckets=None"),jd.forEach(a),tm=n(ht,`, so that depending on the sequence length a good value for `),en=r(ht,"CODE",{});var Nd=i(en);nm=n(Nd,"num_buckets"),Nd.forEach(a),om=n(ht,` is calculated on the fly. This value will then automatically be saved in the config and should be reused for inference.`),ht.forEach(a),mr=p(e),ss=r(e,"P",{});var ue=i(ss);rm=n(ue,`Using LSH self attention, the memory and time complexity of the query-key matmul operation can be reduced from `),pr=T(ue),cr=n(ue," to "),hr=T(ue),dr=n(ue,`, which usually represents the memory and time bottleneck in a transformer model, with `),gr=T(ue),ur=n(ue," being the sequence length."),ue.forEach(a),fr=p(e),Rs=r(e,"H2",{class:!0});var Gr=i(Rs);Js=r(Gr,"A",{id:!0,class:!0,href:!0});var Dd=i(Js);an=r(Dd,"SPAN",{});var Hd=i(an);f(Se.$$.fragment,Hd),Hd.forEach(a),Dd.forEach(a),im=p(Gr),tn=r(Gr,"SPAN",{});var Od=i(tn);lm=n(Od,"Local Self Attention"),Od.forEach(a),Gr.forEach(a),_r=p(e),es=r(e,"P",{});var fe=i(es);mm=n(fe,`Local self attention is essentially a \u201Cnormal\u201D self attention layer with key, query and value projections, but is chunked so that in each chunk of length `),nn=r(fe,"CODE",{});var Id=i(nn);pm=n(Id,"config.local_chunk_length"),Id.forEach(a),cm=n(fe,` the query embedding vectors only attends to the key embedding vectors in its chunk and to the key embedding vectors of `),on=r(fe,"CODE",{});var Wd=i(on);hm=n(Wd,"config.local_num_chunks_before"),Wd.forEach(a),dm=n(fe,` previous neighboring chunks and `),rn=r(fe,"CODE",{});var Qd=i(rn);gm=n(Qd,"config.local_num_chunks_after"),Qd.forEach(a),um=n(fe," following neighboring chunks."),fe.forEach(a),vr=p(e),as=r(e,"P",{});var _e=i(as);fm=n(_e,`Using Local self attention, the memory and time complexity of the query-key matmul operation can be reduced from `),yr=T(_e),br=n(_e," to "),kr=T(_e),wr=n(_e,`, which usually represents the memory and time bottleneck in a transformer model, with `),zr=T(_e),xr=n(_e," being the sequence length."),_e.forEach(a),Tr=p(e),$s=r(e,"H2",{class:!0});var Jr=i($s);Ys=r(Jr,"A",{id:!0,class:!0,href:!0});var Ud=i(Ys);ln=r(Ud,"SPAN",{});var Kd=i(ln);f(je.$$.fragment,Kd),Kd.forEach(a),Ud.forEach(a),_m=p(Jr),mn=r(Jr,"SPAN",{});var 
Bd=i(mn);vm=n(Bd,"Training"),Bd.forEach(a),Jr.forEach(a),Mr=p(e),hs=r(e,"P",{});var dt=i(hs);ym=n(dt,`During training, we must ensure that the sequence length is set to a value that can be divided by the least common multiple of `),pn=r(dt,"CODE",{});var Xd=i(pn);bm=n(Xd,"config.lsh_chunk_length"),Xd.forEach(a),km=n(dt," and "),cn=r(dt,"CODE",{});var Vd=i(cn);wm=n(Vd,"config.local_chunk_length"),Vd.forEach(a),zm=n(dt,` and that the parameters of the Axial Positional Encodings are correctly set as described above. Reformer is very memory efficient so that the model can easily be trained on sequences as long as 64000 tokens.`),dt.forEach(a),Rr=p(e),Zs=r(e,"P",{});var Yr=i(Zs);xm=n(Yr,"For training, the "),Ba=r(Yr,"A",{href:!0});var Gd=i(Ba);Tm=n(Gd,"ReformerModelWithLMHead"),Gd.forEach(a),Mm=n(Yr," should be used as follows:"),Yr.forEach(a),$r=p(e),f(Ne.$$.fragment,e),Er=p(e),Es=r(e,"H2",{class:!0});var Zr=i(Es);se=r(Zr,"A",{id:!0,class:!0,href:!0});var Jd=i(se);hn=r(Jd,"SPAN",{});var Yd=i(hn);f(De.$$.fragment,Yd),Yd.forEach(a),Jd.forEach(a),Rm=p(Zr),dn=r(Zr,"SPAN",{});var Zd=i(dn);$m=n(Zd,"ReformerConfig"),Zd.forEach(a),Zr.forEach(a),qr=p(e),N=r(e,"DIV",{class:!0});var gs=i(N);f(He.$$.fragment,gs),Em=p(gs),Oe=r(gs,"P",{});var si=i(Oe);qm=n(si,"This is the configuration class to store the configuration of a "),Xa=r(si,"A",{href:!0});var sg=i(Xa);Lm=n(sg,"ReformerModel"),sg.forEach(a),Fm=n(si,`. It is used to instantiate a Reformer model according to the specified arguments, defining the model architecture.`),si.forEach(a),Pm=p(gs),qs=r(gs,"P",{});var gt=i(qs);Cm=n(gt,"Configuration objects inherit from "),Va=r(gt,"A",{href:!0});var eg=i(Va);Am=n(eg,"PretrainedConfig"),eg.forEach(a),Sm=n(gt,` and can be used to control the model outputs. Read the documentation from `),Ga=r(gt,"A",{href:!0});var ag=i(Ga);jm=n(ag,"PretrainedConfig"),ag.forEach(a),Nm=n(gt," for more information."),gt.forEach(a),Dm=p(gs),gn=r(gs,"P",{});var tg=i(gn);Hm=n(tg,"Examples:"),tg.forEach(a),Om=p(gs),f(Ie.$$.fragment,gs),gs.forEach(a),Lr=p(e),Ls=r(e,"H2",{class:!0});var ei=i(Ls);ee=r(ei,"A",{id:!0,class:!0,href:!0});var ng=i(ee);un=r(ng,"SPAN",{});var og=i(un);f(We.$$.fragment,og),og.forEach(a),ng.forEach(a),Im=p(ei),fn=r(ei,"SPAN",{});var rg=i(fn);Wm=n(rg,"ReformerTokenizer"),rg.forEach(a),ei.forEach(a),Fr=p(e),J=r(e,"DIV",{class:!0});var ve=i(J);f(Qe.$$.fragment,ve),Qm=p(ve),Ue=r(ve,"P",{});var ai=i(Ue);Um=n(ai,"Construct a Reformer tokenizer. Based on "),Ke=r(ai,"A",{href:!0,rel:!0});var ig=i(Ke);Km=n(ig,"SentencePiece"),ig.forEach(a),Bm=n(ai," ."),ai.forEach(a),Xm=p(ve),Be=r(ve,"P",{});var ti=i(Be);Vm=n(ti,"This tokenizer inherits from "),Ja=r(ti,"A",{href:!0});var lg=i(Ja);Gm=n(lg,"PreTrainedTokenizer"),lg.forEach(a),Jm=n(ti,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ti.forEach(a),Ym=p(ve),_n=r(ve,"DIV",{class:!0}),i(_n).forEach(a),ve.forEach(a),Pr=p(e),Fs=r(e,"H2",{class:!0});var ni=i(Fs);ae=r(ni,"A",{id:!0,class:!0,href:!0});var mg=i(ae);vn=r(mg,"SPAN",{});var pg=i(vn);f(Xe.$$.fragment,pg),pg.forEach(a),mg.forEach(a),Zm=p(ni),yn=r(ni,"SPAN",{});var cg=i(yn);sp=n(cg,"ReformerTokenizerFast"),cg.forEach(a),ni.forEach(a),Cr=p(e),rs=r(e,"DIV",{class:!0});var ut=i(rs);f(Ve.$$.fragment,ut),ep=p(ut),Ps=r(ut,"P",{});var ft=i(Ps);ap=n(ft,"Construct a \u201Cfast\u201D Reformer tokenizer (backed by HuggingFace\u2019s "),bn=r(ft,"EM",{});var hg=i(bn);tp=n(hg,"tokenizers"),hg.forEach(a),np=n(ft," library). 
Based on "),Ge=r(ft,"A",{href:!0,rel:!0});var dg=i(Ge);op=n(dg,"Unigram"),dg.forEach(a),rp=n(ft,"."),ft.forEach(a),ip=p(ut),Je=r(ut,"P",{});var oi=i(Je);lp=n(oi,"This tokenizer inherits from "),Ya=r(oi,"A",{href:!0});var gg=i(Ya);mp=n(gg,"PreTrainedTokenizerFast"),gg.forEach(a),pp=n(oi,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),oi.forEach(a),ut.forEach(a),Ar=p(e),Cs=r(e,"H2",{class:!0});var ri=i(Cs);te=r(ri,"A",{id:!0,class:!0,href:!0});var ug=i(te);kn=r(ug,"SPAN",{});var fg=i(kn);f(Ye.$$.fragment,fg),fg.forEach(a),ug.forEach(a),cp=p(ri),wn=r(ri,"SPAN",{});var _g=i(wn);hp=n(_g,"ReformerModel"),_g.forEach(a),ri.forEach(a),Sr=p(e),D=r(e,"DIV",{class:!0});var us=i(D);f(Ze.$$.fragment,us),dp=p(us),sa=r(us,"P",{});var ii=i(sa);gp=n(ii,`The bare Reformer Model transformer outputting raw hidden-stateswithout any specific head on top. Reformer was proposed in `),ea=r(ii,"A",{href:!0,rel:!0});var vg=i(ea);up=n(vg,"Reformer: The Efficient Transformer"),vg.forEach(a),fp=n(ii,` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),ii.forEach(a),_p=p(us),aa=r(us,"P",{});var li=i(aa);vp=n(li,"This model inherits from "),Za=r(li,"A",{href:!0});var yg=i(Za);yp=n(yg,"PreTrainedModel"),yg.forEach(a),bp=n(li,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),li.forEach(a),kp=p(us),ta=r(us,"P",{});var mi=i(ta);wp=n(mi,"This model is also a PyTorch "),na=r(mi,"A",{href:!0,rel:!0});var bg=i(na);zp=n(bg,"torch.nn.Module"),bg.forEach(a),xp=n(mi,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mi.forEach(a),Tp=p(us),W=r(us,"DIV",{class:!0});var fs=i(W);f(oa.$$.fragment,fs),Mp=p(fs),As=r(fs,"P",{});var _t=i(As);Rp=n(_t,"The "),st=r(_t,"A",{href:!0});var kg=i(st);$p=n(kg,"ReformerModel"),kg.forEach(a),Ep=n(_t," forward method, overrides the "),zn=r(_t,"CODE",{});var wg=i(zn);qp=n(wg,"__call__"),wg.forEach(a),Lp=n(_t," special method."),_t.forEach(a),Fp=p(fs),f(ne.$$.fragment,fs),Pp=p(fs),xn=r(fs,"P",{});var zg=i(xn);Cp=n(zg,"Example:"),zg.forEach(a),Ap=p(fs),f(ra.$$.fragment,fs),fs.forEach(a),us.forEach(a),jr=p(e),Ss=r(e,"H2",{class:!0});var pi=i(Ss);oe=r(pi,"A",{id:!0,class:!0,href:!0});var xg=i(oe);Tn=r(xg,"SPAN",{});var Tg=i(Tn);f(ia.$$.fragment,Tg),Tg.forEach(a),xg.forEach(a),Sp=p(pi),Mn=r(pi,"SPAN",{});var Mg=i(Mn);jp=n(Mg,"ReformerModelWithLMHead"),Mg.forEach(a),pi.forEach(a),Nr=p(e),H=r(e,"DIV",{class:!0});var _s=i(H);f(la.$$.fragment,_s),Np=p(_s),js=r(_s,"P",{});var vt=i(js);Dp=n(vt,"Reformer Model with a "),Rn=r(vt,"CODE",{});var Rg=i(Rn);Hp=n(Rg,"language modeling"),Rg.forEach(a),Op=n(vt,` head on top. Reformer was proposed in `),ma=r(vt,"A",{href:!0,rel:!0});var $g=i(ma);Ip=n($g,"Reformer: The Efficient Transformer"),$g.forEach(a),Wp=n(vt,` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),vt.forEach(a),Qp=p(_s),pa=r(_s,"P",{});var ci=i(pa);Up=n(ci,"This model inherits from "),et=r(ci,"A",{href:!0});var Eg=i(et);Kp=n(Eg,"PreTrainedModel"),Eg.forEach(a),Bp=n(ci,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ci.forEach(a),Xp=p(_s),ca=r(_s,"P",{});var hi=i(ca);Vp=n(hi,"This model is also a PyTorch "),ha=r(hi,"A",{href:!0,rel:!0});var qg=i(ha);Gp=n(qg,"torch.nn.Module"),qg.forEach(a),Jp=n(hi,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hi.forEach(a),Yp=p(_s),Q=r(_s,"DIV",{class:!0});var vs=i(Q);f(da.$$.fragment,vs),Zp=p(vs),Ns=r(vs,"P",{});var yt=i(Ns);sc=n(yt,"The "),at=r(yt,"A",{href:!0});var Lg=i(at);ec=n(Lg,"ReformerModelWithLMHead"),Lg.forEach(a),ac=n(yt," forward method, overrides the "),$n=r(yt,"CODE",{});var Fg=i($n);tc=n(Fg,"__call__"),Fg.forEach(a),nc=n(yt," special method."),yt.forEach(a),oc=p(vs),f(re.$$.fragment,vs),rc=p(vs),En=r(vs,"P",{});var Pg=i(En);ic=n(Pg,"Example:"),Pg.forEach(a),lc=p(vs),f(ga.$$.fragment,vs),vs.forEach(a),_s.forEach(a),Dr=p(e),Ds=r(e,"H2",{class:!0});var di=i(Ds);ie=r(di,"A",{id:!0,class:!0,href:!0});var Cg=i(ie);qn=r(Cg,"SPAN",{});var Ag=i(qn);f(ua.$$.fragment,Ag),Ag.forEach(a),Cg.forEach(a),mc=p(di),Ln=r(di,"SPAN",{});var Sg=i(Ln);pc=n(Sg,"ReformerForMaskedLM"),Sg.forEach(a),di.forEach(a),Hr=p(e),O=r(e,"DIV",{class:!0});var ys=i(O);f(fa.$$.fragment,ys),cc=p(ys),Hs=r(ys,"P",{});var bt=i(Hs);hc=n(bt,"Reformer Model with a "),Fn=r(bt,"CODE",{});var jg=i(Fn);dc=n(jg,"language modeling"),jg.forEach(a),gc=n(bt,` head on top. Reformer was proposed in `),_a=r(bt,"A",{href:!0,rel:!0});var Ng=i(_a);uc=n(Ng,"Reformer: The Efficient Transformer"),Ng.forEach(a),fc=n(bt,` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),bt.forEach(a),_c=p(ys),va=r(ys,"P",{});var gi=i(va);vc=n(gi,"This model inherits from "),tt=r(gi,"A",{href:!0});var Dg=i(tt);yc=n(Dg,"PreTrainedModel"),Dg.forEach(a),bc=n(gi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gi.forEach(a),kc=p(ys),ya=r(ys,"P",{});var ui=i(ya);wc=n(ui,"This model is also a PyTorch "),ba=r(ui,"A",{href:!0,rel:!0});var Hg=i(ba);zc=n(Hg,"torch.nn.Module"),Hg.forEach(a),xc=n(ui,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ui.forEach(a),Tc=p(ys),U=r(ys,"DIV",{class:!0});var bs=i(U);f(ka.$$.fragment,bs),Mc=p(bs),Os=r(bs,"P",{});var kt=i(Os);Rc=n(kt,"The "),nt=r(kt,"A",{href:!0});var Og=i(nt);$c=n(Og,"ReformerForMaskedLM"),Og.forEach(a),Ec=n(kt," forward method, overrides the "),Pn=r(kt,"CODE",{});var Ig=i(Pn);qc=n(Ig,"__call__"),Ig.forEach(a),Lc=n(kt," special method."),kt.forEach(a),Fc=p(bs),f(le.$$.fragment,bs),Pc=p(bs),Cn=r(bs,"P",{});var Wg=i(Cn);Cc=n(Wg,"Example:"),Wg.forEach(a),Ac=p(bs),f(wa.$$.fragment,bs),bs.forEach(a),ys.forEach(a),Or=p(e),Is=r(e,"H2",{class:!0});var fi=i(Is);me=r(fi,"A",{id:!0,class:!0,href:!0});var Qg=i(me);An=r(Qg,"SPAN",{});var Ug=i(An);f(za.$$.fragment,Ug),Ug.forEach(a),Qg.forEach(a),Sc=p(fi),Sn=r(fi,"SPAN",{});var Kg=i(Sn);jc=n(Kg,"ReformerForSequenceClassification"),Kg.forEach(a),fi.forEach(a),Ir=p(e),A=r(e,"DIV",{class:!0});var ns=i(A);f(xa.$$.fragment,ns),Nc=p(ns),jn=r(ns,"P",{});var Bg=i(jn);Dc=n(Bg,`Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.`),Bg.forEach(a),Hc=p(ns),Ta=r(ns,"P",{});var _i=i(Ta);Oc=n(_i,"Reformer was proposed in "),Ma=r(_i,"A",{href:!0,rel:!0});var Xg=i(Ma);Ic=n(Xg,"Reformer: The Efficient Transformer"),Xg.forEach(a),Wc=n(_i,` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),_i.forEach(a),Qc=p(ns),Ra=r(ns,"P",{});var vi=i(Ra);Uc=n(vi,"This model inherits from "),ot=r(vi,"A",{href:!0});var Vg=i(ot);Kc=n(Vg,"PreTrainedModel"),Vg.forEach(a),Bc=n(vi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vi.forEach(a),Xc=p(ns),$a=r(ns,"P",{});var yi=i($a);Vc=n(yi,"This model is also a PyTorch "),Ea=r(yi,"A",{href:!0,rel:!0});var Gg=i(Ea);Gc=n(Gg,"torch.nn.Module"),Gg.forEach(a),Jc=n(yi,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yi.forEach(a),Yc=p(ns),P=r(ns,"DIV",{class:!0});var X=i(P);f(qa.$$.fragment,X),Zc=p(X),Ws=r(X,"P",{});var wt=i(Ws);sh=n(wt,"The "),rt=r(wt,"A",{href:!0});var Jg=i(rt);eh=n(Jg,"ReformerForSequenceClassification"),Jg.forEach(a),ah=n(wt," forward method, overrides the "),Nn=r(wt,"CODE",{});var Yg=i(Nn);th=n(Yg,"__call__"),Yg.forEach(a),nh=n(wt," special method."),wt.forEach(a),oh=p(X),f(pe.$$.fragment,X),rh=p(X),Dn=r(X,"P",{});var Zg=i(Dn);ih=n(Zg,"Example of single-label classification:"),Zg.forEach(a),lh=p(X),f(La.$$.fragment,X),mh=p(X),Hn=r(X,"P",{});var su=i(Hn);ph=n(su,"Example of multi-label classification:"),su.forEach(a),ch=p(X),f(Fa.$$.fragment,X),X.forEach(a),ns.forEach(a),Wr=p(e),Qs=r(e,"H2",{class:!0});var bi=i(Qs);ce=r(bi,"A",{id:!0,class:!0,href:!0});var eu=i(ce);On=r(eu,"SPAN",{});var au=i(On);f(Pa.$$.fragment,au),au.forEach(a),eu.forEach(a),hh=p(bi),In=r(bi,"SPAN",{});var tu=i(In);dh=n(tu,"ReformerForQuestionAnswering"),tu.forEach(a),bi.forEach(a),Qr=p(e),S=r(e,"DIV",{class:!0});var os=i(S);f(Ca.$$.fragment,os),gh=p(os),Us=r(os,"P",{});var zt=i(Us);uh=n(zt,`Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on top of hidden-states output to compute `),Wn=r(zt,"CODE",{});var nu=i(Wn);fh=n(nu,"span start logits"),nu.forEach(a),_h=n(zt," and "),Qn=r(zt,"CODE",{});var ou=i(Qn);vh=n(ou,"span end logits"),ou.forEach(a),yh=n(zt,"."),zt.forEach(a),bh=p(os),Aa=r(os,"P",{});var ki=i(Aa);kh=n(ki,"Reformer was proposed in "),Sa=r(ki,"A",{href:!0,rel:!0});var ru=i(Sa);wh=n(ru,"Reformer: The Efficient Transformer"),ru.forEach(a),zh=n(ki,` by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya.`),ki.forEach(a),xh=p(os),ja=r(os,"P",{});var wi=i(ja);Th=n(wi,"This model inherits from "),it=r(wi,"A",{href:!0});var iu=i(it);Mh=n(iu,"PreTrainedModel"),iu.forEach(a),Rh=n(wi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wi.forEach(a),$h=p(os),Na=r(os,"P",{});var zi=i(Na);Eh=n(zi,"This model is also a PyTorch "),Da=r(zi,"A",{href:!0,rel:!0});var lu=i(Da);qh=n(lu,"torch.nn.Module"),lu.forEach(a),Lh=n(zi,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zi.forEach(a),Fh=p(os),K=r(os,"DIV",{class:!0});var ks=i(K);f(Ha.$$.fragment,ks),Ph=p(ks),Ks=r(ks,"P",{});var xt=i(Ks);Ch=n(xt,"The "),lt=r(xt,"A",{href:!0});var mu=i(lt);Ah=n(mu,"ReformerForQuestionAnswering"),mu.forEach(a),Sh=n(xt," forward method, overrides the "),Un=r(xt,"CODE",{});var pu=i(Un);jh=n(pu,"__call__"),pu.forEach(a),Nh=n(xt," special method."),xt.forEach(a),Dh=p(ks),f(he.$$.fragment,ks),Hh=p(ks),Kn=r(ks,"P",{});var cu=i(Kn);Oh=n(cu,"Example:"),cu.forEach(a),Ih=p(ks),f(Oa.$$.fragment,ks),ks.forEach(a),os.forEach(a),this.h()},h(){l(d,"name","hf:doc:metadata"),l(d,"content",JSON.stringify(Ju)),l(z,"id","reformer"),l(z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(z,"href","#reformer"),l(g,"class","relative group"),l(be,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),l(be,"rel","nofollow"),l(Bs,"id","overview"),l(Bs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Bs,"href","#overview"),l(zs,"class","relative group"),l(we,"href","https://arxiv.org/abs/2001.04451.pdf"),l(we,"rel","nofollow"),l(ze,"href","https://huggingface.co/patrickvonplaten"),l(ze,"rel","nofollow"),l(xe,"href","https://github.com/google/trax/tree/master/trax/models/reformer"),l(xe,"rel","nofollow"),l(Me,"href","https://github.com/pytorch/pytorch/issues/36035"),l(Me,"rel","nofollow"),l(Vs,"id","axial-positional-encodings"),l(Vs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Vs,"href","#axial-positional-encodings"),l(xs,"class","relative group"),l($e,"href","https://github.com/google/trax/blob/4d99ad4965bab1deba227539758d59f0df0fef48/trax/layers/research/position_encodings.py#L29"),l($e,"rel","nofollow"),lo.a=mo,po.a=co,ho.a=go,uo.a=fo,_o.a=vo,yo.a=null,ko.a=wo,zo.a=null,To.a=null,Ro.a=null,Eo.a=null,Lo.a=Fo,Po.a=Co,Ao.a=So,jo.a=No,Do.a=Ho,Io.a=Wo,Qo.a=Uo,Bo.a=Xo,Vo.a=Go,l(Gs,"id","lsh-self-attention"),l(Gs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Gs,"href","#lsh-self-attention"),l(Ms,"class","relative group"),l(Pe,"href","https://arxiv.org/abs/1509.02897"),l(Pe,"rel","nofollow"),l(Ce,"href","https://arxiv.org/abs/2001.04451"),l(Ce,"rel","nofollow"),l(Ae,"href","https://www.pragmatic.ml/reformer-deep-dive/"),l(Ae,"rel","nofollow"),ar.a=tr,nr.a=or,rr.a=ir,pr.a=cr,hr.a=dr,gr.a=ur,l(Js,"id","local-self-attention"),l(Js,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Js,"href","#local-self-attention"),l(Rs,"class","relative group"),yr.a=br,kr.a=wr,zr.a=xr,l(Ys,"id","training"),l(Ys,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ys,"href","#training"),l($s,"class","relative 
group"),l(Ba,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModelWithLMHead"),l(se,"id","transformers.ReformerConfig"),l(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(se,"href","#transformers.ReformerConfig"),l(Es,"class","relative group"),l(Xa,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModel"),l(Va,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Ga,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(N,"class","docstring"),l(ee,"id","transformers.ReformerTokenizer"),l(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ee,"href","#transformers.ReformerTokenizer"),l(Ls,"class","relative group"),l(Ke,"href","https://github.com/google/sentencepiece"),l(Ke,"rel","nofollow"),l(Ja,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(_n,"class","docstring"),l(J,"class","docstring"),l(ae,"id","transformers.ReformerTokenizerFast"),l(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ae,"href","#transformers.ReformerTokenizerFast"),l(Fs,"class","relative group"),l(Ge,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),l(Ge,"rel","nofollow"),l(Ya,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(rs,"class","docstring"),l(te,"id","transformers.ReformerModel"),l(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(te,"href","#transformers.ReformerModel"),l(Cs,"class","relative group"),l(ea,"href","https://arxiv.org/abs/2001.04451"),l(ea,"rel","nofollow"),l(Za,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(na,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(na,"rel","nofollow"),l(st,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModel"),l(W,"class","docstring"),l(D,"class","docstring"),l(oe,"id","transformers.ReformerModelWithLMHead"),l(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(oe,"href","#transformers.ReformerModelWithLMHead"),l(Ss,"class","relative group"),l(ma,"href","https://arxiv.org/abs/2001.04451"),l(ma,"rel","nofollow"),l(et,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(ha,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(ha,"rel","nofollow"),l(at,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerModelWithLMHead"),l(Q,"class","docstring"),l(H,"class","docstring"),l(ie,"id","transformers.ReformerForMaskedLM"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#transformers.ReformerForMaskedLM"),l(Ds,"class","relative 
group"),l(_a,"href","https://arxiv.org/abs/2001.04451"),l(_a,"rel","nofollow"),l(tt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(ba,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(ba,"rel","nofollow"),l(nt,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForMaskedLM"),l(U,"class","docstring"),l(O,"class","docstring"),l(me,"id","transformers.ReformerForSequenceClassification"),l(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(me,"href","#transformers.ReformerForSequenceClassification"),l(Is,"class","relative group"),l(Ma,"href","https://arxiv.org/abs/2001.04451"),l(Ma,"rel","nofollow"),l(ot,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ea,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ea,"rel","nofollow"),l(rt,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForSequenceClassification"),l(P,"class","docstring"),l(A,"class","docstring"),l(ce,"id","transformers.ReformerForQuestionAnswering"),l(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ce,"href","#transformers.ReformerForQuestionAnswering"),l(Qs,"class","relative group"),l(Sa,"href","https://arxiv.org/abs/2001.04451"),l(Sa,"rel","nofollow"),l(it,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Da,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Da,"rel","nofollow"),l(lt,"href","/docs/transformers/v4.15.0/en/model_doc/reformer#transformers.ReformerForQuestionAnswering"),l(K,"class","docstring"),l(S,"class","docstring")},m(e,c){s(document.head,d),h(e,M,c),h(e,g,c),s(g,z),s(z,R),_(w,R,null),s(g,k),s(g,L),s(L,xi),h(e,Yn,c),h(e,ws,c),s(ws,Tt),s(Tt,Ti),s(ws,Mi),s(ws,be),s(be,Ri),s(ws,$i),h(e,Zn,c),h(e,zs,c),s(zs,Bs),s(Bs,Mt),_(ke,Mt,null),s(zs,Ei),s(zs,Rt),s(Rt,qi),h(e,so,c),h(e,Xs,c),s(Xs,Li),s(Xs,we),s(we,Fi),s(Xs,Pi),h(e,eo,c),h(e,Qa,c),s(Qa,Ci),h(e,ao,c),h(e,Ua,c),s(Ua,$t),s($t,Ai),h(e,to,c),h(e,is,c),s(is,Si),s(is,ze),s(ze,ji),s(is,Ni),s(is,xe),s(xe,Di),s(is,Hi),h(e,no,c),h(e,Te,c),s(Te,Et),s(Et,Oi),s(Te,Ii),h(e,oo,c),h(e,Ka,c),s(Ka,ls),s(ls,Wi),s(ls,qt),s(qt,Qi),s(ls,Ui),s(ls,Lt),s(Lt,Ki),s(ls,Bi),s(ls,Me),s(Me,Xi),h(e,ro,c),h(e,xs,c),s(xs,Vs),s(Vs,Ft),_(Re,Ft,null),s(xs,Vi),s(xs,Pt),s(Pt,Gi),h(e,io,c),h(e,$,c),s($,Ji),s($,$e),s($e,Yi),s($,Zi),lo.m(hu,$),s($,mo),s($,Ct),s(Ct,sl),s($,el),po.m(du,$),s($,co),ho.m(gu,$),s($,go),s($,At),s(At,al),s($,tl),uo.m(uu,$),s($,fo),s($,St),s(St,nl),s($,ol),_o.m(fu,$),s($,vo),yo.m(_u,$),h(e,bo,c),h(e,Ts,c),s(Ts,rl),ko.m(vu,Ts),s(Ts,wo),zo.m(yu,Ts),h(e,xo,c),h(e,Ee,c),s(Ee,il),To.m(bu,Ee),h(e,Mo,c),h(e,qe,c),s(qe,ll),Ro.m(ku,qe),h(e,$o,c),h(e,Le,c),s(Le,ml),Eo.m(wu,Le),h(e,qo,c),h(e,F,c),s(F,pl),Lo.m(zu,F),s(F,Fo),Po.m(xu,F),s(F,Co),s(F,jt),s(jt,cl),s(F,hl),Ao.m(Tu,F),s(F,So),jo.m(Mu,F),s(F,No),Do.m(Ru,F),s(F,Ho),h(e,Oo,c),h(e,ms,c),s(ms,dl),Io.m($u,ms),s(ms,Wo),Qo.m(Eu,ms),s(ms,Uo),h(e,Ko,c),h(e,E,c),s(E,gl),s(E,Nt),s(Nt,ul),s(E,fl),Bo.m(qu,E),s(E,Xo),s(E,Dt),s(Dt,_l),s(E,vl),s(E,Ht),s(Ht,yl),s(E,bl),Vo.m(Lu,E),s(E,Go),s(E,Ot),s(Ot,kl),s(E,wl),s(E,It),s(It,zl),s(E,xl),s(E,Wt),s(Wt,Tl),s(E,Ml),h(e,Jo,c),h(e,Ms,c),s(Ms,Gs),s(Gs,Qt),_(Fe,Qt,null),s(Ms,Rl),s(Ms,Ut),s(Ut,$l),h(e,Yo,c),h(e,Z,c),s(Z,El),s(Z,Pe
),s(Pe,ql),s(Z,Ll),s(Z,Kt),s(Kt,Fl),s(Z,Pl),s(Z,Bt),s(Bt,Cl),s(Z,Al),h(e,Zo,c),h(e,j,c),s(j,Sl),s(j,Xt),s(Xt,jl),s(j,Nl),s(j,Vt),s(Vt,Dl),s(j,Hl),s(j,Gt),s(Gt,Ol),s(j,Il),s(j,Jt),s(Jt,Wl),s(j,Ql),s(j,Yt),s(Yt,Ul),s(j,Kl),h(e,sr,c),h(e,ps,c),s(ps,Bl),s(ps,Ce),s(Ce,Xl),s(ps,Vl),s(ps,Ae),s(Ae,Gl),s(ps,Jl),h(e,er,c),h(e,I,c),s(I,Yl),s(I,Zt),s(Zt,Zl),s(I,sm),ar.m(Fu,I),s(I,tr),nr.m(Pu,I),s(I,or),rr.m(Cu,I),s(I,ir),h(e,lr,c),h(e,cs,c),s(cs,em),s(cs,sn),s(sn,am),s(cs,tm),s(cs,en),s(en,nm),s(cs,om),h(e,mr,c),h(e,ss,c),s(ss,rm),pr.m(Au,ss),s(ss,cr),hr.m(Su,ss),s(ss,dr),gr.m(ju,ss),s(ss,ur),h(e,fr,c),h(e,Rs,c),s(Rs,Js),s(Js,an),_(Se,an,null),s(Rs,im),s(Rs,tn),s(tn,lm),h(e,_r,c),h(e,es,c),s(es,mm),s(es,nn),s(nn,pm),s(es,cm),s(es,on),s(on,hm),s(es,dm),s(es,rn),s(rn,gm),s(es,um),h(e,vr,c),h(e,as,c),s(as,fm),yr.m(Nu,as),s(as,br),kr.m(Du,as),s(as,wr),zr.m(Hu,as),s(as,xr),h(e,Tr,c),h(e,$s,c),s($s,Ys),s(Ys,ln),_(je,ln,null),s($s,_m),s($s,mn),s(mn,vm),h(e,Mr,c),h(e,hs,c),s(hs,ym),s(hs,pn),s(pn,bm),s(hs,km),s(hs,cn),s(cn,wm),s(hs,zm),h(e,Rr,c),h(e,Zs,c),s(Zs,xm),s(Zs,Ba),s(Ba,Tm),s(Zs,Mm),h(e,$r,c),_(Ne,e,c),h(e,Er,c),h(e,Es,c),s(Es,se),s(se,hn),_(De,hn,null),s(Es,Rm),s(Es,dn),s(dn,$m),h(e,qr,c),h(e,N,c),_(He,N,null),s(N,Em),s(N,Oe),s(Oe,qm),s(Oe,Xa),s(Xa,Lm),s(Oe,Fm),s(N,Pm),s(N,qs),s(qs,Cm),s(qs,Va),s(Va,Am),s(qs,Sm),s(qs,Ga),s(Ga,jm),s(qs,Nm),s(N,Dm),s(N,gn),s(gn,Hm),s(N,Om),_(Ie,N,null),h(e,Lr,c),h(e,Ls,c),s(Ls,ee),s(ee,un),_(We,un,null),s(Ls,Im),s(Ls,fn),s(fn,Wm),h(e,Fr,c),h(e,J,c),_(Qe,J,null),s(J,Qm),s(J,Ue),s(Ue,Um),s(Ue,Ke),s(Ke,Km),s(Ue,Bm),s(J,Xm),s(J,Be),s(Be,Vm),s(Be,Ja),s(Ja,Gm),s(Be,Jm),s(J,Ym),s(J,_n),h(e,Pr,c),h(e,Fs,c),s(Fs,ae),s(ae,vn),_(Xe,vn,null),s(Fs,Zm),s(Fs,yn),s(yn,sp),h(e,Cr,c),h(e,rs,c),_(Ve,rs,null),s(rs,ep),s(rs,Ps),s(Ps,ap),s(Ps,bn),s(bn,tp),s(Ps,np),s(Ps,Ge),s(Ge,op),s(Ps,rp),s(rs,ip),s(rs,Je),s(Je,lp),s(Je,Ya),s(Ya,mp),s(Je,pp),h(e,Ar,c),h(e,Cs,c),s(Cs,te),s(te,kn),_(Ye,kn,null),s(Cs,cp),s(Cs,wn),s(wn,hp),h(e,Sr,c),h(e,D,c),_(Ze,D,null),s(D,dp),s(D,sa),s(sa,gp),s(sa,ea),s(ea,up),s(sa,fp),s(D,_p),s(D,aa),s(aa,vp),s(aa,Za),s(Za,yp),s(aa,bp),s(D,kp),s(D,ta),s(ta,wp),s(ta,na),s(na,zp),s(ta,xp),s(D,Tp),s(D,W),_(oa,W,null),s(W,Mp),s(W,As),s(As,Rp),s(As,st),s(st,$p),s(As,Ep),s(As,zn),s(zn,qp),s(As,Lp),s(W,Fp),_(ne,W,null),s(W,Pp),s(W,xn),s(xn,Cp),s(W,Ap),_(ra,W,null),h(e,jr,c),h(e,Ss,c),s(Ss,oe),s(oe,Tn),_(ia,Tn,null),s(Ss,Sp),s(Ss,Mn),s(Mn,jp),h(e,Nr,c),h(e,H,c),_(la,H,null),s(H,Np),s(H,js),s(js,Dp),s(js,Rn),s(Rn,Hp),s(js,Op),s(js,ma),s(ma,Ip),s(js,Wp),s(H,Qp),s(H,pa),s(pa,Up),s(pa,et),s(et,Kp),s(pa,Bp),s(H,Xp),s(H,ca),s(ca,Vp),s(ca,ha),s(ha,Gp),s(ca,Jp),s(H,Yp),s(H,Q),_(da,Q,null),s(Q,Zp),s(Q,Ns),s(Ns,sc),s(Ns,at),s(at,ec),s(Ns,ac),s(Ns,$n),s($n,tc),s(Ns,nc),s(Q,oc),_(re,Q,null),s(Q,rc),s(Q,En),s(En,ic),s(Q,lc),_(ga,Q,null),h(e,Dr,c),h(e,Ds,c),s(Ds,ie),s(ie,qn),_(ua,qn,null),s(Ds,mc),s(Ds,Ln),s(Ln,pc),h(e,Hr,c),h(e,O,c),_(fa,O,null),s(O,cc),s(O,Hs),s(Hs,hc),s(Hs,Fn),s(Fn,dc),s(Hs,gc),s(Hs,_a),s(_a,uc),s(Hs,fc),s(O,_c),s(O,va),s(va,vc),s(va,tt),s(tt,yc),s(va,bc),s(O,kc),s(O,ya),s(ya,wc),s(ya,ba),s(ba,zc),s(ya,xc),s(O,Tc),s(O,U),_(ka,U,null),s(U,Mc),s(U,Os),s(Os,Rc),s(Os,nt),s(nt,$c),s(Os,Ec),s(Os,Pn),s(Pn,qc),s(Os,Lc),s(U,Fc),_(le,U,null),s(U,Pc),s(U,Cn),s(Cn,Cc),s(U,Ac),_(wa,U,null),h(e,Or,c),h(e,Is,c),s(Is,me),s(me,An),_(za,An,null),s(Is,Sc),s(Is,Sn),s(Sn,jc),h(e,Ir,c),h(e,A,c),_(xa,A,null),s(A,Nc),s(A,jn),s(jn,Dc),s(A,Hc),s(A,Ta),s(Ta,Oc),s(Ta,Ma),s(Ma,Ic),s(Ta,Wc),s(A,Qc),s(A,Ra),s(Ra,Uc),s(Ra,ot),s(ot,Kc),s(Ra,Bc),s(A,Xc),s(A,$a),s($a,Vc),s($a,Ea),s(Ea,Gc),s($a,Jc),s(A,Yc),s(A,
P),_(qa,P,null),s(P,Zc),s(P,Ws),s(Ws,sh),s(Ws,rt),s(rt,eh),s(Ws,ah),s(Ws,Nn),s(Nn,th),s(Ws,nh),s(P,oh),_(pe,P,null),s(P,rh),s(P,Dn),s(Dn,ih),s(P,lh),_(La,P,null),s(P,mh),s(P,Hn),s(Hn,ph),s(P,ch),_(Fa,P,null),h(e,Wr,c),h(e,Qs,c),s(Qs,ce),s(ce,On),_(Pa,On,null),s(Qs,hh),s(Qs,In),s(In,dh),h(e,Qr,c),h(e,S,c),_(Ca,S,null),s(S,gh),s(S,Us),s(Us,uh),s(Us,Wn),s(Wn,fh),s(Us,_h),s(Us,Qn),s(Qn,vh),s(Us,yh),s(S,bh),s(S,Aa),s(Aa,kh),s(Aa,Sa),s(Sa,wh),s(Aa,zh),s(S,xh),s(S,ja),s(ja,Th),s(ja,it),s(it,Mh),s(ja,Rh),s(S,$h),s(S,Na),s(Na,Eh),s(Na,Da),s(Da,qh),s(Na,Lh),s(S,Fh),s(S,K),_(Ha,K,null),s(K,Ph),s(K,Ks),s(Ks,Ch),s(Ks,lt),s(lt,Ah),s(Ks,Sh),s(Ks,Un),s(Un,jh),s(Ks,Nh),s(K,Dh),_(he,K,null),s(K,Hh),s(K,Kn),s(Kn,Oh),s(K,Ih),_(Oa,K,null),Ur=!0},p(e,[c]){const Ia={};c&2&&(Ia.$$scope={dirty:c,ctx:e}),ne.$set(Ia);const Bn={};c&2&&(Bn.$$scope={dirty:c,ctx:e}),re.$set(Bn);const Xn={};c&2&&(Xn.$$scope={dirty:c,ctx:e}),le.$set(Xn);const Vn={};c&2&&(Vn.$$scope={dirty:c,ctx:e}),pe.$set(Vn);const de={};c&2&&(de.$$scope={dirty:c,ctx:e}),he.$set(de)},i(e){Ur||(v(w.$$.fragment,e),v(ke.$$.fragment,e),v(Re.$$.fragment,e),v(Fe.$$.fragment,e),v(Se.$$.fragment,e),v(je.$$.fragment,e),v(Ne.$$.fragment,e),v(De.$$.fragment,e),v(He.$$.fragment,e),v(Ie.$$.fragment,e),v(We.$$.fragment,e),v(Qe.$$.fragment,e),v(Xe.$$.fragment,e),v(Ve.$$.fragment,e),v(Ye.$$.fragment,e),v(Ze.$$.fragment,e),v(oa.$$.fragment,e),v(ne.$$.fragment,e),v(ra.$$.fragment,e),v(ia.$$.fragment,e),v(la.$$.fragment,e),v(da.$$.fragment,e),v(re.$$.fragment,e),v(ga.$$.fragment,e),v(ua.$$.fragment,e),v(fa.$$.fragment,e),v(ka.$$.fragment,e),v(le.$$.fragment,e),v(wa.$$.fragment,e),v(za.$$.fragment,e),v(xa.$$.fragment,e),v(qa.$$.fragment,e),v(pe.$$.fragment,e),v(La.$$.fragment,e),v(Fa.$$.fragment,e),v(Pa.$$.fragment,e),v(Ca.$$.fragment,e),v(Ha.$$.fragment,e),v(he.$$.fragment,e),v(Oa.$$.fragment,e),Ur=!0)},o(e){y(w.$$.fragment,e),y(ke.$$.fragment,e),y(Re.$$.fragment,e),y(Fe.$$.fragment,e),y(Se.$$.fragment,e),y(je.$$.fragment,e),y(Ne.$$.fragment,e),y(De.$$.fragment,e),y(He.$$.fragment,e),y(Ie.$$.fragment,e),y(We.$$.fragment,e),y(Qe.$$.fragment,e),y(Xe.$$.fragment,e),y(Ve.$$.fragment,e),y(Ye.$$.fragment,e),y(Ze.$$.fragment,e),y(oa.$$.fragment,e),y(ne.$$.fragment,e),y(ra.$$.fragment,e),y(ia.$$.fragment,e),y(la.$$.fragment,e),y(da.$$.fragment,e),y(re.$$.fragment,e),y(ga.$$.fragment,e),y(ua.$$.fragment,e),y(fa.$$.fragment,e),y(ka.$$.fragment,e),y(le.$$.fragment,e),y(wa.$$.fragment,e),y(za.$$.fragment,e),y(xa.$$.fragment,e),y(qa.$$.fragment,e),y(pe.$$.fragment,e),y(La.$$.fragment,e),y(Fa.$$.fragment,e),y(Pa.$$.fragment,e),y(Ca.$$.fragment,e),y(Ha.$$.fragment,e),y(he.$$.fragment,e),y(Oa.$$.fragment,e),Ur=!1},d(e){a(d),e&&a(M),e&&a(g),b(w),e&&a(Yn),e&&a(ws),e&&a(Zn),e&&a(zs),b(ke),e&&a(so),e&&a(Xs),e&&a(eo),e&&a(Qa),e&&a(ao),e&&a(Ua),e&&a(to),e&&a(is),e&&a(no),e&&a(Te),e&&a(oo),e&&a(Ka),e&&a(ro),e&&a(xs),b(Re),e&&a(io),e&&a($),e&&a(bo),e&&a(Ts),e&&a(xo),e&&a(Ee),e&&a(Mo),e&&a(qe),e&&a($o),e&&a(Le),e&&a(qo),e&&a(F),e&&a(Oo),e&&a(ms),e&&a(Ko),e&&a(E),e&&a(Jo),e&&a(Ms),b(Fe),e&&a(Yo),e&&a(Z),e&&a(Zo),e&&a(j),e&&a(sr),e&&a(ps),e&&a(er),e&&a(I),e&&a(lr),e&&a(cs),e&&a(mr),e&&a(ss),e&&a(fr),e&&a(Rs),b(Se),e&&a(_r),e&&a(es),e&&a(vr),e&&a(as),e&&a(Tr),e&&a($s),b(je),e&&a(Mr),e&&a(hs),e&&a(Rr),e&&a(Zs),e&&a($r),b(Ne,e),e&&a(Er),e&&a(Es),b(De),e&&a(qr),e&&a(N),b(He),b(Ie),e&&a(Lr),e&&a(Ls),b(We),e&&a(Fr),e&&a(J),b(Qe),e&&a(Pr),e&&a(Fs),b(Xe),e&&a(Cr),e&&a(rs),b(Ve),e&&a(Ar),e&&a(Cs),b(Ye),e&&a(Sr),e&&a(D),b(Ze),b(oa),b(ne),b(ra),e&&a(jr),e&&a(Ss),b(ia),e&&a(Nr),e&&a(H),b(la),b(da),b(re),b(ga),e&
&a(Dr),e&&a(Ds),b(ua),e&&a(Hr),e&&a(O),b(fa),b(ka),b(le),b(wa),e&&a(Or),e&&a(Is),b(za),e&&a(Ir),e&&a(A),b(xa),b(qa),b(pe),b(La),b(Fa),e&&a(Wr),e&&a(Qs),b(Pa),e&&a(Qr),e&&a(S),b(Ca),b(Ha),b(he),b(Oa)}}}const Ju={local:"reformer",sections:[{local:"overview",title:"Overview"},{local:"axial-positional-encodings",title:"Axial Positional Encodings"},{local:"lsh-self-attention",title:"LSH Self Attention"},{local:"local-self-attention",title:"Local Self Attention"},{local:"training",title:"Training"},{local:"transformers.ReformerConfig",title:"ReformerConfig"},{local:"transformers.ReformerTokenizer",title:"ReformerTokenizer"},{local:"transformers.ReformerTokenizerFast",title:"ReformerTokenizerFast"},{local:"transformers.ReformerModel",title:"ReformerModel"},{local:"transformers.ReformerModelWithLMHead",title:"ReformerModelWithLMHead"},{local:"transformers.ReformerForMaskedLM",title:"ReformerForMaskedLM"},{local:"transformers.ReformerForSequenceClassification",title:"ReformerForSequenceClassification"},{local:"transformers.ReformerForQuestionAnswering",title:"ReformerForQuestionAnswering"}],title:"Reformer"};function Yu(G,d,M){let{fw:g}=d;return G.$$set=z=>{"fw"in z&&M(0,g=z.fw)},[g]}class of extends Ou{constructor(d){super();Iu(this,d,Yu,Gu,Wu,{fw:0})}}export{of as default,Ju as metadata};
9,949
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/gpt_neo.mdx-cdea7b7d.js
import{S as Ql,i as Yl,s as Zl,e as n,k as d,w as _,t as r,L as ed,c as s,d as o,m as c,a,x as v,h as i,b as l,J as e,g as u,y as T,q as y,o as P,B as k}from"../../chunks/vendor-b1433968.js";import{T as rn}from"../../chunks/Tip-c3840994.js";import{D as V}from"../../chunks/Docstring-ff504c58.js";import{C as je}from"../../chunks/CodeBlock-a320dbd7.js";import{I as _e}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function td(O){let p,w,f,b,N;return{c(){p=n("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),N=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var m=a(p);w=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),N=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,p,m),e(p,w),e(p,f),e(f,b),e(p,N)},d(g){g&&o(p)}}}function od(O){let p,w,f,b,N;return{c(){p=n("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),N=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var m=a(p);w=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),N=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,p,m),e(p,w),e(p,f),e(f,b),e(p,N)},d(g){g&&o(p)}}}function nd(O){let p,w,f,b,N;return{c(){p=n("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),N=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var m=a(p);w=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),N=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,p,m),e(p,w),e(p,f),e(f,b),e(p,N)},d(g){g&&o(p)}}}function sd(O){let p,w,f,b,N;return{c(){p=n("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),N=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var m=a(p);w=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),N=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,p,m),e(p,w),e(p,f),e(f,b),e(p,N)},d(g){g&&o(p)}}}function ad(O){let 
p,w,f,b,N;return{c(){p=n("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),b=r("Module"),N=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var m=a(p);w=i(m,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(m,"CODE",{});var x=a(f);b=i(x,"Module"),x.forEach(o),N=i(m,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),m.forEach(o)},m(g,m){u(g,p,m),e(p,w),e(p,f),e(f,b),e(p,N)},d(g){g&&o(p)}}}function rd(O){let p,w,f,b,N,g,m,x,Jn,ln,oe,ve,ro,Ae,Rn,io,Xn,dn,J,Kn,Ie,Qn,Yn,Le,Zn,es,cn,Lt,ts,pn,Te,os,Se,ns,ss,hn,ne,ye,lo,De,as,co,rs,un,Pe,is,po,ls,ds,fn,Be,mn,se,ke,ho,Oe,cs,uo,ps,gn,E,We,hs,ae,us,St,fs,ms,He,gs,_s,vs,re,Ts,Dt,ys,Ps,Bt,ks,bs,ws,fo,Ns,xs,Ue,_n,ie,be,mo,Ve,$s,go,Gs,vn,z,Je,Ms,_o,Fs,Es,Re,zs,Ot,Cs,qs,js,Xe,As,Ke,Is,Ls,Ss,q,Qe,Ds,le,Bs,Wt,Os,Ws,vo,Hs,Us,Vs,we,Js,To,Rs,Xs,Ye,Tn,de,Ne,yo,Ze,Ks,Po,Qs,yn,C,et,Ys,ko,Zs,ea,tt,ta,Ht,oa,na,sa,ot,aa,nt,ra,ia,la,j,st,da,ce,ca,Ut,pa,ha,bo,ua,fa,ma,xe,ga,wo,_a,va,at,Pn,pe,$e,No,rt,Ta,xo,ya,kn,$,it,Pa,$o,ka,ba,Vt,Jt,wa,Na,xa,W,$a,Go,Ga,Ma,Mo,Fa,Ea,Fo,za,Ca,Eo,qa,ja,Aa,lt,Ia,Rt,La,Sa,Da,dt,Ba,ct,Oa,Wa,Ha,F,pt,Ua,he,Va,Xt,Ja,Ra,zo,Xa,Ka,Qa,Ge,Ya,Co,Za,er,ht,tr,qo,or,nr,ut,bn,ue,Me,jo,ft,sr,Ao,ar,wn,G,mt,rr,Io,ir,lr,gt,dr,Kt,cr,pr,hr,_t,ur,vt,fr,mr,gr,Lo,_r,vr,H,So,Tt,Tr,yr,Do,yt,Pr,kr,Bo,Pt,br,wr,Oo,kt,Nr,xr,A,bt,$r,fe,Gr,Wo,Mr,Fr,Ho,Er,zr,Cr,Fe,qr,Uo,jr,Ar,wt,Nn,me,Ee,Vo,Nt,Ir,Jo,Lr,xn,M,xt,Sr,Ro,Dr,Br,$t,Or,Qt,Wr,Hr,Ur,Gt,Vr,Mt,Jr,Rr,Xr,Xo,Kr,Qr,U,Ko,Ft,Yr,Zr,Qo,Et,ei,ti,Yo,zt,oi,ni,Zo,Ct,si,ai,I,qt,ri,ge,ii,en,li,di,tn,ci,pi,hi,ze,ui,on,fi,mi,jt,$n;return g=new _e({}),Ae=new _e({}),De=new _e({}),Be=new je({props:{code:`from transformers import GPTNeoForCausalLM, GPT2Tokenizer model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B") tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B") prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \\ "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \\ "researchers was the fact that the unicorns spoke perfect English." input_ids = tokenizer(prompt, return_tensors="pt").input_ids gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100,) gen_text = tokenizer.batch_decode(gen_tokens)[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTNeoForCausalLM, GPT2Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTNeoForCausalLM.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-neo-1.3B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;EleutherAI/gpt-neo-1.3B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In a shocking finding, scientists discovered a herd of unicorns living in a remote, &quot;</span> \\ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;previously unexplored valley, in the Andes Mountains. Even more surprising to the &quot;</span> \\ <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;researchers was the fact that the unicorns spoke perfect English.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>gen_tokens = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, temperature=<span class="hljs-number">0.9</span>, max_length=<span class="hljs-number">100</span>,) <span class="hljs-meta">&gt;&gt;&gt; </span>gen_text = tokenizer.batch_decode(gen_tokens)[<span class="hljs-number">0</span>]`}}),Oe=new _e({}),We=new V({props:{name:"class transformers.GPTNeoConfig",anchor:"transformers.GPTNeoConfig",parameters:[{name:"vocab_size",val:" = 50257"},{name:"max_position_embeddings",val:" = 2048"},{name:"hidden_size",val:" = 2048"},{name:"num_layers",val:" = 24"},{name:"attention_types",val:" = [[['global', 'local'], 12]]"},{name:"num_heads",val:" = 16"},{name:"intermediate_size",val:" = None"},{name:"window_size",val:" = 256"},{name:"activation_function",val:" = 'gelu_new'"},{name:"resid_dropout",val:" = 0.0"},{name:"embed_dropout",val:" = 0.0"},{name:"attention_dropout",val:" = 0.0"},{name:"layer_norm_epsilon",val:" = 1e-05"},{name:"initializer_range",val:" = 0.02"},{name:"summary_type",val:" = 'cls_index'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = None"},{name:"summary_proj_to_labels",val:" = True"},{name:"summary_first_dropout",val:" = 0.1"},{name:"use_cache",val:" = True"},{name:"bos_token_id",val:" = 50256"},{name:"eos_token_id",val:" = 50256"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/configuration_gpt_neo.py#L34",parametersDescription:[{anchor:"transformers.GPTNeoConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50257) &#x2014; Vocabulary size of the GPT Neo model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel">GPTNeoModel</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel">GPTNeoModel</a>.`,name:"vocab_size"},{anchor:"transformers.GPTNeoConfig.attention_types",description:`<strong>attention_types</strong> (<code>List</code>, <em>optional</em>, defaults to <code>[[[&quot;global&quot;, &quot;local&quot;], 12]]</code>) &#x2014; The type of attention for each layer in a <code>List</code> of the following format <code>[[[&quot;attention_type&quot;], num_layerss]]</code> e.g. 
for a 24 layer model <code>[[[&quot;global&quot;], 24]]</code> or <code>[[[&quot;global&quot;, &quot;local&quot;], 12]]</code> Choose the value of <code>attention_type</code> from <code>[&quot;global&quot;, &quot;local&quot;]</code>`,name:"attention_types"},{anchor:"transformers.GPTNeoConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.GPTNeoConfig.num_layers",description:`<strong>num_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_layers"},{anchor:"transformers.GPTNeoConfig.num_heads",description:`<strong>num_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_heads"},{anchor:"transformers.GPTNeoConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8192) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.GPTNeoConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.GPTNeoConfig.embed_dropout",description:`<strong>embed_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"embed_dropout"},{anchor:"transformers.GPTNeoConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.GPTNeoConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.GPTNeoConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel">GPTNeoModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.GPTNeoConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.GPTNeoConfig.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_epsilon"},{anchor:"transformers.GPTNeoConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"}]}}),Ue=new je({props:{code:`from transformers import GPTNeoModel, GPTNeoConfig # Initializing a GPTNeo EleutherAI/gpt-neo-1.3B style configuration configuration = GPTNeoConfig() # Initializing a model from the EleutherAI/gpt-neo-1.3B style configuration model = GPTNeoModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPTNeoModel, GPTNeoConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a GPTNeo EleutherAI/gpt-neo-1.3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = GPTNeoConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the EleutherAI/gpt-neo-1.3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTNeoModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ve=new _e({}),Je=new V({props:{name:"class transformers.GPTNeoModel",anchor:"transformers.GPTNeoModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_gpt_neo.py#L476",parametersDescription:[{anchor:"transformers.GPTNeoModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qe=new V({props:{name:"forward",anchor:"transformers.GPTNeoModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_gpt_neo.py#L497",parametersDescription:[{anchor:"transformers.GPTNeoModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <code>GPTNeoTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTNeoModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.num_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPTNeoModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTNeoModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTNeoModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTNeoModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTNeoModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPTNeoModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPTNeoModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTNeoModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTNeoModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig" >GPTNeoConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),we=new rn({props:{$$slots:{default:[td]},$$scope:{ctx:O}}}),Ye=new je({props:{code:`from transformers import GPT2Tokenizer, GPTNeoModel import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B') model = GPTNeoModel.from_pretrained('EleutherAI/gpt-neo-1.3B') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTNeoModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTNeoModel.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ze=new _e({}),et=new V({props:{name:"class transformers.GPTNeoForCausalLM",anchor:"transformers.GPTNeoForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_gpt_neo.py#L664",parametersDescription:[{anchor:"transformers.GPTNeoForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),st=new V({props:{name:"forward",anchor:"transformers.GPTNeoForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_gpt_neo.py#L714",parametersDescription:[{anchor:"transformers.GPTNeoForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <code>GPTNeoTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTNeoForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.num_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPTNeoForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTNeoForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTNeoForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTNeoForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTNeoForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPTNeoForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPTNeoForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTNeoForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTNeoForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPTNeoForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig" >GPTNeoConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xe=new rn({props:{$$slots:{default:[od]},$$scope:{ctx:O}}}),at=new je({props:{code:`import torch from transformers import GPT2Tokenizer, GPTNeoForCausalLM tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B') model = GPTNeoForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTNeoForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTNeoForCausalLM.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),rt=new _e({}),it=new V({props:{name:"class transformers.GPTNeoForSequenceClassification",anchor:"transformers.GPTNeoForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_gpt_neo.py#L817",parametersDescription:[{anchor:"transformers.GPTNeoForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),pt=new V({props:{name:"forward",anchor:"transformers.GPTNeoForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_gpt_neo.py#L829",parametersDescription:[{anchor:"transformers.GPTNeoForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <code>GPTNeoTokenizer</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.num_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPTNeoForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig" >GPTNeoConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` 
<p><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ge=new rn({props:{$$slots:{default:[nd]},$$scope:{ctx:O}}}),ht=new je({props:{code:`from transformers import GPT2Tokenizer, GPTNeoForSequenceClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B') model = GPTNeoForSequenceClassification.from_pretrained('EleutherAI/gpt-neo-1.3B') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTNeoForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTNeoForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ut=new je({props:{code:`from transformers import GPT2Tokenizer, GPTNeoForSequenceClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B') model = GPTNeoForSequenceClassification.from_pretrained('EleutherAI/gpt-neo-1.3B', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPTNeoForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPTNeoForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># 
need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ft=new _e({}),mt=new V({props:{name:"class transformers.FlaxGPTNeoModel",anchor:"transformers.FlaxGPTNeoModel",parameters:[{name:"config",val:": GPTNeoConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py#L581",parametersDescription:[{anchor:"transformers.FlaxGPTNeoModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxGPTNeoModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),bt=new V({props:{name:"__call__",anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py#L391",parametersDescription:[{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code>. Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTNeoTokenizer</code>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig" >GPTNeoConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fe=new rn({props:{$$slots:{default:[sd]},$$scope:{ctx:O}}}),wt=new je({props:{code:`from transformers import GPT2Tokenizer, FlaxGPTNeoModel tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B') model = FlaxGPTNeoModel.from_pretrained('EleutherAI/gpt-neo-1.3B') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, FlaxGPTNeoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxGPTNeoModel.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Nt=new _e({}),xt=new V({props:{name:"class transformers.FlaxGPTNeoForCausalLM",anchor:"transformers.FlaxGPTNeoForCausalLM",parameters:[{name:"config",val:": GPTNeoConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py#L646",parametersDescription:[{anchor:"transformers.FlaxGPTNeoForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig">GPTNeoConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxGPTNeoForCausalLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),qt=new V({props:{name:"__call__",anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py#L391",parametersDescription:[{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code>. Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>GPTNeoTokenizer</code>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxGPTNeoPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoConfig" >GPTNeoConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ze=new rn({props:{$$slots:{default:[ad]},$$scope:{ctx:O}}}),jt=new je({props:{code:`from transformers import GPT2Tokenizer, FlaxGPTNeoForCausalLM tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B') model = FlaxGPTNeoForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) # retrieve logts for next token next_token_logits = outputs.logits[:, -1],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, FlaxGPTNeoForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxGPTNeoForCausalLM.from_pretrained(<span class="hljs-string">&#x27;EleutherAI/gpt-neo-1.3B&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve logts for next token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs.logits[:, -<span class="hljs-number">1</span>]`}}),{c(){p=n("meta"),w=d(),f=n("h1"),b=n("a"),N=n("span"),_(g.$$.fragment),m=d(),x=n("span"),Jn=r("GPT Neo"),ln=d(),oe=n("h2"),ve=n("a"),ro=n("span"),_(Ae.$$.fragment),Rn=d(),io=n("span"),Xn=r("Overview"),dn=d(),J=n("p"),Kn=r("The GPTNeo model was released in the "),Ie=n("a"),Qn=r("EleutherAI/gpt-neo"),Yn=r(` repository by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. It is a GPT2 like causal language model trained on the `),Le=n("a"),Zn=r("Pile"),es=r(" dataset."),cn=d(),Lt=n("p"),ts=r(`The architecture is similar to GPT2 except that GPT Neo uses local attention in every other layer with a window size of 256 tokens.`),pn=d(),Te=n("p"),os=r("This model was contributed by "),Se=n("a"),ns=r("valhalla"),ss=r("."),hn=d(),ne=n("h3"),ye=n("a"),lo=n("span"),_(De.$$.fragment),as=d(),co=n("span"),rs=r("Generation"),un=d(),Pe=n("p"),is=r("The "),po=n("code"),ls=r("generate()"),ds=r(" method can be used to generate text using GPT Neo model."),fn=d(),_(Be.$$.fragment),mn=d(),se=n("h2"),ke=n("a"),ho=n("span"),_(Oe.$$.fragment),cs=d(),uo=n("span"),ps=r("GPTNeoConfig"),gn=d(),E=n("div"),_(We.$$.fragment),hs=d(),ae=n("p"),us=r("This is the configuration class to store the configuration of a "),St=n("a"),fs=r("GPTNeoModel"),ms=r(`. It is used to instantiate a GPT Neo model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTNeo `),He=n("a"),gs=r("gpt-neo-1.3B"),_s=r(" architecture."),vs=d(),re=n("p"),Ts=r("Configuration objects inherit from "),Dt=n("a"),ys=r("PretrainedConfig"),Ps=r(` and can be used to control the model outputs. Read the documentation from `),Bt=n("a"),ks=r("PretrainedConfig"),bs=r(" for more information."),ws=d(),fo=n("p"),Ns=r("Example:"),xs=d(),_(Ue.$$.fragment),_n=d(),ie=n("h2"),be=n("a"),mo=n("span"),_(Ve.$$.fragment),$s=d(),go=n("span"),Gs=r("GPTNeoModel"),vn=d(),z=n("div"),_(Je.$$.fragment),Ms=d(),_o=n("p"),Fs=r("The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top."),Es=d(),Re=n("p"),zs=r("This model inherits from "),Ot=n("a"),Cs=r("PreTrainedModel"),qs=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),js=d(),Xe=n("p"),As=r("This model is also a PyTorch "),Ke=n("a"),Is=r("torch.nn.Module"),Ls=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ss=d(),q=n("div"),_(Qe.$$.fragment),Ds=d(),le=n("p"),Bs=r("The "),Wt=n("a"),Os=r("GPTNeoModel"),Ws=r(" forward method, overrides the "),vo=n("code"),Hs=r("__call__"),Us=r(" special method."),Vs=d(),_(we.$$.fragment),Js=d(),To=n("p"),Rs=r("Example:"),Xs=d(),_(Ye.$$.fragment),Tn=d(),de=n("h2"),Ne=n("a"),yo=n("span"),_(Ze.$$.fragment),Ks=d(),Po=n("span"),Qs=r("GPTNeoForCausalLM"),yn=d(),C=n("div"),_(et.$$.fragment),Ys=d(),ko=n("p"),Zs=r(`The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),ea=d(),tt=n("p"),ta=r("This model inherits from "),Ht=n("a"),oa=r("PreTrainedModel"),na=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sa=d(),ot=n("p"),aa=r("This model is also a PyTorch "),nt=n("a"),ra=r("torch.nn.Module"),ia=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),la=d(),j=n("div"),_(st.$$.fragment),da=d(),ce=n("p"),ca=r("The "),Ut=n("a"),pa=r("GPTNeoForCausalLM"),ha=r(" forward method, overrides the "),bo=n("code"),ua=r("__call__"),fa=r(" special method."),ma=d(),_(xe.$$.fragment),ga=d(),wo=n("p"),_a=r("Example:"),va=d(),_(at.$$.fragment),Pn=d(),pe=n("h2"),$e=n("a"),No=n("span"),_(rt.$$.fragment),Ta=d(),xo=n("span"),ya=r("GPTNeoForSequenceClassification"),kn=d(),$=n("div"),_(it.$$.fragment),Pa=d(),$o=n("p"),ka=r("The GPTNeo Model transformer with a sequence classification head on top (linear layer)."),ba=d(),Vt=n("p"),Jt=n("a"),wa=r("GPTNeoForSequenceClassification"),Na=r(` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),xa=d(),W=n("p"),$a=r(`Since it does classification on the last token, it requires to know the position of the last token. If a `),Go=n("code"),Ga=r("pad_token_id"),Ma=r(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Mo=n("code"),Fa=r("pad_token_id"),Ea=r(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Fo=n("code"),za=r("inputs_embeds"),Ca=r(" are passed instead of "),Eo=n("code"),qa=r("input_ids"),ja=r(`, it does the same (take the last value in each row of the batch).`),Aa=d(),lt=n("p"),Ia=r("This model inherits from "),Rt=n("a"),La=r("PreTrainedModel"),Sa=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Da=d(),dt=n("p"),Ba=r("This model is also a PyTorch "),ct=n("a"),Oa=r("torch.nn.Module"),Wa=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ha=d(),F=n("div"),_(pt.$$.fragment),Ua=d(),he=n("p"),Va=r("The "),Xt=n("a"),Ja=r("GPTNeoForSequenceClassification"),Ra=r(" forward method, overrides the "),zo=n("code"),Xa=r("__call__"),Ka=r(" special method."),Qa=d(),_(Ge.$$.fragment),Ya=d(),Co=n("p"),Za=r("Example of single-label classification:"),er=d(),_(ht.$$.fragment),tr=d(),qo=n("p"),or=r("Example of multi-label classification:"),nr=d(),_(ut.$$.fragment),bn=d(),ue=n("h2"),Me=n("a"),jo=n("span"),_(ft.$$.fragment),sr=d(),Ao=n("span"),ar=r("FlaxGPTNeoModel"),wn=d(),G=n("div"),_(mt.$$.fragment),rr=d(),Io=n("p"),ir=r("The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top."),lr=d(),gt=n("p"),dr=r("This model inherits from "),Kt=n("a"),cr=r("FlaxPreTrainedModel"),pr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hr=d(),_t=n("p"),ur=r("This model is also a Flax Linen "),vt=n("a"),fr=r("flax.nn.Module"),mr=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),gr=d(),Lo=n("p"),_r=r("Finally, this model supports inherent JAX features such as:"),vr=d(),H=n("ul"),So=n("li"),Tt=n("a"),Tr=r("Just-In-Time (JIT) compilation"),yr=d(),Do=n("li"),yt=n("a"),Pr=r("Automatic Differentiation"),kr=d(),Bo=n("li"),Pt=n("a"),br=r("Vectorization"),wr=d(),Oo=n("li"),kt=n("a"),Nr=r("Parallelization"),xr=d(),A=n("div"),_(bt.$$.fragment),$r=d(),fe=n("p"),Gr=r("The "),Wo=n("code"),Mr=r("FlaxGPTNeoPreTrainedModel"),Fr=r(" forward method, overrides the "),Ho=n("code"),Er=r("__call__"),zr=r(" special method."),Cr=d(),_(Fe.$$.fragment),qr=d(),Uo=n("p"),jr=r("Example:"),Ar=d(),_(wt.$$.fragment),Nn=d(),me=n("h2"),Ee=n("a"),Vo=n("span"),_(Nt.$$.fragment),Ir=d(),Jo=n("span"),Lr=r("FlaxGPTNeoForCausalLM"),xn=d(),M=n("div"),_(xt.$$.fragment),Sr=d(),Ro=n("p"),Dr=r(`The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Br=d(),$t=n("p"),Or=r("This model inherits from "),Qt=n("a"),Wr=r("FlaxPreTrainedModel"),Hr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ur=d(),Gt=n("p"),Vr=r("This model is also a Flax Linen "),Mt=n("a"),Jr=r("flax.nn.Module"),Rr=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Xr=d(),Xo=n("p"),Kr=r("Finally, this model supports inherent JAX features such as:"),Qr=d(),U=n("ul"),Ko=n("li"),Ft=n("a"),Yr=r("Just-In-Time (JIT) compilation"),Zr=d(),Qo=n("li"),Et=n("a"),ei=r("Automatic Differentiation"),ti=d(),Yo=n("li"),zt=n("a"),oi=r("Vectorization"),ni=d(),Zo=n("li"),Ct=n("a"),si=r("Parallelization"),ai=d(),I=n("div"),_(qt.$$.fragment),ri=d(),ge=n("p"),ii=r("The "),en=n("code"),li=r("FlaxGPTNeoPreTrainedModel"),di=r(" forward method, overrides the "),tn=n("code"),ci=r("__call__"),pi=r(" special method."),hi=d(),_(ze.$$.fragment),ui=d(),on=n("p"),fi=r("Example:"),mi=d(),_(jt.$$.fragment),this.h()},l(t){const h=ed('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(o),w=c(t),f=s(t,"H1",{class:!0});var At=a(f);b=s(At,"A",{id:!0,class:!0,href:!0});var nn=a(b);N=s(nn,"SPAN",{});var sn=a(N);v(g.$$.fragment,sn),sn.forEach(o),nn.forEach(o),m=c(At),x=s(At,"SPAN",{});var an=a(x);Jn=i(an,"GPT Neo"),an.forEach(o),At.forEach(o),ln=c(t),oe=s(t,"H2",{class:!0});var It=a(oe);ve=s(It,"A",{id:!0,class:!0,href:!0});var _i=a(ve);ro=s(_i,"SPAN",{});var vi=a(ro);v(Ae.$$.fragment,vi),vi.forEach(o),_i.forEach(o),Rn=c(It),io=s(It,"SPAN",{});var Ti=a(io);Xn=i(Ti,"Overview"),Ti.forEach(o),It.forEach(o),dn=c(t),J=s(t,"P",{});var Yt=a(J);Kn=i(Yt,"The GPTNeo model was released in the "),Ie=s(Yt,"A",{href:!0,rel:!0});var yi=a(Ie);Qn=i(yi,"EleutherAI/gpt-neo"),yi.forEach(o),Yn=i(Yt,` repository by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. It is a GPT2 like causal language model trained on the `),Le=s(Yt,"A",{href:!0,rel:!0});var Pi=a(Le);Zn=i(Pi,"Pile"),Pi.forEach(o),es=i(Yt," dataset."),Yt.forEach(o),cn=c(t),Lt=s(t,"P",{});var ki=a(Lt);ts=i(ki,`The architecture is similar to GPT2 except that GPT Neo uses local attention in every other layer with a window size of 256 tokens.`),ki.forEach(o),pn=c(t),Te=s(t,"P",{});var Gn=a(Te);os=i(Gn,"This model was contributed by "),Se=s(Gn,"A",{href:!0,rel:!0});var bi=a(Se);ns=i(bi,"valhalla"),bi.forEach(o),ss=i(Gn,"."),Gn.forEach(o),hn=c(t),ne=s(t,"H3",{class:!0});var Mn=a(ne);ye=s(Mn,"A",{id:!0,class:!0,href:!0});var wi=a(ye);lo=s(wi,"SPAN",{});var Ni=a(lo);v(De.$$.fragment,Ni),Ni.forEach(o),wi.forEach(o),as=c(Mn),co=s(Mn,"SPAN",{});var xi=a(co);rs=i(xi,"Generation"),xi.forEach(o),Mn.forEach(o),un=c(t),Pe=s(t,"P",{});var Fn=a(Pe);is=i(Fn,"The "),po=s(Fn,"CODE",{});var $i=a(po);ls=i($i,"generate()"),$i.forEach(o),ds=i(Fn," method can be used to generate text using GPT Neo model."),Fn.forEach(o),fn=c(t),v(Be.$$.fragment,t),mn=c(t),se=s(t,"H2",{class:!0});var En=a(se);ke=s(En,"A",{id:!0,class:!0,href:!0});var Gi=a(ke);ho=s(Gi,"SPAN",{});var Mi=a(ho);v(Oe.$$.fragment,Mi),Mi.forEach(o),Gi.forEach(o),cs=c(En),uo=s(En,"SPAN",{});var Fi=a(uo);ps=i(Fi,"GPTNeoConfig"),Fi.forEach(o),En.forEach(o),gn=c(t),E=s(t,"DIV",{class:!0});var R=a(E);v(We.$$.fragment,R),hs=c(R),ae=s(R,"P",{});var Zt=a(ae);us=i(Zt,"This is the configuration class to store the configuration of a "),St=s(Zt,"A",{href:!0});var Ei=a(St);fs=i(Ei,"GPTNeoModel"),Ei.forEach(o),ms=i(Zt,`. It is used to instantiate a GPT Neo model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTNeo `),He=s(Zt,"A",{href:!0,rel:!0});var zi=a(He);gs=i(zi,"gpt-neo-1.3B"),zi.forEach(o),_s=i(Zt," architecture."),Zt.forEach(o),vs=c(R),re=s(R,"P",{});var eo=a(re);Ts=i(eo,"Configuration objects inherit from "),Dt=s(eo,"A",{href:!0});var Ci=a(Dt);ys=i(Ci,"PretrainedConfig"),Ci.forEach(o),Ps=i(eo,` and can be used to control the model outputs. Read the documentation from `),Bt=s(eo,"A",{href:!0});var qi=a(Bt);ks=i(qi,"PretrainedConfig"),qi.forEach(o),bs=i(eo," for more information."),eo.forEach(o),ws=c(R),fo=s(R,"P",{});var ji=a(fo);Ns=i(ji,"Example:"),ji.forEach(o),xs=c(R),v(Ue.$$.fragment,R),R.forEach(o),_n=c(t),ie=s(t,"H2",{class:!0});var zn=a(ie);be=s(zn,"A",{id:!0,class:!0,href:!0});var Ai=a(be);mo=s(Ai,"SPAN",{});var Ii=a(mo);v(Ve.$$.fragment,Ii),Ii.forEach(o),Ai.forEach(o),$s=c(zn),go=s(zn,"SPAN",{});var Li=a(go);Gs=i(Li,"GPTNeoModel"),Li.forEach(o),zn.forEach(o),vn=c(t),z=s(t,"DIV",{class:!0});var X=a(z);v(Je.$$.fragment,X),Ms=c(X),_o=s(X,"P",{});var Si=a(_o);Fs=i(Si,"The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top."),Si.forEach(o),Es=c(X),Re=s(X,"P",{});var Cn=a(Re);zs=i(Cn,"This model inherits from "),Ot=s(Cn,"A",{href:!0});var Di=a(Ot);Cs=i(Di,"PreTrainedModel"),Di.forEach(o),qs=i(Cn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cn.forEach(o),js=c(X),Xe=s(X,"P",{});var qn=a(Xe);As=i(qn,"This model is also a PyTorch "),Ke=s(qn,"A",{href:!0,rel:!0});var Bi=a(Ke);Is=i(Bi,"torch.nn.Module"),Bi.forEach(o),Ls=i(qn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qn.forEach(o),Ss=c(X),q=s(X,"DIV",{class:!0});var K=a(q);v(Qe.$$.fragment,K),Ds=c(K),le=s(K,"P",{});var to=a(le);Bs=i(to,"The "),Wt=s(to,"A",{href:!0});var Oi=a(Wt);Os=i(Oi,"GPTNeoModel"),Oi.forEach(o),Ws=i(to," forward method, overrides the "),vo=s(to,"CODE",{});var Wi=a(vo);Hs=i(Wi,"__call__"),Wi.forEach(o),Us=i(to," special method."),to.forEach(o),Vs=c(K),v(we.$$.fragment,K),Js=c(K),To=s(K,"P",{});var Hi=a(To);Rs=i(Hi,"Example:"),Hi.forEach(o),Xs=c(K),v(Ye.$$.fragment,K),K.forEach(o),X.forEach(o),Tn=c(t),de=s(t,"H2",{class:!0});var jn=a(de);Ne=s(jn,"A",{id:!0,class:!0,href:!0});var Ui=a(Ne);yo=s(Ui,"SPAN",{});var Vi=a(yo);v(Ze.$$.fragment,Vi),Vi.forEach(o),Ui.forEach(o),Ks=c(jn),Po=s(jn,"SPAN",{});var Ji=a(Po);Qs=i(Ji,"GPTNeoForCausalLM"),Ji.forEach(o),jn.forEach(o),yn=c(t),C=s(t,"DIV",{class:!0});var Q=a(C);v(et.$$.fragment,Q),Ys=c(Q),ko=s(Q,"P",{});var Ri=a(ko);Zs=i(Ri,`The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Ri.forEach(o),ea=c(Q),tt=s(Q,"P",{});var An=a(tt);ta=i(An,"This model inherits from "),Ht=s(An,"A",{href:!0});var Xi=a(Ht);oa=i(Xi,"PreTrainedModel"),Xi.forEach(o),na=i(An,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),An.forEach(o),sa=c(Q),ot=s(Q,"P",{});var In=a(ot);aa=i(In,"This model is also a PyTorch "),nt=s(In,"A",{href:!0,rel:!0});var Ki=a(nt);ra=i(Ki,"torch.nn.Module"),Ki.forEach(o),ia=i(In,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),In.forEach(o),la=c(Q),j=s(Q,"DIV",{class:!0});var Y=a(j);v(st.$$.fragment,Y),da=c(Y),ce=s(Y,"P",{});var oo=a(ce);ca=i(oo,"The "),Ut=s(oo,"A",{href:!0});var Qi=a(Ut);pa=i(Qi,"GPTNeoForCausalLM"),Qi.forEach(o),ha=i(oo," forward method, overrides the "),bo=s(oo,"CODE",{});var Yi=a(bo);ua=i(Yi,"__call__"),Yi.forEach(o),fa=i(oo," special method."),oo.forEach(o),ma=c(Y),v(xe.$$.fragment,Y),ga=c(Y),wo=s(Y,"P",{});var Zi=a(wo);_a=i(Zi,"Example:"),Zi.forEach(o),va=c(Y),v(at.$$.fragment,Y),Y.forEach(o),Q.forEach(o),Pn=c(t),pe=s(t,"H2",{class:!0});var Ln=a(pe);$e=s(Ln,"A",{id:!0,class:!0,href:!0});var el=a($e);No=s(el,"SPAN",{});var tl=a(No);v(rt.$$.fragment,tl),tl.forEach(o),el.forEach(o),Ta=c(Ln),xo=s(Ln,"SPAN",{});var ol=a(xo);ya=i(ol,"GPTNeoForSequenceClassification"),ol.forEach(o),Ln.forEach(o),kn=c(t),$=s(t,"DIV",{class:!0});var L=a($);v(it.$$.fragment,L),Pa=c(L),$o=s(L,"P",{});var nl=a($o);ka=i(nl,"The GPTNeo Model transformer with a sequence classification head on top (linear layer)."),nl.forEach(o),ba=c(L),Vt=s(L,"P",{});var gi=a(Vt);Jt=s(gi,"A",{href:!0});var sl=a(Jt);wa=i(sl,"GPTNeoForSequenceClassification"),sl.forEach(o),Na=i(gi,` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),gi.forEach(o),xa=c(L),W=s(L,"P",{});var Z=a(W);$a=i(Z,`Since it does classification on the last token, it requires to know the position of the last token. If a `),Go=s(Z,"CODE",{});var al=a(Go);Ga=i(al,"pad_token_id"),al.forEach(o),Ma=i(Z,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Mo=s(Z,"CODE",{});var rl=a(Mo);Fa=i(rl,"pad_token_id"),rl.forEach(o),Ea=i(Z,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Fo=s(Z,"CODE",{});var il=a(Fo);za=i(il,"inputs_embeds"),il.forEach(o),Ca=i(Z," are passed instead of "),Eo=s(Z,"CODE",{});var ll=a(Eo);qa=i(ll,"input_ids"),ll.forEach(o),ja=i(Z,`, it does the same (take the last value in each row of the batch).`),Z.forEach(o),Aa=c(L),lt=s(L,"P",{});var Sn=a(lt);Ia=i(Sn,"This model inherits from "),Rt=s(Sn,"A",{href:!0});var dl=a(Rt);La=i(dl,"PreTrainedModel"),dl.forEach(o),Sa=i(Sn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sn.forEach(o),Da=c(L),dt=s(L,"P",{});var Dn=a(dt);Ba=i(Dn,"This model is also a PyTorch "),ct=s(Dn,"A",{href:!0,rel:!0});var cl=a(ct);Oa=i(cl,"torch.nn.Module"),cl.forEach(o),Wa=i(Dn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dn.forEach(o),Ha=c(L),F=s(L,"DIV",{class:!0});var S=a(F);v(pt.$$.fragment,S),Ua=c(S),he=s(S,"P",{});var no=a(he);Va=i(no,"The "),Xt=s(no,"A",{href:!0});var pl=a(Xt);Ja=i(pl,"GPTNeoForSequenceClassification"),pl.forEach(o),Ra=i(no," forward method, overrides the "),zo=s(no,"CODE",{});var hl=a(zo);Xa=i(hl,"__call__"),hl.forEach(o),Ka=i(no," special method."),no.forEach(o),Qa=c(S),v(Ge.$$.fragment,S),Ya=c(S),Co=s(S,"P",{});var ul=a(Co);Za=i(ul,"Example of single-label classification:"),ul.forEach(o),er=c(S),v(ht.$$.fragment,S),tr=c(S),qo=s(S,"P",{});var fl=a(qo);or=i(fl,"Example of multi-label classification:"),fl.forEach(o),nr=c(S),v(ut.$$.fragment,S),S.forEach(o),L.forEach(o),bn=c(t),ue=s(t,"H2",{class:!0});var Bn=a(ue);Me=s(Bn,"A",{id:!0,class:!0,href:!0});var ml=a(Me);jo=s(ml,"SPAN",{});var gl=a(jo);v(ft.$$.fragment,gl),gl.forEach(o),ml.forEach(o),sr=c(Bn),Ao=s(Bn,"SPAN",{});var _l=a(Ao);ar=i(_l,"FlaxGPTNeoModel"),_l.forEach(o),Bn.forEach(o),wn=c(t),G=s(t,"DIV",{class:!0});var D=a(G);v(mt.$$.fragment,D),rr=c(D),Io=s(D,"P",{});var vl=a(Io);ir=i(vl,"The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top."),vl.forEach(o),lr=c(D),gt=s(D,"P",{});var On=a(gt);dr=i(On,"This model inherits from "),Kt=s(On,"A",{href:!0});var Tl=a(Kt);cr=i(Tl,"FlaxPreTrainedModel"),Tl.forEach(o),pr=i(On,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),On.forEach(o),hr=c(D),_t=s(D,"P",{});var Wn=a(_t);ur=i(Wn,"This model is also a Flax Linen "),vt=s(Wn,"A",{href:!0,rel:!0});var yl=a(vt);fr=i(yl,"flax.nn.Module"),yl.forEach(o),mr=i(Wn,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Wn.forEach(o),gr=c(D),Lo=s(D,"P",{});var Pl=a(Lo);_r=i(Pl,"Finally, this model supports inherent JAX features such as:"),Pl.forEach(o),vr=c(D),H=s(D,"UL",{});var Ce=a(H);So=s(Ce,"LI",{});var kl=a(So);Tt=s(kl,"A",{href:!0,rel:!0});var bl=a(Tt);Tr=i(bl,"Just-In-Time (JIT) compilation"),bl.forEach(o),kl.forEach(o),yr=c(Ce),Do=s(Ce,"LI",{});var wl=a(Do);yt=s(wl,"A",{href:!0,rel:!0});var Nl=a(yt);Pr=i(Nl,"Automatic Differentiation"),Nl.forEach(o),wl.forEach(o),kr=c(Ce),Bo=s(Ce,"LI",{});var xl=a(Bo);Pt=s(xl,"A",{href:!0,rel:!0});var $l=a(Pt);br=i($l,"Vectorization"),$l.forEach(o),xl.forEach(o),wr=c(Ce),Oo=s(Ce,"LI",{});var Gl=a(Oo);kt=s(Gl,"A",{href:!0,rel:!0});var Ml=a(kt);Nr=i(Ml,"Parallelization"),Ml.forEach(o),Gl.forEach(o),Ce.forEach(o),xr=c(D),A=s(D,"DIV",{class:!0});var ee=a(A);v(bt.$$.fragment,ee),$r=c(ee),fe=s(ee,"P",{});var so=a(fe);Gr=i(so,"The "),Wo=s(so,"CODE",{});var Fl=a(Wo);Mr=i(Fl,"FlaxGPTNeoPreTrainedModel"),Fl.forEach(o),Fr=i(so," forward method, overrides the "),Ho=s(so,"CODE",{});var El=a(Ho);Er=i(El,"__call__"),El.forEach(o),zr=i(so," special method."),so.forEach(o),Cr=c(ee),v(Fe.$$.fragment,ee),qr=c(ee),Uo=s(ee,"P",{});var zl=a(Uo);jr=i(zl,"Example:"),zl.forEach(o),Ar=c(ee),v(wt.$$.fragment,ee),ee.forEach(o),D.forEach(o),Nn=c(t),me=s(t,"H2",{class:!0});var Hn=a(me);Ee=s(Hn,"A",{id:!0,class:!0,href:!0});var Cl=a(Ee);Vo=s(Cl,"SPAN",{});var ql=a(Vo);v(Nt.$$.fragment,ql),ql.forEach(o),Cl.forEach(o),Ir=c(Hn),Jo=s(Hn,"SPAN",{});var jl=a(Jo);Lr=i(jl,"FlaxGPTNeoForCausalLM"),jl.forEach(o),Hn.forEach(o),xn=c(t),M=s(t,"DIV",{class:!0});var B=a(M);v(xt.$$.fragment,B),Sr=c(B),Ro=s(B,"P",{});var Al=a(Ro);Dr=i(Al,`The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Al.forEach(o),Br=c(B),$t=s(B,"P",{});var Un=a($t);Or=i(Un,"This model inherits from "),Qt=s(Un,"A",{href:!0});var Il=a(Qt);Wr=i(Il,"FlaxPreTrainedModel"),Il.forEach(o),Hr=i(Un,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Un.forEach(o),Ur=c(B),Gt=s(B,"P",{});var Vn=a(Gt);Vr=i(Vn,"This model is also a Flax Linen "),Mt=s(Vn,"A",{href:!0,rel:!0});var Ll=a(Mt);Jr=i(Ll,"flax.nn.Module"),Ll.forEach(o),Rr=i(Vn,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Vn.forEach(o),Xr=c(B),Xo=s(B,"P",{});var Sl=a(Xo);Kr=i(Sl,"Finally, this model supports inherent JAX features such as:"),Sl.forEach(o),Qr=c(B),U=s(B,"UL",{});var qe=a(U);Ko=s(qe,"LI",{});var Dl=a(Ko);Ft=s(Dl,"A",{href:!0,rel:!0});var Bl=a(Ft);Yr=i(Bl,"Just-In-Time (JIT) compilation"),Bl.forEach(o),Dl.forEach(o),Zr=c(qe),Qo=s(qe,"LI",{});var Ol=a(Qo);Et=s(Ol,"A",{href:!0,rel:!0});var Wl=a(Et);ei=i(Wl,"Automatic Differentiation"),Wl.forEach(o),Ol.forEach(o),ti=c(qe),Yo=s(qe,"LI",{});var Hl=a(Yo);zt=s(Hl,"A",{href:!0,rel:!0});var Ul=a(zt);oi=i(Ul,"Vectorization"),Ul.forEach(o),Hl.forEach(o),ni=c(qe),Zo=s(qe,"LI",{});var Vl=a(Zo);Ct=s(Vl,"A",{href:!0,rel:!0});var Jl=a(Ct);si=i(Jl,"Parallelization"),Jl.forEach(o),Vl.forEach(o),qe.forEach(o),ai=c(B),I=s(B,"DIV",{class:!0});var te=a(I);v(qt.$$.fragment,te),ri=c(te),ge=s(te,"P",{});var ao=a(ge);ii=i(ao,"The "),en=s(ao,"CODE",{});var Rl=a(en);li=i(Rl,"FlaxGPTNeoPreTrainedModel"),Rl.forEach(o),di=i(ao," forward method, overrides the "),tn=s(ao,"CODE",{});var Xl=a(tn);ci=i(Xl,"__call__"),Xl.forEach(o),pi=i(ao," special method."),ao.forEach(o),hi=c(te),v(ze.$$.fragment,te),ui=c(te),on=s(te,"P",{});var Kl=a(on);fi=i(Kl,"Example:"),Kl.forEach(o),mi=c(te),v(jt.$$.fragment,te),te.forEach(o),B.forEach(o),this.h()},h(){l(p,"name","hf:doc:metadata"),l(p,"content",JSON.stringify(id)),l(b,"id","gpt-neo"),l(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(b,"href","#gpt-neo"),l(f,"class","relative group"),l(ve,"id","overview"),l(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ve,"href","#overview"),l(oe,"class","relative group"),l(Ie,"href","https://github.com/EleutherAI/gpt-neo"),l(Ie,"rel","nofollow"),l(Le,"href","https://pile.eleuther.ai/"),l(Le,"rel","nofollow"),l(Se,"href","https://huggingface.co/valhalla"),l(Se,"rel","nofollow"),l(ye,"id","generation"),l(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ye,"href","#generation"),l(ne,"class","relative group"),l(ke,"id","transformers.GPTNeoConfig"),l(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ke,"href","#transformers.GPTNeoConfig"),l(se,"class","relative group"),l(St,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel"),l(He,"href","https://huggingface.co/EleutherAI/gpt-neo-1.3B"),l(He,"rel","nofollow"),l(Dt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Bt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(E,"class","docstring"),l(be,"id","transformers.GPTNeoModel"),l(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(be,"href","#transformers.GPTNeoModel"),l(ie,"class","relative 
group"),l(Ot,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ke,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ke,"rel","nofollow"),l(Wt,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoModel"),l(q,"class","docstring"),l(z,"class","docstring"),l(Ne,"id","transformers.GPTNeoForCausalLM"),l(Ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ne,"href","#transformers.GPTNeoForCausalLM"),l(de,"class","relative group"),l(Ht,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(nt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(nt,"rel","nofollow"),l(Ut,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForCausalLM"),l(j,"class","docstring"),l(C,"class","docstring"),l($e,"id","transformers.GPTNeoForSequenceClassification"),l($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l($e,"href","#transformers.GPTNeoForSequenceClassification"),l(pe,"class","relative group"),l(Jt,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForSequenceClassification"),l(Rt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(ct,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(ct,"rel","nofollow"),l(Xt,"href","/docs/transformers/v4.15.0/en/model_doc/gpt_neo#transformers.GPTNeoForSequenceClassification"),l(F,"class","docstring"),l($,"class","docstring"),l(Me,"id","transformers.FlaxGPTNeoModel"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.FlaxGPTNeoModel"),l(ue,"class","relative group"),l(Kt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(vt,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(vt,"rel","nofollow"),l(Tt,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(Tt,"rel","nofollow"),l(yt,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(yt,"rel","nofollow"),l(Pt,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Pt,"rel","nofollow"),l(kt,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(kt,"rel","nofollow"),l(A,"class","docstring"),l(G,"class","docstring"),l(Ee,"id","transformers.FlaxGPTNeoForCausalLM"),l(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ee,"href","#transformers.FlaxGPTNeoForCausalLM"),l(me,"class","relative 
group"),l(Qt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(Mt,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(Mt,"rel","nofollow"),l(Ft,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(Ft,"rel","nofollow"),l(Et,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Et,"rel","nofollow"),l(zt,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(zt,"rel","nofollow"),l(Ct,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(Ct,"rel","nofollow"),l(I,"class","docstring"),l(M,"class","docstring")},m(t,h){e(document.head,p),u(t,w,h),u(t,f,h),e(f,b),e(b,N),T(g,N,null),e(f,m),e(f,x),e(x,Jn),u(t,ln,h),u(t,oe,h),e(oe,ve),e(ve,ro),T(Ae,ro,null),e(oe,Rn),e(oe,io),e(io,Xn),u(t,dn,h),u(t,J,h),e(J,Kn),e(J,Ie),e(Ie,Qn),e(J,Yn),e(J,Le),e(Le,Zn),e(J,es),u(t,cn,h),u(t,Lt,h),e(Lt,ts),u(t,pn,h),u(t,Te,h),e(Te,os),e(Te,Se),e(Se,ns),e(Te,ss),u(t,hn,h),u(t,ne,h),e(ne,ye),e(ye,lo),T(De,lo,null),e(ne,as),e(ne,co),e(co,rs),u(t,un,h),u(t,Pe,h),e(Pe,is),e(Pe,po),e(po,ls),e(Pe,ds),u(t,fn,h),T(Be,t,h),u(t,mn,h),u(t,se,h),e(se,ke),e(ke,ho),T(Oe,ho,null),e(se,cs),e(se,uo),e(uo,ps),u(t,gn,h),u(t,E,h),T(We,E,null),e(E,hs),e(E,ae),e(ae,us),e(ae,St),e(St,fs),e(ae,ms),e(ae,He),e(He,gs),e(ae,_s),e(E,vs),e(E,re),e(re,Ts),e(re,Dt),e(Dt,ys),e(re,Ps),e(re,Bt),e(Bt,ks),e(re,bs),e(E,ws),e(E,fo),e(fo,Ns),e(E,xs),T(Ue,E,null),u(t,_n,h),u(t,ie,h),e(ie,be),e(be,mo),T(Ve,mo,null),e(ie,$s),e(ie,go),e(go,Gs),u(t,vn,h),u(t,z,h),T(Je,z,null),e(z,Ms),e(z,_o),e(_o,Fs),e(z,Es),e(z,Re),e(Re,zs),e(Re,Ot),e(Ot,Cs),e(Re,qs),e(z,js),e(z,Xe),e(Xe,As),e(Xe,Ke),e(Ke,Is),e(Xe,Ls),e(z,Ss),e(z,q),T(Qe,q,null),e(q,Ds),e(q,le),e(le,Bs),e(le,Wt),e(Wt,Os),e(le,Ws),e(le,vo),e(vo,Hs),e(le,Us),e(q,Vs),T(we,q,null),e(q,Js),e(q,To),e(To,Rs),e(q,Xs),T(Ye,q,null),u(t,Tn,h),u(t,de,h),e(de,Ne),e(Ne,yo),T(Ze,yo,null),e(de,Ks),e(de,Po),e(Po,Qs),u(t,yn,h),u(t,C,h),T(et,C,null),e(C,Ys),e(C,ko),e(ko,Zs),e(C,ea),e(C,tt),e(tt,ta),e(tt,Ht),e(Ht,oa),e(tt,na),e(C,sa),e(C,ot),e(ot,aa),e(ot,nt),e(nt,ra),e(ot,ia),e(C,la),e(C,j),T(st,j,null),e(j,da),e(j,ce),e(ce,ca),e(ce,Ut),e(Ut,pa),e(ce,ha),e(ce,bo),e(bo,ua),e(ce,fa),e(j,ma),T(xe,j,null),e(j,ga),e(j,wo),e(wo,_a),e(j,va),T(at,j,null),u(t,Pn,h),u(t,pe,h),e(pe,$e),e($e,No),T(rt,No,null),e(pe,Ta),e(pe,xo),e(xo,ya),u(t,kn,h),u(t,$,h),T(it,$,null),e($,Pa),e($,$o),e($o,ka),e($,ba),e($,Vt),e(Vt,Jt),e(Jt,wa),e(Vt,Na),e($,xa),e($,W),e(W,$a),e(W,Go),e(Go,Ga),e(W,Ma),e(W,Mo),e(Mo,Fa),e(W,Ea),e(W,Fo),e(Fo,za),e(W,Ca),e(W,Eo),e(Eo,qa),e(W,ja),e($,Aa),e($,lt),e(lt,Ia),e(lt,Rt),e(Rt,La),e(lt,Sa),e($,Da),e($,dt),e(dt,Ba),e(dt,ct),e(ct,Oa),e(dt,Wa),e($,Ha),e($,F),T(pt,F,null),e(F,Ua),e(F,he),e(he,Va),e(he,Xt),e(Xt,Ja),e(he,Ra),e(he,zo),e(zo,Xa),e(he,Ka),e(F,Qa),T(Ge,F,null),e(F,Ya),e(F,Co),e(Co,Za),e(F,er),T(ht,F,null),e(F,tr),e(F,qo),e(qo,or),e(F,nr),T(ut,F,null),u(t,bn,h),u(t,ue,h),e(ue,Me),e(Me,jo),T(ft,jo,null),e(ue,sr),e(ue,Ao),e(Ao,ar),u(t,wn,h),u(t,G,h),T(mt,G,null),e(G,rr),e(G,Io),e(Io,ir),e(G,lr),e(G,gt),e(gt,dr),e(gt,Kt),e(Kt,cr),e(gt,pr),e(G,hr),e(G,_t),e(_t,ur),e(_t,vt),e(vt,fr),e(_t,mr),e(G,gr),e(G,Lo),e(Lo,_r),e(G,vr),e(G,H),e(H,So),e(So,Tt),e(Tt,Tr),e(H,yr),e(H,Do),e(Do,yt),e(yt,Pr),e(H,kr),e(H,Bo),e(Bo,Pt),e(Pt,br),e(H,wr),e(H,Oo),e(Oo,kt),e(kt,Nr),e(G,xr),e(G,A),T(bt,A,null),e(A,$r),e(A,fe),e(fe,Gr),e(fe,Wo),e(Wo,Mr),e(fe,Fr),e(fe,Ho),e(Ho,Er),e(fe,zr),e(A,Cr),T(Fe,A,null),e(A,qr),e(A,Uo),e(Uo,jr),e(A,Ar),T(wt,A,null),u(t,Nn,h),u(t,me,h),
e(me,Ee),e(Ee,Vo),T(Nt,Vo,null),e(me,Ir),e(me,Jo),e(Jo,Lr),u(t,xn,h),u(t,M,h),T(xt,M,null),e(M,Sr),e(M,Ro),e(Ro,Dr),e(M,Br),e(M,$t),e($t,Or),e($t,Qt),e(Qt,Wr),e($t,Hr),e(M,Ur),e(M,Gt),e(Gt,Vr),e(Gt,Mt),e(Mt,Jr),e(Gt,Rr),e(M,Xr),e(M,Xo),e(Xo,Kr),e(M,Qr),e(M,U),e(U,Ko),e(Ko,Ft),e(Ft,Yr),e(U,Zr),e(U,Qo),e(Qo,Et),e(Et,ei),e(U,ti),e(U,Yo),e(Yo,zt),e(zt,oi),e(U,ni),e(U,Zo),e(Zo,Ct),e(Ct,si),e(M,ai),e(M,I),T(qt,I,null),e(I,ri),e(I,ge),e(ge,ii),e(ge,en),e(en,li),e(ge,di),e(ge,tn),e(tn,ci),e(ge,pi),e(I,hi),T(ze,I,null),e(I,ui),e(I,on),e(on,fi),e(I,mi),T(jt,I,null),$n=!0},p(t,[h]){const At={};h&2&&(At.$$scope={dirty:h,ctx:t}),we.$set(At);const nn={};h&2&&(nn.$$scope={dirty:h,ctx:t}),xe.$set(nn);const sn={};h&2&&(sn.$$scope={dirty:h,ctx:t}),Ge.$set(sn);const an={};h&2&&(an.$$scope={dirty:h,ctx:t}),Fe.$set(an);const It={};h&2&&(It.$$scope={dirty:h,ctx:t}),ze.$set(It)},i(t){$n||(y(g.$$.fragment,t),y(Ae.$$.fragment,t),y(De.$$.fragment,t),y(Be.$$.fragment,t),y(Oe.$$.fragment,t),y(We.$$.fragment,t),y(Ue.$$.fragment,t),y(Ve.$$.fragment,t),y(Je.$$.fragment,t),y(Qe.$$.fragment,t),y(we.$$.fragment,t),y(Ye.$$.fragment,t),y(Ze.$$.fragment,t),y(et.$$.fragment,t),y(st.$$.fragment,t),y(xe.$$.fragment,t),y(at.$$.fragment,t),y(rt.$$.fragment,t),y(it.$$.fragment,t),y(pt.$$.fragment,t),y(Ge.$$.fragment,t),y(ht.$$.fragment,t),y(ut.$$.fragment,t),y(ft.$$.fragment,t),y(mt.$$.fragment,t),y(bt.$$.fragment,t),y(Fe.$$.fragment,t),y(wt.$$.fragment,t),y(Nt.$$.fragment,t),y(xt.$$.fragment,t),y(qt.$$.fragment,t),y(ze.$$.fragment,t),y(jt.$$.fragment,t),$n=!0)},o(t){P(g.$$.fragment,t),P(Ae.$$.fragment,t),P(De.$$.fragment,t),P(Be.$$.fragment,t),P(Oe.$$.fragment,t),P(We.$$.fragment,t),P(Ue.$$.fragment,t),P(Ve.$$.fragment,t),P(Je.$$.fragment,t),P(Qe.$$.fragment,t),P(we.$$.fragment,t),P(Ye.$$.fragment,t),P(Ze.$$.fragment,t),P(et.$$.fragment,t),P(st.$$.fragment,t),P(xe.$$.fragment,t),P(at.$$.fragment,t),P(rt.$$.fragment,t),P(it.$$.fragment,t),P(pt.$$.fragment,t),P(Ge.$$.fragment,t),P(ht.$$.fragment,t),P(ut.$$.fragment,t),P(ft.$$.fragment,t),P(mt.$$.fragment,t),P(bt.$$.fragment,t),P(Fe.$$.fragment,t),P(wt.$$.fragment,t),P(Nt.$$.fragment,t),P(xt.$$.fragment,t),P(qt.$$.fragment,t),P(ze.$$.fragment,t),P(jt.$$.fragment,t),$n=!1},d(t){o(p),t&&o(w),t&&o(f),k(g),t&&o(ln),t&&o(oe),k(Ae),t&&o(dn),t&&o(J),t&&o(cn),t&&o(Lt),t&&o(pn),t&&o(Te),t&&o(hn),t&&o(ne),k(De),t&&o(un),t&&o(Pe),t&&o(fn),k(Be,t),t&&o(mn),t&&o(se),k(Oe),t&&o(gn),t&&o(E),k(We),k(Ue),t&&o(_n),t&&o(ie),k(Ve),t&&o(vn),t&&o(z),k(Je),k(Qe),k(we),k(Ye),t&&o(Tn),t&&o(de),k(Ze),t&&o(yn),t&&o(C),k(et),k(st),k(xe),k(at),t&&o(Pn),t&&o(pe),k(rt),t&&o(kn),t&&o($),k(it),k(pt),k(Ge),k(ht),k(ut),t&&o(bn),t&&o(ue),k(ft),t&&o(wn),t&&o(G),k(mt),k(bt),k(Fe),k(wt),t&&o(Nn),t&&o(me),k(Nt),t&&o(xn),t&&o(M),k(xt),k(qt),k(ze),k(jt)}}}const id={local:"gpt-neo",sections:[{local:"overview",sections:[{local:"generation",title:"Generation"}],title:"Overview"},{local:"transformers.GPTNeoConfig",title:"GPTNeoConfig"},{local:"transformers.GPTNeoModel",title:"GPTNeoModel"},{local:"transformers.GPTNeoForCausalLM",title:"GPTNeoForCausalLM"},{local:"transformers.GPTNeoForSequenceClassification",title:"GPTNeoForSequenceClassification"},{local:"transformers.FlaxGPTNeoModel",title:"FlaxGPTNeoModel"},{local:"transformers.FlaxGPTNeoForCausalLM",title:"FlaxGPTNeoForCausalLM"}],title:"GPT Neo"};function ld(O,p,w){let{fw:f}=p;return O.$$set=b=>{"fw"in b&&w(0,f=b.fw)},[f]}class md extends Ql{constructor(p){super();Yl(this,p,ld,rd,Zl,{fw:0})}}export{md as default,id as metadata};
9,950
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bert.mdx-923d84ed.js
import{S as PN,i as qN,s as jN,e as s,k as l,w as k,t as o,L as CN,c as r,d as t,m as d,a,x as T,h as n,b as c,J as e,g as f,y,q as b,o as w,B as F}from"../../chunks/vendor-b1433968.js";import{T as ye}from"../../chunks/Tip-c3840994.js";import{D as C}from"../../chunks/Docstring-ff504c58.js";import{C as Be}from"../../chunks/CodeBlock-a320dbd7.js";import{I as we}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function NN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function IN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function AN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function DN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function LN(j){let 
p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function ON(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function SN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function UN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function WN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function HN(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var 
qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function RN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function QN(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var 
xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function VN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function KN(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, 
attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function JN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function GN(j){let 
p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function XN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function YN(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there 
are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function ZN(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function eI(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input 
Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function tI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function oI(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), 
or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function nI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although 
the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function sI(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le;return{c(){p=s("p"),$=o("TF 2.0 models accepts two formats as inputs:"),g=l(),v=s("ul"),x=s("li"),_=o("having all inputs as keyword arguments (like PyTorch models), or"),u=l(),B=s("li"),ue=o("having all inputs as a list, tuple or dict in the first positional arguments."),X=l(),M=s("p"),ee=o("This second option is useful when using "),L=s("code"),oe=o("tf.keras.Model.fit"),ge=o(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),_e=o("model(inputs)"),ce=o("."),J=l(),I=s("p"),ne=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Y=l(),z=s("ul"),q=s("li"),se=o("a single Tensor with "),H=s("code"),pe=o("input_ids"),re=o(" only and nothing else: "),S=s("code"),ve=o("model(inputs_ids)"),he=l(),P=s("li"),ie=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=s("code"),fe=o("model([input_ids, attention_mask])"),ae=o(" or "),Q=s("code"),me=o("model([input_ids, attention_mask, token_type_ids])"),te=l(),N=s("li"),ke=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=s("code"),le=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var E=a(p);$=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),g=d(h),v=r(h,"UL",{});var K=a(v);x=r(K,"LI",{});var Ee=a(x);_=n(Ee,"having all inputs as keyword arguments (like PyTorch models), or"),Ee.forEach(t),u=d(K),B=r(K,"LI",{});var Fe=a(B);ue=n(Fe,"having all inputs as a list, tuple or dict in the first positional arguments."),Fe.forEach(t),K.forEach(t),X=d(h),M=r(h,"P",{});var A=a(M);ee=n(A,"This second option is useful when using "),L=r(A,"CODE",{});var Me=a(L);oe=n(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=n(A,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(A,"CODE",{});var xe=a(O);_e=n(xe,"model(inputs)"),xe.forEach(t),ce=n(A,"."),A.forEach(t),J=d(h),I=r(h,"P",{});var ze=a(I);ne=n(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Y=d(h),z=r(h,"UL",{});var D=a(z);q=r(D,"LI",{});var W=a(q);se=n(W,"a single Tensor with "),H=r(W,"CODE",{});var Te=a(H);pe=n(Te,"input_ids"),Te.forEach(t),re=n(W," only and nothing else: "),S=r(W,"CODE",{});var be=a(S);ve=n(be,"model(inputs_ids)"),be.forEach(t),W.forEach(t),he=d(D),P=r(D,"LI",{});var U=a(P);ie=n(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: 
`),R=r(U,"CODE",{});var $e=a(R);fe=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),ae=n(U," or "),Q=r(U,"CODE",{});var Pe=a(Q);me=n(Pe,"model([input_ids, attention_mask, token_type_ids])"),Pe.forEach(t),U.forEach(t),te=d(D),N=r(D,"LI",{});var de=a(N);ke=n(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),V=r(de,"CODE",{});var qe=a(V);le=n(qe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),qe.forEach(t),de.forEach(t),D.forEach(t)},m(h,E){f(h,p,E),e(p,$),f(h,g,E),f(h,v,E),e(v,x),e(x,_),e(v,u),e(v,B),e(B,ue),f(h,X,E),f(h,M,E),e(M,ee),e(M,L),e(L,oe),e(M,ge),e(M,O),e(O,_e),e(M,ce),f(h,J,E),f(h,I,E),e(I,ne),f(h,Y,E),f(h,z,E),e(z,q),e(q,se),e(q,H),e(H,pe),e(q,re),e(q,S),e(S,ve),e(z,he),e(z,P),e(P,ie),e(P,R),e(R,fe),e(P,ae),e(P,Q),e(Q,me),e(z,te),e(z,N),e(N,ke),e(N,V),e(V,le)},d(h){h&&t(p),h&&t(g),h&&t(v),h&&t(X),h&&t(M),h&&t(J),h&&t(I),h&&t(Y),h&&t(z)}}}function rI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function aI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function iI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function lI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps 
while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function dI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function cI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function pI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function hI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function fI(j){let p,$,g,v,x;return{c(){p=s("p"),$=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s("code"),v=o("Module"),x=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=r(_,"P",{});var u=a(p);$=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r(u,"CODE",{});var B=a(g);v=n(B,"Module"),B.forEach(t),x=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(_,u){f(_,p,u),e(p,$),e(p,g),e(g,v),e(p,x)},d(_){_&&t(p)}}}function mI(j){let p,$,g,v,x,_,u,B,ue,X,M,ee,L,oe,ge,O,_e,ce,J,I,ne,Y,z,q,se,H,pe,re,S,ve,he,P,ie,R,fe,ae,Q,me,te,N,ke,V,le,h,E,K,Ee,Fe,A,Me,xe,ze,D,W,Te,be,U,$e,Pe,de,qe,e_,ot,jr,nT,ao,sT,Tc,rT,aT,yc,iT,lT,Cr,dT,cT,pT,Zo,hT,bc,fT,mT,wc,uT,gT,_T,nh,vT,kT,Nr,t_,en,fs,sh,Ir,TT,rh,yT,o_,je,Ar,bT,ah,wT,FT,Dr,xT,Fc,$T,BT,ET,vo,Lr,MT,ih,zT,PT,Or,xc,qT,lh,jT,CT,$c,NT,dh,IT,AT,ms,Sr,DT,Ur,LT,ch,OT,ST,UT,Jt,Wr,WT,ph,HT,RT,Hr,QT,tn,VT,hh,KT,JT,fh,GT,XT,YT,mh,n_,on,us,uh,Rr,ZT,gh,ey,s_,nt,Qr,ty,Vr,oy,_h,ny,sy,ry,Kr,ay,Bc,iy,ly,dy,ko,Jr,cy,vh,py,hy,Gr,Ec,fy,kh,my,uy,Mc,gy,Th,_y,vy,Gt,Xr,ky,yh,Ty,yy,Yr,by,nn,wy,bh,Fy,xy,wh,$y,By,r_,sn,gs,Fh,Zr,Ey,xh,My,a_,rn,ea,zy,ta,Py,zc,qy,jy,i_,an,oa,Cy,na,Ny,Pc,Iy,Ay,l_,io,sa,Dy,ra,Ly,qc,Oy,Sy,Uy,_s,aa,Wy,$h,Hy,d_,ln,vs,Bh,ia,Ry,Eh,Qy,c_,Ce,la,Vy,Mh,Ky,Jy,da,Gy,jc,Xy,Yy,Zy,ca,eb,pa,tb,ob,nb,ha,sb,fa,rb,ab,ib,Re,lb,zh,db,cb,Ph,pb,hb,qh,fb,mb,jh,ub,gb,Ch,_b,vb,Nh,kb,Tb,yb,ht,ma,bb,dn,wb,Cc,Fb,xb,Ih,$b,Bb,Eb,ks,Mb,Ah,zb,Pb,ua,p_,cn,Ts,Dh,ga,qb,Lh,jb,h_,st,_a,Cb,pn,Nb,Oh,Ib,Ab,Sh,Db,Lb,Ob,va,Sb,Nc,Ub,Wb,Hb,ka,Rb,Ta,Qb,Vb,Kb,ft,ya,Jb,hn,Gb,Ic,Xb,Yb,Uh,Zb,e1,t1,ys,o1,Wh,n1,s1,ba,f_,fn,bs,Hh,wa,r1,Rh,a1,m_,rt,Fa,i1,xa,l1,Qh,d1,c1,p1,$a,h1,Ac,f1,m1,u1,Ba,g1,Ea,_1,v1,k1,mt,Ma,T1,mn,y1,Dc,b1,w1,Vh,F1,x1,$1,ws,B1,Kh,E1,M1,za,u_,un,Fs,Jh,Pa,z1,Gh,P1,g_,at,qa,q1,ja,j1,Xh,C1,N1,I1,Ca,A1,Lc,D1,L1,O1,Na,S1,Ia,U1,W1,H1,ut,Aa,R1,gn,Q1,Oc,V1,K1,Yh,J1,G1,X1,xs,Y1,Zh,Z1,ew,Da,__,_n,$s,ef,La,tw,tf,ow,v_,it,Oa,nw,Sa,sw,of,rw,aw,iw,Ua,lw,Sc,dw,cw,pw,Wa,hw,Ha,fw,mw,uw,gt,Ra,gw,vn,_w,Uc,vw,kw,nf,Tw,yw,bw,Bs,ww,sf,Fw,xw,Qa,k_,kn,Es,rf,Va,$w,af,Bw,T_,lt,Ka,Ew,lf,Mw,zw,Ja,Pw,Wc,qw,jw,Cw,Ga,Nw,Xa,Iw,Aw,Dw,He,Ya,Lw,Tn,Ow,Hc,Sw,Uw,df,Ww,Hw,Rw,Ms,Qw,cf,Vw,Kw,Za,Jw,pf,Gw,Xw,ei,y_,yn,zs,hf,ti,Yw,ff,Zw,b_,dt,oi,e0,mf,t0,o0,ni,n0,Rc,s0,r0,a0,si,i0,ri,l0,d0,c0,_t,ai,p0,bn,h0,Qc,f0,m0,uf,u0,g0,_0,Ps,v0,gf,k0,T0,ii,w_,wn,qs,_f,li,y0,vf,b0,F_,ct,di,w0,kf,F0,x0,ci,$0,Vc,B0,E0,M0,pi,z0,hi,P0,q0,j0,vt,fi,C0,Fn,N0,Kc,I0,A0,Tf,D0,L0,O0,js,S0,yf,U0,W0,mi,x_,xn,Cs,bf,ui,H0,wf,R0,$_,pt,gi,Q0,$n,V0,Ff,K0,J0,xf,G0,X0,Y0,_i,Z0,Jc,e2,t2,o2,vi,n2,ki,s2,r2,a2,kt,Ti,i2,Bn,l2,Gc,d2,c2,$f,p2,h2,f2,Ns,m2,Bf,u2,g2,yi,B_,En,Is,Ef,bi,_2,Mf,v2,E_,Qe,wi,k2,zf,T2,y2,Fi,b2,Xc,w2,F2,x2,xi,$2,$i,B2,E2,M2,As,z2,Tt,Bi,P2,Mn,q2,Yc,j2,C2,Pf,N2,I2,A2,Ds,D2,qf,L2,O2,Ei,M_,zn,Ls,jf,Mi,S2,Cf,U2,z_,Ve,zi,W2,Pn,H2,Nf,R2,Q2,If,V2,K2,J2,Pi,G2,Zc,X2,Y2,Z2,qi,eF,ji,tF,oF,nF,Os,sF,yt,Ci,rF,qn,aF,ep,iF,lF,Af,dF,cF,pF,Ss,hF,Df,fF,mF,Ni,P_,jn,Us,Lf,Ii,uF,Of,gF,q_,Ai,et,Di,_F,Ne,vF,Sf,kF,TF,Uf,yF,bF,Wf,wF,FF,Hf,xF,$F,Rf,BF,EF,Qf,MF,zF,Vf,PF,qF,jF,Li,Oi,CF,Kf,NF,IF,AF,Si,DF,Jf,LF,OF,SF,G,UF,Gf,WF,HF,Xf,RF,QF,Yf,VF,KF,Zf,JF,GF,em,XF,YF,tm,ZF,ex,om,tx,ox,nm,nx,sx,sm,rx,ax,rm,ix,lx,am,dx,cx,im,px,hx,lm,fx,mx,dm,ux,gx,cm
,_x,vx,pm,kx,Tx,hm,yx,bx,fm,wx,Fx,mm,xx,$x,um,Bx,Ex,Mx,gm,zx,Px,Ui,j_,Cn,Ws,_m,Wi,qx,vm,jx,C_,Ke,Hi,Cx,Ri,Nx,km,Ix,Ax,Dx,Qi,Lx,tp,Ox,Sx,Ux,Vi,Wx,Ki,Hx,Rx,Qx,Hs,Vx,bt,Ji,Kx,Nn,Jx,op,Gx,Xx,Tm,Yx,Zx,e$,Rs,t$,ym,o$,n$,Gi,N_,In,Qs,bm,Xi,s$,wm,r$,I_,Je,Yi,a$,Zi,i$,Fm,l$,d$,c$,el,p$,np,h$,f$,m$,tl,u$,ol,g$,_$,v$,Vs,k$,wt,nl,T$,An,y$,sp,b$,w$,xm,F$,x$,$$,Ks,B$,$m,E$,M$,sl,A_,Dn,Js,Bm,rl,z$,Em,P$,D_,Ge,al,q$,Mm,j$,C$,il,N$,rp,I$,A$,D$,ll,L$,dl,O$,S$,U$,Gs,W$,Ft,cl,H$,Ln,R$,ap,Q$,V$,zm,K$,J$,G$,Xs,X$,Pm,Y$,Z$,pl,L_,On,Ys,qm,hl,e4,jm,t4,O_,Xe,fl,o4,Cm,n4,s4,ml,r4,ip,a4,i4,l4,ul,d4,gl,c4,p4,h4,Zs,f4,xt,_l,m4,Sn,u4,lp,g4,_4,Nm,v4,k4,T4,er,y4,Im,b4,w4,vl,S_,Un,tr,Am,kl,F4,Dm,x4,U_,Ye,Tl,$4,Lm,B4,E4,yl,M4,dp,z4,P4,q4,bl,j4,wl,C4,N4,I4,or,A4,$t,Fl,D4,Wn,L4,cp,O4,S4,Om,U4,W4,H4,nr,R4,Sm,Q4,V4,xl,W_,Hn,sr,Um,$l,K4,Wm,J4,H_,Ze,Bl,G4,Rn,X4,Hm,Y4,Z4,Rm,eB,tB,oB,El,nB,pp,sB,rB,aB,Ml,iB,zl,lB,dB,cB,rr,pB,Bt,Pl,hB,Qn,fB,hp,mB,uB,Qm,gB,_B,vB,ar,kB,Vm,TB,yB,ql,R_,Vn,ir,Km,jl,bB,Jm,wB,Q_,Ie,Cl,FB,Gm,xB,$B,Nl,BB,fp,EB,MB,zB,Il,PB,Al,qB,jB,CB,Xm,NB,IB,lo,Ym,Dl,AB,DB,Zm,Ll,LB,OB,eu,Ol,SB,UB,tu,Sl,WB,HB,Et,Ul,RB,Kn,QB,ou,VB,KB,nu,JB,GB,XB,lr,YB,su,ZB,eE,Wl,V_,Jn,dr,ru,Hl,tE,au,oE,K_,Ae,Rl,nE,Gn,sE,iu,rE,aE,lu,iE,lE,dE,Ql,cE,mp,pE,hE,fE,Vl,mE,Kl,uE,gE,_E,du,vE,kE,co,cu,Jl,TE,yE,pu,Gl,bE,wE,hu,Xl,FE,xE,fu,Yl,$E,BE,Mt,Zl,EE,Xn,ME,mu,zE,PE,uu,qE,jE,CE,cr,NE,gu,IE,AE,ed,J_,Yn,pr,_u,td,DE,vu,LE,G_,De,od,OE,nd,SE,ku,UE,WE,HE,sd,RE,up,QE,VE,KE,rd,JE,ad,GE,XE,YE,Tu,ZE,eM,po,yu,id,tM,oM,bu,ld,nM,sM,wu,dd,rM,aM,Fu,cd,iM,lM,zt,pd,dM,Zn,cM,xu,pM,hM,$u,fM,mM,uM,hr,gM,Bu,_M,vM,hd,X_,es,fr,Eu,fd,kM,Mu,TM,Y_,Le,md,yM,ud,bM,zu,wM,FM,xM,gd,$M,gp,BM,EM,MM,_d,zM,vd,PM,qM,jM,Pu,CM,NM,ho,qu,kd,IM,AM,ju,Td,DM,LM,Cu,yd,OM,SM,Nu,bd,UM,WM,Pt,wd,HM,ts,RM,Iu,QM,VM,Au,KM,JM,GM,mr,XM,Du,YM,ZM,Fd,Z_,os,ur,Lu,xd,ez,Ou,tz,ev,Oe,$d,oz,Su,nz,sz,Bd,rz,_p,az,iz,lz,Ed,dz,Md,cz,pz,hz,Uu,fz,mz,fo,Wu,zd,uz,gz,Hu,Pd,_z,vz,Ru,qd,kz,Tz,Qu,jd,yz,bz,qt,Cd,wz,ns,Fz,Vu,xz,$z,Ku,Bz,Ez,Mz,gr,zz,Ju,Pz,qz,Nd,tv,ss,_r,Gu,Id,jz,Xu,Cz,ov,Se,Ad,Nz,Yu,Iz,Az,Dd,Dz,vp,Lz,Oz,Sz,Ld,Uz,Od,Wz,Hz,Rz,Zu,Qz,Vz,mo,eg,Sd,Kz,Jz,tg,Ud,Gz,Xz,og,Wd,Yz,Zz,ng,Hd,e5,t5,jt,Rd,o5,rs,n5,sg,s5,r5,rg,a5,i5,l5,vr,d5,ag,c5,p5,Qd,nv,as,kr,ig,Vd,h5,lg,f5,sv,Ue,Kd,m5,dg,u5,g5,Jd,_5,kp,v5,k5,T5,Gd,y5,Xd,b5,w5,F5,cg,x5,$5,uo,pg,Yd,B5,E5,hg,Zd,M5,z5,fg,ec,P5,q5,mg,tc,j5,C5,Ct,oc,N5,is,I5,ug,A5,D5,gg,L5,O5,S5,Tr,U5,_g,W5,H5,nc,rv,ls,yr,vg,sc,R5,kg,Q5,av,We,rc,V5,ds,K5,Tg,J5,G5,yg,X5,Y5,Z5,ac,eP,Tp,tP,oP,nP,ic,sP,lc,rP,aP,iP,bg,lP,dP,go,wg,dc,cP,pP,Fg,cc,hP,fP,xg,pc,mP,uP,$g,hc,gP,_P,Nt,fc,vP,cs,kP,Bg,TP,yP,Eg,bP,wP,FP,br,xP,Mg,$P,BP,mc,iv;return _=new we({}),oe=new we({}),$e=new we({}),jr=new C({props:{name:"class transformers.BertConfig",anchor:"transformers.BertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"use_cache",val:" = True"},{name:"classifier_dropout",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/configuration_bert.py#L54",parametersDescription:[{anchor:"transformers.BertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel">BertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel">TFBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.BertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.BertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.BertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.BertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.BertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.BertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.BertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.BertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.BertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel">BertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel">TFBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.BertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.BertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.BertConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.BertConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"},{anchor:"transformers.BertConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),Nr=new Be({props:{code:`from transformers import BertModel, BertConfig # Initializing a BERT bert-base-uncased style configuration configuration = BertConfig() # Initializing a model from the bert-base-uncased style configuration model = BertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ir=new we({}),Ar=new C({props:{name:"class transformers.BertTokenizer",anchor:"transformers.BertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L117",parametersDescription:[{anchor:"transformers.BertTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.BertTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.BertTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.BertTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.BertTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BertTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BertTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BertTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BertTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.BertTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),Lr=new C({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L247",parametersDescription:[{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Sr=new C({props:{name:"get_special_tokens_mask",anchor:"transformers.BertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L272",parametersDescription:[{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Wr=new C({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L300",parametersDescription:[{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given 
sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Hr=new Be({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Rr=new we({}),Qr=new C({props:{name:"class transformers.BertTokenizerFast",anchor:"transformers.BertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert_fast.py#L117",parametersDescription:[{anchor:"transformers.BertTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.BertTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.BertTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BertTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BertTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BertTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BertTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.BertTokenizerFast.clean_text",description:`<strong>clean_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one.`,name:"clean_text"},{anchor:"transformers.BertTokenizerFast.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT). wordpieces_prefix &#x2014; (<code>str</code>, <em>optional</em>, defaults to <code>&quot;##&quot;</code>): The prefix for subwords.`,name:"tokenize_chinese_chars"}]}}),Jr=new C({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert_fast.py#L203",parametersDescription:[{anchor:"transformers.BertTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Xr=new C({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BertTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert_fast.py#L227",parametersDescription:[{anchor:"transformers.BertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Yr=new Be({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 
</span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Zr=new we({}),ea=new C({props:{name:"class transformers.models.bert.modeling_bert.BertForPreTrainingOutput",anchor:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prediction_logits",val:": FloatTensor = None"},{name:"seq_relationship_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L745",parametersDescription:[{anchor:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput.seq_relationship_logits",description:`<strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"seq_relationship_logits"},{anchor:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),oa=new C({props:{name:"class transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput",anchor:"transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"prediction_logits",val:": Tensor 
= None"},{name:"seq_relationship_logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Union[typing.Tuple[tensorflow.python.framework.ops.Tensor], tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"attentions",val:": typing.Union[typing.Tuple[tensorflow.python.framework.ops.Tensor], tensorflow.python.framework.ops.Tensor, NoneType] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L930",parametersDescription:[{anchor:"transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput.seq_relationship_logits",description:`<strong>seq_relationship_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"seq_relationship_logits"},{anchor:"transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),sa=new C({props:{name:"class transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput",anchor:"transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput",parameters:[{name:"prediction_logits",val:": ndarray = None"},{name:"seq_relationship_logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L58",parametersDescription:[{anchor:"transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before 
SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput.seq_relationship_logits",description:`<strong>seq_relationship_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"seq_relationship_logits"},{anchor:"transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),aa=new C({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/flax/struct.py#L120"}}),ia=new we({}),la=new C({props:{name:"class transformers.BertModel",anchor:"transformers.BertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L848",parametersDescription:[{anchor:"transformers.BertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ma=new C({props:{name:"forward",anchor:"transformers.BertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L887",parametersDescription:[{anchor:"transformers.BertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BertModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.BertModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BertModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ks=new ye({props:{$$slots:{default:[NN]},$$scope:{ctx:j}}}),ua=new Be({props:{code:`from transformers import BertTokenizer, BertModel import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ga=new we({}),_a=new C({props:{name:"class transformers.BertForPreTraining",anchor:"transformers.BertForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1031",parametersDescription:[{anchor:"transformers.BertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ya=new C({props:{name:"forward",anchor:"transformers.BertForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"next_sentence_label",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1047",parametersDescription:[{anchor:"transformers.BertForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.</p> <p> labels (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>): Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code> next_sentence_label (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence. kwargs (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>): Used to hide legacy arguments that have been deprecated.</li> </ul>`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.models.bert.modeling_bert.BertForPreTrainingOutput" >transformers.models.bert.modeling_bert.BertForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</p> </li> <li> <p><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.models.bert.modeling_bert.BertForPreTrainingOutput" >transformers.models.bert.modeling_bert.BertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ys=new ye({props:{$$slots:{default:[IN]},$$scope:{ctx:j}}}),ba=new Be({props:{code:`from transformers 
import BertTokenizer, BertForPreTraining import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForPreTraining.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),wa=new we({}),Fa=new C({props:{name:"class transformers.BertLMHeadModel",anchor:"transformers.BertLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1135",parametersDescription:[{anchor:"transformers.BertLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ma=new C({props:{name:"forward",anchor:"transformers.BertLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1158",parametersDescription:[{anchor:"transformers.BertLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertLMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.</p> <p> encoder_hidden_states (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. labels (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code> past_key_values (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>): Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</li> </ul>`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ws=new ye({props:{$$slots:{default:[AN]},$$scope:{ctx:j}}}),za=new Be({props:{code:`from transformers import BertTokenizer, BertLMHeadModel, BertConfig import torch tokenizer = BertTokenizer.from_pretrained('bert-base-cased') config = BertConfig.from_pretrained("bert-base-cased") config.is_decoder = True model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertLMHeadModel, BertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),Pa=new we({}),qa=new C({props:{name:"class transformers.BertForMaskedLM",anchor:"transformers.BertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1286",parametersDescription:[{anchor:"transformers.BertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Aa=new C({props:{name:"forward",anchor:"transformers.BertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1312",parametersDescription:[{anchor:"transformers.BertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xs=new ye({props:{$$slots:{default:[DN]},$$scope:{ctx:j}}}),Da=new Be({props:{code:`from transformers import BertTokenizer, BertForMaskedLM import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMaskedLM.from_pretrained('bert-base-uncased') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),La=new we({}),Oa=new C({props:{name:"class transformers.BertForNextSentencePrediction",anchor:"transformers.BertForNextSentencePrediction",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1397",parametersDescription:[{anchor:"transformers.BertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ra=new C({props:{name:"forward",anchor:"transformers.BertForNextSentencePrediction.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1407",parametersDescription:[{anchor:"transformers.BertForNextSentencePrediction.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForNextSentencePrediction.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForNextSentencePrediction.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForNextSentencePrediction.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForNextSentencePrediction.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForNextSentencePrediction.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForNextSentencePrediction.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForNextSentencePrediction.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForNextSentencePrediction.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertForNextSentencePrediction.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring). Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) \u2014 Next sequence prediction (classification) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Bs=new ye({props:{$$slots:{default:[LN]},$$scope:{ctx:j}}}),Qa=new Be({props:{code:`from transformers import BertTokenizer, BertForNextSentencePrediction import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased') prompt = 
"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." encoding = tokenizer(prompt, next_sentence, return_tensors='pt') outputs = model(**encoding, labels=torch.LongTensor([1])) logits = outputs.logits assert logits[0, 0] < logits[0, 1] # next sentence was random,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=torch.LongTensor([<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># next sentence was random</span>`}}),Va=new we({}),Ka=new C({props:{name:"class transformers.BertForSequenceClassification",anchor:"transformers.BertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1501",parametersDescription:[{anchor:"transformers.BertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ya=new C({props:{name:"forward",anchor:"transformers.BertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1517",parametersDescription:[{anchor:"transformers.BertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ms=new ye({props:{$$slots:{default:[ON]},$$scope:{ctx:j}}}),Za=new Be({props:{code:`from transformers import BertTokenizer, BertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassification.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ei=new Be({props:{code:`from transformers import BertTokenizer, BertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassification.from_pretrained('bert-base-uncased', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ti=new we({}),oi=new C({props:{name:"class transformers.BertForMultipleChoice",anchor:"transformers.BertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1603",parametersDescription:[{anchor:"transformers.BertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ai=new C({props:{name:"forward",anchor:"transformers.BertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1617",parametersDescription:[{anchor:"transformers.BertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ps=new ye({props:{$$slots:{default:[SN]},$$scope:{ctx:j}}}),ii=new Be({props:{code:`from transformers import BertTokenizer, BertForMultipleChoice import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMultipleChoice.from_pretrained('bert-base-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),li=new we({}),di=new C({props:{name:"class transformers.BertForTokenClassification",anchor:"transformers.BertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1698",parametersDescription:[{anchor:"transformers.BertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fi=new C({props:{name:"forward",anchor:"transformers.BertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1716",parametersDescription:[{anchor:"transformers.BertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),js=new ye({props:{$$slots:{default:[UN]},$$scope:{ctx:j}}}),mi=new Be({props:{code:`from transformers import BertTokenizer, BertForTokenClassification import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForTokenClassification.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ui=new we({}),gi=new C({props:{name:"class transformers.BertForQuestionAnswering",anchor:"transformers.BertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1792",parametersDescription:[{anchor:"transformers.BertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ti=new C({props:{name:"forward",anchor:"transformers.BertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_bert.py#L1806",parametersDescription:[{anchor:"transformers.BertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.BertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted 
average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ns=new ye({props:{$$slots:{default:[WN]},$$scope:{ctx:j}}}),yi=new Be({props:{code:`from transformers import BertTokenizer, BertForQuestionAnswering import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForQuestionAnswering.from_pretrained('bert-base-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),bi=new we({}),wi=new C({props:{name:"class transformers.TFBertModel",anchor:"transformers.TFBertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1056",parametersDescription:[{anchor:"transformers.TFBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),As=new ye({props:{$$slots:{default:[HN]},$$scope:{ctx:j}}}),Bi=new C({props:{name:"call",anchor:"transformers.TFBertModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1062",parametersDescription:[{anchor:"transformers.TFBertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertModel.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFBertModel.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFBertModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBertModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ds=new ye({props:{$$slots:{default:[RN]},$$scope:{ctx:j}}}),Ei=new Be({props:{code:`from transformers import BertTokenizer, TFBertModel import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertModel.from_pretrained('bert-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Mi=new we({}),zi=new C({props:{name:"class transformers.TFBertForPreTraining",anchor:"transformers.TFBertForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1173",parametersDescription:[{anchor:"transformers.TFBertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Os=new ye({props:{$$slots:{default:[QN]},$$scope:{ctx:j}}}),Ci=new C({props:{name:"call",anchor:"transformers.TFBertForPreTraining.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"next_sentence_label",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1195",parametersDescription:[{anchor:"transformers.TFBertForPreTraining.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForPreTraining.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForPreTraining.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForPreTraining.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForPreTraining.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForPreTraining.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertForPreTraining.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.TFBertForPreTraining.call.next_sentence_label",description:`<strong>next_sentence_label</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"next_sentence_label"},{anchor:"transformers.TFBertForPreTraining.call.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput" >transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the 
embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput" >transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ss=new ye({props:{$$slots:{default:[VN]},$$scope:{ctx:j}}}),Ni=new Be({props:{code:`import tensorflow as tf from transformers import BertTokenizer, TFBertForPreTraining tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = TFBertForPreTraining.from_pretrained('bert-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores, seq_relationship_scores = outputs[:2],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tf.constant(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>))[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_scores, seq_relationship_scores = outputs[:<span class="hljs-number">2</span>]`}}),Ii=new we({}),Di=new C({props:{name:"call",anchor:"transformers.TFBertLMHeadModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": 
typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1448",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the 
decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ui=new Be({props:{code:`from transformers import BertTokenizer, TFBertLMHeadModel import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertLMHeadModel.from_pretrained('bert-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Wi=new we({}),Hi=new C({props:{name:"class transformers.TFBertForMaskedLM",anchor:"transformers.TFBertForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1304",parametersDescription:[{anchor:"transformers.TFBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Hs=new ye({props:{$$slots:{default:[KN]},$$scope:{ctx:j}}}),Ji=new C({props:{name:"call",anchor:"transformers.TFBertForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1332",parametersDescription:[{anchor:"transformers.TFBertForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" 
>transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Rs=new ye({props:{$$slots:{default:[JN]},$$scope:{ctx:j}}}),Gi=new Be({props:{code:`from transformers import BertTokenizer, TFBertForMaskedLM import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForMaskedLM.from_pretrained('bert-base-cased') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Xi=new we({}),Yi=new C({props:{name:"class transformers.TFBertForNextSentencePrediction",anchor:"transformers.TFBertForNextSentencePrediction",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1572",parametersDescription:[{anchor:"transformers.TFBertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vs=new ye({props:{$$slots:{default:[GN]},$$scope:{ctx:j}}}),nl=new C({props:{name:"call",anchor:"transformers.TFBertForNextSentencePrediction.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"next_sentence_label",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1582",parametersDescription:[{anchor:"transformers.TFBertForNextSentencePrediction.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForNextSentencePrediction.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForNextSentencePrediction.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForNextSentencePrediction.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForNextSentencePrediction.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForNextSentencePrediction.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForNextSentencePrediction.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForNextSentencePrediction.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForNextSentencePrediction.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForNextSentencePrediction.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput" >transformers.modeling_tf_outputs.TFNextSentencePredictorOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>next_sentence_label</code> is provided) \u2014 Next sentence prediction loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput" >transformers.modeling_tf_outputs.TFNextSentencePredictorOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ks=new ye({props:{$$slots:{default:[XN]},$$scope:{ctx:j}}}),sl=new Be({props:{code:`import tensorflow as tf from transformers import BertTokenizer, TFBertForNextSentencePrediction tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased') prompt = "In Italy, pizza served in formal settings, such as at a 
restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." encoding = tokenizer(prompt, next_sentence, return_tensors='tf') logits = model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'])[0] assert logits[0][0] < logits[0][1] # the next sentence was random,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(encoding[<span class="hljs-string">&#x27;input_ids&#x27;</span>], token_type_ids=encoding[<span class="hljs-string">&#x27;token_type_ids&#x27;</span>])[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>][<span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>][<span class="hljs-number">1</span>] <span class="hljs-comment"># the next sentence was random</span>`}}),rl=new we({}),al=new C({props:{name:"class transformers.TFBertForSequenceClassification",anchor:"transformers.TFBertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1679",parametersDescription:[{anchor:"transformers.TFBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gs=new ye({props:{$$slots:{default:[YN]},$$scope:{ctx:j}}}),cl=new C({props:{name:"call",anchor:"transformers.TFBertForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1700",parametersDescription:[{anchor:"transformers.TFBertForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" 
>transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Xs=new ye({props:{$$slots:{default:[ZN]},$$scope:{ctx:j}}}),pl=new Be({props:{code:`from transformers import BertTokenizer, TFBertForSequenceClassification import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForSequenceClassification.from_pretrained('bert-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),hl=new we({}),fl=new C({props:{name:"class transformers.TFBertForMultipleChoice",anchor:"transformers.TFBertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1785",parametersDescription:[{anchor:"transformers.TFBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Zs=new ye({props:{$$slots:{default:[eI]},$$scope:{ctx:j}}}),_l=new C({props:{name:"call",anchor:"transformers.TFBertForMultipleChoice.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1809",parametersDescription:[{anchor:"transformers.TFBertForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),er=new ye({props:{$$slots:{default:[tI]},$$scope:{ctx:j}}}),vl=new Be({props:{code:`from transformers import BertTokenizer, TFBertForMultipleChoice import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForMultipleChoice.from_pretrained('bert-base-cased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),kl=new we({}),Tl=new C({props:{name:"class transformers.TFBertForTokenClassification",anchor:"transformers.TFBertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1940",parametersDescription:[{anchor:"transformers.TFBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),or=new ye({props:{$$slots:{default:[oI]},$$scope:{ctx:j}}}),Fl=new C({props:{name:"call",anchor:"transformers.TFBertForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L1967",parametersDescription:[{anchor:"transformers.TFBertForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),nr=new ye({props:{$$slots:{default:[nI]},$$scope:{ctx:j}}}),xl=new Be({props:{code:`from transformers import BertTokenizer, 
TFBertForTokenClassification import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForTokenClassification.from_pretrained('bert-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$l=new we({}),Bl=new C({props:{name:"class transformers.TFBertForQuestionAnswering",anchor:"transformers.TFBertForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L2051",parametersDescription:[{anchor:"transformers.TFBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rr=new ye({props:{$$slots:{default:[sI]},$$scope:{ctx:j}}}),Pl=new C({props:{name:"call",anchor:"transformers.TFBertForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_tf_bert.py#L2073",parametersDescription:[{anchor:"transformers.TFBertForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBertForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBertForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFBertForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFBertForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBertForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFBertForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBertForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBertForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBertForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBertForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFBertForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ar=new ye({props:{$$slots:{default:[rI]},$$scope:{ctx:j}}}),ql=new Be({props:{code:`from transformers import BertTokenizer, TFBertForQuestionAnswering import tensorflow as tf tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForQuestionAnswering.from_pretrained('bert-base-cased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, TFBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),jl=new we({}),Cl=new C({props:{name:"class transformers.FlaxBertModel",anchor:"transformers.FlaxBertModel",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L755",parametersDescription:[{anchor:"transformers.FlaxBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Ul=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),lr=new ye({props:{$$slots:{default:[aI]},$$scope:{ctx:j}}}),Wl=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertModel tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertModel.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Hl=new we({}),Rl=new C({props:{name:"class transformers.FlaxBertForPreTraining",anchor:"transformers.FlaxBertForPreTraining",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L828",parametersDescription:[{anchor:"transformers.FlaxBertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForPreTraining.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForPreTraining.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Zl=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput" >transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>prediction_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput" >transformers.models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),cr=new ye({props:{$$slots:{default:[iI]},$$scope:{ctx:j}}}),ed=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForPreTraining tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForPreTraining.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),td=new we({}),od=new C({props:{name:"class transformers.FlaxBertForMaskedLM",anchor:"transformers.FlaxBertForMaskedLM",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L913",parametersDescription:[{anchor:"transformers.FlaxBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForMaskedLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForMaskedLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),pd=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),hr=new ye({props:{$$slots:{default:[lI]},$$scope:{ctx:j}}}),hd=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForMaskedLM tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForMaskedLM.from_pretrained('bert-base-uncased') inputs = tokenizer("The capital of France is [MASK].", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = 
BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),fd=new we({}),md=new C({props:{name:"class transformers.FlaxBertForNextSentencePrediction",anchor:"transformers.FlaxBertForNextSentencePrediction",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L974",parametersDescription:[{anchor:"transformers.FlaxBertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForNextSentencePrediction.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForNextSentencePrediction.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),wd=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput" >transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput" >transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),mr=new ye({props:{$$slots:{default:[dI]},$$scope:{ctx:j}}}),Fd=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForNextSentencePrediction tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForNextSentencePrediction.from_pretrained('bert-base-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
encoding = tokenizer(prompt, next_sentence, return_tensors='jax') outputs = model(**encoding) logits = outputs.logits assert logits[0, 0] < logits[0, 1] # next sentence was random,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># next sentence was random</span>`}}),xd=new we({}),$d=new C({props:{name:"class transformers.FlaxBertForSequenceClassification",anchor:"transformers.FlaxBertForSequenceClassification",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L1072",parametersDescription:[{anchor:"transformers.FlaxBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForSequenceClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForSequenceClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Cd=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),gr=new ye({props:{$$slots:{default:[cI]},$$scope:{ctx:j}}}),Nd=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForSequenceClassification tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForSequenceClassification.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Id=new we({}),Ad=new C({props:{name:"class transformers.FlaxBertForMultipleChoice",anchor:"transformers.FlaxBertForMultipleChoice",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L1148",parametersDescription:[{anchor:"transformers.FlaxBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForMultipleChoice.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForMultipleChoice.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Rd=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vr=new ye({props:{$$slots:{default:[pI]},$$scope:{ctx:j}}}),Qd=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForMultipleChoice tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForMultipleChoice.from_pretrained('bert-base-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='jax', padding=True) outputs = model(**{k: v[None, :] for k,v in encoding.items()}) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Vd=new we({}),Kd=new C({props:{name:"class transformers.FlaxBertForTokenClassification",anchor:"transformers.FlaxBertForTokenClassification",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L1220",parametersDescription:[{anchor:"transformers.FlaxBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForTokenClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForTokenClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),oc=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, 
sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Tr=new ye({props:{$$slots:{default:[hI]},$$scope:{ctx:j}}}),nc=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForTokenClassification tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForTokenClassification.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sc=new we({}),rc=new C({props:{name:"class transformers.FlaxBertForQuestionAnswering",anchor:"transformers.FlaxBertForQuestionAnswering",parameters:[{name:"config",val:": BertConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L1287",parametersDescription:[{anchor:"transformers.FlaxBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig">BertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBertForQuestionAnswering.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxBertForQuestionAnswering.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),fc=new C({props:{name:"__call__",anchor:"transformers.FlaxBertPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/modeling_flax_bert.py#L638",parametersDescription:[{anchor:"transformers.FlaxBertPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBertPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig" >BertConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is 
passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),br=new ye({props:{$$slots:{default:[fI]},$$scope:{ctx:j}}}),mc=new Be({props:{code:`from transformers import BertTokenizer, FlaxBertForQuestionAnswering tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = FlaxBertForQuestionAnswering.from_pretrained('bert-base-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='jax') outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, FlaxBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){p=s("meta"),$=l(),g=s("h1"),v=s("a"),x=s("span"),k(_.$$.fragment),u=l(),B=s("span"),ue=o("BERT"),X=l(),M=s("h2"),ee=s("a"),L=s("span"),k(oe.$$.fragment),ge=l(),O=s("span"),_e=o("Overview"),ce=l(),J=s("p"),I=o("The BERT model was proposed in "),ne=s("a"),Y=o("BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),z=o(` by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It\u2019s a bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia.`),q=l(),se=s("p"),H=o("The abstract from the paper is the following:"),pe=l(),re=s("p"),S=s("em"),ve=o(`We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. 
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.`),he=l(),P=s("p"),ie=s("em"),R=o(`BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).`),fe=l(),ae=s("p"),Q=o("Tips:"),me=l(),te=s("ul"),N=s("li"),ke=o(`BERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),V=l(),le=s("li"),h=o(`BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation.`),E=l(),K=s("p"),Ee=o("This model was contributed by "),Fe=s("a"),A=o("thomwolf"),Me=o(". The original code can be found "),xe=s("a"),ze=o("here"),D=o("."),W=l(),Te=s("h2"),be=s("a"),U=s("span"),k($e.$$.fragment),Pe=l(),de=s("span"),qe=o("BertConfig"),e_=l(),ot=s("div"),k(jr.$$.fragment),nT=l(),ao=s("p"),sT=o("This is the configuration class to store the configuration of a "),Tc=s("a"),rT=o("BertModel"),aT=o(` or a `),yc=s("a"),iT=o("TFBertModel"),lT=o(`. It is used to instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BERT `),Cr=s("a"),dT=o("bert-base-uncased"),cT=o(" architecture."),pT=l(),Zo=s("p"),hT=o("Configuration objects inherit from "),bc=s("a"),fT=o("PretrainedConfig"),mT=o(` and can be used to control the model outputs. Read the documentation from `),wc=s("a"),uT=o("PretrainedConfig"),gT=o(" for more information."),_T=l(),nh=s("p"),vT=o("Examples:"),kT=l(),k(Nr.$$.fragment),t_=l(),en=s("h2"),fs=s("a"),sh=s("span"),k(Ir.$$.fragment),TT=l(),rh=s("span"),yT=o("BertTokenizer"),o_=l(),je=s("div"),k(Ar.$$.fragment),bT=l(),ah=s("p"),wT=o("Construct a BERT tokenizer. Based on WordPiece."),FT=l(),Dr=s("p"),xT=o("This tokenizer inherits from "),Fc=s("a"),$T=o("PreTrainedTokenizer"),BT=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ET=l(),vo=s("div"),k(Lr.$$.fragment),MT=l(),ih=s("p"),zT=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),PT=l(),Or=s("ul"),xc=s("li"),qT=o("single sequence: "),lh=s("code"),jT=o("[CLS] X [SEP]"),CT=l(),$c=s("li"),NT=o("pair of sequences: "),dh=s("code"),IT=o("[CLS] A [SEP] B [SEP]"),AT=l(),ms=s("div"),k(Sr.$$.fragment),DT=l(),Ur=s("p"),LT=o(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ch=s("code"),OT=o("prepare_for_model"),ST=o(" method."),UT=l(),Jt=s("div"),k(Wr.$$.fragment),WT=l(),ph=s("p"),HT=o(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A BERT sequence pair mask has the following format:`),RT=l(),k(Hr.$$.fragment),QT=l(),tn=s("p"),VT=o("If "),hh=s("code"),KT=o("token_ids_1"),JT=o(" is "),fh=s("code"),GT=o("None"),XT=o(", this method only returns the first portion of the mask (0s)."),YT=l(),mh=s("div"),n_=l(),on=s("h2"),us=s("a"),uh=s("span"),k(Rr.$$.fragment),ZT=l(),gh=s("span"),ey=o("BertTokenizerFast"),s_=l(),nt=s("div"),k(Qr.$$.fragment),ty=l(),Vr=s("p"),oy=o("Construct a \u201Cfast\u201D BERT tokenizer (backed by HuggingFace\u2019s "),_h=s("em"),ny=o("tokenizers"),sy=o(" library). Based on WordPiece."),ry=l(),Kr=s("p"),ay=o("This tokenizer inherits from "),Bc=s("a"),iy=o("PreTrainedTokenizerFast"),ly=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),dy=l(),ko=s("div"),k(Jr.$$.fragment),cy=l(),vh=s("p"),py=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),hy=l(),Gr=s("ul"),Ec=s("li"),fy=o("single sequence: "),kh=s("code"),my=o("[CLS] X [SEP]"),uy=l(),Mc=s("li"),gy=o("pair of sequences: "),Th=s("code"),_y=o("[CLS] A [SEP] B [SEP]"),vy=l(),Gt=s("div"),k(Xr.$$.fragment),ky=l(),yh=s("p"),Ty=o(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:`),yy=l(),k(Yr.$$.fragment),by=l(),nn=s("p"),wy=o("If "),bh=s("code"),Fy=o("token_ids_1"),xy=o(" is "),wh=s("code"),$y=o("None"),By=o(", this method only returns the first portion of the mask (0s)."),r_=l(),sn=s("h2"),gs=s("a"),Fh=s("span"),k(Zr.$$.fragment),Ey=l(),xh=s("span"),My=o("Bert specific outputs"),a_=l(),rn=s("div"),k(ea.$$.fragment),zy=l(),ta=s("p"),Py=o("Output type of "),zc=s("a"),qy=o("BertForPreTraining"),jy=o("."),i_=l(),an=s("div"),k(oa.$$.fragment),Cy=l(),na=s("p"),Ny=o("Output type of "),Pc=s("a"),Iy=o("TFBertForPreTraining"),Ay=o("."),l_=l(),io=s("div"),k(sa.$$.fragment),Dy=l(),ra=s("p"),Ly=o("Output type of "),qc=s("a"),Oy=o("BertForPreTraining"),Sy=o("."),Uy=l(),_s=s("div"),k(aa.$$.fragment),Wy=l(),$h=s("p"),Hy=o("\u201CReturns a new object replacing the specified fields with new values."),d_=l(),ln=s("h2"),vs=s("a"),Bh=s("span"),k(ia.$$.fragment),Ry=l(),Eh=s("span"),Qy=o("BertModel"),c_=l(),Ce=s("div"),k(la.$$.fragment),Vy=l(),Mh=s("p"),Ky=o("The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),Jy=l(),da=s("p"),Gy=o("This model inherits from "),jc=s("a"),Xy=o("PreTrainedModel"),Yy=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zy=l(),ca=s("p"),eb=o("This model is also a PyTorch "),pa=s("a"),tb=o("torch.nn.Module"),ob=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nb=l(),ha=s("p"),sb=o(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),fa=s("a"),rb=o(`Attention is all you need`),ab=o(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),ib=l(),Re=s("p"),lb=o("To behave as an decoder the model needs to be initialized with the "),zh=s("code"),db=o("is_decoder"),cb=o(` argument of the configuration set to `),Ph=s("code"),pb=o("True"),hb=o(". To be used in a Seq2Seq model, the model needs to initialized with both "),qh=s("code"),fb=o("is_decoder"),mb=o(` argument and `),jh=s("code"),ub=o("add_cross_attention"),gb=o(" set to "),Ch=s("code"),_b=o("True"),vb=o("; an "),Nh=s("code"),kb=o("encoder_hidden_states"),Tb=o(" is then expected as an input to the forward pass."),yb=l(),ht=s("div"),k(ma.$$.fragment),bb=l(),dn=s("p"),wb=o("The "),Cc=s("a"),Fb=o("BertModel"),xb=o(" forward method, overrides the "),Ih=s("code"),$b=o("__call__"),Bb=o(" special method."),Eb=l(),k(ks.$$.fragment),Mb=l(),Ah=s("p"),zb=o("Example:"),Pb=l(),k(ua.$$.fragment),p_=l(),cn=s("h2"),Ts=s("a"),Dh=s("span"),k(ga.$$.fragment),qb=l(),Lh=s("span"),jb=o("BertForPreTraining"),h_=l(),st=s("div"),k(_a.$$.fragment),Cb=l(),pn=s("p"),Nb=o("Bert Model with two heads on top as done during the pretraining: a "),Oh=s("code"),Ib=o("masked language modeling"),Ab=o(" head and a "),Sh=s("code"),Db=o("next sentence prediction (classification)"),Lb=o(" head."),Ob=l(),va=s("p"),Sb=o("This model inherits from "),Nc=s("a"),Ub=o("PreTrainedModel"),Wb=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hb=l(),ka=s("p"),Rb=o("This model is also a PyTorch "),Ta=s("a"),Qb=o("torch.nn.Module"),Vb=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kb=l(),ft=s("div"),k(ya.$$.fragment),Jb=l(),hn=s("p"),Gb=o("The "),Ic=s("a"),Xb=o("BertForPreTraining"),Yb=o(" forward method, overrides the "),Uh=s("code"),Zb=o("__call__"),e1=o(" special method."),t1=l(),k(ys.$$.fragment),o1=l(),Wh=s("p"),n1=o("Example:"),s1=l(),k(ba.$$.fragment),f_=l(),fn=s("h2"),bs=s("a"),Hh=s("span"),k(wa.$$.fragment),r1=l(),Rh=s("span"),a1=o("BertLMHeadModel"),m_=l(),rt=s("div"),k(Fa.$$.fragment),i1=l(),xa=s("p"),l1=o("Bert Model with a "),Qh=s("code"),d1=o("language modeling"),c1=o(" head on top for CLM fine-tuning."),p1=l(),$a=s("p"),h1=o("This model inherits from "),Ac=s("a"),f1=o("PreTrainedModel"),m1=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),u1=l(),Ba=s("p"),g1=o("This model is also a PyTorch "),Ea=s("a"),_1=o("torch.nn.Module"),v1=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),k1=l(),mt=s("div"),k(Ma.$$.fragment),T1=l(),mn=s("p"),y1=o("The "),Dc=s("a"),b1=o("BertLMHeadModel"),w1=o(" forward method, overrides the "),Vh=s("code"),F1=o("__call__"),x1=o(" special method."),$1=l(),k(ws.$$.fragment),B1=l(),Kh=s("p"),E1=o("Example:"),M1=l(),k(za.$$.fragment),u_=l(),un=s("h2"),Fs=s("a"),Jh=s("span"),k(Pa.$$.fragment),z1=l(),Gh=s("span"),P1=o("BertForMaskedLM"),g_=l(),at=s("div"),k(qa.$$.fragment),q1=l(),ja=s("p"),j1=o("Bert Model with a "),Xh=s("code"),C1=o("language modeling"),N1=o(" head on top."),I1=l(),Ca=s("p"),A1=o("This model inherits from "),Lc=s("a"),D1=o("PreTrainedModel"),L1=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),O1=l(),Na=s("p"),S1=o("This model is also a PyTorch "),Ia=s("a"),U1=o("torch.nn.Module"),W1=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),H1=l(),ut=s("div"),k(Aa.$$.fragment),R1=l(),gn=s("p"),Q1=o("The "),Oc=s("a"),V1=o("BertForMaskedLM"),K1=o(" forward method, overrides the "),Yh=s("code"),J1=o("__call__"),G1=o(" special method."),X1=l(),k(xs.$$.fragment),Y1=l(),Zh=s("p"),Z1=o("Example:"),ew=l(),k(Da.$$.fragment),__=l(),_n=s("h2"),$s=s("a"),ef=s("span"),k(La.$$.fragment),tw=l(),tf=s("span"),ow=o("BertForNextSentencePrediction"),v_=l(),it=s("div"),k(Oa.$$.fragment),nw=l(),Sa=s("p"),sw=o("Bert Model with a "),of=s("code"),rw=o("next sentence prediction (classification)"),aw=o(" head on top."),iw=l(),Ua=s("p"),lw=o("This model inherits from "),Sc=s("a"),dw=o("PreTrainedModel"),cw=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pw=l(),Wa=s("p"),hw=o("This model is also a PyTorch "),Ha=s("a"),fw=o("torch.nn.Module"),mw=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),uw=l(),gt=s("div"),k(Ra.$$.fragment),gw=l(),vn=s("p"),_w=o("The "),Uc=s("a"),vw=o("BertForNextSentencePrediction"),kw=o(" forward method, overrides the "),nf=s("code"),Tw=o("__call__"),yw=o(" special method."),bw=l(),k(Bs.$$.fragment),ww=l(),sf=s("p"),Fw=o("Example:"),xw=l(),k(Qa.$$.fragment),k_=l(),kn=s("h2"),Es=s("a"),rf=s("span"),k(Va.$$.fragment),$w=l(),af=s("span"),Bw=o("BertForSequenceClassification"),T_=l(),lt=s("div"),k(Ka.$$.fragment),Ew=l(),lf=s("p"),Mw=o(`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),zw=l(),Ja=s("p"),Pw=o("This model inherits from "),Wc=s("a"),qw=o("PreTrainedModel"),jw=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cw=l(),Ga=s("p"),Nw=o("This model is also a PyTorch "),Xa=s("a"),Iw=o("torch.nn.Module"),Aw=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dw=l(),He=s("div"),k(Ya.$$.fragment),Lw=l(),Tn=s("p"),Ow=o("The "),Hc=s("a"),Sw=o("BertForSequenceClassification"),Uw=o(" forward method, overrides the "),df=s("code"),Ww=o("__call__"),Hw=o(" special method."),Rw=l(),k(Ms.$$.fragment),Qw=l(),cf=s("p"),Vw=o("Example of single-label classification:"),Kw=l(),k(Za.$$.fragment),Jw=l(),pf=s("p"),Gw=o("Example of multi-label classification:"),Xw=l(),k(ei.$$.fragment),y_=l(),yn=s("h2"),zs=s("a"),hf=s("span"),k(ti.$$.fragment),Yw=l(),ff=s("span"),Zw=o("BertForMultipleChoice"),b_=l(),dt=s("div"),k(oi.$$.fragment),e0=l(),mf=s("p"),t0=o(`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),o0=l(),ni=s("p"),n0=o("This model inherits from "),Rc=s("a"),s0=o("PreTrainedModel"),r0=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),a0=l(),si=s("p"),i0=o("This model is also a PyTorch "),ri=s("a"),l0=o("torch.nn.Module"),d0=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),c0=l(),_t=s("div"),k(ai.$$.fragment),p0=l(),bn=s("p"),h0=o("The "),Qc=s("a"),f0=o("BertForMultipleChoice"),m0=o(" forward method, overrides the "),uf=s("code"),u0=o("__call__"),g0=o(" special method."),_0=l(),k(Ps.$$.fragment),v0=l(),gf=s("p"),k0=o("Example:"),T0=l(),k(ii.$$.fragment),w_=l(),wn=s("h2"),qs=s("a"),_f=s("span"),k(li.$$.fragment),y0=l(),vf=s("span"),b0=o("BertForTokenClassification"),F_=l(),ct=s("div"),k(di.$$.fragment),w0=l(),kf=s("p"),F0=o(`Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),x0=l(),ci=s("p"),$0=o("This model inherits from "),Vc=s("a"),B0=o("PreTrainedModel"),E0=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),M0=l(),pi=s("p"),z0=o("This model is also a PyTorch "),hi=s("a"),P0=o("torch.nn.Module"),q0=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),j0=l(),vt=s("div"),k(fi.$$.fragment),C0=l(),Fn=s("p"),N0=o("The "),Kc=s("a"),I0=o("BertForTokenClassification"),A0=o(" forward method, overrides the "),Tf=s("code"),D0=o("__call__"),L0=o(" special method."),O0=l(),k(js.$$.fragment),S0=l(),yf=s("p"),U0=o("Example:"),W0=l(),k(mi.$$.fragment),x_=l(),xn=s("h2"),Cs=s("a"),bf=s("span"),k(ui.$$.fragment),H0=l(),wf=s("span"),R0=o("BertForQuestionAnswering"),$_=l(),pt=s("div"),k(gi.$$.fragment),Q0=l(),$n=s("p"),V0=o(`Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ff=s("code"),K0=o("span start logits"),J0=o(" and "),xf=s("code"),G0=o("span end logits"),X0=o(")."),Y0=l(),_i=s("p"),Z0=o("This model inherits from "),Jc=s("a"),e2=o("PreTrainedModel"),t2=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),o2=l(),vi=s("p"),n2=o("This model is also a PyTorch "),ki=s("a"),s2=o("torch.nn.Module"),r2=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),a2=l(),kt=s("div"),k(Ti.$$.fragment),i2=l(),Bn=s("p"),l2=o("The "),Gc=s("a"),d2=o("BertForQuestionAnswering"),c2=o(" forward method, overrides the "),$f=s("code"),p2=o("__call__"),h2=o(" special method."),f2=l(),k(Ns.$$.fragment),m2=l(),Bf=s("p"),u2=o("Example:"),g2=l(),k(yi.$$.fragment),B_=l(),En=s("h2"),Is=s("a"),Ef=s("span"),k(bi.$$.fragment),_2=l(),Mf=s("span"),v2=o("TFBertModel"),E_=l(),Qe=s("div"),k(wi.$$.fragment),k2=l(),zf=s("p"),T2=o("The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),y2=l(),Fi=s("p"),b2=o("This model inherits from "),Xc=s("a"),w2=o("TFPreTrainedModel"),F2=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),x2=l(),xi=s("p"),$2=o("This model is also a "),$i=s("a"),B2=o("tf.keras.Model"),E2=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),M2=l(),k(As.$$.fragment),z2=l(),Tt=s("div"),k(Bi.$$.fragment),P2=l(),Mn=s("p"),q2=o("The "),Yc=s("a"),j2=o("TFBertModel"),C2=o(" forward method, overrides the "),Pf=s("code"),N2=o("__call__"),I2=o(" special method."),A2=l(),k(Ds.$$.fragment),D2=l(),qf=s("p"),L2=o("Example:"),O2=l(),k(Ei.$$.fragment),M_=l(),zn=s("h2"),Ls=s("a"),jf=s("span"),k(Mi.$$.fragment),S2=l(),Cf=s("span"),U2=o("TFBertForPreTraining"),z_=l(),Ve=s("div"),k(zi.$$.fragment),W2=l(),Pn=s("p"),H2=o(`Bert Model with two heads on top as done during the pretraining: a `),Nf=s("code"),R2=o("masked language modeling"),Q2=o(" head and a "),If=s("code"),V2=o("next sentence prediction (classification)"),K2=o(" head."),J2=l(),Pi=s("p"),G2=o("This model inherits from "),Zc=s("a"),X2=o("TFPreTrainedModel"),Y2=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Z2=l(),qi=s("p"),eF=o("This model is also a "),ji=s("a"),tF=o("tf.keras.Model"),oF=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),nF=l(),k(Os.$$.fragment),sF=l(),yt=s("div"),k(Ci.$$.fragment),rF=l(),qn=s("p"),aF=o("The "),ep=s("a"),iF=o("TFBertForPreTraining"),lF=o(" forward method, overrides the "),Af=s("code"),dF=o("__call__"),cF=o(" special method."),pF=l(),k(Ss.$$.fragment),hF=l(),Df=s("p"),fF=o("Examples:"),mF=l(),k(Ni.$$.fragment),P_=l(),jn=s("h2"),Us=s("a"),Lf=s("span"),k(Ii.$$.fragment),uF=l(),Of=s("span"),gF=o("TFBertModelLMHeadModel"),q_=l(),Ai=s("div"),et=s("div"),k(Di.$$.fragment),_F=l(),Ne=s("p"),vF=o("encoder_hidden_states ("),Sf=s("code"),kF=o("tf.Tensor"),TF=o(" of shape "),Uf=s("code"),yF=o("(batch_size, sequence_length, hidden_size)"),bF=o(", "),Wf=s("em"),wF=o("optional"),FF=o(`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`),Hf=s("code"),xF=o("tf.Tensor"),$F=o(" of shape "),Rf=s("code"),BF=o("(batch_size, sequence_length)"),EF=o(", "),Qf=s("em"),MF=o("optional"),zF=o(`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `),Vf=s("code"),PF=o("[0, 1]"),qF=o(":"),jF=l(),Li=s("ul"),Oi=s("li"),CF=o("1 for tokens that are "),Kf=s("strong"),NF=o("not masked"),IF=o(","),AF=l(),Si=s("li"),DF=o("0 for tokens that are "),Jf=s("strong"),LF=o("masked"),OF=o("."),SF=l(),G=s("p"),UF=o("past_key_values ("),Gf=s("code"),WF=o("Tuple[Tuple[tf.Tensor]]"),HF=o(" of length "),Xf=s("code"),RF=o("config.n_layers"),QF=o(`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `),Yf=s("code"),VF=o("past_key_values"),KF=o(" are used, the user can optionally input only the last "),Zf=s("code"),JF=o("decoder_input_ids"),GF=o(` (those that don\u2019t have their past key value states given to this model) of shape `),em=s("code"),XF=o("(batch_size, 1)"),YF=o(` instead of all `),tm=s("code"),ZF=o("decoder_input_ids"),ex=o(" of shape "),om=s("code"),tx=o("(batch_size, sequence_length)"),ox=o(`. use_cache (`),nm=s("code"),nx=o("bool"),sx=o(", "),sm=s("em"),rx=o("optional"),ax=o(", defaults to "),rm=s("code"),ix=o("True"),lx=o(`): If set to `),am=s("code"),dx=o("True"),cx=o(", "),im=s("code"),px=o("past_key_values"),hx=o(` key value states are returned and can be used to speed up decoding (see `),lm=s("code"),fx=o("past_key_values"),mx=o("). Set to "),dm=s("code"),ux=o("False"),gx=o(" during training, "),cm=s("code"),_x=o("True"),vx=o(` during generation labels (`),pm=s("code"),kx=o("tf.Tensor"),Tx=o(" or "),hm=s("code"),yx=o("np.ndarray"),bx=o(" of shape "),fm=s("code"),wx=o("(batch_size, sequence_length)"),Fx=o(", "),mm=s("em"),xx=o("optional"),$x=o(`): Labels for computing the cross entropy classification loss. Indices should be in `),um=s("code"),Bx=o("[0, ..., config.vocab_size - 1]"),Ex=o("."),Mx=l(),gm=s("p"),zx=o("Example:"),Px=l(),k(Ui.$$.fragment),j_=l(),Cn=s("h2"),Ws=s("a"),_m=s("span"),k(Wi.$$.fragment),qx=l(),vm=s("span"),jx=o("TFBertForMaskedLM"),C_=l(),Ke=s("div"),k(Hi.$$.fragment),Cx=l(),Ri=s("p"),Nx=o("Bert Model with a "),km=s("code"),Ix=o("language modeling"),Ax=o(" head on top."),Dx=l(),Qi=s("p"),Lx=o("This model inherits from "),tp=s("a"),Ox=o("TFPreTrainedModel"),Sx=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ux=l(),Vi=s("p"),Wx=o("This model is also a "),Ki=s("a"),Hx=o("tf.keras.Model"),Rx=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Qx=l(),k(Hs.$$.fragment),Vx=l(),bt=s("div"),k(Ji.$$.fragment),Kx=l(),Nn=s("p"),Jx=o("The "),op=s("a"),Gx=o("TFBertForMaskedLM"),Xx=o(" forward method, overrides the "),Tm=s("code"),Yx=o("__call__"),Zx=o(" special method."),e$=l(),k(Rs.$$.fragment),t$=l(),ym=s("p"),o$=o("Example:"),n$=l(),k(Gi.$$.fragment),N_=l(),In=s("h2"),Qs=s("a"),bm=s("span"),k(Xi.$$.fragment),s$=l(),wm=s("span"),r$=o("TFBertForNextSentencePrediction"),I_=l(),Je=s("div"),k(Yi.$$.fragment),a$=l(),Zi=s("p"),i$=o("Bert Model with a "),Fm=s("code"),l$=o("next sentence prediction (classification)"),d$=o(" head on top."),c$=l(),el=s("p"),p$=o("This model inherits from "),np=s("a"),h$=o("TFPreTrainedModel"),f$=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),m$=l(),tl=s("p"),u$=o("This model is also a "),ol=s("a"),g$=o("tf.keras.Model"),_$=o(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),v$=l(),k(Vs.$$.fragment),k$=l(),wt=s("div"),k(nl.$$.fragment),T$=l(),An=s("p"),y$=o("The "),sp=s("a"),b$=o("TFBertForNextSentencePrediction"),w$=o(" forward method, overrides the "),xm=s("code"),F$=o("__call__"),x$=o(" special method."),$$=l(),k(Ks.$$.fragment),B$=l(),$m=s("p"),E$=o("Examples:"),M$=l(),k(sl.$$.fragment),A_=l(),Dn=s("h2"),Js=s("a"),Bm=s("span"),k(rl.$$.fragment),z$=l(),Em=s("span"),P$=o("TFBertForSequenceClassification"),D_=l(),Ge=s("div"),k(al.$$.fragment),q$=l(),Mm=s("p"),j$=o(`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),C$=l(),il=s("p"),N$=o("This model inherits from "),rp=s("a"),I$=o("TFPreTrainedModel"),A$=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),D$=l(),ll=s("p"),L$=o("This model is also a "),dl=s("a"),O$=o("tf.keras.Model"),S$=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),U$=l(),k(Gs.$$.fragment),W$=l(),Ft=s("div"),k(cl.$$.fragment),H$=l(),Ln=s("p"),R$=o("The "),ap=s("a"),Q$=o("TFBertForSequenceClassification"),V$=o(" forward method, overrides the "),zm=s("code"),K$=o("__call__"),J$=o(" special method."),G$=l(),k(Xs.$$.fragment),X$=l(),Pm=s("p"),Y$=o("Example:"),Z$=l(),k(pl.$$.fragment),L_=l(),On=s("h2"),Ys=s("a"),qm=s("span"),k(hl.$$.fragment),e4=l(),jm=s("span"),t4=o("TFBertForMultipleChoice"),O_=l(),Xe=s("div"),k(fl.$$.fragment),o4=l(),Cm=s("p"),n4=o(`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),s4=l(),ml=s("p"),r4=o("This model inherits from "),ip=s("a"),a4=o("TFPreTrainedModel"),i4=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),l4=l(),ul=s("p"),d4=o("This model is also a "),gl=s("a"),c4=o("tf.keras.Model"),p4=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),h4=l(),k(Zs.$$.fragment),f4=l(),xt=s("div"),k(_l.$$.fragment),m4=l(),Sn=s("p"),u4=o("The "),lp=s("a"),g4=o("TFBertForMultipleChoice"),_4=o(" forward method, overrides the "),Nm=s("code"),v4=o("__call__"),k4=o(" special method."),T4=l(),k(er.$$.fragment),y4=l(),Im=s("p"),b4=o("Example:"),w4=l(),k(vl.$$.fragment),S_=l(),Un=s("h2"),tr=s("a"),Am=s("span"),k(kl.$$.fragment),F4=l(),Dm=s("span"),x4=o("TFBertForTokenClassification"),U_=l(),Ye=s("div"),k(Tl.$$.fragment),$4=l(),Lm=s("p"),B4=o(`Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),E4=l(),yl=s("p"),M4=o("This model inherits from "),dp=s("a"),z4=o("TFPreTrainedModel"),P4=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),q4=l(),bl=s("p"),j4=o("This model is also a "),wl=s("a"),C4=o("tf.keras.Model"),N4=o(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),I4=l(),k(or.$$.fragment),A4=l(),$t=s("div"),k(Fl.$$.fragment),D4=l(),Wn=s("p"),L4=o("The "),cp=s("a"),O4=o("TFBertForTokenClassification"),S4=o(" forward method, overrides the "),Om=s("code"),U4=o("__call__"),W4=o(" special method."),H4=l(),k(nr.$$.fragment),R4=l(),Sm=s("p"),Q4=o("Example:"),V4=l(),k(xl.$$.fragment),W_=l(),Hn=s("h2"),sr=s("a"),Um=s("span"),k($l.$$.fragment),K4=l(),Wm=s("span"),J4=o("TFBertForQuestionAnswering"),H_=l(),Ze=s("div"),k(Bl.$$.fragment),G4=l(),Rn=s("p"),X4=o(`Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Hm=s("code"),Y4=o("span start logits"),Z4=o(" and "),Rm=s("code"),eB=o("span end logits"),tB=o(")."),oB=l(),El=s("p"),nB=o("This model inherits from "),pp=s("a"),sB=o("TFPreTrainedModel"),rB=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),aB=l(),Ml=s("p"),iB=o("This model is also a "),zl=s("a"),lB=o("tf.keras.Model"),dB=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cB=l(),k(rr.$$.fragment),pB=l(),Bt=s("div"),k(Pl.$$.fragment),hB=l(),Qn=s("p"),fB=o("The "),hp=s("a"),mB=o("TFBertForQuestionAnswering"),uB=o(" forward method, overrides the "),Qm=s("code"),gB=o("__call__"),_B=o(" special method."),vB=l(),k(ar.$$.fragment),kB=l(),Vm=s("p"),TB=o("Example:"),yB=l(),k(ql.$$.fragment),R_=l(),Vn=s("h2"),ir=s("a"),Km=s("span"),k(jl.$$.fragment),bB=l(),Jm=s("span"),wB=o("FlaxBertModel"),Q_=l(),Ie=s("div"),k(Cl.$$.fragment),FB=l(),Gm=s("p"),xB=o("The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),$B=l(),Nl=s("p"),BB=o("This model inherits from "),fp=s("a"),EB=o("FlaxPreTrainedModel"),MB=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),zB=l(),Il=s("p"),PB=o("This model is also a Flax Linen "),Al=s("a"),qB=o("flax.linen.Module"),jB=o(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),CB=l(),Xm=s("p"),NB=o("Finally, this model supports inherent JAX features such as:"),IB=l(),lo=s("ul"),Ym=s("li"),Dl=s("a"),AB=o("Just-In-Time (JIT) compilation"),DB=l(),Zm=s("li"),Ll=s("a"),LB=o("Automatic Differentiation"),OB=l(),eu=s("li"),Ol=s("a"),SB=o("Vectorization"),UB=l(),tu=s("li"),Sl=s("a"),WB=o("Parallelization"),HB=l(),Et=s("div"),k(Ul.$$.fragment),RB=l(),Kn=s("p"),QB=o("The "),ou=s("code"),VB=o("FlaxBertPreTrainedModel"),KB=o(" forward method, overrides the "),nu=s("code"),JB=o("__call__"),GB=o(" special method."),XB=l(),k(lr.$$.fragment),YB=l(),su=s("p"),ZB=o("Example:"),eE=l(),k(Wl.$$.fragment),V_=l(),Jn=s("h2"),dr=s("a"),ru=s("span"),k(Hl.$$.fragment),tE=l(),au=s("span"),oE=o("FlaxBertForPreTraining"),K_=l(),Ae=s("div"),k(Rl.$$.fragment),nE=l(),Gn=s("p"),sE=o("Bert Model with two heads on top as done during the pretraining: a "),iu=s("code"),rE=o("masked language modeling"),aE=o(" head and a "),lu=s("code"),iE=o("next sentence prediction (classification)"),lE=o(" head."),dE=l(),Ql=s("p"),cE=o("This model inherits from "),mp=s("a"),pE=o("FlaxPreTrainedModel"),hE=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),fE=l(),Vl=s("p"),mE=o("This model is also a Flax Linen "),Kl=s("a"),uE=o("flax.linen.Module"),gE=o(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),_E=l(),du=s("p"),vE=o("Finally, this model supports inherent JAX features such as:"),kE=l(),co=s("ul"),cu=s("li"),Jl=s("a"),TE=o("Just-In-Time (JIT) compilation"),yE=l(),pu=s("li"),Gl=s("a"),bE=o("Automatic Differentiation"),wE=l(),hu=s("li"),Xl=s("a"),FE=o("Vectorization"),xE=l(),fu=s("li"),Yl=s("a"),$E=o("Parallelization"),BE=l(),Mt=s("div"),k(Zl.$$.fragment),EE=l(),Xn=s("p"),ME=o("The "),mu=s("code"),zE=o("FlaxBertPreTrainedModel"),PE=o(" forward method, overrides the "),uu=s("code"),qE=o("__call__"),jE=o(" special method."),CE=l(),k(cr.$$.fragment),NE=l(),gu=s("p"),IE=o("Example:"),AE=l(),k(ed.$$.fragment),J_=l(),Yn=s("h2"),pr=s("a"),_u=s("span"),k(td.$$.fragment),DE=l(),vu=s("span"),LE=o("FlaxBertForMaskedLM"),G_=l(),De=s("div"),k(od.$$.fragment),OE=l(),nd=s("p"),SE=o("Bert Model with a "),ku=s("code"),UE=o("language modeling"),WE=o(" head on top."),HE=l(),sd=s("p"),RE=o("This model inherits from "),up=s("a"),QE=o("FlaxPreTrainedModel"),VE=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),KE=l(),rd=s("p"),JE=o("This model is also a Flax Linen "),ad=s("a"),GE=o("flax.linen.Module"),XE=o(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),YE=l(),Tu=s("p"),ZE=o("Finally, this model supports inherent JAX features such as:"),eM=l(),po=s("ul"),yu=s("li"),id=s("a"),tM=o("Just-In-Time (JIT) compilation"),oM=l(),bu=s("li"),ld=s("a"),nM=o("Automatic Differentiation"),sM=l(),wu=s("li"),dd=s("a"),rM=o("Vectorization"),aM=l(),Fu=s("li"),cd=s("a"),iM=o("Parallelization"),lM=l(),zt=s("div"),k(pd.$$.fragment),dM=l(),Zn=s("p"),cM=o("The "),xu=s("code"),pM=o("FlaxBertPreTrainedModel"),hM=o(" forward method, overrides the "),$u=s("code"),fM=o("__call__"),mM=o(" special method."),uM=l(),k(hr.$$.fragment),gM=l(),Bu=s("p"),_M=o("Example:"),vM=l(),k(hd.$$.fragment),X_=l(),es=s("h2"),fr=s("a"),Eu=s("span"),k(fd.$$.fragment),kM=l(),Mu=s("span"),TM=o("FlaxBertForNextSentencePrediction"),Y_=l(),Le=s("div"),k(md.$$.fragment),yM=l(),ud=s("p"),bM=o("Bert Model with a "),zu=s("code"),wM=o("next sentence prediction (classification)"),FM=o(" head on top."),xM=l(),gd=s("p"),$M=o("This model inherits from "),gp=s("a"),BM=o("FlaxPreTrainedModel"),EM=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),MM=l(),_d=s("p"),zM=o("This model is also a Flax Linen "),vd=s("a"),PM=o("flax.linen.Module"),qM=o(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),jM=l(),Pu=s("p"),CM=o("Finally, this model supports inherent JAX features such as:"),NM=l(),ho=s("ul"),qu=s("li"),kd=s("a"),IM=o("Just-In-Time (JIT) compilation"),AM=l(),ju=s("li"),Td=s("a"),DM=o("Automatic Differentiation"),LM=l(),Cu=s("li"),yd=s("a"),OM=o("Vectorization"),SM=l(),Nu=s("li"),bd=s("a"),UM=o("Parallelization"),WM=l(),Pt=s("div"),k(wd.$$.fragment),HM=l(),ts=s("p"),RM=o("The "),Iu=s("code"),QM=o("FlaxBertPreTrainedModel"),VM=o(" forward method, overrides the "),Au=s("code"),KM=o("__call__"),JM=o(" special method."),GM=l(),k(mr.$$.fragment),XM=l(),Du=s("p"),YM=o("Example:"),ZM=l(),k(Fd.$$.fragment),Z_=l(),os=s("h2"),ur=s("a"),Lu=s("span"),k(xd.$$.fragment),ez=l(),Ou=s("span"),tz=o("FlaxBertForSequenceClassification"),ev=l(),Oe=s("div"),k($d.$$.fragment),oz=l(),Su=s("p"),nz=o(`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),sz=l(),Bd=s("p"),rz=o("This model inherits from "),_p=s("a"),az=o("FlaxPreTrainedModel"),iz=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),lz=l(),Ed=s("p"),dz=o("This model is also a Flax Linen "),Md=s("a"),cz=o("flax.linen.Module"),pz=o(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),hz=l(),Uu=s("p"),fz=o("Finally, this model supports inherent JAX features such as:"),mz=l(),fo=s("ul"),Wu=s("li"),zd=s("a"),uz=o("Just-In-Time (JIT) compilation"),gz=l(),Hu=s("li"),Pd=s("a"),_z=o("Automatic Differentiation"),vz=l(),Ru=s("li"),qd=s("a"),kz=o("Vectorization"),Tz=l(),Qu=s("li"),jd=s("a"),yz=o("Parallelization"),bz=l(),qt=s("div"),k(Cd.$$.fragment),wz=l(),ns=s("p"),Fz=o("The "),Vu=s("code"),xz=o("FlaxBertPreTrainedModel"),$z=o(" forward method, overrides the "),Ku=s("code"),Bz=o("__call__"),Ez=o(" special method."),Mz=l(),k(gr.$$.fragment),zz=l(),Ju=s("p"),Pz=o("Example:"),qz=l(),k(Nd.$$.fragment),tv=l(),ss=s("h2"),_r=s("a"),Gu=s("span"),k(Id.$$.fragment),jz=l(),Xu=s("span"),Cz=o("FlaxBertForMultipleChoice"),ov=l(),Se=s("div"),k(Ad.$$.fragment),Nz=l(),Yu=s("p"),Iz=o(`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Az=l(),Dd=s("p"),Dz=o("This model inherits from "),vp=s("a"),Lz=o("FlaxPreTrainedModel"),Oz=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Sz=l(),Ld=s("p"),Uz=o("This model is also a Flax Linen "),Od=s("a"),Wz=o("flax.linen.Module"),Hz=o(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Rz=l(),Zu=s("p"),Qz=o("Finally, this model supports inherent JAX features such as:"),Vz=l(),mo=s("ul"),eg=s("li"),Sd=s("a"),Kz=o("Just-In-Time (JIT) compilation"),Jz=l(),tg=s("li"),Ud=s("a"),Gz=o("Automatic Differentiation"),Xz=l(),og=s("li"),Wd=s("a"),Yz=o("Vectorization"),Zz=l(),ng=s("li"),Hd=s("a"),e5=o("Parallelization"),t5=l(),jt=s("div"),k(Rd.$$.fragment),o5=l(),rs=s("p"),n5=o("The "),sg=s("code"),s5=o("FlaxBertPreTrainedModel"),r5=o(" forward method, overrides the "),rg=s("code"),a5=o("__call__"),i5=o(" special method."),l5=l(),k(vr.$$.fragment),d5=l(),ag=s("p"),c5=o("Example:"),p5=l(),k(Qd.$$.fragment),nv=l(),as=s("h2"),kr=s("a"),ig=s("span"),k(Vd.$$.fragment),h5=l(),lg=s("span"),f5=o("FlaxBertForTokenClassification"),sv=l(),Ue=s("div"),k(Kd.$$.fragment),m5=l(),dg=s("p"),u5=o(`Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),g5=l(),Jd=s("p"),_5=o("This model inherits from "),kp=s("a"),v5=o("FlaxPreTrainedModel"),k5=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),T5=l(),Gd=s("p"),y5=o("This model is also a Flax Linen "),Xd=s("a"),b5=o("flax.linen.Module"),w5=o(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),F5=l(),cg=s("p"),x5=o("Finally, this model supports inherent JAX features such as:"),$5=l(),uo=s("ul"),pg=s("li"),Yd=s("a"),B5=o("Just-In-Time (JIT) compilation"),E5=l(),hg=s("li"),Zd=s("a"),M5=o("Automatic Differentiation"),z5=l(),fg=s("li"),ec=s("a"),P5=o("Vectorization"),q5=l(),mg=s("li"),tc=s("a"),j5=o("Parallelization"),C5=l(),Ct=s("div"),k(oc.$$.fragment),N5=l(),is=s("p"),I5=o("The "),ug=s("code"),A5=o("FlaxBertPreTrainedModel"),D5=o(" forward method, overrides the "),gg=s("code"),L5=o("__call__"),O5=o(" special method."),S5=l(),k(Tr.$$.fragment),U5=l(),_g=s("p"),W5=o("Example:"),H5=l(),k(nc.$$.fragment),rv=l(),ls=s("h2"),yr=s("a"),vg=s("span"),k(sc.$$.fragment),R5=l(),kg=s("span"),Q5=o("FlaxBertForQuestionAnswering"),av=l(),We=s("div"),k(rc.$$.fragment),V5=l(),ds=s("p"),K5=o(`Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Tg=s("code"),J5=o("span start logits"),G5=o(" and "),yg=s("code"),X5=o("span end logits"),Y5=o(")."),Z5=l(),ac=s("p"),eP=o("This model inherits from "),Tp=s("a"),tP=o("FlaxPreTrainedModel"),oP=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),nP=l(),ic=s("p"),sP=o("This model is also a Flax Linen "),lc=s("a"),rP=o("flax.linen.Module"),aP=o(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),iP=l(),bg=s("p"),lP=o("Finally, this model supports inherent JAX features such as:"),dP=l(),go=s("ul"),wg=s("li"),dc=s("a"),cP=o("Just-In-Time (JIT) compilation"),pP=l(),Fg=s("li"),cc=s("a"),hP=o("Automatic Differentiation"),fP=l(),xg=s("li"),pc=s("a"),mP=o("Vectorization"),uP=l(),$g=s("li"),hc=s("a"),gP=o("Parallelization"),_P=l(),Nt=s("div"),k(fc.$$.fragment),vP=l(),cs=s("p"),kP=o("The "),Bg=s("code"),TP=o("FlaxBertPreTrainedModel"),yP=o(" forward method, overrides the "),Eg=s("code"),bP=o("__call__"),wP=o(" special method."),FP=l(),k(br.$$.fragment),xP=l(),Mg=s("p"),$P=o("Example:"),BP=l(),k(mc.$$.fragment),this.h()},l(i){const m=CN('[data-svelte="svelte-1phssyn"]',document.head);p=r(m,"META",{name:!0,content:!0}),m.forEach(t),$=d(i),g=r(i,"H1",{class:!0});var uc=a(g);v=r(uc,"A",{id:!0,class:!0,href:!0});var zg=a(v);x=r(zg,"SPAN",{});var Pg=a(x);T(_.$$.fragment,Pg),Pg.forEach(t),zg.forEach(t),u=d(uc),B=r(uc,"SPAN",{});var qg=a(B);ue=n(qg,"BERT"),qg.forEach(t),uc.forEach(t),X=d(i),M=r(i,"H2",{class:!0});var gc=a(M);ee=r(gc,"A",{id:!0,class:!0,href:!0});var jg=a(ee);L=r(jg,"SPAN",{});var Cg=a(L);T(oe.$$.fragment,Cg),Cg.forEach(t),jg.forEach(t),ge=d(gc),O=r(gc,"SPAN",{});var Ng=a(O);_e=n(Ng,"Overview"),Ng.forEach(t),gc.forEach(t),ce=d(i),J=r(i,"P",{});var _c=a(J);I=n(_c,"The BERT model was proposed in "),ne=r(_c,"A",{href:!0,rel:!0});var Ig=a(ne);Y=n(Ig,"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),Ig.forEach(t),z=n(_c,` by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. 
It\u2019s a bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia.`),_c.forEach(t),q=d(i),se=r(i,"P",{});var Ag=a(se);H=n(Ag,"The abstract from the paper is the following:"),Ag.forEach(t),pe=d(i),re=r(i,"P",{});var Dg=a(re);S=r(Dg,"EM",{});var Lg=a(S);ve=n(Lg,`We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.`),Lg.forEach(t),Dg.forEach(t),he=d(i),P=r(i,"P",{});var Og=a(P);ie=r(Og,"EM",{});var Sg=a(ie);R=n(Sg,`BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).`),Sg.forEach(t),Og.forEach(t),fe=d(i),ae=r(i,"P",{});var Ug=a(ae);Q=n(Ug,"Tips:"),Ug.forEach(t),me=d(i),te=r(i,"UL",{});var vc=a(te);N=r(vc,"LI",{});var Wg=a(N);ke=n(Wg,`BERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),Wg.forEach(t),V=d(vc),le=r(vc,"LI",{});var Hg=a(le);h=n(Hg,`BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation.`),Hg.forEach(t),vc.forEach(t),E=d(i),K=r(i,"P",{});var ps=a(K);Ee=n(ps,"This model was contributed by "),Fe=r(ps,"A",{href:!0,rel:!0});var Rg=a(Fe);A=n(Rg,"thomwolf"),Rg.forEach(t),Me=n(ps,". The original code can be found "),xe=r(ps,"A",{href:!0,rel:!0});var Qg=a(xe);ze=n(Qg,"here"),Qg.forEach(t),D=n(ps,"."),ps.forEach(t),W=d(i),Te=r(i,"H2",{class:!0});var kc=a(Te);be=r(kc,"A",{id:!0,class:!0,href:!0});var Vg=a(be);U=r(Vg,"SPAN",{});var Kg=a(U);T($e.$$.fragment,Kg),Kg.forEach(t),Vg.forEach(t),Pe=d(kc),de=r(kc,"SPAN",{});var Jg=a(de);qe=n(Jg,"BertConfig"),Jg.forEach(t),kc.forEach(t),e_=d(i),ot=r(i,"DIV",{class:!0});var Kt=a(ot);T(jr.$$.fragment,Kt),nT=d(Kt),ao=r(Kt,"P",{});var _o=a(ao);sT=n(_o,"This is the configuration class to store the configuration of a "),Tc=r(_o,"A",{href:!0});var Gg=a(Tc);rT=n(Gg,"BertModel"),Gg.forEach(t),aT=n(_o,` or a `),yc=r(_o,"A",{href:!0});var Xg=a(yc);iT=n(Xg,"TFBertModel"),Xg.forEach(t),lT=n(_o,`. It is used to instantiate a BERT model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the BERT `),Cr=r(_o,"A",{href:!0,rel:!0});var Yg=a(Cr);dT=n(Yg,"bert-base-uncased"),Yg.forEach(t),cT=n(_o," architecture."),_o.forEach(t),pT=d(Kt),Zo=r(Kt,"P",{});var hs=a(Zo);hT=n(hs,"Configuration objects inherit from "),bc=r(hs,"A",{href:!0});var Zg=a(bc);fT=n(Zg,"PretrainedConfig"),Zg.forEach(t),mT=n(hs,` and can be used to control the model outputs. Read the documentation from `),wc=r(hs,"A",{href:!0});var qP=a(wc);uT=n(qP,"PretrainedConfig"),qP.forEach(t),gT=n(hs," for more information."),hs.forEach(t),_T=d(Kt),nh=r(Kt,"P",{});var jP=a(nh);vT=n(jP,"Examples:"),jP.forEach(t),kT=d(Kt),T(Nr.$$.fragment,Kt),Kt.forEach(t),t_=d(i),en=r(i,"H2",{class:!0});var lv=a(en);fs=r(lv,"A",{id:!0,class:!0,href:!0});var CP=a(fs);sh=r(CP,"SPAN",{});var NP=a(sh);T(Ir.$$.fragment,NP),NP.forEach(t),CP.forEach(t),TT=d(lv),rh=r(lv,"SPAN",{});var IP=a(rh);yT=n(IP,"BertTokenizer"),IP.forEach(t),lv.forEach(t),o_=d(i),je=r(i,"DIV",{class:!0});var It=a(je);T(Ar.$$.fragment,It),bT=d(It),ah=r(It,"P",{});var AP=a(ah);wT=n(AP,"Construct a BERT tokenizer. Based on WordPiece."),AP.forEach(t),FT=d(It),Dr=r(It,"P",{});var dv=a(Dr);xT=n(dv,"This tokenizer inherits from "),Fc=r(dv,"A",{href:!0});var DP=a(Fc);$T=n(DP,"PreTrainedTokenizer"),DP.forEach(t),BT=n(dv,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),dv.forEach(t),ET=d(It),vo=r(It,"DIV",{class:!0});var yp=a(vo);T(Lr.$$.fragment,yp),MT=d(yp),ih=r(yp,"P",{});var LP=a(ih);zT=n(LP,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),LP.forEach(t),PT=d(yp),Or=r(yp,"UL",{});var cv=a(Or);xc=r(cv,"LI",{});var EP=a(xc);qT=n(EP,"single sequence: "),lh=r(EP,"CODE",{});var OP=a(lh);jT=n(OP,"[CLS] X [SEP]"),OP.forEach(t),EP.forEach(t),CT=d(cv),$c=r(cv,"LI",{});var MP=a($c);NT=n(MP,"pair of sequences: "),dh=r(MP,"CODE",{});var SP=a(dh);IT=n(SP,"[CLS] A [SEP] B [SEP]"),SP.forEach(t),MP.forEach(t),cv.forEach(t),yp.forEach(t),AT=d(It),ms=r(It,"DIV",{class:!0});var pv=a(ms);T(Sr.$$.fragment,pv),DT=d(pv),Ur=r(pv,"P",{});var hv=a(Ur);LT=n(hv,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ch=r(hv,"CODE",{});var UP=a(ch);OT=n(UP,"prepare_for_model"),UP.forEach(t),ST=n(hv," method."),hv.forEach(t),pv.forEach(t),UT=d(It),Jt=r(It,"DIV",{class:!0});var wr=a(Jt);T(Wr.$$.fragment,wr),WT=d(wr),ph=r(wr,"P",{});var WP=a(ph);HT=n(WP,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A BERT sequence pair mask has the following format:`),WP.forEach(t),RT=d(wr),T(Hr.$$.fragment,wr),QT=d(wr),tn=r(wr,"P",{});var bp=a(tn);VT=n(bp,"If "),hh=r(bp,"CODE",{});var HP=a(hh);KT=n(HP,"token_ids_1"),HP.forEach(t),JT=n(bp," is "),fh=r(bp,"CODE",{});var RP=a(fh);GT=n(RP,"None"),RP.forEach(t),XT=n(bp,", this method only returns the first portion of the mask (0s)."),bp.forEach(t),wr.forEach(t),YT=d(It),mh=r(It,"DIV",{class:!0}),a(mh).forEach(t),It.forEach(t),n_=d(i),on=r(i,"H2",{class:!0});var fv=a(on);us=r(fv,"A",{id:!0,class:!0,href:!0});var QP=a(us);uh=r(QP,"SPAN",{});var VP=a(uh);T(Rr.$$.fragment,VP),VP.forEach(t),QP.forEach(t),ZT=d(fv),gh=r(fv,"SPAN",{});var KP=a(gh);ey=n(KP,"BertTokenizerFast"),KP.forEach(t),fv.forEach(t),s_=d(i),nt=r(i,"DIV",{class:!0});var To=a(nt);T(Qr.$$.fragment,To),ty=d(To),Vr=r(To,"P",{});var mv=a(Vr);oy=n(mv,"Construct a \u201Cfast\u201D BERT tokenizer (backed by HuggingFace\u2019s "),_h=r(mv,"EM",{});var JP=a(_h);ny=n(JP,"tokenizers"),JP.forEach(t),sy=n(mv," library). Based on WordPiece."),mv.forEach(t),ry=d(To),Kr=r(To,"P",{});var uv=a(Kr);ay=n(uv,"This tokenizer inherits from "),Bc=r(uv,"A",{href:!0});var GP=a(Bc);iy=n(GP,"PreTrainedTokenizerFast"),GP.forEach(t),ly=n(uv,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),uv.forEach(t),dy=d(To),ko=r(To,"DIV",{class:!0});var wp=a(ko);T(Jr.$$.fragment,wp),cy=d(wp),vh=r(wp,"P",{});var XP=a(vh);py=n(XP,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),XP.forEach(t),hy=d(wp),Gr=r(wp,"UL",{});var gv=a(Gr);Ec=r(gv,"LI",{});var zP=a(Ec);fy=n(zP,"single sequence: "),kh=r(zP,"CODE",{});var YP=a(kh);my=n(YP,"[CLS] X [SEP]"),YP.forEach(t),zP.forEach(t),uy=d(gv),Mc=r(gv,"LI",{});var PP=a(Mc);gy=n(PP,"pair of sequences: "),Th=r(PP,"CODE",{});var ZP=a(Th);_y=n(ZP,"[CLS] A [SEP] B [SEP]"),ZP.forEach(t),PP.forEach(t),gv.forEach(t),wp.forEach(t),vy=d(To),Gt=r(To,"DIV",{class:!0});var Fr=a(Gt);T(Xr.$$.fragment,Fr),ky=d(Fr),yh=r(Fr,"P",{});var eq=a(yh);Ty=n(eq,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A BERT sequence pair mask has the following format:`),eq.forEach(t),yy=d(Fr),T(Yr.$$.fragment,Fr),by=d(Fr),nn=r(Fr,"P",{});var Fp=a(nn);wy=n(Fp,"If "),bh=r(Fp,"CODE",{});var tq=a(bh);Fy=n(tq,"token_ids_1"),tq.forEach(t),xy=n(Fp," is "),wh=r(Fp,"CODE",{});var oq=a(wh);$y=n(oq,"None"),oq.forEach(t),By=n(Fp,", this method only returns the first portion of the mask (0s)."),Fp.forEach(t),Fr.forEach(t),To.forEach(t),r_=d(i),sn=r(i,"H2",{class:!0});var _v=a(sn);gs=r(_v,"A",{id:!0,class:!0,href:!0});var nq=a(gs);Fh=r(nq,"SPAN",{});var sq=a(Fh);T(Zr.$$.fragment,sq),sq.forEach(t),nq.forEach(t),Ey=d(_v),xh=r(_v,"SPAN",{});var rq=a(xh);My=n(rq,"Bert specific outputs"),rq.forEach(t),_v.forEach(t),a_=d(i),rn=r(i,"DIV",{class:!0});var vv=a(rn);T(ea.$$.fragment,vv),zy=d(vv),ta=r(vv,"P",{});var kv=a(ta);Py=n(kv,"Output type of "),zc=r(kv,"A",{href:!0});var aq=a(zc);qy=n(aq,"BertForPreTraining"),aq.forEach(t),jy=n(kv,"."),kv.forEach(t),vv.forEach(t),i_=d(i),an=r(i,"DIV",{class:!0});var Tv=a(an);T(oa.$$.fragment,Tv),Cy=d(Tv),na=r(Tv,"P",{});var yv=a(na);Ny=n(yv,"Output type of "),Pc=r(yv,"A",{href:!0});var iq=a(Pc);Iy=n(iq,"TFBertForPreTraining"),iq.forEach(t),Ay=n(yv,"."),yv.forEach(t),Tv.forEach(t),l_=d(i),io=r(i,"DIV",{class:!0});var xp=a(io);T(sa.$$.fragment,xp),Dy=d(xp),ra=r(xp,"P",{});var bv=a(ra);Ly=n(bv,"Output type of "),qc=r(bv,"A",{href:!0});var lq=a(qc);Oy=n(lq,"BertForPreTraining"),lq.forEach(t),Sy=n(bv,"."),bv.forEach(t),Uy=d(xp),_s=r(xp,"DIV",{class:!0});var wv=a(_s);T(aa.$$.fragment,wv),Wy=d(wv),$h=r(wv,"P",{});var dq=a($h);Hy=n(dq,"\u201CReturns a new object replacing the specified fields with new values."),dq.forEach(t),wv.forEach(t),xp.forEach(t),d_=d(i),ln=r(i,"H2",{class:!0});var Fv=a(ln);vs=r(Fv,"A",{id:!0,class:!0,href:!0});var cq=a(vs);Bh=r(cq,"SPAN",{});var pq=a(Bh);T(ia.$$.fragment,pq),pq.forEach(t),cq.forEach(t),Ry=d(Fv),Eh=r(Fv,"SPAN",{});var hq=a(Eh);Qy=n(hq,"BertModel"),hq.forEach(t),Fv.forEach(t),c_=d(i),Ce=r(i,"DIV",{class:!0});var At=a(Ce);T(la.$$.fragment,At),Vy=d(At),Mh=r(At,"P",{});var fq=a(Mh);Ky=n(fq,"The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),fq.forEach(t),Jy=d(At),da=r(At,"P",{});var xv=a(da);Gy=n(xv,"This model inherits from "),jc=r(xv,"A",{href:!0});var mq=a(jc);Xy=n(mq,"PreTrainedModel"),mq.forEach(t),Yy=n(xv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xv.forEach(t),Zy=d(At),ca=r(At,"P",{});var $v=a(ca);eb=n($v,"This model is also a PyTorch "),pa=r($v,"A",{href:!0,rel:!0});var uq=a(pa);tb=n(uq,"torch.nn.Module"),uq.forEach(t),ob=n($v,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$v.forEach(t),nb=d(At),ha=r(At,"P",{});var Bv=a(ha);sb=n(Bv,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),fa=r(Bv,"A",{href:!0,rel:!0});var gq=a(fa);rb=n(gq,`Attention is all you need`),gq.forEach(t),ab=n(Bv,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Bv.forEach(t),ib=d(At),Re=r(At,"P",{});var Dt=a(Re);lb=n(Dt,"To behave as an decoder the model needs to be initialized with the "),zh=r(Dt,"CODE",{});var _q=a(zh);db=n(_q,"is_decoder"),_q.forEach(t),cb=n(Dt,` argument of the configuration set to `),Ph=r(Dt,"CODE",{});var vq=a(Ph);pb=n(vq,"True"),vq.forEach(t),hb=n(Dt,". To be used in a Seq2Seq model, the model needs to initialized with both "),qh=r(Dt,"CODE",{});var kq=a(qh);fb=n(kq,"is_decoder"),kq.forEach(t),mb=n(Dt,` argument and `),jh=r(Dt,"CODE",{});var Tq=a(jh);ub=n(Tq,"add_cross_attention"),Tq.forEach(t),gb=n(Dt," set to "),Ch=r(Dt,"CODE",{});var yq=a(Ch);_b=n(yq,"True"),yq.forEach(t),vb=n(Dt,"; an "),Nh=r(Dt,"CODE",{});var bq=a(Nh);kb=n(bq,"encoder_hidden_states"),bq.forEach(t),Tb=n(Dt," is then expected as an input to the forward pass."),Dt.forEach(t),yb=d(At),ht=r(At,"DIV",{class:!0});var yo=a(ht);T(ma.$$.fragment,yo),bb=d(yo),dn=r(yo,"P",{});var $p=a(dn);wb=n($p,"The "),Cc=r($p,"A",{href:!0});var wq=a(Cc);Fb=n(wq,"BertModel"),wq.forEach(t),xb=n($p," forward method, overrides the "),Ih=r($p,"CODE",{});var Fq=a(Ih);$b=n(Fq,"__call__"),Fq.forEach(t),Bb=n($p," special method."),$p.forEach(t),Eb=d(yo),T(ks.$$.fragment,yo),Mb=d(yo),Ah=r(yo,"P",{});var xq=a(Ah);zb=n(xq,"Example:"),xq.forEach(t),Pb=d(yo),T(ua.$$.fragment,yo),yo.forEach(t),At.forEach(t),p_=d(i),cn=r(i,"H2",{class:!0});var Ev=a(cn);Ts=r(Ev,"A",{id:!0,class:!0,href:!0});var $q=a(Ts);Dh=r($q,"SPAN",{});var Bq=a(Dh);T(ga.$$.fragment,Bq),Bq.forEach(t),$q.forEach(t),qb=d(Ev),Lh=r(Ev,"SPAN",{});var Eq=a(Lh);jb=n(Eq,"BertForPreTraining"),Eq.forEach(t),Ev.forEach(t),h_=d(i),st=r(i,"DIV",{class:!0});var bo=a(st);T(_a.$$.fragment,bo),Cb=d(bo),pn=r(bo,"P",{});var Bp=a(pn);Nb=n(Bp,"Bert Model with two heads on top as done during the pretraining: a "),Oh=r(Bp,"CODE",{});var Mq=a(Oh);Ib=n(Mq,"masked language modeling"),Mq.forEach(t),Ab=n(Bp," head and a "),Sh=r(Bp,"CODE",{});var zq=a(Sh);Db=n(zq,"next sentence prediction (classification)"),zq.forEach(t),Lb=n(Bp," head."),Bp.forEach(t),Ob=d(bo),va=r(bo,"P",{});var Mv=a(va);Sb=n(Mv,"This model inherits from "),Nc=r(Mv,"A",{href:!0});var Pq=a(Nc);Ub=n(Pq,"PreTrainedModel"),Pq.forEach(t),Wb=n(Mv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mv.forEach(t),Hb=d(bo),ka=r(bo,"P",{});var zv=a(ka);Rb=n(zv,"This model is also a PyTorch "),Ta=r(zv,"A",{href:!0,rel:!0});var qq=a(Ta);Qb=n(qq,"torch.nn.Module"),qq.forEach(t),Vb=n(zv,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zv.forEach(t),Kb=d(bo),ft=r(bo,"DIV",{class:!0});var wo=a(ft);T(ya.$$.fragment,wo),Jb=d(wo),hn=r(wo,"P",{});var Ep=a(hn);Gb=n(Ep,"The "),Ic=r(Ep,"A",{href:!0});var jq=a(Ic);Xb=n(jq,"BertForPreTraining"),jq.forEach(t),Yb=n(Ep," forward method, overrides the "),Uh=r(Ep,"CODE",{});var Cq=a(Uh);Zb=n(Cq,"__call__"),Cq.forEach(t),e1=n(Ep," special method."),Ep.forEach(t),t1=d(wo),T(ys.$$.fragment,wo),o1=d(wo),Wh=r(wo,"P",{});var Nq=a(Wh);n1=n(Nq,"Example:"),Nq.forEach(t),s1=d(wo),T(ba.$$.fragment,wo),wo.forEach(t),bo.forEach(t),f_=d(i),fn=r(i,"H2",{class:!0});var Pv=a(fn);bs=r(Pv,"A",{id:!0,class:!0,href:!0});var Iq=a(bs);Hh=r(Iq,"SPAN",{});var Aq=a(Hh);T(wa.$$.fragment,Aq),Aq.forEach(t),Iq.forEach(t),r1=d(Pv),Rh=r(Pv,"SPAN",{});var Dq=a(Rh);a1=n(Dq,"BertLMHeadModel"),Dq.forEach(t),Pv.forEach(t),m_=d(i),rt=r(i,"DIV",{class:!0});var Fo=a(rt);T(Fa.$$.fragment,Fo),i1=d(Fo),xa=r(Fo,"P",{});var qv=a(xa);l1=n(qv,"Bert Model with a "),Qh=r(qv,"CODE",{});var Lq=a(Qh);d1=n(Lq,"language modeling"),Lq.forEach(t),c1=n(qv," head on top for CLM fine-tuning."),qv.forEach(t),p1=d(Fo),$a=r(Fo,"P",{});var jv=a($a);h1=n(jv,"This model inherits from "),Ac=r(jv,"A",{href:!0});var Oq=a(Ac);f1=n(Oq,"PreTrainedModel"),Oq.forEach(t),m1=n(jv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jv.forEach(t),u1=d(Fo),Ba=r(Fo,"P",{});var Cv=a(Ba);g1=n(Cv,"This model is also a PyTorch "),Ea=r(Cv,"A",{href:!0,rel:!0});var Sq=a(Ea);_1=n(Sq,"torch.nn.Module"),Sq.forEach(t),v1=n(Cv,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cv.forEach(t),k1=d(Fo),mt=r(Fo,"DIV",{class:!0});var xo=a(mt);T(Ma.$$.fragment,xo),T1=d(xo),mn=r(xo,"P",{});var Mp=a(mn);y1=n(Mp,"The "),Dc=r(Mp,"A",{href:!0});var Uq=a(Dc);b1=n(Uq,"BertLMHeadModel"),Uq.forEach(t),w1=n(Mp," forward method, overrides the "),Vh=r(Mp,"CODE",{});var Wq=a(Vh);F1=n(Wq,"__call__"),Wq.forEach(t),x1=n(Mp," special method."),Mp.forEach(t),$1=d(xo),T(ws.$$.fragment,xo),B1=d(xo),Kh=r(xo,"P",{});var Hq=a(Kh);E1=n(Hq,"Example:"),Hq.forEach(t),M1=d(xo),T(za.$$.fragment,xo),xo.forEach(t),Fo.forEach(t),u_=d(i),un=r(i,"H2",{class:!0});var Nv=a(un);Fs=r(Nv,"A",{id:!0,class:!0,href:!0});var Rq=a(Fs);Jh=r(Rq,"SPAN",{});var Qq=a(Jh);T(Pa.$$.fragment,Qq),Qq.forEach(t),Rq.forEach(t),z1=d(Nv),Gh=r(Nv,"SPAN",{});var Vq=a(Gh);P1=n(Vq,"BertForMaskedLM"),Vq.forEach(t),Nv.forEach(t),g_=d(i),at=r(i,"DIV",{class:!0});var $o=a(at);T(qa.$$.fragment,$o),q1=d($o),ja=r($o,"P",{});var Iv=a(ja);j1=n(Iv,"Bert Model with a "),Xh=r(Iv,"CODE",{});var Kq=a(Xh);C1=n(Kq,"language modeling"),Kq.forEach(t),N1=n(Iv," head on top."),Iv.forEach(t),I1=d($o),Ca=r($o,"P",{});var Av=a(Ca);A1=n(Av,"This model inherits from "),Lc=r(Av,"A",{href:!0});var Jq=a(Lc);D1=n(Jq,"PreTrainedModel"),Jq.forEach(t),L1=n(Av,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Av.forEach(t),O1=d($o),Na=r($o,"P",{});var Dv=a(Na);S1=n(Dv,"This model is also a PyTorch "),Ia=r(Dv,"A",{href:!0,rel:!0});var Gq=a(Ia);U1=n(Gq,"torch.nn.Module"),Gq.forEach(t),W1=n(Dv,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dv.forEach(t),H1=d($o),ut=r($o,"DIV",{class:!0});var Bo=a(ut);T(Aa.$$.fragment,Bo),R1=d(Bo),gn=r(Bo,"P",{});var zp=a(gn);Q1=n(zp,"The "),Oc=r(zp,"A",{href:!0});var Xq=a(Oc);V1=n(Xq,"BertForMaskedLM"),Xq.forEach(t),K1=n(zp," forward method, overrides the "),Yh=r(zp,"CODE",{});var Yq=a(Yh);J1=n(Yq,"__call__"),Yq.forEach(t),G1=n(zp," special method."),zp.forEach(t),X1=d(Bo),T(xs.$$.fragment,Bo),Y1=d(Bo),Zh=r(Bo,"P",{});var Zq=a(Zh);Z1=n(Zq,"Example:"),Zq.forEach(t),ew=d(Bo),T(Da.$$.fragment,Bo),Bo.forEach(t),$o.forEach(t),__=d(i),_n=r(i,"H2",{class:!0});var Lv=a(_n);$s=r(Lv,"A",{id:!0,class:!0,href:!0});var ej=a($s);ef=r(ej,"SPAN",{});var tj=a(ef);T(La.$$.fragment,tj),tj.forEach(t),ej.forEach(t),tw=d(Lv),tf=r(Lv,"SPAN",{});var oj=a(tf);ow=n(oj,"BertForNextSentencePrediction"),oj.forEach(t),Lv.forEach(t),v_=d(i),it=r(i,"DIV",{class:!0});var Eo=a(it);T(Oa.$$.fragment,Eo),nw=d(Eo),Sa=r(Eo,"P",{});var Ov=a(Sa);sw=n(Ov,"Bert Model with a "),of=r(Ov,"CODE",{});var nj=a(of);rw=n(nj,"next sentence prediction (classification)"),nj.forEach(t),aw=n(Ov," head on top."),Ov.forEach(t),iw=d(Eo),Ua=r(Eo,"P",{});var Sv=a(Ua);lw=n(Sv,"This model inherits from "),Sc=r(Sv,"A",{href:!0});var sj=a(Sc);dw=n(sj,"PreTrainedModel"),sj.forEach(t),cw=n(Sv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sv.forEach(t),pw=d(Eo),Wa=r(Eo,"P",{});var Uv=a(Wa);hw=n(Uv,"This model is also a PyTorch "),Ha=r(Uv,"A",{href:!0,rel:!0});var rj=a(Ha);fw=n(rj,"torch.nn.Module"),rj.forEach(t),mw=n(Uv,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Uv.forEach(t),uw=d(Eo),gt=r(Eo,"DIV",{class:!0});var Mo=a(gt);T(Ra.$$.fragment,Mo),gw=d(Mo),vn=r(Mo,"P",{});var Pp=a(vn);_w=n(Pp,"The "),Uc=r(Pp,"A",{href:!0});var aj=a(Uc);vw=n(aj,"BertForNextSentencePrediction"),aj.forEach(t),kw=n(Pp," forward method, overrides the "),nf=r(Pp,"CODE",{});var ij=a(nf);Tw=n(ij,"__call__"),ij.forEach(t),yw=n(Pp," special method."),Pp.forEach(t),bw=d(Mo),T(Bs.$$.fragment,Mo),ww=d(Mo),sf=r(Mo,"P",{});var lj=a(sf);Fw=n(lj,"Example:"),lj.forEach(t),xw=d(Mo),T(Qa.$$.fragment,Mo),Mo.forEach(t),Eo.forEach(t),k_=d(i),kn=r(i,"H2",{class:!0});var Wv=a(kn);Es=r(Wv,"A",{id:!0,class:!0,href:!0});var dj=a(Es);rf=r(dj,"SPAN",{});var cj=a(rf);T(Va.$$.fragment,cj),cj.forEach(t),dj.forEach(t),$w=d(Wv),af=r(Wv,"SPAN",{});var pj=a(af);Bw=n(pj,"BertForSequenceClassification"),pj.forEach(t),Wv.forEach(t),T_=d(i),lt=r(i,"DIV",{class:!0});var zo=a(lt);T(Ka.$$.fragment,zo),Ew=d(zo),lf=r(zo,"P",{});var hj=a(lf);Mw=n(hj,`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),hj.forEach(t),zw=d(zo),Ja=r(zo,"P",{});var Hv=a(Ja);Pw=n(Hv,"This model inherits from "),Wc=r(Hv,"A",{href:!0});var fj=a(Wc);qw=n(fj,"PreTrainedModel"),fj.forEach(t),jw=n(Hv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hv.forEach(t),Cw=d(zo),Ga=r(zo,"P",{});var Rv=a(Ga);Nw=n(Rv,"This model is also a PyTorch "),Xa=r(Rv,"A",{href:!0,rel:!0});var mj=a(Xa);Iw=n(mj,"torch.nn.Module"),mj.forEach(t),Aw=n(Rv,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rv.forEach(t),Dw=d(zo),He=r(zo,"DIV",{class:!0});var Lt=a(He);T(Ya.$$.fragment,Lt),Lw=d(Lt),Tn=r(Lt,"P",{});var qp=a(Tn);Ow=n(qp,"The "),Hc=r(qp,"A",{href:!0});var uj=a(Hc);Sw=n(uj,"BertForSequenceClassification"),uj.forEach(t),Uw=n(qp," forward method, overrides the "),df=r(qp,"CODE",{});var gj=a(df);Ww=n(gj,"__call__"),gj.forEach(t),Hw=n(qp," special method."),qp.forEach(t),Rw=d(Lt),T(Ms.$$.fragment,Lt),Qw=d(Lt),cf=r(Lt,"P",{});var _j=a(cf);Vw=n(_j,"Example of single-label classification:"),_j.forEach(t),Kw=d(Lt),T(Za.$$.fragment,Lt),Jw=d(Lt),pf=r(Lt,"P",{});var vj=a(pf);Gw=n(vj,"Example of multi-label classification:"),vj.forEach(t),Xw=d(Lt),T(ei.$$.fragment,Lt),Lt.forEach(t),zo.forEach(t),y_=d(i),yn=r(i,"H2",{class:!0});var Qv=a(yn);zs=r(Qv,"A",{id:!0,class:!0,href:!0});var kj=a(zs);hf=r(kj,"SPAN",{});var Tj=a(hf);T(ti.$$.fragment,Tj),Tj.forEach(t),kj.forEach(t),Yw=d(Qv),ff=r(Qv,"SPAN",{});var yj=a(ff);Zw=n(yj,"BertForMultipleChoice"),yj.forEach(t),Qv.forEach(t),b_=d(i),dt=r(i,"DIV",{class:!0});var Po=a(dt);T(oi.$$.fragment,Po),e0=d(Po),mf=r(Po,"P",{});var bj=a(mf);t0=n(bj,`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),bj.forEach(t),o0=d(Po),ni=r(Po,"P",{});var Vv=a(ni);n0=n(Vv,"This model inherits from "),Rc=r(Vv,"A",{href:!0});var wj=a(Rc);s0=n(wj,"PreTrainedModel"),wj.forEach(t),r0=n(Vv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Vv.forEach(t),a0=d(Po),si=r(Po,"P",{});var Kv=a(si);i0=n(Kv,"This model is also a PyTorch "),ri=r(Kv,"A",{href:!0,rel:!0});var Fj=a(ri);l0=n(Fj,"torch.nn.Module"),Fj.forEach(t),d0=n(Kv,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kv.forEach(t),c0=d(Po),_t=r(Po,"DIV",{class:!0});var qo=a(_t);T(ai.$$.fragment,qo),p0=d(qo),bn=r(qo,"P",{});var jp=a(bn);h0=n(jp,"The "),Qc=r(jp,"A",{href:!0});var xj=a(Qc);f0=n(xj,"BertForMultipleChoice"),xj.forEach(t),m0=n(jp," forward method, overrides the "),uf=r(jp,"CODE",{});var $j=a(uf);u0=n($j,"__call__"),$j.forEach(t),g0=n(jp," special method."),jp.forEach(t),_0=d(qo),T(Ps.$$.fragment,qo),v0=d(qo),gf=r(qo,"P",{});var Bj=a(gf);k0=n(Bj,"Example:"),Bj.forEach(t),T0=d(qo),T(ii.$$.fragment,qo),qo.forEach(t),Po.forEach(t),w_=d(i),wn=r(i,"H2",{class:!0});var Jv=a(wn);qs=r(Jv,"A",{id:!0,class:!0,href:!0});var Ej=a(qs);_f=r(Ej,"SPAN",{});var Mj=a(_f);T(li.$$.fragment,Mj),Mj.forEach(t),Ej.forEach(t),y0=d(Jv),vf=r(Jv,"SPAN",{});var zj=a(vf);b0=n(zj,"BertForTokenClassification"),zj.forEach(t),Jv.forEach(t),F_=d(i),ct=r(i,"DIV",{class:!0});var jo=a(ct);T(di.$$.fragment,jo),w0=d(jo),kf=r(jo,"P",{});var Pj=a(kf);F0=n(Pj,`Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Pj.forEach(t),x0=d(jo),ci=r(jo,"P",{});var Gv=a(ci);$0=n(Gv,"This model inherits from "),Vc=r(Gv,"A",{href:!0});var qj=a(Vc);B0=n(qj,"PreTrainedModel"),qj.forEach(t),E0=n(Gv,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gv.forEach(t),M0=d(jo),pi=r(jo,"P",{});var Xv=a(pi);z0=n(Xv,"This model is also a PyTorch "),hi=r(Xv,"A",{href:!0,rel:!0});var jj=a(hi);P0=n(jj,"torch.nn.Module"),jj.forEach(t),q0=n(Xv,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xv.forEach(t),j0=d(jo),vt=r(jo,"DIV",{class:!0});var Co=a(vt);T(fi.$$.fragment,Co),C0=d(Co),Fn=r(Co,"P",{});var Cp=a(Fn);N0=n(Cp,"The "),Kc=r(Cp,"A",{href:!0});var Cj=a(Kc);I0=n(Cj,"BertForTokenClassification"),Cj.forEach(t),A0=n(Cp," forward method, overrides the "),Tf=r(Cp,"CODE",{});var Nj=a(Tf);D0=n(Nj,"__call__"),Nj.forEach(t),L0=n(Cp," special method."),Cp.forEach(t),O0=d(Co),T(js.$$.fragment,Co),S0=d(Co),yf=r(Co,"P",{});var Ij=a(yf);U0=n(Ij,"Example:"),Ij.forEach(t),W0=d(Co),T(mi.$$.fragment,Co),Co.forEach(t),jo.forEach(t),x_=d(i),xn=r(i,"H2",{class:!0});var Yv=a(xn);Cs=r(Yv,"A",{id:!0,class:!0,href:!0});var Aj=a(Cs);bf=r(Aj,"SPAN",{});var Dj=a(bf);T(ui.$$.fragment,Dj),Dj.forEach(t),Aj.forEach(t),H0=d(Yv),wf=r(Yv,"SPAN",{});var Lj=a(wf);R0=n(Lj,"BertForQuestionAnswering"),Lj.forEach(t),Yv.forEach(t),$_=d(i),pt=r(i,"DIV",{class:!0});var No=a(pt);T(gi.$$.fragment,No),Q0=d(No),$n=r(No,"P",{});var Np=a($n);V0=n(Np,`Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ff=r(Np,"CODE",{});var Oj=a(Ff);K0=n(Oj,"span start logits"),Oj.forEach(t),J0=n(Np," and "),xf=r(Np,"CODE",{});var Sj=a(xf);G0=n(Sj,"span end logits"),Sj.forEach(t),X0=n(Np,")."),Np.forEach(t),Y0=d(No),_i=r(No,"P",{});var Zv=a(_i);Z0=n(Zv,"This model inherits from "),Jc=r(Zv,"A",{href:!0});var Uj=a(Jc);e2=n(Uj,"PreTrainedModel"),Uj.forEach(t),t2=n(Zv,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zv.forEach(t),o2=d(No),vi=r(No,"P",{});var ek=a(vi);n2=n(ek,"This model is also a PyTorch "),ki=r(ek,"A",{href:!0,rel:!0});var Wj=a(ki);s2=n(Wj,"torch.nn.Module"),Wj.forEach(t),r2=n(ek,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ek.forEach(t),a2=d(No),kt=r(No,"DIV",{class:!0});var Io=a(kt);T(Ti.$$.fragment,Io),i2=d(Io),Bn=r(Io,"P",{});var Ip=a(Bn);l2=n(Ip,"The "),Gc=r(Ip,"A",{href:!0});var Hj=a(Gc);d2=n(Hj,"BertForQuestionAnswering"),Hj.forEach(t),c2=n(Ip," forward method, overrides the "),$f=r(Ip,"CODE",{});var Rj=a($f);p2=n(Rj,"__call__"),Rj.forEach(t),h2=n(Ip," special method."),Ip.forEach(t),f2=d(Io),T(Ns.$$.fragment,Io),m2=d(Io),Bf=r(Io,"P",{});var Qj=a(Bf);u2=n(Qj,"Example:"),Qj.forEach(t),g2=d(Io),T(yi.$$.fragment,Io),Io.forEach(t),No.forEach(t),B_=d(i),En=r(i,"H2",{class:!0});var tk=a(En);Is=r(tk,"A",{id:!0,class:!0,href:!0});var Vj=a(Is);Ef=r(Vj,"SPAN",{});var Kj=a(Ef);T(bi.$$.fragment,Kj),Kj.forEach(t),Vj.forEach(t),_2=d(tk),Mf=r(tk,"SPAN",{});var Jj=a(Mf);v2=n(Jj,"TFBertModel"),Jj.forEach(t),tk.forEach(t),E_=d(i),Qe=r(i,"DIV",{class:!0});var Xt=a(Qe);T(wi.$$.fragment,Xt),k2=d(Xt),zf=r(Xt,"P",{});var Gj=a(zf);T2=n(Gj,"The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),Gj.forEach(t),y2=d(Xt),Fi=r(Xt,"P",{});var ok=a(Fi);b2=n(ok,"This model inherits from "),Xc=r(ok,"A",{href:!0});var Xj=a(Xc);w2=n(Xj,"TFPreTrainedModel"),Xj.forEach(t),F2=n(ok,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ok.forEach(t),x2=d(Xt),xi=r(Xt,"P",{});var nk=a(xi);$2=n(nk,"This model is also a "),$i=r(nk,"A",{href:!0,rel:!0});var Yj=a($i);B2=n(Yj,"tf.keras.Model"),Yj.forEach(t),E2=n(nk,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),nk.forEach(t),M2=d(Xt),T(As.$$.fragment,Xt),z2=d(Xt),Tt=r(Xt,"DIV",{class:!0});var Ao=a(Tt);T(Bi.$$.fragment,Ao),P2=d(Ao),Mn=r(Ao,"P",{});var Ap=a(Mn);q2=n(Ap,"The "),Yc=r(Ap,"A",{href:!0});var Zj=a(Yc);j2=n(Zj,"TFBertModel"),Zj.forEach(t),C2=n(Ap," forward method, overrides the "),Pf=r(Ap,"CODE",{});var eC=a(Pf);N2=n(eC,"__call__"),eC.forEach(t),I2=n(Ap," special method."),Ap.forEach(t),A2=d(Ao),T(Ds.$$.fragment,Ao),D2=d(Ao),qf=r(Ao,"P",{});var tC=a(qf);L2=n(tC,"Example:"),tC.forEach(t),O2=d(Ao),T(Ei.$$.fragment,Ao),Ao.forEach(t),Xt.forEach(t),M_=d(i),zn=r(i,"H2",{class:!0});var sk=a(zn);Ls=r(sk,"A",{id:!0,class:!0,href:!0});var oC=a(Ls);jf=r(oC,"SPAN",{});var nC=a(jf);T(Mi.$$.fragment,nC),nC.forEach(t),oC.forEach(t),S2=d(sk),Cf=r(sk,"SPAN",{});var sC=a(Cf);U2=n(sC,"TFBertForPreTraining"),sC.forEach(t),sk.forEach(t),z_=d(i),Ve=r(i,"DIV",{class:!0});var Yt=a(Ve);T(zi.$$.fragment,Yt),W2=d(Yt),Pn=r(Yt,"P",{});var Dp=a(Pn);H2=n(Dp,`Bert Model with two heads on top as done during the pretraining: a `),Nf=r(Dp,"CODE",{});var rC=a(Nf);R2=n(rC,"masked language modeling"),rC.forEach(t),Q2=n(Dp," head and a "),If=r(Dp,"CODE",{});var aC=a(If);V2=n(aC,"next sentence prediction (classification)"),aC.forEach(t),K2=n(Dp," head."),Dp.forEach(t),J2=d(Yt),Pi=r(Yt,"P",{});var rk=a(Pi);G2=n(rk,"This model inherits from "),Zc=r(rk,"A",{href:!0});var iC=a(Zc);X2=n(iC,"TFPreTrainedModel"),iC.forEach(t),Y2=n(rk,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rk.forEach(t),Z2=d(Yt),qi=r(Yt,"P",{});var ak=a(qi);eF=n(ak,"This model is also a "),ji=r(ak,"A",{href:!0,rel:!0});var lC=a(ji);tF=n(lC,"tf.keras.Model"),lC.forEach(t),oF=n(ak,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ak.forEach(t),nF=d(Yt),T(Os.$$.fragment,Yt),sF=d(Yt),yt=r(Yt,"DIV",{class:!0});var Do=a(yt);T(Ci.$$.fragment,Do),rF=d(Do),qn=r(Do,"P",{});var Lp=a(qn);aF=n(Lp,"The "),ep=r(Lp,"A",{href:!0});var dC=a(ep);iF=n(dC,"TFBertForPreTraining"),dC.forEach(t),lF=n(Lp," forward method, overrides the "),Af=r(Lp,"CODE",{});var cC=a(Af);dF=n(cC,"__call__"),cC.forEach(t),cF=n(Lp," special method."),Lp.forEach(t),pF=d(Do),T(Ss.$$.fragment,Do),hF=d(Do),Df=r(Do,"P",{});var pC=a(Df);fF=n(pC,"Examples:"),pC.forEach(t),mF=d(Do),T(Ni.$$.fragment,Do),Do.forEach(t),Yt.forEach(t),P_=d(i),jn=r(i,"H2",{class:!0});var ik=a(jn);Us=r(ik,"A",{id:!0,class:!0,href:!0});var hC=a(Us);Lf=r(hC,"SPAN",{});var fC=a(Lf);T(Ii.$$.fragment,fC),fC.forEach(t),hC.forEach(t),uF=d(ik),Of=r(ik,"SPAN",{});var mC=a(Of);gF=n(mC,"TFBertModelLMHeadModel"),mC.forEach(t),ik.forEach(t),q_=d(i),Ai=r(i,"DIV",{class:!0});var uC=a(Ai);et=r(uC,"DIV",{class:!0});var Zt=a(et);T(Di.$$.fragment,Zt),_F=d(Zt),Ne=r(Zt,"P",{});var tt=a(Ne);vF=n(tt,"encoder_hidden_states ("),Sf=r(tt,"CODE",{});var gC=a(Sf);kF=n(gC,"tf.Tensor"),gC.forEach(t),TF=n(tt," of shape "),Uf=r(tt,"CODE",{});var _C=a(Uf);yF=n(_C,"(batch_size, sequence_length, hidden_size)"),_C.forEach(t),bF=n(tt,", "),Wf=r(tt,"EM",{});var vC=a(Wf);wF=n(vC,"optional"),vC.forEach(t),FF=n(tt,`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`),Hf=r(tt,"CODE",{});var kC=a(Hf);xF=n(kC,"tf.Tensor"),kC.forEach(t),$F=n(tt," of shape "),Rf=r(tt,"CODE",{});var TC=a(Rf);BF=n(TC,"(batch_size, sequence_length)"),TC.forEach(t),EF=n(tt,", "),Qf=r(tt,"EM",{});var yC=a(Qf);MF=n(yC,"optional"),yC.forEach(t),zF=n(tt,`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `),Vf=r(tt,"CODE",{});var bC=a(Vf);PF=n(bC,"[0, 1]"),bC.forEach(t),qF=n(tt,":"),tt.forEach(t),jF=d(Zt),Li=r(Zt,"UL",{});var lk=a(Li);Oi=r(lk,"LI",{});var dk=a(Oi);CF=n(dk,"1 for tokens that are "),Kf=r(dk,"STRONG",{});var wC=a(Kf);NF=n(wC,"not masked"),wC.forEach(t),IF=n(dk,","),dk.forEach(t),AF=d(lk),Si=r(lk,"LI",{});var ck=a(Si);DF=n(ck,"0 for tokens that are "),Jf=r(ck,"STRONG",{});var FC=a(Jf);LF=n(FC,"masked"),FC.forEach(t),OF=n(ck,"."),ck.forEach(t),lk.forEach(t),SF=d(Zt),G=r(Zt,"P",{});var Z=a(G);UF=n(Z,"past_key_values ("),Gf=r(Z,"CODE",{});var xC=a(Gf);WF=n(xC,"Tuple[Tuple[tf.Tensor]]"),xC.forEach(t),HF=n(Z," of length "),Xf=r(Z,"CODE",{});var $C=a(Xf);RF=n($C,"config.n_layers"),$C.forEach(t),QF=n(Z,`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `),Yf=r(Z,"CODE",{});var BC=a(Yf);VF=n(BC,"past_key_values"),BC.forEach(t),KF=n(Z," are used, the user can optionally input only the last "),Zf=r(Z,"CODE",{});var EC=a(Zf);JF=n(EC,"decoder_input_ids"),EC.forEach(t),GF=n(Z,` (those that don\u2019t have their past key value states given to this model) of shape `),em=r(Z,"CODE",{});var MC=a(em);XF=n(MC,"(batch_size, 1)"),MC.forEach(t),YF=n(Z,` instead of all `),tm=r(Z,"CODE",{});var zC=a(tm);ZF=n(zC,"decoder_input_ids"),zC.forEach(t),ex=n(Z," of shape "),om=r(Z,"CODE",{});var PC=a(om);tx=n(PC,"(batch_size, sequence_length)"),PC.forEach(t),ox=n(Z,`. use_cache (`),nm=r(Z,"CODE",{});var qC=a(nm);nx=n(qC,"bool"),qC.forEach(t),sx=n(Z,", "),sm=r(Z,"EM",{});var jC=a(sm);rx=n(jC,"optional"),jC.forEach(t),ax=n(Z,", defaults to "),rm=r(Z,"CODE",{});var CC=a(rm);ix=n(CC,"True"),CC.forEach(t),lx=n(Z,`): If set to `),am=r(Z,"CODE",{});var NC=a(am);dx=n(NC,"True"),NC.forEach(t),cx=n(Z,", "),im=r(Z,"CODE",{});var IC=a(im);px=n(IC,"past_key_values"),IC.forEach(t),hx=n(Z,` key value states are returned and can be used to speed up decoding (see `),lm=r(Z,"CODE",{});var AC=a(lm);fx=n(AC,"past_key_values"),AC.forEach(t),mx=n(Z,"). Set to "),dm=r(Z,"CODE",{});var DC=a(dm);ux=n(DC,"False"),DC.forEach(t),gx=n(Z," during training, "),cm=r(Z,"CODE",{});var LC=a(cm);_x=n(LC,"True"),LC.forEach(t),vx=n(Z,` during generation labels (`),pm=r(Z,"CODE",{});var OC=a(pm);kx=n(OC,"tf.Tensor"),OC.forEach(t),Tx=n(Z," or "),hm=r(Z,"CODE",{});var SC=a(hm);yx=n(SC,"np.ndarray"),SC.forEach(t),bx=n(Z," of shape "),fm=r(Z,"CODE",{});var UC=a(fm);wx=n(UC,"(batch_size, sequence_length)"),UC.forEach(t),Fx=n(Z,", "),mm=r(Z,"EM",{});var WC=a(mm);xx=n(WC,"optional"),WC.forEach(t),$x=n(Z,`): Labels for computing the cross entropy classification loss. Indices should be in `),um=r(Z,"CODE",{});var HC=a(um);Bx=n(HC,"[0, ..., config.vocab_size - 1]"),HC.forEach(t),Ex=n(Z,"."),Z.forEach(t),Mx=d(Zt),gm=r(Zt,"P",{});var RC=a(gm);zx=n(RC,"Example:"),RC.forEach(t),Px=d(Zt),T(Ui.$$.fragment,Zt),Zt.forEach(t),uC.forEach(t),j_=d(i),Cn=r(i,"H2",{class:!0});var pk=a(Cn);Ws=r(pk,"A",{id:!0,class:!0,href:!0});var QC=a(Ws);_m=r(QC,"SPAN",{});var VC=a(_m);T(Wi.$$.fragment,VC),VC.forEach(t),QC.forEach(t),qx=d(pk),vm=r(pk,"SPAN",{});var KC=a(vm);jx=n(KC,"TFBertForMaskedLM"),KC.forEach(t),pk.forEach(t),C_=d(i),Ke=r(i,"DIV",{class:!0});var eo=a(Ke);T(Hi.$$.fragment,eo),Cx=d(eo),Ri=r(eo,"P",{});var hk=a(Ri);Nx=n(hk,"Bert Model with a "),km=r(hk,"CODE",{});var JC=a(km);Ix=n(JC,"language modeling"),JC.forEach(t),Ax=n(hk," head on top."),hk.forEach(t),Dx=d(eo),Qi=r(eo,"P",{});var fk=a(Qi);Lx=n(fk,"This model inherits from "),tp=r(fk,"A",{href:!0});var GC=a(tp);Ox=n(GC,"TFPreTrainedModel"),GC.forEach(t),Sx=n(fk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fk.forEach(t),Ux=d(eo),Vi=r(eo,"P",{});var mk=a(Vi);Wx=n(mk,"This model is also a "),Ki=r(mk,"A",{href:!0,rel:!0});var XC=a(Ki);Hx=n(XC,"tf.keras.Model"),XC.forEach(t),Rx=n(mk,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),mk.forEach(t),Qx=d(eo),T(Hs.$$.fragment,eo),Vx=d(eo),bt=r(eo,"DIV",{class:!0});var Lo=a(bt);T(Ji.$$.fragment,Lo),Kx=d(Lo),Nn=r(Lo,"P",{});var Op=a(Nn);Jx=n(Op,"The "),op=r(Op,"A",{href:!0});var YC=a(op);Gx=n(YC,"TFBertForMaskedLM"),YC.forEach(t),Xx=n(Op," forward method, overrides the "),Tm=r(Op,"CODE",{});var ZC=a(Tm);Yx=n(ZC,"__call__"),ZC.forEach(t),Zx=n(Op," special method."),Op.forEach(t),e$=d(Lo),T(Rs.$$.fragment,Lo),t$=d(Lo),ym=r(Lo,"P",{});var e3=a(ym);o$=n(e3,"Example:"),e3.forEach(t),n$=d(Lo),T(Gi.$$.fragment,Lo),Lo.forEach(t),eo.forEach(t),N_=d(i),In=r(i,"H2",{class:!0});var uk=a(In);Qs=r(uk,"A",{id:!0,class:!0,href:!0});var t3=a(Qs);bm=r(t3,"SPAN",{});var o3=a(bm);T(Xi.$$.fragment,o3),o3.forEach(t),t3.forEach(t),s$=d(uk),wm=r(uk,"SPAN",{});var n3=a(wm);r$=n(n3,"TFBertForNextSentencePrediction"),n3.forEach(t),uk.forEach(t),I_=d(i),Je=r(i,"DIV",{class:!0});var to=a(Je);T(Yi.$$.fragment,to),a$=d(to),Zi=r(to,"P",{});var gk=a(Zi);i$=n(gk,"Bert Model with a "),Fm=r(gk,"CODE",{});var s3=a(Fm);l$=n(s3,"next sentence prediction (classification)"),s3.forEach(t),d$=n(gk," head on top."),gk.forEach(t),c$=d(to),el=r(to,"P",{});var _k=a(el);p$=n(_k,"This model inherits from "),np=r(_k,"A",{href:!0});var r3=a(np);h$=n(r3,"TFPreTrainedModel"),r3.forEach(t),f$=n(_k,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_k.forEach(t),m$=d(to),tl=r(to,"P",{});var vk=a(tl);u$=n(vk,"This model is also a "),ol=r(vk,"A",{href:!0,rel:!0});var a3=a(ol);g$=n(a3,"tf.keras.Model"),a3.forEach(t),_$=n(vk,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vk.forEach(t),v$=d(to),T(Vs.$$.fragment,to),k$=d(to),wt=r(to,"DIV",{class:!0});var Oo=a(wt);T(nl.$$.fragment,Oo),T$=d(Oo),An=r(Oo,"P",{});var Sp=a(An);y$=n(Sp,"The "),sp=r(Sp,"A",{href:!0});var i3=a(sp);b$=n(i3,"TFBertForNextSentencePrediction"),i3.forEach(t),w$=n(Sp," forward method, overrides the "),xm=r(Sp,"CODE",{});var l3=a(xm);F$=n(l3,"__call__"),l3.forEach(t),x$=n(Sp," special method."),Sp.forEach(t),$$=d(Oo),T(Ks.$$.fragment,Oo),B$=d(Oo),$m=r(Oo,"P",{});var d3=a($m);E$=n(d3,"Examples:"),d3.forEach(t),M$=d(Oo),T(sl.$$.fragment,Oo),Oo.forEach(t),to.forEach(t),A_=d(i),Dn=r(i,"H2",{class:!0});var kk=a(Dn);Js=r(kk,"A",{id:!0,class:!0,href:!0});var c3=a(Js);Bm=r(c3,"SPAN",{});var p3=a(Bm);T(rl.$$.fragment,p3),p3.forEach(t),c3.forEach(t),z$=d(kk),Em=r(kk,"SPAN",{});var h3=a(Em);P$=n(h3,"TFBertForSequenceClassification"),h3.forEach(t),kk.forEach(t),D_=d(i),Ge=r(i,"DIV",{class:!0});var oo=a(Ge);T(al.$$.fragment,oo),q$=d(oo),Mm=r(oo,"P",{});var f3=a(Mm);j$=n(f3,`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),f3.forEach(t),C$=d(oo),il=r(oo,"P",{});var Tk=a(il);N$=n(Tk,"This model inherits from "),rp=r(Tk,"A",{href:!0});var m3=a(rp);I$=n(m3,"TFPreTrainedModel"),m3.forEach(t),A$=n(Tk,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tk.forEach(t),D$=d(oo),ll=r(oo,"P",{});var yk=a(ll);L$=n(yk,"This model is also a "),dl=r(yk,"A",{href:!0,rel:!0});var u3=a(dl);O$=n(u3,"tf.keras.Model"),u3.forEach(t),S$=n(yk,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yk.forEach(t),U$=d(oo),T(Gs.$$.fragment,oo),W$=d(oo),Ft=r(oo,"DIV",{class:!0});var So=a(Ft);T(cl.$$.fragment,So),H$=d(So),Ln=r(So,"P",{});var Up=a(Ln);R$=n(Up,"The "),ap=r(Up,"A",{href:!0});var g3=a(ap);Q$=n(g3,"TFBertForSequenceClassification"),g3.forEach(t),V$=n(Up," forward method, overrides the "),zm=r(Up,"CODE",{});var _3=a(zm);K$=n(_3,"__call__"),_3.forEach(t),J$=n(Up," special method."),Up.forEach(t),G$=d(So),T(Xs.$$.fragment,So),X$=d(So),Pm=r(So,"P",{});var v3=a(Pm);Y$=n(v3,"Example:"),v3.forEach(t),Z$=d(So),T(pl.$$.fragment,So),So.forEach(t),oo.forEach(t),L_=d(i),On=r(i,"H2",{class:!0});var bk=a(On);Ys=r(bk,"A",{id:!0,class:!0,href:!0});var k3=a(Ys);qm=r(k3,"SPAN",{});var T3=a(qm);T(hl.$$.fragment,T3),T3.forEach(t),k3.forEach(t),e4=d(bk),jm=r(bk,"SPAN",{});var y3=a(jm);t4=n(y3,"TFBertForMultipleChoice"),y3.forEach(t),bk.forEach(t),O_=d(i),Xe=r(i,"DIV",{class:!0});var no=a(Xe);T(fl.$$.fragment,no),o4=d(no),Cm=r(no,"P",{});var b3=a(Cm);n4=n(b3,`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),b3.forEach(t),s4=d(no),ml=r(no,"P",{});var wk=a(ml);r4=n(wk,"This model inherits from "),ip=r(wk,"A",{href:!0});var w3=a(ip);a4=n(w3,"TFPreTrainedModel"),w3.forEach(t),i4=n(wk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wk.forEach(t),l4=d(no),ul=r(no,"P",{});var Fk=a(ul);d4=n(Fk,"This model is also a "),gl=r(Fk,"A",{href:!0,rel:!0});var F3=a(gl);c4=n(F3,"tf.keras.Model"),F3.forEach(t),p4=n(Fk,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Fk.forEach(t),h4=d(no),T(Zs.$$.fragment,no),f4=d(no),xt=r(no,"DIV",{class:!0});var Uo=a(xt);T(_l.$$.fragment,Uo),m4=d(Uo),Sn=r(Uo,"P",{});var Wp=a(Sn);u4=n(Wp,"The "),lp=r(Wp,"A",{href:!0});var x3=a(lp);g4=n(x3,"TFBertForMultipleChoice"),x3.forEach(t),_4=n(Wp," forward method, overrides the "),Nm=r(Wp,"CODE",{});var $3=a(Nm);v4=n($3,"__call__"),$3.forEach(t),k4=n(Wp," special method."),Wp.forEach(t),T4=d(Uo),T(er.$$.fragment,Uo),y4=d(Uo),Im=r(Uo,"P",{});var B3=a(Im);b4=n(B3,"Example:"),B3.forEach(t),w4=d(Uo),T(vl.$$.fragment,Uo),Uo.forEach(t),no.forEach(t),S_=d(i),Un=r(i,"H2",{class:!0});var xk=a(Un);tr=r(xk,"A",{id:!0,class:!0,href:!0});var E3=a(tr);Am=r(E3,"SPAN",{});var M3=a(Am);T(kl.$$.fragment,M3),M3.forEach(t),E3.forEach(t),F4=d(xk),Dm=r(xk,"SPAN",{});var z3=a(Dm);x4=n(z3,"TFBertForTokenClassification"),z3.forEach(t),xk.forEach(t),U_=d(i),Ye=r(i,"DIV",{class:!0});var so=a(Ye);T(Tl.$$.fragment,so),$4=d(so),Lm=r(so,"P",{});var P3=a(Lm);B4=n(P3,`Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),P3.forEach(t),E4=d(so),yl=r(so,"P",{});var $k=a(yl);M4=n($k,"This model inherits from "),dp=r($k,"A",{href:!0});var q3=a(dp);z4=n(q3,"TFPreTrainedModel"),q3.forEach(t),P4=n($k,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$k.forEach(t),q4=d(so),bl=r(so,"P",{});var Bk=a(bl);j4=n(Bk,"This model is also a "),wl=r(Bk,"A",{href:!0,rel:!0});var j3=a(wl);C4=n(j3,"tf.keras.Model"),j3.forEach(t),N4=n(Bk,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Bk.forEach(t),I4=d(so),T(or.$$.fragment,so),A4=d(so),$t=r(so,"DIV",{class:!0});var Wo=a($t);T(Fl.$$.fragment,Wo),D4=d(Wo),Wn=r(Wo,"P",{});var Hp=a(Wn);L4=n(Hp,"The "),cp=r(Hp,"A",{href:!0});var C3=a(cp);O4=n(C3,"TFBertForTokenClassification"),C3.forEach(t),S4=n(Hp," forward method, overrides the "),Om=r(Hp,"CODE",{});var N3=a(Om);U4=n(N3,"__call__"),N3.forEach(t),W4=n(Hp," special method."),Hp.forEach(t),H4=d(Wo),T(nr.$$.fragment,Wo),R4=d(Wo),Sm=r(Wo,"P",{});var I3=a(Sm);Q4=n(I3,"Example:"),I3.forEach(t),V4=d(Wo),T(xl.$$.fragment,Wo),Wo.forEach(t),so.forEach(t),W_=d(i),Hn=r(i,"H2",{class:!0});var Ek=a(Hn);sr=r(Ek,"A",{id:!0,class:!0,href:!0});var A3=a(sr);Um=r(A3,"SPAN",{});var D3=a(Um);T($l.$$.fragment,D3),D3.forEach(t),A3.forEach(t),K4=d(Ek),Wm=r(Ek,"SPAN",{});var L3=a(Wm);J4=n(L3,"TFBertForQuestionAnswering"),L3.forEach(t),Ek.forEach(t),H_=d(i),Ze=r(i,"DIV",{class:!0});var ro=a(Ze);T(Bl.$$.fragment,ro),G4=d(ro),Rn=r(ro,"P",{});var Rp=a(Rn);X4=n(Rp,`Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Hm=r(Rp,"CODE",{});var O3=a(Hm);Y4=n(O3,"span start logits"),O3.forEach(t),Z4=n(Rp," and "),Rm=r(Rp,"CODE",{});var S3=a(Rm);eB=n(S3,"span end logits"),S3.forEach(t),tB=n(Rp,")."),Rp.forEach(t),oB=d(ro),El=r(ro,"P",{});var Mk=a(El);nB=n(Mk,"This model inherits from "),pp=r(Mk,"A",{href:!0});var U3=a(pp);sB=n(U3,"TFPreTrainedModel"),U3.forEach(t),rB=n(Mk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mk.forEach(t),aB=d(ro),Ml=r(ro,"P",{});var zk=a(Ml);iB=n(zk,"This model is also a "),zl=r(zk,"A",{href:!0,rel:!0});var W3=a(zl);lB=n(W3,"tf.keras.Model"),W3.forEach(t),dB=n(zk,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),zk.forEach(t),cB=d(ro),T(rr.$$.fragment,ro),pB=d(ro),Bt=r(ro,"DIV",{class:!0});var Ho=a(Bt);T(Pl.$$.fragment,Ho),hB=d(Ho),Qn=r(Ho,"P",{});var Qp=a(Qn);fB=n(Qp,"The "),hp=r(Qp,"A",{href:!0});var H3=a(hp);mB=n(H3,"TFBertForQuestionAnswering"),H3.forEach(t),uB=n(Qp," forward method, overrides the "),Qm=r(Qp,"CODE",{});var R3=a(Qm);gB=n(R3,"__call__"),R3.forEach(t),_B=n(Qp," special method."),Qp.forEach(t),vB=d(Ho),T(ar.$$.fragment,Ho),kB=d(Ho),Vm=r(Ho,"P",{});var Q3=a(Vm);TB=n(Q3,"Example:"),Q3.forEach(t),yB=d(Ho),T(ql.$$.fragment,Ho),Ho.forEach(t),ro.forEach(t),R_=d(i),Vn=r(i,"H2",{class:!0});var Pk=a(Vn);ir=r(Pk,"A",{id:!0,class:!0,href:!0});var V3=a(ir);Km=r(V3,"SPAN",{});var K3=a(Km);T(jl.$$.fragment,K3),K3.forEach(t),V3.forEach(t),bB=d(Pk),Jm=r(Pk,"SPAN",{});var J3=a(Jm);wB=n(J3,"FlaxBertModel"),J3.forEach(t),Pk.forEach(t),Q_=d(i),Ie=r(i,"DIV",{class:!0});var Ot=a(Ie);T(Cl.$$.fragment,Ot),FB=d(Ot),Gm=r(Ot,"P",{});var G3=a(Gm);xB=n(G3,"The bare Bert Model transformer outputting raw hidden-states without any specific head on top."),G3.forEach(t),$B=d(Ot),Nl=r(Ot,"P",{});var qk=a(Nl);BB=n(qk,"This model inherits from "),fp=r(qk,"A",{href:!0});var X3=a(fp);EB=n(X3,"FlaxPreTrainedModel"),X3.forEach(t),MB=n(qk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),qk.forEach(t),zB=d(Ot),Il=r(Ot,"P",{});var jk=a(Il);PB=n(jk,"This model is also a Flax Linen "),Al=r(jk,"A",{href:!0,rel:!0});var Y3=a(Al);qB=n(Y3,"flax.linen.Module"),Y3.forEach(t),jB=n(jk,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),jk.forEach(t),CB=d(Ot),Xm=r(Ot,"P",{});var Z3=a(Xm);NB=n(Z3,"Finally, this model supports inherent JAX features such as:"),Z3.forEach(t),IB=d(Ot),lo=r(Ot,"UL",{});var xr=a(lo);Ym=r(xr,"LI",{});var e7=a(Ym);Dl=r(e7,"A",{href:!0,rel:!0});var t7=a(Dl);AB=n(t7,"Just-In-Time (JIT) compilation"),t7.forEach(t),e7.forEach(t),DB=d(xr),Zm=r(xr,"LI",{});var o7=a(Zm);Ll=r(o7,"A",{href:!0,rel:!0});var n7=a(Ll);LB=n(n7,"Automatic Differentiation"),n7.forEach(t),o7.forEach(t),OB=d(xr),eu=r(xr,"LI",{});var s7=a(eu);Ol=r(s7,"A",{href:!0,rel:!0});var r7=a(Ol);SB=n(r7,"Vectorization"),r7.forEach(t),s7.forEach(t),UB=d(xr),tu=r(xr,"LI",{});var a7=a(tu);Sl=r(a7,"A",{href:!0,rel:!0});var i7=a(Sl);WB=n(i7,"Parallelization"),i7.forEach(t),a7.forEach(t),xr.forEach(t),HB=d(Ot),Et=r(Ot,"DIV",{class:!0});var Ro=a(Et);T(Ul.$$.fragment,Ro),RB=d(Ro),Kn=r(Ro,"P",{});var Vp=a(Kn);QB=n(Vp,"The "),ou=r(Vp,"CODE",{});var l7=a(ou);VB=n(l7,"FlaxBertPreTrainedModel"),l7.forEach(t),KB=n(Vp," forward method, overrides the "),nu=r(Vp,"CODE",{});var d7=a(nu);JB=n(d7,"__call__"),d7.forEach(t),GB=n(Vp," special method."),Vp.forEach(t),XB=d(Ro),T(lr.$$.fragment,Ro),YB=d(Ro),su=r(Ro,"P",{});var c7=a(su);ZB=n(c7,"Example:"),c7.forEach(t),eE=d(Ro),T(Wl.$$.fragment,Ro),Ro.forEach(t),Ot.forEach(t),V_=d(i),Jn=r(i,"H2",{class:!0});var Ck=a(Jn);dr=r(Ck,"A",{id:!0,class:!0,href:!0});var p7=a(dr);ru=r(p7,"SPAN",{});var h7=a(ru);T(Hl.$$.fragment,h7),h7.forEach(t),p7.forEach(t),tE=d(Ck),au=r(Ck,"SPAN",{});var f7=a(au);oE=n(f7,"FlaxBertForPreTraining"),f7.forEach(t),Ck.forEach(t),K_=d(i),Ae=r(i,"DIV",{class:!0});var St=a(Ae);T(Rl.$$.fragment,St),nE=d(St),Gn=r(St,"P",{});var Kp=a(Gn);sE=n(Kp,"Bert Model with two 
heads on top as done during the pretraining: a "),iu=r(Kp,"CODE",{});var m7=a(iu);rE=n(m7,"masked language modeling"),m7.forEach(t),aE=n(Kp," head and a "),lu=r(Kp,"CODE",{});var u7=a(lu);iE=n(u7,"next sentence prediction (classification)"),u7.forEach(t),lE=n(Kp," head."),Kp.forEach(t),dE=d(St),Ql=r(St,"P",{});var Nk=a(Ql);cE=n(Nk,"This model inherits from "),mp=r(Nk,"A",{href:!0});var g7=a(mp);pE=n(g7,"FlaxPreTrainedModel"),g7.forEach(t),hE=n(Nk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Nk.forEach(t),fE=d(St),Vl=r(St,"P",{});var Ik=a(Vl);mE=n(Ik,"This model is also a Flax Linen "),Kl=r(Ik,"A",{href:!0,rel:!0});var _7=a(Kl);uE=n(_7,"flax.linen.Module"),_7.forEach(t),gE=n(Ik,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ik.forEach(t),_E=d(St),du=r(St,"P",{});var v7=a(du);vE=n(v7,"Finally, this model supports inherent JAX features such as:"),v7.forEach(t),kE=d(St),co=r(St,"UL",{});var $r=a(co);cu=r($r,"LI",{});var k7=a(cu);Jl=r(k7,"A",{href:!0,rel:!0});var T7=a(Jl);TE=n(T7,"Just-In-Time (JIT) compilation"),T7.forEach(t),k7.forEach(t),yE=d($r),pu=r($r,"LI",{});var y7=a(pu);Gl=r(y7,"A",{href:!0,rel:!0});var b7=a(Gl);bE=n(b7,"Automatic Differentiation"),b7.forEach(t),y7.forEach(t),wE=d($r),hu=r($r,"LI",{});var w7=a(hu);Xl=r(w7,"A",{href:!0,rel:!0});var F7=a(Xl);FE=n(F7,"Vectorization"),F7.forEach(t),w7.forEach(t),xE=d($r),fu=r($r,"LI",{});var x7=a(fu);Yl=r(x7,"A",{href:!0,rel:!0});var $7=a(Yl);$E=n($7,"Parallelization"),$7.forEach(t),x7.forEach(t),$r.forEach(t),BE=d(St),Mt=r(St,"DIV",{class:!0});var Qo=a(Mt);T(Zl.$$.fragment,Qo),EE=d(Qo),Xn=r(Qo,"P",{});var Jp=a(Xn);ME=n(Jp,"The "),mu=r(Jp,"CODE",{});var B7=a(mu);zE=n(B7,"FlaxBertPreTrainedModel"),B7.forEach(t),PE=n(Jp," forward method, overrides the "),uu=r(Jp,"CODE",{});var E7=a(uu);qE=n(E7,"__call__"),E7.forEach(t),jE=n(Jp," special method."),Jp.forEach(t),CE=d(Qo),T(cr.$$.fragment,Qo),NE=d(Qo),gu=r(Qo,"P",{});var M7=a(gu);IE=n(M7,"Example:"),M7.forEach(t),AE=d(Qo),T(ed.$$.fragment,Qo),Qo.forEach(t),St.forEach(t),J_=d(i),Yn=r(i,"H2",{class:!0});var Ak=a(Yn);pr=r(Ak,"A",{id:!0,class:!0,href:!0});var z7=a(pr);_u=r(z7,"SPAN",{});var P7=a(_u);T(td.$$.fragment,P7),P7.forEach(t),z7.forEach(t),DE=d(Ak),vu=r(Ak,"SPAN",{});var q7=a(vu);LE=n(q7,"FlaxBertForMaskedLM"),q7.forEach(t),Ak.forEach(t),G_=d(i),De=r(i,"DIV",{class:!0});var Ut=a(De);T(od.$$.fragment,Ut),OE=d(Ut),nd=r(Ut,"P",{});var Dk=a(nd);SE=n(Dk,"Bert Model with a "),ku=r(Dk,"CODE",{});var j7=a(ku);UE=n(j7,"language modeling"),j7.forEach(t),WE=n(Dk," head on top."),Dk.forEach(t),HE=d(Ut),sd=r(Ut,"P",{});var Lk=a(sd);RE=n(Lk,"This model inherits from "),up=r(Lk,"A",{href:!0});var C7=a(up);QE=n(C7,"FlaxPreTrainedModel"),C7.forEach(t),VE=n(Lk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Lk.forEach(t),KE=d(Ut),rd=r(Ut,"P",{});var Ok=a(rd);JE=n(Ok,"This model is also a Flax Linen "),ad=r(Ok,"A",{href:!0,rel:!0});var N7=a(ad);GE=n(N7,"flax.linen.Module"),N7.forEach(t),XE=n(Ok,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ok.forEach(t),YE=d(Ut),Tu=r(Ut,"P",{});var I7=a(Tu);ZE=n(I7,"Finally, this model supports inherent JAX features such as:"),I7.forEach(t),eM=d(Ut),po=r(Ut,"UL",{});var Br=a(po);yu=r(Br,"LI",{});var A7=a(yu);id=r(A7,"A",{href:!0,rel:!0});var D7=a(id);tM=n(D7,"Just-In-Time (JIT) compilation"),D7.forEach(t),A7.forEach(t),oM=d(Br),bu=r(Br,"LI",{});var L7=a(bu);ld=r(L7,"A",{href:!0,rel:!0});var O7=a(ld);nM=n(O7,"Automatic Differentiation"),O7.forEach(t),L7.forEach(t),sM=d(Br),wu=r(Br,"LI",{});var S7=a(wu);dd=r(S7,"A",{href:!0,rel:!0});var U7=a(dd);rM=n(U7,"Vectorization"),U7.forEach(t),S7.forEach(t),aM=d(Br),Fu=r(Br,"LI",{});var W7=a(Fu);cd=r(W7,"A",{href:!0,rel:!0});var H7=a(cd);iM=n(H7,"Parallelization"),H7.forEach(t),W7.forEach(t),Br.forEach(t),lM=d(Ut),zt=r(Ut,"DIV",{class:!0});var Vo=a(zt);T(pd.$$.fragment,Vo),dM=d(Vo),Zn=r(Vo,"P",{});var Gp=a(Zn);cM=n(Gp,"The "),xu=r(Gp,"CODE",{});var R7=a(xu);pM=n(R7,"FlaxBertPreTrainedModel"),R7.forEach(t),hM=n(Gp," forward method, overrides the "),$u=r(Gp,"CODE",{});var Q7=a($u);fM=n(Q7,"__call__"),Q7.forEach(t),mM=n(Gp," special method."),Gp.forEach(t),uM=d(Vo),T(hr.$$.fragment,Vo),gM=d(Vo),Bu=r(Vo,"P",{});var V7=a(Bu);_M=n(V7,"Example:"),V7.forEach(t),vM=d(Vo),T(hd.$$.fragment,Vo),Vo.forEach(t),Ut.forEach(t),X_=d(i),es=r(i,"H2",{class:!0});var Sk=a(es);fr=r(Sk,"A",{id:!0,class:!0,href:!0});var K7=a(fr);Eu=r(K7,"SPAN",{});var J7=a(Eu);T(fd.$$.fragment,J7),J7.forEach(t),K7.forEach(t),kM=d(Sk),Mu=r(Sk,"SPAN",{});var G7=a(Mu);TM=n(G7,"FlaxBertForNextSentencePrediction"),G7.forEach(t),Sk.forEach(t),Y_=d(i),Le=r(i,"DIV",{class:!0});var Wt=a(Le);T(md.$$.fragment,Wt),yM=d(Wt),ud=r(Wt,"P",{});var Uk=a(ud);bM=n(Uk,"Bert Model with a "),zu=r(Uk,"CODE",{});var X7=a(zu);wM=n(X7,"next sentence prediction (classification)"),X7.forEach(t),FM=n(Uk," head on top."),Uk.forEach(t),xM=d(Wt),gd=r(Wt,"P",{});var Wk=a(gd);$M=n(Wk,"This model inherits from "),gp=r(Wk,"A",{href:!0});var Y7=a(gp);BM=n(Y7,"FlaxPreTrainedModel"),Y7.forEach(t),EM=n(Wk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Wk.forEach(t),MM=d(Wt),_d=r(Wt,"P",{});var Hk=a(_d);zM=n(Hk,"This model is also a Flax Linen "),vd=r(Hk,"A",{href:!0,rel:!0});var Z7=a(vd);PM=n(Z7,"flax.linen.Module"),Z7.forEach(t),qM=n(Hk,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Hk.forEach(t),jM=d(Wt),Pu=r(Wt,"P",{});var e6=a(Pu);CM=n(e6,"Finally, this model supports inherent JAX features such as:"),e6.forEach(t),NM=d(Wt),ho=r(Wt,"UL",{});var Er=a(ho);qu=r(Er,"LI",{});var t6=a(qu);kd=r(t6,"A",{href:!0,rel:!0});var o6=a(kd);IM=n(o6,"Just-In-Time (JIT) compilation"),o6.forEach(t),t6.forEach(t),AM=d(Er),ju=r(Er,"LI",{});var n6=a(ju);Td=r(n6,"A",{href:!0,rel:!0});var s6=a(Td);DM=n(s6,"Automatic Differentiation"),s6.forEach(t),n6.forEach(t),LM=d(Er),Cu=r(Er,"LI",{});var r6=a(Cu);yd=r(r6,"A",{href:!0,rel:!0});var a6=a(yd);OM=n(a6,"Vectorization"),a6.forEach(t),r6.forEach(t),SM=d(Er),Nu=r(Er,"LI",{});var i6=a(Nu);bd=r(i6,"A",{href:!0,rel:!0});var l6=a(bd);UM=n(l6,"Parallelization"),l6.forEach(t),i6.forEach(t),Er.forEach(t),WM=d(Wt),Pt=r(Wt,"DIV",{class:!0});var Ko=a(Pt);T(wd.$$.fragment,Ko),HM=d(Ko),ts=r(Ko,"P",{});var Xp=a(ts);RM=n(Xp,"The "),Iu=r(Xp,"CODE",{});var d6=a(Iu);QM=n(d6,"FlaxBertPreTrainedModel"),d6.forEach(t),VM=n(Xp," forward method, overrides the "),Au=r(Xp,"CODE",{});var c6=a(Au);KM=n(c6,"__call__"),c6.forEach(t),JM=n(Xp," special method."),Xp.forEach(t),GM=d(Ko),T(mr.$$.fragment,Ko),XM=d(Ko),Du=r(Ko,"P",{});var p6=a(Du);YM=n(p6,"Example:"),p6.forEach(t),ZM=d(Ko),T(Fd.$$.fragment,Ko),Ko.forEach(t),Wt.forEach(t),Z_=d(i),os=r(i,"H2",{class:!0});var Rk=a(os);ur=r(Rk,"A",{id:!0,class:!0,href:!0});var h6=a(ur);Lu=r(h6,"SPAN",{});var f6=a(Lu);T(xd.$$.fragment,f6),f6.forEach(t),h6.forEach(t),ez=d(Rk),Ou=r(Rk,"SPAN",{});var m6=a(Ou);tz=n(m6,"FlaxBertForSequenceClassification"),m6.forEach(t),Rk.forEach(t),ev=d(i),Oe=r(i,"DIV",{class:!0});var Ht=a(Oe);T($d.$$.fragment,Ht),oz=d(Ht),Su=r(Ht,"P",{});var u6=a(Su);nz=n(u6,`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),u6.forEach(t),sz=d(Ht),Bd=r(Ht,"P",{});var Qk=a(Bd);rz=n(Qk,"This model inherits from "),_p=r(Qk,"A",{href:!0});var g6=a(_p);az=n(g6,"FlaxPreTrainedModel"),g6.forEach(t),iz=n(Qk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Qk.forEach(t),lz=d(Ht),Ed=r(Ht,"P",{});var Vk=a(Ed);dz=n(Vk,"This model is also a Flax Linen "),Md=r(Vk,"A",{href:!0,rel:!0});var _6=a(Md);cz=n(_6,"flax.linen.Module"),_6.forEach(t),pz=n(Vk,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Vk.forEach(t),hz=d(Ht),Uu=r(Ht,"P",{});var v6=a(Uu);fz=n(v6,"Finally, this model supports inherent JAX features such as:"),v6.forEach(t),mz=d(Ht),fo=r(Ht,"UL",{});var Mr=a(fo);Wu=r(Mr,"LI",{});var k6=a(Wu);zd=r(k6,"A",{href:!0,rel:!0});var T6=a(zd);uz=n(T6,"Just-In-Time (JIT) compilation"),T6.forEach(t),k6.forEach(t),gz=d(Mr),Hu=r(Mr,"LI",{});var y6=a(Hu);Pd=r(y6,"A",{href:!0,rel:!0});var b6=a(Pd);_z=n(b6,"Automatic Differentiation"),b6.forEach(t),y6.forEach(t),vz=d(Mr),Ru=r(Mr,"LI",{});var w6=a(Ru);qd=r(w6,"A",{href:!0,rel:!0});var F6=a(qd);kz=n(F6,"Vectorization"),F6.forEach(t),w6.forEach(t),Tz=d(Mr),Qu=r(Mr,"LI",{});var x6=a(Qu);jd=r(x6,"A",{href:!0,rel:!0});var $6=a(jd);yz=n($6,"Parallelization"),$6.forEach(t),x6.forEach(t),Mr.forEach(t),bz=d(Ht),qt=r(Ht,"DIV",{class:!0});var Jo=a(qt);T(Cd.$$.fragment,Jo),wz=d(Jo),ns=r(Jo,"P",{});var Yp=a(ns);Fz=n(Yp,"The "),Vu=r(Yp,"CODE",{});var B6=a(Vu);xz=n(B6,"FlaxBertPreTrainedModel"),B6.forEach(t),$z=n(Yp," forward method, overrides the "),Ku=r(Yp,"CODE",{});var E6=a(Ku);Bz=n(E6,"__call__"),E6.forEach(t),Ez=n(Yp," special method."),Yp.forEach(t),Mz=d(Jo),T(gr.$$.fragment,Jo),zz=d(Jo),Ju=r(Jo,"P",{});var M6=a(Ju);Pz=n(M6,"Example:"),M6.forEach(t),qz=d(Jo),T(Nd.$$.fragment,Jo),Jo.forEach(t),Ht.forEach(t),tv=d(i),ss=r(i,"H2",{class:!0});var Kk=a(ss);_r=r(Kk,"A",{id:!0,class:!0,href:!0});var z6=a(_r);Gu=r(z6,"SPAN",{});var P6=a(Gu);T(Id.$$.fragment,P6),P6.forEach(t),z6.forEach(t),jz=d(Kk),Xu=r(Kk,"SPAN",{});var q6=a(Xu);Cz=n(q6,"FlaxBertForMultipleChoice"),q6.forEach(t),Kk.forEach(t),ov=d(i),Se=r(i,"DIV",{class:!0});var Rt=a(Se);T(Ad.$$.fragment,Rt),Nz=d(Rt),Yu=r(Rt,"P",{});var j6=a(Yu);Iz=n(j6,`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),j6.forEach(t),Az=d(Rt),Dd=r(Rt,"P",{});var Jk=a(Dd);Dz=n(Jk,"This model inherits from "),vp=r(Jk,"A",{href:!0});var C6=a(vp);Lz=n(C6,"FlaxPreTrainedModel"),C6.forEach(t),Oz=n(Jk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Jk.forEach(t),Sz=d(Rt),Ld=r(Rt,"P",{});var Gk=a(Ld);Uz=n(Gk,"This model is also a Flax Linen "),Od=r(Gk,"A",{href:!0,rel:!0});var N6=a(Od);Wz=n(N6,"flax.linen.Module"),N6.forEach(t),Hz=n(Gk,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Gk.forEach(t),Rz=d(Rt),Zu=r(Rt,"P",{});var I6=a(Zu);Qz=n(I6,"Finally, this model supports inherent JAX features such as:"),I6.forEach(t),Vz=d(Rt),mo=r(Rt,"UL",{});var zr=a(mo);eg=r(zr,"LI",{});var A6=a(eg);Sd=r(A6,"A",{href:!0,rel:!0});var D6=a(Sd);Kz=n(D6,"Just-In-Time (JIT) compilation"),D6.forEach(t),A6.forEach(t),Jz=d(zr),tg=r(zr,"LI",{});var L6=a(tg);Ud=r(L6,"A",{href:!0,rel:!0});var O6=a(Ud);Gz=n(O6,"Automatic Differentiation"),O6.forEach(t),L6.forEach(t),Xz=d(zr),og=r(zr,"LI",{});var S6=a(og);Wd=r(S6,"A",{href:!0,rel:!0});var U6=a(Wd);Yz=n(U6,"Vectorization"),U6.forEach(t),S6.forEach(t),Zz=d(zr),ng=r(zr,"LI",{});var W6=a(ng);Hd=r(W6,"A",{href:!0,rel:!0});var H6=a(Hd);e5=n(H6,"Parallelization"),H6.forEach(t),W6.forEach(t),zr.forEach(t),t5=d(Rt),jt=r(Rt,"DIV",{class:!0});var Go=a(jt);T(Rd.$$.fragment,Go),o5=d(Go),rs=r(Go,"P",{});var Zp=a(rs);n5=n(Zp,"The "),sg=r(Zp,"CODE",{});var R6=a(sg);s5=n(R6,"FlaxBertPreTrainedModel"),R6.forEach(t),r5=n(Zp," forward method, overrides the "),rg=r(Zp,"CODE",{});var Q6=a(rg);a5=n(Q6,"__call__"),Q6.forEach(t),i5=n(Zp," special method."),Zp.forEach(t),l5=d(Go),T(vr.$$.fragment,Go),d5=d(Go),ag=r(Go,"P",{});var V6=a(ag);c5=n(V6,"Example:"),V6.forEach(t),p5=d(Go),T(Qd.$$.fragment,Go),Go.forEach(t),Rt.forEach(t),nv=d(i),as=r(i,"H2",{class:!0});var Xk=a(as);kr=r(Xk,"A",{id:!0,class:!0,href:!0});var K6=a(kr);ig=r(K6,"SPAN",{});var J6=a(ig);T(Vd.$$.fragment,J6),J6.forEach(t),K6.forEach(t),h5=d(Xk),lg=r(Xk,"SPAN",{});var G6=a(lg);f5=n(G6,"FlaxBertForTokenClassification"),G6.forEach(t),Xk.forEach(t),sv=d(i),Ue=r(i,"DIV",{class:!0});var Qt=a(Ue);T(Kd.$$.fragment,Qt),m5=d(Qt),dg=r(Qt,"P",{});var X6=a(dg);u5=n(X6,`Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),X6.forEach(t),g5=d(Qt),Jd=r(Qt,"P",{});var Yk=a(Jd);_5=n(Yk,"This model inherits from "),kp=r(Yk,"A",{href:!0});var Y6=a(kp);v5=n(Y6,"FlaxPreTrainedModel"),Y6.forEach(t),k5=n(Yk,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Yk.forEach(t),T5=d(Qt),Gd=r(Qt,"P",{});var Zk=a(Gd);y5=n(Zk,"This model is also a Flax Linen "),Xd=r(Zk,"A",{href:!0,rel:!0});var Z6=a(Xd);b5=n(Z6,"flax.linen.Module"),Z6.forEach(t),w5=n(Zk,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Zk.forEach(t),F5=d(Qt),cg=r(Qt,"P",{});var eN=a(cg);x5=n(eN,"Finally, this model supports inherent JAX features such as:"),eN.forEach(t),$5=d(Qt),uo=r(Qt,"UL",{});var Pr=a(uo);pg=r(Pr,"LI",{});var tN=a(pg);Yd=r(tN,"A",{href:!0,rel:!0});var oN=a(Yd);B5=n(oN,"Just-In-Time (JIT) compilation"),oN.forEach(t),tN.forEach(t),E5=d(Pr),hg=r(Pr,"LI",{});var nN=a(hg);Zd=r(nN,"A",{href:!0,rel:!0});var sN=a(Zd);M5=n(sN,"Automatic Differentiation"),sN.forEach(t),nN.forEach(t),z5=d(Pr),fg=r(Pr,"LI",{});var rN=a(fg);ec=r(rN,"A",{href:!0,rel:!0});var aN=a(ec);P5=n(aN,"Vectorization"),aN.forEach(t),rN.forEach(t),q5=d(Pr),mg=r(Pr,"LI",{});var iN=a(mg);tc=r(iN,"A",{href:!0,rel:!0});var lN=a(tc);j5=n(lN,"Parallelization"),lN.forEach(t),iN.forEach(t),Pr.forEach(t),C5=d(Qt),Ct=r(Qt,"DIV",{class:!0});var Xo=a(Ct);T(oc.$$.fragment,Xo),N5=d(Xo),is=r(Xo,"P",{});var eh=a(is);I5=n(eh,"The "),ug=r(eh,"CODE",{});var dN=a(ug);A5=n(dN,"FlaxBertPreTrainedModel"),dN.forEach(t),D5=n(eh," forward method, overrides the "),gg=r(eh,"CODE",{});var cN=a(gg);L5=n(cN,"__call__"),cN.forEach(t),O5=n(eh," special method."),eh.forEach(t),S5=d(Xo),T(Tr.$$.fragment,Xo),U5=d(Xo),_g=r(Xo,"P",{});var pN=a(_g);W5=n(pN,"Example:"),pN.forEach(t),H5=d(Xo),T(nc.$$.fragment,Xo),Xo.forEach(t),Qt.forEach(t),rv=d(i),ls=r(i,"H2",{class:!0});var eT=a(ls);yr=r(eT,"A",{id:!0,class:!0,href:!0});var hN=a(yr);vg=r(hN,"SPAN",{});var fN=a(vg);T(sc.$$.fragment,fN),fN.forEach(t),hN.forEach(t),R5=d(eT),kg=r(eT,"SPAN",{});var mN=a(kg);Q5=n(mN,"FlaxBertForQuestionAnswering"),mN.forEach(t),eT.forEach(t),av=d(i),We=r(i,"DIV",{class:!0});var Vt=a(We);T(rc.$$.fragment,Vt),V5=d(Vt),ds=r(Vt,"P",{});var th=a(ds);K5=n(th,`Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Tg=r(th,"CODE",{});var uN=a(Tg);J5=n(uN,"span start logits"),uN.forEach(t),G5=n(th," and "),yg=r(th,"CODE",{});var gN=a(yg);X5=n(gN,"span end logits"),gN.forEach(t),Y5=n(th,")."),th.forEach(t),Z5=d(Vt),ac=r(Vt,"P",{});var tT=a(ac);eP=n(tT,"This model inherits from "),Tp=r(tT,"A",{href:!0});var _N=a(Tp);tP=n(_N,"FlaxPreTrainedModel"),_N.forEach(t),oP=n(tT,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),tT.forEach(t),nP=d(Vt),ic=r(Vt,"P",{});var oT=a(ic);sP=n(oT,"This model is also a Flax Linen "),lc=r(oT,"A",{href:!0,rel:!0});var vN=a(lc);rP=n(vN,"flax.linen.Module"),vN.forEach(t),aP=n(oT,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),oT.forEach(t),iP=d(Vt),bg=r(Vt,"P",{});var kN=a(bg);lP=n(kN,"Finally, this model supports inherent JAX features such as:"),kN.forEach(t),dP=d(Vt),go=r(Vt,"UL",{});var qr=a(go);wg=r(qr,"LI",{});var TN=a(wg);dc=r(TN,"A",{href:!0,rel:!0});var yN=a(dc);cP=n(yN,"Just-In-Time (JIT) compilation"),yN.forEach(t),TN.forEach(t),pP=d(qr),Fg=r(qr,"LI",{});var bN=a(Fg);cc=r(bN,"A",{href:!0,rel:!0});var wN=a(cc);hP=n(wN,"Automatic Differentiation"),wN.forEach(t),bN.forEach(t),fP=d(qr),xg=r(qr,"LI",{});var FN=a(xg);pc=r(FN,"A",{href:!0,rel:!0});var xN=a(pc);mP=n(xN,"Vectorization"),xN.forEach(t),FN.forEach(t),uP=d(qr),$g=r(qr,"LI",{});var $N=a($g);hc=r($N,"A",{href:!0,rel:!0});var BN=a(hc);gP=n(BN,"Parallelization"),BN.forEach(t),$N.forEach(t),qr.forEach(t),_P=d(Vt),Nt=r(Vt,"DIV",{class:!0});var Yo=a(Nt);T(fc.$$.fragment,Yo),vP=d(Yo),cs=r(Yo,"P",{});var oh=a(cs);kP=n(oh,"The "),Bg=r(oh,"CODE",{});var EN=a(Bg);TP=n(EN,"FlaxBertPreTrainedModel"),EN.forEach(t),yP=n(oh," forward method, overrides the "),Eg=r(oh,"CODE",{});var MN=a(Eg);bP=n(MN,"__call__"),MN.forEach(t),wP=n(oh," special method."),oh.forEach(t),FP=d(Yo),T(br.$$.fragment,Yo),xP=d(Yo),Mg=r(Yo,"P",{});var zN=a(Mg);$P=n(zN,"Example:"),zN.forEach(t),BP=d(Yo),T(mc.$$.fragment,Yo),Yo.forEach(t),Vt.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(uI)),c(v,"id","bert"),c(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(v,"href","#bert"),c(g,"class","relative group"),c(ee,"id","overview"),c(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ee,"href","#overview"),c(M,"class","relative group"),c(ne,"href","https://arxiv.org/abs/1810.04805"),c(ne,"rel","nofollow"),c(Fe,"href","https://huggingface.co/thomwolf"),c(Fe,"rel","nofollow"),c(xe,"href","https://github.com/google-research/bert"),c(xe,"rel","nofollow"),c(be,"id","transformers.BertConfig"),c(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(be,"href","#transformers.BertConfig"),c(Te,"class","relative group"),c(Tc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel"),c(yc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel"),c(Cr,"href","https://huggingface.co/bert-base-uncased"),c(Cr,"rel","nofollow"),c(bc,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(wc,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(ot,"class","docstring"),c(fs,"id","transformers.BertTokenizer"),c(fs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fs,"href","#transformers.BertTokenizer"),c(en,"class","relative 
group"),c(Fc,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(vo,"class","docstring"),c(ms,"class","docstring"),c(Jt,"class","docstring"),c(mh,"class","docstring"),c(je,"class","docstring"),c(us,"id","transformers.BertTokenizerFast"),c(us,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(us,"href","#transformers.BertTokenizerFast"),c(on,"class","relative group"),c(Bc,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(ko,"class","docstring"),c(Gt,"class","docstring"),c(nt,"class","docstring"),c(gs,"id","transformers.models.bert.modeling_bert.BertForPreTrainingOutput"),c(gs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(gs,"href","#transformers.models.bert.modeling_bert.BertForPreTrainingOutput"),c(sn,"class","relative group"),c(zc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForPreTraining"),c(rn,"class","docstring"),c(Pc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForPreTraining"),c(an,"class","docstring"),c(qc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForPreTraining"),c(_s,"class","docstring"),c(io,"class","docstring"),c(vs,"id","transformers.BertModel"),c(vs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vs,"href","#transformers.BertModel"),c(ln,"class","relative group"),c(jc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(pa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(pa,"rel","nofollow"),c(fa,"href","https://arxiv.org/abs/1706.03762"),c(fa,"rel","nofollow"),c(Cc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel"),c(ht,"class","docstring"),c(Ce,"class","docstring"),c(Ts,"id","transformers.BertForPreTraining"),c(Ts,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ts,"href","#transformers.BertForPreTraining"),c(cn,"class","relative group"),c(Nc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ta,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ta,"rel","nofollow"),c(Ic,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForPreTraining"),c(ft,"class","docstring"),c(st,"class","docstring"),c(bs,"id","transformers.BertLMHeadModel"),c(bs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bs,"href","#transformers.BertLMHeadModel"),c(fn,"class","relative group"),c(Ac,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ea,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ea,"rel","nofollow"),c(Dc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertLMHeadModel"),c(mt,"class","docstring"),c(rt,"class","docstring"),c(Fs,"id","transformers.BertForMaskedLM"),c(Fs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fs,"href","#transformers.BertForMaskedLM"),c(un,"class","relative group"),c(Lc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ia,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ia,"rel","nofollow"),c(Oc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForMaskedLM"),c(ut,"class","docstring"),c(at,"class","docstring"),c($s,"id","transformers.BertForNextSentencePrediction"),c($s,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c($s,"href","#transformers.BertForNextSentencePrediction"),c(_n,"class","relative group"),c(Sc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ha,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ha,"rel","nofollow"),c(Uc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForNextSentencePrediction"),c(gt,"class","docstring"),c(it,"class","docstring"),c(Es,"id","transformers.BertForSequenceClassification"),c(Es,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Es,"href","#transformers.BertForSequenceClassification"),c(kn,"class","relative group"),c(Wc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Xa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Xa,"rel","nofollow"),c(Hc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForSequenceClassification"),c(He,"class","docstring"),c(lt,"class","docstring"),c(zs,"id","transformers.BertForMultipleChoice"),c(zs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zs,"href","#transformers.BertForMultipleChoice"),c(yn,"class","relative group"),c(Rc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ri,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ri,"rel","nofollow"),c(Qc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForMultipleChoice"),c(_t,"class","docstring"),c(dt,"class","docstring"),c(qs,"id","transformers.BertForTokenClassification"),c(qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qs,"href","#transformers.BertForTokenClassification"),c(wn,"class","relative group"),c(Vc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(hi,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(hi,"rel","nofollow"),c(Kc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForTokenClassification"),c(vt,"class","docstring"),c(ct,"class","docstring"),c(Cs,"id","transformers.BertForQuestionAnswering"),c(Cs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Cs,"href","#transformers.BertForQuestionAnswering"),c(xn,"class","relative 
group"),c(Jc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ki,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ki,"rel","nofollow"),c(Gc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertForQuestionAnswering"),c(kt,"class","docstring"),c(pt,"class","docstring"),c(Is,"id","transformers.TFBertModel"),c(Is,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Is,"href","#transformers.TFBertModel"),c(En,"class","relative group"),c(Xc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c($i,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c($i,"rel","nofollow"),c(Yc,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel"),c(Tt,"class","docstring"),c(Qe,"class","docstring"),c(Ls,"id","transformers.TFBertForPreTraining"),c(Ls,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ls,"href","#transformers.TFBertForPreTraining"),c(zn,"class","relative group"),c(Zc,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ji,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ji,"rel","nofollow"),c(ep,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForPreTraining"),c(yt,"class","docstring"),c(Ve,"class","docstring"),c(Us,"id","transformers.TFBertLMHeadModel"),c(Us,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Us,"href","#transformers.TFBertLMHeadModel"),c(jn,"class","relative group"),c(et,"class","docstring"),c(Ai,"class","docstring"),c(Ws,"id","transformers.TFBertForMaskedLM"),c(Ws,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ws,"href","#transformers.TFBertForMaskedLM"),c(Cn,"class","relative group"),c(tp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ki,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ki,"rel","nofollow"),c(op,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForMaskedLM"),c(bt,"class","docstring"),c(Ke,"class","docstring"),c(Qs,"id","transformers.TFBertForNextSentencePrediction"),c(Qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qs,"href","#transformers.TFBertForNextSentencePrediction"),c(In,"class","relative group"),c(np,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ol,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ol,"rel","nofollow"),c(sp,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForNextSentencePrediction"),c(wt,"class","docstring"),c(Je,"class","docstring"),c(Js,"id","transformers.TFBertForSequenceClassification"),c(Js,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Js,"href","#transformers.TFBertForSequenceClassification"),c(Dn,"class","relative group"),c(rp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(dl,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(dl,"rel","nofollow"),c(ap,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForSequenceClassification"),c(Ft,"class","docstring"),c(Ge,"class","docstring"),c(Ys,"id","transformers.TFBertForMultipleChoice"),c(Ys,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ys,"href","#transformers.TFBertForMultipleChoice"),c(On,"class","relative group"),c(ip,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(gl,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(gl,"rel","nofollow"),c(lp,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForMultipleChoice"),c(xt,"class","docstring"),c(Xe,"class","docstring"),c(tr,"id","transformers.TFBertForTokenClassification"),c(tr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(tr,"href","#transformers.TFBertForTokenClassification"),c(Un,"class","relative group"),c(dp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(wl,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(wl,"rel","nofollow"),c(cp,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForTokenClassification"),c($t,"class","docstring"),c(Ye,"class","docstring"),c(sr,"id","transformers.TFBertForQuestionAnswering"),c(sr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(sr,"href","#transformers.TFBertForQuestionAnswering"),c(Hn,"class","relative group"),c(pp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(zl,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(zl,"rel","nofollow"),c(hp,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertForQuestionAnswering"),c(Bt,"class","docstring"),c(Ze,"class","docstring"),c(ir,"id","transformers.FlaxBertModel"),c(ir,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ir,"href","#transformers.FlaxBertModel"),c(Vn,"class","relative group"),c(fp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Al,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Al,"rel","nofollow"),c(Dl,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Dl,"rel","nofollow"),c(Ll,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ll,"rel","nofollow"),c(Ol,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ol,"rel","nofollow"),c(Sl,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Sl,"rel","nofollow"),c(Et,"class","docstring"),c(Ie,"class","docstring"),c(dr,"id","transformers.FlaxBertForPreTraining"),c(dr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(dr,"href","#transformers.FlaxBertForPreTraining"),c(Jn,"class","relative group"),c(mp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Kl,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Kl,"rel","nofollow"),c(Jl,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Jl,"rel","nofollow"),c(Gl,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Gl,"rel","nofollow"),c(Xl,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Xl,"rel","nofollow"),c(Yl,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Yl,"rel","nofollow"),c(Mt,"class","docstring"),c(Ae,"class","docstring"),c(pr,"id","transformers.FlaxBertForMaskedLM"),c(pr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(pr,"href","#transformers.FlaxBertForMaskedLM"),c(Yn,"class","relative group"),c(up,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ad,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(ad,"rel","nofollow"),c(id,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(id,"rel","nofollow"),c(ld,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(ld,"rel","nofollow"),c(dd,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(dd,"rel","nofollow"),c(cd,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(cd,"rel","nofollow"),c(zt,"class","docstring"),c(De,"class","docstring"),c(fr,"id","transformers.FlaxBertForNextSentencePrediction"),c(fr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fr,"href","#transformers.FlaxBertForNextSentencePrediction"),c(es,"class","relative group"),c(gp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(vd,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(vd,"rel","nofollow"),c(kd,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(kd,"rel","nofollow"),c(Td,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Td,"rel","nofollow"),c(yd,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(yd,"rel","nofollow"),c(bd,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(bd,"rel","nofollow"),c(Pt,"class","docstring"),c(Le,"class","docstring"),c(ur,"id","transformers.FlaxBertForSequenceClassification"),c(ur,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ur,"href","#transformers.FlaxBertForSequenceClassification"),c(os,"class","relative 
group"),c(_p,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Md,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Md,"rel","nofollow"),c(zd,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(zd,"rel","nofollow"),c(Pd,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Pd,"rel","nofollow"),c(qd,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(qd,"rel","nofollow"),c(jd,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(jd,"rel","nofollow"),c(qt,"class","docstring"),c(Oe,"class","docstring"),c(_r,"id","transformers.FlaxBertForMultipleChoice"),c(_r,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_r,"href","#transformers.FlaxBertForMultipleChoice"),c(ss,"class","relative group"),c(vp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Od,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Od,"rel","nofollow"),c(Sd,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Sd,"rel","nofollow"),c(Ud,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ud,"rel","nofollow"),c(Wd,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Wd,"rel","nofollow"),c(Hd,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Hd,"rel","nofollow"),c(jt,"class","docstring"),c(Se,"class","docstring"),c(kr,"id","transformers.FlaxBertForTokenClassification"),c(kr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(kr,"href","#transformers.FlaxBertForTokenClassification"),c(as,"class","relative group"),c(kp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Xd,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Xd,"rel","nofollow"),c(Yd,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Yd,"rel","nofollow"),c(Zd,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Zd,"rel","nofollow"),c(ec,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(ec,"rel","nofollow"),c(tc,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(tc,"rel","nofollow"),c(Ct,"class","docstring"),c(Ue,"class","docstring"),c(yr,"id","transformers.FlaxBertForQuestionAnswering"),c(yr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(yr,"href","#transformers.FlaxBertForQuestionAnswering"),c(ls,"class","relative 
group"),c(Tp,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(lc,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(lc,"rel","nofollow"),c(dc,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(dc,"rel","nofollow"),c(cc,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(cc,"rel","nofollow"),c(pc,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(pc,"rel","nofollow"),c(hc,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(hc,"rel","nofollow"),c(Nt,"class","docstring"),c(We,"class","docstring")},m(i,m){e(document.head,p),f(i,$,m),f(i,g,m),e(g,v),e(v,x),y(_,x,null),e(g,u),e(g,B),e(B,ue),f(i,X,m),f(i,M,m),e(M,ee),e(ee,L),y(oe,L,null),e(M,ge),e(M,O),e(O,_e),f(i,ce,m),f(i,J,m),e(J,I),e(J,ne),e(ne,Y),e(J,z),f(i,q,m),f(i,se,m),e(se,H),f(i,pe,m),f(i,re,m),e(re,S),e(S,ve),f(i,he,m),f(i,P,m),e(P,ie),e(ie,R),f(i,fe,m),f(i,ae,m),e(ae,Q),f(i,me,m),f(i,te,m),e(te,N),e(N,ke),e(te,V),e(te,le),e(le,h),f(i,E,m),f(i,K,m),e(K,Ee),e(K,Fe),e(Fe,A),e(K,Me),e(K,xe),e(xe,ze),e(K,D),f(i,W,m),f(i,Te,m),e(Te,be),e(be,U),y($e,U,null),e(Te,Pe),e(Te,de),e(de,qe),f(i,e_,m),f(i,ot,m),y(jr,ot,null),e(ot,nT),e(ot,ao),e(ao,sT),e(ao,Tc),e(Tc,rT),e(ao,aT),e(ao,yc),e(yc,iT),e(ao,lT),e(ao,Cr),e(Cr,dT),e(ao,cT),e(ot,pT),e(ot,Zo),e(Zo,hT),e(Zo,bc),e(bc,fT),e(Zo,mT),e(Zo,wc),e(wc,uT),e(Zo,gT),e(ot,_T),e(ot,nh),e(nh,vT),e(ot,kT),y(Nr,ot,null),f(i,t_,m),f(i,en,m),e(en,fs),e(fs,sh),y(Ir,sh,null),e(en,TT),e(en,rh),e(rh,yT),f(i,o_,m),f(i,je,m),y(Ar,je,null),e(je,bT),e(je,ah),e(ah,wT),e(je,FT),e(je,Dr),e(Dr,xT),e(Dr,Fc),e(Fc,$T),e(Dr,BT),e(je,ET),e(je,vo),y(Lr,vo,null),e(vo,MT),e(vo,ih),e(ih,zT),e(vo,PT),e(vo,Or),e(Or,xc),e(xc,qT),e(xc,lh),e(lh,jT),e(Or,CT),e(Or,$c),e($c,NT),e($c,dh),e(dh,IT),e(je,AT),e(je,ms),y(Sr,ms,null),e(ms,DT),e(ms,Ur),e(Ur,LT),e(Ur,ch),e(ch,OT),e(Ur,ST),e(je,UT),e(je,Jt),y(Wr,Jt,null),e(Jt,WT),e(Jt,ph),e(ph,HT),e(Jt,RT),y(Hr,Jt,null),e(Jt,QT),e(Jt,tn),e(tn,VT),e(tn,hh),e(hh,KT),e(tn,JT),e(tn,fh),e(fh,GT),e(tn,XT),e(je,YT),e(je,mh),f(i,n_,m),f(i,on,m),e(on,us),e(us,uh),y(Rr,uh,null),e(on,ZT),e(on,gh),e(gh,ey),f(i,s_,m),f(i,nt,m),y(Qr,nt,null),e(nt,ty),e(nt,Vr),e(Vr,oy),e(Vr,_h),e(_h,ny),e(Vr,sy),e(nt,ry),e(nt,Kr),e(Kr,ay),e(Kr,Bc),e(Bc,iy),e(Kr,ly),e(nt,dy),e(nt,ko),y(Jr,ko,null),e(ko,cy),e(ko,vh),e(vh,py),e(ko,hy),e(ko,Gr),e(Gr,Ec),e(Ec,fy),e(Ec,kh),e(kh,my),e(Gr,uy),e(Gr,Mc),e(Mc,gy),e(Mc,Th),e(Th,_y),e(nt,vy),e(nt,Gt),y(Xr,Gt,null),e(Gt,ky),e(Gt,yh),e(yh,Ty),e(Gt,yy),y(Yr,Gt,null),e(Gt,by),e(Gt,nn),e(nn,wy),e(nn,bh),e(bh,Fy),e(nn,xy),e(nn,wh),e(wh,$y),e(nn,By),f(i,r_,m),f(i,sn,m),e(sn,gs),e(gs,Fh),y(Zr,Fh,null),e(sn,Ey),e(sn,xh),e(xh,My),f(i,a_,m),f(i,rn,m),y(ea,rn,null),e(rn,zy),e(rn,ta),e(ta,Py),e(ta,zc),e(zc,qy),e(ta,jy),f(i,i_,m),f(i,an,m),y(oa,an,null),e(an,Cy),e(an,na),e(na,Ny),e(na,Pc),e(Pc,Iy),e(na,Ay),f(i,l_,m),f(i,io,m),y(sa,io,null),e(io,Dy),e(io,ra),e(ra,Ly),e(ra,qc),e(qc,Oy),e(ra,Sy),e(io,Uy),e(io,_s),y(aa,_s,null),e(_s,Wy),e(_s,$h),e($h,Hy),f(i,d_,m),f(i,ln,m),e(ln,vs),e(vs,Bh),y(ia,Bh,null),e(ln,Ry),e(ln,Eh),e(Eh,Qy),f(i,c_,m),f(i,Ce,m),y(la,Ce,null),e(Ce,Vy),e(Ce,Mh),e(Mh,Ky),e(Ce,Jy),e(Ce,da),e(da,Gy),e(da,jc),e(jc,Xy),e(da,Yy),e(Ce,Zy),e(Ce,ca),e(ca,eb),e(ca,pa),e(pa,tb),e(ca,ob),e(Ce,nb),e(Ce,ha),e(ha,sb),e(ha,fa),e(fa,rb),e(ha,ab),e(Ce,ib),e(Ce,Re),e(Re,lb),e(Re,zh),e(zh,db),e(Re,cb),e(Re,Ph),e(Ph,pb),e(Re,hb),e(Re,qh),e(qh,fb),e(Re,mb),e(Re,jh),e(jh,ub),e(Re,gb),e(Re,Ch),e(Ch,_b),e(Re,vb),e(Re,Nh),e(Nh,kb),e
(Re,Tb),e(Ce,yb),e(Ce,ht),y(ma,ht,null),e(ht,bb),e(ht,dn),e(dn,wb),e(dn,Cc),e(Cc,Fb),e(dn,xb),e(dn,Ih),e(Ih,$b),e(dn,Bb),e(ht,Eb),y(ks,ht,null),e(ht,Mb),e(ht,Ah),e(Ah,zb),e(ht,Pb),y(ua,ht,null),f(i,p_,m),f(i,cn,m),e(cn,Ts),e(Ts,Dh),y(ga,Dh,null),e(cn,qb),e(cn,Lh),e(Lh,jb),f(i,h_,m),f(i,st,m),y(_a,st,null),e(st,Cb),e(st,pn),e(pn,Nb),e(pn,Oh),e(Oh,Ib),e(pn,Ab),e(pn,Sh),e(Sh,Db),e(pn,Lb),e(st,Ob),e(st,va),e(va,Sb),e(va,Nc),e(Nc,Ub),e(va,Wb),e(st,Hb),e(st,ka),e(ka,Rb),e(ka,Ta),e(Ta,Qb),e(ka,Vb),e(st,Kb),e(st,ft),y(ya,ft,null),e(ft,Jb),e(ft,hn),e(hn,Gb),e(hn,Ic),e(Ic,Xb),e(hn,Yb),e(hn,Uh),e(Uh,Zb),e(hn,e1),e(ft,t1),y(ys,ft,null),e(ft,o1),e(ft,Wh),e(Wh,n1),e(ft,s1),y(ba,ft,null),f(i,f_,m),f(i,fn,m),e(fn,bs),e(bs,Hh),y(wa,Hh,null),e(fn,r1),e(fn,Rh),e(Rh,a1),f(i,m_,m),f(i,rt,m),y(Fa,rt,null),e(rt,i1),e(rt,xa),e(xa,l1),e(xa,Qh),e(Qh,d1),e(xa,c1),e(rt,p1),e(rt,$a),e($a,h1),e($a,Ac),e(Ac,f1),e($a,m1),e(rt,u1),e(rt,Ba),e(Ba,g1),e(Ba,Ea),e(Ea,_1),e(Ba,v1),e(rt,k1),e(rt,mt),y(Ma,mt,null),e(mt,T1),e(mt,mn),e(mn,y1),e(mn,Dc),e(Dc,b1),e(mn,w1),e(mn,Vh),e(Vh,F1),e(mn,x1),e(mt,$1),y(ws,mt,null),e(mt,B1),e(mt,Kh),e(Kh,E1),e(mt,M1),y(za,mt,null),f(i,u_,m),f(i,un,m),e(un,Fs),e(Fs,Jh),y(Pa,Jh,null),e(un,z1),e(un,Gh),e(Gh,P1),f(i,g_,m),f(i,at,m),y(qa,at,null),e(at,q1),e(at,ja),e(ja,j1),e(ja,Xh),e(Xh,C1),e(ja,N1),e(at,I1),e(at,Ca),e(Ca,A1),e(Ca,Lc),e(Lc,D1),e(Ca,L1),e(at,O1),e(at,Na),e(Na,S1),e(Na,Ia),e(Ia,U1),e(Na,W1),e(at,H1),e(at,ut),y(Aa,ut,null),e(ut,R1),e(ut,gn),e(gn,Q1),e(gn,Oc),e(Oc,V1),e(gn,K1),e(gn,Yh),e(Yh,J1),e(gn,G1),e(ut,X1),y(xs,ut,null),e(ut,Y1),e(ut,Zh),e(Zh,Z1),e(ut,ew),y(Da,ut,null),f(i,__,m),f(i,_n,m),e(_n,$s),e($s,ef),y(La,ef,null),e(_n,tw),e(_n,tf),e(tf,ow),f(i,v_,m),f(i,it,m),y(Oa,it,null),e(it,nw),e(it,Sa),e(Sa,sw),e(Sa,of),e(of,rw),e(Sa,aw),e(it,iw),e(it,Ua),e(Ua,lw),e(Ua,Sc),e(Sc,dw),e(Ua,cw),e(it,pw),e(it,Wa),e(Wa,hw),e(Wa,Ha),e(Ha,fw),e(Wa,mw),e(it,uw),e(it,gt),y(Ra,gt,null),e(gt,gw),e(gt,vn),e(vn,_w),e(vn,Uc),e(Uc,vw),e(vn,kw),e(vn,nf),e(nf,Tw),e(vn,yw),e(gt,bw),y(Bs,gt,null),e(gt,ww),e(gt,sf),e(sf,Fw),e(gt,xw),y(Qa,gt,null),f(i,k_,m),f(i,kn,m),e(kn,Es),e(Es,rf),y(Va,rf,null),e(kn,$w),e(kn,af),e(af,Bw),f(i,T_,m),f(i,lt,m),y(Ka,lt,null),e(lt,Ew),e(lt,lf),e(lf,Mw),e(lt,zw),e(lt,Ja),e(Ja,Pw),e(Ja,Wc),e(Wc,qw),e(Ja,jw),e(lt,Cw),e(lt,Ga),e(Ga,Nw),e(Ga,Xa),e(Xa,Iw),e(Ga,Aw),e(lt,Dw),e(lt,He),y(Ya,He,null),e(He,Lw),e(He,Tn),e(Tn,Ow),e(Tn,Hc),e(Hc,Sw),e(Tn,Uw),e(Tn,df),e(df,Ww),e(Tn,Hw),e(He,Rw),y(Ms,He,null),e(He,Qw),e(He,cf),e(cf,Vw),e(He,Kw),y(Za,He,null),e(He,Jw),e(He,pf),e(pf,Gw),e(He,Xw),y(ei,He,null),f(i,y_,m),f(i,yn,m),e(yn,zs),e(zs,hf),y(ti,hf,null),e(yn,Yw),e(yn,ff),e(ff,Zw),f(i,b_,m),f(i,dt,m),y(oi,dt,null),e(dt,e0),e(dt,mf),e(mf,t0),e(dt,o0),e(dt,ni),e(ni,n0),e(ni,Rc),e(Rc,s0),e(ni,r0),e(dt,a0),e(dt,si),e(si,i0),e(si,ri),e(ri,l0),e(si,d0),e(dt,c0),e(dt,_t),y(ai,_t,null),e(_t,p0),e(_t,bn),e(bn,h0),e(bn,Qc),e(Qc,f0),e(bn,m0),e(bn,uf),e(uf,u0),e(bn,g0),e(_t,_0),y(Ps,_t,null),e(_t,v0),e(_t,gf),e(gf,k0),e(_t,T0),y(ii,_t,null),f(i,w_,m),f(i,wn,m),e(wn,qs),e(qs,_f),y(li,_f,null),e(wn,y0),e(wn,vf),e(vf,b0),f(i,F_,m),f(i,ct,m),y(di,ct,null),e(ct,w0),e(ct,kf),e(kf,F0),e(ct,x0),e(ct,ci),e(ci,$0),e(ci,Vc),e(Vc,B0),e(ci,E0),e(ct,M0),e(ct,pi),e(pi,z0),e(pi,hi),e(hi,P0),e(pi,q0),e(ct,j0),e(ct,vt),y(fi,vt,null),e(vt,C0),e(vt,Fn),e(Fn,N0),e(Fn,Kc),e(Kc,I0),e(Fn,A0),e(Fn,Tf),e(Tf,D0),e(Fn,L0),e(vt,O0),y(js,vt,null),e(vt,S0),e(vt,yf),e(yf,U0),e(vt,W0),y(mi,vt,null),f(i,x_,m),f(i,xn,m),e(xn,Cs),e(Cs,bf),y(ui,bf,null),e(xn,H0),e(xn,wf),e(wf,R0),f(i,$_,m),f(i,pt,m),y(gi,pt,null),e(pt,Q0),e(pt,$n),e($n,V0
),e($n,Ff),e(Ff,K0),e($n,J0),e($n,xf),e(xf,G0),e($n,X0),e(pt,Y0),e(pt,_i),e(_i,Z0),e(_i,Jc),e(Jc,e2),e(_i,t2),e(pt,o2),e(pt,vi),e(vi,n2),e(vi,ki),e(ki,s2),e(vi,r2),e(pt,a2),e(pt,kt),y(Ti,kt,null),e(kt,i2),e(kt,Bn),e(Bn,l2),e(Bn,Gc),e(Gc,d2),e(Bn,c2),e(Bn,$f),e($f,p2),e(Bn,h2),e(kt,f2),y(Ns,kt,null),e(kt,m2),e(kt,Bf),e(Bf,u2),e(kt,g2),y(yi,kt,null),f(i,B_,m),f(i,En,m),e(En,Is),e(Is,Ef),y(bi,Ef,null),e(En,_2),e(En,Mf),e(Mf,v2),f(i,E_,m),f(i,Qe,m),y(wi,Qe,null),e(Qe,k2),e(Qe,zf),e(zf,T2),e(Qe,y2),e(Qe,Fi),e(Fi,b2),e(Fi,Xc),e(Xc,w2),e(Fi,F2),e(Qe,x2),e(Qe,xi),e(xi,$2),e(xi,$i),e($i,B2),e(xi,E2),e(Qe,M2),y(As,Qe,null),e(Qe,z2),e(Qe,Tt),y(Bi,Tt,null),e(Tt,P2),e(Tt,Mn),e(Mn,q2),e(Mn,Yc),e(Yc,j2),e(Mn,C2),e(Mn,Pf),e(Pf,N2),e(Mn,I2),e(Tt,A2),y(Ds,Tt,null),e(Tt,D2),e(Tt,qf),e(qf,L2),e(Tt,O2),y(Ei,Tt,null),f(i,M_,m),f(i,zn,m),e(zn,Ls),e(Ls,jf),y(Mi,jf,null),e(zn,S2),e(zn,Cf),e(Cf,U2),f(i,z_,m),f(i,Ve,m),y(zi,Ve,null),e(Ve,W2),e(Ve,Pn),e(Pn,H2),e(Pn,Nf),e(Nf,R2),e(Pn,Q2),e(Pn,If),e(If,V2),e(Pn,K2),e(Ve,J2),e(Ve,Pi),e(Pi,G2),e(Pi,Zc),e(Zc,X2),e(Pi,Y2),e(Ve,Z2),e(Ve,qi),e(qi,eF),e(qi,ji),e(ji,tF),e(qi,oF),e(Ve,nF),y(Os,Ve,null),e(Ve,sF),e(Ve,yt),y(Ci,yt,null),e(yt,rF),e(yt,qn),e(qn,aF),e(qn,ep),e(ep,iF),e(qn,lF),e(qn,Af),e(Af,dF),e(qn,cF),e(yt,pF),y(Ss,yt,null),e(yt,hF),e(yt,Df),e(Df,fF),e(yt,mF),y(Ni,yt,null),f(i,P_,m),f(i,jn,m),e(jn,Us),e(Us,Lf),y(Ii,Lf,null),e(jn,uF),e(jn,Of),e(Of,gF),f(i,q_,m),f(i,Ai,m),e(Ai,et),y(Di,et,null),e(et,_F),e(et,Ne),e(Ne,vF),e(Ne,Sf),e(Sf,kF),e(Ne,TF),e(Ne,Uf),e(Uf,yF),e(Ne,bF),e(Ne,Wf),e(Wf,wF),e(Ne,FF),e(Ne,Hf),e(Hf,xF),e(Ne,$F),e(Ne,Rf),e(Rf,BF),e(Ne,EF),e(Ne,Qf),e(Qf,MF),e(Ne,zF),e(Ne,Vf),e(Vf,PF),e(Ne,qF),e(et,jF),e(et,Li),e(Li,Oi),e(Oi,CF),e(Oi,Kf),e(Kf,NF),e(Oi,IF),e(Li,AF),e(Li,Si),e(Si,DF),e(Si,Jf),e(Jf,LF),e(Si,OF),e(et,SF),e(et,G),e(G,UF),e(G,Gf),e(Gf,WF),e(G,HF),e(G,Xf),e(Xf,RF),e(G,QF),e(G,Yf),e(Yf,VF),e(G,KF),e(G,Zf),e(Zf,JF),e(G,GF),e(G,em),e(em,XF),e(G,YF),e(G,tm),e(tm,ZF),e(G,ex),e(G,om),e(om,tx),e(G,ox),e(G,nm),e(nm,nx),e(G,sx),e(G,sm),e(sm,rx),e(G,ax),e(G,rm),e(rm,ix),e(G,lx),e(G,am),e(am,dx),e(G,cx),e(G,im),e(im,px),e(G,hx),e(G,lm),e(lm,fx),e(G,mx),e(G,dm),e(dm,ux),e(G,gx),e(G,cm),e(cm,_x),e(G,vx),e(G,pm),e(pm,kx),e(G,Tx),e(G,hm),e(hm,yx),e(G,bx),e(G,fm),e(fm,wx),e(G,Fx),e(G,mm),e(mm,xx),e(G,$x),e(G,um),e(um,Bx),e(G,Ex),e(et,Mx),e(et,gm),e(gm,zx),e(et,Px),y(Ui,et,null),f(i,j_,m),f(i,Cn,m),e(Cn,Ws),e(Ws,_m),y(Wi,_m,null),e(Cn,qx),e(Cn,vm),e(vm,jx),f(i,C_,m),f(i,Ke,m),y(Hi,Ke,null),e(Ke,Cx),e(Ke,Ri),e(Ri,Nx),e(Ri,km),e(km,Ix),e(Ri,Ax),e(Ke,Dx),e(Ke,Qi),e(Qi,Lx),e(Qi,tp),e(tp,Ox),e(Qi,Sx),e(Ke,Ux),e(Ke,Vi),e(Vi,Wx),e(Vi,Ki),e(Ki,Hx),e(Vi,Rx),e(Ke,Qx),y(Hs,Ke,null),e(Ke,Vx),e(Ke,bt),y(Ji,bt,null),e(bt,Kx),e(bt,Nn),e(Nn,Jx),e(Nn,op),e(op,Gx),e(Nn,Xx),e(Nn,Tm),e(Tm,Yx),e(Nn,Zx),e(bt,e$),y(Rs,bt,null),e(bt,t$),e(bt,ym),e(ym,o$),e(bt,n$),y(Gi,bt,null),f(i,N_,m),f(i,In,m),e(In,Qs),e(Qs,bm),y(Xi,bm,null),e(In,s$),e(In,wm),e(wm,r$),f(i,I_,m),f(i,Je,m),y(Yi,Je,null),e(Je,a$),e(Je,Zi),e(Zi,i$),e(Zi,Fm),e(Fm,l$),e(Zi,d$),e(Je,c$),e(Je,el),e(el,p$),e(el,np),e(np,h$),e(el,f$),e(Je,m$),e(Je,tl),e(tl,u$),e(tl,ol),e(ol,g$),e(tl,_$),e(Je,v$),y(Vs,Je,null),e(Je,k$),e(Je,wt),y(nl,wt,null),e(wt,T$),e(wt,An),e(An,y$),e(An,sp),e(sp,b$),e(An,w$),e(An,xm),e(xm,F$),e(An,x$),e(wt,$$),y(Ks,wt,null),e(wt,B$),e(wt,$m),e($m,E$),e(wt,M$),y(sl,wt,null),f(i,A_,m),f(i,Dn,m),e(Dn,Js),e(Js,Bm),y(rl,Bm,null),e(Dn,z$),e(Dn,Em),e(Em,P$),f(i,D_,m),f(i,Ge,m),y(al,Ge,null),e(Ge,q$),e(Ge,Mm),e(Mm,j$),e(Ge,C$),e(Ge,il),e(il,N$),e(il,rp),e(rp,I$),e(il,A$),e(Ge,D$),e(Ge,ll),e(ll,L$),e(ll,dl),e(dl,O$),
e(ll,S$),e(Ge,U$),y(Gs,Ge,null),e(Ge,W$),e(Ge,Ft),y(cl,Ft,null),e(Ft,H$),e(Ft,Ln),e(Ln,R$),e(Ln,ap),e(ap,Q$),e(Ln,V$),e(Ln,zm),e(zm,K$),e(Ln,J$),e(Ft,G$),y(Xs,Ft,null),e(Ft,X$),e(Ft,Pm),e(Pm,Y$),e(Ft,Z$),y(pl,Ft,null),f(i,L_,m),f(i,On,m),e(On,Ys),e(Ys,qm),y(hl,qm,null),e(On,e4),e(On,jm),e(jm,t4),f(i,O_,m),f(i,Xe,m),y(fl,Xe,null),e(Xe,o4),e(Xe,Cm),e(Cm,n4),e(Xe,s4),e(Xe,ml),e(ml,r4),e(ml,ip),e(ip,a4),e(ml,i4),e(Xe,l4),e(Xe,ul),e(ul,d4),e(ul,gl),e(gl,c4),e(ul,p4),e(Xe,h4),y(Zs,Xe,null),e(Xe,f4),e(Xe,xt),y(_l,xt,null),e(xt,m4),e(xt,Sn),e(Sn,u4),e(Sn,lp),e(lp,g4),e(Sn,_4),e(Sn,Nm),e(Nm,v4),e(Sn,k4),e(xt,T4),y(er,xt,null),e(xt,y4),e(xt,Im),e(Im,b4),e(xt,w4),y(vl,xt,null),f(i,S_,m),f(i,Un,m),e(Un,tr),e(tr,Am),y(kl,Am,null),e(Un,F4),e(Un,Dm),e(Dm,x4),f(i,U_,m),f(i,Ye,m),y(Tl,Ye,null),e(Ye,$4),e(Ye,Lm),e(Lm,B4),e(Ye,E4),e(Ye,yl),e(yl,M4),e(yl,dp),e(dp,z4),e(yl,P4),e(Ye,q4),e(Ye,bl),e(bl,j4),e(bl,wl),e(wl,C4),e(bl,N4),e(Ye,I4),y(or,Ye,null),e(Ye,A4),e(Ye,$t),y(Fl,$t,null),e($t,D4),e($t,Wn),e(Wn,L4),e(Wn,cp),e(cp,O4),e(Wn,S4),e(Wn,Om),e(Om,U4),e(Wn,W4),e($t,H4),y(nr,$t,null),e($t,R4),e($t,Sm),e(Sm,Q4),e($t,V4),y(xl,$t,null),f(i,W_,m),f(i,Hn,m),e(Hn,sr),e(sr,Um),y($l,Um,null),e(Hn,K4),e(Hn,Wm),e(Wm,J4),f(i,H_,m),f(i,Ze,m),y(Bl,Ze,null),e(Ze,G4),e(Ze,Rn),e(Rn,X4),e(Rn,Hm),e(Hm,Y4),e(Rn,Z4),e(Rn,Rm),e(Rm,eB),e(Rn,tB),e(Ze,oB),e(Ze,El),e(El,nB),e(El,pp),e(pp,sB),e(El,rB),e(Ze,aB),e(Ze,Ml),e(Ml,iB),e(Ml,zl),e(zl,lB),e(Ml,dB),e(Ze,cB),y(rr,Ze,null),e(Ze,pB),e(Ze,Bt),y(Pl,Bt,null),e(Bt,hB),e(Bt,Qn),e(Qn,fB),e(Qn,hp),e(hp,mB),e(Qn,uB),e(Qn,Qm),e(Qm,gB),e(Qn,_B),e(Bt,vB),y(ar,Bt,null),e(Bt,kB),e(Bt,Vm),e(Vm,TB),e(Bt,yB),y(ql,Bt,null),f(i,R_,m),f(i,Vn,m),e(Vn,ir),e(ir,Km),y(jl,Km,null),e(Vn,bB),e(Vn,Jm),e(Jm,wB),f(i,Q_,m),f(i,Ie,m),y(Cl,Ie,null),e(Ie,FB),e(Ie,Gm),e(Gm,xB),e(Ie,$B),e(Ie,Nl),e(Nl,BB),e(Nl,fp),e(fp,EB),e(Nl,MB),e(Ie,zB),e(Ie,Il),e(Il,PB),e(Il,Al),e(Al,qB),e(Il,jB),e(Ie,CB),e(Ie,Xm),e(Xm,NB),e(Ie,IB),e(Ie,lo),e(lo,Ym),e(Ym,Dl),e(Dl,AB),e(lo,DB),e(lo,Zm),e(Zm,Ll),e(Ll,LB),e(lo,OB),e(lo,eu),e(eu,Ol),e(Ol,SB),e(lo,UB),e(lo,tu),e(tu,Sl),e(Sl,WB),e(Ie,HB),e(Ie,Et),y(Ul,Et,null),e(Et,RB),e(Et,Kn),e(Kn,QB),e(Kn,ou),e(ou,VB),e(Kn,KB),e(Kn,nu),e(nu,JB),e(Kn,GB),e(Et,XB),y(lr,Et,null),e(Et,YB),e(Et,su),e(su,ZB),e(Et,eE),y(Wl,Et,null),f(i,V_,m),f(i,Jn,m),e(Jn,dr),e(dr,ru),y(Hl,ru,null),e(Jn,tE),e(Jn,au),e(au,oE),f(i,K_,m),f(i,Ae,m),y(Rl,Ae,null),e(Ae,nE),e(Ae,Gn),e(Gn,sE),e(Gn,iu),e(iu,rE),e(Gn,aE),e(Gn,lu),e(lu,iE),e(Gn,lE),e(Ae,dE),e(Ae,Ql),e(Ql,cE),e(Ql,mp),e(mp,pE),e(Ql,hE),e(Ae,fE),e(Ae,Vl),e(Vl,mE),e(Vl,Kl),e(Kl,uE),e(Vl,gE),e(Ae,_E),e(Ae,du),e(du,vE),e(Ae,kE),e(Ae,co),e(co,cu),e(cu,Jl),e(Jl,TE),e(co,yE),e(co,pu),e(pu,Gl),e(Gl,bE),e(co,wE),e(co,hu),e(hu,Xl),e(Xl,FE),e(co,xE),e(co,fu),e(fu,Yl),e(Yl,$E),e(Ae,BE),e(Ae,Mt),y(Zl,Mt,null),e(Mt,EE),e(Mt,Xn),e(Xn,ME),e(Xn,mu),e(mu,zE),e(Xn,PE),e(Xn,uu),e(uu,qE),e(Xn,jE),e(Mt,CE),y(cr,Mt,null),e(Mt,NE),e(Mt,gu),e(gu,IE),e(Mt,AE),y(ed,Mt,null),f(i,J_,m),f(i,Yn,m),e(Yn,pr),e(pr,_u),y(td,_u,null),e(Yn,DE),e(Yn,vu),e(vu,LE),f(i,G_,m),f(i,De,m),y(od,De,null),e(De,OE),e(De,nd),e(nd,SE),e(nd,ku),e(ku,UE),e(nd,WE),e(De,HE),e(De,sd),e(sd,RE),e(sd,up),e(up,QE),e(sd,VE),e(De,KE),e(De,rd),e(rd,JE),e(rd,ad),e(ad,GE),e(rd,XE),e(De,YE),e(De,Tu),e(Tu,ZE),e(De,eM),e(De,po),e(po,yu),e(yu,id),e(id,tM),e(po,oM),e(po,bu),e(bu,ld),e(ld,nM),e(po,sM),e(po,wu),e(wu,dd),e(dd,rM),e(po,aM),e(po,Fu),e(Fu,cd),e(cd,iM),e(De,lM),e(De,zt),y(pd,zt,null),e(zt,dM),e(zt,Zn),e(Zn,cM),e(Zn,xu),e(xu,pM),e(Zn,hM),e(Zn,$u),e($u,fM),e(Zn,mM),e(zt,uM),y(hr,zt,null),e(zt,gM),e(zt,Bu),e(Bu,_M),e(zt,vM),y(h
d,zt,null),f(i,X_,m),f(i,es,m),e(es,fr),e(fr,Eu),y(fd,Eu,null),e(es,kM),e(es,Mu),e(Mu,TM),f(i,Y_,m),f(i,Le,m),y(md,Le,null),e(Le,yM),e(Le,ud),e(ud,bM),e(ud,zu),e(zu,wM),e(ud,FM),e(Le,xM),e(Le,gd),e(gd,$M),e(gd,gp),e(gp,BM),e(gd,EM),e(Le,MM),e(Le,_d),e(_d,zM),e(_d,vd),e(vd,PM),e(_d,qM),e(Le,jM),e(Le,Pu),e(Pu,CM),e(Le,NM),e(Le,ho),e(ho,qu),e(qu,kd),e(kd,IM),e(ho,AM),e(ho,ju),e(ju,Td),e(Td,DM),e(ho,LM),e(ho,Cu),e(Cu,yd),e(yd,OM),e(ho,SM),e(ho,Nu),e(Nu,bd),e(bd,UM),e(Le,WM),e(Le,Pt),y(wd,Pt,null),e(Pt,HM),e(Pt,ts),e(ts,RM),e(ts,Iu),e(Iu,QM),e(ts,VM),e(ts,Au),e(Au,KM),e(ts,JM),e(Pt,GM),y(mr,Pt,null),e(Pt,XM),e(Pt,Du),e(Du,YM),e(Pt,ZM),y(Fd,Pt,null),f(i,Z_,m),f(i,os,m),e(os,ur),e(ur,Lu),y(xd,Lu,null),e(os,ez),e(os,Ou),e(Ou,tz),f(i,ev,m),f(i,Oe,m),y($d,Oe,null),e(Oe,oz),e(Oe,Su),e(Su,nz),e(Oe,sz),e(Oe,Bd),e(Bd,rz),e(Bd,_p),e(_p,az),e(Bd,iz),e(Oe,lz),e(Oe,Ed),e(Ed,dz),e(Ed,Md),e(Md,cz),e(Ed,pz),e(Oe,hz),e(Oe,Uu),e(Uu,fz),e(Oe,mz),e(Oe,fo),e(fo,Wu),e(Wu,zd),e(zd,uz),e(fo,gz),e(fo,Hu),e(Hu,Pd),e(Pd,_z),e(fo,vz),e(fo,Ru),e(Ru,qd),e(qd,kz),e(fo,Tz),e(fo,Qu),e(Qu,jd),e(jd,yz),e(Oe,bz),e(Oe,qt),y(Cd,qt,null),e(qt,wz),e(qt,ns),e(ns,Fz),e(ns,Vu),e(Vu,xz),e(ns,$z),e(ns,Ku),e(Ku,Bz),e(ns,Ez),e(qt,Mz),y(gr,qt,null),e(qt,zz),e(qt,Ju),e(Ju,Pz),e(qt,qz),y(Nd,qt,null),f(i,tv,m),f(i,ss,m),e(ss,_r),e(_r,Gu),y(Id,Gu,null),e(ss,jz),e(ss,Xu),e(Xu,Cz),f(i,ov,m),f(i,Se,m),y(Ad,Se,null),e(Se,Nz),e(Se,Yu),e(Yu,Iz),e(Se,Az),e(Se,Dd),e(Dd,Dz),e(Dd,vp),e(vp,Lz),e(Dd,Oz),e(Se,Sz),e(Se,Ld),e(Ld,Uz),e(Ld,Od),e(Od,Wz),e(Ld,Hz),e(Se,Rz),e(Se,Zu),e(Zu,Qz),e(Se,Vz),e(Se,mo),e(mo,eg),e(eg,Sd),e(Sd,Kz),e(mo,Jz),e(mo,tg),e(tg,Ud),e(Ud,Gz),e(mo,Xz),e(mo,og),e(og,Wd),e(Wd,Yz),e(mo,Zz),e(mo,ng),e(ng,Hd),e(Hd,e5),e(Se,t5),e(Se,jt),y(Rd,jt,null),e(jt,o5),e(jt,rs),e(rs,n5),e(rs,sg),e(sg,s5),e(rs,r5),e(rs,rg),e(rg,a5),e(rs,i5),e(jt,l5),y(vr,jt,null),e(jt,d5),e(jt,ag),e(ag,c5),e(jt,p5),y(Qd,jt,null),f(i,nv,m),f(i,as,m),e(as,kr),e(kr,ig),y(Vd,ig,null),e(as,h5),e(as,lg),e(lg,f5),f(i,sv,m),f(i,Ue,m),y(Kd,Ue,null),e(Ue,m5),e(Ue,dg),e(dg,u5),e(Ue,g5),e(Ue,Jd),e(Jd,_5),e(Jd,kp),e(kp,v5),e(Jd,k5),e(Ue,T5),e(Ue,Gd),e(Gd,y5),e(Gd,Xd),e(Xd,b5),e(Gd,w5),e(Ue,F5),e(Ue,cg),e(cg,x5),e(Ue,$5),e(Ue,uo),e(uo,pg),e(pg,Yd),e(Yd,B5),e(uo,E5),e(uo,hg),e(hg,Zd),e(Zd,M5),e(uo,z5),e(uo,fg),e(fg,ec),e(ec,P5),e(uo,q5),e(uo,mg),e(mg,tc),e(tc,j5),e(Ue,C5),e(Ue,Ct),y(oc,Ct,null),e(Ct,N5),e(Ct,is),e(is,I5),e(is,ug),e(ug,A5),e(is,D5),e(is,gg),e(gg,L5),e(is,O5),e(Ct,S5),y(Tr,Ct,null),e(Ct,U5),e(Ct,_g),e(_g,W5),e(Ct,H5),y(nc,Ct,null),f(i,rv,m),f(i,ls,m),e(ls,yr),e(yr,vg),y(sc,vg,null),e(ls,R5),e(ls,kg),e(kg,Q5),f(i,av,m),f(i,We,m),y(rc,We,null),e(We,V5),e(We,ds),e(ds,K5),e(ds,Tg),e(Tg,J5),e(ds,G5),e(ds,yg),e(yg,X5),e(ds,Y5),e(We,Z5),e(We,ac),e(ac,eP),e(ac,Tp),e(Tp,tP),e(ac,oP),e(We,nP),e(We,ic),e(ic,sP),e(ic,lc),e(lc,rP),e(ic,aP),e(We,iP),e(We,bg),e(bg,lP),e(We,dP),e(We,go),e(go,wg),e(wg,dc),e(dc,cP),e(go,pP),e(go,Fg),e(Fg,cc),e(cc,hP),e(go,fP),e(go,xg),e(xg,pc),e(pc,mP),e(go,uP),e(go,$g),e($g,hc),e(hc,gP),e(We,_P),e(We,Nt),y(fc,Nt,null),e(Nt,vP),e(Nt,cs),e(cs,kP),e(cs,Bg),e(Bg,TP),e(cs,yP),e(cs,Eg),e(Eg,bP),e(cs,wP),e(Nt,FP),y(br,Nt,null),e(Nt,xP),e(Nt,Mg),e(Mg,$P),e(Nt,BP),y(mc,Nt,null),iv=!0},p(i,[m]){const uc={};m&2&&(uc.$$scope={dirty:m,ctx:i}),ks.$set(uc);const zg={};m&2&&(zg.$$scope={dirty:m,ctx:i}),ys.$set(zg);const Pg={};m&2&&(Pg.$$scope={dirty:m,ctx:i}),ws.$set(Pg);const qg={};m&2&&(qg.$$scope={dirty:m,ctx:i}),xs.$set(qg);const gc={};m&2&&(gc.$$scope={dirty:m,ctx:i}),Bs.$set(gc);const jg={};m&2&&(jg.$$scope={dirty:m,ctx:i}),Ms.$set(jg);const 
Cg={};m&2&&(Cg.$$scope={dirty:m,ctx:i}),Ps.$set(Cg);const Ng={};m&2&&(Ng.$$scope={dirty:m,ctx:i}),js.$set(Ng);const _c={};m&2&&(_c.$$scope={dirty:m,ctx:i}),Ns.$set(_c);const Ig={};m&2&&(Ig.$$scope={dirty:m,ctx:i}),As.$set(Ig);const Ag={};m&2&&(Ag.$$scope={dirty:m,ctx:i}),Ds.$set(Ag);const Dg={};m&2&&(Dg.$$scope={dirty:m,ctx:i}),Os.$set(Dg);const Lg={};m&2&&(Lg.$$scope={dirty:m,ctx:i}),Ss.$set(Lg);const Og={};m&2&&(Og.$$scope={dirty:m,ctx:i}),Hs.$set(Og);const Sg={};m&2&&(Sg.$$scope={dirty:m,ctx:i}),Rs.$set(Sg);const Ug={};m&2&&(Ug.$$scope={dirty:m,ctx:i}),Vs.$set(Ug);const vc={};m&2&&(vc.$$scope={dirty:m,ctx:i}),Ks.$set(vc);const Wg={};m&2&&(Wg.$$scope={dirty:m,ctx:i}),Gs.$set(Wg);const Hg={};m&2&&(Hg.$$scope={dirty:m,ctx:i}),Xs.$set(Hg);const ps={};m&2&&(ps.$$scope={dirty:m,ctx:i}),Zs.$set(ps);const Rg={};m&2&&(Rg.$$scope={dirty:m,ctx:i}),er.$set(Rg);const Qg={};m&2&&(Qg.$$scope={dirty:m,ctx:i}),or.$set(Qg);const kc={};m&2&&(kc.$$scope={dirty:m,ctx:i}),nr.$set(kc);const Vg={};m&2&&(Vg.$$scope={dirty:m,ctx:i}),rr.$set(Vg);const Kg={};m&2&&(Kg.$$scope={dirty:m,ctx:i}),ar.$set(Kg);const Jg={};m&2&&(Jg.$$scope={dirty:m,ctx:i}),lr.$set(Jg);const Kt={};m&2&&(Kt.$$scope={dirty:m,ctx:i}),cr.$set(Kt);const _o={};m&2&&(_o.$$scope={dirty:m,ctx:i}),hr.$set(_o);const Gg={};m&2&&(Gg.$$scope={dirty:m,ctx:i}),mr.$set(Gg);const Xg={};m&2&&(Xg.$$scope={dirty:m,ctx:i}),gr.$set(Xg);const Yg={};m&2&&(Yg.$$scope={dirty:m,ctx:i}),vr.$set(Yg);const hs={};m&2&&(hs.$$scope={dirty:m,ctx:i}),Tr.$set(hs);const Zg={};m&2&&(Zg.$$scope={dirty:m,ctx:i}),br.$set(Zg)},i(i){iv||(b(_.$$.fragment,i),b(oe.$$.fragment,i),b($e.$$.fragment,i),b(jr.$$.fragment,i),b(Nr.$$.fragment,i),b(Ir.$$.fragment,i),b(Ar.$$.fragment,i),b(Lr.$$.fragment,i),b(Sr.$$.fragment,i),b(Wr.$$.fragment,i),b(Hr.$$.fragment,i),b(Rr.$$.fragment,i),b(Qr.$$.fragment,i),b(Jr.$$.fragment,i),b(Xr.$$.fragment,i),b(Yr.$$.fragment,i),b(Zr.$$.fragment,i),b(ea.$$.fragment,i),b(oa.$$.fragment,i),b(sa.$$.fragment,i),b(aa.$$.fragment,i),b(ia.$$.fragment,i),b(la.$$.fragment,i),b(ma.$$.fragment,i),b(ks.$$.fragment,i),b(ua.$$.fragment,i),b(ga.$$.fragment,i),b(_a.$$.fragment,i),b(ya.$$.fragment,i),b(ys.$$.fragment,i),b(ba.$$.fragment,i),b(wa.$$.fragment,i),b(Fa.$$.fragment,i),b(Ma.$$.fragment,i),b(ws.$$.fragment,i),b(za.$$.fragment,i),b(Pa.$$.fragment,i),b(qa.$$.fragment,i),b(Aa.$$.fragment,i),b(xs.$$.fragment,i),b(Da.$$.fragment,i),b(La.$$.fragment,i),b(Oa.$$.fragment,i),b(Ra.$$.fragment,i),b(Bs.$$.fragment,i),b(Qa.$$.fragment,i),b(Va.$$.fragment,i),b(Ka.$$.fragment,i),b(Ya.$$.fragment,i),b(Ms.$$.fragment,i),b(Za.$$.fragment,i),b(ei.$$.fragment,i),b(ti.$$.fragment,i),b(oi.$$.fragment,i),b(ai.$$.fragment,i),b(Ps.$$.fragment,i),b(ii.$$.fragment,i),b(li.$$.fragment,i),b(di.$$.fragment,i),b(fi.$$.fragment,i),b(js.$$.fragment,i),b(mi.$$.fragment,i),b(ui.$$.fragment,i),b(gi.$$.fragment,i),b(Ti.$$.fragment,i),b(Ns.$$.fragment,i),b(yi.$$.fragment,i),b(bi.$$.fragment,i),b(wi.$$.fragment,i),b(As.$$.fragment,i),b(Bi.$$.fragment,i),b(Ds.$$.fragment,i),b(Ei.$$.fragment,i),b(Mi.$$.fragment,i),b(zi.$$.fragment,i),b(Os.$$.fragment,i),b(Ci.$$.fragment,i),b(Ss.$$.fragment,i),b(Ni.$$.fragment,i),b(Ii.$$.fragment,i),b(Di.$$.fragment,i),b(Ui.$$.fragment,i),b(Wi.$$.fragment,i),b(Hi.$$.fragment,i),b(Hs.$$.fragment,i),b(Ji.$$.fragment,i),b(Rs.$$.fragment,i),b(Gi.$$.fragment,i),b(Xi.$$.fragment,i),b(Yi.$$.fragment,i),b(Vs.$$.fragment,i),b(nl.$$.fragment,i),b(Ks.$$.fragment,i),b(sl.$$.fragment,i),b(rl.$$.fragment,i),b(al.$$.fragment,i),b(Gs.$$.fragment,i),b(cl.$$.fragment,i),b(Xs.$$.fragment,i),b(pl.
$$.fragment,i),b(hl.$$.fragment,i),b(fl.$$.fragment,i),b(Zs.$$.fragment,i),b(_l.$$.fragment,i),b(er.$$.fragment,i),b(vl.$$.fragment,i),b(kl.$$.fragment,i),b(Tl.$$.fragment,i),b(or.$$.fragment,i),b(Fl.$$.fragment,i),b(nr.$$.fragment,i),b(xl.$$.fragment,i),b($l.$$.fragment,i),b(Bl.$$.fragment,i),b(rr.$$.fragment,i),b(Pl.$$.fragment,i),b(ar.$$.fragment,i),b(ql.$$.fragment,i),b(jl.$$.fragment,i),b(Cl.$$.fragment,i),b(Ul.$$.fragment,i),b(lr.$$.fragment,i),b(Wl.$$.fragment,i),b(Hl.$$.fragment,i),b(Rl.$$.fragment,i),b(Zl.$$.fragment,i),b(cr.$$.fragment,i),b(ed.$$.fragment,i),b(td.$$.fragment,i),b(od.$$.fragment,i),b(pd.$$.fragment,i),b(hr.$$.fragment,i),b(hd.$$.fragment,i),b(fd.$$.fragment,i),b(md.$$.fragment,i),b(wd.$$.fragment,i),b(mr.$$.fragment,i),b(Fd.$$.fragment,i),b(xd.$$.fragment,i),b($d.$$.fragment,i),b(Cd.$$.fragment,i),b(gr.$$.fragment,i),b(Nd.$$.fragment,i),b(Id.$$.fragment,i),b(Ad.$$.fragment,i),b(Rd.$$.fragment,i),b(vr.$$.fragment,i),b(Qd.$$.fragment,i),b(Vd.$$.fragment,i),b(Kd.$$.fragment,i),b(oc.$$.fragment,i),b(Tr.$$.fragment,i),b(nc.$$.fragment,i),b(sc.$$.fragment,i),b(rc.$$.fragment,i),b(fc.$$.fragment,i),b(br.$$.fragment,i),b(mc.$$.fragment,i),iv=!0)},o(i){w(_.$$.fragment,i),w(oe.$$.fragment,i),w($e.$$.fragment,i),w(jr.$$.fragment,i),w(Nr.$$.fragment,i),w(Ir.$$.fragment,i),w(Ar.$$.fragment,i),w(Lr.$$.fragment,i),w(Sr.$$.fragment,i),w(Wr.$$.fragment,i),w(Hr.$$.fragment,i),w(Rr.$$.fragment,i),w(Qr.$$.fragment,i),w(Jr.$$.fragment,i),w(Xr.$$.fragment,i),w(Yr.$$.fragment,i),w(Zr.$$.fragment,i),w(ea.$$.fragment,i),w(oa.$$.fragment,i),w(sa.$$.fragment,i),w(aa.$$.fragment,i),w(ia.$$.fragment,i),w(la.$$.fragment,i),w(ma.$$.fragment,i),w(ks.$$.fragment,i),w(ua.$$.fragment,i),w(ga.$$.fragment,i),w(_a.$$.fragment,i),w(ya.$$.fragment,i),w(ys.$$.fragment,i),w(ba.$$.fragment,i),w(wa.$$.fragment,i),w(Fa.$$.fragment,i),w(Ma.$$.fragment,i),w(ws.$$.fragment,i),w(za.$$.fragment,i),w(Pa.$$.fragment,i),w(qa.$$.fragment,i),w(Aa.$$.fragment,i),w(xs.$$.fragment,i),w(Da.$$.fragment,i),w(La.$$.fragment,i),w(Oa.$$.fragment,i),w(Ra.$$.fragment,i),w(Bs.$$.fragment,i),w(Qa.$$.fragment,i),w(Va.$$.fragment,i),w(Ka.$$.fragment,i),w(Ya.$$.fragment,i),w(Ms.$$.fragment,i),w(Za.$$.fragment,i),w(ei.$$.fragment,i),w(ti.$$.fragment,i),w(oi.$$.fragment,i),w(ai.$$.fragment,i),w(Ps.$$.fragment,i),w(ii.$$.fragment,i),w(li.$$.fragment,i),w(di.$$.fragment,i),w(fi.$$.fragment,i),w(js.$$.fragment,i),w(mi.$$.fragment,i),w(ui.$$.fragment,i),w(gi.$$.fragment,i),w(Ti.$$.fragment,i),w(Ns.$$.fragment,i),w(yi.$$.fragment,i),w(bi.$$.fragment,i),w(wi.$$.fragment,i),w(As.$$.fragment,i),w(Bi.$$.fragment,i),w(Ds.$$.fragment,i),w(Ei.$$.fragment,i),w(Mi.$$.fragment,i),w(zi.$$.fragment,i),w(Os.$$.fragment,i),w(Ci.$$.fragment,i),w(Ss.$$.fragment,i),w(Ni.$$.fragment,i),w(Ii.$$.fragment,i),w(Di.$$.fragment,i),w(Ui.$$.fragment,i),w(Wi.$$.fragment,i),w(Hi.$$.fragment,i),w(Hs.$$.fragment,i),w(Ji.$$.fragment,i),w(Rs.$$.fragment,i),w(Gi.$$.fragment,i),w(Xi.$$.fragment,i),w(Yi.$$.fragment,i),w(Vs.$$.fragment,i),w(nl.$$.fragment,i),w(Ks.$$.fragment,i),w(sl.$$.fragment,i),w(rl.$$.fragment,i),w(al.$$.fragment,i),w(Gs.$$.fragment,i),w(cl.$$.fragment,i),w(Xs.$$.fragment,i),w(pl.$$.fragment,i),w(hl.$$.fragment,i),w(fl.$$.fragment,i),w(Zs.$$.fragment,i),w(_l.$$.fragment,i),w(er.$$.fragment,i),w(vl.$$.fragment,i),w(kl.$$.fragment,i),w(Tl.$$.fragment,i),w(or.$$.fragment,i),w(Fl.$$.fragment,i),w(nr.$$.fragment,i),w(xl.$$.fragment,i),w($l.$$.fragment,i),w(Bl.$$.fragment,i),w(rr.$$.fragment,i),w(Pl.$$.fragment,i),w(ar.$$.fragment,i),w(ql.$$.fragment,i),w(jl.$$.
fragment,i),w(Cl.$$.fragment,i),w(Ul.$$.fragment,i),w(lr.$$.fragment,i),w(Wl.$$.fragment,i),w(Hl.$$.fragment,i),w(Rl.$$.fragment,i),w(Zl.$$.fragment,i),w(cr.$$.fragment,i),w(ed.$$.fragment,i),w(td.$$.fragment,i),w(od.$$.fragment,i),w(pd.$$.fragment,i),w(hr.$$.fragment,i),w(hd.$$.fragment,i),w(fd.$$.fragment,i),w(md.$$.fragment,i),w(wd.$$.fragment,i),w(mr.$$.fragment,i),w(Fd.$$.fragment,i),w(xd.$$.fragment,i),w($d.$$.fragment,i),w(Cd.$$.fragment,i),w(gr.$$.fragment,i),w(Nd.$$.fragment,i),w(Id.$$.fragment,i),w(Ad.$$.fragment,i),w(Rd.$$.fragment,i),w(vr.$$.fragment,i),w(Qd.$$.fragment,i),w(Vd.$$.fragment,i),w(Kd.$$.fragment,i),w(oc.$$.fragment,i),w(Tr.$$.fragment,i),w(nc.$$.fragment,i),w(sc.$$.fragment,i),w(rc.$$.fragment,i),w(fc.$$.fragment,i),w(br.$$.fragment,i),w(mc.$$.fragment,i),iv=!1},d(i){t(p),i&&t($),i&&t(g),F(_),i&&t(X),i&&t(M),F(oe),i&&t(ce),i&&t(J),i&&t(q),i&&t(se),i&&t(pe),i&&t(re),i&&t(he),i&&t(P),i&&t(fe),i&&t(ae),i&&t(me),i&&t(te),i&&t(E),i&&t(K),i&&t(W),i&&t(Te),F($e),i&&t(e_),i&&t(ot),F(jr),F(Nr),i&&t(t_),i&&t(en),F(Ir),i&&t(o_),i&&t(je),F(Ar),F(Lr),F(Sr),F(Wr),F(Hr),i&&t(n_),i&&t(on),F(Rr),i&&t(s_),i&&t(nt),F(Qr),F(Jr),F(Xr),F(Yr),i&&t(r_),i&&t(sn),F(Zr),i&&t(a_),i&&t(rn),F(ea),i&&t(i_),i&&t(an),F(oa),i&&t(l_),i&&t(io),F(sa),F(aa),i&&t(d_),i&&t(ln),F(ia),i&&t(c_),i&&t(Ce),F(la),F(ma),F(ks),F(ua),i&&t(p_),i&&t(cn),F(ga),i&&t(h_),i&&t(st),F(_a),F(ya),F(ys),F(ba),i&&t(f_),i&&t(fn),F(wa),i&&t(m_),i&&t(rt),F(Fa),F(Ma),F(ws),F(za),i&&t(u_),i&&t(un),F(Pa),i&&t(g_),i&&t(at),F(qa),F(Aa),F(xs),F(Da),i&&t(__),i&&t(_n),F(La),i&&t(v_),i&&t(it),F(Oa),F(Ra),F(Bs),F(Qa),i&&t(k_),i&&t(kn),F(Va),i&&t(T_),i&&t(lt),F(Ka),F(Ya),F(Ms),F(Za),F(ei),i&&t(y_),i&&t(yn),F(ti),i&&t(b_),i&&t(dt),F(oi),F(ai),F(Ps),F(ii),i&&t(w_),i&&t(wn),F(li),i&&t(F_),i&&t(ct),F(di),F(fi),F(js),F(mi),i&&t(x_),i&&t(xn),F(ui),i&&t($_),i&&t(pt),F(gi),F(Ti),F(Ns),F(yi),i&&t(B_),i&&t(En),F(bi),i&&t(E_),i&&t(Qe),F(wi),F(As),F(Bi),F(Ds),F(Ei),i&&t(M_),i&&t(zn),F(Mi),i&&t(z_),i&&t(Ve),F(zi),F(Os),F(Ci),F(Ss),F(Ni),i&&t(P_),i&&t(jn),F(Ii),i&&t(q_),i&&t(Ai),F(Di),F(Ui),i&&t(j_),i&&t(Cn),F(Wi),i&&t(C_),i&&t(Ke),F(Hi),F(Hs),F(Ji),F(Rs),F(Gi),i&&t(N_),i&&t(In),F(Xi),i&&t(I_),i&&t(Je),F(Yi),F(Vs),F(nl),F(Ks),F(sl),i&&t(A_),i&&t(Dn),F(rl),i&&t(D_),i&&t(Ge),F(al),F(Gs),F(cl),F(Xs),F(pl),i&&t(L_),i&&t(On),F(hl),i&&t(O_),i&&t(Xe),F(fl),F(Zs),F(_l),F(er),F(vl),i&&t(S_),i&&t(Un),F(kl),i&&t(U_),i&&t(Ye),F(Tl),F(or),F(Fl),F(nr),F(xl),i&&t(W_),i&&t(Hn),F($l),i&&t(H_),i&&t(Ze),F(Bl),F(rr),F(Pl),F(ar),F(ql),i&&t(R_),i&&t(Vn),F(jl),i&&t(Q_),i&&t(Ie),F(Cl),F(Ul),F(lr),F(Wl),i&&t(V_),i&&t(Jn),F(Hl),i&&t(K_),i&&t(Ae),F(Rl),F(Zl),F(cr),F(ed),i&&t(J_),i&&t(Yn),F(td),i&&t(G_),i&&t(De),F(od),F(pd),F(hr),F(hd),i&&t(X_),i&&t(es),F(fd),i&&t(Y_),i&&t(Le),F(md),F(wd),F(mr),F(Fd),i&&t(Z_),i&&t(os),F(xd),i&&t(ev),i&&t(Oe),F($d),F(Cd),F(gr),F(Nd),i&&t(tv),i&&t(ss),F(Id),i&&t(ov),i&&t(Se),F(Ad),F(Rd),F(vr),F(Qd),i&&t(nv),i&&t(as),F(Vd),i&&t(sv),i&&t(Ue),F(Kd),F(oc),F(Tr),F(nc),i&&t(rv),i&&t(ls),F(sc),i&&t(av),i&&t(We),F(rc),F(fc),F(br),F(mc)}}}const uI={local:"bert",sections:[{local:"overview",title:"Overview"},{local:"transformers.BertConfig",title:"BertConfig"},{local:"transformers.BertTokenizer",title:"BertTokenizer"},{local:"transformers.BertTokenizerFast",title:"BertTokenizerFast"},{local:"transformers.models.bert.modeling_bert.BertForPreTrainingOutput",title:"Bert specific 
outputs"},{local:"transformers.BertModel",title:"BertModel"},{local:"transformers.BertForPreTraining",title:"BertForPreTraining"},{local:"transformers.BertLMHeadModel",title:"BertLMHeadModel"},{local:"transformers.BertForMaskedLM",title:"BertForMaskedLM"},{local:"transformers.BertForNextSentencePrediction",title:"BertForNextSentencePrediction"},{local:"transformers.BertForSequenceClassification",title:"BertForSequenceClassification"},{local:"transformers.BertForMultipleChoice",title:"BertForMultipleChoice"},{local:"transformers.BertForTokenClassification",title:"BertForTokenClassification"},{local:"transformers.BertForQuestionAnswering",title:"BertForQuestionAnswering"},{local:"transformers.TFBertModel",title:"TFBertModel"},{local:"transformers.TFBertForPreTraining",title:"TFBertForPreTraining"},{local:"transformers.TFBertLMHeadModel",title:"TFBertModelLMHeadModel"},{local:"transformers.TFBertForMaskedLM",title:"TFBertForMaskedLM"},{local:"transformers.TFBertForNextSentencePrediction",title:"TFBertForNextSentencePrediction"},{local:"transformers.TFBertForSequenceClassification",title:"TFBertForSequenceClassification"},{local:"transformers.TFBertForMultipleChoice",title:"TFBertForMultipleChoice"},{local:"transformers.TFBertForTokenClassification",title:"TFBertForTokenClassification"},{local:"transformers.TFBertForQuestionAnswering",title:"TFBertForQuestionAnswering"},{local:"transformers.FlaxBertModel",title:"FlaxBertModel"},{local:"transformers.FlaxBertForPreTraining",title:"FlaxBertForPreTraining"},{local:"transformers.FlaxBertForMaskedLM",title:"FlaxBertForMaskedLM"},{local:"transformers.FlaxBertForNextSentencePrediction",title:"FlaxBertForNextSentencePrediction"},{local:"transformers.FlaxBertForSequenceClassification",title:"FlaxBertForSequenceClassification"},{local:"transformers.FlaxBertForMultipleChoice",title:"FlaxBertForMultipleChoice"},{local:"transformers.FlaxBertForTokenClassification",title:"FlaxBertForTokenClassification"},{local:"transformers.FlaxBertForQuestionAnswering",title:"FlaxBertForQuestionAnswering"}],title:"BERT"};function gI(j,p,$){let{fw:g}=p;return j.$$set=v=>{"fw"in v&&$(0,g=v.fw)},[g]}class wI extends PN{constructor(p){super();qN(this,p,gI,mI,jN,{fw:0})}}export{wI as default,uI as metadata};
9,951
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/cpm.mdx-f465cac9.js
import{S as Oe,i as Qe,s as Re,e as n,k as u,w as ne,t as h,L as Ke,c as r,d as a,m as f,a as i,x as re,h as p,b as s,J as t,g as l,y as ie,K as je,q as oe,o as se,B as le}from"../../chunks/vendor-b1433968.js";import{D as Fe}from"../../chunks/Docstring-ff504c58.js";import{I as Ee}from"../../chunks/IconCopyLink-7029626d.js";function Ve(he){let g,z,c,m,H,_,pe,N,ue,W,d,C,S,k,fe,Z,ce,X,y,me,M,ge,de,B,G,ve,O,x,J,we,Q,v,Pe,T,Ce,ye,$,be,R,A,_e,K,w,b,I,E,ke,Y,Me,j,P,L,Te,q,$e,F;return _=new Ee({}),k=new Ee({}),E=new Ee({}),L=new Fe({props:{name:"class transformers.CpmTokenizer",anchor:"transformers.CpmTokenizer",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/cpm/tokenization_cpm.py#L31"}}),{c(){g=n("meta"),z=u(),c=n("h1"),m=n("a"),H=n("span"),ne(_.$$.fragment),pe=u(),N=n("span"),ue=h("CPM"),W=u(),d=n("h2"),C=n("a"),S=n("span"),ne(k.$$.fragment),fe=u(),Z=n("span"),ce=h("Overview"),X=u(),y=n("p"),me=h("The CPM model was proposed in "),M=n("a"),ge=h("CPM: A Large-scale Generative Chinese Pre-trained Language Model"),de=h(` by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.`),B=u(),G=n("p"),ve=h("The abstract from the paper is the following:"),O=u(),x=n("p"),J=n("em"),we=h(`Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3, with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation, cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many NLP tasks in the settings of few-shot (even zero-shot) learning.`),Q=u(),v=n("p"),Pe=h("This model was contributed by "),T=n("a"),Ce=h("canwenxu"),ye=h(`. The original implementation can be found here: `),$=n("a"),be=h("https://github.com/TsinghuaAI/CPM-Generate"),R=u(),A=n("p"),_e=h("Note: We only have a tokenizer here, since the model architecture is the same as GPT-2."),K=u(),w=n("h2"),b=n("a"),I=n("span"),ne(E.$$.fragment),ke=u(),Y=n("span"),Me=h("CpmTokenizer"),j=u(),P=n("div"),ne(L.$$.fragment),Te=u(),q=n("p"),$e=h("Runs pre-tokenization with Jieba segmentation tool. 
It is used in CPM models."),this.h()},l(e){const o=Ke('[data-svelte="svelte-1phssyn"]',document.head);g=r(o,"META",{name:!0,content:!0}),o.forEach(a),z=f(e),c=r(e,"H1",{class:!0});var V=i(c);m=r(V,"A",{id:!0,class:!0,href:!0});var Le=i(m);H=r(Le,"SPAN",{});var ze=i(H);re(_.$$.fragment,ze),ze.forEach(a),Le.forEach(a),pe=f(V),N=r(V,"SPAN",{});var Ge=i(N);ue=p(Ge,"CPM"),Ge.forEach(a),V.forEach(a),W=f(e),d=r(e,"H2",{class:!0});var U=i(d);C=r(U,"A",{id:!0,class:!0,href:!0});var xe=i(C);S=r(xe,"SPAN",{});var Ae=i(S);re(k.$$.fragment,Ae),Ae.forEach(a),xe.forEach(a),fe=f(U),Z=r(U,"SPAN",{});var He=i(Z);ce=p(He,"Overview"),He.forEach(a),U.forEach(a),X=f(e),y=r(e,"P",{});var ee=i(y);me=p(ee,"The CPM model was proposed in "),M=r(ee,"A",{href:!0,rel:!0});var Ne=i(M);ge=p(Ne,"CPM: A Large-scale Generative Chinese Pre-trained Language Model"),Ne.forEach(a),de=p(ee,` by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.`),ee.forEach(a),B=f(e),G=r(e,"P",{});var Se=i(G);ve=p(Se,"The abstract from the paper is the following:"),Se.forEach(a),O=f(e),x=r(e,"P",{});var Ze=i(x);J=r(Ze,"EM",{});var Je=i(J);we=p(Je,`Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3, with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation, cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many NLP tasks in the settings of few-shot (even zero-shot) learning.`),Je.forEach(a),Ze.forEach(a),Q=f(e),v=r(e,"P",{});var D=i(v);Pe=p(D,"This model was contributed by "),T=r(D,"A",{href:!0,rel:!0});var Ie=i(T);Ce=p(Ie,"canwenxu"),Ie.forEach(a),ye=p(D,`. The original implementation can be found here: `),$=r(D,"A",{href:!0,rel:!0});var Ye=i($);be=p(Ye,"https://github.com/TsinghuaAI/CPM-Generate"),Ye.forEach(a),D.forEach(a),R=f(e),A=r(e,"P",{});var qe=i(A);_e=p(qe,"Note: We only have a tokenizer here, since the model architecture is the same as GPT-2."),qe.forEach(a),K=f(e),w=r(e,"H2",{class:!0});var ae=i(w);b=r(ae,"A",{id:!0,class:!0,href:!0});var De=i(b);I=r(De,"SPAN",{});var We=i(I);re(E.$$.fragment,We),We.forEach(a),De.forEach(a),ke=f(ae),Y=r(ae,"SPAN",{});var Xe=i(Y);Me=p(Xe,"CpmTokenizer"),Xe.forEach(a),ae.forEach(a),j=f(e),P=r(e,"DIV",{class:!0});var te=i(P);re(L.$$.fragment,te),Te=f(te),q=r(te,"P",{});var Be=i(q);$e=p(Be,"Runs pre-tokenization with Jieba segmentation tool. 
It is used in CPM models."),Be.forEach(a),te.forEach(a),this.h()},h(){s(g,"name","hf:doc:metadata"),s(g,"content",JSON.stringify(Ue)),s(m,"id","cpm"),s(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),s(m,"href","#cpm"),s(c,"class","relative group"),s(C,"id","overview"),s(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),s(C,"href","#overview"),s(d,"class","relative group"),s(M,"href","https://arxiv.org/abs/2012.00413"),s(M,"rel","nofollow"),s(T,"href","https://huggingface.co/canwenxu"),s(T,"rel","nofollow"),s($,"href","https://github.com/TsinghuaAI/CPM-Generate"),s($,"rel","nofollow"),s(b,"id","transformers.CpmTokenizer"),s(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),s(b,"href","#transformers.CpmTokenizer"),s(w,"class","relative group"),s(P,"class","docstring")},m(e,o){t(document.head,g),l(e,z,o),l(e,c,o),t(c,m),t(m,H),ie(_,H,null),t(c,pe),t(c,N),t(N,ue),l(e,W,o),l(e,d,o),t(d,C),t(C,S),ie(k,S,null),t(d,fe),t(d,Z),t(Z,ce),l(e,X,o),l(e,y,o),t(y,me),t(y,M),t(M,ge),t(y,de),l(e,B,o),l(e,G,o),t(G,ve),l(e,O,o),l(e,x,o),t(x,J),t(J,we),l(e,Q,o),l(e,v,o),t(v,Pe),t(v,T),t(T,Ce),t(v,ye),t(v,$),t($,be),l(e,R,o),l(e,A,o),t(A,_e),l(e,K,o),l(e,w,o),t(w,b),t(b,I),ie(E,I,null),t(w,ke),t(w,Y),t(Y,Me),l(e,j,o),l(e,P,o),ie(L,P,null),t(P,Te),t(P,q),t(q,$e),F=!0},p:je,i(e){F||(oe(_.$$.fragment,e),oe(k.$$.fragment,e),oe(E.$$.fragment,e),oe(L.$$.fragment,e),F=!0)},o(e){se(_.$$.fragment,e),se(k.$$.fragment,e),se(E.$$.fragment,e),se(L.$$.fragment,e),F=!1},d(e){a(g),e&&a(z),e&&a(c),le(_),e&&a(W),e&&a(d),le(k),e&&a(X),e&&a(y),e&&a(B),e&&a(G),e&&a(O),e&&a(x),e&&a(Q),e&&a(v),e&&a(R),e&&a(A),e&&a(K),e&&a(w),le(E),e&&a(j),e&&a(P),le(L)}}}const Ue={local:"cpm",sections:[{local:"overview",title:"Overview"},{local:"transformers.CpmTokenizer",title:"CpmTokenizer"}],title:"CPM"};function ea(he,g,z){let{fw:c}=g;return he.$$set=m=>{"fw"in m&&z(0,c=m.fw)},[c]}class ra extends Oe{constructor(g){super();Qe(this,g,ea,Ve,Re,{fw:0})}}export{ra as default,Ue as metadata};
9,952
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bort.mdx-576efe77.js
import{S as Ze,i as et,s as tt,e as a,k as u,w as Ke,t as n,L as rt,c as o,d as r,m as p,a as i,x as je,h as s,b as l,J as t,g as f,y as Fe,K as at,q as Qe,o as Ve,B as Xe}from"../../chunks/vendor-b1433968.js";import{I as Ye}from"../../chunks/IconCopyLink-7029626d.js";function ot(ae){let d,O,c,m,N,T,oe,C,ie,W,w,g,D,B,ne,G,se,H,E,le,R,he,fe,M,$,ce,K,I,J,ue,j,z,pe,F,v,_,me,L,de,ve,be,y,we,q,ge,Ee,Te,k,Be,A,Re,_e,Q,b,ye,P,ke,Ae,x,Pe,xe,V;return T=new Ye({}),B=new Ye({}),{c(){d=a("meta"),O=u(),c=a("h1"),m=a("a"),N=a("span"),Ke(T.$$.fragment),oe=u(),C=a("span"),ie=n("BORT"),W=u(),w=a("h2"),g=a("a"),D=a("span"),Ke(B.$$.fragment),ne=u(),G=a("span"),se=n("Overview"),H=u(),E=a("p"),le=n("The BORT model was proposed in "),R=a("a"),he=n("Optimal Subarchitecture Extraction for BERT"),fe=n(` by Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for the BERT, which the authors refer to as \u201CBort\u201D.`),M=u(),$=a("p"),ce=n("The abstract from the paper is the following:"),K=u(),I=a("p"),J=a("em"),ue=n(`We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as \u201CBort\u201D, is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large (Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%, absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.`),j=u(),z=a("p"),pe=n("Tips:"),F=u(),v=a("ul"),_=a("li"),me=n("BORT\u2019s model architecture is based on BERT, so one can refer to "),L=a("a"),de=n("BERT\u2019s documentation page"),ve=n(` for the model\u2019s API as well as usage examples.`),be=u(),y=a("li"),we=n("BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to "),q=a("a"),ge=n("RoBERTa\u2019s documentation page"),Ee=n(" for the tokenizer\u2019s API as well as usage examples."),Te=u(),k=a("li"),Be=n("BORT requires a specific fine-tuning algorithm, called "),A=a("a"),Re=n("Agora"),_e=n(` , that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the algorithm to make BORT fine-tuning work.`),Q=u(),b=a("p"),ye=n("This model was contributed by "),P=a("a"),ke=n("stefan-it"),Ae=n(". 
The original code can be found "),x=a("a"),Pe=n("here"),xe=n("."),this.h()},l(e){const h=rt('[data-svelte="svelte-1phssyn"]',document.head);d=o(h,"META",{name:!0,content:!0}),h.forEach(r),O=p(e),c=o(e,"H1",{class:!0});var X=i(c);m=o(X,"A",{id:!0,class:!0,href:!0});var Oe=i(m);N=o(Oe,"SPAN",{});var $e=i(N);je(T.$$.fragment,$e),$e.forEach(r),Oe.forEach(r),oe=p(X),C=o(X,"SPAN",{});var Ie=i(C);ie=s(Ie,"BORT"),Ie.forEach(r),X.forEach(r),W=p(e),w=o(e,"H2",{class:!0});var Y=i(w);g=o(Y,"A",{id:!0,class:!0,href:!0});var ze=i(g);D=o(ze,"SPAN",{});var Le=i(D);je(B.$$.fragment,Le),Le.forEach(r),ze.forEach(r),ne=p(Y),G=o(Y,"SPAN",{});var qe=i(G);se=s(qe,"Overview"),qe.forEach(r),Y.forEach(r),H=p(e),E=o(e,"P",{});var Z=i(E);le=s(Z,"The BORT model was proposed in "),R=o(Z,"A",{href:!0,rel:!0});var Se=i(R);he=s(Se,"Optimal Subarchitecture Extraction for BERT"),Se.forEach(r),fe=s(Z,` by Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for the BERT, which the authors refer to as \u201CBort\u201D.`),Z.forEach(r),M=p(e),$=o(e,"P",{});var Ue=i($);ce=s(Ue,"The abstract from the paper is the following:"),Ue.forEach(r),K=p(e),I=o(e,"P",{});var Ne=i(I);J=o(Ne,"EM",{});var Ce=i(J);ue=s(Ce,`We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as \u201CBort\u201D, is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large (Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%, absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.`),Ce.forEach(r),Ne.forEach(r),j=p(e),z=o(e,"P",{});var De=i(z);pe=s(De,"Tips:"),De.forEach(r),F=p(e),v=o(e,"UL",{});var S=i(v);_=o(S,"LI",{});var ee=i(_);me=s(ee,"BORT\u2019s model architecture is based on BERT, so one can refer to "),L=o(ee,"A",{href:!0});var Ge=i(L);de=s(Ge,"BERT\u2019s documentation page"),Ge.forEach(r),ve=s(ee,` for the model\u2019s API as well as usage examples.`),ee.forEach(r),be=p(S),y=o(S,"LI",{});var te=i(y);we=s(te,"BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to "),q=o(te,"A",{href:!0});var Je=i(q);ge=s(Je,"RoBERTa\u2019s documentation page"),Je.forEach(r),Ee=s(te," for the tokenizer\u2019s API as well as usage examples."),te.forEach(r),Te=p(S),k=o(S,"LI",{});var re=i(k);Be=s(re,"BORT requires a specific fine-tuning algorithm, called "),A=o(re,"A",{href:!0,rel:!0});var We=i(A);Re=s(We,"Agora"),We.forEach(r),_e=s(re,` , that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the algorithm to make BORT fine-tuning work.`),re.forEach(r),S.forEach(r),Q=p(e),b=o(e,"P",{});var U=i(b);ye=s(U,"This model was contributed by "),P=o(U,"A",{href:!0,rel:!0});var He=i(P);ke=s(He,"stefan-it"),He.forEach(r),Ae=s(U,". 
The original code can be found "),x=o(U,"A",{href:!0,rel:!0});var Me=i(x);Pe=s(Me,"here"),Me.forEach(r),xe=s(U,"."),U.forEach(r),this.h()},h(){l(d,"name","hf:doc:metadata"),l(d,"content",JSON.stringify(it)),l(m,"id","bort"),l(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(m,"href","#bort"),l(c,"class","relative group"),l(g,"id","overview"),l(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(g,"href","#overview"),l(w,"class","relative group"),l(R,"href","https://arxiv.org/abs/2010.10499"),l(R,"rel","nofollow"),l(L,"href","bert"),l(q,"href","roberta"),l(A,"href","https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology"),l(A,"rel","nofollow"),l(P,"href","https://huggingface.co/stefan-it"),l(P,"rel","nofollow"),l(x,"href","https://github.com/alexa/bort/"),l(x,"rel","nofollow")},m(e,h){t(document.head,d),f(e,O,h),f(e,c,h),t(c,m),t(m,N),Fe(T,N,null),t(c,oe),t(c,C),t(C,ie),f(e,W,h),f(e,w,h),t(w,g),t(g,D),Fe(B,D,null),t(w,ne),t(w,G),t(G,se),f(e,H,h),f(e,E,h),t(E,le),t(E,R),t(R,he),t(E,fe),f(e,M,h),f(e,$,h),t($,ce),f(e,K,h),f(e,I,h),t(I,J),t(J,ue),f(e,j,h),f(e,z,h),t(z,pe),f(e,F,h),f(e,v,h),t(v,_),t(_,me),t(_,L),t(L,de),t(_,ve),t(v,be),t(v,y),t(y,we),t(y,q),t(q,ge),t(y,Ee),t(v,Te),t(v,k),t(k,Be),t(k,A),t(A,Re),t(k,_e),f(e,Q,h),f(e,b,h),t(b,ye),t(b,P),t(P,ke),t(b,Ae),t(b,x),t(x,Pe),t(b,xe),V=!0},p:at,i(e){V||(Qe(T.$$.fragment,e),Qe(B.$$.fragment,e),V=!0)},o(e){Ve(T.$$.fragment,e),Ve(B.$$.fragment,e),V=!1},d(e){r(d),e&&r(O),e&&r(c),Xe(T),e&&r(W),e&&r(w),Xe(B),e&&r(H),e&&r(E),e&&r(M),e&&r($),e&&r(K),e&&r(I),e&&r(j),e&&r(z),e&&r(F),e&&r(v),e&&r(Q),e&&r(b)}}}const it={local:"bort",sections:[{local:"overview",title:"Overview"}],title:"BORT"};function nt(ae,d,O){let{fw:c}=d;return ae.$$set=m=>{"fw"in m&&O(0,c=m.fw)},[c]}class ht extends Ze{constructor(d){super();et(this,d,nt,ot,tt,{fw:0})}}export{ht as default,it as metadata};
9,953
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/roformer.mdx-d7162626.js
import{S as JT,i as GT,s as XT,e as r,k as l,w as y,t as n,L as YT,c as a,d as o,m as d,a as i,x as F,h as s,b as p,J as e,g as h,y as w,q as b,o as $,B as R}from"../../chunks/vendor-b1433968.js";import{T as ze}from"../../chunks/Tip-c3840994.js";import{D as ne}from"../../chunks/Docstring-ff504c58.js";import{C as qe}from"../../chunks/CodeBlock-a320dbd7.js";import{I as xe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ZT(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function ey(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function oy(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function ty(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function 
ny(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function sy(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function ry(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function ay(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, 
token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function iy(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function ly(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all 
inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function dy(Q){let 
m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function cy(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function py(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and 
nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function my(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function hy(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var 
be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function fy(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function uy(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: 
`),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function gy(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the 
recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function _y(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e;return{c(){m=r("p"),E=n("TF 2.0 models accepts two formats as inputs:"),g=l(),u=r("ul"),v=r("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),V=l(),z=r("p"),Y=n("This second option is useful when using "),N=r("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),fe=n("model(inputs)"),le=n("."),K=l(),L=r("p"),oe=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),C=r("ul"),x=r("li"),se=n("a single Tensor with "),W=r("code"),de=n("input_ids"),re=n(" only and nothing else: "),I=r("code"),ue=n("model(inputs_ids)"),ce=l(),q=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=r("code"),ae=n("model([input_ids, attention_mask])"),ie=n(" or "),U=r("code"),pe=n("model([input_ids, attention_mask, token_type_ids])"),G=l(),A=r("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r("code"),_e=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){m=a(c,"P",{});var T=i(m);E=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(o),g=d(c),u=a(c,"UL",{});var X=i(u);v=a(X,"LI",{});var be=i(v);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(o),_=d(X),M=a(X,"LI",{});var ve=i(M);me=s(ve,"having all inputs as a list, tuple or dict in the first positional arguments."),ve.forEach(o),X.forEach(o),V=d(c),z=a(c,"P",{});var j=i(z);Y=s(j,"This second option is useful when using "),N=a(j,"CODE",{});var we=i(N);ee=s(we,"tf.keras.Model.fit"),we.forEach(o),he=s(j,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(j,"CODE",{});var ye=i(O);fe=s(ye,"model(inputs)"),ye.forEach(o),le=s(j,"."),j.forEach(o),K=d(c),L=a(c,"P",{});var Re=i(L);oe=s(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(o),J=d(c),C=a(c,"UL",{});var P=i(C);x=a(P,"LI",{});var H=i(x);se=s(H,"a single Tensor with "),W=a(H,"CODE",{});var $e=i(W);de=s($e,"input_ids"),$e.forEach(o),re=s(H," only and nothing else: "),I=a(H,"CODE",{});var Z=i(I);ue=s(Z,"model(inputs_ids)"),Z.forEach(o),H.forEach(o),ce=d(P),q=a(P,"LI",{});var D=i(q);ge=s(D,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),S=a(D,"CODE",{});var Ee=i(S);ae=s(Ee,"model([input_ids, attention_mask])"),Ee.forEach(o),ie=s(D," or "),U=a(D,"CODE",{});var Te=i(U);pe=s(Te,"model([input_ids, attention_mask, token_type_ids])"),Te.forEach(o),D.forEach(o),G=d(P),A=a(P,"LI",{});var ke=i(A);te=s(ke,`a dictionary with one or several input Tensors associated to the input names given 
in the docstring: `),B=a(ke,"CODE",{});var Fe=i(B);_e=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(o),ke.forEach(o),P.forEach(o)},m(c,T){h(c,m,T),e(m,E),h(c,g,T),h(c,u,T),e(u,v),e(v,k),e(u,_),e(u,M),e(M,me),h(c,V,T),h(c,z,T),e(z,Y),e(z,N),e(N,ee),e(z,he),e(z,O),e(O,fe),e(z,le),h(c,K,T),h(c,L,T),e(L,oe),h(c,J,T),h(c,C,T),e(C,x),e(x,se),e(x,W),e(W,de),e(x,re),e(x,I),e(I,ue),e(C,ce),e(C,q),e(q,ge),e(q,S),e(S,ae),e(q,ie),e(q,U),e(U,pe),e(C,G),e(C,A),e(A,te),e(A,B),e(B,_e)},d(c){c&&o(m),c&&o(g),c&&o(u),c&&o(V),c&&o(z),c&&o(K),c&&o(L),c&&o(J),c&&o(C)}}}function ky(Q){let m,E,g,u,v;return{c(){m=r("p"),E=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=r("code"),u=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=a(k,"P",{});var _=i(m);E=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(_,"CODE",{});var M=i(g);u=s(M,"Module"),M.forEach(o),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(o)},m(k,_){h(k,m,_),e(m,E),e(m,g),e(g,u),e(m,v)},d(k){k&&o(m)}}}function vy(Q){let m,E,g,u,v,k,_,M,me,V,z,Y,N,ee,he,O,fe,le,K,L,oe,J,C,x,se,W,de,re,I,ue,ce,q,ge,S,ae,ie,U,pe,G,A,te,B,_e,c,T,X,be,ve,j,we,ye,Re,P,H,$e,Z,D,Ee,Te,ke,Fe,sc,rc,mn,ac,ic,lc,Oo,dc,wr,cc,pc,br,mc,hc,fc,ba,uc,gc,hn,jl,Io,yt,$a,fn,_c,Ra,kc,Pl,Me,un,vc,gn,Tc,$r,yc,_n,Fc,wc,bc,kn,$c,Rr,Rc,Ec,Mc,Ea,zc,Cc,vn,qc,wo,Tn,xc,Ma,jc,Pc,yn,Er,Lc,za,Ac,Dc,Mr,Nc,Ca,Oc,Ic,Ft,Fn,Sc,wn,Wc,qa,Uc,Bc,Hc,po,bn,Qc,xa,Kc,Vc,$n,Jc,So,Gc,ja,Xc,Yc,Pa,Zc,ep,op,La,Ll,Wo,wt,Aa,Rn,tp,Da,np,Al,Ce,En,sp,Mn,rp,Na,ap,ip,lp,bt,zr,dp,cp,Cr,pp,mp,hp,zn,fp,qr,up,gp,_p,Oa,kp,vp,Cn,Tp,bo,qn,yp,Ia,Fp,wp,xn,xr,bp,Sa,$p,Rp,jr,Ep,Wa,Mp,Dl,Uo,$t,Ua,jn,zp,Ba,Cp,Nl,Ue,Pn,qp,Ln,xp,An,jp,Pp,Lp,Dn,Ap,Nn,Dp,Np,Op,Le,Ip,Ha,Sp,Wp,Qa,Up,Bp,Ka,Hp,Qp,Va,Kp,Vp,Ja,Jp,Gp,Ga,Xp,Yp,Zp,He,On,em,Bo,om,Pr,tm,nm,Xa,sm,rm,am,Rt,im,Ya,lm,dm,In,Ol,Ho,Et,Za,Sn,cm,ei,pm,Il,yo,Wn,mm,Qo,hm,oi,fm,um,Un,gm,_m,km,Qe,Bn,vm,Ko,Tm,Lr,ym,Fm,ti,wm,bm,$m,Mt,Rm,ni,Em,Mm,Hn,Sl,Vo,zt,si,Qn,zm,ri,Cm,Wl,Fo,Kn,qm,Jo,xm,ai,jm,Pm,Vn,Lm,Am,Dm,Ke,Jn,Nm,Go,Om,Ar,Im,Sm,ii,Wm,Um,Bm,Ct,Hm,li,Qm,Km,Gn,Ul,Xo,qt,di,Xn,Vm,ci,Jm,Bl,ao,Yn,Gm,pi,Xm,Ym,Zn,Zm,es,eh,oh,th,je,os,nh,Yo,sh,Dr,rh,ah,mi,ih,lh,dh,xt,ch,hi,ph,mh,ts,hh,fi,fh,uh,ns,Hl,Zo,jt,ui,ss,gh,gi,_h,Ql,io,rs,kh,_i,vh,Th,as,yh,is,Fh,wh,bh,Ve,ls,$h,et,Rh,Nr,Eh,Mh,ki,zh,Ch,qh,Pt,xh,vi,jh,Ph,ds,Kl,ot,Lt,Ti,cs,Lh,yi,Ah,Vl,lo,ps,Dh,Fi,Nh,Oh,ms,Ih,hs,Sh,Wh,Uh,Je,fs,Bh,tt,Hh,Or,Qh,Kh,wi,Vh,Jh,Gh,At,Xh,bi,Yh,Zh,us,Jl,nt,Dt,$i,gs,ef,Ri,of,Gl,co,_s,tf,st,nf,Ei,sf,rf,Mi,af,lf,df,ks,cf,vs,pf,mf,hf,Ge,Ts,ff,rt,uf,Ir,gf,_f,zi,kf,vf,Tf,Nt,yf,Ci,Ff,wf,ys,Xl,at,Ot,qi,Fs,bf,xi,$f,Yl,Ae,ws,Rf,ji,Ef,Mf,bs,zf,Sr,Cf,qf,xf,$s,jf,Rs,Pf,Lf,Af,It,Df,Xe,Es,Nf,it,Of,Wr,If,Sf,Pi,Wf,Uf,Bf,St,Hf,Li,Qf,Kf,Ms,Zl,lt,Wt,Ai,zs,Vf,Di,Jf,ed,De,Cs,Gf,qs,Xf,Ni,Yf,Zf,eu,xs,ou,Ur,tu,nu,su,js,ru,Ps,au,iu,lu,Ut,du,Ye,Ls,cu,dt,pu,Br,mu,hu,Oi,fu,uu,gu,Bt,_u,Ii,ku,vu,As,od,ct,Ht,Si,Ds,Tu,Wi,yu,td,Ne,Ns,Fu,Os,wu,Ui,bu,$u,Ru,Is,Eu,Hr,Mu,zu,Cu,Ss,qu,Ws,xu,ju,Pu,Qt,Lu,mo,Us,Au,Be,Du,Bi,Nu,Ou,Hi,Iu,Su,Qi,Wu,Uu,Ki,Bu,Hu,Vi,Qu,Ku,Vu,Ji,Ju,Gu,Bs,nd,pt,Kt,Gi,Hs,Xu,Xi,Yu,sd,Oe,Qs,Zu,Yi,eg,og,Ks,tg,Qr,ng,sg,rg,Vs,ag,Js,ig,lg,dg,Vt,cg,Ze,Gs,pg,mt,mg,Kr,hg,fg,Zi,ug,gg,_g,Jt,kg,el,vg,Tg,Xs,rd,ht,Gt,ol,Ys,yg,tl,Fg,ad,Ie,Zs,wg,nl,bg,$g,er,Rg,Vr,Eg,
Mg,zg,or,Cg,tr,qg,xg,jg,Xt,Pg,eo,nr,Lg,ft,Ag,Jr,Dg,Ng,sl,Og,Ig,Sg,Yt,Wg,rl,Ug,Bg,sr,id,ut,Zt,al,rr,Hg,il,Qg,ld,Se,ar,Kg,ll,Vg,Jg,ir,Gg,Gr,Xg,Yg,Zg,lr,e_,dr,o_,t_,n_,en,s_,oo,cr,r_,gt,a_,Xr,i_,l_,dl,d_,c_,p_,on,m_,cl,h_,f_,pr,dd,_t,tn,pl,mr,u_,ml,g_,cd,We,hr,__,kt,k_,hl,v_,T_,fl,y_,F_,w_,fr,b_,Yr,$_,R_,E_,ur,M_,gr,z_,C_,q_,nn,x_,to,_r,j_,vt,P_,Zr,L_,A_,ul,D_,N_,O_,sn,I_,gl,S_,W_,kr,pd;return k=new xe({}),ee=new xe({}),ye=new xe({}),D=new ne({props:{name:"class transformers.RoFormerConfig",anchor:"transformers.RoFormerConfig",parameters:[{name:"vocab_size",val:" = 50000"},{name:"embedding_size",val:" = None"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 1536"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"rotary_value",val:" = False"},{name:"use_cache",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/configuration_roformer.py#L34",parametersDescription:[{anchor:"transformers.RoFormerConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50000) &#x2014; Vocabulary size of the RoFormer model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerModel">TFRoFormerModel</a>.`,name:"vocab_size"},{anchor:"transformers.RoFormerConfig.embedding_size",description:`<strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; Dimensionality of the encoder layers and the pooler layer. Defaults to the <code>hidden_size</code> if not provided.`,name:"embedding_size"},{anchor:"transformers.RoFormerConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.RoFormerConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.RoFormerConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.RoFormerConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.RoFormerConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.RoFormerConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.RoFormerConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.RoFormerConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1536) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 1536).`,name:"max_position_embeddings"},{anchor:"transformers.RoFormerConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerModel">TFRoFormerModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.RoFormerConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.RoFormerConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.RoFormerConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"},{anchor:"transformers.RoFormerConfig.rotary_value",description:`<strong>rotary_value</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not apply rotary position embeddings on value layer.`,name:"rotary_value"}]}}),hn=new qe({props:{code:`from transformers import RoFormerModel, RoFormerConfig # Initializing a RoFormer junnyu/roformer_chinese_base style configuration configuration = RoFormerConfig() # Initializing a model from the junnyu/roformer_chinese_base style configuration model = RoFormerModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerModel, RoFormerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a RoFormer junnyu/roformer_chinese_base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = RoFormerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the junnyu/roformer_chinese_base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),fn=new xe({}),un=new ne({props:{name:"class transformers.RoFormerTokenizer",anchor:"transformers.RoFormerTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/tokenization_roformer.py#L61",parametersDescription:[{anchor:"transformers.RoFormerTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.RoFormerTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.RoFormerTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.RoFormerTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.RoFormerTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RoFormerTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RoFormerTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RoFormerTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RoFormerTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.RoFormerTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),vn=new qe({props:{code:`from transformers import RoFormerTokenizer tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') tokenizer.tokenize("\u4ECA\u5929\u5929\u6C14\u975E\u5E38\u597D\u3002"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.tokenize(<span class="hljs-string">&quot;\u4ECA\u5929\u5929\u6C14\u975E\u5E38\u597D\u3002&quot;</span>) <span class="hljs-comment"># [&#x27;\u4ECA&#x27;, &#x27;\u5929&#x27;, &#x27;\u5929&#x27;, &#x27;\u6C14&#x27;, &#x27;\u975E\u5E38&#x27;, &#x27;\u597D&#x27;, &#x27;\u3002&#x27;]</span>`}}),Tn=new ne({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RoFormerTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/tokenization_roformer.py#L225",parametersDescription:[{anchor:"transformers.RoFormerTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.RoFormerTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Fn=new ne({props:{name:"get_special_tokens_mask",anchor:"transformers.RoFormerTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/tokenization_roformer.py#L250",parametersDescription:[{anchor:"transformers.RoFormerTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RoFormerTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.RoFormerTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),bn=new 
ne({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.RoFormerTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/tokenization_roformer.py#L278",parametersDescription:[{anchor:"transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),$n=new qe({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Rn=new xe({}),En=new ne({props:{name:"class transformers.RoFormerTokenizerFast",anchor:"transformers.RoFormerTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/tokenization_roformer_fast.py#L63"}}),Cn=new qe({props:{code:`from transformers import RoFormerTokenizerFast tokenizer = RoFormerTokenizerFast.from_pretrained('junnyu/roformer_chinese_base') tokenizer.tokenize("\u4ECA\u5929\u5929\u6C14\u975E\u5E38\u597D\u3002"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizerFast.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.tokenize(<span class="hljs-string">&quot;\u4ECA\u5929\u5929\u6C14\u975E\u5E38\u597D\u3002&quot;</span>) <span class="hljs-comment"># [&#x27;\u4ECA&#x27;, &#x27;\u5929&#x27;, &#x27;\u5929&#x27;, &#x27;\u6C14&#x27;, &#x27;\u975E\u5E38&#x27;, &#x27;\u597D&#x27;, &#x27;\u3002&#x27;]</span>`}}),qn=new ne({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/tokenization_roformer_fast.py#L139",parametersDescription:[{anchor:"transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),jn=new xe({}),Pn=new ne({props:{name:"class transformers.RoFormerModel",anchor:"transformers.RoFormerModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L795",parametersDescription:[{anchor:"transformers.RoFormerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),On=new ne({props:{name:"forward",anchor:"transformers.RoFormerModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L836",parametersDescription:[{anchor:"transformers.RoFormerModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RoFormerModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RoFormerModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RoFormerModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used 
(see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Rt=new ze({props:{$$slots:{default:[ZT]},$$scope:{ctx:Q}}}),In=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerModel import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerModel.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerModel.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Sn=new xe({}),Wn=new ne({props:{name:"class 
transformers.RoFormerForCausalLM",anchor:"transformers.RoFormerForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1063",parametersDescription:[{anchor:"transformers.RoFormerForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bn=new ne({props:{name:"forward",anchor:"transformers.RoFormerForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1085",parametersDescription:[{anchor:"transformers.RoFormerForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RoFormerForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RoFormerForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RoFormerForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.RoFormerForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mt=new ze({props:{$$slots:{default:[ey]},$$scope:{ctx:Q}}}),Hn=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForCausalLM, RoFormerConfig import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') config = RoFormerConfig.from_pretrained("junnyu/roformer_chinese_base") config.is_decoder = True model = RoFormerForCausalLM.from_pretrained('junnyu/roformer_chinese_base', config=config) inputs = tokenizer("\u4ECA\u5929\u5929\u6C14\u975E\u5E38\u597D\u3002", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForCausalLM, RoFormerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RoFormerConfig.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForCausalLM.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;\u4ECA\u5929\u5929\u6C14\u975E\u5E38\u597D\u3002&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),Qn=new xe({}),Kn=new ne({props:{name:"class transformers.RoFormerForMaskedLM",anchor:"transformers.RoFormerForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L963",parametersDescription:[{anchor:"transformers.RoFormerForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jn=new ne({props:{name:"forward",anchor:"transformers.RoFormerForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L985",parametersDescription:[{anchor:"transformers.RoFormerForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ct=new ze({props:{$$slots:{default:[oy]},$$scope:{ctx:Q}}}),Gn=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForMaskedLM import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
RoFormerForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Xn=new xe({}),Yn=new ne({props:{name:"class transformers.RoFormerForSequenceClassification",anchor:"transformers.RoFormerForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1236",parametersDescription:[{anchor:"transformers.RoFormerForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),os=new ne({props:{name:"forward",anchor:"transformers.RoFormerForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1246",parametersDescription:[{anchor:"transformers.RoFormerForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xt=new ze({props:{$$slots:{default:[ty]},$$scope:{ctx:Q}}}),ts=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForSequenceClassification import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerForSequenceClassification.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ns=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForSequenceClassification import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerForSequenceClassification.from_pretrained('junnyu/roformer_chinese_base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ss=new xe({}),rs=new ne({props:{name:"class transformers.RoFormerForMultipleChoice",anchor:"transformers.RoFormerForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1327",parametersDescription:[{anchor:"transformers.RoFormerForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ls=new ne({props:{name:"forward",anchor:"transformers.RoFormerForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1338",parametersDescription:[{anchor:"transformers.RoFormerForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Pt=new ze({props:{$$slots:{default:[ny]},$$scope:{ctx:Q}}}),ds=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForMultipleChoice import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerForMultipleChoice.from_pretrained('junnyu/roformer_chinese_base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),cs=new xe({}),ps=new ne({props:{name:"class transformers.RoFormerForTokenClassification",anchor:"transformers.RoFormerForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1418",parametersDescription:[{anchor:"transformers.RoFormerForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fs=new ne({props:{name:"forward",anchor:"transformers.RoFormerForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1430",parametersDescription:[{anchor:"transformers.RoFormerForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),At=new ze({props:{$$slots:{default:[sy]},$$scope:{ctx:Q}}}),us=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForTokenClassification import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerForTokenClassification.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is 
cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),gs=new xe({}),_s=new ne({props:{name:"class transformers.RoFormerForQuestionAnswering",anchor:"transformers.RoFormerForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1504",parametersDescription:[{anchor:"transformers.RoFormerForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ts=new ne({props:{name:"forward",anchor:"transformers.RoFormerForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_roformer.py#L1517",parametersDescription:[{anchor:"transformers.RoFormerForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.RoFormerForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Nt=new ze({props:{$$slots:{default:[ry]},$$scope:{ctx:Q}}}),ys=new qe({props:{code:`from transformers import RoFormerTokenizer, RoFormerForQuestionAnswering import torch tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = RoFormerForQuestionAnswering.from_pretrained('junnyu/roformer_chinese_base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) 
outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Fs=new xe({}),ws=new ne({props:{name:"class transformers.TFRoFormerModel",anchor:"transformers.TFRoFormerModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L809",parametersDescription:[{anchor:"transformers.TFRoFormerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),It=new ze({props:{$$slots:{default:[ay]},$$scope:{ctx:Q}}}),Es=new ne({props:{name:"call",anchor:"transformers.TFRoFormerModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L815",parametersDescription:[{anchor:"transformers.TFRoFormerModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRoFormerModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRoFormerModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRoFormerModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRoFormerModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRoFormerModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRoFormerModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRoFormerModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRoFormerModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),St=new ze({props:{$$slots:{default:[iy]},$$scope:{ctx:Q}}}),Ms=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerModel import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerModel.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),zs=new xe({}),Cs=new ne({props:{name:"class transformers.TFRoFormerForMaskedLM",anchor:"transformers.TFRoFormerForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L871",parametersDescription:[{anchor:"transformers.TFRoFormerForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ut=new ze({props:{$$slots:{default:[ly]},$$scope:{ctx:Q}}}),Ls=new ne({props:{name:"call",anchor:"transformers.TFRoFormerForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L887",parametersDescription:[{anchor:"transformers.TFRoFormerForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRoFormerForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRoFormerForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRoFormerForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRoFormerForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRoFormerForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRoFormerForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRoFormerForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRoFormerForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRoFormerForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Bt=new ze({props:{$$slots:{default:[dy]},$$scope:{ctx:Q}}}),As=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerForMaskedLM import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ds=new xe({}),Ns=new ne({props:{name:"class transformers.TFRoFormerForCausalLM",anchor:"transformers.TFRoFormerForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L966",parametersDescription:[{anchor:"transformers.TFRoFormerForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qt=new ze({props:{$$slots:{default:[cy]},$$scope:{ctx:Q}}}),Us=new ne({props:{name:"call",anchor:"transformers.TFRoFormerForCausalLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L979",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" 
>transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Bs=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerForCausalLM import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerForCausalLM.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForCausalLM.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Hs=new xe({}),Qs=new ne({props:{name:"class 
transformers.TFRoFormerForSequenceClassification",anchor:"transformers.TFRoFormerForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1093",parametersDescription:[{anchor:"transformers.TFRoFormerForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vt=new ze({props:{$$slots:{default:[py]},$$scope:{ctx:Q}}}),Gs=new ne({props:{name:"call",anchor:"transformers.TFRoFormerForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1102",parametersDescription:[{anchor:"transformers.TFRoFormerForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRoFormerForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Jt=new ze({props:{$$slots:{default:[my]},$$scope:{ctx:Q}}}),Xs=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerForSequenceClassification import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerForSequenceClassification.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ys=new xe({}),Zs=new ne({props:{name:"class transformers.TFRoFormerForMultipleChoice",anchor:"transformers.TFRoFormerForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1183",parametersDescription:[{anchor:"transformers.TFRoFormerForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xt=new ze({props:{$$slots:{default:[hy]},$$scope:{ctx:Q}}}),nr=new ne({props:{name:"call",anchor:"transformers.TFRoFormerForMultipleChoice.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1204",parametersDescription:[{anchor:"transformers.TFRoFormerForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRoFormerForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Yt=new ze({props:{$$slots:{default:[fy]},$$scope:{ctx:Q}}}),sr=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerForMultipleChoice import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerForMultipleChoice.from_pretrained('junnyu/roformer_chinese_base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),rr=new xe({}),ar=new ne({props:{name:"class transformers.TFRoFormerForTokenClassification",anchor:"transformers.TFRoFormerForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1329",parametersDescription:[{anchor:"transformers.TFRoFormerForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),en=new ze({props:{$$slots:{default:[uy]},$$scope:{ctx:Q}}}),cr=new ne({props:{name:"call",anchor:"transformers.TFRoFormerForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1341",parametersDescription:[{anchor:"transformers.TFRoFormerForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRoFormerForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRoFormerForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRoFormerForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRoFormerForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRoFormerForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRoFormerForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRoFormerForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRoFormerForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRoFormerForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),on=new ze({props:{$$slots:{default:[gy]},$$scope:{ctx:Q}}}),pr=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerForTokenClassification import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerForTokenClassification.from_pretrained('junnyu/roformer_chinese_base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs 
= model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),mr=new xe({}),hr=new ne({props:{name:"class transformers.TFRoFormerForQuestionAnswering",anchor:"transformers.TFRoFormerForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1422",parametersDescription:[{anchor:"transformers.TFRoFormerForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),nn=new ze({props:{$$slots:{default:[_y]},$$scope:{ctx:Q}}}),_r=new ne({props:{name:"call",anchor:"transformers.TFRoFormerForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/roformer/modeling_tf_roformer.py#L1433",parametersDescription:[{anchor:"transformers.TFRoFormerForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFRoFormerForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions 
weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),sn=new ze({props:{$$slots:{default:[ky]},$$scope:{ctx:Q}}}),kr=new qe({props:{code:`from transformers import RoFormerTokenizer, TFRoFormerForQuestionAnswering import tensorflow as tf tokenizer = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base') model = TFRoFormerForQuestionAnswering.from_pretrained('junnyu/roformer_chinese_base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;junnyu/roformer_chinese_base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){m=r("meta"),E=l(),g=r("h1"),u=r("a"),v=r("span"),y(k.$$.fragment),_=l(),M=r("span"),me=n("RoFormer"),V=l(),z=r("h2"),Y=r("a"),N=r("span"),y(ee.$$.fragment),he=l(),O=r("span"),fe=n("Overview"),le=l(),K=r("p"),L=n("The RoFormer model was proposed in "),oe=r("a"),J=n("RoFormer: Enhanced Transformer with Rotary Position Embedding"),C=n(" by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu."),x=l(),se=r("p"),W=n("The abstract from the paper is the following:"),de=l(),re=r("p"),I=r("em"),ue=n(`Position encoding in transformer architecture provides supervision for dependency modeling 
between elements at different positions in the sequence. We investigate various methods to encode positional information in transformer-based language models and propose a novel implementation named Rotary Position Embedding(RoPE). The proposed RoPE encodes absolute positional information with rotation matrix and naturally incorporates explicit relative position dependency in self-attention formulation. Notably, RoPE comes with valuable properties such as flexibility of being expand to any sequence lengths, decaying inter-token dependency with increasing relative distances, and capability of equipping the linear self-attention with relative position encoding. As a result, the enhanced transformer with rotary position embedding, or RoFormer, achieves superior performance in tasks with long texts. We release the theoretical analysis along with some preliminary experiment results on Chinese data. The undergoing experiment for English benchmark will soon be updated.`),ce=l(),q=r("p"),ge=n("Tips:"),S=l(),ae=r("ul"),ie=r("li"),U=n(`RoFormer is a BERT-like autoencoding model with rotary position embeddings. Rotary position embeddings have shown improved performance on classification tasks with long texts.`),pe=l(),G=r("p"),A=n("This model was contributed by "),te=r("a"),B=n("junnyu"),_e=n(". The original code can be found "),c=r("a"),T=n("here"),X=n("."),be=l(),ve=r("h2"),j=r("a"),we=r("span"),y(ye.$$.fragment),Re=l(),P=r("span"),H=n("RoFormerConfig"),$e=l(),Z=r("div"),y(D.$$.fragment),Ee=l(),Te=r("p"),ke=n("This is the configuration class to store the configuration of a "),Fe=r("a"),sc=n("RoFormerModel"),rc=n(`. It is used to instantiate an RoFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoFormer `),mn=r("a"),ac=n("junnyu/roformer_chinese_base"),ic=n(" architecture."),lc=l(),Oo=r("p"),dc=n("Configuration objects inherit from "),wr=r("a"),cc=n("PretrainedConfig"),pc=n(` and can be used to control the model outputs. Read the documentation from `),br=r("a"),mc=n("PretrainedConfig"),hc=n(" for more information."),fc=l(),ba=r("p"),uc=n("Example:"),gc=l(),y(hn.$$.fragment),jl=l(),Io=r("h2"),yt=r("a"),$a=r("span"),y(fn.$$.fragment),_c=l(),Ra=r("span"),kc=n("RoFormerTokenizer"),Pl=l(),Me=r("div"),y(un.$$.fragment),vc=l(),gn=r("p"),Tc=n("Construct a RoFormer tokenizer. Based on "),$r=r("em"),yc=n("Rust Jieba <"),_n=r("a"),Fc=n("https://pypi.org/project/rjieba/>"),wc=n("."),bc=l(),kn=r("p"),$c=n("This tokenizer inherits from "),Rr=r("a"),Rc=n("PreTrainedTokenizer"),Ec=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Mc=l(),Ea=r("p"),zc=n("Example:"),Cc=l(),y(vn.$$.fragment),qc=l(),wo=r("div"),y(Tn.$$.fragment),xc=l(),Ma=r("p"),jc=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format:`),Pc=l(),yn=r("ul"),Er=r("li"),Lc=n("single sequence: "),za=r("code"),Ac=n("[CLS] X [SEP]"),Dc=l(),Mr=r("li"),Nc=n("pair of sequences: "),Ca=r("code"),Oc=n("[CLS] A [SEP] B [SEP]"),Ic=l(),Ft=r("div"),y(Fn.$$.fragment),Sc=l(),wn=r("p"),Wc=n(`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),qa=r("code"),Uc=n("prepare_for_model"),Bc=n(" method."),Hc=l(),po=r("div"),y(bn.$$.fragment),Qc=l(),xa=r("p"),Kc=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer sequence pair mask has the following format:`),Vc=l(),y($n.$$.fragment),Jc=l(),So=r("p"),Gc=n("If "),ja=r("code"),Xc=n("token_ids_1"),Yc=n(" is "),Pa=r("code"),Zc=n("None"),ep=n(", this method only returns the first portion of the mask (0s)."),op=l(),La=r("div"),Ll=l(),Wo=r("h2"),wt=r("a"),Aa=r("span"),y(Rn.$$.fragment),tp=l(),Da=r("span"),np=n("RoFormerTokenizerFast"),Al=l(),Ce=r("div"),y(En.$$.fragment),sp=l(),Mn=r("p"),rp=n("Construct a \u201Cfast\u201D RoFormer tokenizer (backed by HuggingFace\u2019s "),Na=r("em"),ap=n("tokenizers"),ip=n(" library)."),lp=l(),bt=r("p"),zr=r("a"),dp=n("RoFormerTokenizerFast"),cp=n(" is almost identical to "),Cr=r("a"),pp=n("BertTokenizerFast"),mp=n(` and runs end-to-end tokenization: punctuation splitting and wordpiece. There are some difference between them when tokenizing Chinese.`),hp=l(),zn=r("p"),fp=n("This tokenizer inherits from "),qr=r("a"),up=n("PreTrainedTokenizerFast"),gp=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),_p=l(),Oa=r("p"),kp=n("Example:"),vp=l(),y(Cn.$$.fragment),Tp=l(),bo=r("div"),y(qn.$$.fragment),yp=l(),Ia=r("p"),Fp=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format:`),wp=l(),xn=r("ul"),xr=r("li"),bp=n("single sequence: "),Sa=r("code"),$p=n("[CLS] X [SEP]"),Rp=l(),jr=r("li"),Ep=n("pair of sequences: "),Wa=r("code"),Mp=n("[CLS] A [SEP] B [SEP]"),Dl=l(),Uo=r("h2"),$t=r("a"),Ua=r("span"),y(jn.$$.fragment),zp=l(),Ba=r("span"),Cp=n("RoFormerModel"),Nl=l(),Ue=r("div"),y(Pn.$$.fragment),qp=l(),Ln=r("p"),xp=n(`The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),An=r("a"),jp=n("torch.nn.Module"),Pp=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lp=l(),Dn=r("p"),Ap=n(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Nn=r("a"),Dp=n(`Attention is all you need`),Np=n(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Op=l(),Le=r("p"),Ip=n("To behave as an decoder the model needs to be initialized with the "),Ha=r("code"),Sp=n("is_decoder"),Wp=n(` argument of the configuration set to `),Qa=r("code"),Up=n("True"),Bp=n(". 
To be used in a Seq2Seq model, the model needs to initialized with both "),Ka=r("code"),Hp=n("is_decoder"),Qp=n(` argument and `),Va=r("code"),Kp=n("add_cross_attention"),Vp=n(" set to "),Ja=r("code"),Jp=n("True"),Gp=n("; an "),Ga=r("code"),Xp=n("encoder_hidden_states"),Yp=n(` is then expected as an input to the forward pass.`),Zp=l(),He=r("div"),y(On.$$.fragment),em=l(),Bo=r("p"),om=n("The "),Pr=r("a"),tm=n("RoFormerModel"),nm=n(" forward method, overrides the "),Xa=r("code"),sm=n("__call__"),rm=n(" special method."),am=l(),y(Rt.$$.fragment),im=l(),Ya=r("p"),lm=n("Example:"),dm=l(),y(In.$$.fragment),Ol=l(),Ho=r("h2"),Et=r("a"),Za=r("span"),y(Sn.$$.fragment),cm=l(),ei=r("span"),pm=n("RoFormerForCausalLM"),Il=l(),yo=r("div"),y(Wn.$$.fragment),mm=l(),Qo=r("p"),hm=n("RoFormer Model with a "),oi=r("code"),fm=n("language modeling"),um=n(` head on top for CLM fine-tuning. This model is a PyTorch `),Un=r("a"),gm=n("torch.nn.Module"),_m=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),km=l(),Qe=r("div"),y(Bn.$$.fragment),vm=l(),Ko=r("p"),Tm=n("The "),Lr=r("a"),ym=n("RoFormerForCausalLM"),Fm=n(" forward method, overrides the "),ti=r("code"),wm=n("__call__"),bm=n(" special method."),$m=l(),y(Mt.$$.fragment),Rm=l(),ni=r("p"),Em=n("Example:"),Mm=l(),y(Hn.$$.fragment),Sl=l(),Vo=r("h2"),zt=r("a"),si=r("span"),y(Qn.$$.fragment),zm=l(),ri=r("span"),Cm=n("RoFormerForMaskedLM"),Wl=l(),Fo=r("div"),y(Kn.$$.fragment),qm=l(),Jo=r("p"),xm=n("RoFormer Model with a "),ai=r("code"),jm=n("language modeling"),Pm=n(` head on top. This model is a PyTorch `),Vn=r("a"),Lm=n("torch.nn.Module"),Am=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dm=l(),Ke=r("div"),y(Jn.$$.fragment),Nm=l(),Go=r("p"),Om=n("The "),Ar=r("a"),Im=n("RoFormerForMaskedLM"),Sm=n(" forward method, overrides the "),ii=r("code"),Wm=n("__call__"),Um=n(" special method."),Bm=l(),y(Ct.$$.fragment),Hm=l(),li=r("p"),Qm=n("Example:"),Km=l(),y(Gn.$$.fragment),Ul=l(),Xo=r("h2"),qt=r("a"),di=r("span"),y(Xn.$$.fragment),Vm=l(),ci=r("span"),Jm=n("RoFormerForSequenceClassification"),Bl=l(),ao=r("div"),y(Yn.$$.fragment),Gm=l(),pi=r("p"),Xm=n(`RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ym=l(),Zn=r("p"),Zm=n("This model is a PyTorch "),es=r("a"),eh=n("torch.nn.Module"),oh=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),th=l(),je=r("div"),y(os.$$.fragment),nh=l(),Yo=r("p"),sh=n("The "),Dr=r("a"),rh=n("RoFormerForSequenceClassification"),ah=n(" forward method, overrides the "),mi=r("code"),ih=n("__call__"),lh=n(" special method."),dh=l(),y(xt.$$.fragment),ch=l(),hi=r("p"),ph=n("Example of single-label classification:"),mh=l(),y(ts.$$.fragment),hh=l(),fi=r("p"),fh=n("Example of multi-label classification:"),uh=l(),y(ns.$$.fragment),Hl=l(),Zo=r("h2"),jt=r("a"),ui=r("span"),y(ss.$$.fragment),gh=l(),gi=r("span"),_h=n("RoFormerForMultipleChoice"),Ql=l(),io=r("div"),y(rs.$$.fragment),kh=l(),_i=r("p"),vh=n(`RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Th=l(),as=r("p"),yh=n("This model is a PyTorch "),is=r("a"),Fh=n("torch.nn.Module"),wh=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bh=l(),Ve=r("div"),y(ls.$$.fragment),$h=l(),et=r("p"),Rh=n("The "),Nr=r("a"),Eh=n("RoFormerForMultipleChoice"),Mh=n(" forward method, overrides the "),ki=r("code"),zh=n("__call__"),Ch=n(" special method."),qh=l(),y(Pt.$$.fragment),xh=l(),vi=r("p"),jh=n("Example:"),Ph=l(),y(ds.$$.fragment),Kl=l(),ot=r("h2"),Lt=r("a"),Ti=r("span"),y(cs.$$.fragment),Lh=l(),yi=r("span"),Ah=n("RoFormerForTokenClassification"),Vl=l(),lo=r("div"),y(ps.$$.fragment),Dh=l(),Fi=r("p"),Nh=n(`RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Oh=l(),ms=r("p"),Ih=n("This model is a PyTorch "),hs=r("a"),Sh=n("torch.nn.Module"),Wh=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Uh=l(),Je=r("div"),y(fs.$$.fragment),Bh=l(),tt=r("p"),Hh=n("The "),Or=r("a"),Qh=n("RoFormerForTokenClassification"),Kh=n(" forward method, overrides the "),wi=r("code"),Vh=n("__call__"),Jh=n(" special method."),Gh=l(),y(At.$$.fragment),Xh=l(),bi=r("p"),Yh=n("Example:"),Zh=l(),y(us.$$.fragment),Jl=l(),nt=r("h2"),Dt=r("a"),$i=r("span"),y(gs.$$.fragment),ef=l(),Ri=r("span"),of=n("RoFormerForQuestionAnswering"),Gl=l(),co=r("div"),y(_s.$$.fragment),tf=l(),st=r("p"),nf=n(`RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ei=r("code"),sf=n("span start logits"),rf=n(" and "),Mi=r("code"),af=n("span end logits"),lf=n(")."),df=l(),ks=r("p"),cf=n("This model is a PyTorch "),vs=r("a"),pf=n("torch.nn.Module"),mf=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hf=l(),Ge=r("div"),y(Ts.$$.fragment),ff=l(),rt=r("p"),uf=n("The "),Ir=r("a"),gf=n("RoFormerForQuestionAnswering"),_f=n(" forward method, overrides the "),zi=r("code"),kf=n("__call__"),vf=n(" special method."),Tf=l(),y(Nt.$$.fragment),yf=l(),Ci=r("p"),Ff=n("Example:"),wf=l(),y(ys.$$.fragment),Xl=l(),at=r("h2"),Ot=r("a"),qi=r("span"),y(Fs.$$.fragment),bf=l(),xi=r("span"),$f=n("TFRoFormerModel"),Yl=l(),Ae=r("div"),y(ws.$$.fragment),Rf=l(),ji=r("p"),Ef=n("The bare RoFormer Model transformer outputing raw hidden-states without any specific head on top."),Mf=l(),bs=r("p"),zf=n("This model inherits from "),Sr=r("a"),Cf=n("TFPreTrainedModel"),qf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xf=l(),$s=r("p"),jf=n("This model is also a "),Rs=r("a"),Pf=n("tf.keras.Model"),Lf=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Af=l(),y(It.$$.fragment),Df=l(),Xe=r("div"),y(Es.$$.fragment),Nf=l(),it=r("p"),Of=n("The "),Wr=r("a"),If=n("TFRoFormerModel"),Sf=n(" forward method, overrides the "),Pi=r("code"),Wf=n("__call__"),Uf=n(" special method."),Bf=l(),y(St.$$.fragment),Hf=l(),Li=r("p"),Qf=n("Example:"),Kf=l(),y(Ms.$$.fragment),Zl=l(),lt=r("h2"),Wt=r("a"),Ai=r("span"),y(zs.$$.fragment),Vf=l(),Di=r("span"),Jf=n("TFRoFormerForMaskedLM"),ed=l(),De=r("div"),y(Cs.$$.fragment),Gf=l(),qs=r("p"),Xf=n("RoFormer Model with a "),Ni=r("code"),Yf=n("language modeling"),Zf=n(" head on top."),eu=l(),xs=r("p"),ou=n("This model inherits from "),Ur=r("a"),tu=n("TFPreTrainedModel"),nu=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),su=l(),js=r("p"),ru=n("This model is also a "),Ps=r("a"),au=n("tf.keras.Model"),iu=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),lu=l(),y(Ut.$$.fragment),du=l(),Ye=r("div"),y(Ls.$$.fragment),cu=l(),dt=r("p"),pu=n("The "),Br=r("a"),mu=n("TFRoFormerForMaskedLM"),hu=n(" forward method, overrides the "),Oi=r("code"),fu=n("__call__"),uu=n(" special method."),gu=l(),y(Bt.$$.fragment),_u=l(),Ii=r("p"),ku=n("Example:"),vu=l(),y(As.$$.fragment),od=l(),ct=r("h2"),Ht=r("a"),Si=r("span"),y(Ds.$$.fragment),Tu=l(),Wi=r("span"),yu=n("TFRoFormerForCausalLM"),td=l(),Ne=r("div"),y(Ns.$$.fragment),Fu=l(),Os=r("p"),wu=n("RoFormer Model with a "),Ui=r("code"),bu=n("language modeling"),$u=n(" head on top for CLM fine-tuning."),Ru=l(),Is=r("p"),Eu=n("This model inherits from "),Hr=r("a"),Mu=n("TFPreTrainedModel"),zu=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cu=l(),Ss=r("p"),qu=n("This model is also a "),Ws=r("a"),xu=n("tf.keras.Model"),ju=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Pu=l(),y(Qt.$$.fragment),Lu=l(),mo=r("div"),y(Us.$$.fragment),Au=l(),Be=r("p"),Du=n("labels ("),Bi=r("code"),Nu=n("tf.Tensor"),Ou=n(" or "),Hi=r("code"),Iu=n("np.ndarray"),Su=n(" of shape "),Qi=r("code"),Wu=n("(batch_size, sequence_length)"),Uu=n(", "),Ki=r("em"),Bu=n("optional"),Hu=n(`): Labels for computing the cross entropy classification loss. Indices should be in `),Vi=r("code"),Qu=n("[0, ..., config.vocab_size - 1]"),Ku=n("."),Vu=l(),Ji=r("p"),Ju=n("Example:"),Gu=l(),y(Bs.$$.fragment),nd=l(),pt=r("h2"),Kt=r("a"),Gi=r("span"),y(Hs.$$.fragment),Xu=l(),Xi=r("span"),Yu=n("TFRoFormerForSequenceClassification"),sd=l(),Oe=r("div"),y(Qs.$$.fragment),Zu=l(),Yi=r("p"),eg=n("RoFormer Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks."),og=l(),Ks=r("p"),tg=n("This model inherits from "),Qr=r("a"),ng=n("TFPreTrainedModel"),sg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rg=l(),Vs=r("p"),ag=n("This model is also a "),Js=r("a"),ig=n("tf.keras.Model"),lg=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),dg=l(),y(Vt.$$.fragment),cg=l(),Ze=r("div"),y(Gs.$$.fragment),pg=l(),mt=r("p"),mg=n("The "),Kr=r("a"),hg=n("TFRoFormerForSequenceClassification"),fg=n(" forward method, overrides the "),Zi=r("code"),ug=n("__call__"),gg=n(" special method."),_g=l(),y(Jt.$$.fragment),kg=l(),el=r("p"),vg=n("Example:"),Tg=l(),y(Xs.$$.fragment),rd=l(),ht=r("h2"),Gt=r("a"),ol=r("span"),y(Ys.$$.fragment),yg=l(),tl=r("span"),Fg=n("TFRoFormerForMultipleChoice"),ad=l(),Ie=r("div"),y(Zs.$$.fragment),wg=l(),nl=r("p"),bg=n(`RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),$g=l(),er=r("p"),Rg=n("This model inherits from "),Vr=r("a"),Eg=n("TFPreTrainedModel"),Mg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zg=l(),or=r("p"),Cg=n("This model is also a "),tr=r("a"),qg=n("tf.keras.Model"),xg=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jg=l(),y(Xt.$$.fragment),Pg=l(),eo=r("div"),y(nr.$$.fragment),Lg=l(),ft=r("p"),Ag=n("The "),Jr=r("a"),Dg=n("TFRoFormerForMultipleChoice"),Ng=n(" forward method, overrides the "),sl=r("code"),Og=n("__call__"),Ig=n(" special method."),Sg=l(),y(Yt.$$.fragment),Wg=l(),rl=r("p"),Ug=n("Example:"),Bg=l(),y(sr.$$.fragment),id=l(),ut=r("h2"),Zt=r("a"),al=r("span"),y(rr.$$.fragment),Hg=l(),il=r("span"),Qg=n("TFRoFormerForTokenClassification"),ld=l(),Se=r("div"),y(ar.$$.fragment),Kg=l(),ll=r("p"),Vg=n(`RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Jg=l(),ir=r("p"),Gg=n("This model inherits from "),Gr=r("a"),Xg=n("TFPreTrainedModel"),Yg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zg=l(),lr=r("p"),e_=n("This model is also a "),dr=r("a"),o_=n("tf.keras.Model"),t_=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),n_=l(),y(en.$$.fragment),s_=l(),oo=r("div"),y(cr.$$.fragment),r_=l(),gt=r("p"),a_=n("The "),Xr=r("a"),i_=n("TFRoFormerForTokenClassification"),l_=n(" forward method, overrides the "),dl=r("code"),d_=n("__call__"),c_=n(" special method."),p_=l(),y(on.$$.fragment),m_=l(),cl=r("p"),h_=n("Example:"),f_=l(),y(pr.$$.fragment),dd=l(),_t=r("h2"),tn=r("a"),pl=r("span"),y(mr.$$.fragment),u_=l(),ml=r("span"),g_=n("TFRoFormerForQuestionAnswering"),cd=l(),We=r("div"),y(hr.$$.fragment),__=l(),kt=r("p"),k_=n(`RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),hl=r("code"),v_=n("span start logits"),T_=n(" and "),fl=r("code"),y_=n("span end logits"),F_=n(")."),w_=l(),fr=r("p"),b_=n("This model inherits from "),Yr=r("a"),$_=n("TFPreTrainedModel"),R_=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),E_=l(),ur=r("p"),M_=n("This model is also a "),gr=r("a"),z_=n("tf.keras.Model"),C_=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),q_=l(),y(nn.$$.fragment),x_=l(),to=r("div"),y(_r.$$.fragment),j_=l(),vt=r("p"),P_=n("The "),Zr=r("a"),L_=n("TFRoFormerForQuestionAnswering"),A_=n(" forward method, overrides the "),ul=r("code"),D_=n("__call__"),N_=n(" special method."),O_=l(),y(sn.$$.fragment),I_=l(),gl=r("p"),S_=n("Example:"),W_=l(),y(kr.$$.fragment),this.h()},l(t){const f=YT('[data-svelte="svelte-1phssyn"]',document.head);m=a(f,"META",{name:!0,content:!0}),f.forEach(o),E=d(t),g=a(t,"H1",{class:!0});var vr=i(g);u=a(vr,"A",{id:!0,class:!0,href:!0});var _l=i(u);v=a(_l,"SPAN",{});var kl=i(v);F(k.$$.fragment,kl),kl.forEach(o),_l.forEach(o),_=d(vr),M=a(vr,"SPAN",{});var vl=i(M);me=s(vl,"RoFormer"),vl.forEach(o),vr.forEach(o),V=d(t),z=a(t,"H2",{class:!0});var Tr=i(z);Y=a(Tr,"A",{id:!0,class:!0,href:!0});var Tl=i(Y);N=a(Tl,"SPAN",{});var yl=i(N);F(ee.$$.fragment,yl),yl.forEach(o),Tl.forEach(o),he=d(Tr),O=a(Tr,"SPAN",{});var Fl=i(O);fe=s(Fl,"Overview"),Fl.forEach(o),Tr.forEach(o),le=d(t),K=a(t,"P",{});var yr=i(K);L=s(yr,"The RoFormer model was proposed in "),oe=a(yr,"A",{href:!0,rel:!0});var wl=i(oe);J=s(wl,"RoFormer: Enhanced Transformer with Rotary Position Embedding"),wl.forEach(o),C=s(yr," by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu."),yr.forEach(o),x=d(t),se=a(t,"P",{});var bl=i(se);W=s(bl,"The abstract from the paper is the following:"),bl.forEach(o),de=d(t),re=a(t,"P",{});var $l=i(re);I=a($l,"EM",{});var Rl=i(I);ue=s(Rl,`Position encoding in transformer architecture provides supervision for dependency modeling between elements at different positions in the sequence. We investigate various methods to encode positional information in transformer-based language models and propose a novel implementation named Rotary Position Embedding(RoPE). The proposed RoPE encodes absolute positional information with rotation matrix and naturally incorporates explicit relative position dependency in self-attention formulation. Notably, RoPE comes with valuable properties such as flexibility of being expand to any sequence lengths, decaying inter-token dependency with increasing relative distances, and capability of equipping the linear self-attention with relative position encoding. As a result, the enhanced transformer with rotary position embedding, or RoFormer, achieves superior performance in tasks with long texts. We release the theoretical analysis along with some preliminary experiment results on Chinese data. The undergoing experiment for English benchmark will soon be updated.`),Rl.forEach(o),$l.forEach(o),ce=d(t),q=a(t,"P",{});var El=i(q);ge=s(El,"Tips:"),El.forEach(o),S=d(t),ae=a(t,"UL",{});var Ml=i(ae);ie=a(Ml,"LI",{});var zl=i(ie);U=s(zl,`RoFormer is a BERT-like autoencoding model with rotary position embeddings. Rotary position embeddings have shown improved performance on classification tasks with long texts.`),zl.forEach(o),Ml.forEach(o),pe=d(t),G=a(t,"P",{});var Tt=i(G);A=s(Tt,"This model was contributed by "),te=a(Tt,"A",{href:!0,rel:!0});var Cl=i(te);B=s(Cl,"junnyu"),Cl.forEach(o),_e=s(Tt,". 
The original code can be found "),c=a(Tt,"A",{href:!0,rel:!0});var ql=i(c);T=s(ql,"here"),ql.forEach(o),X=s(Tt,"."),Tt.forEach(o),be=d(t),ve=a(t,"H2",{class:!0});var Fr=i(ve);j=a(Fr,"A",{id:!0,class:!0,href:!0});var V_=i(j);we=a(V_,"SPAN",{});var J_=i(we);F(ye.$$.fragment,J_),J_.forEach(o),V_.forEach(o),Re=d(Fr),P=a(Fr,"SPAN",{});var G_=i(P);H=s(G_,"RoFormerConfig"),G_.forEach(o),Fr.forEach(o),$e=d(t),Z=a(t,"DIV",{class:!0});var $o=i(Z);F(D.$$.fragment,$o),Ee=d($o),Te=a($o,"P",{});var ea=i(Te);ke=s(ea,"This is the configuration class to store the configuration of a "),Fe=a(ea,"A",{href:!0});var X_=i(Fe);sc=s(X_,"RoFormerModel"),X_.forEach(o),rc=s(ea,`. It is used to instantiate an RoFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoFormer `),mn=a(ea,"A",{href:!0,rel:!0});var Y_=i(mn);ac=s(Y_,"junnyu/roformer_chinese_base"),Y_.forEach(o),ic=s(ea," architecture."),ea.forEach(o),lc=d($o),Oo=a($o,"P",{});var oa=i(Oo);dc=s(oa,"Configuration objects inherit from "),wr=a(oa,"A",{href:!0});var Z_=i(wr);cc=s(Z_,"PretrainedConfig"),Z_.forEach(o),pc=s(oa,` and can be used to control the model outputs. Read the documentation from `),br=a(oa,"A",{href:!0});var ek=i(br);mc=s(ek,"PretrainedConfig"),ek.forEach(o),hc=s(oa," for more information."),oa.forEach(o),fc=d($o),ba=a($o,"P",{});var ok=i(ba);uc=s(ok,"Example:"),ok.forEach(o),gc=d($o),F(hn.$$.fragment,$o),$o.forEach(o),jl=d(t),Io=a(t,"H2",{class:!0});var md=i(Io);yt=a(md,"A",{id:!0,class:!0,href:!0});var tk=i(yt);$a=a(tk,"SPAN",{});var nk=i($a);F(fn.$$.fragment,nk),nk.forEach(o),tk.forEach(o),_c=d(md),Ra=a(md,"SPAN",{});var sk=i(Ra);kc=s(sk,"RoFormerTokenizer"),sk.forEach(o),md.forEach(o),Pl=d(t),Me=a(t,"DIV",{class:!0});var Pe=i(Me);F(un.$$.fragment,Pe),vc=d(Pe),gn=a(Pe,"P",{});var hd=i(gn);Tc=s(hd,"Construct a RoFormer tokenizer. Based on "),$r=a(hd,"EM",{});var U_=i($r);yc=s(U_,"Rust Jieba <"),_n=a(U_,"A",{href:!0,rel:!0});var rk=i(_n);Fc=s(rk,"https://pypi.org/project/rjieba/>"),rk.forEach(o),U_.forEach(o),wc=s(hd,"."),hd.forEach(o),bc=d(Pe),kn=a(Pe,"P",{});var fd=i(kn);$c=s(fd,"This tokenizer inherits from "),Rr=a(fd,"A",{href:!0});var ak=i(Rr);Rc=s(ak,"PreTrainedTokenizer"),ak.forEach(o),Ec=s(fd,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),fd.forEach(o),Mc=d(Pe),Ea=a(Pe,"P",{});var ik=i(Ea);zc=s(ik,"Example:"),ik.forEach(o),Cc=d(Pe),F(vn.$$.fragment,Pe),qc=d(Pe),wo=a(Pe,"DIV",{class:!0});var ta=i(wo);F(Tn.$$.fragment,ta),xc=d(ta),Ma=a(ta,"P",{});var lk=i(Ma);jc=s(lk,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format:`),lk.forEach(o),Pc=d(ta),yn=a(ta,"UL",{});var ud=i(yn);Er=a(ud,"LI",{});var B_=i(Er);Lc=s(B_,"single sequence: "),za=a(B_,"CODE",{});var dk=i(za);Ac=s(dk,"[CLS] X [SEP]"),dk.forEach(o),B_.forEach(o),Dc=d(ud),Mr=a(ud,"LI",{});var H_=i(Mr);Nc=s(H_,"pair of sequences: "),Ca=a(H_,"CODE",{});var ck=i(Ca);Oc=s(ck,"[CLS] A [SEP] B [SEP]"),ck.forEach(o),H_.forEach(o),ud.forEach(o),ta.forEach(o),Ic=d(Pe),Ft=a(Pe,"DIV",{class:!0});var gd=i(Ft);F(Fn.$$.fragment,gd),Sc=d(gd),wn=a(gd,"P",{});var _d=i(wn);Wc=s(_d,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),qa=a(_d,"CODE",{});var pk=i(qa);Uc=s(pk,"prepare_for_model"),pk.forEach(o),Bc=s(_d," method."),_d.forEach(o),gd.forEach(o),Hc=d(Pe),po=a(Pe,"DIV",{class:!0});var rn=i(po);F(bn.$$.fragment,rn),Qc=d(rn),xa=a(rn,"P",{});var mk=i(xa);Kc=s(mk,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer sequence pair mask has the following format:`),mk.forEach(o),Vc=d(rn),F($n.$$.fragment,rn),Jc=d(rn),So=a(rn,"P",{});var na=i(So);Gc=s(na,"If "),ja=a(na,"CODE",{});var hk=i(ja);Xc=s(hk,"token_ids_1"),hk.forEach(o),Yc=s(na," is "),Pa=a(na,"CODE",{});var fk=i(Pa);Zc=s(fk,"None"),fk.forEach(o),ep=s(na,", this method only returns the first portion of the mask (0s)."),na.forEach(o),rn.forEach(o),op=d(Pe),La=a(Pe,"DIV",{class:!0}),i(La).forEach(o),Pe.forEach(o),Ll=d(t),Wo=a(t,"H2",{class:!0});var kd=i(Wo);wt=a(kd,"A",{id:!0,class:!0,href:!0});var uk=i(wt);Aa=a(uk,"SPAN",{});var gk=i(Aa);F(Rn.$$.fragment,gk),gk.forEach(o),uk.forEach(o),tp=d(kd),Da=a(kd,"SPAN",{});var _k=i(Da);np=s(_k,"RoFormerTokenizerFast"),_k.forEach(o),kd.forEach(o),Al=d(t),Ce=a(t,"DIV",{class:!0});var no=i(Ce);F(En.$$.fragment,no),sp=d(no),Mn=a(no,"P",{});var vd=i(Mn);rp=s(vd,"Construct a \u201Cfast\u201D RoFormer tokenizer (backed by HuggingFace\u2019s "),Na=a(vd,"EM",{});var kk=i(Na);ap=s(kk,"tokenizers"),kk.forEach(o),ip=s(vd," library)."),vd.forEach(o),lp=d(no),bt=a(no,"P",{});var xl=i(bt);zr=a(xl,"A",{href:!0});var vk=i(zr);dp=s(vk,"RoFormerTokenizerFast"),vk.forEach(o),cp=s(xl," is almost identical to "),Cr=a(xl,"A",{href:!0});var Tk=i(Cr);pp=s(Tk,"BertTokenizerFast"),Tk.forEach(o),mp=s(xl,` and runs end-to-end tokenization: punctuation splitting and wordpiece. There are some difference between them when tokenizing Chinese.`),xl.forEach(o),hp=d(no),zn=a(no,"P",{});var Td=i(zn);fp=s(Td,"This tokenizer inherits from "),qr=a(Td,"A",{href:!0});var yk=i(qr);up=s(yk,"PreTrainedTokenizerFast"),yk.forEach(o),gp=s(Td,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Td.forEach(o),_p=d(no),Oa=a(no,"P",{});var Fk=i(Oa);kp=s(Fk,"Example:"),Fk.forEach(o),vp=d(no),F(Cn.$$.fragment,no),Tp=d(no),bo=a(no,"DIV",{class:!0});var sa=i(bo);F(qn.$$.fragment,sa),yp=d(sa),Ia=a(sa,"P",{});var wk=i(Ia);Fp=s(wk,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format:`),wk.forEach(o),wp=d(sa),xn=a(sa,"UL",{});var yd=i(xn);xr=a(yd,"LI",{});var Q_=i(xr);bp=s(Q_,"single sequence: "),Sa=a(Q_,"CODE",{});var bk=i(Sa);$p=s(bk,"[CLS] X [SEP]"),bk.forEach(o),Q_.forEach(o),Rp=d(yd),jr=a(yd,"LI",{});var K_=i(jr);Ep=s(K_,"pair of sequences: "),Wa=a(K_,"CODE",{});var $k=i(Wa);Mp=s($k,"[CLS] A [SEP] B [SEP]"),$k.forEach(o),K_.forEach(o),yd.forEach(o),sa.forEach(o),no.forEach(o),Dl=d(t),Uo=a(t,"H2",{class:!0});var Fd=i(Uo);$t=a(Fd,"A",{id:!0,class:!0,href:!0});var Rk=i($t);Ua=a(Rk,"SPAN",{});var Ek=i(Ua);F(jn.$$.fragment,Ek),Ek.forEach(o),Rk.forEach(o),zp=d(Fd),Ba=a(Fd,"SPAN",{});var Mk=i(Ba);Cp=s(Mk,"RoFormerModel"),Mk.forEach(o),Fd.forEach(o),Nl=d(t),Ue=a(t,"DIV",{class:!0});var Ro=i(Ue);F(Pn.$$.fragment,Ro),qp=d(Ro),Ln=a(Ro,"P",{});var wd=i(Ln);xp=s(wd,`The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top. 
This model is a PyTorch `),An=a(wd,"A",{href:!0,rel:!0});var zk=i(An);jp=s(zk,"torch.nn.Module"),zk.forEach(o),Pp=s(wd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wd.forEach(o),Lp=d(Ro),Dn=a(Ro,"P",{});var bd=i(Dn);Ap=s(bd,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Nn=a(bd,"A",{href:!0,rel:!0});var Ck=i(Nn);Dp=s(Ck,`Attention is all you need`),Ck.forEach(o),Np=s(bd,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),bd.forEach(o),Op=d(Ro),Le=a(Ro,"P",{});var so=i(Le);Ip=s(so,"To behave as an decoder the model needs to be initialized with the "),Ha=a(so,"CODE",{});var qk=i(Ha);Sp=s(qk,"is_decoder"),qk.forEach(o),Wp=s(so,` argument of the configuration set to `),Qa=a(so,"CODE",{});var xk=i(Qa);Up=s(xk,"True"),xk.forEach(o),Bp=s(so,". To be used in a Seq2Seq model, the model needs to initialized with both "),Ka=a(so,"CODE",{});var jk=i(Ka);Hp=s(jk,"is_decoder"),jk.forEach(o),Qp=s(so,` argument and `),Va=a(so,"CODE",{});var Pk=i(Va);Kp=s(Pk,"add_cross_attention"),Pk.forEach(o),Vp=s(so," set to "),Ja=a(so,"CODE",{});var Lk=i(Ja);Jp=s(Lk,"True"),Lk.forEach(o),Gp=s(so,"; an "),Ga=a(so,"CODE",{});var Ak=i(Ga);Xp=s(Ak,"encoder_hidden_states"),Ak.forEach(o),Yp=s(so,` is then expected as an input to the forward pass.`),so.forEach(o),Zp=d(Ro),He=a(Ro,"DIV",{class:!0});var Eo=i(He);F(On.$$.fragment,Eo),em=d(Eo),Bo=a(Eo,"P",{});var ra=i(Bo);om=s(ra,"The "),Pr=a(ra,"A",{href:!0});var Dk=i(Pr);tm=s(Dk,"RoFormerModel"),Dk.forEach(o),nm=s(ra," forward method, overrides the "),Xa=a(ra,"CODE",{});var Nk=i(Xa);sm=s(Nk,"__call__"),Nk.forEach(o),rm=s(ra," special method."),ra.forEach(o),am=d(Eo),F(Rt.$$.fragment,Eo),im=d(Eo),Ya=a(Eo,"P",{});var Ok=i(Ya);lm=s(Ok,"Example:"),Ok.forEach(o),dm=d(Eo),F(In.$$.fragment,Eo),Eo.forEach(o),Ro.forEach(o),Ol=d(t),Ho=a(t,"H2",{class:!0});var $d=i(Ho);Et=a($d,"A",{id:!0,class:!0,href:!0});var Ik=i(Et);Za=a(Ik,"SPAN",{});var Sk=i(Za);F(Sn.$$.fragment,Sk),Sk.forEach(o),Ik.forEach(o),cm=d($d),ei=a($d,"SPAN",{});var Wk=i(ei);pm=s(Wk,"RoFormerForCausalLM"),Wk.forEach(o),$d.forEach(o),Il=d(t),yo=a(t,"DIV",{class:!0});var aa=i(yo);F(Wn.$$.fragment,aa),mm=d(aa),Qo=a(aa,"P",{});var ia=i(Qo);hm=s(ia,"RoFormer Model with a "),oi=a(ia,"CODE",{});var Uk=i(oi);fm=s(Uk,"language modeling"),Uk.forEach(o),um=s(ia,` head on top for CLM fine-tuning. This model is a PyTorch `),Un=a(ia,"A",{href:!0,rel:!0});var Bk=i(Un);gm=s(Bk,"torch.nn.Module"),Bk.forEach(o),_m=s(ia,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ia.forEach(o),km=d(aa),Qe=a(aa,"DIV",{class:!0});var Mo=i(Qe);F(Bn.$$.fragment,Mo),vm=d(Mo),Ko=a(Mo,"P",{});var la=i(Ko);Tm=s(la,"The "),Lr=a(la,"A",{href:!0});var Hk=i(Lr);ym=s(Hk,"RoFormerForCausalLM"),Hk.forEach(o),Fm=s(la," forward method, overrides the "),ti=a(la,"CODE",{});var Qk=i(ti);wm=s(Qk,"__call__"),Qk.forEach(o),bm=s(la," special method."),la.forEach(o),$m=d(Mo),F(Mt.$$.fragment,Mo),Rm=d(Mo),ni=a(Mo,"P",{});var Kk=i(ni);Em=s(Kk,"Example:"),Kk.forEach(o),Mm=d(Mo),F(Hn.$$.fragment,Mo),Mo.forEach(o),aa.forEach(o),Sl=d(t),Vo=a(t,"H2",{class:!0});var Rd=i(Vo);zt=a(Rd,"A",{id:!0,class:!0,href:!0});var Vk=i(zt);si=a(Vk,"SPAN",{});var Jk=i(si);F(Qn.$$.fragment,Jk),Jk.forEach(o),Vk.forEach(o),zm=d(Rd),ri=a(Rd,"SPAN",{});var Gk=i(ri);Cm=s(Gk,"RoFormerForMaskedLM"),Gk.forEach(o),Rd.forEach(o),Wl=d(t),Fo=a(t,"DIV",{class:!0});var da=i(Fo);F(Kn.$$.fragment,da),qm=d(da),Jo=a(da,"P",{});var ca=i(Jo);xm=s(ca,"RoFormer Model with a "),ai=a(ca,"CODE",{});var Xk=i(ai);jm=s(Xk,"language modeling"),Xk.forEach(o),Pm=s(ca,` head on top. This model is a PyTorch `),Vn=a(ca,"A",{href:!0,rel:!0});var Yk=i(Vn);Lm=s(Yk,"torch.nn.Module"),Yk.forEach(o),Am=s(ca,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ca.forEach(o),Dm=d(da),Ke=a(da,"DIV",{class:!0});var zo=i(Ke);F(Jn.$$.fragment,zo),Nm=d(zo),Go=a(zo,"P",{});var pa=i(Go);Om=s(pa,"The "),Ar=a(pa,"A",{href:!0});var Zk=i(Ar);Im=s(Zk,"RoFormerForMaskedLM"),Zk.forEach(o),Sm=s(pa," forward method, overrides the "),ii=a(pa,"CODE",{});var ev=i(ii);Wm=s(ev,"__call__"),ev.forEach(o),Um=s(pa," special method."),pa.forEach(o),Bm=d(zo),F(Ct.$$.fragment,zo),Hm=d(zo),li=a(zo,"P",{});var ov=i(li);Qm=s(ov,"Example:"),ov.forEach(o),Km=d(zo),F(Gn.$$.fragment,zo),zo.forEach(o),da.forEach(o),Ul=d(t),Xo=a(t,"H2",{class:!0});var Ed=i(Xo);qt=a(Ed,"A",{id:!0,class:!0,href:!0});var tv=i(qt);di=a(tv,"SPAN",{});var nv=i(di);F(Xn.$$.fragment,nv),nv.forEach(o),tv.forEach(o),Vm=d(Ed),ci=a(Ed,"SPAN",{});var sv=i(ci);Jm=s(sv,"RoFormerForSequenceClassification"),sv.forEach(o),Ed.forEach(o),Bl=d(t),ao=a(t,"DIV",{class:!0});var an=i(ao);F(Yn.$$.fragment,an),Gm=d(an),pi=a(an,"P",{});var rv=i(pi);Xm=s(rv,`RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),rv.forEach(o),Ym=d(an),Zn=a(an,"P",{});var Md=i(Zn);Zm=s(Md,"This model is a PyTorch "),es=a(Md,"A",{href:!0,rel:!0});var av=i(es);eh=s(av,"torch.nn.Module"),av.forEach(o),oh=s(Md,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Md.forEach(o),th=d(an),je=a(an,"DIV",{class:!0});var ro=i(je);F(os.$$.fragment,ro),nh=d(ro),Yo=a(ro,"P",{});var ma=i(Yo);sh=s(ma,"The "),Dr=a(ma,"A",{href:!0});var iv=i(Dr);rh=s(iv,"RoFormerForSequenceClassification"),iv.forEach(o),ah=s(ma," forward method, overrides the "),mi=a(ma,"CODE",{});var lv=i(mi);ih=s(lv,"__call__"),lv.forEach(o),lh=s(ma," special method."),ma.forEach(o),dh=d(ro),F(xt.$$.fragment,ro),ch=d(ro),hi=a(ro,"P",{});var dv=i(hi);ph=s(dv,"Example of single-label classification:"),dv.forEach(o),mh=d(ro),F(ts.$$.fragment,ro),hh=d(ro),fi=a(ro,"P",{});var cv=i(fi);fh=s(cv,"Example of multi-label classification:"),cv.forEach(o),uh=d(ro),F(ns.$$.fragment,ro),ro.forEach(o),an.forEach(o),Hl=d(t),Zo=a(t,"H2",{class:!0});var zd=i(Zo);jt=a(zd,"A",{id:!0,class:!0,href:!0});var pv=i(jt);ui=a(pv,"SPAN",{});var mv=i(ui);F(ss.$$.fragment,mv),mv.forEach(o),pv.forEach(o),gh=d(zd),gi=a(zd,"SPAN",{});var hv=i(gi);_h=s(hv,"RoFormerForMultipleChoice"),hv.forEach(o),zd.forEach(o),Ql=d(t),io=a(t,"DIV",{class:!0});var ln=i(io);F(rs.$$.fragment,ln),kh=d(ln),_i=a(ln,"P",{});var fv=i(_i);vh=s(fv,`RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),fv.forEach(o),Th=d(ln),as=a(ln,"P",{});var Cd=i(as);yh=s(Cd,"This model is a PyTorch "),is=a(Cd,"A",{href:!0,rel:!0});var uv=i(is);Fh=s(uv,"torch.nn.Module"),uv.forEach(o),wh=s(Cd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cd.forEach(o),bh=d(ln),Ve=a(ln,"DIV",{class:!0});var Co=i(Ve);F(ls.$$.fragment,Co),$h=d(Co),et=a(Co,"P",{});var ha=i(et);Rh=s(ha,"The "),Nr=a(ha,"A",{href:!0});var gv=i(Nr);Eh=s(gv,"RoFormerForMultipleChoice"),gv.forEach(o),Mh=s(ha," forward method, overrides the "),ki=a(ha,"CODE",{});var _v=i(ki);zh=s(_v,"__call__"),_v.forEach(o),Ch=s(ha," special method."),ha.forEach(o),qh=d(Co),F(Pt.$$.fragment,Co),xh=d(Co),vi=a(Co,"P",{});var kv=i(vi);jh=s(kv,"Example:"),kv.forEach(o),Ph=d(Co),F(ds.$$.fragment,Co),Co.forEach(o),ln.forEach(o),Kl=d(t),ot=a(t,"H2",{class:!0});var qd=i(ot);Lt=a(qd,"A",{id:!0,class:!0,href:!0});var vv=i(Lt);Ti=a(vv,"SPAN",{});var Tv=i(Ti);F(cs.$$.fragment,Tv),Tv.forEach(o),vv.forEach(o),Lh=d(qd),yi=a(qd,"SPAN",{});var yv=i(yi);Ah=s(yv,"RoFormerForTokenClassification"),yv.forEach(o),qd.forEach(o),Vl=d(t),lo=a(t,"DIV",{class:!0});var dn=i(lo);F(ps.$$.fragment,dn),Dh=d(dn),Fi=a(dn,"P",{});var Fv=i(Fi);Nh=s(Fv,`RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Fv.forEach(o),Oh=d(dn),ms=a(dn,"P",{});var xd=i(ms);Ih=s(xd,"This model is a PyTorch "),hs=a(xd,"A",{href:!0,rel:!0});var wv=i(hs);Sh=s(wv,"torch.nn.Module"),wv.forEach(o),Wh=s(xd,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xd.forEach(o),Uh=d(dn),Je=a(dn,"DIV",{class:!0});var qo=i(Je);F(fs.$$.fragment,qo),Bh=d(qo),tt=a(qo,"P",{});var fa=i(tt);Hh=s(fa,"The "),Or=a(fa,"A",{href:!0});var bv=i(Or);Qh=s(bv,"RoFormerForTokenClassification"),bv.forEach(o),Kh=s(fa," forward method, overrides the "),wi=a(fa,"CODE",{});var $v=i(wi);Vh=s($v,"__call__"),$v.forEach(o),Jh=s(fa," special method."),fa.forEach(o),Gh=d(qo),F(At.$$.fragment,qo),Xh=d(qo),bi=a(qo,"P",{});var Rv=i(bi);Yh=s(Rv,"Example:"),Rv.forEach(o),Zh=d(qo),F(us.$$.fragment,qo),qo.forEach(o),dn.forEach(o),Jl=d(t),nt=a(t,"H2",{class:!0});var jd=i(nt);Dt=a(jd,"A",{id:!0,class:!0,href:!0});var Ev=i(Dt);$i=a(Ev,"SPAN",{});var Mv=i($i);F(gs.$$.fragment,Mv),Mv.forEach(o),Ev.forEach(o),ef=d(jd),Ri=a(jd,"SPAN",{});var zv=i(Ri);of=s(zv,"RoFormerForQuestionAnswering"),zv.forEach(o),jd.forEach(o),Gl=d(t),co=a(t,"DIV",{class:!0});var cn=i(co);F(_s.$$.fragment,cn),tf=d(cn),st=a(cn,"P",{});var ua=i(st);nf=s(ua,`RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ei=a(ua,"CODE",{});var Cv=i(Ei);sf=s(Cv,"span start logits"),Cv.forEach(o),rf=s(ua," and "),Mi=a(ua,"CODE",{});var qv=i(Mi);af=s(qv,"span end logits"),qv.forEach(o),lf=s(ua,")."),ua.forEach(o),df=d(cn),ks=a(cn,"P",{});var Pd=i(ks);cf=s(Pd,"This model is a PyTorch "),vs=a(Pd,"A",{href:!0,rel:!0});var xv=i(vs);pf=s(xv,"torch.nn.Module"),xv.forEach(o),mf=s(Pd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pd.forEach(o),hf=d(cn),Ge=a(cn,"DIV",{class:!0});var xo=i(Ge);F(Ts.$$.fragment,xo),ff=d(xo),rt=a(xo,"P",{});var ga=i(rt);uf=s(ga,"The "),Ir=a(ga,"A",{href:!0});var jv=i(Ir);gf=s(jv,"RoFormerForQuestionAnswering"),jv.forEach(o),_f=s(ga," forward method, overrides the "),zi=a(ga,"CODE",{});var Pv=i(zi);kf=s(Pv,"__call__"),Pv.forEach(o),vf=s(ga," special method."),ga.forEach(o),Tf=d(xo),F(Nt.$$.fragment,xo),yf=d(xo),Ci=a(xo,"P",{});var Lv=i(Ci);Ff=s(Lv,"Example:"),Lv.forEach(o),wf=d(xo),F(ys.$$.fragment,xo),xo.forEach(o),cn.forEach(o),Xl=d(t),at=a(t,"H2",{class:!0});var Ld=i(at);Ot=a(Ld,"A",{id:!0,class:!0,href:!0});var Av=i(Ot);qi=a(Av,"SPAN",{});var Dv=i(qi);F(Fs.$$.fragment,Dv),Dv.forEach(o),Av.forEach(o),bf=d(Ld),xi=a(Ld,"SPAN",{});var Nv=i(xi);$f=s(Nv,"TFRoFormerModel"),Nv.forEach(o),Ld.forEach(o),Yl=d(t),Ae=a(t,"DIV",{class:!0});var ho=i(Ae);F(ws.$$.fragment,ho),Rf=d(ho),ji=a(ho,"P",{});var Ov=i(ji);Ef=s(Ov,"The bare RoFormer Model transformer outputing raw hidden-states without any specific head on top."),Ov.forEach(o),Mf=d(ho),bs=a(ho,"P",{});var Ad=i(bs);zf=s(Ad,"This model inherits from "),Sr=a(Ad,"A",{href:!0});var Iv=i(Sr);Cf=s(Iv,"TFPreTrainedModel"),Iv.forEach(o),qf=s(Ad,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ad.forEach(o),xf=d(ho),$s=a(ho,"P",{});var Dd=i($s);jf=s(Dd,"This model is also a "),Rs=a(Dd,"A",{href:!0,rel:!0});var Sv=i(Rs);Pf=s(Sv,"tf.keras.Model"),Sv.forEach(o),Lf=s(Dd,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Dd.forEach(o),Af=d(ho),F(It.$$.fragment,ho),Df=d(ho),Xe=a(ho,"DIV",{class:!0});var jo=i(Xe);F(Es.$$.fragment,jo),Nf=d(jo),it=a(jo,"P",{});var _a=i(it);Of=s(_a,"The "),Wr=a(_a,"A",{href:!0});var Wv=i(Wr);If=s(Wv,"TFRoFormerModel"),Wv.forEach(o),Sf=s(_a," forward method, overrides the "),Pi=a(_a,"CODE",{});var Uv=i(Pi);Wf=s(Uv,"__call__"),Uv.forEach(o),Uf=s(_a," special method."),_a.forEach(o),Bf=d(jo),F(St.$$.fragment,jo),Hf=d(jo),Li=a(jo,"P",{});var Bv=i(Li);Qf=s(Bv,"Example:"),Bv.forEach(o),Kf=d(jo),F(Ms.$$.fragment,jo),jo.forEach(o),ho.forEach(o),Zl=d(t),lt=a(t,"H2",{class:!0});var Nd=i(lt);Wt=a(Nd,"A",{id:!0,class:!0,href:!0});var Hv=i(Wt);Ai=a(Hv,"SPAN",{});var Qv=i(Ai);F(zs.$$.fragment,Qv),Qv.forEach(o),Hv.forEach(o),Vf=d(Nd),Di=a(Nd,"SPAN",{});var Kv=i(Di);Jf=s(Kv,"TFRoFormerForMaskedLM"),Kv.forEach(o),Nd.forEach(o),ed=d(t),De=a(t,"DIV",{class:!0});var fo=i(De);F(Cs.$$.fragment,fo),Gf=d(fo),qs=a(fo,"P",{});var Od=i(qs);Xf=s(Od,"RoFormer Model with a "),Ni=a(Od,"CODE",{});var Vv=i(Ni);Yf=s(Vv,"language modeling"),Vv.forEach(o),Zf=s(Od," head on top."),Od.forEach(o),eu=d(fo),xs=a(fo,"P",{});var Id=i(xs);ou=s(Id,"This model inherits from "),Ur=a(Id,"A",{href:!0});var Jv=i(Ur);tu=s(Jv,"TFPreTrainedModel"),Jv.forEach(o),nu=s(Id,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Id.forEach(o),su=d(fo),js=a(fo,"P",{});var Sd=i(js);ru=s(Sd,"This model is also a "),Ps=a(Sd,"A",{href:!0,rel:!0});var Gv=i(Ps);au=s(Gv,"tf.keras.Model"),Gv.forEach(o),iu=s(Sd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sd.forEach(o),lu=d(fo),F(Ut.$$.fragment,fo),du=d(fo),Ye=a(fo,"DIV",{class:!0});var Po=i(Ye);F(Ls.$$.fragment,Po),cu=d(Po),dt=a(Po,"P",{});var ka=i(dt);pu=s(ka,"The "),Br=a(ka,"A",{href:!0});var Xv=i(Br);mu=s(Xv,"TFRoFormerForMaskedLM"),Xv.forEach(o),hu=s(ka," forward method, overrides the "),Oi=a(ka,"CODE",{});var Yv=i(Oi);fu=s(Yv,"__call__"),Yv.forEach(o),uu=s(ka," special method."),ka.forEach(o),gu=d(Po),F(Bt.$$.fragment,Po),_u=d(Po),Ii=a(Po,"P",{});var Zv=i(Ii);ku=s(Zv,"Example:"),Zv.forEach(o),vu=d(Po),F(As.$$.fragment,Po),Po.forEach(o),fo.forEach(o),od=d(t),ct=a(t,"H2",{class:!0});var Wd=i(ct);Ht=a(Wd,"A",{id:!0,class:!0,href:!0});var eT=i(Ht);Si=a(eT,"SPAN",{});var oT=i(Si);F(Ds.$$.fragment,oT),oT.forEach(o),eT.forEach(o),Tu=d(Wd),Wi=a(Wd,"SPAN",{});var tT=i(Wi);yu=s(tT,"TFRoFormerForCausalLM"),tT.forEach(o),Wd.forEach(o),td=d(t),Ne=a(t,"DIV",{class:!0});var uo=i(Ne);F(Ns.$$.fragment,uo),Fu=d(uo),Os=a(uo,"P",{});var Ud=i(Os);wu=s(Ud,"RoFormer Model with a "),Ui=a(Ud,"CODE",{});var nT=i(Ui);bu=s(nT,"language modeling"),nT.forEach(o),$u=s(Ud," head on top for CLM fine-tuning."),Ud.forEach(o),Ru=d(uo),Is=a(uo,"P",{});var Bd=i(Is);Eu=s(Bd,"This model inherits from "),Hr=a(Bd,"A",{href:!0});var sT=i(Hr);Mu=s(sT,"TFPreTrainedModel"),sT.forEach(o),zu=s(Bd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bd.forEach(o),Cu=d(uo),Ss=a(uo,"P",{});var Hd=i(Ss);qu=s(Hd,"This model is also a "),Ws=a(Hd,"A",{href:!0,rel:!0});var rT=i(Ws);xu=s(rT,"tf.keras.Model"),rT.forEach(o),ju=s(Hd,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hd.forEach(o),Pu=d(uo),F(Qt.$$.fragment,uo),Lu=d(uo),mo=a(uo,"DIV",{class:!0});var pn=i(mo);F(Us.$$.fragment,pn),Au=d(pn),Be=a(pn,"P",{});var go=i(Be);Du=s(go,"labels ("),Bi=a(go,"CODE",{});var aT=i(Bi);Nu=s(aT,"tf.Tensor"),aT.forEach(o),Ou=s(go," or "),Hi=a(go,"CODE",{});var iT=i(Hi);Iu=s(iT,"np.ndarray"),iT.forEach(o),Su=s(go," of shape "),Qi=a(go,"CODE",{});var lT=i(Qi);Wu=s(lT,"(batch_size, sequence_length)"),lT.forEach(o),Uu=s(go,", "),Ki=a(go,"EM",{});var dT=i(Ki);Bu=s(dT,"optional"),dT.forEach(o),Hu=s(go,`): Labels for computing the cross entropy classification loss. Indices should be in `),Vi=a(go,"CODE",{});var cT=i(Vi);Qu=s(cT,"[0, ..., config.vocab_size - 1]"),cT.forEach(o),Ku=s(go,"."),go.forEach(o),Vu=d(pn),Ji=a(pn,"P",{});var pT=i(Ji);Ju=s(pT,"Example:"),pT.forEach(o),Gu=d(pn),F(Bs.$$.fragment,pn),pn.forEach(o),uo.forEach(o),nd=d(t),pt=a(t,"H2",{class:!0});var Qd=i(pt);Kt=a(Qd,"A",{id:!0,class:!0,href:!0});var mT=i(Kt);Gi=a(mT,"SPAN",{});var hT=i(Gi);F(Hs.$$.fragment,hT),hT.forEach(o),mT.forEach(o),Xu=d(Qd),Xi=a(Qd,"SPAN",{});var fT=i(Xi);Yu=s(fT,"TFRoFormerForSequenceClassification"),fT.forEach(o),Qd.forEach(o),sd=d(t),Oe=a(t,"DIV",{class:!0});var _o=i(Oe);F(Qs.$$.fragment,_o),Zu=d(_o),Yi=a(_o,"P",{});var uT=i(Yi);eg=s(uT,"RoFormer Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks."),uT.forEach(o),og=d(_o),Ks=a(_o,"P",{});var Kd=i(Ks);tg=s(Kd,"This model inherits from "),Qr=a(Kd,"A",{href:!0});var gT=i(Qr);ng=s(gT,"TFPreTrainedModel"),gT.forEach(o),sg=s(Kd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kd.forEach(o),rg=d(_o),Vs=a(_o,"P",{});var Vd=i(Vs);ag=s(Vd,"This model is also a "),Js=a(Vd,"A",{href:!0,rel:!0});var _T=i(Js);ig=s(_T,"tf.keras.Model"),_T.forEach(o),lg=s(Vd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vd.forEach(o),dg=d(_o),F(Vt.$$.fragment,_o),cg=d(_o),Ze=a(_o,"DIV",{class:!0});var Lo=i(Ze);F(Gs.$$.fragment,Lo),pg=d(Lo),mt=a(Lo,"P",{});var va=i(mt);mg=s(va,"The "),Kr=a(va,"A",{href:!0});var kT=i(Kr);hg=s(kT,"TFRoFormerForSequenceClassification"),kT.forEach(o),fg=s(va," forward method, overrides the "),Zi=a(va,"CODE",{});var vT=i(Zi);ug=s(vT,"__call__"),vT.forEach(o),gg=s(va," special method."),va.forEach(o),_g=d(Lo),F(Jt.$$.fragment,Lo),kg=d(Lo),el=a(Lo,"P",{});var TT=i(el);vg=s(TT,"Example:"),TT.forEach(o),Tg=d(Lo),F(Xs.$$.fragment,Lo),Lo.forEach(o),_o.forEach(o),rd=d(t),ht=a(t,"H2",{class:!0});var Jd=i(ht);Gt=a(Jd,"A",{id:!0,class:!0,href:!0});var yT=i(Gt);ol=a(yT,"SPAN",{});var FT=i(ol);F(Ys.$$.fragment,FT),FT.forEach(o),yT.forEach(o),yg=d(Jd),tl=a(Jd,"SPAN",{});var wT=i(tl);Fg=s(wT,"TFRoFormerForMultipleChoice"),wT.forEach(o),Jd.forEach(o),ad=d(t),Ie=a(t,"DIV",{class:!0});var ko=i(Ie);F(Zs.$$.fragment,ko),wg=d(ko),nl=a(ko,"P",{});var bT=i(nl);bg=s(bT,`RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),bT.forEach(o),$g=d(ko),er=a(ko,"P",{});var Gd=i(er);Rg=s(Gd,"This model inherits from "),Vr=a(Gd,"A",{href:!0});var $T=i(Vr);Eg=s($T,"TFPreTrainedModel"),$T.forEach(o),Mg=s(Gd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gd.forEach(o),zg=d(ko),or=a(ko,"P",{});var Xd=i(or);Cg=s(Xd,"This model is also a "),tr=a(Xd,"A",{href:!0,rel:!0});var RT=i(tr);qg=s(RT,"tf.keras.Model"),RT.forEach(o),xg=s(Xd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Xd.forEach(o),jg=d(ko),F(Xt.$$.fragment,ko),Pg=d(ko),eo=a(ko,"DIV",{class:!0});var Ao=i(eo);F(nr.$$.fragment,Ao),Lg=d(Ao),ft=a(Ao,"P",{});var Ta=i(ft);Ag=s(Ta,"The "),Jr=a(Ta,"A",{href:!0});var ET=i(Jr);Dg=s(ET,"TFRoFormerForMultipleChoice"),ET.forEach(o),Ng=s(Ta," forward method, overrides the "),sl=a(Ta,"CODE",{});var MT=i(sl);Og=s(MT,"__call__"),MT.forEach(o),Ig=s(Ta," special method."),Ta.forEach(o),Sg=d(Ao),F(Yt.$$.fragment,Ao),Wg=d(Ao),rl=a(Ao,"P",{});var zT=i(rl);Ug=s(zT,"Example:"),zT.forEach(o),Bg=d(Ao),F(sr.$$.fragment,Ao),Ao.forEach(o),ko.forEach(o),id=d(t),ut=a(t,"H2",{class:!0});var Yd=i(ut);Zt=a(Yd,"A",{id:!0,class:!0,href:!0});var CT=i(Zt);al=a(CT,"SPAN",{});var qT=i(al);F(rr.$$.fragment,qT),qT.forEach(o),CT.forEach(o),Hg=d(Yd),il=a(Yd,"SPAN",{});var xT=i(il);Qg=s(xT,"TFRoFormerForTokenClassification"),xT.forEach(o),Yd.forEach(o),ld=d(t),Se=a(t,"DIV",{class:!0});var vo=i(Se);F(ar.$$.fragment,vo),Kg=d(vo),ll=a(vo,"P",{});var jT=i(ll);Vg=s(jT,`RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),jT.forEach(o),Jg=d(vo),ir=a(vo,"P",{});var Zd=i(ir);Gg=s(Zd,"This model inherits from "),Gr=a(Zd,"A",{href:!0});var PT=i(Gr);Xg=s(PT,"TFPreTrainedModel"),PT.forEach(o),Yg=s(Zd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zd.forEach(o),Zg=d(vo),lr=a(vo,"P",{});var ec=i(lr);e_=s(ec,"This model is also a "),dr=a(ec,"A",{href:!0,rel:!0});var LT=i(dr);o_=s(LT,"tf.keras.Model"),LT.forEach(o),t_=s(ec,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ec.forEach(o),n_=d(vo),F(en.$$.fragment,vo),s_=d(vo),oo=a(vo,"DIV",{class:!0});var Do=i(oo);F(cr.$$.fragment,Do),r_=d(Do),gt=a(Do,"P",{});var ya=i(gt);a_=s(ya,"The "),Xr=a(ya,"A",{href:!0});var AT=i(Xr);i_=s(AT,"TFRoFormerForTokenClassification"),AT.forEach(o),l_=s(ya," forward method, overrides the "),dl=a(ya,"CODE",{});var DT=i(dl);d_=s(DT,"__call__"),DT.forEach(o),c_=s(ya," special method."),ya.forEach(o),p_=d(Do),F(on.$$.fragment,Do),m_=d(Do),cl=a(Do,"P",{});var NT=i(cl);h_=s(NT,"Example:"),NT.forEach(o),f_=d(Do),F(pr.$$.fragment,Do),Do.forEach(o),vo.forEach(o),dd=d(t),_t=a(t,"H2",{class:!0});var oc=i(_t);tn=a(oc,"A",{id:!0,class:!0,href:!0});var OT=i(tn);pl=a(OT,"SPAN",{});var IT=i(pl);F(mr.$$.fragment,IT),IT.forEach(o),OT.forEach(o),u_=d(oc),ml=a(oc,"SPAN",{});var ST=i(ml);g_=s(ST,"TFRoFormerForQuestionAnswering"),ST.forEach(o),oc.forEach(o),cd=d(t),We=a(t,"DIV",{class:!0});var To=i(We);F(hr.$$.fragment,To),__=d(To),kt=a(To,"P",{});var Fa=i(kt);k_=s(Fa,`RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),hl=a(Fa,"CODE",{});var WT=i(hl);v_=s(WT,"span start logits"),WT.forEach(o),T_=s(Fa," and "),fl=a(Fa,"CODE",{});var UT=i(fl);y_=s(UT,"span end logits"),UT.forEach(o),F_=s(Fa,")."),Fa.forEach(o),w_=d(To),fr=a(To,"P",{});var tc=i(fr);b_=s(tc,"This model inherits from "),Yr=a(tc,"A",{href:!0});var BT=i(Yr);$_=s(BT,"TFPreTrainedModel"),BT.forEach(o),R_=s(tc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tc.forEach(o),E_=d(To),ur=a(To,"P",{});var nc=i(ur);M_=s(nc,"This model is also a "),gr=a(nc,"A",{href:!0,rel:!0});var HT=i(gr);z_=s(HT,"tf.keras.Model"),HT.forEach(o),C_=s(nc,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),nc.forEach(o),q_=d(To),F(nn.$$.fragment,To),x_=d(To),to=a(To,"DIV",{class:!0});var No=i(to);F(_r.$$.fragment,No),j_=d(No),vt=a(No,"P",{});var wa=i(vt);P_=s(wa,"The "),Zr=a(wa,"A",{href:!0});var QT=i(Zr);L_=s(QT,"TFRoFormerForQuestionAnswering"),QT.forEach(o),A_=s(wa," forward method, overrides the "),ul=a(wa,"CODE",{});var KT=i(ul);D_=s(KT,"__call__"),KT.forEach(o),N_=s(wa," special method."),wa.forEach(o),O_=d(No),F(sn.$$.fragment,No),I_=d(No),gl=a(No,"P",{});var VT=i(gl);S_=s(VT,"Example:"),VT.forEach(o),W_=d(No),F(kr.$$.fragment,No),No.forEach(o),To.forEach(o),this.h()},h(){p(m,"name","hf:doc:metadata"),p(m,"content",JSON.stringify(Ty)),p(u,"id","roformer"),p(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(u,"href","#roformer"),p(g,"class","relative group"),p(Y,"id","overview"),p(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Y,"href","#overview"),p(z,"class","relative group"),p(oe,"href","https://arxiv.org/pdf/2104.09864v1.pdf"),p(oe,"rel","nofollow"),p(te,"href","https://huggingface.co/junnyu"),p(te,"rel","nofollow"),p(c,"href","https://github.com/ZhuiyiTechnology/roformer"),p(c,"rel","nofollow"),p(j,"id","transformers.RoFormerConfig"),p(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(j,"href","#transformers.RoFormerConfig"),p(ve,"class","relative group"),p(Fe,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerModel"),p(mn,"href","https://huggingface.co/junnyu/roformer_chinese_base"),p(mn,"rel","nofollow"),p(wr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(br,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(Z,"class","docstring"),p(yt,"id","transformers.RoFormerTokenizer"),p(yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(yt,"href","#transformers.RoFormerTokenizer"),p(Io,"class","relative group"),p(_n,"href","https://pypi.org/project/rjieba/%3E"),p(_n,"rel","nofollow"),p(Rr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(wo,"class","docstring"),p(Ft,"class","docstring"),p(po,"class","docstring"),p(La,"class","docstring"),p(Me,"class","docstring"),p(wt,"id","transformers.RoFormerTokenizerFast"),p(wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(wt,"href","#transformers.RoFormerTokenizerFast"),p(Wo,"class","relative group"),p(zr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerTokenizerFast"),p(Cr,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),p(qr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),p(bo,"class","docstring"),p(Ce,"class","docstring"),p($t,"id","transformers.RoFormerModel"),p($t,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p($t,"href","#transformers.RoFormerModel"),p(Uo,"class","relative group"),p(An,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(An,"rel","nofollow"),p(Nn,"href","https://arxiv.org/abs/1706.03762"),p(Nn,"rel","nofollow"),p(Pr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerModel"),p(He,"class","docstring"),p(Ue,"class","docstring"),p(Et,"id","transformers.RoFormerForCausalLM"),p(Et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Et,"href","#transformers.RoFormerForCausalLM"),p(Ho,"class","relative group"),p(Un,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Un,"rel","nofollow"),p(Lr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForCausalLM"),p(Qe,"class","docstring"),p(yo,"class","docstring"),p(zt,"id","transformers.RoFormerForMaskedLM"),p(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(zt,"href","#transformers.RoFormerForMaskedLM"),p(Vo,"class","relative group"),p(Vn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Vn,"rel","nofollow"),p(Ar,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForMaskedLM"),p(Ke,"class","docstring"),p(Fo,"class","docstring"),p(qt,"id","transformers.RoFormerForSequenceClassification"),p(qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(qt,"href","#transformers.RoFormerForSequenceClassification"),p(Xo,"class","relative group"),p(es,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(es,"rel","nofollow"),p(Dr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForSequenceClassification"),p(je,"class","docstring"),p(ao,"class","docstring"),p(jt,"id","transformers.RoFormerForMultipleChoice"),p(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(jt,"href","#transformers.RoFormerForMultipleChoice"),p(Zo,"class","relative group"),p(is,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(is,"rel","nofollow"),p(Nr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForMultipleChoice"),p(Ve,"class","docstring"),p(io,"class","docstring"),p(Lt,"id","transformers.RoFormerForTokenClassification"),p(Lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Lt,"href","#transformers.RoFormerForTokenClassification"),p(ot,"class","relative group"),p(hs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(hs,"rel","nofollow"),p(Or,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForTokenClassification"),p(Je,"class","docstring"),p(lo,"class","docstring"),p(Dt,"id","transformers.RoFormerForQuestionAnswering"),p(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),p(Dt,"href","#transformers.RoFormerForQuestionAnswering"),p(nt,"class","relative group"),p(vs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(vs,"rel","nofollow"),p(Ir,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.RoFormerForQuestionAnswering"),p(Ge,"class","docstring"),p(co,"class","docstring"),p(Ot,"id","transformers.TFRoFormerModel"),p(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ot,"href","#transformers.TFRoFormerModel"),p(at,"class","relative group"),p(Sr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Rs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Rs,"rel","nofollow"),p(Wr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerModel"),p(Xe,"class","docstring"),p(Ae,"class","docstring"),p(Wt,"id","transformers.TFRoFormerForMaskedLM"),p(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Wt,"href","#transformers.TFRoFormerForMaskedLM"),p(lt,"class","relative group"),p(Ur,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Ps,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Ps,"rel","nofollow"),p(Br,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForMaskedLM"),p(Ye,"class","docstring"),p(De,"class","docstring"),p(Ht,"id","transformers.TFRoFormerForCausalLM"),p(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ht,"href","#transformers.TFRoFormerForCausalLM"),p(ct,"class","relative group"),p(Hr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Ws,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Ws,"rel","nofollow"),p(mo,"class","docstring"),p(Ne,"class","docstring"),p(Kt,"id","transformers.TFRoFormerForSequenceClassification"),p(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Kt,"href","#transformers.TFRoFormerForSequenceClassification"),p(pt,"class","relative group"),p(Qr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Js,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Js,"rel","nofollow"),p(Kr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForSequenceClassification"),p(Ze,"class","docstring"),p(Oe,"class","docstring"),p(Gt,"id","transformers.TFRoFormerForMultipleChoice"),p(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Gt,"href","#transformers.TFRoFormerForMultipleChoice"),p(ht,"class","relative 
group"),p(Vr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(tr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(tr,"rel","nofollow"),p(Jr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForMultipleChoice"),p(eo,"class","docstring"),p(Ie,"class","docstring"),p(Zt,"id","transformers.TFRoFormerForTokenClassification"),p(Zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Zt,"href","#transformers.TFRoFormerForTokenClassification"),p(ut,"class","relative group"),p(Gr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(dr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(dr,"rel","nofollow"),p(Xr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForTokenClassification"),p(oo,"class","docstring"),p(Se,"class","docstring"),p(tn,"id","transformers.TFRoFormerForQuestionAnswering"),p(tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(tn,"href","#transformers.TFRoFormerForQuestionAnswering"),p(_t,"class","relative group"),p(Yr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(gr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(gr,"rel","nofollow"),p(Zr,"href","/docs/transformers/v4.15.0/en/model_doc/roformer#transformers.TFRoFormerForQuestionAnswering"),p(to,"class","docstring"),p(We,"class","docstring")},m(t,f){e(document.head,m),h(t,E,f),h(t,g,f),e(g,u),e(u,v),w(k,v,null),e(g,_),e(g,M),e(M,me),h(t,V,f),h(t,z,f),e(z,Y),e(Y,N),w(ee,N,null),e(z,he),e(z,O),e(O,fe),h(t,le,f),h(t,K,f),e(K,L),e(K,oe),e(oe,J),e(K,C),h(t,x,f),h(t,se,f),e(se,W),h(t,de,f),h(t,re,f),e(re,I),e(I,ue),h(t,ce,f),h(t,q,f),e(q,ge),h(t,S,f),h(t,ae,f),e(ae,ie),e(ie,U),h(t,pe,f),h(t,G,f),e(G,A),e(G,te),e(te,B),e(G,_e),e(G,c),e(c,T),e(G,X),h(t,be,f),h(t,ve,f),e(ve,j),e(j,we),w(ye,we,null),e(ve,Re),e(ve,P),e(P,H),h(t,$e,f),h(t,Z,f),w(D,Z,null),e(Z,Ee),e(Z,Te),e(Te,ke),e(Te,Fe),e(Fe,sc),e(Te,rc),e(Te,mn),e(mn,ac),e(Te,ic),e(Z,lc),e(Z,Oo),e(Oo,dc),e(Oo,wr),e(wr,cc),e(Oo,pc),e(Oo,br),e(br,mc),e(Oo,hc),e(Z,fc),e(Z,ba),e(ba,uc),e(Z,gc),w(hn,Z,null),h(t,jl,f),h(t,Io,f),e(Io,yt),e(yt,$a),w(fn,$a,null),e(Io,_c),e(Io,Ra),e(Ra,kc),h(t,Pl,f),h(t,Me,f),w(un,Me,null),e(Me,vc),e(Me,gn),e(gn,Tc),e(gn,$r),e($r,yc),e($r,_n),e(_n,Fc),e(gn,wc),e(Me,bc),e(Me,kn),e(kn,$c),e(kn,Rr),e(Rr,Rc),e(kn,Ec),e(Me,Mc),e(Me,Ea),e(Ea,zc),e(Me,Cc),w(vn,Me,null),e(Me,qc),e(Me,wo),w(Tn,wo,null),e(wo,xc),e(wo,Ma),e(Ma,jc),e(wo,Pc),e(wo,yn),e(yn,Er),e(Er,Lc),e(Er,za),e(za,Ac),e(yn,Dc),e(yn,Mr),e(Mr,Nc),e(Mr,Ca),e(Ca,Oc),e(Me,Ic),e(Me,Ft),w(Fn,Ft,null),e(Ft,Sc),e(Ft,wn),e(wn,Wc),e(wn,qa),e(qa,Uc),e(wn,Bc),e(Me,Hc),e(Me,po),w(bn,po,null),e(po,Qc),e(po,xa),e(xa,Kc),e(po,Vc),w($n,po,null),e(po,Jc),e(po,So),e(So,Gc),e(So,ja),e(ja,Xc),e(So,Yc),e(So,Pa),e(Pa,Zc),e(So,ep),e(Me,op),e(Me,La),h(t,Ll,f),h(t,Wo,f),e(Wo,wt),e(wt,Aa),w(Rn,Aa,null),e(Wo,tp),e(Wo,Da),e(Da,np),h(t,Al,f),h(t,Ce,f),w(En,Ce,null),e(Ce,sp),e(Ce,Mn),e(Mn,rp),e(Mn,Na),e(Na,ap),e(Mn,ip),e(Ce,lp),e(Ce,bt),e(bt,zr),e(zr,dp),e(bt,cp),e(bt,Cr),e(Cr,pp),e(bt,mp),e(Ce,hp),e(Ce,zn),e(zn,fp),e(zn,qr),e(qr,up),e(zn,gp),e(Ce,_p),e(Ce,Oa),e(Oa,kp),e(Ce,vp),w(Cn,Ce,null),e(Ce,Tp),e(Ce,bo),w(qn,bo,null),e(bo,yp),e(bo,Ia),e(Ia,Fp),e(b
o,wp),e(bo,xn),e(xn,xr),e(xr,bp),e(xr,Sa),e(Sa,$p),e(xn,Rp),e(xn,jr),e(jr,Ep),e(jr,Wa),e(Wa,Mp),h(t,Dl,f),h(t,Uo,f),e(Uo,$t),e($t,Ua),w(jn,Ua,null),e(Uo,zp),e(Uo,Ba),e(Ba,Cp),h(t,Nl,f),h(t,Ue,f),w(Pn,Ue,null),e(Ue,qp),e(Ue,Ln),e(Ln,xp),e(Ln,An),e(An,jp),e(Ln,Pp),e(Ue,Lp),e(Ue,Dn),e(Dn,Ap),e(Dn,Nn),e(Nn,Dp),e(Dn,Np),e(Ue,Op),e(Ue,Le),e(Le,Ip),e(Le,Ha),e(Ha,Sp),e(Le,Wp),e(Le,Qa),e(Qa,Up),e(Le,Bp),e(Le,Ka),e(Ka,Hp),e(Le,Qp),e(Le,Va),e(Va,Kp),e(Le,Vp),e(Le,Ja),e(Ja,Jp),e(Le,Gp),e(Le,Ga),e(Ga,Xp),e(Le,Yp),e(Ue,Zp),e(Ue,He),w(On,He,null),e(He,em),e(He,Bo),e(Bo,om),e(Bo,Pr),e(Pr,tm),e(Bo,nm),e(Bo,Xa),e(Xa,sm),e(Bo,rm),e(He,am),w(Rt,He,null),e(He,im),e(He,Ya),e(Ya,lm),e(He,dm),w(In,He,null),h(t,Ol,f),h(t,Ho,f),e(Ho,Et),e(Et,Za),w(Sn,Za,null),e(Ho,cm),e(Ho,ei),e(ei,pm),h(t,Il,f),h(t,yo,f),w(Wn,yo,null),e(yo,mm),e(yo,Qo),e(Qo,hm),e(Qo,oi),e(oi,fm),e(Qo,um),e(Qo,Un),e(Un,gm),e(Qo,_m),e(yo,km),e(yo,Qe),w(Bn,Qe,null),e(Qe,vm),e(Qe,Ko),e(Ko,Tm),e(Ko,Lr),e(Lr,ym),e(Ko,Fm),e(Ko,ti),e(ti,wm),e(Ko,bm),e(Qe,$m),w(Mt,Qe,null),e(Qe,Rm),e(Qe,ni),e(ni,Em),e(Qe,Mm),w(Hn,Qe,null),h(t,Sl,f),h(t,Vo,f),e(Vo,zt),e(zt,si),w(Qn,si,null),e(Vo,zm),e(Vo,ri),e(ri,Cm),h(t,Wl,f),h(t,Fo,f),w(Kn,Fo,null),e(Fo,qm),e(Fo,Jo),e(Jo,xm),e(Jo,ai),e(ai,jm),e(Jo,Pm),e(Jo,Vn),e(Vn,Lm),e(Jo,Am),e(Fo,Dm),e(Fo,Ke),w(Jn,Ke,null),e(Ke,Nm),e(Ke,Go),e(Go,Om),e(Go,Ar),e(Ar,Im),e(Go,Sm),e(Go,ii),e(ii,Wm),e(Go,Um),e(Ke,Bm),w(Ct,Ke,null),e(Ke,Hm),e(Ke,li),e(li,Qm),e(Ke,Km),w(Gn,Ke,null),h(t,Ul,f),h(t,Xo,f),e(Xo,qt),e(qt,di),w(Xn,di,null),e(Xo,Vm),e(Xo,ci),e(ci,Jm),h(t,Bl,f),h(t,ao,f),w(Yn,ao,null),e(ao,Gm),e(ao,pi),e(pi,Xm),e(ao,Ym),e(ao,Zn),e(Zn,Zm),e(Zn,es),e(es,eh),e(Zn,oh),e(ao,th),e(ao,je),w(os,je,null),e(je,nh),e(je,Yo),e(Yo,sh),e(Yo,Dr),e(Dr,rh),e(Yo,ah),e(Yo,mi),e(mi,ih),e(Yo,lh),e(je,dh),w(xt,je,null),e(je,ch),e(je,hi),e(hi,ph),e(je,mh),w(ts,je,null),e(je,hh),e(je,fi),e(fi,fh),e(je,uh),w(ns,je,null),h(t,Hl,f),h(t,Zo,f),e(Zo,jt),e(jt,ui),w(ss,ui,null),e(Zo,gh),e(Zo,gi),e(gi,_h),h(t,Ql,f),h(t,io,f),w(rs,io,null),e(io,kh),e(io,_i),e(_i,vh),e(io,Th),e(io,as),e(as,yh),e(as,is),e(is,Fh),e(as,wh),e(io,bh),e(io,Ve),w(ls,Ve,null),e(Ve,$h),e(Ve,et),e(et,Rh),e(et,Nr),e(Nr,Eh),e(et,Mh),e(et,ki),e(ki,zh),e(et,Ch),e(Ve,qh),w(Pt,Ve,null),e(Ve,xh),e(Ve,vi),e(vi,jh),e(Ve,Ph),w(ds,Ve,null),h(t,Kl,f),h(t,ot,f),e(ot,Lt),e(Lt,Ti),w(cs,Ti,null),e(ot,Lh),e(ot,yi),e(yi,Ah),h(t,Vl,f),h(t,lo,f),w(ps,lo,null),e(lo,Dh),e(lo,Fi),e(Fi,Nh),e(lo,Oh),e(lo,ms),e(ms,Ih),e(ms,hs),e(hs,Sh),e(ms,Wh),e(lo,Uh),e(lo,Je),w(fs,Je,null),e(Je,Bh),e(Je,tt),e(tt,Hh),e(tt,Or),e(Or,Qh),e(tt,Kh),e(tt,wi),e(wi,Vh),e(tt,Jh),e(Je,Gh),w(At,Je,null),e(Je,Xh),e(Je,bi),e(bi,Yh),e(Je,Zh),w(us,Je,null),h(t,Jl,f),h(t,nt,f),e(nt,Dt),e(Dt,$i),w(gs,$i,null),e(nt,ef),e(nt,Ri),e(Ri,of),h(t,Gl,f),h(t,co,f),w(_s,co,null),e(co,tf),e(co,st),e(st,nf),e(st,Ei),e(Ei,sf),e(st,rf),e(st,Mi),e(Mi,af),e(st,lf),e(co,df),e(co,ks),e(ks,cf),e(ks,vs),e(vs,pf),e(ks,mf),e(co,hf),e(co,Ge),w(Ts,Ge,null),e(Ge,ff),e(Ge,rt),e(rt,uf),e(rt,Ir),e(Ir,gf),e(rt,_f),e(rt,zi),e(zi,kf),e(rt,vf),e(Ge,Tf),w(Nt,Ge,null),e(Ge,yf),e(Ge,Ci),e(Ci,Ff),e(Ge,wf),w(ys,Ge,null),h(t,Xl,f),h(t,at,f),e(at,Ot),e(Ot,qi),w(Fs,qi,null),e(at,bf),e(at,xi),e(xi,$f),h(t,Yl,f),h(t,Ae,f),w(ws,Ae,null),e(Ae,Rf),e(Ae,ji),e(ji,Ef),e(Ae,Mf),e(Ae,bs),e(bs,zf),e(bs,Sr),e(Sr,Cf),e(bs,qf),e(Ae,xf),e(Ae,$s),e($s,jf),e($s,Rs),e(Rs,Pf),e($s,Lf),e(Ae,Af),w(It,Ae,null),e(Ae,Df),e(Ae,Xe),w(Es,Xe,null),e(Xe,Nf),e(Xe,it),e(it,Of),e(it,Wr),e(Wr,If),e(it,Sf),e(it,Pi),e(Pi,Wf),e(it,Uf),e(Xe,Bf),w(St,Xe,null),e(Xe,Hf),e(Xe,Li),e(Li,Qf),e(Xe,Kf),w(Ms,Xe,null),h(t,Zl,f),h(t,lt,f),e(
lt,Wt),e(Wt,Ai),w(zs,Ai,null),e(lt,Vf),e(lt,Di),e(Di,Jf),h(t,ed,f),h(t,De,f),w(Cs,De,null),e(De,Gf),e(De,qs),e(qs,Xf),e(qs,Ni),e(Ni,Yf),e(qs,Zf),e(De,eu),e(De,xs),e(xs,ou),e(xs,Ur),e(Ur,tu),e(xs,nu),e(De,su),e(De,js),e(js,ru),e(js,Ps),e(Ps,au),e(js,iu),e(De,lu),w(Ut,De,null),e(De,du),e(De,Ye),w(Ls,Ye,null),e(Ye,cu),e(Ye,dt),e(dt,pu),e(dt,Br),e(Br,mu),e(dt,hu),e(dt,Oi),e(Oi,fu),e(dt,uu),e(Ye,gu),w(Bt,Ye,null),e(Ye,_u),e(Ye,Ii),e(Ii,ku),e(Ye,vu),w(As,Ye,null),h(t,od,f),h(t,ct,f),e(ct,Ht),e(Ht,Si),w(Ds,Si,null),e(ct,Tu),e(ct,Wi),e(Wi,yu),h(t,td,f),h(t,Ne,f),w(Ns,Ne,null),e(Ne,Fu),e(Ne,Os),e(Os,wu),e(Os,Ui),e(Ui,bu),e(Os,$u),e(Ne,Ru),e(Ne,Is),e(Is,Eu),e(Is,Hr),e(Hr,Mu),e(Is,zu),e(Ne,Cu),e(Ne,Ss),e(Ss,qu),e(Ss,Ws),e(Ws,xu),e(Ss,ju),e(Ne,Pu),w(Qt,Ne,null),e(Ne,Lu),e(Ne,mo),w(Us,mo,null),e(mo,Au),e(mo,Be),e(Be,Du),e(Be,Bi),e(Bi,Nu),e(Be,Ou),e(Be,Hi),e(Hi,Iu),e(Be,Su),e(Be,Qi),e(Qi,Wu),e(Be,Uu),e(Be,Ki),e(Ki,Bu),e(Be,Hu),e(Be,Vi),e(Vi,Qu),e(Be,Ku),e(mo,Vu),e(mo,Ji),e(Ji,Ju),e(mo,Gu),w(Bs,mo,null),h(t,nd,f),h(t,pt,f),e(pt,Kt),e(Kt,Gi),w(Hs,Gi,null),e(pt,Xu),e(pt,Xi),e(Xi,Yu),h(t,sd,f),h(t,Oe,f),w(Qs,Oe,null),e(Oe,Zu),e(Oe,Yi),e(Yi,eg),e(Oe,og),e(Oe,Ks),e(Ks,tg),e(Ks,Qr),e(Qr,ng),e(Ks,sg),e(Oe,rg),e(Oe,Vs),e(Vs,ag),e(Vs,Js),e(Js,ig),e(Vs,lg),e(Oe,dg),w(Vt,Oe,null),e(Oe,cg),e(Oe,Ze),w(Gs,Ze,null),e(Ze,pg),e(Ze,mt),e(mt,mg),e(mt,Kr),e(Kr,hg),e(mt,fg),e(mt,Zi),e(Zi,ug),e(mt,gg),e(Ze,_g),w(Jt,Ze,null),e(Ze,kg),e(Ze,el),e(el,vg),e(Ze,Tg),w(Xs,Ze,null),h(t,rd,f),h(t,ht,f),e(ht,Gt),e(Gt,ol),w(Ys,ol,null),e(ht,yg),e(ht,tl),e(tl,Fg),h(t,ad,f),h(t,Ie,f),w(Zs,Ie,null),e(Ie,wg),e(Ie,nl),e(nl,bg),e(Ie,$g),e(Ie,er),e(er,Rg),e(er,Vr),e(Vr,Eg),e(er,Mg),e(Ie,zg),e(Ie,or),e(or,Cg),e(or,tr),e(tr,qg),e(or,xg),e(Ie,jg),w(Xt,Ie,null),e(Ie,Pg),e(Ie,eo),w(nr,eo,null),e(eo,Lg),e(eo,ft),e(ft,Ag),e(ft,Jr),e(Jr,Dg),e(ft,Ng),e(ft,sl),e(sl,Og),e(ft,Ig),e(eo,Sg),w(Yt,eo,null),e(eo,Wg),e(eo,rl),e(rl,Ug),e(eo,Bg),w(sr,eo,null),h(t,id,f),h(t,ut,f),e(ut,Zt),e(Zt,al),w(rr,al,null),e(ut,Hg),e(ut,il),e(il,Qg),h(t,ld,f),h(t,Se,f),w(ar,Se,null),e(Se,Kg),e(Se,ll),e(ll,Vg),e(Se,Jg),e(Se,ir),e(ir,Gg),e(ir,Gr),e(Gr,Xg),e(ir,Yg),e(Se,Zg),e(Se,lr),e(lr,e_),e(lr,dr),e(dr,o_),e(lr,t_),e(Se,n_),w(en,Se,null),e(Se,s_),e(Se,oo),w(cr,oo,null),e(oo,r_),e(oo,gt),e(gt,a_),e(gt,Xr),e(Xr,i_),e(gt,l_),e(gt,dl),e(dl,d_),e(gt,c_),e(oo,p_),w(on,oo,null),e(oo,m_),e(oo,cl),e(cl,h_),e(oo,f_),w(pr,oo,null),h(t,dd,f),h(t,_t,f),e(_t,tn),e(tn,pl),w(mr,pl,null),e(_t,u_),e(_t,ml),e(ml,g_),h(t,cd,f),h(t,We,f),w(hr,We,null),e(We,__),e(We,kt),e(kt,k_),e(kt,hl),e(hl,v_),e(kt,T_),e(kt,fl),e(fl,y_),e(kt,F_),e(We,w_),e(We,fr),e(fr,b_),e(fr,Yr),e(Yr,$_),e(fr,R_),e(We,E_),e(We,ur),e(ur,M_),e(ur,gr),e(gr,z_),e(ur,C_),e(We,q_),w(nn,We,null),e(We,x_),e(We,to),w(_r,to,null),e(to,j_),e(to,vt),e(vt,P_),e(vt,Zr),e(Zr,L_),e(vt,A_),e(vt,ul),e(ul,D_),e(vt,N_),e(to,O_),w(sn,to,null),e(to,I_),e(to,gl),e(gl,S_),e(to,W_),w(kr,to,null),pd=!0},p(t,[f]){const vr={};f&2&&(vr.$$scope={dirty:f,ctx:t}),Rt.$set(vr);const _l={};f&2&&(_l.$$scope={dirty:f,ctx:t}),Mt.$set(_l);const kl={};f&2&&(kl.$$scope={dirty:f,ctx:t}),Ct.$set(kl);const vl={};f&2&&(vl.$$scope={dirty:f,ctx:t}),xt.$set(vl);const Tr={};f&2&&(Tr.$$scope={dirty:f,ctx:t}),Pt.$set(Tr);const Tl={};f&2&&(Tl.$$scope={dirty:f,ctx:t}),At.$set(Tl);const yl={};f&2&&(yl.$$scope={dirty:f,ctx:t}),Nt.$set(yl);const Fl={};f&2&&(Fl.$$scope={dirty:f,ctx:t}),It.$set(Fl);const yr={};f&2&&(yr.$$scope={dirty:f,ctx:t}),St.$set(yr);const wl={};f&2&&(wl.$$scope={dirty:f,ctx:t}),Ut.$set(wl);const bl={};f&2&&(bl.$$scope={dirty:f,ctx:t}),Bt.$set(bl);const 
$l={};f&2&&($l.$$scope={dirty:f,ctx:t}),Qt.$set($l);const Rl={};f&2&&(Rl.$$scope={dirty:f,ctx:t}),Vt.$set(Rl);const El={};f&2&&(El.$$scope={dirty:f,ctx:t}),Jt.$set(El);const Ml={};f&2&&(Ml.$$scope={dirty:f,ctx:t}),Xt.$set(Ml);const zl={};f&2&&(zl.$$scope={dirty:f,ctx:t}),Yt.$set(zl);const Tt={};f&2&&(Tt.$$scope={dirty:f,ctx:t}),en.$set(Tt);const Cl={};f&2&&(Cl.$$scope={dirty:f,ctx:t}),on.$set(Cl);const ql={};f&2&&(ql.$$scope={dirty:f,ctx:t}),nn.$set(ql);const Fr={};f&2&&(Fr.$$scope={dirty:f,ctx:t}),sn.$set(Fr)},i(t){pd||(b(k.$$.fragment,t),b(ee.$$.fragment,t),b(ye.$$.fragment,t),b(D.$$.fragment,t),b(hn.$$.fragment,t),b(fn.$$.fragment,t),b(un.$$.fragment,t),b(vn.$$.fragment,t),b(Tn.$$.fragment,t),b(Fn.$$.fragment,t),b(bn.$$.fragment,t),b($n.$$.fragment,t),b(Rn.$$.fragment,t),b(En.$$.fragment,t),b(Cn.$$.fragment,t),b(qn.$$.fragment,t),b(jn.$$.fragment,t),b(Pn.$$.fragment,t),b(On.$$.fragment,t),b(Rt.$$.fragment,t),b(In.$$.fragment,t),b(Sn.$$.fragment,t),b(Wn.$$.fragment,t),b(Bn.$$.fragment,t),b(Mt.$$.fragment,t),b(Hn.$$.fragment,t),b(Qn.$$.fragment,t),b(Kn.$$.fragment,t),b(Jn.$$.fragment,t),b(Ct.$$.fragment,t),b(Gn.$$.fragment,t),b(Xn.$$.fragment,t),b(Yn.$$.fragment,t),b(os.$$.fragment,t),b(xt.$$.fragment,t),b(ts.$$.fragment,t),b(ns.$$.fragment,t),b(ss.$$.fragment,t),b(rs.$$.fragment,t),b(ls.$$.fragment,t),b(Pt.$$.fragment,t),b(ds.$$.fragment,t),b(cs.$$.fragment,t),b(ps.$$.fragment,t),b(fs.$$.fragment,t),b(At.$$.fragment,t),b(us.$$.fragment,t),b(gs.$$.fragment,t),b(_s.$$.fragment,t),b(Ts.$$.fragment,t),b(Nt.$$.fragment,t),b(ys.$$.fragment,t),b(Fs.$$.fragment,t),b(ws.$$.fragment,t),b(It.$$.fragment,t),b(Es.$$.fragment,t),b(St.$$.fragment,t),b(Ms.$$.fragment,t),b(zs.$$.fragment,t),b(Cs.$$.fragment,t),b(Ut.$$.fragment,t),b(Ls.$$.fragment,t),b(Bt.$$.fragment,t),b(As.$$.fragment,t),b(Ds.$$.fragment,t),b(Ns.$$.fragment,t),b(Qt.$$.fragment,t),b(Us.$$.fragment,t),b(Bs.$$.fragment,t),b(Hs.$$.fragment,t),b(Qs.$$.fragment,t),b(Vt.$$.fragment,t),b(Gs.$$.fragment,t),b(Jt.$$.fragment,t),b(Xs.$$.fragment,t),b(Ys.$$.fragment,t),b(Zs.$$.fragment,t),b(Xt.$$.fragment,t),b(nr.$$.fragment,t),b(Yt.$$.fragment,t),b(sr.$$.fragment,t),b(rr.$$.fragment,t),b(ar.$$.fragment,t),b(en.$$.fragment,t),b(cr.$$.fragment,t),b(on.$$.fragment,t),b(pr.$$.fragment,t),b(mr.$$.fragment,t),b(hr.$$.fragment,t),b(nn.$$.fragment,t),b(_r.$$.fragment,t),b(sn.$$.fragment,t),b(kr.$$.fragment,t),pd=!0)},o(t){$(k.$$.fragment,t),$(ee.$$.fragment,t),$(ye.$$.fragment,t),$(D.$$.fragment,t),$(hn.$$.fragment,t),$(fn.$$.fragment,t),$(un.$$.fragment,t),$(vn.$$.fragment,t),$(Tn.$$.fragment,t),$(Fn.$$.fragment,t),$(bn.$$.fragment,t),$($n.$$.fragment,t),$(Rn.$$.fragment,t),$(En.$$.fragment,t),$(Cn.$$.fragment,t),$(qn.$$.fragment,t),$(jn.$$.fragment,t),$(Pn.$$.fragment,t),$(On.$$.fragment,t),$(Rt.$$.fragment,t),$(In.$$.fragment,t),$(Sn.$$.fragment,t),$(Wn.$$.fragment,t),$(Bn.$$.fragment,t),$(Mt.$$.fragment,t),$(Hn.$$.fragment,t),$(Qn.$$.fragment,t),$(Kn.$$.fragment,t),$(Jn.$$.fragment,t),$(Ct.$$.fragment,t),$(Gn.$$.fragment,t),$(Xn.$$.fragment,t),$(Yn.$$.fragment,t),$(os.$$.fragment,t),$(xt.$$.fragment,t),$(ts.$$.fragment,t),$(ns.$$.fragment,t),$(ss.$$.fragment,t),$(rs.$$.fragment,t),$(ls.$$.fragment,t),$(Pt.$$.fragment,t),$(ds.$$.fragment,t),$(cs.$$.fragment,t),$(ps.$$.fragment,t),$(fs.$$.fragment,t),$(At.$$.fragment,t),$(us.$$.fragment,t),$(gs.$$.fragment,t),$(_s.$$.fragment,t),$(Ts.$$.fragment,t),$(Nt.$$.fragment,t),$(ys.$$.fragment,t),$(Fs.$$.fragment,t),$(ws.$$.fragment,t),$(It.$$.fragment,t),$(Es.$$.fragment,t),$(St.$$.fragment,t),$(Ms.$$.fragment,t)
,$(zs.$$.fragment,t),$(Cs.$$.fragment,t),$(Ut.$$.fragment,t),$(Ls.$$.fragment,t),$(Bt.$$.fragment,t),$(As.$$.fragment,t),$(Ds.$$.fragment,t),$(Ns.$$.fragment,t),$(Qt.$$.fragment,t),$(Us.$$.fragment,t),$(Bs.$$.fragment,t),$(Hs.$$.fragment,t),$(Qs.$$.fragment,t),$(Vt.$$.fragment,t),$(Gs.$$.fragment,t),$(Jt.$$.fragment,t),$(Xs.$$.fragment,t),$(Ys.$$.fragment,t),$(Zs.$$.fragment,t),$(Xt.$$.fragment,t),$(nr.$$.fragment,t),$(Yt.$$.fragment,t),$(sr.$$.fragment,t),$(rr.$$.fragment,t),$(ar.$$.fragment,t),$(en.$$.fragment,t),$(cr.$$.fragment,t),$(on.$$.fragment,t),$(pr.$$.fragment,t),$(mr.$$.fragment,t),$(hr.$$.fragment,t),$(nn.$$.fragment,t),$(_r.$$.fragment,t),$(sn.$$.fragment,t),$(kr.$$.fragment,t),pd=!1},d(t){o(m),t&&o(E),t&&o(g),R(k),t&&o(V),t&&o(z),R(ee),t&&o(le),t&&o(K),t&&o(x),t&&o(se),t&&o(de),t&&o(re),t&&o(ce),t&&o(q),t&&o(S),t&&o(ae),t&&o(pe),t&&o(G),t&&o(be),t&&o(ve),R(ye),t&&o($e),t&&o(Z),R(D),R(hn),t&&o(jl),t&&o(Io),R(fn),t&&o(Pl),t&&o(Me),R(un),R(vn),R(Tn),R(Fn),R(bn),R($n),t&&o(Ll),t&&o(Wo),R(Rn),t&&o(Al),t&&o(Ce),R(En),R(Cn),R(qn),t&&o(Dl),t&&o(Uo),R(jn),t&&o(Nl),t&&o(Ue),R(Pn),R(On),R(Rt),R(In),t&&o(Ol),t&&o(Ho),R(Sn),t&&o(Il),t&&o(yo),R(Wn),R(Bn),R(Mt),R(Hn),t&&o(Sl),t&&o(Vo),R(Qn),t&&o(Wl),t&&o(Fo),R(Kn),R(Jn),R(Ct),R(Gn),t&&o(Ul),t&&o(Xo),R(Xn),t&&o(Bl),t&&o(ao),R(Yn),R(os),R(xt),R(ts),R(ns),t&&o(Hl),t&&o(Zo),R(ss),t&&o(Ql),t&&o(io),R(rs),R(ls),R(Pt),R(ds),t&&o(Kl),t&&o(ot),R(cs),t&&o(Vl),t&&o(lo),R(ps),R(fs),R(At),R(us),t&&o(Jl),t&&o(nt),R(gs),t&&o(Gl),t&&o(co),R(_s),R(Ts),R(Nt),R(ys),t&&o(Xl),t&&o(at),R(Fs),t&&o(Yl),t&&o(Ae),R(ws),R(It),R(Es),R(St),R(Ms),t&&o(Zl),t&&o(lt),R(zs),t&&o(ed),t&&o(De),R(Cs),R(Ut),R(Ls),R(Bt),R(As),t&&o(od),t&&o(ct),R(Ds),t&&o(td),t&&o(Ne),R(Ns),R(Qt),R(Us),R(Bs),t&&o(nd),t&&o(pt),R(Hs),t&&o(sd),t&&o(Oe),R(Qs),R(Vt),R(Gs),R(Jt),R(Xs),t&&o(rd),t&&o(ht),R(Ys),t&&o(ad),t&&o(Ie),R(Zs),R(Xt),R(nr),R(Yt),R(sr),t&&o(id),t&&o(ut),R(rr),t&&o(ld),t&&o(Se),R(ar),R(en),R(cr),R(on),R(pr),t&&o(dd),t&&o(_t),R(mr),t&&o(cd),t&&o(We),R(hr),R(nn),R(_r),R(sn),R(kr)}}}const Ty={local:"roformer",sections:[{local:"overview",title:"Overview"},{local:"transformers.RoFormerConfig",title:"RoFormerConfig"},{local:"transformers.RoFormerTokenizer",title:"RoFormerTokenizer"},{local:"transformers.RoFormerTokenizerFast",title:"RoFormerTokenizerFast"},{local:"transformers.RoFormerModel",title:"RoFormerModel"},{local:"transformers.RoFormerForCausalLM",title:"RoFormerForCausalLM"},{local:"transformers.RoFormerForMaskedLM",title:"RoFormerForMaskedLM"},{local:"transformers.RoFormerForSequenceClassification",title:"RoFormerForSequenceClassification"},{local:"transformers.RoFormerForMultipleChoice",title:"RoFormerForMultipleChoice"},{local:"transformers.RoFormerForTokenClassification",title:"RoFormerForTokenClassification"},{local:"transformers.RoFormerForQuestionAnswering",title:"RoFormerForQuestionAnswering"},{local:"transformers.TFRoFormerModel",title:"TFRoFormerModel"},{local:"transformers.TFRoFormerForMaskedLM",title:"TFRoFormerForMaskedLM"},{local:"transformers.TFRoFormerForCausalLM",title:"TFRoFormerForCausalLM"},{local:"transformers.TFRoFormerForSequenceClassification",title:"TFRoFormerForSequenceClassification"},{local:"transformers.TFRoFormerForMultipleChoice",title:"TFRoFormerForMultipleChoice"},{local:"transformers.TFRoFormerForTokenClassification",title:"TFRoFormerForTokenClassification"},{local:"transformers.TFRoFormerForQuestionAnswering",title:"TFRoFormerForQuestionAnswering"}],title:"RoFormer"};function yy(Q,m,E){let{fw:g}=m;return Q.$$set=u=>{"fw"in 
u&&E(0,g=u.fw)},[g]}class My extends JT{constructor(m){super();GT(this,m,yy,vy,XT,{fw:0})}}export{My as default,Ty as metadata};
9,954
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/xls_r.mdx-6e4465a1.js
import{S as Ze,i as ea,s as aa,e as o,k as p,w as je,t as i,L as ta,c as r,d as a,m as f,a as n,x as Fe,h as l,b as c,J as t,g as h,y as Ge,K as oa,q as He,o as Qe,B as Ye}from"../../chunks/vendor-b1433968.js";import{I as De}from"../../chunks/IconCopyLink-7029626d.js";function ra(ne){let m,x,u,v,B,E,se,W,ie,z,d,g,M,k,le,O,he,K,w,ce,y,pe,fe,I,T,ue,J,C,N,ve,U,P,me,j,_,q,de,ge,R,we,$,_e,Se,F,S,be,A,Le,Ee,G,b,ke,V,ye,Re,H,L,Ae,X,Xe,xe,Q;return E=new De({}),k=new De({}),{c(){m=o("meta"),x=p(),u=o("h1"),v=o("a"),B=o("span"),je(E.$$.fragment),se=p(),W=o("span"),ie=i("XLS-R"),z=p(),d=o("h2"),g=o("a"),M=o("span"),je(k.$$.fragment),le=p(),O=o("span"),he=i("Overview"),K=p(),w=o("p"),ce=i("The XLS-R model was proposed in "),y=o("a"),pe=i("XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale"),fe=i(` by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.`),I=p(),T=o("p"),ue=i("The abstract from the paper is the following:"),J=p(),C=o("p"),N=o("em"),ve=i(`This paper presents XLS-R, a large-scale model for cross-lingual speech representation learning based on wav2vec 2.0. We train models with up to 2B parameters on nearly half a million hours of publicly available speech audio in 128 languages, an order of magnitude more public data than the largest known prior work. Our evaluation covers a wide range of tasks, domains, data regimes and languages, both high and low-resource. On the CoVoST-2 speech translation benchmark, we improve the previous state of the art by an average of 7.4 BLEU over 21 translation directions into English. For speech recognition, XLS-R improves over the best known prior work on BABEL, MLS, CommonVoice as well as VoxPopuli, lowering error rates by 14-34% relative on average. XLS-R also sets a new state of the art on VoxLingua107 language identification. Moreover, we show that with sufficient model size, cross-lingual pretraining can outperform English-only pretraining when translating English speech into other languages, a setting which favors monolingual pretraining. 
We hope XLS-R can help to improve speech processing tasks for many more languages of the world.`),U=p(),P=o("p"),me=i("Tips:"),j=p(),_=o("ul"),q=o("li"),de=i("XLS-R is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ge=p(),R=o("li"),we=i(`XLS-R model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),$=o("a"),_e=i("Wav2Vec2CTCTokenizer"),Se=i("."),F=p(),S=o("p"),be=i("Relevant checkpoints can be found under "),A=o("a"),Le=i("https://huggingface.co/models?other=xls_r"),Ee=i("."),G=p(),b=o("p"),ke=i("XLS-R\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),V=o("a"),ye=i("Wav2Vec2\u2019s documentation page"),Re=i("."),H=p(),L=o("p"),Ae=i("The original code can be found "),X=o("a"),Xe=i("here"),xe=i("."),this.h()},l(e){const s=ta('[data-svelte="svelte-1phssyn"]',document.head);m=r(s,"META",{name:!0,content:!0}),s.forEach(a),x=f(e),u=r(e,"H1",{class:!0});var Y=n(u);v=r(Y,"A",{id:!0,class:!0,href:!0});var Te=n(v);B=r(Te,"SPAN",{});var Ce=n(B);Fe(E.$$.fragment,Ce),Ce.forEach(a),Te.forEach(a),se=f(Y),W=r(Y,"SPAN",{});var Pe=n(W);ie=l(Pe,"XLS-R"),Pe.forEach(a),Y.forEach(a),z=f(e),d=r(e,"H2",{class:!0});var D=n(d);g=r(D,"A",{id:!0,class:!0,href:!0});var $e=n(g);M=r($e,"SPAN",{});var Ve=n(M);Fe(k.$$.fragment,Ve),Ve.forEach(a),$e.forEach(a),le=f(D),O=r(D,"SPAN",{});var Be=n(O);he=l(Be,"Overview"),Be.forEach(a),D.forEach(a),K=f(e),w=r(e,"P",{});var Z=n(w);ce=l(Z,"The XLS-R model was proposed in "),y=r(Z,"A",{href:!0,rel:!0});var We=n(y);pe=l(We,"XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale"),We.forEach(a),fe=l(Z,` by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.`),Z.forEach(a),I=f(e),T=r(e,"P",{});var Me=n(T);ue=l(Me,"The abstract from the paper is the following:"),Me.forEach(a),J=f(e),C=r(e,"P",{});var Oe=n(C);N=r(Oe,"EM",{});var Ne=n(N);ve=l(Ne,`This paper presents XLS-R, a large-scale model for cross-lingual speech representation learning based on wav2vec 2.0. We train models with up to 2B parameters on nearly half a million hours of publicly available speech audio in 128 languages, an order of magnitude more public data than the largest known prior work. Our evaluation covers a wide range of tasks, domains, data regimes and languages, both high and low-resource. On the CoVoST-2 speech translation benchmark, we improve the previous state of the art by an average of 7.4 BLEU over 21 translation directions into English. For speech recognition, XLS-R improves over the best known prior work on BABEL, MLS, CommonVoice as well as VoxPopuli, lowering error rates by 14-34% relative on average. XLS-R also sets a new state of the art on VoxLingua107 language identification. Moreover, we show that with sufficient model size, cross-lingual pretraining can outperform English-only pretraining when translating English speech into other languages, a setting which favors monolingual pretraining. 
We hope XLS-R can help to improve speech processing tasks for many more languages of the world.`),Ne.forEach(a),Oe.forEach(a),U=f(e),P=r(e,"P",{});var qe=n(P);me=l(qe,"Tips:"),qe.forEach(a),j=f(e),_=r(e,"UL",{});var ee=n(_);q=r(ee,"LI",{});var ze=n(q);de=l(ze,"XLS-R is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ze.forEach(a),ge=f(ee),R=r(ee,"LI",{});var ae=n(R);we=l(ae,`XLS-R model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),$=r(ae,"A",{href:!0});var Ke=n($);_e=l(Ke,"Wav2Vec2CTCTokenizer"),Ke.forEach(a),Se=l(ae,"."),ae.forEach(a),ee.forEach(a),F=f(e),S=r(e,"P",{});var te=n(S);be=l(te,"Relevant checkpoints can be found under "),A=r(te,"A",{href:!0,rel:!0});var Ie=n(A);Le=l(Ie,"https://huggingface.co/models?other=xls_r"),Ie.forEach(a),Ee=l(te,"."),te.forEach(a),G=f(e),b=r(e,"P",{});var oe=n(b);ke=l(oe,"XLS-R\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),V=r(oe,"A",{href:!0});var Je=n(V);ye=l(Je,"Wav2Vec2\u2019s documentation page"),Je.forEach(a),Re=l(oe,"."),oe.forEach(a),H=f(e),L=r(e,"P",{});var re=n(L);Ae=l(re,"The original code can be found "),X=r(re,"A",{href:!0,rel:!0});var Ue=n(X);Xe=l(Ue,"here"),Ue.forEach(a),xe=l(re,"."),re.forEach(a),this.h()},h(){c(m,"name","hf:doc:metadata"),c(m,"content",JSON.stringify(na)),c(v,"id","xlsr"),c(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(v,"href","#xlsr"),c(u,"class","relative group"),c(g,"id","overview"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#overview"),c(d,"class","relative group"),c(y,"href","https://arxiv.org/abs/2111.09296"),c(y,"rel","nofollow"),c($,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),c(A,"href","https://huggingface.co/models?other=xls_r"),c(A,"rel","nofollow"),c(V,"href","/docs/transformers/v4.15.0/en/wav2vec2"),c(X,"href","https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec"),c(X,"rel","nofollow")},m(e,s){t(document.head,m),h(e,x,s),h(e,u,s),t(u,v),t(v,B),Ge(E,B,null),t(u,se),t(u,W),t(W,ie),h(e,z,s),h(e,d,s),t(d,g),t(g,M),Ge(k,M,null),t(d,le),t(d,O),t(O,he),h(e,K,s),h(e,w,s),t(w,ce),t(w,y),t(y,pe),t(w,fe),h(e,I,s),h(e,T,s),t(T,ue),h(e,J,s),h(e,C,s),t(C,N),t(N,ve),h(e,U,s),h(e,P,s),t(P,me),h(e,j,s),h(e,_,s),t(_,q),t(q,de),t(_,ge),t(_,R),t(R,we),t(R,$),t($,_e),t(R,Se),h(e,F,s),h(e,S,s),t(S,be),t(S,A),t(A,Le),t(S,Ee),h(e,G,s),h(e,b,s),t(b,ke),t(b,V),t(V,ye),t(b,Re),h(e,H,s),h(e,L,s),t(L,Ae),t(L,X),t(X,Xe),t(L,xe),Q=!0},p:oa,i(e){Q||(He(E.$$.fragment,e),He(k.$$.fragment,e),Q=!0)},o(e){Qe(E.$$.fragment,e),Qe(k.$$.fragment,e),Q=!1},d(e){a(m),e&&a(x),e&&a(u),Ye(E),e&&a(z),e&&a(d),Ye(k),e&&a(K),e&&a(w),e&&a(I),e&&a(T),e&&a(J),e&&a(C),e&&a(U),e&&a(P),e&&a(j),e&&a(_),e&&a(F),e&&a(S),e&&a(G),e&&a(b),e&&a(H),e&&a(L)}}}const na={local:"xlsr",sections:[{local:"overview",title:"Overview"}],title:"XLS-R"};function sa(ne,m,x){let{fw:u}=m;return ne.$$set=v=>{"fw"in v&&x(0,u=v.fw)},[u]}class ha extends Ze{constructor(m){super();ea(this,m,sa,ra,aa,{fw:0})}}export{ha as default,na as metadata};
9,955
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/xlnet.mdx-84c14790.js
import{S as my,i as uy,s as fy,e as a,k as l,w as v,t as n,L as gy,c as r,d as t,m as d,a as i,x as T,h as s,b as c,J as e,g as h,y as b,q as w,o as y,B as L}from"../../chunks/vendor-b1433968.js";import{T as Xe}from"../../chunks/Tip-c3840994.js";import{D as C}from"../../chunks/Docstring-ff504c58.js";import{C as Se}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function _y(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function ky(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function vy(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function Ty(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function by(S){let 
m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function wy(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function yy(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function Ly(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke;return{c(){m=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),N=a("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),$=a("li"),pe=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Z=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),D=a("code"),me=n("model(inputs)"),le=n("."),V=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),ne=n("a single Tensor with "),W=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),ce=l(),M=a("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),Y=n("model([input_ids, attention_mask])"),oe=n(" or "),B=a("code"),ae=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),O=a("li"),_e=n(`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),ke=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){m=r(p,"P",{});var F=i(m);x=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=d(p),_=r(p,"UL",{});var K=i(_);N=r(K,"LI",{});var be=i(N);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),g=d(K),$=r(K,"LI",{});var re=i($);pe=s(re,"having all inputs as a list, tuple or dict in the first positional arguments."),re.forEach(t),K.forEach(t),G=d(p),X=r(p,"P",{});var P=i(X);Z=s(P,"This second option is useful when using "),I=r(P,"CODE",{});var ve=i(I);ee=s(ve,"tf.keras.Model.fit"),ve.forEach(t),he=s(P,` method which currently requires having all the tensors in the first argument of the model call function: `),D=r(P,"CODE",{});var we=i(D);me=s(we,"model(inputs)"),we.forEach(t),le=s(P,"."),P.forEach(t),V=d(p),j=r(p,"P",{});var ye=i(j);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),J=d(p),z=r(p,"UL",{});var E=i(z);q=r(E,"LI",{});var U=i(q);ne=s(U,"a single Tensor with "),W=r(U,"CODE",{});var Le=i(W);de=s(Le,"input_ids"),Le.forEach(t),se=s(U," only and nothing else: "),H=r(U,"CODE",{});var Te=i(H);ue=s(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(E),M=r(E,"LI",{});var R=i(M);fe=s(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ne=i(Q);Y=s(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),oe=s(R," or "),B=r(R,"CODE",{});var xe=i(B);ae=s(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),R.forEach(t),ge=d(E),O=r(E,"LI",{});var ie=i(O);_e=s(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(ie,"CODE",{});var Fe=i(A);ke=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ie.forEach(t),E.forEach(t)},m(p,F){h(p,m,F),e(m,x),h(p,f,F),h(p,_,F),e(_,N),e(N,k),e(_,g),e(_,$),e($,pe),h(p,G,F),h(p,X,F),e(X,Z),e(X,I),e(I,ee),e(X,he),e(X,D),e(D,me),e(X,le),h(p,V,F),h(p,j,F),e(j,te),h(p,J,F),h(p,z,F),e(z,q),e(q,ne),e(q,W),e(W,de),e(q,se),e(q,H),e(H,ue),e(z,ce),e(z,M),e(M,fe),e(M,Q),e(Q,Y),e(M,oe),e(M,B),e(B,ae),e(z,ge),e(z,O),e(O,_e),e(O,A),e(A,ke)},d(p){p&&t(m),p&&t(f),p&&t(_),p&&t(G),p&&t(X),p&&t(V),p&&t(j),p&&t(J),p&&t(z)}}}function Ny(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function xy(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke;return{c(){m=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),N=a("li"),k=n("having all inputs as keyword arguments (like PyTorch 
models), or"),g=l(),$=a("li"),pe=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Z=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),D=a("code"),me=n("model(inputs)"),le=n("."),V=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),ne=n("a single Tensor with "),W=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),ce=l(),M=a("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),Y=n("model([input_ids, attention_mask])"),oe=n(" or "),B=a("code"),ae=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),ke=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){m=r(p,"P",{});var F=i(m);x=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=d(p),_=r(p,"UL",{});var K=i(_);N=r(K,"LI",{});var be=i(N);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),g=d(K),$=r(K,"LI",{});var re=i($);pe=s(re,"having all inputs as a list, tuple or dict in the first positional arguments."),re.forEach(t),K.forEach(t),G=d(p),X=r(p,"P",{});var P=i(X);Z=s(P,"This second option is useful when using "),I=r(P,"CODE",{});var ve=i(I);ee=s(ve,"tf.keras.Model.fit"),ve.forEach(t),he=s(P,` method which currently requires having all the tensors in the first argument of the model call function: `),D=r(P,"CODE",{});var we=i(D);me=s(we,"model(inputs)"),we.forEach(t),le=s(P,"."),P.forEach(t),V=d(p),j=r(p,"P",{});var ye=i(j);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),J=d(p),z=r(p,"UL",{});var E=i(z);q=r(E,"LI",{});var U=i(q);ne=s(U,"a single Tensor with "),W=r(U,"CODE",{});var Le=i(W);de=s(Le,"input_ids"),Le.forEach(t),se=s(U," only and nothing else: "),H=r(U,"CODE",{});var Te=i(H);ue=s(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(E),M=r(E,"LI",{});var R=i(M);fe=s(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ne=i(Q);Y=s(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),oe=s(R," or "),B=r(R,"CODE",{});var xe=i(B);ae=s(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),R.forEach(t),ge=d(E),O=r(E,"LI",{});var ie=i(O);_e=s(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(ie,"CODE",{});var Fe=i(A);ke=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ie.forEach(t),E.forEach(t)},m(p,F){h(p,m,F),e(m,x),h(p,f,F),h(p,_,F),e(_,N),e(N,k),e(_,g),e(_,$),e($,pe),h(p,G,F),h(p,X,F),e(X,Z),e(X,I),e(I,ee),e(X,he),e(X,D),e(D,me),e(X,le),h(p,V,F),h(p,j,F),e(j,te),h(p,J,F),h(p,z,F),e(z,q),e(q,ne),e(q,W),e(W,de),e(q,se),e(q,H),e(H,ue),e(z,ce),e(z,M),e(M,fe),e(M,Q),e(Q,Y),e(M,oe),e(M,B),e(B,ae),e(z,ge),e(z,O),e(O,_e),e(O,A),e(A,ke)},d(p){p&&t(m),p&&t(f),p&&t(_),p&&t(G),p&&t(X),p&&t(V),p&&t(j),p&&t(J),p&&t(z)}}}function Fy(S){let 
m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function $y(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke;return{c(){m=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),N=a("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),$=a("li"),pe=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Z=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),D=a("code"),me=n("model(inputs)"),le=n("."),V=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),ne=n("a single Tensor with "),W=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),ce=l(),M=a("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),Y=n("model([input_ids, attention_mask])"),oe=n(" or "),B=a("code"),ae=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),ke=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){m=r(p,"P",{});var F=i(m);x=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=d(p),_=r(p,"UL",{});var K=i(_);N=r(K,"LI",{});var be=i(N);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),g=d(K),$=r(K,"LI",{});var re=i($);pe=s(re,"having all inputs as a list, tuple or dict in the first positional arguments."),re.forEach(t),K.forEach(t),G=d(p),X=r(p,"P",{});var P=i(X);Z=s(P,"This second option is useful when using "),I=r(P,"CODE",{});var ve=i(I);ee=s(ve,"tf.keras.Model.fit"),ve.forEach(t),he=s(P,` method which currently requires having all the tensors in the first argument of the model call function: `),D=r(P,"CODE",{});var we=i(D);me=s(we,"model(inputs)"),we.forEach(t),le=s(P,"."),P.forEach(t),V=d(p),j=r(p,"P",{});var ye=i(j);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),J=d(p),z=r(p,"UL",{});var E=i(z);q=r(E,"LI",{});var U=i(q);ne=s(U,"a single Tensor with "),W=r(U,"CODE",{});var Le=i(W);de=s(Le,"input_ids"),Le.forEach(t),se=s(U," only and nothing else: "),H=r(U,"CODE",{});var Te=i(H);ue=s(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(E),M=r(E,"LI",{});var R=i(M);fe=s(R,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),Q=r(R,"CODE",{});var Ne=i(Q);Y=s(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),oe=s(R," or "),B=r(R,"CODE",{});var xe=i(B);ae=s(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),R.forEach(t),ge=d(E),O=r(E,"LI",{});var ie=i(O);_e=s(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(ie,"CODE",{});var Fe=i(A);ke=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ie.forEach(t),E.forEach(t)},m(p,F){h(p,m,F),e(m,x),h(p,f,F),h(p,_,F),e(_,N),e(N,k),e(_,g),e(_,$),e($,pe),h(p,G,F),h(p,X,F),e(X,Z),e(X,I),e(I,ee),e(X,he),e(X,D),e(D,me),e(X,le),h(p,V,F),h(p,j,F),e(j,te),h(p,J,F),h(p,z,F),e(z,q),e(q,ne),e(q,W),e(W,de),e(q,se),e(q,H),e(H,ue),e(z,ce),e(z,M),e(M,fe),e(M,Q),e(Q,Y),e(M,oe),e(M,B),e(B,ae),e(z,ge),e(z,O),e(O,_e),e(O,A),e(A,ke)},d(p){p&&t(m),p&&t(f),p&&t(_),p&&t(G),p&&t(X),p&&t(V),p&&t(j),p&&t(J),p&&t(z)}}}function Xy(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function zy(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke;return{c(){m=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),N=a("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),$=a("li"),pe=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Z=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),D=a("code"),me=n("model(inputs)"),le=n("."),V=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),ne=n("a single Tensor with "),W=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),ce=l(),M=a("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),Y=n("model([input_ids, attention_mask])"),oe=n(" or "),B=a("code"),ae=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),ke=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){m=r(p,"P",{});var F=i(m);x=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=d(p),_=r(p,"UL",{});var K=i(_);N=r(K,"LI",{});var be=i(N);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),g=d(K),$=r(K,"LI",{});var re=i($);pe=s(re,"having all inputs as a list, tuple or dict in the first positional 
arguments."),re.forEach(t),K.forEach(t),G=d(p),X=r(p,"P",{});var P=i(X);Z=s(P,"This second option is useful when using "),I=r(P,"CODE",{});var ve=i(I);ee=s(ve,"tf.keras.Model.fit"),ve.forEach(t),he=s(P,` method which currently requires having all the tensors in the first argument of the model call function: `),D=r(P,"CODE",{});var we=i(D);me=s(we,"model(inputs)"),we.forEach(t),le=s(P,"."),P.forEach(t),V=d(p),j=r(p,"P",{});var ye=i(j);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),J=d(p),z=r(p,"UL",{});var E=i(z);q=r(E,"LI",{});var U=i(q);ne=s(U,"a single Tensor with "),W=r(U,"CODE",{});var Le=i(W);de=s(Le,"input_ids"),Le.forEach(t),se=s(U," only and nothing else: "),H=r(U,"CODE",{});var Te=i(H);ue=s(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(E),M=r(E,"LI",{});var R=i(M);fe=s(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ne=i(Q);Y=s(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),oe=s(R," or "),B=r(R,"CODE",{});var xe=i(B);ae=s(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),R.forEach(t),ge=d(E),O=r(E,"LI",{});var ie=i(O);_e=s(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(ie,"CODE",{});var Fe=i(A);ke=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ie.forEach(t),E.forEach(t)},m(p,F){h(p,m,F),e(m,x),h(p,f,F),h(p,_,F),e(_,N),e(N,k),e(_,g),e(_,$),e($,pe),h(p,G,F),h(p,X,F),e(X,Z),e(X,I),e(I,ee),e(X,he),e(X,D),e(D,me),e(X,le),h(p,V,F),h(p,j,F),e(j,te),h(p,J,F),h(p,z,F),e(z,q),e(q,ne),e(q,W),e(W,de),e(q,se),e(q,H),e(H,ue),e(z,ce),e(z,M),e(M,fe),e(M,Q),e(Q,Y),e(M,oe),e(M,B),e(B,ae),e(z,ge),e(z,O),e(O,_e),e(O,A),e(A,ke)},d(p){p&&t(m),p&&t(f),p&&t(_),p&&t(G),p&&t(X),p&&t(V),p&&t(j),p&&t(J),p&&t(z)}}}function My(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function qy(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke;return{c(){m=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),N=a("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),$=a("li"),pe=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Z=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),D=a("code"),me=n("model(inputs)"),le=n("."),V=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),J=l(),z=a("ul"),q=a("li"),ne=n("a single Tensor with "),W=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),ce=l(),M=a("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),Y=n("model([input_ids, attention_mask])"),oe=n(" or "),B=a("code"),ae=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),ke=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){m=r(p,"P",{});var F=i(m);x=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=d(p),_=r(p,"UL",{});var K=i(_);N=r(K,"LI",{});var be=i(N);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),g=d(K),$=r(K,"LI",{});var re=i($);pe=s(re,"having all inputs as a list, tuple or dict in the first positional arguments."),re.forEach(t),K.forEach(t),G=d(p),X=r(p,"P",{});var P=i(X);Z=s(P,"This second option is useful when using "),I=r(P,"CODE",{});var ve=i(I);ee=s(ve,"tf.keras.Model.fit"),ve.forEach(t),he=s(P,` method which currently requires having all the tensors in the first argument of the model call function: `),D=r(P,"CODE",{});var we=i(D);me=s(we,"model(inputs)"),we.forEach(t),le=s(P,"."),P.forEach(t),V=d(p),j=r(p,"P",{});var ye=i(j);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),J=d(p),z=r(p,"UL",{});var E=i(z);q=r(E,"LI",{});var U=i(q);ne=s(U,"a single Tensor with "),W=r(U,"CODE",{});var Le=i(W);de=s(Le,"input_ids"),Le.forEach(t),se=s(U," only and nothing else: "),H=r(U,"CODE",{});var Te=i(H);ue=s(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(E),M=r(E,"LI",{});var R=i(M);fe=s(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ne=i(Q);Y=s(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),oe=s(R," or "),B=r(R,"CODE",{});var xe=i(B);ae=s(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),R.forEach(t),ge=d(E),O=r(E,"LI",{});var ie=i(O);_e=s(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(ie,"CODE",{});var Fe=i(A);ke=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ie.forEach(t),E.forEach(t)},m(p,F){h(p,m,F),e(m,x),h(p,f,F),h(p,_,F),e(_,N),e(N,k),e(_,g),e(_,$),e($,pe),h(p,G,F),h(p,X,F),e(X,Z),e(X,I),e(I,ee),e(X,he),e(X,D),e(D,me),e(X,le),h(p,V,F),h(p,j,F),e(j,te),h(p,J,F),h(p,z,F),e(z,q),e(q,ne),e(q,W),e(W,de),e(q,se),e(q,H),e(H,ue),e(z,ce),e(z,M),e(M,fe),e(M,Q),e(Q,Y),e(M,oe),e(M,B),e(B,ae),e(z,ge),e(z,O),e(O,_e),e(O,A),e(A,ke)},d(p){p&&t(m),p&&t(f),p&&t(_),p&&t(G),p&&t(X),p&&t(V),p&&t(j),p&&t(J),p&&t(z)}}}function Ey(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function Cy(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke;return{c(){m=a("p"),x=n("TF 2.0 models accepts two formats as inputs:"),f=l(),_=a("ul"),N=a("li"),k=n("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),$=a("li"),pe=n("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),X=a("p"),Z=n("This second option is useful when using "),I=a("code"),ee=n("tf.keras.Model.fit"),he=n(` method which currently requires having all the tensors in the first argument of the model call function: `),D=a("code"),me=n("model(inputs)"),le=n("."),V=l(),j=a("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),ne=n("a single Tensor with "),W=a("code"),de=n("input_ids"),se=n(" only and nothing else: "),H=a("code"),ue=n("model(inputs_ids)"),ce=l(),M=a("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=a("code"),Y=n("model([input_ids, attention_mask])"),oe=n(" or "),B=a("code"),ae=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),O=a("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),ke=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){m=r(p,"P",{});var F=i(m);x=s(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=d(p),_=r(p,"UL",{});var K=i(_);N=r(K,"LI",{});var be=i(N);k=s(be,"having all inputs as keyword arguments (like PyTorch models), or"),be.forEach(t),g=d(K),$=r(K,"LI",{});var re=i($);pe=s(re,"having all inputs as a list, tuple or dict in the first positional arguments."),re.forEach(t),K.forEach(t),G=d(p),X=r(p,"P",{});var P=i(X);Z=s(P,"This second option is useful when using "),I=r(P,"CODE",{});var ve=i(I);ee=s(ve,"tf.keras.Model.fit"),ve.forEach(t),he=s(P,` method which currently requires having all the tensors in the first argument of the model call function: `),D=r(P,"CODE",{});var we=i(D);me=s(we,"model(inputs)"),we.forEach(t),le=s(P,"."),P.forEach(t),V=d(p),j=r(p,"P",{});var ye=i(j);te=s(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(t),J=d(p),z=r(p,"UL",{});var E=i(z);q=r(E,"LI",{});var U=i(q);ne=s(U,"a single Tensor with "),W=r(U,"CODE",{});var Le=i(W);de=s(Le,"input_ids"),Le.forEach(t),se=s(U," only and nothing else: "),H=r(U,"CODE",{});var Te=i(H);ue=s(Te,"model(inputs_ids)"),Te.forEach(t),U.forEach(t),ce=d(E),M=r(E,"LI",{});var R=i(M);fe=s(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ne=i(Q);Y=s(Ne,"model([input_ids, attention_mask])"),Ne.forEach(t),oe=s(R," or "),B=r(R,"CODE",{});var xe=i(B);ae=s(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),R.forEach(t),ge=d(E),O=r(E,"LI",{});var ie=i(O);_e=s(ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(ie,"CODE",{});var Fe=i(A);ke=s(Fe,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Fe.forEach(t),ie.forEach(t),E.forEach(t)},m(p,F){h(p,m,F),e(m,x),h(p,f,F),h(p,_,F),e(_,N),e(N,k),e(_,g),e(_,$),e($,pe),h(p,G,F),h(p,X,F),e(X,Z),e(X,I),e(I,ee),e(X,he),e(X,D),e(D,me),e(X,le),h(p,V,F),h(p,j,F),e(j,te),h(p,J,F),h(p,z,F),e(z,q),e(q,ne),e(q,W),e(W,de),e(q,se),e(q,H),e(H,ue),e(z,ce),e(z,M),e(M,fe),e(M,Q),e(Q,Y),e(M,oe),e(M,B),e(B,ae),e(z,ge),e(z,O),e(O,_e),e(O,A),e(A,ke)},d(p){p&&t(m),p&&t(f),p&&t(_),p&&t(G),p&&t(X),p&&t(V),p&&t(j),p&&t(J),p&&t(z)}}}function jy(S){let m,x,f,_,N;return{c(){m=a("p"),x=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),_=n("Module"),N=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=r(k,"P",{});var g=i(m);x=s(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(g,"CODE",{});var $=i(f);_=s($,"Module"),$.forEach(t),N=s(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(k,g){h(k,m,g),e(m,x),e(m,f),e(f,_),e(m,N)},d(k){k&&t(m)}}}function Oy(S){let m,x,f,_,N,k,g,$,pe,G,X,Z,I,ee,he,D,me,le,V,j,te,J,z,q,ne,W,de,se,H,ue,ce,M,fe,Q,Y,oe,B,ae,ge,O,_e,A,ke,p,F,K,be,re,P,ve,we,ye,E,U,Le,Te,R,Ne,xe,ie,Fe,Ad,vt,eh,Xn,th,oh,zn,nh,sh,Sd,Ht,Do,Ci,Mn,ah,ji,rh,Id,De,qn,ih,_t,lh,gr,dh,ch,_r,ph,hh,En,mh,uh,fh,Qt,gh,kr,_h,kh,vr,vh,Th,bh,Oi,wh,yh,Cn,Dd,Wt,Ho,Pi,jn,Lh,Ai,Nh,Hd,$e,On,xh,Pn,Fh,An,$h,Xh,zh,Sn,Mh,Tr,qh,Eh,Ch,Bt,jh,Si,Oh,Ph,Ii,Ah,Sh,Ih,Tt,In,Dh,Di,Hh,Qh,Dn,br,Wh,Hi,Bh,Uh,wr,Rh,Qi,Vh,Yh,Qo,Hn,Gh,Qn,Jh,Wi,Kh,Zh,em,lt,Wn,tm,Bi,om,nm,Bn,sm,Ut,am,Ui,rm,im,Ri,lm,dm,cm,Vi,Qd,Rt,Wo,Yi,Un,pm,Gi,hm,Wd,qe,Rn,mm,Vt,um,Ji,fm,gm,Vn,_m,km,vm,Yn,Tm,yr,bm,wm,ym,Yt,Lm,Ki,Nm,xm,Zi,Fm,$m,Xm,bt,Gn,zm,el,Mm,qm,Jn,Lr,Em,tl,Cm,jm,Nr,Om,ol,Pm,Am,dt,Kn,Sm,nl,Im,Dm,Zn,Hm,Gt,Qm,sl,Wm,Bm,al,Um,Rm,Bd,Jt,Bo,rl,es,Vm,il,Ym,Ud,Kt,ts,Gm,os,Jm,xr,Km,Zm,Rd,Zt,ns,eu,ss,tu,Fr,ou,nu,Vd,eo,as,su,rs,au,$r,ru,iu,Yd,to,is,lu,ls,du,Xr,cu,pu,Gd,oo,ds,hu,cs,mu,ll,uu,fu,Jd,no,ps,gu,hs,_u,zr,ku,vu,Kd,so,ms,Tu,us,bu,Mr,wu,yu,Zd,ao,fs,Lu,gs,Nu,qr,xu,Fu,ec,ro,_s,$u,ks,Xu,Er,zu,Mu,tc,io,vs,qu,Ts,Eu,Cr,Cu,ju,oc,lo,bs,Ou,ws,Pu,jr,Au,Su,nc,co,ys,Iu,Ls,Du,dl,Hu,Qu,sc,po,Ns,Wu,xs,Bu,Or,Uu,Ru,ac,ho,Uo,cl,Fs,Vu,pl,Yu,rc,He,$s,Gu,hl,Ju,Ku,Xs,Zu,Pr,ef,tf,of,zs,nf,Ms,sf,af,rf,Ye,qs,lf,mo,df,Ar,cf,pf,ml,hf,mf,uf,Ro,ff,ul,gf,_f,Es,ic,uo,Vo,fl,Cs,kf,gl,vf,lc,Qe,js,Tf,_l,bf,wf,Os,yf,Sr,Lf,Nf,xf,Ps,Ff,As,$f,Xf,zf,Ge,Ss,Mf,fo,qf,Ir,Ef,Cf,kl,jf,Of,Pf,Yo,Af,vl,Sf,If,Is,dc,go,Go,Tl,Ds,Df,bl,Hf,cc,We,Hs,Qf,wl,Wf,Bf,Qs,Uf,Dr,Rf,Vf,Yf,Ws,Gf,Bs,Jf,Kf,Zf,Me,Us,eg,_o,tg,Hr,og,ng,yl,sg,ag,rg,Jo,ig,Ll,lg,dg,Rs,cg,Nl,pg,hg,Vs,pc,ko,Ko,xl,Ys,mg,Fl,ug,hc,Be,Gs,fg,$l,gg,_g,Js,kg,Qr,vg,Tg,bg,Ks,wg,Zs,yg,Lg,Ng,Je,ea,xg,vo,Fg,Wr,$g,Xg,Xl,zg,Mg,qg,Zo,Eg,zl,Cg,jg,ta,mc,To,en,Ml,oa,Og,ql,Pg,uc,Ue,na,Ag,El,Sg,Ig,sa,Dg,Br,Hg,Qg,Wg,aa,Bg,ra,Ug,Rg,Vg,Ke,ia,Yg,bo,Gg,Ur,Jg,Kg,Cl,Zg,e_,t_,tn,o_,jl,n_,s_,la,fc,wo,on,Ol,da,a_,Pl,r_,gc,Re,ca,i_,yo,l_,Al,d_,c_,Sl,p_,h_,m_,pa,u_,Rr,f_,g_,__,ha,k_,ma,v_,T_,b_,Ze,ua,w_,Lo,y_,Vr,L_,N_,Il,x_,F_,$_,nn,X_,Dl,z_,M_,fa,_c,No,sn,Hl,ga,q_,Ql,E_,kc,Ve,_a,C_,xo,j_,Wl,O_,P_,Bl,A_,S_,I_,ka,D_,Yr,H_,Q_,W_,va,B_,Ta,U_,R_,V_,et,ba,Y_,Fo,G_,Gr,J_,K_,Ul,Z_,ek,tk,an,ok,Rl,nk,sk,wa,vc,$o,rn,Vl,ya,ak,Yl,rk,Tc,Ee,La,ik,Gl,lk,dk,Na,ck,Jr,pk,hk,mk,xa,uk,Fa,fk,gk,_k,ln,kk,tt,$a,vk,Xo,Tk,Kr,bk,wk,Jl,yk,Lk,Nk,dn,xk,Kl,Fk,$k,Xa,bc,zo,cn,Zl,za,Xk,ed,zk,wc,Ce,Ma,Mk,td,qk,Ek,
qa,Ck,Zr,jk,Ok,Pk,Ea,Ak,Ca,Sk,Ik,Dk,pn,Hk,ot,ja,Qk,Mo,Wk,ei,Bk,Uk,od,Rk,Vk,Yk,hn,Gk,nd,Jk,Kk,Oa,yc,qo,mn,sd,Pa,Zk,ad,ev,Lc,je,Aa,tv,rd,ov,nv,Sa,sv,ti,av,rv,iv,Ia,lv,Da,dv,cv,pv,un,hv,nt,Ha,mv,Eo,uv,oi,fv,gv,id,_v,kv,vv,fn,Tv,ld,bv,wv,Qa,Nc,Co,gn,dd,Wa,yv,cd,Lv,xc,Oe,Ba,Nv,pd,xv,Fv,Ua,$v,ni,Xv,zv,Mv,Ra,qv,Va,Ev,Cv,jv,_n,Ov,st,Ya,Pv,jo,Av,si,Sv,Iv,hd,Dv,Hv,Qv,kn,Wv,md,Bv,Uv,Ga,Fc,Oo,vn,ud,Ja,Rv,fd,Vv,$c,Pe,Ka,Yv,gd,Gv,Jv,Za,Kv,ai,Zv,eT,tT,er,oT,tr,nT,sT,aT,Tn,rT,at,or,iT,Po,lT,ri,dT,cT,_d,pT,hT,mT,bn,uT,kd,fT,gT,nr,Xc,Ao,wn,vd,sr,_T,Td,kT,zc,Ae,ar,vT,So,TT,bd,bT,wT,wd,yT,LT,NT,rr,xT,ii,FT,$T,XT,ir,zT,lr,MT,qT,ET,yn,CT,rt,dr,jT,Io,OT,li,PT,AT,yd,ST,IT,DT,Ln,HT,Ld,QT,WT,cr,Mc;return k=new ze({}),ee=new ze({}),Mn=new ze({}),qn=new C({props:{name:"class transformers.XLNetConfig",anchor:"transformers.XLNetConfig",parameters:[{name:"vocab_size",val:" = 32000"},{name:"d_model",val:" = 1024"},{name:"n_layer",val:" = 24"},{name:"n_head",val:" = 16"},{name:"d_inner",val:" = 4096"},{name:"ff_activation",val:" = 'gelu'"},{name:"untie_r",val:" = True"},{name:"attn_type",val:" = 'bi'"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"dropout",val:" = 0.1"},{name:"mem_len",val:" = 512"},{name:"reuse_len",val:" = None"},{name:"use_mems_eval",val:" = True"},{name:"use_mems_train",val:" = False"},{name:"bi_data",val:" = False"},{name:"clamp_len",val:" = -1"},{name:"same_length",val:" = False"},{name:"summary_type",val:" = 'last'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = 'tanh'"},{name:"summary_last_dropout",val:" = 0.1"},{name:"start_n_top",val:" = 5"},{name:"end_n_top",val:" = 5"},{name:"pad_token_id",val:" = 5"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/configuration_xlnet.py#L32",parametersDescription:[{anchor:"transformers.XLNetConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32000) &#x2014; Vocabulary size of the XLNet model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetModel">XLNetModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetModel">TFXLNetModel</a>.`,name:"vocab_size"},{anchor:"transformers.XLNetConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.XLNetConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.XLNetConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.XLNetConfig.d_inner",description:`<strong>d_inner</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"d_inner"},{anchor:"transformers.XLNetConfig.ff_activation",description:`<strong>ff_activation</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"ff_activation"},{anchor:"transformers.XLNetConfig.untie_r",description:`<strong>untie_r</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to untie relative position biases`,name:"untie_r"},{anchor:"transformers.XLNetConfig.attn_type",description:`<strong>attn_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;bi&quot;</code>) &#x2014; The attention type used by the model. Set <code>&quot;bi&quot;</code> for XLNet, <code>&quot;uni&quot;</code> for Transformer-XL.`,name:"attn_type"},{anchor:"transformers.XLNetConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.XLNetConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.XLNetConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.XLNetConfig.mem_len",description:`<strong>mem_len</strong> (<code>int</code> or <code>None</code>, <em>optional</em>) &#x2014; The number of tokens to cache. The key/value pairs that have already been pre-computed in a previous forward pass won&#x2019;t be re-computed. 
See the <a href="https://huggingface.co/transformers/quickstart.html#using-the-past" rel="nofollow">quickstart</a> for more information.`,name:"mem_len"},{anchor:"transformers.XLNetConfig.reuse_len",description:`<strong>reuse_len</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of tokens in the current batch to be cached and reused in the future.`,name:"reuse_len"},{anchor:"transformers.XLNetConfig.bi_data",description:`<strong>bi_data</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use bidirectional input pipeline. Usually set to <code>True</code> during pretraining and <code>False</code> during finetuning.`,name:"bi_data"},{anchor:"transformers.XLNetConfig.clamp_len",description:`<strong>clamp_len</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Clamp all relative distances larger than clamp_len. Setting this attribute to -1 means no clamping.`,name:"clamp_len"},{anchor:"transformers.XLNetConfig.same_length",description:`<strong>same_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the same attention length for each token.`,name:"same_length"},{anchor:"transformers.XLNetConfig.summary_type",description:`<strong>summary_type</strong> (<code>str</code>, <em>optional</em>, defaults to &#x201C;last&#x201D;) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Has to be one of the following options:</p> <ul> <li><code>&quot;last&quot;</code>: Take the last token hidden state (like XLNet).</li> <li><code>&quot;first&quot;</code>: Take the first token hidden state (like BERT).</li> <li><code>&quot;mean&quot;</code>: Take the mean of all tokens hidden states.</li> <li><code>&quot;cls_index&quot;</code>: Supply a Tensor of classification token position (like GPT/GPT-2).</li> <li><code>&quot;attn&quot;</code>: Not implemented now, use multi-head attention.</li> </ul>`,name:"summary_type"},{anchor:"transformers.XLNetConfig.summary_use_proj",description:`<strong>summary_use_proj</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Whether or not to add a projection after the vector extraction.`,name:"summary_use_proj"},{anchor:"transformers.XLNetConfig.summary_activation",description:`<strong>summary_activation</strong> (<code>str</code>, <em>optional</em>) &#x2014; Argument used when doing sequence summary. 
Used in the sequence classification and multiple choice models.</p> <p>Pass <code>&quot;tanh&quot;</code> for a tanh activation to the output, any other value will result in no activation.`,name:"summary_activation"},{anchor:"transformers.XLNetConfig.summary_proj_to_labels",description:`<strong>summary_proj_to_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Used in the sequence classification and multiple choice models.</p> <p>Whether the projection outputs should have <code>config.num_labels</code> or <code>config.hidden_size</code> classes.`,name:"summary_proj_to_labels"},{anchor:"transformers.XLNetConfig.summary_last_dropout",description:`<strong>summary_last_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Used in the sequence classification and multiple choice models.</p> <p>The dropout ratio to be used after the projection and activation.`,name:"summary_last_dropout"},{anchor:"transformers.XLNetConfig.start_n_top",description:`<strong>start_n_top</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Used in the SQuAD evaluation script.`,name:"start_n_top"},{anchor:"transformers.XLNetConfig.end_n_top",description:`<strong>end_n_top</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Used in the SQuAD evaluation script.`,name:"end_n_top"},{anchor:"transformers.XLNetConfig.use_mems_eval",description:`<strong>use_mems_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should make use of the recurrent memory mechanism in evaluation mode.`,name:"use_mems_eval"},{anchor:"transformers.XLNetConfig.use_mems_train",description:`<strong>use_mems_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should make use of the recurrent memory mechanism in train mode.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>For pretraining, it is recommended to set <code>use_mems_train</code> to <code>True</code>. For fine-tuning, it is recommended to set <code>use_mems_train</code> to <code>False</code> as discussed <a href="https://github.com/zihangdai/xlnet/issues/41#issuecomment-505102587" rel="nofollow">here</a>. 
If <code>use_mems_train</code> is set to <code>True</code>, one has to make sure that the train batches are correctly pre-processed, <em>e.g.</em> <code>batch_1 = [[This line is], [This is the]]</code> and <code>batch_2 = [[ the first line], [ second line]]</code> and that all batches are of equal size.</p> </div>`,name:"use_mems_train"}]}}),Cn=new Se({props:{code:`from transformers import XLNetConfig, XLNetModel # Initializing a XLNet configuration configuration = XLNetConfig() # Initializing a model from the configuration model = XLNetModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetConfig, XLNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a XLNet configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = XLNetConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),jn=new ze({}),On=new C({props:{name:"class transformers.XLNetTokenizer",anchor:"transformers.XLNetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = False"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '<sep>'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '<cls>'"},{name:"mask_token",val:" = '<mask>'"},{name:"additional_special_tokens",val:" = ['<eop>', '<eod>']"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet.py#L54",parametersDescription:[{anchor:"transformers.XLNetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.XLNetTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.XLNetTokenizer.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.XLNetTokenizer.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.XLNetTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used 
during pretraining. Can be used as a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.XLNetTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.XLNetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.XLNetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;sep&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.XLNetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.XLNetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;cls&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.XLNetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.XLNetTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;eop&gt;&quot;, &quot;&lt;eod&gt;&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.XLNetTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),In=new C({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.XLNetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet.py#L252",parametersDescription:[{anchor:"transformers.XLNetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.XLNetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Hn=new C({props:{name:"get_special_tokens_mask",anchor:"transformers.XLNetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet.py#L277",parametersDescription:[{anchor:"transformers.XLNetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLNetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence 
pairs.`,name:"token_ids_1"},{anchor:"transformers.XLNetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Wn=new C({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.XLNetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet.py#L305",parametersDescription:[{anchor:"transformers.XLNetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLNetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Bn=new Se({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Un=new ze({}),Rn=new C({props:{name:"class transformers.XLNetTokenizerFast",anchor:"transformers.XLNetTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = False"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '<sep>'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '<cls>'"},{name:"mask_token",val:" = '<mask>'"},{name:"additional_special_tokens",val:" = ['<eop>', '<eod>']"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet_fast.py#L64",parametersDescription:[{anchor:"transformers.XLNetTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.XLNetTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.XLNetTokenizerFast.remove_space",description:`<strong>remove_space</strong> 
(<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.XLNetTokenizerFast.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.XLNetTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.XLNetTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.XLNetTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.XLNetTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;sep&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.XLNetTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.XLNetTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;cls&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.XLNetTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. 
This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.XLNetTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;eop&gt;&quot;, &quot;&lt;eod&gt;&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),Gn=new C({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.XLNetTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet_fast.py#L174",parametersDescription:[{anchor:"transformers.XLNetTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.XLNetTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Kn=new C({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.XLNetTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/tokenization_xlnet_fast.py#L199",parametersDescription:[{anchor:"transformers.XLNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Zn=new Se({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),es=new ze({}),ts=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetModelOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": 
typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L586",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, hidden_size)</code>) &#x2014; Sequence of hidden-states at the last layer of the model.</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.`,name:"last_hidden_state"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetModelOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ns=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L619",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, num_predict, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),as=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L655",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before 
SoftMax).`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),is=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L721",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ds=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L688",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ps=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_logits",val:": FloatTensor = None"},{name:"end_logits",val:": FloatTensor = None"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L756",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput.start_logits",description:`<strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length,)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput.end_logits",description:`<strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length,)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ms=new C({props:{name:"class transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput",anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"end_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_logits",val:": typing.Optional[torch.FloatTensor] = None"},{name:"mems",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L792",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.start_top_log_probs",description:`<strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_log_probs"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.start_top_index",description:`<strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, 
<em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_index"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.end_top_log_probs",description:`<strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_log_probs"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.end_top_index",description:`<strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_index"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.cls_logits",description:`<strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the <code>is_impossible</code> label of the answers.`,name:"cls_logits"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),fs=new C({props:{name:"class transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput",anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"mems",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L827",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_predict, hidden_size)</code>) &#x2014; Sequence of hidden-states at the last layer of the model.</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.`,name:"last_hidden_state"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),_s=new C({props:{name:"class transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput",anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"mems",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L860",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_predict, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),vs=new C({props:{name:"class transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput",anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"mems",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L896",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),bs=new C({props:{name:"class transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput",anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"mems",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L962",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ys=new C({props:{name:"class transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput",anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"mems",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L929",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ns=new C({props:{name:"class transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput",anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"start_logits",val:": Tensor = None"},{name:"end_logits",val:": Tensor = None"},{name:"mems",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L997",parametersDescription:[{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput.start_logits",description:`<strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length,)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput.end_logits",description:`<strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length,)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput.mems",description:`<strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"mems"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Fs=new ze({}),$s=new C({props:{name:"class transformers.XLNetModel",anchor:"transformers.XLNetModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L932",parametersDescription:[{anchor:"transformers.XLNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qs=new C({props:{name:"forward",anchor:"transformers.XLNetModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1063",parametersDescription:[{anchor:"transformers.XLNetModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetModel.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetModel.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attends to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetModel.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th prediction in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetModel.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. 
with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only use one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetModelOutput" >transformers.models.xlnet.modeling_xlnet.XLNetModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, hidden_size)</code>) \u2014 Sequence of hidden-states at the last layer of the model.</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. 
Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetModelOutput" >transformers.models.xlnet.modeling_xlnet.XLNetModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ro=new Xe({props:{$$slots:{default:[_y]},$$scope:{ctx:S}}}),Es=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetModel import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetModel.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetModel.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Cs=new ze({}),js=new C({props:{name:"class transformers.XLNetLMHeadModel",anchor:"transformers.XLNetLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1298",parametersDescription:[{anchor:"transformers.XLNetLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ss=new C({props:{name:"forward",anchor:"transformers.XLNetLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1358",parametersDescription:[{anchor:"transformers.XLNetLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetLMHeadModel.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetLMHeadModel.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). 
Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetLMHeadModel.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetLMHeadModel.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLNetLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_predict)</code>, <em>optional</em>) &#x2014; Labels for masked language modeling. <code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.</p> <p>The labels should correspond to the masked input words that should be predicted and depend on <code>target_mapping</code>. Note that in order to perform standard auto-regressive language modeling a <em><mask></mask></em> token has to be added to the <code>input_ids</code> (see the <code>prepare_inputs_for_generation</code> function and examples below).</p> <p>Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code>. All labels set to <code>-100</code> are ignored, the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput" >transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput" >transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Yo=new Xe({props:{$$slots:{default:[ky]},$$scope:{ctx:S}}}),Is=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetLMHeadModel import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased') # We show how to setup inputs to predict a next token using a bi-directional context. input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling. 
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0) assert labels.shape[0] == 1, 'only one word will be predicted' perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels) loss = outputs.loss next_token_logits = outputs.logits # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-large-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;xlnet-large-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># We show how to setup inputs to predict a next token using a bi-directional context.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is very &lt;mask&gt;&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># We will predict the masked token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>perm_mask = torch.zeros((<span class="hljs-number">1</span>, input_ids.shape[<span class="hljs-number">1</span>], input_ids.shape[<span class="hljs-number">1</span>]), dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>perm_mask[:, :, -<span class="hljs-number">1</span>] = <span class="hljs-number">1.0</span> <span class="hljs-comment"># Previous tokens don&#x27;t see last token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_mapping = torch.zeros((<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, input_ids.shape[<span class="hljs-number">1</span>]), dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># Shape [1, 1, seq_length] =&gt; let&#x27;s predict one token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_mapping[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, -<span class="hljs-number">1</span>] = <span class="hljs-number">1.0</span> <span class="hljs-comment"># Our first (and only) prediction will be the last token of the sequence (the masked token)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs[<span class="hljs-number">0</span>] <span class="hljs-comment"># Output has shape [target_mapping.size(0), 
target_mapping.size(1), config.vocab_size]</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is very &lt;mask&gt;&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># We will predict the masked token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;cute&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> labels.shape[<span class="hljs-number">0</span>] == <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;only one word will be predicted&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>perm_mask = torch.zeros((<span class="hljs-number">1</span>, input_ids.shape[<span class="hljs-number">1</span>], input_ids.shape[<span class="hljs-number">1</span>]), dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>perm_mask[:, :, -<span class="hljs-number">1</span>] = <span class="hljs-number">1.0</span> <span class="hljs-comment"># Previous tokens don&#x27;t see last token as is done in standard auto-regressive lm training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_mapping = torch.zeros((<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, input_ids.shape[<span class="hljs-number">1</span>]), dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># Shape [1, 1, seq_length] =&gt; let&#x27;s predict one token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_mapping[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, -<span class="hljs-number">1</span>] = <span class="hljs-number">1.0</span> <span class="hljs-comment"># Our first (and only) prediction will be the last token of the sequence (the masked token)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs.logits <span class="hljs-comment"># Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]</span>`}}),Ds=new ze({}),Hs=new C({props:{name:"class transformers.XLNetForSequenceClassification",anchor:"transformers.XLNetForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1481",parametersDescription:[{anchor:"transformers.XLNetForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Us=new C({props:{name:"forward",anchor:"transformers.XLNetForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1494",parametersDescription:[{anchor:"transformers.XLNetForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetForSequenceClassification.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetForSequenceClassification.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). 
Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetForSequenceClassification.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetForSequenceClassification.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLNetForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Jo=new Xe({props:{$$slots:{default:[vy]},$$scope:{ctx:S}}}),Rs=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetForSequenceClassification import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetForSequenceClassification.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Vs=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetForSequenceClassification import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetForSequenceClassification.from_pretrained('xlnet-base-cased', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], 
dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ys=new ze({}),Gs=new C({props:{name:"class transformers.XLNetForMultipleChoice",anchor:"transformers.XLNetForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1687",parametersDescription:[{anchor:"transformers.XLNetForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ea=new C({props:{name:"forward",anchor:"transformers.XLNetForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1698",parametersDescription:[{anchor:"transformers.XLNetForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetForMultipleChoice.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetForMultipleChoice.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetForMultipleChoice.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetForMultipleChoice.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, num_choices, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLNetForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Zo=new Xe({props:{$$slots:{default:[Ty]},$$scope:{ctx:S}}}),ta=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetForMultipleChoice import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetForMultipleChoice.from_pretrained('xlnet-base-cased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),oa=new ze({}),na=new C({props:{name:"class transformers.XLNetForTokenClassification",anchor:"transformers.XLNetForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1590",parametersDescription:[{anchor:"transformers.XLNetForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ia=new C({props:{name:"forward",anchor:"transformers.XLNetForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1601",parametersDescription:[{anchor:"transformers.XLNetForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetForTokenClassification.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetForTokenClassification.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). 
Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetForTokenClassification.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetForTokenClassification.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLNetForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <em>num_choices</em> is the size of the second dimension of the input tensors. (see <em>input_ids</em> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tn=new Xe({props:{$$slots:{default:[by]},$$scope:{ctx:S}}}),la=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetForTokenClassification import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetForTokenClassification.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),da=new ze({}),ca=new C({props:{name:"class 
transformers.XLNetForQuestionAnsweringSimple",anchor:"transformers.XLNetForQuestionAnsweringSimple",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1790",parametersDescription:[{anchor:"transformers.XLNetForQuestionAnsweringSimple.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ua=new C({props:{name:"forward",anchor:"transformers.XLNetForQuestionAnsweringSimple.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1801",parametersDescription:[{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.XLNetForQuestionAnsweringSimple.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length,)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length,)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nn=new Xe({props:{$$slots:{default:[wy]},$$scope:{ctx:S}}}),fa=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetForQuestionAnsweringSimple import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = 
outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetForQuestionAnsweringSimple <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetForQuestionAnsweringSimple.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),ga=new ze({}),_a=new C({props:{name:"class transformers.XLNetForQuestionAnswering",anchor:"transformers.XLNetForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1901",parametersDescription:[{anchor:"transformers.XLNetForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ba=new C({props:{name:"forward",anchor:"transformers.XLNetForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"is_impossible",val:" = None"},{name:"cls_index",val:" = None"},{name:"p_mask",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_xlnet.py#L1915",parametersDescription:[{anchor:"transformers.XLNetForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLNetForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLNetForQuestionAnswering.forward.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.XLNetForQuestionAnswering.forward.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.XLNetForQuestionAnswering.forward.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.XLNetForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLNetForQuestionAnswering.forward.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.XLNetForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLNetForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLNetForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLNetForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLNetForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLNetForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.XLNetForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"},{anchor:"transformers.XLNetForQuestionAnswering.forward.is_impossible",description:`<strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels whether a question has an answer or no answer (SQuAD 2.0)`,name:"is_impossible"},{anchor:"transformers.XLNetForQuestionAnswering.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the classification token to use as input for computing plausibility of the answer.`,name:"cls_index"},{anchor:"transformers.XLNetForQuestionAnswering.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Optional mask of tokens which can&#x2019;t be in answers (e.g. [CLS], [PAD], &#x2026;). 1.0 means token should be masked. 0.0 mean token is not masked.`,name:"p_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</p> </li> <li> <p><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top config.start_n_top start token possibilities (beam-search).</p> </li> <li> <p><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top config.start_n_top start token possibilities (beam-search).</p> </li> <li> <p><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</p> </li> <li> <p><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</p> </li> <li> <p><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the <code>is_impossible</code> label of the answers.</p> </li> <li> <p><strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput" >transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),an=new Xe({props:{$$slots:{default:[yy]},$$scope:{ctx:S}}}),wa=new Se({props:{code:`from transformers import XLNetTokenizer, XLNetForQuestionAnswering import torch tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, XLNetForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = 
torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),ya=new ze({}),La=new C({props:{name:"class transformers.TFXLNetModel",anchor:"transformers.TFXLNetModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1147",parametersDescription:[{anchor:"transformers.TFXLNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ln=new Xe({props:{$$slots:{default:[Ly]},$$scope:{ctx:S}}}),$a=new C({props:{name:"call",anchor:"transformers.TFXLNetModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1152",parametersDescription:[{anchor:"transformers.TFXLNetModel.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLNetModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLNetModel.call.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.TFXLNetModel.call.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.TFXLNetModel.call.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.TFXLNetModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLNetModel.call.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.TFXLNetModel.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLNetModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLNetModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFXLNetModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFXLNetModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_predict, hidden_size)</code>) \u2014 Sequence of hidden-states at the last layer of the model.</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),dn=new Xe({props:{$$slots:{default:[Ny]},$$scope:{ctx:S}}}),Xa=new Se({props:{code:`from transformers import XLNetTokenizer, TFXLNetModel import tensorflow as tf tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = TFXLNetModel.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, TFXLNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLNetModel.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),za=new ze({}),Ma=new C({props:{name:"class transformers.TFXLNetLMHeadModel",anchor:"transformers.TFXLNetLMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1231",parametersDescription:[{anchor:"transformers.TFXLNetLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),pn=new Xe({props:{$$slots:{default:[xy]},$$scope:{ctx:S}}}),ja=new C({props:{name:"call",anchor:"transformers.TFXLNetLMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1284",parametersDescription:[{anchor:"transformers.TFXLNetLMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLNetLMHeadModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLNetLMHeadModel.call.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.TFXLNetLMHeadModel.call.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). 
Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.TFXLNetLMHeadModel.call.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.TFXLNetLMHeadModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLNetLMHeadModel.call.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.TFXLNetLMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLNetLMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLNetLMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFXLNetLMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFXLNetLMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFXLNetLMHeadModel.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_predict, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> <p><code>num_predict</code> corresponds to <code>target_mapping.shape[1]</code>. If <code>target_mapping</code> is <code>None</code>, then <code>num_predict</code> corresponds to <code>sequence_length</code>.</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),hn=new Xe({props:{$$slots:{default:[Fy]},$$scope:{ctx:S}}}),Oa=new Se({props:{code:`import tensorflow as tf import numpy as np from transformers import XLNetTokenizer, TFXLNetLMHeadModel tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') model = TFXLNetLMHeadModel.from_pretrained('xlnet-large-cased') # We show how to setup inputs to predict a next token using a bi-directional context. 
input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[None, :] # We will predict the masked token perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1])) perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token target_mapping = np.zeros((1, 1, input_ids.shape[1])) # Shape [1, 1, seq_length] => let's predict one token target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) outputs = model(input_ids, perm_mask=tf.constant(perm_mask, dtype=tf.float32), target_mapping=tf.constant(target_mapping, dtype=tf.float32)) next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, TFXLNetLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-large-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLNetLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;xlnet-large-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># We show how to setup inputs to predict a next token using a bi-directional context.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tf.constant(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is very &lt;mask&gt;&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>))[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># We will predict the masked token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>perm_mask = np.zeros((<span class="hljs-number">1</span>, input_ids.shape[<span class="hljs-number">1</span>], input_ids.shape[<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>perm_mask[:, :, -<span class="hljs-number">1</span>] = <span class="hljs-number">1.0</span> <span class="hljs-comment"># Previous tokens don&#x27;t see last token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_mapping = np.zeros((<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, input_ids.shape[<span class="hljs-number">1</span>])) <span class="hljs-comment"># Shape [1, 1, seq_length] =&gt; let&#x27;s predict one token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_mapping[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, -<span class="hljs-number">1</span>] = <span class="hljs-number">1.0</span> <span class="hljs-comment"># Our first (and only) prediction will be the last token of the sequence (the masked token)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, perm_mask=tf.constant(perm_mask, dtype=tf.float32), target_mapping=tf.constant(target_mapping, dtype=tf.float32)) <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs[<span class="hljs-number">0</span>] <span class="hljs-comment"># Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]</span>`}}),Pa=new ze({}),Aa=new C({props:{name:"class 
transformers.TFXLNetForSequenceClassification",anchor:"transformers.TFXLNetForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1407",parametersDescription:[{anchor:"transformers.TFXLNetForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),un=new Xe({props:{$$slots:{default:[$y]},$$scope:{ctx:S}}}),Ha=new C({props:{name:"call",anchor:"transformers.TFXLNetForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1420",parametersDescription:[{anchor:"transformers.TFXLNetForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLNetForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLNetForSequenceClassification.call.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.TFXLNetForSequenceClassification.call.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.TFXLNetForSequenceClassification.call.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.TFXLNetForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLNetForSequenceClassification.call.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.TFXLNetForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLNetForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLNetForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFXLNetForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFXLNetForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFXLNetForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),fn=new Xe({props:{$$slots:{default:[Xy]},$$scope:{ctx:S}}}),Qa=new Se({props:{code:`from transformers import XLNetTokenizer, TFXLNetForSequenceClassification import tensorflow as tf tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = TFXLNetForSequenceClassification.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, TFXLNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLNetForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Wa=new ze({}),Ba=new C({props:{name:"class 
transformers.TFXLNetForMultipleChoice",anchor:"transformers.TFXLNetForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1523",parametersDescription:[{anchor:"transformers.TFXLNetForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),_n=new Xe({props:{$$slots:{default:[zy]},$$scope:{ctx:S}}}),Ya=new C({props:{name:"call",anchor:"transformers.TFXLNetForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1545",parametersDescription:[{anchor:"transformers.TFXLNetForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLNetForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLNetForMultipleChoice.call.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.TFXLNetForMultipleChoice.call.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.TFXLNetForMultipleChoice.call.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.TFXLNetForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLNetForMultipleChoice.call.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, num_choices, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.TFXLNetForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLNetForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLNetForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFXLNetForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFXLNetForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFXLNetForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),kn=new Xe({props:{$$slots:{default:[My]},$$scope:{ctx:S}}}),Ga=new Se({props:{code:`from transformers import XLNetTokenizer, TFXLNetForMultipleChoice import tensorflow as tf tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = TFXLNetForMultipleChoice.from_pretrained('xlnet-base-cased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, TFXLNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLNetForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ja=new ze({}),Ka=new C({props:{name:"class transformers.TFXLNetForTokenClassification",anchor:"transformers.TFXLNetForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1683",parametersDescription:[{anchor:"transformers.TFXLNetForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tn=new Xe({props:{$$slots:{default:[qy]},$$scope:{ctx:S}}}),or=new C({props:{name:"call",anchor:"transformers.TFXLNetForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1693",parametersDescription:[{anchor:"transformers.TFXLNetForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLNetForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLNetForTokenClassification.call.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.TFXLNetForTokenClassification.call.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.TFXLNetForTokenClassification.call.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.TFXLNetForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLNetForTokenClassification.call.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.TFXLNetForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLNetForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLNetForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFXLNetForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFXLNetForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFXLNetForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),bn=new Xe({props:{$$slots:{default:[Ey]},$$scope:{ctx:S}}}),nr=new Se({props:{code:`from transformers import XLNetTokenizer, TFXLNetForTokenClassification import tensorflow as tf tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = TFXLNetForTokenClassification.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, TFXLNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLNetForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sr=new ze({}),ar=new C({props:{name:"class 
transformers.TFXLNetForQuestionAnsweringSimple",anchor:"transformers.TFXLNetForQuestionAnsweringSimple",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1791",parametersDescription:[{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig">XLNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yn=new Xe({props:{$$slots:{default:[Cy]},$$scope:{ctx:S}}}),dr=new C({props:{name:"call",anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"mems",val:" = None"},{name:"perm_mask",val:" = None"},{name:"target_mapping",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"input_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_mems",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlnet/modeling_tf_xlnet.py#L1799",parametersDescription:[{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.mems",description:`<strong>mems</strong> (<code>List[torch.FloatTensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (see <code>mems</code> output below) . Can be used to speed up sequential decoding. 
The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> <p><code>use_mems</code> has to be set to <code>True</code> to make use of <code>mems</code>.`,name:"mems"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.perm_mask",description:`<strong>perm_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the attention pattern for each input token with values selected in <code>[0, 1]</code>:</p> <ul> <li>if <code>perm_mask[k, i, j] = 0</code>, i attend to j in batch k;</li> <li>if <code>perm_mask[k, i, j] = 1</code>, i does not attend to j in batch k.</li> </ul> <p>If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation).`,name:"perm_mask"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.target_mapping",description:`<strong>target_mapping</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_predict, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to indicate the output tokens to use. If <code>target_mapping[k, i, j] = 1</code>, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation).`,name:"target_mapping"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.input_mask",description:`<strong>input_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Negative of <code>attention_mask</code>, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base.</p> <p>Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>masked</strong>,</li> <li>0 for tokens that are <strong>not masked</strong>.</li> </ul> <p>You can only uses one of <code>input_mask</code> and <code>attention_mask</code>.`,name:"input_mask"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFXLNetForQuestionAnsweringSimple.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetConfig" >XLNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length,)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length,)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>mems</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) \u2014 Contains pre-computed hidden-states. Can be used (see <code>mems</code> input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput" >transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ln=new Xe({props:{$$slots:{default:[jy]},$$scope:{ctx:S}}}),cr=new Se({props:{code:`from transformers import XLNetTokenizer, TFXLNetForQuestionAnsweringSimple import tensorflow as tf tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = TFXLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' 
'.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer, TFXLNetForQuestionAnsweringSimple <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLNetForQuestionAnsweringSimple.from_pretrained(<span class="hljs-string">&#x27;xlnet-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){m=a("meta"),x=l(),f=a("h1"),_=a("a"),N=a("span"),v(k.$$.fragment),g=l(),$=a("span"),pe=n("XLNet"),G=l(),X=a("h2"),Z=a("a"),I=a("span"),v(ee.$$.fragment),he=l(),D=a("span"),me=n("Overview"),le=l(),V=a("p"),j=n("The XLNet model was proposed in "),te=a("a"),J=n("XLNet: Generalized Autoregressive Pretraining for Language Understanding"),z=n(` by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. XLnet is an extension of the Transformer-XL model pre-trained using an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order.`),q=l(),ne=a("p"),W=n("The abstract from the paper is the following:"),de=l(),se=a("p"),H=a("em"),ue=n(`With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. 
Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.`),ce=l(),M=a("p"),fe=n("Tips:"),Q=l(),Y=a("ul"),oe=a("li"),B=n("The specific attention pattern can be controlled at training and test time using the "),ae=a("code"),ge=n("perm_mask"),O=n(" input."),_e=l(),A=a("li"),ke=n(`Due to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained using only a sub-set of the output tokens as target which are selected with the `),p=a("code"),F=n("target_mapping"),K=n(" input."),be=l(),re=a("li"),P=n("To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the "),ve=a("code"),we=n("perm_mask"),ye=n(` and `),E=a("code"),U=n("target_mapping"),Le=n(` inputs to control the attention span and outputs (see examples in `),Te=a("em"),R=n("examples/pytorch/text-generation/run_generation.py"),Ne=n(")"),xe=l(),ie=a("li"),Fe=n("XLNet is one of the few models that has no sequence length limit."),Ad=l(),vt=a("p"),eh=n("This model was contributed by "),Xn=a("a"),th=n("thomwolf"),oh=n(". The original code can be found "),zn=a("a"),nh=n("here"),sh=n("."),Sd=l(),Ht=a("h2"),Do=a("a"),Ci=a("span"),v(Mn.$$.fragment),ah=l(),ji=a("span"),rh=n("XLNetConfig"),Id=l(),De=a("div"),v(qn.$$.fragment),ih=l(),_t=a("p"),lh=n("This is the configuration class to store the configuration of a "),gr=a("a"),dh=n("XLNetModel"),ch=n(` or a `),_r=a("a"),ph=n("TFXLNetModel"),hh=n(`. It is used to instantiate a XLNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),En=a("a"),mh=n("xlnet-large-cased"),uh=n(" architecture."),fh=l(),Qt=a("p"),gh=n("Configuration objects inherit from "),kr=a("a"),_h=n("PretrainedConfig"),kh=n(` and can be used to control the model outputs. Read the documentation from `),vr=a("a"),vh=n("PretrainedConfig"),Th=n(" for more information."),bh=l(),Oi=a("p"),wh=n("Examples:"),yh=l(),v(Cn.$$.fragment),Dd=l(),Wt=a("h2"),Ho=a("a"),Pi=a("span"),v(jn.$$.fragment),Lh=l(),Ai=a("span"),Nh=n("XLNetTokenizer"),Hd=l(),$e=a("div"),v(On.$$.fragment),xh=l(),Pn=a("p"),Fh=n("Construct an XLNet tokenizer. Based on "),An=a("a"),$h=n("SentencePiece"),Xh=n("."),zh=l(),Sn=a("p"),Mh=n("This tokenizer inherits from "),Tr=a("a"),qh=n("PreTrainedTokenizer"),Eh=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ch=l(),Bt=a("p"),jh=n(`Attributes: sp_model (`),Si=a("code"),Oh=n("SentencePieceProcessor"),Ph=n(`): The `),Ii=a("em"),Ah=n("SentencePiece"),Sh=n(" processor that is used for every conversion (string, tokens and IDs)."),Ih=l(),Tt=a("div"),v(In.$$.fragment),Dh=l(),Di=a("p"),Hh=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format:`),Qh=l(),Dn=a("ul"),br=a("li"),Wh=n("single sequence: "),Hi=a("code"),Bh=n("X <sep> <cls>"),Uh=l(),wr=a("li"),Rh=n("pair of sequences: "),Qi=a("code"),Vh=n("A <sep> B <sep> <cls>"),Yh=l(),Qo=a("div"),v(Hn.$$.fragment),Gh=l(),Qn=a("p"),Jh=n(`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Wi=a("code"),Kh=n("prepare_for_model"),Zh=n(" method."),em=l(),lt=a("div"),v(Wn.$$.fragment),tm=l(),Bi=a("p"),om=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet sequence pair mask has the following format:`),nm=l(),v(Bn.$$.fragment),sm=l(),Ut=a("p"),am=n("If "),Ui=a("code"),rm=n("token_ids_1"),im=n(" is "),Ri=a("code"),lm=n("None"),dm=n(", this method only returns the first portion of the mask (0s)."),cm=l(),Vi=a("div"),Qd=l(),Rt=a("h2"),Wo=a("a"),Yi=a("span"),v(Un.$$.fragment),pm=l(),Gi=a("span"),hm=n("XLNetTokenizerFast"),Wd=l(),qe=a("div"),v(Rn.$$.fragment),mm=l(),Vt=a("p"),um=n("Construct a \u201Cfast\u201D XLNet tokenizer (backed by HuggingFace\u2019s "),Ji=a("em"),fm=n("tokenizers"),gm=n(" library). Based on "),Vn=a("a"),_m=n("Unigram"),km=n("."),vm=l(),Yn=a("p"),Tm=n("This tokenizer inherits from "),yr=a("a"),bm=n("PreTrainedTokenizerFast"),wm=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ym=l(),Yt=a("p"),Lm=n(`Attributes: sp_model (`),Ki=a("code"),Nm=n("SentencePieceProcessor"),xm=n(`): The `),Zi=a("em"),Fm=n("SentencePiece"),$m=n(" processor that is used for every conversion (string, tokens and IDs)."),Xm=l(),bt=a("div"),v(Gn.$$.fragment),zm=l(),el=a("p"),Mm=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format:`),qm=l(),Jn=a("ul"),Lr=a("li"),Em=n("single sequence: "),tl=a("code"),Cm=n("X <sep> <cls>"),jm=l(),Nr=a("li"),Om=n("pair of sequences: "),ol=a("code"),Pm=n("A <sep> B <sep> <cls>"),Am=l(),dt=a("div"),v(Kn.$$.fragment),Sm=l(),nl=a("p"),Im=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An XLNet sequence pair mask has the following format:`),Dm=l(),v(Zn.$$.fragment),Hm=l(),Gt=a("p"),Qm=n("If "),sl=a("code"),Wm=n("token_ids_1"),Bm=n(" is "),al=a("code"),Um=n("None"),Rm=n(", this method only returns the first portion of the mask (0s)."),Bd=l(),Jt=a("h2"),Bo=a("a"),rl=a("span"),v(es.$$.fragment),Vm=l(),il=a("span"),Ym=n("XLNet specific outputs"),Ud=l(),Kt=a("div"),v(ts.$$.fragment),Gm=l(),os=a("p"),Jm=n("Output type of "),xr=a("a"),Km=n("XLNetModel"),Zm=n("."),Rd=l(),Zt=a("div"),v(ns.$$.fragment),eu=l(),ss=a("p"),tu=n("Output type of "),Fr=a("a"),ou=n("XLNetLMHeadModel"),nu=n("."),Vd=l(),eo=a("div"),v(as.$$.fragment),su=l(),rs=a("p"),au=n("Output type of "),$r=a("a"),ru=n("XLNetForSequenceClassification"),iu=n("."),Yd=l(),to=a("div"),v(is.$$.fragment),lu=l(),ls=a("p"),du=n("Output type of "),Xr=a("a"),cu=n("XLNetForMultipleChoice"),pu=n("."),Gd=l(),oo=a("div"),v(ds.$$.fragment),hu=l(),cs=a("p"),mu=n("Output type of "),ll=a("code"),uu=n("XLNetForTokenClassificationOutput"),fu=n("."),Jd=l(),no=a("div"),v(ps.$$.fragment),gu=l(),hs=a("p"),_u=n("Output type of "),zr=a("a"),ku=n("XLNetForQuestionAnsweringSimple"),vu=n("."),Kd=l(),so=a("div"),v(ms.$$.fragment),Tu=l(),us=a("p"),bu=n("Output type of "),Mr=a("a"),wu=n("XLNetForQuestionAnswering"),yu=n("."),Zd=l(),ao=a("div"),v(fs.$$.fragment),Lu=l(),gs=a("p"),Nu=n("Output type of "),qr=a("a"),xu=n("TFXLNetModel"),Fu=n("."),ec=l(),ro=a("div"),v(_s.$$.fragment),$u=l(),ks=a("p"),Xu=n("Output type of "),Er=a("a"),zu=n("TFXLNetLMHeadModel"),Mu=n("."),tc=l(),io=a("div"),v(vs.$$.fragment),qu=l(),Ts=a("p"),Eu=n("Output type of "),Cr=a("a"),Cu=n("TFXLNetForSequenceClassification"),ju=n("."),oc=l(),lo=a("div"),v(bs.$$.fragment),Ou=l(),ws=a("p"),Pu=n("Output type of "),jr=a("a"),Au=n("TFXLNetForMultipleChoice"),Su=n("."),nc=l(),co=a("div"),v(ys.$$.fragment),Iu=l(),Ls=a("p"),Du=n("Output type of "),dl=a("code"),Hu=n("TFXLNetForTokenClassificationOutput"),Qu=n("."),sc=l(),po=a("div"),v(Ns.$$.fragment),Wu=l(),xs=a("p"),Bu=n("Output type of "),Or=a("a"),Uu=n("TFXLNetForQuestionAnsweringSimple"),Ru=n("."),ac=l(),ho=a("h2"),Uo=a("a"),cl=a("span"),v(Fs.$$.fragment),Vu=l(),pl=a("span"),Yu=n("XLNetModel"),rc=l(),He=a("div"),v($s.$$.fragment),Gu=l(),hl=a("p"),Ju=n("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top."),Ku=l(),Xs=a("p"),Zu=n("This model inherits from "),Pr=a("a"),ef=n("PreTrainedModel"),tf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),of=l(),zs=a("p"),nf=n("This model is also a PyTorch "),Ms=a("a"),sf=n("torch.nn.Module"),af=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),rf=l(),Ye=a("div"),v(qs.$$.fragment),lf=l(),mo=a("p"),df=n("The "),Ar=a("a"),cf=n("XLNetModel"),pf=n(" forward method, overrides the "),ml=a("code"),hf=n("__call__"),mf=n(" special method."),uf=l(),v(Ro.$$.fragment),ff=l(),ul=a("p"),gf=n("Example:"),_f=l(),v(Es.$$.fragment),ic=l(),uo=a("h2"),Vo=a("a"),fl=a("span"),v(Cs.$$.fragment),kf=l(),gl=a("span"),vf=n("XLNetLMHeadModel"),lc=l(),Qe=a("div"),v(js.$$.fragment),Tf=l(),_l=a("p"),bf=n("XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings)."),wf=l(),Os=a("p"),yf=n("This model inherits from "),Sr=a("a"),Lf=n("PreTrainedModel"),Nf=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xf=l(),Ps=a("p"),Ff=n("This model is also a PyTorch "),As=a("a"),$f=n("torch.nn.Module"),Xf=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zf=l(),Ge=a("div"),v(Ss.$$.fragment),Mf=l(),fo=a("p"),qf=n("The "),Ir=a("a"),Ef=n("XLNetLMHeadModel"),Cf=n(" forward method, overrides the "),kl=a("code"),jf=n("__call__"),Of=n(" special method."),Pf=l(),v(Yo.$$.fragment),Af=l(),vl=a("p"),Sf=n("Examples:"),If=l(),v(Is.$$.fragment),dc=l(),go=a("h2"),Go=a("a"),Tl=a("span"),v(Ds.$$.fragment),Df=l(),bl=a("span"),Hf=n("XLNetForSequenceClassification"),cc=l(),We=a("div"),v(Hs.$$.fragment),Qf=l(),wl=a("p"),Wf=n(`XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Bf=l(),Qs=a("p"),Uf=n("This model inherits from "),Dr=a("a"),Rf=n("PreTrainedModel"),Vf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yf=l(),Ws=a("p"),Gf=n("This model is also a PyTorch "),Bs=a("a"),Jf=n("torch.nn.Module"),Kf=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zf=l(),Me=a("div"),v(Us.$$.fragment),eg=l(),_o=a("p"),tg=n("The "),Hr=a("a"),og=n("XLNetForSequenceClassification"),ng=n(" forward method, overrides the "),yl=a("code"),sg=n("__call__"),ag=n(" special method."),rg=l(),v(Jo.$$.fragment),ig=l(),Ll=a("p"),lg=n("Example of single-label classification:"),dg=l(),v(Rs.$$.fragment),cg=l(),Nl=a("p"),pg=n("Example of multi-label classification:"),hg=l(),v(Vs.$$.fragment),pc=l(),ko=a("h2"),Ko=a("a"),xl=a("span"),v(Ys.$$.fragment),mg=l(),Fl=a("span"),ug=n("XLNetForMultipleChoice"),hc=l(),Be=a("div"),v(Gs.$$.fragment),fg=l(),$l=a("p"),gg=n(`XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RACE/SWAG tasks.`),_g=l(),Js=a("p"),kg=n("This model inherits from "),Qr=a("a"),vg=n("PreTrainedModel"),Tg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bg=l(),Ks=a("p"),wg=n("This model is also a PyTorch "),Zs=a("a"),yg=n("torch.nn.Module"),Lg=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ng=l(),Je=a("div"),v(ea.$$.fragment),xg=l(),vo=a("p"),Fg=n("The "),Wr=a("a"),$g=n("XLNetForMultipleChoice"),Xg=n(" forward method, overrides the "),Xl=a("code"),zg=n("__call__"),Mg=n(" special method."),qg=l(),v(Zo.$$.fragment),Eg=l(),zl=a("p"),Cg=n("Example:"),jg=l(),v(ta.$$.fragment),mc=l(),To=a("h2"),en=a("a"),Ml=a("span"),v(oa.$$.fragment),Og=l(),ql=a("span"),Pg=n("XLNetForTokenClassification"),uc=l(),Ue=a("div"),v(na.$$.fragment),Ag=l(),El=a("p"),Sg=n(`XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ig=l(),sa=a("p"),Dg=n("This model inherits from "),Br=a("a"),Hg=n("PreTrainedModel"),Qg=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wg=l(),aa=a("p"),Bg=n("This model is also a PyTorch "),ra=a("a"),Ug=n("torch.nn.Module"),Rg=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vg=l(),Ke=a("div"),v(ia.$$.fragment),Yg=l(),bo=a("p"),Gg=n("The "),Ur=a("a"),Jg=n("XLNetForTokenClassification"),Kg=n(" forward method, overrides the "),Cl=a("code"),Zg=n("__call__"),e_=n(" special method."),t_=l(),v(tn.$$.fragment),o_=l(),jl=a("p"),n_=n("Example:"),s_=l(),v(la.$$.fragment),fc=l(),wo=a("h2"),on=a("a"),Ol=a("span"),v(da.$$.fragment),a_=l(),Pl=a("span"),r_=n("XLNetForQuestionAnsweringSimple"),gc=l(),Re=a("div"),v(ca.$$.fragment),i_=l(),yo=a("p"),l_=n(`XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Al=a("code"),d_=n("span start logits"),c_=n(" and "),Sl=a("code"),p_=n("span end logits"),h_=n(")."),m_=l(),pa=a("p"),u_=n("This model inherits from "),Rr=a("a"),f_=n("PreTrainedModel"),g_=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),__=l(),ha=a("p"),k_=n("This model is also a PyTorch "),ma=a("a"),v_=n("torch.nn.Module"),T_=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),b_=l(),Ze=a("div"),v(ua.$$.fragment),w_=l(),Lo=a("p"),y_=n("The "),Vr=a("a"),L_=n("XLNetForQuestionAnsweringSimple"),N_=n(" forward method, overrides the "),Il=a("code"),x_=n("__call__"),F_=n(" special method."),$_=l(),v(nn.$$.fragment),X_=l(),Dl=a("p"),z_=n("Example:"),M_=l(),v(fa.$$.fragment),_c=l(),No=a("h2"),sn=a("a"),Hl=a("span"),v(ga.$$.fragment),q_=l(),Ql=a("span"),E_=n("XLNetForQuestionAnswering"),kc=l(),Ve=a("div"),v(_a.$$.fragment),C_=l(),xo=a("p"),j_=n(`XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Wl=a("code"),O_=n("span start logits"),P_=n(" and "),Bl=a("code"),A_=n("span end logits"),S_=n(")."),I_=l(),ka=a("p"),D_=n("This model inherits from "),Yr=a("a"),H_=n("PreTrainedModel"),Q_=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),W_=l(),va=a("p"),B_=n("This model is also a PyTorch "),Ta=a("a"),U_=n("torch.nn.Module"),R_=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),V_=l(),et=a("div"),v(ba.$$.fragment),Y_=l(),Fo=a("p"),G_=n("The "),Gr=a("a"),J_=n("XLNetForQuestionAnswering"),K_=n(" forward method, overrides the "),Ul=a("code"),Z_=n("__call__"),ek=n(" special method."),tk=l(),v(an.$$.fragment),ok=l(),Rl=a("p"),nk=n("Example:"),sk=l(),v(wa.$$.fragment),vc=l(),$o=a("h2"),rn=a("a"),Vl=a("span"),v(ya.$$.fragment),ak=l(),Yl=a("span"),rk=n("TFXLNetModel"),Tc=l(),Ee=a("div"),v(La.$$.fragment),ik=l(),Gl=a("p"),lk=n("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top."),dk=l(),Na=a("p"),ck=n("This model inherits from "),Jr=a("a"),pk=n("TFPreTrainedModel"),hk=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mk=l(),xa=a("p"),uk=n("This model is also a "),Fa=a("a"),fk=n("tf.keras.Model"),gk=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_k=l(),v(ln.$$.fragment),kk=l(),tt=a("div"),v($a.$$.fragment),vk=l(),Xo=a("p"),Tk=n("The "),Kr=a("a"),bk=n("TFXLNetModel"),wk=n(" forward method, overrides the "),Jl=a("code"),yk=n("__call__"),Lk=n(" special method."),Nk=l(),v(dn.$$.fragment),xk=l(),Kl=a("p"),Fk=n("Example:"),$k=l(),v(Xa.$$.fragment),bc=l(),zo=a("h2"),cn=a("a"),Zl=a("span"),v(za.$$.fragment),Xk=l(),ed=a("span"),zk=n("TFXLNetLMHeadModel"),wc=l(),Ce=a("div"),v(Ma.$$.fragment),Mk=l(),td=a("p"),qk=n("XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings)."),Ek=l(),qa=a("p"),Ck=n("This model inherits from "),Zr=a("a"),jk=n("TFPreTrainedModel"),Ok=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pk=l(),Ea=a("p"),Ak=n("This model is also a "),Ca=a("a"),Sk=n("tf.keras.Model"),Ik=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Dk=l(),v(pn.$$.fragment),Hk=l(),ot=a("div"),v(ja.$$.fragment),Qk=l(),Mo=a("p"),Wk=n("The "),ei=a("a"),Bk=n("TFXLNetLMHeadModel"),Uk=n(" forward method, overrides the "),od=a("code"),Rk=n("__call__"),Vk=n(" special method."),Yk=l(),v(hn.$$.fragment),Gk=l(),nd=a("p"),Jk=n("Examples:"),Kk=l(),v(Oa.$$.fragment),yc=l(),qo=a("h2"),mn=a("a"),sd=a("span"),v(Pa.$$.fragment),Zk=l(),ad=a("span"),ev=n("TFXLNetForSequenceClassification"),Lc=l(),je=a("div"),v(Aa.$$.fragment),tv=l(),rd=a("p"),ov=n(`XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),nv=l(),Sa=a("p"),sv=n("This model inherits from "),ti=a("a"),av=n("TFPreTrainedModel"),rv=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),iv=l(),Ia=a("p"),lv=n("This model is also a "),Da=a("a"),dv=n("tf.keras.Model"),cv=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),pv=l(),v(un.$$.fragment),hv=l(),nt=a("div"),v(Ha.$$.fragment),mv=l(),Eo=a("p"),uv=n("The "),oi=a("a"),fv=n("TFXLNetForSequenceClassification"),gv=n(" forward method, overrides the "),id=a("code"),_v=n("__call__"),kv=n(" special method."),vv=l(),v(fn.$$.fragment),Tv=l(),ld=a("p"),bv=n("Example:"),wv=l(),v(Qa.$$.fragment),Nc=l(),Co=a("h2"),gn=a("a"),dd=a("span"),v(Wa.$$.fragment),yv=l(),cd=a("span"),Lv=n("TFLNetForMultipleChoice"),xc=l(),Oe=a("div"),v(Ba.$$.fragment),Nv=l(),pd=a("p"),xv=n(`XLNET Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Fv=l(),Ua=a("p"),$v=n("This model inherits from "),ni=a("a"),Xv=n("TFPreTrainedModel"),zv=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mv=l(),Ra=a("p"),qv=n("This model is also a "),Va=a("a"),Ev=n("tf.keras.Model"),Cv=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jv=l(),v(_n.$$.fragment),Ov=l(),st=a("div"),v(Ya.$$.fragment),Pv=l(),jo=a("p"),Av=n("The "),si=a("a"),Sv=n("TFXLNetForMultipleChoice"),Iv=n(" forward method, overrides the "),hd=a("code"),Dv=n("__call__"),Hv=n(" special method."),Qv=l(),v(kn.$$.fragment),Wv=l(),md=a("p"),Bv=n("Example:"),Uv=l(),v(Ga.$$.fragment),Fc=l(),Oo=a("h2"),vn=a("a"),ud=a("span"),v(Ja.$$.fragment),Rv=l(),fd=a("span"),Vv=n("TFXLNetForTokenClassification"),$c=l(),Pe=a("div"),v(Ka.$$.fragment),Yv=l(),gd=a("p"),Gv=n(`XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Jv=l(),Za=a("p"),Kv=n("This model inherits from "),ai=a("a"),Zv=n("TFPreTrainedModel"),eT=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tT=l(),er=a("p"),oT=n("This model is also a "),tr=a("a"),nT=n("tf.keras.Model"),sT=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),aT=l(),v(Tn.$$.fragment),rT=l(),at=a("div"),v(or.$$.fragment),iT=l(),Po=a("p"),lT=n("The "),ri=a("a"),dT=n("TFXLNetForTokenClassification"),cT=n(" forward method, overrides the "),_d=a("code"),pT=n("__call__"),hT=n(" special method."),mT=l(),v(bn.$$.fragment),uT=l(),kd=a("p"),fT=n("Example:"),gT=l(),v(nr.$$.fragment),Xc=l(),Ao=a("h2"),wn=a("a"),vd=a("span"),v(sr.$$.fragment),_T=l(),Td=a("span"),kT=n("TFXLNetForQuestionAnsweringSimple"),zc=l(),Ae=a("div"),v(ar.$$.fragment),vT=l(),So=a("p"),TT=n(`XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),bd=a("code"),bT=n("span start logits"),wT=n(" and "),wd=a("code"),yT=n("span end logits"),LT=n(")."),NT=l(),rr=a("p"),xT=n("This model inherits from "),ii=a("a"),FT=n("TFPreTrainedModel"),$T=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),XT=l(),ir=a("p"),zT=n("This model is also a "),lr=a("a"),MT=n("tf.keras.Model"),qT=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ET=l(),v(yn.$$.fragment),CT=l(),rt=a("div"),v(dr.$$.fragment),jT=l(),Io=a("p"),OT=n("The "),li=a("a"),PT=n("TFXLNetForQuestionAnsweringSimple"),AT=n(" forward method, overrides the "),yd=a("code"),ST=n("__call__"),IT=n(" special method."),DT=l(),v(Ln.$$.fragment),HT=l(),Ld=a("p"),QT=n("Example:"),WT=l(),v(cr.$$.fragment),this.h()},l(o){const u=gy('[data-svelte="svelte-1phssyn"]',document.head);m=r(u,"META",{name:!0,content:!0}),u.forEach(t),x=d(o),f=r(o,"H1",{class:!0});var pr=i(f);_=r(pr,"A",{id:!0,class:!0,href:!0});var Nd=i(_);N=r(Nd,"SPAN",{});var xd=i(N);T(k.$$.fragment,xd),xd.forEach(t),Nd.forEach(t),g=d(pr),$=r(pr,"SPAN",{});var Fd=i($);pe=s(Fd,"XLNet"),Fd.forEach(t),pr.forEach(t),G=d(o),X=r(o,"H2",{class:!0});var hr=i(X);Z=r(hr,"A",{id:!0,class:!0,href:!0});var $d=i(Z);I=r($d,"SPAN",{});var Xd=i(I);T(ee.$$.fragment,Xd),Xd.forEach(t),$d.forEach(t),he=d(hr),D=r(hr,"SPAN",{});var zd=i(D);me=s(zd,"Overview"),zd.forEach(t),hr.forEach(t),le=d(o),V=r(o,"P",{});var mr=i(V);j=s(mr,"The XLNet model was proposed in "),te=r(mr,"A",{href:!0,rel:!0});var Md=i(te);J=s(Md,"XLNet: Generalized Autoregressive Pretraining for Language Understanding"),Md.forEach(t),z=s(mr,` by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. XLnet is an extension of the Transformer-XL model pre-trained using an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order.`),mr.forEach(t),q=d(o),ne=r(o,"P",{});var qd=i(ne);W=s(qd,"The abstract from the paper is the following:"),qd.forEach(t),de=d(o),se=r(o,"P",{});var Ed=i(se);H=r(Ed,"EM",{});var Cd=i(H);ue=s(Cd,`With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. 
Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.`),Cd.forEach(t),Ed.forEach(t),ce=d(o),M=r(o,"P",{});var jd=i(M);fe=s(jd,"Tips:"),jd.forEach(t),Q=d(o),Y=r(o,"UL",{});var kt=i(Y);oe=r(kt,"LI",{});var ur=i(oe);B=s(ur,"The specific attention pattern can be controlled at training and test time using the "),ae=r(ur,"CODE",{});var Od=i(ae);ge=s(Od,"perm_mask"),Od.forEach(t),O=s(ur," input."),ur.forEach(t),_e=d(kt),A=r(kt,"LI",{});var fr=i(A);ke=s(fr,`Due to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained using only a sub-set of the output tokens as target which are selected with the `),p=r(fr,"CODE",{});var Pd=i(p);F=s(Pd,"target_mapping"),Pd.forEach(t),K=s(fr," input."),fr.forEach(t),be=d(kt),re=r(kt,"LI",{});var Nn=i(re);P=s(Nn,"To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the "),ve=r(Nn,"CODE",{});var YT=i(ve);we=s(YT,"perm_mask"),YT.forEach(t),ye=s(Nn,` and `),E=r(Nn,"CODE",{});var GT=i(E);U=s(GT,"target_mapping"),GT.forEach(t),Le=s(Nn,` inputs to control the attention span and outputs (see examples in `),Te=r(Nn,"EM",{});var JT=i(Te);R=s(JT,"examples/pytorch/text-generation/run_generation.py"),JT.forEach(t),Ne=s(Nn,")"),Nn.forEach(t),xe=d(kt),ie=r(kt,"LI",{});var KT=i(ie);Fe=s(KT,"XLNet is one of the few models that has no sequence length limit."),KT.forEach(t),kt.forEach(t),Ad=d(o),vt=r(o,"P",{});var di=i(vt);eh=s(di,"This model was contributed by "),Xn=r(di,"A",{href:!0,rel:!0});var ZT=i(Xn);th=s(ZT,"thomwolf"),ZT.forEach(t),oh=s(di,". The original code can be found "),zn=r(di,"A",{href:!0,rel:!0});var e1=i(zn);nh=s(e1,"here"),e1.forEach(t),sh=s(di,"."),di.forEach(t),Sd=d(o),Ht=r(o,"H2",{class:!0});var qc=i(Ht);Do=r(qc,"A",{id:!0,class:!0,href:!0});var t1=i(Do);Ci=r(t1,"SPAN",{});var o1=i(Ci);T(Mn.$$.fragment,o1),o1.forEach(t),t1.forEach(t),ah=d(qc),ji=r(qc,"SPAN",{});var n1=i(ji);rh=s(n1,"XLNetConfig"),n1.forEach(t),qc.forEach(t),Id=d(o),De=r(o,"DIV",{class:!0});var wt=i(De);T(qn.$$.fragment,wt),ih=d(wt),_t=r(wt,"P",{});var xn=i(_t);lh=s(xn,"This is the configuration class to store the configuration of a "),gr=r(xn,"A",{href:!0});var s1=i(gr);dh=s(s1,"XLNetModel"),s1.forEach(t),ch=s(xn,` or a `),_r=r(xn,"A",{href:!0});var a1=i(_r);ph=s(a1,"TFXLNetModel"),a1.forEach(t),hh=s(xn,`. It is used to instantiate a XLNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),En=r(xn,"A",{href:!0,rel:!0});var r1=i(En);mh=s(r1,"xlnet-large-cased"),r1.forEach(t),uh=s(xn," architecture."),xn.forEach(t),fh=d(wt),Qt=r(wt,"P",{});var ci=i(Qt);gh=s(ci,"Configuration objects inherit from "),kr=r(ci,"A",{href:!0});var i1=i(kr);_h=s(i1,"PretrainedConfig"),i1.forEach(t),kh=s(ci,` and can be used to control the model outputs. 
Read the documentation from `),vr=r(ci,"A",{href:!0});var l1=i(vr);vh=s(l1,"PretrainedConfig"),l1.forEach(t),Th=s(ci," for more information."),ci.forEach(t),bh=d(wt),Oi=r(wt,"P",{});var d1=i(Oi);wh=s(d1,"Examples:"),d1.forEach(t),yh=d(wt),T(Cn.$$.fragment,wt),wt.forEach(t),Dd=d(o),Wt=r(o,"H2",{class:!0});var Ec=i(Wt);Ho=r(Ec,"A",{id:!0,class:!0,href:!0});var c1=i(Ho);Pi=r(c1,"SPAN",{});var p1=i(Pi);T(jn.$$.fragment,p1),p1.forEach(t),c1.forEach(t),Lh=d(Ec),Ai=r(Ec,"SPAN",{});var h1=i(Ai);Nh=s(h1,"XLNetTokenizer"),h1.forEach(t),Ec.forEach(t),Hd=d(o),$e=r(o,"DIV",{class:!0});var Ie=i($e);T(On.$$.fragment,Ie),xh=d(Ie),Pn=r(Ie,"P",{});var Cc=i(Pn);Fh=s(Cc,"Construct an XLNet tokenizer. Based on "),An=r(Cc,"A",{href:!0,rel:!0});var m1=i(An);$h=s(m1,"SentencePiece"),m1.forEach(t),Xh=s(Cc,"."),Cc.forEach(t),zh=d(Ie),Sn=r(Ie,"P",{});var jc=i(Sn);Mh=s(jc,"This tokenizer inherits from "),Tr=r(jc,"A",{href:!0});var u1=i(Tr);qh=s(u1,"PreTrainedTokenizer"),u1.forEach(t),Eh=s(jc,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),jc.forEach(t),Ch=d(Ie),Bt=r(Ie,"P",{});var pi=i(Bt);jh=s(pi,`Attributes: sp_model (`),Si=r(pi,"CODE",{});var f1=i(Si);Oh=s(f1,"SentencePieceProcessor"),f1.forEach(t),Ph=s(pi,`): The `),Ii=r(pi,"EM",{});var g1=i(Ii);Ah=s(g1,"SentencePiece"),g1.forEach(t),Sh=s(pi," processor that is used for every conversion (string, tokens and IDs)."),pi.forEach(t),Ih=d(Ie),Tt=r(Ie,"DIV",{class:!0});var hi=i(Tt);T(In.$$.fragment,hi),Dh=d(hi),Di=r(hi,"P",{});var _1=i(Di);Hh=s(_1,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format:`),_1.forEach(t),Qh=d(hi),Dn=r(hi,"UL",{});var Oc=i(Dn);br=r(Oc,"LI",{});var BT=i(br);Wh=s(BT,"single sequence: "),Hi=r(BT,"CODE",{});var k1=i(Hi);Bh=s(k1,"X <sep> <cls>"),k1.forEach(t),BT.forEach(t),Uh=d(Oc),wr=r(Oc,"LI",{});var UT=i(wr);Rh=s(UT,"pair of sequences: "),Qi=r(UT,"CODE",{});var v1=i(Qi);Vh=s(v1,"A <sep> B <sep> <cls>"),v1.forEach(t),UT.forEach(t),Oc.forEach(t),hi.forEach(t),Yh=d(Ie),Qo=r(Ie,"DIV",{class:!0});var Pc=i(Qo);T(Hn.$$.fragment,Pc),Gh=d(Pc),Qn=r(Pc,"P",{});var Ac=i(Qn);Jh=s(Ac,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Wi=r(Ac,"CODE",{});var T1=i(Wi);Kh=s(T1,"prepare_for_model"),T1.forEach(t),Zh=s(Ac," method."),Ac.forEach(t),Pc.forEach(t),em=d(Ie),lt=r(Ie,"DIV",{class:!0});var Fn=i(lt);T(Wn.$$.fragment,Fn),tm=d(Fn),Bi=r(Fn,"P",{});var b1=i(Bi);om=s(b1,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An XLNet sequence pair mask has the following format:`),b1.forEach(t),nm=d(Fn),T(Bn.$$.fragment,Fn),sm=d(Fn),Ut=r(Fn,"P",{});var mi=i(Ut);am=s(mi,"If "),Ui=r(mi,"CODE",{});var w1=i(Ui);rm=s(w1,"token_ids_1"),w1.forEach(t),im=s(mi," is "),Ri=r(mi,"CODE",{});var y1=i(Ri);lm=s(y1,"None"),y1.forEach(t),dm=s(mi,", this method only returns the first portion of the mask (0s)."),mi.forEach(t),Fn.forEach(t),cm=d(Ie),Vi=r(Ie,"DIV",{class:!0}),i(Vi).forEach(t),Ie.forEach(t),Qd=d(o),Rt=r(o,"H2",{class:!0});var Sc=i(Rt);Wo=r(Sc,"A",{id:!0,class:!0,href:!0});var L1=i(Wo);Yi=r(L1,"SPAN",{});var N1=i(Yi);T(Un.$$.fragment,N1),N1.forEach(t),L1.forEach(t),pm=d(Sc),Gi=r(Sc,"SPAN",{});var x1=i(Gi);hm=s(x1,"XLNetTokenizerFast"),x1.forEach(t),Sc.forEach(t),Wd=d(o),qe=r(o,"DIV",{class:!0});var ct=i(qe);T(Rn.$$.fragment,ct),mm=d(ct),Vt=r(ct,"P",{});var ui=i(Vt);um=s(ui,"Construct a \u201Cfast\u201D XLNet tokenizer (backed by HuggingFace\u2019s "),Ji=r(ui,"EM",{});var F1=i(Ji);fm=s(F1,"tokenizers"),F1.forEach(t),gm=s(ui," library). Based on "),Vn=r(ui,"A",{href:!0,rel:!0});var $1=i(Vn);_m=s($1,"Unigram"),$1.forEach(t),km=s(ui,"."),ui.forEach(t),vm=d(ct),Yn=r(ct,"P",{});var Ic=i(Yn);Tm=s(Ic,"This tokenizer inherits from "),yr=r(Ic,"A",{href:!0});var X1=i(yr);bm=s(X1,"PreTrainedTokenizerFast"),X1.forEach(t),wm=s(Ic,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ic.forEach(t),ym=d(ct),Yt=r(ct,"P",{});var fi=i(Yt);Lm=s(fi,`Attributes: sp_model (`),Ki=r(fi,"CODE",{});var z1=i(Ki);Nm=s(z1,"SentencePieceProcessor"),z1.forEach(t),xm=s(fi,`): The `),Zi=r(fi,"EM",{});var M1=i(Zi);Fm=s(M1,"SentencePiece"),M1.forEach(t),$m=s(fi," processor that is used for every conversion (string, tokens and IDs)."),fi.forEach(t),Xm=d(ct),bt=r(ct,"DIV",{class:!0});var gi=i(bt);T(Gn.$$.fragment,gi),zm=d(gi),el=r(gi,"P",{});var q1=i(el);Mm=s(q1,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format:`),q1.forEach(t),qm=d(gi),Jn=r(gi,"UL",{});var Dc=i(Jn);Lr=r(Dc,"LI",{});var RT=i(Lr);Em=s(RT,"single sequence: "),tl=r(RT,"CODE",{});var E1=i(tl);Cm=s(E1,"X <sep> <cls>"),E1.forEach(t),RT.forEach(t),jm=d(Dc),Nr=r(Dc,"LI",{});var VT=i(Nr);Om=s(VT,"pair of sequences: "),ol=r(VT,"CODE",{});var C1=i(ol);Pm=s(C1,"A <sep> B <sep> <cls>"),C1.forEach(t),VT.forEach(t),Dc.forEach(t),gi.forEach(t),Am=d(ct),dt=r(ct,"DIV",{class:!0});var $n=i(dt);T(Kn.$$.fragment,$n),Sm=d($n),nl=r($n,"P",{});var j1=i(nl);Im=s(j1,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An XLNet sequence pair mask has the following format:`),j1.forEach(t),Dm=d($n),T(Zn.$$.fragment,$n),Hm=d($n),Gt=r($n,"P",{});var _i=i(Gt);Qm=s(_i,"If "),sl=r(_i,"CODE",{});var O1=i(sl);Wm=s(O1,"token_ids_1"),O1.forEach(t),Bm=s(_i," is "),al=r(_i,"CODE",{});var P1=i(al);Um=s(P1,"None"),P1.forEach(t),Rm=s(_i,", this method only returns the first portion of the mask (0s)."),_i.forEach(t),$n.forEach(t),ct.forEach(t),Bd=d(o),Jt=r(o,"H2",{class:!0});var Hc=i(Jt);Bo=r(Hc,"A",{id:!0,class:!0,href:!0});var A1=i(Bo);rl=r(A1,"SPAN",{});var S1=i(rl);T(es.$$.fragment,S1),S1.forEach(t),A1.forEach(t),Vm=d(Hc),il=r(Hc,"SPAN",{});var I1=i(il);Ym=s(I1,"XLNet specific outputs"),I1.forEach(t),Hc.forEach(t),Ud=d(o),Kt=r(o,"DIV",{class:!0});var Qc=i(Kt);T(ts.$$.fragment,Qc),Gm=d(Qc),os=r(Qc,"P",{});var Wc=i(os);Jm=s(Wc,"Output type of "),xr=r(Wc,"A",{href:!0});var D1=i(xr);Km=s(D1,"XLNetModel"),D1.forEach(t),Zm=s(Wc,"."),Wc.forEach(t),Qc.forEach(t),Rd=d(o),Zt=r(o,"DIV",{class:!0});var Bc=i(Zt);T(ns.$$.fragment,Bc),eu=d(Bc),ss=r(Bc,"P",{});var Uc=i(ss);tu=s(Uc,"Output type of "),Fr=r(Uc,"A",{href:!0});var H1=i(Fr);ou=s(H1,"XLNetLMHeadModel"),H1.forEach(t),nu=s(Uc,"."),Uc.forEach(t),Bc.forEach(t),Vd=d(o),eo=r(o,"DIV",{class:!0});var Rc=i(eo);T(as.$$.fragment,Rc),su=d(Rc),rs=r(Rc,"P",{});var Vc=i(rs);au=s(Vc,"Output type of "),$r=r(Vc,"A",{href:!0});var Q1=i($r);ru=s(Q1,"XLNetForSequenceClassification"),Q1.forEach(t),iu=s(Vc,"."),Vc.forEach(t),Rc.forEach(t),Yd=d(o),to=r(o,"DIV",{class:!0});var Yc=i(to);T(is.$$.fragment,Yc),lu=d(Yc),ls=r(Yc,"P",{});var Gc=i(ls);du=s(Gc,"Output type of "),Xr=r(Gc,"A",{href:!0});var W1=i(Xr);cu=s(W1,"XLNetForMultipleChoice"),W1.forEach(t),pu=s(Gc,"."),Gc.forEach(t),Yc.forEach(t),Gd=d(o),oo=r(o,"DIV",{class:!0});var Jc=i(oo);T(ds.$$.fragment,Jc),hu=d(Jc),cs=r(Jc,"P",{});var Kc=i(cs);mu=s(Kc,"Output type of "),ll=r(Kc,"CODE",{});var B1=i(ll);uu=s(B1,"XLNetForTokenClassificationOutput"),B1.forEach(t),fu=s(Kc,"."),Kc.forEach(t),Jc.forEach(t),Jd=d(o),no=r(o,"DIV",{class:!0});var Zc=i(no);T(ps.$$.fragment,Zc),gu=d(Zc),hs=r(Zc,"P",{});var ep=i(hs);_u=s(ep,"Output type of "),zr=r(ep,"A",{href:!0});var U1=i(zr);ku=s(U1,"XLNetForQuestionAnsweringSimple"),U1.forEach(t),vu=s(ep,"."),ep.forEach(t),Zc.forEach(t),Kd=d(o),so=r(o,"DIV",{class:!0});var tp=i(so);T(ms.$$.fragment,tp),Tu=d(tp),us=r(tp,"P",{});var op=i(us);bu=s(op,"Output type of "),Mr=r(op,"A",{href:!0});var R1=i(Mr);wu=s(R1,"XLNetForQuestionAnswering"),R1.forEach(t),yu=s(op,"."),op.forEach(t),tp.forEach(t),Zd=d(o),ao=r(o,"DIV",{class:!0});var np=i(ao);T(fs.$$.fragment,np),Lu=d(np),gs=r(np,"P",{});var sp=i(gs);Nu=s(sp,"Output type of "),qr=r(sp,"A",{href:!0});var V1=i(qr);xu=s(V1,"TFXLNetModel"),V1.forEach(t),Fu=s(sp,"."),sp.forEach(t),np.forEach(t),ec=d(o),ro=r(o,"DIV",{class:!0});var ap=i(ro);T(_s.$$.fragment,ap),$u=d(ap),ks=r(ap,"P",{});var rp=i(ks);Xu=s(rp,"Output type of "),Er=r(rp,"A",{href:!0});var Y1=i(Er);zu=s(Y1,"TFXLNetLMHeadModel"),Y1.forEach(t),Mu=s(rp,"."),rp.forEach(t),ap.forEach(t),tc=d(o),io=r(o,"DIV",{class:!0});var ip=i(io);T(vs.$$.fragment,ip),qu=d(ip),Ts=r(ip,"P",{});var lp=i(Ts);Eu=s(lp,"Output type of "),Cr=r(lp,"A",{href:!0});var G1=i(Cr);Cu=s(G1,"TFXLNetForSequenceClassification"),G1.forEach(t),ju=s(lp,"."),lp.forEach(t),ip.forEach(t),oc=d(o),lo=r(o,"DIV",{class:!0});var dp=i(lo);T(bs.$$.fragment,dp),Ou=d(dp),ws=r(dp,"P",{});var cp=i(ws);Pu=s(cp,"Output type of "),jr=r(cp,"A",{href:!0});var 
J1=i(jr);Au=s(J1,"TFXLNetForMultipleChoice"),J1.forEach(t),Su=s(cp,"."),cp.forEach(t),dp.forEach(t),nc=d(o),co=r(o,"DIV",{class:!0});var pp=i(co);T(ys.$$.fragment,pp),Iu=d(pp),Ls=r(pp,"P",{});var hp=i(Ls);Du=s(hp,"Output type of "),dl=r(hp,"CODE",{});var K1=i(dl);Hu=s(K1,"TFXLNetForTokenClassificationOutput"),K1.forEach(t),Qu=s(hp,"."),hp.forEach(t),pp.forEach(t),sc=d(o),po=r(o,"DIV",{class:!0});var mp=i(po);T(Ns.$$.fragment,mp),Wu=d(mp),xs=r(mp,"P",{});var up=i(xs);Bu=s(up,"Output type of "),Or=r(up,"A",{href:!0});var Z1=i(Or);Uu=s(Z1,"TFXLNetForQuestionAnsweringSimple"),Z1.forEach(t),Ru=s(up,"."),up.forEach(t),mp.forEach(t),ac=d(o),ho=r(o,"H2",{class:!0});var fp=i(ho);Uo=r(fp,"A",{id:!0,class:!0,href:!0});var eb=i(Uo);cl=r(eb,"SPAN",{});var tb=i(cl);T(Fs.$$.fragment,tb),tb.forEach(t),eb.forEach(t),Vu=d(fp),pl=r(fp,"SPAN",{});var ob=i(pl);Yu=s(ob,"XLNetModel"),ob.forEach(t),fp.forEach(t),rc=d(o),He=r(o,"DIV",{class:!0});var yt=i(He);T($s.$$.fragment,yt),Gu=d(yt),hl=r(yt,"P",{});var nb=i(hl);Ju=s(nb,"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top."),nb.forEach(t),Ku=d(yt),Xs=r(yt,"P",{});var gp=i(Xs);Zu=s(gp,"This model inherits from "),Pr=r(gp,"A",{href:!0});var sb=i(Pr);ef=s(sb,"PreTrainedModel"),sb.forEach(t),tf=s(gp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gp.forEach(t),of=d(yt),zs=r(yt,"P",{});var _p=i(zs);nf=s(_p,"This model is also a PyTorch "),Ms=r(_p,"A",{href:!0,rel:!0});var ab=i(Ms);sf=s(ab,"torch.nn.Module"),ab.forEach(t),af=s(_p,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_p.forEach(t),rf=d(yt),Ye=r(yt,"DIV",{class:!0});var Lt=i(Ye);T(qs.$$.fragment,Lt),lf=d(Lt),mo=r(Lt,"P",{});var ki=i(mo);df=s(ki,"The "),Ar=r(ki,"A",{href:!0});var rb=i(Ar);cf=s(rb,"XLNetModel"),rb.forEach(t),pf=s(ki," forward method, overrides the "),ml=r(ki,"CODE",{});var ib=i(ml);hf=s(ib,"__call__"),ib.forEach(t),mf=s(ki," special method."),ki.forEach(t),uf=d(Lt),T(Ro.$$.fragment,Lt),ff=d(Lt),ul=r(Lt,"P",{});var lb=i(ul);gf=s(lb,"Example:"),lb.forEach(t),_f=d(Lt),T(Es.$$.fragment,Lt),Lt.forEach(t),yt.forEach(t),ic=d(o),uo=r(o,"H2",{class:!0});var kp=i(uo);Vo=r(kp,"A",{id:!0,class:!0,href:!0});var db=i(Vo);fl=r(db,"SPAN",{});var cb=i(fl);T(Cs.$$.fragment,cb),cb.forEach(t),db.forEach(t),kf=d(kp),gl=r(kp,"SPAN",{});var pb=i(gl);vf=s(pb,"XLNetLMHeadModel"),pb.forEach(t),kp.forEach(t),lc=d(o),Qe=r(o,"DIV",{class:!0});var Nt=i(Qe);T(js.$$.fragment,Nt),Tf=d(Nt),_l=r(Nt,"P",{});var hb=i(_l);bf=s(hb,"XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings)."),hb.forEach(t),wf=d(Nt),Os=r(Nt,"P",{});var vp=i(Os);yf=s(vp,"This model inherits from "),Sr=r(vp,"A",{href:!0});var mb=i(Sr);Lf=s(mb,"PreTrainedModel"),mb.forEach(t),Nf=s(vp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vp.forEach(t),xf=d(Nt),Ps=r(Nt,"P",{});var Tp=i(Ps);Ff=s(Tp,"This model is also a PyTorch "),As=r(Tp,"A",{href:!0,rel:!0});var ub=i(As);$f=s(ub,"torch.nn.Module"),ub.forEach(t),Xf=s(Tp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tp.forEach(t),zf=d(Nt),Ge=r(Nt,"DIV",{class:!0});var xt=i(Ge);T(Ss.$$.fragment,xt),Mf=d(xt),fo=r(xt,"P",{});var vi=i(fo);qf=s(vi,"The "),Ir=r(vi,"A",{href:!0});var fb=i(Ir);Ef=s(fb,"XLNetLMHeadModel"),fb.forEach(t),Cf=s(vi," forward method, overrides the "),kl=r(vi,"CODE",{});var gb=i(kl);jf=s(gb,"__call__"),gb.forEach(t),Of=s(vi," special method."),vi.forEach(t),Pf=d(xt),T(Yo.$$.fragment,xt),Af=d(xt),vl=r(xt,"P",{});var _b=i(vl);Sf=s(_b,"Examples:"),_b.forEach(t),If=d(xt),T(Is.$$.fragment,xt),xt.forEach(t),Nt.forEach(t),dc=d(o),go=r(o,"H2",{class:!0});var bp=i(go);Go=r(bp,"A",{id:!0,class:!0,href:!0});var kb=i(Go);Tl=r(kb,"SPAN",{});var vb=i(Tl);T(Ds.$$.fragment,vb),vb.forEach(t),kb.forEach(t),Df=d(bp),bl=r(bp,"SPAN",{});var Tb=i(bl);Hf=s(Tb,"XLNetForSequenceClassification"),Tb.forEach(t),bp.forEach(t),cc=d(o),We=r(o,"DIV",{class:!0});var Ft=i(We);T(Hs.$$.fragment,Ft),Qf=d(Ft),wl=r(Ft,"P",{});var bb=i(wl);Wf=s(bb,`XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),bb.forEach(t),Bf=d(Ft),Qs=r(Ft,"P",{});var wp=i(Qs);Uf=s(wp,"This model inherits from "),Dr=r(wp,"A",{href:!0});var wb=i(Dr);Rf=s(wb,"PreTrainedModel"),wb.forEach(t),Vf=s(wp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wp.forEach(t),Yf=d(Ft),Ws=r(Ft,"P",{});var yp=i(Ws);Gf=s(yp,"This model is also a PyTorch "),Bs=r(yp,"A",{href:!0,rel:!0});var yb=i(Bs);Jf=s(yb,"torch.nn.Module"),yb.forEach(t),Kf=s(yp,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yp.forEach(t),Zf=d(Ft),Me=r(Ft,"DIV",{class:!0});var it=i(Me);T(Us.$$.fragment,it),eg=d(it),_o=r(it,"P",{});var Ti=i(_o);tg=s(Ti,"The "),Hr=r(Ti,"A",{href:!0});var Lb=i(Hr);og=s(Lb,"XLNetForSequenceClassification"),Lb.forEach(t),ng=s(Ti," forward method, overrides the "),yl=r(Ti,"CODE",{});var Nb=i(yl);sg=s(Nb,"__call__"),Nb.forEach(t),ag=s(Ti," special method."),Ti.forEach(t),rg=d(it),T(Jo.$$.fragment,it),ig=d(it),Ll=r(it,"P",{});var xb=i(Ll);lg=s(xb,"Example of single-label classification:"),xb.forEach(t),dg=d(it),T(Rs.$$.fragment,it),cg=d(it),Nl=r(it,"P",{});var Fb=i(Nl);pg=s(Fb,"Example of multi-label classification:"),Fb.forEach(t),hg=d(it),T(Vs.$$.fragment,it),it.forEach(t),Ft.forEach(t),pc=d(o),ko=r(o,"H2",{class:!0});var Lp=i(ko);Ko=r(Lp,"A",{id:!0,class:!0,href:!0});var $b=i(Ko);xl=r($b,"SPAN",{});var Xb=i(xl);T(Ys.$$.fragment,Xb),Xb.forEach(t),$b.forEach(t),mg=d(Lp),Fl=r(Lp,"SPAN",{});var zb=i(Fl);ug=s(zb,"XLNetForMultipleChoice"),zb.forEach(t),Lp.forEach(t),hc=d(o),Be=r(o,"DIV",{class:!0});var $t=i(Be);T(Gs.$$.fragment,$t),fg=d($t),$l=r($t,"P",{});var Mb=i($l);gg=s(Mb,`XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RACE/SWAG tasks.`),Mb.forEach(t),_g=d($t),Js=r($t,"P",{});var Np=i(Js);kg=s(Np,"This model inherits from "),Qr=r(Np,"A",{href:!0});var qb=i(Qr);vg=s(qb,"PreTrainedModel"),qb.forEach(t),Tg=s(Np,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Np.forEach(t),bg=d($t),Ks=r($t,"P",{});var xp=i(Ks);wg=s(xp,"This model is also a PyTorch "),Zs=r(xp,"A",{href:!0,rel:!0});var Eb=i(Zs);yg=s(Eb,"torch.nn.Module"),Eb.forEach(t),Lg=s(xp,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xp.forEach(t),Ng=d($t),Je=r($t,"DIV",{class:!0});var Xt=i(Je);T(ea.$$.fragment,Xt),xg=d(Xt),vo=r(Xt,"P",{});var bi=i(vo);Fg=s(bi,"The "),Wr=r(bi,"A",{href:!0});var Cb=i(Wr);$g=s(Cb,"XLNetForMultipleChoice"),Cb.forEach(t),Xg=s(bi," forward method, overrides the "),Xl=r(bi,"CODE",{});var jb=i(Xl);zg=s(jb,"__call__"),jb.forEach(t),Mg=s(bi," special method."),bi.forEach(t),qg=d(Xt),T(Zo.$$.fragment,Xt),Eg=d(Xt),zl=r(Xt,"P",{});var Ob=i(zl);Cg=s(Ob,"Example:"),Ob.forEach(t),jg=d(Xt),T(ta.$$.fragment,Xt),Xt.forEach(t),$t.forEach(t),mc=d(o),To=r(o,"H2",{class:!0});var Fp=i(To);en=r(Fp,"A",{id:!0,class:!0,href:!0});var Pb=i(en);Ml=r(Pb,"SPAN",{});var Ab=i(Ml);T(oa.$$.fragment,Ab),Ab.forEach(t),Pb.forEach(t),Og=d(Fp),ql=r(Fp,"SPAN",{});var Sb=i(ql);Pg=s(Sb,"XLNetForTokenClassification"),Sb.forEach(t),Fp.forEach(t),uc=d(o),Ue=r(o,"DIV",{class:!0});var zt=i(Ue);T(na.$$.fragment,zt),Ag=d(zt),El=r(zt,"P",{});var Ib=i(El);Sg=s(Ib,`XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ib.forEach(t),Ig=d(zt),sa=r(zt,"P",{});var $p=i(sa);Dg=s($p,"This model inherits from "),Br=r($p,"A",{href:!0});var Db=i(Br);Hg=s(Db,"PreTrainedModel"),Db.forEach(t),Qg=s($p,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$p.forEach(t),Wg=d(zt),aa=r(zt,"P",{});var Xp=i(aa);Bg=s(Xp,"This model is also a PyTorch "),ra=r(Xp,"A",{href:!0,rel:!0});var Hb=i(ra);Ug=s(Hb,"torch.nn.Module"),Hb.forEach(t),Rg=s(Xp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xp.forEach(t),Vg=d(zt),Ke=r(zt,"DIV",{class:!0});var Mt=i(Ke);T(ia.$$.fragment,Mt),Yg=d(Mt),bo=r(Mt,"P",{});var wi=i(bo);Gg=s(wi,"The "),Ur=r(wi,"A",{href:!0});var Qb=i(Ur);Jg=s(Qb,"XLNetForTokenClassification"),Qb.forEach(t),Kg=s(wi," forward method, overrides the "),Cl=r(wi,"CODE",{});var Wb=i(Cl);Zg=s(Wb,"__call__"),Wb.forEach(t),e_=s(wi," special method."),wi.forEach(t),t_=d(Mt),T(tn.$$.fragment,Mt),o_=d(Mt),jl=r(Mt,"P",{});var Bb=i(jl);n_=s(Bb,"Example:"),Bb.forEach(t),s_=d(Mt),T(la.$$.fragment,Mt),Mt.forEach(t),zt.forEach(t),fc=d(o),wo=r(o,"H2",{class:!0});var zp=i(wo);on=r(zp,"A",{id:!0,class:!0,href:!0});var Ub=i(on);Ol=r(Ub,"SPAN",{});var Rb=i(Ol);T(da.$$.fragment,Rb),Rb.forEach(t),Ub.forEach(t),a_=d(zp),Pl=r(zp,"SPAN",{});var Vb=i(Pl);r_=s(Vb,"XLNetForQuestionAnsweringSimple"),Vb.forEach(t),zp.forEach(t),gc=d(o),Re=r(o,"DIV",{class:!0});var qt=i(Re);T(ca.$$.fragment,qt),i_=d(qt),yo=r(qt,"P",{});var yi=i(yo);l_=s(yi,`XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Al=r(yi,"CODE",{});var Yb=i(Al);d_=s(Yb,"span start logits"),Yb.forEach(t),c_=s(yi," and "),Sl=r(yi,"CODE",{});var Gb=i(Sl);p_=s(Gb,"span end logits"),Gb.forEach(t),h_=s(yi,")."),yi.forEach(t),m_=d(qt),pa=r(qt,"P",{});var Mp=i(pa);u_=s(Mp,"This model inherits from "),Rr=r(Mp,"A",{href:!0});var Jb=i(Rr);f_=s(Jb,"PreTrainedModel"),Jb.forEach(t),g_=s(Mp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mp.forEach(t),__=d(qt),ha=r(qt,"P",{});var qp=i(ha);k_=s(qp,"This model is also a PyTorch "),ma=r(qp,"A",{href:!0,rel:!0});var Kb=i(ma);v_=s(Kb,"torch.nn.Module"),Kb.forEach(t),T_=s(qp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qp.forEach(t),b_=d(qt),Ze=r(qt,"DIV",{class:!0});var Et=i(Ze);T(ua.$$.fragment,Et),w_=d(Et),Lo=r(Et,"P",{});var Li=i(Lo);y_=s(Li,"The "),Vr=r(Li,"A",{href:!0});var Zb=i(Vr);L_=s(Zb,"XLNetForQuestionAnsweringSimple"),Zb.forEach(t),N_=s(Li," forward method, overrides the "),Il=r(Li,"CODE",{});var ew=i(Il);x_=s(ew,"__call__"),ew.forEach(t),F_=s(Li," special method."),Li.forEach(t),$_=d(Et),T(nn.$$.fragment,Et),X_=d(Et),Dl=r(Et,"P",{});var tw=i(Dl);z_=s(tw,"Example:"),tw.forEach(t),M_=d(Et),T(fa.$$.fragment,Et),Et.forEach(t),qt.forEach(t),_c=d(o),No=r(o,"H2",{class:!0});var Ep=i(No);sn=r(Ep,"A",{id:!0,class:!0,href:!0});var ow=i(sn);Hl=r(ow,"SPAN",{});var nw=i(Hl);T(ga.$$.fragment,nw),nw.forEach(t),ow.forEach(t),q_=d(Ep),Ql=r(Ep,"SPAN",{});var sw=i(Ql);E_=s(sw,"XLNetForQuestionAnswering"),sw.forEach(t),Ep.forEach(t),kc=d(o),Ve=r(o,"DIV",{class:!0});var Ct=i(Ve);T(_a.$$.fragment,Ct),C_=d(Ct),xo=r(Ct,"P",{});var Ni=i(xo);j_=s(Ni,`XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Wl=r(Ni,"CODE",{});var aw=i(Wl);O_=s(aw,"span start logits"),aw.forEach(t),P_=s(Ni," and "),Bl=r(Ni,"CODE",{});var rw=i(Bl);A_=s(rw,"span end logits"),rw.forEach(t),S_=s(Ni,")."),Ni.forEach(t),I_=d(Ct),ka=r(Ct,"P",{});var Cp=i(ka);D_=s(Cp,"This model inherits from "),Yr=r(Cp,"A",{href:!0});var iw=i(Yr);H_=s(iw,"PreTrainedModel"),iw.forEach(t),Q_=s(Cp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cp.forEach(t),W_=d(Ct),va=r(Ct,"P",{});var jp=i(va);B_=s(jp,"This model is also a PyTorch "),Ta=r(jp,"A",{href:!0,rel:!0});var lw=i(Ta);U_=s(lw,"torch.nn.Module"),lw.forEach(t),R_=s(jp,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jp.forEach(t),V_=d(Ct),et=r(Ct,"DIV",{class:!0});var jt=i(et);T(ba.$$.fragment,jt),Y_=d(jt),Fo=r(jt,"P",{});var xi=i(Fo);G_=s(xi,"The "),Gr=r(xi,"A",{href:!0});var dw=i(Gr);J_=s(dw,"XLNetForQuestionAnswering"),dw.forEach(t),K_=s(xi," forward method, overrides the "),Ul=r(xi,"CODE",{});var cw=i(Ul);Z_=s(cw,"__call__"),cw.forEach(t),ek=s(xi," special method."),xi.forEach(t),tk=d(jt),T(an.$$.fragment,jt),ok=d(jt),Rl=r(jt,"P",{});var pw=i(Rl);nk=s(pw,"Example:"),pw.forEach(t),sk=d(jt),T(wa.$$.fragment,jt),jt.forEach(t),Ct.forEach(t),vc=d(o),$o=r(o,"H2",{class:!0});var Op=i($o);rn=r(Op,"A",{id:!0,class:!0,href:!0});var hw=i(rn);Vl=r(hw,"SPAN",{});var mw=i(Vl);T(ya.$$.fragment,mw),mw.forEach(t),hw.forEach(t),ak=d(Op),Yl=r(Op,"SPAN",{});var uw=i(Yl);rk=s(uw,"TFXLNetModel"),uw.forEach(t),Op.forEach(t),Tc=d(o),Ee=r(o,"DIV",{class:!0});var pt=i(Ee);T(La.$$.fragment,pt),ik=d(pt),Gl=r(pt,"P",{});var fw=i(Gl);lk=s(fw,"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top."),fw.forEach(t),dk=d(pt),Na=r(pt,"P",{});var Pp=i(Na);ck=s(Pp,"This model inherits from "),Jr=r(Pp,"A",{href:!0});var gw=i(Jr);pk=s(gw,"TFPreTrainedModel"),gw.forEach(t),hk=s(Pp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pp.forEach(t),mk=d(pt),xa=r(pt,"P",{});var Ap=i(xa);uk=s(Ap,"This model is also a "),Fa=r(Ap,"A",{href:!0,rel:!0});var _w=i(Fa);fk=s(_w,"tf.keras.Model"),_w.forEach(t),gk=s(Ap,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ap.forEach(t),_k=d(pt),T(ln.$$.fragment,pt),kk=d(pt),tt=r(pt,"DIV",{class:!0});var Ot=i(tt);T($a.$$.fragment,Ot),vk=d(Ot),Xo=r(Ot,"P",{});var Fi=i(Xo);Tk=s(Fi,"The "),Kr=r(Fi,"A",{href:!0});var kw=i(Kr);bk=s(kw,"TFXLNetModel"),kw.forEach(t),wk=s(Fi," forward method, overrides the "),Jl=r(Fi,"CODE",{});var vw=i(Jl);yk=s(vw,"__call__"),vw.forEach(t),Lk=s(Fi," special method."),Fi.forEach(t),Nk=d(Ot),T(dn.$$.fragment,Ot),xk=d(Ot),Kl=r(Ot,"P",{});var Tw=i(Kl);Fk=s(Tw,"Example:"),Tw.forEach(t),$k=d(Ot),T(Xa.$$.fragment,Ot),Ot.forEach(t),pt.forEach(t),bc=d(o),zo=r(o,"H2",{class:!0});var Sp=i(zo);cn=r(Sp,"A",{id:!0,class:!0,href:!0});var bw=i(cn);Zl=r(bw,"SPAN",{});var ww=i(Zl);T(za.$$.fragment,ww),ww.forEach(t),bw.forEach(t),Xk=d(Sp),ed=r(Sp,"SPAN",{});var yw=i(ed);zk=s(yw,"TFXLNetLMHeadModel"),yw.forEach(t),Sp.forEach(t),wc=d(o),Ce=r(o,"DIV",{class:!0});var ht=i(Ce);T(Ma.$$.fragment,ht),Mk=d(ht),td=r(ht,"P",{});var Lw=i(td);qk=s(Lw,"XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings)."),Lw.forEach(t),Ek=d(ht),qa=r(ht,"P",{});var Ip=i(qa);Ck=s(Ip,"This model inherits from "),Zr=r(Ip,"A",{href:!0});var Nw=i(Zr);jk=s(Nw,"TFPreTrainedModel"),Nw.forEach(t),Ok=s(Ip,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ip.forEach(t),Pk=d(ht),Ea=r(ht,"P",{});var Dp=i(Ea);Ak=s(Dp,"This model is also a "),Ca=r(Dp,"A",{href:!0,rel:!0});var xw=i(Ca);Sk=s(xw,"tf.keras.Model"),xw.forEach(t),Ik=s(Dp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Dp.forEach(t),Dk=d(ht),T(pn.$$.fragment,ht),Hk=d(ht),ot=r(ht,"DIV",{class:!0});var Pt=i(ot);T(ja.$$.fragment,Pt),Qk=d(Pt),Mo=r(Pt,"P",{});var $i=i(Mo);Wk=s($i,"The "),ei=r($i,"A",{href:!0});var Fw=i(ei);Bk=s(Fw,"TFXLNetLMHeadModel"),Fw.forEach(t),Uk=s($i," forward method, overrides the "),od=r($i,"CODE",{});var $w=i(od);Rk=s($w,"__call__"),$w.forEach(t),Vk=s($i," special method."),$i.forEach(t),Yk=d(Pt),T(hn.$$.fragment,Pt),Gk=d(Pt),nd=r(Pt,"P",{});var Xw=i(nd);Jk=s(Xw,"Examples:"),Xw.forEach(t),Kk=d(Pt),T(Oa.$$.fragment,Pt),Pt.forEach(t),ht.forEach(t),yc=d(o),qo=r(o,"H2",{class:!0});var Hp=i(qo);mn=r(Hp,"A",{id:!0,class:!0,href:!0});var zw=i(mn);sd=r(zw,"SPAN",{});var Mw=i(sd);T(Pa.$$.fragment,Mw),Mw.forEach(t),zw.forEach(t),Zk=d(Hp),ad=r(Hp,"SPAN",{});var qw=i(ad);ev=s(qw,"TFXLNetForSequenceClassification"),qw.forEach(t),Hp.forEach(t),Lc=d(o),je=r(o,"DIV",{class:!0});var mt=i(je);T(Aa.$$.fragment,mt),tv=d(mt),rd=r(mt,"P",{});var Ew=i(rd);ov=s(Ew,`XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ew.forEach(t),nv=d(mt),Sa=r(mt,"P",{});var Qp=i(Sa);sv=s(Qp,"This model inherits from "),ti=r(Qp,"A",{href:!0});var Cw=i(ti);av=s(Cw,"TFPreTrainedModel"),Cw.forEach(t),rv=s(Qp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qp.forEach(t),iv=d(mt),Ia=r(mt,"P",{});var Wp=i(Ia);lv=s(Wp,"This model is also a "),Da=r(Wp,"A",{href:!0,rel:!0});var jw=i(Da);dv=s(jw,"tf.keras.Model"),jw.forEach(t),cv=s(Wp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Wp.forEach(t),pv=d(mt),T(un.$$.fragment,mt),hv=d(mt),nt=r(mt,"DIV",{class:!0});var At=i(nt);T(Ha.$$.fragment,At),mv=d(At),Eo=r(At,"P",{});var Xi=i(Eo);uv=s(Xi,"The "),oi=r(Xi,"A",{href:!0});var Ow=i(oi);fv=s(Ow,"TFXLNetForSequenceClassification"),Ow.forEach(t),gv=s(Xi," forward method, overrides the "),id=r(Xi,"CODE",{});var Pw=i(id);_v=s(Pw,"__call__"),Pw.forEach(t),kv=s(Xi," special method."),Xi.forEach(t),vv=d(At),T(fn.$$.fragment,At),Tv=d(At),ld=r(At,"P",{});var Aw=i(ld);bv=s(Aw,"Example:"),Aw.forEach(t),wv=d(At),T(Qa.$$.fragment,At),At.forEach(t),mt.forEach(t),Nc=d(o),Co=r(o,"H2",{class:!0});var Bp=i(Co);gn=r(Bp,"A",{id:!0,class:!0,href:!0});var Sw=i(gn);dd=r(Sw,"SPAN",{});var Iw=i(dd);T(Wa.$$.fragment,Iw),Iw.forEach(t),Sw.forEach(t),yv=d(Bp),cd=r(Bp,"SPAN",{});var Dw=i(cd);Lv=s(Dw,"TFLNetForMultipleChoice"),Dw.forEach(t),Bp.forEach(t),xc=d(o),Oe=r(o,"DIV",{class:!0});var ut=i(Oe);T(Ba.$$.fragment,ut),Nv=d(ut),pd=r(ut,"P",{});var Hw=i(pd);xv=s(Hw,`XLNET Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Hw.forEach(t),Fv=d(ut),Ua=r(ut,"P",{});var Up=i(Ua);$v=s(Up,"This model inherits from "),ni=r(Up,"A",{href:!0});var Qw=i(ni);Xv=s(Qw,"TFPreTrainedModel"),Qw.forEach(t),zv=s(Up,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Up.forEach(t),Mv=d(ut),Ra=r(ut,"P",{});var Rp=i(Ra);qv=s(Rp,"This model is also a "),Va=r(Rp,"A",{href:!0,rel:!0});var Ww=i(Va);Ev=s(Ww,"tf.keras.Model"),Ww.forEach(t),Cv=s(Rp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Rp.forEach(t),jv=d(ut),T(_n.$$.fragment,ut),Ov=d(ut),st=r(ut,"DIV",{class:!0});var St=i(st);T(Ya.$$.fragment,St),Pv=d(St),jo=r(St,"P",{});var zi=i(jo);Av=s(zi,"The "),si=r(zi,"A",{href:!0});var Bw=i(si);Sv=s(Bw,"TFXLNetForMultipleChoice"),Bw.forEach(t),Iv=s(zi," forward method, overrides the "),hd=r(zi,"CODE",{});var Uw=i(hd);Dv=s(Uw,"__call__"),Uw.forEach(t),Hv=s(zi," special method."),zi.forEach(t),Qv=d(St),T(kn.$$.fragment,St),Wv=d(St),md=r(St,"P",{});var Rw=i(md);Bv=s(Rw,"Example:"),Rw.forEach(t),Uv=d(St),T(Ga.$$.fragment,St),St.forEach(t),ut.forEach(t),Fc=d(o),Oo=r(o,"H2",{class:!0});var Vp=i(Oo);vn=r(Vp,"A",{id:!0,class:!0,href:!0});var Vw=i(vn);ud=r(Vw,"SPAN",{});var Yw=i(ud);T(Ja.$$.fragment,Yw),Yw.forEach(t),Vw.forEach(t),Rv=d(Vp),fd=r(Vp,"SPAN",{});var Gw=i(fd);Vv=s(Gw,"TFXLNetForTokenClassification"),Gw.forEach(t),Vp.forEach(t),$c=d(o),Pe=r(o,"DIV",{class:!0});var ft=i(Pe);T(Ka.$$.fragment,ft),Yv=d(ft),gd=r(ft,"P",{});var Jw=i(gd);Gv=s(Jw,`XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),Jw.forEach(t),Jv=d(ft),Za=r(ft,"P",{});var Yp=i(Za);Kv=s(Yp,"This model inherits from "),ai=r(Yp,"A",{href:!0});var Kw=i(ai);Zv=s(Kw,"TFPreTrainedModel"),Kw.forEach(t),eT=s(Yp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yp.forEach(t),tT=d(ft),er=r(ft,"P",{});var Gp=i(er);oT=s(Gp,"This model is also a "),tr=r(Gp,"A",{href:!0,rel:!0});var Zw=i(tr);nT=s(Zw,"tf.keras.Model"),Zw.forEach(t),sT=s(Gp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Gp.forEach(t),aT=d(ft),T(Tn.$$.fragment,ft),rT=d(ft),at=r(ft,"DIV",{class:!0});var It=i(at);T(or.$$.fragment,It),iT=d(It),Po=r(It,"P",{});var Mi=i(Po);lT=s(Mi,"The "),ri=r(Mi,"A",{href:!0});var ey=i(ri);dT=s(ey,"TFXLNetForTokenClassification"),ey.forEach(t),cT=s(Mi," forward method, overrides the "),_d=r(Mi,"CODE",{});var ty=i(_d);pT=s(ty,"__call__"),ty.forEach(t),hT=s(Mi," special method."),Mi.forEach(t),mT=d(It),T(bn.$$.fragment,It),uT=d(It),kd=r(It,"P",{});var oy=i(kd);fT=s(oy,"Example:"),oy.forEach(t),gT=d(It),T(nr.$$.fragment,It),It.forEach(t),ft.forEach(t),Xc=d(o),Ao=r(o,"H2",{class:!0});var Jp=i(Ao);wn=r(Jp,"A",{id:!0,class:!0,href:!0});var ny=i(wn);vd=r(ny,"SPAN",{});var sy=i(vd);T(sr.$$.fragment,sy),sy.forEach(t),ny.forEach(t),_T=d(Jp),Td=r(Jp,"SPAN",{});var ay=i(Td);kT=s(ay,"TFXLNetForQuestionAnsweringSimple"),ay.forEach(t),Jp.forEach(t),zc=d(o),Ae=r(o,"DIV",{class:!0});var gt=i(Ae);T(ar.$$.fragment,gt),vT=d(gt),So=r(gt,"P",{});var qi=i(So);TT=s(qi,`XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),bd=r(qi,"CODE",{});var ry=i(bd);bT=s(ry,"span start logits"),ry.forEach(t),wT=s(qi," and "),wd=r(qi,"CODE",{});var iy=i(wd);yT=s(iy,"span end logits"),iy.forEach(t),LT=s(qi,")."),qi.forEach(t),NT=d(gt),rr=r(gt,"P",{});var Kp=i(rr);xT=s(Kp,"This model inherits from "),ii=r(Kp,"A",{href:!0});var ly=i(ii);FT=s(ly,"TFPreTrainedModel"),ly.forEach(t),$T=s(Kp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kp.forEach(t),XT=d(gt),ir=r(gt,"P",{});var Zp=i(ir);zT=s(Zp,"This model is also a "),lr=r(Zp,"A",{href:!0,rel:!0});var dy=i(lr);MT=s(dy,"tf.keras.Model"),dy.forEach(t),qT=s(Zp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Zp.forEach(t),ET=d(gt),T(yn.$$.fragment,gt),CT=d(gt),rt=r(gt,"DIV",{class:!0});var Dt=i(rt);T(dr.$$.fragment,Dt),jT=d(Dt),Io=r(Dt,"P",{});var Ei=i(Io);OT=s(Ei,"The "),li=r(Ei,"A",{href:!0});var cy=i(li);PT=s(cy,"TFXLNetForQuestionAnsweringSimple"),cy.forEach(t),AT=s(Ei," forward method, overrides the "),yd=r(Ei,"CODE",{});var py=i(yd);ST=s(py,"__call__"),py.forEach(t),IT=s(Ei," special method."),Ei.forEach(t),DT=d(Dt),T(Ln.$$.fragment,Dt),HT=d(Dt),Ld=r(Dt,"P",{});var hy=i(Ld);QT=s(hy,"Example:"),hy.forEach(t),WT=d(Dt),T(cr.$$.fragment,Dt),Dt.forEach(t),gt.forEach(t),this.h()},h(){c(m,"name","hf:doc:metadata"),c(m,"content",JSON.stringify(Py)),c(_,"id","xlnet"),c(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_,"href","#xlnet"),c(f,"class","relative group"),c(Z,"id","overview"),c(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Z,"href","#overview"),c(X,"class","relative group"),c(te,"href","https://arxiv.org/abs/1906.08237"),c(te,"rel","nofollow"),c(Xn,"href","https://huggingface.co/thomwolf"),c(Xn,"rel","nofollow"),c(zn,"href","https://github.com/zihangdai/xlnet/"),c(zn,"rel","nofollow"),c(Do,"id","transformers.XLNetConfig"),c(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Do,"href","#transformers.XLNetConfig"),c(Ht,"class","relative group"),c(gr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetModel"),c(_r,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetModel"),c(En,"href","https://huggingface.co/xlnet-large-cased"),c(En,"rel","nofollow"),c(kr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(vr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(De,"class","docstring"),c(Ho,"id","transformers.XLNetTokenizer"),c(Ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ho,"href","#transformers.XLNetTokenizer"),c(Wt,"class","relative group"),c(An,"href","https://github.com/google/sentencepiece"),c(An,"rel","nofollow"),c(Tr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Tt,"class","docstring"),c(Qo,"class","docstring"),c(lt,"class","docstring"),c(Vi,"class","docstring"),c($e,"class","docstring"),c(Wo,"id","transformers.XLNetTokenizerFast"),c(Wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wo,"href","#transformers.XLNetTokenizerFast"),c(Rt,"class","relative 
group"),c(Vn,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),c(Vn,"rel","nofollow"),c(yr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(bt,"class","docstring"),c(dt,"class","docstring"),c(qe,"class","docstring"),c(Bo,"id","transformers.models.xlnet.modeling_xlnet.XLNetModelOutput"),c(Bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bo,"href","#transformers.models.xlnet.modeling_xlnet.XLNetModelOutput"),c(Jt,"class","relative group"),c(xr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetModel"),c(Kt,"class","docstring"),c(Fr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetLMHeadModel"),c(Zt,"class","docstring"),c($r,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForSequenceClassification"),c(eo,"class","docstring"),c(Xr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForMultipleChoice"),c(to,"class","docstring"),c(oo,"class","docstring"),c(zr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForQuestionAnsweringSimple"),c(no,"class","docstring"),c(Mr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForQuestionAnswering"),c(so,"class","docstring"),c(qr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetModel"),c(ao,"class","docstring"),c(Er,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetLMHeadModel"),c(ro,"class","docstring"),c(Cr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForSequenceClassification"),c(io,"class","docstring"),c(jr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForMultipleChoice"),c(lo,"class","docstring"),c(co,"class","docstring"),c(Or,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForQuestionAnsweringSimple"),c(po,"class","docstring"),c(Uo,"id","transformers.XLNetModel"),c(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Uo,"href","#transformers.XLNetModel"),c(ho,"class","relative group"),c(Pr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ms,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ms,"rel","nofollow"),c(Ar,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetModel"),c(Ye,"class","docstring"),c(He,"class","docstring"),c(Vo,"id","transformers.XLNetLMHeadModel"),c(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vo,"href","#transformers.XLNetLMHeadModel"),c(uo,"class","relative group"),c(Sr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(As,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(As,"rel","nofollow"),c(Ir,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetLMHeadModel"),c(Ge,"class","docstring"),c(Qe,"class","docstring"),c(Go,"id","transformers.XLNetForSequenceClassification"),c(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Go,"href","#transformers.XLNetForSequenceClassification"),c(go,"class","relative group"),c(Dr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Bs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Bs,"rel","nofollow"),c(Hr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForSequenceClassification"),c(Me,"class","docstring"),c(We,"class","docstring"),c(Ko,"id","transformers.XLNetForMultipleChoice"),c(Ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ko,"href","#transformers.XLNetForMultipleChoice"),c(ko,"class","relative group"),c(Qr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Zs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Zs,"rel","nofollow"),c(Wr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForMultipleChoice"),c(Je,"class","docstring"),c(Be,"class","docstring"),c(en,"id","transformers.XLNetForTokenClassification"),c(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(en,"href","#transformers.XLNetForTokenClassification"),c(To,"class","relative group"),c(Br,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ra,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ra,"rel","nofollow"),c(Ur,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForTokenClassification"),c(Ke,"class","docstring"),c(Ue,"class","docstring"),c(on,"id","transformers.XLNetForQuestionAnsweringSimple"),c(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(on,"href","#transformers.XLNetForQuestionAnsweringSimple"),c(wo,"class","relative group"),c(Rr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ma,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ma,"rel","nofollow"),c(Vr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForQuestionAnsweringSimple"),c(Ze,"class","docstring"),c(Re,"class","docstring"),c(sn,"id","transformers.XLNetForQuestionAnswering"),c(sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(sn,"href","#transformers.XLNetForQuestionAnswering"),c(No,"class","relative group"),c(Yr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ta,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ta,"rel","nofollow"),c(Gr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetForQuestionAnswering"),c(et,"class","docstring"),c(Ve,"class","docstring"),c(rn,"id","transformers.TFXLNetModel"),c(rn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(rn,"href","#transformers.TFXLNetModel"),c($o,"class","relative 
group"),c(Jr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Fa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Fa,"rel","nofollow"),c(Kr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetModel"),c(tt,"class","docstring"),c(Ee,"class","docstring"),c(cn,"id","transformers.TFXLNetLMHeadModel"),c(cn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(cn,"href","#transformers.TFXLNetLMHeadModel"),c(zo,"class","relative group"),c(Zr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ca,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ca,"rel","nofollow"),c(ei,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetLMHeadModel"),c(ot,"class","docstring"),c(Ce,"class","docstring"),c(mn,"id","transformers.TFXLNetForSequenceClassification"),c(mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(mn,"href","#transformers.TFXLNetForSequenceClassification"),c(qo,"class","relative group"),c(ti,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Da,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Da,"rel","nofollow"),c(oi,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForSequenceClassification"),c(nt,"class","docstring"),c(je,"class","docstring"),c(gn,"id","transformers.TFXLNetForMultipleChoice"),c(gn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(gn,"href","#transformers.TFXLNetForMultipleChoice"),c(Co,"class","relative group"),c(ni,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Va,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Va,"rel","nofollow"),c(si,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForMultipleChoice"),c(st,"class","docstring"),c(Oe,"class","docstring"),c(vn,"id","transformers.TFXLNetForTokenClassification"),c(vn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vn,"href","#transformers.TFXLNetForTokenClassification"),c(Oo,"class","relative group"),c(ai,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(tr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(tr,"rel","nofollow"),c(ri,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForTokenClassification"),c(at,"class","docstring"),c(Pe,"class","docstring"),c(wn,"id","transformers.TFXLNetForQuestionAnsweringSimple"),c(wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wn,"href","#transformers.TFXLNetForQuestionAnsweringSimple"),c(Ao,"class","relative 
group"),c(ii,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(lr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(lr,"rel","nofollow"),c(li,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.TFXLNetForQuestionAnsweringSimple"),c(rt,"class","docstring"),c(Ae,"class","docstring")},m(o,u){e(document.head,m),h(o,x,u),h(o,f,u),e(f,_),e(_,N),b(k,N,null),e(f,g),e(f,$),e($,pe),h(o,G,u),h(o,X,u),e(X,Z),e(Z,I),b(ee,I,null),e(X,he),e(X,D),e(D,me),h(o,le,u),h(o,V,u),e(V,j),e(V,te),e(te,J),e(V,z),h(o,q,u),h(o,ne,u),e(ne,W),h(o,de,u),h(o,se,u),e(se,H),e(H,ue),h(o,ce,u),h(o,M,u),e(M,fe),h(o,Q,u),h(o,Y,u),e(Y,oe),e(oe,B),e(oe,ae),e(ae,ge),e(oe,O),e(Y,_e),e(Y,A),e(A,ke),e(A,p),e(p,F),e(A,K),e(Y,be),e(Y,re),e(re,P),e(re,ve),e(ve,we),e(re,ye),e(re,E),e(E,U),e(re,Le),e(re,Te),e(Te,R),e(re,Ne),e(Y,xe),e(Y,ie),e(ie,Fe),h(o,Ad,u),h(o,vt,u),e(vt,eh),e(vt,Xn),e(Xn,th),e(vt,oh),e(vt,zn),e(zn,nh),e(vt,sh),h(o,Sd,u),h(o,Ht,u),e(Ht,Do),e(Do,Ci),b(Mn,Ci,null),e(Ht,ah),e(Ht,ji),e(ji,rh),h(o,Id,u),h(o,De,u),b(qn,De,null),e(De,ih),e(De,_t),e(_t,lh),e(_t,gr),e(gr,dh),e(_t,ch),e(_t,_r),e(_r,ph),e(_t,hh),e(_t,En),e(En,mh),e(_t,uh),e(De,fh),e(De,Qt),e(Qt,gh),e(Qt,kr),e(kr,_h),e(Qt,kh),e(Qt,vr),e(vr,vh),e(Qt,Th),e(De,bh),e(De,Oi),e(Oi,wh),e(De,yh),b(Cn,De,null),h(o,Dd,u),h(o,Wt,u),e(Wt,Ho),e(Ho,Pi),b(jn,Pi,null),e(Wt,Lh),e(Wt,Ai),e(Ai,Nh),h(o,Hd,u),h(o,$e,u),b(On,$e,null),e($e,xh),e($e,Pn),e(Pn,Fh),e(Pn,An),e(An,$h),e(Pn,Xh),e($e,zh),e($e,Sn),e(Sn,Mh),e(Sn,Tr),e(Tr,qh),e(Sn,Eh),e($e,Ch),e($e,Bt),e(Bt,jh),e(Bt,Si),e(Si,Oh),e(Bt,Ph),e(Bt,Ii),e(Ii,Ah),e(Bt,Sh),e($e,Ih),e($e,Tt),b(In,Tt,null),e(Tt,Dh),e(Tt,Di),e(Di,Hh),e(Tt,Qh),e(Tt,Dn),e(Dn,br),e(br,Wh),e(br,Hi),e(Hi,Bh),e(Dn,Uh),e(Dn,wr),e(wr,Rh),e(wr,Qi),e(Qi,Vh),e($e,Yh),e($e,Qo),b(Hn,Qo,null),e(Qo,Gh),e(Qo,Qn),e(Qn,Jh),e(Qn,Wi),e(Wi,Kh),e(Qn,Zh),e($e,em),e($e,lt),b(Wn,lt,null),e(lt,tm),e(lt,Bi),e(Bi,om),e(lt,nm),b(Bn,lt,null),e(lt,sm),e(lt,Ut),e(Ut,am),e(Ut,Ui),e(Ui,rm),e(Ut,im),e(Ut,Ri),e(Ri,lm),e(Ut,dm),e($e,cm),e($e,Vi),h(o,Qd,u),h(o,Rt,u),e(Rt,Wo),e(Wo,Yi),b(Un,Yi,null),e(Rt,pm),e(Rt,Gi),e(Gi,hm),h(o,Wd,u),h(o,qe,u),b(Rn,qe,null),e(qe,mm),e(qe,Vt),e(Vt,um),e(Vt,Ji),e(Ji,fm),e(Vt,gm),e(Vt,Vn),e(Vn,_m),e(Vt,km),e(qe,vm),e(qe,Yn),e(Yn,Tm),e(Yn,yr),e(yr,bm),e(Yn,wm),e(qe,ym),e(qe,Yt),e(Yt,Lm),e(Yt,Ki),e(Ki,Nm),e(Yt,xm),e(Yt,Zi),e(Zi,Fm),e(Yt,$m),e(qe,Xm),e(qe,bt),b(Gn,bt,null),e(bt,zm),e(bt,el),e(el,Mm),e(bt,qm),e(bt,Jn),e(Jn,Lr),e(Lr,Em),e(Lr,tl),e(tl,Cm),e(Jn,jm),e(Jn,Nr),e(Nr,Om),e(Nr,ol),e(ol,Pm),e(qe,Am),e(qe,dt),b(Kn,dt,null),e(dt,Sm),e(dt,nl),e(nl,Im),e(dt,Dm),b(Zn,dt,null),e(dt,Hm),e(dt,Gt),e(Gt,Qm),e(Gt,sl),e(sl,Wm),e(Gt,Bm),e(Gt,al),e(al,Um),e(Gt,Rm),h(o,Bd,u),h(o,Jt,u),e(Jt,Bo),e(Bo,rl),b(es,rl,null),e(Jt,Vm),e(Jt,il),e(il,Ym),h(o,Ud,u),h(o,Kt,u),b(ts,Kt,null),e(Kt,Gm),e(Kt,os),e(os,Jm),e(os,xr),e(xr,Km),e(os,Zm),h(o,Rd,u),h(o,Zt,u),b(ns,Zt,null),e(Zt,eu),e(Zt,ss),e(ss,tu),e(ss,Fr),e(Fr,ou),e(ss,nu),h(o,Vd,u),h(o,eo,u),b(as,eo,null),e(eo,su),e(eo,rs),e(rs,au),e(rs,$r),e($r,ru),e(rs,iu),h(o,Yd,u),h(o,to,u),b(is,to,null),e(to,lu),e(to,ls),e(ls,du),e(ls,Xr),e(Xr,cu),e(ls,pu),h(o,Gd,u),h(o,oo,u),b(ds,oo,null),e(oo,hu),e(oo,cs),e(cs,mu),e(cs,ll),e(ll,uu),e(cs,fu),h(o,Jd,u),h(o,no,u),b(ps,no,null),e(no,gu),e(no,hs),e(hs,_u),e(hs,zr),e(zr,ku),e(hs,vu),h(o,Kd,u),h(o,so,u),b(ms,so,null),e(so,Tu),e(so,us),e(us,bu),e(us,Mr),e(Mr,wu),e(us,yu),h(o,Zd,u),h(o,ao,u),b(fs,ao,null),e(ao,Lu),e(ao,gs),e(gs,Nu),e(gs,qr),e(qr,xu),e(gs,Fu),h(o,ec,u),h(o,ro,u),b(_s,ro,null),e(ro,$u),e(ro,ks),e(ks,Xu),e(ks,Er),e
(Er,zu),e(ks,Mu),h(o,tc,u),h(o,io,u),b(vs,io,null),e(io,qu),e(io,Ts),e(Ts,Eu),e(Ts,Cr),e(Cr,Cu),e(Ts,ju),h(o,oc,u),h(o,lo,u),b(bs,lo,null),e(lo,Ou),e(lo,ws),e(ws,Pu),e(ws,jr),e(jr,Au),e(ws,Su),h(o,nc,u),h(o,co,u),b(ys,co,null),e(co,Iu),e(co,Ls),e(Ls,Du),e(Ls,dl),e(dl,Hu),e(Ls,Qu),h(o,sc,u),h(o,po,u),b(Ns,po,null),e(po,Wu),e(po,xs),e(xs,Bu),e(xs,Or),e(Or,Uu),e(xs,Ru),h(o,ac,u),h(o,ho,u),e(ho,Uo),e(Uo,cl),b(Fs,cl,null),e(ho,Vu),e(ho,pl),e(pl,Yu),h(o,rc,u),h(o,He,u),b($s,He,null),e(He,Gu),e(He,hl),e(hl,Ju),e(He,Ku),e(He,Xs),e(Xs,Zu),e(Xs,Pr),e(Pr,ef),e(Xs,tf),e(He,of),e(He,zs),e(zs,nf),e(zs,Ms),e(Ms,sf),e(zs,af),e(He,rf),e(He,Ye),b(qs,Ye,null),e(Ye,lf),e(Ye,mo),e(mo,df),e(mo,Ar),e(Ar,cf),e(mo,pf),e(mo,ml),e(ml,hf),e(mo,mf),e(Ye,uf),b(Ro,Ye,null),e(Ye,ff),e(Ye,ul),e(ul,gf),e(Ye,_f),b(Es,Ye,null),h(o,ic,u),h(o,uo,u),e(uo,Vo),e(Vo,fl),b(Cs,fl,null),e(uo,kf),e(uo,gl),e(gl,vf),h(o,lc,u),h(o,Qe,u),b(js,Qe,null),e(Qe,Tf),e(Qe,_l),e(_l,bf),e(Qe,wf),e(Qe,Os),e(Os,yf),e(Os,Sr),e(Sr,Lf),e(Os,Nf),e(Qe,xf),e(Qe,Ps),e(Ps,Ff),e(Ps,As),e(As,$f),e(Ps,Xf),e(Qe,zf),e(Qe,Ge),b(Ss,Ge,null),e(Ge,Mf),e(Ge,fo),e(fo,qf),e(fo,Ir),e(Ir,Ef),e(fo,Cf),e(fo,kl),e(kl,jf),e(fo,Of),e(Ge,Pf),b(Yo,Ge,null),e(Ge,Af),e(Ge,vl),e(vl,Sf),e(Ge,If),b(Is,Ge,null),h(o,dc,u),h(o,go,u),e(go,Go),e(Go,Tl),b(Ds,Tl,null),e(go,Df),e(go,bl),e(bl,Hf),h(o,cc,u),h(o,We,u),b(Hs,We,null),e(We,Qf),e(We,wl),e(wl,Wf),e(We,Bf),e(We,Qs),e(Qs,Uf),e(Qs,Dr),e(Dr,Rf),e(Qs,Vf),e(We,Yf),e(We,Ws),e(Ws,Gf),e(Ws,Bs),e(Bs,Jf),e(Ws,Kf),e(We,Zf),e(We,Me),b(Us,Me,null),e(Me,eg),e(Me,_o),e(_o,tg),e(_o,Hr),e(Hr,og),e(_o,ng),e(_o,yl),e(yl,sg),e(_o,ag),e(Me,rg),b(Jo,Me,null),e(Me,ig),e(Me,Ll),e(Ll,lg),e(Me,dg),b(Rs,Me,null),e(Me,cg),e(Me,Nl),e(Nl,pg),e(Me,hg),b(Vs,Me,null),h(o,pc,u),h(o,ko,u),e(ko,Ko),e(Ko,xl),b(Ys,xl,null),e(ko,mg),e(ko,Fl),e(Fl,ug),h(o,hc,u),h(o,Be,u),b(Gs,Be,null),e(Be,fg),e(Be,$l),e($l,gg),e(Be,_g),e(Be,Js),e(Js,kg),e(Js,Qr),e(Qr,vg),e(Js,Tg),e(Be,bg),e(Be,Ks),e(Ks,wg),e(Ks,Zs),e(Zs,yg),e(Ks,Lg),e(Be,Ng),e(Be,Je),b(ea,Je,null),e(Je,xg),e(Je,vo),e(vo,Fg),e(vo,Wr),e(Wr,$g),e(vo,Xg),e(vo,Xl),e(Xl,zg),e(vo,Mg),e(Je,qg),b(Zo,Je,null),e(Je,Eg),e(Je,zl),e(zl,Cg),e(Je,jg),b(ta,Je,null),h(o,mc,u),h(o,To,u),e(To,en),e(en,Ml),b(oa,Ml,null),e(To,Og),e(To,ql),e(ql,Pg),h(o,uc,u),h(o,Ue,u),b(na,Ue,null),e(Ue,Ag),e(Ue,El),e(El,Sg),e(Ue,Ig),e(Ue,sa),e(sa,Dg),e(sa,Br),e(Br,Hg),e(sa,Qg),e(Ue,Wg),e(Ue,aa),e(aa,Bg),e(aa,ra),e(ra,Ug),e(aa,Rg),e(Ue,Vg),e(Ue,Ke),b(ia,Ke,null),e(Ke,Yg),e(Ke,bo),e(bo,Gg),e(bo,Ur),e(Ur,Jg),e(bo,Kg),e(bo,Cl),e(Cl,Zg),e(bo,e_),e(Ke,t_),b(tn,Ke,null),e(Ke,o_),e(Ke,jl),e(jl,n_),e(Ke,s_),b(la,Ke,null),h(o,fc,u),h(o,wo,u),e(wo,on),e(on,Ol),b(da,Ol,null),e(wo,a_),e(wo,Pl),e(Pl,r_),h(o,gc,u),h(o,Re,u),b(ca,Re,null),e(Re,i_),e(Re,yo),e(yo,l_),e(yo,Al),e(Al,d_),e(yo,c_),e(yo,Sl),e(Sl,p_),e(yo,h_),e(Re,m_),e(Re,pa),e(pa,u_),e(pa,Rr),e(Rr,f_),e(pa,g_),e(Re,__),e(Re,ha),e(ha,k_),e(ha,ma),e(ma,v_),e(ha,T_),e(Re,b_),e(Re,Ze),b(ua,Ze,null),e(Ze,w_),e(Ze,Lo),e(Lo,y_),e(Lo,Vr),e(Vr,L_),e(Lo,N_),e(Lo,Il),e(Il,x_),e(Lo,F_),e(Ze,$_),b(nn,Ze,null),e(Ze,X_),e(Ze,Dl),e(Dl,z_),e(Ze,M_),b(fa,Ze,null),h(o,_c,u),h(o,No,u),e(No,sn),e(sn,Hl),b(ga,Hl,null),e(No,q_),e(No,Ql),e(Ql,E_),h(o,kc,u),h(o,Ve,u),b(_a,Ve,null),e(Ve,C_),e(Ve,xo),e(xo,j_),e(xo,Wl),e(Wl,O_),e(xo,P_),e(xo,Bl),e(Bl,A_),e(xo,S_),e(Ve,I_),e(Ve,ka),e(ka,D_),e(ka,Yr),e(Yr,H_),e(ka,Q_),e(Ve,W_),e(Ve,va),e(va,B_),e(va,Ta),e(Ta,U_),e(va,R_),e(Ve,V_),e(Ve,et),b(ba,et,null),e(et,Y_),e(et,Fo),e(Fo,G_),e(Fo,Gr),e(Gr,J_),e(Fo,K_),e(Fo,Ul),e(Ul,Z_),e(Fo,ek),e(et,tk),b(an,et,null),e(et,ok),e(et,Rl),e(Rl,nk),e(et,sk),b(wa
,et,null),h(o,vc,u),h(o,$o,u),e($o,rn),e(rn,Vl),b(ya,Vl,null),e($o,ak),e($o,Yl),e(Yl,rk),h(o,Tc,u),h(o,Ee,u),b(La,Ee,null),e(Ee,ik),e(Ee,Gl),e(Gl,lk),e(Ee,dk),e(Ee,Na),e(Na,ck),e(Na,Jr),e(Jr,pk),e(Na,hk),e(Ee,mk),e(Ee,xa),e(xa,uk),e(xa,Fa),e(Fa,fk),e(xa,gk),e(Ee,_k),b(ln,Ee,null),e(Ee,kk),e(Ee,tt),b($a,tt,null),e(tt,vk),e(tt,Xo),e(Xo,Tk),e(Xo,Kr),e(Kr,bk),e(Xo,wk),e(Xo,Jl),e(Jl,yk),e(Xo,Lk),e(tt,Nk),b(dn,tt,null),e(tt,xk),e(tt,Kl),e(Kl,Fk),e(tt,$k),b(Xa,tt,null),h(o,bc,u),h(o,zo,u),e(zo,cn),e(cn,Zl),b(za,Zl,null),e(zo,Xk),e(zo,ed),e(ed,zk),h(o,wc,u),h(o,Ce,u),b(Ma,Ce,null),e(Ce,Mk),e(Ce,td),e(td,qk),e(Ce,Ek),e(Ce,qa),e(qa,Ck),e(qa,Zr),e(Zr,jk),e(qa,Ok),e(Ce,Pk),e(Ce,Ea),e(Ea,Ak),e(Ea,Ca),e(Ca,Sk),e(Ea,Ik),e(Ce,Dk),b(pn,Ce,null),e(Ce,Hk),e(Ce,ot),b(ja,ot,null),e(ot,Qk),e(ot,Mo),e(Mo,Wk),e(Mo,ei),e(ei,Bk),e(Mo,Uk),e(Mo,od),e(od,Rk),e(Mo,Vk),e(ot,Yk),b(hn,ot,null),e(ot,Gk),e(ot,nd),e(nd,Jk),e(ot,Kk),b(Oa,ot,null),h(o,yc,u),h(o,qo,u),e(qo,mn),e(mn,sd),b(Pa,sd,null),e(qo,Zk),e(qo,ad),e(ad,ev),h(o,Lc,u),h(o,je,u),b(Aa,je,null),e(je,tv),e(je,rd),e(rd,ov),e(je,nv),e(je,Sa),e(Sa,sv),e(Sa,ti),e(ti,av),e(Sa,rv),e(je,iv),e(je,Ia),e(Ia,lv),e(Ia,Da),e(Da,dv),e(Ia,cv),e(je,pv),b(un,je,null),e(je,hv),e(je,nt),b(Ha,nt,null),e(nt,mv),e(nt,Eo),e(Eo,uv),e(Eo,oi),e(oi,fv),e(Eo,gv),e(Eo,id),e(id,_v),e(Eo,kv),e(nt,vv),b(fn,nt,null),e(nt,Tv),e(nt,ld),e(ld,bv),e(nt,wv),b(Qa,nt,null),h(o,Nc,u),h(o,Co,u),e(Co,gn),e(gn,dd),b(Wa,dd,null),e(Co,yv),e(Co,cd),e(cd,Lv),h(o,xc,u),h(o,Oe,u),b(Ba,Oe,null),e(Oe,Nv),e(Oe,pd),e(pd,xv),e(Oe,Fv),e(Oe,Ua),e(Ua,$v),e(Ua,ni),e(ni,Xv),e(Ua,zv),e(Oe,Mv),e(Oe,Ra),e(Ra,qv),e(Ra,Va),e(Va,Ev),e(Ra,Cv),e(Oe,jv),b(_n,Oe,null),e(Oe,Ov),e(Oe,st),b(Ya,st,null),e(st,Pv),e(st,jo),e(jo,Av),e(jo,si),e(si,Sv),e(jo,Iv),e(jo,hd),e(hd,Dv),e(jo,Hv),e(st,Qv),b(kn,st,null),e(st,Wv),e(st,md),e(md,Bv),e(st,Uv),b(Ga,st,null),h(o,Fc,u),h(o,Oo,u),e(Oo,vn),e(vn,ud),b(Ja,ud,null),e(Oo,Rv),e(Oo,fd),e(fd,Vv),h(o,$c,u),h(o,Pe,u),b(Ka,Pe,null),e(Pe,Yv),e(Pe,gd),e(gd,Gv),e(Pe,Jv),e(Pe,Za),e(Za,Kv),e(Za,ai),e(ai,Zv),e(Za,eT),e(Pe,tT),e(Pe,er),e(er,oT),e(er,tr),e(tr,nT),e(er,sT),e(Pe,aT),b(Tn,Pe,null),e(Pe,rT),e(Pe,at),b(or,at,null),e(at,iT),e(at,Po),e(Po,lT),e(Po,ri),e(ri,dT),e(Po,cT),e(Po,_d),e(_d,pT),e(Po,hT),e(at,mT),b(bn,at,null),e(at,uT),e(at,kd),e(kd,fT),e(at,gT),b(nr,at,null),h(o,Xc,u),h(o,Ao,u),e(Ao,wn),e(wn,vd),b(sr,vd,null),e(Ao,_T),e(Ao,Td),e(Td,kT),h(o,zc,u),h(o,Ae,u),b(ar,Ae,null),e(Ae,vT),e(Ae,So),e(So,TT),e(So,bd),e(bd,bT),e(So,wT),e(So,wd),e(wd,yT),e(So,LT),e(Ae,NT),e(Ae,rr),e(rr,xT),e(rr,ii),e(ii,FT),e(rr,$T),e(Ae,XT),e(Ae,ir),e(ir,zT),e(ir,lr),e(lr,MT),e(ir,qT),e(Ae,ET),b(yn,Ae,null),e(Ae,CT),e(Ae,rt),b(dr,rt,null),e(rt,jT),e(rt,Io),e(Io,OT),e(Io,li),e(li,PT),e(Io,AT),e(Io,yd),e(yd,ST),e(Io,IT),e(rt,DT),b(Ln,rt,null),e(rt,HT),e(rt,Ld),e(Ld,QT),e(rt,WT),b(cr,rt,null),Mc=!0},p(o,[u]){const pr={};u&2&&(pr.$$scope={dirty:u,ctx:o}),Ro.$set(pr);const Nd={};u&2&&(Nd.$$scope={dirty:u,ctx:o}),Yo.$set(Nd);const xd={};u&2&&(xd.$$scope={dirty:u,ctx:o}),Jo.$set(xd);const Fd={};u&2&&(Fd.$$scope={dirty:u,ctx:o}),Zo.$set(Fd);const hr={};u&2&&(hr.$$scope={dirty:u,ctx:o}),tn.$set(hr);const $d={};u&2&&($d.$$scope={dirty:u,ctx:o}),nn.$set($d);const Xd={};u&2&&(Xd.$$scope={dirty:u,ctx:o}),an.$set(Xd);const zd={};u&2&&(zd.$$scope={dirty:u,ctx:o}),ln.$set(zd);const mr={};u&2&&(mr.$$scope={dirty:u,ctx:o}),dn.$set(mr);const Md={};u&2&&(Md.$$scope={dirty:u,ctx:o}),pn.$set(Md);const qd={};u&2&&(qd.$$scope={dirty:u,ctx:o}),hn.$set(qd);const Ed={};u&2&&(Ed.$$scope={dirty:u,ctx:o}),un.$set(Ed);const 
Cd={};u&2&&(Cd.$$scope={dirty:u,ctx:o}),fn.$set(Cd);const jd={};u&2&&(jd.$$scope={dirty:u,ctx:o}),_n.$set(jd);const kt={};u&2&&(kt.$$scope={dirty:u,ctx:o}),kn.$set(kt);const ur={};u&2&&(ur.$$scope={dirty:u,ctx:o}),Tn.$set(ur);const Od={};u&2&&(Od.$$scope={dirty:u,ctx:o}),bn.$set(Od);const fr={};u&2&&(fr.$$scope={dirty:u,ctx:o}),yn.$set(fr);const Pd={};u&2&&(Pd.$$scope={dirty:u,ctx:o}),Ln.$set(Pd)},i(o){Mc||(w(k.$$.fragment,o),w(ee.$$.fragment,o),w(Mn.$$.fragment,o),w(qn.$$.fragment,o),w(Cn.$$.fragment,o),w(jn.$$.fragment,o),w(On.$$.fragment,o),w(In.$$.fragment,o),w(Hn.$$.fragment,o),w(Wn.$$.fragment,o),w(Bn.$$.fragment,o),w(Un.$$.fragment,o),w(Rn.$$.fragment,o),w(Gn.$$.fragment,o),w(Kn.$$.fragment,o),w(Zn.$$.fragment,o),w(es.$$.fragment,o),w(ts.$$.fragment,o),w(ns.$$.fragment,o),w(as.$$.fragment,o),w(is.$$.fragment,o),w(ds.$$.fragment,o),w(ps.$$.fragment,o),w(ms.$$.fragment,o),w(fs.$$.fragment,o),w(_s.$$.fragment,o),w(vs.$$.fragment,o),w(bs.$$.fragment,o),w(ys.$$.fragment,o),w(Ns.$$.fragment,o),w(Fs.$$.fragment,o),w($s.$$.fragment,o),w(qs.$$.fragment,o),w(Ro.$$.fragment,o),w(Es.$$.fragment,o),w(Cs.$$.fragment,o),w(js.$$.fragment,o),w(Ss.$$.fragment,o),w(Yo.$$.fragment,o),w(Is.$$.fragment,o),w(Ds.$$.fragment,o),w(Hs.$$.fragment,o),w(Us.$$.fragment,o),w(Jo.$$.fragment,o),w(Rs.$$.fragment,o),w(Vs.$$.fragment,o),w(Ys.$$.fragment,o),w(Gs.$$.fragment,o),w(ea.$$.fragment,o),w(Zo.$$.fragment,o),w(ta.$$.fragment,o),w(oa.$$.fragment,o),w(na.$$.fragment,o),w(ia.$$.fragment,o),w(tn.$$.fragment,o),w(la.$$.fragment,o),w(da.$$.fragment,o),w(ca.$$.fragment,o),w(ua.$$.fragment,o),w(nn.$$.fragment,o),w(fa.$$.fragment,o),w(ga.$$.fragment,o),w(_a.$$.fragment,o),w(ba.$$.fragment,o),w(an.$$.fragment,o),w(wa.$$.fragment,o),w(ya.$$.fragment,o),w(La.$$.fragment,o),w(ln.$$.fragment,o),w($a.$$.fragment,o),w(dn.$$.fragment,o),w(Xa.$$.fragment,o),w(za.$$.fragment,o),w(Ma.$$.fragment,o),w(pn.$$.fragment,o),w(ja.$$.fragment,o),w(hn.$$.fragment,o),w(Oa.$$.fragment,o),w(Pa.$$.fragment,o),w(Aa.$$.fragment,o),w(un.$$.fragment,o),w(Ha.$$.fragment,o),w(fn.$$.fragment,o),w(Qa.$$.fragment,o),w(Wa.$$.fragment,o),w(Ba.$$.fragment,o),w(_n.$$.fragment,o),w(Ya.$$.fragment,o),w(kn.$$.fragment,o),w(Ga.$$.fragment,o),w(Ja.$$.fragment,o),w(Ka.$$.fragment,o),w(Tn.$$.fragment,o),w(or.$$.fragment,o),w(bn.$$.fragment,o),w(nr.$$.fragment,o),w(sr.$$.fragment,o),w(ar.$$.fragment,o),w(yn.$$.fragment,o),w(dr.$$.fragment,o),w(Ln.$$.fragment,o),w(cr.$$.fragment,o),Mc=!0)},o(o){y(k.$$.fragment,o),y(ee.$$.fragment,o),y(Mn.$$.fragment,o),y(qn.$$.fragment,o),y(Cn.$$.fragment,o),y(jn.$$.fragment,o),y(On.$$.fragment,o),y(In.$$.fragment,o),y(Hn.$$.fragment,o),y(Wn.$$.fragment,o),y(Bn.$$.fragment,o),y(Un.$$.fragment,o),y(Rn.$$.fragment,o),y(Gn.$$.fragment,o),y(Kn.$$.fragment,o),y(Zn.$$.fragment,o),y(es.$$.fragment,o),y(ts.$$.fragment,o),y(ns.$$.fragment,o),y(as.$$.fragment,o),y(is.$$.fragment,o),y(ds.$$.fragment,o),y(ps.$$.fragment,o),y(ms.$$.fragment,o),y(fs.$$.fragment,o),y(_s.$$.fragment,o),y(vs.$$.fragment,o),y(bs.$$.fragment,o),y(ys.$$.fragment,o),y(Ns.$$.fragment,o),y(Fs.$$.fragment,o),y($s.$$.fragment,o),y(qs.$$.fragment,o),y(Ro.$$.fragment,o),y(Es.$$.fragment,o),y(Cs.$$.fragment,o),y(js.$$.fragment,o),y(Ss.$$.fragment,o),y(Yo.$$.fragment,o),y(Is.$$.fragment,o),y(Ds.$$.fragment,o),y(Hs.$$.fragment,o),y(Us.$$.fragment,o),y(Jo.$$.fragment,o),y(Rs.$$.fragment,o),y(Vs.$$.fragment,o),y(Ys.$$.fragment,o),y(Gs.$$.fragment,o),y(ea.$$.fragment,o),y(Zo.$$.fragment,o),y(ta.$$.fragment,o),y(oa.$$.fragment,o),y(na.$$.fragment,o),y(ia.$$.fragment,o),y(tn.$$.fragm
ent,o),y(la.$$.fragment,o),y(da.$$.fragment,o),y(ca.$$.fragment,o),y(ua.$$.fragment,o),y(nn.$$.fragment,o),y(fa.$$.fragment,o),y(ga.$$.fragment,o),y(_a.$$.fragment,o),y(ba.$$.fragment,o),y(an.$$.fragment,o),y(wa.$$.fragment,o),y(ya.$$.fragment,o),y(La.$$.fragment,o),y(ln.$$.fragment,o),y($a.$$.fragment,o),y(dn.$$.fragment,o),y(Xa.$$.fragment,o),y(za.$$.fragment,o),y(Ma.$$.fragment,o),y(pn.$$.fragment,o),y(ja.$$.fragment,o),y(hn.$$.fragment,o),y(Oa.$$.fragment,o),y(Pa.$$.fragment,o),y(Aa.$$.fragment,o),y(un.$$.fragment,o),y(Ha.$$.fragment,o),y(fn.$$.fragment,o),y(Qa.$$.fragment,o),y(Wa.$$.fragment,o),y(Ba.$$.fragment,o),y(_n.$$.fragment,o),y(Ya.$$.fragment,o),y(kn.$$.fragment,o),y(Ga.$$.fragment,o),y(Ja.$$.fragment,o),y(Ka.$$.fragment,o),y(Tn.$$.fragment,o),y(or.$$.fragment,o),y(bn.$$.fragment,o),y(nr.$$.fragment,o),y(sr.$$.fragment,o),y(ar.$$.fragment,o),y(yn.$$.fragment,o),y(dr.$$.fragment,o),y(Ln.$$.fragment,o),y(cr.$$.fragment,o),Mc=!1},d(o){t(m),o&&t(x),o&&t(f),L(k),o&&t(G),o&&t(X),L(ee),o&&t(le),o&&t(V),o&&t(q),o&&t(ne),o&&t(de),o&&t(se),o&&t(ce),o&&t(M),o&&t(Q),o&&t(Y),o&&t(Ad),o&&t(vt),o&&t(Sd),o&&t(Ht),L(Mn),o&&t(Id),o&&t(De),L(qn),L(Cn),o&&t(Dd),o&&t(Wt),L(jn),o&&t(Hd),o&&t($e),L(On),L(In),L(Hn),L(Wn),L(Bn),o&&t(Qd),o&&t(Rt),L(Un),o&&t(Wd),o&&t(qe),L(Rn),L(Gn),L(Kn),L(Zn),o&&t(Bd),o&&t(Jt),L(es),o&&t(Ud),o&&t(Kt),L(ts),o&&t(Rd),o&&t(Zt),L(ns),o&&t(Vd),o&&t(eo),L(as),o&&t(Yd),o&&t(to),L(is),o&&t(Gd),o&&t(oo),L(ds),o&&t(Jd),o&&t(no),L(ps),o&&t(Kd),o&&t(so),L(ms),o&&t(Zd),o&&t(ao),L(fs),o&&t(ec),o&&t(ro),L(_s),o&&t(tc),o&&t(io),L(vs),o&&t(oc),o&&t(lo),L(bs),o&&t(nc),o&&t(co),L(ys),o&&t(sc),o&&t(po),L(Ns),o&&t(ac),o&&t(ho),L(Fs),o&&t(rc),o&&t(He),L($s),L(qs),L(Ro),L(Es),o&&t(ic),o&&t(uo),L(Cs),o&&t(lc),o&&t(Qe),L(js),L(Ss),L(Yo),L(Is),o&&t(dc),o&&t(go),L(Ds),o&&t(cc),o&&t(We),L(Hs),L(Us),L(Jo),L(Rs),L(Vs),o&&t(pc),o&&t(ko),L(Ys),o&&t(hc),o&&t(Be),L(Gs),L(ea),L(Zo),L(ta),o&&t(mc),o&&t(To),L(oa),o&&t(uc),o&&t(Ue),L(na),L(ia),L(tn),L(la),o&&t(fc),o&&t(wo),L(da),o&&t(gc),o&&t(Re),L(ca),L(ua),L(nn),L(fa),o&&t(_c),o&&t(No),L(ga),o&&t(kc),o&&t(Ve),L(_a),L(ba),L(an),L(wa),o&&t(vc),o&&t($o),L(ya),o&&t(Tc),o&&t(Ee),L(La),L(ln),L($a),L(dn),L(Xa),o&&t(bc),o&&t(zo),L(za),o&&t(wc),o&&t(Ce),L(Ma),L(pn),L(ja),L(hn),L(Oa),o&&t(yc),o&&t(qo),L(Pa),o&&t(Lc),o&&t(je),L(Aa),L(un),L(Ha),L(fn),L(Qa),o&&t(Nc),o&&t(Co),L(Wa),o&&t(xc),o&&t(Oe),L(Ba),L(_n),L(Ya),L(kn),L(Ga),o&&t(Fc),o&&t(Oo),L(Ja),o&&t($c),o&&t(Pe),L(Ka),L(Tn),L(or),L(bn),L(nr),o&&t(Xc),o&&t(Ao),L(sr),o&&t(zc),o&&t(Ae),L(ar),L(yn),L(dr),L(Ln),L(cr)}}}const Py={local:"xlnet",sections:[{local:"overview",title:"Overview"},{local:"transformers.XLNetConfig",title:"XLNetConfig"},{local:"transformers.XLNetTokenizer",title:"XLNetTokenizer"},{local:"transformers.XLNetTokenizerFast",title:"XLNetTokenizerFast"},{local:"transformers.models.xlnet.modeling_xlnet.XLNetModelOutput",title:"XLNet specific 
outputs"},{local:"transformers.XLNetModel",title:"XLNetModel"},{local:"transformers.XLNetLMHeadModel",title:"XLNetLMHeadModel"},{local:"transformers.XLNetForSequenceClassification",title:"XLNetForSequenceClassification"},{local:"transformers.XLNetForMultipleChoice",title:"XLNetForMultipleChoice"},{local:"transformers.XLNetForTokenClassification",title:"XLNetForTokenClassification"},{local:"transformers.XLNetForQuestionAnsweringSimple",title:"XLNetForQuestionAnsweringSimple"},{local:"transformers.XLNetForQuestionAnswering",title:"XLNetForQuestionAnswering"},{local:"transformers.TFXLNetModel",title:"TFXLNetModel"},{local:"transformers.TFXLNetLMHeadModel",title:"TFXLNetLMHeadModel"},{local:"transformers.TFXLNetForSequenceClassification",title:"TFXLNetForSequenceClassification"},{local:"transformers.TFXLNetForMultipleChoice",title:"TFLNetForMultipleChoice"},{local:"transformers.TFXLNetForTokenClassification",title:"TFXLNetForTokenClassification"},{local:"transformers.TFXLNetForQuestionAnsweringSimple",title:"TFXLNetForQuestionAnsweringSimple"}],title:"XLNet"};function Ay(S,m,x){let{fw:f}=m;return S.$$set=_=>{"fw"in _&&x(0,f=_.fw)},[f]}class By extends my{constructor(m){super();uy(this,m,Ay,Oy,fy,{fw:0})}}export{By as default,Py as metadata};
9,956
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/encoderdecoder.mdx-b5cda2a4.js
import{S as Di,i as ji,s as $i,e as a,k as i,w as u,t,L as qi,c as s,d as o,m as c,a as d,x as g,h as r,b as l,J as e,g as h,y as _,q as v,o as b,B as y}from"../../chunks/vendor-b1433968.js";import{T as dd}from"../../chunks/Tip-c3840994.js";import{D as O}from"../../chunks/Docstring-ff504c58.js";import{C as ze}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Gn}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Pi(Q){let m,D,f,w,P;return{c(){m=a("p"),D=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),w=t("Module"),P=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=s(k,"P",{});var T=d(m);D=r(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var F=d(f);w=r(F,"Module"),F.forEach(o),P=r(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(o)},m(k,T){h(k,m,T),e(m,D),e(m,f),e(f,w),e(m,P)},d(k){k&&o(m)}}}function Fi(Q){let m,D,f,w,P;return{c(){m=a("p"),D=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),w=t("Module"),P=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=s(k,"P",{});var T=d(m);D=r(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var F=d(f);w=r(F,"Module"),F.forEach(o),P=r(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(o)},m(k,T){h(k,m,T),e(m,D),e(m,f),e(f,w),e(m,P)},d(k){k&&o(m)}}}function Ci(Q){let m,D,f,w,P;return{c(){m=a("p"),D=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),w=t("Module"),P=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){m=s(k,"P",{});var T=d(m);D=r(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var F=d(f);w=r(F,"Module"),F.forEach(o),P=r(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(o)},m(k,T){h(k,m,T),e(m,D),e(m,f),e(f,w),e(m,P)},d(k){k&&o(m)}}}function Ai(Q){let 
m,D,f,w,P,k,T,F,Mt,Un,me,zt,wo,Dt,jt,Vn,he,$t,De,qt,Pt,Hn,fe,Ft,Eo,Ct,At,Jn,W,St,xo,Nt,It,je,Lt,Bt,Yn,R,Ot,Ko,Wt,Rt,Qo,Gt,Ut,Zn,$e,Kn,G,Vt,qe,Ht,Jt,Pe,Yt,Zt,Qn,X,ue,Xo,Fe,Kt,en,Qt,Xn,z,Ce,Xt,ge,Mo,er,or,zo,nr,tr,rr,ee,ar,Do,sr,dr,jo,ir,cr,lr,on,pr,mr,Ae,hr,_e,Se,fr,Ne,ur,$o,gr,_r,vr,ve,Ie,br,oe,yr,nn,kr,Tr,tn,wr,Er,et,ne,be,rn,Le,xr,an,Mr,ot,E,Be,zr,te,Dr,sn,jr,$r,dn,qr,Pr,Fr,Oe,Cr,We,Ar,Sr,Nr,cn,Ir,Lr,Re,Br,qo,Or,Wr,Rr,Ge,Gr,Ue,Ur,Vr,Hr,U,Po,Jr,Yr,ln,Zr,Kr,pn,Qr,Xr,ea,C,Ve,oa,re,na,Fo,ta,ra,mn,aa,sa,da,ye,ia,hn,ca,la,He,pa,A,Je,ma,fn,ha,fa,ae,ua,un,ga,_a,gn,va,ba,ya,_n,ka,Ta,Ye,nt,se,ke,vn,Ze,wa,bn,Ea,tt,x,Ke,xa,de,Ma,yn,za,Da,kn,ja,$a,qa,Qe,Pa,Xe,Fa,Ca,Aa,Tn,Sa,Na,eo,Ia,Co,La,Ba,Oa,oo,Wa,no,Ra,Ga,Ua,V,wn,Va,Ha,En,Ja,Ya,xn,Za,Ka,Qa,S,to,Xa,ie,es,Ao,os,ns,Mn,ts,rs,as,Te,ss,zn,ds,is,ro,cs,L,ao,ls,Dn,ps,ms,jn,hs,fs,so,rt,ce,we,$n,io,us,qn,gs,at,M,co,_s,le,vs,Pn,bs,ys,Fn,ks,Ts,ws,lo,Es,po,xs,Ms,zs,Cn,Ds,js,mo,$s,So,qs,Ps,Fs,ho,Cs,fo,As,Ss,Ns,H,No,Is,Ls,An,Bs,Os,Sn,Ws,Rs,Gs,N,uo,Us,pe,Vs,Io,Hs,Js,Nn,Ys,Zs,Ks,Ee,Qs,In,Xs,ed,go,od,B,_o,nd,Ln,td,rd,Bn,ad,sd,vo,st;return k=new Gn({}),$e=new ze({props:{code:`# a workaround to load from pytorch checkpoint _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") _model.encoder.save_pretrained("./encoder") _model.decoder.save_pretrained("./decoder") model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ) # This is only for copying some specific attributes of this particular model. model.config = _model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># a workaround to load from pytorch checkpoint</span> <span class="hljs-meta">&gt;&gt;&gt; </span>_model = EncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/bert2bert-cnn_dailymail-fp16&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>_model.encoder.save_pretrained(<span class="hljs-string">&quot;./encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>_model.decoder.save_pretrained(<span class="hljs-string">&quot;./decoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;./encoder&quot;</span>, <span class="hljs-string">&quot;./decoder&quot;</span>, encoder_from_pt=<span class="hljs-literal">True</span>, decoder_from_pt=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># This is only for copying some specific attributes of this particular model.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config = _model.config`}}),Fe=new Gn({}),Ce=new O({props:{name:"class transformers.EncoderDecoderConfig",anchor:"transformers.EncoderDecoderConfig",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py#L26",parametersDescription:[{anchor:"transformers.EncoderDecoderConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments. 
Notably:</p> <ul> <li><strong>encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the encoder config.</li> <li><strong>decoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the decoder config.</li> </ul>`,name:"kwargs"}]}}),Ae=new ze({props:{code:`from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel # Initializing a BERT bert-base-uncased style configuration config_encoder = BertConfig() config_decoder = BertConfig() config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) # Initializing a Bert2Bert model from the bert-base-uncased style configurations model = EncoderDecoderModel(config=config) # Accessing the model configuration config_encoder = model.config.encoder config_decoder = model.config.decoder # set decoder config to causal lm config_decoder.is_decoder = True config_decoder.add_cross_attention = True # Saving the model, including its configuration model.save_pretrained('my-model') # loading model and config from pretrained folder encoder_decoder_config = EncoderDecoderConfig.from_pretrained('my-model') model = EncoderDecoderModel.from_pretrained('my-model', config=encoder_decoder_config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, EncoderDecoderConfig, EncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Bert2Bert model from the bert-base-uncased style configurations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = model.config.encoder <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = model.config.decoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set decoder config to causal lm</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.add_cross_attention = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_decoder_config = EncoderDecoderConfig.from_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>, config=encoder_decoder_config)`}}),Se=new O({props:{name:"from_encoder_decoder_configs",anchor:"transformers.EncoderDecoderConfig.from_encoder_decoder_configs",parameters:[{name:"encoder_config",val:": PretrainedConfig"},{name:"decoder_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py#L91",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig" >EncoderDecoderConfig</a></p> `}}),Ie=new O({props:{name:"to_dict",anchor:"transformers.EncoderDecoderConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py#L108",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),Le=new Gn({}),Be=new O({props:{name:"class transformers.EncoderDecoderModel",anchor:"transformers.EncoderDecoderModel",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"decoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py#L168",parametersDescription:[{anchor:"transformers.EncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ve=new O({props:{name:"forward",anchor:"transformers.EncoderDecoderModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py#L425",parametersDescription:[{anchor:"transformers.EncoderDecoderModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.EncoderDecoderModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.EncoderDecoderModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For training, <code>decoder_input_ids</code> are automatically created by the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.`,name:"decoder_input_ids"},{anchor:"transformers.EncoderDecoderModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.EncoderDecoderModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.EncoderDecoderModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.EncoderDecoderModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.EncoderDecoderModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"decoder_inputs_embeds"},{anchor:"transformers.EncoderDecoderModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.EncoderDecoderModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.EncoderDecoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.EncoderDecoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.EncoderDecoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple. kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul>`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig" >EncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> 
<p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ye=new dd({props:{$$slots:{default:[Pi]},$$scope:{ctx:Q}}}),He=new ze({props:{code:`from transformers import EncoderDecoderModel, BertTokenizer import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert from pre-trained checkpoints # training model.config.decoder_start_token_id = tokenizer.cls_token_id model.config.pad_token_id = tokenizer.pad_token_id model.config.vocab_size = model.config.decoder.vocab_size input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids outputs = model(input_ids=input_ids, labels=input_ids) loss, logits = outputs.loss, outputs.logits # save and load from pretrained model.save_pretrained("bert2bert") model = EncoderDecoderModel.from_pretrained("bert2bert") # generation generated = model.generate(input_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> EncoderDecoderModel, BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
EncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, <span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-comment"># initialize Bert2Bert from pre-trained checkpoints</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.decoder_start_token_id = tokenizer.cls_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = tokenizer.pad_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.vocab_size = model.config.decoder.vocab_size <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;This is a really long text&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;This is the corresponding summary&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, labels=input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>loss, logits = outputs.loss, outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;bert2bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;bert2bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(input_ids)`}}),Je=new O({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.EncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": str = None"},{name:"decoder_pretrained_model_name_or_path",val:": str = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py#L281",parametersDescription:[{anchor:"transformers.EncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.EncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.EncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.EncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),Ye=new ze({props:{code:`from transformers import EncoderDecoderModel # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # saving model after fine-tuning model.save_pretrained("./bert2bert") # load fine-tuned model model = EncoderDecoderModel.from_pretrained("./bert2bert"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> EncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a bert2bert from two pretrained BERT models. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, <span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./bert2bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./bert2bert&quot;</span>)`}}),Ze=new Gn({}),Ke=new O({props:{name:"class transformers.TFEncoderDecoderModel",anchor:"transformers.TFEncoderDecoderModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py#L152",parametersDescription:[{anchor:"transformers.TFEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),to=new O({props:{name:"call",anchor:"transformers.TFEncoderDecoderModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py#L463",parametersDescription:[{anchor:"transformers.TFEncoderDecoderModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFEncoderDecoderModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFEncoderDecoderModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>Provide for sequence to sequence training to the decoder. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"decoder_input_ids"},{anchor:"transformers.TFEncoderDecoderModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFEncoderDecoderModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.TFEncoderDecoderModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFEncoderDecoderModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFEncoderDecoderModel.call.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"decoder_inputs_embeds"},{anchor:"transformers.TFEncoderDecoderModel.call.labels",description:`<strong>labels</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.TFEncoderDecoderModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFEncoderDecoderModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFEncoderDecoderModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFEncoderDecoderModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFEncoderDecoderModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as \`**decoder_kwargs&#x201C; for the decoder forward function.</li> </ul>`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig" >EncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the 
decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Te=new dd({props:{$$slots:{default:[Fi]},$$scope:{ctx:Q}}}),ro=new ze({props:{code:`from transformers import TFEncoderDecoderModel, BertTokenizer # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized model = TFEncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-cased', 'gpt2') tokenizer = BertTokenizer.from_pretrained('bert-base-cased') # forward input_ids = tokenizer.encode("Hello, my dog is cute", add_special_tokens=True, return_tensors='tf') # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=input_ids) # training outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids) loss, logits = outputs.loss, outputs.logits # save and load from pretrained model.save_pretrained("bert2gpt2") model = TFEncoderDecoderModel.from_pretrained("bert2gpt2") # generation generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFEncoderDecoderModel, BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a bert2gpt2 from a pretrained BERT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, <span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>loss, logits = outputs.loss, outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;bert2gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;bert2gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)`}}),ao=new O({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.TFEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": str = None"},{name:"decoder_pretrained_model_name_or_path",val:": str = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py#L300",parametersDescription:[{anchor:"transformers.TFEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pytorch index checkpoint file</em> (e.g, <code>./pt_model/</code>). 
In this case, <code>encoder_from_pt</code> should be set to <code>True</code>.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.TFEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pytorch checkpoint file</em> (e.g, <code>./pt_model/</code>). In this case, <code>decoder_from_pt</code> should be set to <code>True</code>.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.TFEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.TFEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),so=new ze({props:{code:`from transformers import TFEncoderDecoderModel # initialize a bert2gpt2 from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized model = TFEncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'gpt2') # saving model after fine-tuning model.save_pretrained("./bert2gpt2") # load fine-tuned model model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a bert2gpt2 from two pretrained BERT models. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, <span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./bert2gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./bert2gpt2&quot;</span>)`}}),io=new Gn({}),co=new O({props:{name:"class transformers.FlaxEncoderDecoderModel",anchor:"transformers.FlaxEncoderDecoderModel",parameters:[{name:"config",val:": EncoderDecoderConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py#L306",parametersDescription:[{anchor:"transformers.FlaxEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxEncoderDecoderModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),uo=new O({props:{name:"__call__",anchor:"transformers.FlaxEncoderDecoderModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py#L614",parametersDescription:[{anchor:"transformers.FlaxEncoderDecoderModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For sequence to sequence training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.encoder.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.decoder.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxEncoderDecoderModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>FlaxSeq2SeqLMOutput</code> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig" >EncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ee=new dd({props:{$$slots:{default:[Ci]},$$scope:{ctx:Q}}}),go=new ze({props:{code:`from transformers import FlaxEncoderDecoderModel, BertTokenizer, GPT2Tokenizer # load a fine-tuned bert2gpt2 model model = FlaxEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") # load input & output tokenizer tokenizer_input = BertTokenizer.from_pretrained('bert-base-cased') tokenizer_output = GPT2Tokenizer.from_pretrained('gpt2') article = '''Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. 
SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done.''' input_ids = tokenizer_input(article, add_special_tokens=True, return_tensors='np').input_ids # use GPT2's eos_token as the pad as well as eos token model.config.eos_token_id = model.config.decoder.eos_token_id model.config.pad_token_id = model.config.eos_token_id sequences = model.generate(input_ids, num_beams=4, max_length=12).sequences summary = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)[0] assert summary == "SAS Alpha Epsilon suspended Sigma Alpha Epsilon members",`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxEncoderDecoderModel, BertTokenizer, GPT2Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load a fine-tuned bert2gpt2 model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/bert2gpt2-cnn_dailymail-fp16&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load input &amp; output tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_input = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_output = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&#x27;&#x27;&#x27;Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members <span class="hljs-meta">... </span>singing a racist chant. SAE&#x27;s national chapter suspended the students, <span class="hljs-meta">... </span>but University of Oklahoma President David Boren took it a step further, <span class="hljs-meta">... 
</span>saying the university&#x27;s affiliation with the fraternity is permanently done.&#x27;&#x27;&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer_input(article, add_special_tokens=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use GPT2&#x27;s eos_token as the pad as well as eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.eos_token_id = model.config.decoder.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>sequences = model.generate(input_ids, num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">12</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span>summary = tokenizer_output.batch_decode(sequences, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> summary == <span class="hljs-string">&quot;SAS Alpha Epsilon suspended Sigma Alpha Epsilon members&quot;</span>`}}),_o=new O({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.FlaxEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType] = None"},{name:"decoder_pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType] = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py#L739",parametersDescription:[{anchor:"transformers.FlaxEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (:obj: <em>Union[str, os.PathLike]</em>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.FlaxEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (:obj: <em>Union[str, os.PathLike]</em>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.FlaxEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.FlaxEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),vo=new ze({props:{code:`from transformers import FlaxEncoderDecoderModel # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-cased', 'gpt2') # saving model after fine-tuning model.save_pretrained("./bert2gpt2") # load fine-tuned model model = FlaxEncoderDecoderModel.from_pretrained("./bert2gpt2"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a bert2gpt2 from pretrained BERT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, <span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./bert2gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./bert2gpt2&quot;</span>)`}}),{c(){m=a("meta"),D=i(),f=a("h1"),w=a("a"),P=a("span"),u(k.$$.fragment),T=i(),F=a("span"),Mt=t("Encoder Decoder Models"),Un=i(),me=a("p"),zt=t("The "),wo=a("a"),Dt=t("EncoderDecoderModel"),jt=t(` can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder.`),Vn=i(),he=a("p"),$t=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),De=a("a"),qt=t("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Pt=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.`),Hn=i(),fe=a("p"),Ft=t("After such an "),Eo=a("a"),Ct=t("EncoderDecoderModel"),At=t(` has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Jn=i(),W=a("p"),St=t("An application of this architecture could be to leverage two pretrained "),xo=a("a"),Nt=t("BertModel"),It=t(` as the encoder and decoder for a summarization model as was shown in: `),je=a("a"),Lt=t("Text Summarization with Pretrained Encoders"),Bt=t(" by Yang Liu and Mirella Lapata."),Yn=i(),R=a("p"),Ot=t("The "),Ko=a("code"),Wt=t("from_pretrained()"),Rt=t(` currently doesn\u2019t support initializing the model from a pytorch checkpoint. Passing `),Qo=a("code"),Gt=t("from_pt=True"),Ut=t(` to this method will throw an exception. If there are only pytorch checkpoints for a particular encoder-decoder model, a workaround is:`),Zn=i(),u($e.$$.fragment),Kn=i(),G=a("p"),Vt=t("This model was contributed by "),qe=a("a"),Ht=t("thomwolf"),Jt=t(`. This model\u2019s TensorFlow and Flax versions were contributed by `),Pe=a("a"),Yt=t("ydshieh"),Zt=t("."),Qn=i(),X=a("h2"),ue=a("a"),Xo=a("span"),u(Fe.$$.fragment),Kt=i(),en=a("span"),Qt=t("EncoderDecoderConfig"),Xn=i(),z=a("div"),u(Ce.$$.fragment),Xt=i(),ge=a("p"),Mo=a("a"),er=t("EncoderDecoderConfig"),or=t(` is the configuration class to store the configuration of a `),zo=a("a"),nr=t("EncoderDecoderModel"),tr=t(`. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs.`),rr=i(),ee=a("p"),ar=t("Configuration objects inherit from "),Do=a("a"),sr=t("PretrainedConfig"),dr=t(` and can be used to control the model outputs. 
Read the documentation from `),jo=a("a"),ir=t("PretrainedConfig"),cr=t(" for more information."),lr=i(),on=a("p"),pr=t("Examples:"),mr=i(),u(Ae.$$.fragment),hr=i(),_e=a("div"),u(Se.$$.fragment),fr=i(),Ne=a("p"),ur=t("Instantiate a "),$o=a("a"),gr=t("EncoderDecoderConfig"),_r=t(` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),vr=i(),ve=a("div"),u(Ie.$$.fragment),br=i(),oe=a("p"),yr=t("Serializes this instance to a Python dictionary. Override the default "),nn=a("em"),kr=t("to_dict()"),Tr=t(" from "),tn=a("em"),wr=t("PretrainedConfig"),Er=t("."),et=i(),ne=a("h2"),be=a("a"),rn=a("span"),u(Le.$$.fragment),xr=i(),an=a("span"),Mr=t("EncoderDecoderModel"),ot=i(),E=a("div"),u(Be.$$.fragment),zr=i(),te=a("p"),Dr=t(`This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via `),sn=a("code"),jr=t("from_pretrained()"),$r=t(` function and the decoder is loaded via `),dn=a("code"),qr=t("from_pretrained()"),Pr=t(` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),Fr=i(),Oe=a("p"),Cr=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),We=a("a"),Ar=t("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Sr=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Nr=i(),cn=a("p"),Ir=t(`After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Lr=i(),Re=a("p"),Br=t("This model inherits from "),qo=a("a"),Or=t("PreTrainedModel"),Wr=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rr=i(),Ge=a("p"),Gr=t("This model is also a PyTorch "),Ue=a("a"),Ur=t("torch.nn.Module"),Vr=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hr=i(),U=a("p"),Po=a("a"),Jr=t("EncoderDecoderModel"),Yr=t(` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth`),ln=a("em"),Zr=t("~transformers.AutoModel.from_pretrained"),Kr=t(` class method for the encoder and :meth`),pn=a("em"),Qr=t("~transformers.AutoModelForCausalLM.from_pretrained"),Xr=t(" class method for the decoder."),ea=i(),C=a("div"),u(Ve.$$.fragment),oa=i(),re=a("p"),na=t("The "),Fo=a("a"),ta=t("EncoderDecoderModel"),ra=t(" forward method, overrides the "),mn=a("code"),aa=t("__call__"),sa=t(" special method."),da=i(),u(ye.$$.fragment),ia=i(),hn=a("p"),ca=t("Examples:"),la=i(),u(He.$$.fragment),pa=i(),A=a("div"),u(Je.$$.fragment),ma=i(),fn=a("p"),ha=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),fa=i(),ae=a("p"),ua=t("The model is set in evaluation mode by default using "),un=a("code"),ga=t("model.eval()"),_a=t(` (Dropout modules are deactivated). 
To train the model, you need to first set it back in training mode with `),gn=a("code"),va=t("model.train()"),ba=t("."),ya=i(),_n=a("p"),ka=t("Example:"),Ta=i(),u(Ye.$$.fragment),nt=i(),se=a("h2"),ke=a("a"),vn=a("span"),u(Ze.$$.fragment),wa=i(),bn=a("span"),Ea=t("TFEncoderDecoderModel"),tt=i(),x=a("div"),u(Ke.$$.fragment),xa=i(),de=a("p"),Ma=t(`This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via `),yn=a("code"),za=t("from_pretrained()"),Da=t(` function and the decoder is loaded via `),kn=a("code"),ja=t("from_pretrained()"),$a=t(` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),qa=i(),Qe=a("p"),Pa=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Xe=a("a"),Fa=t("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Ca=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Aa=i(),Tn=a("p"),Sa=t(`After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Na=i(),eo=a("p"),Ia=t("This model inherits from "),Co=a("a"),La=t("TFPreTrainedModel"),Ba=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Oa=i(),oo=a("p"),Wa=t("This model is also a "),no=a("a"),Ra=t("tf.keras.Model"),Ga=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ua=i(),V=a("p"),wn=a("code"),Va=t("TFEncoderDecoder"),Ha=t(` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth`),En=a("em"),Ja=t("~transformers.TFAutoModel.from_pretrained"),Ya=t(` class method for the encoder and :meth`),xn=a("em"),Za=t("~transformers.TFAutoModelForCausalLM.from_pretrained"),Ka=t(" class method for the decoder."),Qa=i(),S=a("div"),u(to.$$.fragment),Xa=i(),ie=a("p"),es=t("The "),Ao=a("a"),os=t("TFEncoderDecoderModel"),ns=t(" forward method, overrides the "),Mn=a("code"),ts=t("__call__"),rs=t(" special method."),as=i(),u(Te.$$.fragment),ss=i(),zn=a("p"),ds=t("Examples:"),is=i(),u(ro.$$.fragment),cs=i(),L=a("div"),u(ao.$$.fragment),ls=i(),Dn=a("p"),ps=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),ms=i(),jn=a("p"),hs=t("Example:"),fs=i(),u(so.$$.fragment),rt=i(),ce=a("h2"),we=a("a"),$n=a("span"),u(io.$$.fragment),us=i(),qn=a("span"),gs=t("FlaxEncoderDecoderModel"),at=i(),M=a("div"),u(co.$$.fragment),_s=i(),le=a("p"),vs=t(`This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via `),Pn=a("code"),bs=t("from_pretrained()"),ys=t(` function and the decoder is loaded via `),Fn=a("code"),ks=t("from_pretrained()"),Ts=t(` function. 
Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),ws=i(),lo=a("p"),Es=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),po=a("a"),xs=t("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Ms=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),zs=i(),Cn=a("p"),Ds=t(`After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),js=i(),mo=a("p"),$s=t("This model inherits from "),So=a("a"),qs=t("FlaxPreTrainedModel"),Ps=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fs=i(),ho=a("p"),Cs=t("This model is also a Flax Linen "),fo=a("a"),As=t("flax.nn.Module"),Ss=t(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ns=i(),H=a("p"),No=a("a"),Is=t("FlaxEncoderDecoderModel"),Ls=t(` is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as decoder module when created with the :meth`),An=a("em"),Bs=t("~transformers.FlaxAutoModel.from_pretrained"),Os=t(` class method for the encoder and :meth`),Sn=a("em"),Ws=t("~transformers.FlaxAutoModelForCausalLM.from_pretrained"),Rs=t(" class method for the decoder."),Gs=i(),N=a("div"),u(uo.$$.fragment),Us=i(),pe=a("p"),Vs=t("The "),Io=a("a"),Hs=t("FlaxEncoderDecoderModel"),Js=t(" forward method, overrides the "),Nn=a("code"),Ys=t("__call__"),Zs=t(" special method."),Ks=i(),u(Ee.$$.fragment),Qs=i(),In=a("p"),Xs=t("Examples:"),ed=i(),u(go.$$.fragment),od=i(),B=a("div"),u(_o.$$.fragment),nd=i(),Ln=a("p"),td=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),rd=i(),Bn=a("p"),ad=t("Example:"),sd=i(),u(vo.$$.fragment),this.h()},l(n){const p=qi('[data-svelte="svelte-1phssyn"]',document.head);m=s(p,"META",{name:!0,content:!0}),p.forEach(o),D=c(n),f=s(n,"H1",{class:!0});var bo=d(f);w=s(bo,"A",{id:!0,class:!0,href:!0});var On=d(w);P=s(On,"SPAN",{});var Wn=d(P);g(k.$$.fragment,Wn),Wn.forEach(o),On.forEach(o),T=c(bo),F=s(bo,"SPAN",{});var id=d(F);Mt=r(id,"Encoder Decoder Models"),id.forEach(o),bo.forEach(o),Un=c(n),me=s(n,"P",{});var dt=d(me);zt=r(dt,"The "),wo=s(dt,"A",{href:!0});var cd=d(wo);Dt=r(cd,"EncoderDecoderModel"),cd.forEach(o),jt=r(dt,` can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder.`),dt.forEach(o),Vn=c(n),he=s(n,"P",{});var it=d(he);$t=r(it,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),De=s(it,"A",{href:!0,rel:!0});var ld=d(De);qt=r(ld,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),ld.forEach(o),Pt=r(it,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.`),it.forEach(o),Hn=c(n),fe=s(n,"P",{});var ct=d(fe);Ft=r(ct,"After such an "),Eo=s(ct,"A",{href:!0});var pd=d(Eo);Ct=r(pd,"EncoderDecoderModel"),pd.forEach(o),At=r(ct,` has been trained/fine-tuned, it can be 
saved/loaded just like any other models (see the examples for more information).`),ct.forEach(o),Jn=c(n),W=s(n,"P",{});var Lo=d(W);St=r(Lo,"An application of this architecture could be to leverage two pretrained "),xo=s(Lo,"A",{href:!0});var md=d(xo);Nt=r(md,"BertModel"),md.forEach(o),It=r(Lo,` as the encoder and decoder for a summarization model as was shown in: `),je=s(Lo,"A",{href:!0,rel:!0});var hd=d(je);Lt=r(hd,"Text Summarization with Pretrained Encoders"),hd.forEach(o),Bt=r(Lo," by Yang Liu and Mirella Lapata."),Lo.forEach(o),Yn=c(n),R=s(n,"P",{});var Bo=d(R);Ot=r(Bo,"The "),Ko=s(Bo,"CODE",{});var fd=d(Ko);Wt=r(fd,"from_pretrained()"),fd.forEach(o),Rt=r(Bo,` currently doesn\u2019t support initializing the model from a pytorch checkpoint. Passing `),Qo=s(Bo,"CODE",{});var ud=d(Qo);Gt=r(ud,"from_pt=True"),ud.forEach(o),Ut=r(Bo,` to this method will throw an exception. If there are only pytorch checkpoints for a particular encoder-decoder model, a workaround is:`),Bo.forEach(o),Zn=c(n),g($e.$$.fragment,n),Kn=c(n),G=s(n,"P",{});var Oo=d(G);Vt=r(Oo,"This model was contributed by "),qe=s(Oo,"A",{href:!0,rel:!0});var gd=d(qe);Ht=r(gd,"thomwolf"),gd.forEach(o),Jt=r(Oo,`. This model\u2019s TensorFlow and Flax versions were contributed by `),Pe=s(Oo,"A",{href:!0,rel:!0});var _d=d(Pe);Yt=r(_d,"ydshieh"),_d.forEach(o),Zt=r(Oo,"."),Oo.forEach(o),Qn=c(n),X=s(n,"H2",{class:!0});var lt=d(X);ue=s(lt,"A",{id:!0,class:!0,href:!0});var vd=d(ue);Xo=s(vd,"SPAN",{});var bd=d(Xo);g(Fe.$$.fragment,bd),bd.forEach(o),vd.forEach(o),Kt=c(lt),en=s(lt,"SPAN",{});var yd=d(en);Qt=r(yd,"EncoderDecoderConfig"),yd.forEach(o),lt.forEach(o),Xn=c(n),z=s(n,"DIV",{class:!0});var I=d(z);g(Ce.$$.fragment,I),Xt=c(I),ge=s(I,"P",{});var Rn=d(ge);Mo=s(Rn,"A",{href:!0});var kd=d(Mo);er=r(kd,"EncoderDecoderConfig"),kd.forEach(o),or=r(Rn,` is the configuration class to store the configuration of a `),zo=s(Rn,"A",{href:!0});var Td=d(zo);nr=r(Td,"EncoderDecoderModel"),Td.forEach(o),tr=r(Rn,`. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs.`),Rn.forEach(o),rr=c(I),ee=s(I,"P",{});var Wo=d(ee);ar=r(Wo,"Configuration objects inherit from "),Do=s(Wo,"A",{href:!0});var wd=d(Do);sr=r(wd,"PretrainedConfig"),wd.forEach(o),dr=r(Wo,` and can be used to control the model outputs. Read the documentation from `),jo=s(Wo,"A",{href:!0});var Ed=d(jo);ir=r(Ed,"PretrainedConfig"),Ed.forEach(o),cr=r(Wo," for more information."),Wo.forEach(o),lr=c(I),on=s(I,"P",{});var xd=d(on);pr=r(xd,"Examples:"),xd.forEach(o),mr=c(I),g(Ae.$$.fragment,I),hr=c(I),_e=s(I,"DIV",{class:!0});var pt=d(_e);g(Se.$$.fragment,pt),fr=c(pt),Ne=s(pt,"P",{});var mt=d(Ne);ur=r(mt,"Instantiate a "),$o=s(mt,"A",{href:!0});var Md=d($o);gr=r(Md,"EncoderDecoderConfig"),Md.forEach(o),_r=r(mt,` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),mt.forEach(o),pt.forEach(o),vr=c(I),ve=s(I,"DIV",{class:!0});var ht=d(ve);g(Ie.$$.fragment,ht),br=c(ht),oe=s(ht,"P",{});var Ro=d(oe);yr=r(Ro,"Serializes this instance to a Python dictionary. 
Override the default "),nn=s(Ro,"EM",{});var zd=d(nn);kr=r(zd,"to_dict()"),zd.forEach(o),Tr=r(Ro," from "),tn=s(Ro,"EM",{});var Dd=d(tn);wr=r(Dd,"PretrainedConfig"),Dd.forEach(o),Er=r(Ro,"."),Ro.forEach(o),ht.forEach(o),I.forEach(o),et=c(n),ne=s(n,"H2",{class:!0});var ft=d(ne);be=s(ft,"A",{id:!0,class:!0,href:!0});var jd=d(be);rn=s(jd,"SPAN",{});var $d=d(rn);g(Le.$$.fragment,$d),$d.forEach(o),jd.forEach(o),xr=c(ft),an=s(ft,"SPAN",{});var qd=d(an);Mr=r(qd,"EncoderDecoderModel"),qd.forEach(o),ft.forEach(o),ot=c(n),E=s(n,"DIV",{class:!0});var j=d(E);g(Be.$$.fragment,j),zr=c(j),te=s(j,"P",{});var Go=d(te);Dr=r(Go,`This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via `),sn=s(Go,"CODE",{});var Pd=d(sn);jr=r(Pd,"from_pretrained()"),Pd.forEach(o),$r=r(Go,` function and the decoder is loaded via `),dn=s(Go,"CODE",{});var Fd=d(dn);qr=r(Fd,"from_pretrained()"),Fd.forEach(o),Pr=r(Go,` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),Go.forEach(o),Fr=c(j),Oe=s(j,"P",{});var ut=d(Oe);Cr=r(ut,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),We=s(ut,"A",{href:!0,rel:!0});var Cd=d(We);Ar=r(Cd,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Cd.forEach(o),Sr=r(ut,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),ut.forEach(o),Nr=c(j),cn=s(j,"P",{});var Ad=d(cn);Ir=r(Ad,`After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Ad.forEach(o),Lr=c(j),Re=s(j,"P",{});var gt=d(Re);Br=r(gt,"This model inherits from "),qo=s(gt,"A",{href:!0});var Sd=d(qo);Or=r(Sd,"PreTrainedModel"),Sd.forEach(o),Wr=r(gt,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gt.forEach(o),Rr=c(j),Ge=s(j,"P",{});var _t=d(Ge);Gr=r(_t,"This model is also a PyTorch "),Ue=s(_t,"A",{href:!0,rel:!0});var Nd=d(Ue);Ur=r(Nd,"torch.nn.Module"),Nd.forEach(o),Vr=r(_t,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_t.forEach(o),Hr=c(j),U=s(j,"P",{});var yo=d(U);Po=s(yo,"A",{href:!0});var Id=d(Po);Jr=r(Id,"EncoderDecoderModel"),Id.forEach(o),Yr=r(yo,` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth`),ln=s(yo,"EM",{});var Ld=d(ln);Zr=r(Ld,"~transformers.AutoModel.from_pretrained"),Ld.forEach(o),Kr=r(yo,` class method for the encoder and :meth`),pn=s(yo,"EM",{});var Bd=d(pn);Qr=r(Bd,"~transformers.AutoModelForCausalLM.from_pretrained"),Bd.forEach(o),Xr=r(yo," class method for the decoder."),yo.forEach(o),ea=c(j),C=s(j,"DIV",{class:!0});var J=d(C);g(Ve.$$.fragment,J),oa=c(J),re=s(J,"P",{});var Uo=d(re);na=r(Uo,"The "),Fo=s(Uo,"A",{href:!0});var Od=d(Fo);ta=r(Od,"EncoderDecoderModel"),Od.forEach(o),ra=r(Uo," forward method, overrides the "),mn=s(Uo,"CODE",{});var Wd=d(mn);aa=r(Wd,"__call__"),Wd.forEach(o),sa=r(Uo," special method."),Uo.forEach(o),da=c(J),g(ye.$$.fragment,J),ia=c(J),hn=s(J,"P",{});var Rd=d(hn);ca=r(Rd,"Examples:"),Rd.forEach(o),la=c(J),g(He.$$.fragment,J),J.forEach(o),pa=c(j),A=s(j,"DIV",{class:!0});var Y=d(A);g(Je.$$.fragment,Y),ma=c(Y),fn=s(Y,"P",{});var Gd=d(fn);ha=r(Gd,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),Gd.forEach(o),fa=c(Y),ae=s(Y,"P",{});var Vo=d(ae);ua=r(Vo,"The model is set in evaluation mode by default using "),un=s(Vo,"CODE",{});var Ud=d(un);ga=r(Ud,"model.eval()"),Ud.forEach(o),_a=r(Vo,` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `),gn=s(Vo,"CODE",{});var Vd=d(gn);va=r(Vd,"model.train()"),Vd.forEach(o),ba=r(Vo,"."),Vo.forEach(o),ya=c(Y),_n=s(Y,"P",{});var Hd=d(_n);ka=r(Hd,"Example:"),Hd.forEach(o),Ta=c(Y),g(Ye.$$.fragment,Y),Y.forEach(o),j.forEach(o),nt=c(n),se=s(n,"H2",{class:!0});var vt=d(se);ke=s(vt,"A",{id:!0,class:!0,href:!0});var Jd=d(ke);vn=s(Jd,"SPAN",{});var Yd=d(vn);g(Ze.$$.fragment,Yd),Yd.forEach(o),Jd.forEach(o),wa=c(vt),bn=s(vt,"SPAN",{});var Zd=d(bn);Ea=r(Zd,"TFEncoderDecoderModel"),Zd.forEach(o),vt.forEach(o),tt=c(n),x=s(n,"DIV",{class:!0});var $=d(x);g(Ke.$$.fragment,$),xa=c($),de=s($,"P",{});var Ho=d(de);Ma=r(Ho,`This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via `),yn=s(Ho,"CODE",{});var Kd=d(yn);za=r(Kd,"from_pretrained()"),Kd.forEach(o),Da=r(Ho,` function and the decoder is loaded via `),kn=s(Ho,"CODE",{});var Qd=d(kn);ja=r(Qd,"from_pretrained()"),Qd.forEach(o),$a=r(Ho,` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),Ho.forEach(o),qa=c($),Qe=s($,"P",{});var bt=d(Qe);Pa=r(bt,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Xe=s(bt,"A",{href:!0,rel:!0});var Xd=d(Xe);Fa=r(Xd,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Xd.forEach(o),Ca=r(bt,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. 
Liu.`),bt.forEach(o),Aa=c($),Tn=s($,"P",{});var ei=d(Tn);Sa=r(ei,`After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),ei.forEach(o),Na=c($),eo=s($,"P",{});var yt=d(eo);Ia=r(yt,"This model inherits from "),Co=s(yt,"A",{href:!0});var oi=d(Co);La=r(oi,"TFPreTrainedModel"),oi.forEach(o),Ba=r(yt,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yt.forEach(o),Oa=c($),oo=s($,"P",{});var kt=d(oo);Wa=r(kt,"This model is also a "),no=s(kt,"A",{href:!0,rel:!0});var ni=d(no);Ra=r(ni,"tf.keras.Model"),ni.forEach(o),Ga=r(kt,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),kt.forEach(o),Ua=c($),V=s($,"P",{});var ko=d(V);wn=s(ko,"CODE",{});var ti=d(wn);Va=r(ti,"TFEncoderDecoder"),ti.forEach(o),Ha=r(ko,` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth`),En=s(ko,"EM",{});var ri=d(En);Ja=r(ri,"~transformers.TFAutoModel.from_pretrained"),ri.forEach(o),Ya=r(ko,` class method for the encoder and :meth`),xn=s(ko,"EM",{});var ai=d(xn);Za=r(ai,"~transformers.TFAutoModelForCausalLM.from_pretrained"),ai.forEach(o),Ka=r(ko," class method for the decoder."),ko.forEach(o),Qa=c($),S=s($,"DIV",{class:!0});var Z=d(S);g(to.$$.fragment,Z),Xa=c(Z),ie=s(Z,"P",{});var Jo=d(ie);es=r(Jo,"The "),Ao=s(Jo,"A",{href:!0});var si=d(Ao);os=r(si,"TFEncoderDecoderModel"),si.forEach(o),ns=r(Jo," forward method, overrides the "),Mn=s(Jo,"CODE",{});var di=d(Mn);ts=r(di,"__call__"),di.forEach(o),rs=r(Jo," special method."),Jo.forEach(o),as=c(Z),g(Te.$$.fragment,Z),ss=c(Z),zn=s(Z,"P",{});var ii=d(zn);ds=r(ii,"Examples:"),ii.forEach(o),is=c(Z),g(ro.$$.fragment,Z),Z.forEach(o),cs=c($),L=s($,"DIV",{class:!0});var xe=d(L);g(ao.$$.fragment,xe),ls=c(xe),Dn=s(xe,"P",{});var ci=d(Dn);ps=r(ci,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),ci.forEach(o),ms=c(xe),jn=s(xe,"P",{});var li=d(jn);hs=r(li,"Example:"),li.forEach(o),fs=c(xe),g(so.$$.fragment,xe),xe.forEach(o),$.forEach(o),rt=c(n),ce=s(n,"H2",{class:!0});var Tt=d(ce);we=s(Tt,"A",{id:!0,class:!0,href:!0});var pi=d(we);$n=s(pi,"SPAN",{});var mi=d($n);g(io.$$.fragment,mi),mi.forEach(o),pi.forEach(o),us=c(Tt),qn=s(Tt,"SPAN",{});var hi=d(qn);gs=r(hi,"FlaxEncoderDecoderModel"),hi.forEach(o),Tt.forEach(o),at=c(n),M=s(n,"DIV",{class:!0});var q=d(M);g(co.$$.fragment,q),_s=c(q),le=s(q,"P",{});var Yo=d(le);vs=r(Yo,`This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via `),Pn=s(Yo,"CODE",{});var fi=d(Pn);bs=r(fi,"from_pretrained()"),fi.forEach(o),ys=r(Yo,` function and the decoder is loaded via `),Fn=s(Yo,"CODE",{});var ui=d(Fn);ks=r(ui,"from_pretrained()"),ui.forEach(o),Ts=r(Yo,` function. 
Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),Yo.forEach(o),ws=c(q),lo=s(q,"P",{});var wt=d(lo);Es=r(wt,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),po=s(wt,"A",{href:!0,rel:!0});var gi=d(po);xs=r(gi,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),gi.forEach(o),Ms=r(wt,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),wt.forEach(o),zs=c(q),Cn=s(q,"P",{});var _i=d(Cn);Ds=r(_i,`After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),_i.forEach(o),js=c(q),mo=s(q,"P",{});var Et=d(mo);$s=r(Et,"This model inherits from "),So=s(Et,"A",{href:!0});var vi=d(So);qs=r(vi,"FlaxPreTrainedModel"),vi.forEach(o),Ps=r(Et,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Et.forEach(o),Fs=c(q),ho=s(q,"P",{});var xt=d(ho);Cs=r(xt,"This model is also a Flax Linen "),fo=s(xt,"A",{href:!0,rel:!0});var bi=d(fo);As=r(bi,"flax.nn.Module"),bi.forEach(o),Ss=r(xt,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),xt.forEach(o),Ns=c(q),H=s(q,"P",{});var To=d(H);No=s(To,"A",{href:!0});var yi=d(No);Is=r(yi,"FlaxEncoderDecoderModel"),yi.forEach(o),Ls=r(To,` is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as decoder module when created with the :meth`),An=s(To,"EM",{});var ki=d(An);Bs=r(ki,"~transformers.FlaxAutoModel.from_pretrained"),ki.forEach(o),Os=r(To,` class method for the encoder and :meth`),Sn=s(To,"EM",{});var Ti=d(Sn);Ws=r(Ti,"~transformers.FlaxAutoModelForCausalLM.from_pretrained"),Ti.forEach(o),Rs=r(To," class method for the decoder."),To.forEach(o),Gs=c(q),N=s(q,"DIV",{class:!0});var K=d(N);g(uo.$$.fragment,K),Us=c(K),pe=s(K,"P",{});var Zo=d(pe);Vs=r(Zo,"The "),Io=s(Zo,"A",{href:!0});var wi=d(Io);Hs=r(wi,"FlaxEncoderDecoderModel"),wi.forEach(o),Js=r(Zo," forward method, overrides the "),Nn=s(Zo,"CODE",{});var Ei=d(Nn);Ys=r(Ei,"__call__"),Ei.forEach(o),Zs=r(Zo," special method."),Zo.forEach(o),Ks=c(K),g(Ee.$$.fragment,K),Qs=c(K),In=s(K,"P",{});var xi=d(In);Xs=r(xi,"Examples:"),xi.forEach(o),ed=c(K),g(go.$$.fragment,K),K.forEach(o),od=c(q),B=s(q,"DIV",{class:!0});var Me=d(B);g(_o.$$.fragment,Me),nd=c(Me),Ln=s(Me,"P",{});var Mi=d(Ln);td=r(Mi,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),Mi.forEach(o),rd=c(Me),Bn=s(Me,"P",{});var zi=d(Bn);ad=r(zi,"Example:"),zi.forEach(o),sd=c(Me),g(vo.$$.fragment,Me),Me.forEach(o),q.forEach(o),this.h()},h(){l(m,"name","hf:doc:metadata"),l(m,"content",JSON.stringify(Si)),l(w,"id","encoder-decoder-models"),l(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(w,"href","#encoder-decoder-models"),l(f,"class","relative 
group"),l(wo,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),l(De,"href","https://arxiv.org/abs/1907.12461"),l(De,"rel","nofollow"),l(Eo,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),l(xo,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel"),l(je,"href","https://arxiv.org/abs/1908.08345"),l(je,"rel","nofollow"),l(qe,"href","https://github.com/thomwolf"),l(qe,"rel","nofollow"),l(Pe,"href","https://github.com/ydshieh"),l(Pe,"rel","nofollow"),l(ue,"id","transformers.EncoderDecoderConfig"),l(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ue,"href","#transformers.EncoderDecoderConfig"),l(X,"class","relative group"),l(Mo,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig"),l(zo,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),l(Do,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(jo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l($o,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig"),l(_e,"class","docstring"),l(ve,"class","docstring"),l(z,"class","docstring"),l(be,"id","transformers.EncoderDecoderModel"),l(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(be,"href","#transformers.EncoderDecoderModel"),l(ne,"class","relative group"),l(We,"href","https://arxiv.org/abs/1907.12461"),l(We,"rel","nofollow"),l(qo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ue,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ue,"rel","nofollow"),l(Po,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),l(Fo,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),l(C,"class","docstring"),l(A,"class","docstring"),l(E,"class","docstring"),l(ke,"id","transformers.TFEncoderDecoderModel"),l(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ke,"href","#transformers.TFEncoderDecoderModel"),l(se,"class","relative group"),l(Xe,"href","https://arxiv.org/abs/1907.12461"),l(Xe,"rel","nofollow"),l(Co,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),l(no,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(no,"rel","nofollow"),l(Ao,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.TFEncoderDecoderModel"),l(S,"class","docstring"),l(L,"class","docstring"),l(x,"class","docstring"),l(we,"id","transformers.FlaxEncoderDecoderModel"),l(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(we,"href","#transformers.FlaxEncoderDecoderModel"),l(ce,"class","relative 
group"),l(po,"href","https://arxiv.org/abs/1907.12461"),l(po,"rel","nofollow"),l(So,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(fo,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),l(fo,"rel","nofollow"),l(No,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.FlaxEncoderDecoderModel"),l(Io,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.FlaxEncoderDecoderModel"),l(N,"class","docstring"),l(B,"class","docstring"),l(M,"class","docstring")},m(n,p){e(document.head,m),h(n,D,p),h(n,f,p),e(f,w),e(w,P),_(k,P,null),e(f,T),e(f,F),e(F,Mt),h(n,Un,p),h(n,me,p),e(me,zt),e(me,wo),e(wo,Dt),e(me,jt),h(n,Vn,p),h(n,he,p),e(he,$t),e(he,De),e(De,qt),e(he,Pt),h(n,Hn,p),h(n,fe,p),e(fe,Ft),e(fe,Eo),e(Eo,Ct),e(fe,At),h(n,Jn,p),h(n,W,p),e(W,St),e(W,xo),e(xo,Nt),e(W,It),e(W,je),e(je,Lt),e(W,Bt),h(n,Yn,p),h(n,R,p),e(R,Ot),e(R,Ko),e(Ko,Wt),e(R,Rt),e(R,Qo),e(Qo,Gt),e(R,Ut),h(n,Zn,p),_($e,n,p),h(n,Kn,p),h(n,G,p),e(G,Vt),e(G,qe),e(qe,Ht),e(G,Jt),e(G,Pe),e(Pe,Yt),e(G,Zt),h(n,Qn,p),h(n,X,p),e(X,ue),e(ue,Xo),_(Fe,Xo,null),e(X,Kt),e(X,en),e(en,Qt),h(n,Xn,p),h(n,z,p),_(Ce,z,null),e(z,Xt),e(z,ge),e(ge,Mo),e(Mo,er),e(ge,or),e(ge,zo),e(zo,nr),e(ge,tr),e(z,rr),e(z,ee),e(ee,ar),e(ee,Do),e(Do,sr),e(ee,dr),e(ee,jo),e(jo,ir),e(ee,cr),e(z,lr),e(z,on),e(on,pr),e(z,mr),_(Ae,z,null),e(z,hr),e(z,_e),_(Se,_e,null),e(_e,fr),e(_e,Ne),e(Ne,ur),e(Ne,$o),e($o,gr),e(Ne,_r),e(z,vr),e(z,ve),_(Ie,ve,null),e(ve,br),e(ve,oe),e(oe,yr),e(oe,nn),e(nn,kr),e(oe,Tr),e(oe,tn),e(tn,wr),e(oe,Er),h(n,et,p),h(n,ne,p),e(ne,be),e(be,rn),_(Le,rn,null),e(ne,xr),e(ne,an),e(an,Mr),h(n,ot,p),h(n,E,p),_(Be,E,null),e(E,zr),e(E,te),e(te,Dr),e(te,sn),e(sn,jr),e(te,$r),e(te,dn),e(dn,qr),e(te,Pr),e(E,Fr),e(E,Oe),e(Oe,Cr),e(Oe,We),e(We,Ar),e(Oe,Sr),e(E,Nr),e(E,cn),e(cn,Ir),e(E,Lr),e(E,Re),e(Re,Br),e(Re,qo),e(qo,Or),e(Re,Wr),e(E,Rr),e(E,Ge),e(Ge,Gr),e(Ge,Ue),e(Ue,Ur),e(Ge,Vr),e(E,Hr),e(E,U),e(U,Po),e(Po,Jr),e(U,Yr),e(U,ln),e(ln,Zr),e(U,Kr),e(U,pn),e(pn,Qr),e(U,Xr),e(E,ea),e(E,C),_(Ve,C,null),e(C,oa),e(C,re),e(re,na),e(re,Fo),e(Fo,ta),e(re,ra),e(re,mn),e(mn,aa),e(re,sa),e(C,da),_(ye,C,null),e(C,ia),e(C,hn),e(hn,ca),e(C,la),_(He,C,null),e(E,pa),e(E,A),_(Je,A,null),e(A,ma),e(A,fn),e(fn,ha),e(A,fa),e(A,ae),e(ae,ua),e(ae,un),e(un,ga),e(ae,_a),e(ae,gn),e(gn,va),e(ae,ba),e(A,ya),e(A,_n),e(_n,ka),e(A,Ta),_(Ye,A,null),h(n,nt,p),h(n,se,p),e(se,ke),e(ke,vn),_(Ze,vn,null),e(se,wa),e(se,bn),e(bn,Ea),h(n,tt,p),h(n,x,p),_(Ke,x,null),e(x,xa),e(x,de),e(de,Ma),e(de,yn),e(yn,za),e(de,Da),e(de,kn),e(kn,ja),e(de,$a),e(x,qa),e(x,Qe),e(Qe,Pa),e(Qe,Xe),e(Xe,Fa),e(Qe,Ca),e(x,Aa),e(x,Tn),e(Tn,Sa),e(x,Na),e(x,eo),e(eo,Ia),e(eo,Co),e(Co,La),e(eo,Ba),e(x,Oa),e(x,oo),e(oo,Wa),e(oo,no),e(no,Ra),e(oo,Ga),e(x,Ua),e(x,V),e(V,wn),e(wn,Va),e(V,Ha),e(V,En),e(En,Ja),e(V,Ya),e(V,xn),e(xn,Za),e(V,Ka),e(x,Qa),e(x,S),_(to,S,null),e(S,Xa),e(S,ie),e(ie,es),e(ie,Ao),e(Ao,os),e(ie,ns),e(ie,Mn),e(Mn,ts),e(ie,rs),e(S,as),_(Te,S,null),e(S,ss),e(S,zn),e(zn,ds),e(S,is),_(ro,S,null),e(x,cs),e(x,L),_(ao,L,null),e(L,ls),e(L,Dn),e(Dn,ps),e(L,ms),e(L,jn),e(jn,hs),e(L,fs),_(so,L,null),h(n,rt,p),h(n,ce,p),e(ce,we),e(we,$n),_(io,$n,null),e(ce,us),e(ce,qn),e(qn,gs),h(n,at,p),h(n,M,p),_(co,M,null),e(M,_s),e(M,le),e(le,vs),e(le,Pn),e(Pn,bs),e(le,ys),e(le,Fn),e(Fn,ks),e(le,Ts),e(M,ws),e(M,lo),e(lo,Es),e(lo,po),e(po,xs),e(lo,Ms),e(M,zs),e(M,Cn),e(Cn,Ds),e(M,js),e(M,mo),e(mo,$s),e(mo,So),e(So,qs),e(mo,Ps),e(M,Fs),e(M,ho),e(ho,Cs),e(ho,fo),e(fo,As),e(ho,Ss),e(M,Ns),e(M,H),e(H,No),e(No,Is),e(H,Ls),e(H,An),e(An,B
s),e(H,Os),e(H,Sn),e(Sn,Ws),e(H,Rs),e(M,Gs),e(M,N),_(uo,N,null),e(N,Us),e(N,pe),e(pe,Vs),e(pe,Io),e(Io,Hs),e(pe,Js),e(pe,Nn),e(Nn,Ys),e(pe,Zs),e(N,Ks),_(Ee,N,null),e(N,Qs),e(N,In),e(In,Xs),e(N,ed),_(go,N,null),e(M,od),e(M,B),_(_o,B,null),e(B,nd),e(B,Ln),e(Ln,td),e(B,rd),e(B,Bn),e(Bn,ad),e(B,sd),_(vo,B,null),st=!0},p(n,[p]){const bo={};p&2&&(bo.$$scope={dirty:p,ctx:n}),ye.$set(bo);const On={};p&2&&(On.$$scope={dirty:p,ctx:n}),Te.$set(On);const Wn={};p&2&&(Wn.$$scope={dirty:p,ctx:n}),Ee.$set(Wn)},i(n){st||(v(k.$$.fragment,n),v($e.$$.fragment,n),v(Fe.$$.fragment,n),v(Ce.$$.fragment,n),v(Ae.$$.fragment,n),v(Se.$$.fragment,n),v(Ie.$$.fragment,n),v(Le.$$.fragment,n),v(Be.$$.fragment,n),v(Ve.$$.fragment,n),v(ye.$$.fragment,n),v(He.$$.fragment,n),v(Je.$$.fragment,n),v(Ye.$$.fragment,n),v(Ze.$$.fragment,n),v(Ke.$$.fragment,n),v(to.$$.fragment,n),v(Te.$$.fragment,n),v(ro.$$.fragment,n),v(ao.$$.fragment,n),v(so.$$.fragment,n),v(io.$$.fragment,n),v(co.$$.fragment,n),v(uo.$$.fragment,n),v(Ee.$$.fragment,n),v(go.$$.fragment,n),v(_o.$$.fragment,n),v(vo.$$.fragment,n),st=!0)},o(n){b(k.$$.fragment,n),b($e.$$.fragment,n),b(Fe.$$.fragment,n),b(Ce.$$.fragment,n),b(Ae.$$.fragment,n),b(Se.$$.fragment,n),b(Ie.$$.fragment,n),b(Le.$$.fragment,n),b(Be.$$.fragment,n),b(Ve.$$.fragment,n),b(ye.$$.fragment,n),b(He.$$.fragment,n),b(Je.$$.fragment,n),b(Ye.$$.fragment,n),b(Ze.$$.fragment,n),b(Ke.$$.fragment,n),b(to.$$.fragment,n),b(Te.$$.fragment,n),b(ro.$$.fragment,n),b(ao.$$.fragment,n),b(so.$$.fragment,n),b(io.$$.fragment,n),b(co.$$.fragment,n),b(uo.$$.fragment,n),b(Ee.$$.fragment,n),b(go.$$.fragment,n),b(_o.$$.fragment,n),b(vo.$$.fragment,n),st=!1},d(n){o(m),n&&o(D),n&&o(f),y(k),n&&o(Un),n&&o(me),n&&o(Vn),n&&o(he),n&&o(Hn),n&&o(fe),n&&o(Jn),n&&o(W),n&&o(Yn),n&&o(R),n&&o(Zn),y($e,n),n&&o(Kn),n&&o(G),n&&o(Qn),n&&o(X),y(Fe),n&&o(Xn),n&&o(z),y(Ce),y(Ae),y(Se),y(Ie),n&&o(et),n&&o(ne),y(Le),n&&o(ot),n&&o(E),y(Be),y(Ve),y(ye),y(He),y(Je),y(Ye),n&&o(nt),n&&o(se),y(Ze),n&&o(tt),n&&o(x),y(Ke),y(to),y(Te),y(ro),y(ao),y(so),n&&o(rt),n&&o(ce),y(io),n&&o(at),n&&o(M),y(co),y(uo),y(Ee),y(go),y(_o),y(vo)}}}const Si={local:"encoder-decoder-models",sections:[{local:"transformers.EncoderDecoderConfig",title:"EncoderDecoderConfig"},{local:"transformers.EncoderDecoderModel",title:"EncoderDecoderModel"},{local:"transformers.TFEncoderDecoderModel",title:"TFEncoderDecoderModel"},{local:"transformers.FlaxEncoderDecoderModel",title:"FlaxEncoderDecoderModel"}],title:"Encoder Decoder Models"};function Ni(Q,m,D){let{fw:f}=m;return Q.$$set=w=>{"fw"in w&&D(0,f=w.fw)},[f]}class Gi extends Di{constructor(m){super();ji(this,m,Ni,Ai,$i,{fw:0})}}export{Gi as default,Si as metadata};
9,957
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/xlmprophetnet.mdx-15c467c5.js
import{S as Vn,i as Wn,s as Un,e as o,k as p,w as m,t as a,L as Jn,c as r,d as s,m as d,a as n,x as u,h as i,b as l,J as t,g as h,y as f,K as Yn,q as g,o as _,B as k}from"../../chunks/vendor-b1433968.js";import{D as y}from"../../chunks/Docstring-ff504c58.js";import{C as Ot}from"../../chunks/CodeBlock-a320dbd7.js";import{I as R}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Qn(As){let X,We,M,x,dt,pe,Ss,ct,Is,Ht,j,ht,Fs,Gs,de,Bs,Rs,Vt,q,O,mt,ce,Os,ut,Hs,Wt,H,Vs,he,Ws,Us,Ut,Ue,Js,Jt,Je,Ys,Yt,Ye,ft,Qs,Qt,V,Zs,me,Ks,eo,Zt,T,W,gt,ue,to,_t,so,Kt,D,fe,oo,ge,ro,Qe,no,ao,es,C,U,kt,_e,io,vt,lo,ts,v,ke,po,E,co,Ze,ho,mo,Ke,uo,fo,ve,go,_o,ko,Pe,vo,et,Po,wo,bo,A,No,Pt,$o,Lo,wt,Mo,xo,Eo,z,we,yo,bt,Xo,zo,be,tt,jo,Nt,qo,To,st,Do,$t,Co,Ao,J,Ne,So,Lt,Io,Fo,Y,$e,Go,Mt,Bo,Ro,Q,Le,Oo,Me,Ho,xt,Vo,Wo,ss,S,Z,Et,xe,Uo,yt,Jo,os,w,Ee,Yo,ye,Qo,ot,Zo,Ko,er,Xt,tr,sr,Xe,rs,I,K,zt,ze,or,jt,rr,ns,b,je,nr,qe,ar,rt,ir,lr,pr,qt,dr,cr,Te,as,F,ee,Tt,De,hr,Dt,mr,is,N,Ce,ur,Ae,fr,nt,gr,_r,kr,Ct,vr,Pr,Se,ls,G,te,At,Ie,wr,St,br,ps,$,Fe,Nr,Ge,$r,at,Lr,Mr,xr,It,Er,yr,Be,ds,B,se,Ft,Re,Xr,Gt,zr,cs,L,Oe,jr,He,qr,it,Tr,Dr,Cr,Bt,Ar,Sr,Ve,hs;return pe=new R({}),ce=new R({}),ue=new R({}),fe=new y({props:{name:"class transformers.XLMProphetNetConfig",anchor:"transformers.XLMProphetNetConfig",parameters:[{name:"activation_dropout",val:" = 0.1"},{name:"activation_function",val:" = 'gelu'"},{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 1024"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"num_encoder_layers",val:" = 12"},{name:"num_encoder_attention_heads",val:" = 16"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"num_decoder_layers",val:" = 12"},{name:"num_decoder_attention_heads",val:" = 16"},{name:"attention_dropout",val:" = 0.1"},{name:"dropout",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"init_std",val:" = 0.02"},{name:"is_encoder_decoder",val:" = True"},{name:"add_cross_attention",val:" = True"},{name:"decoder_start_token_id",val:" = 0"},{name:"ngram",val:" = 2"},{name:"num_buckets",val:" = 32"},{name:"relative_max_distance",val:" = 128"},{name:"disable_ngram_loss",val:" = False"},{name:"eps",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py#L29"}}),_e=new R({}),ke=new y({props:{name:"class transformers.XLMProphetNetTokenizer",anchor:"transformers.XLMProphetNetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '[SEP]'"},{name:"eos_token",val:" = '[SEP]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"unk_token",val:" = '[UNK]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py#L57",parametersDescription:[{anchor:"transformers.XLMProphetNetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.XLMProphetNetTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The 
beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.XLMProphetNetTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.XLMProphetNetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.XLMProphetNetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.XLMProphetNetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.XLMProphetNetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.XLMProphetNetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.XLMProphetNetTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.XLMProphetNetTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),we=new y({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.XLMProphetNetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py#L308",parametersDescription:[{anchor:"transformers.XLMProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.XLMProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ne=new y({props:{name:"convert_tokens_to_string",anchor:"transformers.XLMProphetNetTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py#L290"}}),$e=new y({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.XLMProphetNetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py#L239",parametersDescription:[{anchor:"transformers.XLMProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Le=new y({props:{name:"get_special_tokens_mask",anchor:"transformers.XLMProphetNetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py#L211",parametersDescription:[{anchor:"transformers.XLMProphetNetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.XLMProphetNetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.XLMProphetNetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),xe=new R({}),Ee=new y({props:{name:"class transformers.XLMProphetNetModel",anchor:"transformers.XLMProphetNetModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py#L84"}}),Xe=new Ot({props:{code:`from transformers import XLMProphetNetTokenizer, XLMProphetNetModel tokenizer = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') model = XLMProphetNetModel.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state # main stream hidden states last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMProphetNetTokenizer, XLMProphetNetModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMProphetNetModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, 
return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-comment"># main stream hidden states</span> <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states_ngram = outputs.last_hidden_state_ngram <span class="hljs-comment"># predict hidden states</span>`}}),ze=new R({}),je=new y({props:{name:"class transformers.XLMProphetNetEncoder",anchor:"transformers.XLMProphetNetEncoder",parameters:[{name:"config",val:": ProphetNetConfig"},{name:"word_embeddings",val:": Embedding = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py#L38"}}),Te=new Ot({props:{code:`from transformers import XLMProphetNetTokenizer, XLMProphetNetEncoder import torch tokenizer = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') model = XLMProphetNetEncoder.from_pretrained('patrickvonplaten/xprophetnet-large-uncased-standalone') assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMProphetNetTokenizer, XLMProphetNetEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMProphetNetEncoder.from_pretrained(<span class="hljs-string">&#x27;patrickvonplaten/xprophetnet-large-uncased-standalone&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),De=new R({}),Ce=new y({props:{name:"class transformers.XLMProphetNetDecoder",anchor:"transformers.XLMProphetNetDecoder",parameters:[{name:"config",val:": ProphetNetConfig"},{name:"word_embeddings",val:": Embedding = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py#L61"}}),Se=new Ot({props:{code:`from transformers import XLMProphetNetTokenizer, XLMProphetNetDecoder import torch tokenizer = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') model = 
XLMProphetNetDecoder.from_pretrained('patrickvonplaten/xprophetnet-large-uncased-standalone', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMProphetNetTokenizer, XLMProphetNetDecoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMProphetNetDecoder.from_pretrained(<span class="hljs-string">&#x27;patrickvonplaten/xprophetnet-large-uncased-standalone&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ie=new R({}),Fe=new y({props:{name:"class transformers.XLMProphetNetForConditionalGeneration",anchor:"transformers.XLMProphetNetForConditionalGeneration",parameters:[{name:"config",val:": ProphetNetConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py#L108"}}),Be=new Ot({props:{code:`from transformers import XLMProphetNetTokenizer, XLMProphetNetForConditionalGeneration tokenizer = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') model = XLMProphetNetForConditionalGeneration.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) logits_next_token = outputs.logits # logits to predict next token as usual logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... 
next tokens,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMProphetNetTokenizer, XLMProphetNetForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMProphetNetForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_next_token = outputs.logits <span class="hljs-comment"># logits to predict next token as usual</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_ngram_next_tokens = outputs.logits_ngram <span class="hljs-comment"># logits to predict 2nd, 3rd, ... next tokens</span>`}}),Re=new R({}),Oe=new y({props:{name:"class transformers.XLMProphetNetForCausalLM",anchor:"transformers.XLMProphetNetForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py#L132"}}),Ve=new Ot({props:{code:`from transformers import XLMProphetNetTokenizer, XLMProphetNetForCausalLM import torch tokenizer = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') model = XLMProphetNetForCausalLM.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # Model can also be used with EncoderDecoder framework from transformers import EncoderDecoderModel, XLMProphetNetTokenizer, XLMRobertaTokenizer import torch tokenizer_enc = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large') tokenizer_dec = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased') model = EncoderDecoderModel.from_encoder_decoder_pretrained("xlm-roberta-large", 'microsoft/xprophetnet-large-wiki100-cased') ARTICLE = ( "the us state department said wednesday it had received no " "formal word from bolivia that it was expelling the us ambassador there " "but said the charges made against him are \`\` baseless ." 
) input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids labels = tokenizer_dec("us rejects charges against its ambassador in bolivia", return_tensors="pt").input_ids outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:]) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMProphetNetTokenizer, XLMProphetNetForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMProphetNetForCausalLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model can also be used with EncoderDecoder framework</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> EncoderDecoderModel, XLMProphetNetTokenizer, XLMRobertaTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_enc = XLMRobertaTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-roberta-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_dec = XLMProphetNetTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&quot;xlm-roberta-large&quot;</span>, <span class="hljs-string">&#x27;microsoft/xprophetnet-large-wiki100-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE = ( <span class="hljs-meta">... </span><span class="hljs-string">&quot;the us state department said wednesday it had received no &quot;</span> <span class="hljs-meta">... </span><span class="hljs-string">&quot;formal word from bolivia that it was expelling the us ambassador there &quot;</span> <span class="hljs-meta">... </span><span class="hljs-string">&quot;but said the charges made against him are \`\` baseless .&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer_enc(ARTICLE, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer_dec(<span class="hljs-string">&quot;us rejects charges against its ambassador in bolivia&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-<span class="hljs-number">1</span>], labels=labels[:, <span class="hljs-number">1</span>:]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),{c(){X=o("meta"),We=p(),M=o("h1"),x=o("a"),dt=o("span"),m(pe.$$.fragment),Ss=p(),ct=o("span"),Is=a("XLM-ProphetNet"),Ht=p(),j=o("p"),ht=o("strong"),Fs=a("DISCLAIMER:"),Gs=a(" If you see something strange, file a "),de=o("a"),Bs=a("Github Issue"),Rs=a(` and assign @patrickvonplaten`),Vt=p(),q=o("h2"),O=o("a"),mt=o("span"),m(ce.$$.fragment),Os=p(),ut=o("span"),Hs=a("Overview"),Wt=p(),H=o("p"),Vs=a("The XLM-ProphetNet model was proposed in "),he=o("a"),Ws=a("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),Us=a(` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020.`),Ut=p(),Ue=o("p"),Js=a(`XLM-ProphetNet is an encoder-decoder model and can predict n-future tokens for \u201Cngram\u201D language modeling instead of just the next token. Its architecture is identical to ProhpetNet, but the model was trained on the multi-lingual \u201Cwiki100\u201D Wikipedia dump.`),Jt=p(),Je=o("p"),Ys=a("The abstract from the paper is the following:"),Yt=p(),Ye=o("p"),ft=o("em"),Qs=a(`In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.`),Qt=p(),V=o("p"),Zs=a("The Authors\u2019 code can be found "),me=o("a"),Ks=a("here"),eo=a("."),Zt=p(),T=o("h2"),W=o("a"),gt=o("span"),m(ue.$$.fragment),to=p(),_t=o("span"),so=a("XLMProphetNetConfig"),Kt=p(),D=o("div"),m(fe.$$.fragment),oo=p(),ge=o("p"),ro=a("This class overrides "),Qe=o("a"),no=a("ProphetNetConfig"),ao=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),es=p(),C=o("h2"),U=o("a"),kt=o("span"),m(_e.$$.fragment),io=p(),vt=o("span"),lo=a("XLMProphetNetTokenizer"),ts=p(),v=o("div"),m(ke.$$.fragment),po=p(),E=o("p"),co=a("Adapted from "),Ze=o("a"),ho=a("RobertaTokenizer"),mo=a(" and "),Ke=o("a"),uo=a("XLNetTokenizer"),fo=a(`. 
Based on `),ve=o("a"),go=a("SentencePiece"),_o=a("."),ko=p(),Pe=o("p"),vo=a("This tokenizer inherits from "),et=o("a"),Po=a("PreTrainedTokenizer"),wo=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),bo=p(),A=o("p"),No=a(`Attributes: sp_model (`),Pt=o("code"),$o=a("SentencePieceProcessor"),Lo=a(`): The `),wt=o("em"),Mo=a("SentencePiece"),xo=a(" processor that is used for every conversion (string, tokens and IDs)."),Eo=p(),z=o("div"),m(we.$$.fragment),yo=p(),bt=o("p"),Xo=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A XLMProphetNet sequence has the following format:`),zo=p(),be=o("ul"),tt=o("li"),jo=a("single sequence: "),Nt=o("code"),qo=a("X [SEP]"),To=p(),st=o("li"),Do=a("pair of sequences: "),$t=o("code"),Co=a("A [SEP] B [SEP]"),Ao=p(),J=o("div"),m(Ne.$$.fragment),So=p(),Lt=o("p"),Io=a("Converts a sequence of tokens (strings for sub-words) in a single string."),Fo=p(),Y=o("div"),m($e.$$.fragment),Go=p(),Mt=o("p"),Bo=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet does not make use of token type ids, therefore a list of zeros is returned.`),Ro=p(),Q=o("div"),m(Le.$$.fragment),Oo=p(),Me=o("p"),Ho=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),xt=o("code"),Vo=a("prepare_for_model"),Wo=a(" method."),ss=p(),S=o("h2"),Z=o("a"),Et=o("span"),m(xe.$$.fragment),Uo=p(),yt=o("span"),Jo=a("XLMProphetNetModel"),os=p(),w=o("div"),m(Ee.$$.fragment),Yo=p(),ye=o("p"),Qo=a("This class overrides "),ot=o("a"),Zo=a("ProphetNetModel"),Ko=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),er=p(),Xt=o("p"),tr=a("Example:"),sr=p(),m(Xe.$$.fragment),rs=p(),I=o("h2"),K=o("a"),zt=o("span"),m(ze.$$.fragment),or=p(),jt=o("span"),rr=a("XLMProphetNetEncoder"),ns=p(),b=o("div"),m(je.$$.fragment),nr=p(),qe=o("p"),ar=a("This class overrides "),rt=o("a"),ir=a("ProphetNetEncoder"),lr=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),pr=p(),qt=o("p"),dr=a("Example:"),cr=p(),m(Te.$$.fragment),as=p(),F=o("h2"),ee=o("a"),Tt=o("span"),m(De.$$.fragment),hr=p(),Dt=o("span"),mr=a("XLMProphetNetDecoder"),is=p(),N=o("div"),m(Ce.$$.fragment),ur=p(),Ae=o("p"),fr=a("This class overrides "),nt=o("a"),gr=a("ProphetNetDecoder"),_r=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),kr=p(),Ct=o("p"),vr=a("Example:"),Pr=p(),m(Se.$$.fragment),ls=p(),G=o("h2"),te=o("a"),At=o("span"),m(Ie.$$.fragment),wr=p(),St=o("span"),br=a("XLMProphetNetForConditionalGeneration"),ps=p(),$=o("div"),m(Fe.$$.fragment),Nr=p(),Ge=o("p"),$r=a("This class overrides "),at=o("a"),Lr=a("ProphetNetForConditionalGeneration"),Mr=a(`. Please check the superclass for the appropriate documentation alongside usage examples.`),xr=p(),It=o("p"),Er=a("Example:"),yr=p(),m(Be.$$.fragment),ds=p(),B=o("h2"),se=o("a"),Ft=o("span"),m(Re.$$.fragment),Xr=p(),Gt=o("span"),zr=a("XLMProphetNetForCausalLM"),cs=p(),L=o("div"),m(Oe.$$.fragment),jr=p(),He=o("p"),qr=a("This class overrides "),it=o("a"),Tr=a("ProphetNetForCausalLM"),Dr=a(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Cr=p(),Bt=o("p"),Ar=a("Example:"),Sr=p(),m(Ve.$$.fragment),this.h()},l(e){const c=Jn('[data-svelte="svelte-1phssyn"]',document.head);X=r(c,"META",{name:!0,content:!0}),c.forEach(s),We=d(e),M=r(e,"H1",{class:!0});var ms=n(M);x=r(ms,"A",{id:!0,class:!0,href:!0});var Gr=n(x);dt=r(Gr,"SPAN",{});var Br=n(dt);u(pe.$$.fragment,Br),Br.forEach(s),Gr.forEach(s),Ss=d(ms),ct=r(ms,"SPAN",{});var Rr=n(ct);Is=i(Rr,"XLM-ProphetNet"),Rr.forEach(s),ms.forEach(s),Ht=d(e),j=r(e,"P",{});var Rt=n(j);ht=r(Rt,"STRONG",{});var Or=n(ht);Fs=i(Or,"DISCLAIMER:"),Or.forEach(s),Gs=i(Rt," If you see something strange, file a "),de=r(Rt,"A",{href:!0,rel:!0});var Hr=n(de);Bs=i(Hr,"Github Issue"),Hr.forEach(s),Rs=i(Rt,` and assign @patrickvonplaten`),Rt.forEach(s),Vt=d(e),q=r(e,"H2",{class:!0});var us=n(q);O=r(us,"A",{id:!0,class:!0,href:!0});var Vr=n(O);mt=r(Vr,"SPAN",{});var Wr=n(mt);u(ce.$$.fragment,Wr),Wr.forEach(s),Vr.forEach(s),Os=d(us),ut=r(us,"SPAN",{});var Ur=n(ut);Hs=i(Ur,"Overview"),Ur.forEach(s),us.forEach(s),Wt=d(e),H=r(e,"P",{});var fs=n(H);Vs=i(fs,"The XLM-ProphetNet model was proposed in "),he=r(fs,"A",{href:!0,rel:!0});var Jr=n(he);Ws=i(Jr,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),Jr.forEach(s),Us=i(fs,` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020.`),fs.forEach(s),Ut=d(e),Ue=r(e,"P",{});var Yr=n(Ue);Js=i(Yr,`XLM-ProphetNet is an encoder-decoder model and can predict n-future tokens for \u201Cngram\u201D language modeling instead of just the next token. Its architecture is identical to ProhpetNet, but the model was trained on the multi-lingual \u201Cwiki100\u201D Wikipedia dump.`),Yr.forEach(s),Jt=d(e),Je=r(e,"P",{});var Qr=n(Je);Ys=i(Qr,"The abstract from the paper is the following:"),Qr.forEach(s),Yt=d(e),Ye=r(e,"P",{});var Zr=n(Ye);ft=r(Zr,"EM",{});var Kr=n(ft);Qs=i(Kr,`In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. 
Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.`),Kr.forEach(s),Zr.forEach(s),Qt=d(e),V=r(e,"P",{});var gs=n(V);Zs=i(gs,"The Authors\u2019 code can be found "),me=r(gs,"A",{href:!0,rel:!0});var en=n(me);Ks=i(en,"here"),en.forEach(s),eo=i(gs,"."),gs.forEach(s),Zt=d(e),T=r(e,"H2",{class:!0});var _s=n(T);W=r(_s,"A",{id:!0,class:!0,href:!0});var tn=n(W);gt=r(tn,"SPAN",{});var sn=n(gt);u(ue.$$.fragment,sn),sn.forEach(s),tn.forEach(s),to=d(_s),_t=r(_s,"SPAN",{});var on=n(_t);so=i(on,"XLMProphetNetConfig"),on.forEach(s),_s.forEach(s),Kt=d(e),D=r(e,"DIV",{class:!0});var ks=n(D);u(fe.$$.fragment,ks),oo=d(ks),ge=r(ks,"P",{});var vs=n(ge);ro=i(vs,"This class overrides "),Qe=r(vs,"A",{href:!0});var rn=n(Qe);no=i(rn,"ProphetNetConfig"),rn.forEach(s),ao=i(vs,`. Please check the superclass for the appropriate documentation alongside usage examples.`),vs.forEach(s),ks.forEach(s),es=d(e),C=r(e,"H2",{class:!0});var Ps=n(C);U=r(Ps,"A",{id:!0,class:!0,href:!0});var nn=n(U);kt=r(nn,"SPAN",{});var an=n(kt);u(_e.$$.fragment,an),an.forEach(s),nn.forEach(s),io=d(Ps),vt=r(Ps,"SPAN",{});var ln=n(vt);lo=i(ln,"XLMProphetNetTokenizer"),ln.forEach(s),Ps.forEach(s),ts=d(e),v=r(e,"DIV",{class:!0});var P=n(v);u(ke.$$.fragment,P),po=d(P),E=r(P,"P",{});var oe=n(E);co=i(oe,"Adapted from "),Ze=r(oe,"A",{href:!0});var pn=n(Ze);ho=i(pn,"RobertaTokenizer"),pn.forEach(s),mo=i(oe," and "),Ke=r(oe,"A",{href:!0});var dn=n(Ke);uo=i(dn,"XLNetTokenizer"),dn.forEach(s),fo=i(oe,`. Based on `),ve=r(oe,"A",{href:!0,rel:!0});var cn=n(ve);go=i(cn,"SentencePiece"),cn.forEach(s),_o=i(oe,"."),oe.forEach(s),ko=d(P),Pe=r(P,"P",{});var ws=n(Pe);vo=i(ws,"This tokenizer inherits from "),et=r(ws,"A",{href:!0});var hn=n(et);Po=i(hn,"PreTrainedTokenizer"),hn.forEach(s),wo=i(ws,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ws.forEach(s),bo=d(P),A=r(P,"P",{});var lt=n(A);No=i(lt,`Attributes: sp_model (`),Pt=r(lt,"CODE",{});var mn=n(Pt);$o=i(mn,"SentencePieceProcessor"),mn.forEach(s),Lo=i(lt,`): The `),wt=r(lt,"EM",{});var un=n(wt);Mo=i(un,"SentencePiece"),un.forEach(s),xo=i(lt," processor that is used for every conversion (string, tokens and IDs)."),lt.forEach(s),Eo=d(P),z=r(P,"DIV",{class:!0});var pt=n(z);u(we.$$.fragment,pt),yo=d(pt),bt=r(pt,"P",{});var fn=n(bt);Xo=i(fn,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A XLMProphetNet sequence has the following format:`),fn.forEach(s),zo=d(pt),be=r(pt,"UL",{});var bs=n(be);tt=r(bs,"LI",{});var Ir=n(tt);jo=i(Ir,"single sequence: "),Nt=r(Ir,"CODE",{});var gn=n(Nt);qo=i(gn,"X [SEP]"),gn.forEach(s),Ir.forEach(s),To=d(bs),st=r(bs,"LI",{});var Fr=n(st);Do=i(Fr,"pair of sequences: "),$t=r(Fr,"CODE",{});var _n=n($t);Co=i(_n,"A [SEP] B [SEP]"),_n.forEach(s),Fr.forEach(s),bs.forEach(s),pt.forEach(s),Ao=d(P),J=r(P,"DIV",{class:!0});var Ns=n(J);u(Ne.$$.fragment,Ns),So=d(Ns),Lt=r(Ns,"P",{});var kn=n(Lt);Io=i(kn,"Converts a sequence of tokens (strings for sub-words) in a single string."),kn.forEach(s),Ns.forEach(s),Fo=d(P),Y=r(P,"DIV",{class:!0});var $s=n(Y);u($e.$$.fragment,$s),Go=d($s),Mt=r($s,"P",{});var vn=n(Mt);Bo=i(vn,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
XLMProphetNet does not make use of token type ids, therefore a list of zeros is returned.`),vn.forEach(s),$s.forEach(s),Ro=d(P),Q=r(P,"DIV",{class:!0});var Ls=n(Q);u(Le.$$.fragment,Ls),Oo=d(Ls),Me=r(Ls,"P",{});var Ms=n(Me);Ho=i(Ms,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),xt=r(Ms,"CODE",{});var Pn=n(xt);Vo=i(Pn,"prepare_for_model"),Pn.forEach(s),Wo=i(Ms," method."),Ms.forEach(s),Ls.forEach(s),P.forEach(s),ss=d(e),S=r(e,"H2",{class:!0});var xs=n(S);Z=r(xs,"A",{id:!0,class:!0,href:!0});var wn=n(Z);Et=r(wn,"SPAN",{});var bn=n(Et);u(xe.$$.fragment,bn),bn.forEach(s),wn.forEach(s),Uo=d(xs),yt=r(xs,"SPAN",{});var Nn=n(yt);Jo=i(Nn,"XLMProphetNetModel"),Nn.forEach(s),xs.forEach(s),os=d(e),w=r(e,"DIV",{class:!0});var re=n(w);u(Ee.$$.fragment,re),Yo=d(re),ye=r(re,"P",{});var Es=n(ye);Qo=i(Es,"This class overrides "),ot=r(Es,"A",{href:!0});var $n=n(ot);Zo=i($n,"ProphetNetModel"),$n.forEach(s),Ko=i(Es,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Es.forEach(s),er=d(re),Xt=r(re,"P",{});var Ln=n(Xt);tr=i(Ln,"Example:"),Ln.forEach(s),sr=d(re),u(Xe.$$.fragment,re),re.forEach(s),rs=d(e),I=r(e,"H2",{class:!0});var ys=n(I);K=r(ys,"A",{id:!0,class:!0,href:!0});var Mn=n(K);zt=r(Mn,"SPAN",{});var xn=n(zt);u(ze.$$.fragment,xn),xn.forEach(s),Mn.forEach(s),or=d(ys),jt=r(ys,"SPAN",{});var En=n(jt);rr=i(En,"XLMProphetNetEncoder"),En.forEach(s),ys.forEach(s),ns=d(e),b=r(e,"DIV",{class:!0});var ne=n(b);u(je.$$.fragment,ne),nr=d(ne),qe=r(ne,"P",{});var Xs=n(qe);ar=i(Xs,"This class overrides "),rt=r(Xs,"A",{href:!0});var yn=n(rt);ir=i(yn,"ProphetNetEncoder"),yn.forEach(s),lr=i(Xs,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Xs.forEach(s),pr=d(ne),qt=r(ne,"P",{});var Xn=n(qt);dr=i(Xn,"Example:"),Xn.forEach(s),cr=d(ne),u(Te.$$.fragment,ne),ne.forEach(s),as=d(e),F=r(e,"H2",{class:!0});var zs=n(F);ee=r(zs,"A",{id:!0,class:!0,href:!0});var zn=n(ee);Tt=r(zn,"SPAN",{});var jn=n(Tt);u(De.$$.fragment,jn),jn.forEach(s),zn.forEach(s),hr=d(zs),Dt=r(zs,"SPAN",{});var qn=n(Dt);mr=i(qn,"XLMProphetNetDecoder"),qn.forEach(s),zs.forEach(s),is=d(e),N=r(e,"DIV",{class:!0});var ae=n(N);u(Ce.$$.fragment,ae),ur=d(ae),Ae=r(ae,"P",{});var js=n(Ae);fr=i(js,"This class overrides "),nt=r(js,"A",{href:!0});var Tn=n(nt);gr=i(Tn,"ProphetNetDecoder"),Tn.forEach(s),_r=i(js,`. Please check the superclass for the appropriate documentation alongside usage examples.`),js.forEach(s),kr=d(ae),Ct=r(ae,"P",{});var Dn=n(Ct);vr=i(Dn,"Example:"),Dn.forEach(s),Pr=d(ae),u(Se.$$.fragment,ae),ae.forEach(s),ls=d(e),G=r(e,"H2",{class:!0});var qs=n(G);te=r(qs,"A",{id:!0,class:!0,href:!0});var Cn=n(te);At=r(Cn,"SPAN",{});var An=n(At);u(Ie.$$.fragment,An),An.forEach(s),Cn.forEach(s),wr=d(qs),St=r(qs,"SPAN",{});var Sn=n(St);br=i(Sn,"XLMProphetNetForConditionalGeneration"),Sn.forEach(s),qs.forEach(s),ps=d(e),$=r(e,"DIV",{class:!0});var ie=n($);u(Fe.$$.fragment,ie),Nr=d(ie),Ge=r(ie,"P",{});var Ts=n(Ge);$r=i(Ts,"This class overrides "),at=r(Ts,"A",{href:!0});var In=n(at);Lr=i(In,"ProphetNetForConditionalGeneration"),In.forEach(s),Mr=i(Ts,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ts.forEach(s),xr=d(ie),It=r(ie,"P",{});var Fn=n(It);Er=i(Fn,"Example:"),Fn.forEach(s),yr=d(ie),u(Be.$$.fragment,ie),ie.forEach(s),ds=d(e),B=r(e,"H2",{class:!0});var Ds=n(B);se=r(Ds,"A",{id:!0,class:!0,href:!0});var Gn=n(se);Ft=r(Gn,"SPAN",{});var Bn=n(Ft);u(Re.$$.fragment,Bn),Bn.forEach(s),Gn.forEach(s),Xr=d(Ds),Gt=r(Ds,"SPAN",{});var Rn=n(Gt);zr=i(Rn,"XLMProphetNetForCausalLM"),Rn.forEach(s),Ds.forEach(s),cs=d(e),L=r(e,"DIV",{class:!0});var le=n(L);u(Oe.$$.fragment,le),jr=d(le),He=r(le,"P",{});var Cs=n(He);qr=i(Cs,"This class overrides "),it=r(Cs,"A",{href:!0});var On=n(it);Tr=i(On,"ProphetNetForCausalLM"),On.forEach(s),Dr=i(Cs,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Cs.forEach(s),Cr=d(le),Bt=r(le,"P",{});var Hn=n(Bt);Ar=i(Hn,"Example:"),Hn.forEach(s),Sr=d(le),u(Ve.$$.fragment,le),le.forEach(s),this.h()},h(){l(X,"name","hf:doc:metadata"),l(X,"content",JSON.stringify(Zn)),l(x,"id","xlmprophetnet"),l(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(x,"href","#xlmprophetnet"),l(M,"class","relative group"),l(de,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),l(de,"rel","nofollow"),l(O,"id","overview"),l(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(O,"href","#overview"),l(q,"class","relative group"),l(he,"href","https://arxiv.org/abs/2001.04063"),l(he,"rel","nofollow"),l(me,"href","https://github.com/microsoft/ProphetNet"),l(me,"rel","nofollow"),l(W,"id","transformers.XLMProphetNetConfig"),l(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(W,"href","#transformers.XLMProphetNetConfig"),l(T,"class","relative group"),l(Qe,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetConfig"),l(D,"class","docstring"),l(U,"id","transformers.XLMProphetNetTokenizer"),l(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(U,"href","#transformers.XLMProphetNetTokenizer"),l(C,"class","relative group"),l(Ze,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),l(Ke,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),l(ve,"href","https://github.com/google/sentencepiece"),l(ve,"rel","nofollow"),l(et,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(z,"class","docstring"),l(J,"class","docstring"),l(Y,"class","docstring"),l(Q,"class","docstring"),l(v,"class","docstring"),l(Z,"id","transformers.XLMProphetNetModel"),l(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Z,"href","#transformers.XLMProphetNetModel"),l(S,"class","relative group"),l(ot,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetModel"),l(w,"class","docstring"),l(K,"id","transformers.XLMProphetNetEncoder"),l(K,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(K,"href","#transformers.XLMProphetNetEncoder"),l(I,"class","relative group"),l(rt,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetEncoder"),l(b,"class","docstring"),l(ee,"id","transformers.XLMProphetNetDecoder"),l(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ee,"href","#transformers.XLMProphetNetDecoder"),l(F,"class","relative group"),l(nt,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetDecoder"),l(N,"class","docstring"),l(te,"id","transformers.XLMProphetNetForConditionalGeneration"),l(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(te,"href","#transformers.XLMProphetNetForConditionalGeneration"),l(G,"class","relative group"),l(at,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration"),l($,"class","docstring"),l(se,"id","transformers.XLMProphetNetForCausalLM"),l(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(se,"href","#transformers.XLMProphetNetForCausalLM"),l(B,"class","relative group"),l(it,"href","/docs/transformers/v4.15.0/en/model_doc/prophetnet#transformers.ProphetNetForCausalLM"),l(L,"class","docstring")},m(e,c){t(document.head,X),h(e,We,c),h(e,M,c),t(M,x),t(x,dt),f(pe,dt,null),t(M,Ss),t(M,ct),t(ct,Is),h(e,Ht,c),h(e,j,c),t(j,ht),t(ht,Fs),t(j,Gs),t(j,de),t(de,Bs),t(j,Rs),h(e,Vt,c),h(e,q,c),t(q,O),t(O,mt),f(ce,mt,null),t(q,Os),t(q,ut),t(ut,Hs),h(e,Wt,c),h(e,H,c),t(H,Vs),t(H,he),t(he,Ws),t(H,Us),h(e,Ut,c),h(e,Ue,c),t(Ue,Js),h(e,Jt,c),h(e,Je,c),t(Je,Ys),h(e,Yt,c),h(e,Ye,c),t(Ye,ft),t(ft,Qs),h(e,Qt,c),h(e,V,c),t(V,Zs),t(V,me),t(me,Ks),t(V,eo),h(e,Zt,c),h(e,T,c),t(T,W),t(W,gt),f(ue,gt,null),t(T,to),t(T,_t),t(_t,so),h(e,Kt,c),h(e,D,c),f(fe,D,null),t(D,oo),t(D,ge),t(ge,ro),t(ge,Qe),t(Qe,no),t(ge,ao),h(e,es,c),h(e,C,c),t(C,U),t(U,kt),f(_e,kt,null),t(C,io),t(C,vt),t(vt,lo),h(e,ts,c),h(e,v,c),f(ke,v,null),t(v,po),t(v,E),t(E,co),t(E,Ze),t(Ze,ho),t(E,mo),t(E,Ke),t(Ke,uo),t(E,fo),t(E,ve),t(ve,go),t(E,_o),t(v,ko),t(v,Pe),t(Pe,vo),t(Pe,et),t(et,Po),t(Pe,wo),t(v,bo),t(v,A),t(A,No),t(A,Pt),t(Pt,$o),t(A,Lo),t(A,wt),t(wt,Mo),t(A,xo),t(v,Eo),t(v,z),f(we,z,null),t(z,yo),t(z,bt),t(bt,Xo),t(z,zo),t(z,be),t(be,tt),t(tt,jo),t(tt,Nt),t(Nt,qo),t(be,To),t(be,st),t(st,Do),t(st,$t),t($t,Co),t(v,Ao),t(v,J),f(Ne,J,null),t(J,So),t(J,Lt),t(Lt,Io),t(v,Fo),t(v,Y),f($e,Y,null),t(Y,Go),t(Y,Mt),t(Mt,Bo),t(v,Ro),t(v,Q),f(Le,Q,null),t(Q,Oo),t(Q,Me),t(Me,Ho),t(Me,xt),t(xt,Vo),t(Me,Wo),h(e,ss,c),h(e,S,c),t(S,Z),t(Z,Et),f(xe,Et,null),t(S,Uo),t(S,yt),t(yt,Jo),h(e,os,c),h(e,w,c),f(Ee,w,null),t(w,Yo),t(w,ye),t(ye,Qo),t(ye,ot),t(ot,Zo),t(ye,Ko),t(w,er),t(w,Xt),t(Xt,tr),t(w,sr),f(Xe,w,null),h(e,rs,c),h(e,I,c),t(I,K),t(K,zt),f(ze,zt,null),t(I,or),t(I,jt),t(jt,rr),h(e,ns,c),h(e,b,c),f(je,b,null),t(b,nr),t(b,qe),t(qe,ar),t(qe,rt),t(rt,ir),t(qe,lr),t(b,pr),t(b,qt),t(qt,dr),t(b,cr),f(Te,b,null),h(e,as,c),h(e,F,c),t(F,ee),t(ee,Tt),f(De,Tt,null),t(F,hr),t(F,Dt),t(Dt,mr),h(e,is,c),h(e,N,c),f(Ce,N,null),t(N,ur),t(N,Ae),t(Ae,fr),t(Ae,nt),t(nt,gr),t(Ae,_r),t(N,kr),t(N,Ct),t(Ct,vr),t(N
,Pr),f(Se,N,null),h(e,ls,c),h(e,G,c),t(G,te),t(te,At),f(Ie,At,null),t(G,wr),t(G,St),t(St,br),h(e,ps,c),h(e,$,c),f(Fe,$,null),t($,Nr),t($,Ge),t(Ge,$r),t(Ge,at),t(at,Lr),t(Ge,Mr),t($,xr),t($,It),t(It,Er),t($,yr),f(Be,$,null),h(e,ds,c),h(e,B,c),t(B,se),t(se,Ft),f(Re,Ft,null),t(B,Xr),t(B,Gt),t(Gt,zr),h(e,cs,c),h(e,L,c),f(Oe,L,null),t(L,jr),t(L,He),t(He,qr),t(He,it),t(it,Tr),t(He,Dr),t(L,Cr),t(L,Bt),t(Bt,Ar),t(L,Sr),f(Ve,L,null),hs=!0},p:Yn,i(e){hs||(g(pe.$$.fragment,e),g(ce.$$.fragment,e),g(ue.$$.fragment,e),g(fe.$$.fragment,e),g(_e.$$.fragment,e),g(ke.$$.fragment,e),g(we.$$.fragment,e),g(Ne.$$.fragment,e),g($e.$$.fragment,e),g(Le.$$.fragment,e),g(xe.$$.fragment,e),g(Ee.$$.fragment,e),g(Xe.$$.fragment,e),g(ze.$$.fragment,e),g(je.$$.fragment,e),g(Te.$$.fragment,e),g(De.$$.fragment,e),g(Ce.$$.fragment,e),g(Se.$$.fragment,e),g(Ie.$$.fragment,e),g(Fe.$$.fragment,e),g(Be.$$.fragment,e),g(Re.$$.fragment,e),g(Oe.$$.fragment,e),g(Ve.$$.fragment,e),hs=!0)},o(e){_(pe.$$.fragment,e),_(ce.$$.fragment,e),_(ue.$$.fragment,e),_(fe.$$.fragment,e),_(_e.$$.fragment,e),_(ke.$$.fragment,e),_(we.$$.fragment,e),_(Ne.$$.fragment,e),_($e.$$.fragment,e),_(Le.$$.fragment,e),_(xe.$$.fragment,e),_(Ee.$$.fragment,e),_(Xe.$$.fragment,e),_(ze.$$.fragment,e),_(je.$$.fragment,e),_(Te.$$.fragment,e),_(De.$$.fragment,e),_(Ce.$$.fragment,e),_(Se.$$.fragment,e),_(Ie.$$.fragment,e),_(Fe.$$.fragment,e),_(Be.$$.fragment,e),_(Re.$$.fragment,e),_(Oe.$$.fragment,e),_(Ve.$$.fragment,e),hs=!1},d(e){s(X),e&&s(We),e&&s(M),k(pe),e&&s(Ht),e&&s(j),e&&s(Vt),e&&s(q),k(ce),e&&s(Wt),e&&s(H),e&&s(Ut),e&&s(Ue),e&&s(Jt),e&&s(Je),e&&s(Yt),e&&s(Ye),e&&s(Qt),e&&s(V),e&&s(Zt),e&&s(T),k(ue),e&&s(Kt),e&&s(D),k(fe),e&&s(es),e&&s(C),k(_e),e&&s(ts),e&&s(v),k(ke),k(we),k(Ne),k($e),k(Le),e&&s(ss),e&&s(S),k(xe),e&&s(os),e&&s(w),k(Ee),k(Xe),e&&s(rs),e&&s(I),k(ze),e&&s(ns),e&&s(b),k(je),k(Te),e&&s(as),e&&s(F),k(De),e&&s(is),e&&s(N),k(Ce),k(Se),e&&s(ls),e&&s(G),k(Ie),e&&s(ps),e&&s($),k(Fe),k(Be),e&&s(ds),e&&s(B),k(Re),e&&s(cs),e&&s(L),k(Oe),k(Ve)}}}const Zn={local:"xlmprophetnet",sections:[{local:"overview",title:"Overview"},{local:"transformers.XLMProphetNetConfig",title:"XLMProphetNetConfig"},{local:"transformers.XLMProphetNetTokenizer",title:"XLMProphetNetTokenizer"},{local:"transformers.XLMProphetNetModel",title:"XLMProphetNetModel"},{local:"transformers.XLMProphetNetEncoder",title:"XLMProphetNetEncoder"},{local:"transformers.XLMProphetNetDecoder",title:"XLMProphetNetDecoder"},{local:"transformers.XLMProphetNetForConditionalGeneration",title:"XLMProphetNetForConditionalGeneration"},{local:"transformers.XLMProphetNetForCausalLM",title:"XLMProphetNetForCausalLM"}],title:"XLM-ProphetNet"};function Kn(As,X,We){let{fw:M}=X;return As.$$set=x=>{"fw"in x&&We(0,M=x.fw)},[M]}class na extends Vn{constructor(X){super();Wn(this,X,Kn,Qn,Un,{fw:0})}}export{na as default,Zn as metadata};
9,958
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/megatron_gpt2.mdx-7bc0724f.js
import{S as Pt,i as bt,s as $t,e as r,k as c,w as D,t as n,L as At,c as i,d as t,m as f,a as m,x as H,h as l,b as p,J as a,g as s,y as V,K as Et,q as j,o as J,B as K}from"../../chunks/vendor-b1433968.js";import{I as Tt}from"../../chunks/IconCopyLink-7029626d.js";import{C as be}from"../../chunks/CodeBlock-a320dbd7.js";import"../../chunks/CopyButton-f65cb278.js";function kt($e){let u,R,h,d,Q,b,Ae,X,Ee,oe,w,y,Y,$,ke,Z,Oe,re,T,Me,A,Ge,Se,ne,I,Ce,ie,x,ee,Le,le,z,Ne,se,P,Re,E,Ie,xe,me,g,ze,k,Be,Ue,O,Fe,We,pe,B,qe,ce,M,fe,G,he,U,De,de,v,He,te,Ve,je,ae,Je,Ke,ue,S,ge,C,ve,_,Qe,L,Xe,Ye,N,Ze,et,_e;return b=new Tt({}),$=new Tt({}),M=new be({props:{code:",",highlighted:""}}),G=new be({props:{code:`wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_gpt2_345m_v0_0.zip,`,highlighted:`wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_gpt2_345m_v0_0.zip`}}),S=new be({props:{code:",",highlighted:""}}),C=new be({props:{code:"python3 $PATH_TO_TRANSFORMERS/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_gpt2_345m_v0_0.zip,",highlighted:'python3 <span class="hljs-variable">$PATH_TO_TRANSFORMERS</span>/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_gpt2_345m_v0_0.zip'}}),{c(){u=r("meta"),R=c(),h=r("h1"),d=r("a"),Q=r("span"),D(b.$$.fragment),Ae=c(),X=r("span"),Ee=n("MegatronGPT2"),oe=c(),w=r("h2"),y=r("a"),Y=r("span"),D($.$$.fragment),ke=c(),Z=r("span"),Oe=n("Overview"),re=c(),T=r("p"),Me=n("The MegatronGPT2 model was proposed in "),A=r("a"),Ge=n(`Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism`),Se=n(` by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.`),ne=c(),I=r("p"),Ce=n("The abstract from the paper is the following:"),ie=c(),x=r("p"),ee=r("em"),Le=n(`Recent work in language modeling demonstrates that training large transformer models advances the state of the art in Natural Language Processing applications. However, very large models can be quite difficult to train due to memory constraints. In this work, we present our techniques for training very large transformer models and implement a simple, efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain 15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9 billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA accuracy of 63.2%) datasets. 
Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).`),le=c(),z=r("p"),Ne=n("Tips:"),se=c(),P=r("p"),Re=n("We have provided pretrained "),E=r("a"),Ie=n("GPT2-345M"),xe=n(` checkpoints for use to evaluate or finetuning downstream tasks.`),me=c(),g=r("p"),ze=n("To access these checkpoints, first "),k=r("a"),Be=n("sign up"),Ue=n(` for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. Further documentation for downloading models can be found in the `),O=r("a"),Fe=n("NGC documentation"),We=n("."),pe=c(),B=r("p"),qe=n("Alternatively, you can directly download the checkpoints using:"),ce=c(),D(M.$$.fragment),fe=c(),D(G.$$.fragment),he=c(),U=r("p"),De=n(`Once you have obtained the checkpoint from NVIDIA GPU Cloud (NGC), you have to convert it to a format that will easily be loaded by Hugging Face Transformers GPT2 implementation.`),de=c(),v=r("p"),He=n("The following command allows you to do the conversion. We assume that the folder "),te=r("code"),Ve=n("models/megatron_gpt2"),je=n(` contains `),ae=r("code"),Je=n("megatron_gpt2_345m_v0_0.zip"),Ke=n(" and that the command is run from that folder:"),ue=c(),D(S.$$.fragment),ge=c(),D(C.$$.fragment),ve=c(),_=r("p"),Qe=n("This model was contributed by "),L=r("a"),Xe=n("jdemouth"),Ye=n(". The original code can be found "),N=r("a"),Ze=n("here"),et=n(`. That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, it contains a hybrid model parallel approach using \u201Ctensor parallel\u201D and \u201Cpipeline parallel\u201D techniques.`),this.h()},l(e){const o=At('[data-svelte="svelte-1phssyn"]',document.head);u=i(o,"META",{name:!0,content:!0}),o.forEach(t),R=f(e),h=i(e,"H1",{class:!0});var we=m(h);d=i(we,"A",{id:!0,class:!0,href:!0});var tt=m(d);Q=i(tt,"SPAN",{});var at=m(Q);H(b.$$.fragment,at),at.forEach(t),tt.forEach(t),Ae=f(we),X=i(we,"SPAN",{});var ot=m(X);Ee=l(ot,"MegatronGPT2"),ot.forEach(t),we.forEach(t),oe=f(e),w=i(e,"H2",{class:!0});var ye=m(w);y=i(ye,"A",{id:!0,class:!0,href:!0});var rt=m(y);Y=i(rt,"SPAN",{});var nt=m(Y);H($.$$.fragment,nt),nt.forEach(t),rt.forEach(t),ke=f(ye),Z=i(ye,"SPAN",{});var it=m(Z);Oe=l(it,"Overview"),it.forEach(t),ye.forEach(t),re=f(e),T=i(e,"P",{});var Te=m(T);Me=l(Te,"The MegatronGPT2 model was proposed in "),A=i(Te,"A",{href:!0,rel:!0});var lt=m(A);Ge=l(lt,`Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism`),lt.forEach(t),Se=l(Te,` by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.`),Te.forEach(t),ne=f(e),I=i(e,"P",{});var st=m(I);Ce=l(st,"The abstract from the paper is the following:"),st.forEach(t),ie=f(e),x=i(e,"P",{});var mt=m(x);ee=i(mt,"EM",{});var pt=m(ee);Le=l(pt,`Recent work in language modeling demonstrates that training large transformer models advances the state of the art in Natural Language Processing applications. However, very large models can be quite difficult to train due to memory constraints. In this work, we present our techniques for training very large transformer models and implement a simple, efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. 
We illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain 15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9 billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).`),pt.forEach(t),mt.forEach(t),le=f(e),z=i(e,"P",{});var ct=m(z);Ne=l(ct,"Tips:"),ct.forEach(t),se=f(e),P=i(e,"P",{});var Pe=m(P);Re=l(Pe,"We have provided pretrained "),E=i(Pe,"A",{href:!0,rel:!0});var ft=m(E);Ie=l(ft,"GPT2-345M"),ft.forEach(t),xe=l(Pe,` checkpoints for use to evaluate or finetuning downstream tasks.`),Pe.forEach(t),me=f(e),g=i(e,"P",{});var F=m(g);ze=l(F,"To access these checkpoints, first "),k=i(F,"A",{href:!0,rel:!0});var ht=m(k);Be=l(ht,"sign up"),ht.forEach(t),Ue=l(F,` for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. Further documentation for downloading models can be found in the `),O=i(F,"A",{href:!0,rel:!0});var dt=m(O);Fe=l(dt,"NGC documentation"),dt.forEach(t),We=l(F,"."),F.forEach(t),pe=f(e),B=i(e,"P",{});var ut=m(B);qe=l(ut,"Alternatively, you can directly download the checkpoints using:"),ut.forEach(t),ce=f(e),H(M.$$.fragment,e),fe=f(e),H(G.$$.fragment,e),he=f(e),U=i(e,"P",{});var gt=m(U);De=l(gt,`Once you have obtained the checkpoint from NVIDIA GPU Cloud (NGC), you have to convert it to a format that will easily be loaded by Hugging Face Transformers GPT2 implementation.`),gt.forEach(t),de=f(e),v=i(e,"P",{});var W=m(v);He=l(W,"The following command allows you to do the conversion. We assume that the folder "),te=i(W,"CODE",{});var vt=m(te);Ve=l(vt,"models/megatron_gpt2"),vt.forEach(t),je=l(W,` contains `),ae=i(W,"CODE",{});var _t=m(ae);Je=l(_t,"megatron_gpt2_345m_v0_0.zip"),_t.forEach(t),Ke=l(W," and that the command is run from that folder:"),W.forEach(t),ue=f(e),H(S.$$.fragment,e),ge=f(e),H(C.$$.fragment,e),ve=f(e),_=i(e,"P",{});var q=m(_);Qe=l(q,"This model was contributed by "),L=i(q,"A",{href:!0,rel:!0});var wt=m(L);Xe=l(wt,"jdemouth"),wt.forEach(t),Ye=l(q,". The original code can be found "),N=i(q,"A",{href:!0,rel:!0});var yt=m(N);Ze=l(yt,"here"),yt.forEach(t),et=l(q,`. That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. 
In particular, it contains a hybrid model parallel approach using \u201Ctensor parallel\u201D and \u201Cpipeline parallel\u201D techniques.`),q.forEach(t),this.h()},h(){p(u,"name","hf:doc:metadata"),p(u,"content",JSON.stringify(Ot)),p(d,"id","megatrongpt2"),p(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(d,"href","#megatrongpt2"),p(h,"class","relative group"),p(y,"id","overview"),p(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(y,"href","#overview"),p(w,"class","relative group"),p(A,"href","https://arxiv.org/abs/1909.08053"),p(A,"rel","nofollow"),p(E,"href","https://ngc.nvidia.com/catalog/models/nvidia:megatron_lm_345m"),p(E,"rel","nofollow"),p(k,"href","https://ngc.nvidia.com/signup"),p(k,"rel","nofollow"),p(O,"href","https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1"),p(O,"rel","nofollow"),p(L,"href","https://huggingface.co/jdemouth"),p(L,"rel","nofollow"),p(N,"href","https://github.com/NVIDIA/Megatron-LM"),p(N,"rel","nofollow")},m(e,o){a(document.head,u),s(e,R,o),s(e,h,o),a(h,d),a(d,Q),V(b,Q,null),a(h,Ae),a(h,X),a(X,Ee),s(e,oe,o),s(e,w,o),a(w,y),a(y,Y),V($,Y,null),a(w,ke),a(w,Z),a(Z,Oe),s(e,re,o),s(e,T,o),a(T,Me),a(T,A),a(A,Ge),a(T,Se),s(e,ne,o),s(e,I,o),a(I,Ce),s(e,ie,o),s(e,x,o),a(x,ee),a(ee,Le),s(e,le,o),s(e,z,o),a(z,Ne),s(e,se,o),s(e,P,o),a(P,Re),a(P,E),a(E,Ie),a(P,xe),s(e,me,o),s(e,g,o),a(g,ze),a(g,k),a(k,Be),a(g,Ue),a(g,O),a(O,Fe),a(g,We),s(e,pe,o),s(e,B,o),a(B,qe),s(e,ce,o),V(M,e,o),s(e,fe,o),V(G,e,o),s(e,he,o),s(e,U,o),a(U,De),s(e,de,o),s(e,v,o),a(v,He),a(v,te),a(te,Ve),a(v,je),a(v,ae),a(ae,Je),a(v,Ke),s(e,ue,o),V(S,e,o),s(e,ge,o),V(C,e,o),s(e,ve,o),s(e,_,o),a(_,Qe),a(_,L),a(L,Xe),a(_,Ye),a(_,N),a(N,Ze),a(_,et),_e=!0},p:Et,i(e){_e||(j(b.$$.fragment,e),j($.$$.fragment,e),j(M.$$.fragment,e),j(G.$$.fragment,e),j(S.$$.fragment,e),j(C.$$.fragment,e),_e=!0)},o(e){J(b.$$.fragment,e),J($.$$.fragment,e),J(M.$$.fragment,e),J(G.$$.fragment,e),J(S.$$.fragment,e),J(C.$$.fragment,e),_e=!1},d(e){t(u),e&&t(R),e&&t(h),K(b),e&&t(oe),e&&t(w),K($),e&&t(re),e&&t(T),e&&t(ne),e&&t(I),e&&t(ie),e&&t(x),e&&t(le),e&&t(z),e&&t(se),e&&t(P),e&&t(me),e&&t(g),e&&t(pe),e&&t(B),e&&t(ce),K(M,e),e&&t(fe),K(G,e),e&&t(he),e&&t(U),e&&t(de),e&&t(v),e&&t(ue),K(S,e),e&&t(ge),K(C,e),e&&t(ve),e&&t(_)}}}const Ot={local:"megatrongpt2",sections:[{local:"overview",title:"Overview"}],title:"MegatronGPT2"};function Mt($e,u,R){let{fw:h}=u;return $e.$$set=d=>{"fw"in d&&R(0,h=d.fw)},[h]}class Nt extends Pt{constructor(u){super();bt(this,u,Mt,kt,$t,{fw:0})}}export{Nt as default,Ot as metadata};
9959
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/tapas.mdx-473b1d82.js
import{S as S0,i as D0,s as N0,e as o,k as l,w as v,t as a,L as L0,c as r,d as s,m as d,a as i,x as b,h as n,b as p,M as Q0,J as e,g as c,y as w,q as y,o as k,B as j}from"../../chunks/vendor-b1433968.js";import{T as Fs}from"../../chunks/Tip-c3840994.js";import{D as pe}from"../../chunks/Docstring-ff504c58.js";import{C as xt}from"../../chunks/CodeBlock-a320dbd7.js";import{C as Ur}from"../../chunks/CodeBlockFw-e3b92d56.js";import{I as ss}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function O0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function I0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function W0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function U0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function H0(U){let m,x,f,g,q,T,_,F,_e,K,A,Y,O,Z,Te,I,ve,ce,W,N,ee,se,z,$,oe,H,he,V,L,te,be,C,we,Q,ae,ye,B,ke,re,P,je,G,ue;return{c(){m=o("p"),x=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=o("ul"),q=o("li"),T=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=o("li"),_e=a("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),A=o("p"),Y=a("This second option is useful when using "),O=o("code"),Z=a("tf.keras.Model.fit"),Te=a(` method which currently requires having all the tensors in the first argument of the model call function: `),I=o("code"),ve=a("model(inputs)"),ce=a("."),W=l(),N=o("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),se=l(),z=o("ul"),$=o("li"),oe=a("a single Tensor with "),H=o("code"),he=a("input_ids"),V=a(" only and nothing else: "),L=o("code"),te=a("model(inputs_ids)"),be=l(),C=o("li"),we=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=o("code"),ae=a("model([input_ids, attention_mask])"),ye=a(" or "),B=o("code"),ke=a("model([input_ids, attention_mask, token_type_ids])"),re=l(),P=o("li"),je=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=o("code"),ue=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){m=r(u,"P",{});var E=i(m);x=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(s),f=d(u),g=r(u,"UL",{});var J=i(g);q=r(J,"LI",{});var xe=i(q);T=n(xe,"having all inputs as keyword arguments (like PyTorch models), or"),xe.forEach(s),_=d(J),F=r(J,"LI",{});var Ge=i(F);_e=n(Ge,"having all inputs as a list, tuple or dict in the first positional arguments."),Ge.forEach(s),J.forEach(s),K=d(u),A=r(u,"P",{});var S=i(A);Y=n(S,"This second option is useful when using "),O=r(S,"CODE",{});var Fe=i(O);Z=n(Fe,"tf.keras.Model.fit"),Fe.forEach(s),Te=n(S,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(S,"CODE",{});var $e=i(I);ve=n($e,"model(inputs)"),$e.forEach(s),ce=n(S,"."),S.forEach(s),W=d(u),N=r(u,"P",{});var Re=i(N);ee=n(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(s),se=d(u),z=r(u,"UL",{});var D=i(z);$=r(D,"LI",{});var M=i($);oe=n(M,"a single Tensor with "),H=r(M,"CODE",{});var Ve=i(H);he=n(Ve,"input_ids"),Ve.forEach(s),V=n(M," only and nothing else: "),L=r(M,"CODE",{});var Ce=i(L);te=n(Ce,"model(inputs_ids)"),Ce.forEach(s),M.forEach(s),be=d(D),C=r(D,"LI",{});var R=i(C);we=n(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ke=i(Q);ae=n(Ke,"model([input_ids, attention_mask])"),Ke.forEach(s),ye=n(R," or "),B=r(R,"CODE",{});var qe=i(B);ke=n(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(s),R.forEach(s),re=d(D),P=r(D,"LI",{});var Ee=i(P);je=n(Ee,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(Ee,"CODE",{});var Ye=i(G);ue=n(Ye,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Ye.forEach(s),Ee.forEach(s),D.forEach(s)},m(u,E){c(u,m,E),e(m,x),c(u,f,E),c(u,g,E),e(g,q),e(q,T),e(g,_),e(g,F),e(F,_e),c(u,K,E),c(u,A,E),e(A,Y),e(A,O),e(O,Z),e(A,Te),e(A,I),e(I,ve),e(A,ce),c(u,W,E),c(u,N,E),e(N,ee),c(u,se,E),c(u,z,E),e(z,$),e($,oe),e($,H),e(H,he),e($,V),e($,L),e(L,te),e(z,be),e(z,C),e(C,we),e(C,Q),e(Q,ae),e(C,ye),e(C,B),e(B,ke),e(z,re),e(z,P),e(P,je),e(P,G),e(G,ue)},d(u){u&&s(m),u&&s(f),u&&s(g),u&&s(K),u&&s(A),u&&s(W),u&&s(N),u&&s(se),u&&s(z)}}}function B0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function G0(U){let m,x,f,g,q,T,_,F,_e,K,A,Y,O,Z,Te,I,ve,ce,W,N,ee,se,z,$,oe,H,he,V,L,te,be,C,we,Q,ae,ye,B,ke,re,P,je,G,ue;return{c(){m=o("p"),x=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=o("ul"),q=o("li"),T=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=o("li"),_e=a("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),A=o("p"),Y=a("This second option is useful when using "),O=o("code"),Z=a("tf.keras.Model.fit"),Te=a(` method which currently requires having all the tensors in the first argument of the model call function: `),I=o("code"),ve=a("model(inputs)"),ce=a("."),W=l(),N=o("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),se=l(),z=o("ul"),$=o("li"),oe=a("a single Tensor with "),H=o("code"),he=a("input_ids"),V=a(" only and nothing else: "),L=o("code"),te=a("model(inputs_ids)"),be=l(),C=o("li"),we=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=o("code"),ae=a("model([input_ids, attention_mask])"),ye=a(" or "),B=o("code"),ke=a("model([input_ids, attention_mask, token_type_ids])"),re=l(),P=o("li"),je=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=o("code"),ue=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){m=r(u,"P",{});var E=i(m);x=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(s),f=d(u),g=r(u,"UL",{});var J=i(g);q=r(J,"LI",{});var xe=i(q);T=n(xe,"having all inputs as keyword arguments (like PyTorch models), or"),xe.forEach(s),_=d(J),F=r(J,"LI",{});var Ge=i(F);_e=n(Ge,"having all inputs as a list, tuple or dict in the first positional arguments."),Ge.forEach(s),J.forEach(s),K=d(u),A=r(u,"P",{});var S=i(A);Y=n(S,"This second option is useful when using "),O=r(S,"CODE",{});var Fe=i(O);Z=n(Fe,"tf.keras.Model.fit"),Fe.forEach(s),Te=n(S,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(S,"CODE",{});var $e=i(I);ve=n($e,"model(inputs)"),$e.forEach(s),ce=n(S,"."),S.forEach(s),W=d(u),N=r(u,"P",{});var Re=i(N);ee=n(Re,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(s),se=d(u),z=r(u,"UL",{});var D=i(z);$=r(D,"LI",{});var M=i($);oe=n(M,"a single Tensor with "),H=r(M,"CODE",{});var Ve=i(H);he=n(Ve,"input_ids"),Ve.forEach(s),V=n(M," only and nothing else: "),L=r(M,"CODE",{});var Ce=i(L);te=n(Ce,"model(inputs_ids)"),Ce.forEach(s),M.forEach(s),be=d(D),C=r(D,"LI",{});var R=i(C);we=n(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ke=i(Q);ae=n(Ke,"model([input_ids, attention_mask])"),Ke.forEach(s),ye=n(R," or "),B=r(R,"CODE",{});var qe=i(B);ke=n(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(s),R.forEach(s),re=d(D),P=r(D,"LI",{});var Ee=i(P);je=n(Ee,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(Ee,"CODE",{});var Ye=i(G);ue=n(Ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ye.forEach(s),Ee.forEach(s),D.forEach(s)},m(u,E){c(u,m,E),e(m,x),c(u,f,E),c(u,g,E),e(g,q),e(q,T),e(g,_),e(g,F),e(F,_e),c(u,K,E),c(u,A,E),e(A,Y),e(A,O),e(O,Z),e(A,Te),e(A,I),e(I,ve),e(A,ce),c(u,W,E),c(u,N,E),e(N,ee),c(u,se,E),c(u,z,E),e(z,$),e($,oe),e($,H),e(H,he),e($,V),e($,L),e(L,te),e(z,be),e(z,C),e(C,we),e(C,Q),e(Q,ae),e(C,ye),e(C,B),e(B,ke),e(z,re),e(z,P),e(P,je),e(P,G),e(G,ue)},d(u){u&&s(m),u&&s(f),u&&s(g),u&&s(K),u&&s(A),u&&s(W),u&&s(N),u&&s(se),u&&s(z)}}}function R0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function V0(U){let m,x,f,g,q,T,_,F,_e,K,A,Y,O,Z,Te,I,ve,ce,W,N,ee,se,z,$,oe,H,he,V,L,te,be,C,we,Q,ae,ye,B,ke,re,P,je,G,ue;return{c(){m=o("p"),x=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=o("ul"),q=o("li"),T=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=o("li"),_e=a("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),A=o("p"),Y=a("This second option is useful when using "),O=o("code"),Z=a("tf.keras.Model.fit"),Te=a(` method which currently requires having all the tensors in the first argument of the model call function: `),I=o("code"),ve=a("model(inputs)"),ce=a("."),W=l(),N=o("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),se=l(),z=o("ul"),$=o("li"),oe=a("a single Tensor with "),H=o("code"),he=a("input_ids"),V=a(" only and nothing else: "),L=o("code"),te=a("model(inputs_ids)"),be=l(),C=o("li"),we=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=o("code"),ae=a("model([input_ids, attention_mask])"),ye=a(" or "),B=o("code"),ke=a("model([input_ids, attention_mask, token_type_ids])"),re=l(),P=o("li"),je=a(`a dictionary with one or several input Tensors 
associated to the input names given in the docstring: `),G=o("code"),ue=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){m=r(u,"P",{});var E=i(m);x=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(s),f=d(u),g=r(u,"UL",{});var J=i(g);q=r(J,"LI",{});var xe=i(q);T=n(xe,"having all inputs as keyword arguments (like PyTorch models), or"),xe.forEach(s),_=d(J),F=r(J,"LI",{});var Ge=i(F);_e=n(Ge,"having all inputs as a list, tuple or dict in the first positional arguments."),Ge.forEach(s),J.forEach(s),K=d(u),A=r(u,"P",{});var S=i(A);Y=n(S,"This second option is useful when using "),O=r(S,"CODE",{});var Fe=i(O);Z=n(Fe,"tf.keras.Model.fit"),Fe.forEach(s),Te=n(S,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(S,"CODE",{});var $e=i(I);ve=n($e,"model(inputs)"),$e.forEach(s),ce=n(S,"."),S.forEach(s),W=d(u),N=r(u,"P",{});var Re=i(N);ee=n(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(s),se=d(u),z=r(u,"UL",{});var D=i(z);$=r(D,"LI",{});var M=i($);oe=n(M,"a single Tensor with "),H=r(M,"CODE",{});var Ve=i(H);he=n(Ve,"input_ids"),Ve.forEach(s),V=n(M," only and nothing else: "),L=r(M,"CODE",{});var Ce=i(L);te=n(Ce,"model(inputs_ids)"),Ce.forEach(s),M.forEach(s),be=d(D),C=r(D,"LI",{});var R=i(C);we=n(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ke=i(Q);ae=n(Ke,"model([input_ids, attention_mask])"),Ke.forEach(s),ye=n(R," or "),B=r(R,"CODE",{});var qe=i(B);ke=n(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(s),R.forEach(s),re=d(D),P=r(D,"LI",{});var Ee=i(P);je=n(Ee,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(Ee,"CODE",{});var Ye=i(G);ue=n(Ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ye.forEach(s),Ee.forEach(s),D.forEach(s)},m(u,E){c(u,m,E),e(m,x),c(u,f,E),c(u,g,E),e(g,q),e(q,T),e(g,_),e(g,F),e(F,_e),c(u,K,E),c(u,A,E),e(A,Y),e(A,O),e(O,Z),e(A,Te),e(A,I),e(I,ve),e(A,ce),c(u,W,E),c(u,N,E),e(N,ee),c(u,se,E),c(u,z,E),e(z,$),e($,oe),e($,H),e(H,he),e($,V),e($,L),e(L,te),e(z,be),e(z,C),e(C,we),e(C,Q),e(Q,ae),e(C,ye),e(C,B),e(B,ke),e(z,re),e(z,P),e(P,je),e(P,G),e(G,ue)},d(u){u&&s(m),u&&s(f),u&&s(g),u&&s(K),u&&s(A),u&&s(W),u&&s(N),u&&s(se),u&&s(z)}}}function K0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function Y0(U){let m,x,f,g,q,T,_,F,_e,K,A,Y,O,Z,Te,I,ve,ce,W,N,ee,se,z,$,oe,H,he,V,L,te,be,C,we,Q,ae,ye,B,ke,re,P,je,G,ue;return{c(){m=o("p"),x=a("TF 2.0 models accepts two formats as inputs:"),f=l(),g=o("ul"),q=o("li"),T=a("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),F=o("li"),_e=a("having 
all inputs as a list, tuple or dict in the first positional arguments."),K=l(),A=o("p"),Y=a("This second option is useful when using "),O=o("code"),Z=a("tf.keras.Model.fit"),Te=a(` method which currently requires having all the tensors in the first argument of the model call function: `),I=o("code"),ve=a("model(inputs)"),ce=a("."),W=l(),N=o("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),se=l(),z=o("ul"),$=o("li"),oe=a("a single Tensor with "),H=o("code"),he=a("input_ids"),V=a(" only and nothing else: "),L=o("code"),te=a("model(inputs_ids)"),be=l(),C=o("li"),we=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=o("code"),ae=a("model([input_ids, attention_mask])"),ye=a(" or "),B=o("code"),ke=a("model([input_ids, attention_mask, token_type_ids])"),re=l(),P=o("li"),je=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=o("code"),ue=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){m=r(u,"P",{});var E=i(m);x=n(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(s),f=d(u),g=r(u,"UL",{});var J=i(g);q=r(J,"LI",{});var xe=i(q);T=n(xe,"having all inputs as keyword arguments (like PyTorch models), or"),xe.forEach(s),_=d(J),F=r(J,"LI",{});var Ge=i(F);_e=n(Ge,"having all inputs as a list, tuple or dict in the first positional arguments."),Ge.forEach(s),J.forEach(s),K=d(u),A=r(u,"P",{});var S=i(A);Y=n(S,"This second option is useful when using "),O=r(S,"CODE",{});var Fe=i(O);Z=n(Fe,"tf.keras.Model.fit"),Fe.forEach(s),Te=n(S,` method which currently requires having all the tensors in the first argument of the model call function: `),I=r(S,"CODE",{});var $e=i(I);ve=n($e,"model(inputs)"),$e.forEach(s),ce=n(S,"."),S.forEach(s),W=d(u),N=r(u,"P",{});var Re=i(N);ee=n(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(s),se=d(u),z=r(u,"UL",{});var D=i(z);$=r(D,"LI",{});var M=i($);oe=n(M,"a single Tensor with "),H=r(M,"CODE",{});var Ve=i(H);he=n(Ve,"input_ids"),Ve.forEach(s),V=n(M," only and nothing else: "),L=r(M,"CODE",{});var Ce=i(L);te=n(Ce,"model(inputs_ids)"),Ce.forEach(s),M.forEach(s),be=d(D),C=r(D,"LI",{});var R=i(C);we=n(R,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Q=r(R,"CODE",{});var Ke=i(Q);ae=n(Ke,"model([input_ids, attention_mask])"),Ke.forEach(s),ye=n(R," or "),B=r(R,"CODE",{});var qe=i(B);ke=n(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(s),R.forEach(s),re=d(D),P=r(D,"LI",{});var Ee=i(P);je=n(Ee,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(Ee,"CODE",{});var Ye=i(G);ue=n(Ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ye.forEach(s),Ee.forEach(s),D.forEach(s)},m(u,E){c(u,m,E),e(m,x),c(u,f,E),c(u,g,E),e(g,q),e(q,T),e(g,_),e(g,F),e(F,_e),c(u,K,E),c(u,A,E),e(A,Y),e(A,O),e(O,Z),e(A,Te),e(A,I),e(I,ve),e(A,ce),c(u,W,E),c(u,N,E),e(N,ee),c(u,se,E),c(u,z,E),e(z,$),e($,oe),e($,H),e(H,he),e($,V),e($,L),e(L,te),e(z,be),e(z,C),e(C,we),e(C,Q),e(Q,ae),e(C,ye),e(C,B),e(B,ke),e(z,re),e(z,P),e(P,je),e(P,G),e(G,ue)},d(u){u&&s(m),u&&s(f),u&&s(g),u&&s(K),u&&s(A),u&&s(W),u&&s(N),u&&s(se),u&&s(z)}}}function J0(U){let m,x,f,g,q;return{c(){m=o("p"),x=a("Although the recipe for forward pass needs to be 
defined within this function, one should call the "),f=o("code"),g=a("Module"),q=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){m=r(T,"P",{});var _=i(m);x=n(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var F=i(f);g=n(F,"Module"),F.forEach(s),q=n(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(s)},m(T,_){c(T,m,_),e(m,x),e(m,f),e(f,g),e(m,q)},d(T){T&&s(m)}}}function X0(U){let m,x,f,g,q,T,_,F,_e,K,A,Y,O,Z,Te,I,ve,ce,W,N,ee,se,z,$,oe,H,he,V,L,te,be,C,we,Q,ae,ye,B,ke,re,P,je,G,ue,u,E,J,xe,Ge,S,Fe,$e,Re,D,M,Ve,Ce,R,Ke,qe,Ee,Ye,fa,ih,lh,ap,Ft,Iy,np,_o,dh,op,_s,ph,ga,ch,hh,_a,uh,mh,Ta,fh,gh,rp,To,_h,ip,Ts,Me,Th,Hr,vh,bh,vo,wh,yh,Br,kh,jh,va,qh,Eh,Gr,xh,Fh,Rr,Ah,zh,$h,Je,Ch,Vr,Mh,Ph,Kr,Sh,Dh,Yr,Nh,Lh,Jr,Qh,Oh,ba,Ih,Wh,Uh,Vs,Hh,Xr,Bh,Gh,Zr,Rh,Vh,Kh,ei,Yh,lp,Ks,At,si,wa,Jh,ti,Xh,dp,zt,Zh,bo,eu,su,pp,wo,ai,tu,cp,$t,au,yo,nu,ou,hp,zs,ni,ru,iu,ya,lu,oi,du,pu,cu,ka,hu,ri,uu,mu,up,ko,fu,mp,Ct,ii,Ys,li,di,gu,_u,pi,ci,Tu,vu,hi,ui,bu,wu,Js,Xs,mi,yu,ku,fi,ju,qu,gi,Eu,xu,Zs,_i,Fu,Au,Ti,zu,$u,vi,Cu,Mu,et,bi,Pu,Su,wi,Du,Nu,yi,Lu,fp,$s,Qu,ja,Ou,Iu,qa,Wu,Uu,gp,Ea,_p,Cs,Hu,jo,Bu,Gu,qo,Ru,Vu,Tp,xa,vp,Mt,Ku,Fa,Yu,Ju,bp,Pt,Xu,Aa,Zu,em,wp,Eo,ki,sm,yp,St,tm,za,am,nm,kp,ie,xo,ji,om,rm,im,Fo,qi,lm,dm,pm,Ao,Ei,cm,hm,um,zo,xi,mm,fm,gm,$o,Fi,_m,Tm,vm,Co,Ai,bm,wm,ym,Mo,zi,km,jm,qm,Po,$i,Em,xm,Fm,So,Ci,Am,zm,jp,We,$m,$a,Cm,Mm,Ca,Pm,Sm,Mi,Dm,Nm,Pi,Lm,Qm,Si,Om,Im,qp,Do,Di,Wm,Ep,Ae,Um,No,Hm,Bm,Ni,Gm,Rm,Li,Vm,Km,Qi,Ym,Jm,Lo,Xm,Zm,Qo,ef,sf,xp,Dt,Oi,Ma,Ii,Wi,tf,af,Ui,Hi,nf,of,st,Pa,Bi,rf,lf,As,Gi,df,pf,Ri,cf,hf,Vi,uf,mf,Ki,ff,gf,Sa,Yi,_f,Tf,Pe,Ji,vf,bf,Xi,wf,yf,Zi,kf,jf,el,qf,Ef,sl,xf,Ff,tl,Af,zf,al,$f,Cf,Da,nl,Mf,Pf,ms,ol,Sf,Df,rl,Nf,Lf,il,Qf,Of,ll,If,Wf,dl,Uf,Fp,le,Oo,Hf,Bf,pl,Gf,Rf,cl,Vf,Kf,hl,Yf,Jf,ul,Xf,Zf,ml,eg,sg,fl,tg,ag,gl,ng,og,Ap,Na,zp,vs,rg,Io,ig,lg,_l,dg,pg,Tl,cg,hg,$p,La,Cp,X,ug,vl,mg,fg,bl,gg,_g,wl,Tg,vg,yl,bg,wg,kl,yg,kg,jl,jg,qg,Wo,Eg,xg,Qa,Fg,Ag,Oa,zg,$g,Mp,Uo,ql,Cg,Pp,Ms,Mg,Ho,Pg,Sg,Bo,Dg,Ng,Sp,Ia,Dp,tt,Nt,El,Wa,Lg,xl,Qg,Np,me,Og,Go,Ig,Wg,Ro,Ug,Hg,Fl,Bg,Gg,Al,Rg,Vg,zl,Kg,Yg,Vo,Jg,Xg,$l,Zg,e_,Lp,Lt,s_,Cl,t_,a_,Qp,Ua,Op,Ue,n_,Ml,o_,r_,Pl,i_,l_,Sl,d_,p_,Ha,c_,h_,Ba,u_,m_,Ip,at,Qt,Dl,Ga,f_,Nl,g_,Wp,nt,Ra,__,Va,T_,Ko,v_,b_,Up,ot,Ot,Ll,Ka,w_,Ql,y_,Hp,Xe,Ya,k_,fs,j_,Yo,q_,E_,Ol,x_,F_,Il,A_,z_,Jo,$_,C_,M_,Ja,P_,Xa,S_,D_,N_,Wl,L_,Q_,Za,Bp,rt,It,Ul,en,O_,Hl,I_,Gp,de,sn,W_,Bl,U_,H_,ne,B_,Xo,G_,R_,Zo,V_,K_,Gl,Y_,J_,Rl,X_,Z_,Vl,eT,sT,Kl,tT,aT,Yl,nT,oT,Jl,rT,iT,Xl,lT,dT,pT,Se,Zl,cT,hT,ed,uT,mT,sd,fT,gT,td,_T,TT,ad,vT,bT,nd,wT,yT,od,kT,jT,er,sr,qT,ET,xT,Wt,tn,FT,rd,AT,zT,Ps,an,$T,nn,CT,tr,MT,PT,ST,on,DT,rn,NT,LT,QT,id,Rp,it,Ut,ld,ln,OT,dd,IT,Vp,De,dn,WT,pn,UT,ar,HT,BT,GT,cn,RT,hn,VT,KT,YT,un,JT,nr,XT,ZT,ev,mn,sv,fn,tv,av,nv,ts,gn,ov,lt,rv,or,iv,lv,pd,dv,pv,cv,Ht,hv,cd,uv,mv,_n,Kp,dt,Bt,hd,Tn,fv,ud,gv,Yp,gs,vn,_v,pt,Tv,md,vv,bv,rr,wv,yv,kv,bn,jv,wn,qv,Ev,xv,as,yn,Fv,ct,Av,ir,zv,$v,fd,Cv,Mv,Pv,Gt,Sv,gd,Dv,Nv,kn,Jp,ht,Rt,_d,jn,Lv,Td,Qv,Xp,Ze,qn,Ov,vd,Iv,Wv,En,Uv,lr,Hv,Bv,Gv,xn,Rv,Fn,Vv,Kv,Yv,ns,An,Jv,ut,Xv,dr,Zv,eb,bd,sb,tb,ab,Vt,nb,wd,ob,rb,zn,Zp,mt,Kt,yd,$n,ib,kd,lb,ec,es,Cn,db,ft,pb,jd,cb,hb,qd,ub,mb,fb,Mn,gb,pr,_b,Tb,vb,Pn,bb,Sn,wb,yb,kb,os,Dn,jb,gt,qb,cr,Eb,xb,Ed,Fb,Ab,zb,Yt,$b,xd,Cb,Mb,Nn,sc,_t,Jt,Fd,Ln,Pb,Ad,Sb,tc,Ne,Qn,Db,zd,Nb,Lb,On,Qb,hr,Ob,Ib,Wb,In,Ub,Wn,Hb,Bb,Gb,Xt,Rb,rs,Un,Vb,Tt,Kb,ur,Yb,Jb,$d,Xb,Zb,ew,Zt,sw,Cd,tw,aw,
Hn,ac,vt,ea,Md,Bn,nw,Pd,ow,nc,Le,Gn,rw,Rn,iw,Sd,lw,dw,pw,Vn,cw,mr,hw,uw,mw,Kn,fw,Yn,gw,_w,Tw,sa,vw,is,Jn,bw,bt,ww,fr,yw,kw,Dd,jw,qw,Ew,ta,xw,Nd,Fw,Aw,Xn,oc,wt,aa,Ld,Zn,zw,Qd,$w,rc,Qe,eo,Cw,Od,Mw,Pw,so,Sw,gr,Dw,Nw,Lw,to,Qw,ao,Ow,Iw,Ww,na,Uw,ls,no,Hw,yt,Bw,_r,Gw,Rw,Id,Vw,Kw,Yw,oa,Jw,Wd,Xw,Zw,oo,ic,kt,ra,Ud,ro,ey,Hd,sy,lc,Oe,io,ty,jt,ay,Bd,ny,oy,Gd,ry,iy,ly,lo,dy,Tr,py,cy,hy,po,uy,co,my,fy,gy,ia,_y,ds,ho,Ty,qt,vy,vr,by,wy,Rd,yy,ky,jy,la,qy,Vd,Ey,xy,uo,dc;return T=new ss({}),Z=new ss({}),wa=new ss({}),Ea=new Ur({props:{pt:{code:`from transformers import TapasConfig, TapasForQuestionAnswering # for example, the base sized model with default SQA configuration model = TapasForQuestionAnswering.from_pretrained('google/tapas-base') # or, the base sized model with WTQ configuration config = TapasConfig.from_pretrained('google/tapas-base-finetuned-wtq') model = TapasForQuestionAnswering.from_pretrained('google/tapas-base', config=config) # or, the base sized model with WikiSQL configuration config = TapasConfig('google-base-finetuned-wikisql-supervised') model = TapasForQuestionAnswering.from_pretrained('google/tapas-base', config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasConfig, TapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># for example, the base sized model with default SQA configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or, the base sized model with WTQ configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or, the base sized model with WikiSQL configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig(<span class="hljs-string">&#x27;google-base-finetuned-wikisql-supervised&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>, config=config)`},tf:{code:`from transformers import TapasConfig, TFTapasForQuestionAnswering # for example, the base sized model with default SQA configuration model = TFTapasForQuestionAnswering.from_pretrained('google/tapas-base') # or, the base sized model with WTQ configuration config = TapasConfig.from_pretrained('google/tapas-base-finetuned-wtq') model = TFTapasForQuestionAnswering.from_pretrained('google/tapas-base', config=config) # or, the base sized model with WikiSQL configuration config = TapasConfig('google-base-finetuned-wikisql-supervised') model = TFTapasForQuestionAnswering.from_pretrained('google/tapas-base', config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasConfig, TFTapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># for example, the base sized model with default SQA configuration</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = TFTapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or, the base sized model with WTQ configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or, the base sized model with WikiSQL configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig(<span class="hljs-string">&#x27;google-base-finetuned-wikisql-supervised&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>, config=config)`}}}),xa=new Ur({props:{pt:{code:`from transformers import TapasConfig, TapasForQuestionAnswering # you can initialize the classification heads any way you want (see docs of TapasConfig) config = TapasConfig(num_aggregation_labels=3, average_logits_per_cell=True) # initializing the pre-trained base sized model with our custom classification heads model = TapasForQuestionAnswering.from_pretrained('google/tapas-base', config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasConfig, TapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can initialize the classification heads any way you want (see docs of TapasConfig)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig(num_aggregation_labels=<span class="hljs-number">3</span>, average_logits_per_cell=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initializing the pre-trained base sized model with our custom classification heads</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>, config=config)`},tf:{code:`from transformers import TapasConfig, TFTapasForQuestionAnswering # you can initialize the classification heads any way you want (see docs of TapasConfig) config = TapasConfig(num_aggregation_labels=3, average_logits_per_cell=True) # initializing the pre-trained base sized model with our custom classification heads model = TFTapasForQuestionAnswering.from_pretrained('google/tapas-base', config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasConfig, TFTapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can initialize the classification heads any way you want (see docs of TapasConfig)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig(num_aggregation_labels=<span class="hljs-number">3</span>, average_logits_per_cell=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initializing the pre-trained base sized model with our custom classification heads</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>model = TFTapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>, config=config)`}}}),Na=new Ur({props:{pt:{code:`from transformers import TapasTokenizer import pandas as pd model_name = 'google/tapas-base' tokenizer = TapasTokenizer.from_pretrained(model_name) data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Number of movies': ["87", "53", "69"]} queries = ["What is the name of the first actor?", "How many movies has George Clooney played in?", "What is the total number of movies?"] answer_coordinates = [[(0, 0)], [(2, 1)], [(0, 1), (1, 1), (2, 1)]] answer_text = [["Brad Pitt"], ["69"], ["209"]] table = pd.DataFrame.from_dict(data) inputs = tokenizer(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding='max_length', return_tensors='pt') inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&#x27;google/tapas-base&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;What is the name of the first actor?&quot;</span>, <span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;What is the total number of movies?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>answer_coordinates = [[(<span class="hljs-number">0</span>, <span class="hljs-number">0</span>)], [(<span class="hljs-number">2</span>, <span class="hljs-number">1</span>)], [(<span class="hljs-number">0</span>, <span class="hljs-number">1</span>), (<span class="hljs-number">1</span>, <span class="hljs-number">1</span>), (<span class="hljs-number">2</span>, <span class="hljs-number">1</span>)]] <span class="hljs-meta">&gt;&gt;&gt; </span>answer_text = [[<span class="hljs-string">&quot;Brad Pitt&quot;</span>], [<span class="hljs-string">&quot;69&quot;</span>], [<span class="hljs-string">&quot;209&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding=<span class="hljs-string">&#x27;max_length&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs {<span class="hljs-string">&#x27;input_ids&#x27;</span>: tensor([[ ... 
]]), <span class="hljs-string">&#x27;attention_mask&#x27;</span>: tensor([[...]]), <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: tensor([[[...]]]), <span class="hljs-string">&#x27;numeric_values&#x27;</span>: tensor([[ ... ]]), <span class="hljs-string">&#x27;numeric_values_scale: tensor([[ ... ]]), labels: tensor([[ ... ]])}</span>`},tf:{code:`from transformers import TapasTokenizer import pandas as pd model_name = 'google/tapas-base' tokenizer = TapasTokenizer.from_pretrained(model_name) data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Number of movies': ["87", "53", "69"]} queries = ["What is the name of the first actor?", "How many movies has George Clooney played in?", "What is the total number of movies?"] answer_coordinates = [[(0, 0)], [(2, 1)], [(0, 1), (1, 1), (2, 1)]] answer_text = [["Brad Pitt"], ["69"], ["209"]] table = pd.DataFrame.from_dict(data) inputs = tokenizer(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding='max_length', return_tensors='tf') inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&#x27;google/tapas-base&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;What is the name of the first actor?&quot;</span>, <span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;What is the total number of movies?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>answer_coordinates = [[(<span class="hljs-number">0</span>, <span class="hljs-number">0</span>)], [(<span class="hljs-number">2</span>, <span class="hljs-number">1</span>)], [(<span class="hljs-number">0</span>, <span class="hljs-number">1</span>), (<span class="hljs-number">1</span>, <span class="hljs-number">1</span>), (<span class="hljs-number">2</span>, <span class="hljs-number">1</span>)]] <span class="hljs-meta">&gt;&gt;&gt; </span>answer_text = [[<span class="hljs-string">&quot;Brad Pitt&quot;</span>], [<span class="hljs-string">&quot;69&quot;</span>], [<span class="hljs-string">&quot;209&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding=<span class="hljs-string">&#x27;max_length&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs {<span class="hljs-string">&#x27;input_ids&#x27;</span>: tensor([[ 
... ]]), <span class="hljs-string">&#x27;attention_mask&#x27;</span>: tensor([[...]]), <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: tensor([[[...]]]), <span class="hljs-string">&#x27;numeric_values&#x27;</span>: tensor([[ ... ]]), <span class="hljs-string">&#x27;numeric_values_scale: tensor([[ ... ]]), labels: tensor([[ ... ]])}</span>`}}}),La=new Ur({props:{pt:{code:`import torch import pandas as pd tsv_path = "your_path_to_the_tsv_file" table_csv_path = "your_path_to_a_directory_containing_all_csv_files" class TableDataset(torch.utils.data.Dataset): def __init__(self, data, tokenizer): self.data = data self.tokenizer = tokenizer def __getitem__(self, idx): item = data.iloc[idx] table = pd.read_csv(table_csv_path + item.table_file).astype(str) # be sure to make your table data text only encoding = self.tokenizer(table=table, queries=item.question, answer_coordinates=item.answer_coordinates, answer_text=item.answer_text, truncation=True, padding="max_length", return_tensors="pt" ) # remove the batch dimension which the tokenizer adds by default encoding = {key: val.squeeze(0) for key, val in encoding.items()} # add the float_answer which is also required (weak supervision for aggregation case) encoding["float_answer"] = torch.tensor(item.float_answer) return encoding def __len__(self): return len(self.data) data = pd.read_csv(tsv_path, sep='\\t') train_dataset = TableDataset(data, tokenizer) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=32)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tsv_path = <span class="hljs-string">&quot;your_path_to_the_tsv_file&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>table_csv_path = <span class="hljs-string">&quot;your_path_to_a_directory_containing_all_csv_files&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">TableDataset</span>(torch.utils.data.Dataset): <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params">self, data, tokenizer</span>): <span class="hljs-meta">... </span> self.data = data <span class="hljs-meta">... </span> self.tokenizer = tokenizer ... <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, idx</span>): <span class="hljs-meta">... </span> item = data.iloc[idx] <span class="hljs-meta">... </span> table = pd.read_csv(table_csv_path + item.table_file).astype(<span class="hljs-built_in">str</span>) <span class="hljs-comment"># be sure to make your table data text only</span> <span class="hljs-meta">... </span> encoding = self.tokenizer(table=table, <span class="hljs-meta">... </span> queries=item.question, <span class="hljs-meta">... </span> answer_coordinates=item.answer_coordinates, <span class="hljs-meta">... </span> answer_text=item.answer_text, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> padding=<span class="hljs-string">&quot;max_length&quot;</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... 
</span> ) <span class="hljs-meta">... </span> <span class="hljs-comment"># remove the batch dimension which the tokenizer adds by default</span> <span class="hljs-meta">... </span> encoding = {key: val.squeeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> key, val <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">... </span> <span class="hljs-comment"># add the float_answer which is also required (weak supervision for aggregation case)</span> <span class="hljs-meta">... </span> encoding[<span class="hljs-string">&quot;float_answer&quot;</span>] = torch.tensor(item.float_answer) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> encoding ... <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> <span class="hljs-built_in">len</span>(self.data) <span class="hljs-meta">&gt;&gt;&gt; </span>data = pd.read_csv(tsv_path, sep=<span class="hljs-string">&#x27;\\t&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataset = TableDataset(data, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=<span class="hljs-number">32</span>)`},tf:{code:`import tensorflow as tf import pandas as pd tsv_path = "your_path_to_the_tsv_file" table_csv_path = "your_path_to_a_directory_containing_all_csv_files" class TableDataset: def __init__(self, data, tokenizer): self.data = data self.tokenizer = tokenizer def __iter__(self): for idx in range(self.__len__()): item = self.data.iloc[idx] table = pd.read_csv(table_csv_path + item.table_file).astype(str) # be sure to make your table data text only encoding = self.tokenizer(table=table, queries=item.question, answer_coordinates=item.answer_coordinates, answer_text=item.answer_text, truncation=True, padding="max_length", return_tensors="tf" ) # remove the batch dimension which the tokenizer adds by default encoding = {key: tf.squeeze(val,0) for key, val in encoding.items()} # add the float_answer which is also required (weak supervision for aggregation case) encoding["float_answer"] = tf.convert_to_tensor(item.float_answer,dtype=tf.float32) yield encoding['input_ids'], encoding['attention_mask'], encoding['numeric_values'], \\ encoding['numeric_values_scale'], encoding['token_type_ids'], encoding['labels'], \\ encoding['float_answer'] def __len__(self): return len(self.data) data = pd.read_csv(tsv_path, sep='\\t') train_dataset = TableDataset(data, tokenizer) output_signature = ( tf.TensorSpec(shape=(512,), dtype=tf.int32), tf.TensorSpec(shape=(512,), dtype=tf.int32), tf.TensorSpec(shape=(512,), dtype=tf.float32), tf.TensorSpec(shape=(512,), dtype=tf.float32), tf.TensorSpec(shape=(512,7), dtype=tf.int32), tf.TensorSpec(shape=(512,), dtype=tf.int32), tf.TensorSpec(shape=(512,), dtype=tf.float32)) train_dataloader = tf.data.Dataset.from_generator(train_dataset, output_signature=output_signature).batch(32)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tsv_path = <span class="hljs-string">&quot;your_path_to_the_tsv_file&quot;</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>table_csv_path = <span class="hljs-string">&quot;your_path_to_a_directory_containing_all_csv_files&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">TableDataset</span>: <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params">self, data, tokenizer</span>): <span class="hljs-meta">... </span> self.data = data <span class="hljs-meta">... </span> self.tokenizer = tokenizer ... <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__iter__</span>(<span class="hljs-params">self</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> idx <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(self.__len__()): <span class="hljs-meta">... </span> item = self.data.iloc[idx] <span class="hljs-meta">... </span> table = pd.read_csv(table_csv_path + item.table_file).astype(<span class="hljs-built_in">str</span>) <span class="hljs-comment"># be sure to make your table data text only</span> <span class="hljs-meta">... </span> encoding = self.tokenizer(table=table, <span class="hljs-meta">... </span> queries=item.question, <span class="hljs-meta">... </span> answer_coordinates=item.answer_coordinates, <span class="hljs-meta">... </span> answer_text=item.answer_text, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> padding=<span class="hljs-string">&quot;max_length&quot;</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-comment"># remove the batch dimension which the tokenizer adds by default</span> <span class="hljs-meta">... </span> encoding = {key: tf.squeeze(val,<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> key, val <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">... </span> <span class="hljs-comment"># add the float_answer which is also required (weak supervision for aggregation case)</span> <span class="hljs-meta">... </span> encoding[<span class="hljs-string">&quot;float_answer&quot;</span>] = tf.convert_to_tensor(item.float_answer,dtype=tf.float32) <span class="hljs-meta">... </span> <span class="hljs-keyword">yield</span> encoding[<span class="hljs-string">&#x27;input_ids&#x27;</span>], encoding[<span class="hljs-string">&#x27;attention_mask&#x27;</span>], encoding[<span class="hljs-string">&#x27;numeric_values&#x27;</span>], \\ <span class="hljs-meta">... </span> encoding[<span class="hljs-string">&#x27;numeric_values_scale&#x27;</span>], encoding[<span class="hljs-string">&#x27;token_type_ids&#x27;</span>], encoding[<span class="hljs-string">&#x27;labels&#x27;</span>], \\ <span class="hljs-meta">... </span> encoding[<span class="hljs-string">&#x27;float_answer&#x27;</span>] ... <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> <span class="hljs-built_in">len</span>(self.data) <span class="hljs-meta">&gt;&gt;&gt; </span>data = pd.read_csv(tsv_path, sep=<span class="hljs-string">&#x27;\\t&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataset = TableDataset(data, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>output_signature = ( <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,), dtype=tf.int32), <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,), dtype=tf.int32), <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,), dtype=tf.float32), <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,), dtype=tf.float32), <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,<span class="hljs-number">7</span>), dtype=tf.int32), <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,), dtype=tf.int32), <span class="hljs-meta">... </span>tf.TensorSpec(shape=(<span class="hljs-number">512</span>,), dtype=tf.float32)) <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader = tf.data.Dataset.from_generator(train_dataset, output_signature=output_signature).batch(<span class="hljs-number">32</span>)`}}}),Ia=new Ur({props:{pt:{code:`from transformers import TapasConfig, TapasForQuestionAnswering, AdamW # this is the default WTQ configuration config = TapasConfig( num_aggregation_labels = 4, use_answer_as_supervision = True, answer_loss_cutoff = 0.664694, cell_selection_preference = 0.207951, huber_loss_delta = 0.121194, init_cell_selection_weights_to_zero = True, select_one_column = True, allow_empty_column_selection = False, temperature = 0.0352513, ) model = TapasForQuestionAnswering.from_pretrained("google/tapas-base", config=config) optimizer = AdamW(model.parameters(), lr=5e-5) model.train() for epoch in range(2): # loop over the dataset multiple times for batch in train_dataloader: # get the inputs; input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] token_type_ids = batch["token_type_ids"] labels = batch["labels"] numeric_values = batch["numeric_values"] numeric_values_scale = batch["numeric_values_scale"] float_answer = batch["float_answer"] # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, float_answer=float_answer) loss = outputs.loss loss.backward() optimizer.step()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasConfig, TapasForQuestionAnswering, AdamW <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># this is the default WTQ configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig( <span class="hljs-meta">... </span> num_aggregation_labels = <span class="hljs-number">4</span>, <span class="hljs-meta">... </span> use_answer_as_supervision = <span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> answer_loss_cutoff = <span class="hljs-number">0.664694</span>, <span class="hljs-meta">... 
</span> cell_selection_preference = <span class="hljs-number">0.207951</span>, <span class="hljs-meta">... </span> huber_loss_delta = <span class="hljs-number">0.121194</span>, <span class="hljs-meta">... </span> init_cell_selection_weights_to_zero = <span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> select_one_column = <span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> allow_empty_column_selection = <span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> temperature = <span class="hljs-number">0.0352513</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;google/tapas-base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamW(model.parameters(), lr=<span class="hljs-number">5e-5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">2</span>): <span class="hljs-comment"># loop over the dataset multiple times</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> <span class="hljs-comment"># get the inputs; </span> <span class="hljs-meta">... </span> input_ids = batch[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... </span> attention_mask = batch[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">... </span> token_type_ids = batch[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">... </span> labels = batch[<span class="hljs-string">&quot;labels&quot;</span>] <span class="hljs-meta">... </span> numeric_values = batch[<span class="hljs-string">&quot;numeric_values&quot;</span>] <span class="hljs-meta">... </span> numeric_values_scale = batch[<span class="hljs-string">&quot;numeric_values_scale&quot;</span>] <span class="hljs-meta">... </span> float_answer = batch[<span class="hljs-string">&quot;float_answer&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-comment"># zero the parameter gradients</span> <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... </span> <span class="hljs-comment"># forward + backward + optimize</span> <span class="hljs-meta">... </span> outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... </span> labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, <span class="hljs-meta">... </span> float_answer=float_answer) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> loss.backward() <span class="hljs-meta">... 
</span> optimizer.step()`},tf:{code:`import tensorflow as tf from transformers import TapasConfig, TFTapasForQuestionAnswering # this is the default WTQ configuration config = TapasConfig( num_aggregation_labels = 4, use_answer_as_supervision = True, answer_loss_cutoff = 0.664694, cell_selection_preference = 0.207951, huber_loss_delta = 0.121194, init_cell_selection_weights_to_zero = True, select_one_column = True, allow_empty_column_selection = False, temperature = 0.0352513, ) model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base", config=config) optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5) for epoch in range(2): # loop over the dataset multiple times for batch in train_dataloader: # get the inputs; input_ids = batch[0] attention_mask = batch[1] token_type_ids = batch[4] labels = batch[-1] numeric_values = batch[2] numeric_values_scale = batch[3] float_answer = batch[6] # forward + backward + optimize with tf.GradientTape() as tape: outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, float_answer=float_answer ) grads = tape.gradient(outputs.loss, model.trainable_weights) optimizer.apply_gradients(zip(grads, model.trainable_weights))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasConfig, TFTapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># this is the default WTQ configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = TapasConfig( <span class="hljs-meta">... </span> num_aggregation_labels = <span class="hljs-number">4</span>, <span class="hljs-meta">... </span> use_answer_as_supervision = <span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> answer_loss_cutoff = <span class="hljs-number">0.664694</span>, <span class="hljs-meta">... </span> cell_selection_preference = <span class="hljs-number">0.207951</span>, <span class="hljs-meta">... </span> huber_loss_delta = <span class="hljs-number">0.121194</span>, <span class="hljs-meta">... </span> init_cell_selection_weights_to_zero = <span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> select_one_column = <span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> allow_empty_column_selection = <span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> temperature = <span class="hljs-number">0.0352513</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;google/tapas-base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = tf.keras.optimizers.Adam(learning_rate=<span class="hljs-number">5e-5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">2</span>): <span class="hljs-comment"># loop over the dataset multiple times</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... 
</span> <span class="hljs-comment"># get the inputs; </span> <span class="hljs-meta">... </span> input_ids = batch[<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> attention_mask = batch[<span class="hljs-number">1</span>] <span class="hljs-meta">... </span> token_type_ids = batch[<span class="hljs-number">4</span>] <span class="hljs-meta">... </span> labels = batch[-<span class="hljs-number">1</span>] <span class="hljs-meta">... </span> numeric_values = batch[<span class="hljs-number">2</span>] <span class="hljs-meta">... </span> numeric_values_scale = batch[<span class="hljs-number">3</span>] <span class="hljs-meta">... </span> float_answer = batch[<span class="hljs-number">6</span>] <span class="hljs-meta">... </span> <span class="hljs-comment"># forward + backward + optimize</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> tf.GradientTape() <span class="hljs-keyword">as</span> tape: <span class="hljs-meta">... </span> outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... </span> labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, <span class="hljs-meta">... </span> float_answer=float_answer ) <span class="hljs-meta">... </span> grads = tape.gradient(outputs.loss, model.trainable_weights) <span class="hljs-meta">... </span> optimizer.apply_gradients(<span class="hljs-built_in">zip</span>(grads, model.trainable_weights))`}}}),Wa=new ss({}),Ua=new Ur({props:{pt:{code:`from transformers import TapasTokenizer, TapasForQuestionAnswering import pandas as pd model_name = 'google/tapas-base-finetuned-wtq' model = TapasForQuestionAnswering.from_pretrained(model_name) tokenizer = TapasTokenizer.from_pretrained(model_name) data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Number of movies': ["87", "53", "69"]} queries = ["What is the name of the first actor?", "How many movies has George Clooney played in?", "What is the total number of movies?"] table = pd.DataFrame.from_dict(data) inputs = tokenizer(table=table, queries=queries, padding='max_length', return_tensors="pt") outputs = model(**inputs) predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits.detach(), outputs.logits_aggregation.detach() ) # let's print out the results: id2aggregation = {0: "NONE", 1: "SUM", 2: "AVERAGE", 3:"COUNT"} aggregation_predictions_string = [id2aggregation[x] for x in predicted_aggregation_indices] answers = [] for coordinates in predicted_answer_coordinates: if len(coordinates) == 1: # only a single cell: answers.append(table.iat[coordinates[0]]) else: # multiple cells cell_values = [] for coordinate in coordinates: cell_values.append(table.iat[coordinate]) answers.append(", ".join(cell_values)) display(table) print("") for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string): print(query) if predicted_agg == "NONE": print("Predicted answer: " + answer) else: print("Predicted answer: " + predicted_agg + " > " + answer) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span 
class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;What is the name of the first actor?&quot;</span>, <span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;What is the total number of movies?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&#x27;max_length&#x27;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( <span class="hljs-meta">... </span> inputs, <span class="hljs-meta">... </span> outputs.logits.detach(), <span class="hljs-meta">... </span> outputs.logits_aggregation.detach() <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># let&#x27;s print out the results:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>id2aggregation = {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;NONE&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;SUM&quot;</span>, <span class="hljs-number">2</span>: <span class="hljs-string">&quot;AVERAGE&quot;</span>, <span class="hljs-number">3</span>:<span class="hljs-string">&quot;COUNT&quot;</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>aggregation_predictions_string = [id2aggregation[x] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> predicted_aggregation_indices] <span class="hljs-meta">&gt;&gt;&gt; </span>answers = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> coordinates <span class="hljs-keyword">in</span> predicted_answer_coordinates: <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> <span class="hljs-built_in">len</span>(coordinates) == <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># only a single cell:</span> <span class="hljs-meta">... </span> answers.append(table.iat[coordinates[<span class="hljs-number">0</span>]]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># multiple cells</span> <span class="hljs-meta">... </span> cell_values = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> coordinate <span class="hljs-keyword">in</span> coordinates: <span class="hljs-meta">... 
</span> cell_values.append(table.iat[coordinate]) <span class="hljs-meta">... </span> answers.append(<span class="hljs-string">&quot;, &quot;</span>.join(cell_values)) <span class="hljs-meta">&gt;&gt;&gt; </span>display(table) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> query, answer, predicted_agg <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(queries, answers, aggregation_predictions_string): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(query) <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> predicted_agg == <span class="hljs-string">&quot;NONE&quot;</span>: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted answer: &quot;</span> + answer) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted answer: &quot;</span> + predicted_agg + <span class="hljs-string">&quot; &gt; &quot;</span> + answer) What <span class="hljs-keyword">is</span> the name of the first actor? Predicted answer: Brad Pitt How many movies has George Clooney played <span class="hljs-keyword">in</span>? Predicted answer: COUNT &gt; <span class="hljs-number">69</span> What <span class="hljs-keyword">is</span> the total number of movies? Predicted answer: SUM &gt; <span class="hljs-number">87</span>, <span class="hljs-number">53</span>, <span class="hljs-number">69</span>`},tf:{code:`from transformers import TapasTokenizer, TFTapasForQuestionAnswering import pandas as pd model_name = 'google/tapas-base-finetuned-wtq' model = TFTapasForQuestionAnswering.from_pretrained(model_name) tokenizer = TapasTokenizer.from_pretrained(model_name) data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Number of movies': ["87", "53", "69"]} queries = ["What is the name of the first actor?", "How many movies has George Clooney played in?", "What is the total number of movies?"] table = pd.DataFrame.from_dict(data) inputs = tokenizer(table=table, queries=queries, padding='max_length', return_tensors="tf") outputs = model(**inputs) predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits, outputs.logits_aggregation ) # let's print out the results: id2aggregation = {0: "NONE", 1: "SUM", 2: "AVERAGE", 3:"COUNT"} aggregation_predictions_string = [id2aggregation[x] for x in predicted_aggregation_indices] answers = [] for coordinates in predicted_answer_coordinates: if len(coordinates) == 1: # only a single cell: answers.append(table.iat[coordinates[0]]) else: # multiple cells cell_values = [] for coordinate in coordinates: cell_values.append(table.iat[coordinate]) answers.append(", ".join(cell_values)) display(table) print("") for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string): print(query) if predicted_agg == "NONE": print("Predicted answer: " + answer) else: print("Predicted answer: " + predicted_agg + " > " + answer) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TFTapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFTapasForQuestionAnswering.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;What is the name of the first actor?&quot;</span>, <span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;What is the total number of movies?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&#x27;max_length&#x27;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( <span class="hljs-meta">... </span> inputs, <span class="hljs-meta">... </span> outputs.logits, <span class="hljs-meta">... </span> outputs.logits_aggregation <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># let&#x27;s print out the results:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>id2aggregation = {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;NONE&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;SUM&quot;</span>, <span class="hljs-number">2</span>: <span class="hljs-string">&quot;AVERAGE&quot;</span>, <span class="hljs-number">3</span>:<span class="hljs-string">&quot;COUNT&quot;</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>aggregation_predictions_string = [id2aggregation[x] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> predicted_aggregation_indices] <span class="hljs-meta">&gt;&gt;&gt; </span>answers = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> coordinates <span class="hljs-keyword">in</span> predicted_answer_coordinates: <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> <span class="hljs-built_in">len</span>(coordinates) == <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># only a single cell:</span> <span class="hljs-meta">... </span> answers.append(table.iat[coordinates[<span class="hljs-number">0</span>]]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># multiple cells</span> <span class="hljs-meta">... </span> cell_values = [] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">for</span> coordinate <span class="hljs-keyword">in</span> coordinates: <span class="hljs-meta">... </span> cell_values.append(table.iat[coordinate]) <span class="hljs-meta">... </span> answers.append(<span class="hljs-string">&quot;, &quot;</span>.join(cell_values)) <span class="hljs-meta">&gt;&gt;&gt; </span>display(table) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> query, answer, predicted_agg <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(queries, answers, aggregation_predictions_string): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(query) <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> predicted_agg == <span class="hljs-string">&quot;NONE&quot;</span>: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted answer: &quot;</span> + answer) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted answer: &quot;</span> + predicted_agg + <span class="hljs-string">&quot; &gt; &quot;</span> + answer) What <span class="hljs-keyword">is</span> the name of the first actor? Predicted answer: Brad Pitt How many movies has George Clooney played <span class="hljs-keyword">in</span>? Predicted answer: COUNT &gt; <span class="hljs-number">69</span> What <span class="hljs-keyword">is</span> the total number of movies? Predicted answer: SUM &gt; <span class="hljs-number">87</span>, <span class="hljs-number">53</span>, <span class="hljs-number">69</span>`}}}),Ga=new ss({}),Ra=new pe({props:{name:"class transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput",anchor:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"logits_aggregation",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L106",parametersDescription:[{anchor:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> (and possibly <code>answer</code>, <code>aggregation_labels</code>, <code>numeric_values</code> and <code>numeric_values_scale</code> are provided)) &#x2014; Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations.`,name:"loss"},{anchor:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Prediction scores of the cell selection head, for every 
token.`,name:"logits"},{anchor:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput.logits_aggregation",description:`<strong>logits_aggregation</strong> (<code>torch.FloatTensor</code>, <em>optional</em>, of shape <code>(batch_size, num_aggregation_labels)</code>) &#x2014; Prediction scores of the aggregation head, for every aggregation operator.`,name:"logits_aggregation"},{anchor:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ka=new ss({}),Ya=new pe({props:{name:"class transformers.TapasConfig",anchor:"transformers.TapasConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 1024"},{name:"type_vocab_sizes",val:" = [3, 256, 256, 2, 256, 256, 10]"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"positive_label_weight",val:" = 10.0"},{name:"num_aggregation_labels",val:" = 0"},{name:"aggregation_loss_weight",val:" = 1.0"},{name:"use_answer_as_supervision",val:" = None"},{name:"answer_loss_importance",val:" = 1.0"},{name:"use_normalized_answer_loss",val:" = False"},{name:"huber_loss_delta",val:" = None"},{name:"temperature",val:" = 1.0"},{name:"aggregation_temperature",val:" = 1.0"},{name:"use_gumbel_for_cells",val:" = False"},{name:"use_gumbel_for_aggregation",val:" = False"},{name:"average_approximation_function",val:" = 'ratio'"},{name:"cell_selection_preference",val:" = None"},{name:"answer_loss_cutoff",val:" = None"},{name:"max_num_rows",val:" = 64"},{name:"max_num_columns",val:" = 32"},{name:"average_logits_per_cell",val:" = False"},{name:"select_one_column",val:" = True"},{name:"allow_empty_column_selection",val:" = False"},{name:"init_cell_selection_weights_to_zero",val:" = False"},{name:"reset_position_index_per_cell",val:" = True"},{name:"disable_per_token_loss",val:" = False"},{name:"aggregation_labels",val:" = None"},{name:"no_aggregation_label_index",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/configuration_tapas.py#L37",parametersDescription:[{anchor:"transformers.TapasConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the TAPAS model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasModel">TapasModel</a>.`,name:"vocab_size"},{anchor:"transformers.TapasConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.TapasConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.TapasConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.TapasConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.TapasConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;swish&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.TapasConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.TapasConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.TapasConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.TapasConfig.type_vocab_sizes",description:`<strong>type_vocab_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[3, 256, 256, 2, 256, 256, 10]</code>) &#x2014; The vocabulary sizes of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasModel">TapasModel</a>.`,name:"type_vocab_sizes"},{anchor:"transformers.TapasConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.TapasConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.TapasConfig.positive_label_weight",description:`<strong>positive_label_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 10.0) &#x2014; Weight for positive labels.`,name:"positive_label_weight"},{anchor:"transformers.TapasConfig.num_aggregation_labels",description:`<strong>num_aggregation_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The number of aggregation operators to predict.`,name:"num_aggregation_labels"},{anchor:"transformers.TapasConfig.aggregation_loss_weight",description:`<strong>aggregation_loss_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Importance weight for the aggregation loss.`,name:"aggregation_loss_weight"},{anchor:"transformers.TapasConfig.use_answer_as_supervision",description:`<strong>use_answer_as_supervision</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to use the answer as the only supervision for aggregation examples.`,name:"use_answer_as_supervision"},{anchor:"transformers.TapasConfig.answer_loss_importance",description:`<strong>answer_loss_importance</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Importance weight for the regression loss.`,name:"answer_loss_importance"},{anchor:"transformers.TapasConfig.use_normalized_answer_loss",description:`<strong>use_normalized_answer_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to normalize the answer loss by the maximum of the predicted and expected value.`,name:"use_normalized_answer_loss"},{anchor:"transformers.TapasConfig.huber_loss_delta",description:`<strong>huber_loss_delta</strong> (<code>float</code>, <em>optional</em>) &#x2014; Delta parameter used to calculate the regression loss.`,name:"huber_loss_delta"},{anchor:"transformers.TapasConfig.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Value used to control (OR change) the skewness of cell logits probabilities.`,name:"temperature"},{anchor:"transformers.TapasConfig.aggregation_temperature",description:`<strong>aggregation_temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Scales aggregation logits to control the skewness of probabilities.`,name:"aggregation_temperature"},{anchor:"transformers.TapasConfig.use_gumbel_for_cells",description:`<strong>use_gumbel_for_cells</strong> (<code>bool</code>, <em>optional</em>, 
defaults to <code>False</code>) &#x2014; Whether to apply Gumbel-Softmax to cell selection.`,name:"use_gumbel_for_cells"},{anchor:"transformers.TapasConfig.use_gumbel_for_aggregation",description:`<strong>use_gumbel_for_aggregation</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply Gumbel-Softmax to aggregation selection.`,name:"use_gumbel_for_aggregation"},{anchor:"transformers.TapasConfig.average_approximation_function",description:`<strong>average_approximation_function</strong> (<code>string</code>, <em>optional</em>, defaults to <code>&quot;ratio&quot;</code>) &#x2014; Method to calculate the expected average of cells in the weak supervision case. One of <code>&quot;ratio&quot;</code>, <code>&quot;first_order&quot;</code> or <code>&quot;second_order&quot;</code>.`,name:"average_approximation_function"},{anchor:"transformers.TapasConfig.cell_selection_preference",description:`<strong>cell_selection_preference</strong> (<code>float</code>, <em>optional</em>) &#x2014; Preference for cell selection in ambiguous cases. Only applicable in case of weak supervision for aggregation (WTQ, WikiSQL). If the total mass of the aggregation probabilities (excluding the &#x201C;NONE&#x201D; operator) is higher than this hyperparameter, then aggregation is predicted for an example.`,name:"cell_selection_preference"},{anchor:"transformers.TapasConfig.answer_loss_cutoff",description:`<strong>answer_loss_cutoff</strong> (<code>float</code>, <em>optional</em>) &#x2014; Ignore examples with answer loss larger than cutoff.`,name:"answer_loss_cutoff"},{anchor:"transformers.TapasConfig.max_num_rows",description:`<strong>max_num_rows</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Maximum number of rows.`,name:"max_num_rows"},{anchor:"transformers.TapasConfig.max_num_columns",description:`<strong>max_num_columns</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Maximum number of columns.`,name:"max_num_columns"},{anchor:"transformers.TapasConfig.average_logits_per_cell",description:`<strong>average_logits_per_cell</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to average logits per cell.`,name:"average_logits_per_cell"},{anchor:"transformers.TapasConfig.select_one_column",description:`<strong>select_one_column</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to constrain the model to only select cells from a single column.`,name:"select_one_column"},{anchor:"transformers.TapasConfig.allow_empty_column_selection",description:`<strong>allow_empty_column_selection</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to allow not to select any column.`,name:"allow_empty_column_selection"},{anchor:"transformers.TapasConfig.init_cell_selection_weights_to_zero",description:`<strong>init_cell_selection_weights_to_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to initialize cell selection weights to 0 so that the initial probabilities are 50%.`,name:"init_cell_selection_weights_to_zero"},{anchor:"transformers.TapasConfig.reset_position_index_per_cell",description:`<strong>reset_position_index_per_cell</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to restart position indexes at every cell (i.e. 
use relative position embeddings).`,name:"reset_position_index_per_cell"},{anchor:"transformers.TapasConfig.disable_per_token_loss",description:`<strong>disable_per_token_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to disable any (strong or weak) supervision on cells.`,name:"disable_per_token_loss"},{anchor:"transformers.TapasConfig.aggregation_labels",description:`<strong>aggregation_labels</strong> (<code>Dict[int, label]</code>, <em>optional</em>) &#x2014; The aggregation labels used to aggregate the results. For example, the WTQ models have the following aggregation labels: <code>{0: &quot;NONE&quot;, 1: &quot;SUM&quot;, 2: &quot;AVERAGE&quot;, 3: &quot;COUNT&quot;}</code>`,name:"aggregation_labels"},{anchor:"transformers.TapasConfig.no_aggregation_label_index",description:`<strong>no_aggregation_label_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If the aggregation labels are defined and one of these labels represents &#x201C;No aggregation&#x201D;, this should be set to its index. For example, the WTQ models have the &#x201C;NONE&#x201D; aggregation label at index 0, so that value should be set to 0 for these models.`,name:"no_aggregation_label_index"}]}}),Za=new xt({props:{code:`from transformers import TapasModel, TapasConfig # Initializing a default (SQA) Tapas configuration configuration = TapasConfig() # Initializing a model from the configuration model = TapasModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasModel, TapasConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a default (SQA) Tapas configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = TapasConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),en=new ss({}),sn=new pe({props:{name:"class transformers.TapasTokenizer",anchor:"transformers.TapasTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"empty_token",val:" = '[EMPTY]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"cell_trim_length",val:": int = -1"},{name:"max_column_id",val:": int = None"},{name:"max_row_id",val:": int = None"},{name:"strip_column_names",val:": bool = False"},{name:"update_answer_coordinates",val:": bool = False"},{name:"min_question_length",val:" = None"},{name:"max_question_length",val:" = None"},{name:"model_max_length",val:": int = 512"},{name:"additional_special_tokens",val:": typing.Optional[typing.List[str]] = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/tokenization_tapas.py#L190",parametersDescription:[{anchor:"transformers.TapasTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.TapasTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.TapasTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.TapasTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.TapasTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.TapasTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.TapasTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.TapasTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.TapasTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.TapasTokenizer.empty_token",description:`<strong>empty_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[EMPTY]&quot;</code>) &#x2014; The token used for empty cell values in a table. 
Empty cell values include &quot;&quot;, &#x201C;n/a&#x201D;, &#x201C;nan&#x201D; and &#x201D;?&#x201C;.`,name:"empty_token"},{anchor:"transformers.TapasTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"},{anchor:"transformers.TapasTokenizer.cell_trim_length",description:`<strong>cell_trim_length</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If &gt; 0: Trim cells so that the length is &lt;= this value. Also disables further cell trimming, should thus be used with <code>truncation</code> set to <code>True</code>.`,name:"cell_trim_length"},{anchor:"transformers.TapasTokenizer.max_column_id",description:`<strong>max_column_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; Max column id to extract.`,name:"max_column_id"},{anchor:"transformers.TapasTokenizer.max_row_id",description:`<strong>max_row_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; Max row id to extract.`,name:"max_row_id"},{anchor:"transformers.TapasTokenizer.strip_column_names",description:`<strong>strip_column_names</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to add empty strings instead of column names.`,name:"strip_column_names"},{anchor:"transformers.TapasTokenizer.update_answer_coordinates",description:`<strong>update_answer_coordinates</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to recompute the answer coordinates from the answer text.`,name:"update_answer_coordinates"},{anchor:"transformers.TapasTokenizer.min_question_length",description:`<strong>min_question_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Minimum length of each question in terms of tokens (will be skipped otherwise).`,name:"min_question_length"},{anchor:"transformers.TapasTokenizer.max_question_length",description:`<strong>max_question_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of each question in terms of tokens (will be skipped otherwise).`,name:"max_question_length"}]}}),tn=new pe({props:{name:"__call__",anchor:"transformers.TapasTokenizer.__call__",parameters:[{name:"table",val:": pd.DataFrame"},{name:"queries",val:": typing.Union[str, typing.List[str], typing.List[int], typing.List[typing.List[str]], typing.List[typing.List[int]], NoneType] = None"},{name:"answer_coordinates",val:": typing.Union[typing.List[typing.Tuple], typing.List[typing.List[typing.Tuple]], NoneType] = None"},{name:"answer_text",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.models.tapas.tokenization_tapas.TapasTruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = 
None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/tokenization_tapas.py#L531",parametersDescription:[{anchor:"transformers.TapasTokenizer.__call__.table",description:`<strong>table</strong> (<code>pd.DataFrame</code>) &#x2014; Table containing tabular data. Note that all cell values must be text. Use <em>.astype(str)</em> on a Pandas dataframe to convert it to string.`,name:"table"},{anchor:"transformers.TapasTokenizer.__call__.queries",description:`<strong>queries</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Question or batch of questions related to a table to be encoded. Note that in case of a batch, all questions must refer to the <strong>same</strong> table.`,name:"queries"},{anchor:"transformers.TapasTokenizer.__call__.answer_coordinates",description:`<strong>answer_coordinates</strong> (<code>List[Tuple]</code> or <code>List[List[Tuple]]</code>, <em>optional</em>) &#x2014; Answer coordinates of each table-question pair in the batch. In case only a single table-question pair is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. In case a batch of table-question pairs is provided, then the answer_coordinates must be a list of lists of tuples (each list corresponding to a single table-question pair).`,name:"answer_coordinates"},{anchor:"transformers.TapasTokenizer.__call__.answer_text",description:`<strong>answer_text</strong> (<code>List[str]</code> or <code>List[List[str]]</code>, <em>optional</em>) &#x2014; Answer text of each table-question pair in the batch. In case only a single table-question pair is provided, then the answer_text must be a single list of one or more strings. Each string must be the answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a single table-question pair).`,name:"answer_text"},{anchor:"transformers.TapasTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.TapasTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.TapasTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <code>TapasTruncationStrategy</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;drop_rows_to_fit&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate row by row, removing rows from the table.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.TapasTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.TapasTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.TapasTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.TapasTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}]}}),an=new pe({props:{name:"convert_logits_to_predictions",anchor:"transformers.TapasTokenizer.convert_logits_to_predictions",parameters:[{name:"data",val:""},{name:"logits",val:""},{name:"logits_agg",val:" = None"},{name:"cell_classification_threshold",val:" = 0.5"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/tokenization_tapas.py#L1891",parametersDescription:[{anchor:"transformers.TapasTokenizer.convert_logits_to_predictions.data",description:`<strong>data</strong> (<code>dict</code>) &#x2014; Dictionary mapping features to actual values. Should be created using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>.`,name:"data"},{anchor:"transformers.TapasTokenizer.convert_logits_to_predictions.logits",description:`<strong>logits</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Tensor containing the logits at the token level.`,name:"logits"},{anchor:"transformers.TapasTokenizer.convert_logits_to_predictions.logits_agg",description:`<strong>logits_agg</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_aggregation_labels)</code>, <em>optional</em>) &#x2014; Tensor containing the aggregation logits.`,name:"logits_agg"},{anchor:"transformers.TapasTokenizer.convert_logits_to_predictions.cell_classification_threshold",description:`<strong>cell_classification_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Threshold to be used for cell selection. All table cells for which their probability is larger than this threshold will be selected.`,name:"cell_classification_threshold"}],returnDescription:` <ul> <li>predicted_answer_coordinates (<code>List[List[[tuple]]</code> of length <code>batch_size</code>): Predicted answer coordinates as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index).</li> <li>predicted_aggregation_indices (<code>List[int]</code>of length <code>batch_size</code>, <em>optional</em>, returned when <code>logits_aggregation</code> is provided): Predicted aggregation operator indices of the aggregation head.</li> </ul> `,returnType:` <p><code>tuple</code> comprising various elements depending on the inputs</p> `}}),ln=new ss({}),dn=new pe({props:{name:"class transformers.TapasModel",anchor:"transformers.TapasModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L857",parametersDescription:[{anchor:"transformers.TapasModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gn=new pe({props:{name:"forward",anchor:"transformers.TapasModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L896",parametersDescription:[{anchor:"transformers.TapasModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TapasModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TapasModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TapasModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TapasModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: - 1 indicates the head is <strong>not masked</strong>, - 0 indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.TapasModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TapasModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TapasModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TapasModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ht=new Fs({props:{$$slots:{default:[O0]},$$scope:{ctx:U}}}),_n=new xt({props:{code:`from transformers import TapasTokenizer, TapasModel import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base') model = TapasModel.from_pretrained('google/tapas-base') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasModel.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;How old is Brad Pitt?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Tn=new ss({}),vn=new pe({props:{name:"class transformers.TapasForMaskedLM",anchor:"transformers.TapasForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L1010",parametersDescription:[{anchor:"transformers.TapasForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yn=new pe({props:{name:"forward",anchor:"transformers.TapasForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L1029",parametersDescription:[{anchor:"transformers.TapasForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TapasForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TapasForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TapasForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TapasForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: - 1 indicates the head is <strong>not masked</strong>, - 0 indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.TapasForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TapasForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TapasForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TapasForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TapasForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Gt=new Fs({props:{$$slots:{default:[I0]},$$scope:{ctx:U}}}),kn=new xt({props:{code:`from transformers import TapasTokenizer, TapasForMaskedLM import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base') model = TapasForMaskedLM.from_pretrained('google/tapas-base') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", 
"59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) inputs = tokenizer(table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt") labels = tokenizer(table=table, queries="How many movies has George Clooney played in?", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=<span class="hljs-string">&quot;How many [MASK] has George [MASK] played in?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(table=table, queries=<span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),jn=new ss({}),qn=new pe({props:{name:"class transformers.TapasForSequenceClassification",anchor:"transformers.TapasForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L1462",parametersDescription:[{anchor:"transformers.TapasForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),An=new pe({props:{name:"forward",anchor:"transformers.TapasForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L1474",parametersDescription:[{anchor:"transformers.TapasForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TapasForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TapasForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TapasForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TapasForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: - 1 indicates the head is <strong>not masked</strong>, - 0 indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.TapasForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TapasForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TapasForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TapasForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TapasForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy). 
Note: this is called &#x201C;classification_class_index&#x201D; in the original implementation.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Vt=new Fs({props:{$$slots:{default:[W0]},$$scope:{ctx:U}}}),zn=new xt({props:{code:`from transformers import TapasTokenizer, TapasForSequenceClassification import torch import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base-finetuned-tabfact') model = TapasForSequenceClassification.from_pretrained('google/tapas-base-finetuned-tabfact') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) queries = ["There is only one actor who is 45 years old", "There are 3 actors which played in more than 60 movies"] inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-tabfact&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-tabfact&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;There is only one actor who is 45 years old&quot;</span>, <span class="hljs-string">&quot;There are 3 actors which played in more than 60 movies&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>, <span class="hljs-number">0</span>]) <span class="hljs-comment"># 1 means entailed, 0 means refuted</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$n=new ss({}),Cn=new pe({props:{name:"class transformers.TapasForQuestionAnswering",anchor:"transformers.TapasForQuestionAnswering",parameters:[{name:"config",val:": TapasConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L1119",parametersDescription:[{anchor:"transformers.TapasForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Dn=new pe({props:{name:"forward",anchor:"transformers.TapasForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"table_mask",val:" = None"},{name:"labels",val:" = None"},{name:"aggregation_labels",val:" = None"},{name:"float_answer",val:" = None"},{name:"numeric_values",val:" = None"},{name:"numeric_values_scale",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tapas.py#L1154",parametersDescription:[{anchor:"transformers.TapasForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TapasForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TapasForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TapasForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TapasForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: - 1 indicates the head is <strong>not masked</strong>, - 0 indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.TapasForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TapasForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TapasForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TapasForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TapasForQuestionAnswering.forward.table_mask",description:`<strong>table_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0.`,name:"table_mask"},{anchor:"transformers.TapasForQuestionAnswering.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>.</p> <ul> <li>1 for tokens that are <strong>part of the answer</strong>,</li> <li>0 for tokens that are <strong>not part of the answer</strong>.</li> </ul>`,name:"labels"},{anchor:"transformers.TapasForQuestionAnswering.forward.aggregation_labels",description:`<strong>aggregation_labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>) &#x2014; Aggregation function index for every example in the batch for computing the aggregation loss. 
Indices should be in <code>[0, ..., config.num_aggregation_labels - 1]</code>. Only required in case of strong supervision for aggregation (WikiSQL-supervised).`,name:"aggregation_labels"},{anchor:"transformers.TapasForQuestionAnswering.forward.float_answer",description:`<strong>float_answer</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>) &#x2014; Float answer for every example in the batch. Set to <em>float(&#x2018;nan&#x2019;)</em> for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.`,name:"float_answer"},{anchor:"transformers.TapasForQuestionAnswering.forward.numeric_values",description:`<strong>numeric_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss.`,name:"numeric_values"},{anchor:"transformers.TapasForQuestionAnswering.forward.numeric_values_scale",description:`<strong>numeric_values_scale</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Scale of the numeric values of every token. Can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss.`,name:"numeric_values_scale"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput" >transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> (and possibly <code>answer</code>, <code>aggregation_labels</code>, <code>numeric_values</code> and <code>numeric_values_scale</code> are provided)) \u2014 Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Prediction scores of the cell selection head, for every token.</li> <li><strong>logits_aggregation</strong> (<code>torch.FloatTensor</code>, <em>optional</em>, of shape <code>(batch_size, num_aggregation_labels)</code>) \u2014 Prediction scores of the aggregation head, for every aggregation operator.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput" >transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Yt=new Fs({props:{$$slots:{default:[U0]},$$scope:{ctx:U}}}),Nn=new xt({props:{code:`from transformers import TapasTokenizer, TapasForQuestionAnswering import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base-finetuned-wtq') model = TapasForQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits logits_aggregation = outputs.logits_aggregation,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;How old is Brad Pitt?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>logits_aggregation = outputs.logits_aggregation`}}),Ln=new ss({}),Qn=new pe({props:{name:"class transformers.TFTapasModel",anchor:"transformers.TFTapasModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L977",parametersDescription:[{anchor:"transformers.TFTapasModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xt=new Fs({props:{$$slots:{default:[H0]},$$scope:{ctx:U}}}),Un=new pe({props:{name:"call",anchor:"transformers.TFTapasModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L983",parametersDescription:[{anchor:"transformers.TFTapasModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, 
sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTapasModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFTapasModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFTapasModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFTapasModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTapasModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTapasModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTapasModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTapasModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTapasModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),Zt=new Fs({props:{$$slots:{default:[B0]},$$scope:{ctx:U}}}),Hn=new xt({props:{code:`from transformers import TapasTokenizer, TapasModel import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base') model = TapasModel.from_pretrained('google/tapas-base') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasModel.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... 
</span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;How old is Brad Pitt?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Bn=new ss({}),Gn=new pe({props:{name:"class transformers.TFTapasForMaskedLM",anchor:"transformers.TFTapasForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L1066",parametersDescription:[{anchor:"transformers.TFTapasForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),sa=new Fs({props:{$$slots:{default:[G0]},$$scope:{ctx:U}}}),Jn=new pe({props:{name:"call",anchor:"transformers.TFTapasForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L1082",parametersDescription:[{anchor:"transformers.TFTapasForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTapasForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFTapasForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFTapasForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFTapasForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTapasForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTapasForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTapasForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTapasForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTapasForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFTapasForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ta=new Fs({props:{$$slots:{default:[R0]},$$scope:{ctx:U}}}),Xn=new xt({props:{code:`from transformers import TapasTokenizer, TapasForMaskedLM import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base') model = TapasForMaskedLM.from_pretrained('google/tapas-base') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) inputs = tokenizer(table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="tf") labels = tokenizer(table=table, queries="How many movies has George Clooney played in?", return_tensors="tf")["input_ids"] outputs = model(**inputs, labels=labels) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=<span class="hljs-string">&quot;How many [MASK] has George [MASK] played in?&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(table=table, queries=<span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Zn=new ss({}),eo=new pe({props:{name:"class transformers.TFTapasForSequenceClassification",anchor:"transformers.TFTapasForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L1640",parametersDescription:[{anchor:"transformers.TFTapasForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),na=new Fs({props:{$$slots:{default:[V0]},$$scope:{ctx:U}}}),no=new pe({props:{name:"call",anchor:"transformers.TFTapasForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L1651",parametersDescription:[{anchor:"transformers.TFTapasForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTapasForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFTapasForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFTapasForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFTapasForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTapasForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTapasForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTapasForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTapasForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTapasForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFTapasForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy). Note: this is called &#x201C;classification_class_index&#x201D; in the original implementation.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),oa=new Fs({props:{$$slots:{default:[K0]},$$scope:{ctx:U}}}),oo=new xt({props:{code:`from transformers import TapasTokenizer, TapasForSequenceClassification import tensorflow as tf import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base-finetuned-tabfact') model = TapasForSequenceClassification.from_pretrained('google/tapas-base-finetuned-tabfact') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) queries = ["There is only one actor who is 45 years old", "There are 3 actors which played in more than 60 movies"] inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") labels = tf.convert_to_tensor([1, 0]) # 1 means entailed, 0 means refuted outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-tabfact&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-tabfact&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;There is only one actor who is 45 years old&quot;</span>, <span class="hljs-string">&quot;There are 3 actors which played in more than 60 movies&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tf.convert_to_tensor([<span class="hljs-number">1</span>, <span class="hljs-number">0</span>]) <span class="hljs-comment"># 1 means entailed, 0 means refuted</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ro=new ss({}),io=new pe({props:{name:"class transformers.TFTapasForQuestionAnswering",anchor:"transformers.TFTapasForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L1284",parametersDescription:[{anchor:"transformers.TFTapasForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ia=new Fs({props:{$$slots:{default:[Y0]},$$scope:{ctx:U}}}),ho=new pe({props:{name:"call",anchor:"transformers.TFTapasForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"table_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"aggregation_labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"float_answer",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"numeric_values",val:": typing.Union[numpy.ndarray, 
tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"numeric_values_scale",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/tapas/modeling_tf_tapas.py#L1306",parametersDescription:[{anchor:"transformers.TFTapasForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFTapasForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFTapasForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 7)</code>, <em>optional</em>) &#x2014; Token indices that encode tabular structure. Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. See this class for more info.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFTapasForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. If <code>reset_position_index_per_cell</code> of <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig">TapasConfig</a> is set to <code>True</code>, relative position embeddings will be used. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFTapasForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFTapasForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFTapasForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFTapasForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFTapasForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFTapasForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFTapasForQuestionAnswering.call.table_mask",description:`<strong>table_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0.`,name:"table_mask"},{anchor:"transformers.TFTapasForQuestionAnswering.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Labels per token for computing the hierarchical cell selection loss. 
This encodes the positions of the answer appearing in the table. Can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>.</p> <ul> <li>1 for tokens that are <strong>part of the answer</strong>,</li> <li>0 for tokens that are <strong>not part of the answer</strong>.</li> </ul>`,name:"labels"},{anchor:"transformers.TFTapasForQuestionAnswering.call.aggregation_labels",description:`<strong>aggregation_labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>) &#x2014; Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in <code>[0, ..., config.num_aggregation_labels - 1]</code>. Only required in case of strong supervision for aggregation (WikiSQL-supervised).`,name:"aggregation_labels"},{anchor:"transformers.TFTapasForQuestionAnswering.call.float_answer",description:`<strong>float_answer</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>) &#x2014; Float answer for every example in the batch. Set to <em>float(&#x2018;nan&#x2019;)</em> for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.`,name:"float_answer"},{anchor:"transformers.TFTapasForQuestionAnswering.call.numeric_values",description:`<strong>numeric_values</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss.`,name:"numeric_values"},{anchor:"transformers.TFTapasForQuestionAnswering.call.numeric_values_scale",description:`<strong>numeric_values_scale</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, seq_length)</code>, <em>optional</em>) &#x2014; Scale of the numeric values of every token. Can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer">TapasTokenizer</a>. 
Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss.`,name:"numeric_values_scale"}],returnDescription:` <p>A <code>transformers.models.tapas.modeling_tf_tapas.TFTableQuestionAnsweringOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig" >TapasConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> (and possibly <code>answer</code>, <code>aggregation_labels</code>, <code>numeric_values</code> and <code>numeric_values_scale</code> are provided)) \u2014 Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations.</li> <li><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Prediction scores of the cell selection head, for every token.</li> <li><strong>logits_aggregation</strong> (<code>tf.Tensor</code>, <em>optional</em>, of shape <code>(batch_size, num_aggregation_labels)</code>) \u2014 Prediction scores of the aggregation head, for every aggregation operator.</li> <li><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.tapas.modeling_tf_tapas.TFTableQuestionAnsweringOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),la=new Fs({props:{$$slots:{default:[J0]},$$scope:{ctx:U}}}),uo=new xt({props:{code:`from transformers import TapasTokenizer, TapasForQuestionAnswering import pandas as pd tokenizer = TapasTokenizer.from_pretrained('google/tapas-base-finetuned-wtq') model = TapasForQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq') data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], 'Age': ["56", "45", "59"], 'Number of movies': ["87", "53", "69"] } table = pd.DataFrame.from_dict(data) queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") outputs = model(**inputs) logits = outputs.logits logits_aggregation = outputs.logits_aggregation,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TapasTokenizer, TapasForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TapasTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TapasForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/tapas-base-finetuned-wtq&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&#x27;Actors&#x27;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Age&#x27;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;Number of movies&#x27;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>] <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>queries = [<span class="hljs-string">&quot;How many movies has George Clooney played in?&quot;</span>, <span class="hljs-string">&quot;How old is Brad Pitt?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(table=table, queries=queries, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>logits_aggregation = outputs.logits_aggregation`}}),{c(){m=o("meta"),x=l(),f=o("h1"),g=o("a"),q=o("span"),v(T.$$.fragment),_=l(),F=o("span"),_e=a("TAPAS"),K=l(),A=o("h2"),Y=o("a"),O=o("span"),v(Z.$$.fragment),Te=l(),I=o("span"),ve=a("Overview"),ce=l(),W=o("p"),N=a("The TAPAS model was proposed in "),ee=o("a"),se=a("TAPAS: Weakly Supervised Table Parsing via Pre-training"),z=a(` by Jonathan Herzig, Pawe\u0142 Krzysztof Nowak, Thomas M\xFCller, Francesco Piccinno and Julian Martin Eisenschlos. It\u2019s a BERT-based model specifically designed (and pre-trained) for answering questions about tabular data. Compared to BERT, TAPAS uses relative position embeddings and has 7 token types that encode tabular structure. TAPAS is pre-trained on the masked language modeling (MLM) objective on a large dataset comprising millions of tables from English Wikipedia and corresponding texts.`),$=l(),oe=o("p"),H=a("For question answering, TAPAS has 2 heads on top: a cell selection head and an aggregation head, for (optionally) performing aggregations (such as counting or summing) among selected cells. TAPAS has been fine-tuned on several datasets:"),he=l(),V=o("ul"),L=o("li"),te=o("a"),be=a("SQA"),C=a(" (Sequential Question Answering by Microsoft)"),we=l(),Q=o("li"),ae=o("a"),ye=a("WTQ"),B=a(" (Wiki Table Questions by Stanford University)"),ke=l(),re=o("li"),P=o("a"),je=a("WikiSQL"),G=a(" (by Salesforce)."),ue=l(),u=o("p"),E=a("It achieves state-of-the-art on both SQA and WTQ, while having comparable performance to SOTA on WikiSQL, with a much simpler architecture."),J=l(),xe=o("p"),Ge=a("The abstract from the paper is the following:"),S=l(),Fe=o("p"),$e=o("em"),Re=a("Answering natural language questions over tables is usually seen as a semantic parsing task. To alleviate the collection cost of full logical forms, one popular approach focuses on weak supervision consisting of denotations instead of logical forms. However, training semantic parsers from weak supervision poses difficulties, and in addition, the generated logical forms are only used as an intermediate step prior to retrieving the denotation. In this paper, we present TAPAS, an approach to question answering over tables without generating logical forms. TAPAS trains from weak supervision, and predicts the denotation by selecting table cells and optionally applying a corresponding aggregation operator to such selection. TAPAS extends BERT\u2019s architecture to encode tables as input, initializes from an effective joint pre-training of text segments and tables crawled from Wikipedia, and is trained end-to-end. 
We experiment with three different semantic parsing datasets, and find that TAPAS outperforms or rivals semantic parsing models by improving state-of-the-art accuracy on SQA from 55.1 to 67.2 and performing on par with the state-of-the-art on WIKISQL and WIKITQ, but with a simpler model architecture. We additionally find that transfer learning, which is trivial in our setting, from WIKISQL to WIKITQ, yields 48.7 accuracy, 4.2 points above the state-of-the-art."),D=l(),M=o("p"),Ve=a("In addition, the authors have further pre-trained TAPAS to recognize "),Ce=o("strong"),R=a("table entailment"),Ke=a(", by creating a balanced dataset of millions of automatically created training examples which are learned in an intermediate step prior to fine-tuning. The authors of TAPAS call this further pre-training intermediate pre-training (since TAPAS is first pre-trained on MLM, and then on another dataset). They found that intermediate pre-training further improves performance on SQA, achieving a new state-of-the-art as well as state-of-the-art on "),qe=o("a"),Ee=a("TabFact"),Ye=a(", a large-scale dataset with 16k Wikipedia tables for table entailment (a binary classification task). For more details, see their follow-up paper: "),fa=o("a"),ih=a("Understanding tables with intermediate pre-training"),lh=a(" by Julian Martin Eisenschlos, Syrine Krichene and Thomas M\xFCller."),ap=l(),Ft=o("img"),np=l(),_o=o("small"),dh=a("TAPAS architecture. Taken from the [official blog post](https://ai.googleblog.com/2020/04/using-neural-networks-to-find-answers.html)."),op=l(),_s=o("p"),ph=a("This model was contributed by "),ga=o("a"),ch=a("nielsr"),hh=a(". The Tensorflow version of this model was contributed by "),_a=o("a"),uh=a("kamalkraj"),mh=a(". The original code can be found "),Ta=o("a"),fh=a("here"),gh=a("."),rp=l(),To=o("p"),_h=a("Tips:"),ip=l(),Ts=o("ul"),Me=o("li"),Th=a("TAPAS is a model that uses relative position embeddings by default (restarting the position embeddings at every cell of the table). Note that this is something that was added after the publication of the original TAPAS paper. According to the authors, this usually results in a slightly better performance, and allows you to encode longer sequences without running out of embeddings. This is reflected in the "),Hr=o("code"),vh=a("reset_position_index_per_cell"),bh=a(" parameter of "),vo=o("a"),wh=a("TapasConfig"),yh=a(", which is set to "),Br=o("code"),kh=a("True"),jh=a(" by default. The default versions of the models available on the "),va=o("a"),qh=a("hub"),Eh=a(" all use relative position embeddings. You can still use the ones with absolute position embeddings by passing in an additional argument "),Gr=o("code"),xh=a('revision="no_reset"'),Fh=a(" when calling the "),Rr=o("code"),Ah=a("from_pretrained()"),zh=a(" method. Note that it\u2019s usually advised to pad the inputs on the right rather than the left."),$h=l(),Je=o("li"),Ch=a("TAPAS is based on BERT, so "),Vr=o("code"),Mh=a("TAPAS-base"),Ph=a(" for example corresponds to a "),Kr=o("code"),Sh=a("BERT-base"),Dh=a(" architecture. Of course, "),Yr=o("code"),Nh=a("TAPAS-large"),Lh=a(" will result in the best performance (the results reported in the paper are from "),Jr=o("code"),Qh=a("TAPAS-large"),Oh=a("). Results of the various sized models are shown on the "),ba=o("a"),Ih=a("original Github repository"),Wh=a("."),Uh=l(),Vs=o("li"),Hh=a("TAPAS has checkpoints fine-tuned on SQA, which are capable of answering questions related to a table in a conversational set-up. 
This means that you can ask follow-up questions such as \u201Cwhat is his age?\u201D related to the previous question. Note that the forward pass of TAPAS is a bit different in case of a conversational set-up: in that case, you have to feed every table-question pair one by one to the model, such that the "),Xr=o("code"),Bh=a("prev_labels"),Gh=a(" token type ids can be overwritten by the predicted "),Zr=o("code"),Rh=a("labels"),Vh=a(" of the model to the previous question. See \u201CUsage\u201D section for more info."),Kh=l(),ei=o("li"),Yh=a("TAPAS is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard. Note that TAPAS can be used as an encoder in the EncoderDecoderModel framework, to combine it with an autoregressive text decoder such as GPT-2."),lp=l(),Ks=o("h2"),At=o("a"),si=o("span"),v(wa.$$.fragment),Jh=l(),ti=o("span"),Xh=a("Usage: fine-tuning"),dp=l(),zt=o("p"),Zh=a("Here we explain how you can fine-tune "),bo=o("a"),eu=a("TapasForQuestionAnswering"),su=a(" on your own dataset."),pp=l(),wo=o("p"),ai=o("strong"),tu=a("STEP 1: Choose one of the 3 ways in which you can use TAPAS - or experiment"),cp=l(),$t=o("p"),au=a("Basically, there are 3 different ways in which one can fine-tune "),yo=o("a"),nu=a("TapasForQuestionAnswering"),ou=a(", corresponding to the different datasets on which Tapas was fine-tuned:"),hp=l(),zs=o("ol"),ni=o("li"),ru=a("SQA: if you\u2019re interested in asking follow-up questions related to a table, in a conversational set-up. For example if you first ask \u201Cwhat\u2019s the name of the first actor?\u201D then you can ask a follow-up question such as \u201Chow old is he?\u201C. Here, questions do not involve any aggregation (all questions are cell selection questions)."),iu=l(),ya=o("li"),lu=a("WTQ: if you\u2019re not interested in asking questions in a conversational set-up, but rather just asking questions related to a table, which might involve aggregation, such as counting a number of rows, summing up cell values or averaging cell values. You can then for example ask \u201Cwhat\u2019s the total number of goals Cristiano Ronaldo made in his career?\u201C. This case is also called "),oi=o("strong"),du=a("weak supervision"),pu=a(", since the model itself must learn the appropriate aggregation operator (SUM/COUNT/AVERAGE/NONE) given only the answer to the question as supervision."),cu=l(),ka=o("li"),hu=a("WikiSQL-supervised: this dataset is based on WikiSQL with the model being given the ground truth aggregation operator during training. This is also called "),ri=o("strong"),uu=a("strong supervision"),mu=a(". 
Here, learning the appropriate aggregation operator is much easier."),up=l(),ko=o("p"),fu=a("To summarize:"),mp=l(),Ct=o("table"),ii=o("thead"),Ys=o("tr"),li=o("th"),di=o("strong"),gu=a("Task"),_u=l(),pi=o("th"),ci=o("strong"),Tu=a("Example dataset"),vu=l(),hi=o("th"),ui=o("strong"),bu=a("Description"),wu=l(),Js=o("tbody"),Xs=o("tr"),mi=o("td"),yu=a("Conversational"),ku=l(),fi=o("td"),ju=a("SQA"),qu=l(),gi=o("td"),Eu=a("Conversational, only cell selection questions"),xu=l(),Zs=o("tr"),_i=o("td"),Fu=a("Weak supervision for aggregation"),Au=l(),Ti=o("td"),zu=a("WTQ"),$u=l(),vi=o("td"),Cu=a("Questions might involve aggregation, and the model must learn this given only the answer as supervision"),Mu=l(),et=o("tr"),bi=o("td"),Pu=a("Strong supervision for aggregation"),Su=l(),wi=o("td"),Du=a("WikiSQL-supervised"),Nu=l(),yi=o("td"),Lu=a("Questions might involve aggregation, and the model must learn this given the gold aggregation operator"),fp=l(),$s=o("p"),Qu=a(`Initializing a model with a pre-trained base and randomly initialized classification heads from the hub can be done as shown below. Be sure to have installed the `),ja=o("a"),Ou=a("torch-scatter"),Iu=a(" dependency for your environment in case you\u2019re using PyTorch, or the "),qa=o("a"),Wu=a("tensorflow_probability"),Uu=a(` dependency in case you\u2019re using Tensorflow:`),gp=l(),v(Ea.$$.fragment),_p=l(),Cs=o("p"),Hu=a("Of course, you don\u2019t necessarily have to follow one of these three ways in which TAPAS was fine-tuned. You can also experiment by defining any hyperparameters you want when initializing "),jo=o("a"),Bu=a("TapasConfig"),Gu=a(", and then create a "),qo=o("a"),Ru=a("TapasForQuestionAnswering"),Vu=a(" based on that configuration. For example, if you have a dataset that has both conversational questions and questions that might involve aggregation, then you can do it this way. Here\u2019s an example:"),Tp=l(),v(xa.$$.fragment),vp=l(),Mt=o("p"),Ku=a("What you can also do is start from an already fine-tuned checkpoint. A note here is that the already fine-tuned checkpoint on WTQ has some issues due to the L2-loss which is somewhat brittle. See "),Fa=o("a"),Yu=a("here"),Ju=a(" for more info."),bp=l(),Pt=o("p"),Xu=a("For a list of all pre-trained and fine-tuned TAPAS checkpoints available on HuggingFace\u2019s hub, see "),Aa=o("a"),Zu=a("here"),em=a("."),wp=l(),Eo=o("p"),ki=o("strong"),sm=a("STEP 2: Prepare your data in the SQA format"),yp=l(),St=o("p"),tm=a("Second, no matter what you picked above, you should prepare your dataset in the "),za=o("a"),am=a("SQA"),nm=a(" format. This format is a TSV/CSV file with the following columns:"),kp=l(),ie=o("ul"),xo=o("li"),ji=o("code"),om=a("id"),rm=a(": optional, id of the table-question pair, for bookkeeping purposes."),im=l(),Fo=o("li"),qi=o("code"),lm=a("annotator"),dm=a(": optional, id of the person who annotated the table-question pair, for bookkeeping purposes."),pm=l(),Ao=o("li"),Ei=o("code"),cm=a("position"),hm=a(": integer indicating if the question is the first, second, third,\u2026 related to the table. Only required in case of conversational setup (SQA). You don\u2019t need this column in case you\u2019re going for WTQ/WikiSQL-supervised."),um=l(),zo=o("li"),xi=o("code"),mm=a("question"),fm=a(": string"),gm=l(),$o=o("li"),Fi=o("code"),_m=a("table_file"),Tm=a(": string, name of a csv file containing the tabular data"),vm=l(),Co=o("li"),Ai=o("code"),bm=a("answer_coordinates"),wm=a(": list of one or more tuples (each tuple being a cell coordinate, i.e. 
row, column pair that is part of the answer)"),ym=l(),Mo=o("li"),zi=o("code"),km=a("answer_text"),jm=a(": list of one or more strings (each string being a cell value that is part of the answer)"),qm=l(),Po=o("li"),$i=o("code"),Em=a("aggregation_label"),xm=a(": index of the aggregation operator. Only required in case of strong supervision for aggregation (the WikiSQL-supervised case)"),Fm=l(),So=o("li"),Ci=o("code"),Am=a("float_answer"),zm=a(": the float answer to the question, if there is one (np.nan if there isn\u2019t). Only required in case of weak supervision for aggregation (such as WTQ and WikiSQL)"),jp=l(),We=o("p"),$m=a("The tables themselves should be present in a folder, each table being a separate csv file. Note that the authors of the TAPAS algorithm used conversion scripts with some automated logic to convert the other datasets (WTQ, WikiSQL) into the SQA format. The author explains this "),$a=o("a"),Cm=a("here"),Mm=a(". A conversion of this script that works with HuggingFace\u2019s implementation can be found "),Ca=o("a"),Pm=a("here"),Sm=a(". Interestingly, these conversion scripts are not perfect (the "),Mi=o("code"),Dm=a("answer_coordinates"),Nm=a(" and "),Pi=o("code"),Lm=a("float_answer"),Qm=a(" fields are populated based on the "),Si=o("code"),Om=a("answer_text"),Im=a("), meaning that WTQ and WikiSQL results could actually be improved."),qp=l(),Do=o("p"),Di=o("strong"),Wm=a("STEP 3: Convert your data into PyTorch/TensorFlow tensors using TapasTokenizer"),Ep=l(),Ae=o("p"),Um=a("Third, given that you\u2019ve prepared your data in this TSV/CSV format (and corresponding CSV files containing the tabular data), you can then use "),No=o("a"),Hm=a("TapasTokenizer"),Bm=a(" to convert table-question pairs into "),Ni=o("code"),Gm=a("input_ids"),Rm=a(", "),Li=o("code"),Vm=a("attention_mask"),Km=a(", "),Qi=o("code"),Ym=a("token_type_ids"),Jm=a(" and so on. Again, based on which of the three cases you picked above, "),Lo=o("a"),Xm=a("TapasForQuestionAnswering"),Zm=a("/"),Qo=o("a"),ef=a("TFTapasForQuestionAnswering"),sf=a(` requires different inputs to be fine-tuned:`),xp=l(),Dt=o("table"),Oi=o("thead"),Ma=o("tr"),Ii=o("th"),Wi=o("strong"),tf=a("Task"),af=l(),Ui=o("th"),Hi=o("strong"),nf=a("Required inputs"),of=l(),st=o("tbody"),Pa=o("tr"),Bi=o("td"),rf=a("Conversational"),lf=l(),As=o("td"),Gi=o("code"),df=a("input_ids"),pf=a(", "),Ri=o("code"),cf=a("attention_mask"),hf=a(", "),Vi=o("code"),uf=a("token_type_ids"),mf=a(", "),Ki=o("code"),ff=a("labels"),gf=l(),Sa=o("tr"),Yi=o("td"),_f=a("Weak supervision for aggregation"),Tf=l(),Pe=o("td"),Ji=o("code"),vf=a("input_ids"),bf=a(", "),Xi=o("code"),wf=a("attention_mask"),yf=a(", "),Zi=o("code"),kf=a("token_type_ids"),jf=a(", "),el=o("code"),qf=a("labels"),Ef=a(", "),sl=o("code"),xf=a("numeric_values"),Ff=a(", "),tl=o("code"),Af=a("numeric_values_scale"),zf=a(", "),al=o("code"),$f=a("float_answer"),Cf=l(),Da=o("tr"),nl=o("td"),Mf=a("Strong supervision for aggregation"),Pf=l(),ms=o("td"),ol=o("code"),Sf=a("input ids"),Df=a(", "),rl=o("code"),Nf=a("attention mask"),Lf=a(", "),il=o("code"),Qf=a("token type ids"),Of=a(", "),ll=o("code"),If=a("labels"),Wf=a(", "),dl=o("code"),Uf=a("aggregation_labels"),Fp=l(),le=o("p"),Oo=o("a"),Hf=a("TapasTokenizer"),Bf=a(" creates the "),pl=o("code"),Gf=a("labels"),Rf=a(", "),cl=o("code"),Vf=a("numeric_values"),Kf=a(" and "),hl=o("code"),Yf=a("numeric_values_scale"),Jf=a(" based on the "),ul=o("code"),Xf=a("answer_coordinates"),Zf=a(" and "),ml=o("code"),eg=a("answer_text"),sg=a(" columns of the TSV file. 
The "),fl=o("code"),tg=a("float_answer"),ag=a(" and "),gl=o("code"),ng=a("aggregation_labels"),og=a(" are already in the TSV file of step 2. Here\u2019s an example:"),Ap=l(),v(Na.$$.fragment),zp=l(),vs=o("p"),rg=a("Note that "),Io=o("a"),ig=a("TapasTokenizer"),lg=a(" expects the data of the table to be "),_l=o("strong"),dg=a("text-only"),pg=a(". You can use "),Tl=o("code"),cg=a(".astype(str)"),hg=a(` on a dataframe to turn it into text-only data. Of course, this only shows how to encode a single training example. It is advised to create a dataloader to iterate over batches:`),$p=l(),v(La.$$.fragment),Cp=l(),X=o("p"),ug=a("Note that here, we encode each table-question pair independently. This is fine as long as your dataset is "),vl=o("strong"),mg=a("not conversational"),fg=a(". In case your dataset involves conversational questions (such as in SQA), then you should first group together the "),bl=o("code"),gg=a("queries"),_g=a(", "),wl=o("code"),Tg=a("answer_coordinates"),vg=a(" and "),yl=o("code"),bg=a("answer_text"),wg=a(" per table (in the order of their "),kl=o("code"),yg=a("position"),kg=a(` index) and batch encode each table with its questions. This will make sure that the `),jl=o("code"),jg=a("prev_labels"),qg=a(" token types (see docs of "),Wo=o("a"),Eg=a("TapasTokenizer"),xg=a(") are set correctly. See "),Qa=o("a"),Fg=a("this notebook"),Ag=a(" for more info. See "),Oa=o("a"),zg=a("this notebook"),$g=a(" for more info regarding using the TensorFlow model."),Mp=l(),Uo=o("p"),ql=o("strong"),Cg=a("STEP 4: Train (fine-tune) TapasForQuestionAnswering/TFTapasForQuestionAnswering"),Pp=l(),Ms=o("p"),Mg=a("You can then fine-tune "),Ho=o("a"),Pg=a("TapasForQuestionAnswering"),Sg=a(" or "),Bo=o("a"),Dg=a("TFTapasForQuestionAnswering"),Ng=a(" as follows (shown here for the weak supervision for aggregation case):"),Sp=l(),v(Ia.$$.fragment),Dp=l(),tt=o("h2"),Nt=o("a"),El=o("span"),v(Wa.$$.fragment),Lg=l(),xl=o("span"),Qg=a("Usage: inference"),Np=l(),me=o("p"),Og=a("Here we explain how you can use "),Go=o("a"),Ig=a("TapasForQuestionAnswering"),Wg=a(" or "),Ro=o("a"),Ug=a("TFTapasForQuestionAnswering"),Hg=a(" for inference (i.e. making predictions on new data). For inference, only "),Fl=o("code"),Bg=a("input_ids"),Gg=a(", "),Al=o("code"),Rg=a("attention_mask"),Vg=a(" and "),zl=o("code"),Kg=a("token_type_ids"),Yg=a(" (which you can obtain using "),Vo=o("a"),Jg=a("TapasTokenizer"),Xg=a(") have to be provided to the model to obtain the logits. Next, you can use the handy "),$l=o("code"),Zg=a("convert_logits_to_predictions"),e_=a(" method to convert these into predicted coordinates and optional aggregation indices."),Lp=l(),Lt=o("p"),s_=a("However, note that inference is "),Cl=o("strong"),t_=a("different"),a_=a(" depending on whether or not the setup is conversational. In a non-conversational set-up, inference can be done in parallel on all table-question pairs of a batch. Here\u2019s an example of that:"),Qp=l(),v(Ua.$$.fragment),Op=l(),Ue=o("p"),n_=a("In case of a conversational set-up, then each table-question pair must be provided "),Ml=o("strong"),o_=a("sequentially"),r_=a(" to the model, such that the "),Pl=o("code"),i_=a("prev_labels"),l_=a(" token types can be overwritten by the predicted "),Sl=o("code"),d_=a("labels"),p_=a(" of the previous table-question pair. 
Again, more info can be found in "),Ha=o("a"),c_=a("this notebook"),h_=a(" (for PyTorch) and "),Ba=o("a"),u_=a("this notebook"),m_=a(" (for TensorFlow)."),Ip=l(),at=o("h2"),Qt=o("a"),Dl=o("span"),v(Ga.$$.fragment),f_=l(),Nl=o("span"),g_=a("TAPAS specific outputs"),Wp=l(),nt=o("div"),v(Ra.$$.fragment),__=l(),Va=o("p"),T_=a("Output type of "),Ko=o("a"),v_=a("TapasForQuestionAnswering"),b_=a("."),Up=l(),ot=o("h2"),Ot=o("a"),Ll=o("span"),v(Ka.$$.fragment),w_=l(),Ql=o("span"),y_=a("TapasConfig"),Hp=l(),Xe=o("div"),v(Ya.$$.fragment),k_=l(),fs=o("p"),j_=a("This is the configuration class to store the configuration of a "),Yo=o("a"),q_=a("TapasModel"),E_=a(`. It is used to instantiate a TAPAS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TAPAS `),Ol=o("em"),x_=a("tapas-base-finetuned-sqa"),F_=a(` architecture. Configuration objects inherit from `),Il=o("code"),A_=a("PreTrainedConfig"),z_=a(` and can be used to control the model outputs. Read the documentation from `),Jo=o("a"),$_=a("PretrainedConfig"),C_=a(" for more information."),M_=l(),Ja=o("p"),P_=a(`Hyperparameters additional to BERT are taken from run_task_main.py and hparam_utils.py of the original implementation. Original implementation available at `),Xa=o("a"),S_=a("https://github.com/google-research/tapas/tree/master"),D_=a("."),N_=l(),Wl=o("p"),L_=a("Example:"),Q_=l(),v(Za.$$.fragment),Bp=l(),rt=o("h2"),It=o("a"),Ul=o("span"),v(en.$$.fragment),O_=l(),Hl=o("span"),I_=a("TapasTokenizer"),Gp=l(),de=o("div"),v(sn.$$.fragment),W_=l(),Bl=o("p"),U_=a(`Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by TAPAS models.`),H_=l(),ne=o("p"),B_=a("This tokenizer inherits from "),Xo=o("a"),G_=a("PreTrainedTokenizer"),R_=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. `),Zo=o("a"),V_=a("TapasTokenizer"),K_=a(` creates several token type ids to encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `),Gl=o("code"),Y_=a("segment_ids"),J_=a(", "),Rl=o("code"),X_=a("column_ids"),Z_=a(", "),Vl=o("code"),eT=a("row_ids"),sT=a(`, `),Kl=o("code"),tT=a("prev_labels"),aT=a(", "),Yl=o("code"),nT=a("column_ranks"),oT=a(", "),Jl=o("code"),rT=a("inv_column_ranks"),iT=a(" and "),Xl=o("code"),lT=a("numeric_relations"),dT=a(":"),pT=l(),Se=o("ul"),Zl=o("li"),cT=a(`segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and padding.`),hT=l(),ed=o("li"),uT=a(`column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding.`),mT=l(),sd=o("li"),fT=a(`row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding. Tokens of column headers are also 0.`),gT=l(),td=o("li"),_T=a(`prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in a conversational setup (such as SQA).`),TT=l(),ad=o("li"),vT=a(`column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a column \u201Cnumber of movies\u201D with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2 respectively. 
0 for all question tokens, special tokens and padding.`),bT=l(),nd=o("li"),wT=a(`inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if you have a column \u201Cnumber of movies\u201D with values 87, 53 and 69, then the inverse column ranks of these tokens are 1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding.`),yT=l(),od=o("li"),kT=a(`numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all question tokens, special tokens and padding.`),jT=l(),er=o("p"),sr=o("a"),qT=a("TapasTokenizer"),ET=a(` runs end-to-end tokenization on a table and associated sentences: punctuation splitting and wordpiece.`),xT=l(),Wt=o("div"),v(tn.$$.fragment),FT=l(),rd=o("p"),AT=a("Main method to tokenize and prepare for the model one or several sequence(s) related to a table."),zT=l(),Ps=o("div"),v(an.$$.fragment),$T=l(),nn=o("p"),CT=a("Converts logits of "),tr=o("a"),MT=a("TapasForQuestionAnswering"),PT=a(` to actual predicted answer coordinates and optional aggregation indices.`),ST=l(),on=o("p"),DT=a("The original implementation, on which this function is based, can be found "),rn=o("a"),NT=a("here"),LT=a("."),QT=l(),id=o("div"),Rp=l(),it=o("h2"),Ut=o("a"),ld=o("span"),v(ln.$$.fragment),OT=l(),dd=o("span"),IT=a("TapasModel"),Vp=l(),De=o("div"),v(dn.$$.fragment),WT=l(),pn=o("p"),UT=a(`The bare Tapas Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),ar=o("a"),HT=a("PreTrainedModel"),BT=a(`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),GT=l(),cn=o("p"),RT=a("This model is also a PyTorch "),hn=o("a"),VT=a("torch.nn.Module"),KT=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),YT=l(),un=o("p"),JT=a("This class is a small change compared to "),nr=o("a"),XT=a("BertModel"),ZT=a(`, taking into account the additional token type ids.`),ev=l(),mn=o("p"),sv=a(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),fn=o("a"),tv=a(`Attention is all you need`),av=a(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),nv=l(),ts=o("div"),v(gn.$$.fragment),ov=l(),lt=o("p"),rv=a("The "),or=o("a"),iv=a("TapasModel"),lv=a(" forward method, overrides the "),pd=o("code"),dv=a("__call__"),pv=a(" special method."),cv=l(),v(Ht.$$.fragment),hv=l(),cd=o("p"),uv=a("Examples:"),mv=l(),v(_n.$$.fragment),Kp=l(),dt=o("h2"),Bt=o("a"),hd=o("span"),v(Tn.$$.fragment),fv=l(),ud=o("span"),gv=a("TapasForMaskedLM"),Yp=l(),gs=o("div"),v(vn.$$.fragment),_v=l(),pt=o("p"),Tv=a("Tapas Model with a "),md=o("code"),vv=a("language modeling"),bv=a(` head on top. This model inherits from `),rr=o("a"),wv=a("PreTrainedModel"),yv=a(`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kv=l(),bn=o("p"),jv=a("This model is also a PyTorch "),wn=o("a"),qv=a("torch.nn.Module"),Ev=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xv=l(),as=o("div"),v(yn.$$.fragment),Fv=l(),ct=o("p"),Av=a("The "),ir=o("a"),zv=a("TapasForMaskedLM"),$v=a(" forward method, overrides the "),fd=o("code"),Cv=a("__call__"),Mv=a(" special method."),Pv=l(),v(Gt.$$.fragment),Sv=l(),gd=o("p"),Dv=a("Examples:"),Nv=l(),v(kn.$$.fragment),Jp=l(),ht=o("h2"),Rt=o("a"),_d=o("span"),v(jn.$$.fragment),Lv=l(),Td=o("span"),Qv=a("TapasForSequenceClassification"),Xp=l(),Ze=o("div"),v(qn.$$.fragment),Ov=l(),vd=o("p"),Iv=a(`Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020).`),Wv=l(),En=o("p"),Uv=a("This model inherits from "),lr=o("a"),Hv=a("PreTrainedModel"),Bv=a(`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gv=l(),xn=o("p"),Rv=a("This model is also a PyTorch "),Fn=o("a"),Vv=a("torch.nn.Module"),Kv=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yv=l(),ns=o("div"),v(An.$$.fragment),Jv=l(),ut=o("p"),Xv=a("The "),dr=o("a"),Zv=a("TapasForSequenceClassification"),eb=a(" forward method, overrides the "),bd=o("code"),sb=a("__call__"),tb=a(" special method."),ab=l(),v(Vt.$$.fragment),nb=l(),wd=o("p"),ob=a("Examples:"),rb=l(),v(zn.$$.fragment),Zp=l(),mt=o("h2"),Kt=o("a"),yd=o("span"),v($n.$$.fragment),ib=l(),kd=o("span"),lb=a("TapasForQuestionAnswering"),ec=l(),es=o("div"),v(Cn.$$.fragment),db=l(),ft=o("p"),pb=a(`Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `),jd=o("code"),cb=a("logits"),hb=a(" and optional "),qd=o("code"),ub=a("logits_aggregation"),mb=a(`), e.g. for SQA, WTQ or WikiSQL-supervised tasks.`),fb=l(),Mn=o("p"),gb=a("This model inherits from "),pr=o("a"),_b=a("PreTrainedModel"),Tb=a(`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vb=l(),Pn=o("p"),bb=a("This model is also a PyTorch "),Sn=o("a"),wb=a("torch.nn.Module"),yb=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kb=l(),os=o("div"),v(Dn.$$.fragment),jb=l(),gt=o("p"),qb=a("The "),cr=o("a"),Eb=a("TapasForQuestionAnswering"),xb=a(" forward method, overrides the "),Ed=o("code"),Fb=a("__call__"),Ab=a(" special method."),zb=l(),v(Yt.$$.fragment),$b=l(),xd=o("p"),Cb=a("Examples:"),Mb=l(),v(Nn.$$.fragment),sc=l(),_t=o("h2"),Jt=o("a"),Fd=o("span"),v(Ln.$$.fragment),Pb=l(),Ad=o("span"),Sb=a("TFTapasModel"),tc=l(),Ne=o("div"),v(Qn.$$.fragment),Db=l(),zd=o("p"),Nb=a("The bare Tapas Model transformer outputting raw hidden-states without any specific head on top."),Lb=l(),On=o("p"),Qb=a("This model inherits from "),hr=o("a"),Ob=a("TFPreTrainedModel"),Ib=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wb=l(),In=o("p"),Ub=a("This model is also a "),Wn=o("a"),Hb=a("tf.keras.Model"),Bb=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Gb=l(),v(Xt.$$.fragment),Rb=l(),rs=o("div"),v(Un.$$.fragment),Vb=l(),Tt=o("p"),Kb=a("The "),ur=o("a"),Yb=a("TFTapasModel"),Jb=a(" forward method, overrides the "),$d=o("code"),Xb=a("__call__"),Zb=a(" special method."),ew=l(),v(Zt.$$.fragment),sw=l(),Cd=o("p"),tw=a("Examples:"),aw=l(),v(Hn.$$.fragment),ac=l(),vt=o("h2"),ea=o("a"),Md=o("span"),v(Bn.$$.fragment),nw=l(),Pd=o("span"),ow=a("TFTapasForMaskedLM"),nc=l(),Le=o("div"),v(Gn.$$.fragment),rw=l(),Rn=o("p"),iw=a("Tapas Model with a "),Sd=o("code"),lw=a("language modeling"),dw=a(" head on top."),pw=l(),Vn=o("p"),cw=a("This model inherits from "),mr=o("a"),hw=a("TFPreTrainedModel"),uw=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mw=l(),Kn=o("p"),fw=a("This model is also a "),Yn=o("a"),gw=a("tf.keras.Model"),_w=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Tw=l(),v(sa.$$.fragment),vw=l(),is=o("div"),v(Jn.$$.fragment),bw=l(),bt=o("p"),ww=a("The "),fr=o("a"),yw=a("TFTapasForMaskedLM"),kw=a(" forward method, overrides the "),Dd=o("code"),jw=a("__call__"),qw=a(" special method."),Ew=l(),v(ta.$$.fragment),xw=l(),Nd=o("p"),Fw=a("Examples:"),Aw=l(),v(Xn.$$.fragment),oc=l(),wt=o("h2"),aa=o("a"),Ld=o("span"),v(Zn.$$.fragment),zw=l(),Qd=o("span"),$w=a("TFTapasForSequenceClassification"),rc=l(),Qe=o("div"),v(eo.$$.fragment),Cw=l(),Od=o("p"),Mw=a(`Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020).`),Pw=l(),so=o("p"),Sw=a("This model inherits from "),gr=o("a"),Dw=a("TFPreTrainedModel"),Nw=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lw=l(),to=o("p"),Qw=a("This model is also a "),ao=o("a"),Ow=a("tf.keras.Model"),Iw=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ww=l(),v(na.$$.fragment),Uw=l(),ls=o("div"),v(no.$$.fragment),Hw=l(),yt=o("p"),Bw=a("The "),_r=o("a"),Gw=a("TFTapasForSequenceClassification"),Rw=a(" forward method, overrides the "),Id=o("code"),Vw=a("__call__"),Kw=a(" special method."),Yw=l(),v(oa.$$.fragment),Jw=l(),Wd=o("p"),Xw=a("Examples:"),Zw=l(),v(oo.$$.fragment),ic=l(),kt=o("h2"),ra=o("a"),Ud=o("span"),v(ro.$$.fragment),ey=l(),Hd=o("span"),sy=a("TFTapasForQuestionAnswering"),lc=l(),Oe=o("div"),v(io.$$.fragment),ty=l(),jt=o("p"),ay=a(`Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `),Bd=o("code"),ny=a("logits"),oy=a(" and optional "),Gd=o("code"),ry=a("logits_aggregation"),iy=a(`), e.g. for SQA, WTQ or WikiSQL-supervised tasks.`),ly=l(),lo=o("p"),dy=a("This model inherits from "),Tr=o("a"),py=a("TFPreTrainedModel"),cy=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hy=l(),po=o("p"),uy=a("This model is also a "),co=o("a"),my=a("tf.keras.Model"),fy=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),gy=l(),v(ia.$$.fragment),_y=l(),ds=o("div"),v(ho.$$.fragment),Ty=l(),qt=o("p"),vy=a("The "),vr=o("a"),by=a("TFTapasForQuestionAnswering"),wy=a(" forward method, overrides the "),Rd=o("code"),yy=a("__call__"),ky=a(" special method."),jy=l(),v(la.$$.fragment),qy=l(),Vd=o("p"),Ey=a("Examples:"),xy=l(),v(uo.$$.fragment),this.h()},l(t){const h=L0('[data-svelte="svelte-1phssyn"]',document.head);m=r(h,"META",{name:!0,content:!0}),h.forEach(s),x=d(t),f=r(t,"H1",{class:!0});var mo=i(f);g=r(mo,"A",{id:!0,class:!0,href:!0});var Kd=i(g);q=r(Kd,"SPAN",{});var Yd=i(q);b(T.$$.fragment,Yd),Yd.forEach(s),Kd.forEach(s),_=d(mo),F=r(mo,"SPAN",{});var Jd=i(F);_e=n(Jd,"TAPAS"),Jd.forEach(s),mo.forEach(s),K=d(t),A=r(t,"H2",{class:!0});var fo=i(A);Y=r(fo,"A",{id:!0,class:!0,href:!0});var Xd=i(Y);O=r(Xd,"SPAN",{});var Zd=i(O);b(Z.$$.fragment,Zd),Zd.forEach(s),Xd.forEach(s),Te=d(fo),I=r(fo,"SPAN",{});var ep=i(I);ve=n(ep,"Overview"),ep.forEach(s),fo.forEach(s),ce=d(t),W=r(t,"P",{});var go=i(W);N=n(go,"The TAPAS model was proposed in "),ee=r(go,"A",{href:!0,rel:!0});var sp=i(ee);se=n(sp,"TAPAS: Weakly Supervised Table Parsing via Pre-training"),sp.forEach(s),z=n(go,` by Jonathan Herzig, Pawe\u0142 Krzysztof Nowak, Thomas M\xFCller, Francesco Piccinno and Julian Martin Eisenschlos. It\u2019s a BERT-based model specifically designed (and pre-trained) for answering questions about tabular data. Compared to BERT, TAPAS uses relative position embeddings and has 7 token types that encode tabular structure. TAPAS is pre-trained on the masked language modeling (MLM) objective on a large dataset comprising millions of tables from English Wikipedia and corresponding texts.`),go.forEach(s),$=d(t),oe=r(t,"P",{});var tp=i(oe);H=n(tp,"For question answering, TAPAS has 2 heads on top: a cell selection head and an aggregation head, for (optionally) performing aggregations (such as counting or summing) among selected cells. TAPAS has been fine-tuned on several datasets:"),tp.forEach(s),he=d(t),V=r(t,"UL",{});var Et=i(V);L=r(Et,"LI",{});var Fy=i(L);te=r(Fy,"A",{href:!0,rel:!0});var Wy=i(te);be=n(Wy,"SQA"),Wy.forEach(s),C=n(Fy," (Sequential Question Answering by Microsoft)"),Fy.forEach(s),we=d(Et),Q=r(Et,"LI",{});var Ay=i(Q);ae=r(Ay,"A",{href:!0,rel:!0});var Uy=i(ae);ye=n(Uy,"WTQ"),Uy.forEach(s),B=n(Ay," (Wiki Table Questions by Stanford University)"),Ay.forEach(s),ke=d(Et),re=r(Et,"LI",{});var zy=i(re);P=r(zy,"A",{href:!0,rel:!0});var Hy=i(P);je=n(Hy,"WikiSQL"),Hy.forEach(s),G=n(zy," (by Salesforce)."),zy.forEach(s),Et.forEach(s),ue=d(t),u=r(t,"P",{});var By=i(u);E=n(By,"It achieves state-of-the-art on both SQA and WTQ, while having comparable performance to SOTA on WikiSQL, with a much simpler architecture."),By.forEach(s),J=d(t),xe=r(t,"P",{});var Gy=i(xe);Ge=n(Gy,"The abstract from the paper is the following:"),Gy.forEach(s),S=d(t),Fe=r(t,"P",{});var Ry=i(Fe);$e=r(Ry,"EM",{});var Vy=i($e);Re=n(Vy,"Answering natural language questions over tables is usually seen as a semantic parsing task. 
To alleviate the collection cost of full logical forms, one popular approach focuses on weak supervision consisting of denotations instead of logical forms. However, training semantic parsers from weak supervision poses difficulties, and in addition, the generated logical forms are only used as an intermediate step prior to retrieving the denotation. In this paper, we present TAPAS, an approach to question answering over tables without generating logical forms. TAPAS trains from weak supervision, and predicts the denotation by selecting table cells and optionally applying a corresponding aggregation operator to such selection. TAPAS extends BERT\u2019s architecture to encode tables as input, initializes from an effective joint pre-training of text segments and tables crawled from Wikipedia, and is trained end-to-end. We experiment with three different semantic parsing datasets, and find that TAPAS outperforms or rivals semantic parsing models by improving state-of-the-art accuracy on SQA from 55.1 to 67.2 and performing on par with the state-of-the-art on WIKISQL and WIKITQ, but with a simpler model architecture. We additionally find that transfer learning, which is trivial in our setting, from WIKISQL to WIKITQ, yields 48.7 accuracy, 4.2 points above the state-of-the-art."),Vy.forEach(s),Ry.forEach(s),D=d(t),M=r(t,"P",{});var da=i(M);Ve=n(da,"In addition, the authors have further pre-trained TAPAS to recognize "),Ce=r(da,"STRONG",{});var Ky=i(Ce);R=n(Ky,"table entailment"),Ky.forEach(s),Ke=n(da,", by creating a balanced dataset of millions of automatically created training examples which are learned in an intermediate step prior to fine-tuning. The authors of TAPAS call this further pre-training intermediate pre-training (since TAPAS is first pre-trained on MLM, and then on another dataset). They found that intermediate pre-training further improves performance on SQA, achieving a new state-of-the-art as well as state-of-the-art on "),qe=r(da,"A",{href:!0,rel:!0});var Yy=i(qe);Ee=n(Yy,"TabFact"),Yy.forEach(s),Ye=n(da,", a large-scale dataset with 16k Wikipedia tables for table entailment (a binary classification task). For more details, see their follow-up paper: "),fa=r(da,"A",{href:!0,rel:!0});var Jy=i(fa);ih=n(Jy,"Understanding tables with intermediate pre-training"),Jy.forEach(s),lh=n(da," by Julian Martin Eisenschlos, Syrine Krichene and Thomas M\xFCller."),da.forEach(s),ap=d(t),Ft=r(t,"IMG",{src:!0,alt:!0,width:!0}),np=d(t),_o=r(t,"SMALL",{});var Xy=i(_o);dh=n(Xy,"TAPAS architecture. Taken from the [official blog post](https://ai.googleblog.com/2020/04/using-neural-networks-to-find-answers.html)."),Xy.forEach(s),op=d(t),_s=r(t,"P",{});var pa=i(_s);ph=n(pa,"This model was contributed by "),ga=r(pa,"A",{href:!0,rel:!0});var Zy=i(ga);ch=n(Zy,"nielsr"),Zy.forEach(s),hh=n(pa,". The Tensorflow version of this model was contributed by "),_a=r(pa,"A",{href:!0,rel:!0});var ek=i(_a);uh=n(ek,"kamalkraj"),ek.forEach(s),mh=n(pa,". The original code can be found "),Ta=r(pa,"A",{href:!0,rel:!0});var sk=i(Ta);fh=n(sk,"here"),sk.forEach(s),gh=n(pa,"."),pa.forEach(s),rp=d(t),To=r(t,"P",{});var tk=i(To);_h=n(tk,"Tips:"),tk.forEach(s),ip=d(t),Ts=r(t,"UL",{});var ca=i(Ts);Me=r(ca,"LI",{});var ps=i(Me);Th=n(ps,"TAPAS is a model that uses relative position embeddings by default (restarting the position embeddings at every cell of the table). Note that this is something that was added after the publication of the original TAPAS paper. 
According to the authors, this usually results in a slightly better performance, and allows you to encode longer sequences without running out of embeddings. This is reflected in the "),Hr=r(ps,"CODE",{});var ak=i(Hr);vh=n(ak,"reset_position_index_per_cell"),ak.forEach(s),bh=n(ps," parameter of "),vo=r(ps,"A",{href:!0});var nk=i(vo);wh=n(nk,"TapasConfig"),nk.forEach(s),yh=n(ps,", which is set to "),Br=r(ps,"CODE",{});var ok=i(Br);kh=n(ok,"True"),ok.forEach(s),jh=n(ps," by default. The default versions of the models available on the "),va=r(ps,"A",{href:!0,rel:!0});var rk=i(va);qh=n(rk,"hub"),rk.forEach(s),Eh=n(ps," all use relative position embeddings. You can still use the ones with absolute position embeddings by passing in an additional argument "),Gr=r(ps,"CODE",{});var ik=i(Gr);xh=n(ik,'revision="no_reset"'),ik.forEach(s),Fh=n(ps," when calling the "),Rr=r(ps,"CODE",{});var lk=i(Rr);Ah=n(lk,"from_pretrained()"),lk.forEach(s),zh=n(ps," method. Note that it\u2019s usually advised to pad the inputs on the right rather than the left."),ps.forEach(s),$h=d(ca),Je=r(ca,"LI",{});var bs=i(Je);Ch=n(bs,"TAPAS is based on BERT, so "),Vr=r(bs,"CODE",{});var dk=i(Vr);Mh=n(dk,"TAPAS-base"),dk.forEach(s),Ph=n(bs," for example corresponds to a "),Kr=r(bs,"CODE",{});var pk=i(Kr);Sh=n(pk,"BERT-base"),pk.forEach(s),Dh=n(bs," architecture. Of course, "),Yr=r(bs,"CODE",{});var ck=i(Yr);Nh=n(ck,"TAPAS-large"),ck.forEach(s),Lh=n(bs," will result in the best performance (the results reported in the paper are from "),Jr=r(bs,"CODE",{});var hk=i(Jr);Qh=n(hk,"TAPAS-large"),hk.forEach(s),Oh=n(bs,"). Results of the various sized models are shown on the "),ba=r(bs,"A",{href:!0,rel:!0});var uk=i(ba);Ih=n(uk,"original Github repository"),uk.forEach(s),Wh=n(bs,"."),bs.forEach(s),Uh=d(ca),Vs=r(ca,"LI",{});var br=i(Vs);Hh=n(br,"TAPAS has checkpoints fine-tuned on SQA, which are capable of answering questions related to a table in a conversational set-up. This means that you can ask follow-up questions such as \u201Cwhat is his age?\u201D related to the previous question. Note that the forward pass of TAPAS is a bit different in case of a conversational set-up: in that case, you have to feed every table-question pair one by one to the model, such that the "),Xr=r(br,"CODE",{});var mk=i(Xr);Bh=n(mk,"prev_labels"),mk.forEach(s),Gh=n(br," token type ids can be overwritten by the predicted "),Zr=r(br,"CODE",{});var fk=i(Zr);Rh=n(fk,"labels"),fk.forEach(s),Vh=n(br," of the model to the previous question. See \u201CUsage\u201D section for more info."),br.forEach(s),Kh=d(ca),ei=r(ca,"LI",{});var gk=i(ei);Yh=n(gk,"TAPAS is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard. 
Note that TAPAS can be used as an encoder in the EncoderDecoderModel framework, to combine it with an autoregressive text decoder such as GPT-2."),gk.forEach(s),ca.forEach(s),lp=d(t),Ks=r(t,"H2",{class:!0});var pc=i(Ks);At=r(pc,"A",{id:!0,class:!0,href:!0});var _k=i(At);si=r(_k,"SPAN",{});var Tk=i(si);b(wa.$$.fragment,Tk),Tk.forEach(s),_k.forEach(s),Jh=d(pc),ti=r(pc,"SPAN",{});var vk=i(ti);Xh=n(vk,"Usage: fine-tuning"),vk.forEach(s),pc.forEach(s),dp=d(t),zt=r(t,"P",{});var cc=i(zt);Zh=n(cc,"Here we explain how you can fine-tune "),bo=r(cc,"A",{href:!0});var bk=i(bo);eu=n(bk,"TapasForQuestionAnswering"),bk.forEach(s),su=n(cc," on your own dataset."),cc.forEach(s),pp=d(t),wo=r(t,"P",{});var wk=i(wo);ai=r(wk,"STRONG",{});var yk=i(ai);tu=n(yk,"STEP 1: Choose one of the 3 ways in which you can use TAPAS - or experiment"),yk.forEach(s),wk.forEach(s),cp=d(t),$t=r(t,"P",{});var hc=i($t);au=n(hc,"Basically, there are 3 different ways in which one can fine-tune "),yo=r(hc,"A",{href:!0});var kk=i(yo);nu=n(kk,"TapasForQuestionAnswering"),kk.forEach(s),ou=n(hc,", corresponding to the different datasets on which Tapas was fine-tuned:"),hc.forEach(s),hp=d(t),zs=r(t,"OL",{});var wr=i(zs);ni=r(wr,"LI",{});var jk=i(ni);ru=n(jk,"SQA: if you\u2019re interested in asking follow-up questions related to a table, in a conversational set-up. For example if you first ask \u201Cwhat\u2019s the name of the first actor?\u201D then you can ask a follow-up question such as \u201Chow old is he?\u201C. Here, questions do not involve any aggregation (all questions are cell selection questions)."),jk.forEach(s),iu=d(wr),ya=r(wr,"LI",{});var uc=i(ya);lu=n(uc,"WTQ: if you\u2019re not interested in asking questions in a conversational set-up, but rather just asking questions related to a table, which might involve aggregation, such as counting a number of rows, summing up cell values or averaging cell values. You can then for example ask \u201Cwhat\u2019s the total number of goals Cristiano Ronaldo made in his career?\u201C. This case is also called "),oi=r(uc,"STRONG",{});var qk=i(oi);du=n(qk,"weak supervision"),qk.forEach(s),pu=n(uc,", since the model itself must learn the appropriate aggregation operator (SUM/COUNT/AVERAGE/NONE) given only the answer to the question as supervision."),uc.forEach(s),cu=d(wr),ka=r(wr,"LI",{});var mc=i(ka);hu=n(mc,"WikiSQL-supervised: this dataset is based on WikiSQL with the model being given the ground truth aggregation operator during training. This is also called "),ri=r(mc,"STRONG",{});var Ek=i(ri);uu=n(Ek,"strong supervision"),Ek.forEach(s),mu=n(mc,". 
Here, learning the appropriate aggregation operator is much easier."),mc.forEach(s),wr.forEach(s),up=d(t),ko=r(t,"P",{});var xk=i(ko);fu=n(xk,"To summarize:"),xk.forEach(s),mp=d(t),Ct=r(t,"TABLE",{});var fc=i(Ct);ii=r(fc,"THEAD",{});var Fk=i(ii);Ys=r(Fk,"TR",{});var yr=i(Ys);li=r(yr,"TH",{});var Ak=i(li);di=r(Ak,"STRONG",{});var zk=i(di);gu=n(zk,"Task"),zk.forEach(s),Ak.forEach(s),_u=d(yr),pi=r(yr,"TH",{});var $k=i(pi);ci=r($k,"STRONG",{});var Ck=i(ci);Tu=n(Ck,"Example dataset"),Ck.forEach(s),$k.forEach(s),vu=d(yr),hi=r(yr,"TH",{});var Mk=i(hi);ui=r(Mk,"STRONG",{});var Pk=i(ui);bu=n(Pk,"Description"),Pk.forEach(s),Mk.forEach(s),yr.forEach(s),Fk.forEach(s),wu=d(fc),Js=r(fc,"TBODY",{});var kr=i(Js);Xs=r(kr,"TR",{});var jr=i(Xs);mi=r(jr,"TD",{});var Sk=i(mi);yu=n(Sk,"Conversational"),Sk.forEach(s),ku=d(jr),fi=r(jr,"TD",{});var Dk=i(fi);ju=n(Dk,"SQA"),Dk.forEach(s),qu=d(jr),gi=r(jr,"TD",{});var Nk=i(gi);Eu=n(Nk,"Conversational, only cell selection questions"),Nk.forEach(s),jr.forEach(s),xu=d(kr),Zs=r(kr,"TR",{});var qr=i(Zs);_i=r(qr,"TD",{});var Lk=i(_i);Fu=n(Lk,"Weak supervision for aggregation"),Lk.forEach(s),Au=d(qr),Ti=r(qr,"TD",{});var Qk=i(Ti);zu=n(Qk,"WTQ"),Qk.forEach(s),$u=d(qr),vi=r(qr,"TD",{});var Ok=i(vi);Cu=n(Ok,"Questions might involve aggregation, and the model must learn this given only the answer as supervision"),Ok.forEach(s),qr.forEach(s),Mu=d(kr),et=r(kr,"TR",{});var Er=i(et);bi=r(Er,"TD",{});var Ik=i(bi);Pu=n(Ik,"Strong supervision for aggregation"),Ik.forEach(s),Su=d(Er),wi=r(Er,"TD",{});var Wk=i(wi);Du=n(Wk,"WikiSQL-supervised"),Wk.forEach(s),Nu=d(Er),yi=r(Er,"TD",{});var Uk=i(yi);Lu=n(Uk,"Questions might involve aggregation, and the model must learn this given the gold aggregation operator"),Uk.forEach(s),Er.forEach(s),kr.forEach(s),fc.forEach(s),fp=d(t),$s=r(t,"P",{});var xr=i($s);Qu=n(xr,`Initializing a model with a pre-trained base and randomly initialized classification heads from the hub can be done as shown below. Be sure to have installed the `),ja=r(xr,"A",{href:!0,rel:!0});var Hk=i(ja);Ou=n(Hk,"torch-scatter"),Hk.forEach(s),Iu=n(xr," dependency for your environment in case you\u2019re using PyTorch, or the "),qa=r(xr,"A",{href:!0,rel:!0});var Bk=i(qa);Wu=n(Bk,"tensorflow_probability"),Bk.forEach(s),Uu=n(xr,` dependency in case you\u2019re using Tensorflow:`),xr.forEach(s),gp=d(t),b(Ea.$$.fragment,t),_p=d(t),Cs=r(t,"P",{});var Fr=i(Cs);Hu=n(Fr,"Of course, you don\u2019t necessarily have to follow one of these three ways in which TAPAS was fine-tuned. You can also experiment by defining any hyperparameters you want when initializing "),jo=r(Fr,"A",{href:!0});var Gk=i(jo);Bu=n(Gk,"TapasConfig"),Gk.forEach(s),Gu=n(Fr,", and then create a "),qo=r(Fr,"A",{href:!0});var Rk=i(qo);Ru=n(Rk,"TapasForQuestionAnswering"),Rk.forEach(s),Vu=n(Fr," based on that configuration. For example, if you have a dataset that has both conversational questions and questions that might involve aggregation, then you can do it this way. Here\u2019s an example:"),Fr.forEach(s),Tp=d(t),b(xa.$$.fragment,t),vp=d(t),Mt=r(t,"P",{});var gc=i(Mt);Ku=n(gc,"What you can also do is start from an already fine-tuned checkpoint. A note here is that the already fine-tuned checkpoint on WTQ has some issues due to the L2-loss which is somewhat brittle. 
See "),Fa=r(gc,"A",{href:!0,rel:!0});var Vk=i(Fa);Yu=n(Vk,"here"),Vk.forEach(s),Ju=n(gc," for more info."),gc.forEach(s),bp=d(t),Pt=r(t,"P",{});var _c=i(Pt);Xu=n(_c,"For a list of all pre-trained and fine-tuned TAPAS checkpoints available on HuggingFace\u2019s hub, see "),Aa=r(_c,"A",{href:!0,rel:!0});var Kk=i(Aa);Zu=n(Kk,"here"),Kk.forEach(s),em=n(_c,"."),_c.forEach(s),wp=d(t),Eo=r(t,"P",{});var Yk=i(Eo);ki=r(Yk,"STRONG",{});var Jk=i(ki);sm=n(Jk,"STEP 2: Prepare your data in the SQA format"),Jk.forEach(s),Yk.forEach(s),yp=d(t),St=r(t,"P",{});var Tc=i(St);tm=n(Tc,"Second, no matter what you picked above, you should prepare your dataset in the "),za=r(Tc,"A",{href:!0,rel:!0});var Xk=i(za);am=n(Xk,"SQA"),Xk.forEach(s),nm=n(Tc," format. This format is a TSV/CSV file with the following columns:"),Tc.forEach(s),kp=d(t),ie=r(t,"UL",{});var ze=i(ie);xo=r(ze,"LI",{});var $y=i(xo);ji=r($y,"CODE",{});var Zk=i(ji);om=n(Zk,"id"),Zk.forEach(s),rm=n($y,": optional, id of the table-question pair, for bookkeeping purposes."),$y.forEach(s),im=d(ze),Fo=r(ze,"LI",{});var Cy=i(Fo);qi=r(Cy,"CODE",{});var e1=i(qi);lm=n(e1,"annotator"),e1.forEach(s),dm=n(Cy,": optional, id of the person who annotated the table-question pair, for bookkeeping purposes."),Cy.forEach(s),pm=d(ze),Ao=r(ze,"LI",{});var My=i(Ao);Ei=r(My,"CODE",{});var s1=i(Ei);cm=n(s1,"position"),s1.forEach(s),hm=n(My,": integer indicating if the question is the first, second, third,\u2026 related to the table. Only required in case of conversational setup (SQA). You don\u2019t need this column in case you\u2019re going for WTQ/WikiSQL-supervised."),My.forEach(s),um=d(ze),zo=r(ze,"LI",{});var Py=i(zo);xi=r(Py,"CODE",{});var t1=i(xi);mm=n(t1,"question"),t1.forEach(s),fm=n(Py,": string"),Py.forEach(s),gm=d(ze),$o=r(ze,"LI",{});var Sy=i($o);Fi=r(Sy,"CODE",{});var a1=i(Fi);_m=n(a1,"table_file"),a1.forEach(s),Tm=n(Sy,": string, name of a csv file containing the tabular data"),Sy.forEach(s),vm=d(ze),Co=r(ze,"LI",{});var Dy=i(Co);Ai=r(Dy,"CODE",{});var n1=i(Ai);bm=n(n1,"answer_coordinates"),n1.forEach(s),wm=n(Dy,": list of one or more tuples (each tuple being a cell coordinate, i.e. row, column pair that is part of the answer)"),Dy.forEach(s),ym=d(ze),Mo=r(ze,"LI",{});var Ny=i(Mo);zi=r(Ny,"CODE",{});var o1=i(zi);km=n(o1,"answer_text"),o1.forEach(s),jm=n(Ny,": list of one or more strings (each string being a cell value that is part of the answer)"),Ny.forEach(s),qm=d(ze),Po=r(ze,"LI",{});var Ly=i(Po);$i=r(Ly,"CODE",{});var r1=i($i);Em=n(r1,"aggregation_label"),r1.forEach(s),xm=n(Ly,": index of the aggregation operator. Only required in case of strong supervision for aggregation (the WikiSQL-supervised case)"),Ly.forEach(s),Fm=d(ze),So=r(ze,"LI",{});var Qy=i(So);Ci=r(Qy,"CODE",{});var i1=i(Ci);Am=n(i1,"float_answer"),i1.forEach(s),zm=n(Qy,": the float answer to the question, if there is one (np.nan if there isn\u2019t). Only required in case of weak supervision for aggregation (such as WTQ and WikiSQL)"),Qy.forEach(s),ze.forEach(s),jp=d(t),We=r(t,"P",{});var ws=i(We);$m=n(ws,"The tables themselves should be present in a folder, each table being a separate csv file. Note that the authors of the TAPAS algorithm used conversion scripts with some automated logic to convert the other datasets (WTQ, WikiSQL) into the SQA format. The author explains this "),$a=r(ws,"A",{href:!0,rel:!0});var l1=i($a);Cm=n(l1,"here"),l1.forEach(s),Mm=n(ws,". 
A conversion of this script that works with HuggingFace\u2019s implementation can be found "),Ca=r(ws,"A",{href:!0,rel:!0});var d1=i(Ca);Pm=n(d1,"here"),d1.forEach(s),Sm=n(ws,". Interestingly, these conversion scripts are not perfect (the "),Mi=r(ws,"CODE",{});var p1=i(Mi);Dm=n(p1,"answer_coordinates"),p1.forEach(s),Nm=n(ws," and "),Pi=r(ws,"CODE",{});var c1=i(Pi);Lm=n(c1,"float_answer"),c1.forEach(s),Qm=n(ws," fields are populated based on the "),Si=r(ws,"CODE",{});var h1=i(Si);Om=n(h1,"answer_text"),h1.forEach(s),Im=n(ws,"), meaning that WTQ and WikiSQL results could actually be improved."),ws.forEach(s),qp=d(t),Do=r(t,"P",{});var u1=i(Do);Di=r(u1,"STRONG",{});var m1=i(Di);Wm=n(m1,"STEP 3: Convert your data into PyTorch/TensorFlow tensors using TapasTokenizer"),m1.forEach(s),u1.forEach(s),Ep=d(t),Ae=r(t,"P",{});var cs=i(Ae);Um=n(cs,"Third, given that you\u2019ve prepared your data in this TSV/CSV format (and corresponding CSV files containing the tabular data), you can then use "),No=r(cs,"A",{href:!0});var f1=i(No);Hm=n(f1,"TapasTokenizer"),f1.forEach(s),Bm=n(cs," to convert table-question pairs into "),Ni=r(cs,"CODE",{});var g1=i(Ni);Gm=n(g1,"input_ids"),g1.forEach(s),Rm=n(cs,", "),Li=r(cs,"CODE",{});var _1=i(Li);Vm=n(_1,"attention_mask"),_1.forEach(s),Km=n(cs,", "),Qi=r(cs,"CODE",{});var T1=i(Qi);Ym=n(T1,"token_type_ids"),T1.forEach(s),Jm=n(cs," and so on. Again, based on which of the three cases you picked above, "),Lo=r(cs,"A",{href:!0});var v1=i(Lo);Xm=n(v1,"TapasForQuestionAnswering"),v1.forEach(s),Zm=n(cs,"/"),Qo=r(cs,"A",{href:!0});var b1=i(Qo);ef=n(b1,"TFTapasForQuestionAnswering"),b1.forEach(s),sf=n(cs,` requires different inputs to be fine-tuned:`),cs.forEach(s),xp=d(t),Dt=r(t,"TABLE",{});var vc=i(Dt);Oi=r(vc,"THEAD",{});var w1=i(Oi);Ma=r(w1,"TR",{});var bc=i(Ma);Ii=r(bc,"TH",{});var y1=i(Ii);Wi=r(y1,"STRONG",{});var k1=i(Wi);tf=n(k1,"Task"),k1.forEach(s),y1.forEach(s),af=d(bc),Ui=r(bc,"TH",{});var j1=i(Ui);Hi=r(j1,"STRONG",{});var q1=i(Hi);nf=n(q1,"Required inputs"),q1.forEach(s),j1.forEach(s),bc.forEach(s),w1.forEach(s),of=d(vc),st=r(vc,"TBODY",{});var Ar=i(st);Pa=r(Ar,"TR",{});var wc=i(Pa);Bi=r(wc,"TD",{});var E1=i(Bi);rf=n(E1,"Conversational"),E1.forEach(s),lf=d(wc),As=r(wc,"TD",{});var ha=i(As);Gi=r(ha,"CODE",{});var x1=i(Gi);df=n(x1,"input_ids"),x1.forEach(s),pf=n(ha,", "),Ri=r(ha,"CODE",{});var F1=i(Ri);cf=n(F1,"attention_mask"),F1.forEach(s),hf=n(ha,", "),Vi=r(ha,"CODE",{});var A1=i(Vi);uf=n(A1,"token_type_ids"),A1.forEach(s),mf=n(ha,", "),Ki=r(ha,"CODE",{});var z1=i(Ki);ff=n(z1,"labels"),z1.forEach(s),ha.forEach(s),wc.forEach(s),gf=d(Ar),Sa=r(Ar,"TR",{});var yc=i(Sa);Yi=r(yc,"TD",{});var $1=i(Yi);_f=n($1,"Weak supervision for aggregation"),$1.forEach(s),Tf=d(yc),Pe=r(yc,"TD",{});var hs=i(Pe);Ji=r(hs,"CODE",{});var C1=i(Ji);vf=n(C1,"input_ids"),C1.forEach(s),bf=n(hs,", "),Xi=r(hs,"CODE",{});var M1=i(Xi);wf=n(M1,"attention_mask"),M1.forEach(s),yf=n(hs,", "),Zi=r(hs,"CODE",{});var P1=i(Zi);kf=n(P1,"token_type_ids"),P1.forEach(s),jf=n(hs,", "),el=r(hs,"CODE",{});var S1=i(el);qf=n(S1,"labels"),S1.forEach(s),Ef=n(hs,", "),sl=r(hs,"CODE",{});var D1=i(sl);xf=n(D1,"numeric_values"),D1.forEach(s),Ff=n(hs,", "),tl=r(hs,"CODE",{});var N1=i(tl);Af=n(N1,"numeric_values_scale"),N1.forEach(s),zf=n(hs,", "),al=r(hs,"CODE",{});var L1=i(al);$f=n(L1,"float_answer"),L1.forEach(s),hs.forEach(s),yc.forEach(s),Cf=d(Ar),Da=r(Ar,"TR",{});var kc=i(Da);nl=r(kc,"TD",{});var Q1=i(nl);Mf=n(Q1,"Strong supervision for aggregation"),Q1.forEach(s),Pf=d(kc),ms=r(kc,"TD",{});var 
Ss=i(ms);ol=r(Ss,"CODE",{});var O1=i(ol);Sf=n(O1,"input ids"),O1.forEach(s),Df=n(Ss,", "),rl=r(Ss,"CODE",{});var I1=i(rl);Nf=n(I1,"attention mask"),I1.forEach(s),Lf=n(Ss,", "),il=r(Ss,"CODE",{});var W1=i(il);Qf=n(W1,"token type ids"),W1.forEach(s),Of=n(Ss,", "),ll=r(Ss,"CODE",{});var U1=i(ll);If=n(U1,"labels"),U1.forEach(s),Wf=n(Ss,", "),dl=r(Ss,"CODE",{});var H1=i(dl);Uf=n(H1,"aggregation_labels"),H1.forEach(s),Ss.forEach(s),kc.forEach(s),Ar.forEach(s),vc.forEach(s),Fp=d(t),le=r(t,"P",{});var Ie=i(le);Oo=r(Ie,"A",{href:!0});var B1=i(Oo);Hf=n(B1,"TapasTokenizer"),B1.forEach(s),Bf=n(Ie," creates the "),pl=r(Ie,"CODE",{});var G1=i(pl);Gf=n(G1,"labels"),G1.forEach(s),Rf=n(Ie,", "),cl=r(Ie,"CODE",{});var R1=i(cl);Vf=n(R1,"numeric_values"),R1.forEach(s),Kf=n(Ie," and "),hl=r(Ie,"CODE",{});var V1=i(hl);Yf=n(V1,"numeric_values_scale"),V1.forEach(s),Jf=n(Ie," based on the "),ul=r(Ie,"CODE",{});var K1=i(ul);Xf=n(K1,"answer_coordinates"),K1.forEach(s),Zf=n(Ie," and "),ml=r(Ie,"CODE",{});var Y1=i(ml);eg=n(Y1,"answer_text"),Y1.forEach(s),sg=n(Ie," columns of the TSV file. The "),fl=r(Ie,"CODE",{});var J1=i(fl);tg=n(J1,"float_answer"),J1.forEach(s),ag=n(Ie," and "),gl=r(Ie,"CODE",{});var X1=i(gl);ng=n(X1,"aggregation_labels"),X1.forEach(s),og=n(Ie," are already in the TSV file of step 2. Here\u2019s an example:"),Ie.forEach(s),Ap=d(t),b(Na.$$.fragment,t),zp=d(t),vs=r(t,"P",{});var ua=i(vs);rg=n(ua,"Note that "),Io=r(ua,"A",{href:!0});var Z1=i(Io);ig=n(Z1,"TapasTokenizer"),Z1.forEach(s),lg=n(ua," expects the data of the table to be "),_l=r(ua,"STRONG",{});var e2=i(_l);dg=n(e2,"text-only"),e2.forEach(s),pg=n(ua,". You can use "),Tl=r(ua,"CODE",{});var s2=i(Tl);cg=n(s2,".astype(str)"),s2.forEach(s),hg=n(ua,` on a dataframe to turn it into text-only data. Of course, this only shows how to encode a single training example. It is advised to create a dataloader to iterate over batches:`),ua.forEach(s),$p=d(t),b(La.$$.fragment,t),Cp=d(t),X=r(t,"P",{});var fe=i(X);ug=n(fe,"Note that here, we encode each table-question pair independently. This is fine as long as your dataset is "),vl=r(fe,"STRONG",{});var t2=i(vl);mg=n(t2,"not conversational"),t2.forEach(s),fg=n(fe,". In case your dataset involves conversational questions (such as in SQA), then you should first group together the "),bl=r(fe,"CODE",{});var a2=i(bl);gg=n(a2,"queries"),a2.forEach(s),_g=n(fe,", "),wl=r(fe,"CODE",{});var n2=i(wl);Tg=n(n2,"answer_coordinates"),n2.forEach(s),vg=n(fe," and "),yl=r(fe,"CODE",{});var o2=i(yl);bg=n(o2,"answer_text"),o2.forEach(s),wg=n(fe," per table (in the order of their "),kl=r(fe,"CODE",{});var r2=i(kl);yg=n(r2,"position"),r2.forEach(s),kg=n(fe,` index) and batch encode each table with its questions. This will make sure that the `),jl=r(fe,"CODE",{});var i2=i(jl);jg=n(i2,"prev_labels"),i2.forEach(s),qg=n(fe," token types (see docs of "),Wo=r(fe,"A",{href:!0});var l2=i(Wo);Eg=n(l2,"TapasTokenizer"),l2.forEach(s),xg=n(fe,") are set correctly. See "),Qa=r(fe,"A",{href:!0,rel:!0});var d2=i(Qa);Fg=n(d2,"this notebook"),d2.forEach(s),Ag=n(fe," for more info. 
See "),Oa=r(fe,"A",{href:!0,rel:!0});var p2=i(Oa);zg=n(p2,"this notebook"),p2.forEach(s),$g=n(fe," for more info regarding using the TensorFlow model."),fe.forEach(s),Mp=d(t),Uo=r(t,"P",{});var c2=i(Uo);ql=r(c2,"STRONG",{});var h2=i(ql);Cg=n(h2,"STEP 4: Train (fine-tune) TapasForQuestionAnswering/TFTapasForQuestionAnswering"),h2.forEach(s),c2.forEach(s),Pp=d(t),Ms=r(t,"P",{});var zr=i(Ms);Mg=n(zr,"You can then fine-tune "),Ho=r(zr,"A",{href:!0});var u2=i(Ho);Pg=n(u2,"TapasForQuestionAnswering"),u2.forEach(s),Sg=n(zr," or "),Bo=r(zr,"A",{href:!0});var m2=i(Bo);Dg=n(m2,"TFTapasForQuestionAnswering"),m2.forEach(s),Ng=n(zr," as follows (shown here for the weak supervision for aggregation case):"),zr.forEach(s),Sp=d(t),b(Ia.$$.fragment,t),Dp=d(t),tt=r(t,"H2",{class:!0});var jc=i(tt);Nt=r(jc,"A",{id:!0,class:!0,href:!0});var f2=i(Nt);El=r(f2,"SPAN",{});var g2=i(El);b(Wa.$$.fragment,g2),g2.forEach(s),f2.forEach(s),Lg=d(jc),xl=r(jc,"SPAN",{});var _2=i(xl);Qg=n(_2,"Usage: inference"),_2.forEach(s),jc.forEach(s),Np=d(t),me=r(t,"P",{});var He=i(me);Og=n(He,"Here we explain how you can use "),Go=r(He,"A",{href:!0});var T2=i(Go);Ig=n(T2,"TapasForQuestionAnswering"),T2.forEach(s),Wg=n(He," or "),Ro=r(He,"A",{href:!0});var v2=i(Ro);Ug=n(v2,"TFTapasForQuestionAnswering"),v2.forEach(s),Hg=n(He," for inference (i.e. making predictions on new data). For inference, only "),Fl=r(He,"CODE",{});var b2=i(Fl);Bg=n(b2,"input_ids"),b2.forEach(s),Gg=n(He,", "),Al=r(He,"CODE",{});var w2=i(Al);Rg=n(w2,"attention_mask"),w2.forEach(s),Vg=n(He," and "),zl=r(He,"CODE",{});var y2=i(zl);Kg=n(y2,"token_type_ids"),y2.forEach(s),Yg=n(He," (which you can obtain using "),Vo=r(He,"A",{href:!0});var k2=i(Vo);Jg=n(k2,"TapasTokenizer"),k2.forEach(s),Xg=n(He,") have to be provided to the model to obtain the logits. Next, you can use the handy "),$l=r(He,"CODE",{});var j2=i($l);Zg=n(j2,"convert_logits_to_predictions"),j2.forEach(s),e_=n(He," method to convert these into predicted coordinates and optional aggregation indices."),He.forEach(s),Lp=d(t),Lt=r(t,"P",{});var qc=i(Lt);s_=n(qc,"However, note that inference is "),Cl=r(qc,"STRONG",{});var q2=i(Cl);t_=n(q2,"different"),q2.forEach(s),a_=n(qc," depending on whether or not the setup is conversational. In a non-conversational set-up, inference can be done in parallel on all table-question pairs of a batch. Here\u2019s an example of that:"),qc.forEach(s),Qp=d(t),b(Ua.$$.fragment,t),Op=d(t),Ue=r(t,"P",{});var ys=i(Ue);n_=n(ys,"In case of a conversational set-up, then each table-question pair must be provided "),Ml=r(ys,"STRONG",{});var E2=i(Ml);o_=n(E2,"sequentially"),E2.forEach(s),r_=n(ys," to the model, such that the "),Pl=r(ys,"CODE",{});var x2=i(Pl);i_=n(x2,"prev_labels"),x2.forEach(s),l_=n(ys," token types can be overwritten by the predicted "),Sl=r(ys,"CODE",{});var F2=i(Sl);d_=n(F2,"labels"),F2.forEach(s),p_=n(ys," of the previous table-question pair. 
Again, more info can be found in "),Ha=r(ys,"A",{href:!0,rel:!0});var A2=i(Ha);c_=n(A2,"this notebook"),A2.forEach(s),h_=n(ys," (for PyTorch) and "),Ba=r(ys,"A",{href:!0,rel:!0});var z2=i(Ba);u_=n(z2,"this notebook"),z2.forEach(s),m_=n(ys," (for TensorFlow)."),ys.forEach(s),Ip=d(t),at=r(t,"H2",{class:!0});var Ec=i(at);Qt=r(Ec,"A",{id:!0,class:!0,href:!0});var $2=i(Qt);Dl=r($2,"SPAN",{});var C2=i(Dl);b(Ga.$$.fragment,C2),C2.forEach(s),$2.forEach(s),f_=d(Ec),Nl=r(Ec,"SPAN",{});var M2=i(Nl);g_=n(M2,"TAPAS specific outputs"),M2.forEach(s),Ec.forEach(s),Wp=d(t),nt=r(t,"DIV",{class:!0});var xc=i(nt);b(Ra.$$.fragment,xc),__=d(xc),Va=r(xc,"P",{});var Fc=i(Va);T_=n(Fc,"Output type of "),Ko=r(Fc,"A",{href:!0});var P2=i(Ko);v_=n(P2,"TapasForQuestionAnswering"),P2.forEach(s),b_=n(Fc,"."),Fc.forEach(s),xc.forEach(s),Up=d(t),ot=r(t,"H2",{class:!0});var Ac=i(ot);Ot=r(Ac,"A",{id:!0,class:!0,href:!0});var S2=i(Ot);Ll=r(S2,"SPAN",{});var D2=i(Ll);b(Ka.$$.fragment,D2),D2.forEach(s),S2.forEach(s),w_=d(Ac),Ql=r(Ac,"SPAN",{});var N2=i(Ql);y_=n(N2,"TapasConfig"),N2.forEach(s),Ac.forEach(s),Hp=d(t),Xe=r(t,"DIV",{class:!0});var Ds=i(Xe);b(Ya.$$.fragment,Ds),k_=d(Ds),fs=r(Ds,"P",{});var Ns=i(fs);j_=n(Ns,"This is the configuration class to store the configuration of a "),Yo=r(Ns,"A",{href:!0});var L2=i(Yo);q_=n(L2,"TapasModel"),L2.forEach(s),E_=n(Ns,`. It is used to instantiate a TAPAS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TAPAS `),Ol=r(Ns,"EM",{});var Q2=i(Ol);x_=n(Q2,"tapas-base-finetuned-sqa"),Q2.forEach(s),F_=n(Ns,` architecture. Configuration objects inherit from `),Il=r(Ns,"CODE",{});var O2=i(Il);A_=n(O2,"PreTrainedConfig"),O2.forEach(s),z_=n(Ns,` and can be used to control the model outputs. Read the documentation from `),Jo=r(Ns,"A",{href:!0});var I2=i(Jo);$_=n(I2,"PretrainedConfig"),I2.forEach(s),C_=n(Ns," for more information."),Ns.forEach(s),M_=d(Ds),Ja=r(Ds,"P",{});var zc=i(Ja);P_=n(zc,`Hyperparameters additional to BERT are taken from run_task_main.py and hparam_utils.py of the original implementation. Original implementation available at `),Xa=r(zc,"A",{href:!0,rel:!0});var W2=i(Xa);S_=n(W2,"https://github.com/google-research/tapas/tree/master"),W2.forEach(s),D_=n(zc,"."),zc.forEach(s),N_=d(Ds),Wl=r(Ds,"P",{});var U2=i(Wl);L_=n(U2,"Example:"),U2.forEach(s),Q_=d(Ds),b(Za.$$.fragment,Ds),Ds.forEach(s),Bp=d(t),rt=r(t,"H2",{class:!0});var $c=i(rt);It=r($c,"A",{id:!0,class:!0,href:!0});var H2=i(It);Ul=r(H2,"SPAN",{});var B2=i(Ul);b(en.$$.fragment,B2),B2.forEach(s),H2.forEach(s),O_=d($c),Hl=r($c,"SPAN",{});var G2=i(Hl);I_=n(G2,"TapasTokenizer"),G2.forEach(s),$c.forEach(s),Gp=d(t),de=r(t,"DIV",{class:!0});var Be=i(de);b(sn.$$.fragment,Be),W_=d(Be),Bl=r(Be,"P",{});var R2=i(Bl);U_=n(R2,`Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by TAPAS models.`),R2.forEach(s),H_=d(Be),ne=r(Be,"P",{});var ge=i(ne);B_=n(ge,"This tokenizer inherits from "),Xo=r(ge,"A",{href:!0});var V2=i(Xo);G_=n(V2,"PreTrainedTokenizer"),V2.forEach(s),R_=n(ge,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. `),Zo=r(ge,"A",{href:!0});var K2=i(Zo);V_=n(K2,"TapasTokenizer"),K2.forEach(s),K_=n(ge,` creates several token type ids to encode tabular structure. 
To be more precise, it adds 7 token type ids, in the following order: `),Gl=r(ge,"CODE",{});var Y2=i(Gl);Y_=n(Y2,"segment_ids"),Y2.forEach(s),J_=n(ge,", "),Rl=r(ge,"CODE",{});var J2=i(Rl);X_=n(J2,"column_ids"),J2.forEach(s),Z_=n(ge,", "),Vl=r(ge,"CODE",{});var X2=i(Vl);eT=n(X2,"row_ids"),X2.forEach(s),sT=n(ge,`, `),Kl=r(ge,"CODE",{});var Z2=i(Kl);tT=n(Z2,"prev_labels"),Z2.forEach(s),aT=n(ge,", "),Yl=r(ge,"CODE",{});var ej=i(Yl);nT=n(ej,"column_ranks"),ej.forEach(s),oT=n(ge,", "),Jl=r(ge,"CODE",{});var sj=i(Jl);rT=n(sj,"inv_column_ranks"),sj.forEach(s),iT=n(ge," and "),Xl=r(ge,"CODE",{});var tj=i(Xl);lT=n(tj,"numeric_relations"),tj.forEach(s),dT=n(ge,":"),ge.forEach(s),pT=d(Be),Se=r(Be,"UL",{});var us=i(Se);Zl=r(us,"LI",{});var aj=i(Zl);cT=n(aj,`segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and padding.`),aj.forEach(s),hT=d(us),ed=r(us,"LI",{});var nj=i(ed);uT=n(nj,`column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding.`),nj.forEach(s),mT=d(us),sd=r(us,"LI",{});var oj=i(sd);fT=n(oj,`row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding. Tokens of column headers are also 0.`),oj.forEach(s),gT=d(us),td=r(us,"LI",{});var rj=i(td);_T=n(rj,`prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in a conversational setup (such as SQA).`),rj.forEach(s),TT=d(us),ad=r(us,"LI",{});var ij=i(ad);vT=n(ij,`column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a column \u201Cnumber of movies\u201D with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2 respectively. 0 for all question tokens, special tokens and padding.`),ij.forEach(s),bT=d(us),nd=r(us,"LI",{});var lj=i(nd);wT=n(lj,`inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if you have a column \u201Cnumber of movies\u201D with values 87, 53 and 69, then the inverse column ranks of these tokens are 1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding.`),lj.forEach(s),yT=d(us),od=r(us,"LI",{});var dj=i(od);kT=n(dj,`numeric_relations: indicate numeric relations between the question and the tokens of the table. 
0 for all question tokens, special tokens and padding.`),dj.forEach(s),us.forEach(s),jT=d(Be),er=r(Be,"P",{});var Oy=i(er);sr=r(Oy,"A",{href:!0});var pj=i(sr);qT=n(pj,"TapasTokenizer"),pj.forEach(s),ET=n(Oy,` runs end-to-end tokenization on a table and associated sentences: punctuation splitting and wordpiece.`),Oy.forEach(s),xT=d(Be),Wt=r(Be,"DIV",{class:!0});var Cc=i(Wt);b(tn.$$.fragment,Cc),FT=d(Cc),rd=r(Cc,"P",{});var cj=i(rd);AT=n(cj,"Main method to tokenize and prepare for the model one or several sequence(s) related to a table."),cj.forEach(s),Cc.forEach(s),zT=d(Be),Ps=r(Be,"DIV",{class:!0});var $r=i(Ps);b(an.$$.fragment,$r),$T=d($r),nn=r($r,"P",{});var Mc=i(nn);CT=n(Mc,"Converts logits of "),tr=r(Mc,"A",{href:!0});var hj=i(tr);MT=n(hj,"TapasForQuestionAnswering"),hj.forEach(s),PT=n(Mc,` to actual predicted answer coordinates and optional aggregation indices.`),Mc.forEach(s),ST=d($r),on=r($r,"P",{});var Pc=i(on);DT=n(Pc,"The original implementation, on which this function is based, can be found "),rn=r(Pc,"A",{href:!0,rel:!0});var uj=i(rn);NT=n(uj,"here"),uj.forEach(s),LT=n(Pc,"."),Pc.forEach(s),$r.forEach(s),QT=d(Be),id=r(Be,"DIV",{class:!0}),i(id).forEach(s),Be.forEach(s),Rp=d(t),it=r(t,"H2",{class:!0});var Sc=i(it);Ut=r(Sc,"A",{id:!0,class:!0,href:!0});var mj=i(Ut);ld=r(mj,"SPAN",{});var fj=i(ld);b(ln.$$.fragment,fj),fj.forEach(s),mj.forEach(s),OT=d(Sc),dd=r(Sc,"SPAN",{});var gj=i(dd);IT=n(gj,"TapasModel"),gj.forEach(s),Sc.forEach(s),Vp=d(t),De=r(t,"DIV",{class:!0});var ks=i(De);b(dn.$$.fragment,ks),WT=d(ks),pn=r(ks,"P",{});var Dc=i(pn);UT=n(Dc,`The bare Tapas Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),ar=r(Dc,"A",{href:!0});var _j=i(ar);HT=n(_j,"PreTrainedModel"),_j.forEach(s),BT=n(Dc,`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Dc.forEach(s),GT=d(ks),cn=r(ks,"P",{});var Nc=i(cn);RT=n(Nc,"This model is also a PyTorch "),hn=r(Nc,"A",{href:!0,rel:!0});var Tj=i(hn);VT=n(Tj,"torch.nn.Module"),Tj.forEach(s),KT=n(Nc,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nc.forEach(s),YT=d(ks),un=r(ks,"P",{});var Lc=i(un);JT=n(Lc,"This class is a small change compared to "),nr=r(Lc,"A",{href:!0});var vj=i(nr);XT=n(vj,"BertModel"),vj.forEach(s),ZT=n(Lc,`, taking into account the additional token type ids.`),Lc.forEach(s),ev=d(ks),mn=r(ks,"P",{});var Qc=i(mn);sv=n(Qc,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),fn=r(Qc,"A",{href:!0,rel:!0});var bj=i(fn);tv=n(bj,`Attention is all you need`),bj.forEach(s),av=n(Qc,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Qc.forEach(s),nv=d(ks),ts=r(ks,"DIV",{class:!0});var Ls=i(ts);b(gn.$$.fragment,Ls),ov=d(Ls),lt=r(Ls,"P",{});var Cr=i(lt);rv=n(Cr,"The "),or=r(Cr,"A",{href:!0});var wj=i(or);iv=n(wj,"TapasModel"),wj.forEach(s),lv=n(Cr," forward method, overrides the "),pd=r(Cr,"CODE",{});var yj=i(pd);dv=n(yj,"__call__"),yj.forEach(s),pv=n(Cr," special method."),Cr.forEach(s),cv=d(Ls),b(Ht.$$.fragment,Ls),hv=d(Ls),cd=r(Ls,"P",{});var kj=i(cd);uv=n(kj,"Examples:"),kj.forEach(s),mv=d(Ls),b(_n.$$.fragment,Ls),Ls.forEach(s),ks.forEach(s),Kp=d(t),dt=r(t,"H2",{class:!0});var Oc=i(dt);Bt=r(Oc,"A",{id:!0,class:!0,href:!0});var jj=i(Bt);hd=r(jj,"SPAN",{});var qj=i(hd);b(Tn.$$.fragment,qj),qj.forEach(s),jj.forEach(s),fv=d(Oc),ud=r(Oc,"SPAN",{});var Ej=i(ud);gv=n(Ej,"TapasForMaskedLM"),Ej.forEach(s),Oc.forEach(s),Yp=d(t),gs=r(t,"DIV",{class:!0});var ma=i(gs);b(vn.$$.fragment,ma),_v=d(ma),pt=r(ma,"P",{});var Mr=i(pt);Tv=n(Mr,"Tapas Model with a "),md=r(Mr,"CODE",{});var xj=i(md);vv=n(xj,"language modeling"),xj.forEach(s),bv=n(Mr,` head on top. This model inherits from `),rr=r(Mr,"A",{href:!0});var Fj=i(rr);wv=n(Fj,"PreTrainedModel"),Fj.forEach(s),yv=n(Mr,`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mr.forEach(s),kv=d(ma),bn=r(ma,"P",{});var Ic=i(bn);jv=n(Ic,"This model is also a PyTorch "),wn=r(Ic,"A",{href:!0,rel:!0});var Aj=i(wn);qv=n(Aj,"torch.nn.Module"),Aj.forEach(s),Ev=n(Ic,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ic.forEach(s),xv=d(ma),as=r(ma,"DIV",{class:!0});var Qs=i(as);b(yn.$$.fragment,Qs),Fv=d(Qs),ct=r(Qs,"P",{});var Pr=i(ct);Av=n(Pr,"The "),ir=r(Pr,"A",{href:!0});var zj=i(ir);zv=n(zj,"TapasForMaskedLM"),zj.forEach(s),$v=n(Pr," forward method, overrides the "),fd=r(Pr,"CODE",{});var $j=i(fd);Cv=n($j,"__call__"),$j.forEach(s),Mv=n(Pr," special method."),Pr.forEach(s),Pv=d(Qs),b(Gt.$$.fragment,Qs),Sv=d(Qs),gd=r(Qs,"P",{});var Cj=i(gd);Dv=n(Cj,"Examples:"),Cj.forEach(s),Nv=d(Qs),b(kn.$$.fragment,Qs),Qs.forEach(s),ma.forEach(s),Jp=d(t),ht=r(t,"H2",{class:!0});var Wc=i(ht);Rt=r(Wc,"A",{id:!0,class:!0,href:!0});var Mj=i(Rt);_d=r(Mj,"SPAN",{});var Pj=i(_d);b(jn.$$.fragment,Pj),Pj.forEach(s),Mj.forEach(s),Lv=d(Wc),Td=r(Wc,"SPAN",{});var Sj=i(Td);Qv=n(Sj,"TapasForSequenceClassification"),Sj.forEach(s),Wc.forEach(s),Xp=d(t),Ze=r(t,"DIV",{class:!0});var Os=i(Ze);b(qn.$$.fragment,Os),Ov=d(Os),vd=r(Os,"P",{});var Dj=i(vd);Iv=n(Dj,`Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020).`),Dj.forEach(s),Wv=d(Os),En=r(Os,"P",{});var Uc=i(En);Uv=n(Uc,"This model inherits from "),lr=r(Uc,"A",{href:!0});var Nj=i(lr);Hv=n(Nj,"PreTrainedModel"),Nj.forEach(s),Bv=n(Uc,`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Uc.forEach(s),Gv=d(Os),xn=r(Os,"P",{});var Hc=i(xn);Rv=n(Hc,"This model is also a PyTorch "),Fn=r(Hc,"A",{href:!0,rel:!0});var Lj=i(Fn);Vv=n(Lj,"torch.nn.Module"),Lj.forEach(s),Kv=n(Hc,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hc.forEach(s),Yv=d(Os),ns=r(Os,"DIV",{class:!0});var Is=i(ns);b(An.$$.fragment,Is),Jv=d(Is),ut=r(Is,"P",{});var Sr=i(ut);Xv=n(Sr,"The "),dr=r(Sr,"A",{href:!0});var Qj=i(dr);Zv=n(Qj,"TapasForSequenceClassification"),Qj.forEach(s),eb=n(Sr," forward method, overrides the "),bd=r(Sr,"CODE",{});var Oj=i(bd);sb=n(Oj,"__call__"),Oj.forEach(s),tb=n(Sr," special method."),Sr.forEach(s),ab=d(Is),b(Vt.$$.fragment,Is),nb=d(Is),wd=r(Is,"P",{});var Ij=i(wd);ob=n(Ij,"Examples:"),Ij.forEach(s),rb=d(Is),b(zn.$$.fragment,Is),Is.forEach(s),Os.forEach(s),Zp=d(t),mt=r(t,"H2",{class:!0});var Bc=i(mt);Kt=r(Bc,"A",{id:!0,class:!0,href:!0});var Wj=i(Kt);yd=r(Wj,"SPAN",{});var Uj=i(yd);b($n.$$.fragment,Uj),Uj.forEach(s),Wj.forEach(s),ib=d(Bc),kd=r(Bc,"SPAN",{});var Hj=i(kd);lb=n(Hj,"TapasForQuestionAnswering"),Hj.forEach(s),Bc.forEach(s),ec=d(t),es=r(t,"DIV",{class:!0});var Ws=i(es);b(Cn.$$.fragment,Ws),db=d(Ws),ft=r(Ws,"P",{});var Dr=i(ft);pb=n(Dr,`Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `),jd=r(Dr,"CODE",{});var Bj=i(jd);cb=n(Bj,"logits"),Bj.forEach(s),hb=n(Dr," and optional "),qd=r(Dr,"CODE",{});var Gj=i(qd);ub=n(Gj,"logits_aggregation"),Gj.forEach(s),mb=n(Dr,`), e.g. for SQA, WTQ or WikiSQL-supervised tasks.`),Dr.forEach(s),fb=d(Ws),Mn=r(Ws,"P",{});var Gc=i(Mn);gb=n(Gc,"This model inherits from "),pr=r(Gc,"A",{href:!0});var Rj=i(pr);_b=n(Rj,"PreTrainedModel"),Rj.forEach(s),Tb=n(Gc,`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gc.forEach(s),vb=d(Ws),Pn=r(Ws,"P",{});var Rc=i(Pn);bb=n(Rc,"This model is also a PyTorch "),Sn=r(Rc,"A",{href:!0,rel:!0});var Vj=i(Sn);wb=n(Vj,"torch.nn.Module"),Vj.forEach(s),yb=n(Rc,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rc.forEach(s),kb=d(Ws),os=r(Ws,"DIV",{class:!0});var Us=i(os);b(Dn.$$.fragment,Us),jb=d(Us),gt=r(Us,"P",{});var Nr=i(gt);qb=n(Nr,"The "),cr=r(Nr,"A",{href:!0});var Kj=i(cr);Eb=n(Kj,"TapasForQuestionAnswering"),Kj.forEach(s),xb=n(Nr," forward method, overrides the "),Ed=r(Nr,"CODE",{});var Yj=i(Ed);Fb=n(Yj,"__call__"),Yj.forEach(s),Ab=n(Nr," special method."),Nr.forEach(s),zb=d(Us),b(Yt.$$.fragment,Us),$b=d(Us),xd=r(Us,"P",{});var Jj=i(xd);Cb=n(Jj,"Examples:"),Jj.forEach(s),Mb=d(Us),b(Nn.$$.fragment,Us),Us.forEach(s),Ws.forEach(s),sc=d(t),_t=r(t,"H2",{class:!0});var Vc=i(_t);Jt=r(Vc,"A",{id:!0,class:!0,href:!0});var Xj=i(Jt);Fd=r(Xj,"SPAN",{});var Zj=i(Fd);b(Ln.$$.fragment,Zj),Zj.forEach(s),Xj.forEach(s),Pb=d(Vc),Ad=r(Vc,"SPAN",{});var e0=i(Ad);Sb=n(e0,"TFTapasModel"),e0.forEach(s),Vc.forEach(s),tc=d(t),Ne=r(t,"DIV",{class:!0});var js=i(Ne);b(Qn.$$.fragment,js),Db=d(js),zd=r(js,"P",{});var s0=i(zd);Nb=n(s0,"The bare Tapas Model transformer outputting raw hidden-states without any specific head on top."),s0.forEach(s),Lb=d(js),On=r(js,"P",{});var Kc=i(On);Qb=n(Kc,"This model inherits from "),hr=r(Kc,"A",{href:!0});var t0=i(hr);Ob=n(t0,"TFPreTrainedModel"),t0.forEach(s),Ib=n(Kc,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kc.forEach(s),Wb=d(js),In=r(js,"P",{});var Yc=i(In);Ub=n(Yc,"This model is also a "),Wn=r(Yc,"A",{href:!0,rel:!0});var a0=i(Wn);Hb=n(a0,"tf.keras.Model"),a0.forEach(s),Bb=n(Yc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Yc.forEach(s),Gb=d(js),b(Xt.$$.fragment,js),Rb=d(js),rs=r(js,"DIV",{class:!0});var Hs=i(rs);b(Un.$$.fragment,Hs),Vb=d(Hs),Tt=r(Hs,"P",{});var Lr=i(Tt);Kb=n(Lr,"The "),ur=r(Lr,"A",{href:!0});var n0=i(ur);Yb=n(n0,"TFTapasModel"),n0.forEach(s),Jb=n(Lr," forward method, overrides the "),$d=r(Lr,"CODE",{});var o0=i($d);Xb=n(o0,"__call__"),o0.forEach(s),Zb=n(Lr," special method."),Lr.forEach(s),ew=d(Hs),b(Zt.$$.fragment,Hs),sw=d(Hs),Cd=r(Hs,"P",{});var r0=i(Cd);tw=n(r0,"Examples:"),r0.forEach(s),aw=d(Hs),b(Hn.$$.fragment,Hs),Hs.forEach(s),js.forEach(s),ac=d(t),vt=r(t,"H2",{class:!0});var Jc=i(vt);ea=r(Jc,"A",{id:!0,class:!0,href:!0});var i0=i(ea);Md=r(i0,"SPAN",{});var l0=i(Md);b(Bn.$$.fragment,l0),l0.forEach(s),i0.forEach(s),nw=d(Jc),Pd=r(Jc,"SPAN",{});var d0=i(Pd);ow=n(d0,"TFTapasForMaskedLM"),d0.forEach(s),Jc.forEach(s),nc=d(t),Le=r(t,"DIV",{class:!0});var qs=i(Le);b(Gn.$$.fragment,qs),rw=d(qs),Rn=r(qs,"P",{});var Xc=i(Rn);iw=n(Xc,"Tapas Model with a "),Sd=r(Xc,"CODE",{});var p0=i(Sd);lw=n(p0,"language modeling"),p0.forEach(s),dw=n(Xc," head on top."),Xc.forEach(s),pw=d(qs),Vn=r(qs,"P",{});var Zc=i(Vn);cw=n(Zc,"This model inherits from "),mr=r(Zc,"A",{href:!0});var c0=i(mr);hw=n(c0,"TFPreTrainedModel"),c0.forEach(s),uw=n(Zc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zc.forEach(s),mw=d(qs),Kn=r(qs,"P",{});var eh=i(Kn);fw=n(eh,"This model is also a "),Yn=r(eh,"A",{href:!0,rel:!0});var h0=i(Yn);gw=n(h0,"tf.keras.Model"),h0.forEach(s),_w=n(eh,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),eh.forEach(s),Tw=d(qs),b(sa.$$.fragment,qs),vw=d(qs),is=r(qs,"DIV",{class:!0});var Bs=i(is);b(Jn.$$.fragment,Bs),bw=d(Bs),bt=r(Bs,"P",{});var Qr=i(bt);ww=n(Qr,"The "),fr=r(Qr,"A",{href:!0});var u0=i(fr);yw=n(u0,"TFTapasForMaskedLM"),u0.forEach(s),kw=n(Qr," forward method, overrides the "),Dd=r(Qr,"CODE",{});var m0=i(Dd);jw=n(m0,"__call__"),m0.forEach(s),qw=n(Qr," special method."),Qr.forEach(s),Ew=d(Bs),b(ta.$$.fragment,Bs),xw=d(Bs),Nd=r(Bs,"P",{});var f0=i(Nd);Fw=n(f0,"Examples:"),f0.forEach(s),Aw=d(Bs),b(Xn.$$.fragment,Bs),Bs.forEach(s),qs.forEach(s),oc=d(t),wt=r(t,"H2",{class:!0});var sh=i(wt);aa=r(sh,"A",{id:!0,class:!0,href:!0});var g0=i(aa);Ld=r(g0,"SPAN",{});var _0=i(Ld);b(Zn.$$.fragment,_0),_0.forEach(s),g0.forEach(s),zw=d(sh),Qd=r(sh,"SPAN",{});var T0=i(Qd);$w=n(T0,"TFTapasForSequenceClassification"),T0.forEach(s),sh.forEach(s),rc=d(t),Qe=r(t,"DIV",{class:!0});var Es=i(Qe);b(eo.$$.fragment,Es),Cw=d(Es),Od=r(Es,"P",{});var v0=i(Od);Mw=n(v0,`Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. 
for table entailment tasks, such as TabFact (Chen et al., 2020).`),v0.forEach(s),Pw=d(Es),so=r(Es,"P",{});var th=i(so);Sw=n(th,"This model inherits from "),gr=r(th,"A",{href:!0});var b0=i(gr);Dw=n(b0,"TFPreTrainedModel"),b0.forEach(s),Nw=n(th,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),th.forEach(s),Lw=d(Es),to=r(Es,"P",{});var ah=i(to);Qw=n(ah,"This model is also a "),ao=r(ah,"A",{href:!0,rel:!0});var w0=i(ao);Ow=n(w0,"tf.keras.Model"),w0.forEach(s),Iw=n(ah,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ah.forEach(s),Ww=d(Es),b(na.$$.fragment,Es),Uw=d(Es),ls=r(Es,"DIV",{class:!0});var Gs=i(ls);b(no.$$.fragment,Gs),Hw=d(Gs),yt=r(Gs,"P",{});var Or=i(yt);Bw=n(Or,"The "),_r=r(Or,"A",{href:!0});var y0=i(_r);Gw=n(y0,"TFTapasForSequenceClassification"),y0.forEach(s),Rw=n(Or," forward method, overrides the "),Id=r(Or,"CODE",{});var k0=i(Id);Vw=n(k0,"__call__"),k0.forEach(s),Kw=n(Or," special method."),Or.forEach(s),Yw=d(Gs),b(oa.$$.fragment,Gs),Jw=d(Gs),Wd=r(Gs,"P",{});var j0=i(Wd);Xw=n(j0,"Examples:"),j0.forEach(s),Zw=d(Gs),b(oo.$$.fragment,Gs),Gs.forEach(s),Es.forEach(s),ic=d(t),kt=r(t,"H2",{class:!0});var nh=i(kt);ra=r(nh,"A",{id:!0,class:!0,href:!0});var q0=i(ra);Ud=r(q0,"SPAN",{});var E0=i(Ud);b(ro.$$.fragment,E0),E0.forEach(s),q0.forEach(s),ey=d(nh),Hd=r(nh,"SPAN",{});var x0=i(Hd);sy=n(x0,"TFTapasForQuestionAnswering"),x0.forEach(s),nh.forEach(s),lc=d(t),Oe=r(t,"DIV",{class:!0});var xs=i(Oe);b(io.$$.fragment,xs),ty=d(xs),jt=r(xs,"P",{});var Ir=i(jt);ay=n(Ir,`Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `),Bd=r(Ir,"CODE",{});var F0=i(Bd);ny=n(F0,"logits"),F0.forEach(s),oy=n(Ir," and optional "),Gd=r(Ir,"CODE",{});var A0=i(Gd);ry=n(A0,"logits_aggregation"),A0.forEach(s),iy=n(Ir,`), e.g. for SQA, WTQ or WikiSQL-supervised tasks.`),Ir.forEach(s),ly=d(xs),lo=r(xs,"P",{});var oh=i(lo);dy=n(oh,"This model inherits from "),Tr=r(oh,"A",{href:!0});var z0=i(Tr);py=n(z0,"TFPreTrainedModel"),z0.forEach(s),cy=n(oh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oh.forEach(s),hy=d(xs),po=r(xs,"P",{});var rh=i(po);uy=n(rh,"This model is also a "),co=r(rh,"A",{href:!0,rel:!0});var $0=i(co);my=n($0,"tf.keras.Model"),$0.forEach(s),fy=n(rh,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),rh.forEach(s),gy=d(xs),b(ia.$$.fragment,xs),_y=d(xs),ds=r(xs,"DIV",{class:!0});var Rs=i(ds);b(ho.$$.fragment,Rs),Ty=d(Rs),qt=r(Rs,"P",{});var Wr=i(qt);vy=n(Wr,"The "),vr=r(Wr,"A",{href:!0});var C0=i(vr);by=n(C0,"TFTapasForQuestionAnswering"),C0.forEach(s),wy=n(Wr," forward method, overrides the "),Rd=r(Wr,"CODE",{});var M0=i(Rd);yy=n(M0,"__call__"),M0.forEach(s),ky=n(Wr," special method."),Wr.forEach(s),jy=d(Rs),b(la.$$.fragment,Rs),qy=d(Rs),Vd=r(Rs,"P",{});var P0=i(Vd);Ey=n(P0,"Examples:"),P0.forEach(s),xy=d(Rs),b(uo.$$.fragment,Rs),Rs.forEach(s),xs.forEach(s),this.h()},h(){p(m,"name","hf:doc:metadata"),p(m,"content",JSON.stringify(Z0)),p(g,"id","tapas"),p(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(g,"href","#tapas"),p(f,"class","relative group"),p(Y,"id","overview"),p(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Y,"href","#overview"),p(A,"class","relative group"),p(ee,"href","https://www.aclweb.org/anthology/2020.acl-main.398"),p(ee,"rel","nofollow"),p(te,"href","https://www.microsoft.com/en-us/download/details.aspx?id=54253"),p(te,"rel","nofollow"),p(ae,"href","https://github.com/ppasupat/WikiTableQuestions"),p(ae,"rel","nofollow"),p(P,"href","https://github.com/salesforce/WikiSQL"),p(P,"rel","nofollow"),p(qe,"href","https://github.com/wenhuchen/Table-Fact-Checking"),p(qe,"rel","nofollow"),p(fa,"href","https://www.aclweb.org/anthology/2020.findings-emnlp.27/"),p(fa,"rel","nofollow"),Q0(Ft.src,Iy="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tapas_architecture.png")||p(Ft,"src",Iy),p(Ft,"alt","drawing"),p(Ft,"width","600"),p(ga,"href","https://huggingface.co/nielsr"),p(ga,"rel","nofollow"),p(_a,"href","https://huggingface.co/kamalkraj"),p(_a,"rel","nofollow"),p(Ta,"href","https://github.com/google-research/tapas"),p(Ta,"rel","nofollow"),p(vo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig"),p(va,"href","https://huggingface.co/models?search=tapas"),p(va,"rel","nofollow"),p(ba,"href","https://github.com/google-research/tapas%3E"),p(ba,"rel","nofollow"),p(At,"id","usage-finetuning"),p(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(At,"href","#usage-finetuning"),p(Ks,"class","relative 
group"),p(bo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(yo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(ja,"href","https://github.com/rusty1s/pytorch_scatter"),p(ja,"rel","nofollow"),p(qa,"href","https://github.com/tensorflow/probability"),p(qa,"rel","nofollow"),p(jo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasConfig"),p(qo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(Fa,"href","https://github.com/google-research/tapas/issues/91#issuecomment-735719340"),p(Fa,"rel","nofollow"),p(Aa,"href","https://huggingface.co/models?search=tapas"),p(Aa,"rel","nofollow"),p(za,"href","https://www.microsoft.com/en-us/download/details.aspx?id=54253"),p(za,"rel","nofollow"),p($a,"href","https://github.com/google-research/tapas/issues/50#issuecomment-705465960"),p($a,"rel","nofollow"),p(Ca,"href","https://github.com/NielsRogge/tapas_utils"),p(Ca,"rel","nofollow"),p(No,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(Lo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(Qo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForQuestionAnswering"),p(Oo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(Io,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(Wo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(Qa,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb"),p(Qa,"rel","nofollow"),p(Oa,"href","https://github.com/kamalkraj/Tapas-Tutorial/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb"),p(Oa,"rel","nofollow"),p(Ho,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(Bo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForQuestionAnswering"),p(Nt,"id","usage-inference"),p(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Nt,"href","#usage-inference"),p(tt,"class","relative group"),p(Go,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(Ro,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForQuestionAnswering"),p(Vo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(Ha,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb"),p(Ha,"rel","nofollow"),p(Ba,"href","https://github.com/kamalkraj/Tapas-Tutorial/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb"),p(Ba,"rel","nofollow"),p(Qt,"id","transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput"),p(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Qt,"href","#transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput"),p(at,"class","relative 
group"),p(Ko,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(nt,"class","docstring"),p(Ot,"id","transformers.TapasConfig"),p(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ot,"href","#transformers.TapasConfig"),p(ot,"class","relative group"),p(Yo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasModel"),p(Jo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(Xa,"href","https://github.com/google-research/tapas/tree/master"),p(Xa,"rel","nofollow"),p(Xe,"class","docstring"),p(It,"id","transformers.TapasTokenizer"),p(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(It,"href","#transformers.TapasTokenizer"),p(rt,"class","relative group"),p(Xo,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(Zo,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(sr,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasTokenizer"),p(Wt,"class","docstring"),p(tr,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(rn,"href","https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288"),p(rn,"rel","nofollow"),p(Ps,"class","docstring"),p(id,"class","docstring"),p(de,"class","docstring"),p(Ut,"id","transformers.TapasModel"),p(Ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ut,"href","#transformers.TapasModel"),p(it,"class","relative group"),p(ar,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(hn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(hn,"rel","nofollow"),p(nr,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel"),p(fn,"href","https://arxiv.org/abs/1706.03762"),p(fn,"rel","nofollow"),p(or,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasModel"),p(ts,"class","docstring"),p(De,"class","docstring"),p(Bt,"id","transformers.TapasForMaskedLM"),p(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Bt,"href","#transformers.TapasForMaskedLM"),p(dt,"class","relative group"),p(rr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(wn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(wn,"rel","nofollow"),p(ir,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForMaskedLM"),p(as,"class","docstring"),p(gs,"class","docstring"),p(Rt,"id","transformers.TapasForSequenceClassification"),p(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Rt,"href","#transformers.TapasForSequenceClassification"),p(ht,"class","relative 
group"),p(lr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Fn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Fn,"rel","nofollow"),p(dr,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForSequenceClassification"),p(ns,"class","docstring"),p(Ze,"class","docstring"),p(Kt,"id","transformers.TapasForQuestionAnswering"),p(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Kt,"href","#transformers.TapasForQuestionAnswering"),p(mt,"class","relative group"),p(pr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Sn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Sn,"rel","nofollow"),p(cr,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TapasForQuestionAnswering"),p(os,"class","docstring"),p(es,"class","docstring"),p(Jt,"id","transformers.TFTapasModel"),p(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Jt,"href","#transformers.TFTapasModel"),p(_t,"class","relative group"),p(hr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Wn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Wn,"rel","nofollow"),p(ur,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasModel"),p(rs,"class","docstring"),p(Ne,"class","docstring"),p(ea,"id","transformers.TFTapasForMaskedLM"),p(ea,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ea,"href","#transformers.TFTapasForMaskedLM"),p(vt,"class","relative group"),p(mr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Yn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Yn,"rel","nofollow"),p(fr,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForMaskedLM"),p(is,"class","docstring"),p(Le,"class","docstring"),p(aa,"id","transformers.TFTapasForSequenceClassification"),p(aa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(aa,"href","#transformers.TFTapasForSequenceClassification"),p(wt,"class","relative group"),p(gr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(ao,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(ao,"rel","nofollow"),p(_r,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForSequenceClassification"),p(ls,"class","docstring"),p(Qe,"class","docstring"),p(ra,"id","transformers.TFTapasForQuestionAnswering"),p(ra,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ra,"href","#transformers.TFTapasForQuestionAnswering"),p(kt,"class","relative 
group"),p(Tr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(co,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(co,"rel","nofollow"),p(vr,"href","/docs/transformers/v4.15.0/en/model_doc/tapas#transformers.TFTapasForQuestionAnswering"),p(ds,"class","docstring"),p(Oe,"class","docstring")},m(t,h){e(document.head,m),c(t,x,h),c(t,f,h),e(f,g),e(g,q),w(T,q,null),e(f,_),e(f,F),e(F,_e),c(t,K,h),c(t,A,h),e(A,Y),e(Y,O),w(Z,O,null),e(A,Te),e(A,I),e(I,ve),c(t,ce,h),c(t,W,h),e(W,N),e(W,ee),e(ee,se),e(W,z),c(t,$,h),c(t,oe,h),e(oe,H),c(t,he,h),c(t,V,h),e(V,L),e(L,te),e(te,be),e(L,C),e(V,we),e(V,Q),e(Q,ae),e(ae,ye),e(Q,B),e(V,ke),e(V,re),e(re,P),e(P,je),e(re,G),c(t,ue,h),c(t,u,h),e(u,E),c(t,J,h),c(t,xe,h),e(xe,Ge),c(t,S,h),c(t,Fe,h),e(Fe,$e),e($e,Re),c(t,D,h),c(t,M,h),e(M,Ve),e(M,Ce),e(Ce,R),e(M,Ke),e(M,qe),e(qe,Ee),e(M,Ye),e(M,fa),e(fa,ih),e(M,lh),c(t,ap,h),c(t,Ft,h),c(t,np,h),c(t,_o,h),e(_o,dh),c(t,op,h),c(t,_s,h),e(_s,ph),e(_s,ga),e(ga,ch),e(_s,hh),e(_s,_a),e(_a,uh),e(_s,mh),e(_s,Ta),e(Ta,fh),e(_s,gh),c(t,rp,h),c(t,To,h),e(To,_h),c(t,ip,h),c(t,Ts,h),e(Ts,Me),e(Me,Th),e(Me,Hr),e(Hr,vh),e(Me,bh),e(Me,vo),e(vo,wh),e(Me,yh),e(Me,Br),e(Br,kh),e(Me,jh),e(Me,va),e(va,qh),e(Me,Eh),e(Me,Gr),e(Gr,xh),e(Me,Fh),e(Me,Rr),e(Rr,Ah),e(Me,zh),e(Ts,$h),e(Ts,Je),e(Je,Ch),e(Je,Vr),e(Vr,Mh),e(Je,Ph),e(Je,Kr),e(Kr,Sh),e(Je,Dh),e(Je,Yr),e(Yr,Nh),e(Je,Lh),e(Je,Jr),e(Jr,Qh),e(Je,Oh),e(Je,ba),e(ba,Ih),e(Je,Wh),e(Ts,Uh),e(Ts,Vs),e(Vs,Hh),e(Vs,Xr),e(Xr,Bh),e(Vs,Gh),e(Vs,Zr),e(Zr,Rh),e(Vs,Vh),e(Ts,Kh),e(Ts,ei),e(ei,Yh),c(t,lp,h),c(t,Ks,h),e(Ks,At),e(At,si),w(wa,si,null),e(Ks,Jh),e(Ks,ti),e(ti,Xh),c(t,dp,h),c(t,zt,h),e(zt,Zh),e(zt,bo),e(bo,eu),e(zt,su),c(t,pp,h),c(t,wo,h),e(wo,ai),e(ai,tu),c(t,cp,h),c(t,$t,h),e($t,au),e($t,yo),e(yo,nu),e($t,ou),c(t,hp,h),c(t,zs,h),e(zs,ni),e(ni,ru),e(zs,iu),e(zs,ya),e(ya,lu),e(ya,oi),e(oi,du),e(ya,pu),e(zs,cu),e(zs,ka),e(ka,hu),e(ka,ri),e(ri,uu),e(ka,mu),c(t,up,h),c(t,ko,h),e(ko,fu),c(t,mp,h),c(t,Ct,h),e(Ct,ii),e(ii,Ys),e(Ys,li),e(li,di),e(di,gu),e(Ys,_u),e(Ys,pi),e(pi,ci),e(ci,Tu),e(Ys,vu),e(Ys,hi),e(hi,ui),e(ui,bu),e(Ct,wu),e(Ct,Js),e(Js,Xs),e(Xs,mi),e(mi,yu),e(Xs,ku),e(Xs,fi),e(fi,ju),e(Xs,qu),e(Xs,gi),e(gi,Eu),e(Js,xu),e(Js,Zs),e(Zs,_i),e(_i,Fu),e(Zs,Au),e(Zs,Ti),e(Ti,zu),e(Zs,$u),e(Zs,vi),e(vi,Cu),e(Js,Mu),e(Js,et),e(et,bi),e(bi,Pu),e(et,Su),e(et,wi),e(wi,Du),e(et,Nu),e(et,yi),e(yi,Lu),c(t,fp,h),c(t,$s,h),e($s,Qu),e($s,ja),e(ja,Ou),e($s,Iu),e($s,qa),e(qa,Wu),e($s,Uu),c(t,gp,h),w(Ea,t,h),c(t,_p,h),c(t,Cs,h),e(Cs,Hu),e(Cs,jo),e(jo,Bu),e(Cs,Gu),e(Cs,qo),e(qo,Ru),e(Cs,Vu),c(t,Tp,h),w(xa,t,h),c(t,vp,h),c(t,Mt,h),e(Mt,Ku),e(Mt,Fa),e(Fa,Yu),e(Mt,Ju),c(t,bp,h),c(t,Pt,h),e(Pt,Xu),e(Pt,Aa),e(Aa,Zu),e(Pt,em),c(t,wp,h),c(t,Eo,h),e(Eo,ki),e(ki,sm),c(t,yp,h),c(t,St,h),e(St,tm),e(St,za),e(za,am),e(St,nm),c(t,kp,h),c(t,ie,h),e(ie,xo),e(xo,ji),e(ji,om),e(xo,rm),e(ie,im),e(ie,Fo),e(Fo,qi),e(qi,lm),e(Fo,dm),e(ie,pm),e(ie,Ao),e(Ao,Ei),e(Ei,cm),e(Ao,hm),e(ie,um),e(ie,zo),e(zo,xi),e(xi,mm),e(zo,fm),e(ie,gm),e(ie,$o),e($o,Fi),e(Fi,_m),e($o,Tm),e(ie,vm),e(ie,Co),e(Co,Ai),e(Ai,bm),e(Co,wm),e(ie,ym),e(ie,Mo),e(Mo,zi),e(zi,km),e(Mo,jm),e(ie,qm),e(ie,Po),e(Po,$i),e($i,Em),e(Po,xm),e(ie,Fm),e(ie,So),e(So,Ci),e(Ci,Am),e(So,zm),c(t,jp,h),c(t,We,h),e(We,$m),e(We,$a),e($a,Cm),e(We,Mm),e(We,Ca),e(Ca,Pm),e(We,Sm),e(We,Mi),e(Mi,Dm),e(We,Nm),e(We,Pi),e(Pi,Lm),e(We,Qm),e(We,Si),e(Si,Om),e(We,Im),c(t,qp,h),c(t,Do,h),e(Do,Di),e(Di,Wm),c(t,Ep,h),c(t,Ae,h),e(Ae,Um),e(Ae,No),e(No,Hm),e(Ae,Bm),e(Ae,Ni),e(Ni,Gm),e(Ae,Rm),e(Ae,Li),e(Li,Vm),e(Ae,Km),e(Ae,Qi),e(Qi,Ym),e(Ae,Jm),
e(Ae,Lo),e(Lo,Xm),e(Ae,Zm),e(Ae,Qo),e(Qo,ef),e(Ae,sf),c(t,xp,h),c(t,Dt,h),e(Dt,Oi),e(Oi,Ma),e(Ma,Ii),e(Ii,Wi),e(Wi,tf),e(Ma,af),e(Ma,Ui),e(Ui,Hi),e(Hi,nf),e(Dt,of),e(Dt,st),e(st,Pa),e(Pa,Bi),e(Bi,rf),e(Pa,lf),e(Pa,As),e(As,Gi),e(Gi,df),e(As,pf),e(As,Ri),e(Ri,cf),e(As,hf),e(As,Vi),e(Vi,uf),e(As,mf),e(As,Ki),e(Ki,ff),e(st,gf),e(st,Sa),e(Sa,Yi),e(Yi,_f),e(Sa,Tf),e(Sa,Pe),e(Pe,Ji),e(Ji,vf),e(Pe,bf),e(Pe,Xi),e(Xi,wf),e(Pe,yf),e(Pe,Zi),e(Zi,kf),e(Pe,jf),e(Pe,el),e(el,qf),e(Pe,Ef),e(Pe,sl),e(sl,xf),e(Pe,Ff),e(Pe,tl),e(tl,Af),e(Pe,zf),e(Pe,al),e(al,$f),e(st,Cf),e(st,Da),e(Da,nl),e(nl,Mf),e(Da,Pf),e(Da,ms),e(ms,ol),e(ol,Sf),e(ms,Df),e(ms,rl),e(rl,Nf),e(ms,Lf),e(ms,il),e(il,Qf),e(ms,Of),e(ms,ll),e(ll,If),e(ms,Wf),e(ms,dl),e(dl,Uf),c(t,Fp,h),c(t,le,h),e(le,Oo),e(Oo,Hf),e(le,Bf),e(le,pl),e(pl,Gf),e(le,Rf),e(le,cl),e(cl,Vf),e(le,Kf),e(le,hl),e(hl,Yf),e(le,Jf),e(le,ul),e(ul,Xf),e(le,Zf),e(le,ml),e(ml,eg),e(le,sg),e(le,fl),e(fl,tg),e(le,ag),e(le,gl),e(gl,ng),e(le,og),c(t,Ap,h),w(Na,t,h),c(t,zp,h),c(t,vs,h),e(vs,rg),e(vs,Io),e(Io,ig),e(vs,lg),e(vs,_l),e(_l,dg),e(vs,pg),e(vs,Tl),e(Tl,cg),e(vs,hg),c(t,$p,h),w(La,t,h),c(t,Cp,h),c(t,X,h),e(X,ug),e(X,vl),e(vl,mg),e(X,fg),e(X,bl),e(bl,gg),e(X,_g),e(X,wl),e(wl,Tg),e(X,vg),e(X,yl),e(yl,bg),e(X,wg),e(X,kl),e(kl,yg),e(X,kg),e(X,jl),e(jl,jg),e(X,qg),e(X,Wo),e(Wo,Eg),e(X,xg),e(X,Qa),e(Qa,Fg),e(X,Ag),e(X,Oa),e(Oa,zg),e(X,$g),c(t,Mp,h),c(t,Uo,h),e(Uo,ql),e(ql,Cg),c(t,Pp,h),c(t,Ms,h),e(Ms,Mg),e(Ms,Ho),e(Ho,Pg),e(Ms,Sg),e(Ms,Bo),e(Bo,Dg),e(Ms,Ng),c(t,Sp,h),w(Ia,t,h),c(t,Dp,h),c(t,tt,h),e(tt,Nt),e(Nt,El),w(Wa,El,null),e(tt,Lg),e(tt,xl),e(xl,Qg),c(t,Np,h),c(t,me,h),e(me,Og),e(me,Go),e(Go,Ig),e(me,Wg),e(me,Ro),e(Ro,Ug),e(me,Hg),e(me,Fl),e(Fl,Bg),e(me,Gg),e(me,Al),e(Al,Rg),e(me,Vg),e(me,zl),e(zl,Kg),e(me,Yg),e(me,Vo),e(Vo,Jg),e(me,Xg),e(me,$l),e($l,Zg),e(me,e_),c(t,Lp,h),c(t,Lt,h),e(Lt,s_),e(Lt,Cl),e(Cl,t_),e(Lt,a_),c(t,Qp,h),w(Ua,t,h),c(t,Op,h),c(t,Ue,h),e(Ue,n_),e(Ue,Ml),e(Ml,o_),e(Ue,r_),e(Ue,Pl),e(Pl,i_),e(Ue,l_),e(Ue,Sl),e(Sl,d_),e(Ue,p_),e(Ue,Ha),e(Ha,c_),e(Ue,h_),e(Ue,Ba),e(Ba,u_),e(Ue,m_),c(t,Ip,h),c(t,at,h),e(at,Qt),e(Qt,Dl),w(Ga,Dl,null),e(at,f_),e(at,Nl),e(Nl,g_),c(t,Wp,h),c(t,nt,h),w(Ra,nt,null),e(nt,__),e(nt,Va),e(Va,T_),e(Va,Ko),e(Ko,v_),e(Va,b_),c(t,Up,h),c(t,ot,h),e(ot,Ot),e(Ot,Ll),w(Ka,Ll,null),e(ot,w_),e(ot,Ql),e(Ql,y_),c(t,Hp,h),c(t,Xe,h),w(Ya,Xe,null),e(Xe,k_),e(Xe,fs),e(fs,j_),e(fs,Yo),e(Yo,q_),e(fs,E_),e(fs,Ol),e(Ol,x_),e(fs,F_),e(fs,Il),e(Il,A_),e(fs,z_),e(fs,Jo),e(Jo,$_),e(fs,C_),e(Xe,M_),e(Xe,Ja),e(Ja,P_),e(Ja,Xa),e(Xa,S_),e(Ja,D_),e(Xe,N_),e(Xe,Wl),e(Wl,L_),e(Xe,Q_),w(Za,Xe,null),c(t,Bp,h),c(t,rt,h),e(rt,It),e(It,Ul),w(en,Ul,null),e(rt,O_),e(rt,Hl),e(Hl,I_),c(t,Gp,h),c(t,de,h),w(sn,de,null),e(de,W_),e(de,Bl),e(Bl,U_),e(de,H_),e(de,ne),e(ne,B_),e(ne,Xo),e(Xo,G_),e(ne,R_),e(ne,Zo),e(Zo,V_),e(ne,K_),e(ne,Gl),e(Gl,Y_),e(ne,J_),e(ne,Rl),e(Rl,X_),e(ne,Z_),e(ne,Vl),e(Vl,eT),e(ne,sT),e(ne,Kl),e(Kl,tT),e(ne,aT),e(ne,Yl),e(Yl,nT),e(ne,oT),e(ne,Jl),e(Jl,rT),e(ne,iT),e(ne,Xl),e(Xl,lT),e(ne,dT),e(de,pT),e(de,Se),e(Se,Zl),e(Zl,cT),e(Se,hT),e(Se,ed),e(ed,uT),e(Se,mT),e(Se,sd),e(sd,fT),e(Se,gT),e(Se,td),e(td,_T),e(Se,TT),e(Se,ad),e(ad,vT),e(Se,bT),e(Se,nd),e(nd,wT),e(Se,yT),e(Se,od),e(od,kT),e(de,jT),e(de,er),e(er,sr),e(sr,qT),e(er,ET),e(de,xT),e(de,Wt),w(tn,Wt,null),e(Wt,FT),e(Wt,rd),e(rd,AT),e(de,zT),e(de,Ps),w(an,Ps,null),e(Ps,$T),e(Ps,nn),e(nn,CT),e(nn,tr),e(tr,MT),e(nn,PT),e(Ps,ST),e(Ps,on),e(on,DT),e(on,rn),e(rn,NT),e(on,LT),e(de,QT),e(de,id),c(t,Rp,h),c(t,it,h),e(it,Ut),e(Ut,ld),w(ln,ld,null),e(it,OT),e(it,dd),e(dd,IT),c(t,Vp,h),c(t,De,h),w(dn,De,null),e(De,W
T),e(De,pn),e(pn,UT),e(pn,ar),e(ar,HT),e(pn,BT),e(De,GT),e(De,cn),e(cn,RT),e(cn,hn),e(hn,VT),e(cn,KT),e(De,YT),e(De,un),e(un,JT),e(un,nr),e(nr,XT),e(un,ZT),e(De,ev),e(De,mn),e(mn,sv),e(mn,fn),e(fn,tv),e(mn,av),e(De,nv),e(De,ts),w(gn,ts,null),e(ts,ov),e(ts,lt),e(lt,rv),e(lt,or),e(or,iv),e(lt,lv),e(lt,pd),e(pd,dv),e(lt,pv),e(ts,cv),w(Ht,ts,null),e(ts,hv),e(ts,cd),e(cd,uv),e(ts,mv),w(_n,ts,null),c(t,Kp,h),c(t,dt,h),e(dt,Bt),e(Bt,hd),w(Tn,hd,null),e(dt,fv),e(dt,ud),e(ud,gv),c(t,Yp,h),c(t,gs,h),w(vn,gs,null),e(gs,_v),e(gs,pt),e(pt,Tv),e(pt,md),e(md,vv),e(pt,bv),e(pt,rr),e(rr,wv),e(pt,yv),e(gs,kv),e(gs,bn),e(bn,jv),e(bn,wn),e(wn,qv),e(bn,Ev),e(gs,xv),e(gs,as),w(yn,as,null),e(as,Fv),e(as,ct),e(ct,Av),e(ct,ir),e(ir,zv),e(ct,$v),e(ct,fd),e(fd,Cv),e(ct,Mv),e(as,Pv),w(Gt,as,null),e(as,Sv),e(as,gd),e(gd,Dv),e(as,Nv),w(kn,as,null),c(t,Jp,h),c(t,ht,h),e(ht,Rt),e(Rt,_d),w(jn,_d,null),e(ht,Lv),e(ht,Td),e(Td,Qv),c(t,Xp,h),c(t,Ze,h),w(qn,Ze,null),e(Ze,Ov),e(Ze,vd),e(vd,Iv),e(Ze,Wv),e(Ze,En),e(En,Uv),e(En,lr),e(lr,Hv),e(En,Bv),e(Ze,Gv),e(Ze,xn),e(xn,Rv),e(xn,Fn),e(Fn,Vv),e(xn,Kv),e(Ze,Yv),e(Ze,ns),w(An,ns,null),e(ns,Jv),e(ns,ut),e(ut,Xv),e(ut,dr),e(dr,Zv),e(ut,eb),e(ut,bd),e(bd,sb),e(ut,tb),e(ns,ab),w(Vt,ns,null),e(ns,nb),e(ns,wd),e(wd,ob),e(ns,rb),w(zn,ns,null),c(t,Zp,h),c(t,mt,h),e(mt,Kt),e(Kt,yd),w($n,yd,null),e(mt,ib),e(mt,kd),e(kd,lb),c(t,ec,h),c(t,es,h),w(Cn,es,null),e(es,db),e(es,ft),e(ft,pb),e(ft,jd),e(jd,cb),e(ft,hb),e(ft,qd),e(qd,ub),e(ft,mb),e(es,fb),e(es,Mn),e(Mn,gb),e(Mn,pr),e(pr,_b),e(Mn,Tb),e(es,vb),e(es,Pn),e(Pn,bb),e(Pn,Sn),e(Sn,wb),e(Pn,yb),e(es,kb),e(es,os),w(Dn,os,null),e(os,jb),e(os,gt),e(gt,qb),e(gt,cr),e(cr,Eb),e(gt,xb),e(gt,Ed),e(Ed,Fb),e(gt,Ab),e(os,zb),w(Yt,os,null),e(os,$b),e(os,xd),e(xd,Cb),e(os,Mb),w(Nn,os,null),c(t,sc,h),c(t,_t,h),e(_t,Jt),e(Jt,Fd),w(Ln,Fd,null),e(_t,Pb),e(_t,Ad),e(Ad,Sb),c(t,tc,h),c(t,Ne,h),w(Qn,Ne,null),e(Ne,Db),e(Ne,zd),e(zd,Nb),e(Ne,Lb),e(Ne,On),e(On,Qb),e(On,hr),e(hr,Ob),e(On,Ib),e(Ne,Wb),e(Ne,In),e(In,Ub),e(In,Wn),e(Wn,Hb),e(In,Bb),e(Ne,Gb),w(Xt,Ne,null),e(Ne,Rb),e(Ne,rs),w(Un,rs,null),e(rs,Vb),e(rs,Tt),e(Tt,Kb),e(Tt,ur),e(ur,Yb),e(Tt,Jb),e(Tt,$d),e($d,Xb),e(Tt,Zb),e(rs,ew),w(Zt,rs,null),e(rs,sw),e(rs,Cd),e(Cd,tw),e(rs,aw),w(Hn,rs,null),c(t,ac,h),c(t,vt,h),e(vt,ea),e(ea,Md),w(Bn,Md,null),e(vt,nw),e(vt,Pd),e(Pd,ow),c(t,nc,h),c(t,Le,h),w(Gn,Le,null),e(Le,rw),e(Le,Rn),e(Rn,iw),e(Rn,Sd),e(Sd,lw),e(Rn,dw),e(Le,pw),e(Le,Vn),e(Vn,cw),e(Vn,mr),e(mr,hw),e(Vn,uw),e(Le,mw),e(Le,Kn),e(Kn,fw),e(Kn,Yn),e(Yn,gw),e(Kn,_w),e(Le,Tw),w(sa,Le,null),e(Le,vw),e(Le,is),w(Jn,is,null),e(is,bw),e(is,bt),e(bt,ww),e(bt,fr),e(fr,yw),e(bt,kw),e(bt,Dd),e(Dd,jw),e(bt,qw),e(is,Ew),w(ta,is,null),e(is,xw),e(is,Nd),e(Nd,Fw),e(is,Aw),w(Xn,is,null),c(t,oc,h),c(t,wt,h),e(wt,aa),e(aa,Ld),w(Zn,Ld,null),e(wt,zw),e(wt,Qd),e(Qd,$w),c(t,rc,h),c(t,Qe,h),w(eo,Qe,null),e(Qe,Cw),e(Qe,Od),e(Od,Mw),e(Qe,Pw),e(Qe,so),e(so,Sw),e(so,gr),e(gr,Dw),e(so,Nw),e(Qe,Lw),e(Qe,to),e(to,Qw),e(to,ao),e(ao,Ow),e(to,Iw),e(Qe,Ww),w(na,Qe,null),e(Qe,Uw),e(Qe,ls),w(no,ls,null),e(ls,Hw),e(ls,yt),e(yt,Bw),e(yt,_r),e(_r,Gw),e(yt,Rw),e(yt,Id),e(Id,Vw),e(yt,Kw),e(ls,Yw),w(oa,ls,null),e(ls,Jw),e(ls,Wd),e(Wd,Xw),e(ls,Zw),w(oo,ls,null),c(t,ic,h),c(t,kt,h),e(kt,ra),e(ra,Ud),w(ro,Ud,null),e(kt,ey),e(kt,Hd),e(Hd,sy),c(t,lc,h),c(t,Oe,h),w(io,Oe,null),e(Oe,ty),e(Oe,jt),e(jt,ay),e(jt,Bd),e(Bd,ny),e(jt,oy),e(jt,Gd),e(Gd,ry),e(jt,iy),e(Oe,ly),e(Oe,lo),e(lo,dy),e(lo,Tr),e(Tr,py),e(lo,cy),e(Oe,hy),e(Oe,po),e(po,uy),e(po,co),e(co,my),e(po,fy),e(Oe,gy),w(ia,Oe,null),e(Oe,_y),e(Oe,ds),w(ho,ds,null),e(ds,Ty),e(ds,qt),e(qt,vy),e(qt,vr),e(vr,by),e(q
t,wy),e(qt,Rd),e(Rd,yy),e(qt,ky),e(ds,jy),w(la,ds,null),e(ds,qy),e(ds,Vd),e(Vd,Ey),e(ds,xy),w(uo,ds,null),dc=!0},p(t,[h]){const mo={};h&2&&(mo.$$scope={dirty:h,ctx:t}),Ht.$set(mo);const Kd={};h&2&&(Kd.$$scope={dirty:h,ctx:t}),Gt.$set(Kd);const Yd={};h&2&&(Yd.$$scope={dirty:h,ctx:t}),Vt.$set(Yd);const Jd={};h&2&&(Jd.$$scope={dirty:h,ctx:t}),Yt.$set(Jd);const fo={};h&2&&(fo.$$scope={dirty:h,ctx:t}),Xt.$set(fo);const Xd={};h&2&&(Xd.$$scope={dirty:h,ctx:t}),Zt.$set(Xd);const Zd={};h&2&&(Zd.$$scope={dirty:h,ctx:t}),sa.$set(Zd);const ep={};h&2&&(ep.$$scope={dirty:h,ctx:t}),ta.$set(ep);const go={};h&2&&(go.$$scope={dirty:h,ctx:t}),na.$set(go);const sp={};h&2&&(sp.$$scope={dirty:h,ctx:t}),oa.$set(sp);const tp={};h&2&&(tp.$$scope={dirty:h,ctx:t}),ia.$set(tp);const Et={};h&2&&(Et.$$scope={dirty:h,ctx:t}),la.$set(Et)},i(t){dc||(y(T.$$.fragment,t),y(Z.$$.fragment,t),y(wa.$$.fragment,t),y(Ea.$$.fragment,t),y(xa.$$.fragment,t),y(Na.$$.fragment,t),y(La.$$.fragment,t),y(Ia.$$.fragment,t),y(Wa.$$.fragment,t),y(Ua.$$.fragment,t),y(Ga.$$.fragment,t),y(Ra.$$.fragment,t),y(Ka.$$.fragment,t),y(Ya.$$.fragment,t),y(Za.$$.fragment,t),y(en.$$.fragment,t),y(sn.$$.fragment,t),y(tn.$$.fragment,t),y(an.$$.fragment,t),y(ln.$$.fragment,t),y(dn.$$.fragment,t),y(gn.$$.fragment,t),y(Ht.$$.fragment,t),y(_n.$$.fragment,t),y(Tn.$$.fragment,t),y(vn.$$.fragment,t),y(yn.$$.fragment,t),y(Gt.$$.fragment,t),y(kn.$$.fragment,t),y(jn.$$.fragment,t),y(qn.$$.fragment,t),y(An.$$.fragment,t),y(Vt.$$.fragment,t),y(zn.$$.fragment,t),y($n.$$.fragment,t),y(Cn.$$.fragment,t),y(Dn.$$.fragment,t),y(Yt.$$.fragment,t),y(Nn.$$.fragment,t),y(Ln.$$.fragment,t),y(Qn.$$.fragment,t),y(Xt.$$.fragment,t),y(Un.$$.fragment,t),y(Zt.$$.fragment,t),y(Hn.$$.fragment,t),y(Bn.$$.fragment,t),y(Gn.$$.fragment,t),y(sa.$$.fragment,t),y(Jn.$$.fragment,t),y(ta.$$.fragment,t),y(Xn.$$.fragment,t),y(Zn.$$.fragment,t),y(eo.$$.fragment,t),y(na.$$.fragment,t),y(no.$$.fragment,t),y(oa.$$.fragment,t),y(oo.$$.fragment,t),y(ro.$$.fragment,t),y(io.$$.fragment,t),y(ia.$$.fragment,t),y(ho.$$.fragment,t),y(la.$$.fragment,t),y(uo.$$.fragment,t),dc=!0)},o(t){k(T.$$.fragment,t),k(Z.$$.fragment,t),k(wa.$$.fragment,t),k(Ea.$$.fragment,t),k(xa.$$.fragment,t),k(Na.$$.fragment,t),k(La.$$.fragment,t),k(Ia.$$.fragment,t),k(Wa.$$.fragment,t),k(Ua.$$.fragment,t),k(Ga.$$.fragment,t),k(Ra.$$.fragment,t),k(Ka.$$.fragment,t),k(Ya.$$.fragment,t),k(Za.$$.fragment,t),k(en.$$.fragment,t),k(sn.$$.fragment,t),k(tn.$$.fragment,t),k(an.$$.fragment,t),k(ln.$$.fragment,t),k(dn.$$.fragment,t),k(gn.$$.fragment,t),k(Ht.$$.fragment,t),k(_n.$$.fragment,t),k(Tn.$$.fragment,t),k(vn.$$.fragment,t),k(yn.$$.fragment,t),k(Gt.$$.fragment,t),k(kn.$$.fragment,t),k(jn.$$.fragment,t),k(qn.$$.fragment,t),k(An.$$.fragment,t),k(Vt.$$.fragment,t),k(zn.$$.fragment,t),k($n.$$.fragment,t),k(Cn.$$.fragment,t),k(Dn.$$.fragment,t),k(Yt.$$.fragment,t),k(Nn.$$.fragment,t),k(Ln.$$.fragment,t),k(Qn.$$.fragment,t),k(Xt.$$.fragment,t),k(Un.$$.fragment,t),k(Zt.$$.fragment,t),k(Hn.$$.fragment,t),k(Bn.$$.fragment,t),k(Gn.$$.fragment,t),k(sa.$$.fragment,t),k(Jn.$$.fragment,t),k(ta.$$.fragment,t),k(Xn.$$.fragment,t),k(Zn.$$.fragment,t),k(eo.$$.fragment,t),k(na.$$.fragment,t),k(no.$$.fragment,t),k(oa.$$.fragment,t),k(oo.$$.fragment,t),k(ro.$$.fragment,t),k(io.$$.fragment,t),k(ia.$$.fragment,t),k(ho.$$.fragment,t),k(la.$$.fragment,t),k(uo.$$.fragment,t),dc=!1},d(t){s(m),t&&s(x),t&&s(f),j(T),t&&s(K),t&&s(A),j(Z),t&&s(ce),t&&s(W),t&&s($),t&&s(oe),t&&s(he),t&&s(V),t&&s(ue),t&&s(u),t&&s(J),t&&s(xe),t&&s(S),t&&s(Fe),t&&s(D),t&&s(M),t&&s(ap),t&&s(Ft),t&
&s(np),t&&s(_o),t&&s(op),t&&s(_s),t&&s(rp),t&&s(To),t&&s(ip),t&&s(Ts),t&&s(lp),t&&s(Ks),j(wa),t&&s(dp),t&&s(zt),t&&s(pp),t&&s(wo),t&&s(cp),t&&s($t),t&&s(hp),t&&s(zs),t&&s(up),t&&s(ko),t&&s(mp),t&&s(Ct),t&&s(fp),t&&s($s),t&&s(gp),j(Ea,t),t&&s(_p),t&&s(Cs),t&&s(Tp),j(xa,t),t&&s(vp),t&&s(Mt),t&&s(bp),t&&s(Pt),t&&s(wp),t&&s(Eo),t&&s(yp),t&&s(St),t&&s(kp),t&&s(ie),t&&s(jp),t&&s(We),t&&s(qp),t&&s(Do),t&&s(Ep),t&&s(Ae),t&&s(xp),t&&s(Dt),t&&s(Fp),t&&s(le),t&&s(Ap),j(Na,t),t&&s(zp),t&&s(vs),t&&s($p),j(La,t),t&&s(Cp),t&&s(X),t&&s(Mp),t&&s(Uo),t&&s(Pp),t&&s(Ms),t&&s(Sp),j(Ia,t),t&&s(Dp),t&&s(tt),j(Wa),t&&s(Np),t&&s(me),t&&s(Lp),t&&s(Lt),t&&s(Qp),j(Ua,t),t&&s(Op),t&&s(Ue),t&&s(Ip),t&&s(at),j(Ga),t&&s(Wp),t&&s(nt),j(Ra),t&&s(Up),t&&s(ot),j(Ka),t&&s(Hp),t&&s(Xe),j(Ya),j(Za),t&&s(Bp),t&&s(rt),j(en),t&&s(Gp),t&&s(de),j(sn),j(tn),j(an),t&&s(Rp),t&&s(it),j(ln),t&&s(Vp),t&&s(De),j(dn),j(gn),j(Ht),j(_n),t&&s(Kp),t&&s(dt),j(Tn),t&&s(Yp),t&&s(gs),j(vn),j(yn),j(Gt),j(kn),t&&s(Jp),t&&s(ht),j(jn),t&&s(Xp),t&&s(Ze),j(qn),j(An),j(Vt),j(zn),t&&s(Zp),t&&s(mt),j($n),t&&s(ec),t&&s(es),j(Cn),j(Dn),j(Yt),j(Nn),t&&s(sc),t&&s(_t),j(Ln),t&&s(tc),t&&s(Ne),j(Qn),j(Xt),j(Un),j(Zt),j(Hn),t&&s(ac),t&&s(vt),j(Bn),t&&s(nc),t&&s(Le),j(Gn),j(sa),j(Jn),j(ta),j(Xn),t&&s(oc),t&&s(wt),j(Zn),t&&s(rc),t&&s(Qe),j(eo),j(na),j(no),j(oa),j(oo),t&&s(ic),t&&s(kt),j(ro),t&&s(lc),t&&s(Oe),j(io),j(ia),j(ho),j(la),j(uo)}}}const Z0={local:"tapas",sections:[{local:"overview",title:"Overview"},{local:"usage-finetuning",title:"Usage: fine-tuning"},{local:"usage-inference",title:"Usage: inference"},{local:"transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput",title:"TAPAS specific outputs"},{local:"transformers.TapasConfig",title:"TapasConfig"},{local:"transformers.TapasTokenizer",title:"TapasTokenizer"},{local:"transformers.TapasModel",title:"TapasModel"},{local:"transformers.TapasForMaskedLM",title:"TapasForMaskedLM"},{local:"transformers.TapasForSequenceClassification",title:"TapasForSequenceClassification"},{local:"transformers.TapasForQuestionAnswering",title:"TapasForQuestionAnswering"},{local:"transformers.TFTapasModel",title:"TFTapasModel"},{local:"transformers.TFTapasForMaskedLM",title:"TFTapasForMaskedLM"},{local:"transformers.TFTapasForSequenceClassification",title:"TFTapasForSequenceClassification"},{local:"transformers.TFTapasForQuestionAnswering",title:"TFTapasForQuestionAnswering"}],title:"TAPAS"};function eq(U,m,x){let{fw:f}=m;return U.$$set=g=>{"fw"in g&&x(0,f=g.fw)},[f]}class lq extends S0{constructor(m){super();D0(this,m,eq,X0,N0,{fw:0})}}export{lq as default,Z0 as metadata};
9,960
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bert_japanese.mdx-68844b9e.js
import{S as Ht,i as Kt,s as Ut,e as o,k as u,w as F,t as l,L as Ft,c as s,d as t,m as f,a as n,x as V,h as i,b as c,J as a,g as p,y as G,K as Vt,q as Q,o as X,B as Y}from"../../chunks/vendor-b1433968.js";import{D as Gt}from"../../chunks/Docstring-ff504c58.js";import{C as Wt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as kt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Qt(Me){let _,I,m,d,Z,y,Ce,ee,Se,ce,k,g,te,x,Le,ae,Ie,ue,N,Ne,fe,R,Re,me,z,v,De,A,Oe,We,B,He,Ke,Ue,oe,Fe,he,h,Ve,se,Ge,Qe,re,Xe,Ye,ne,Ze,et,de,T,tt,J,at,ot,_e,D,st,ke,q,ve,O,rt,be,P,we,W,nt,ge,H,M,lt,K,it,pt,ze,j,ct,C,ut,ft,Te,b,E,le,S,mt,ie,ht,je,w,L,dt,pe,_t,Ee;return y=new kt({}),x=new kt({}),q=new Wt({props:{code:`import torch from transformers import AutoModel, AutoTokenizer bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese") tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese") ## Input Japanese Text line = "\u543E\u8F29\u306F\u732B\u3067\u3042\u308B\u3002" inputs = tokenizer(line, return_tensors="pt") print(tokenizer.decode(inputs['input_ids'][0])) outputs = bertjapanese(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>bertjapanese = AutoModel.from_pretrained(<span class="hljs-string">&quot;cl-tohoku/bert-base-japanese&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;cl-tohoku/bert-base-japanese&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment">## Input Japanese Text</span> <span class="hljs-meta">&gt;&gt;&gt; </span>line = <span class="hljs-string">&quot;\u543E\u8F29\u306F\u732B\u3067\u3042\u308B\u3002&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(line, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(inputs[<span class="hljs-string">&#x27;input_ids&#x27;</span>][<span class="hljs-number">0</span>])) [CLS] \u543E\u8F29 \u306F \u732B \u3067 \u3042\u308B \u3002 [SEP] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = bertjapanese(**inputs)`}}),P=new Wt({props:{code:`bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char") tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char") ## Input Japanese Text line = "\u543E\u8F29\u306F\u732B\u3067\u3042\u308B\u3002" inputs = tokenizer(line, return_tensors="pt") print(tokenizer.decode(inputs['input_ids'][0])) outputs = bertjapanese(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>bertjapanese = AutoModel.from_pretrained(<span class="hljs-string">&quot;cl-tohoku/bert-base-japanese-char&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;cl-tohoku/bert-base-japanese-char&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment">## Input Japanese Text</span> <span class="hljs-meta">&gt;&gt;&gt; </span>line = <span class="hljs-string">&quot;\u543E\u8F29\u306F\u732B\u3067\u3042\u308B\u3002&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(line, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(inputs[<span class="hljs-string">&#x27;input_ids&#x27;</span>][<span class="hljs-number">0</span>])) [CLS] \u543E \u8F29 \u306F \u732B \u3067 \u3042 \u308B \u3002 [SEP] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = bertjapanese(**inputs)`}}),S=new kt({}),L=new Gt({props:{name:"class transformers.BertJapaneseTokenizer",anchor:"transformers.BertJapaneseTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = False"},{name:"do_word_tokenize",val:" = True"},{name:"do_subword_tokenize",val:" = True"},{name:"word_tokenizer_type",val:" = 'basic'"},{name:"subword_tokenizer_type",val:" = 'wordpiece'"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"mecab_kwargs",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_japanese/tokenization_bert_japanese.py#L72",parametersDescription:[{anchor:"transformers.BertJapaneseTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to a one-wordpiece-per-line vocabulary file.`,name:"vocab_file"},{anchor:"transformers.BertJapaneseTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to lower case the input. Only has an effect when do_basic_tokenize=True.`,name:"do_lower_case"},{anchor:"transformers.BertJapaneseTokenizer.do_word_tokenize",description:`<strong>do_word_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to do word tokenization.`,name:"do_word_tokenize"},{anchor:"transformers.BertJapaneseTokenizer.do_subword_tokenize",description:`<strong>do_subword_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to do subword tokenization.`,name:"do_subword_tokenize"},{anchor:"transformers.BertJapaneseTokenizer.word_tokenizer_type",description:`<strong>word_tokenizer_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;basic&quot;</code>) &#x2014; Type of word tokenizer.`,name:"word_tokenizer_type"},{anchor:"transformers.BertJapaneseTokenizer.subword_tokenizer_type",description:`<strong>subword_tokenizer_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;wordpiece&quot;</code>) &#x2014; Type of subword tokenizer.`,name:"subword_tokenizer_type"},{anchor:"transformers.BertJapaneseTokenizer.mecab_kwargs",description:`<strong>mecab_kwargs</strong> (<code>str</code>, <em>optional</em>) &#x2014; Dictionary passed to the <code>MecabTokenizer</code> constructor.`,name:"mecab_kwargs"}]}}),{c(){_=o("meta"),I=u(),m=o("h1"),d=o("a"),Z=o("span"),F(y.$$.fragment),Ce=u(),ee=o("span"),Se=l("BertJapanese"),ce=u(),k=o("h2"),g=o("a"),te=o("span"),F(x.$$.fragment),Le=u(),ae=o("span"),Ie=l("Overview"),ue=u(),N=o("p"),Ne=l("The BERT models trained on Japanese text."),fe=u(),R=o("p"),Re=l("There are models with two different tokenization methods:"),me=u(),z=o("ul"),v=o("li"),De=l("Tokenize with MeCab and WordPiece. 
This requires some extra dependencies, "),A=o("a"),Oe=l("fugashi"),We=l(" which is a wrapper around "),B=o("a"),He=l("MeCab"),Ke=l("."),Ue=u(),oe=o("li"),Fe=l("Tokenize into characters."),he=u(),h=o("p"),Ve=l("To use "),se=o("em"),Ge=l("MecabTokenizer"),Qe=l(", you should "),re=o("code"),Xe=l('pip install transformers["ja"]'),Ye=l(" (or "),ne=o("code"),Ze=l('pip install -e .["ja"]'),et=l(` if you install from source) to install dependencies.`),de=u(),T=o("p"),tt=l("See "),J=o("a"),at=l("details on cl-tohoku repository"),ot=l("."),_e=u(),D=o("p"),st=l("Example of using a model with MeCab and WordPiece tokenization:"),ke=u(),F(q.$$.fragment),ve=u(),O=o("p"),rt=l("Example of using a model with Character tokenization:"),be=u(),F(P.$$.fragment),we=u(),W=o("p"),nt=l("Tips:"),ge=u(),H=o("ul"),M=o("li"),lt=l("This implementation is the same as BERT, except for tokenization method. Refer to the "),K=o("a"),it=l("documentation of BERT"),pt=l(" for more usage examples."),ze=u(),j=o("p"),ct=l("This model was contributed by "),C=o("a"),ut=l("cl-tohoku"),ft=l("."),Te=u(),b=o("h2"),E=o("a"),le=o("span"),F(S.$$.fragment),mt=u(),ie=o("span"),ht=l("BertJapaneseTokenizer"),je=u(),w=o("div"),F(L.$$.fragment),dt=u(),pe=o("p"),_t=l("Construct a BERT tokenizer for Japanese text, based on a MecabTokenizer."),this.h()},l(e){const r=Ft('[data-svelte="svelte-1phssyn"]',document.head);_=s(r,"META",{name:!0,content:!0}),r.forEach(t),I=f(e),m=s(e,"H1",{class:!0});var $e=n(m);d=s($e,"A",{id:!0,class:!0,href:!0});var vt=n(d);Z=s(vt,"SPAN",{});var bt=n(Z);V(y.$$.fragment,bt),bt.forEach(t),vt.forEach(t),Ce=f($e),ee=s($e,"SPAN",{});var wt=n(ee);Se=i(wt,"BertJapanese"),wt.forEach(t),$e.forEach(t),ce=f(e),k=s(e,"H2",{class:!0});var ye=n(k);g=s(ye,"A",{id:!0,class:!0,href:!0});var gt=n(g);te=s(gt,"SPAN",{});var zt=n(te);V(x.$$.fragment,zt),zt.forEach(t),gt.forEach(t),Le=f(ye),ae=s(ye,"SPAN",{});var Tt=n(ae);Ie=i(Tt,"Overview"),Tt.forEach(t),ye.forEach(t),ue=f(e),N=s(e,"P",{});var jt=n(N);Ne=i(jt,"The BERT models trained on Japanese text."),jt.forEach(t),fe=f(e),R=s(e,"P",{});var Et=n(R);Re=i(Et,"There are models with two different tokenization methods:"),Et.forEach(t),me=f(e),z=s(e,"UL",{});var xe=n(z);v=s(xe,"LI",{});var U=n(v);De=i(U,"Tokenize with MeCab and WordPiece. 
This requires some extra dependencies, "),A=s(U,"A",{href:!0,rel:!0});var $t=n(A);Oe=i($t,"fugashi"),$t.forEach(t),We=i(U," which is a wrapper around "),B=s(U,"A",{href:!0,rel:!0});var yt=n(B);He=i(yt,"MeCab"),yt.forEach(t),Ke=i(U,"."),U.forEach(t),Ue=f(xe),oe=s(xe,"LI",{});var xt=n(oe);Fe=i(xt,"Tokenize into characters."),xt.forEach(t),xe.forEach(t),he=f(e),h=s(e,"P",{});var $=n(h);Ve=i($,"To use "),se=s($,"EM",{});var At=n(se);Ge=i(At,"MecabTokenizer"),At.forEach(t),Qe=i($,", you should "),re=s($,"CODE",{});var Bt=n(re);Xe=i(Bt,'pip install transformers["ja"]'),Bt.forEach(t),Ye=i($," (or "),ne=s($,"CODE",{});var Jt=n(ne);Ze=i(Jt,'pip install -e .["ja"]'),Jt.forEach(t),et=i($,` if you install from source) to install dependencies.`),$.forEach(t),de=f(e),T=s(e,"P",{});var Ae=n(T);tt=i(Ae,"See "),J=s(Ae,"A",{href:!0,rel:!0});var qt=n(J);at=i(qt,"details on cl-tohoku repository"),qt.forEach(t),ot=i(Ae,"."),Ae.forEach(t),_e=f(e),D=s(e,"P",{});var Pt=n(D);st=i(Pt,"Example of using a model with MeCab and WordPiece tokenization:"),Pt.forEach(t),ke=f(e),V(q.$$.fragment,e),ve=f(e),O=s(e,"P",{});var Mt=n(O);rt=i(Mt,"Example of using a model with Character tokenization:"),Mt.forEach(t),be=f(e),V(P.$$.fragment,e),we=f(e),W=s(e,"P",{});var Ct=n(W);nt=i(Ct,"Tips:"),Ct.forEach(t),ge=f(e),H=s(e,"UL",{});var St=n(H);M=s(St,"LI",{});var Be=n(M);lt=i(Be,"This implementation is the same as BERT, except for tokenization method. Refer to the "),K=s(Be,"A",{href:!0});var Lt=n(K);it=i(Lt,"documentation of BERT"),Lt.forEach(t),pt=i(Be," for more usage examples."),Be.forEach(t),St.forEach(t),ze=f(e),j=s(e,"P",{});var Je=n(j);ct=i(Je,"This model was contributed by "),C=s(Je,"A",{href:!0,rel:!0});var It=n(C);ut=i(It,"cl-tohoku"),It.forEach(t),ft=i(Je,"."),Je.forEach(t),Te=f(e),b=s(e,"H2",{class:!0});var qe=n(b);E=s(qe,"A",{id:!0,class:!0,href:!0});var Nt=n(E);le=s(Nt,"SPAN",{});var Rt=n(le);V(S.$$.fragment,Rt),Rt.forEach(t),Nt.forEach(t),mt=f(qe),ie=s(qe,"SPAN",{});var Dt=n(ie);ht=i(Dt,"BertJapaneseTokenizer"),Dt.forEach(t),qe.forEach(t),je=f(e),w=s(e,"DIV",{class:!0});var Pe=n(w);V(L.$$.fragment,Pe),dt=f(Pe),pe=s(Pe,"P",{});var Ot=n(pe);_t=i(Ot,"Construct a BERT tokenizer for Japanese text, based on a MecabTokenizer."),Ot.forEach(t),Pe.forEach(t),this.h()},h(){c(_,"name","hf:doc:metadata"),c(_,"content",JSON.stringify(Xt)),c(d,"id","bertjapanese"),c(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(d,"href","#bertjapanese"),c(m,"class","relative group"),c(g,"id","overview"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#overview"),c(k,"class","relative group"),c(A,"href","https://github.com/polm/fugashi"),c(A,"rel","nofollow"),c(B,"href","https://taku910.github.io/mecab/"),c(B,"rel","nofollow"),c(J,"href","https://github.com/cl-tohoku/bert-japanese"),c(J,"rel","nofollow"),c(K,"href","bert"),c(C,"href","https://huggingface.co/cl-tohoku"),c(C,"rel","nofollow"),c(E,"id","transformers.BertJapaneseTokenizer"),c(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(E,"href","#transformers.BertJapaneseTokenizer"),c(b,"class","relative 
group"),c(w,"class","docstring")},m(e,r){a(document.head,_),p(e,I,r),p(e,m,r),a(m,d),a(d,Z),G(y,Z,null),a(m,Ce),a(m,ee),a(ee,Se),p(e,ce,r),p(e,k,r),a(k,g),a(g,te),G(x,te,null),a(k,Le),a(k,ae),a(ae,Ie),p(e,ue,r),p(e,N,r),a(N,Ne),p(e,fe,r),p(e,R,r),a(R,Re),p(e,me,r),p(e,z,r),a(z,v),a(v,De),a(v,A),a(A,Oe),a(v,We),a(v,B),a(B,He),a(v,Ke),a(z,Ue),a(z,oe),a(oe,Fe),p(e,he,r),p(e,h,r),a(h,Ve),a(h,se),a(se,Ge),a(h,Qe),a(h,re),a(re,Xe),a(h,Ye),a(h,ne),a(ne,Ze),a(h,et),p(e,de,r),p(e,T,r),a(T,tt),a(T,J),a(J,at),a(T,ot),p(e,_e,r),p(e,D,r),a(D,st),p(e,ke,r),G(q,e,r),p(e,ve,r),p(e,O,r),a(O,rt),p(e,be,r),G(P,e,r),p(e,we,r),p(e,W,r),a(W,nt),p(e,ge,r),p(e,H,r),a(H,M),a(M,lt),a(M,K),a(K,it),a(M,pt),p(e,ze,r),p(e,j,r),a(j,ct),a(j,C),a(C,ut),a(j,ft),p(e,Te,r),p(e,b,r),a(b,E),a(E,le),G(S,le,null),a(b,mt),a(b,ie),a(ie,ht),p(e,je,r),p(e,w,r),G(L,w,null),a(w,dt),a(w,pe),a(pe,_t),Ee=!0},p:Vt,i(e){Ee||(Q(y.$$.fragment,e),Q(x.$$.fragment,e),Q(q.$$.fragment,e),Q(P.$$.fragment,e),Q(S.$$.fragment,e),Q(L.$$.fragment,e),Ee=!0)},o(e){X(y.$$.fragment,e),X(x.$$.fragment,e),X(q.$$.fragment,e),X(P.$$.fragment,e),X(S.$$.fragment,e),X(L.$$.fragment,e),Ee=!1},d(e){t(_),e&&t(I),e&&t(m),Y(y),e&&t(ce),e&&t(k),Y(x),e&&t(ue),e&&t(N),e&&t(fe),e&&t(R),e&&t(me),e&&t(z),e&&t(he),e&&t(h),e&&t(de),e&&t(T),e&&t(_e),e&&t(D),e&&t(ke),Y(q,e),e&&t(ve),e&&t(O),e&&t(be),Y(P,e),e&&t(we),e&&t(W),e&&t(ge),e&&t(H),e&&t(ze),e&&t(j),e&&t(Te),e&&t(b),Y(S),e&&t(je),e&&t(w),Y(L)}}}const Xt={local:"bertjapanese",sections:[{local:"overview",title:"Overview"},{local:"transformers.BertJapaneseTokenizer",title:"BertJapaneseTokenizer"}],title:"BertJapanese"};function Yt(Me,_,I){let{fw:m}=_;return Me.$$set=d=>{"fw"in d&&I(0,m=d.fw)},[m]}class sa extends Ht{constructor(_){super();Kt(this,_,Yt,Qt,Ut,{fw:0})}}export{sa as default,Xt as metadata};
9,961
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/canine.mdx-c05e1874.js
import{S as ud,i as fd,s as md,e as o,k as l,w as m,t as r,L as gd,c as s,d as t,m as c,a,x as g,h as i,b as d,J as e,g as h,y as _,q as v,o as k,B as C}from"../../chunks/vendor-b1433968.js";import{T as Oo}from"../../chunks/Tip-c3840994.js";import{D as F}from"../../chunks/Docstring-ff504c58.js";import{C as te}from"../../chunks/CodeBlock-a320dbd7.js";import{I as V}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function _d(D){let u,T,f,y,$;return{c(){u=o("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),y=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){u=s(b,"P",{});var w=a(u);T=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(w,"CODE",{});var z=a(f);y=i(z,"Module"),z.forEach(t),$=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(b,w){h(b,u,w),e(u,T),e(u,f),e(f,y),e(u,$)},d(b){b&&t(u)}}}function vd(D){let u,T,f,y,$;return{c(){u=o("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),y=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){u=s(b,"P",{});var w=a(u);T=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(w,"CODE",{});var z=a(f);y=i(z,"Module"),z.forEach(t),$=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(b,w){h(b,u,w),e(u,T),e(u,f),e(f,y),e(u,$)},d(b){b&&t(u)}}}function kd(D){let u,T,f,y,$;return{c(){u=o("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),y=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){u=s(b,"P",{});var w=a(u);T=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(w,"CODE",{});var z=a(f);y=i(z,"Module"),z.forEach(t),$=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(b,w){h(b,u,w),e(u,T),e(u,f),e(f,y),e(u,$)},d(b){b&&t(u)}}}function Cd(D){let u,T,f,y,$;return{c(){u=o("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),y=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){u=s(b,"P",{});var w=a(u);T=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(w,"CODE",{});var z=a(f);y=i(z,"Module"),z.forEach(t),$=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(b,w){h(b,u,w),e(u,T),e(u,f),e(f,y),e(u,$)},d(b){b&&t(u)}}}function wd(D){let 
u,T,f,y,$;return{c(){u=o("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),y=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){u=s(b,"P",{});var w=a(u);T=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(w,"CODE",{});var z=a(f);y=i(z,"Module"),z.forEach(t),$=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(b,w){h(b,u,w),e(u,T),e(u,f),e(f,y),e(u,$)},d(b){b&&t(u)}}}function bd(D){let u,T,f,y,$,b,w,z,Ss,Wo,oe,ye,jt,Re,Ds,Pt,Ls,Bo,Te,Os,Ve,Ws,Bs,Qo,Yn,Qs,Ho,Kn,Nt,Hs,Uo,Zn,Us,Ro,x,At,It,Rs,Vs,St,Je,Js,et,Gs,Xs,Ys,Dt,Lt,Ks,Zs,Ot,Wt,ea,na,Bt,nt,Ge,ta,oa,sa,Qt,tt,Xe,aa,ra,Vo,J,ia,Ye,la,ca,Ke,da,pa,Jo,se,$e,Ht,Ze,ha,Ut,ua,Go,ot,fa,Xo,en,Yo,st,ma,Ko,nn,Zo,ae,ze,Rt,tn,ga,Vt,_a,es,re,on,va,L,ka,at,Ca,wa,rt,ba,ya,Jt,Ta,$a,Gt,za,Ea,ns,ie,Ee,Xt,sn,qa,Yt,xa,ts,M,an,Fa,le,Ma,it,ja,Pa,rn,Na,Aa,Ia,ce,Sa,lt,Da,La,ct,Oa,Wa,Ba,Kt,Qa,Ha,ln,os,de,qe,Zt,cn,Ua,eo,Ra,ss,E,dn,Va,no,Ja,Ga,xe,dt,Xa,Ya,pt,Ka,Za,er,pn,nr,ht,tr,or,sr,G,hn,ar,to,rr,ir,un,ut,lr,oo,cr,dr,ft,pr,so,hr,ur,Fe,fn,fr,mn,mr,ao,gr,_r,vr,H,gn,kr,ro,Cr,wr,_n,br,pe,yr,io,Tr,$r,lo,zr,Er,as,he,Me,co,vn,qr,po,xr,rs,R,kn,Fr,Cn,Mr,wn,jr,Pr,Nr,j,bn,Ar,ue,Ir,mt,Sr,Dr,ho,Lr,Or,Wr,je,Br,uo,Qr,Hr,yn,is,fe,Pe,fo,Tn,Ur,mo,Rr,ls,O,$n,Vr,go,Jr,Gr,zn,Xr,En,Yr,Kr,Zr,q,qn,ei,me,ni,gt,ti,oi,_o,si,ai,ri,Ne,ii,vo,li,ci,xn,di,ko,pi,hi,Fn,cs,ge,Ae,Co,Mn,ui,wo,fi,ds,W,jn,mi,bo,gi,_i,Pn,vi,Nn,ki,Ci,wi,P,An,bi,_e,yi,_t,Ti,$i,yo,zi,Ei,qi,Ie,xi,To,Fi,Mi,In,ps,ve,Se,$o,Sn,ji,zo,Pi,hs,B,Dn,Ni,Eo,Ai,Ii,Ln,Si,On,Di,Li,Oi,N,Wn,Wi,ke,Bi,vt,Qi,Hi,qo,Ui,Ri,Vi,De,Ji,xo,Gi,Xi,Bn,us,Ce,Le,Fo,Qn,Yi,Mo,Ki,fs,Q,Hn,Zi,we,el,jo,nl,tl,Po,ol,sl,al,Un,rl,Rn,il,ll,cl,A,Vn,dl,be,pl,kt,hl,ul,No,fl,ml,gl,Oe,_l,Ao,vl,kl,Jn,ms;return b=new V({}),Re=new V({}),Ze=new V({}),en=new te({props:{code:`from transformers import CanineModel import torch model = CanineModel.from_pretrained('google/canine-c') # model pre-trained with autoregressive character loss text = "hello world" # use Python's built-in ord() function to turn each character into its unicode code point id input_ids = torch.tensor([[ord(char) for char in text]]) outputs = model(input_ids) # forward pass pooled_output = outputs.pooler_output sequence_output = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineModel.from_pretrained(<span class="hljs-string">&#x27;google/canine-c&#x27;</span>) <span class="hljs-comment"># model pre-trained with autoregressive character loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use Python&#x27;s built-in ord() function to turn each character into its unicode code point id</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([[<span class="hljs-built_in">ord</span>(char) <span class="hljs-keyword">for</span> char <span class="hljs-keyword">in</span> text]]) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_output = outputs.last_hidden_state`}}),nn=new te({props:{code:`from transformers import CanineTokenizer, CanineModel model = CanineModel.from_pretrained('google/canine-c') tokenizer = CanineTokenizer.from_pretrained('google/canine-c') inputs = ["Life is like a box of chocolates.", "You never know what you gonna get."] encoding = tokenizer(inputs, padding="longest", truncation=True, return_tensors="pt") outputs = model(**encoding) # forward pass pooled_output = outputs.pooler_output sequence_output = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineModel.from_pretrained(<span class="hljs-string">&#x27;google/canine-c&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-c&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = [<span class="hljs-string">&quot;Life is like a box of chocolates.&quot;</span>, <span class="hljs-string">&quot;You never know what you gonna get.&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(inputs, padding=<span class="hljs-string">&quot;longest&quot;</span>, truncation=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_output = outputs.last_hidden_state`}}),tn=new V({}),on=new F({props:{name:"class transformers.models.canine.modeling_canine.CanineModelOutputWithPooling",anchor:"transformers.models.canine.modeling_canine.CanineModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"pooler_output",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L66",parametersDescription:[{anchor:"transformers.models.canine.modeling_canine.CanineModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final shallow Transformer encoder).`,name:"last_hidden_state"},{anchor:"transformers.models.canine.modeling_canine.CanineModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Hidden-state of the first token of the sequence (classification token) at the last layer of the deep Transformer encoder, further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.`,name:"pooler_output"},{anchor:"transformers.models.canine.modeling_canine.CanineModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the input to each encoder + one for the output of each layer of each encoder) of shape <code>(batch_size, sequence_length, hidden_size)</code> and <code>(batch_size, sequence_length // config.downsampling_rate, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial input to each Transformer encoder. The hidden states of the shallow encoders have length <code>sequence_length</code>, but the hidden states of the deep encoder have length <code>sequence_length</code> // <code>config.downsampling_rate</code>.`,name:"hidden_states"},{anchor:"transformers.models.canine.modeling_canine.CanineModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of the 3 Transformer encoders of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code> and <code>(batch_size, num_heads, sequence_length // config.downsampling_rate, sequence_length // config.downsampling_rate)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),sn=new V({}),an=new F({props:{name:"class transformers.CanineConfig",anchor:"transformers.CanineConfig",parameters:[{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 16384"},{name:"type_vocab_size",val:" = 16"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 57344"},{name:"eos_token_id",val:" = 57345"},{name:"downsampling_rate",val:" = 4"},{name:"upsampling_kernel_size",val:" = 4"},{name:"num_hash_functions",val:" = 8"},{name:"num_hash_buckets",val:" = 16384"},{name:"local_transformer_stride",val:" = 128"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/configuration_canine.py#L29",parametersDescription:[{anchor:"transformers.CanineConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.CanineConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the deep Transformer 
encoder.`,name:"num_hidden_layers"},{anchor:"transformers.CanineConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoders.`,name:"num_attention_heads"},{anchor:"transformers.CanineConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoders.`,name:"intermediate_size"},{anchor:"transformers.CanineConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.CanineConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoders, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.CanineConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.CanineConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The maximum sequence length that this model might ever be used with.`,name:"max_position_embeddings"},{anchor:"transformers.CanineConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineModel">CanineModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.CanineConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.CanineConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.CanineConfig.downsampling_rate",description:`<strong>downsampling_rate</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The rate at which to downsample the original character sequence length before applying the deep Transformer encoder.`,name:"downsampling_rate"},{anchor:"transformers.CanineConfig.upsampling_kernel_size",description:`<strong>upsampling_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The kernel size (i.e. 
the number of characters in each window) of the convolutional projection layer when projecting back from <code>hidden_size</code>*2 to <code>hidden_size</code>.`,name:"upsampling_kernel_size"},{anchor:"transformers.CanineConfig.num_hash_functions",description:`<strong>num_hash_functions</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The number of hash functions to use. Each hash function has its own embedding matrix.`,name:"num_hash_functions"},{anchor:"transformers.CanineConfig.num_hash_buckets",description:`<strong>num_hash_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The number of hash buckets to use.`,name:"num_hash_buckets"},{anchor:"transformers.CanineConfig.local_transformer_stride",description:`<strong>local_transformer_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good TPU/XLA memory alignment.`,name:"local_transformer_stride"}]}}),ln=new te({props:{code:`from transformers import CanineModel, CanineConfig # Initializing a CANINE google/canine-s style configuration configuration = CanineConfig() # Initializing a model from the google/canine-s style configuration model = CanineModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineModel, CanineConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CANINE google/canine-s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = CanineConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the google/canine-s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),cn=new V({}),dn=new F({props:{name:"class transformers.CanineTokenizer",anchor:"transformers.CanineTokenizer",parameters:[{name:"bos_token",val:" = '\\ue000'"},{name:"eos_token",val:" = '\\ue001'"},{name:"sep_token",val:" = '\\ue001'"},{name:"cls_token",val:" = '\\ue000'"},{name:"pad_token",val:" = '\\x00'"},{name:"mask_token",val:" = '\\ue003'"},{name:"add_prefix_space",val:" = False"},{name:"model_max_length",val:" = 2048"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/tokenization_canine.py#L63",parametersDescription:[{anchor:"transformers.CanineTokenizer.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sentence length the model accepts.`,name:"model_max_length"}]}}),hn=new F({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.CanineTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/tokenization_canine.py#L156",parametersDescription:[{anchor:"transformers.CanineTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.CanineTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),fn=new F({props:{name:"get_special_tokens_mask",anchor:"transformers.CanineTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/tokenization_canine.py#L183",parametersDescription:[{anchor:"transformers.CanineTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CanineTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.CanineTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),gn=new F({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.CanineTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/tokenization_canine.py#L211",parametersDescription:[{anchor:"transformers.CanineTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CanineTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),_n=new te({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span 
class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),vn=new V({}),kn=new F({props:{name:"class transformers.CanineModel",anchor:"transformers.CanineModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L986",parametersDescription:[{anchor:"transformers.CanineModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bn=new F({props:{name:"forward",anchor:"transformers.CanineModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1094",parametersDescription:[{anchor:"transformers.CanineModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer">CanineTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CanineModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CanineModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CanineModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CanineModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CanineModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CanineModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CanineModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CanineModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.models.canine.modeling_canine.CanineModelOutputWithPooling" >transformers.models.canine.modeling_canine.CanineModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig" >CanineConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model (i.e. 
the output of the final shallow Transformer encoder).</li> <li><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Hidden-state of the first token of the sequence (classification token) at the last layer of the deep Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the input to each encoder + one for the output of each layer of each encoder) of shape <code>(batch_size, sequence_length, hidden_size)</code> and <code>(batch_size, sequence_length // config.downsampling_rate, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial input to each Transformer encoder. The hidden states of the shallow encoders have length <code>sequence_length</code>, but the hidden states of the deep encoder have length <code>sequence_length</code> // <code>config.downsampling_rate</code>.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of the 3 Transformer encoders of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code> and <code>(batch_size, num_heads, sequence_length // config.downsampling_rate, sequence_length // config.downsampling_rate)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.models.canine.modeling_canine.CanineModelOutputWithPooling" >transformers.models.canine.modeling_canine.CanineModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),je=new Oo({props:{$$slots:{default:[_d]},$$scope:{ctx:D}}}),yn=new te({props:{code:`from transformers import CanineTokenizer, CanineModel import torch tokenizer = CanineTokenizer.from_pretrained('google/canine-s') model = CanineModel.from_pretrained('google/canine-s') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineModel.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Tn=new V({}),$n=new F({props:{name:"class transformers.CanineForSequenceClassification",anchor:"transformers.CanineForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1264",parametersDescription:[{anchor:"transformers.CanineForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qn=new F({props:{name:"forward",anchor:"transformers.CanineForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1276",parametersDescription:[{anchor:"transformers.CanineForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer">CanineTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CanineForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CanineForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CanineForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CanineForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CanineForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CanineForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CanineForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CanineForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.CanineForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig" >CanineConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new Oo({props:{$$slots:{default:[vd]},$$scope:{ctx:D}}}),xn=new te({props:{code:`from transformers import CanineTokenizer, CanineForSequenceClassification import torch tokenizer = CanineTokenizer.from_pretrained('google/canine-s') model = CanineForSequenceClassification.from_pretrained('google/canine-s') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Fn=new te({props:{code:`from transformers import CanineTokenizer, CanineForSequenceClassification import torch tokenizer = CanineTokenizer.from_pretrained('google/canine-s') model = CanineForSequenceClassification.from_pretrained('google/canine-s', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Mn=new V({}),jn=new F({props:{name:"class transformers.CanineForMultipleChoice",anchor:"transformers.CanineForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1361",parametersDescription:[{anchor:"transformers.CanineForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),An=new F({props:{name:"forward",anchor:"transformers.CanineForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1372",parametersDescription:[{anchor:"transformers.CanineForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer">CanineTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CanineForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CanineForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CanineForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CanineForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CanineForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CanineForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CanineForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CanineForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.CanineForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig" >CanineConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ie=new Oo({props:{$$slots:{default:[kd]},$$scope:{ctx:D}}}),In=new te({props:{code:`from transformers import CanineTokenizer, CanineForMultipleChoice import torch tokenizer = CanineTokenizer.from_pretrained('google/canine-s') model = CanineForMultipleChoice.from_pretrained('google/canine-s') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Sn=new V({}),Dn=new F({props:{name:"class transformers.CanineForTokenClassification",anchor:"transformers.CanineForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1452",parametersDescription:[{anchor:"transformers.CanineForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wn=new F({props:{name:"forward",anchor:"transformers.CanineForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1464",parametersDescription:[{anchor:"transformers.CanineForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer">CanineTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CanineForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CanineForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CanineForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CanineForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CanineForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CanineForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CanineForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CanineForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.CanineForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig" >CanineConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),De=new Oo({props:{$$slots:{default:[Cd]},$$scope:{ctx:D}}}),Bn=new te({props:{code:`from transformers import CanineTokenizer, CanineForTokenClassification import torch tokenizer = CanineTokenizer.from_pretrained('google/canine-s') model = CanineForTokenClassification.from_pretrained('google/canine-s') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Qn=new V({}),Hn=new F({props:{name:"class transformers.CanineForQuestionAnswering",anchor:"transformers.CanineForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1540",parametersDescription:[{anchor:"transformers.CanineForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig">CanineConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vn=new F({props:{name:"forward",anchor:"transformers.CanineForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/canine/modeling_canine.py#L1551",parametersDescription:[{anchor:"transformers.CanineForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer">CanineTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CanineForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CanineForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CanineForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CanineForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CanineForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CanineForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CanineForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CanineForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.CanineForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.CanineForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineConfig" >CanineConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Oe=new Oo({props:{$$slots:{default:[wd]},$$scope:{ctx:D}}}),Jn=new te({props:{code:`from transformers import CanineTokenizer, CanineForQuestionAnswering import torch tokenizer = CanineTokenizer.from_pretrained('google/canine-s') model = CanineForQuestionAnswering.from_pretrained('google/canine-s') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CanineTokenizer, CanineForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CanineTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CanineForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/canine-s&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){u=o("meta"),T=l(),f=o("h1"),y=o("a"),$=o("span"),m(b.$$.fragment),w=l(),z=o("span"),Ss=r("CANINE"),Wo=l(),oe=o("h2"),ye=o("a"),jt=o("span"),m(Re.$$.fragment),Ds=l(),Pt=o("span"),Ls=r("Overview"),Bo=l(),Te=o("p"),Os=r("The CANINE model was proposed in "),Ve=o("a"),Ws=r(`CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation`),Bs=r(` by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. It\u2019s among the first papers that trains a Transformer without using an explicit tokenization step (such as Byte Pair Encoding (BPE), WordPiece or SentencePiece). Instead, the model is trained directly at a Unicode character-level. 
Training at a character-level inevitably comes with a longer sequence length, which CANINE solves with an efficient downsampling strategy, before applying a deep Transformer encoder.`),Qo=l(),Yn=o("p"),Qs=r("The abstract from the paper is the following:"),Ho=l(),Kn=o("p"),Nt=o("em"),Hs=r(`Pipelined NLP systems have largely been superseded by end-to-end neural modeling, yet nearly all commonly-used models still require an explicit tokenization step. While recent tokenization approaches based on data-derived subword lexicons are less brittle than manually engineered tokenizers, these techniques are not equally suited to all languages, and the use of any fixed vocabulary may limit a model\u2019s ability to adapt. In this paper, we present CANINE, a neural encoder that operates directly on character sequences, without explicit tokenization or vocabulary, and a pre-training strategy that operates either directly on characters or optionally uses subwords as a soft inductive bias. To use its finer-grained input effectively and efficiently, CANINE combines downsampling, which reduces the input sequence length, with a deep transformer stack, which encodes context. CANINE outperforms a comparable mBERT model by 2.8 F1 on TyDi QA, a challenging multilingual benchmark, despite having 28% fewer model parameters.`),Uo=l(),Zn=o("p"),Us=r("Tips:"),Ro=l(),x=o("ul"),At=o("li"),It=o("p"),Rs=r(`CANINE uses no less than 3 Transformer encoders internally: 2 \u201Cshallow\u201D encoders (which only consist of a single layer) and 1 \u201Cdeep\u201D encoder (which is a regular BERT encoder). First, a \u201Cshallow\u201D encoder is used to contextualize the character embeddings, using local attention. Next, after downsampling, a \u201Cdeep\u201D encoder is applied. Finally, after upsampling, a \u201Cshallow\u201D encoder is used to create the final character embeddings. Details regarding up- and downsampling can be found in the paper.`),Vs=l(),St=o("li"),Je=o("p"),Js=r("CANINE uses a max sequence length of 2048 characters by default. One can use "),et=o("a"),Gs=r("CanineTokenizer"),Xs=r(` to prepare text for the model.`),Ys=l(),Dt=o("li"),Lt=o("p"),Ks=r(`Classification can be done by placing a linear layer on top of the final hidden state of the special [CLS] token (which has a predefined Unicode code point). For token classification tasks however, the downsampled sequence of tokens needs to be upsampled again to match the length of the original character sequence (which is 2048). The details for this can be found in the paper.`),Zs=l(),Ot=o("li"),Wt=o("p"),ea=r("Models:"),na=l(),Bt=o("li"),nt=o("p"),Ge=o("a"),ta=r("google/canine-c"),oa=r(`: Pre-trained with autoregressive character loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB).`),sa=l(),Qt=o("li"),tt=o("p"),Xe=o("a"),aa=r("google/canine-s"),ra=r(`: Pre-trained with subword loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB).`),Vo=l(),J=o("p"),ia=r("This model was contributed by "),Ye=o("a"),la=r("nielsr"),ca=r(". 
The original code can be found "),Ke=o("a"),da=r("here"),pa=r("."),Jo=l(),se=o("h3"),$e=o("a"),Ht=o("span"),m(Ze.$$.fragment),ha=l(),Ut=o("span"),ua=r("Example"),Go=l(),ot=o("p"),fa=r("CANINE works on raw characters, so it can be used without a tokenizer:"),Xo=l(),m(en.$$.fragment),Yo=l(),st=o("p"),ma=r(`For batched inference and training, it is however recommended to make use of the tokenizer (to pad/truncate all sequences to the same length):`),Ko=l(),m(nn.$$.fragment),Zo=l(),ae=o("h2"),ze=o("a"),Rt=o("span"),m(tn.$$.fragment),ga=l(),Vt=o("span"),_a=r("CANINE specific outputs"),es=l(),re=o("div"),m(on.$$.fragment),va=l(),L=o("p"),ka=r("Output type of "),at=o("a"),Ca=r("CanineModel"),wa=r(`. Based on `),rt=o("a"),ba=r("BaseModelOutputWithPooling"),ya=r(`, but with slightly different `),Jt=o("code"),Ta=r("hidden_states"),$a=r(" and "),Gt=o("code"),za=r("attentions"),Ea=r(`, as these also include the hidden states and attentions of the shallow Transformer encoders.`),ns=l(),ie=o("h2"),Ee=o("a"),Xt=o("span"),m(sn.$$.fragment),qa=l(),Yt=o("span"),xa=r("CanineConfig"),ts=l(),M=o("div"),m(an.$$.fragment),Fa=l(),le=o("p"),Ma=r("This is the configuration class to store the configuration of a "),it=o("a"),ja=r("CanineModel"),Pa=r(`. It is used to instantiate an CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CANINE `),rn=o("a"),Na=r("google/canine-s"),Aa=r(" architecture."),Ia=l(),ce=o("p"),Sa=r("Configuration objects inherit from "),lt=o("a"),Da=r("PretrainedConfig"),La=r(` and can be used to control the model outputs. Read the documentation from `),ct=o("a"),Oa=r("PretrainedConfig"),Wa=r(" for more information."),Ba=l(),Kt=o("p"),Qa=r("Example:"),Ha=l(),m(ln.$$.fragment),os=l(),de=o("h2"),qe=o("a"),Zt=o("span"),m(cn.$$.fragment),Ua=l(),eo=o("span"),Ra=r("CanineTokenizer"),ss=l(),E=o("div"),m(dn.$$.fragment),Va=l(),no=o("p"),Ja=r(`Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then converts each character into its Unicode code point.`),Ga=l(),xe=o("p"),dt=o("a"),Xa=r("CanineTokenizer"),Ya=r(" inherits from "),pt=o("a"),Ka=r("PreTrainedTokenizer"),Za=r("."),er=l(),pn=o("p"),nr=r("Refer to superclass "),ht=o("a"),tr=r("PreTrainedTokenizer"),or=r(` for usage examples and documentation concerning parameters.`),sr=l(),G=o("div"),m(hn.$$.fragment),ar=l(),to=o("p"),rr=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CANINE sequence has the following format:`),ir=l(),un=o("ul"),ut=o("li"),lr=r("single sequence: "),oo=o("code"),cr=r("[CLS] X [SEP]"),dr=l(),ft=o("li"),pr=r("pair of sequences: "),so=o("code"),hr=r("[CLS] A [SEP] B [SEP]"),ur=l(),Fe=o("div"),m(fn.$$.fragment),fr=l(),mn=o("p"),mr=r(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ao=o("code"),gr=r("prepare_for_model"),_r=r(" method."),vr=l(),H=o("div"),m(gn.$$.fragment),kr=l(),ro=o("p"),Cr=r(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A CANINE sequence pair mask has the following format:`),wr=l(),m(_n.$$.fragment),br=l(),pe=o("p"),yr=r("If "),io=o("code"),Tr=r("token_ids_1"),$r=r(" is "),lo=o("code"),zr=r("None"),Er=r(", this method only returns the first portion of the mask (0s)."),as=l(),he=o("h2"),Me=o("a"),co=o("span"),m(vn.$$.fragment),qr=l(),po=o("span"),xr=r("CanineModel"),rs=l(),R=o("div"),m(kn.$$.fragment),Fr=l(),Cn=o("p"),Mr=r(`The bare CANINE Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),wn=o("a"),jr=r("torch.nn.Module"),Pr=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nr=l(),j=o("div"),m(bn.$$.fragment),Ar=l(),ue=o("p"),Ir=r("The "),mt=o("a"),Sr=r("CanineModel"),Dr=r(" forward method, overrides the "),ho=o("code"),Lr=r("__call__"),Or=r(" special method."),Wr=l(),m(je.$$.fragment),Br=l(),uo=o("p"),Qr=r("Example:"),Hr=l(),m(yn.$$.fragment),is=l(),fe=o("h2"),Pe=o("a"),fo=o("span"),m(Tn.$$.fragment),Ur=l(),mo=o("span"),Rr=r("CanineForSequenceClassification"),ls=l(),O=o("div"),m($n.$$.fragment),Vr=l(),go=o("p"),Jr=r(`CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Gr=l(),zn=o("p"),Xr=r("This model is a PyTorch "),En=o("a"),Yr=r("torch.nn.Module"),Kr=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zr=l(),q=o("div"),m(qn.$$.fragment),ei=l(),me=o("p"),ni=r("The "),gt=o("a"),ti=r("CanineForSequenceClassification"),oi=r(" forward method, overrides the "),_o=o("code"),si=r("__call__"),ai=r(" special method."),ri=l(),m(Ne.$$.fragment),ii=l(),vo=o("p"),li=r("Example of single-label classification:"),ci=l(),m(xn.$$.fragment),di=l(),ko=o("p"),pi=r("Example of multi-label classification:"),hi=l(),m(Fn.$$.fragment),cs=l(),ge=o("h2"),Ae=o("a"),Co=o("span"),m(Mn.$$.fragment),ui=l(),wo=o("span"),fi=r("CanineForMultipleChoice"),ds=l(),W=o("div"),m(jn.$$.fragment),mi=l(),bo=o("p"),gi=r(`CANINE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),_i=l(),Pn=o("p"),vi=r("This model is a PyTorch "),Nn=o("a"),ki=r("torch.nn.Module"),Ci=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wi=l(),P=o("div"),m(An.$$.fragment),bi=l(),_e=o("p"),yi=r("The "),_t=o("a"),Ti=r("CanineForMultipleChoice"),$i=r(" forward method, overrides the "),yo=o("code"),zi=r("__call__"),Ei=r(" special method."),qi=l(),m(Ie.$$.fragment),xi=l(),To=o("p"),Fi=r("Example:"),Mi=l(),m(In.$$.fragment),ps=l(),ve=o("h2"),Se=o("a"),$o=o("span"),m(Sn.$$.fragment),ji=l(),zo=o("span"),Pi=r("CanineForTokenClassification"),hs=l(),B=o("div"),m(Dn.$$.fragment),Ni=l(),Eo=o("p"),Ai=r(`CANINE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ii=l(),Ln=o("p"),Si=r("This model is a PyTorch "),On=o("a"),Di=r("torch.nn.Module"),Li=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Oi=l(),N=o("div"),m(Wn.$$.fragment),Wi=l(),ke=o("p"),Bi=r("The "),vt=o("a"),Qi=r("CanineForTokenClassification"),Hi=r(" forward method, overrides the "),qo=o("code"),Ui=r("__call__"),Ri=r(" special method."),Vi=l(),m(De.$$.fragment),Ji=l(),xo=o("p"),Gi=r("Example:"),Xi=l(),m(Bn.$$.fragment),us=l(),Ce=o("h2"),Le=o("a"),Fo=o("span"),m(Qn.$$.fragment),Yi=l(),Mo=o("span"),Ki=r("CanineForQuestionAnswering"),fs=l(),Q=o("div"),m(Hn.$$.fragment),Zi=l(),we=o("p"),el=r(`CANINE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),jo=o("code"),nl=r("span start logits"),tl=r(" and "),Po=o("code"),ol=r("span end logits"),sl=r(")."),al=l(),Un=o("p"),rl=r("This model is a PyTorch "),Rn=o("a"),il=r("torch.nn.Module"),ll=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cl=l(),A=o("div"),m(Vn.$$.fragment),dl=l(),be=o("p"),pl=r("The "),kt=o("a"),hl=r("CanineForQuestionAnswering"),ul=r(" forward method, overrides the "),No=o("code"),fl=r("__call__"),ml=r(" special method."),gl=l(),m(Oe.$$.fragment),_l=l(),Ao=o("p"),vl=r("Example:"),kl=l(),m(Jn.$$.fragment),this.h()},l(n){const p=gd('[data-svelte="svelte-1phssyn"]',document.head);u=s(p,"META",{name:!0,content:!0}),p.forEach(t),T=c(n),f=s(n,"H1",{class:!0});var Gn=a(f);y=s(Gn,"A",{id:!0,class:!0,href:!0});var Io=a(y);$=s(Io,"SPAN",{});var So=a($);g(b.$$.fragment,So),So.forEach(t),Io.forEach(t),w=c(Gn),z=s(Gn,"SPAN",{});var Do=a(z);Ss=i(Do,"CANINE"),Do.forEach(t),Gn.forEach(t),Wo=c(n),oe=s(n,"H2",{class:!0});var Xn=a(oe);ye=s(Xn,"A",{id:!0,class:!0,href:!0});var Tl=a(ye);jt=s(Tl,"SPAN",{});var $l=a(jt);g(Re.$$.fragment,$l),$l.forEach(t),Tl.forEach(t),Ds=c(Xn),Pt=s(Xn,"SPAN",{});var zl=a(Pt);Ls=i(zl,"Overview"),zl.forEach(t),Xn.forEach(t),Bo=c(n),Te=s(n,"P",{});var gs=a(Te);Os=i(gs,"The CANINE model was proposed in "),Ve=s(gs,"A",{href:!0,rel:!0});var El=a(Ve);Ws=i(El,`CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation`),El.forEach(t),Bs=i(gs,` by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. It\u2019s among the first papers that trains a Transformer without using an explicit tokenization step (such as Byte Pair Encoding (BPE), WordPiece or SentencePiece). Instead, the model is trained directly at a Unicode character-level. Training at a character-level inevitably comes with a longer sequence length, which CANINE solves with an efficient downsampling strategy, before applying a deep Transformer encoder.`),gs.forEach(t),Qo=c(n),Yn=s(n,"P",{});var ql=a(Yn);Qs=i(ql,"The abstract from the paper is the following:"),ql.forEach(t),Ho=c(n),Kn=s(n,"P",{});var xl=a(Kn);Nt=s(xl,"EM",{});var Fl=a(Nt);Hs=i(Fl,`Pipelined NLP systems have largely been superseded by end-to-end neural modeling, yet nearly all commonly-used models still require an explicit tokenization step. While recent tokenization approaches based on data-derived subword lexicons are less brittle than manually engineered tokenizers, these techniques are not equally suited to all languages, and the use of any fixed vocabulary may limit a model\u2019s ability to adapt. 
In this paper, we present CANINE, a neural encoder that operates directly on character sequences, without explicit tokenization or vocabulary, and a pre-training strategy that operates either directly on characters or optionally uses subwords as a soft inductive bias. To use its finer-grained input effectively and efficiently, CANINE combines downsampling, which reduces the input sequence length, with a deep transformer stack, which encodes context. CANINE outperforms a comparable mBERT model by 2.8 F1 on TyDi QA, a challenging multilingual benchmark, despite having 28% fewer model parameters.`),Fl.forEach(t),xl.forEach(t),Uo=c(n),Zn=s(n,"P",{});var Ml=a(Zn);Us=i(Ml,"Tips:"),Ml.forEach(t),Ro=c(n),x=s(n,"UL",{});var U=a(x);At=s(U,"LI",{});var jl=a(At);It=s(jl,"P",{});var Pl=a(It);Rs=i(Pl,`CANINE uses no less than 3 Transformer encoders internally: 2 \u201Cshallow\u201D encoders (which only consist of a single layer) and 1 \u201Cdeep\u201D encoder (which is a regular BERT encoder). First, a \u201Cshallow\u201D encoder is used to contextualize the character embeddings, using local attention. Next, after downsampling, a \u201Cdeep\u201D encoder is applied. Finally, after upsampling, a \u201Cshallow\u201D encoder is used to create the final character embeddings. Details regarding up- and downsampling can be found in the paper.`),Pl.forEach(t),jl.forEach(t),Vs=c(U),St=s(U,"LI",{});var Nl=a(St);Je=s(Nl,"P",{});var _s=a(Je);Js=i(_s,"CANINE uses a max sequence length of 2048 characters by default. One can use "),et=s(_s,"A",{href:!0});var Al=a(et);Gs=i(Al,"CanineTokenizer"),Al.forEach(t),Xs=i(_s,` to prepare text for the model.`),_s.forEach(t),Nl.forEach(t),Ys=c(U),Dt=s(U,"LI",{});var Il=a(Dt);Lt=s(Il,"P",{});var Sl=a(Lt);Ks=i(Sl,`Classification can be done by placing a linear layer on top of the final hidden state of the special [CLS] token (which has a predefined Unicode code point). For token classification tasks however, the downsampled sequence of tokens needs to be upsampled again to match the length of the original character sequence (which is 2048). The details for this can be found in the paper.`),Sl.forEach(t),Il.forEach(t),Zs=c(U),Ot=s(U,"LI",{});var Dl=a(Ot);Wt=s(Dl,"P",{});var Ll=a(Wt);ea=i(Ll,"Models:"),Ll.forEach(t),Dl.forEach(t),na=c(U),Bt=s(U,"LI",{});var Ol=a(Bt);nt=s(Ol,"P",{});var Cl=a(nt);Ge=s(Cl,"A",{href:!0,rel:!0});var Wl=a(Ge);ta=i(Wl,"google/canine-c"),Wl.forEach(t),oa=i(Cl,`: Pre-trained with autoregressive character loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB).`),Cl.forEach(t),Ol.forEach(t),sa=c(U),Qt=s(U,"LI",{});var Bl=a(Qt);tt=s(Bl,"P",{});var wl=a(tt);Xe=s(wl,"A",{href:!0,rel:!0});var Ql=a(Xe);aa=i(Ql,"google/canine-s"),Ql.forEach(t),ra=i(wl,`: Pre-trained with subword loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB).`),wl.forEach(t),Bl.forEach(t),U.forEach(t),Vo=c(n),J=s(n,"P",{});var Ct=a(J);ia=i(Ct,"This model was contributed by "),Ye=s(Ct,"A",{href:!0,rel:!0});var Hl=a(Ye);la=i(Hl,"nielsr"),Hl.forEach(t),ca=i(Ct,". 
The original code can be found "),Ke=s(Ct,"A",{href:!0,rel:!0});var Ul=a(Ke);da=i(Ul,"here"),Ul.forEach(t),pa=i(Ct,"."),Ct.forEach(t),Jo=c(n),se=s(n,"H3",{class:!0});var vs=a(se);$e=s(vs,"A",{id:!0,class:!0,href:!0});var Rl=a($e);Ht=s(Rl,"SPAN",{});var Vl=a(Ht);g(Ze.$$.fragment,Vl),Vl.forEach(t),Rl.forEach(t),ha=c(vs),Ut=s(vs,"SPAN",{});var Jl=a(Ut);ua=i(Jl,"Example"),Jl.forEach(t),vs.forEach(t),Go=c(n),ot=s(n,"P",{});var Gl=a(ot);fa=i(Gl,"CANINE works on raw characters, so it can be used without a tokenizer:"),Gl.forEach(t),Xo=c(n),g(en.$$.fragment,n),Yo=c(n),st=s(n,"P",{});var Xl=a(st);ma=i(Xl,`For batched inference and training, it is however recommended to make use of the tokenizer (to pad/truncate all sequences to the same length):`),Xl.forEach(t),Ko=c(n),g(nn.$$.fragment,n),Zo=c(n),ae=s(n,"H2",{class:!0});var ks=a(ae);ze=s(ks,"A",{id:!0,class:!0,href:!0});var Yl=a(ze);Rt=s(Yl,"SPAN",{});var Kl=a(Rt);g(tn.$$.fragment,Kl),Kl.forEach(t),Yl.forEach(t),ga=c(ks),Vt=s(ks,"SPAN",{});var Zl=a(Vt);_a=i(Zl,"CANINE specific outputs"),Zl.forEach(t),ks.forEach(t),es=c(n),re=s(n,"DIV",{class:!0});var Cs=a(re);g(on.$$.fragment,Cs),va=c(Cs),L=s(Cs,"P",{});var X=a(L);ka=i(X,"Output type of "),at=s(X,"A",{href:!0});var ec=a(at);Ca=i(ec,"CanineModel"),ec.forEach(t),wa=i(X,`. Based on `),rt=s(X,"A",{href:!0});var nc=a(rt);ba=i(nc,"BaseModelOutputWithPooling"),nc.forEach(t),ya=i(X,`, but with slightly different `),Jt=s(X,"CODE",{});var tc=a(Jt);Ta=i(tc,"hidden_states"),tc.forEach(t),$a=i(X," and "),Gt=s(X,"CODE",{});var oc=a(Gt);za=i(oc,"attentions"),oc.forEach(t),Ea=i(X,`, as these also include the hidden states and attentions of the shallow Transformer encoders.`),X.forEach(t),Cs.forEach(t),ns=c(n),ie=s(n,"H2",{class:!0});var ws=a(ie);Ee=s(ws,"A",{id:!0,class:!0,href:!0});var sc=a(Ee);Xt=s(sc,"SPAN",{});var ac=a(Xt);g(sn.$$.fragment,ac),ac.forEach(t),sc.forEach(t),qa=c(ws),Yt=s(ws,"SPAN",{});var rc=a(Yt);xa=i(rc,"CanineConfig"),rc.forEach(t),ws.forEach(t),ts=c(n),M=s(n,"DIV",{class:!0});var Y=a(M);g(an.$$.fragment,Y),Fa=c(Y),le=s(Y,"P",{});var wt=a(le);Ma=i(wt,"This is the configuration class to store the configuration of a "),it=s(wt,"A",{href:!0});var ic=a(it);ja=i(ic,"CanineModel"),ic.forEach(t),Pa=i(wt,`. It is used to instantiate an CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CANINE `),rn=s(wt,"A",{href:!0,rel:!0});var lc=a(rn);Na=i(lc,"google/canine-s"),lc.forEach(t),Aa=i(wt," architecture."),wt.forEach(t),Ia=c(Y),ce=s(Y,"P",{});var bt=a(ce);Sa=i(bt,"Configuration objects inherit from "),lt=s(bt,"A",{href:!0});var cc=a(lt);Da=i(cc,"PretrainedConfig"),cc.forEach(t),La=i(bt,` and can be used to control the model outputs. Read the documentation from `),ct=s(bt,"A",{href:!0});var dc=a(ct);Oa=i(dc,"PretrainedConfig"),dc.forEach(t),Wa=i(bt," for more information."),bt.forEach(t),Ba=c(Y),Kt=s(Y,"P",{});var pc=a(Kt);Qa=i(pc,"Example:"),pc.forEach(t),Ha=c(Y),g(ln.$$.fragment,Y),Y.forEach(t),os=c(n),de=s(n,"H2",{class:!0});var bs=a(de);qe=s(bs,"A",{id:!0,class:!0,href:!0});var hc=a(qe);Zt=s(hc,"SPAN",{});var uc=a(Zt);g(cn.$$.fragment,uc),uc.forEach(t),hc.forEach(t),Ua=c(bs),eo=s(bs,"SPAN",{});var fc=a(eo);Ra=i(fc,"CanineTokenizer"),fc.forEach(t),bs.forEach(t),ss=c(n),E=s(n,"DIV",{class:!0});var I=a(E);g(dn.$$.fragment,I),Va=c(I),no=s(I,"P",{});var mc=a(no);Ja=i(mc,`Construct a CANINE tokenizer (i.e. a character splitter). 
It turns text into a sequence of characters, and then converts each character into its Unicode code point.`),mc.forEach(t),Ga=c(I),xe=s(I,"P",{});var Lo=a(xe);dt=s(Lo,"A",{href:!0});var gc=a(dt);Xa=i(gc,"CanineTokenizer"),gc.forEach(t),Ya=i(Lo," inherits from "),pt=s(Lo,"A",{href:!0});var _c=a(pt);Ka=i(_c,"PreTrainedTokenizer"),_c.forEach(t),Za=i(Lo,"."),Lo.forEach(t),er=c(I),pn=s(I,"P",{});var ys=a(pn);nr=i(ys,"Refer to superclass "),ht=s(ys,"A",{href:!0});var vc=a(ht);tr=i(vc,"PreTrainedTokenizer"),vc.forEach(t),or=i(ys,` for usage examples and documentation concerning parameters.`),ys.forEach(t),sr=c(I),G=s(I,"DIV",{class:!0});var yt=a(G);g(hn.$$.fragment,yt),ar=c(yt),to=s(yt,"P",{});var kc=a(to);rr=i(kc,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CANINE sequence has the following format:`),kc.forEach(t),ir=c(yt),un=s(yt,"UL",{});var Ts=a(un);ut=s(Ts,"LI",{});var bl=a(ut);lr=i(bl,"single sequence: "),oo=s(bl,"CODE",{});var Cc=a(oo);cr=i(Cc,"[CLS] X [SEP]"),Cc.forEach(t),bl.forEach(t),dr=c(Ts),ft=s(Ts,"LI",{});var yl=a(ft);pr=i(yl,"pair of sequences: "),so=s(yl,"CODE",{});var wc=a(so);hr=i(wc,"[CLS] A [SEP] B [SEP]"),wc.forEach(t),yl.forEach(t),Ts.forEach(t),yt.forEach(t),ur=c(I),Fe=s(I,"DIV",{class:!0});var $s=a(Fe);g(fn.$$.fragment,$s),fr=c($s),mn=s($s,"P",{});var zs=a(mn);mr=i(zs,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ao=s(zs,"CODE",{});var bc=a(ao);gr=i(bc,"prepare_for_model"),bc.forEach(t),_r=i(zs," method."),zs.forEach(t),$s.forEach(t),vr=c(I),H=s(I,"DIV",{class:!0});var We=a(H);g(gn.$$.fragment,We),kr=c(We),ro=s(We,"P",{});var yc=a(ro);Cr=i(yc,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A CANINE sequence pair mask has the following format:`),yc.forEach(t),wr=c(We),g(_n.$$.fragment,We),br=c(We),pe=s(We,"P",{});var Tt=a(pe);yr=i(Tt,"If "),io=s(Tt,"CODE",{});var Tc=a(io);Tr=i(Tc,"token_ids_1"),Tc.forEach(t),$r=i(Tt," is "),lo=s(Tt,"CODE",{});var $c=a(lo);zr=i($c,"None"),$c.forEach(t),Er=i(Tt,", this method only returns the first portion of the mask (0s)."),Tt.forEach(t),We.forEach(t),I.forEach(t),as=c(n),he=s(n,"H2",{class:!0});var Es=a(he);Me=s(Es,"A",{id:!0,class:!0,href:!0});var zc=a(Me);co=s(zc,"SPAN",{});var Ec=a(co);g(vn.$$.fragment,Ec),Ec.forEach(t),zc.forEach(t),qr=c(Es),po=s(Es,"SPAN",{});var qc=a(po);xr=i(qc,"CanineModel"),qc.forEach(t),Es.forEach(t),rs=c(n),R=s(n,"DIV",{class:!0});var $t=a(R);g(kn.$$.fragment,$t),Fr=c($t),Cn=s($t,"P",{});var qs=a(Cn);Mr=i(qs,`The bare CANINE Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),wn=s(qs,"A",{href:!0,rel:!0});var xc=a(wn);jr=i(xc,"torch.nn.Module"),xc.forEach(t),Pr=i(qs,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qs.forEach(t),Nr=c($t),j=s($t,"DIV",{class:!0});var K=a(j);g(bn.$$.fragment,K),Ar=c(K),ue=s(K,"P",{});var zt=a(ue);Ir=i(zt,"The "),mt=s(zt,"A",{href:!0});var Fc=a(mt);Sr=i(Fc,"CanineModel"),Fc.forEach(t),Dr=i(zt," forward method, overrides the "),ho=s(zt,"CODE",{});var Mc=a(ho);Lr=i(Mc,"__call__"),Mc.forEach(t),Or=i(zt," special method."),zt.forEach(t),Wr=c(K),g(je.$$.fragment,K),Br=c(K),uo=s(K,"P",{});var jc=a(uo);Qr=i(jc,"Example:"),jc.forEach(t),Hr=c(K),g(yn.$$.fragment,K),K.forEach(t),$t.forEach(t),is=c(n),fe=s(n,"H2",{class:!0});var xs=a(fe);Pe=s(xs,"A",{id:!0,class:!0,href:!0});var Pc=a(Pe);fo=s(Pc,"SPAN",{});var Nc=a(fo);g(Tn.$$.fragment,Nc),Nc.forEach(t),Pc.forEach(t),Ur=c(xs),mo=s(xs,"SPAN",{});var Ac=a(mo);Rr=i(Ac,"CanineForSequenceClassification"),Ac.forEach(t),xs.forEach(t),ls=c(n),O=s(n,"DIV",{class:!0});var Be=a(O);g($n.$$.fragment,Be),Vr=c(Be),go=s(Be,"P",{});var Ic=a(go);Jr=i(Ic,`CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ic.forEach(t),Gr=c(Be),zn=s(Be,"P",{});var Fs=a(zn);Xr=i(Fs,"This model is a PyTorch "),En=s(Fs,"A",{href:!0,rel:!0});var Sc=a(En);Yr=i(Sc,"torch.nn.Module"),Sc.forEach(t),Kr=i(Fs,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fs.forEach(t),Zr=c(Be),q=s(Be,"DIV",{class:!0});var S=a(q);g(qn.$$.fragment,S),ei=c(S),me=s(S,"P",{});var Et=a(me);ni=i(Et,"The "),gt=s(Et,"A",{href:!0});var Dc=a(gt);ti=i(Dc,"CanineForSequenceClassification"),Dc.forEach(t),oi=i(Et," forward method, overrides the "),_o=s(Et,"CODE",{});var Lc=a(_o);si=i(Lc,"__call__"),Lc.forEach(t),ai=i(Et," special method."),Et.forEach(t),ri=c(S),g(Ne.$$.fragment,S),ii=c(S),vo=s(S,"P",{});var Oc=a(vo);li=i(Oc,"Example of single-label classification:"),Oc.forEach(t),ci=c(S),g(xn.$$.fragment,S),di=c(S),ko=s(S,"P",{});var Wc=a(ko);pi=i(Wc,"Example of multi-label classification:"),Wc.forEach(t),hi=c(S),g(Fn.$$.fragment,S),S.forEach(t),Be.forEach(t),cs=c(n),ge=s(n,"H2",{class:!0});var Ms=a(ge);Ae=s(Ms,"A",{id:!0,class:!0,href:!0});var Bc=a(Ae);Co=s(Bc,"SPAN",{});var Qc=a(Co);g(Mn.$$.fragment,Qc),Qc.forEach(t),Bc.forEach(t),ui=c(Ms),wo=s(Ms,"SPAN",{});var Hc=a(wo);fi=i(Hc,"CanineForMultipleChoice"),Hc.forEach(t),Ms.forEach(t),ds=c(n),W=s(n,"DIV",{class:!0});var Qe=a(W);g(jn.$$.fragment,Qe),mi=c(Qe),bo=s(Qe,"P",{});var Uc=a(bo);gi=i(Uc,`CANINE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Uc.forEach(t),_i=c(Qe),Pn=s(Qe,"P",{});var js=a(Pn);vi=i(js,"This model is a PyTorch "),Nn=s(js,"A",{href:!0,rel:!0});var Rc=a(Nn);ki=i(Rc,"torch.nn.Module"),Rc.forEach(t),Ci=i(js,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),js.forEach(t),wi=c(Qe),P=s(Qe,"DIV",{class:!0});var Z=a(P);g(An.$$.fragment,Z),bi=c(Z),_e=s(Z,"P",{});var qt=a(_e);yi=i(qt,"The "),_t=s(qt,"A",{href:!0});var Vc=a(_t);Ti=i(Vc,"CanineForMultipleChoice"),Vc.forEach(t),$i=i(qt," forward method, overrides the "),yo=s(qt,"CODE",{});var Jc=a(yo);zi=i(Jc,"__call__"),Jc.forEach(t),Ei=i(qt," special method."),qt.forEach(t),qi=c(Z),g(Ie.$$.fragment,Z),xi=c(Z),To=s(Z,"P",{});var Gc=a(To);Fi=i(Gc,"Example:"),Gc.forEach(t),Mi=c(Z),g(In.$$.fragment,Z),Z.forEach(t),Qe.forEach(t),ps=c(n),ve=s(n,"H2",{class:!0});var Ps=a(ve);Se=s(Ps,"A",{id:!0,class:!0,href:!0});var Xc=a(Se);$o=s(Xc,"SPAN",{});var Yc=a($o);g(Sn.$$.fragment,Yc),Yc.forEach(t),Xc.forEach(t),ji=c(Ps),zo=s(Ps,"SPAN",{});var Kc=a(zo);Pi=i(Kc,"CanineForTokenClassification"),Kc.forEach(t),Ps.forEach(t),hs=c(n),B=s(n,"DIV",{class:!0});var He=a(B);g(Dn.$$.fragment,He),Ni=c(He),Eo=s(He,"P",{});var Zc=a(Eo);Ai=i(Zc,`CANINE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zc.forEach(t),Ii=c(He),Ln=s(He,"P",{});var Ns=a(Ln);Si=i(Ns,"This model is a PyTorch "),On=s(Ns,"A",{href:!0,rel:!0});var ed=a(On);Di=i(ed,"torch.nn.Module"),ed.forEach(t),Li=i(Ns,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ns.forEach(t),Oi=c(He),N=s(He,"DIV",{class:!0});var ee=a(N);g(Wn.$$.fragment,ee),Wi=c(ee),ke=s(ee,"P",{});var xt=a(ke);Bi=i(xt,"The "),vt=s(xt,"A",{href:!0});var nd=a(vt);Qi=i(nd,"CanineForTokenClassification"),nd.forEach(t),Hi=i(xt," forward method, overrides the "),qo=s(xt,"CODE",{});var td=a(qo);Ui=i(td,"__call__"),td.forEach(t),Ri=i(xt," special method."),xt.forEach(t),Vi=c(ee),g(De.$$.fragment,ee),Ji=c(ee),xo=s(ee,"P",{});var od=a(xo);Gi=i(od,"Example:"),od.forEach(t),Xi=c(ee),g(Bn.$$.fragment,ee),ee.forEach(t),He.forEach(t),us=c(n),Ce=s(n,"H2",{class:!0});var As=a(Ce);Le=s(As,"A",{id:!0,class:!0,href:!0});var sd=a(Le);Fo=s(sd,"SPAN",{});var ad=a(Fo);g(Qn.$$.fragment,ad),ad.forEach(t),sd.forEach(t),Yi=c(As),Mo=s(As,"SPAN",{});var rd=a(Mo);Ki=i(rd,"CanineForQuestionAnswering"),rd.forEach(t),As.forEach(t),fs=c(n),Q=s(n,"DIV",{class:!0});var Ue=a(Q);g(Hn.$$.fragment,Ue),Zi=c(Ue),we=s(Ue,"P",{});var Ft=a(we);el=i(Ft,`CANINE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),jo=s(Ft,"CODE",{});var id=a(jo);nl=i(id,"span start logits"),id.forEach(t),tl=i(Ft," and "),Po=s(Ft,"CODE",{});var ld=a(Po);ol=i(ld,"span end logits"),ld.forEach(t),sl=i(Ft,")."),Ft.forEach(t),al=c(Ue),Un=s(Ue,"P",{});var Is=a(Un);rl=i(Is,"This model is a PyTorch "),Rn=s(Is,"A",{href:!0,rel:!0});var cd=a(Rn);il=i(cd,"torch.nn.Module"),cd.forEach(t),ll=i(Is,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Is.forEach(t),cl=c(Ue),A=s(Ue,"DIV",{class:!0});var ne=a(A);g(Vn.$$.fragment,ne),dl=c(ne),be=s(ne,"P",{});var Mt=a(be);pl=i(Mt,"The "),kt=s(Mt,"A",{href:!0});var dd=a(kt);hl=i(dd,"CanineForQuestionAnswering"),dd.forEach(t),ul=i(Mt," forward method, overrides the "),No=s(Mt,"CODE",{});var pd=a(No);fl=i(pd,"__call__"),pd.forEach(t),ml=i(Mt," special method."),Mt.forEach(t),gl=c(ne),g(Oe.$$.fragment,ne),_l=c(ne),Ao=s(ne,"P",{});var hd=a(Ao);vl=i(hd,"Example:"),hd.forEach(t),kl=c(ne),g(Jn.$$.fragment,ne),ne.forEach(t),Ue.forEach(t),this.h()},h(){d(u,"name","hf:doc:metadata"),d(u,"content",JSON.stringify(yd)),d(y,"id","canine"),d(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(y,"href","#canine"),d(f,"class","relative group"),d(ye,"id","overview"),d(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ye,"href","#overview"),d(oe,"class","relative group"),d(Ve,"href","https://arxiv.org/abs/2103.06874"),d(Ve,"rel","nofollow"),d(et,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer"),d(Ge,"href","https://huggingface.co/google/canine-c"),d(Ge,"rel","nofollow"),d(Xe,"href","https://huggingface.co/google/canine-s"),d(Xe,"rel","nofollow"),d(Ye,"href","https://huggingface.co/nielsr"),d(Ye,"rel","nofollow"),d(Ke,"href","https://github.com/google-research/language/tree/master/language/canine"),d(Ke,"rel","nofollow"),d($e,"id","example"),d($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d($e,"href","#example"),d(se,"class","relative group"),d(ze,"id","transformers.models.canine.modeling_canine.CanineModelOutputWithPooling"),d(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ze,"href","#transformers.models.canine.modeling_canine.CanineModelOutputWithPooling"),d(ae,"class","relative group"),d(at,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineModel"),d(rt,"href","/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling"),d(re,"class","docstring"),d(Ee,"id","transformers.CanineConfig"),d(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ee,"href","#transformers.CanineConfig"),d(ie,"class","relative group"),d(it,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineModel"),d(rn,"href","https://huggingface.co/google/canine-s"),d(rn,"rel","nofollow"),d(lt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(ct,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(M,"class","docstring"),d(qe,"id","transformers.CanineTokenizer"),d(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(qe,"href","#transformers.CanineTokenizer"),d(de,"class","relative group"),d(dt,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineTokenizer"),d(pt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(ht,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(G,"class","docstring"),d(Fe,"class","docstring"),d(H,"class","docstring"),d(E,"class","docstring"),d(Me,"id","transformers.CanineModel"),d(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Me,"href","#transformers.CanineModel"),d(he,"class","relative group"),d(wn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(wn,"rel","nofollow"),d(mt,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineModel"),d(j,"class","docstring"),d(R,"class","docstring"),d(Pe,"id","transformers.CanineForSequenceClassification"),d(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Pe,"href","#transformers.CanineForSequenceClassification"),d(fe,"class","relative group"),d(En,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(En,"rel","nofollow"),d(gt,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForSequenceClassification"),d(q,"class","docstring"),d(O,"class","docstring"),d(Ae,"id","transformers.CanineForMultipleChoice"),d(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ae,"href","#transformers.CanineForMultipleChoice"),d(ge,"class","relative group"),d(Nn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Nn,"rel","nofollow"),d(_t,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForMultipleChoice"),d(P,"class","docstring"),d(W,"class","docstring"),d(Se,"id","transformers.CanineForTokenClassification"),d(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Se,"href","#transformers.CanineForTokenClassification"),d(ve,"class","relative group"),d(On,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(On,"rel","nofollow"),d(vt,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForTokenClassification"),d(N,"class","docstring"),d(B,"class","docstring"),d(Le,"id","transformers.CanineForQuestionAnswering"),d(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Le,"href","#transformers.CanineForQuestionAnswering"),d(Ce,"class","relative 
group"),d(Rn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Rn,"rel","nofollow"),d(kt,"href","/docs/transformers/v4.15.0/en/model_doc/canine#transformers.CanineForQuestionAnswering"),d(A,"class","docstring"),d(Q,"class","docstring")},m(n,p){e(document.head,u),h(n,T,p),h(n,f,p),e(f,y),e(y,$),_(b,$,null),e(f,w),e(f,z),e(z,Ss),h(n,Wo,p),h(n,oe,p),e(oe,ye),e(ye,jt),_(Re,jt,null),e(oe,Ds),e(oe,Pt),e(Pt,Ls),h(n,Bo,p),h(n,Te,p),e(Te,Os),e(Te,Ve),e(Ve,Ws),e(Te,Bs),h(n,Qo,p),h(n,Yn,p),e(Yn,Qs),h(n,Ho,p),h(n,Kn,p),e(Kn,Nt),e(Nt,Hs),h(n,Uo,p),h(n,Zn,p),e(Zn,Us),h(n,Ro,p),h(n,x,p),e(x,At),e(At,It),e(It,Rs),e(x,Vs),e(x,St),e(St,Je),e(Je,Js),e(Je,et),e(et,Gs),e(Je,Xs),e(x,Ys),e(x,Dt),e(Dt,Lt),e(Lt,Ks),e(x,Zs),e(x,Ot),e(Ot,Wt),e(Wt,ea),e(x,na),e(x,Bt),e(Bt,nt),e(nt,Ge),e(Ge,ta),e(nt,oa),e(x,sa),e(x,Qt),e(Qt,tt),e(tt,Xe),e(Xe,aa),e(tt,ra),h(n,Vo,p),h(n,J,p),e(J,ia),e(J,Ye),e(Ye,la),e(J,ca),e(J,Ke),e(Ke,da),e(J,pa),h(n,Jo,p),h(n,se,p),e(se,$e),e($e,Ht),_(Ze,Ht,null),e(se,ha),e(se,Ut),e(Ut,ua),h(n,Go,p),h(n,ot,p),e(ot,fa),h(n,Xo,p),_(en,n,p),h(n,Yo,p),h(n,st,p),e(st,ma),h(n,Ko,p),_(nn,n,p),h(n,Zo,p),h(n,ae,p),e(ae,ze),e(ze,Rt),_(tn,Rt,null),e(ae,ga),e(ae,Vt),e(Vt,_a),h(n,es,p),h(n,re,p),_(on,re,null),e(re,va),e(re,L),e(L,ka),e(L,at),e(at,Ca),e(L,wa),e(L,rt),e(rt,ba),e(L,ya),e(L,Jt),e(Jt,Ta),e(L,$a),e(L,Gt),e(Gt,za),e(L,Ea),h(n,ns,p),h(n,ie,p),e(ie,Ee),e(Ee,Xt),_(sn,Xt,null),e(ie,qa),e(ie,Yt),e(Yt,xa),h(n,ts,p),h(n,M,p),_(an,M,null),e(M,Fa),e(M,le),e(le,Ma),e(le,it),e(it,ja),e(le,Pa),e(le,rn),e(rn,Na),e(le,Aa),e(M,Ia),e(M,ce),e(ce,Sa),e(ce,lt),e(lt,Da),e(ce,La),e(ce,ct),e(ct,Oa),e(ce,Wa),e(M,Ba),e(M,Kt),e(Kt,Qa),e(M,Ha),_(ln,M,null),h(n,os,p),h(n,de,p),e(de,qe),e(qe,Zt),_(cn,Zt,null),e(de,Ua),e(de,eo),e(eo,Ra),h(n,ss,p),h(n,E,p),_(dn,E,null),e(E,Va),e(E,no),e(no,Ja),e(E,Ga),e(E,xe),e(xe,dt),e(dt,Xa),e(xe,Ya),e(xe,pt),e(pt,Ka),e(xe,Za),e(E,er),e(E,pn),e(pn,nr),e(pn,ht),e(ht,tr),e(pn,or),e(E,sr),e(E,G),_(hn,G,null),e(G,ar),e(G,to),e(to,rr),e(G,ir),e(G,un),e(un,ut),e(ut,lr),e(ut,oo),e(oo,cr),e(un,dr),e(un,ft),e(ft,pr),e(ft,so),e(so,hr),e(E,ur),e(E,Fe),_(fn,Fe,null),e(Fe,fr),e(Fe,mn),e(mn,mr),e(mn,ao),e(ao,gr),e(mn,_r),e(E,vr),e(E,H),_(gn,H,null),e(H,kr),e(H,ro),e(ro,Cr),e(H,wr),_(_n,H,null),e(H,br),e(H,pe),e(pe,yr),e(pe,io),e(io,Tr),e(pe,$r),e(pe,lo),e(lo,zr),e(pe,Er),h(n,as,p),h(n,he,p),e(he,Me),e(Me,co),_(vn,co,null),e(he,qr),e(he,po),e(po,xr),h(n,rs,p),h(n,R,p),_(kn,R,null),e(R,Fr),e(R,Cn),e(Cn,Mr),e(Cn,wn),e(wn,jr),e(Cn,Pr),e(R,Nr),e(R,j),_(bn,j,null),e(j,Ar),e(j,ue),e(ue,Ir),e(ue,mt),e(mt,Sr),e(ue,Dr),e(ue,ho),e(ho,Lr),e(ue,Or),e(j,Wr),_(je,j,null),e(j,Br),e(j,uo),e(uo,Qr),e(j,Hr),_(yn,j,null),h(n,is,p),h(n,fe,p),e(fe,Pe),e(Pe,fo),_(Tn,fo,null),e(fe,Ur),e(fe,mo),e(mo,Rr),h(n,ls,p),h(n,O,p),_($n,O,null),e(O,Vr),e(O,go),e(go,Jr),e(O,Gr),e(O,zn),e(zn,Xr),e(zn,En),e(En,Yr),e(zn,Kr),e(O,Zr),e(O,q),_(qn,q,null),e(q,ei),e(q,me),e(me,ni),e(me,gt),e(gt,ti),e(me,oi),e(me,_o),e(_o,si),e(me,ai),e(q,ri),_(Ne,q,null),e(q,ii),e(q,vo),e(vo,li),e(q,ci),_(xn,q,null),e(q,di),e(q,ko),e(ko,pi),e(q,hi),_(Fn,q,null),h(n,cs,p),h(n,ge,p),e(ge,Ae),e(Ae,Co),_(Mn,Co,null),e(ge,ui),e(ge,wo),e(wo,fi),h(n,ds,p),h(n,W,p),_(jn,W,null),e(W,mi),e(W,bo),e(bo,gi),e(W,_i),e(W,Pn),e(Pn,vi),e(Pn,Nn),e(Nn,ki),e(Pn,Ci),e(W,wi),e(W,P),_(An,P,null),e(P,bi),e(P,_e),e(_e,yi),e(_e,_t),e(_t,Ti),e(_e,$i),e(_e,yo),e(yo,zi),e(_e,Ei),e(P,qi),_(Ie,P,null),e(P,xi),e(P,To),e(To,Fi),e(P,Mi),_(In,P,null),h(n,ps,p),h(n,ve,p),e(ve,Se),e(Se,$o),_(Sn,$o,null),e(ve,ji),e(ve,zo),e(zo,Pi),h(n,hs,p),h(n,B,p),_(Dn,B,null),e(B,Ni),e(B,Eo),e(Eo,Ai),e(B,
Ii),e(B,Ln),e(Ln,Si),e(Ln,On),e(On,Di),e(Ln,Li),e(B,Oi),e(B,N),_(Wn,N,null),e(N,Wi),e(N,ke),e(ke,Bi),e(ke,vt),e(vt,Qi),e(ke,Hi),e(ke,qo),e(qo,Ui),e(ke,Ri),e(N,Vi),_(De,N,null),e(N,Ji),e(N,xo),e(xo,Gi),e(N,Xi),_(Bn,N,null),h(n,us,p),h(n,Ce,p),e(Ce,Le),e(Le,Fo),_(Qn,Fo,null),e(Ce,Yi),e(Ce,Mo),e(Mo,Ki),h(n,fs,p),h(n,Q,p),_(Hn,Q,null),e(Q,Zi),e(Q,we),e(we,el),e(we,jo),e(jo,nl),e(we,tl),e(we,Po),e(Po,ol),e(we,sl),e(Q,al),e(Q,Un),e(Un,rl),e(Un,Rn),e(Rn,il),e(Un,ll),e(Q,cl),e(Q,A),_(Vn,A,null),e(A,dl),e(A,be),e(be,pl),e(be,kt),e(kt,hl),e(be,ul),e(be,No),e(No,fl),e(be,ml),e(A,gl),_(Oe,A,null),e(A,_l),e(A,Ao),e(Ao,vl),e(A,kl),_(Jn,A,null),ms=!0},p(n,[p]){const Gn={};p&2&&(Gn.$$scope={dirty:p,ctx:n}),je.$set(Gn);const Io={};p&2&&(Io.$$scope={dirty:p,ctx:n}),Ne.$set(Io);const So={};p&2&&(So.$$scope={dirty:p,ctx:n}),Ie.$set(So);const Do={};p&2&&(Do.$$scope={dirty:p,ctx:n}),De.$set(Do);const Xn={};p&2&&(Xn.$$scope={dirty:p,ctx:n}),Oe.$set(Xn)},i(n){ms||(v(b.$$.fragment,n),v(Re.$$.fragment,n),v(Ze.$$.fragment,n),v(en.$$.fragment,n),v(nn.$$.fragment,n),v(tn.$$.fragment,n),v(on.$$.fragment,n),v(sn.$$.fragment,n),v(an.$$.fragment,n),v(ln.$$.fragment,n),v(cn.$$.fragment,n),v(dn.$$.fragment,n),v(hn.$$.fragment,n),v(fn.$$.fragment,n),v(gn.$$.fragment,n),v(_n.$$.fragment,n),v(vn.$$.fragment,n),v(kn.$$.fragment,n),v(bn.$$.fragment,n),v(je.$$.fragment,n),v(yn.$$.fragment,n),v(Tn.$$.fragment,n),v($n.$$.fragment,n),v(qn.$$.fragment,n),v(Ne.$$.fragment,n),v(xn.$$.fragment,n),v(Fn.$$.fragment,n),v(Mn.$$.fragment,n),v(jn.$$.fragment,n),v(An.$$.fragment,n),v(Ie.$$.fragment,n),v(In.$$.fragment,n),v(Sn.$$.fragment,n),v(Dn.$$.fragment,n),v(Wn.$$.fragment,n),v(De.$$.fragment,n),v(Bn.$$.fragment,n),v(Qn.$$.fragment,n),v(Hn.$$.fragment,n),v(Vn.$$.fragment,n),v(Oe.$$.fragment,n),v(Jn.$$.fragment,n),ms=!0)},o(n){k(b.$$.fragment,n),k(Re.$$.fragment,n),k(Ze.$$.fragment,n),k(en.$$.fragment,n),k(nn.$$.fragment,n),k(tn.$$.fragment,n),k(on.$$.fragment,n),k(sn.$$.fragment,n),k(an.$$.fragment,n),k(ln.$$.fragment,n),k(cn.$$.fragment,n),k(dn.$$.fragment,n),k(hn.$$.fragment,n),k(fn.$$.fragment,n),k(gn.$$.fragment,n),k(_n.$$.fragment,n),k(vn.$$.fragment,n),k(kn.$$.fragment,n),k(bn.$$.fragment,n),k(je.$$.fragment,n),k(yn.$$.fragment,n),k(Tn.$$.fragment,n),k($n.$$.fragment,n),k(qn.$$.fragment,n),k(Ne.$$.fragment,n),k(xn.$$.fragment,n),k(Fn.$$.fragment,n),k(Mn.$$.fragment,n),k(jn.$$.fragment,n),k(An.$$.fragment,n),k(Ie.$$.fragment,n),k(In.$$.fragment,n),k(Sn.$$.fragment,n),k(Dn.$$.fragment,n),k(Wn.$$.fragment,n),k(De.$$.fragment,n),k(Bn.$$.fragment,n),k(Qn.$$.fragment,n),k(Hn.$$.fragment,n),k(Vn.$$.fragment,n),k(Oe.$$.fragment,n),k(Jn.$$.fragment,n),ms=!1},d(n){t(u),n&&t(T),n&&t(f),C(b),n&&t(Wo),n&&t(oe),C(Re),n&&t(Bo),n&&t(Te),n&&t(Qo),n&&t(Yn),n&&t(Ho),n&&t(Kn),n&&t(Uo),n&&t(Zn),n&&t(Ro),n&&t(x),n&&t(Vo),n&&t(J),n&&t(Jo),n&&t(se),C(Ze),n&&t(Go),n&&t(ot),n&&t(Xo),C(en,n),n&&t(Yo),n&&t(st),n&&t(Ko),C(nn,n),n&&t(Zo),n&&t(ae),C(tn),n&&t(es),n&&t(re),C(on),n&&t(ns),n&&t(ie),C(sn),n&&t(ts),n&&t(M),C(an),C(ln),n&&t(os),n&&t(de),C(cn),n&&t(ss),n&&t(E),C(dn),C(hn),C(fn),C(gn),C(_n),n&&t(as),n&&t(he),C(vn),n&&t(rs),n&&t(R),C(kn),C(bn),C(je),C(yn),n&&t(is),n&&t(fe),C(Tn),n&&t(ls),n&&t(O),C($n),C(qn),C(Ne),C(xn),C(Fn),n&&t(cs),n&&t(ge),C(Mn),n&&t(ds),n&&t(W),C(jn),C(An),C(Ie),C(In),n&&t(ps),n&&t(ve),C(Sn),n&&t(hs),n&&t(B),C(Dn),C(Wn),C(De),C(Bn),n&&t(us),n&&t(Ce),C(Qn),n&&t(fs),n&&t(Q),C(Hn),C(Vn),C(Oe),C(Jn)}}}const 
yd={local:"canine",sections:[{local:"overview",sections:[{local:"example",title:"Example"}],title:"Overview"},{local:"transformers.models.canine.modeling_canine.CanineModelOutputWithPooling",title:"CANINE specific outputs"},{local:"transformers.CanineConfig",title:"CanineConfig"},{local:"transformers.CanineTokenizer",title:"CanineTokenizer"},{local:"transformers.CanineModel",title:"CanineModel"},{local:"transformers.CanineForSequenceClassification",title:"CanineForSequenceClassification"},{local:"transformers.CanineForMultipleChoice",title:"CanineForMultipleChoice"},{local:"transformers.CanineForTokenClassification",title:"CanineForTokenClassification"},{local:"transformers.CanineForQuestionAnswering",title:"CanineForQuestionAnswering"}],title:"CANINE"};function Td(D,u,T){let{fw:f}=u;return D.$$set=y=>{"fw"in y&&T(0,f=y.fw)},[f]}class Md extends ud{constructor(u){super();fd(this,u,Td,bd,md,{fw:0})}}export{Md as default,yd as metadata};
9,962
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/visionencoderdecoder.mdx-87804abc.js
import{S as Ns,i as Rs,s as Bs,e as a,k as c,w,t as n,L as Gs,c as s,d as o,m as l,a as d,x as E,h as t,b as i,J as e,g as _,y as x,q as T,o as k,B as j}from"../../chunks/vendor-b1433968.js";import{T as Os}from"../../chunks/Tip-c3840994.js";import{D as oe}from"../../chunks/Docstring-ff504c58.js";import{C as tn}from"../../chunks/CodeBlock-a320dbd7.js";import{I as $n}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ws(me){let p,P,g,D,A;return{c(){p=a("p"),P=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),D=n("Module"),A=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(M){p=s(M,"P",{});var $=d(p);P=t($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s($,"CODE",{});var L=d(g);D=t(L,"Module"),L.forEach(o),A=t($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(M,$){_(M,p,$),e(p,P),e(p,g),e(g,D),e(p,A)},d(M){M&&o(p)}}}function Us(me){let p,P,g,D,A;return{c(){p=a("p"),P=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),D=n("Module"),A=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(M){p=s(M,"P",{});var $=d(p);P=t($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s($,"CODE",{});var L=d(g);D=t(L,"Module"),L.forEach(o),A=t($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(M,$){_(M,p,$),e(p,P),e(p,g),e(g,D),e(p,A)},d(M){M&&o(p)}}}function Hs(me){let p,P,g,D,A,M,$,L,Pn,rn,u,Cn,Je,qn,zn,To,Fn,An,Ye,Sn,Ln,Ke,In,On,Qe,Nn,Rn,ko,Bn,Gn,Xe,Wn,Un,eo,Hn,Zn,oo,Jn,Yn,an,ne,Kn,he,Qn,Xn,sn,I,et,no,ot,nt,to,tt,rt,dn,W,te,jo,fe,at,Do,st,cn,V,ge,dt,re,ro,it,ct,ao,lt,pt,mt,U,ht,so,ft,gt,io,ut,_t,vt,Mo,bt,yt,ue,wt,ae,_e,Et,ve,xt,co,Tt,kt,jt,se,be,Dt,H,Mt,Vo,Vt,$t,$o,Pt,Ct,ln,Z,de,Po,ye,qt,Co,zt,pn,h,we,Ft,J,At,qo,St,Lt,zo,It,Ot,Nt,Ee,Rt,xe,Bt,Gt,Wt,Te,Ut,ke,Ht,Zt,Jt,Fo,Yt,Kt,je,Qt,lo,Xt,er,or,De,nr,Me,tr,rr,ar,O,po,sr,dr,Ao,ir,cr,So,lr,pr,mr,C,Ve,hr,Y,fr,mo,gr,ur,Lo,_r,vr,br,ie,yr,Io,wr,Er,$e,xr,q,Pe,Tr,Oo,kr,jr,K,Dr,No,Mr,Vr,Ro,$r,Pr,Cr,Bo,qr,zr,Ce,mn,Q,ce,Go,qe,Fr,Wo,Ar,hn,f,ze,Sr,X,Lr,Uo,Ir,Or,Ho,Nr,Rr,Br,Fe,Gr,Ae,Wr,Ur,Hr,Se,Zr,Le,Jr,Yr,Kr,Zo,Qr,Xr,Ie,ea,ho,oa,na,ta,Oe,ra,Ne,aa,sa,da,N,fo,ia,ca,Jo,la,pa,Yo,ma,ha,fa,z,Re,ga,ee,ua,go,_a,va,Ko,ba,ya,wa,le,Ea,Qo,xa,Ta,Be,ka,S,Ge,ja,Xo,Da,Ma,en,Va,$a,We,fn;return M=new $n({}),fe=new $n({}),ge=new oe({props:{name:"class transformers.VisionEncoderDecoderConfig",anchor:"transformers.VisionEncoderDecoderConfig",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L27",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments. 
Notably:</p> <ul> <li><strong>encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the encoder config.</li> <li><strong>decoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the decoder config.</li> </ul>`,name:"kwargs"}]}}),ue=new tn({props:{code:`from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel # Initializing a ViT & BERT style configuration config_encoder = ViTConfig() config_decoder = BertConfig() config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) # Initializing a ViTBert model from a ViT & bert-base-uncased style configurations model = VisionEncoderDecoderModel(config=config) # Accessing the model configuration config_encoder = model.config.encoder config_decoder = model.config.decoder # set decoder config to causal lm config_decoder.is_decoder = True config_decoder.add_cross_attention = True # Saving the model, including its configuration model.save_pretrained('my-model') # loading model and config from pretrained folder encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained('my-model') model = VisionEncoderDecoderModel.from_pretrained('my-model', config=encoder_decoder_config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViT &amp; BERT style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViTBert model from a ViT &amp; bert-base-uncased style configurations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = model.config.encoder <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = model.config.decoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set decoder config to causal lm</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.add_cross_attention = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_decoder_config = 
VisionEncoderDecoderConfig.from_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>, config=encoder_decoder_config)`}}),_e=new oe({props:{name:"from_encoder_decoder_configs",anchor:"transformers.VisionEncoderDecoderConfig.from_encoder_decoder_configs",parameters:[{name:"encoder_config",val:": PretrainedConfig"},{name:"decoder_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L93",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a></p> `}}),be=new oe({props:{name:"to_dict",anchor:"transformers.VisionEncoderDecoderConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L110",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),ye=new $n({}),we=new oe({props:{name:"class transformers.VisionEncoderDecoderModel",anchor:"transformers.VisionEncoderDecoderModel",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"decoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L150",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ve=new oe({props:{name:"forward",anchor:"transformers.VisionEncoderDecoderModel.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L393",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. 
Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.VisionEncoderDecoderModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For training, <code>decoder_input_ids</code> are automatically created by the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.`,name:"decoder_input_ids"},{anchor:"transformers.VisionEncoderDecoderModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.VisionEncoderDecoderModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.VisionEncoderDecoderModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.VisionEncoderDecoderModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"decoder_inputs_embeds"},{anchor:"transformers.VisionEncoderDecoderModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.VisionEncoderDecoderModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.VisionEncoderDecoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisionEncoderDecoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisionEncoderDecoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple. kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul>`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention 
softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ie=new Os({props:{$$slots:{default:[Ws]},$$scope:{ctx:me}}}),$e=new tn({props:{code:`from transformers import TrOCRProcessor, VisionEncoderDecoderModel import requests from PIL import Image import torch processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten') model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten') # load image from the IAM dataset url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") # training model.config.decoder_start_token_id = processor.tokenizer.cls_token_id model.config.pad_token_id = processor.tokenizer.pad_token_id model.config.vocab_size = model.config.decoder.vocab_size pixel_values = processor(image, return_tensors="pt").pixel_values text = "hello world" labels = processor.tokenizer(text, return_tensors="pt").input_ids outputs = model(pixel_values=pixel_values, labels=labels) loss = outputs.loss # inference (generation) generated_ids = model.generate(pixel_values) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRProcessor, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = TrOCRProcessor.from_pretrained(<span class="hljs-string">&#x27;microsoft/trocr-base-handwritten&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span 
class="hljs-string">&#x27;microsoft/trocr-base-handwritten&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load image from the IAM dataset</span> <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.decoder_start_token_id = processor.tokenizer.cls_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = processor.tokenizer.pad_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.vocab_size = model.config.decoder.vocab_size <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = processor.tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference (generation)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(pixel_values) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_text = processor.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>]`}}),Pe=new oe({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": str = None"},{name:"decoder_pretrained_model_name_or_path",val:": str = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L247",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>) &#x2014; Information necessary to initiate the image encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the text decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),Ce=new tn({props:{code:`from transformers import VisionEncoderDecoderModel # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained('google/vit-base-patch16-224-in21k', 'bert-base-uncased') # saving model after fine-tuning model.save_pretrained("./vit-bert") # load fine-tuned model model = VisionEncoderDecoderModel.from_pretrained("./vit-bert"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-bert from a pretrained ViT and a pretrained BERT model. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>, <span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>)`}}),qe=new $n({}),ze=new oe({props:{name:"class transformers.FlaxVisionEncoderDecoderModel",anchor:"transformers.FlaxVisionEncoderDecoderModel",parameters:[{name:"config",val:": VisionEncoderDecoderConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L273",parametersDescription:[{anchor:"transformers.FlaxVisionEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Re=new oe({props:{name:"__call__",anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__",parameters:[{name:"pixel_values",val:": ndarray"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L584",parametersDescription:[{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using the vision model&#x2019;s feature extractor. For example, using <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a>`,name:"decoder_input_ids"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. 
Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.decoder.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>FlaxSeq2SeqLMOutput</code> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> 
</li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),le=new Os({props:{$$slots:{default:[Us]},$$scope:{ctx:me}}}),Be=new tn({props:{code:`from transformers import FlaxVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') # load output tokenizer tokenizer_output = GPT2Tokenizer.from_pretrained('gpt2') # initialize a vit-gpt2 from pretrained ViT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained('vit', 'gpt2') pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values # use GPT2's eos_token as the pad as well as eos token model.config.eos_token_id = model.config.decoder.eos_token_id model.config.pad_token_id = model.config.eos_token_id # generation sequences = model.generate(pixel_values, num_beams=4, max_length=12).sequences captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load output tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_output = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-gpt2 from pretrained ViT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;vit&#x27;</span>, <span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use GPT2&#x27;s eos_token as the pad as well as eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.eos_token_id = model.config.decoder.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequences = model.generate(pixel_values, num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">12</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span>captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),Ge=new oe({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType] = None"},{name:"decoder_pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType] = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L707",parametersDescription:[{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (:obj: <em>Union[str, os.PathLike]</em>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (:obj: <em>Union[str, os.PathLike]</em>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),We=new tn({props:{code:`from transformers import FlaxVisionEncoderDecoderModel # initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained('google/vit-base-patch16-224-in21k', 'gpt2') # saving model after fine-tuning model.save_pretrained("./vit-gpt2") # load fine-tuned model model = FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>, <span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-gpt2&quot;</span>)`}}),{c(){p=a("meta"),P=c(),g=a("h1"),D=a("a"),A=a("span"),w(M.$$.fragment),$=c(),L=a("span"),Pn=n("Vision Encoder Decoder Models"),rn=c(),u=a("p"),Cn=n("The "),Je=a("a"),qn=n("VisionEncoderDecoderModel"),zn=n(` can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder (`),To=a("em"),Fn=n("e.g."),An=c(),Ye=a("a"),Sn=n("ViT"),Ln=n(", "),Ke=a("a"),In=n("BEiT"),On=n(", "),Qe=a("a"),Nn=n("DeiT"),Rn=n(`) and any pretrained language model as the decoder (`),ko=a("em"),Bn=n("e.g."),Gn=c(),Xe=a("a"),Wn=n("RoBERTa"),Un=n(", "),eo=a("a"),Hn=n("GPT2"),Zn=n(", "),oo=a("a"),Jn=n("BERT"),Yn=n(")."),an=c(),ne=a("p"),Kn=n(`The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for example) `),he=a("a"),Qn=n("TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Xn=n(` by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.`),sn=c(),I=a("p"),et=n("An example of how to use a "),no=a("a"),ot=n("VisionEncoderDecoderModel"),nt=n(" for inference can be seen in "),to=a("a"),tt=n("TrOCR"),rt=n("."),dn=c(),W=a("h2"),te=a("a"),jo=a("span"),w(fe.$$.fragment),at=c(),Do=a("span"),st=n("VisionEncoderDecoderConfig"),cn=c(),V=a("div"),w(ge.$$.fragment),dt=c(),re=a("p"),ro=a("a"),it=n("VisionEncoderDecoderConfig"),ct=n(` is the configuration class to store the configuration of a `),ao=a("a"),lt=n("VisionEncoderDecoderModel"),pt=n(`. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the specified arguments, defining the encoder and decoder configs.`),mt=c(),U=a("p"),ht=n("Configuration objects inherit from "),so=a("a"),ft=n("PretrainedConfig"),gt=n(` and can be used to control the model outputs. Read the documentation from `),io=a("a"),ut=n("PretrainedConfig"),_t=n(" for more information."),vt=c(),Mo=a("p"),bt=n("Examples:"),yt=c(),w(ue.$$.fragment),wt=c(),ae=a("div"),w(_e.$$.fragment),Et=c(),ve=a("p"),xt=n("Instantiate a "),co=a("a"),Tt=n("VisionEncoderDecoderConfig"),kt=n(` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),jt=c(),se=a("div"),w(be.$$.fragment),Dt=c(),H=a("p"),Mt=n("Serializes this instance to a Python dictionary. 
Override the default "),Vo=a("em"),Vt=n("to_dict()"),$t=n(" from "),$o=a("em"),Pt=n("PretrainedConfig"),Ct=n("."),ln=c(),Z=a("h2"),de=a("a"),Po=a("span"),w(ye.$$.fragment),qt=c(),Co=a("span"),zt=n("VisionEncoderDecoderModel"),pn=c(),h=a("div"),w(we.$$.fragment),Ft=c(),J=a("p"),At=n(`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),qo=a("code"),St=n("from_pretrained()"),Lt=n(` function and the decoder is loaded via `),zo=a("code"),It=n("from_pretrained()"),Ot=n(` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Nt=c(),Ee=a("p"),Rt=n(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),xe=a("a"),Bt=n("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Gt=n(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Wt=c(),Te=a("p"),Ut=n("Additionally, in "),ke=a("a"),Ht=n("TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Zt=n(` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),Jt=c(),Fo=a("p"),Yt=n(`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Kt=c(),je=a("p"),Qt=n("This model inherits from "),lo=a("a"),Xt=n("PreTrainedModel"),er=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),or=c(),De=a("p"),nr=n("This model is also a PyTorch "),Me=a("a"),tr=n("torch.nn.Module"),rr=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ar=c(),O=a("p"),po=a("a"),sr=n("VisionEncoderDecoderModel"),dr=n(` is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one as decoder when created with the :meth`),Ao=a("em"),ir=n("~transformers.AutoModel.from_pretrained"),cr=n(` class method for the encoder and :meth`),So=a("em"),lr=n("~transformers.AutoModelForCausalLM.from_pretrained"),pr=n(" class method for the decoder."),mr=c(),C=a("div"),w(Ve.$$.fragment),hr=c(),Y=a("p"),fr=n("The "),mo=a("a"),gr=n("VisionEncoderDecoderModel"),ur=n(" forward method, overrides the "),Lo=a("code"),_r=n("__call__"),vr=n(" special method."),br=c(),w(ie.$$.fragment),yr=c(),Io=a("p"),wr=n("Examples:"),Er=c(),w($e.$$.fragment),xr=c(),q=a("div"),w(Pe.$$.fragment),Tr=c(),Oo=a("p"),kr=n(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),jr=c(),K=a("p"),Dr=n("The model is set in evaluation mode by default using "),No=a("code"),Mr=n("model.eval()"),Vr=n(` (Dropout modules are deactivated). 
To train the model, you need to first set it back in training mode with `),Ro=a("code"),$r=n("model.train()"),Pr=n("."),Cr=c(),Bo=a("p"),qr=n("Example:"),zr=c(),w(Ce.$$.fragment),mn=c(),Q=a("h2"),ce=a("a"),Go=a("span"),w(qe.$$.fragment),Fr=c(),Wo=a("span"),Ar=n("FlaxVisionEncoderDecoderModel"),hn=c(),f=a("div"),w(ze.$$.fragment),Sr=c(),X=a("p"),Lr=n(`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),Uo=a("code"),Ir=n("from_pretrained()"),Or=n(` function and the decoder is loaded via `),Ho=a("code"),Nr=n("from_pretrained()"),Rr=n(` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Br=c(),Fe=a("p"),Gr=n(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Ae=a("a"),Wr=n("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Ur=n(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Hr=c(),Se=a("p"),Zr=n("Additionally, in "),Le=a("a"),Jr=n("TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Yr=n(` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),Kr=c(),Zo=a("p"),Qr=n(`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Xr=c(),Ie=a("p"),ea=n("This model inherits from "),ho=a("a"),oa=n("FlaxPreTrainedModel"),na=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ta=c(),Oe=a("p"),ra=n("This model is also a Flax Linen "),Ne=a("a"),aa=n("flax.nn.Module"),sa=n(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),da=c(),N=a("p"),fo=a("a"),ia=n("FlaxVisionEncoderDecoderModel"),ca=n(` is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base vision model classes of the library as encoder module and another one as decoder module when created with the :meth`),Jo=a("em"),la=n("~transformers.FlaxAutoModel.from_pretrained"),pa=n(` class method for the encoder and :meth`),Yo=a("em"),ma=n("~transformers.FlaxAutoModelForCausalLM.from_pretrained"),ha=n(" class method for the decoder."),fa=c(),z=a("div"),w(Re.$$.fragment),ga=c(),ee=a("p"),ua=n("The "),go=a("a"),_a=n("FlaxVisionEncoderDecoderModel"),va=n(" forward method, overrides the "),Ko=a("code"),ba=n("__call__"),ya=n(" special method."),wa=c(),w(le.$$.fragment),Ea=c(),Qo=a("p"),xa=n("Examples:"),Ta=c(),w(Be.$$.fragment),ka=c(),S=a("div"),w(Ge.$$.fragment),ja=c(),Xo=a("p"),Da=n(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),Ma=c(),en=a("p"),Va=n("Example:"),$a=c(),w(We.$$.fragment),this.h()},l(r){const m=Gs('[data-svelte="svelte-1phssyn"]',document.head);p=s(m,"META",{name:!0,content:!0}),m.forEach(o),P=l(r),g=s(r,"H1",{class:!0});var Ue=d(g);D=s(Ue,"A",{id:!0,class:!0,href:!0});var on=d(D);A=s(on,"SPAN",{});var Pa=d(A);E(M.$$.fragment,Pa),Pa.forEach(o),on.forEach(o),$=l(Ue),L=s(Ue,"SPAN",{});var Ca=d(L);Pn=t(Ca,"Vision Encoder Decoder Models"),Ca.forEach(o),Ue.forEach(o),rn=l(r),u=s(r,"P",{});var v=d(u);Cn=t(v,"The "),Je=s(v,"A",{href:!0});var qa=d(Je);qn=t(qa,"VisionEncoderDecoderModel"),qa.forEach(o),zn=t(v,` can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder (`),To=s(v,"EM",{});var za=d(To);Fn=t(za,"e.g."),za.forEach(o),An=l(v),Ye=s(v,"A",{href:!0});var Fa=d(Ye);Sn=t(Fa,"ViT"),Fa.forEach(o),Ln=t(v,", "),Ke=s(v,"A",{href:!0});var Aa=d(Ke);In=t(Aa,"BEiT"),Aa.forEach(o),On=t(v,", "),Qe=s(v,"A",{href:!0});var Sa=d(Qe);Nn=t(Sa,"DeiT"),Sa.forEach(o),Rn=t(v,`) and any pretrained language model as the decoder (`),ko=s(v,"EM",{});var La=d(ko);Bn=t(La,"e.g."),La.forEach(o),Gn=l(v),Xe=s(v,"A",{href:!0});var Ia=d(Xe);Wn=t(Ia,"RoBERTa"),Ia.forEach(o),Un=t(v,", "),eo=s(v,"A",{href:!0});var Oa=d(eo);Hn=t(Oa,"GPT2"),Oa.forEach(o),Zn=t(v,", "),oo=s(v,"A",{href:!0});var Na=d(oo);Jn=t(Na,"BERT"),Na.forEach(o),Yn=t(v,")."),v.forEach(o),an=l(r),ne=s(r,"P",{});var gn=d(ne);Kn=t(gn,`The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for example) `),he=s(gn,"A",{href:!0,rel:!0});var Ra=d(he);Qn=t(Ra,"TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Ra.forEach(o),Xn=t(gn,` by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.`),gn.forEach(o),sn=l(r),I=s(r,"P",{});var uo=d(I);et=t(uo,"An example of how to use a "),no=s(uo,"A",{href:!0});var Ba=d(no);ot=t(Ba,"VisionEncoderDecoderModel"),Ba.forEach(o),nt=t(uo," for inference can be seen in "),to=s(uo,"A",{href:!0});var Ga=d(to);tt=t(Ga,"TrOCR"),Ga.forEach(o),rt=t(uo,"."),uo.forEach(o),dn=l(r),W=s(r,"H2",{class:!0});var un=d(W);te=s(un,"A",{id:!0,class:!0,href:!0});var Wa=d(te);jo=s(Wa,"SPAN",{});var Ua=d(jo);E(fe.$$.fragment,Ua),Ua.forEach(o),Wa.forEach(o),at=l(un),Do=s(un,"SPAN",{});var 
Ha=d(Do);st=t(Ha,"VisionEncoderDecoderConfig"),Ha.forEach(o),un.forEach(o),cn=l(r),V=s(r,"DIV",{class:!0});var F=d(V);E(ge.$$.fragment,F),dt=l(F),re=s(F,"P",{});var nn=d(re);ro=s(nn,"A",{href:!0});var Za=d(ro);it=t(Za,"VisionEncoderDecoderConfig"),Za.forEach(o),ct=t(nn,` is the configuration class to store the configuration of a `),ao=s(nn,"A",{href:!0});var Ja=d(ao);lt=t(Ja,"VisionEncoderDecoderModel"),Ja.forEach(o),pt=t(nn,`. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the specified arguments, defining the encoder and decoder configs.`),nn.forEach(o),mt=l(F),U=s(F,"P",{});var _o=d(U);ht=t(_o,"Configuration objects inherit from "),so=s(_o,"A",{href:!0});var Ya=d(so);ft=t(Ya,"PretrainedConfig"),Ya.forEach(o),gt=t(_o,` and can be used to control the model outputs. Read the documentation from `),io=s(_o,"A",{href:!0});var Ka=d(io);ut=t(Ka,"PretrainedConfig"),Ka.forEach(o),_t=t(_o," for more information."),_o.forEach(o),vt=l(F),Mo=s(F,"P",{});var Qa=d(Mo);bt=t(Qa,"Examples:"),Qa.forEach(o),yt=l(F),E(ue.$$.fragment,F),wt=l(F),ae=s(F,"DIV",{class:!0});var _n=d(ae);E(_e.$$.fragment,_n),Et=l(_n),ve=s(_n,"P",{});var vn=d(ve);xt=t(vn,"Instantiate a "),co=s(vn,"A",{href:!0});var Xa=d(co);Tt=t(Xa,"VisionEncoderDecoderConfig"),Xa.forEach(o),kt=t(vn,` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),vn.forEach(o),_n.forEach(o),jt=l(F),se=s(F,"DIV",{class:!0});var bn=d(se);E(be.$$.fragment,bn),Dt=l(bn),H=s(bn,"P",{});var vo=d(H);Mt=t(vo,"Serializes this instance to a Python dictionary. Override the default "),Vo=s(vo,"EM",{});var es=d(Vo);Vt=t(es,"to_dict()"),es.forEach(o),$t=t(vo," from "),$o=s(vo,"EM",{});var os=d($o);Pt=t(os,"PretrainedConfig"),os.forEach(o),Ct=t(vo,"."),vo.forEach(o),bn.forEach(o),F.forEach(o),ln=l(r),Z=s(r,"H2",{class:!0});var yn=d(Z);de=s(yn,"A",{id:!0,class:!0,href:!0});var ns=d(de);Po=s(ns,"SPAN",{});var ts=d(Po);E(ye.$$.fragment,ts),ts.forEach(o),ns.forEach(o),qt=l(yn),Co=s(yn,"SPAN",{});var rs=d(Co);zt=t(rs,"VisionEncoderDecoderModel"),rs.forEach(o),yn.forEach(o),pn=l(r),h=s(r,"DIV",{class:!0});var b=d(h);E(we.$$.fragment,b),Ft=l(b),J=s(b,"P",{});var bo=d(J);At=t(bo,`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),qo=s(bo,"CODE",{});var as=d(qo);St=t(as,"from_pretrained()"),as.forEach(o),Lt=t(bo,` function and the decoder is loaded via `),zo=s(bo,"CODE",{});var ss=d(zo);It=t(ss,"from_pretrained()"),ss.forEach(o),Ot=t(bo,` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),bo.forEach(o),Nt=l(b),Ee=s(b,"P",{});var wn=d(Ee);Rt=t(wn,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),xe=s(wn,"A",{href:!0,rel:!0});var ds=d(xe);Bt=t(ds,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),ds.forEach(o),Gt=t(wn,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. 
Liu.`),wn.forEach(o),Wt=l(b),Te=s(b,"P",{});var En=d(Te);Ut=t(En,"Additionally, in "),ke=s(En,"A",{href:!0,rel:!0});var is=d(ke);Ht=t(is,"TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),is.forEach(o),Zt=t(En,` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),En.forEach(o),Jt=l(b),Fo=s(b,"P",{});var cs=d(Fo);Yt=t(cs,`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),cs.forEach(o),Kt=l(b),je=s(b,"P",{});var xn=d(je);Qt=t(xn,"This model inherits from "),lo=s(xn,"A",{href:!0});var ls=d(lo);Xt=t(ls,"PreTrainedModel"),ls.forEach(o),er=t(xn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xn.forEach(o),or=l(b),De=s(b,"P",{});var Tn=d(De);nr=t(Tn,"This model is also a PyTorch "),Me=s(Tn,"A",{href:!0,rel:!0});var ps=d(Me);tr=t(ps,"torch.nn.Module"),ps.forEach(o),rr=t(Tn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tn.forEach(o),ar=l(b),O=s(b,"P",{});var He=d(O);po=s(He,"A",{href:!0});var ms=d(po);sr=t(ms,"VisionEncoderDecoderModel"),ms.forEach(o),dr=t(He,` is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one as decoder when created with the :meth`),Ao=s(He,"EM",{});var hs=d(Ao);ir=t(hs,"~transformers.AutoModel.from_pretrained"),hs.forEach(o),cr=t(He,` class method for the encoder and :meth`),So=s(He,"EM",{});var fs=d(So);lr=t(fs,"~transformers.AutoModelForCausalLM.from_pretrained"),fs.forEach(o),pr=t(He," class method for the decoder."),He.forEach(o),mr=l(b),C=s(b,"DIV",{class:!0});var R=d(C);E(Ve.$$.fragment,R),hr=l(R),Y=s(R,"P",{});var yo=d(Y);fr=t(yo,"The "),mo=s(yo,"A",{href:!0});var gs=d(mo);gr=t(gs,"VisionEncoderDecoderModel"),gs.forEach(o),ur=t(yo," forward method, overrides the "),Lo=s(yo,"CODE",{});var us=d(Lo);_r=t(us,"__call__"),us.forEach(o),vr=t(yo," special method."),yo.forEach(o),br=l(R),E(ie.$$.fragment,R),yr=l(R),Io=s(R,"P",{});var _s=d(Io);wr=t(_s,"Examples:"),_s.forEach(o),Er=l(R),E($e.$$.fragment,R),R.forEach(o),xr=l(b),q=s(b,"DIV",{class:!0});var B=d(q);E(Pe.$$.fragment,B),Tr=l(B),Oo=s(B,"P",{});var vs=d(Oo);kr=t(vs,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),vs.forEach(o),jr=l(B),K=s(B,"P",{});var wo=d(K);Dr=t(wo,"The model is set in evaluation mode by default using "),No=s(wo,"CODE",{});var bs=d(No);Mr=t(bs,"model.eval()"),bs.forEach(o),Vr=t(wo,` (Dropout modules are deactivated). 
To train the model, you need to first set it back in training mode with `),Ro=s(wo,"CODE",{});var ys=d(Ro);$r=t(ys,"model.train()"),ys.forEach(o),Pr=t(wo,"."),wo.forEach(o),Cr=l(B),Bo=s(B,"P",{});var ws=d(Bo);qr=t(ws,"Example:"),ws.forEach(o),zr=l(B),E(Ce.$$.fragment,B),B.forEach(o),b.forEach(o),mn=l(r),Q=s(r,"H2",{class:!0});var kn=d(Q);ce=s(kn,"A",{id:!0,class:!0,href:!0});var Es=d(ce);Go=s(Es,"SPAN",{});var xs=d(Go);E(qe.$$.fragment,xs),xs.forEach(o),Es.forEach(o),Fr=l(kn),Wo=s(kn,"SPAN",{});var Ts=d(Wo);Ar=t(Ts,"FlaxVisionEncoderDecoderModel"),Ts.forEach(o),kn.forEach(o),hn=l(r),f=s(r,"DIV",{class:!0});var y=d(f);E(ze.$$.fragment,y),Sr=l(y),X=s(y,"P",{});var Eo=d(X);Lr=t(Eo,`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),Uo=s(Eo,"CODE",{});var ks=d(Uo);Ir=t(ks,"from_pretrained()"),ks.forEach(o),Or=t(Eo,` function and the decoder is loaded via `),Ho=s(Eo,"CODE",{});var js=d(Ho);Nr=t(js,"from_pretrained()"),js.forEach(o),Rr=t(Eo,` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Eo.forEach(o),Br=l(y),Fe=s(y,"P",{});var jn=d(Fe);Gr=t(jn,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Ae=s(jn,"A",{href:!0,rel:!0});var Ds=d(Ae);Wr=t(Ds,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Ds.forEach(o),Ur=t(jn,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),jn.forEach(o),Hr=l(y),Se=s(y,"P",{});var Dn=d(Se);Zr=t(Dn,"Additionally, in "),Le=s(Dn,"A",{href:!0,rel:!0});var Ms=d(Le);Jr=t(Ms,"TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Ms.forEach(o),Yr=t(Dn,` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),Dn.forEach(o),Kr=l(y),Zo=s(y,"P",{});var Vs=d(Zo);Qr=t(Vs,`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Vs.forEach(o),Xr=l(y),Ie=s(y,"P",{});var Mn=d(Ie);ea=t(Mn,"This model inherits from "),ho=s(Mn,"A",{href:!0});var $s=d(ho);oa=t($s,"FlaxPreTrainedModel"),$s.forEach(o),na=t(Mn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mn.forEach(o),ta=l(y),Oe=s(y,"P",{});var Vn=d(Oe);ra=t(Vn,"This model is also a Flax Linen "),Ne=s(Vn,"A",{href:!0,rel:!0});var Ps=d(Ne);aa=t(Ps,"flax.nn.Module"),Ps.forEach(o),sa=t(Vn,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Vn.forEach(o),da=l(y),N=s(y,"P",{});var Ze=d(N);fo=s(Ze,"A",{href:!0});var Cs=d(fo);ia=t(Cs,"FlaxVisionEncoderDecoderModel"),Cs.forEach(o),ca=t(Ze,` is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base vision model classes of the library as encoder module and another one as decoder module when created with the :meth`),Jo=s(Ze,"EM",{});var qs=d(Jo);la=t(qs,"~transformers.FlaxAutoModel.from_pretrained"),qs.forEach(o),pa=t(Ze,` class method for the encoder and :meth`),Yo=s(Ze,"EM",{});var zs=d(Yo);ma=t(zs,"~transformers.FlaxAutoModelForCausalLM.from_pretrained"),zs.forEach(o),ha=t(Ze," class method for the decoder."),Ze.forEach(o),fa=l(y),z=s(y,"DIV",{class:!0});var G=d(z);E(Re.$$.fragment,G),ga=l(G),ee=s(G,"P",{});var xo=d(ee);ua=t(xo,"The "),go=s(xo,"A",{href:!0});var Fs=d(go);_a=t(Fs,"FlaxVisionEncoderDecoderModel"),Fs.forEach(o),va=t(xo," forward method, overrides the "),Ko=s(xo,"CODE",{});var As=d(Ko);ba=t(As,"__call__"),As.forEach(o),ya=t(xo," special method."),xo.forEach(o),wa=l(G),E(le.$$.fragment,G),Ea=l(G),Qo=s(G,"P",{});var Ss=d(Qo);xa=t(Ss,"Examples:"),Ss.forEach(o),Ta=l(G),E(Be.$$.fragment,G),G.forEach(o),ka=l(y),S=s(y,"DIV",{class:!0});var pe=d(S);E(Ge.$$.fragment,pe),ja=l(pe),Xo=s(pe,"P",{});var Ls=d(Xo);Da=t(Ls,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),Ls.forEach(o),Ma=l(pe),en=s(pe,"P",{});var Is=d(en);Va=t(Is,"Example:"),Is.forEach(o),$a=l(pe),E(We.$$.fragment,pe),pe.forEach(o),y.forEach(o),this.h()},h(){i(p,"name","hf:doc:metadata"),i(p,"content",JSON.stringify(Zs)),i(D,"id","vision-encoder-decoder-models"),i(D,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(D,"href","#vision-encoder-decoder-models"),i(g,"class","relative group"),i(Je,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel"),i(Ye,"href","/docs/transformers/v4.15.0/en/vit"),i(Ke,"href","/docs/transformers/v4.15.0/en/beit"),i(Qe,"href","/docs/transformers/v4.15.0/en/deit"),i(Xe,"href","/docs/transformers/v4.15.0/en/roberta"),i(eo,"href","/docs/transformers/v4.15.0/en/gpt2"),i(oo,"href","/docs/transformers/v4.15.0/en/bert"),i(he,"href","https://arxiv.org/abs/2109.10282"),i(he,"rel","nofollow"),i(no,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel"),i(to,"href","/docs/transformers/v4.15.0/en/trocr"),i(te,"id","transformers.VisionEncoderDecoderConfig"),i(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(te,"href","#transformers.VisionEncoderDecoderConfig"),i(W,"class","relative 
group"),i(ro,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig"),i(ao,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel"),i(so,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(io,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),i(co,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig"),i(ae,"class","docstring"),i(se,"class","docstring"),i(V,"class","docstring"),i(de,"id","transformers.VisionEncoderDecoderModel"),i(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(de,"href","#transformers.VisionEncoderDecoderModel"),i(Z,"class","relative group"),i(xe,"href","https://arxiv.org/abs/1907.12461"),i(xe,"rel","nofollow"),i(ke,"href","https://arxiv.org/abs/2109.10282"),i(ke,"rel","nofollow"),i(lo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),i(Me,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(Me,"rel","nofollow"),i(po,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel"),i(mo,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel"),i(C,"class","docstring"),i(q,"class","docstring"),i(h,"class","docstring"),i(ce,"id","transformers.FlaxVisionEncoderDecoderModel"),i(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(ce,"href","#transformers.FlaxVisionEncoderDecoderModel"),i(Q,"class","relative 
group"),i(Ae,"href","https://arxiv.org/abs/1907.12461"),i(Ae,"rel","nofollow"),i(Le,"href","https://arxiv.org/abs/2109.10282"),i(Le,"rel","nofollow"),i(ho,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),i(Ne,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),i(Ne,"rel","nofollow"),i(fo,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel"),i(go,"href","/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel"),i(z,"class","docstring"),i(S,"class","docstring"),i(f,"class","docstring")},m(r,m){e(document.head,p),_(r,P,m),_(r,g,m),e(g,D),e(D,A),x(M,A,null),e(g,$),e(g,L),e(L,Pn),_(r,rn,m),_(r,u,m),e(u,Cn),e(u,Je),e(Je,qn),e(u,zn),e(u,To),e(To,Fn),e(u,An),e(u,Ye),e(Ye,Sn),e(u,Ln),e(u,Ke),e(Ke,In),e(u,On),e(u,Qe),e(Qe,Nn),e(u,Rn),e(u,ko),e(ko,Bn),e(u,Gn),e(u,Xe),e(Xe,Wn),e(u,Un),e(u,eo),e(eo,Hn),e(u,Zn),e(u,oo),e(oo,Jn),e(u,Yn),_(r,an,m),_(r,ne,m),e(ne,Kn),e(ne,he),e(he,Qn),e(ne,Xn),_(r,sn,m),_(r,I,m),e(I,et),e(I,no),e(no,ot),e(I,nt),e(I,to),e(to,tt),e(I,rt),_(r,dn,m),_(r,W,m),e(W,te),e(te,jo),x(fe,jo,null),e(W,at),e(W,Do),e(Do,st),_(r,cn,m),_(r,V,m),x(ge,V,null),e(V,dt),e(V,re),e(re,ro),e(ro,it),e(re,ct),e(re,ao),e(ao,lt),e(re,pt),e(V,mt),e(V,U),e(U,ht),e(U,so),e(so,ft),e(U,gt),e(U,io),e(io,ut),e(U,_t),e(V,vt),e(V,Mo),e(Mo,bt),e(V,yt),x(ue,V,null),e(V,wt),e(V,ae),x(_e,ae,null),e(ae,Et),e(ae,ve),e(ve,xt),e(ve,co),e(co,Tt),e(ve,kt),e(V,jt),e(V,se),x(be,se,null),e(se,Dt),e(se,H),e(H,Mt),e(H,Vo),e(Vo,Vt),e(H,$t),e(H,$o),e($o,Pt),e(H,Ct),_(r,ln,m),_(r,Z,m),e(Z,de),e(de,Po),x(ye,Po,null),e(Z,qt),e(Z,Co),e(Co,zt),_(r,pn,m),_(r,h,m),x(we,h,null),e(h,Ft),e(h,J),e(J,At),e(J,qo),e(qo,St),e(J,Lt),e(J,zo),e(zo,It),e(J,Ot),e(h,Nt),e(h,Ee),e(Ee,Rt),e(Ee,xe),e(xe,Bt),e(Ee,Gt),e(h,Wt),e(h,Te),e(Te,Ut),e(Te,ke),e(ke,Ht),e(Te,Zt),e(h,Jt),e(h,Fo),e(Fo,Yt),e(h,Kt),e(h,je),e(je,Qt),e(je,lo),e(lo,Xt),e(je,er),e(h,or),e(h,De),e(De,nr),e(De,Me),e(Me,tr),e(De,rr),e(h,ar),e(h,O),e(O,po),e(po,sr),e(O,dr),e(O,Ao),e(Ao,ir),e(O,cr),e(O,So),e(So,lr),e(O,pr),e(h,mr),e(h,C),x(Ve,C,null),e(C,hr),e(C,Y),e(Y,fr),e(Y,mo),e(mo,gr),e(Y,ur),e(Y,Lo),e(Lo,_r),e(Y,vr),e(C,br),x(ie,C,null),e(C,yr),e(C,Io),e(Io,wr),e(C,Er),x($e,C,null),e(h,xr),e(h,q),x(Pe,q,null),e(q,Tr),e(q,Oo),e(Oo,kr),e(q,jr),e(q,K),e(K,Dr),e(K,No),e(No,Mr),e(K,Vr),e(K,Ro),e(Ro,$r),e(K,Pr),e(q,Cr),e(q,Bo),e(Bo,qr),e(q,zr),x(Ce,q,null),_(r,mn,m),_(r,Q,m),e(Q,ce),e(ce,Go),x(qe,Go,null),e(Q,Fr),e(Q,Wo),e(Wo,Ar),_(r,hn,m),_(r,f,m),x(ze,f,null),e(f,Sr),e(f,X),e(X,Lr),e(X,Uo),e(Uo,Ir),e(X,Or),e(X,Ho),e(Ho,Nr),e(X,Rr),e(f,Br),e(f,Fe),e(Fe,Gr),e(Fe,Ae),e(Ae,Wr),e(Fe,Ur),e(f,Hr),e(f,Se),e(Se,Zr),e(Se,Le),e(Le,Jr),e(Se,Yr),e(f,Kr),e(f,Zo),e(Zo,Qr),e(f,Xr),e(f,Ie),e(Ie,ea),e(Ie,ho),e(ho,oa),e(Ie,na),e(f,ta),e(f,Oe),e(Oe,ra),e(Oe,Ne),e(Ne,aa),e(Oe,sa),e(f,da),e(f,N),e(N,fo),e(fo,ia),e(N,ca),e(N,Jo),e(Jo,la),e(N,pa),e(N,Yo),e(Yo,ma),e(N,ha),e(f,fa),e(f,z),x(Re,z,null),e(z,ga),e(z,ee),e(ee,ua),e(ee,go),e(go,_a),e(ee,va),e(ee,Ko),e(Ko,ba),e(ee,ya),e(z,wa),x(le,z,null),e(z,Ea),e(z,Qo),e(Qo,xa),e(z,Ta),x(Be,z,null),e(f,ka),e(f,S),x(Ge,S,null),e(S,ja),e(S,Xo),e(Xo,Da),e(S,Ma),e(S,en),e(en,Va),e(S,$a),x(We,S,null),fn=!0},p(r,[m]){const Ue={};m&2&&(Ue.$$scope={dirty:m,ctx:r}),ie.$set(Ue);const 
on={};m&2&&(on.$$scope={dirty:m,ctx:r}),le.$set(on)},i(r){fn||(T(M.$$.fragment,r),T(fe.$$.fragment,r),T(ge.$$.fragment,r),T(ue.$$.fragment,r),T(_e.$$.fragment,r),T(be.$$.fragment,r),T(ye.$$.fragment,r),T(we.$$.fragment,r),T(Ve.$$.fragment,r),T(ie.$$.fragment,r),T($e.$$.fragment,r),T(Pe.$$.fragment,r),T(Ce.$$.fragment,r),T(qe.$$.fragment,r),T(ze.$$.fragment,r),T(Re.$$.fragment,r),T(le.$$.fragment,r),T(Be.$$.fragment,r),T(Ge.$$.fragment,r),T(We.$$.fragment,r),fn=!0)},o(r){k(M.$$.fragment,r),k(fe.$$.fragment,r),k(ge.$$.fragment,r),k(ue.$$.fragment,r),k(_e.$$.fragment,r),k(be.$$.fragment,r),k(ye.$$.fragment,r),k(we.$$.fragment,r),k(Ve.$$.fragment,r),k(ie.$$.fragment,r),k($e.$$.fragment,r),k(Pe.$$.fragment,r),k(Ce.$$.fragment,r),k(qe.$$.fragment,r),k(ze.$$.fragment,r),k(Re.$$.fragment,r),k(le.$$.fragment,r),k(Be.$$.fragment,r),k(Ge.$$.fragment,r),k(We.$$.fragment,r),fn=!1},d(r){o(p),r&&o(P),r&&o(g),j(M),r&&o(rn),r&&o(u),r&&o(an),r&&o(ne),r&&o(sn),r&&o(I),r&&o(dn),r&&o(W),j(fe),r&&o(cn),r&&o(V),j(ge),j(ue),j(_e),j(be),r&&o(ln),r&&o(Z),j(ye),r&&o(pn),r&&o(h),j(we),j(Ve),j(ie),j($e),j(Pe),j(Ce),r&&o(mn),r&&o(Q),j(qe),r&&o(hn),r&&o(f),j(ze),j(Re),j(le),j(Be),j(Ge),j(We)}}}const Zs={local:"vision-encoder-decoder-models",sections:[{local:"transformers.VisionEncoderDecoderConfig",title:"VisionEncoderDecoderConfig"},{local:"transformers.VisionEncoderDecoderModel",title:"VisionEncoderDecoderModel"},{local:"transformers.FlaxVisionEncoderDecoderModel",title:"FlaxVisionEncoderDecoderModel"}],title:"Vision Encoder Decoder Models"};function Js(me,p,P){let{fw:g}=p;return me.$$set=D=>{"fw"in D&&P(0,g=D.fw)},[g]}class nd extends Ns{constructor(p){super();Rs(this,p,Js,Hs,Bs,{fw:0})}}export{nd as default,Zs as metadata};
9,963
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/detr.mdx-8108b66d.js
import{S as Gg,i as Bg,s as Vg,e as a,k as d,w as u,t as o,L as Yg,c as s,d as t,m as c,a as i,x as g,h as n,b as l,J as e,g as m,y as _,q as b,o as v,B as D}from"../../chunks/vendor-b1433968.js";import{T as Hs}from"../../chunks/Tip-c3840994.js";import{D as S}from"../../chunks/Docstring-ff504c58.js";import{C as Us}from"../../chunks/CodeBlock-a320dbd7.js";import{I as xt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Qg(ee){let p,E;return{c(){p=a("p"),E=o(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(f){p=s(f,"P",{});var y=i(p);E=n(y,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),y.forEach(t)},m(f,y){m(f,p,y),e(p,E)},d(f){f&&t(p)}}}function Jg(ee){let p,E,f,y,j;return{c(){p=a("p"),E=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),y=o("Module"),j=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){p=s(x,"P",{});var T=i(p);E=n(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var U=i(f);y=n(U,"Module"),U.forEach(t),j=n(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(t)},m(x,T){m(x,p,T),e(p,E),e(p,f),e(f,y),e(p,j)},d(x){x&&t(p)}}}function Kg(ee){let p,E,f,y,j;return{c(){p=a("p"),E=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),y=o("Module"),j=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){p=s(x,"P",{});var T=i(p);E=n(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var U=i(f);y=n(U,"Module"),U.forEach(t),j=n(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(t)},m(x,T){m(x,p,T),e(p,E),e(p,f),e(f,y),e(p,j)},d(x){x&&t(p)}}}function Zg(ee){let p,E,f,y,j;return{c(){p=a("p"),E=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),y=o("Module"),j=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){p=s(x,"P",{});var T=i(p);E=n(T,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(T,"CODE",{});var U=i(f);y=n(U,"Module"),U.forEach(t),j=n(T,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),T.forEach(t)},m(x,T){m(x,p,T),e(p,E),e(p,f),e(f,y),e(p,j)},d(x){x&&t(p)}}}function Xg(ee){let 
p,E,f,y,j,x,T,U,Ws,Ca,we,He,On,Tt,Gs,kn,Bs,ja,Ue,Vs,wt,Ys,Qs,qa,$o,Js,Pa,zo,$n,Ks,Na,fe,Zs,Et,Xs,ei,Ft,ti,oi,Sa,We,ni,Ot,ri,ai,Aa,Ge,si,Co,ii,di,Ma,F,ci,zn,li,hi,Cn,mi,pi,jn,fi,ui,qn,gi,_i,Pn,bi,vi,Nn,Di,yi,Sn,xi,Ti,An,wi,Ei,Ia,z,Fi,Mn,Oi,ki,In,$i,zi,Ln,Ci,ji,Rn,qi,Pi,Hn,Ni,Si,Un,Ai,Mi,La,J,Ii,Wn,Li,Ri,kt,Hi,Ui,$t,Wi,Gi,Ra,K,Bi,jo,Vi,Yi,qo,Qi,Ji,Po,Ki,Zi,Ha,No,Xi,Ua,k,te,ed,Gn,td,od,Bn,nd,rd,So,ad,sd,id,Vn,dd,cd,oe,ld,Yn,hd,md,Ao,pd,fd,Qn,ud,gd,_d,ne,bd,Jn,vd,Dd,Mo,yd,xd,Kn,Td,wd,Ed,Y,Fd,Zn,Od,kd,Xn,$d,zd,er,Cd,jd,zt,qd,Pd,Nd,q,Io,Sd,Ad,Lo,Md,Id,Ct,Ld,Rd,tr,Hd,Ud,Ro,Wd,Gd,or,Bd,Vd,Yd,re,Qd,Ho,Jd,Kd,nr,Zd,Xd,Uo,ec,tc,oc,Ee,nc,rr,rc,ac,jt,sc,ic,Wa,Wo,dc,Ga,Be,ar,ae,sr,cc,lc,ir,hc,mc,dr,pc,fc,cr,uc,gc,A,se,lr,hr,_c,bc,mr,vc,Dc,pr,yc,xc,fr,Tc,wc,ie,ur,gr,Ec,Fc,_r,Go,Oc,kc,br,Bo,$c,zc,vr,Vo,Cc,jc,de,Dr,yr,qc,Pc,xr,Nc,Sc,Tr,Ac,Mc,wr,Ic,Lc,ce,qt,Er,Rc,Hc,Yo,Uc,Wc,Fe,Gc,Fr,Bc,Vc,Or,Yc,Qc,Jc,M,Kc,kr,Zc,Xc,$r,el,tl,zr,ol,nl,Cr,rl,al,jr,sl,il,dl,le,cl,qr,ll,hl,Pr,ml,pl,Nr,fl,ul,gl,he,Qo,Sr,_l,bl,vl,Ar,Jo,Dl,yl,Mr,Ko,xl,Tl,Pt,Zo,wl,El,Xo,Fl,Ol,me,Ir,Lr,kl,$l,Nt,Rr,zl,Cl,Hr,jl,ql,Oe,Ur,Pl,Nl,Wr,Sl,Al,Gr,Ml,Il,pe,Br,Ll,Rl,Vr,Hl,Ul,Yr,Wl,Gl,Qr,Bl,Ba,w,Vl,en,Yl,Ql,Jr,Jl,Kl,Kr,Zl,Xl,Zr,eh,th,tn,oh,nh,Xr,rh,ah,ea,sh,ih,St,dh,ch,At,lh,hh,Va,ke,Ve,ta,Mt,mh,oa,ph,Ya,$e,It,fh,na,uh,Qa,ze,Lt,gh,Rt,_h,on,bh,vh,Ja,Ce,Ht,Dh,Ut,yh,nn,xh,Th,Ka,je,Ye,ra,Wt,wh,aa,Eh,Za,I,Gt,Fh,qe,Oh,rn,kh,$h,Bt,zh,Ch,jh,Pe,qh,an,Ph,Nh,sn,Sh,Ah,Mh,sa,Ih,Lh,Vt,Xa,Ne,Qe,ia,Yt,Rh,da,Hh,es,O,Qt,Uh,ca,Wh,Gh,Jt,Bh,la,Vh,Yh,Qh,ue,Kt,Jh,ha,Kh,Zh,Je,Xh,Ke,Zt,em,Xt,tm,ma,om,nm,rm,Ze,eo,am,to,sm,dn,im,dm,cm,Xe,oo,lm,no,hm,cn,mm,pm,fm,et,ro,um,ao,gm,ln,_m,bm,ts,Se,tt,pa,so,vm,fa,Dm,os,L,io,ym,ua,xm,Tm,co,wm,hn,Em,Fm,Om,lo,km,ho,$m,zm,Cm,W,mo,jm,Ae,qm,mn,Pm,Nm,ga,Sm,Am,Mm,ot,Im,_a,Lm,Rm,po,ns,Me,nt,ba,fo,Hm,va,Um,rs,R,uo,Wm,Da,Gm,Bm,go,Vm,pn,Ym,Qm,Jm,_o,Km,bo,Zm,Xm,ep,G,vo,tp,Ie,op,fn,np,rp,ya,ap,sp,ip,rt,dp,xa,cp,lp,Do,as,Le,at,Ta,yo,hp,wa,mp,ss,H,xo,pp,Ea,fp,up,To,gp,un,_p,bp,vp,wo,Dp,Eo,yp,xp,Tp,B,Fo,wp,Re,Ep,gn,Fp,Op,Fa,kp,$p,zp,st,Cp,Oa,jp,qp,Oo,is;return x=new xt({}),Tt=new xt({}),Mt=new xt({}),It=new S({props:{name:"class transformers.models.detr.modeling_detr.DetrModelOutput",anchor:"transformers.models.detr.modeling_detr.DetrModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"intermediate_hidden_states",val:": typing.Optional[torch.FloatTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L92",parametersDescription:[{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the 
model.`,name:"last_hidden_state"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrModelOutput.intermediate_hidden_states",description:`<strong>intermediate_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(config.decoder_layers, batch_size, sequence_length, hidden_size)</code>, <em>optional</em>, returned when <code>config.auxiliary_loss=True</code>) &#x2014; Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm.`,name:"intermediate_hidden_states"}]}}),Lt=new S({props:{name:"class transformers.models.detr.modeling_detr.DetrObjectDetectionOutput",anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"loss_dict",val:": typing.Optional[typing.Dict] = None"},{name:"logits",val:": FloatTensor = None"},{name:"pred_boxes",val:": FloatTensor = None"},{name:"auxiliary_outputs",val:": typing.Optional[typing.List[typing.Dict]] = None"},{name:"last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L129",parametersDescription:[{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> are provided)) &#x2014; Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss.`,name:"loss"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.loss_dict",description:`<strong>loss_dict</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; A dictionary containing the individual losses. Useful for logging.`,name:"loss_dict"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, num_classes + 1)</code>) &#x2014; Classification logits (including no-object) for all queries.`,name:"logits"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.pred_boxes",description:`<strong>pred_boxes</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, 4)</code>) &#x2014; Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). 
You can use <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process">post_process()</a> to retrieve the unnormalized bounding boxes.`,name:"pred_boxes"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.auxiliary_outputs",description:`<strong>auxiliary_outputs</strong> (<code>list[Dict]</code>, <em>optional</em>) &#x2014; Optional, only returned when auxilary losses are activated (i.e. <code>config.auxiliary_loss</code> is set to <em>True</em>) and labels are provided. It is a list of dictionaries containing the two above keys (<code>logits</code> and <code>pred_boxes</code>) for each decoder layer.`,name:"auxiliary_outputs"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.detr.modeling_detr.DetrObjectDetectionOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}]}}),Ht=new S({props:{name:"class transformers.models.detr.modeling_detr.DetrSegmentationOutput",anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"loss_dict",val:": typing.Optional[typing.Dict] = None"},{name:"logits",val:": FloatTensor = None"},{name:"pred_boxes",val:": FloatTensor = None"},{name:"pred_masks",val:": FloatTensor = None"},{name:"auxiliary_outputs",val:": typing.Optional[typing.List[typing.Dict]] = None"},{name:"last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L189",parametersDescription:[{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> are provided)) &#x2014; Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. 
The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss.`,name:"loss"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.loss_dict",description:`<strong>loss_dict</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; A dictionary containing the individual losses. Useful for logging.`,name:"loss_dict"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, num_classes + 1)</code>) &#x2014; Classification logits (including no-object) for all queries.`,name:"logits"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.pred_boxes",description:`<strong>pred_boxes</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, 4)</code>) &#x2014; Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process">post_process()</a> to retrieve the unnormalized bounding boxes.`,name:"pred_boxes"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.pred_masks",description:`<strong>pred_masks</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, height/4, width/4)</code>) &#x2014; Segmentation masks logits for all queries. See also <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_segmentation">post_process_segmentation()</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_panoptic">post_process_panoptic()</a> to evaluate instance and panoptic segmentation masks respectively.`,name:"pred_masks"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.auxiliary_outputs",description:`<strong>auxiliary_outputs</strong> (<code>list[Dict]</code>, <em>optional</em>) &#x2014; Optional, only returned when auxiliary losses are activated (i.e. <code>config.auxiliary_loss</code> is set to <em>True</em>) and labels are provided. It is a list of dictionaries containing the two above keys (<code>logits</code> and <code>pred_boxes</code>) for each decoder layer.`,name:"auxiliary_outputs"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.models.detr.modeling_detr.DetrSegmentationOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}]}}),Wt=new xt({}),Gt=new S({props:{name:"class transformers.DetrConfig",anchor:"transformers.DetrConfig",parameters:[{name:"num_queries",val:" = 100"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 6"},{name:"encoder_ffn_dim",val:" = 2048"},{name:"encoder_attention_heads",val:" = 8"},{name:"decoder_layers",val:" = 6"},{name:"decoder_ffn_dim",val:" = 2048"},{name:"decoder_attention_heads",val:" = 8"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'relu'"},{name:"d_model",val:" = 256"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"init_xavier_std",val:" = 1.0"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"auxiliary_loss",val:" = False"},{name:"position_embedding_type",val:" = 'sine'"},{name:"backbone",val:" = 'resnet50'"},{name:"dilation",val:" = False"},{name:"class_cost",val:" = 1"},{name:"bbox_cost",val:" = 5"},{name:"giou_cost",val:" = 2"},{name:"mask_loss_coefficient",val:" = 1"},{name:"dice_loss_coefficient",val:" = 1"},{name:"bbox_loss_coefficient",val:" = 5"},{name:"giou_loss_coefficient",val:" = 2"},{name:"eos_coefficient",val:" = 0.1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/configuration_detr.py#L29",parametersDescription:[{anchor:"transformers.DetrConfig.num_queries",description:`<strong>num_queries</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Number of object queries, i.e. detection slots. This is the maximal number of objects <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrModel">DetrModel</a> can detect in a single image. 
For COCO, we recommend 100 queries.`,name:"num_queries"},{anchor:"transformers.DetrConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimension of the layers.`,name:"d_model"},{anchor:"transformers.DetrConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 6) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.DetrConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 6) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.DetrConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.DetrConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.DetrConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.DetrConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.DetrConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.DetrConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.DetrConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.DetrConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.DetrConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"init_std"},{anchor:"transformers.DetrConfig.init_xavier_std",description:`<strong>init_xavier_std</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The scaling factor used for the Xavier initialization gain in the HM Attention map module. 
encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_xavier_std"},{anchor:"transformers.DetrConfig.auxiliary_loss",description:`<strong>auxiliary_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether auxiliary decoding losses (loss at each decoder layer) are to be used.`,name:"auxiliary_loss"},{anchor:"transformers.DetrConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sine&quot;</code>) &#x2014; Type of position embeddings to be used on top of the image features. One of <code>&quot;sine&quot;</code> or <code>&quot;learned&quot;</code>.`,name:"position_embedding_type"},{anchor:"transformers.DetrConfig.backbone",description:`<strong>backbone</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;resnet50&quot;</code>) &#x2014; Name of convolutional backbone to use. Supports any convolutional backbone from the timm package. For a list of all available models, see <a href="https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model" rel="nofollow">this page</a>.`,name:"backbone"},{anchor:"transformers.DetrConfig.dilation",description:`<strong>dilation</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to replace stride with dilation in the last convolutional block (DC5).`,name:"dilation"},{anchor:"transformers.DetrConfig.class_cost",description:`<strong>class_cost</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Relative weight of the classification error in the Hungarian matching cost.`,name:"class_cost"},{anchor:"transformers.DetrConfig.bbox_cost",description:`<strong>bbox_cost</strong> (<code>float</code>, <em>optional</em>, defaults to 5) &#x2014; Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.`,name:"bbox_cost"},{anchor:"transformers.DetrConfig.giou_cost",description:`<strong>giou_cost</strong> (<code>float</code>, <em>optional</em>, defaults to 2) &#x2014; Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.`,name:"giou_cost"},{anchor:"transformers.DetrConfig.mask_loss_coefficient",description:`<strong>mask_loss_coefficient</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Relative weight of the Focal loss in the panoptic segmentation loss.`,name:"mask_loss_coefficient"},{anchor:"transformers.DetrConfig.dice_loss_coefficient",description:`<strong>dice_loss_coefficient</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.`,name:"dice_loss_coefficient"},{anchor:"transformers.DetrConfig.bbox_loss_coefficient",description:`<strong>bbox_loss_coefficient</strong> (<code>float</code>, <em>optional</em>, defaults to 5) &#x2014; Relative weight of the L1 bounding box loss in the object detection 
loss.`,name:"bbox_loss_coefficient"},{anchor:"transformers.DetrConfig.giou_loss_coefficient",description:`<strong>giou_loss_coefficient</strong> (<code>float</code>, <em>optional</em>, defaults to 2) &#x2014; Relative weight of the generalized IoU loss in the object detection loss.`,name:"giou_loss_coefficient"},{anchor:"transformers.DetrConfig.eos_coefficient",description:`<strong>eos_coefficient</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Relative classification weight of the &#x2018;no-object&#x2019; class in the object detection loss.`,name:"eos_coefficient"}]}}),Vt=new Us({props:{code:`from transformers import DetrModel, DetrConfig # Initializing a DETR facebook/detr-resnet-50 style configuration configuration = DetrConfig() # Initializing a model from the facebook/detr-resnet-50 style configuration model = DetrModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DetrModel, DetrConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a DETR facebook/detr-resnet-50 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = DetrConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/detr-resnet-50 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = DetrModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Yt=new xt({}),Qt=new S({props:{name:"class transformers.DetrFeatureExtractor",anchor:"transformers.DetrFeatureExtractor",parameters:[{name:"format",val:" = 'coco_detection'"},{name:"do_resize",val:" = True"},{name:"size",val:" = 800"},{name:"max_size",val:" = 1333"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/feature_extraction_detr.py#L123",parametersDescription:[{anchor:"transformers.DetrFeatureExtractor.format",description:`<strong>format</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;coco_detection&quot;</code>) &#x2014; Data format of the annotations. One of &#x201C;coco_detection&#x201D; or &#x201C;coco_panoptic&#x201D;.`,name:"format"},{anchor:"transformers.DetrFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.DetrFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code>, <em>optional</em>, defaults to 800) &#x2014; Resize the input to the given size. Only has an effect if <code>do_resize</code> is set to <code>True</code>. If size is a sequence like <code>(width, height)</code>, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. 
i.e, if <code>height &gt; width</code>, then image will be rescaled to <code>(size * height / width, size)</code>.`,name:"size"},{anchor:"transformers.DetrFeatureExtractor.max_size",description:`<strong>max_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>1333</code>) &#x2014; The largest size an image dimension can have (otherwise it&#x2019;s capped). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"max_size"},{anchor:"transformers.DetrFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with mean and standard deviation.`,name:"do_normalize"},{anchor:"transformers.DetrFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.`,name:"image_mean"},{anchor:"transformers.DetrFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std.`,name:"image_std"}]}}),Kt=new S({props:{name:"__call__",anchor:"transformers.DetrFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"annotations",val:": typing.Union[typing.List[typing.Dict], typing.List[typing.List[typing.Dict]]] = None"},{name:"return_segmentation_masks",val:": typing.Optional[bool] = False"},{name:"masks_path",val:": typing.Optional[pathlib.Path] = None"},{name:"pad_and_return_pixel_mask",val:": typing.Optional[bool] = True"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/feature_extraction_detr.py#L404",parametersDescription:[{anchor:"transformers.DetrFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.DetrFeatureExtractor.__call__.annotations",description:`<strong>annotations</strong> (<code>Dict</code>, <code>List[Dict]</code>, <em>optional</em>) &#x2014; The corresponding annotations in COCO format.</p> <p>In case <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor">DetrFeatureExtractor</a> was initialized with <code>format = &quot;coco_detection&quot;</code>, the annotations for each image should have the following format: {&#x2018;image_id&#x2019;: int, &#x2018;annotations&#x2019;: [annotation]}, with the annotations being a list of COCO object annotations.</p> <p>In case <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor">DetrFeatureExtractor</a> was initialized with <code>format = &quot;coco_panoptic&quot;</code>, the annotations for each image should have the following format: {&#x2018;image_id&#x2019;: int, &#x2018;file_name&#x2019;: str, &#x2018;segments_info&#x2019;: [segment_info]} with segments_info being a list of COCO panoptic annotations.`,name:"annotations"},{anchor:"transformers.DetrFeatureExtractor.__call__.return_segmentation_masks",description:`<strong>return_segmentation_masks</strong> (<code>Dict</code>, <code>List[Dict]</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to also include instance segmentation masks as part of the labels in case <code>format = &quot;coco_detection&quot;</code>.`,name:"return_segmentation_masks"},{anchor:"transformers.DetrFeatureExtractor.__call__.masks_path",description:`<strong>masks_path</strong> (<code>pathlib.Path</code>, <em>optional</em>) &#x2014; Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only relevant in case <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor">DetrFeatureExtractor</a> was initialized with <code>format = &quot;coco_panoptic&quot;</code>.`,name:"masks_path"},{anchor:"transformers.DetrFeatureExtractor.__call__.pad_and_return_pixel_mask",description:`<strong>pad_and_return_pixel_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to pad images up to the largest image in a batch and create a pixel mask.</p> <p>If left to the default, will return a pixel mask that is:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. <strong>masked</strong>).</li> </ul>`,name:"pad_and_return_pixel_mask"},{anchor:"transformers.DetrFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of NumPy arrays. 
If set to <code>&apos;pt&apos;</code>, return PyTorch <code>torch.Tensor</code> objects.`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model.</li> <li><strong>pixel_mask</strong> \u2014 Pixel mask to be fed to a model (when <code>pad_and_return_pixel_mask=True</code> or if <em>\u201Cpixel_mask\u201D</em> is in <code>self.model_input_names</code>).</li> <li><strong>labels</strong> \u2014 Optional labels to be fed to a model (when <code>annotations</code> are provided)</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),Je=new Hs({props:{warning:"&lcub;true}",$$slots:{default:[Qg]},$$scope:{ctx:ee}}}),Zt=new S({props:{name:"pad_and_create_pixel_mask",anchor:"transformers.DetrFeatureExtractor.pad_and_create_pixel_mask",parameters:[{name:"pixel_values_list",val:": typing.List[ForwardRef('torch.Tensor')]"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/feature_extraction_detr.py#L631",parametersDescription:[{anchor:"transformers.DetrFeatureExtractor.pad_and_create_pixel_mask.pixel_values_list",description:`<strong>pixel_values_list</strong> (<code>List[torch.Tensor]</code>) &#x2014; List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W).`,name:"pixel_values_list"},{anchor:"transformers.DetrFeatureExtractor.pad_and_create_pixel_mask.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of NumPy arrays. If set to <code>&apos;pt&apos;</code>, return PyTorch <code>torch.Tensor</code> objects.`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model.</li> <li><strong>pixel_mask</strong> \u2014 Pixel mask to be fed to a model (when <code>pad_and_return_pixel_mask=True</code> or if <em>\u201Cpixel_mask\u201D</em> is in <code>self.model_input_names</code>).</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),eo=new S({props:{name:"post_process",anchor:"transformers.DetrFeatureExtractor.post_process",parameters:[{name:"outputs",val:""},{name:"target_sizes",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/feature_extraction_detr.py#L675",parametersDescription:[{anchor:"transformers.DetrFeatureExtractor.post_process.outputs",description:`<strong>outputs</strong> (<code>DetrObjectDetectionOutput</code>) &#x2014; Raw outputs of the model.`,name:"outputs"},{anchor:"transformers.DetrFeatureExtractor.post_process.target_sizes",description:`<strong>target_sizes</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, 2)</code>, <em>optional</em>) &#x2014; Tensor containing the size (h, w) of each image of the batch. 
For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding.`,name:"target_sizes"}],returnDescription:` <p>A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model.</p> `,returnType:` <p><code>List[Dict]</code></p> `}}),oo=new S({props:{name:"post_process_segmentation",anchor:"transformers.DetrFeatureExtractor.post_process_segmentation",parameters:[{name:"outputs",val:""},{name:"target_sizes",val:""},{name:"threshold",val:" = 0.9"},{name:"mask_threshold",val:" = 0.5"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/feature_extraction_detr.py#L713",parametersDescription:[{anchor:"transformers.DetrFeatureExtractor.post_process_segmentation.outputs",description:`<strong>outputs</strong> (<code>DetrSegmentationOutput</code>) &#x2014; Raw outputs of the model.`,name:"outputs"},{anchor:"transformers.DetrFeatureExtractor.post_process_segmentation.target_sizes",description:`<strong>target_sizes</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, 2)</code> or <code>List[Tuple]</code> of length <code>batch_size</code>) &#x2014; Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.`,name:"target_sizes"},{anchor:"transformers.DetrFeatureExtractor.post_process_segmentation.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; Threshold to use to filter out queries.`,name:"threshold"},{anchor:"transformers.DetrFeatureExtractor.post_process_segmentation.mask_threshold",description:`<strong>mask_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Threshold to use when turning the predicted masks into binary values.`,name:"mask_threshold"}],returnDescription:` <p>A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model.</p> `,returnType:` <p><code>List[Dict]</code></p> `}}),ro=new S({props:{name:"post_process_panoptic",anchor:"transformers.DetrFeatureExtractor.post_process_panoptic",parameters:[{name:"outputs",val:""},{name:"processed_sizes",val:""},{name:"target_sizes",val:" = None"},{name:"is_thing_map",val:" = None"},{name:"threshold",val:" = 0.85"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/feature_extraction_detr.py#L800",parametersDescription:[{anchor:"transformers.DetrFeatureExtractor.post_process_panoptic.outputs",description:`<strong>outputs</strong> (<code>DetrSegmentationOutput</code>) &#x2014; Raw outputs of the model.`,name:"outputs"},{anchor:"transformers.DetrFeatureExtractor.post_process_panoptic.processed_sizes",description:`<strong>processed_sizes</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, 2)</code> or <code>List[Tuple]</code> of length <code>batch_size</code>) &#x2014; Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. 
the size after data augmentation but before batching.`,name:"processed_sizes"},{anchor:"transformers.DetrFeatureExtractor.post_process_panoptic.target_sizes",description:`<strong>target_sizes</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, 2)</code> or <code>List[Tuple]</code> of length <code>batch_size</code>, <em>optional</em>) &#x2014; Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to None, it will default to the <code>processed_sizes</code>.`,name:"target_sizes"},{anchor:"transformers.DetrFeatureExtractor.post_process_panoptic.is_thing_map",description:`<strong>is_thing_map</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, 2)</code>, <em>optional</em>) &#x2014; Dictionary mapping class indices to either True or False, depending on whether or not they are a thing. If not set, defaults to the <code>is_thing_map</code> of COCO panoptic.`,name:"is_thing_map"},{anchor:"transformers.DetrFeatureExtractor.post_process_panoptic.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.85) &#x2014; Threshold to use to filter out queries.`,name:"threshold"}],returnDescription:` <p>A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model.</p> `,returnType:` <p><code>List[Dict]</code></p> `}}),so=new xt({}),io=new S({props:{name:"class transformers.DetrModel",anchor:"transformers.DetrModel",parameters:[{name:"config",val:": DetrConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L1157",parametersDescription:[{anchor:"transformers.DetrModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig">DetrConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mo=new S({props:{name:"forward",anchor:"transformers.DetrModel.forward",parameters:[{name:"pixel_values",val:""},{name:"pixel_mask",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L1191",parametersDescription:[{anchor:"transformers.DetrModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it.</p> <p>Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor">DetrFeatureExtractor</a>. 
See <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.__call__">DetrFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.DetrModel.forward.pixel_mask",description:`<strong>pixel_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding pixel values. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. <strong>masked</strong>).</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"pixel_mask"},{anchor:"transformers.DetrModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_queries)</code>, <em>optional</em>) &#x2014; Not used by default. Can be used to mask object queries.`,name:"decoder_attention_mask"},{anchor:"transformers.DetrModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.DetrModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image.`,name:"inputs_embeds"},{anchor:"transformers.DetrModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation.`,name:"decoder_inputs_embeds"},{anchor:"transformers.DetrModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DetrModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DetrModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.models.detr.modeling_detr.DetrModelOutput" >transformers.models.detr.modeling_detr.DetrModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig" >DetrConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</li> <li><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</li> <li><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> <li><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</li> <li><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</li> <li><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>intermediate_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(config.decoder_layers, batch_size, sequence_length, hidden_size)</code>, <em>optional</em>, returned when <code>config.auxiliary_loss=True</code>) \u2014 Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.models.detr.modeling_detr.DetrModelOutput" >transformers.models.detr.modeling_detr.DetrModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ot=new Hs({props:{$$slots:{default:[Jg]},$$scope:{ctx:ee}}}),po=new Us({props:{code:`from transformers import DetrFeatureExtractor, DetrModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50') model = DetrModel.from_pretrained('facebook/detr-resnet-50') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DetrFeatureExtractor, DetrModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = DetrFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/detr-resnet-50&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DetrModel.from_pretrained(<span class="hljs-string">&#x27;facebook/detr-resnet-50&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),fo=new xt({}),uo=new S({props:{name:"class transformers.DetrForObjectDetection",anchor:"transformers.DetrForObjectDetection",parameters:[{name:"config",val:": DetrConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L1315",parametersDescription:[{anchor:"transformers.DetrForObjectDetection.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig">DetrConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),vo=new S({props:{name:"forward",anchor:"transformers.DetrForObjectDetection.forward",parameters:[{name:"pixel_values",val:""},{name:"pixel_mask",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L1341",parametersDescription:[{anchor:"transformers.DetrForObjectDetection.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it.</p> <p>Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor">DetrFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.__call__">DetrFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.DetrForObjectDetection.forward.pixel_mask",description:`<strong>pixel_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding pixel values. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. <strong>masked</strong>).</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"pixel_mask"},{anchor:"transformers.DetrForObjectDetection.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_queries)</code>, <em>optional</em>) &#x2014; Not used by default. Can be used to mask object queries.`,name:"decoder_attention_mask"},{anchor:"transformers.DetrForObjectDetection.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.DetrForObjectDetection.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image.`,name:"inputs_embeds"},{anchor:"transformers.DetrForObjectDetection.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation.`,name:"decoder_inputs_embeds"},{anchor:"transformers.DetrForObjectDetection.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DetrForObjectDetection.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DetrForObjectDetection.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DetrForObjectDetection.forward.labels",description:`<strong>labels</strong> (<code>List[Dict]</code> of len <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: &#x2018;class_labels&#x2019; and &#x2018;boxes&#x2019; (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a <code>torch.LongTensor</code> of len <code>(number of bounding boxes in the image,)</code> and the boxes a <code>torch.FloatTensor</code> of shape <code>(number of bounding boxes in the image, 4)</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.models.detr.modeling_detr.DetrObjectDetectionOutput" >transformers.models.detr.modeling_detr.DetrObjectDetectionOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig" >DetrConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> are provided)) \u2014 Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. 
The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss.</li> <li><strong>loss_dict</strong> (<code>Dict</code>, <em>optional</em>) \u2014 A dictionary containing the individual losses. Useful for logging.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, num_classes + 1)</code>) \u2014 Classification logits (including no-object) for all queries.</li> <li><strong>pred_boxes</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, 4)</code>) \u2014 Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process" >post_process()</a> to retrieve the unnormalized bounding boxes.</li> <li><strong>auxiliary_outputs</strong> (<code>list[Dict]</code>, <em>optional</em>) \u2014 Optional, only returned when auxilary losses are activated (i.e. <code>config.auxiliary_loss</code> is set to <em>True</em>) and labels are provided. It is a list of dictionaries containing the two above keys (<code>logits</code> and <code>pred_boxes</code>) for each decoder layer.</li> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</li> <li><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</li> <li><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> <li><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</li> <li><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</li> <li><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.models.detr.modeling_detr.DetrObjectDetectionOutput" >transformers.models.detr.modeling_detr.DetrObjectDetectionOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),rt=new Hs({props:{$$slots:{default:[Kg]},$$scope:{ctx:ee}}}),Do=new Us({props:{code:`from transformers import DetrFeatureExtractor, DetrForObjectDetection from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50') model = DetrForObjectDetection.from_pretrained('facebook/detr-resnet-50') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) # model predicts bounding boxes and corresponding COCO classes logits = outputs.logits bboxes = outputs.pred_boxes,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DetrFeatureExtractor, DetrForObjectDetection <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = DetrFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/detr-resnet-50&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DetrForObjectDetection.from_pretrained(<span class="hljs-string">&#x27;facebook/detr-resnet-50&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts bounding boxes and corresponding COCO classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>bboxes = outputs.pred_boxes`}}),yo=new xt({}),xo=new S({props:{name:"class transformers.DetrForSegmentation",anchor:"transformers.DetrForSegmentation",parameters:[{name:"config",val:": DetrConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L1472",parametersDescription:[{anchor:"transformers.DetrForSegmentation.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig">DetrConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fo=new S({props:{name:"forward",anchor:"transformers.DetrForSegmentation.forward",parameters:[{name:"pixel_values",val:""},{name:"pixel_mask",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/detr/modeling_detr.py#L1494",parametersDescription:[{anchor:"transformers.DetrForSegmentation.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it.</p> <p>Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor">DetrFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.__call__">DetrFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.DetrForSegmentation.forward.pixel_mask",description:`<strong>pixel_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding pixel values. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. <strong>masked</strong>).</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"pixel_mask"},{anchor:"transformers.DetrForSegmentation.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_queries)</code>, <em>optional</em>) &#x2014; Not used by default. 
Can be used to mask object queries.`,name:"decoder_attention_mask"},{anchor:"transformers.DetrForSegmentation.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.DetrForSegmentation.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image.`,name:"inputs_embeds"},{anchor:"transformers.DetrForSegmentation.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation.`,name:"decoder_inputs_embeds"},{anchor:"transformers.DetrForSegmentation.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.DetrForSegmentation.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.DetrForSegmentation.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.DetrForSegmentation.forward.labels",description:`<strong>labels</strong> (<code>List[Dict]</code> of len <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each dictionary containing at least the following 3 keys: &#x2018;class_labels&#x2019;, &#x2018;boxes&#x2019; and &#x2018;masks&#x2019; (the class labels, bounding boxes and segmentation masks of an image in the batch respectively). 
The class labels themselves should be a <code>torch.LongTensor</code> of len <code>(number of bounding boxes in the image,)</code>, the boxes a <code>torch.FloatTensor</code> of shape <code>(number of bounding boxes in the image, 4)</code> and the masks a <code>torch.FloatTensor</code> of shape <code>(number of bounding boxes in the image, height, width)</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.models.detr.modeling_detr.DetrSegmentationOutput" >transformers.models.detr.modeling_detr.DetrSegmentationOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig" >DetrConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> are provided)) \u2014 Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss.</li> <li><strong>loss_dict</strong> (<code>Dict</code>, <em>optional</em>) \u2014 A dictionary containing the individual losses. Useful for logging.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, num_classes + 1)</code>) \u2014 Classification logits (including no-object) for all queries.</li> <li><strong>pred_boxes</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, 4)</code>) \u2014 Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process" >post_process()</a> to retrieve the unnormalized bounding boxes.</li> <li><strong>pred_masks</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_queries, height/4, width/4)</code>) \u2014 Segmentation masks logits for all queries. See also <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_segmentation" >post_process_segmentation()</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_panoptic" >post_process_panoptic()</a> to evaluate instance and panoptic segmentation masks respectively.</li> <li><strong>auxiliary_outputs</strong> (<code>list[Dict]</code>, <em>optional</em>) \u2014 Optional, only returned when auxiliary losses are activated (i.e. <code>config.auxiliary_loss</code> is set to <em>True</em>) and labels are provided. 
It is a list of dictionaries containing the two above keys (<code>logits</code> and <code>pred_boxes</code>) for each decoder layer.</li> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</li> <li><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</li> <li><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> <li><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</li> <li><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</li> <li><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/detr#transformers.models.detr.modeling_detr.DetrSegmentationOutput" >transformers.models.detr.modeling_detr.DetrSegmentationOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),st=new Hs({props:{$$slots:{default:[Zg]},$$scope:{ctx:ee}}}),Oo=new Us({props:{code:`from transformers import DetrFeatureExtractor, DetrForSegmentation from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50-panoptic') model = DetrForSegmentation.from_pretrained('facebook/detr-resnet-50-panoptic') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) # model predicts COCO classes, bounding boxes, and masks logits = outputs.logits bboxes = outputs.pred_boxes masks = outputs.pred_masks,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DetrFeatureExtractor, DetrForSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = DetrFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;facebook/detr-resnet-50-panoptic&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DetrForSegmentation.from_pretrained(<span class="hljs-string">&#x27;facebook/detr-resnet-50-panoptic&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts COCO classes, bounding boxes, and masks</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>bboxes = outputs.pred_boxes <span class="hljs-meta">&gt;&gt;&gt; </span>masks = outputs.pred_masks`}}),{c(){p=a("meta"),E=d(),f=a("h1"),y=a("a"),j=a("span"),u(x.$$.fragment),T=d(),U=a("span"),Ws=o("DETR"),Ca=d(),we=a("h2"),He=a("a"),On=a("span"),u(Tt.$$.fragment),Gs=d(),kn=a("span"),Bs=o("Overview"),ja=d(),Ue=a("p"),Vs=o("The DETR model was proposed in "),wt=a("a"),Ys=o("End-to-End Object Detection with Transformers"),Qs=o(` by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov and Sergey Zagoruyko. DETR consists of a convolutional backbone followed by an encoder-decoder Transformer which can be trained end-to-end for object detection. It greatly simplifies a lot of the complexity of models like Faster-R-CNN and Mask-R-CNN, which use things like region proposals, non-maximum suppression procedure and anchor generation. 
Moreover, DETR can also be naturally extended to perform panoptic segmentation, by simply adding a mask head on top of the decoder outputs.`),qa=d(),$o=a("p"),Js=o("The abstract from the paper is the following:"),Pa=d(),zo=a("p"),$n=a("em"),Ks=o(`We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines.`),Na=d(),fe=a("p"),Zs=o("This model was contributed by "),Et=a("a"),Xs=o("nielsr"),ei=o(". The original code can be found "),Ft=a("a"),ti=o("here"),oi=o("."),Sa=d(),We=a("p"),ni=o("The quickest way to get started with DETR is by checking the "),Ot=a("a"),ri=o("example notebooks"),ai=o(` (which showcase both inference and fine-tuning on custom data).`),Aa=d(),Ge=a("p"),si=o("Here\u2019s a TLDR explaining how "),Co=a("a"),ii=o("DetrForObjectDetection"),di=o(" works:"),Ma=d(),F=a("p"),ci=o(`First, an image is sent through a pre-trained convolutional backbone (in the paper, the authors use ResNet-50/ResNet-101). Let\u2019s assume we also add a batch dimension. This means that the input to the backbone is a tensor of shape `),zn=a("code"),li=o("(batch_size, 3, height, width)"),hi=o(`, assuming the image has 3 color channels (RGB). The CNN backbone outputs a new lower-resolution feature map, typically of shape `),Cn=a("code"),mi=o("(batch_size, 2048, height/32, width/32)"),pi=o(`. This is then projected to match the hidden dimension of the Transformer of DETR, which is `),jn=a("code"),fi=o("256"),ui=o(` by default, using a `),qn=a("code"),gi=o("nn.Conv2D"),_i=o(" layer. So now, we have a tensor of shape "),Pn=a("code"),bi=o("(batch_size, 256, height/32, width/32)."),vi=o(` Next, the feature map is flattened and transposed to obtain a tensor of shape `),Nn=a("code"),Di=o("(batch_size, seq_len, d_model)"),yi=o(` = `),Sn=a("code"),xi=o("(batch_size, width/32*height/32, 256)"),Ti=o(`. So a difference with NLP models is that the sequence length is actually longer than usual, but with a smaller `),An=a("code"),wi=o("d_model"),Ei=o(" (which in NLP is typically 768 or higher)."),Ia=d(),z=a("p"),Fi=o("Next, this is sent through the encoder, outputting "),Mn=a("code"),Oi=o("encoder_hidden_states"),ki=o(` of the same shape (you can consider these as image features). Next, so-called `),In=a("strong"),$i=o("object queries"),zi=o(` are sent through the decoder. 
This is a tensor of shape `),Ln=a("code"),Ci=o("(batch_size, num_queries, d_model)"),ji=o(", with "),Rn=a("code"),qi=o("num_queries"),Pi=o(` typically set to 100 and initialized with zeros. These input embeddings are learnt positional encodings that the authors refer to as object queries, and similarly to the encoder, they are added to the input of each attention layer. Each object query will look for a particular object in the image. The decoder updates these embeddings through multiple self-attention and encoder-decoder attention layers to output `),Hn=a("code"),Ni=o("decoder_hidden_states"),Si=o(" of the same shape: "),Un=a("code"),Ai=o("(batch_size, num_queries, d_model)"),Mi=o(`. Next, two heads are added on top for object detection: a linear layer for classifying each object query into one of the objects or \u201Cno object\u201D, and a MLP to predict bounding boxes for each query.`),La=d(),J=a("p"),Ii=o("The model is trained using a "),Wn=a("strong"),Li=o("bipartite matching loss"),Ri=o(`: so what we actually do is compare the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a \u201Cno object\u201D as class and \u201Cno bounding box\u201D as bounding box). The `),kt=a("a"),Hi=o("Hungarian matching algorithm"),Ui=o(` is used to find an optimal one-to-one mapping of each of the N queries to each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and `),$t=a("a"),Wi=o("generalized IoU loss"),Gi=o(` (for the bounding boxes) are used to optimize the parameters of the model.`),Ra=d(),K=a("p"),Bi=o(`DETR can be naturally extended to perform panoptic segmentation (which unifies semantic segmentation and instance segmentation). `),jo=a("a"),Vi=o("DetrForSegmentation"),Yi=o(` adds a segmentation mask head on top of `),qo=a("a"),Qi=o("DetrForObjectDetection"),Ji=o(`. The mask head can be trained either jointly, or in a two steps process, where one first trains a `),Po=a("a"),Ki=o("DetrForObjectDetection"),Zi=o(` model to detect bounding boxes around both \u201Cthings\u201D (instances) and \u201Cstuff\u201D (background things like trees, roads, sky), then freeze all the weights and train only the mask head for 25 epochs. Experimentally, these two approaches give similar results. Note that predicting boxes is required for the training to be possible, since the Hungarian matching is computed using distances between boxes.`),Ha=d(),No=a("p"),Xi=o("Tips:"),Ua=d(),k=a("ul"),te=a("li"),ed=o("DETR uses so-called "),Gn=a("strong"),td=o("object queries"),od=o(` to detect objects in an image. The number of queries determines the maximum number of objects that can be detected in a single image, and is set to 100 by default (see parameter `),Bn=a("code"),nd=o("num_queries"),rd=o(" of "),So=a("a"),ad=o("DetrConfig"),sd=o(`). Note that it\u2019s good to have some slack (in COCO, the authors used 100, while the maximum number of objects in a COCO image is ~70).`),id=d(),Vn=a("li"),dd=o(`The decoder of DETR updates the query embeddings in parallel. This is different from language models like GPT-2, which use autoregressive decoding instead of parallel. Hence, no causal attention mask is used.`),cd=d(),oe=a("li"),ld=o(`DETR adds position embeddings to the hidden states at each self-attention and cross-attention layer before projecting to queries and keys. 
For the position embeddings of the image, one can choose between fixed sinusoidal or learned absolute position embeddings. By default, the parameter `),Yn=a("code"),hd=o("position_embedding_type"),md=o(` of `),Ao=a("a"),pd=o("DetrConfig"),fd=o(" is set to "),Qn=a("code"),ud=o('"sine"'),gd=o("."),_d=d(),ne=a("li"),bd=o(`During training, the authors of DETR did find it helpful to use auxiliary losses in the decoder, especially to help the model output the correct number of objects of each class. If you set the parameter `),Jn=a("code"),vd=o("auxiliary_loss"),Dd=o(` of `),Mo=a("a"),yd=o("DetrConfig"),xd=o(" to "),Kn=a("code"),Td=o("True"),wd=o(`, then prediction feedforward neural networks and Hungarian losses are added after each decoder layer (with the FFNs sharing parameters).`),Ed=d(),Y=a("li"),Fd=o(`If you want to train the model in a distributed environment across multiple nodes, then one should update the `),Zn=a("em"),Od=o("num_boxes"),kd=o(" variable in the "),Xn=a("em"),$d=o("DetrLoss"),zd=o(" class of "),er=a("em"),Cd=o("modeling_detr.py"),jd=o(`. When training on multiple nodes, this should be set to the average number of target boxes across all nodes, as can be seen in the original implementation `),zt=a("a"),qd=o("here"),Pd=o("."),Nd=d(),q=a("li"),Io=a("a"),Sd=o("DetrForObjectDetection"),Ad=o(" and "),Lo=a("a"),Md=o("DetrForSegmentation"),Id=o(` can be initialized with any convolutional backbone available in the `),Ct=a("a"),Ld=o("timm library"),Rd=o(`. Initializing with a MobileNet backbone for example can be done by setting the `),tr=a("code"),Hd=o("backbone"),Ud=o(` attribute of `),Ro=a("a"),Wd=o("DetrConfig"),Gd=o(" to "),or=a("code"),Bd=o('"tf_mobilenetv3_small_075"'),Vd=o(`, and then initializing the model with that config.`),Yd=d(),re=a("li"),Qd=o(`DETR resizes the input images such that the shortest side is at least a certain amount of pixels while the longest is at most 1333 pixels. At training time, scale augmentation is used such that the shortest side is randomly set to at least 480 and at most 800 pixels. At inference time, the shortest side is set to 800. One can use `),Ho=a("a"),Jd=o("DetrFeatureExtractor"),Kd=o(` to prepare images (and optional annotations in COCO format) for the model. Due to this resizing, images in a batch can have different sizes. DETR solves this by padding images up to the largest size in a batch, and by creating a pixel mask that indicates which pixels are real/which are padding. Alternatively, one can also define a custom `),nr=a("code"),Zd=o("collate_fn"),Xd=o(` in order to batch images together, using `),Uo=a("a"),ec=o("pad_and_create_pixel_mask()"),tc=o("."),oc=d(),Ee=a("li"),nc=o("The size of the images will determine the amount of memory being used, and will thus determine the "),rr=a("code"),rc=o("batch_size"),ac=o(`. It is advised to use a batch size of 2 per GPU. See `),jt=a("a"),sc=o("this Github thread"),ic=o(" for more info."),Wa=d(),Wo=a("p"),dc=o("As a summary, consider the following table:"),Ga=d(),Be=a("table"),ar=a("thead"),ae=a("tr"),sr=a("th"),cc=o("Task"),lc=d(),ir=a("th"),hc=o("Object detection"),mc=d(),dr=a("th"),pc=o("Instance segmentation"),fc=d(),cr=a("th"),uc=o("Panoptic segmentation"),gc=d(),A=a("tbody"),se=a("tr"),lr=a("td"),hr=a("strong"),_c=o("Description"),bc=d(),mr=a("td"),vc=o("Predicting bounding boxes and class labels around objects in an image"),Dc=d(),pr=a("td"),yc=o("Predicting masks around objects (i.e. instances) in an image"),xc=d(),fr=a("td"),Tc=o("Predicting masks around both objects (i.e. 
instances) as well as \u201Cstuff\u201D (i.e. background things like trees and roads) in an image"),wc=d(),ie=a("tr"),ur=a("td"),gr=a("strong"),Ec=o("Model"),Fc=d(),_r=a("td"),Go=a("a"),Oc=o("DetrForObjectDetection"),kc=d(),br=a("td"),Bo=a("a"),$c=o("DetrForSegmentation"),zc=d(),vr=a("td"),Vo=a("a"),Cc=o("DetrForSegmentation"),jc=d(),de=a("tr"),Dr=a("td"),yr=a("strong"),qc=o("Example dataset"),Pc=d(),xr=a("td"),Nc=o("COCO detection"),Sc=d(),Tr=a("td"),Ac=o("COCO detection, COCO panoptic"),Mc=d(),wr=a("td"),Ic=o("COCO panoptic"),Lc=d(),ce=a("tr"),qt=a("td"),Er=a("strong"),Rc=o("Format of annotations to provide to"),Hc=d(),Yo=a("a"),Uc=o("DetrFeatureExtractor"),Wc=d(),Fe=a("td"),Gc=o("{\u2018image_id\u2019: "),Fr=a("code"),Bc=o("int"),Vc=o(", \u2018annotations\u2019: "),Or=a("code"),Yc=o("List[Dict]"),Qc=o("} each Dict being a COCO object annotation"),Jc=d(),M=a("td"),Kc=o("{\u2018image_id\u2019: "),kr=a("code"),Zc=o("int"),Xc=o(", \u2018annotations\u2019: "),$r=a("code"),el=o("List[Dict]"),tl=o("} (in case of COCO detection) or {\u2018file_name\u2019: "),zr=a("code"),ol=o("str"),nl=o(", \u2018image_id\u2019: "),Cr=a("code"),rl=o("int"),al=o(", \u2018segments_info\u2019: "),jr=a("code"),sl=o("List[Dict]"),il=o("} (in case of COCO panoptic)"),dl=d(),le=a("td"),cl=o("{\u2018file_name\u2019: "),qr=a("code"),ll=o("str"),hl=o(", \u2018image_id\u2019: "),Pr=a("code"),ml=o("int"),pl=o(", \u2018segments_info\u2019: "),Nr=a("code"),fl=o("List[Dict]"),ul=o("} and masks_path (path to directory containing PNG files of the masks)"),gl=d(),he=a("tr"),Qo=a("td"),Sr=a("strong"),_l=o("Postprocessing"),bl=o(" (i.e. converting the output of the model to COCO API)"),vl=d(),Ar=a("td"),Jo=a("a"),Dl=o("post_process()"),yl=d(),Mr=a("td"),Ko=a("a"),xl=o("post_process_segmentation()"),Tl=d(),Pt=a("td"),Zo=a("a"),wl=o("post_process_segmentation()"),El=o(", "),Xo=a("a"),Fl=o("post_process_panoptic()"),Ol=d(),me=a("tr"),Ir=a("td"),Lr=a("strong"),kl=o("evaluators"),$l=d(),Nt=a("td"),Rr=a("code"),zl=o("CocoEvaluator"),Cl=o(" with "),Hr=a("code"),jl=o('iou_types="bbox"'),ql=d(),Oe=a("td"),Ur=a("code"),Pl=o("CocoEvaluator"),Nl=o(" with "),Wr=a("code"),Sl=o('iou_types="bbox"'),Al=o(" or "),Gr=a("code"),Ml=o('"segm"'),Il=d(),pe=a("td"),Br=a("code"),Ll=o("CocoEvaluator"),Rl=o(" with "),Vr=a("code"),Hl=o('iou_tupes="bbox"'),Ul=o(" or "),Yr=a("code"),Wl=o('"segm"'),Gl=o(", "),Qr=a("code"),Bl=o("PanopticEvaluator"),Ba=d(),w=a("p"),Vl=o(`In short, one should prepare the data either in COCO detection or COCO panoptic format, then use `),en=a("a"),Yl=o("DetrFeatureExtractor"),Ql=o(" to create "),Jr=a("code"),Jl=o("pixel_values"),Kl=o(", "),Kr=a("code"),Zl=o("pixel_mask"),Xl=o(` and optional `),Zr=a("code"),eh=o("labels"),th=o(`, which can then be used to train (or fine-tune) a model. For evaluation, one should first convert the outputs of the model using one of the postprocessing methods of `),tn=a("a"),oh=o("DetrFeatureExtractor"),nh=o(`. These can be be provided to either `),Xr=a("code"),rh=o("CocoEvaluator"),ah=o(" or "),ea=a("code"),sh=o("PanopticEvaluator"),ih=o(`, which allow you to calculate metrics like mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are implemented in the `),St=a("a"),dh=o("original repository"),ch=o(". 
See the "),At=a("a"),lh=o("example notebooks"),hh=o(" for more info regarding evaluation."),Va=d(),ke=a("h2"),Ve=a("a"),ta=a("span"),u(Mt.$$.fragment),mh=d(),oa=a("span"),ph=o("DETR specific outputs"),Ya=d(),$e=a("div"),u(It.$$.fragment),fh=d(),na=a("p"),uh=o(`Base class for outputs of the DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses.`),Qa=d(),ze=a("div"),u(Lt.$$.fragment),gh=d(),Rt=a("p"),_h=o("Output type of "),on=a("a"),bh=o("DetrForObjectDetection"),vh=o("."),Ja=d(),Ce=a("div"),u(Ht.$$.fragment),Dh=d(),Ut=a("p"),yh=o("Output type of "),nn=a("a"),xh=o("DetrForSegmentation"),Th=o("."),Ka=d(),je=a("h2"),Ye=a("a"),ra=a("span"),u(Wt.$$.fragment),wh=d(),aa=a("span"),Eh=o("DetrConfig"),Za=d(),I=a("div"),u(Gt.$$.fragment),Fh=d(),qe=a("p"),Oh=o("This is the configuration class to store the configuration of a "),rn=a("a"),kh=o("DetrModel"),$h=o(`. It is used to instantiate a DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DETR `),Bt=a("a"),zh=o("facebook/detr-resnet-50"),Ch=o(" architecture."),jh=d(),Pe=a("p"),qh=o("Configuration objects inherit from "),an=a("a"),Ph=o("PretrainedConfig"),Nh=o(` and can be used to control the model outputs. Read the documentation from `),sn=a("a"),Sh=o("PretrainedConfig"),Ah=o(" for more information."),Mh=d(),sa=a("p"),Ih=o("Examples:"),Lh=d(),u(Vt.$$.fragment),Xa=d(),Ne=a("h2"),Qe=a("a"),ia=a("span"),u(Yt.$$.fragment),Rh=d(),da=a("span"),Hh=o("DetrFeatureExtractor"),es=d(),O=a("div"),u(Qt.$$.fragment),Uh=d(),ca=a("p"),Wh=o("Constructs a DETR feature extractor."),Gh=d(),Jt=a("p"),Bh=o("This feature extractor inherits from "),la=a("code"),Vh=o("FeatureExtractionMixin"),Yh=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Qh=d(),ue=a("div"),u(Kt.$$.fragment),Jh=d(),ha=a("p"),Kh=o(`Main method to prepare for the model one or several image(s) and optional annotations. Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding.`),Zh=d(),u(Je.$$.fragment),Xh=d(),Ke=a("div"),u(Zt.$$.fragment),em=d(),Xt=a("p"),tm=o("Pad images up to the largest image in a batch and create a corresponding "),ma=a("code"),om=o("pixel_mask"),nm=o("."),rm=d(),Ze=a("div"),u(eo.$$.fragment),am=d(),to=a("p"),sm=o("Converts the output of "),dn=a("a"),im=o("DetrForObjectDetection"),dm=o(` into the format expected by the COCO api. Only supports PyTorch.`),cm=d(),Xe=a("div"),u(oo.$$.fragment),lm=d(),no=a("p"),hm=o("Converts the output of "),cn=a("a"),mm=o("DetrForSegmentation"),pm=o(` into image segmentation predictions. Only supports PyTorch.`),fm=d(),et=a("div"),u(ro.$$.fragment),um=d(),ao=a("p"),gm=o("Converts the output of "),ln=a("a"),_m=o("DetrForSegmentation"),bm=o(` into actual panoptic predictions. 
Only supports PyTorch.`),ts=d(),Se=a("h2"),tt=a("a"),pa=a("span"),u(so.$$.fragment),vm=d(),fa=a("span"),Dm=o("DetrModel"),os=d(),L=a("div"),u(io.$$.fragment),ym=d(),ua=a("p"),xm=o(`The bare DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top.`),Tm=d(),co=a("p"),wm=o("This model inherits from "),hn=a("a"),Em=o("PreTrainedModel"),Fm=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Om=d(),lo=a("p"),km=o("This model is also a PyTorch "),ho=a("a"),$m=o("torch.nn.Module"),zm=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cm=d(),W=a("div"),u(mo.$$.fragment),jm=d(),Ae=a("p"),qm=o("The "),mn=a("a"),Pm=o("DetrModel"),Nm=o(" forward method, overrides the "),ga=a("code"),Sm=o("__call__"),Am=o(" special method."),Mm=d(),u(ot.$$.fragment),Im=d(),_a=a("p"),Lm=o("Examples:"),Rm=d(),u(po.$$.fragment),ns=d(),Me=a("h2"),nt=a("a"),ba=a("span"),u(fo.$$.fragment),Hm=d(),va=a("span"),Um=o("DetrForObjectDetection"),rs=d(),R=a("div"),u(uo.$$.fragment),Wm=d(),Da=a("p"),Gm=o(`DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection.`),Bm=d(),go=a("p"),Vm=o("This model inherits from "),pn=a("a"),Ym=o("PreTrainedModel"),Qm=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jm=d(),_o=a("p"),Km=o("This model is also a PyTorch "),bo=a("a"),Zm=o("torch.nn.Module"),Xm=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ep=d(),G=a("div"),u(vo.$$.fragment),tp=d(),Ie=a("p"),op=o("The "),fn=a("a"),np=o("DetrForObjectDetection"),rp=o(" forward method, overrides the "),ya=a("code"),ap=o("__call__"),sp=o(" special method."),ip=d(),u(rt.$$.fragment),dp=d(),xa=a("p"),cp=o("Examples:"),lp=d(),u(Do.$$.fragment),as=d(),Le=a("h2"),at=a("a"),Ta=a("span"),u(yo.$$.fragment),hp=d(),wa=a("span"),mp=o("DetrForSegmentation"),ss=d(),H=a("div"),u(xo.$$.fragment),pp=d(),Ea=a("p"),fp=o(`DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks such as COCO panoptic.`),up=d(),To=a("p"),gp=o("This model inherits from "),un=a("a"),_p=o("PreTrainedModel"),bp=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vp=d(),wo=a("p"),Dp=o("This model is also a PyTorch "),Eo=a("a"),yp=o("torch.nn.Module"),xp=o(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tp=d(),B=a("div"),u(Fo.$$.fragment),wp=d(),Re=a("p"),Ep=o("The "),gn=a("a"),Fp=o("DetrForSegmentation"),Op=o(" forward method, overrides the "),Fa=a("code"),kp=o("__call__"),$p=o(" special method."),zp=d(),u(st.$$.fragment),Cp=d(),Oa=a("p"),jp=o("Examples:"),qp=d(),u(Oo.$$.fragment),this.h()},l(r){const h=Yg('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(t),E=c(r),f=s(r,"H1",{class:!0});var ko=i(f);y=s(ko,"A",{id:!0,class:!0,href:!0});var ka=i(y);j=s(ka,"SPAN",{});var $a=i(j);g(x.$$.fragment,$a),$a.forEach(t),ka.forEach(t),T=c(ko),U=s(ko,"SPAN",{});var za=i(U);Ws=n(za,"DETR"),za.forEach(t),ko.forEach(t),Ca=c(r),we=s(r,"H2",{class:!0});var ds=i(we);He=s(ds,"A",{id:!0,class:!0,href:!0});var Np=i(He);On=s(Np,"SPAN",{});var Sp=i(On);g(Tt.$$.fragment,Sp),Sp.forEach(t),Np.forEach(t),Gs=c(ds),kn=s(ds,"SPAN",{});var Ap=i(kn);Bs=n(Ap,"Overview"),Ap.forEach(t),ds.forEach(t),ja=c(r),Ue=s(r,"P",{});var cs=i(Ue);Vs=n(cs,"The DETR model was proposed in "),wt=s(cs,"A",{href:!0,rel:!0});var Mp=i(wt);Ys=n(Mp,"End-to-End Object Detection with Transformers"),Mp.forEach(t),Qs=n(cs,` by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov and Sergey Zagoruyko. DETR consists of a convolutional backbone followed by an encoder-decoder Transformer which can be trained end-to-end for object detection. It greatly simplifies a lot of the complexity of models like Faster-R-CNN and Mask-R-CNN, which use things like region proposals, non-maximum suppression procedure and anchor generation. Moreover, DETR can also be naturally extended to perform panoptic segmentation, by simply adding a mask head on top of the decoder outputs.`),cs.forEach(t),qa=c(r),$o=s(r,"P",{});var Ip=i($o);Js=n(Ip,"The abstract from the paper is the following:"),Ip.forEach(t),Pa=c(r),zo=s(r,"P",{});var Lp=i(zo);$n=s(Lp,"EM",{});var Rp=i($n);Ks=n(Rp,`We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines.`),Rp.forEach(t),Lp.forEach(t),Na=c(r),fe=s(r,"P",{});var _n=i(fe);Zs=n(_n,"This model was contributed by "),Et=s(_n,"A",{href:!0,rel:!0});var Hp=i(Et);Xs=n(Hp,"nielsr"),Hp.forEach(t),ei=n(_n,". 
The original code can be found "),Ft=s(_n,"A",{href:!0,rel:!0});var Up=i(Ft);ti=n(Up,"here"),Up.forEach(t),oi=n(_n,"."),_n.forEach(t),Sa=c(r),We=s(r,"P",{});var ls=i(We);ni=n(ls,"The quickest way to get started with DETR is by checking the "),Ot=s(ls,"A",{href:!0,rel:!0});var Wp=i(Ot);ri=n(Wp,"example notebooks"),Wp.forEach(t),ai=n(ls,` (which showcase both inference and fine-tuning on custom data).`),ls.forEach(t),Aa=c(r),Ge=s(r,"P",{});var hs=i(Ge);si=n(hs,"Here\u2019s a TLDR explaining how "),Co=s(hs,"A",{href:!0});var Gp=i(Co);ii=n(Gp,"DetrForObjectDetection"),Gp.forEach(t),di=n(hs," works:"),hs.forEach(t),Ma=c(r),F=s(r,"P",{});var C=i(F);ci=n(C,`First, an image is sent through a pre-trained convolutional backbone (in the paper, the authors use ResNet-50/ResNet-101). Let\u2019s assume we also add a batch dimension. This means that the input to the backbone is a tensor of shape `),zn=s(C,"CODE",{});var Bp=i(zn);li=n(Bp,"(batch_size, 3, height, width)"),Bp.forEach(t),hi=n(C,`, assuming the image has 3 color channels (RGB). The CNN backbone outputs a new lower-resolution feature map, typically of shape `),Cn=s(C,"CODE",{});var Vp=i(Cn);mi=n(Vp,"(batch_size, 2048, height/32, width/32)"),Vp.forEach(t),pi=n(C,`. This is then projected to match the hidden dimension of the Transformer of DETR, which is `),jn=s(C,"CODE",{});var Yp=i(jn);fi=n(Yp,"256"),Yp.forEach(t),ui=n(C,` by default, using a `),qn=s(C,"CODE",{});var Qp=i(qn);gi=n(Qp,"nn.Conv2D"),Qp.forEach(t),_i=n(C," layer. So now, we have a tensor of shape "),Pn=s(C,"CODE",{});var Jp=i(Pn);bi=n(Jp,"(batch_size, 256, height/32, width/32)."),Jp.forEach(t),vi=n(C,` Next, the feature map is flattened and transposed to obtain a tensor of shape `),Nn=s(C,"CODE",{});var Kp=i(Nn);Di=n(Kp,"(batch_size, seq_len, d_model)"),Kp.forEach(t),yi=n(C,` = `),Sn=s(C,"CODE",{});var Zp=i(Sn);xi=n(Zp,"(batch_size, width/32*height/32, 256)"),Zp.forEach(t),Ti=n(C,`. So a difference with NLP models is that the sequence length is actually longer than usual, but with a smaller `),An=s(C,"CODE",{});var Xp=i(An);wi=n(Xp,"d_model"),Xp.forEach(t),Ei=n(C," (which in NLP is typically 768 or higher)."),C.forEach(t),Ia=c(r),z=s(r,"P",{});var V=i(z);Fi=n(V,"Next, this is sent through the encoder, outputting "),Mn=s(V,"CODE",{});var ef=i(Mn);Oi=n(ef,"encoder_hidden_states"),ef.forEach(t),ki=n(V,` of the same shape (you can consider these as image features). Next, so-called `),In=s(V,"STRONG",{});var tf=i(In);$i=n(tf,"object queries"),tf.forEach(t),zi=n(V,` are sent through the decoder. This is a tensor of shape `),Ln=s(V,"CODE",{});var of=i(Ln);Ci=n(of,"(batch_size, num_queries, d_model)"),of.forEach(t),ji=n(V,", with "),Rn=s(V,"CODE",{});var nf=i(Rn);qi=n(nf,"num_queries"),nf.forEach(t),Pi=n(V,` typically set to 100 and initialized with zeros. These input embeddings are learnt positional encodings that the authors refer to as object queries, and similarly to the encoder, they are added to the input of each attention layer. Each object query will look for a particular object in the image. The decoder updates these embeddings through multiple self-attention and encoder-decoder attention layers to output `),Hn=s(V,"CODE",{});var rf=i(Hn);Ni=n(rf,"decoder_hidden_states"),rf.forEach(t),Si=n(V," of the same shape: "),Un=s(V,"CODE",{});var af=i(Un);Ai=n(af,"(batch_size, num_queries, d_model)"),af.forEach(t),Mi=n(V,`. 
Next, two heads are added on top for object detection: a linear layer for classifying each object query into one of the objects or \u201Cno object\u201D, and a MLP to predict bounding boxes for each query.`),V.forEach(t),La=c(r),J=s(r,"P",{});var it=i(J);Ii=n(it,"The model is trained using a "),Wn=s(it,"STRONG",{});var sf=i(Wn);Li=n(sf,"bipartite matching loss"),sf.forEach(t),Ri=n(it,`: so what we actually do is compare the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a \u201Cno object\u201D as class and \u201Cno bounding box\u201D as bounding box). The `),kt=s(it,"A",{href:!0,rel:!0});var df=i(kt);Hi=n(df,"Hungarian matching algorithm"),df.forEach(t),Ui=n(it,` is used to find an optimal one-to-one mapping of each of the N queries to each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and `),$t=s(it,"A",{href:!0,rel:!0});var cf=i($t);Wi=n(cf,"generalized IoU loss"),cf.forEach(t),Gi=n(it,` (for the bounding boxes) are used to optimize the parameters of the model.`),it.forEach(t),Ra=c(r),K=s(r,"P",{});var dt=i(K);Bi=n(dt,`DETR can be naturally extended to perform panoptic segmentation (which unifies semantic segmentation and instance segmentation). `),jo=s(dt,"A",{href:!0});var lf=i(jo);Vi=n(lf,"DetrForSegmentation"),lf.forEach(t),Yi=n(dt,` adds a segmentation mask head on top of `),qo=s(dt,"A",{href:!0});var hf=i(qo);Qi=n(hf,"DetrForObjectDetection"),hf.forEach(t),Ji=n(dt,`. The mask head can be trained either jointly, or in a two steps process, where one first trains a `),Po=s(dt,"A",{href:!0});var mf=i(Po);Ki=n(mf,"DetrForObjectDetection"),mf.forEach(t),Zi=n(dt,` model to detect bounding boxes around both \u201Cthings\u201D (instances) and \u201Cstuff\u201D (background things like trees, roads, sky), then freeze all the weights and train only the mask head for 25 epochs. Experimentally, these two approaches give similar results. Note that predicting boxes is required for the training to be possible, since the Hungarian matching is computed using distances between boxes.`),dt.forEach(t),Ha=c(r),No=s(r,"P",{});var pf=i(No);Xi=n(pf,"Tips:"),pf.forEach(t),Ua=c(r),k=s(r,"UL",{});var P=i(k);te=s(P,"LI",{});var ct=i(te);ed=n(ct,"DETR uses so-called "),Gn=s(ct,"STRONG",{});var ff=i(Gn);td=n(ff,"object queries"),ff.forEach(t),od=n(ct,` to detect objects in an image. The number of queries determines the maximum number of objects that can be detected in a single image, and is set to 100 by default (see parameter `),Bn=s(ct,"CODE",{});var uf=i(Bn);nd=n(uf,"num_queries"),uf.forEach(t),rd=n(ct," of "),So=s(ct,"A",{href:!0});var gf=i(So);ad=n(gf,"DetrConfig"),gf.forEach(t),sd=n(ct,`). Note that it\u2019s good to have some slack (in COCO, the authors used 100, while the maximum number of objects in a COCO image is ~70).`),ct.forEach(t),id=c(P),Vn=s(P,"LI",{});var _f=i(Vn);dd=n(_f,`The decoder of DETR updates the query embeddings in parallel. This is different from language models like GPT-2, which use autoregressive decoding instead of parallel. Hence, no causal attention mask is used.`),_f.forEach(t),cd=c(P),oe=s(P,"LI",{});var lt=i(oe);ld=n(lt,`DETR adds position embeddings to the hidden states at each self-attention and cross-attention layer before projecting to queries and keys. 
For the position embeddings of the image, one can choose between fixed sinusoidal or learned absolute position embeddings. By default, the parameter `),Yn=s(lt,"CODE",{});var bf=i(Yn);hd=n(bf,"position_embedding_type"),bf.forEach(t),md=n(lt,` of `),Ao=s(lt,"A",{href:!0});var vf=i(Ao);pd=n(vf,"DetrConfig"),vf.forEach(t),fd=n(lt," is set to "),Qn=s(lt,"CODE",{});var Df=i(Qn);ud=n(Df,'"sine"'),Df.forEach(t),gd=n(lt,"."),lt.forEach(t),_d=c(P),ne=s(P,"LI",{});var ht=i(ne);bd=n(ht,`During training, the authors of DETR did find it helpful to use auxiliary losses in the decoder, especially to help the model output the correct number of objects of each class. If you set the parameter `),Jn=s(ht,"CODE",{});var yf=i(Jn);vd=n(yf,"auxiliary_loss"),yf.forEach(t),Dd=n(ht,` of `),Mo=s(ht,"A",{href:!0});var xf=i(Mo);yd=n(xf,"DetrConfig"),xf.forEach(t),xd=n(ht," to "),Kn=s(ht,"CODE",{});var Tf=i(Kn);Td=n(Tf,"True"),Tf.forEach(t),wd=n(ht,`, then prediction feedforward neural networks and Hungarian losses are added after each decoder layer (with the FFNs sharing parameters).`),ht.forEach(t),Ed=c(P),Y=s(P,"LI",{});var ge=i(Y);Fd=n(ge,`If you want to train the model in a distributed environment across multiple nodes, then one should update the `),Zn=s(ge,"EM",{});var wf=i(Zn);Od=n(wf,"num_boxes"),wf.forEach(t),kd=n(ge," variable in the "),Xn=s(ge,"EM",{});var Ef=i(Xn);$d=n(Ef,"DetrLoss"),Ef.forEach(t),zd=n(ge," class of "),er=s(ge,"EM",{});var Ff=i(er);Cd=n(Ff,"modeling_detr.py"),Ff.forEach(t),jd=n(ge,`. When training on multiple nodes, this should be set to the average number of target boxes across all nodes, as can be seen in the original implementation `),zt=s(ge,"A",{href:!0,rel:!0});var Of=i(zt);qd=n(Of,"here"),Of.forEach(t),Pd=n(ge,"."),ge.forEach(t),Nd=c(P),q=s(P,"LI",{});var Q=i(q);Io=s(Q,"A",{href:!0});var kf=i(Io);Sd=n(kf,"DetrForObjectDetection"),kf.forEach(t),Ad=n(Q," and "),Lo=s(Q,"A",{href:!0});var $f=i(Lo);Md=n($f,"DetrForSegmentation"),$f.forEach(t),Id=n(Q,` can be initialized with any convolutional backbone available in the `),Ct=s(Q,"A",{href:!0,rel:!0});var zf=i(Ct);Ld=n(zf,"timm library"),zf.forEach(t),Rd=n(Q,`. Initializing with a MobileNet backbone for example can be done by setting the `),tr=s(Q,"CODE",{});var Cf=i(tr);Hd=n(Cf,"backbone"),Cf.forEach(t),Ud=n(Q,` attribute of `),Ro=s(Q,"A",{href:!0});var jf=i(Ro);Wd=n(jf,"DetrConfig"),jf.forEach(t),Gd=n(Q," to "),or=s(Q,"CODE",{});var qf=i(or);Bd=n(qf,'"tf_mobilenetv3_small_075"'),qf.forEach(t),Vd=n(Q,`, and then initializing the model with that config.`),Q.forEach(t),Yd=c(P),re=s(P,"LI",{});var mt=i(re);Qd=n(mt,`DETR resizes the input images such that the shortest side is at least a certain amount of pixels while the longest is at most 1333 pixels. At training time, scale augmentation is used such that the shortest side is randomly set to at least 480 and at most 800 pixels. At inference time, the shortest side is set to 800. One can use `),Ho=s(mt,"A",{href:!0});var Pf=i(Ho);Jd=n(Pf,"DetrFeatureExtractor"),Pf.forEach(t),Kd=n(mt,` to prepare images (and optional annotations in COCO format) for the model. Due to this resizing, images in a batch can have different sizes. DETR solves this by padding images up to the largest size in a batch, and by creating a pixel mask that indicates which pixels are real/which are padding. 
Alternatively, one can also define a custom `),nr=s(mt,"CODE",{});var Nf=i(nr);Zd=n(Nf,"collate_fn"),Nf.forEach(t),Xd=n(mt,` in order to batch images together, using `),Uo=s(mt,"A",{href:!0});var Sf=i(Uo);ec=n(Sf,"pad_and_create_pixel_mask()"),Sf.forEach(t),tc=n(mt,"."),mt.forEach(t),oc=c(P),Ee=s(P,"LI",{});var bn=i(Ee);nc=n(bn,"The size of the images will determine the amount of memory being used, and will thus determine the "),rr=s(bn,"CODE",{});var Af=i(rr);rc=n(Af,"batch_size"),Af.forEach(t),ac=n(bn,`. It is advised to use a batch size of 2 per GPU. See `),jt=s(bn,"A",{href:!0,rel:!0});var Mf=i(jt);sc=n(Mf,"this Github thread"),Mf.forEach(t),ic=n(bn," for more info."),bn.forEach(t),P.forEach(t),Wa=c(r),Wo=s(r,"P",{});var If=i(Wo);dc=n(If,"As a summary, consider the following table:"),If.forEach(t),Ga=c(r),Be=s(r,"TABLE",{});var ms=i(Be);ar=s(ms,"THEAD",{});var Lf=i(ar);ae=s(Lf,"TR",{});var pt=i(ae);sr=s(pt,"TH",{});var Rf=i(sr);cc=n(Rf,"Task"),Rf.forEach(t),lc=c(pt),ir=s(pt,"TH",{});var Hf=i(ir);hc=n(Hf,"Object detection"),Hf.forEach(t),mc=c(pt),dr=s(pt,"TH",{});var Uf=i(dr);pc=n(Uf,"Instance segmentation"),Uf.forEach(t),fc=c(pt),cr=s(pt,"TH",{});var Wf=i(cr);uc=n(Wf,"Panoptic segmentation"),Wf.forEach(t),pt.forEach(t),Lf.forEach(t),gc=c(ms),A=s(ms,"TBODY",{});var Z=i(A);se=s(Z,"TR",{});var ft=i(se);lr=s(ft,"TD",{});var Gf=i(lr);hr=s(Gf,"STRONG",{});var Bf=i(hr);_c=n(Bf,"Description"),Bf.forEach(t),Gf.forEach(t),bc=c(ft),mr=s(ft,"TD",{});var Vf=i(mr);vc=n(Vf,"Predicting bounding boxes and class labels around objects in an image"),Vf.forEach(t),Dc=c(ft),pr=s(ft,"TD",{});var Yf=i(pr);yc=n(Yf,"Predicting masks around objects (i.e. instances) in an image"),Yf.forEach(t),xc=c(ft),fr=s(ft,"TD",{});var Qf=i(fr);Tc=n(Qf,"Predicting masks around both objects (i.e. instances) as well as \u201Cstuff\u201D (i.e. 
background things like trees and roads) in an image"),Qf.forEach(t),ft.forEach(t),wc=c(Z),ie=s(Z,"TR",{});var ut=i(ie);ur=s(ut,"TD",{});var Jf=i(ur);gr=s(Jf,"STRONG",{});var Kf=i(gr);Ec=n(Kf,"Model"),Kf.forEach(t),Jf.forEach(t),Fc=c(ut),_r=s(ut,"TD",{});var Zf=i(_r);Go=s(Zf,"A",{href:!0});var Xf=i(Go);Oc=n(Xf,"DetrForObjectDetection"),Xf.forEach(t),Zf.forEach(t),kc=c(ut),br=s(ut,"TD",{});var eu=i(br);Bo=s(eu,"A",{href:!0});var tu=i(Bo);$c=n(tu,"DetrForSegmentation"),tu.forEach(t),eu.forEach(t),zc=c(ut),vr=s(ut,"TD",{});var ou=i(vr);Vo=s(ou,"A",{href:!0});var nu=i(Vo);Cc=n(nu,"DetrForSegmentation"),nu.forEach(t),ou.forEach(t),ut.forEach(t),jc=c(Z),de=s(Z,"TR",{});var gt=i(de);Dr=s(gt,"TD",{});var ru=i(Dr);yr=s(ru,"STRONG",{});var au=i(yr);qc=n(au,"Example dataset"),au.forEach(t),ru.forEach(t),Pc=c(gt),xr=s(gt,"TD",{});var su=i(xr);Nc=n(su,"COCO detection"),su.forEach(t),Sc=c(gt),Tr=s(gt,"TD",{});var iu=i(Tr);Ac=n(iu,"COCO detection, COCO panoptic"),iu.forEach(t),Mc=c(gt),wr=s(gt,"TD",{});var du=i(wr);Ic=n(du,"COCO panoptic"),du.forEach(t),gt.forEach(t),Lc=c(Z),ce=s(Z,"TR",{});var _t=i(ce);qt=s(_t,"TD",{});var ps=i(qt);Er=s(ps,"STRONG",{});var cu=i(Er);Rc=n(cu,"Format of annotations to provide to"),cu.forEach(t),Hc=c(ps),Yo=s(ps,"A",{href:!0});var lu=i(Yo);Uc=n(lu,"DetrFeatureExtractor"),lu.forEach(t),ps.forEach(t),Wc=c(_t),Fe=s(_t,"TD",{});var vn=i(Fe);Gc=n(vn,"{\u2018image_id\u2019: "),Fr=s(vn,"CODE",{});var hu=i(Fr);Bc=n(hu,"int"),hu.forEach(t),Vc=n(vn,", \u2018annotations\u2019: "),Or=s(vn,"CODE",{});var mu=i(Or);Yc=n(mu,"List[Dict]"),mu.forEach(t),Qc=n(vn,"} each Dict being a COCO object annotation"),vn.forEach(t),Jc=c(_t),M=s(_t,"TD",{});var X=i(M);Kc=n(X,"{\u2018image_id\u2019: "),kr=s(X,"CODE",{});var pu=i(kr);Zc=n(pu,"int"),pu.forEach(t),Xc=n(X,", \u2018annotations\u2019: "),$r=s(X,"CODE",{});var fu=i($r);el=n(fu,"List[Dict]"),fu.forEach(t),tl=n(X,"} (in case of COCO detection) or {\u2018file_name\u2019: "),zr=s(X,"CODE",{});var uu=i(zr);ol=n(uu,"str"),uu.forEach(t),nl=n(X,", \u2018image_id\u2019: "),Cr=s(X,"CODE",{});var gu=i(Cr);rl=n(gu,"int"),gu.forEach(t),al=n(X,", \u2018segments_info\u2019: "),jr=s(X,"CODE",{});var _u=i(jr);sl=n(_u,"List[Dict]"),_u.forEach(t),il=n(X,"} (in case of COCO panoptic)"),X.forEach(t),dl=c(_t),le=s(_t,"TD",{});var bt=i(le);cl=n(bt,"{\u2018file_name\u2019: "),qr=s(bt,"CODE",{});var bu=i(qr);ll=n(bu,"str"),bu.forEach(t),hl=n(bt,", \u2018image_id\u2019: "),Pr=s(bt,"CODE",{});var vu=i(Pr);ml=n(vu,"int"),vu.forEach(t),pl=n(bt,", \u2018segments_info\u2019: "),Nr=s(bt,"CODE",{});var Du=i(Nr);fl=n(Du,"List[Dict]"),Du.forEach(t),ul=n(bt,"} and masks_path (path to directory containing PNG files of the masks)"),bt.forEach(t),_t.forEach(t),gl=c(Z),he=s(Z,"TR",{});var vt=i(he);Qo=s(vt,"TD",{});var Pp=i(Qo);Sr=s(Pp,"STRONG",{});var yu=i(Sr);_l=n(yu,"Postprocessing"),yu.forEach(t),bl=n(Pp," (i.e. 
converting the output of the model to COCO API)"),Pp.forEach(t),vl=c(vt),Ar=s(vt,"TD",{});var xu=i(Ar);Jo=s(xu,"A",{href:!0});var Tu=i(Jo);Dl=n(Tu,"post_process()"),Tu.forEach(t),xu.forEach(t),yl=c(vt),Mr=s(vt,"TD",{});var wu=i(Mr);Ko=s(wu,"A",{href:!0});var Eu=i(Ko);xl=n(Eu,"post_process_segmentation()"),Eu.forEach(t),wu.forEach(t),Tl=c(vt),Pt=s(vt,"TD",{});var fs=i(Pt);Zo=s(fs,"A",{href:!0});var Fu=i(Zo);wl=n(Fu,"post_process_segmentation()"),Fu.forEach(t),El=n(fs,", "),Xo=s(fs,"A",{href:!0});var Ou=i(Xo);Fl=n(Ou,"post_process_panoptic()"),Ou.forEach(t),fs.forEach(t),vt.forEach(t),Ol=c(Z),me=s(Z,"TR",{});var Dt=i(me);Ir=s(Dt,"TD",{});var ku=i(Ir);Lr=s(ku,"STRONG",{});var $u=i(Lr);kl=n($u,"evaluators"),$u.forEach(t),ku.forEach(t),$l=c(Dt),Nt=s(Dt,"TD",{});var us=i(Nt);Rr=s(us,"CODE",{});var zu=i(Rr);zl=n(zu,"CocoEvaluator"),zu.forEach(t),Cl=n(us," with "),Hr=s(us,"CODE",{});var Cu=i(Hr);jl=n(Cu,'iou_types="bbox"'),Cu.forEach(t),us.forEach(t),ql=c(Dt),Oe=s(Dt,"TD",{});var Dn=i(Oe);Ur=s(Dn,"CODE",{});var ju=i(Ur);Pl=n(ju,"CocoEvaluator"),ju.forEach(t),Nl=n(Dn," with "),Wr=s(Dn,"CODE",{});var qu=i(Wr);Sl=n(qu,'iou_types="bbox"'),qu.forEach(t),Al=n(Dn," or "),Gr=s(Dn,"CODE",{});var Pu=i(Gr);Ml=n(Pu,'"segm"'),Pu.forEach(t),Dn.forEach(t),Il=c(Dt),pe=s(Dt,"TD",{});var yt=i(pe);Br=s(yt,"CODE",{});var Nu=i(Br);Ll=n(Nu,"CocoEvaluator"),Nu.forEach(t),Rl=n(yt," with "),Vr=s(yt,"CODE",{});var Su=i(Vr);Hl=n(Su,'iou_tupes="bbox"'),Su.forEach(t),Ul=n(yt," or "),Yr=s(yt,"CODE",{});var Au=i(Yr);Wl=n(Au,'"segm"'),Au.forEach(t),Gl=n(yt,", "),Qr=s(yt,"CODE",{});var Mu=i(Qr);Bl=n(Mu,"PanopticEvaluator"),Mu.forEach(t),yt.forEach(t),Dt.forEach(t),Z.forEach(t),ms.forEach(t),Ba=c(r),w=s(r,"P",{});var $=i(w);Vl=n($,`In short, one should prepare the data either in COCO detection or COCO panoptic format, then use `),en=s($,"A",{href:!0});var Iu=i(en);Yl=n(Iu,"DetrFeatureExtractor"),Iu.forEach(t),Ql=n($," to create "),Jr=s($,"CODE",{});var Lu=i(Jr);Jl=n(Lu,"pixel_values"),Lu.forEach(t),Kl=n($,", "),Kr=s($,"CODE",{});var Ru=i(Kr);Zl=n(Ru,"pixel_mask"),Ru.forEach(t),Xl=n($,` and optional `),Zr=s($,"CODE",{});var Hu=i(Zr);eh=n(Hu,"labels"),Hu.forEach(t),th=n($,`, which can then be used to train (or fine-tune) a model. For evaluation, one should first convert the outputs of the model using one of the postprocessing methods of `),tn=s($,"A",{href:!0});var Uu=i(tn);oh=n(Uu,"DetrFeatureExtractor"),Uu.forEach(t),nh=n($,`. These can be be provided to either `),Xr=s($,"CODE",{});var Wu=i(Xr);rh=n(Wu,"CocoEvaluator"),Wu.forEach(t),ah=n($," or "),ea=s($,"CODE",{});var Gu=i(ea);sh=n(Gu,"PanopticEvaluator"),Gu.forEach(t),ih=n($,`, which allow you to calculate metrics like mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are implemented in the `),St=s($,"A",{href:!0,rel:!0});var Bu=i(St);dh=n(Bu,"original repository"),Bu.forEach(t),ch=n($,". See the "),At=s($,"A",{href:!0,rel:!0});var Vu=i(At);lh=n(Vu,"example notebooks"),Vu.forEach(t),hh=n($," for more info regarding evaluation."),$.forEach(t),Va=c(r),ke=s(r,"H2",{class:!0});var gs=i(ke);Ve=s(gs,"A",{id:!0,class:!0,href:!0});var Yu=i(Ve);ta=s(Yu,"SPAN",{});var Qu=i(ta);g(Mt.$$.fragment,Qu),Qu.forEach(t),Yu.forEach(t),mh=c(gs),oa=s(gs,"SPAN",{});var Ju=i(oa);ph=n(Ju,"DETR specific outputs"),Ju.forEach(t),gs.forEach(t),Ya=c(r),$e=s(r,"DIV",{class:!0});var _s=i($e);g(It.$$.fragment,_s),fh=c(_s),na=s(_s,"P",{});var Ku=i(na);uh=n(Ku,`Base class for outputs of the DETR encoder-decoder model. 
This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses.`),Ku.forEach(t),_s.forEach(t),Qa=c(r),ze=s(r,"DIV",{class:!0});var bs=i(ze);g(Lt.$$.fragment,bs),gh=c(bs),Rt=s(bs,"P",{});var vs=i(Rt);_h=n(vs,"Output type of "),on=s(vs,"A",{href:!0});var Zu=i(on);bh=n(Zu,"DetrForObjectDetection"),Zu.forEach(t),vh=n(vs,"."),vs.forEach(t),bs.forEach(t),Ja=c(r),Ce=s(r,"DIV",{class:!0});var Ds=i(Ce);g(Ht.$$.fragment,Ds),Dh=c(Ds),Ut=s(Ds,"P",{});var ys=i(Ut);yh=n(ys,"Output type of "),nn=s(ys,"A",{href:!0});var Xu=i(nn);xh=n(Xu,"DetrForSegmentation"),Xu.forEach(t),Th=n(ys,"."),ys.forEach(t),Ds.forEach(t),Ka=c(r),je=s(r,"H2",{class:!0});var xs=i(je);Ye=s(xs,"A",{id:!0,class:!0,href:!0});var eg=i(Ye);ra=s(eg,"SPAN",{});var tg=i(ra);g(Wt.$$.fragment,tg),tg.forEach(t),eg.forEach(t),wh=c(xs),aa=s(xs,"SPAN",{});var og=i(aa);Eh=n(og,"DetrConfig"),og.forEach(t),xs.forEach(t),Za=c(r),I=s(r,"DIV",{class:!0});var _e=i(I);g(Gt.$$.fragment,_e),Fh=c(_e),qe=s(_e,"P",{});var yn=i(qe);Oh=n(yn,"This is the configuration class to store the configuration of a "),rn=s(yn,"A",{href:!0});var ng=i(rn);kh=n(ng,"DetrModel"),ng.forEach(t),$h=n(yn,`. It is used to instantiate a DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DETR `),Bt=s(yn,"A",{href:!0,rel:!0});var rg=i(Bt);zh=n(rg,"facebook/detr-resnet-50"),rg.forEach(t),Ch=n(yn," architecture."),yn.forEach(t),jh=c(_e),Pe=s(_e,"P",{});var xn=i(Pe);qh=n(xn,"Configuration objects inherit from "),an=s(xn,"A",{href:!0});var ag=i(an);Ph=n(ag,"PretrainedConfig"),ag.forEach(t),Nh=n(xn,` and can be used to control the model outputs. Read the documentation from `),sn=s(xn,"A",{href:!0});var sg=i(sn);Sh=n(sg,"PretrainedConfig"),sg.forEach(t),Ah=n(xn," for more information."),xn.forEach(t),Mh=c(_e),sa=s(_e,"P",{});var ig=i(sa);Ih=n(ig,"Examples:"),ig.forEach(t),Lh=c(_e),g(Vt.$$.fragment,_e),_e.forEach(t),Xa=c(r),Ne=s(r,"H2",{class:!0});var Ts=i(Ne);Qe=s(Ts,"A",{id:!0,class:!0,href:!0});var dg=i(Qe);ia=s(dg,"SPAN",{});var cg=i(ia);g(Yt.$$.fragment,cg),cg.forEach(t),dg.forEach(t),Rh=c(Ts),da=s(Ts,"SPAN",{});var lg=i(da);Hh=n(lg,"DetrFeatureExtractor"),lg.forEach(t),Ts.forEach(t),es=c(r),O=s(r,"DIV",{class:!0});var N=i(O);g(Qt.$$.fragment,N),Uh=c(N),ca=s(N,"P",{});var hg=i(ca);Wh=n(hg,"Constructs a DETR feature extractor."),hg.forEach(t),Gh=c(N),Jt=s(N,"P",{});var ws=i(Jt);Bh=n(ws,"This feature extractor inherits from "),la=s(ws,"CODE",{});var mg=i(la);Vh=n(mg,"FeatureExtractionMixin"),mg.forEach(t),Yh=n(ws,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ws.forEach(t),Qh=c(N),ue=s(N,"DIV",{class:!0});var Tn=i(ue);g(Kt.$$.fragment,Tn),Jh=c(Tn),ha=s(Tn,"P",{});var pg=i(ha);Kh=n(pg,`Main method to prepare for the model one or several image(s) and optional annotations. 
Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding.`),pg.forEach(t),Zh=c(Tn),g(Je.$$.fragment,Tn),Tn.forEach(t),Xh=c(N),Ke=s(N,"DIV",{class:!0});var Es=i(Ke);g(Zt.$$.fragment,Es),em=c(Es),Xt=s(Es,"P",{});var Fs=i(Xt);tm=n(Fs,"Pad images up to the largest image in a batch and create a corresponding "),ma=s(Fs,"CODE",{});var fg=i(ma);om=n(fg,"pixel_mask"),fg.forEach(t),nm=n(Fs,"."),Fs.forEach(t),Es.forEach(t),rm=c(N),Ze=s(N,"DIV",{class:!0});var Os=i(Ze);g(eo.$$.fragment,Os),am=c(Os),to=s(Os,"P",{});var ks=i(to);sm=n(ks,"Converts the output of "),dn=s(ks,"A",{href:!0});var ug=i(dn);im=n(ug,"DetrForObjectDetection"),ug.forEach(t),dm=n(ks,` into the format expected by the COCO api. Only supports PyTorch.`),ks.forEach(t),Os.forEach(t),cm=c(N),Xe=s(N,"DIV",{class:!0});var $s=i(Xe);g(oo.$$.fragment,$s),lm=c($s),no=s($s,"P",{});var zs=i(no);hm=n(zs,"Converts the output of "),cn=s(zs,"A",{href:!0});var gg=i(cn);mm=n(gg,"DetrForSegmentation"),gg.forEach(t),pm=n(zs,` into image segmentation predictions. Only supports PyTorch.`),zs.forEach(t),$s.forEach(t),fm=c(N),et=s(N,"DIV",{class:!0});var Cs=i(et);g(ro.$$.fragment,Cs),um=c(Cs),ao=s(Cs,"P",{});var js=i(ao);gm=n(js,"Converts the output of "),ln=s(js,"A",{href:!0});var _g=i(ln);_m=n(_g,"DetrForSegmentation"),_g.forEach(t),bm=n(js,` into actual panoptic predictions. Only supports PyTorch.`),js.forEach(t),Cs.forEach(t),N.forEach(t),ts=c(r),Se=s(r,"H2",{class:!0});var qs=i(Se);tt=s(qs,"A",{id:!0,class:!0,href:!0});var bg=i(tt);pa=s(bg,"SPAN",{});var vg=i(pa);g(so.$$.fragment,vg),vg.forEach(t),bg.forEach(t),vm=c(qs),fa=s(qs,"SPAN",{});var Dg=i(fa);Dm=n(Dg,"DetrModel"),Dg.forEach(t),qs.forEach(t),os=c(r),L=s(r,"DIV",{class:!0});var be=i(L);g(io.$$.fragment,be),ym=c(be),ua=s(be,"P",{});var yg=i(ua);xm=n(yg,`The bare DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top.`),yg.forEach(t),Tm=c(be),co=s(be,"P",{});var Ps=i(co);wm=n(Ps,"This model inherits from "),hn=s(Ps,"A",{href:!0});var xg=i(hn);Em=n(xg,"PreTrainedModel"),xg.forEach(t),Fm=n(Ps,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ps.forEach(t),Om=c(be),lo=s(be,"P",{});var Ns=i(lo);km=n(Ns,"This model is also a PyTorch "),ho=s(Ns,"A",{href:!0,rel:!0});var Tg=i(ho);$m=n(Tg,"torch.nn.Module"),Tg.forEach(t),zm=n(Ns,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ns.forEach(t),Cm=c(be),W=s(be,"DIV",{class:!0});var ve=i(W);g(mo.$$.fragment,ve),jm=c(ve),Ae=s(ve,"P",{});var wn=i(Ae);qm=n(wn,"The "),mn=s(wn,"A",{href:!0});var wg=i(mn);Pm=n(wg,"DetrModel"),wg.forEach(t),Nm=n(wn," forward method, overrides the "),ga=s(wn,"CODE",{});var Eg=i(ga);Sm=n(Eg,"__call__"),Eg.forEach(t),Am=n(wn," special method."),wn.forEach(t),Mm=c(ve),g(ot.$$.fragment,ve),Im=c(ve),_a=s(ve,"P",{});var Fg=i(_a);Lm=n(Fg,"Examples:"),Fg.forEach(t),Rm=c(ve),g(po.$$.fragment,ve),ve.forEach(t),be.forEach(t),ns=c(r),Me=s(r,"H2",{class:!0});var Ss=i(Me);nt=s(Ss,"A",{id:!0,class:!0,href:!0});var Og=i(nt);ba=s(Og,"SPAN",{});var kg=i(ba);g(fo.$$.fragment,kg),kg.forEach(t),Og.forEach(t),Hm=c(Ss),va=s(Ss,"SPAN",{});var $g=i(va);Um=n($g,"DetrForObjectDetection"),$g.forEach(t),Ss.forEach(t),rs=c(r),R=s(r,"DIV",{class:!0});var De=i(R);g(uo.$$.fragment,De),Wm=c(De),Da=s(De,"P",{});var zg=i(Da);Gm=n(zg,`DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection.`),zg.forEach(t),Bm=c(De),go=s(De,"P",{});var As=i(go);Vm=n(As,"This model inherits from "),pn=s(As,"A",{href:!0});var Cg=i(pn);Ym=n(Cg,"PreTrainedModel"),Cg.forEach(t),Qm=n(As,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),As.forEach(t),Jm=c(De),_o=s(De,"P",{});var Ms=i(_o);Km=n(Ms,"This model is also a PyTorch "),bo=s(Ms,"A",{href:!0,rel:!0});var jg=i(bo);Zm=n(jg,"torch.nn.Module"),jg.forEach(t),Xm=n(Ms,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ms.forEach(t),ep=c(De),G=s(De,"DIV",{class:!0});var ye=i(G);g(vo.$$.fragment,ye),tp=c(ye),Ie=s(ye,"P",{});var En=i(Ie);op=n(En,"The "),fn=s(En,"A",{href:!0});var qg=i(fn);np=n(qg,"DetrForObjectDetection"),qg.forEach(t),rp=n(En," forward method, overrides the "),ya=s(En,"CODE",{});var Pg=i(ya);ap=n(Pg,"__call__"),Pg.forEach(t),sp=n(En," special method."),En.forEach(t),ip=c(ye),g(rt.$$.fragment,ye),dp=c(ye),xa=s(ye,"P",{});var Ng=i(xa);cp=n(Ng,"Examples:"),Ng.forEach(t),lp=c(ye),g(Do.$$.fragment,ye),ye.forEach(t),De.forEach(t),as=c(r),Le=s(r,"H2",{class:!0});var Is=i(Le);at=s(Is,"A",{id:!0,class:!0,href:!0});var Sg=i(at);Ta=s(Sg,"SPAN",{});var Ag=i(Ta);g(yo.$$.fragment,Ag),Ag.forEach(t),Sg.forEach(t),hp=c(Is),wa=s(Is,"SPAN",{});var Mg=i(wa);mp=n(Mg,"DetrForSegmentation"),Mg.forEach(t),Is.forEach(t),ss=c(r),H=s(r,"DIV",{class:!0});var xe=i(H);g(xo.$$.fragment,xe),pp=c(xe),Ea=s(xe,"P",{});var Ig=i(Ea);fp=n(Ig,`DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks such as COCO panoptic.`),Ig.forEach(t),up=c(xe),To=s(xe,"P",{});var Ls=i(To);gp=n(Ls,"This model inherits from "),un=s(Ls,"A",{href:!0});var Lg=i(un);_p=n(Lg,"PreTrainedModel"),Lg.forEach(t),bp=n(Ls,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ls.forEach(t),vp=c(xe),wo=s(xe,"P",{});var Rs=i(wo);Dp=n(Rs,"This model is also a PyTorch "),Eo=s(Rs,"A",{href:!0,rel:!0});var Rg=i(Eo);yp=n(Rg,"torch.nn.Module"),Rg.forEach(t),xp=n(Rs,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rs.forEach(t),Tp=c(xe),B=s(xe,"DIV",{class:!0});var Te=i(B);g(Fo.$$.fragment,Te),wp=c(Te),Re=s(Te,"P",{});var Fn=i(Re);Ep=n(Fn,"The "),gn=s(Fn,"A",{href:!0});var Hg=i(gn);Fp=n(Hg,"DetrForSegmentation"),Hg.forEach(t),Op=n(Fn," forward method, overrides the "),Fa=s(Fn,"CODE",{});var Ug=i(Fa);kp=n(Ug,"__call__"),Ug.forEach(t),$p=n(Fn," special method."),Fn.forEach(t),zp=c(Te),g(st.$$.fragment,Te),Cp=c(Te),Oa=s(Te,"P",{});var Wg=i(Oa);jp=n(Wg,"Examples:"),Wg.forEach(t),qp=c(Te),g(Oo.$$.fragment,Te),Te.forEach(t),xe.forEach(t),this.h()},h(){l(p,"name","hf:doc:metadata"),l(p,"content",JSON.stringify(e_)),l(y,"id","detr"),l(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(y,"href","#detr"),l(f,"class","relative group"),l(He,"id","overview"),l(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(He,"href","#overview"),l(we,"class","relative group"),l(wt,"href","https://arxiv.org/abs/2005.12872"),l(wt,"rel","nofollow"),l(Et,"href","https://huggingface.co/nielsr"),l(Et,"rel","nofollow"),l(Ft,"href","https://github.com/facebookresearch/detr"),l(Ft,"rel","nofollow"),l(Ot,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR"),l(Ot,"rel","nofollow"),l(Co,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(kt,"href","https://en.wikipedia.org/wiki/Hungarian_algorithm"),l(kt,"rel","nofollow"),l($t,"href","https://giou.stanford.edu/"),l($t,"rel","nofollow"),l(jo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(qo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(Po,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(So,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig"),l(Ao,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig"),l(Mo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig"),l(zt,"href","https://github.com/facebookresearch/detr/blob/a54b77800eb8e64e3ad0d8237789fcbf2f8350c5/models/detr.py#L227-L232"),l(zt,"rel","nofollow"),l(Io,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(Lo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(Ct,"href","https://github.com/rwightman/pytorch-image-models"),l(Ct,"rel","nofollow"),l(Ro,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrConfig"),l(Ho,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor"),l(Uo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.pad_and_create_pixel_mask"),l(jt,"href","https://github.com/facebookresearch/detr/issues/150"),l(jt,"rel","nofollow"),l(Go,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(Bo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(Vo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(Yo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor"),l(Jo,"href",
"/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process"),l(Ko,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_segmentation"),l(Zo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_segmentation"),l(Xo,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor.post_process_panoptic"),l(en,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor"),l(tn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrFeatureExtractor"),l(St,"href","https://github.com/facebookresearch/detr"),l(St,"rel","nofollow"),l(At,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR"),l(At,"rel","nofollow"),l(Ve,"id","transformers.models.detr.modeling_detr.DetrModelOutput"),l(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ve,"href","#transformers.models.detr.modeling_detr.DetrModelOutput"),l(ke,"class","relative group"),l($e,"class","docstring"),l(on,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(ze,"class","docstring"),l(nn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(Ce,"class","docstring"),l(Ye,"id","transformers.DetrConfig"),l(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ye,"href","#transformers.DetrConfig"),l(je,"class","relative group"),l(rn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrModel"),l(Bt,"href","https://huggingface.co/facebook/detr-resnet-50"),l(Bt,"rel","nofollow"),l(an,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(sn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(I,"class","docstring"),l(Qe,"id","transformers.DetrFeatureExtractor"),l(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Qe,"href","#transformers.DetrFeatureExtractor"),l(Ne,"class","relative group"),l(ue,"class","docstring"),l(Ke,"class","docstring"),l(dn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(Ze,"class","docstring"),l(cn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(Xe,"class","docstring"),l(ln,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(et,"class","docstring"),l(O,"class","docstring"),l(tt,"id","transformers.DetrModel"),l(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(tt,"href","#transformers.DetrModel"),l(Se,"class","relative group"),l(hn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(ho,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(ho,"rel","nofollow"),l(mn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrModel"),l(W,"class","docstring"),l(L,"class","docstring"),l(nt,"id","transformers.DetrForObjectDetection"),l(nt,"class","header-link 
block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(nt,"href","#transformers.DetrForObjectDetection"),l(Me,"class","relative group"),l(pn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(bo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(bo,"rel","nofollow"),l(fn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForObjectDetection"),l(G,"class","docstring"),l(R,"class","docstring"),l(at,"id","transformers.DetrForSegmentation"),l(at,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(at,"href","#transformers.DetrForSegmentation"),l(Le,"class","relative group"),l(un,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Eo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Eo,"rel","nofollow"),l(gn,"href","/docs/transformers/v4.15.0/en/model_doc/detr#transformers.DetrForSegmentation"),l(B,"class","docstring"),l(H,"class","docstring")},m(r,h){e(document.head,p),m(r,E,h),m(r,f,h),e(f,y),e(y,j),_(x,j,null),e(f,T),e(f,U),e(U,Ws),m(r,Ca,h),m(r,we,h),e(we,He),e(He,On),_(Tt,On,null),e(we,Gs),e(we,kn),e(kn,Bs),m(r,ja,h),m(r,Ue,h),e(Ue,Vs),e(Ue,wt),e(wt,Ys),e(Ue,Qs),m(r,qa,h),m(r,$o,h),e($o,Js),m(r,Pa,h),m(r,zo,h),e(zo,$n),e($n,Ks),m(r,Na,h),m(r,fe,h),e(fe,Zs),e(fe,Et),e(Et,Xs),e(fe,ei),e(fe,Ft),e(Ft,ti),e(fe,oi),m(r,Sa,h),m(r,We,h),e(We,ni),e(We,Ot),e(Ot,ri),e(We,ai),m(r,Aa,h),m(r,Ge,h),e(Ge,si),e(Ge,Co),e(Co,ii),e(Ge,di),m(r,Ma,h),m(r,F,h),e(F,ci),e(F,zn),e(zn,li),e(F,hi),e(F,Cn),e(Cn,mi),e(F,pi),e(F,jn),e(jn,fi),e(F,ui),e(F,qn),e(qn,gi),e(F,_i),e(F,Pn),e(Pn,bi),e(F,vi),e(F,Nn),e(Nn,Di),e(F,yi),e(F,Sn),e(Sn,xi),e(F,Ti),e(F,An),e(An,wi),e(F,Ei),m(r,Ia,h),m(r,z,h),e(z,Fi),e(z,Mn),e(Mn,Oi),e(z,ki),e(z,In),e(In,$i),e(z,zi),e(z,Ln),e(Ln,Ci),e(z,ji),e(z,Rn),e(Rn,qi),e(z,Pi),e(z,Hn),e(Hn,Ni),e(z,Si),e(z,Un),e(Un,Ai),e(z,Mi),m(r,La,h),m(r,J,h),e(J,Ii),e(J,Wn),e(Wn,Li),e(J,Ri),e(J,kt),e(kt,Hi),e(J,Ui),e(J,$t),e($t,Wi),e(J,Gi),m(r,Ra,h),m(r,K,h),e(K,Bi),e(K,jo),e(jo,Vi),e(K,Yi),e(K,qo),e(qo,Qi),e(K,Ji),e(K,Po),e(Po,Ki),e(K,Zi),m(r,Ha,h),m(r,No,h),e(No,Xi),m(r,Ua,h),m(r,k,h),e(k,te),e(te,ed),e(te,Gn),e(Gn,td),e(te,od),e(te,Bn),e(Bn,nd),e(te,rd),e(te,So),e(So,ad),e(te,sd),e(k,id),e(k,Vn),e(Vn,dd),e(k,cd),e(k,oe),e(oe,ld),e(oe,Yn),e(Yn,hd),e(oe,md),e(oe,Ao),e(Ao,pd),e(oe,fd),e(oe,Qn),e(Qn,ud),e(oe,gd),e(k,_d),e(k,ne),e(ne,bd),e(ne,Jn),e(Jn,vd),e(ne,Dd),e(ne,Mo),e(Mo,yd),e(ne,xd),e(ne,Kn),e(Kn,Td),e(ne,wd),e(k,Ed),e(k,Y),e(Y,Fd),e(Y,Zn),e(Zn,Od),e(Y,kd),e(Y,Xn),e(Xn,$d),e(Y,zd),e(Y,er),e(er,Cd),e(Y,jd),e(Y,zt),e(zt,qd),e(Y,Pd),e(k,Nd),e(k,q),e(q,Io),e(Io,Sd),e(q,Ad),e(q,Lo),e(Lo,Md),e(q,Id),e(q,Ct),e(Ct,Ld),e(q,Rd),e(q,tr),e(tr,Hd),e(q,Ud),e(q,Ro),e(Ro,Wd),e(q,Gd),e(q,or),e(or,Bd),e(q,Vd),e(k,Yd),e(k,re),e(re,Qd),e(re,Ho),e(Ho,Jd),e(re,Kd),e(re,nr),e(nr,Zd),e(re,Xd),e(re,Uo),e(Uo,ec),e(re,tc),e(k,oc),e(k,Ee),e(Ee,nc),e(Ee,rr),e(rr,rc),e(Ee,ac),e(Ee,jt),e(jt,sc),e(Ee,ic),m(r,Wa,h),m(r,Wo,h),e(Wo,dc),m(r,Ga,h),m(r,Be,h),e(Be,ar),e(ar,ae),e(ae,sr),e(sr,cc),e(ae,lc),e(ae,ir),e(ir,hc),e(ae,mc),e(ae,dr),e(dr,pc),e(ae,fc),e(ae,cr),e(cr,uc),e(Be,gc),e(Be,A),e(A,se),e(se,lr),e(lr,hr),e(hr,_c),e(se,bc),e(se,mr),e(mr,vc),e(se,Dc),e(se,pr),e(pr,yc),e(se,xc),e(se,fr),e(fr,Tc),e(A,wc),e(A,ie),e(ie,ur),e(ur,gr),e(gr,Ec),e(ie,Fc),e(ie,_r),e(_r,Go),e(Go,Oc),e(ie,kc),e(ie,br)
,e(br,Bo),e(Bo,$c),e(ie,zc),e(ie,vr),e(vr,Vo),e(Vo,Cc),e(A,jc),e(A,de),e(de,Dr),e(Dr,yr),e(yr,qc),e(de,Pc),e(de,xr),e(xr,Nc),e(de,Sc),e(de,Tr),e(Tr,Ac),e(de,Mc),e(de,wr),e(wr,Ic),e(A,Lc),e(A,ce),e(ce,qt),e(qt,Er),e(Er,Rc),e(qt,Hc),e(qt,Yo),e(Yo,Uc),e(ce,Wc),e(ce,Fe),e(Fe,Gc),e(Fe,Fr),e(Fr,Bc),e(Fe,Vc),e(Fe,Or),e(Or,Yc),e(Fe,Qc),e(ce,Jc),e(ce,M),e(M,Kc),e(M,kr),e(kr,Zc),e(M,Xc),e(M,$r),e($r,el),e(M,tl),e(M,zr),e(zr,ol),e(M,nl),e(M,Cr),e(Cr,rl),e(M,al),e(M,jr),e(jr,sl),e(M,il),e(ce,dl),e(ce,le),e(le,cl),e(le,qr),e(qr,ll),e(le,hl),e(le,Pr),e(Pr,ml),e(le,pl),e(le,Nr),e(Nr,fl),e(le,ul),e(A,gl),e(A,he),e(he,Qo),e(Qo,Sr),e(Sr,_l),e(Qo,bl),e(he,vl),e(he,Ar),e(Ar,Jo),e(Jo,Dl),e(he,yl),e(he,Mr),e(Mr,Ko),e(Ko,xl),e(he,Tl),e(he,Pt),e(Pt,Zo),e(Zo,wl),e(Pt,El),e(Pt,Xo),e(Xo,Fl),e(A,Ol),e(A,me),e(me,Ir),e(Ir,Lr),e(Lr,kl),e(me,$l),e(me,Nt),e(Nt,Rr),e(Rr,zl),e(Nt,Cl),e(Nt,Hr),e(Hr,jl),e(me,ql),e(me,Oe),e(Oe,Ur),e(Ur,Pl),e(Oe,Nl),e(Oe,Wr),e(Wr,Sl),e(Oe,Al),e(Oe,Gr),e(Gr,Ml),e(me,Il),e(me,pe),e(pe,Br),e(Br,Ll),e(pe,Rl),e(pe,Vr),e(Vr,Hl),e(pe,Ul),e(pe,Yr),e(Yr,Wl),e(pe,Gl),e(pe,Qr),e(Qr,Bl),m(r,Ba,h),m(r,w,h),e(w,Vl),e(w,en),e(en,Yl),e(w,Ql),e(w,Jr),e(Jr,Jl),e(w,Kl),e(w,Kr),e(Kr,Zl),e(w,Xl),e(w,Zr),e(Zr,eh),e(w,th),e(w,tn),e(tn,oh),e(w,nh),e(w,Xr),e(Xr,rh),e(w,ah),e(w,ea),e(ea,sh),e(w,ih),e(w,St),e(St,dh),e(w,ch),e(w,At),e(At,lh),e(w,hh),m(r,Va,h),m(r,ke,h),e(ke,Ve),e(Ve,ta),_(Mt,ta,null),e(ke,mh),e(ke,oa),e(oa,ph),m(r,Ya,h),m(r,$e,h),_(It,$e,null),e($e,fh),e($e,na),e(na,uh),m(r,Qa,h),m(r,ze,h),_(Lt,ze,null),e(ze,gh),e(ze,Rt),e(Rt,_h),e(Rt,on),e(on,bh),e(Rt,vh),m(r,Ja,h),m(r,Ce,h),_(Ht,Ce,null),e(Ce,Dh),e(Ce,Ut),e(Ut,yh),e(Ut,nn),e(nn,xh),e(Ut,Th),m(r,Ka,h),m(r,je,h),e(je,Ye),e(Ye,ra),_(Wt,ra,null),e(je,wh),e(je,aa),e(aa,Eh),m(r,Za,h),m(r,I,h),_(Gt,I,null),e(I,Fh),e(I,qe),e(qe,Oh),e(qe,rn),e(rn,kh),e(qe,$h),e(qe,Bt),e(Bt,zh),e(qe,Ch),e(I,jh),e(I,Pe),e(Pe,qh),e(Pe,an),e(an,Ph),e(Pe,Nh),e(Pe,sn),e(sn,Sh),e(Pe,Ah),e(I,Mh),e(I,sa),e(sa,Ih),e(I,Lh),_(Vt,I,null),m(r,Xa,h),m(r,Ne,h),e(Ne,Qe),e(Qe,ia),_(Yt,ia,null),e(Ne,Rh),e(Ne,da),e(da,Hh),m(r,es,h),m(r,O,h),_(Qt,O,null),e(O,Uh),e(O,ca),e(ca,Wh),e(O,Gh),e(O,Jt),e(Jt,Bh),e(Jt,la),e(la,Vh),e(Jt,Yh),e(O,Qh),e(O,ue),_(Kt,ue,null),e(ue,Jh),e(ue,ha),e(ha,Kh),e(ue,Zh),_(Je,ue,null),e(O,Xh),e(O,Ke),_(Zt,Ke,null),e(Ke,em),e(Ke,Xt),e(Xt,tm),e(Xt,ma),e(ma,om),e(Xt,nm),e(O,rm),e(O,Ze),_(eo,Ze,null),e(Ze,am),e(Ze,to),e(to,sm),e(to,dn),e(dn,im),e(to,dm),e(O,cm),e(O,Xe),_(oo,Xe,null),e(Xe,lm),e(Xe,no),e(no,hm),e(no,cn),e(cn,mm),e(no,pm),e(O,fm),e(O,et),_(ro,et,null),e(et,um),e(et,ao),e(ao,gm),e(ao,ln),e(ln,_m),e(ao,bm),m(r,ts,h),m(r,Se,h),e(Se,tt),e(tt,pa),_(so,pa,null),e(Se,vm),e(Se,fa),e(fa,Dm),m(r,os,h),m(r,L,h),_(io,L,null),e(L,ym),e(L,ua),e(ua,xm),e(L,Tm),e(L,co),e(co,wm),e(co,hn),e(hn,Em),e(co,Fm),e(L,Om),e(L,lo),e(lo,km),e(lo,ho),e(ho,$m),e(lo,zm),e(L,Cm),e(L,W),_(mo,W,null),e(W,jm),e(W,Ae),e(Ae,qm),e(Ae,mn),e(mn,Pm),e(Ae,Nm),e(Ae,ga),e(ga,Sm),e(Ae,Am),e(W,Mm),_(ot,W,null),e(W,Im),e(W,_a),e(_a,Lm),e(W,Rm),_(po,W,null),m(r,ns,h),m(r,Me,h),e(Me,nt),e(nt,ba),_(fo,ba,null),e(Me,Hm),e(Me,va),e(va,Um),m(r,rs,h),m(r,R,h),_(uo,R,null),e(R,Wm),e(R,Da),e(Da,Gm),e(R,Bm),e(R,go),e(go,Vm),e(go,pn),e(pn,Ym),e(go,Qm),e(R,Jm),e(R,_o),e(_o,Km),e(_o,bo),e(bo,Zm),e(_o,Xm),e(R,ep),e(R,G),_(vo,G,null),e(G,tp),e(G,Ie),e(Ie,op),e(Ie,fn),e(fn,np),e(Ie,rp),e(Ie,ya),e(ya,ap),e(Ie,sp),e(G,ip),_(rt,G,null),e(G,dp),e(G,xa),e(xa,cp),e(G,lp),_(Do,G,null),m(r,as,h),m(r,Le,h),e(Le,at),e(at,Ta),_(yo,Ta,null),e(Le,hp),e(Le,wa),e(wa,mp),m(r,ss,h),m(r,H,h),_(xo,H,null),e(H,pp),e(H,Ea),e(Ea,fp),e(H,up),e(H,To),
e(To,gp),e(To,un),e(un,_p),e(To,bp),e(H,vp),e(H,wo),e(wo,Dp),e(wo,Eo),e(Eo,yp),e(wo,xp),e(H,Tp),e(H,B),_(Fo,B,null),e(B,wp),e(B,Re),e(Re,Ep),e(Re,gn),e(gn,Fp),e(Re,Op),e(Re,Fa),e(Fa,kp),e(Re,$p),e(B,zp),_(st,B,null),e(B,Cp),e(B,Oa),e(Oa,jp),e(B,qp),_(Oo,B,null),is=!0},p(r,[h]){const ko={};h&2&&(ko.$$scope={dirty:h,ctx:r}),Je.$set(ko);const ka={};h&2&&(ka.$$scope={dirty:h,ctx:r}),ot.$set(ka);const $a={};h&2&&($a.$$scope={dirty:h,ctx:r}),rt.$set($a);const za={};h&2&&(za.$$scope={dirty:h,ctx:r}),st.$set(za)},i(r){is||(b(x.$$.fragment,r),b(Tt.$$.fragment,r),b(Mt.$$.fragment,r),b(It.$$.fragment,r),b(Lt.$$.fragment,r),b(Ht.$$.fragment,r),b(Wt.$$.fragment,r),b(Gt.$$.fragment,r),b(Vt.$$.fragment,r),b(Yt.$$.fragment,r),b(Qt.$$.fragment,r),b(Kt.$$.fragment,r),b(Je.$$.fragment,r),b(Zt.$$.fragment,r),b(eo.$$.fragment,r),b(oo.$$.fragment,r),b(ro.$$.fragment,r),b(so.$$.fragment,r),b(io.$$.fragment,r),b(mo.$$.fragment,r),b(ot.$$.fragment,r),b(po.$$.fragment,r),b(fo.$$.fragment,r),b(uo.$$.fragment,r),b(vo.$$.fragment,r),b(rt.$$.fragment,r),b(Do.$$.fragment,r),b(yo.$$.fragment,r),b(xo.$$.fragment,r),b(Fo.$$.fragment,r),b(st.$$.fragment,r),b(Oo.$$.fragment,r),is=!0)},o(r){v(x.$$.fragment,r),v(Tt.$$.fragment,r),v(Mt.$$.fragment,r),v(It.$$.fragment,r),v(Lt.$$.fragment,r),v(Ht.$$.fragment,r),v(Wt.$$.fragment,r),v(Gt.$$.fragment,r),v(Vt.$$.fragment,r),v(Yt.$$.fragment,r),v(Qt.$$.fragment,r),v(Kt.$$.fragment,r),v(Je.$$.fragment,r),v(Zt.$$.fragment,r),v(eo.$$.fragment,r),v(oo.$$.fragment,r),v(ro.$$.fragment,r),v(so.$$.fragment,r),v(io.$$.fragment,r),v(mo.$$.fragment,r),v(ot.$$.fragment,r),v(po.$$.fragment,r),v(fo.$$.fragment,r),v(uo.$$.fragment,r),v(vo.$$.fragment,r),v(rt.$$.fragment,r),v(Do.$$.fragment,r),v(yo.$$.fragment,r),v(xo.$$.fragment,r),v(Fo.$$.fragment,r),v(st.$$.fragment,r),v(Oo.$$.fragment,r),is=!1},d(r){t(p),r&&t(E),r&&t(f),D(x),r&&t(Ca),r&&t(we),D(Tt),r&&t(ja),r&&t(Ue),r&&t(qa),r&&t($o),r&&t(Pa),r&&t(zo),r&&t(Na),r&&t(fe),r&&t(Sa),r&&t(We),r&&t(Aa),r&&t(Ge),r&&t(Ma),r&&t(F),r&&t(Ia),r&&t(z),r&&t(La),r&&t(J),r&&t(Ra),r&&t(K),r&&t(Ha),r&&t(No),r&&t(Ua),r&&t(k),r&&t(Wa),r&&t(Wo),r&&t(Ga),r&&t(Be),r&&t(Ba),r&&t(w),r&&t(Va),r&&t(ke),D(Mt),r&&t(Ya),r&&t($e),D(It),r&&t(Qa),r&&t(ze),D(Lt),r&&t(Ja),r&&t(Ce),D(Ht),r&&t(Ka),r&&t(je),D(Wt),r&&t(Za),r&&t(I),D(Gt),D(Vt),r&&t(Xa),r&&t(Ne),D(Yt),r&&t(es),r&&t(O),D(Qt),D(Kt),D(Je),D(Zt),D(eo),D(oo),D(ro),r&&t(ts),r&&t(Se),D(so),r&&t(os),r&&t(L),D(io),D(mo),D(ot),D(po),r&&t(ns),r&&t(Me),D(fo),r&&t(rs),r&&t(R),D(uo),D(vo),D(rt),D(Do),r&&t(as),r&&t(Le),D(yo),r&&t(ss),r&&t(H),D(xo),D(Fo),D(st),D(Oo)}}}const e_={local:"detr",sections:[{local:"overview",title:"Overview"},{local:"transformers.models.detr.modeling_detr.DetrModelOutput",title:"DETR specific outputs"},{local:"transformers.DetrConfig",title:"DetrConfig"},{local:"transformers.DetrFeatureExtractor",title:"DetrFeatureExtractor"},{local:"transformers.DetrModel",title:"DetrModel"},{local:"transformers.DetrForObjectDetection",title:"DetrForObjectDetection"},{local:"transformers.DetrForSegmentation",title:"DetrForSegmentation"}],title:"DETR"};function t_(ee,p,E){let{fw:f}=p;return ee.$$set=y=>{"fw"in y&&E(0,f=y.fw)},[f]}class d_ extends Gg{constructor(p){super();Bg(this,p,t_,Xg,Vg,{fw:0})}}export{d_ as default,e_ as metadata};
9,964
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/clip.mdx-3812a086.js
import{S as pg,i as hg,s as mg,e as n,k as d,w as _,t as r,L as fg,c as s,d as o,m as c,a,x as v,h as i,b as l,J as e,g as m,y as P,q as I,o as C,B as b}from"../../chunks/vendor-b1433968.js";import{T as re}from"../../chunks/Tip-c3840994.js";import{D as T}from"../../chunks/Docstring-ff504c58.js";import{C as ne}from"../../chunks/CodeBlock-a320dbd7.js";import{I as O}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ug(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("When used with "),f=n("code"),x=r("is_split_into_words=True"),$=r(`, this tokenizer will add a space before each word (even the first one).`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"When used with "),f=s(g,"CODE",{});var k=a(f);x=i(k,"is_split_into_words=True"),k.forEach(o),$=i(g,`, this tokenizer will add a space before each word (even the first one).`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function gg(y){let p,L,f,x,$,u,g,k;return{c(){p=n("p"),L=r("When used with "),f=n("code"),x=r("is_split_into_words=True"),$=r(`, this tokenizer needs to be instantiated with `),u=n("code"),g=r("add_prefix_space=True"),k=r(".")},l(E){p=s(E,"P",{});var w=a(p);L=i(w,"When used with "),f=s(w,"CODE",{});var A=a(f);x=i(A,"is_split_into_words=True"),A.forEach(o),$=i(w,`, this tokenizer needs to be instantiated with `),u=s(w,"CODE",{});var V=a(u);g=i(V,"add_prefix_space=True"),V.forEach(o),k=i(w,"."),w.forEach(o)},m(E,w){m(E,p,w),e(p,L),e(p,f),e(f,x),e(p,$),e(p,u),e(u,g),e(p,k)},d(E){E&&o(p)}}}function _g(y){let p,L,f,x,$,u,g,k;return{c(){p=n("p"),L=r(`This class method is simply calling CLIPFeatureExtractor\u2019s `),f=n("code"),x=r("from_pretrained"),$=r(` and CLIPTokenizer\u2019s `),u=n("code"),g=r("from_pretrained"),k=r(`. Please refer to the docstrings of the methods above for more information.`)},l(E){p=s(E,"P",{});var w=a(p);L=i(w,`This class method is simply calling CLIPFeatureExtractor\u2019s `),f=s(w,"CODE",{});var A=a(f);x=i(A,"from_pretrained"),A.forEach(o),$=i(w,` and CLIPTokenizer\u2019s `),u=s(w,"CODE",{});var V=a(u);g=i(V,"from_pretrained"),V.forEach(o),k=i(w,`. Please refer to the docstrings of the methods above for more information.`),w.forEach(o)},m(E,w){m(E,p,w),e(p,L),e(p,f),e(f,x),e(p,$),e(p,u),e(u,g),e(p,k)},d(E){E&&o(p)}}}function vg(y){let p,L,f,x,$,u,g,k;return{c(){p=n("p"),L=r("This class method is simply calling "),f=n("code"),x=r("save_pretrained"),$=r(` and `),u=n("code"),g=r("save_pretrained"),k=r(`. Please refer to the docstrings of the methods above for more information.`)},l(E){p=s(E,"P",{});var w=a(p);L=i(w,"This class method is simply calling "),f=s(w,"CODE",{});var A=a(f);x=i(A,"save_pretrained"),A.forEach(o),$=i(w,` and `),u=s(w,"CODE",{});var V=a(u);g=i(V,"save_pretrained"),V.forEach(o),k=i(w,`. 
Please refer to the docstrings of the methods above for more information.`),w.forEach(o)},m(E,w){m(E,p,w),e(p,L),e(p,f),e(f,x),e(p,$),e(p,u),e(u,g),e(p,k)},d(E){E&&o(p)}}}function Pg(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function Ig(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function Cg(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function bg(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function xg(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the 
latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function Lg(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function $g(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function kg(y){let p,L,f,x,$;return{c(){p=n("p"),L=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),x=r("Module"),$=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var g=a(p);L=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var k=a(f);x=i(k,"Module"),k.forEach(o),$=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,p,g),e(p,L),e(p,f),e(f,x),e(p,$)},d(u){u&&o(p)}}}function wg(y){let 
p,L,f,x,$,u,g,k,E,w,A,V,js,Ot,ji,Ms,Mi,fr,ot,qi,St,Fi,Ai,ur,$n,Di,gr,kn,qs,Vi,_r,we,nt,Fs,Wt,Ni,As,Oi,vr,wn,Si,Pr,st,Wi,Tn,Bi,Ri,Ir,D,Hi,yn,Ui,Gi,zn,Ji,Ki,En,Xi,Yi,jn,Zi,Qi,Mn,el,tl,qn,ol,nl,Cr,Bt,br,de,sl,Rt,al,rl,Ht,il,ll,xr,Te,at,Ds,Ut,dl,Vs,cl,Lr,se,Gt,pl,rt,Fn,hl,ml,An,fl,ul,gl,ye,_l,Dn,vl,Pl,Vn,Il,Cl,bl,it,Jt,xl,Kt,Ll,Nn,$l,kl,$r,ze,lt,Ns,Xt,wl,Os,Tl,kr,S,Yt,yl,Ee,zl,On,El,jl,Zt,Ml,ql,Fl,je,Al,Sn,Dl,Vl,Wn,Nl,Ol,Sl,Ss,Wl,Bl,Qt,wr,Me,dt,Ws,eo,Rl,Bs,Hl,Tr,W,to,Ul,qe,Gl,Bn,Jl,Kl,oo,Xl,Yl,Zl,Fe,Ql,Rn,ed,td,Hn,od,nd,sd,Rs,ad,rd,no,yr,Ae,ct,Hs,so,id,Us,ld,zr,z,ao,dd,Gs,cd,pd,Js,hd,md,ro,fd,Ks,ud,gd,_d,pt,vd,io,Pd,Un,Id,Cd,bd,ae,lo,xd,Xs,Ld,$d,Ys,Gn,kd,Zs,wd,Td,Qs,yd,zd,ht,co,Ed,po,jd,ea,Md,qd,Fd,ce,ho,Ad,Jn,Dd,Kn,Vd,Nd,ta,Od,Sd,oa,Er,De,mt,na,mo,Wd,sa,Bd,jr,q,fo,Rd,uo,Hd,aa,Ud,Gd,Jd,ra,Kd,Xd,go,Yd,_o,Zd,ia,Qd,ec,tc,ft,oc,vo,nc,Xn,sc,ac,Mr,Ve,ut,la,Po,rc,da,ic,qr,B,Io,lc,ca,dc,cc,Co,pc,pa,hc,mc,fc,gt,bo,uc,xo,gc,ha,_c,vc,Pc,_t,Lo,Ic,Ne,Cc,ma,bc,xc,fa,Lc,$c,Fr,Oe,vt,ua,$o,kc,ga,wc,Ar,F,ko,Tc,_a,yc,zc,H,Yn,Ec,jc,Zn,Mc,qc,Qn,Fc,Ac,va,Dc,Vc,es,Nc,Oc,Sc,Pt,wo,Wc,To,Bc,ts,Rc,Hc,Uc,It,yo,Gc,zo,Jc,os,Kc,Xc,Yc,pe,Eo,Zc,jo,Qc,ns,ep,tp,op,Ct,np,he,Mo,sp,Se,ap,Pa,rp,ip,ss,lp,dp,cp,bt,Dr,We,xt,Ia,qo,pp,Ca,hp,Vr,R,Fo,mp,Ao,fp,Do,up,gp,_p,U,Vo,vp,Be,Pp,as,Ip,Cp,ba,bp,xp,Lp,Lt,$p,xa,kp,wp,No,Tp,G,Oo,yp,Re,zp,rs,Ep,jp,La,Mp,qp,Fp,$t,Ap,$a,Dp,Vp,So,Np,J,Wo,Op,He,Sp,is,Wp,Bp,ka,Rp,Hp,Up,kt,Gp,wa,Jp,Kp,Bo,Nr,Ue,wt,Ta,Ro,Xp,ya,Yp,Or,Ho,K,Uo,Zp,Ge,Qp,ls,eh,th,za,oh,nh,sh,Tt,ah,Ea,rh,ih,Go,Sr,Je,yt,ja,Jo,lh,Ma,dh,Wr,Ko,X,Xo,ch,Ke,ph,ds,hh,mh,qa,fh,uh,gh,zt,_h,Fa,vh,Ph,Yo,Br,Xe,Et,Aa,Zo,Ih,Da,Ch,Rr,j,Qo,bh,en,xh,cs,Lh,$h,kh,tn,wh,on,Th,yh,zh,Va,Eh,jh,ie,Na,nn,Mh,qh,Oa,sn,Fh,Ah,Sa,an,Dh,Vh,Wa,rn,Nh,Oh,Y,ln,Sh,Ye,Wh,Ba,Bh,Rh,Ra,Hh,Uh,Gh,jt,Jh,Ha,Kh,Xh,dn,Yh,me,cn,Zh,Ua,Qh,em,pn,tm,fe,hn,om,Ga,nm,sm,mn,Hr,Ze,Mt,Ja,fn,am,Ka,rm,Ur,un,Z,gn,im,Qe,lm,Xa,dm,cm,Ya,pm,hm,mm,qt,fm,Za,um,gm,_n,Gr,et,Ft,Qa,vn,_m,er,vm,Jr,Pn,Q,In,Pm,tt,Im,tr,Cm,bm,or,xm,Lm,$m,At,km,nr,wm,Tm,Cn,Kr;return u=new O({}),Ot=new O({}),Wt=new O({}),Bt=new ne({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span 
class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits_per_image.softmax(dim=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),Ut=new O({}),Gt=new T({props:{name:"class transformers.CLIPConfig",anchor:"transformers.CLIPConfig",parameters:[{name:"text_config_dict",val:" = None"},{name:"vision_config_dict",val:" = None"},{name:"projection_dim",val:" = 512"},{name:"logit_scale_init_value",val:" = 2.6592"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/configuration_clip.py#L209",parametersDescription:[{anchor:"transformers.CLIPConfig.text_config_dict",description:`<strong>text_config_dict</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Dictionary of configuration options used to initialize <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextConfig">CLIPTextConfig</a>.`,name:"text_config_dict"},{anchor:"transformers.CLIPConfig.vision_config_dict",description:`<strong>vision_config_dict</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Dictionary of configuration options used to initialize <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionConfig">CLIPVisionConfig</a>.`,name:"vision_config_dict"},{anchor:"transformers.CLIPConfig.projection_dim",description:`<strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimentionality of text and vision projection layers.`,name:"projection_dim"},{anchor:"transformers.CLIPConfig.logit_scale_init_value",description:`<strong>logit_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 2.6592) &#x2014; The inital value of the <em>logit_scale</em> paramter. 
Default is used as per the original CLIP implementation.`,name:"logit_scale_init_value"},{anchor:"transformers.CLIPConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments.`,name:"kwargs"}]}}),Jt=new T({props:{name:"from_text_vision_configs",anchor:"transformers.CLIPConfig.from_text_vision_configs",parameters:[{name:"text_config",val:": CLIPTextConfig"},{name:"vision_config",val:": CLIPVisionConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/configuration_clip.py#L259",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig" >CLIPConfig</a></p> `}}),Xt=new O({}),Yt=new T({props:{name:"class transformers.CLIPTextConfig",anchor:"transformers.CLIPTextConfig",parameters:[{name:"vocab_size",val:" = 49408"},{name:"hidden_size",val:" = 512"},{name:"intermediate_size",val:" = 2048"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 8"},{name:"max_position_embeddings",val:" = 77"},{name:"hidden_act",val:" = 'quick_gelu'"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"dropout",val:" = 0.0"},{name:"attention_dropout",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"initializer_factor",val:" = 1.0"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/configuration_clip.py#L31",parametersDescription:[{anchor:"transformers.CLIPTextConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 49408) &#x2014; Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel">CLIPModel</a>.`,name:"vocab_size"},{anchor:"transformers.CLIPTextConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.CLIPTextConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.CLIPTextConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.CLIPTextConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.CLIPTextConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 77) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.CLIPTextConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;quick_gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> \`<code>&quot;quick_gelu&quot;</code> are supported. layer_norm_eps (<code>float</code>, <em>optional</em>, defaults to 1e-5): The epsilon used by the layer normalization layers.`,name:"hidden_act"},{anchor:"transformers.CLIPTextConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.CLIPTextConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.CLIPTextConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.CLIPTextConfig.initializer_factor",description:`<strong>initializer_factor</strong> (\`float&#x201C;, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"}]}}),Qt=new ne({props:{code:`from transformers import CLIPTextModel, CLIPTextConfig # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration configuration = CLIPTextConfig() # Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration model = CLIPTextModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTextModel, CLIPTextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = CLIPTextConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPTextModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),eo=new O({}),to=new T({props:{name:"class transformers.CLIPVisionConfig",anchor:"transformers.CLIPVisionConfig",parameters:[{name:"hidden_size",val:" = 768"},{name:"intermediate_size",val:" = 3072"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"image_size",val:" = 224"},{name:"patch_size",val:" = 32"},{name:"hidden_act",val:" = 'quick_gelu'"},{name:"layer_norm_eps",val:" = 
1e-05"},{name:"dropout",val:" = 0.0"},{name:"attention_dropout",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"initializer_factor",val:" = 1.0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/configuration_clip.py#L122",parametersDescription:[{anchor:"transformers.CLIPVisionConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.CLIPVisionConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.CLIPVisionConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.CLIPVisionConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.CLIPVisionConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.CLIPVisionConfig.patch_size",description:`<strong>patch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The size (resolution) of each patch.`,name:"patch_size"},{anchor:"transformers.CLIPVisionConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;quick_gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> \`<code>&quot;quick_gelu&quot;</code> are supported. 
layer_norm_eps (<code>float</code>, <em>optional</em>, defaults to 1e-5): The epsilon used by the layer normalization layers.`,name:"hidden_act"},{anchor:"transformers.CLIPVisionConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.CLIPVisionConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.CLIPVisionConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.CLIPVisionConfig.initializer_factor",description:`<strong>initializer_factor</strong> (\`float&#x201C;, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"}]}}),no=new ne({props:{code:`from transformers import CLIPVisionModel, CLIPVisionConfig # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration configuration = CLIPVisionConfig() # Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration model = CLIPVisionModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPVisionModel, CLIPVisionConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = CLIPVisionConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPVisionModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),so=new O({}),ao=new T({props:{name:"class transformers.CLIPTokenizer",anchor:"transformers.CLIPTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|startoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"pad_token",val:" = '<|endoftext|>'"},{name:"add_prefix_space",val:" = False"},{name:"do_lower_case",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/tokenization_clip.py#L100",parametersDescription:[{anchor:"transformers.CLIPTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.CLIPTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges 
file.`,name:"merges_file"},{anchor:"transformers.CLIPTokenizer.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.CLIPTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.CLIPTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The beginning of sequence token.`,name:"bos_token"},{anchor:"transformers.CLIPTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.CLIPTokenizer.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (CLIP tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"}]}}),pt=new re({props:{$$slots:{default:[ug]},$$scope:{ctx:y}}}),lo=new T({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.CLIPTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/tokenization_clip.py#L216",parametersDescription:[{anchor:"transformers.CLIPTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),co=new T({props:{name:"get_special_tokens_mask",anchor:"transformers.CLIPTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/tokenization_clip.py#L240",parametersDescription:[{anchor:"transformers.CLIPTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence 
pairs.`,name:"token_ids_1"},{anchor:"transformers.CLIPTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ho=new T({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2818",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The token type ids.</p> `,returnType:` <p><code>List[int]</code></p> `}}),mo=new O({}),fo=new T({props:{name:"class transformers.CLIPTokenizerFast",anchor:"transformers.CLIPTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|startoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"pad_token",val:" = '<|endoftext|>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/tokenization_clip_fast.py#L50",parametersDescription:[{anchor:"transformers.CLIPTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.CLIPTokenizerFast.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.CLIPTokenizerFast.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.CLIPTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.CLIPTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The beginning of sequence token.`,name:"bos_token"},{anchor:"transformers.CLIPTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.CLIPTokenizerFast.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (CLIP tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"},{anchor:"transformers.CLIPTokenizerFast.trim_offsets",description:`<strong>trim_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the post-processing step should trim offsets to avoid including whitespaces.`,name:"trim_offsets"}]}}),go=new ne({props:{code:`from transformers import CLIPTokenizerFast tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32") tokenizer("Hello world")['input_ids'] tokenizer(" Hello world")['input_ids'],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizerFast</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = CLIPTokenizerFast.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [15496, 995] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [18435, 995]`}}),ft=new re({props:{$$slots:{default:[gg]},$$scope:{ctx:y}}}),Po=new O({}),Io=new T({props:{name:"class transformers.CLIPFeatureExtractor",anchor:"transformers.CLIPFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 224"},{name:"resample",val:" = 3"},{name:"do_center_crop",val:" = True"},{name:"crop_size",val:" = 224"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/feature_extraction_clip.py#L31",parametersDescription:[{anchor:"transformers.CLIPFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.CLIPFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.CLIPFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.CLIPFeatureExtractor.do_center_crop",description:`<strong>do_center_crop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to crop the input at the center. If the input size is smaller than <code>crop_size</code> along any edge, the image is padded with 0&#x2019;s and then center cropped.`,name:"do_center_crop"},{anchor:"transformers.CLIPFeatureExtractor.crop_size",description:`<strong>crop_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; Desired output size when applying center-cropping. Only has an effect if <code>do_center_crop</code> is set to <code>True</code>.`,name:"crop_size"},{anchor:"transformers.CLIPFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with <code>image_mean</code> and <code>image_std</code>.`,name:"do_normalize"},{anchor:"transformers.CLIPFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.`,name:"image_mean"},{anchor:"transformers.CLIPFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.`,name:"image_std"}]}}),bo=new T({props:{name:"center_crop",anchor:"transformers.CLIPFeatureExtractor.center_crop",parameters:[{name:"image",val:""},{name:"size",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/feature_extraction_clip.py#L160",parametersDescription:[{anchor:"transformers.CLIPFeatureExtractor.center_crop.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.`,name:"image"},{anchor:"transformers.CLIPFeatureExtractor.center_crop.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to which crop the image.`,name:"size"}]}}),Lo=new T({props:{name:"resize",anchor:"transformers.CLIPFeatureExtractor.resize",parameters:[{name:"image",val:""},{name:"size",val:""},{name:"resample",val:" = 3"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/feature_extraction_clip.py#L186",parametersDescription:[{anchor:"transformers.CLIPFeatureExtractor.resize.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.`,name:"image"},{anchor:"transformers.CLIPFeatureExtractor.resize.size",description:`<strong>size</strong> (<code>int</code> or 
<code>Tuple[int, int]</code>) &#x2014; The size to use for resizing the image. If <code>int</code> it will be resized to match the shorter side`,name:"size"},{anchor:"transformers.CLIPFeatureExtractor.resize.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; The filter to user for resampling.`,name:"resample"}]}}),$o=new O({}),ko=new T({props:{name:"class transformers.CLIPProcessor",anchor:"transformers.CLIPProcessor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/processing_clip.py#L23",parametersDescription:[{anchor:"transformers.CLIPProcessor.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>) &#x2014; The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.CLIPProcessor.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>) &#x2014; The tokenizer is a required input.`,name:"tokenizer"}]}}),wo=new T({props:{name:"batch_decode",anchor:"transformers.CLIPProcessor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/processing_clip.py#L162"}}),yo=new T({props:{name:"decode",anchor:"transformers.CLIPProcessor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/processing_clip.py#L170"}}),Eo=new T({props:{name:"from_pretrained",anchor:"transformers.CLIPProcessor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/processing_clip.py#L72",parametersDescription:[{anchor:"transformers.CLIPProcessor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>clip-vit-base-patch32</code>, or namespaced under a user or organization name, like <code>openai/clip-vit-base-patch32</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <code>save_pretrained</code> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>.</li> </ul>`,name:"pretrained_model_name_or_path"}]}}),Ct=new re({props:{$$slots:{default:[_g]},$$scope:{ctx:y}}}),Mo=new T({props:{name:"save_pretrained",anchor:"transformers.CLIPProcessor.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/processing_clip.py#L50",parametersDescription:[{anchor:"transformers.CLIPProcessor.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"}]}}),bt=new re({props:{$$slots:{default:[vg]},$$scope:{ctx:y}}}),qo=new O({}),Fo=new T({props:{name:"class transformers.CLIPModel",anchor:"transformers.CLIPModel",parameters:[{name:"config",val:": CLIPConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_clip.py#L842",parametersDescription:[{anchor:"transformers.CLIPModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vo=new T({props:{name:"forward",anchor:"transformers.CLIPModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"pixel_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"return_loss",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_clip.py#L957",parametersDescription:[{anchor:"transformers.CLIPModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CLIPModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CLIPModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CLIPModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.CLIPModel.forward.return_loss",description:`<strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.`,name:"return_loss"},{anchor:"transformers.CLIPModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_clip.CLIPOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>return_loss</code> is <code>True</code>) \u2014 Contrastive loss for image-text similarity.</li> <li><strong>logits_per_image:(<code>torch.FloatTensor</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>torch.FloatTensor</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>image_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_clip.CLIPOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Lt=new re({props:{$$slots:{default:[Pg]},$$scope:{ctx:y}}}),No=new ne({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label 
probabilities,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits_per_image.softmax(dim=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),Oo=new T({props:{name:"get_text_features",anchor:"transformers.CLIPModel.get_text_features",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_clip.py#L875",parametersDescription:[{anchor:"transformers.CLIPModel.get_text_features.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CLIPModel.get_text_features.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CLIPModel.get_text_features.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CLIPModel.get_text_features.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPModel.get_text_features.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPModel.get_text_features.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</p> `,returnType:` <p>text_features (<code>torch.FloatTensor</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),$t=new re({props:{$$slots:{default:[Ig]},$$scope:{ctx:y}}}),So=new ne({props:{code:`from transformers import CLIPTokenizer, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") text_features = model.get_text_features(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text_features = model.get_text_features(**inputs)`}}),Wo=new T({props:{name:"get_image_features",anchor:"transformers.CLIPModel.get_image_features",parameters:[{name:"pixel_values",val:" = 
None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_clip.py#L915",parametersDescription:[{anchor:"transformers.CLIPModel.get_image_features.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.CLIPModel.get_image_features.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPModel.get_image_features.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPModel.get_image_features.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</p> `,returnType:` <p>image_features (<code>torch.FloatTensor</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),kt=new re({props:{$$slots:{default:[Cg]},$$scope:{ctx:y}}}),Bo=new ne({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="pt") image_features = model.get_image_features(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image_features = model.get_image_features(**inputs)`}}),Ro=new O({}),Uo=new T({props:{name:"forward",anchor:"transformers.CLIPTextModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_clip.py#L693",parametersDescription:[{anchor:"transformers.CLIPTextModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CLIPTextModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CLIPTextModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CLIPTextModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPTextModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPTextModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPTextConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Tt=new re({props:{$$slots:{default:[bg]},$$scope:{ctx:y}}}),Go=new ne({props:{code:`from transformers import CLIPTokenizer, CLIPTextModel model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooled_output = outputs.pooler_output # pooled (EOS token) states,`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, CLIPTextModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPTextModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-comment"># pooled (EOS token) states</span>`}}),Jo=new O({}),Xo=new T({props:{name:"forward",anchor:"transformers.CLIPVisionModel.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_clip.py#L802",parametersDescription:[{anchor:"transformers.CLIPVisionModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.CLIPVisionModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPVisionModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPVisionModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPVisionConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),zt=new re({props:{$$slots:{default:[xg]},$$scope:{ctx:y}}}),Yo=new ne({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPVisionModel model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_state = 
outputs.last_hidden_state pooled_output = outputs.pooler_output # pooled CLS states,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPVisionModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPVisionModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-comment"># pooled CLS states</span>`}}),Zo=new O({}),Qo=new T({props:{name:"class transformers.FlaxCLIPModel",anchor:"transformers.FlaxCLIPModel",parameters:[{name:"config",val:": CLIPConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_flax_clip.py#L1115",parametersDescription:[{anchor:"transformers.FlaxCLIPModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxCLIPModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ln=new T({props:{name:"__call__",anchor:"transformers.FlaxCLIPPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"pixel_values",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_flax_clip.py#L742",parametersDescription:[{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. 
See <code>CLIPFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.return_loss",description:`<strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.`,name:"return_loss"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>logits_per_image:(<code>jnp.ndarray</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>jnp.ndarray</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. 
This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>image_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),jt=new re({props:{$$slots:{default:[Lg]},$$scope:{ctx:y}}}),dn=new ne({props:{code:`import jax from PIL import Image import requests from transformers import CLIPProcessor, FlaxCLIPModel model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, FlaxCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = jax.nn.softmax(logits_per_image, axis=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),cn=new T({props:{name:"get_text_features",anchor:"transformers.FlaxCLIPPreTrainedModel.get_text_features",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_flax_clip.py#L787",parametersDescription:[{anchor:"transformers.FlaxCLIPPreTrainedModel.get_text_features.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"}],returnDescription:` <p>The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</p> `,returnType:` <p>text_features (<code>jnp.ndarray</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),pn=new ne({props:{code:`from transformers import CLIPTokenizer, FlaxCLIPModel model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") text_features = model.get_text_features(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, FlaxCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text_features = model.get_text_features(**inputs)`}}),hn=new T({props:{name:"get_image_features",anchor:"transformers.FlaxCLIPPreTrainedModel.get_image_features",parameters:[{name:"pixel_values",val:""},{name:"params",val:": dict = 
None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_flax_clip.py#L855",parametersDescription:[{anchor:"transformers.FlaxCLIPPreTrainedModel.get_image_features.pixel_values",description:`<strong>pixel_values</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"}],returnDescription:` <p>The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a></p> `,returnType:` <p>image_features (<code>jnp.ndarray</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),mn=new ne({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, FlaxCLIPModel model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="np") image_features = model.get_image_features(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, FlaxCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image_features = model.get_image_features(**inputs)`}}),fn=new O({}),gn=new T({props:{name:"__call__",anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_flax_clip.py#L609",parametersDescription:[{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPTextConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qt=new re({props:{$$slots:{default:[$g]},$$scope:{ctx:y}}}),_n=new ne({props:{code:`from transformers import CLIPTokenizer, FlaxCLIPTextModel model = FlaxCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooler_output = outputs.pooler_output # pooled (EOS token) states,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, 
FlaxCLIPTextModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPTextModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooler_output = outputs.pooler_output <span class="hljs-comment"># pooled (EOS token) states</span>`}}),vn=new O({}),In=new T({props:{name:"__call__",anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/clip/modeling_flax_clip.py#L678",parametersDescription:[{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPVisionConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),At=new re({props:{$$slots:{default:[kg]},$$scope:{ctx:y}}}),Cn=new ne({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, FlaxCLIPVisionModel model = FlaxCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="np") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooler_output = outputs.pooler_output # pooled CLS states,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, FlaxCLIPVisionModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPVisionModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooler_output = outputs.pooler_output <span class="hljs-comment"># pooled CLS states</span>`}}),{c(){p=n("meta"),L=d(),f=n("h1"),x=n("a"),$=n("span"),_(u.$$.fragment),g=d(),k=n("span"),E=r("CLIP"),w=d(),A=n("h2"),V=n("a"),js=n("span"),_(Ot.$$.fragment),ji=d(),Ms=n("span"),Mi=r("Overview"),fr=d(),ot=n("p"),qi=r("The CLIP model was proposed in "),St=n("a"),Fi=r("Learning Transferable Visual Models From Natural Language Supervision"),Ai=r(` by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3.`),ur=d(),$n=n("p"),Di=r("The abstract from the paper is the following:"),gr=d(),kn=n("p"),qs=n("em"),Vi=r(`State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. 
The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL.`),_r=d(),we=n("h2"),nt=n("a"),Fs=n("span"),_(Wt.$$.fragment),Ni=d(),As=n("span"),Oi=r("Usage"),vr=d(),wn=n("p"),Si=r(`CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text features. Both the text and visual features are then projected to a latent space with identical dimension. The dot product between the projected image and text features is then used as a similar score.`),Pr=d(),st=n("p"),Wi=r(`To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. The `),Tn=n("a"),Bi=r("CLIPFeatureExtractor"),Ri=r(" can be used to resize (or rescale) and normalize images for the model."),Ir=d(),D=n("p"),Hi=r("The "),yn=n("a"),Ui=r("CLIPTokenizer"),Gi=r(" is used to encode the text. The "),zn=n("a"),Ji=r("CLIPProcessor"),Ki=r(` wraps `),En=n("a"),Xi=r("CLIPFeatureExtractor"),Yi=r(" and "),jn=n("a"),Zi=r("CLIPTokenizer"),Qi=r(` into a single instance to both encode the text and prepare the images. The following example shows how to get the image-text similarity scores using `),Mn=n("a"),el=r("CLIPProcessor"),tl=r(" and "),qn=n("a"),ol=r("CLIPModel"),nl=r("."),Cr=d(),_(Bt.$$.fragment),br=d(),de=n("p"),sl=r("This model was contributed by "),Rt=n("a"),al=r("valhalla"),rl=r(". The original code can be found "),Ht=n("a"),il=r("here"),ll=r("."),xr=d(),Te=n("h2"),at=n("a"),Ds=n("span"),_(Ut.$$.fragment),dl=d(),Vs=n("span"),cl=r("CLIPConfig"),Lr=d(),se=n("div"),_(Gt.$$.fragment),pl=d(),rt=n("p"),Fn=n("a"),hl=r("CLIPConfig"),ml=r(` is the configuration class to store the configuration of a `),An=n("a"),fl=r("CLIPModel"),ul=r(`. It is used to instantiate CLIP model according to the specified arguments, defining the text model and vision model configs.`),gl=d(),ye=n("p"),_l=r("Configuration objects inherit from "),Dn=n("a"),vl=r("PretrainedConfig"),Pl=r(` and can be used to control the model outputs. Read the documentation from `),Vn=n("a"),Il=r("PretrainedConfig"),Cl=r(" for more information."),bl=d(),it=n("div"),_(Jt.$$.fragment),xl=d(),Kt=n("p"),Ll=r("Instantiate a "),Nn=n("a"),$l=r("CLIPConfig"),kl=r(` (or a derived class) from clip text model configuration and clip vision model configuration.`),$r=d(),ze=n("h2"),lt=n("a"),Ns=n("span"),_(Xt.$$.fragment),wl=d(),Os=n("span"),Tl=r("CLIPTextConfig"),kr=d(),S=n("div"),_(Yt.$$.fragment),yl=d(),Ee=n("p"),zl=r("This is the configuration class to store the configuration of a "),On=n("a"),El=r("CLIPModel"),jl=r(`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),Zt=n("a"),Ml=r("openai/clip-vit-base-patch32"),ql=r(" architecture."),Fl=d(),je=n("p"),Al=r("Configuration objects inherit from "),Sn=n("a"),Dl=r("PretrainedConfig"),Vl=r(` and can be used to control the model outputs. Read the documentation from `),Wn=n("a"),Nl=r("PretrainedConfig"),Ol=r(" for more information."),Sl=d(),Ss=n("p"),Wl=r("Example:"),Bl=d(),_(Qt.$$.fragment),wr=d(),Me=n("h2"),dt=n("a"),Ws=n("span"),_(eo.$$.fragment),Rl=d(),Bs=n("span"),Hl=r("CLIPVisionConfig"),Tr=d(),W=n("div"),_(to.$$.fragment),Ul=d(),qe=n("p"),Gl=r("This is the configuration class to store the configuration of a "),Bn=n("a"),Jl=r("CLIPModel"),Kl=r(`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),oo=n("a"),Xl=r("openai/clip-vit-base-patch32"),Yl=r(" architecture."),Zl=d(),Fe=n("p"),Ql=r("Configuration objects inherit from "),Rn=n("a"),ed=r("PretrainedConfig"),td=r(` and can be used to control the model outputs. Read the documentation from `),Hn=n("a"),od=r("PretrainedConfig"),nd=r(" for more information."),sd=d(),Rs=n("p"),ad=r("Example:"),rd=d(),_(no.$$.fragment),yr=d(),Ae=n("h2"),ct=n("a"),Hs=n("span"),_(so.$$.fragment),id=d(),Us=n("span"),ld=r("CLIPTokenizer"),zr=d(),z=n("div"),_(ao.$$.fragment),dd=d(),Gs=n("p"),cd=r("Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding."),pd=d(),Js=n("p"),hd=r(`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),md=d(),ro=n("p"),fd=r("You can get around that behavior by passing "),Ks=n("code"),ud=r("add_prefix_space=True"),gd=r(` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),_d=d(),_(pt.$$.fragment),vd=d(),io=n("p"),Pd=r("This tokenizer inherits from "),Un=n("a"),Id=r("PreTrainedTokenizer"),Cd=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),bd=d(),ae=n("div"),_(lo.$$.fragment),xd=d(),Xs=n("p"),Ld=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CLIP sequence has the following format:`),$d=d(),Ys=n("ul"),Gn=n("li"),kd=r("single sequence: "),Zs=n("code"),wd=r("<|startoftext|> X <|endoftext|>"),Td=d(),Qs=n("p"),yd=r("Pairs of sequences are not the expected use case, but they will be handled without a separator."),zd=d(),ht=n("div"),_(co.$$.fragment),Ed=d(),po=n("p"),jd=r(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ea=n("code"),Md=r("prepare_for_model"),qd=r(" method."),Fd=d(),ce=n("div"),_(ho.$$.fragment),Ad=d(),Jn=n("p"),Dd=r("Create the token type IDs corresponding to the sequences passed. 
"),Kn=n("a"),Vd=r("What are token type IDs?"),Nd=d(),ta=n("p"),Od=r("Should be overridden in a subclass if the model has a special way of building those."),Sd=d(),oa=n("div"),Er=d(),De=n("h2"),mt=n("a"),na=n("span"),_(mo.$$.fragment),Wd=d(),sa=n("span"),Bd=r("CLIPTokenizerFast"),jr=d(),q=n("div"),_(fo.$$.fragment),Rd=d(),uo=n("p"),Hd=r("Construct a \u201Cfast\u201D CLIP tokenizer (backed by HuggingFace\u2019s "),aa=n("em"),Ud=r("tokenizers"),Gd=r(` library). Based on byte-level Byte-Pair-Encoding.`),Jd=d(),ra=n("p"),Kd=r(`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),Xd=d(),_(go.$$.fragment),Yd=d(),_o=n("p"),Zd=r("You can get around that behavior by passing "),ia=n("code"),Qd=r("add_prefix_space=True"),ec=r(` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),tc=d(),_(ft.$$.fragment),oc=d(),vo=n("p"),nc=r("This tokenizer inherits from "),Xn=n("a"),sc=r("PreTrainedTokenizerFast"),ac=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Mr=d(),Ve=n("h2"),ut=n("a"),la=n("span"),_(Po.$$.fragment),rc=d(),da=n("span"),ic=r("CLIPFeatureExtractor"),qr=d(),B=n("div"),_(Io.$$.fragment),lc=d(),ca=n("p"),dc=r("Constructs a CLIP feature extractor."),cc=d(),Co=n("p"),pc=r("This feature extractor inherits from "),pa=n("code"),hc=r("FeatureExtractionMixin"),mc=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),fc=d(),gt=n("div"),_(bo.$$.fragment),uc=d(),xo=n("p"),gc=r("Crops "),ha=n("code"),_c=r("image"),vc=r(` to the given size using a center crop. Note that if the image is too small to be cropped to the size is given, it will be padded (so the returned result has the size asked).`),Pc=d(),_t=n("div"),_(Lo.$$.fragment),Ic=d(),Ne=n("p"),Cc=r("Resizes "),ma=n("code"),bc=r("image"),xc=r(". Note that this will trigger a conversion of "),fa=n("code"),Lc=r("image"),$c=r(" to a PIL Image."),Fr=d(),Oe=n("h2"),vt=n("a"),ua=n("span"),_($o.$$.fragment),kc=d(),ga=n("span"),wc=r("CLIPProcessor"),Ar=d(),F=n("div"),_(ko.$$.fragment),Tc=d(),_a=n("p"),yc=r("Constructs a CLIP processor which wraps a CLIP feature extractor and a CLIP tokenizer into a single processor."),zc=d(),H=n("p"),Yn=n("a"),Ec=r("CLIPProcessor"),jc=r(" offers all the functionalities of "),Zn=n("a"),Mc=r("CLIPFeatureExtractor"),qc=r(` and `),Qn=n("a"),Fc=r("CLIPTokenizer"),Ac=r(". See the "),va=n("code"),Dc=r("__call__()"),Vc=r(` and `),es=n("a"),Nc=r("decode()"),Oc=r(" for more information."),Sc=d(),Pt=n("div"),_(wo.$$.fragment),Wc=d(),To=n("p"),Bc=r(`This method forwards all its arguments to CLIPTokenizer\u2019s `),ts=n("a"),Rc=r("batch_decode()"),Hc=r(`. Please refer to the docstring of this method for more information.`),Uc=d(),It=n("div"),_(yo.$$.fragment),Gc=d(),zo=n("p"),Jc=r("This method forwards all its arguments to CLIPTokenizer\u2019s "),os=n("a"),Kc=r("decode()"),Xc=r(`. 
Please refer to the docstring of this method for more information.`),Yc=d(),pe=n("div"),_(Eo.$$.fragment),Zc=d(),jo=n("p"),Qc=r("Instantiate a "),ns=n("a"),ep=r("CLIPProcessor"),tp=r(" from a pretrained CLIP processor."),op=d(),_(Ct.$$.fragment),np=d(),he=n("div"),_(Mo.$$.fragment),sp=d(),Se=n("p"),ap=r("Save a CLIP feature extractor object and CLIP tokenizer object to the directory "),Pa=n("code"),rp=r("save_directory"),ip=r(`, so that it can be re-loaded using the `),ss=n("a"),lp=r("from_pretrained()"),dp=r(" class method."),cp=d(),_(bt.$$.fragment),Dr=d(),We=n("h2"),xt=n("a"),Ia=n("span"),_(qo.$$.fragment),pp=d(),Ca=n("span"),hp=r("CLIPModel"),Vr=d(),R=n("div"),_(Fo.$$.fragment),mp=d(),Ao=n("p"),fp=r("This model is a PyTorch "),Do=n("a"),up=r("torch.nn.Module"),gp=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_p=d(),U=n("div"),_(Vo.$$.fragment),vp=d(),Be=n("p"),Pp=r("The "),as=n("a"),Ip=r("CLIPModel"),Cp=r(" forward method, overrides the "),ba=n("code"),bp=r("__call__"),xp=r(" special method."),Lp=d(),_(Lt.$$.fragment),$p=d(),xa=n("p"),kp=r("Examples:"),wp=d(),_(No.$$.fragment),Tp=d(),G=n("div"),_(Oo.$$.fragment),yp=d(),Re=n("p"),zp=r("The "),rs=n("a"),Ep=r("CLIPModel"),jp=r(" forward method, overrides the "),La=n("code"),Mp=r("__call__"),qp=r(" special method."),Fp=d(),_($t.$$.fragment),Ap=d(),$a=n("p"),Dp=r("Examples:"),Vp=d(),_(So.$$.fragment),Np=d(),J=n("div"),_(Wo.$$.fragment),Op=d(),He=n("p"),Sp=r("The "),is=n("a"),Wp=r("CLIPModel"),Bp=r(" forward method, overrides the "),ka=n("code"),Rp=r("__call__"),Hp=r(" special method."),Up=d(),_(kt.$$.fragment),Gp=d(),wa=n("p"),Jp=r("Examples:"),Kp=d(),_(Bo.$$.fragment),Nr=d(),Ue=n("h2"),wt=n("a"),Ta=n("span"),_(Ro.$$.fragment),Xp=d(),ya=n("span"),Yp=r("CLIPTextModel"),Or=d(),Ho=n("div"),K=n("div"),_(Uo.$$.fragment),Zp=d(),Ge=n("p"),Qp=r("The "),ls=n("a"),eh=r("CLIPTextModel"),th=r(" forward method, overrides the "),za=n("code"),oh=r("__call__"),nh=r(" special method."),sh=d(),_(Tt.$$.fragment),ah=d(),Ea=n("p"),rh=r("Examples:"),ih=d(),_(Go.$$.fragment),Sr=d(),Je=n("h2"),yt=n("a"),ja=n("span"),_(Jo.$$.fragment),lh=d(),Ma=n("span"),dh=r("CLIPVisionModel"),Wr=d(),Ko=n("div"),X=n("div"),_(Xo.$$.fragment),ch=d(),Ke=n("p"),ph=r("The "),ds=n("a"),hh=r("CLIPVisionModel"),mh=r(" forward method, overrides the "),qa=n("code"),fh=r("__call__"),uh=r(" special method."),gh=d(),_(zt.$$.fragment),_h=d(),Fa=n("p"),vh=r("Examples:"),Ph=d(),_(Yo.$$.fragment),Br=d(),Xe=n("h2"),Et=n("a"),Aa=n("span"),_(Zo.$$.fragment),Ih=d(),Da=n("span"),Ch=r("FlaxCLIPModel"),Rr=d(),j=n("div"),_(Qo.$$.fragment),bh=d(),en=n("p"),xh=r("This model inherits from "),cs=n("a"),Lh=r("FlaxPreTrainedModel"),$h=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),kh=d(),tn=n("p"),wh=r("This model is also a Flax Linen "),on=n("a"),Th=r("flax.linen.Module"),yh=r(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),zh=d(),Va=n("p"),Eh=r("Finally, this model supports inherent JAX features such as:"),jh=d(),ie=n("ul"),Na=n("li"),nn=n("a"),Mh=r("Just-In-Time (JIT) compilation"),qh=d(),Oa=n("li"),sn=n("a"),Fh=r("Automatic Differentiation"),Ah=d(),Sa=n("li"),an=n("a"),Dh=r("Vectorization"),Vh=d(),Wa=n("li"),rn=n("a"),Nh=r("Parallelization"),Oh=d(),Y=n("div"),_(ln.$$.fragment),Sh=d(),Ye=n("p"),Wh=r("The "),Ba=n("code"),Bh=r("FlaxCLIPPreTrainedModel"),Rh=r(" forward method, overrides the "),Ra=n("code"),Hh=r("__call__"),Uh=r(" special method."),Gh=d(),_(jt.$$.fragment),Jh=d(),Ha=n("p"),Kh=r("Example:"),Xh=d(),_(dn.$$.fragment),Yh=d(),me=n("div"),_(cn.$$.fragment),Zh=d(),Ua=n("p"),Qh=r("Examples:"),em=d(),_(pn.$$.fragment),tm=d(),fe=n("div"),_(hn.$$.fragment),om=d(),Ga=n("p"),nm=r("Examples:"),sm=d(),_(mn.$$.fragment),Hr=d(),Ze=n("h2"),Mt=n("a"),Ja=n("span"),_(fn.$$.fragment),am=d(),Ka=n("span"),rm=r("FlaxCLIPTextModel"),Ur=d(),un=n("div"),Z=n("div"),_(gn.$$.fragment),im=d(),Qe=n("p"),lm=r("The "),Xa=n("code"),dm=r("FlaxCLIPTextPreTrainedModel"),cm=r(" forward method, overrides the "),Ya=n("code"),pm=r("__call__"),hm=r(" special method."),mm=d(),_(qt.$$.fragment),fm=d(),Za=n("p"),um=r("Example:"),gm=d(),_(_n.$$.fragment),Gr=d(),et=n("h2"),Ft=n("a"),Qa=n("span"),_(vn.$$.fragment),_m=d(),er=n("span"),vm=r("FlaxCLIPVisionModel"),Jr=d(),Pn=n("div"),Q=n("div"),_(In.$$.fragment),Pm=d(),tt=n("p"),Im=r("The "),tr=n("code"),Cm=r("FlaxCLIPVisionPreTrainedModel"),bm=r(" forward method, overrides the "),or=n("code"),xm=r("__call__"),Lm=r(" special method."),$m=d(),_(At.$$.fragment),km=d(),nr=n("p"),wm=r("Example:"),Tm=d(),_(Cn.$$.fragment),this.h()},l(t){const h=fg('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(o),L=c(t),f=s(t,"H1",{class:!0});var bn=a(f);x=s(bn,"A",{id:!0,class:!0,href:!0});var sr=a(x);$=s(sr,"SPAN",{});var ar=a($);v(u.$$.fragment,ar),ar.forEach(o),sr.forEach(o),g=c(bn),k=s(bn,"SPAN",{});var rr=a(k);E=i(rr,"CLIP"),rr.forEach(o),bn.forEach(o),w=c(t),A=s(t,"H2",{class:!0});var xn=a(A);V=s(xn,"A",{id:!0,class:!0,href:!0});var ir=a(V);js=s(ir,"SPAN",{});var lr=a(js);v(Ot.$$.fragment,lr),lr.forEach(o),ir.forEach(o),ji=c(xn),Ms=s(xn,"SPAN",{});var dr=a(Ms);Mi=i(dr,"Overview"),dr.forEach(o),xn.forEach(o),fr=c(t),ot=s(t,"P",{});var Ln=a(ot);qi=i(Ln,"The CLIP model was proposed in "),St=s(Ln,"A",{href:!0,rel:!0});var cr=a(St);Fi=i(cr,"Learning Transferable Visual Models From Natural Language Supervision"),cr.forEach(o),Ai=i(Ln,` by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3.`),Ln.forEach(o),ur=c(t),$n=s(t,"P",{});var pr=a($n);Di=i(pr,"The abstract from the paper is the following:"),pr.forEach(o),gr=c(t),kn=s(t,"P",{});var hr=a(kn);qs=s(hr,"EM",{});var Em=a(qs);Vi=i(Em,`State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. 
This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL.`),Em.forEach(o),hr.forEach(o),_r=c(t),we=s(t,"H2",{class:!0});var Xr=a(we);nt=s(Xr,"A",{id:!0,class:!0,href:!0});var jm=a(nt);Fs=s(jm,"SPAN",{});var Mm=a(Fs);v(Wt.$$.fragment,Mm),Mm.forEach(o),jm.forEach(o),Ni=c(Xr),As=s(Xr,"SPAN",{});var qm=a(As);Oi=i(qm,"Usage"),qm.forEach(o),Xr.forEach(o),vr=c(t),wn=s(t,"P",{});var Fm=a(wn);Si=i(Fm,`CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text features. Both the text and visual features are then projected to a latent space with identical dimension. The dot product between the projected image and text features is then used as a similar score.`),Fm.forEach(o),Pr=c(t),st=s(t,"P",{});var Yr=a(st);Wi=i(Yr,`To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. The `),Tn=s(Yr,"A",{href:!0});var Am=a(Tn);Bi=i(Am,"CLIPFeatureExtractor"),Am.forEach(o),Ri=i(Yr," can be used to resize (or rescale) and normalize images for the model."),Yr.forEach(o),Ir=c(t),D=s(t,"P",{});var ee=a(D);Hi=i(ee,"The "),yn=s(ee,"A",{href:!0});var Dm=a(yn);Ui=i(Dm,"CLIPTokenizer"),Dm.forEach(o),Gi=i(ee," is used to encode the text. The "),zn=s(ee,"A",{href:!0});var Vm=a(zn);Ji=i(Vm,"CLIPProcessor"),Vm.forEach(o),Ki=i(ee,` wraps `),En=s(ee,"A",{href:!0});var Nm=a(En);Xi=i(Nm,"CLIPFeatureExtractor"),Nm.forEach(o),Yi=i(ee," and "),jn=s(ee,"A",{href:!0});var Om=a(jn);Zi=i(Om,"CLIPTokenizer"),Om.forEach(o),Qi=i(ee,` into a single instance to both encode the text and prepare the images. 
The following example shows how to get the image-text similarity scores using `),Mn=s(ee,"A",{href:!0});var Sm=a(Mn);el=i(Sm,"CLIPProcessor"),Sm.forEach(o),tl=i(ee," and "),qn=s(ee,"A",{href:!0});var Wm=a(qn);ol=i(Wm,"CLIPModel"),Wm.forEach(o),nl=i(ee,"."),ee.forEach(o),Cr=c(t),v(Bt.$$.fragment,t),br=c(t),de=s(t,"P",{});var ps=a(de);sl=i(ps,"This model was contributed by "),Rt=s(ps,"A",{href:!0,rel:!0});var Bm=a(Rt);al=i(Bm,"valhalla"),Bm.forEach(o),rl=i(ps,". The original code can be found "),Ht=s(ps,"A",{href:!0,rel:!0});var Rm=a(Ht);il=i(Rm,"here"),Rm.forEach(o),ll=i(ps,"."),ps.forEach(o),xr=c(t),Te=s(t,"H2",{class:!0});var Zr=a(Te);at=s(Zr,"A",{id:!0,class:!0,href:!0});var Hm=a(at);Ds=s(Hm,"SPAN",{});var Um=a(Ds);v(Ut.$$.fragment,Um),Um.forEach(o),Hm.forEach(o),dl=c(Zr),Vs=s(Zr,"SPAN",{});var Gm=a(Vs);cl=i(Gm,"CLIPConfig"),Gm.forEach(o),Zr.forEach(o),Lr=c(t),se=s(t,"DIV",{class:!0});var Dt=a(se);v(Gt.$$.fragment,Dt),pl=c(Dt),rt=s(Dt,"P",{});var mr=a(rt);Fn=s(mr,"A",{href:!0});var Jm=a(Fn);hl=i(Jm,"CLIPConfig"),Jm.forEach(o),ml=i(mr,` is the configuration class to store the configuration of a `),An=s(mr,"A",{href:!0});var Km=a(An);fl=i(Km,"CLIPModel"),Km.forEach(o),ul=i(mr,`. It is used to instantiate CLIP model according to the specified arguments, defining the text model and vision model configs.`),mr.forEach(o),gl=c(Dt),ye=s(Dt,"P",{});var hs=a(ye);_l=i(hs,"Configuration objects inherit from "),Dn=s(hs,"A",{href:!0});var Xm=a(Dn);vl=i(Xm,"PretrainedConfig"),Xm.forEach(o),Pl=i(hs,` and can be used to control the model outputs. Read the documentation from `),Vn=s(hs,"A",{href:!0});var Ym=a(Vn);Il=i(Ym,"PretrainedConfig"),Ym.forEach(o),Cl=i(hs," for more information."),hs.forEach(o),bl=c(Dt),it=s(Dt,"DIV",{class:!0});var Qr=a(it);v(Jt.$$.fragment,Qr),xl=c(Qr),Kt=s(Qr,"P",{});var ei=a(Kt);Ll=i(ei,"Instantiate a "),Nn=s(ei,"A",{href:!0});var Zm=a(Nn);$l=i(Zm,"CLIPConfig"),Zm.forEach(o),kl=i(ei,` (or a derived class) from clip text model configuration and clip vision model configuration.`),ei.forEach(o),Qr.forEach(o),Dt.forEach(o),$r=c(t),ze=s(t,"H2",{class:!0});var ti=a(ze);lt=s(ti,"A",{id:!0,class:!0,href:!0});var Qm=a(lt);Ns=s(Qm,"SPAN",{});var ef=a(Ns);v(Xt.$$.fragment,ef),ef.forEach(o),Qm.forEach(o),wl=c(ti),Os=s(ti,"SPAN",{});var tf=a(Os);Tl=i(tf,"CLIPTextConfig"),tf.forEach(o),ti.forEach(o),kr=c(t),S=s(t,"DIV",{class:!0});var ue=a(S);v(Yt.$$.fragment,ue),yl=c(ue),Ee=s(ue,"P",{});var ms=a(Ee);zl=i(ms,"This is the configuration class to store the configuration of a "),On=s(ms,"A",{href:!0});var of=a(On);El=i(of,"CLIPModel"),of.forEach(o),jl=i(ms,`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),Zt=s(ms,"A",{href:!0,rel:!0});var nf=a(Zt);Ml=i(nf,"openai/clip-vit-base-patch32"),nf.forEach(o),ql=i(ms," architecture."),ms.forEach(o),Fl=c(ue),je=s(ue,"P",{});var fs=a(je);Al=i(fs,"Configuration objects inherit from "),Sn=s(fs,"A",{href:!0});var sf=a(Sn);Dl=i(sf,"PretrainedConfig"),sf.forEach(o),Vl=i(fs,` and can be used to control the model outputs. 
Read the documentation from `),Wn=s(fs,"A",{href:!0});var af=a(Wn);Nl=i(af,"PretrainedConfig"),af.forEach(o),Ol=i(fs," for more information."),fs.forEach(o),Sl=c(ue),Ss=s(ue,"P",{});var rf=a(Ss);Wl=i(rf,"Example:"),rf.forEach(o),Bl=c(ue),v(Qt.$$.fragment,ue),ue.forEach(o),wr=c(t),Me=s(t,"H2",{class:!0});var oi=a(Me);dt=s(oi,"A",{id:!0,class:!0,href:!0});var lf=a(dt);Ws=s(lf,"SPAN",{});var df=a(Ws);v(eo.$$.fragment,df),df.forEach(o),lf.forEach(o),Rl=c(oi),Bs=s(oi,"SPAN",{});var cf=a(Bs);Hl=i(cf,"CLIPVisionConfig"),cf.forEach(o),oi.forEach(o),Tr=c(t),W=s(t,"DIV",{class:!0});var ge=a(W);v(to.$$.fragment,ge),Ul=c(ge),qe=s(ge,"P",{});var us=a(qe);Gl=i(us,"This is the configuration class to store the configuration of a "),Bn=s(us,"A",{href:!0});var pf=a(Bn);Jl=i(pf,"CLIPModel"),pf.forEach(o),Kl=i(us,`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),oo=s(us,"A",{href:!0,rel:!0});var hf=a(oo);Xl=i(hf,"openai/clip-vit-base-patch32"),hf.forEach(o),Yl=i(us," architecture."),us.forEach(o),Zl=c(ge),Fe=s(ge,"P",{});var gs=a(Fe);Ql=i(gs,"Configuration objects inherit from "),Rn=s(gs,"A",{href:!0});var mf=a(Rn);ed=i(mf,"PretrainedConfig"),mf.forEach(o),td=i(gs,` and can be used to control the model outputs. Read the documentation from `),Hn=s(gs,"A",{href:!0});var ff=a(Hn);od=i(ff,"PretrainedConfig"),ff.forEach(o),nd=i(gs," for more information."),gs.forEach(o),sd=c(ge),Rs=s(ge,"P",{});var uf=a(Rs);ad=i(uf,"Example:"),uf.forEach(o),rd=c(ge),v(no.$$.fragment,ge),ge.forEach(o),yr=c(t),Ae=s(t,"H2",{class:!0});var ni=a(Ae);ct=s(ni,"A",{id:!0,class:!0,href:!0});var gf=a(ct);Hs=s(gf,"SPAN",{});var _f=a(Hs);v(so.$$.fragment,_f),_f.forEach(o),gf.forEach(o),id=c(ni),Us=s(ni,"SPAN",{});var vf=a(Us);ld=i(vf,"CLIPTokenizer"),vf.forEach(o),ni.forEach(o),zr=c(t),z=s(t,"DIV",{class:!0});var M=a(z);v(ao.$$.fragment,M),dd=c(M),Gs=s(M,"P",{});var Pf=a(Gs);cd=i(Pf,"Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding."),Pf.forEach(o),pd=c(M),Js=s(M,"P",{});var If=a(Js);hd=i(If,`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),If.forEach(o),md=c(M),ro=s(M,"P",{});var si=a(ro);fd=i(si,"You can get around that behavior by passing "),Ks=s(si,"CODE",{});var Cf=a(Ks);ud=i(Cf,"add_prefix_space=True"),Cf.forEach(o),gd=i(si,` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),si.forEach(o),_d=c(M),v(pt.$$.fragment,M),vd=c(M),io=s(M,"P",{});var ai=a(io);Pd=i(ai,"This tokenizer inherits from "),Un=s(ai,"A",{href:!0});var bf=a(Un);Id=i(bf,"PreTrainedTokenizer"),bf.forEach(o),Cd=i(ai,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ai.forEach(o),bd=c(M),ae=s(M,"DIV",{class:!0});var Vt=a(ae);v(lo.$$.fragment,Vt),xd=c(Vt),Xs=s(Vt,"P",{});var xf=a(Xs);Ld=i(xf,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A CLIP sequence has the following format:`),xf.forEach(o),$d=c(Vt),Ys=s(Vt,"UL",{});var Lf=a(Ys);Gn=s(Lf,"LI",{});var ym=a(Gn);kd=i(ym,"single sequence: "),Zs=s(ym,"CODE",{});var $f=a(Zs);wd=i($f,"<|startoftext|> X <|endoftext|>"),$f.forEach(o),ym.forEach(o),Lf.forEach(o),Td=c(Vt),Qs=s(Vt,"P",{});var kf=a(Qs);yd=i(kf,"Pairs of sequences are not the expected use case, but they will be handled without a separator."),kf.forEach(o),Vt.forEach(o),zd=c(M),ht=s(M,"DIV",{class:!0});var ri=a(ht);v(co.$$.fragment,ri),Ed=c(ri),po=s(ri,"P",{});var ii=a(po);jd=i(ii,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ea=s(ii,"CODE",{});var wf=a(ea);Md=i(wf,"prepare_for_model"),wf.forEach(o),qd=i(ii," method."),ii.forEach(o),ri.forEach(o),Fd=c(M),ce=s(M,"DIV",{class:!0});var _s=a(ce);v(ho.$$.fragment,_s),Ad=c(_s),Jn=s(_s,"P",{});var zm=a(Jn);Dd=i(zm,"Create the token type IDs corresponding to the sequences passed. "),Kn=s(zm,"A",{href:!0});var Tf=a(Kn);Vd=i(Tf,"What are token type IDs?"),Tf.forEach(o),zm.forEach(o),Nd=c(_s),ta=s(_s,"P",{});var yf=a(ta);Od=i(yf,"Should be overridden in a subclass if the model has a special way of building those."),yf.forEach(o),_s.forEach(o),Sd=c(M),oa=s(M,"DIV",{class:!0}),a(oa).forEach(o),M.forEach(o),Er=c(t),De=s(t,"H2",{class:!0});var li=a(De);mt=s(li,"A",{id:!0,class:!0,href:!0});var zf=a(mt);na=s(zf,"SPAN",{});var Ef=a(na);v(mo.$$.fragment,Ef),Ef.forEach(o),zf.forEach(o),Wd=c(li),sa=s(li,"SPAN",{});var jf=a(sa);Bd=i(jf,"CLIPTokenizerFast"),jf.forEach(o),li.forEach(o),jr=c(t),q=s(t,"DIV",{class:!0});var te=a(q);v(fo.$$.fragment,te),Rd=c(te),uo=s(te,"P",{});var di=a(uo);Hd=i(di,"Construct a \u201Cfast\u201D CLIP tokenizer (backed by HuggingFace\u2019s "),aa=s(di,"EM",{});var Mf=a(aa);Ud=i(Mf,"tokenizers"),Mf.forEach(o),Gd=i(di,` library). Based on byte-level Byte-Pair-Encoding.`),di.forEach(o),Jd=c(te),ra=s(te,"P",{});var qf=a(ra);Kd=i(qf,`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),qf.forEach(o),Xd=c(te),v(go.$$.fragment,te),Yd=c(te),_o=s(te,"P",{});var ci=a(_o);Zd=i(ci,"You can get around that behavior by passing "),ia=s(ci,"CODE",{});var Ff=a(ia);Qd=i(Ff,"add_prefix_space=True"),Ff.forEach(o),ec=i(ci,` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),ci.forEach(o),tc=c(te),v(ft.$$.fragment,te),oc=c(te),vo=s(te,"P",{});var pi=a(vo);nc=i(pi,"This tokenizer inherits from "),Xn=s(pi,"A",{href:!0});var Af=a(Xn);sc=i(Af,"PreTrainedTokenizerFast"),Af.forEach(o),ac=i(pi,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),pi.forEach(o),te.forEach(o),Mr=c(t),Ve=s(t,"H2",{class:!0});var hi=a(Ve);ut=s(hi,"A",{id:!0,class:!0,href:!0});var Df=a(ut);la=s(Df,"SPAN",{});var Vf=a(la);v(Po.$$.fragment,Vf),Vf.forEach(o),Df.forEach(o),rc=c(hi),da=s(hi,"SPAN",{});var Nf=a(da);ic=i(Nf,"CLIPFeatureExtractor"),Nf.forEach(o),hi.forEach(o),qr=c(t),B=s(t,"DIV",{class:!0});var _e=a(B);v(Io.$$.fragment,_e),lc=c(_e),ca=s(_e,"P",{});var Of=a(ca);dc=i(Of,"Constructs a CLIP feature extractor."),Of.forEach(o),cc=c(_e),Co=s(_e,"P",{});var mi=a(Co);pc=i(mi,"This feature extractor inherits from "),pa=s(mi,"CODE",{});var Sf=a(pa);hc=i(Sf,"FeatureExtractionMixin"),Sf.forEach(o),mc=i(mi,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),mi.forEach(o),fc=c(_e),gt=s(_e,"DIV",{class:!0});var fi=a(gt);v(bo.$$.fragment,fi),uc=c(fi),xo=s(fi,"P",{});var ui=a(xo);gc=i(ui,"Crops "),ha=s(ui,"CODE",{});var Wf=a(ha);_c=i(Wf,"image"),Wf.forEach(o),vc=i(ui,` to the given size using a center crop. Note that if the image is too small to be cropped to the size is given, it will be padded (so the returned result has the size asked).`),ui.forEach(o),fi.forEach(o),Pc=c(_e),_t=s(_e,"DIV",{class:!0});var gi=a(_t);v(Lo.$$.fragment,gi),Ic=c(gi),Ne=s(gi,"P",{});var vs=a(Ne);Cc=i(vs,"Resizes "),ma=s(vs,"CODE",{});var Bf=a(ma);bc=i(Bf,"image"),Bf.forEach(o),xc=i(vs,". Note that this will trigger a conversion of "),fa=s(vs,"CODE",{});var Rf=a(fa);Lc=i(Rf,"image"),Rf.forEach(o),$c=i(vs," to a PIL Image."),vs.forEach(o),gi.forEach(o),_e.forEach(o),Fr=c(t),Oe=s(t,"H2",{class:!0});var _i=a(Oe);vt=s(_i,"A",{id:!0,class:!0,href:!0});var Hf=a(vt);ua=s(Hf,"SPAN",{});var Uf=a(ua);v($o.$$.fragment,Uf),Uf.forEach(o),Hf.forEach(o),kc=c(_i),ga=s(_i,"SPAN",{});var Gf=a(ga);wc=i(Gf,"CLIPProcessor"),Gf.forEach(o),_i.forEach(o),Ar=c(t),F=s(t,"DIV",{class:!0});var oe=a(F);v(ko.$$.fragment,oe),Tc=c(oe),_a=s(oe,"P",{});var Jf=a(_a);yc=i(Jf,"Constructs a CLIP processor which wraps a CLIP feature extractor and a CLIP tokenizer into a single processor."),Jf.forEach(o),zc=c(oe),H=s(oe,"P",{});var le=a(H);Yn=s(le,"A",{href:!0});var Kf=a(Yn);Ec=i(Kf,"CLIPProcessor"),Kf.forEach(o),jc=i(le," offers all the functionalities of "),Zn=s(le,"A",{href:!0});var Xf=a(Zn);Mc=i(Xf,"CLIPFeatureExtractor"),Xf.forEach(o),qc=i(le,` and `),Qn=s(le,"A",{href:!0});var Yf=a(Qn);Fc=i(Yf,"CLIPTokenizer"),Yf.forEach(o),Ac=i(le,". See the "),va=s(le,"CODE",{});var Zf=a(va);Dc=i(Zf,"__call__()"),Zf.forEach(o),Vc=i(le,` and `),es=s(le,"A",{href:!0});var Qf=a(es);Nc=i(Qf,"decode()"),Qf.forEach(o),Oc=i(le," for more information."),le.forEach(o),Sc=c(oe),Pt=s(oe,"DIV",{class:!0});var vi=a(Pt);v(wo.$$.fragment,vi),Wc=c(vi),To=s(vi,"P",{});var Pi=a(To);Bc=i(Pi,`This method forwards all its arguments to CLIPTokenizer\u2019s `),ts=s(Pi,"A",{href:!0});var eu=a(ts);Rc=i(eu,"batch_decode()"),eu.forEach(o),Hc=i(Pi,`. Please refer to the docstring of this method for more information.`),Pi.forEach(o),vi.forEach(o),Uc=c(oe),It=s(oe,"DIV",{class:!0});var Ii=a(It);v(yo.$$.fragment,Ii),Gc=c(Ii),zo=s(Ii,"P",{});var Ci=a(zo);Jc=i(Ci,"This method forwards all its arguments to CLIPTokenizer\u2019s "),os=s(Ci,"A",{href:!0});var tu=a(os);Kc=i(tu,"decode()"),tu.forEach(o),Xc=i(Ci,`. 
Please refer to the docstring of this method for more information.`),Ci.forEach(o),Ii.forEach(o),Yc=c(oe),pe=s(oe,"DIV",{class:!0});var Ps=a(pe);v(Eo.$$.fragment,Ps),Zc=c(Ps),jo=s(Ps,"P",{});var bi=a(jo);Qc=i(bi,"Instantiate a "),ns=s(bi,"A",{href:!0});var ou=a(ns);ep=i(ou,"CLIPProcessor"),ou.forEach(o),tp=i(bi," from a pretrained CLIP processor."),bi.forEach(o),op=c(Ps),v(Ct.$$.fragment,Ps),Ps.forEach(o),np=c(oe),he=s(oe,"DIV",{class:!0});var Is=a(he);v(Mo.$$.fragment,Is),sp=c(Is),Se=s(Is,"P",{});var Cs=a(Se);ap=i(Cs,"Save a CLIP feature extractor object and CLIP tokenizer object to the directory "),Pa=s(Cs,"CODE",{});var nu=a(Pa);rp=i(nu,"save_directory"),nu.forEach(o),ip=i(Cs,`, so that it can be re-loaded using the `),ss=s(Cs,"A",{href:!0});var su=a(ss);lp=i(su,"from_pretrained()"),su.forEach(o),dp=i(Cs," class method."),Cs.forEach(o),cp=c(Is),v(bt.$$.fragment,Is),Is.forEach(o),oe.forEach(o),Dr=c(t),We=s(t,"H2",{class:!0});var xi=a(We);xt=s(xi,"A",{id:!0,class:!0,href:!0});var au=a(xt);Ia=s(au,"SPAN",{});var ru=a(Ia);v(qo.$$.fragment,ru),ru.forEach(o),au.forEach(o),pp=c(xi),Ca=s(xi,"SPAN",{});var iu=a(Ca);hp=i(iu,"CLIPModel"),iu.forEach(o),xi.forEach(o),Vr=c(t),R=s(t,"DIV",{class:!0});var ve=a(R);v(Fo.$$.fragment,ve),mp=c(ve),Ao=s(ve,"P",{});var Li=a(Ao);fp=i(Li,"This model is a PyTorch "),Do=s(Li,"A",{href:!0,rel:!0});var lu=a(Do);up=i(lu,"torch.nn.Module"),lu.forEach(o),gp=i(Li,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Li.forEach(o),_p=c(ve),U=s(ve,"DIV",{class:!0});var Pe=a(U);v(Vo.$$.fragment,Pe),vp=c(Pe),Be=s(Pe,"P",{});var bs=a(Be);Pp=i(bs,"The "),as=s(bs,"A",{href:!0});var du=a(as);Ip=i(du,"CLIPModel"),du.forEach(o),Cp=i(bs," forward method, overrides the "),ba=s(bs,"CODE",{});var cu=a(ba);bp=i(cu,"__call__"),cu.forEach(o),xp=i(bs," special method."),bs.forEach(o),Lp=c(Pe),v(Lt.$$.fragment,Pe),$p=c(Pe),xa=s(Pe,"P",{});var pu=a(xa);kp=i(pu,"Examples:"),pu.forEach(o),wp=c(Pe),v(No.$$.fragment,Pe),Pe.forEach(o),Tp=c(ve),G=s(ve,"DIV",{class:!0});var Ie=a(G);v(Oo.$$.fragment,Ie),yp=c(Ie),Re=s(Ie,"P",{});var xs=a(Re);zp=i(xs,"The "),rs=s(xs,"A",{href:!0});var hu=a(rs);Ep=i(hu,"CLIPModel"),hu.forEach(o),jp=i(xs," forward method, overrides the "),La=s(xs,"CODE",{});var mu=a(La);Mp=i(mu,"__call__"),mu.forEach(o),qp=i(xs," special method."),xs.forEach(o),Fp=c(Ie),v($t.$$.fragment,Ie),Ap=c(Ie),$a=s(Ie,"P",{});var fu=a($a);Dp=i(fu,"Examples:"),fu.forEach(o),Vp=c(Ie),v(So.$$.fragment,Ie),Ie.forEach(o),Np=c(ve),J=s(ve,"DIV",{class:!0});var Ce=a(J);v(Wo.$$.fragment,Ce),Op=c(Ce),He=s(Ce,"P",{});var Ls=a(He);Sp=i(Ls,"The "),is=s(Ls,"A",{href:!0});var uu=a(is);Wp=i(uu,"CLIPModel"),uu.forEach(o),Bp=i(Ls," forward method, overrides the "),ka=s(Ls,"CODE",{});var gu=a(ka);Rp=i(gu,"__call__"),gu.forEach(o),Hp=i(Ls," special method."),Ls.forEach(o),Up=c(Ce),v(kt.$$.fragment,Ce),Gp=c(Ce),wa=s(Ce,"P",{});var _u=a(wa);Jp=i(_u,"Examples:"),_u.forEach(o),Kp=c(Ce),v(Bo.$$.fragment,Ce),Ce.forEach(o),ve.forEach(o),Nr=c(t),Ue=s(t,"H2",{class:!0});var $i=a(Ue);wt=s($i,"A",{id:!0,class:!0,href:!0});var vu=a(wt);Ta=s(vu,"SPAN",{});var Pu=a(Ta);v(Ro.$$.fragment,Pu),Pu.forEach(o),vu.forEach(o),Xp=c($i),ya=s($i,"SPAN",{});var Iu=a(ya);Yp=i(Iu,"CLIPTextModel"),Iu.forEach(o),$i.forEach(o),Or=c(t),Ho=s(t,"DIV",{class:!0});var Cu=a(Ho);K=s(Cu,"DIV",{class:!0});var be=a(K);v(Uo.$$.fragment,be),Zp=c(be),Ge=s(be,"P",{});var $s=a(Ge);Qp=i($s,"The "),ls=s($s,"A",{href:!0});var 
bu=a(ls);eh=i(bu,"CLIPTextModel"),bu.forEach(o),th=i($s," forward method, overrides the "),za=s($s,"CODE",{});var xu=a(za);oh=i(xu,"__call__"),xu.forEach(o),nh=i($s," special method."),$s.forEach(o),sh=c(be),v(Tt.$$.fragment,be),ah=c(be),Ea=s(be,"P",{});var Lu=a(Ea);rh=i(Lu,"Examples:"),Lu.forEach(o),ih=c(be),v(Go.$$.fragment,be),be.forEach(o),Cu.forEach(o),Sr=c(t),Je=s(t,"H2",{class:!0});var ki=a(Je);yt=s(ki,"A",{id:!0,class:!0,href:!0});var $u=a(yt);ja=s($u,"SPAN",{});var ku=a(ja);v(Jo.$$.fragment,ku),ku.forEach(o),$u.forEach(o),lh=c(ki),Ma=s(ki,"SPAN",{});var wu=a(Ma);dh=i(wu,"CLIPVisionModel"),wu.forEach(o),ki.forEach(o),Wr=c(t),Ko=s(t,"DIV",{class:!0});var Tu=a(Ko);X=s(Tu,"DIV",{class:!0});var xe=a(X);v(Xo.$$.fragment,xe),ch=c(xe),Ke=s(xe,"P",{});var ks=a(Ke);ph=i(ks,"The "),ds=s(ks,"A",{href:!0});var yu=a(ds);hh=i(yu,"CLIPVisionModel"),yu.forEach(o),mh=i(ks," forward method, overrides the "),qa=s(ks,"CODE",{});var zu=a(qa);fh=i(zu,"__call__"),zu.forEach(o),uh=i(ks," special method."),ks.forEach(o),gh=c(xe),v(zt.$$.fragment,xe),_h=c(xe),Fa=s(xe,"P",{});var Eu=a(Fa);vh=i(Eu,"Examples:"),Eu.forEach(o),Ph=c(xe),v(Yo.$$.fragment,xe),xe.forEach(o),Tu.forEach(o),Br=c(t),Xe=s(t,"H2",{class:!0});var wi=a(Xe);Et=s(wi,"A",{id:!0,class:!0,href:!0});var ju=a(Et);Aa=s(ju,"SPAN",{});var Mu=a(Aa);v(Zo.$$.fragment,Mu),Mu.forEach(o),ju.forEach(o),Ih=c(wi),Da=s(wi,"SPAN",{});var qu=a(Da);Ch=i(qu,"FlaxCLIPModel"),qu.forEach(o),wi.forEach(o),Rr=c(t),j=s(t,"DIV",{class:!0});var N=a(j);v(Qo.$$.fragment,N),bh=c(N),en=s(N,"P",{});var Ti=a(en);xh=i(Ti,"This model inherits from "),cs=s(Ti,"A",{href:!0});var Fu=a(cs);Lh=i(Fu,"FlaxPreTrainedModel"),Fu.forEach(o),$h=i(Ti,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Ti.forEach(o),kh=c(N),tn=s(N,"P",{});var yi=a(tn);wh=i(yi,"This model is also a Flax Linen "),on=s(yi,"A",{href:!0,rel:!0});var Au=a(on);Th=i(Au,"flax.linen.Module"),Au.forEach(o),yh=i(yi,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),yi.forEach(o),zh=c(N),Va=s(N,"P",{});var Du=a(Va);Eh=i(Du,"Finally, this model supports inherent JAX features such as:"),Du.forEach(o),jh=c(N),ie=s(N,"UL",{});var Nt=a(ie);Na=s(Nt,"LI",{});var Vu=a(Na);nn=s(Vu,"A",{href:!0,rel:!0});var Nu=a(nn);Mh=i(Nu,"Just-In-Time (JIT) compilation"),Nu.forEach(o),Vu.forEach(o),qh=c(Nt),Oa=s(Nt,"LI",{});var Ou=a(Oa);sn=s(Ou,"A",{href:!0,rel:!0});var Su=a(sn);Fh=i(Su,"Automatic Differentiation"),Su.forEach(o),Ou.forEach(o),Ah=c(Nt),Sa=s(Nt,"LI",{});var Wu=a(Sa);an=s(Wu,"A",{href:!0,rel:!0});var Bu=a(an);Dh=i(Bu,"Vectorization"),Bu.forEach(o),Wu.forEach(o),Vh=c(Nt),Wa=s(Nt,"LI",{});var Ru=a(Wa);rn=s(Ru,"A",{href:!0,rel:!0});var Hu=a(rn);Nh=i(Hu,"Parallelization"),Hu.forEach(o),Ru.forEach(o),Nt.forEach(o),Oh=c(N),Y=s(N,"DIV",{class:!0});var Le=a(Y);v(ln.$$.fragment,Le),Sh=c(Le),Ye=s(Le,"P",{});var ws=a(Ye);Wh=i(ws,"The "),Ba=s(ws,"CODE",{});var Uu=a(Ba);Bh=i(Uu,"FlaxCLIPPreTrainedModel"),Uu.forEach(o),Rh=i(ws," forward method, overrides the "),Ra=s(ws,"CODE",{});var Gu=a(Ra);Hh=i(Gu,"__call__"),Gu.forEach(o),Uh=i(ws," special method."),ws.forEach(o),Gh=c(Le),v(jt.$$.fragment,Le),Jh=c(Le),Ha=s(Le,"P",{});var Ju=a(Ha);Kh=i(Ju,"Example:"),Ju.forEach(o),Xh=c(Le),v(dn.$$.fragment,Le),Le.forEach(o),Yh=c(N),me=s(N,"DIV",{class:!0});var Ts=a(me);v(cn.$$.fragment,Ts),Zh=c(Ts),Ua=s(Ts,"P",{});var Ku=a(Ua);Qh=i(Ku,"Examples:"),Ku.forEach(o),em=c(Ts),v(pn.$$.fragment,Ts),Ts.forEach(o),tm=c(N),fe=s(N,"DIV",{class:!0});var ys=a(fe);v(hn.$$.fragment,ys),om=c(ys),Ga=s(ys,"P",{});var Xu=a(Ga);nm=i(Xu,"Examples:"),Xu.forEach(o),sm=c(ys),v(mn.$$.fragment,ys),ys.forEach(o),N.forEach(o),Hr=c(t),Ze=s(t,"H2",{class:!0});var zi=a(Ze);Mt=s(zi,"A",{id:!0,class:!0,href:!0});var Yu=a(Mt);Ja=s(Yu,"SPAN",{});var Zu=a(Ja);v(fn.$$.fragment,Zu),Zu.forEach(o),Yu.forEach(o),am=c(zi),Ka=s(zi,"SPAN",{});var Qu=a(Ka);rm=i(Qu,"FlaxCLIPTextModel"),Qu.forEach(o),zi.forEach(o),Ur=c(t),un=s(t,"DIV",{class:!0});var eg=a(un);Z=s(eg,"DIV",{class:!0});var $e=a(Z);v(gn.$$.fragment,$e),im=c($e),Qe=s($e,"P",{});var zs=a(Qe);lm=i(zs,"The "),Xa=s(zs,"CODE",{});var tg=a(Xa);dm=i(tg,"FlaxCLIPTextPreTrainedModel"),tg.forEach(o),cm=i(zs," forward method, overrides the "),Ya=s(zs,"CODE",{});var og=a(Ya);pm=i(og,"__call__"),og.forEach(o),hm=i(zs," special method."),zs.forEach(o),mm=c($e),v(qt.$$.fragment,$e),fm=c($e),Za=s($e,"P",{});var ng=a(Za);um=i(ng,"Example:"),ng.forEach(o),gm=c($e),v(_n.$$.fragment,$e),$e.forEach(o),eg.forEach(o),Gr=c(t),et=s(t,"H2",{class:!0});var Ei=a(et);Ft=s(Ei,"A",{id:!0,class:!0,href:!0});var sg=a(Ft);Qa=s(sg,"SPAN",{});var ag=a(Qa);v(vn.$$.fragment,ag),ag.forEach(o),sg.forEach(o),_m=c(Ei),er=s(Ei,"SPAN",{});var rg=a(er);vm=i(rg,"FlaxCLIPVisionModel"),rg.forEach(o),Ei.forEach(o),Jr=c(t),Pn=s(t,"DIV",{class:!0});var ig=a(Pn);Q=s(ig,"DIV",{class:!0});var ke=a(Q);v(In.$$.fragment,ke),Pm=c(ke),tt=s(ke,"P",{});var Es=a(tt);Im=i(Es,"The "),tr=s(Es,"CODE",{});var lg=a(tr);Cm=i(lg,"FlaxCLIPVisionPreTrainedModel"),lg.forEach(o),bm=i(Es," forward method, overrides the "),or=s(Es,"CODE",{});var dg=a(or);xm=i(dg,"__call__"),dg.forEach(o),Lm=i(Es," special method."),Es.forEach(o),$m=c(ke),v(At.$$.fragment,ke),km=c(ke),nr=s(ke,"P",{});var cg=a(nr);wm=i(cg,"Example:"),cg.forEach(o),Tm=c(ke),v(Cn.$$.fragment,ke),ke.forEach(o),ig.forEach(o),this.h()},h(){l(p,"name","hf:doc:metadata"),l(p,"content",JSON.stringify(Tg)),l(x,"id","clip"),l(x,"class","header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(x,"href","#clip"),l(f,"class","relative group"),l(V,"id","overview"),l(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(V,"href","#overview"),l(A,"class","relative group"),l(St,"href","https://arxiv.org/abs/2103.00020"),l(St,"rel","nofollow"),l(nt,"id","usage"),l(nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(nt,"href","#usage"),l(we,"class","relative group"),l(Tn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor"),l(yn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer"),l(zn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor"),l(En,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor"),l(jn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer"),l(Mn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor"),l(qn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(Rt,"href","https://huggingface.co/valhalla"),l(Rt,"rel","nofollow"),l(Ht,"href","https://github.com/openai/CLIP"),l(Ht,"rel","nofollow"),l(at,"id","transformers.CLIPConfig"),l(at,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(at,"href","#transformers.CLIPConfig"),l(Te,"class","relative group"),l(Fn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig"),l(An,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(Dn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Vn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Nn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPConfig"),l(it,"class","docstring"),l(se,"class","docstring"),l(lt,"id","transformers.CLIPTextConfig"),l(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(lt,"href","#transformers.CLIPTextConfig"),l(ze,"class","relative group"),l(On,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(Zt,"href","https://huggingface.co/openai/clip-vit-base-patch32"),l(Zt,"rel","nofollow"),l(Sn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Wn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(S,"class","docstring"),l(dt,"id","transformers.CLIPVisionConfig"),l(dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(dt,"href","#transformers.CLIPVisionConfig"),l(Me,"class","relative 
group"),l(Bn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(oo,"href","https://huggingface.co/openai/clip-vit-base-patch32"),l(oo,"rel","nofollow"),l(Rn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Hn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(W,"class","docstring"),l(ct,"id","transformers.CLIPTokenizer"),l(ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ct,"href","#transformers.CLIPTokenizer"),l(Ae,"class","relative group"),l(Un,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(ae,"class","docstring"),l(ht,"class","docstring"),l(Kn,"href","../glossary#token-type-ids"),l(ce,"class","docstring"),l(oa,"class","docstring"),l(z,"class","docstring"),l(mt,"id","transformers.CLIPTokenizerFast"),l(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(mt,"href","#transformers.CLIPTokenizerFast"),l(De,"class","relative group"),l(Xn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(q,"class","docstring"),l(ut,"id","transformers.CLIPFeatureExtractor"),l(ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ut,"href","#transformers.CLIPFeatureExtractor"),l(Ve,"class","relative group"),l(gt,"class","docstring"),l(_t,"class","docstring"),l(B,"class","docstring"),l(vt,"id","transformers.CLIPProcessor"),l(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(vt,"href","#transformers.CLIPProcessor"),l(Oe,"class","relative group"),l(Yn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor"),l(Zn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPFeatureExtractor"),l(Qn,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer"),l(es,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor.decode"),l(ts,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),l(Pt,"class","docstring"),l(os,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),l(It,"class","docstring"),l(ns,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor"),l(pe,"class","docstring"),l(ss,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPProcessor.from_pretrained"),l(he,"class","docstring"),l(F,"class","docstring"),l(xt,"id","transformers.CLIPModel"),l(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(xt,"href","#transformers.CLIPModel"),l(We,"class","relative 
group"),l(Do,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Do,"rel","nofollow"),l(as,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(U,"class","docstring"),l(rs,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(G,"class","docstring"),l(is,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPModel"),l(J,"class","docstring"),l(R,"class","docstring"),l(wt,"id","transformers.CLIPTextModel"),l(wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(wt,"href","#transformers.CLIPTextModel"),l(Ue,"class","relative group"),l(ls,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextModel"),l(K,"class","docstring"),l(Ho,"class","docstring"),l(yt,"id","transformers.CLIPVisionModel"),l(yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(yt,"href","#transformers.CLIPVisionModel"),l(Je,"class","relative group"),l(ds,"href","/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionModel"),l(X,"class","docstring"),l(Ko,"class","docstring"),l(Et,"id","transformers.FlaxCLIPModel"),l(Et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Et,"href","#transformers.FlaxCLIPModel"),l(Xe,"class","relative group"),l(cs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(on,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(on,"rel","nofollow"),l(nn,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(nn,"rel","nofollow"),l(sn,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(sn,"rel","nofollow"),l(an,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(an,"rel","nofollow"),l(rn,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(rn,"rel","nofollow"),l(Y,"class","docstring"),l(me,"class","docstring"),l(fe,"class","docstring"),l(j,"class","docstring"),l(Mt,"id","transformers.FlaxCLIPTextModel"),l(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Mt,"href","#transformers.FlaxCLIPTextModel"),l(Ze,"class","relative group"),l(Z,"class","docstring"),l(un,"class","docstring"),l(Ft,"id","transformers.FlaxCLIPVisionModel"),l(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ft,"href","#transformers.FlaxCLIPVisionModel"),l(et,"class","relative 
group"),l(Q,"class","docstring"),l(Pn,"class","docstring")},m(t,h){e(document.head,p),m(t,L,h),m(t,f,h),e(f,x),e(x,$),P(u,$,null),e(f,g),e(f,k),e(k,E),m(t,w,h),m(t,A,h),e(A,V),e(V,js),P(Ot,js,null),e(A,ji),e(A,Ms),e(Ms,Mi),m(t,fr,h),m(t,ot,h),e(ot,qi),e(ot,St),e(St,Fi),e(ot,Ai),m(t,ur,h),m(t,$n,h),e($n,Di),m(t,gr,h),m(t,kn,h),e(kn,qs),e(qs,Vi),m(t,_r,h),m(t,we,h),e(we,nt),e(nt,Fs),P(Wt,Fs,null),e(we,Ni),e(we,As),e(As,Oi),m(t,vr,h),m(t,wn,h),e(wn,Si),m(t,Pr,h),m(t,st,h),e(st,Wi),e(st,Tn),e(Tn,Bi),e(st,Ri),m(t,Ir,h),m(t,D,h),e(D,Hi),e(D,yn),e(yn,Ui),e(D,Gi),e(D,zn),e(zn,Ji),e(D,Ki),e(D,En),e(En,Xi),e(D,Yi),e(D,jn),e(jn,Zi),e(D,Qi),e(D,Mn),e(Mn,el),e(D,tl),e(D,qn),e(qn,ol),e(D,nl),m(t,Cr,h),P(Bt,t,h),m(t,br,h),m(t,de,h),e(de,sl),e(de,Rt),e(Rt,al),e(de,rl),e(de,Ht),e(Ht,il),e(de,ll),m(t,xr,h),m(t,Te,h),e(Te,at),e(at,Ds),P(Ut,Ds,null),e(Te,dl),e(Te,Vs),e(Vs,cl),m(t,Lr,h),m(t,se,h),P(Gt,se,null),e(se,pl),e(se,rt),e(rt,Fn),e(Fn,hl),e(rt,ml),e(rt,An),e(An,fl),e(rt,ul),e(se,gl),e(se,ye),e(ye,_l),e(ye,Dn),e(Dn,vl),e(ye,Pl),e(ye,Vn),e(Vn,Il),e(ye,Cl),e(se,bl),e(se,it),P(Jt,it,null),e(it,xl),e(it,Kt),e(Kt,Ll),e(Kt,Nn),e(Nn,$l),e(Kt,kl),m(t,$r,h),m(t,ze,h),e(ze,lt),e(lt,Ns),P(Xt,Ns,null),e(ze,wl),e(ze,Os),e(Os,Tl),m(t,kr,h),m(t,S,h),P(Yt,S,null),e(S,yl),e(S,Ee),e(Ee,zl),e(Ee,On),e(On,El),e(Ee,jl),e(Ee,Zt),e(Zt,Ml),e(Ee,ql),e(S,Fl),e(S,je),e(je,Al),e(je,Sn),e(Sn,Dl),e(je,Vl),e(je,Wn),e(Wn,Nl),e(je,Ol),e(S,Sl),e(S,Ss),e(Ss,Wl),e(S,Bl),P(Qt,S,null),m(t,wr,h),m(t,Me,h),e(Me,dt),e(dt,Ws),P(eo,Ws,null),e(Me,Rl),e(Me,Bs),e(Bs,Hl),m(t,Tr,h),m(t,W,h),P(to,W,null),e(W,Ul),e(W,qe),e(qe,Gl),e(qe,Bn),e(Bn,Jl),e(qe,Kl),e(qe,oo),e(oo,Xl),e(qe,Yl),e(W,Zl),e(W,Fe),e(Fe,Ql),e(Fe,Rn),e(Rn,ed),e(Fe,td),e(Fe,Hn),e(Hn,od),e(Fe,nd),e(W,sd),e(W,Rs),e(Rs,ad),e(W,rd),P(no,W,null),m(t,yr,h),m(t,Ae,h),e(Ae,ct),e(ct,Hs),P(so,Hs,null),e(Ae,id),e(Ae,Us),e(Us,ld),m(t,zr,h),m(t,z,h),P(ao,z,null),e(z,dd),e(z,Gs),e(Gs,cd),e(z,pd),e(z,Js),e(Js,hd),e(z,md),e(z,ro),e(ro,fd),e(ro,Ks),e(Ks,ud),e(ro,gd),e(z,_d),P(pt,z,null),e(z,vd),e(z,io),e(io,Pd),e(io,Un),e(Un,Id),e(io,Cd),e(z,bd),e(z,ae),P(lo,ae,null),e(ae,xd),e(ae,Xs),e(Xs,Ld),e(ae,$d),e(ae,Ys),e(Ys,Gn),e(Gn,kd),e(Gn,Zs),e(Zs,wd),e(ae,Td),e(ae,Qs),e(Qs,yd),e(z,zd),e(z,ht),P(co,ht,null),e(ht,Ed),e(ht,po),e(po,jd),e(po,ea),e(ea,Md),e(po,qd),e(z,Fd),e(z,ce),P(ho,ce,null),e(ce,Ad),e(ce,Jn),e(Jn,Dd),e(Jn,Kn),e(Kn,Vd),e(ce,Nd),e(ce,ta),e(ta,Od),e(z,Sd),e(z,oa),m(t,Er,h),m(t,De,h),e(De,mt),e(mt,na),P(mo,na,null),e(De,Wd),e(De,sa),e(sa,Bd),m(t,jr,h),m(t,q,h),P(fo,q,null),e(q,Rd),e(q,uo),e(uo,Hd),e(uo,aa),e(aa,Ud),e(uo,Gd),e(q,Jd),e(q,ra),e(ra,Kd),e(q,Xd),P(go,q,null),e(q,Yd),e(q,_o),e(_o,Zd),e(_o,ia),e(ia,Qd),e(_o,ec),e(q,tc),P(ft,q,null),e(q,oc),e(q,vo),e(vo,nc),e(vo,Xn),e(Xn,sc),e(vo,ac),m(t,Mr,h),m(t,Ve,h),e(Ve,ut),e(ut,la),P(Po,la,null),e(Ve,rc),e(Ve,da),e(da,ic),m(t,qr,h),m(t,B,h),P(Io,B,null),e(B,lc),e(B,ca),e(ca,dc),e(B,cc),e(B,Co),e(Co,pc),e(Co,pa),e(pa,hc),e(Co,mc),e(B,fc),e(B,gt),P(bo,gt,null),e(gt,uc),e(gt,xo),e(xo,gc),e(xo,ha),e(ha,_c),e(xo,vc),e(B,Pc),e(B,_t),P(Lo,_t,null),e(_t,Ic),e(_t,Ne),e(Ne,Cc),e(Ne,ma),e(ma,bc),e(Ne,xc),e(Ne,fa),e(fa,Lc),e(Ne,$c),m(t,Fr,h),m(t,Oe,h),e(Oe,vt),e(vt,ua),P($o,ua,null),e(Oe,kc),e(Oe,ga),e(ga,wc),m(t,Ar,h),m(t,F,h),P(ko,F,null),e(F,Tc),e(F,_a),e(_a,yc),e(F,zc),e(F,H),e(H,Yn),e(Yn,Ec),e(H,jc),e(H,Zn),e(Zn,Mc),e(H,qc),e(H,Qn),e(Qn,Fc),e(H,Ac),e(H,va),e(va,Dc),e(H,Vc),e(H,es),e(es,Nc),e(H,Oc),e(F,Sc),e(F,Pt),P(wo,Pt,null),e(Pt,Wc),e(Pt,To),e(To,Bc),e(To,ts),e(ts,Rc),e(To,Hc),e(F,Uc),e(F,It),P(yo,It,null),e(It,Gc),e(It,zo),e(zo,Jc),e(zo,os),e(os,Kc),e(zo,
Xc),e(F,Yc),e(F,pe),P(Eo,pe,null),e(pe,Zc),e(pe,jo),e(jo,Qc),e(jo,ns),e(ns,ep),e(jo,tp),e(pe,op),P(Ct,pe,null),e(F,np),e(F,he),P(Mo,he,null),e(he,sp),e(he,Se),e(Se,ap),e(Se,Pa),e(Pa,rp),e(Se,ip),e(Se,ss),e(ss,lp),e(Se,dp),e(he,cp),P(bt,he,null),m(t,Dr,h),m(t,We,h),e(We,xt),e(xt,Ia),P(qo,Ia,null),e(We,pp),e(We,Ca),e(Ca,hp),m(t,Vr,h),m(t,R,h),P(Fo,R,null),e(R,mp),e(R,Ao),e(Ao,fp),e(Ao,Do),e(Do,up),e(Ao,gp),e(R,_p),e(R,U),P(Vo,U,null),e(U,vp),e(U,Be),e(Be,Pp),e(Be,as),e(as,Ip),e(Be,Cp),e(Be,ba),e(ba,bp),e(Be,xp),e(U,Lp),P(Lt,U,null),e(U,$p),e(U,xa),e(xa,kp),e(U,wp),P(No,U,null),e(R,Tp),e(R,G),P(Oo,G,null),e(G,yp),e(G,Re),e(Re,zp),e(Re,rs),e(rs,Ep),e(Re,jp),e(Re,La),e(La,Mp),e(Re,qp),e(G,Fp),P($t,G,null),e(G,Ap),e(G,$a),e($a,Dp),e(G,Vp),P(So,G,null),e(R,Np),e(R,J),P(Wo,J,null),e(J,Op),e(J,He),e(He,Sp),e(He,is),e(is,Wp),e(He,Bp),e(He,ka),e(ka,Rp),e(He,Hp),e(J,Up),P(kt,J,null),e(J,Gp),e(J,wa),e(wa,Jp),e(J,Kp),P(Bo,J,null),m(t,Nr,h),m(t,Ue,h),e(Ue,wt),e(wt,Ta),P(Ro,Ta,null),e(Ue,Xp),e(Ue,ya),e(ya,Yp),m(t,Or,h),m(t,Ho,h),e(Ho,K),P(Uo,K,null),e(K,Zp),e(K,Ge),e(Ge,Qp),e(Ge,ls),e(ls,eh),e(Ge,th),e(Ge,za),e(za,oh),e(Ge,nh),e(K,sh),P(Tt,K,null),e(K,ah),e(K,Ea),e(Ea,rh),e(K,ih),P(Go,K,null),m(t,Sr,h),m(t,Je,h),e(Je,yt),e(yt,ja),P(Jo,ja,null),e(Je,lh),e(Je,Ma),e(Ma,dh),m(t,Wr,h),m(t,Ko,h),e(Ko,X),P(Xo,X,null),e(X,ch),e(X,Ke),e(Ke,ph),e(Ke,ds),e(ds,hh),e(Ke,mh),e(Ke,qa),e(qa,fh),e(Ke,uh),e(X,gh),P(zt,X,null),e(X,_h),e(X,Fa),e(Fa,vh),e(X,Ph),P(Yo,X,null),m(t,Br,h),m(t,Xe,h),e(Xe,Et),e(Et,Aa),P(Zo,Aa,null),e(Xe,Ih),e(Xe,Da),e(Da,Ch),m(t,Rr,h),m(t,j,h),P(Qo,j,null),e(j,bh),e(j,en),e(en,xh),e(en,cs),e(cs,Lh),e(en,$h),e(j,kh),e(j,tn),e(tn,wh),e(tn,on),e(on,Th),e(tn,yh),e(j,zh),e(j,Va),e(Va,Eh),e(j,jh),e(j,ie),e(ie,Na),e(Na,nn),e(nn,Mh),e(ie,qh),e(ie,Oa),e(Oa,sn),e(sn,Fh),e(ie,Ah),e(ie,Sa),e(Sa,an),e(an,Dh),e(ie,Vh),e(ie,Wa),e(Wa,rn),e(rn,Nh),e(j,Oh),e(j,Y),P(ln,Y,null),e(Y,Sh),e(Y,Ye),e(Ye,Wh),e(Ye,Ba),e(Ba,Bh),e(Ye,Rh),e(Ye,Ra),e(Ra,Hh),e(Ye,Uh),e(Y,Gh),P(jt,Y,null),e(Y,Jh),e(Y,Ha),e(Ha,Kh),e(Y,Xh),P(dn,Y,null),e(j,Yh),e(j,me),P(cn,me,null),e(me,Zh),e(me,Ua),e(Ua,Qh),e(me,em),P(pn,me,null),e(j,tm),e(j,fe),P(hn,fe,null),e(fe,om),e(fe,Ga),e(Ga,nm),e(fe,sm),P(mn,fe,null),m(t,Hr,h),m(t,Ze,h),e(Ze,Mt),e(Mt,Ja),P(fn,Ja,null),e(Ze,am),e(Ze,Ka),e(Ka,rm),m(t,Ur,h),m(t,un,h),e(un,Z),P(gn,Z,null),e(Z,im),e(Z,Qe),e(Qe,lm),e(Qe,Xa),e(Xa,dm),e(Qe,cm),e(Qe,Ya),e(Ya,pm),e(Qe,hm),e(Z,mm),P(qt,Z,null),e(Z,fm),e(Z,Za),e(Za,um),e(Z,gm),P(_n,Z,null),m(t,Gr,h),m(t,et,h),e(et,Ft),e(Ft,Qa),P(vn,Qa,null),e(et,_m),e(et,er),e(er,vm),m(t,Jr,h),m(t,Pn,h),e(Pn,Q),P(In,Q,null),e(Q,Pm),e(Q,tt),e(tt,Im),e(tt,tr),e(tr,Cm),e(tt,bm),e(tt,or),e(or,xm),e(tt,Lm),e(Q,$m),P(At,Q,null),e(Q,km),e(Q,nr),e(nr,wm),e(Q,Tm),P(Cn,Q,null),Kr=!0},p(t,[h]){const bn={};h&2&&(bn.$$scope={dirty:h,ctx:t}),pt.$set(bn);const sr={};h&2&&(sr.$$scope={dirty:h,ctx:t}),ft.$set(sr);const ar={};h&2&&(ar.$$scope={dirty:h,ctx:t}),Ct.$set(ar);const rr={};h&2&&(rr.$$scope={dirty:h,ctx:t}),bt.$set(rr);const xn={};h&2&&(xn.$$scope={dirty:h,ctx:t}),Lt.$set(xn);const ir={};h&2&&(ir.$$scope={dirty:h,ctx:t}),$t.$set(ir);const lr={};h&2&&(lr.$$scope={dirty:h,ctx:t}),kt.$set(lr);const dr={};h&2&&(dr.$$scope={dirty:h,ctx:t}),Tt.$set(dr);const Ln={};h&2&&(Ln.$$scope={dirty:h,ctx:t}),zt.$set(Ln);const cr={};h&2&&(cr.$$scope={dirty:h,ctx:t}),jt.$set(cr);const pr={};h&2&&(pr.$$scope={dirty:h,ctx:t}),qt.$set(pr);const 
hr={};h&2&&(hr.$$scope={dirty:h,ctx:t}),At.$set(hr)},i(t){Kr||(I(u.$$.fragment,t),I(Ot.$$.fragment,t),I(Wt.$$.fragment,t),I(Bt.$$.fragment,t),I(Ut.$$.fragment,t),I(Gt.$$.fragment,t),I(Jt.$$.fragment,t),I(Xt.$$.fragment,t),I(Yt.$$.fragment,t),I(Qt.$$.fragment,t),I(eo.$$.fragment,t),I(to.$$.fragment,t),I(no.$$.fragment,t),I(so.$$.fragment,t),I(ao.$$.fragment,t),I(pt.$$.fragment,t),I(lo.$$.fragment,t),I(co.$$.fragment,t),I(ho.$$.fragment,t),I(mo.$$.fragment,t),I(fo.$$.fragment,t),I(go.$$.fragment,t),I(ft.$$.fragment,t),I(Po.$$.fragment,t),I(Io.$$.fragment,t),I(bo.$$.fragment,t),I(Lo.$$.fragment,t),I($o.$$.fragment,t),I(ko.$$.fragment,t),I(wo.$$.fragment,t),I(yo.$$.fragment,t),I(Eo.$$.fragment,t),I(Ct.$$.fragment,t),I(Mo.$$.fragment,t),I(bt.$$.fragment,t),I(qo.$$.fragment,t),I(Fo.$$.fragment,t),I(Vo.$$.fragment,t),I(Lt.$$.fragment,t),I(No.$$.fragment,t),I(Oo.$$.fragment,t),I($t.$$.fragment,t),I(So.$$.fragment,t),I(Wo.$$.fragment,t),I(kt.$$.fragment,t),I(Bo.$$.fragment,t),I(Ro.$$.fragment,t),I(Uo.$$.fragment,t),I(Tt.$$.fragment,t),I(Go.$$.fragment,t),I(Jo.$$.fragment,t),I(Xo.$$.fragment,t),I(zt.$$.fragment,t),I(Yo.$$.fragment,t),I(Zo.$$.fragment,t),I(Qo.$$.fragment,t),I(ln.$$.fragment,t),I(jt.$$.fragment,t),I(dn.$$.fragment,t),I(cn.$$.fragment,t),I(pn.$$.fragment,t),I(hn.$$.fragment,t),I(mn.$$.fragment,t),I(fn.$$.fragment,t),I(gn.$$.fragment,t),I(qt.$$.fragment,t),I(_n.$$.fragment,t),I(vn.$$.fragment,t),I(In.$$.fragment,t),I(At.$$.fragment,t),I(Cn.$$.fragment,t),Kr=!0)},o(t){C(u.$$.fragment,t),C(Ot.$$.fragment,t),C(Wt.$$.fragment,t),C(Bt.$$.fragment,t),C(Ut.$$.fragment,t),C(Gt.$$.fragment,t),C(Jt.$$.fragment,t),C(Xt.$$.fragment,t),C(Yt.$$.fragment,t),C(Qt.$$.fragment,t),C(eo.$$.fragment,t),C(to.$$.fragment,t),C(no.$$.fragment,t),C(so.$$.fragment,t),C(ao.$$.fragment,t),C(pt.$$.fragment,t),C(lo.$$.fragment,t),C(co.$$.fragment,t),C(ho.$$.fragment,t),C(mo.$$.fragment,t),C(fo.$$.fragment,t),C(go.$$.fragment,t),C(ft.$$.fragment,t),C(Po.$$.fragment,t),C(Io.$$.fragment,t),C(bo.$$.fragment,t),C(Lo.$$.fragment,t),C($o.$$.fragment,t),C(ko.$$.fragment,t),C(wo.$$.fragment,t),C(yo.$$.fragment,t),C(Eo.$$.fragment,t),C(Ct.$$.fragment,t),C(Mo.$$.fragment,t),C(bt.$$.fragment,t),C(qo.$$.fragment,t),C(Fo.$$.fragment,t),C(Vo.$$.fragment,t),C(Lt.$$.fragment,t),C(No.$$.fragment,t),C(Oo.$$.fragment,t),C($t.$$.fragment,t),C(So.$$.fragment,t),C(Wo.$$.fragment,t),C(kt.$$.fragment,t),C(Bo.$$.fragment,t),C(Ro.$$.fragment,t),C(Uo.$$.fragment,t),C(Tt.$$.fragment,t),C(Go.$$.fragment,t),C(Jo.$$.fragment,t),C(Xo.$$.fragment,t),C(zt.$$.fragment,t),C(Yo.$$.fragment,t),C(Zo.$$.fragment,t),C(Qo.$$.fragment,t),C(ln.$$.fragment,t),C(jt.$$.fragment,t),C(dn.$$.fragment,t),C(cn.$$.fragment,t),C(pn.$$.fragment,t),C(hn.$$.fragment,t),C(mn.$$.fragment,t),C(fn.$$.fragment,t),C(gn.$$.fragment,t),C(qt.$$.fragment,t),C(_n.$$.fragment,t),C(vn.$$.fragment,t),C(In.$$.fragment,t),C(At.$$.fragment,t),C(Cn.$$.fragment,t),Kr=!1},d(t){o(p),t&&o(L),t&&o(f),b(u),t&&o(w),t&&o(A),b(Ot),t&&o(fr),t&&o(ot),t&&o(ur),t&&o($n),t&&o(gr),t&&o(kn),t&&o(_r),t&&o(we),b(Wt),t&&o(vr),t&&o(wn),t&&o(Pr),t&&o(st),t&&o(Ir),t&&o(D),t&&o(Cr),b(Bt,t),t&&o(br),t&&o(de),t&&o(xr),t&&o(Te),b(Ut),t&&o(Lr),t&&o(se),b(Gt),b(Jt),t&&o($r),t&&o(ze),b(Xt),t&&o(kr),t&&o(S),b(Yt),b(Qt),t&&o(wr),t&&o(Me),b(eo),t&&o(Tr),t&&o(W),b(to),b(no),t&&o(yr),t&&o(Ae),b(so),t&&o(zr),t&&o(z),b(ao),b(pt),b(lo),b(co),b(ho),t&&o(Er),t&&o(De),b(mo),t&&o(jr),t&&o(q),b(fo),b(go),b(ft),t&&o(Mr),t&&o(Ve),b(Po),t&&o(qr),t&&o(B),b(Io),b(bo),b(Lo),t&&o(Fr),t&&o(Oe),b($o),t&&o(Ar),t&&o(F),b(ko),b(wo),b(yo),b(Eo),b
(Ct),b(Mo),b(bt),t&&o(Dr),t&&o(We),b(qo),t&&o(Vr),t&&o(R),b(Fo),b(Vo),b(Lt),b(No),b(Oo),b($t),b(So),b(Wo),b(kt),b(Bo),t&&o(Nr),t&&o(Ue),b(Ro),t&&o(Or),t&&o(Ho),b(Uo),b(Tt),b(Go),t&&o(Sr),t&&o(Je),b(Jo),t&&o(Wr),t&&o(Ko),b(Xo),b(zt),b(Yo),t&&o(Br),t&&o(Xe),b(Zo),t&&o(Rr),t&&o(j),b(Qo),b(ln),b(jt),b(dn),b(cn),b(pn),b(hn),b(mn),t&&o(Hr),t&&o(Ze),b(fn),t&&o(Ur),t&&o(un),b(gn),b(qt),b(_n),t&&o(Gr),t&&o(et),b(vn),t&&o(Jr),t&&o(Pn),b(In),b(At),b(Cn)}}}const Tg={local:"clip",sections:[{local:"overview",title:"Overview"},{local:"usage",title:"Usage"},{local:"transformers.CLIPConfig",title:"CLIPConfig"},{local:"transformers.CLIPTextConfig",title:"CLIPTextConfig"},{local:"transformers.CLIPVisionConfig",title:"CLIPVisionConfig"},{local:"transformers.CLIPTokenizer",title:"CLIPTokenizer"},{local:"transformers.CLIPTokenizerFast",title:"CLIPTokenizerFast"},{local:"transformers.CLIPFeatureExtractor",title:"CLIPFeatureExtractor"},{local:"transformers.CLIPProcessor",title:"CLIPProcessor"},{local:"transformers.CLIPModel",title:"CLIPModel"},{local:"transformers.CLIPTextModel",title:"CLIPTextModel"},{local:"transformers.CLIPVisionModel",title:"CLIPVisionModel"},{local:"transformers.FlaxCLIPModel",title:"FlaxCLIPModel"},{local:"transformers.FlaxCLIPTextModel",title:"FlaxCLIPTextModel"},{local:"transformers.FlaxCLIPVisionModel",title:"FlaxCLIPVisionModel"}],title:"CLIP"};function yg(y,p,L){let{fw:f}=p;return y.$$set=x=>{"fw"in x&&L(0,f=x.fw)},[f]}class Ag extends pg{constructor(p){super();hg(this,p,yg,wg,mg,{fw:0})}}export{Ag as default,Tg as metadata};
9,965
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bartpho.mdx-9423c3a1.js
import{S as Uo,i as Go,s as Ho,e as s,k as p,w as v,t as r,L as Ko,c as n,d as o,m as c,a,x as T,h as i,b as d,J as t,g as h,y as w,K as Jo,q as y,o as q,B}from"../../chunks/vendor-b1433968.js";import{D as Pe}from"../../chunks/Docstring-ff504c58.js";import{C as Wo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ho}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Qo(nt){let _,ee,f,g,ge,S,at,_e,rt,je,z,x,ke,M,it,be,lt,Le,R,pt,I,ct,dt,De,te,ht,Se,oe,ve,mt,Me,se,ut,Ie,N,Ne,ne,ft,Ce,ae,C,gt,re,_t,kt,Oe,O,Ve,ie,Te,bt,Fe,k,vt,V,Tt,wt,F,yt,qt,Xe,A,P,we,X,Bt,ye,zt,We,m,W,At,$,$t,le,Et,xt,U,Rt,Pt,jt,G,Lt,pe,Dt,St,Mt,E,It,qe,Nt,Ct,Be,Ot,Vt,Ft,b,H,Xt,ze,Wt,Ut,K,ce,Gt,Ae,Ht,Kt,de,Jt,$e,Qt,Yt,j,J,Zt,Ee,eo,to,L,Q,oo,xe,so,no,D,Y,ao,Z,ro,Re,io,lo,Ue;return S=new ho({}),M=new ho({}),N=new Wo({props:{code:`import torch from transformers import AutoModel, AutoTokenizer bartpho = AutoModel.from_pretrained("vinai/bartpho-syllable") tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable") line = "Ch\xFAng t\xF4i l\xE0 nh\u1EEFng nghi\xEAn c\u1EE9u vi\xEAn." input_ids = tokenizer(line, return_tensors="pt") with torch.no_grad(): features = bartpho(**input_ids) # Models outputs are now tuples # With TensorFlow 2.0+: from transformers import TFAutoModel bartpho = TFAutoModel.from_pretrained("vinai/bartpho-syllable") input_ids = tokenizer(line, return_tensors="tf") features = bartpho(**input_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>bartpho = AutoModel.from_pretrained(<span class="hljs-string">&quot;vinai/bartpho-syllable&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;vinai/bartpho-syllable&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>line = <span class="hljs-string">&quot;Ch\xFAng t\xF4i l\xE0 nh\u1EEFng nghi\xEAn c\u1EE9u vi\xEAn.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(line, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> features = bartpho(**input_ids) <span class="hljs-comment"># Models outputs are now tuples</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># With TensorFlow 2.0+:</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>bartpho = TFAutoModel.from_pretrained(<span class="hljs-string">&quot;vinai/bartpho-syllable&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(line, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>features = bartpho(**input_ids)`}}),O=new Wo({props:{code:`from transformers import MBartForConditionalGeneration bartpho = MBartForConditionalGeneration.from_pretrained("vinai/bartpho-syllable") TXT = 'Ch\xFAng t\xF4i l\xE0 <mask> nghi\xEAn c\u1EE9u vi\xEAn.' 
input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] logits = bartpho(input_ids).logits masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5) print(tokenizer.decode(predictions).split()),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>bartpho = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;vinai/bartpho-syllable&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&#x27;Ch\xFAng t\xF4i l\xE0 &lt;mask&gt; nghi\xEAn c\u1EE9u vi\xEAn.&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer([TXT], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>logits = bartpho(input_ids).logits <span class="hljs-meta">&gt;&gt;&gt; </span>masked_index = (input_ids[<span class="hljs-number">0</span>] == tokenizer.mask_token_id).nonzero().item() <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits[<span class="hljs-number">0</span>, masked_index].softmax(dim=<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>values, predictions = probs.topk(<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(predictions).split())`}}),X=new ho({}),W=new Pe({props:{name:"class transformers.BartphoTokenizer",anchor:"transformers.BartphoTokenizer",parameters:[{name:"vocab_file",val:""},{name:"monolingual_vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bartpho/tokenization_bartpho.py#L46",parametersDescription:[{anchor:"transformers.BartphoTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types.`,name:"vocab_file"},{anchor:"transformers.BartphoTokenizer.monolingual_vocab_file",description:`<strong>monolingual_vocab_file</strong> (<code>str</code>) &#x2014; Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized types extracted from the multilingual vocabulary vocab_file of 250K types.`,name:"monolingual_vocab_file"},{anchor:"transformers.BartphoTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. 
Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.BartphoTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.BartphoTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BartphoTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BartphoTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BartphoTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BartphoTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.BartphoTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.BartphoTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),H=new Pe({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BartphoTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bartpho/tokenization_bartpho.py#L183",parametersDescription:[{anchor:"transformers.BartphoTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BartphoTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),J=new Pe({props:{name:"convert_tokens_to_string",anchor:"transformers.BartphoTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bartpho/tokenization_bartpho.py#L285"}}),Q=new Pe({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BartphoTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bartpho/tokenization_bartpho.py#L237",parametersDescription:[{anchor:"transformers.BartphoTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.BartphoTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Y=new Pe({props:{name:"get_special_tokens_mask",anchor:"transformers.BartphoTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bartpho/tokenization_bartpho.py#L209",parametersDescription:[{anchor:"transformers.BartphoTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BartphoTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BartphoTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){_=s("meta"),ee=p(),f=s("h1"),g=s("a"),ge=s("span"),v(S.$$.fragment),at=p(),_e=s("span"),rt=r("BARTpho"),je=p(),z=s("h2"),x=s("a"),ke=s("span"),v(M.$$.fragment),it=p(),be=s("span"),lt=r("Overview"),Le=p(),R=s("p"),pt=r("The BARTpho model was proposed in "),I=s("a"),ct=r("BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese"),dt=r(" by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen."),De=p(),te=s("p"),ht=r("The abstract from the paper is the following:"),Se=p(),oe=s("p"),ve=s("em"),mt=r(`We present BARTpho with two versions \u2014 BARTpho_word and BARTpho_syllable \u2014 the first public large-scale monolingual sequence-to-sequence models pre-trained for Vietnamese. Our BARTpho uses the \u201Clarge\u201D architecture and pre-training scheme of the sequence-to-sequence denoising model BART, thus especially suitable for generative NLP tasks. Experiments on a downstream task of Vietnamese text summarization show that in both automatic and human evaluations, our BARTpho outperforms the strong baseline mBART and improves the state-of-the-art. We release BARTpho to facilitate future research and applications of generative Vietnamese NLP tasks.`),Me=p(),se=s("p"),ut=r("Example of use:"),Ie=p(),v(N.$$.fragment),Ne=p(),ne=s("p"),ft=r("Tips:"),Ce=p(),ae=s("ul"),C=s("li"),gt=r(`Following mBART, BARTpho uses the \u201Clarge\u201D architecture of BART with an additional layer-normalization layer on top of both the encoder and decoder. Thus, usage examples in the `),re=s("a"),_t=r("documentation of BART"),kt=r(`, when adapting to use with BARTpho, should be adjusted by replacing the BART-specialized classes with the mBART-specialized counterparts. 
For example:`),Oe=p(),v(O.$$.fragment),Ve=p(),ie=s("ul"),Te=s("li"),bt=r(`This implementation is only for tokenization: \u201Cmonolingual_vocab_file\u201D consists of Vietnamese-specialized types extracted from the pre-trained SentencePiece model \u201Cvocab_file\u201D that is available from the multilingual XLM-RoBERTa. Other languages, if employing this pre-trained multilingual SentencePiece model \u201Cvocab_file\u201D for subword segmentation, can reuse BartphoTokenizer with their own language-specialized \u201Cmonolingual_vocab_file\u201D.`),Fe=p(),k=s("p"),vt=r("This model was contributed by "),V=s("a"),Tt=r("dqnguyen"),wt=r(". The original code can be found "),F=s("a"),yt=r("here"),qt=r("."),Xe=p(),A=s("h2"),P=s("a"),we=s("span"),v(X.$$.fragment),Bt=p(),ye=s("span"),zt=r("BartphoTokenizer"),We=p(),m=s("div"),v(W.$$.fragment),At=p(),$=s("p"),$t=r("Adapted from "),le=s("a"),Et=r("XLMRobertaTokenizer"),xt=r(". Based on "),U=s("a"),Rt=r("SentencePiece"),Pt=r("."),jt=p(),G=s("p"),Lt=r("This tokenizer inherits from "),pe=s("a"),Dt=r("PreTrainedTokenizer"),St=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Mt=p(),E=s("p"),It=r(`Attributes: sp_model (`),qe=s("code"),Nt=r("SentencePieceProcessor"),Ct=r(`): The `),Be=s("em"),Ot=r("SentencePiece"),Vt=r(" processor that is used for every conversion (string, tokens and IDs)."),Ft=p(),b=s("div"),v(H.$$.fragment),Xt=p(),ze=s("p"),Wt=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BARTPho sequence has the following format:`),Ut=p(),K=s("ul"),ce=s("li"),Gt=r("single sequence: "),Ae=s("code"),Ht=r("<s> X </s>"),Kt=p(),de=s("li"),Jt=r("pair of sequences: "),$e=s("code"),Qt=r("<s> A </s></s> B </s>"),Yt=p(),j=s("div"),v(J.$$.fragment),Zt=p(),Ee=s("p"),eo=r("Converts a sequence of tokens (strings for sub-words) in a single string."),to=p(),L=s("div"),v(Q.$$.fragment),oo=p(),xe=s("p"),so=r(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not make use of token type ids, therefore a list of zeros is returned.`),no=p(),D=s("div"),v(Y.$$.fragment),ao=p(),Z=s("p"),ro=r(`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Re=s("code"),io=r("prepare_for_model"),lo=r(" method."),this.h()},l(e){const l=Ko('[data-svelte="svelte-1phssyn"]',document.head);_=n(l,"META",{name:!0,content:!0}),l.forEach(o),ee=c(e),f=n(e,"H1",{class:!0});var Ge=a(f);g=n(Ge,"A",{id:!0,class:!0,href:!0});var mo=a(g);ge=n(mo,"SPAN",{});var uo=a(ge);T(S.$$.fragment,uo),uo.forEach(o),mo.forEach(o),at=c(Ge),_e=n(Ge,"SPAN",{});var fo=a(_e);rt=i(fo,"BARTpho"),fo.forEach(o),Ge.forEach(o),je=c(e),z=n(e,"H2",{class:!0});var He=a(z);x=n(He,"A",{id:!0,class:!0,href:!0});var go=a(x);ke=n(go,"SPAN",{});var _o=a(ke);T(M.$$.fragment,_o),_o.forEach(o),go.forEach(o),it=c(He),be=n(He,"SPAN",{});var ko=a(be);lt=i(ko,"Overview"),ko.forEach(o),He.forEach(o),Le=c(e),R=n(e,"P",{});var Ke=a(R);pt=i(Ke,"The BARTpho model was proposed in "),I=n(Ke,"A",{href:!0,rel:!0});var bo=a(I);ct=i(bo,"BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese"),bo.forEach(o),dt=i(Ke," by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen."),Ke.forEach(o),De=c(e),te=n(e,"P",{});var vo=a(te);ht=i(vo,"The abstract from the paper is the following:"),vo.forEach(o),Se=c(e),oe=n(e,"P",{});var To=a(oe);ve=n(To,"EM",{});var wo=a(ve);mt=i(wo,`We present BARTpho with two versions \u2014 BARTpho_word and BARTpho_syllable \u2014 the first public large-scale monolingual sequence-to-sequence models pre-trained for Vietnamese. Our BARTpho uses the \u201Clarge\u201D architecture and pre-training scheme of the sequence-to-sequence denoising model BART, thus especially suitable for generative NLP tasks. Experiments on a downstream task of Vietnamese text summarization show that in both automatic and human evaluations, our BARTpho outperforms the strong baseline mBART and improves the state-of-the-art. We release BARTpho to facilitate future research and applications of generative Vietnamese NLP tasks.`),wo.forEach(o),To.forEach(o),Me=c(e),se=n(e,"P",{});var yo=a(se);ut=i(yo,"Example of use:"),yo.forEach(o),Ie=c(e),T(N.$$.fragment,e),Ne=c(e),ne=n(e,"P",{});var qo=a(ne);ft=i(qo,"Tips:"),qo.forEach(o),Ce=c(e),ae=n(e,"UL",{});var Bo=a(ae);C=n(Bo,"LI",{});var Je=a(C);gt=i(Je,`Following mBART, BARTpho uses the \u201Clarge\u201D architecture of BART with an additional layer-normalization layer on top of both the encoder and decoder. Thus, usage examples in the `),re=n(Je,"A",{href:!0});var zo=a(re);_t=i(zo,"documentation of BART"),zo.forEach(o),kt=i(Je,`, when adapting to use with BARTpho, should be adjusted by replacing the BART-specialized classes with the mBART-specialized counterparts. For example:`),Je.forEach(o),Bo.forEach(o),Oe=c(e),T(O.$$.fragment,e),Ve=c(e),ie=n(e,"UL",{});var Ao=a(ie);Te=n(Ao,"LI",{});var $o=a(Te);bt=i($o,`This implementation is only for tokenization: \u201Cmonolingual_vocab_file\u201D consists of Vietnamese-specialized types extracted from the pre-trained SentencePiece model \u201Cvocab_file\u201D that is available from the multilingual XLM-RoBERTa. Other languages, if employing this pre-trained multilingual SentencePiece model \u201Cvocab_file\u201D for subword segmentation, can reuse BartphoTokenizer with their own language-specialized \u201Cmonolingual_vocab_file\u201D.`),$o.forEach(o),Ao.forEach(o),Fe=c(e),k=n(e,"P",{});var he=a(k);vt=i(he,"This model was contributed by "),V=n(he,"A",{href:!0,rel:!0});var Eo=a(V);Tt=i(Eo,"dqnguyen"),Eo.forEach(o),wt=i(he,". 
The original code can be found "),F=n(he,"A",{href:!0,rel:!0});var xo=a(F);yt=i(xo,"here"),xo.forEach(o),qt=i(he,"."),he.forEach(o),Xe=c(e),A=n(e,"H2",{class:!0});var Qe=a(A);P=n(Qe,"A",{id:!0,class:!0,href:!0});var Ro=a(P);we=n(Ro,"SPAN",{});var Po=a(we);T(X.$$.fragment,Po),Po.forEach(o),Ro.forEach(o),Bt=c(Qe),ye=n(Qe,"SPAN",{});var jo=a(ye);zt=i(jo,"BartphoTokenizer"),jo.forEach(o),Qe.forEach(o),We=c(e),m=n(e,"DIV",{class:!0});var u=a(m);T(W.$$.fragment,u),At=c(u),$=n(u,"P",{});var me=a($);$t=i(me,"Adapted from "),le=n(me,"A",{href:!0});var Lo=a(le);Et=i(Lo,"XLMRobertaTokenizer"),Lo.forEach(o),xt=i(me,". Based on "),U=n(me,"A",{href:!0,rel:!0});var Do=a(U);Rt=i(Do,"SentencePiece"),Do.forEach(o),Pt=i(me,"."),me.forEach(o),jt=c(u),G=n(u,"P",{});var Ye=a(G);Lt=i(Ye,"This tokenizer inherits from "),pe=n(Ye,"A",{href:!0});var So=a(pe);Dt=i(So,"PreTrainedTokenizer"),So.forEach(o),St=i(Ye,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ye.forEach(o),Mt=c(u),E=n(u,"P",{});var ue=a(E);It=i(ue,`Attributes: sp_model (`),qe=n(ue,"CODE",{});var Mo=a(qe);Nt=i(Mo,"SentencePieceProcessor"),Mo.forEach(o),Ct=i(ue,`): The `),Be=n(ue,"EM",{});var Io=a(Be);Ot=i(Io,"SentencePiece"),Io.forEach(o),Vt=i(ue," processor that is used for every conversion (string, tokens and IDs)."),ue.forEach(o),Ft=c(u),b=n(u,"DIV",{class:!0});var fe=a(b);T(H.$$.fragment,fe),Xt=c(fe),ze=n(fe,"P",{});var No=a(ze);Wt=i(No,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BARTPho sequence has the following format:`),No.forEach(o),Ut=c(fe),K=n(fe,"UL",{});var Ze=a(K);ce=n(Ze,"LI",{});var po=a(ce);Gt=i(po,"single sequence: "),Ae=n(po,"CODE",{});var Co=a(Ae);Ht=i(Co,"<s> X </s>"),Co.forEach(o),po.forEach(o),Kt=c(Ze),de=n(Ze,"LI",{});var co=a(de);Jt=i(co,"pair of sequences: "),$e=n(co,"CODE",{});var Oo=a($e);Qt=i(Oo,"<s> A </s></s> B </s>"),Oo.forEach(o),co.forEach(o),Ze.forEach(o),fe.forEach(o),Yt=c(u),j=n(u,"DIV",{class:!0});var et=a(j);T(J.$$.fragment,et),Zt=c(et),Ee=n(et,"P",{});var Vo=a(Ee);eo=i(Vo,"Converts a sequence of tokens (strings for sub-words) in a single string."),Vo.forEach(o),et.forEach(o),to=c(u),L=n(u,"DIV",{class:!0});var tt=a(L);T(Q.$$.fragment,tt),oo=c(tt),xe=n(tt,"P",{});var Fo=a(xe);so=i(Fo,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not make use of token type ids, therefore a list of zeros is returned.`),Fo.forEach(o),tt.forEach(o),no=c(u),D=n(u,"DIV",{class:!0});var ot=a(D);T(Y.$$.fragment,ot),ao=c(ot),Z=n(ot,"P",{});var st=a(Z);ro=i(st,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Re=n(st,"CODE",{});var Xo=a(Re);io=i(Xo,"prepare_for_model"),Xo.forEach(o),lo=i(st," method."),st.forEach(o),ot.forEach(o),u.forEach(o),this.h()},h(){d(_,"name","hf:doc:metadata"),d(_,"content",JSON.stringify(Yo)),d(g,"id","bartpho"),d(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(g,"href","#bartpho"),d(f,"class","relative group"),d(x,"id","overview"),d(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(x,"href","#overview"),d(z,"class","relative group"),d(I,"href","https://arxiv.org/abs/2109.09701"),d(I,"rel","nofollow"),d(re,"href","bart"),d(V,"href","https://huggingface.co/dqnguyen"),d(V,"rel","nofollow"),d(F,"href","https://github.com/VinAIResearch/BARTpho"),d(F,"rel","nofollow"),d(P,"id","transformers.BartphoTokenizer"),d(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(P,"href","#transformers.BartphoTokenizer"),d(A,"class","relative group"),d(le,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizer"),d(U,"href","https://github.com/google/sentencepiece"),d(U,"rel","nofollow"),d(pe,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(b,"class","docstring"),d(j,"class","docstring"),d(L,"class","docstring"),d(D,"class","docstring"),d(m,"class","docstring")},m(e,l){t(document.head,_),h(e,ee,l),h(e,f,l),t(f,g),t(g,ge),w(S,ge,null),t(f,at),t(f,_e),t(_e,rt),h(e,je,l),h(e,z,l),t(z,x),t(x,ke),w(M,ke,null),t(z,it),t(z,be),t(be,lt),h(e,Le,l),h(e,R,l),t(R,pt),t(R,I),t(I,ct),t(R,dt),h(e,De,l),h(e,te,l),t(te,ht),h(e,Se,l),h(e,oe,l),t(oe,ve),t(ve,mt),h(e,Me,l),h(e,se,l),t(se,ut),h(e,Ie,l),w(N,e,l),h(e,Ne,l),h(e,ne,l),t(ne,ft),h(e,Ce,l),h(e,ae,l),t(ae,C),t(C,gt),t(C,re),t(re,_t),t(C,kt),h(e,Oe,l),w(O,e,l),h(e,Ve,l),h(e,ie,l),t(ie,Te),t(Te,bt),h(e,Fe,l),h(e,k,l),t(k,vt),t(k,V),t(V,Tt),t(k,wt),t(k,F),t(F,yt),t(k,qt),h(e,Xe,l),h(e,A,l),t(A,P),t(P,we),w(X,we,null),t(A,Bt),t(A,ye),t(ye,zt),h(e,We,l),h(e,m,l),w(W,m,null),t(m,At),t(m,$),t($,$t),t($,le),t(le,Et),t($,xt),t($,U),t(U,Rt),t($,Pt),t(m,jt),t(m,G),t(G,Lt),t(G,pe),t(pe,Dt),t(G,St),t(m,Mt),t(m,E),t(E,It),t(E,qe),t(qe,Nt),t(E,Ct),t(E,Be),t(Be,Ot),t(E,Vt),t(m,Ft),t(m,b),w(H,b,null),t(b,Xt),t(b,ze),t(ze,Wt),t(b,Ut),t(b,K),t(K,ce),t(ce,Gt),t(ce,Ae),t(Ae,Ht),t(K,Kt),t(K,de),t(de,Jt),t(de,$e),t($e,Qt),t(m,Yt),t(m,j),w(J,j,null),t(j,Zt),t(j,Ee),t(Ee,eo),t(m,to),t(m,L),w(Q,L,null),t(L,oo),t(L,xe),t(xe,so),t(m,no),t(m,D),w(Y,D,null),t(D,ao),t(D,Z),t(Z,ro),t(Z,Re),t(Re,io),t(Z,lo),Ue=!0},p:Jo,i(e){Ue||(y(S.$$.fragment,e),y(M.$$.fragment,e),y(N.$$.fragment,e),y(O.$$.fragment,e),y(X.$$.fragment,e),y(W.$$.fragment,e),y(H.$$.fragment,e),y(J.$$.fragment,e),y(Q.$$.fragment,e),y(Y.$$.fragment,e),Ue=!0)},o(e){q(S.$$.fragment,e),q(M.$$.fragment,e),q(N.$$.fragment,e),q(O.$$.fragment,e),q(X.$$.fragment,e),q(W.$$.fragment,e),q(H.$$.fragment,e),q(J.$$.fragment,e),q(Q.$$.fragment,e),q(Y.$$.fragment,e),Ue=!1},d(e){o(_),e&&o(ee),e&&o(f),B(S),e&&o(je),e&&o(z),B(M),e&&o(Le),e&&o(R),e&&o(De),e&&o(te),e&&o(Se),e&&o(oe),e&&o(Me),e&&o(se),e&&o(Ie),B(N,e),e&&o(Ne),e&&o(ne),e&&o(Ce),e&&o(ae),e&&o(Oe),B(O,e),e&&o(Ve),e&&o(ie),e&&o(Fe),e&&o(k),e&&o(Xe
),e&&o(A),B(X),e&&o(We),e&&o(m),B(W),B(H),B(J),B(Q),B(Y)}}}const Yo={local:"bartpho",sections:[{local:"overview",title:"Overview"},{local:"transformers.BartphoTokenizer",title:"BartphoTokenizer"}],title:"BARTpho"};function Zo(nt,_,ee){let{fw:f}=_;return nt.$$set=g=>{"fw"in g&&ee(0,f=g.fw)},[f]}class as extends Uo{constructor(_){super();Go(this,_,Zo,Qo,Ho,{fw:0})}}export{as as default,Yo as metadata};
9,966
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/layoutlm.mdx-458a185b.js
import{S as Hh,i as Rh,s as Kh,e as a,k as l,w as L,t as n,L as Vh,c as r,d as t,m as d,a as i,x as v,h as s,b as c,J as e,g as u,y as w,q as T,o as M,B as x}from"../../chunks/vendor-b1433968.js";import{T as Qe}from"../../chunks/Tip-c3840994.js";import{D as _e}from"../../chunks/Docstring-ff504c58.js";import{C as tt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Ve}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Xh(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function Yh(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function Zh(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function Gh(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function 
Jh(B){let h,$,f,g,b,y,_,j,re,Y,z,ae,I,Z,ne,A,ie,se,J,D,G,Q,F,E,le,W,V,ee,H,te,de,q,ce,O,pe,oe,R,ue,he,P,me,U,fe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=a("ul"),b=a("li"),y=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),j=a("li"),re=n("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),z=a("p"),ae=n("This second option is useful when using "),I=a("code"),Z=n("tf.keras.Model.fit"),ne=n(` method which currently requires having all the tensors in the first argument of the model call function: `),A=a("code"),ie=n("model(inputs)"),se=n("."),J=l(),D=a("p"),G=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Q=l(),F=a("ul"),E=a("li"),le=n("a single Tensor with "),W=a("code"),V=n("input_ids"),ee=n(" only and nothing else: "),H=a("code"),te=n("model(inputs_ids)"),de=l(),q=a("li"),ce=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),pe=n("model([input_ids, attention_mask])"),oe=n(" or "),R=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),he=l(),P=a("li"),me=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),fe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var k=i(h);$=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(p),g=r(p,"UL",{});var X=i(g);b=r(X,"LI",{});var je=i(b);y=s(je,"having all inputs as keyword arguments (like PyTorch models), or"),je.forEach(t),_=d(X),j=r(X,"LI",{});var Me=i(j);re=s(Me,"having all inputs as a list, tuple or dict in the first positional arguments."),Me.forEach(t),X.forEach(t),Y=d(p),z=r(p,"P",{});var N=i(z);ae=s(N,"This second option is useful when using "),I=r(N,"CODE",{});var be=i(I);Z=s(be,"tf.keras.Model.fit"),be.forEach(t),ne=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),A=r(N,"CODE",{});var Fe=i(A);ie=s(Fe,"model(inputs)"),Fe.forEach(t),se=s(N,"."),N.forEach(t),J=d(p),D=r(p,"P",{});var xe=i(D);G=s(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),Q=d(p),F=r(p,"UL",{});var S=i(F);E=r(S,"LI",{});var K=i(E);le=s(K,"a single Tensor with "),W=r(K,"CODE",{});var $e=i(W);V=s($e,"input_ids"),$e.forEach(t),ee=s(K," only and nothing else: "),H=r(K,"CODE",{});var ye=i(H);te=s(ye,"model(inputs_ids)"),ye.forEach(t),K.forEach(t),de=d(S),q=r(S,"LI",{});var C=i(q);ce=s(C,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(C,"CODE",{});var ze=i(O);pe=s(ze,"model([input_ids, attention_mask])"),ze.forEach(t),oe=s(C," or "),R=r(C,"CODE",{});var ke=i(R);ue=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),C.forEach(t),he=d(S),P=r(S,"LI",{});var ge=i(P);me=s(ge,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(ge,"CODE",{});var Ee=i(U);fe=s(Ee,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Ee.forEach(t),ge.forEach(t),S.forEach(t)},m(p,k){u(p,h,k),e(h,$),u(p,f,k),u(p,g,k),e(g,b),e(b,y),e(g,_),e(g,j),e(j,re),u(p,Y,k),u(p,z,k),e(z,ae),e(z,I),e(I,Z),e(z,ne),e(z,A),e(A,ie),e(z,se),u(p,J,k),u(p,D,k),e(D,G),u(p,Q,k),u(p,F,k),e(F,E),e(E,le),e(E,W),e(W,V),e(E,ee),e(E,H),e(H,te),e(F,de),e(F,q),e(q,ce),e(q,O),e(O,pe),e(q,oe),e(q,R),e(R,ue),e(F,he),e(F,P),e(P,me),e(P,U),e(U,fe)},d(p){p&&t(h),p&&t(f),p&&t(g),p&&t(Y),p&&t(z),p&&t(J),p&&t(D),p&&t(Q),p&&t(F)}}}function Qh(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function em(B){let h,$,f,g,b,y,_,j,re,Y,z,ae,I,Z,ne,A,ie,se,J,D,G,Q,F,E,le,W,V,ee,H,te,de,q,ce,O,pe,oe,R,ue,he,P,me,U,fe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=a("ul"),b=a("li"),y=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),j=a("li"),re=n("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),z=a("p"),ae=n("This second option is useful when using "),I=a("code"),Z=n("tf.keras.Model.fit"),ne=n(` method which currently requires having all the tensors in the first argument of the model call function: `),A=a("code"),ie=n("model(inputs)"),se=n("."),J=l(),D=a("p"),G=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Q=l(),F=a("ul"),E=a("li"),le=n("a single Tensor with "),W=a("code"),V=n("input_ids"),ee=n(" only and nothing else: "),H=a("code"),te=n("model(inputs_ids)"),de=l(),q=a("li"),ce=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),pe=n("model([input_ids, attention_mask])"),oe=n(" or "),R=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),he=l(),P=a("li"),me=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),fe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var k=i(h);$=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(p),g=r(p,"UL",{});var X=i(g);b=r(X,"LI",{});var je=i(b);y=s(je,"having all inputs as keyword arguments (like PyTorch models), or"),je.forEach(t),_=d(X),j=r(X,"LI",{});var Me=i(j);re=s(Me,"having all inputs as a list, tuple or dict in the first positional arguments."),Me.forEach(t),X.forEach(t),Y=d(p),z=r(p,"P",{});var N=i(z);ae=s(N,"This second option is useful when using "),I=r(N,"CODE",{});var be=i(I);Z=s(be,"tf.keras.Model.fit"),be.forEach(t),ne=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),A=r(N,"CODE",{});var Fe=i(A);ie=s(Fe,"model(inputs)"),Fe.forEach(t),se=s(N,"."),N.forEach(t),J=d(p),D=r(p,"P",{});var xe=i(D);G=s(xe,`If you choose this second option, there are three 
possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),Q=d(p),F=r(p,"UL",{});var S=i(F);E=r(S,"LI",{});var K=i(E);le=s(K,"a single Tensor with "),W=r(K,"CODE",{});var $e=i(W);V=s($e,"input_ids"),$e.forEach(t),ee=s(K," only and nothing else: "),H=r(K,"CODE",{});var ye=i(H);te=s(ye,"model(inputs_ids)"),ye.forEach(t),K.forEach(t),de=d(S),q=r(S,"LI",{});var C=i(q);ce=s(C,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(C,"CODE",{});var ze=i(O);pe=s(ze,"model([input_ids, attention_mask])"),ze.forEach(t),oe=s(C," or "),R=r(C,"CODE",{});var ke=i(R);ue=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),C.forEach(t),he=d(S),P=r(S,"LI",{});var ge=i(P);me=s(ge,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(ge,"CODE",{});var Ee=i(U);fe=s(Ee,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ee.forEach(t),ge.forEach(t),S.forEach(t)},m(p,k){u(p,h,k),e(h,$),u(p,f,k),u(p,g,k),e(g,b),e(b,y),e(g,_),e(g,j),e(j,re),u(p,Y,k),u(p,z,k),e(z,ae),e(z,I),e(I,Z),e(z,ne),e(z,A),e(A,ie),e(z,se),u(p,J,k),u(p,D,k),e(D,G),u(p,Q,k),u(p,F,k),e(F,E),e(E,le),e(E,W),e(W,V),e(E,ee),e(E,H),e(H,te),e(F,de),e(F,q),e(q,ce),e(q,O),e(O,pe),e(q,oe),e(q,R),e(R,ue),e(F,he),e(F,P),e(P,me),e(P,U),e(U,fe)},d(p){p&&t(h),p&&t(f),p&&t(g),p&&t(Y),p&&t(z),p&&t(J),p&&t(D),p&&t(Q),p&&t(F)}}}function tm(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function om(B){let h,$,f,g,b,y,_,j,re,Y,z,ae,I,Z,ne,A,ie,se,J,D,G,Q,F,E,le,W,V,ee,H,te,de,q,ce,O,pe,oe,R,ue,he,P,me,U,fe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=a("ul"),b=a("li"),y=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),j=a("li"),re=n("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),z=a("p"),ae=n("This second option is useful when using "),I=a("code"),Z=n("tf.keras.Model.fit"),ne=n(` method which currently requires having all the tensors in the first argument of the model call function: `),A=a("code"),ie=n("model(inputs)"),se=n("."),J=l(),D=a("p"),G=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Q=l(),F=a("ul"),E=a("li"),le=n("a single Tensor with "),W=a("code"),V=n("input_ids"),ee=n(" only and nothing else: "),H=a("code"),te=n("model(inputs_ids)"),de=l(),q=a("li"),ce=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),pe=n("model([input_ids, attention_mask])"),oe=n(" or "),R=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),he=l(),P=a("li"),me=n(`a dictionary with one or several input Tensors associated to 
the input names given in the docstring: `),U=a("code"),fe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var k=i(h);$=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(p),g=r(p,"UL",{});var X=i(g);b=r(X,"LI",{});var je=i(b);y=s(je,"having all inputs as keyword arguments (like PyTorch models), or"),je.forEach(t),_=d(X),j=r(X,"LI",{});var Me=i(j);re=s(Me,"having all inputs as a list, tuple or dict in the first positional arguments."),Me.forEach(t),X.forEach(t),Y=d(p),z=r(p,"P",{});var N=i(z);ae=s(N,"This second option is useful when using "),I=r(N,"CODE",{});var be=i(I);Z=s(be,"tf.keras.Model.fit"),be.forEach(t),ne=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),A=r(N,"CODE",{});var Fe=i(A);ie=s(Fe,"model(inputs)"),Fe.forEach(t),se=s(N,"."),N.forEach(t),J=d(p),D=r(p,"P",{});var xe=i(D);G=s(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),Q=d(p),F=r(p,"UL",{});var S=i(F);E=r(S,"LI",{});var K=i(E);le=s(K,"a single Tensor with "),W=r(K,"CODE",{});var $e=i(W);V=s($e,"input_ids"),$e.forEach(t),ee=s(K," only and nothing else: "),H=r(K,"CODE",{});var ye=i(H);te=s(ye,"model(inputs_ids)"),ye.forEach(t),K.forEach(t),de=d(S),q=r(S,"LI",{});var C=i(q);ce=s(C,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(C,"CODE",{});var ze=i(O);pe=s(ze,"model([input_ids, attention_mask])"),ze.forEach(t),oe=s(C," or "),R=r(C,"CODE",{});var ke=i(R);ue=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),C.forEach(t),he=d(S),P=r(S,"LI",{});var ge=i(P);me=s(ge,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(ge,"CODE",{});var Ee=i(U);fe=s(Ee,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ee.forEach(t),ge.forEach(t),S.forEach(t)},m(p,k){u(p,h,k),e(h,$),u(p,f,k),u(p,g,k),e(g,b),e(b,y),e(g,_),e(g,j),e(j,re),u(p,Y,k),u(p,z,k),e(z,ae),e(z,I),e(I,Z),e(z,ne),e(z,A),e(A,ie),e(z,se),u(p,J,k),u(p,D,k),e(D,G),u(p,Q,k),u(p,F,k),e(F,E),e(E,le),e(E,W),e(W,V),e(E,ee),e(E,H),e(H,te),e(F,de),e(F,q),e(q,ce),e(q,O),e(O,pe),e(q,oe),e(q,R),e(R,ue),e(F,he),e(F,P),e(P,me),e(P,U),e(U,fe)},d(p){p&&t(h),p&&t(f),p&&t(g),p&&t(Y),p&&t(z),p&&t(J),p&&t(D),p&&t(Q),p&&t(F)}}}function nm(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function sm(B){let h,$,f,g,b,y,_,j,re,Y,z,ae,I,Z,ne,A,ie,se,J,D,G,Q,F,E,le,W,V,ee,H,te,de,q,ce,O,pe,oe,R,ue,he,P,me,U,fe;return{c(){h=a("p"),$=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=a("ul"),b=a("li"),y=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),j=a("li"),re=n("having all inputs as a 
list, tuple or dict in the first positional arguments."),Y=l(),z=a("p"),ae=n("This second option is useful when using "),I=a("code"),Z=n("tf.keras.Model.fit"),ne=n(` method which currently requires having all the tensors in the first argument of the model call function: `),A=a("code"),ie=n("model(inputs)"),se=n("."),J=l(),D=a("p"),G=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Q=l(),F=a("ul"),E=a("li"),le=n("a single Tensor with "),W=a("code"),V=n("input_ids"),ee=n(" only and nothing else: "),H=a("code"),te=n("model(inputs_ids)"),de=l(),q=a("li"),ce=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),pe=n("model([input_ids, attention_mask])"),oe=n(" or "),R=a("code"),ue=n("model([input_ids, attention_mask, token_type_ids])"),he=l(),P=a("li"),me=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=a("code"),fe=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){h=r(p,"P",{});var k=i(h);$=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(p),g=r(p,"UL",{});var X=i(g);b=r(X,"LI",{});var je=i(b);y=s(je,"having all inputs as keyword arguments (like PyTorch models), or"),je.forEach(t),_=d(X),j=r(X,"LI",{});var Me=i(j);re=s(Me,"having all inputs as a list, tuple or dict in the first positional arguments."),Me.forEach(t),X.forEach(t),Y=d(p),z=r(p,"P",{});var N=i(z);ae=s(N,"This second option is useful when using "),I=r(N,"CODE",{});var be=i(I);Z=s(be,"tf.keras.Model.fit"),be.forEach(t),ne=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),A=r(N,"CODE",{});var Fe=i(A);ie=s(Fe,"model(inputs)"),Fe.forEach(t),se=s(N,"."),N.forEach(t),J=d(p),D=r(p,"P",{});var xe=i(D);G=s(xe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),xe.forEach(t),Q=d(p),F=r(p,"UL",{});var S=i(F);E=r(S,"LI",{});var K=i(E);le=s(K,"a single Tensor with "),W=r(K,"CODE",{});var $e=i(W);V=s($e,"input_ids"),$e.forEach(t),ee=s(K," only and nothing else: "),H=r(K,"CODE",{});var ye=i(H);te=s(ye,"model(inputs_ids)"),ye.forEach(t),K.forEach(t),de=d(S),q=r(S,"LI",{});var C=i(q);ce=s(C,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(C,"CODE",{});var ze=i(O);pe=s(ze,"model([input_ids, attention_mask])"),ze.forEach(t),oe=s(C," or "),R=r(C,"CODE",{});var ke=i(R);ue=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),C.forEach(t),he=d(S),P=r(S,"LI",{});var ge=i(P);me=s(ge,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),U=r(ge,"CODE",{});var Ee=i(U);fe=s(Ee,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ee.forEach(t),ge.forEach(t),S.forEach(t)},m(p,k){u(p,h,k),e(h,$),u(p,f,k),u(p,g,k),e(g,b),e(b,y),e(g,_),e(g,j),e(j,re),u(p,Y,k),u(p,z,k),e(z,ae),e(z,I),e(I,Z),e(z,ne),e(z,A),e(A,ie),e(z,se),u(p,J,k),u(p,D,k),e(D,G),u(p,Q,k),u(p,F,k),e(F,E),e(E,le),e(E,W),e(W,V),e(E,ee),e(E,H),e(H,te),e(F,de),e(F,q),e(q,ce),e(q,O),e(O,pe),e(q,oe),e(q,R),e(R,ue),e(F,he),e(F,P),e(P,me),e(P,U),e(U,fe)},d(p){p&&t(h),p&&t(f),p&&t(g),p&&t(Y),p&&t(z),p&&t(J),p&&t(D),p&&t(Q),p&&t(F)}}}function am(B){let h,$,f,g,b;return{c(){h=a("p"),$=n("Although the recipe for forward pass needs to be defined within this 
function, one should call the "),f=a("code"),g=n("Module"),b=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=r(y,"P",{});var _=i(h);$=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var j=i(f);g=s(j,"Module"),j.forEach(t),b=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(y,_){u(y,h,_),e(h,$),e(h,f),e(f,g),e(h,b)},d(y){y&&t(h)}}}function rm(B){let h,$,f,g,b,y,_,j,re,Y,z,ae,I,Z,ne,A,ie,se,J,D,G,Q,F,E,le,W,V,ee,H,te,de,q,ce,O,pe,oe,R,ue,he,P,me,U,fe,p,k,X,je,Me,N,be,Fe,xe,S,K,$e,ye,C,ze,ke,ge,Ee,Kn,Hr,Rr,zs,Kr,Vr,go,Xr,Yr,_o,Zr,Gr,Da,yo,Aa,ot,Jr,Es,Qr,ei,qs,ti,oi,Oa,bo,Ua,Vn,et,ni,Xn,si,ai,ko,ri,ii,Lo,li,di,Wa,nt,ci,vo,pi,ui,wo,hi,mi,Ba,ft,Dt,Cs,To,fi,Ps,gi,Ha,qe,Mo,_i,gt,yi,Yn,bi,ki,xo,Li,vi,wi,_t,Ti,Zn,Mi,xi,Gn,$i,ji,Fi,Ns,zi,Ei,$o,Ra,yt,At,Ss,jo,qi,Is,Ci,Ka,Be,Fo,Pi,Ds,Ni,Si,Ot,Jn,Ii,Di,Qn,Ai,Oi,Ui,zo,Wi,es,Bi,Hi,Va,bt,Ut,As,Eo,Ri,Os,Ki,Xa,He,qo,Vi,Us,Xi,Yi,Wt,ts,Zi,Gi,os,Ji,Qi,el,Co,tl,ns,ol,nl,Ya,kt,Bt,Ws,Po,sl,Bs,al,Za,Re,No,rl,So,il,Io,ll,dl,cl,Do,pl,Ao,ul,hl,ml,Ne,Oo,fl,Lt,gl,ss,_l,yl,Hs,bl,kl,Ll,Ht,vl,Rs,wl,Tl,Uo,Ga,vt,Rt,Ks,Wo,Ml,Vs,xl,Ja,Ke,Bo,$l,wt,jl,Xs,Fl,zl,Ho,El,ql,Cl,Ro,Pl,Ko,Nl,Sl,Il,Se,Vo,Dl,Tt,Al,as,Ol,Ul,Ys,Wl,Bl,Hl,Kt,Rl,Zs,Kl,Vl,Xo,Qa,Mt,Vt,Gs,Yo,Xl,Js,Yl,er,Ce,Zo,Zl,Go,Gl,Qs,Jl,Ql,ed,Jo,td,Qo,od,nd,sd,en,ad,tn,rd,id,ld,Ie,on,dd,xt,cd,rs,pd,ud,ea,hd,md,fd,Xt,gd,ta,_d,yd,nn,tr,$t,Yt,oa,sn,bd,na,kd,or,Pe,an,Ld,Zt,vd,sa,wd,is,Td,aa,Md,xd,$d,rn,jd,ln,Fd,zd,Ed,dn,qd,cn,Cd,Pd,Nd,De,pn,Sd,jt,Id,ls,Dd,Ad,ra,Od,Ud,Wd,Gt,Bd,ia,Hd,Rd,un,nr,Ft,Jt,la,hn,Kd,da,Vd,sr,Le,mn,Xd,ca,Yd,Zd,fn,Gd,ds,Jd,Qd,ec,gn,tc,_n,oc,nc,sc,Qt,ac,Ae,yn,rc,zt,ic,cs,lc,dc,pa,cc,pc,uc,eo,hc,ua,mc,fc,bn,ar,Et,to,ha,kn,gc,ma,_c,rr,ve,Ln,yc,vn,bc,fa,kc,Lc,vc,wn,wc,ps,Tc,Mc,xc,Tn,$c,Mn,jc,Fc,zc,oo,Ec,Oe,xn,qc,qt,Cc,us,Pc,Nc,ga,Sc,Ic,Dc,no,Ac,_a,Oc,Uc,$n,ir,Ct,so,ya,jn,Wc,ba,Bc,lr,we,Fn,Hc,ka,Rc,Kc,zn,Vc,hs,Xc,Yc,Zc,En,Gc,qn,Jc,Qc,ep,ao,tp,Ue,Cn,op,Pt,np,ms,sp,ap,La,rp,ip,lp,ro,dp,va,cp,pp,Pn,dr,Nt,io,wa,Nn,up,Ta,hp,cr,Te,Sn,mp,Ma,fp,gp,In,_p,fs,yp,bp,kp,Dn,Lp,An,vp,wp,Tp,lo,Mp,We,On,xp,St,$p,gs,jp,Fp,xa,zp,Ep,qp,co,Cp,$a,Pp,Np,Un,pr;return y=new Ve({}),A=new Ve({}),yo=new tt({props:{code:`def normalize_bbox(bbox, width, height): return [ int(1000 * (bbox[0] / width)), int(1000 * (bbox[1] / height)), int(1000 * (bbox[2] / width)), int(1000 * (bbox[3] / height)), ],`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">normalize_bbox</span>(<span class="hljs-params">bbox, width, height</span>): <span class="hljs-keyword">return</span> [ <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">0</span>] / width)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">1</span>] / height)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">2</span>] / width)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">3</span>] / height)), ]`}}),bo=new tt({props:{code:`from PIL import Image image = Image.open("name_of_your_document - can be a png file, pdf, etc.") width, height = image.size,`,highlighted:`<span class="hljs-keyword">from</span> PIL <span 
class="hljs-keyword">import</span> Image image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>) width, height = image.size`}}),To=new Ve({}),Mo=new _e({props:{name:"class transformers.LayoutLMConfig",anchor:"transformers.LayoutLMConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"max_2d_position_embeddings",val:" = 1024"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/configuration_layoutlm.py#L35",parametersDescription:[{anchor:"transformers.LayoutLMConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LayoutLM model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel">LayoutLMModel</a>.`,name:"vocab_size"},{anchor:"transformers.LayoutLMConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.LayoutLMConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.LayoutLMConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.LayoutLMConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.LayoutLMConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.LayoutLMConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.LayoutLMConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.LayoutLMConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.LayoutLMConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed into <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel">LayoutLMModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.LayoutLMConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.LayoutLMConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.LayoutLMConfig.max_2d_position_embeddings",description:`<strong>max_2d_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum value that the 2D position embedding might ever used. 
Typically set this to something large just in case (e.g., 1024).`,name:"max_2d_position_embeddings"}]}}),$o=new tt({props:{code:`from transformers import LayoutLMModel, LayoutLMConfig # Initializing a LayoutLM configuration configuration = LayoutLMConfig() # Initializing a model from the configuration model = LayoutLMModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMModel, LayoutLMConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a LayoutLM configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = LayoutLMConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),jo=new Ve({}),Fo=new _e({props:{name:"class transformers.LayoutLMTokenizer",anchor:"transformers.LayoutLMTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/tokenization_layoutlm.py#L46"}}),Eo=new Ve({}),qo=new _e({props:{name:"class transformers.LayoutLMTokenizerFast",anchor:"transformers.LayoutLMTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py#L51"}}),Po=new Ve({}),No=new _e({props:{name:"class transformers.LayoutLMModel",anchor:"transformers.LayoutLMModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L704",parametersDescription:[{anchor:"transformers.LayoutLMModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Oo=new _e({props:{name:"forward",anchor:"transformers.LayoutLMModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L730",parametersDescription:[{anchor:"transformers.LayoutLMModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMModel.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. See <a href="#Overview">Overview</a> for normalization.`,name:"bbox"},{anchor:"transformers.LayoutLMModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>: <code>1</code> for tokens that are NOT MASKED, <code>0</code> for MASKED tokens.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>: <code>0</code> corresponds to a <em>sentence A</em> token, <code>1</code> corresponds to a <em>sentence B</em> token</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: <code>1</code> indicates the head is <strong>not masked</strong>, <code>0</code> indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.LayoutLMModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the attentions tensors of all attention layers are returned. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the hidden states of all layers are returned. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when 
<code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ht=new Qe({props:{$$slots:{default:[Xh]},$$scope:{ctx:B}}}),Uo=new tt({props:{code:`from transformers import LayoutLMTokenizer, LayoutLMModel import torch tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = LayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "world"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="pt") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = torch.tensor([token_boxes]) outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, LayoutLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... 
</span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = torch.tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Wo=new Ve({}),Bo=new _e({props:{name:"class transformers.LayoutLMForMaskedLM",anchor:"transformers.LayoutLMForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L850",parametersDescription:[{anchor:"transformers.LayoutLMForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vo=new _e({props:{name:"forward",anchor:"transformers.LayoutLMForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L869",parametersDescription:[{anchor:"transformers.LayoutLMForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMForMaskedLM.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. See <a href="#Overview">Overview</a> for normalization.`,name:"bbox"},{anchor:"transformers.LayoutLMForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>: <code>1</code> for tokens that are NOT MASKED, <code>0</code> for MASKED tokens.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>: <code>0</code> corresponds to a <em>sentence A</em> token, <code>1</code> corresponds to a <em>sentence B</em> token</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: <code>1</code> indicates the head is <strong>not masked</strong>, <code>0</code> indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.LayoutLMForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the attentions tensors of all attention layers are returned. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the hidden states of all layers are returned. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LayoutLMForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Kt=new Qe({props:{$$slots:{default:[Yh]},$$scope:{ctx:B}}}),Xo=new tt({props:{code:`from transformers import LayoutLMTokenizer, LayoutLMForMaskedLM import torch tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = LayoutLMForMaskedLM.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "[MASK]"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="pt") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = torch.tensor([token_boxes]) labels = tokenizer("Hello world", return_tensors="pt")["input_ids"] outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, 
token_type_ids=token_type_ids, labels=labels) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, LayoutLMForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;[MASK]&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = torch.tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... 
</span> labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),Yo=new Ve({}),Zo=new _e({props:{name:"class transformers.LayoutLMForSequenceClassification",anchor:"transformers.LayoutLMForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L973",parametersDescription:[{anchor:"transformers.LayoutLMForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),on=new _e({props:{name:"forward",anchor:"transformers.LayoutLMForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L987",parametersDescription:[{anchor:"transformers.LayoutLMForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. See <a href="#Overview">Overview</a> for normalization.`,name:"bbox"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>: <code>1</code> for tokens that are NOT MASKED, <code>0</code> for MASKED tokens.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>: <code>0</code> corresponds to a <em>sentence A</em> token, <code>1</code> corresponds to a <em>sentence B</em> token</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: <code>1</code> indicates the head is <strong>not masked</strong>, <code>0</code> indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the attentions tensors of all attention layers are returned. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the hidden states of all layers are returned. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LayoutLMForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xt=new Qe({props:{$$slots:{default:[Zh]},$$scope:{ctx:B}}}),nn=new tt({props:{code:`from transformers import LayoutLMTokenizer, LayoutLMForSequenceClassification import torch tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = 
LayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "world"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="pt") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = torch.tensor([token_boxes]) sequence_label = torch.tensor([1]) outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=sequence_label) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, LayoutLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... 
</span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = torch.tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_label = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... </span> labels=sequence_label) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sn=new Ve({}),an=new _e({props:{name:"class transformers.LayoutLMForTokenClassification",anchor:"transformers.LayoutLMForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L1104",parametersDescription:[{anchor:"transformers.LayoutLMForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),pn=new _e({props:{name:"forward",anchor:"transformers.LayoutLMForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"bbox",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_layoutlm.py#L1118",parametersDescription:[{anchor:"transformers.LayoutLMForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LayoutLMForTokenClassification.forward.bbox",description:`<strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. See <a href="#Overview">Overview</a> for normalization.`,name:"bbox"},{anchor:"transformers.LayoutLMForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>: <code>1</code> for tokens that are NOT MASKED, <code>0</code> for MASKED tokens.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LayoutLMForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>: <code>0</code> corresponds to a <em>sentence A</em> token, <code>1</code> corresponds to a <em>sentence B</em> token</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LayoutLMForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LayoutLMForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>: <code>1</code> indicates the head is <strong>not masked</strong>, <code>0</code> indicates the head is <strong>masked</strong>.`,name:"head_mask"},{anchor:"transformers.LayoutLMForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LayoutLMForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the attentions tensors of all attention layers are returned. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LayoutLMForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the hidden states of all layers are returned. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LayoutLMForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LayoutLMForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Gt=new Qe({props:{$$slots:{default:[Gh]},$$scope:{ctx:B}}}),un=new tt({props:{code:`from transformers import LayoutLMTokenizer, LayoutLMForTokenClassification import torch tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = LayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "world"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="pt") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = torch.tensor([token_boxes]) token_labels = torch.tensor([1,1,0,0]).unsqueeze(0) # batch size of 1 outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=token_labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
LayoutLMTokenizer, LayoutLMForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = torch.tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>token_labels = torch.tensor([<span class="hljs-number">1</span>,<span class="hljs-number">1</span>,<span class="hljs-number">0</span>,<span class="hljs-number">0</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># batch size of 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... 
</span> labels=token_labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),hn=new Ve({}),mn=new _e({props:{name:"class transformers.TFLayoutLMModel",anchor:"transformers.TFLayoutLMModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L920",parametersDescription:[{anchor:"transformers.TFLayoutLMModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qt=new Qe({props:{$$slots:{default:[Jh]},$$scope:{ctx:B}}}),yn=new _e({props:{name:"call",anchor:"transformers.TFLayoutLMModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"bbox",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L926",parametersDescription:[{anchor:"transformers.TFLayoutLMModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLayoutLMModel.call.bbox",description:`<strong>bbox</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding Boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings- 1]</code>.`,name:"bbox"},{anchor:"transformers.TFLayoutLMModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLayoutLMModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLayoutLMModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLayoutLMModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLayoutLMModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLayoutLMModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFLayoutLMModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFLayoutLMModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFLayoutLMModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),eo=new Qe({props:{$$slots:{default:[Qh]},$$scope:{ctx:B}}}),bn=new tt({props:{code:`from transformers import LayoutLMTokenizer, TFLayoutLMModel import tensorflow as tf tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "world"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="tf") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = 
tf.convert_to_tensor([token_boxes]) outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, TFLayoutLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLayoutLMModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = tf.convert_to_tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),kn=new Ve({}),Ln=new _e({props:{name:"class 
transformers.TFLayoutLMForMaskedLM",anchor:"transformers.TFLayoutLMForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L1036",parametersDescription:[{anchor:"transformers.TFLayoutLMForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),oo=new Qe({props:{$$slots:{default:[em]},$$scope:{ctx:B}}}),xn=new _e({props:{name:"call",anchor:"transformers.TFLayoutLMForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"bbox",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L1064",parametersDescription:[{anchor:"transformers.TFLayoutLMForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.bbox",description:`<strong>bbox</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding Boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings- 1]</code>.`,name:"bbox"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLayoutLMForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),no=new Qe({props:{$$slots:{default:[tm]},$$scope:{ctx:B}}}),$n=new tt({props:{code:`from transformers import LayoutLMTokenizer, TFLayoutLMForMaskedLM import tensorflow as tf tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = TFLayoutLMForMaskedLM.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "[MASK]"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="tf") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = tf.convert_to_tensor([token_boxes]) labels = tokenizer("Hello world", return_tensors="tf")["input_ids"] outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, 
token_type_ids=token_type_ids, labels=labels) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, TFLayoutLMForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLayoutLMForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;[MASK]&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = tf.convert_to_tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... 
</span> labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),jn=new Ve({}),Fn=new _e({props:{name:"class transformers.TFLayoutLMForSequenceClassification",anchor:"transformers.TFLayoutLMForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L1182",parametersDescription:[{anchor:"transformers.TFLayoutLMForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ao=new Qe({props:{$$slots:{default:[om]},$$scope:{ctx:B}}}),Cn=new _e({props:{name:"call",anchor:"transformers.TFLayoutLMForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"bbox",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L1200",parametersDescription:[{anchor:"transformers.TFLayoutLMForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.bbox",description:`<strong>bbox</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding Boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings- 1]</code>.`,name:"bbox"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLayoutLMForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ro=new Qe({props:{$$slots:{default:[nm]},$$scope:{ctx:B}}}),Pn=new tt({props:{code:`from transformers import LayoutLMTokenizer, TFLayoutLMForSequenceClassification import tensorflow as tf tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "world"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="tf") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = tf.convert_to_tensor([token_boxes]) sequence_label = tf.convert_to_tensor([1]) outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=sequence_label) loss = outputs.loss logits = 
outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, TFLayoutLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLayoutLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = tf.convert_to_tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_label = tf.convert_to_tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... 
</span> labels=sequence_label) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Nn=new Ve({}),Sn=new _e({props:{name:"class transformers.TFLayoutLMForTokenClassification",anchor:"transformers.TFLayoutLMForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L1317",parametersDescription:[{anchor:"transformers.TFLayoutLMForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig">LayoutLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lo=new Qe({props:{$$slots:{default:[sm]},$$scope:{ctx:B}}}),On=new _e({props:{name:"call",anchor:"transformers.TFLayoutLMForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"bbox",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/layoutlm/modeling_tf_layoutlm.py#L1341",parametersDescription:[{anchor:"transformers.TFLayoutLMForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer">LayoutLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.bbox",description:`<strong>bbox</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding Boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings- 1]</code>.`,name:"bbox"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLayoutLMForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMConfig" >LayoutLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),co=new Qe({props:{$$slots:{default:[am]},$$scope:{ctx:B}}}),Un=new tt({props:{code:`from transformers import LayoutLMTokenizer, TFLayoutLMForTokenClassification import torch tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased') model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased') words = ["Hello", "world"] normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] token_boxes = [] for word, box in zip(words, normalized_word_boxes): word_tokens = tokenizer.tokenize(word) token_boxes.extend([box] * len(word_tokens)) # add bounding boxes of cls + sep tokens token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] encoding = tokenizer(' '.join(words), return_tensors="tf") input_ids = encoding["input_ids"] attention_mask = encoding["attention_mask"] token_type_ids = encoding["token_type_ids"] bbox = tf.convert_to_tensor([token_boxes]) token_labels = tf.convert_to_tensor([1,1,0,0]) outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=token_labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMTokenizer, 
TFLayoutLMForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LayoutLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLayoutLMForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/layoutlm-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;Hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>normalized_word_boxes = [<span class="hljs-number">637</span>, <span class="hljs-number">773</span>, <span class="hljs-number">693</span>, <span class="hljs-number">782</span>], [<span class="hljs-number">698</span>, <span class="hljs-number">773</span>, <span class="hljs-number">733</span>, <span class="hljs-number">782</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> word, box <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(words, normalized_word_boxes): <span class="hljs-meta">... </span> word_tokens = tokenizer.tokenize(word) <span class="hljs-meta">... </span> token_boxes.extend([box] * <span class="hljs-built_in">len</span>(word_tokens)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add bounding boxes of cls + sep tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>token_boxes = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]] + token_boxes + [[<span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>, <span class="hljs-number">1000</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&#x27; &#x27;</span>.join(words), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = encoding[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>attention_mask = encoding[<span class="hljs-string">&quot;attention_mask&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_type_ids = encoding[<span class="hljs-string">&quot;token_type_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>bbox = tf.convert_to_tensor([token_boxes]) <span class="hljs-meta">&gt;&gt;&gt; </span>token_labels = tf.convert_to_tensor([<span class="hljs-number">1</span>,<span class="hljs-number">1</span>,<span class="hljs-number">0</span>,<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, <span class="hljs-meta">... 
</span> labels=token_labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){h=a("meta"),$=l(),f=a("h1"),g=a("a"),b=a("span"),L(y.$$.fragment),_=l(),j=a("span"),re=n("LayoutLM"),Y=l(),z=a("a"),ae=l(),I=a("h2"),Z=a("a"),ne=a("span"),L(A.$$.fragment),ie=l(),se=a("span"),J=n("Overview"),D=l(),G=a("p"),Q=n("The LayoutLM model was proposed in the paper "),F=a("a"),E=n(`LayoutLM: Pre-training of Text and Layout for Document Image Understanding`),le=n(` by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and Ming Zhou. It\u2019s a simple but effective pretraining method of text and layout for document image understanding and information extraction tasks, such as form understanding and receipt understanding. It obtains state-of-the-art results on several downstream tasks:`),W=l(),V=a("ul"),ee=a("li"),H=n("form understanding: the "),te=a("a"),de=n("FUNSD"),q=n(` dataset (a collection of 199 annotated forms comprising more than 30,000 words).`),ce=l(),O=a("li"),pe=n("receipt understanding: the "),oe=a("a"),R=n("SROIE"),ue=n(` dataset (a collection of 626 receipts for training and 347 receipts for testing).`),he=l(),P=a("li"),me=n("document image classification: the "),U=a("a"),fe=n("RVL-CDIP"),p=n(` dataset (a collection of 400,000 images belonging to one of 16 classes).`),k=l(),X=a("p"),je=n("The abstract from the paper is the following:"),Me=l(),N=a("p"),be=a("em"),Fe=n(`Pre-training techniques have been verified successfully in a variety of NLP tasks in recent years. Despite the widespread use of pretraining models for NLP applications, they almost exclusively focus on text-level manipulation, while neglecting layout and style information that is vital for document image understanding. In this paper, we propose the LayoutLM to jointly model interactions between text and layout information across scanned document images, which is beneficial for a great number of real-world document image understanding tasks such as information extraction from scanned documents. Furthermore, we also leverage image features to incorporate words\u2019 visual information into LayoutLM. To the best of our knowledge, this is the first time that text and layout are jointly learned in a single framework for document-level pretraining. It achieves new state-of-the-art results in several downstream tasks, including form understanding (from 70.72 to 79.27), receipt understanding (from 94.02 to 95.24) and document image classification (from 93.07 to 94.42).`),xe=l(),S=a("p"),K=n("Tips:"),$e=l(),ye=a("ul"),C=a("li"),ze=n("In addition to "),ke=a("em"),ge=n("input_ids"),Ee=n(", "),Kn=a("a"),Hr=n("forward()"),Rr=n(" also expects the input "),zs=a("code"),Kr=n("bbox"),Vr=n(`, which are the bounding boxes (i.e. 2D-positions) of the input tokens. These can be obtained using an external OCR engine such as Google\u2019s `),go=a("a"),Xr=n("Tesseract"),Yr=n(" (there\u2019s a "),_o=a("a"),Zr=n("Python wrapper"),Gr=n(` available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. 
To normalize, you can use the following function:`),Da=l(),L(yo.$$.fragment),Aa=l(),ot=a("p"),Jr=n("Here, "),Es=a("code"),Qr=n("width"),ei=n(" and "),qs=a("code"),ti=n("height"),oi=n(` correspond to the width and height of the original document in which the token occurs. Those can be obtained using the Python Image Library (PIL) library for example, as follows:`),Oa=l(),L(bo.$$.fragment),Ua=l(),Vn=a("ul"),et=a("li"),ni=n("For a demo which shows how to fine-tune "),Xn=a("a"),si=n("LayoutLMForTokenClassification"),ai=n(" on the "),ko=a("a"),ri=n("FUNSD dataset"),ii=n(" (a collection of annotated forms), see "),Lo=a("a"),li=n("this notebook"),di=n(`. It includes an inference part, which shows how to use Google\u2019s Tesseract on a new document.`),Wa=l(),nt=a("p"),ci=n("This model was contributed by "),vo=a("a"),pi=n("liminghao1630"),ui=n(`. The original code can be found `),wo=a("a"),hi=n("here"),mi=n("."),Ba=l(),ft=a("h2"),Dt=a("a"),Cs=a("span"),L(To.$$.fragment),fi=l(),Ps=a("span"),gi=n("LayoutLMConfig"),Ha=l(),qe=a("div"),L(Mo.$$.fragment),_i=l(),gt=a("p"),yi=n("This is the configuration class to store the configuration of a "),Yn=a("a"),bi=n("LayoutLMModel"),ki=n(`. It is used to instantiate a LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLM `),xo=a("a"),Li=n("layoutlm-base-uncased"),vi=n(" architecture."),wi=l(),_t=a("p"),Ti=n("Configuration objects inherit from "),Zn=a("a"),Mi=n("BertConfig"),xi=n(` and can be used to control the model outputs. Read the documentation from `),Gn=a("a"),$i=n("BertConfig"),ji=n(" for more information."),Fi=l(),Ns=a("p"),zi=n("Examples:"),Ei=l(),L($o.$$.fragment),Ra=l(),yt=a("h2"),At=a("a"),Ss=a("span"),L(jo.$$.fragment),qi=l(),Is=a("span"),Ci=n("LayoutLMTokenizer"),Ka=l(),Be=a("div"),L(Fo.$$.fragment),Pi=l(),Ds=a("p"),Ni=n("Constructs a LayoutLM tokenizer."),Si=l(),Ot=a("p"),Jn=a("a"),Ii=n("LayoutLMTokenizer"),Di=n(" is identical to "),Qn=a("a"),Ai=n("BertTokenizer"),Oi=n(` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Ui=l(),zo=a("p"),Wi=n("Refer to superclass "),es=a("a"),Bi=n("BertTokenizer"),Hi=n(` for usage examples and documentation concerning parameters.`),Va=l(),bt=a("h2"),Ut=a("a"),As=a("span"),L(Eo.$$.fragment),Ri=l(),Os=a("span"),Ki=n("LayoutLMTokenizerFast"),Xa=l(),He=a("div"),L(qo.$$.fragment),Vi=l(),Us=a("p"),Xi=n("Constructs a \u201CFast\u201D LayoutLMTokenizer."),Yi=l(),Wt=a("p"),ts=a("a"),Zi=n("LayoutLMTokenizerFast"),Gi=n(" is identical to "),os=a("a"),Ji=n("BertTokenizerFast"),Qi=n(` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),el=l(),Co=a("p"),tl=n("Refer to superclass "),ns=a("a"),ol=n("BertTokenizerFast"),nl=n(` for usage examples and documentation concerning parameters.`),Ya=l(),kt=a("h2"),Bt=a("a"),Ws=a("span"),L(Po.$$.fragment),sl=l(),Bs=a("span"),al=n("LayoutLMModel"),Za=l(),Re=a("div"),L(No.$$.fragment),rl=l(),So=a("p"),il=n(`The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top. The LayoutLM model was proposed in `),Io=a("a"),ll=n("LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),dl=n(" by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),cl=l(),Do=a("p"),pl=n("This model is a PyTorch "),Ao=a("a"),ul=n("torch.nn.Module"),hl=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ml=l(),Ne=a("div"),L(Oo.$$.fragment),fl=l(),Lt=a("p"),gl=n("The "),ss=a("a"),_l=n("LayoutLMModel"),yl=n(" forward method, overrides the "),Hs=a("code"),bl=n("__call__"),kl=n(" special method."),Ll=l(),L(Ht.$$.fragment),vl=l(),Rs=a("p"),wl=n("Examples:"),Tl=l(),L(Uo.$$.fragment),Ga=l(),vt=a("h2"),Rt=a("a"),Ks=a("span"),L(Wo.$$.fragment),Ml=l(),Vs=a("span"),xl=n("LayoutLMForMaskedLM"),Ja=l(),Ke=a("div"),L(Bo.$$.fragment),$l=l(),wt=a("p"),jl=n("LayoutLM Model with a "),Xs=a("code"),Fl=n("language modeling"),zl=n(` head on top. The LayoutLM model was proposed in `),Ho=a("a"),El=n("LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),ql=n(" by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),Cl=l(),Ro=a("p"),Pl=n("This model is a PyTorch "),Ko=a("a"),Nl=n("torch.nn.Module"),Sl=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Il=l(),Se=a("div"),L(Vo.$$.fragment),Dl=l(),Tt=a("p"),Al=n("The "),as=a("a"),Ol=n("LayoutLMForMaskedLM"),Ul=n(" forward method, overrides the "),Ys=a("code"),Wl=n("__call__"),Bl=n(" special method."),Hl=l(),L(Kt.$$.fragment),Rl=l(),Zs=a("p"),Kl=n("Examples:"),Vl=l(),L(Xo.$$.fragment),Qa=l(),Mt=a("h2"),Vt=a("a"),Gs=a("span"),L(Yo.$$.fragment),Xl=l(),Js=a("span"),Yl=n("LayoutLMForSequenceClassification"),er=l(),Ce=a("div"),L(Zo.$$.fragment),Zl=l(),Go=a("p"),Gl=n(`LayoutLM Model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for document image classification tasks such as the `),Qs=a("code"),Jl=n("RVL-CDIP <https://www.cs.cmu.edu/~aharley/rvl-cdip/>"),Ql=n("__ dataset."),ed=l(),Jo=a("p"),td=n("The LayoutLM model was proposed in "),Qo=a("a"),od=n("LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),nd=n(" by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),sd=l(),en=a("p"),ad=n("This model is a PyTorch "),tn=a("a"),rd=n("torch.nn.Module"),id=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ld=l(),Ie=a("div"),L(on.$$.fragment),dd=l(),xt=a("p"),cd=n("The "),rs=a("a"),pd=n("LayoutLMForSequenceClassification"),ud=n(" forward method, overrides the "),ea=a("code"),hd=n("__call__"),md=n(" special method."),fd=l(),L(Xt.$$.fragment),gd=l(),ta=a("p"),_d=n("Examples:"),yd=l(),L(nn.$$.fragment),tr=l(),$t=a("h2"),Yt=a("a"),oa=a("span"),L(sn.$$.fragment),bd=l(),na=a("span"),kd=n("LayoutLMForTokenClassification"),or=l(),Pe=a("div"),L(an.$$.fragment),Ld=l(),Zt=a("p"),vd=n(`LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for sequence labeling (information extraction) tasks such as the `),sa=a("code"),wd=n("FUNSD <https://guillaumejaume.github.io/FUNSD/>"),is=a("strong"),Td=n("dataset and the "),aa=a("code"),Md=n("SROIE <https://rrc.cvc.uab.es/?ch=13>"),xd=n(" dataset."),$d=l(),rn=a("p"),jd=n("The LayoutLM model was proposed in "),ln=a("a"),Fd=n("LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),zd=n(" by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),Ed=l(),dn=a("p"),qd=n("This model is a PyTorch "),cn=a("a"),Cd=n("torch.nn.Module"),Pd=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nd=l(),De=a("div"),L(pn.$$.fragment),Sd=l(),jt=a("p"),Id=n("The "),ls=a("a"),Dd=n("LayoutLMForTokenClassification"),Ad=n(" forward method, overrides the "),ra=a("code"),Od=n("__call__"),Ud=n(" special method."),Wd=l(),L(Gt.$$.fragment),Bd=l(),ia=a("p"),Hd=n("Examples:"),Rd=l(),L(un.$$.fragment),nr=l(),Ft=a("h2"),Jt=a("a"),la=a("span"),L(hn.$$.fragment),Kd=l(),da=a("span"),Vd=n("TFLayoutLMModel"),sr=l(),Le=a("div"),L(mn.$$.fragment),Xd=l(),ca=a("p"),Yd=n("The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top."),Zd=l(),fn=a("p"),Gd=n("This model inherits from "),ds=a("a"),Jd=n("TFPreTrainedModel"),Qd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ec=l(),gn=a("p"),tc=n("This model is also a "),_n=a("a"),oc=n("tf.keras.Model"),nc=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sc=l(),L(Qt.$$.fragment),ac=l(),Ae=a("div"),L(yn.$$.fragment),rc=l(),zt=a("p"),ic=n("The "),cs=a("a"),lc=n("TFLayoutLMModel"),dc=n(" forward method, overrides the "),pa=a("code"),cc=n("__call__"),pc=n(" special method."),uc=l(),L(eo.$$.fragment),hc=l(),ua=a("p"),mc=n("Examples:"),fc=l(),L(bn.$$.fragment),ar=l(),Et=a("h2"),to=a("a"),ha=a("span"),L(kn.$$.fragment),gc=l(),ma=a("span"),_c=n("TFLayoutLMForMaskedLM"),rr=l(),ve=a("div"),L(Ln.$$.fragment),yc=l(),vn=a("p"),bc=n("LayoutLM Model with a "),fa=a("code"),kc=n("language modeling"),Lc=n(" head on top."),vc=l(),wn=a("p"),wc=n("This model inherits from "),ps=a("a"),Tc=n("TFPreTrainedModel"),Mc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xc=l(),Tn=a("p"),$c=n("This model is also a "),Mn=a("a"),jc=n("tf.keras.Model"),Fc=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),zc=l(),L(oo.$$.fragment),Ec=l(),Oe=a("div"),L(xn.$$.fragment),qc=l(),qt=a("p"),Cc=n("The "),us=a("a"),Pc=n("TFLayoutLMForMaskedLM"),Nc=n(" forward method, overrides the "),ga=a("code"),Sc=n("__call__"),Ic=n(" special method."),Dc=l(),L(no.$$.fragment),Ac=l(),_a=a("p"),Oc=n("Examples:"),Uc=l(),L($n.$$.fragment),ir=l(),Ct=a("h2"),so=a("a"),ya=a("span"),L(jn.$$.fragment),Wc=l(),ba=a("span"),Bc=n("TFLayoutLMForSequenceClassification"),lr=l(),we=a("div"),L(Fn.$$.fragment),Hc=l(),ka=a("p"),Rc=n(`LayoutLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Kc=l(),zn=a("p"),Vc=n("This model inherits from "),hs=a("a"),Xc=n("TFPreTrainedModel"),Yc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zc=l(),En=a("p"),Gc=n("This model is also a "),qn=a("a"),Jc=n("tf.keras.Model"),Qc=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ep=l(),L(ao.$$.fragment),tp=l(),Ue=a("div"),L(Cn.$$.fragment),op=l(),Pt=a("p"),np=n("The "),ms=a("a"),sp=n("TFLayoutLMForSequenceClassification"),ap=n(" forward method, overrides the "),La=a("code"),rp=n("__call__"),ip=n(" special method."),lp=l(),L(ro.$$.fragment),dp=l(),va=a("p"),cp=n("Examples:"),pp=l(),L(Pn.$$.fragment),dr=l(),Nt=a("h2"),io=a("a"),wa=a("span"),L(Nn.$$.fragment),up=l(),Ta=a("span"),hp=n("TFLayoutLMForTokenClassification"),cr=l(),Te=a("div"),L(Sn.$$.fragment),mp=l(),Ma=a("p"),fp=n(`LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),gp=l(),In=a("p"),_p=n("This model inherits from "),fs=a("a"),yp=n("TFPreTrainedModel"),bp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kp=l(),Dn=a("p"),Lp=n("This model is also a "),An=a("a"),vp=n("tf.keras.Model"),wp=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Tp=l(),L(lo.$$.fragment),Mp=l(),We=a("div"),L(On.$$.fragment),xp=l(),St=a("p"),$p=n("The "),gs=a("a"),jp=n("TFLayoutLMForTokenClassification"),Fp=n(" forward method, overrides the "),xa=a("code"),zp=n("__call__"),Ep=n(" special method."),qp=l(),L(co.$$.fragment),Cp=l(),$a=a("p"),Pp=n("Examples:"),Np=l(),L(Un.$$.fragment),this.h()},l(o){const m=Vh('[data-svelte="svelte-1phssyn"]',document.head);h=r(m,"META",{name:!0,content:!0}),m.forEach(t),$=d(o),f=r(o,"H1",{class:!0});var Wn=i(f);g=r(Wn,"A",{id:!0,class:!0,href:!0});var ja=i(g);b=r(ja,"SPAN",{});var Fa=i(b);v(y.$$.fragment,Fa),Fa.forEach(t),ja.forEach(t),_=d(Wn),j=r(Wn,"SPAN",{});var za=i(j);re=s(za,"LayoutLM"),za.forEach(t),Wn.forEach(t),Y=d(o),z=r(o,"A",{id:!0}),i(z).forEach(t),ae=d(o),I=r(o,"H2",{class:!0});var Bn=i(I);Z=r(Bn,"A",{id:!0,class:!0,href:!0});var Ea=i(Z);ne=r(Ea,"SPAN",{});var qa=i(ne);v(A.$$.fragment,qa),qa.forEach(t),Ea.forEach(t),ie=d(Bn),se=r(Bn,"SPAN",{});var Ca=i(se);J=s(Ca,"Overview"),Ca.forEach(t),Bn.forEach(t),D=d(o),G=r(o,"P",{});var Hn=i(G);Q=s(Hn,"The LayoutLM model was proposed in the paper "),F=r(Hn,"A",{href:!0,rel:!0});var Pa=i(F);E=s(Pa,`LayoutLM: Pre-training of Text and Layout for Document Image Understanding`),Pa.forEach(t),le=s(Hn,` by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and Ming Zhou. It\u2019s a simple but effective pretraining method of text and layout for document image understanding and information extraction tasks, such as form understanding and receipt understanding. 
It obtains state-of-the-art results on several downstream tasks:`),Hn.forEach(t),W=d(o),V=r(o,"UL",{});var It=i(V);ee=r(It,"LI",{});var Rn=i(ee);H=s(Rn,"form understanding: the "),te=r(Rn,"A",{href:!0,rel:!0});var Ip=i(te);de=s(Ip,"FUNSD"),Ip.forEach(t),q=s(Rn,` dataset (a collection of 199 annotated forms comprising more than 30,000 words).`),Rn.forEach(t),ce=d(It),O=r(It,"LI",{});var ur=i(O);pe=s(ur,"receipt understanding: the "),oe=r(ur,"A",{href:!0,rel:!0});var Dp=i(oe);R=s(Dp,"SROIE"),Dp.forEach(t),ue=s(ur,` dataset (a collection of 626 receipts for training and 347 receipts for testing).`),ur.forEach(t),he=d(It),P=r(It,"LI",{});var hr=i(P);me=s(hr,"document image classification: the "),U=r(hr,"A",{href:!0,rel:!0});var Ap=i(U);fe=s(Ap,"RVL-CDIP"),Ap.forEach(t),p=s(hr,` dataset (a collection of 400,000 images belonging to one of 16 classes).`),hr.forEach(t),It.forEach(t),k=d(o),X=r(o,"P",{});var Op=i(X);je=s(Op,"The abstract from the paper is the following:"),Op.forEach(t),Me=d(o),N=r(o,"P",{});var Up=i(N);be=r(Up,"EM",{});var Wp=i(be);Fe=s(Wp,`Pre-training techniques have been verified successfully in a variety of NLP tasks in recent years. Despite the widespread use of pretraining models for NLP applications, they almost exclusively focus on text-level manipulation, while neglecting layout and style information that is vital for document image understanding. In this paper, we propose the LayoutLM to jointly model interactions between text and layout information across scanned document images, which is beneficial for a great number of real-world document image understanding tasks such as information extraction from scanned documents. Furthermore, we also leverage image features to incorporate words\u2019 visual information into LayoutLM. To the best of our knowledge, this is the first time that text and layout are jointly learned in a single framework for document-level pretraining. It achieves new state-of-the-art results in several downstream tasks, including form understanding (from 70.72 to 79.27), receipt understanding (from 94.02 to 95.24) and document image classification (from 93.07 to 94.42).`),Wp.forEach(t),Up.forEach(t),xe=d(o),S=r(o,"P",{});var Bp=i(S);K=s(Bp,"Tips:"),Bp.forEach(t),$e=d(o),ye=r(o,"UL",{});var Hp=i(ye);C=r(Hp,"LI",{});var Xe=i(C);ze=s(Xe,"In addition to "),ke=r(Xe,"EM",{});var Rp=i(ke);ge=s(Rp,"input_ids"),Rp.forEach(t),Ee=s(Xe,", "),Kn=r(Xe,"A",{href:!0});var Kp=i(Kn);Hr=s(Kp,"forward()"),Kp.forEach(t),Rr=s(Xe," also expects the input "),zs=r(Xe,"CODE",{});var Vp=i(zs);Kr=s(Vp,"bbox"),Vp.forEach(t),Vr=s(Xe,`, which are the bounding boxes (i.e. 2D-positions) of the input tokens. These can be obtained using an external OCR engine such as Google\u2019s `),go=r(Xe,"A",{href:!0,rel:!0});var Xp=i(go);Xr=s(Xp,"Tesseract"),Xp.forEach(t),Yr=s(Xe," (there\u2019s a "),_o=r(Xe,"A",{href:!0,rel:!0});var Yp=i(_o);Zr=s(Yp,"Python wrapper"),Yp.forEach(t),Gr=s(Xe,` available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. 
To normalize, you can use the following function:`),Xe.forEach(t),Hp.forEach(t),Da=d(o),v(yo.$$.fragment,o),Aa=d(o),ot=r(o,"P",{});var _s=i(ot);Jr=s(_s,"Here, "),Es=r(_s,"CODE",{});var Zp=i(Es);Qr=s(Zp,"width"),Zp.forEach(t),ei=s(_s," and "),qs=r(_s,"CODE",{});var Gp=i(qs);ti=s(Gp,"height"),Gp.forEach(t),oi=s(_s,` correspond to the width and height of the original document in which the token occurs. Those can be obtained using the Python Image Library (PIL) library for example, as follows:`),_s.forEach(t),Oa=d(o),v(bo.$$.fragment,o),Ua=d(o),Vn=r(o,"UL",{});var Jp=i(Vn);et=r(Jp,"LI",{});var po=i(et);ni=s(po,"For a demo which shows how to fine-tune "),Xn=r(po,"A",{href:!0});var Qp=i(Xn);si=s(Qp,"LayoutLMForTokenClassification"),Qp.forEach(t),ai=s(po," on the "),ko=r(po,"A",{href:!0,rel:!0});var eu=i(ko);ri=s(eu,"FUNSD dataset"),eu.forEach(t),ii=s(po," (a collection of annotated forms), see "),Lo=r(po,"A",{href:!0,rel:!0});var tu=i(Lo);li=s(tu,"this notebook"),tu.forEach(t),di=s(po,`. It includes an inference part, which shows how to use Google\u2019s Tesseract on a new document.`),po.forEach(t),Jp.forEach(t),Wa=d(o),nt=r(o,"P",{});var ys=i(nt);ci=s(ys,"This model was contributed by "),vo=r(ys,"A",{href:!0,rel:!0});var ou=i(vo);pi=s(ou,"liminghao1630"),ou.forEach(t),ui=s(ys,`. The original code can be found `),wo=r(ys,"A",{href:!0,rel:!0});var nu=i(wo);hi=s(nu,"here"),nu.forEach(t),mi=s(ys,"."),ys.forEach(t),Ba=d(o),ft=r(o,"H2",{class:!0});var mr=i(ft);Dt=r(mr,"A",{id:!0,class:!0,href:!0});var su=i(Dt);Cs=r(su,"SPAN",{});var au=i(Cs);v(To.$$.fragment,au),au.forEach(t),su.forEach(t),fi=d(mr),Ps=r(mr,"SPAN",{});var ru=i(Ps);gi=s(ru,"LayoutLMConfig"),ru.forEach(t),mr.forEach(t),Ha=d(o),qe=r(o,"DIV",{class:!0});var st=i(qe);v(Mo.$$.fragment,st),_i=d(st),gt=r(st,"P",{});var bs=i(gt);yi=s(bs,"This is the configuration class to store the configuration of a "),Yn=r(bs,"A",{href:!0});var iu=i(Yn);bi=s(iu,"LayoutLMModel"),iu.forEach(t),ki=s(bs,`. It is used to instantiate a LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLM `),xo=r(bs,"A",{href:!0,rel:!0});var lu=i(xo);Li=s(lu,"layoutlm-base-uncased"),lu.forEach(t),vi=s(bs," architecture."),bs.forEach(t),wi=d(st),_t=r(st,"P",{});var ks=i(_t);Ti=s(ks,"Configuration objects inherit from "),Zn=r(ks,"A",{href:!0});var du=i(Zn);Mi=s(du,"BertConfig"),du.forEach(t),xi=s(ks,` and can be used to control the model outputs. 
Read the documentation from `),Gn=r(ks,"A",{href:!0});var cu=i(Gn);$i=s(cu,"BertConfig"),cu.forEach(t),ji=s(ks," for more information."),ks.forEach(t),Fi=d(st),Ns=r(st,"P",{});var pu=i(Ns);zi=s(pu,"Examples:"),pu.forEach(t),Ei=d(st),v($o.$$.fragment,st),st.forEach(t),Ra=d(o),yt=r(o,"H2",{class:!0});var fr=i(yt);At=r(fr,"A",{id:!0,class:!0,href:!0});var uu=i(At);Ss=r(uu,"SPAN",{});var hu=i(Ss);v(jo.$$.fragment,hu),hu.forEach(t),uu.forEach(t),qi=d(fr),Is=r(fr,"SPAN",{});var mu=i(Is);Ci=s(mu,"LayoutLMTokenizer"),mu.forEach(t),fr.forEach(t),Ka=d(o),Be=r(o,"DIV",{class:!0});var uo=i(Be);v(Fo.$$.fragment,uo),Pi=d(uo),Ds=r(uo,"P",{});var fu=i(Ds);Ni=s(fu,"Constructs a LayoutLM tokenizer."),fu.forEach(t),Si=d(uo),Ot=r(uo,"P",{});var Na=i(Ot);Jn=r(Na,"A",{href:!0});var gu=i(Jn);Ii=s(gu,"LayoutLMTokenizer"),gu.forEach(t),Di=s(Na," is identical to "),Qn=r(Na,"A",{href:!0});var _u=i(Qn);Ai=s(_u,"BertTokenizer"),_u.forEach(t),Oi=s(Na,` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Na.forEach(t),Ui=d(uo),zo=r(uo,"P",{});var gr=i(zo);Wi=s(gr,"Refer to superclass "),es=r(gr,"A",{href:!0});var yu=i(es);Bi=s(yu,"BertTokenizer"),yu.forEach(t),Hi=s(gr,` for usage examples and documentation concerning parameters.`),gr.forEach(t),uo.forEach(t),Va=d(o),bt=r(o,"H2",{class:!0});var _r=i(bt);Ut=r(_r,"A",{id:!0,class:!0,href:!0});var bu=i(Ut);As=r(bu,"SPAN",{});var ku=i(As);v(Eo.$$.fragment,ku),ku.forEach(t),bu.forEach(t),Ri=d(_r),Os=r(_r,"SPAN",{});var Lu=i(Os);Ki=s(Lu,"LayoutLMTokenizerFast"),Lu.forEach(t),_r.forEach(t),Xa=d(o),He=r(o,"DIV",{class:!0});var ho=i(He);v(qo.$$.fragment,ho),Vi=d(ho),Us=r(ho,"P",{});var vu=i(Us);Xi=s(vu,"Constructs a \u201CFast\u201D LayoutLMTokenizer."),vu.forEach(t),Yi=d(ho),Wt=r(ho,"P",{});var Sa=i(Wt);ts=r(Sa,"A",{href:!0});var wu=i(ts);Zi=s(wu,"LayoutLMTokenizerFast"),wu.forEach(t),Gi=s(Sa," is identical to "),os=r(Sa,"A",{href:!0});var Tu=i(os);Ji=s(Tu,"BertTokenizerFast"),Tu.forEach(t),Qi=s(Sa,` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Sa.forEach(t),el=d(ho),Co=r(ho,"P",{});var yr=i(Co);tl=s(yr,"Refer to superclass "),ns=r(yr,"A",{href:!0});var Mu=i(ns);ol=s(Mu,"BertTokenizerFast"),Mu.forEach(t),nl=s(yr,` for usage examples and documentation concerning parameters.`),yr.forEach(t),ho.forEach(t),Ya=d(o),kt=r(o,"H2",{class:!0});var br=i(kt);Bt=r(br,"A",{id:!0,class:!0,href:!0});var xu=i(Bt);Ws=r(xu,"SPAN",{});var $u=i(Ws);v(Po.$$.fragment,$u),$u.forEach(t),xu.forEach(t),sl=d(br),Bs=r(br,"SPAN",{});var ju=i(Bs);al=s(ju,"LayoutLMModel"),ju.forEach(t),br.forEach(t),Za=d(o),Re=r(o,"DIV",{class:!0});var mo=i(Re);v(No.$$.fragment,mo),rl=d(mo),So=r(mo,"P",{});var kr=i(So);il=s(kr,`The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top. The LayoutLM model was proposed in `),Io=r(kr,"A",{href:!0,rel:!0});var Fu=i(Io);ll=s(Fu,"LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),Fu.forEach(t),dl=s(kr," by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),kr.forEach(t),cl=d(mo),Do=r(mo,"P",{});var Lr=i(Do);pl=s(Lr,"This model is a PyTorch "),Ao=r(Lr,"A",{href:!0,rel:!0});var zu=i(Ao);ul=s(zu,"torch.nn.Module"),zu.forEach(t),hl=s(Lr,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lr.forEach(t),ml=d(mo),Ne=r(mo,"DIV",{class:!0});var at=i(Ne);v(Oo.$$.fragment,at),fl=d(at),Lt=r(at,"P",{});var Ls=i(Lt);gl=s(Ls,"The "),ss=r(Ls,"A",{href:!0});var Eu=i(ss);_l=s(Eu,"LayoutLMModel"),Eu.forEach(t),yl=s(Ls," forward method, overrides the "),Hs=r(Ls,"CODE",{});var qu=i(Hs);bl=s(qu,"__call__"),qu.forEach(t),kl=s(Ls," special method."),Ls.forEach(t),Ll=d(at),v(Ht.$$.fragment,at),vl=d(at),Rs=r(at,"P",{});var Cu=i(Rs);wl=s(Cu,"Examples:"),Cu.forEach(t),Tl=d(at),v(Uo.$$.fragment,at),at.forEach(t),mo.forEach(t),Ga=d(o),vt=r(o,"H2",{class:!0});var vr=i(vt);Rt=r(vr,"A",{id:!0,class:!0,href:!0});var Pu=i(Rt);Ks=r(Pu,"SPAN",{});var Nu=i(Ks);v(Wo.$$.fragment,Nu),Nu.forEach(t),Pu.forEach(t),Ml=d(vr),Vs=r(vr,"SPAN",{});var Su=i(Vs);xl=s(Su,"LayoutLMForMaskedLM"),Su.forEach(t),vr.forEach(t),Ja=d(o),Ke=r(o,"DIV",{class:!0});var fo=i(Ke);v(Bo.$$.fragment,fo),$l=d(fo),wt=r(fo,"P",{});var vs=i(wt);jl=s(vs,"LayoutLM Model with a "),Xs=r(vs,"CODE",{});var Iu=i(Xs);Fl=s(Iu,"language modeling"),Iu.forEach(t),zl=s(vs,` head on top. The LayoutLM model was proposed in `),Ho=r(vs,"A",{href:!0,rel:!0});var Du=i(Ho);El=s(Du,"LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),Du.forEach(t),ql=s(vs," by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),vs.forEach(t),Cl=d(fo),Ro=r(fo,"P",{});var wr=i(Ro);Pl=s(wr,"This model is a PyTorch "),Ko=r(wr,"A",{href:!0,rel:!0});var Au=i(Ko);Nl=s(Au,"torch.nn.Module"),Au.forEach(t),Sl=s(wr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wr.forEach(t),Il=d(fo),Se=r(fo,"DIV",{class:!0});var rt=i(Se);v(Vo.$$.fragment,rt),Dl=d(rt),Tt=r(rt,"P",{});var ws=i(Tt);Al=s(ws,"The "),as=r(ws,"A",{href:!0});var Ou=i(as);Ol=s(Ou,"LayoutLMForMaskedLM"),Ou.forEach(t),Ul=s(ws," forward method, overrides the "),Ys=r(ws,"CODE",{});var Uu=i(Ys);Wl=s(Uu,"__call__"),Uu.forEach(t),Bl=s(ws," special method."),ws.forEach(t),Hl=d(rt),v(Kt.$$.fragment,rt),Rl=d(rt),Zs=r(rt,"P",{});var Wu=i(Zs);Kl=s(Wu,"Examples:"),Wu.forEach(t),Vl=d(rt),v(Xo.$$.fragment,rt),rt.forEach(t),fo.forEach(t),Qa=d(o),Mt=r(o,"H2",{class:!0});var Tr=i(Mt);Vt=r(Tr,"A",{id:!0,class:!0,href:!0});var Bu=i(Vt);Gs=r(Bu,"SPAN",{});var Hu=i(Gs);v(Yo.$$.fragment,Hu),Hu.forEach(t),Bu.forEach(t),Xl=d(Tr),Js=r(Tr,"SPAN",{});var Ru=i(Js);Yl=s(Ru,"LayoutLMForSequenceClassification"),Ru.forEach(t),Tr.forEach(t),er=d(o),Ce=r(o,"DIV",{class:!0});var it=i(Ce);v(Zo.$$.fragment,it),Zl=d(it),Go=r(it,"P",{});var Mr=i(Go);Gl=s(Mr,`LayoutLM Model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for document image classification tasks such as the `),Qs=r(Mr,"CODE",{});var Ku=i(Qs);Jl=s(Ku,"RVL-CDIP <https://www.cs.cmu.edu/~aharley/rvl-cdip/>"),Ku.forEach(t),Ql=s(Mr,"__ dataset."),Mr.forEach(t),ed=d(it),Jo=r(it,"P",{});var xr=i(Jo);td=s(xr,"The LayoutLM model was proposed in "),Qo=r(xr,"A",{href:!0,rel:!0});var Vu=i(Qo);od=s(Vu,"LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),Vu.forEach(t),nd=s(xr," by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),xr.forEach(t),sd=d(it),en=r(it,"P",{});var $r=i(en);ad=s($r,"This model is a PyTorch "),tn=r($r,"A",{href:!0,rel:!0});var Xu=i(tn);rd=s(Xu,"torch.nn.Module"),Xu.forEach(t),id=s($r,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$r.forEach(t),ld=d(it),Ie=r(it,"DIV",{class:!0});var lt=i(Ie);v(on.$$.fragment,lt),dd=d(lt),xt=r(lt,"P",{});var Ts=i(xt);cd=s(Ts,"The "),rs=r(Ts,"A",{href:!0});var Yu=i(rs);pd=s(Yu,"LayoutLMForSequenceClassification"),Yu.forEach(t),ud=s(Ts," forward method, overrides the "),ea=r(Ts,"CODE",{});var Zu=i(ea);hd=s(Zu,"__call__"),Zu.forEach(t),md=s(Ts," special method."),Ts.forEach(t),fd=d(lt),v(Xt.$$.fragment,lt),gd=d(lt),ta=r(lt,"P",{});var Gu=i(ta);_d=s(Gu,"Examples:"),Gu.forEach(t),yd=d(lt),v(nn.$$.fragment,lt),lt.forEach(t),it.forEach(t),tr=d(o),$t=r(o,"H2",{class:!0});var jr=i($t);Yt=r(jr,"A",{id:!0,class:!0,href:!0});var Ju=i(Yt);oa=r(Ju,"SPAN",{});var Qu=i(oa);v(sn.$$.fragment,Qu),Qu.forEach(t),Ju.forEach(t),bd=d(jr),na=r(jr,"SPAN",{});var eh=i(na);kd=s(eh,"LayoutLMForTokenClassification"),eh.forEach(t),jr.forEach(t),or=d(o),Pe=r(o,"DIV",{class:!0});var dt=i(Pe);v(an.$$.fragment,dt),Ld=d(dt),Zt=r(dt,"P",{});var Ia=i(Zt);vd=s(Ia,`LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for sequence labeling (information extraction) tasks such as the `),sa=r(Ia,"CODE",{});var th=i(sa);wd=s(th,"FUNSD <https://guillaumejaume.github.io/FUNSD/>"),th.forEach(t),is=r(Ia,"STRONG",{});var Sp=i(is);Td=s(Sp,"dataset and the "),aa=r(Sp,"CODE",{});var oh=i(aa);Md=s(oh,"SROIE <https://rrc.cvc.uab.es/?ch=13>"),oh.forEach(t),Sp.forEach(t),xd=s(Ia," dataset."),Ia.forEach(t),$d=d(dt),rn=r(dt,"P",{});var Fr=i(rn);jd=s(Fr,"The LayoutLM model was proposed in "),ln=r(Fr,"A",{href:!0,rel:!0});var nh=i(ln);Fd=s(nh,"LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),nh.forEach(t),zd=s(Fr," by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou."),Fr.forEach(t),Ed=d(dt),dn=r(dt,"P",{});var zr=i(dn);qd=s(zr,"This model is a PyTorch "),cn=r(zr,"A",{href:!0,rel:!0});var sh=i(cn);Cd=s(sh,"torch.nn.Module"),sh.forEach(t),Pd=s(zr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zr.forEach(t),Nd=d(dt),De=r(dt,"DIV",{class:!0});var ct=i(De);v(pn.$$.fragment,ct),Sd=d(ct),jt=r(ct,"P",{});var Ms=i(jt);Id=s(Ms,"The "),ls=r(Ms,"A",{href:!0});var ah=i(ls);Dd=s(ah,"LayoutLMForTokenClassification"),ah.forEach(t),Ad=s(Ms," forward method, overrides the "),ra=r(Ms,"CODE",{});var rh=i(ra);Od=s(rh,"__call__"),rh.forEach(t),Ud=s(Ms," special method."),Ms.forEach(t),Wd=d(ct),v(Gt.$$.fragment,ct),Bd=d(ct),ia=r(ct,"P",{});var ih=i(ia);Hd=s(ih,"Examples:"),ih.forEach(t),Rd=d(ct),v(un.$$.fragment,ct),ct.forEach(t),dt.forEach(t),nr=d(o),Ft=r(o,"H2",{class:!0});var Er=i(Ft);Jt=r(Er,"A",{id:!0,class:!0,href:!0});var lh=i(Jt);la=r(lh,"SPAN",{});var dh=i(la);v(hn.$$.fragment,dh),dh.forEach(t),lh.forEach(t),Kd=d(Er),da=r(Er,"SPAN",{});var ch=i(da);Vd=s(ch,"TFLayoutLMModel"),ch.forEach(t),Er.forEach(t),sr=d(o),Le=r(o,"DIV",{class:!0});var Ye=i(Le);v(mn.$$.fragment,Ye),Xd=d(Ye),ca=r(Ye,"P",{});var ph=i(ca);Yd=s(ph,"The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top."),ph.forEach(t),Zd=d(Ye),fn=r(Ye,"P",{});var qr=i(fn);Gd=s(qr,"This model inherits from "),ds=r(qr,"A",{href:!0});var uh=i(ds);Jd=s(uh,"TFPreTrainedModel"),uh.forEach(t),Qd=s(qr,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qr.forEach(t),ec=d(Ye),gn=r(Ye,"P",{});var Cr=i(gn);tc=s(Cr,"This model is also a "),_n=r(Cr,"A",{href:!0,rel:!0});var hh=i(_n);oc=s(hh,"tf.keras.Model"),hh.forEach(t),nc=s(Cr,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cr.forEach(t),sc=d(Ye),v(Qt.$$.fragment,Ye),ac=d(Ye),Ae=r(Ye,"DIV",{class:!0});var pt=i(Ae);v(yn.$$.fragment,pt),rc=d(pt),zt=r(pt,"P",{});var xs=i(zt);ic=s(xs,"The "),cs=r(xs,"A",{href:!0});var mh=i(cs);lc=s(mh,"TFLayoutLMModel"),mh.forEach(t),dc=s(xs," forward method, overrides the "),pa=r(xs,"CODE",{});var fh=i(pa);cc=s(fh,"__call__"),fh.forEach(t),pc=s(xs," special method."),xs.forEach(t),uc=d(pt),v(eo.$$.fragment,pt),hc=d(pt),ua=r(pt,"P",{});var gh=i(ua);mc=s(gh,"Examples:"),gh.forEach(t),fc=d(pt),v(bn.$$.fragment,pt),pt.forEach(t),Ye.forEach(t),ar=d(o),Et=r(o,"H2",{class:!0});var Pr=i(Et);to=r(Pr,"A",{id:!0,class:!0,href:!0});var _h=i(to);ha=r(_h,"SPAN",{});var yh=i(ha);v(kn.$$.fragment,yh),yh.forEach(t),_h.forEach(t),gc=d(Pr),ma=r(Pr,"SPAN",{});var bh=i(ma);_c=s(bh,"TFLayoutLMForMaskedLM"),bh.forEach(t),Pr.forEach(t),rr=d(o),ve=r(o,"DIV",{class:!0});var Ze=i(ve);v(Ln.$$.fragment,Ze),yc=d(Ze),vn=r(Ze,"P",{});var Nr=i(vn);bc=s(Nr,"LayoutLM Model with a "),fa=r(Nr,"CODE",{});var kh=i(fa);kc=s(kh,"language modeling"),kh.forEach(t),Lc=s(Nr," head on top."),Nr.forEach(t),vc=d(Ze),wn=r(Ze,"P",{});var Sr=i(wn);wc=s(Sr,"This model inherits from "),ps=r(Sr,"A",{href:!0});var Lh=i(ps);Tc=s(Lh,"TFPreTrainedModel"),Lh.forEach(t),Mc=s(Sr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sr.forEach(t),xc=d(Ze),Tn=r(Ze,"P",{});var Ir=i(Tn);$c=s(Ir,"This model is also a "),Mn=r(Ir,"A",{href:!0,rel:!0});var vh=i(Mn);jc=s(vh,"tf.keras.Model"),vh.forEach(t),Fc=s(Ir,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ir.forEach(t),zc=d(Ze),v(oo.$$.fragment,Ze),Ec=d(Ze),Oe=r(Ze,"DIV",{class:!0});var ut=i(Oe);v(xn.$$.fragment,ut),qc=d(ut),qt=r(ut,"P",{});var $s=i(qt);Cc=s($s,"The "),us=r($s,"A",{href:!0});var wh=i(us);Pc=s(wh,"TFLayoutLMForMaskedLM"),wh.forEach(t),Nc=s($s," forward method, overrides the "),ga=r($s,"CODE",{});var Th=i(ga);Sc=s(Th,"__call__"),Th.forEach(t),Ic=s($s," special method."),$s.forEach(t),Dc=d(ut),v(no.$$.fragment,ut),Ac=d(ut),_a=r(ut,"P",{});var Mh=i(_a);Oc=s(Mh,"Examples:"),Mh.forEach(t),Uc=d(ut),v($n.$$.fragment,ut),ut.forEach(t),Ze.forEach(t),ir=d(o),Ct=r(o,"H2",{class:!0});var Dr=i(Ct);so=r(Dr,"A",{id:!0,class:!0,href:!0});var xh=i(so);ya=r(xh,"SPAN",{});var $h=i(ya);v(jn.$$.fragment,$h),$h.forEach(t),xh.forEach(t),Wc=d(Dr),ba=r(Dr,"SPAN",{});var jh=i(ba);Bc=s(jh,"TFLayoutLMForSequenceClassification"),jh.forEach(t),Dr.forEach(t),lr=d(o),we=r(o,"DIV",{class:!0});var Ge=i(we);v(Fn.$$.fragment,Ge),Hc=d(Ge),ka=r(Ge,"P",{});var Fh=i(ka);Rc=s(Fh,`LayoutLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.`),Fh.forEach(t),Kc=d(Ge),zn=r(Ge,"P",{});var Ar=i(zn);Vc=s(Ar,"This model inherits from "),hs=r(Ar,"A",{href:!0});var zh=i(hs);Xc=s(zh,"TFPreTrainedModel"),zh.forEach(t),Yc=s(Ar,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ar.forEach(t),Zc=d(Ge),En=r(Ge,"P",{});var Or=i(En);Gc=s(Or,"This model is also a "),qn=r(Or,"A",{href:!0,rel:!0});var Eh=i(qn);Jc=s(Eh,"tf.keras.Model"),Eh.forEach(t),Qc=s(Or,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Or.forEach(t),ep=d(Ge),v(ao.$$.fragment,Ge),tp=d(Ge),Ue=r(Ge,"DIV",{class:!0});var ht=i(Ue);v(Cn.$$.fragment,ht),op=d(ht),Pt=r(ht,"P",{});var js=i(Pt);np=s(js,"The "),ms=r(js,"A",{href:!0});var qh=i(ms);sp=s(qh,"TFLayoutLMForSequenceClassification"),qh.forEach(t),ap=s(js," forward method, overrides the "),La=r(js,"CODE",{});var Ch=i(La);rp=s(Ch,"__call__"),Ch.forEach(t),ip=s(js," special method."),js.forEach(t),lp=d(ht),v(ro.$$.fragment,ht),dp=d(ht),va=r(ht,"P",{});var Ph=i(va);cp=s(Ph,"Examples:"),Ph.forEach(t),pp=d(ht),v(Pn.$$.fragment,ht),ht.forEach(t),Ge.forEach(t),dr=d(o),Nt=r(o,"H2",{class:!0});var Ur=i(Nt);io=r(Ur,"A",{id:!0,class:!0,href:!0});var Nh=i(io);wa=r(Nh,"SPAN",{});var Sh=i(wa);v(Nn.$$.fragment,Sh),Sh.forEach(t),Nh.forEach(t),up=d(Ur),Ta=r(Ur,"SPAN",{});var Ih=i(Ta);hp=s(Ih,"TFLayoutLMForTokenClassification"),Ih.forEach(t),Ur.forEach(t),cr=d(o),Te=r(o,"DIV",{class:!0});var Je=i(Te);v(Sn.$$.fragment,Je),mp=d(Je),Ma=r(Je,"P",{});var Dh=i(Ma);fp=s(Dh,`LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Dh.forEach(t),gp=d(Je),In=r(Je,"P",{});var Wr=i(In);_p=s(Wr,"This model inherits from "),fs=r(Wr,"A",{href:!0});var Ah=i(fs);yp=s(Ah,"TFPreTrainedModel"),Ah.forEach(t),bp=s(Wr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wr.forEach(t),kp=d(Je),Dn=r(Je,"P",{});var Br=i(Dn);Lp=s(Br,"This model is also a "),An=r(Br,"A",{href:!0,rel:!0});var Oh=i(An);vp=s(Oh,"tf.keras.Model"),Oh.forEach(t),wp=s(Br,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Br.forEach(t),Tp=d(Je),v(lo.$$.fragment,Je),Mp=d(Je),We=r(Je,"DIV",{class:!0});var mt=i(We);v(On.$$.fragment,mt),xp=d(mt),St=r(mt,"P",{});var Fs=i(St);$p=s(Fs,"The "),gs=r(Fs,"A",{href:!0});var Uh=i(gs);jp=s(Uh,"TFLayoutLMForTokenClassification"),Uh.forEach(t),Fp=s(Fs," forward method, overrides the "),xa=r(Fs,"CODE",{});var Wh=i(xa);zp=s(Wh,"__call__"),Wh.forEach(t),Ep=s(Fs," special method."),Fs.forEach(t),qp=d(mt),v(co.$$.fragment,mt),Cp=d(mt),$a=r(mt,"P",{});var Bh=i($a);Pp=s(Bh,"Examples:"),Bh.forEach(t),Np=d(mt),v(Un.$$.fragment,mt),mt.forEach(t),Je.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(im)),c(g,"id","layoutlm"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#layoutlm"),c(f,"class","relative group"),c(z,"id","Overview"),c(Z,"id","overview"),c(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Z,"href","#overview"),c(I,"class","relative group"),c(F,"href","https://arxiv.org/abs/1912.13318"),c(F,"rel","nofollow"),c(te,"href","https://guillaumejaume.github.io/FUNSD/"),c(te,"rel","nofollow"),c(oe,"href","https://rrc.cvc.uab.es/?ch=13"),c(oe,"rel","nofollow"),c(U,"href","https://www.cs.cmu.edu/~aharley/rvl-cdip/"),c(U,"rel","nofollow"),c(Kn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel.forward"),c(go,"href","https://github.com/tesseract-ocr/tesseract"),c(go,"rel","nofollow"),c(_o,"href","https://pypi.org/project/pytesseract/"),c(_o,"rel","nofollow"),c(Xn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForTokenClassification"),c(ko,"href","https://guillaumejaume.github.io/FUNSD/"),c(ko,"rel","nofollow"),c(Lo,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb"),c(Lo,"rel","nofollow"),c(vo,"href","https://huggingface.co/liminghao1630"),c(vo,"rel","nofollow"),c(wo,"href","https://github.com/microsoft/unilm/tree/master/layoutlm"),c(wo,"rel","nofollow"),c(Dt,"id","transformers.LayoutLMConfig"),c(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dt,"href","#transformers.LayoutLMConfig"),c(ft,"class","relative group"),c(Yn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel"),c(xo,"href","https://huggingface.co/microsoft/layoutlm-base-uncased"),c(xo,"rel","nofollow"),c(Zn,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig"),c(Gn,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertConfig"),c(qe,"class","docstring"),c(At,"id","transformers.LayoutLMTokenizer"),c(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(At,"href","#transformers.LayoutLMTokenizer"),c(yt,"class","relative 
group"),c(Jn,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizer"),c(Qn,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(es,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(Be,"class","docstring"),c(Ut,"id","transformers.LayoutLMTokenizerFast"),c(Ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ut,"href","#transformers.LayoutLMTokenizerFast"),c(bt,"class","relative group"),c(ts,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMTokenizerFast"),c(os,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(ns,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(He,"class","docstring"),c(Bt,"id","transformers.LayoutLMModel"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#transformers.LayoutLMModel"),c(kt,"class","relative group"),c(Io,"href","https://arxiv.org/abs/1912.13318"),c(Io,"rel","nofollow"),c(Ao,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ao,"rel","nofollow"),c(ss,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMModel"),c(Ne,"class","docstring"),c(Re,"class","docstring"),c(Rt,"id","transformers.LayoutLMForMaskedLM"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.LayoutLMForMaskedLM"),c(vt,"class","relative group"),c(Ho,"href","https://arxiv.org/abs/1912.13318"),c(Ho,"rel","nofollow"),c(Ko,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ko,"rel","nofollow"),c(as,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForMaskedLM"),c(Se,"class","docstring"),c(Ke,"class","docstring"),c(Vt,"id","transformers.LayoutLMForSequenceClassification"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.LayoutLMForSequenceClassification"),c(Mt,"class","relative group"),c(Qo,"href","https://arxiv.org/abs/1912.13318"),c(Qo,"rel","nofollow"),c(tn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(tn,"rel","nofollow"),c(rs,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForSequenceClassification"),c(Ie,"class","docstring"),c(Ce,"class","docstring"),c(Yt,"id","transformers.LayoutLMForTokenClassification"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#transformers.LayoutLMForTokenClassification"),c($t,"class","relative group"),c(ln,"href","https://arxiv.org/abs/1912.13318"),c(ln,"rel","nofollow"),c(cn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(cn,"rel","nofollow"),c(ls,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.LayoutLMForTokenClassification"),c(De,"class","docstring"),c(Pe,"class","docstring"),c(Jt,"id","transformers.TFLayoutLMModel"),c(Jt,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#transformers.TFLayoutLMModel"),c(Ft,"class","relative group"),c(ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(_n,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(_n,"rel","nofollow"),c(cs,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMModel"),c(Ae,"class","docstring"),c(Le,"class","docstring"),c(to,"id","transformers.TFLayoutLMForMaskedLM"),c(to,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(to,"href","#transformers.TFLayoutLMForMaskedLM"),c(Et,"class","relative group"),c(ps,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Mn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Mn,"rel","nofollow"),c(us,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForMaskedLM"),c(Oe,"class","docstring"),c(ve,"class","docstring"),c(so,"id","transformers.TFLayoutLMForSequenceClassification"),c(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(so,"href","#transformers.TFLayoutLMForSequenceClassification"),c(Ct,"class","relative group"),c(hs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(qn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(qn,"rel","nofollow"),c(ms,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForSequenceClassification"),c(Ue,"class","docstring"),c(we,"class","docstring"),c(io,"id","transformers.TFLayoutLMForTokenClassification"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.TFLayoutLMForTokenClassification"),c(Nt,"class","relative 
group"),c(fs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(An,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(An,"rel","nofollow"),c(gs,"href","/docs/transformers/v4.15.0/en/model_doc/layoutlm#transformers.TFLayoutLMForTokenClassification"),c(We,"class","docstring"),c(Te,"class","docstring")},m(o,m){e(document.head,h),u(o,$,m),u(o,f,m),e(f,g),e(g,b),w(y,b,null),e(f,_),e(f,j),e(j,re),u(o,Y,m),u(o,z,m),u(o,ae,m),u(o,I,m),e(I,Z),e(Z,ne),w(A,ne,null),e(I,ie),e(I,se),e(se,J),u(o,D,m),u(o,G,m),e(G,Q),e(G,F),e(F,E),e(G,le),u(o,W,m),u(o,V,m),e(V,ee),e(ee,H),e(ee,te),e(te,de),e(ee,q),e(V,ce),e(V,O),e(O,pe),e(O,oe),e(oe,R),e(O,ue),e(V,he),e(V,P),e(P,me),e(P,U),e(U,fe),e(P,p),u(o,k,m),u(o,X,m),e(X,je),u(o,Me,m),u(o,N,m),e(N,be),e(be,Fe),u(o,xe,m),u(o,S,m),e(S,K),u(o,$e,m),u(o,ye,m),e(ye,C),e(C,ze),e(C,ke),e(ke,ge),e(C,Ee),e(C,Kn),e(Kn,Hr),e(C,Rr),e(C,zs),e(zs,Kr),e(C,Vr),e(C,go),e(go,Xr),e(C,Yr),e(C,_o),e(_o,Zr),e(C,Gr),u(o,Da,m),w(yo,o,m),u(o,Aa,m),u(o,ot,m),e(ot,Jr),e(ot,Es),e(Es,Qr),e(ot,ei),e(ot,qs),e(qs,ti),e(ot,oi),u(o,Oa,m),w(bo,o,m),u(o,Ua,m),u(o,Vn,m),e(Vn,et),e(et,ni),e(et,Xn),e(Xn,si),e(et,ai),e(et,ko),e(ko,ri),e(et,ii),e(et,Lo),e(Lo,li),e(et,di),u(o,Wa,m),u(o,nt,m),e(nt,ci),e(nt,vo),e(vo,pi),e(nt,ui),e(nt,wo),e(wo,hi),e(nt,mi),u(o,Ba,m),u(o,ft,m),e(ft,Dt),e(Dt,Cs),w(To,Cs,null),e(ft,fi),e(ft,Ps),e(Ps,gi),u(o,Ha,m),u(o,qe,m),w(Mo,qe,null),e(qe,_i),e(qe,gt),e(gt,yi),e(gt,Yn),e(Yn,bi),e(gt,ki),e(gt,xo),e(xo,Li),e(gt,vi),e(qe,wi),e(qe,_t),e(_t,Ti),e(_t,Zn),e(Zn,Mi),e(_t,xi),e(_t,Gn),e(Gn,$i),e(_t,ji),e(qe,Fi),e(qe,Ns),e(Ns,zi),e(qe,Ei),w($o,qe,null),u(o,Ra,m),u(o,yt,m),e(yt,At),e(At,Ss),w(jo,Ss,null),e(yt,qi),e(yt,Is),e(Is,Ci),u(o,Ka,m),u(o,Be,m),w(Fo,Be,null),e(Be,Pi),e(Be,Ds),e(Ds,Ni),e(Be,Si),e(Be,Ot),e(Ot,Jn),e(Jn,Ii),e(Ot,Di),e(Ot,Qn),e(Qn,Ai),e(Ot,Oi),e(Be,Ui),e(Be,zo),e(zo,Wi),e(zo,es),e(es,Bi),e(zo,Hi),u(o,Va,m),u(o,bt,m),e(bt,Ut),e(Ut,As),w(Eo,As,null),e(bt,Ri),e(bt,Os),e(Os,Ki),u(o,Xa,m),u(o,He,m),w(qo,He,null),e(He,Vi),e(He,Us),e(Us,Xi),e(He,Yi),e(He,Wt),e(Wt,ts),e(ts,Zi),e(Wt,Gi),e(Wt,os),e(os,Ji),e(Wt,Qi),e(He,el),e(He,Co),e(Co,tl),e(Co,ns),e(ns,ol),e(Co,nl),u(o,Ya,m),u(o,kt,m),e(kt,Bt),e(Bt,Ws),w(Po,Ws,null),e(kt,sl),e(kt,Bs),e(Bs,al),u(o,Za,m),u(o,Re,m),w(No,Re,null),e(Re,rl),e(Re,So),e(So,il),e(So,Io),e(Io,ll),e(So,dl),e(Re,cl),e(Re,Do),e(Do,pl),e(Do,Ao),e(Ao,ul),e(Do,hl),e(Re,ml),e(Re,Ne),w(Oo,Ne,null),e(Ne,fl),e(Ne,Lt),e(Lt,gl),e(Lt,ss),e(ss,_l),e(Lt,yl),e(Lt,Hs),e(Hs,bl),e(Lt,kl),e(Ne,Ll),w(Ht,Ne,null),e(Ne,vl),e(Ne,Rs),e(Rs,wl),e(Ne,Tl),w(Uo,Ne,null),u(o,Ga,m),u(o,vt,m),e(vt,Rt),e(Rt,Ks),w(Wo,Ks,null),e(vt,Ml),e(vt,Vs),e(Vs,xl),u(o,Ja,m),u(o,Ke,m),w(Bo,Ke,null),e(Ke,$l),e(Ke,wt),e(wt,jl),e(wt,Xs),e(Xs,Fl),e(wt,zl),e(wt,Ho),e(Ho,El),e(wt,ql),e(Ke,Cl),e(Ke,Ro),e(Ro,Pl),e(Ro,Ko),e(Ko,Nl),e(Ro,Sl),e(Ke,Il),e(Ke,Se),w(Vo,Se,null),e(Se,Dl),e(Se,Tt),e(Tt,Al),e(Tt,as),e(as,Ol),e(Tt,Ul),e(Tt,Ys),e(Ys,Wl),e(Tt,Bl),e(Se,Hl),w(Kt,Se,null),e(Se,Rl),e(Se,Zs),e(Zs,Kl),e(Se,Vl),w(Xo,Se,null),u(o,Qa,m),u(o,Mt,m),e(Mt,Vt),e(Vt,Gs),w(Yo,Gs,null),e(Mt,Xl),e(Mt,Js),e(Js,Yl),u(o,er,m),u(o,Ce,m),w(Zo,Ce,null),e(Ce,Zl),e(Ce,Go),e(Go,Gl),e(Go,Qs),e(Qs,Jl),e(Go,Ql),e(Ce,ed),e(Ce,Jo),e(Jo,td),e(Jo,Qo),e(Qo,od),e(Jo,nd),e(Ce,sd),e(Ce,en),e(en,ad),e(en,tn),e(tn,rd),e(en,id),e(Ce,ld),e(Ce,Ie),w(on,Ie,null),e(Ie,dd),e(Ie,xt),e(xt,cd),e(xt,rs),e(rs,pd),e(xt,ud),e(xt,ea),e(ea,hd),e(xt,md),e(Ie,fd),w(Xt,Ie,null),e(Ie,gd),e(Ie,ta),e(ta,_d),e(Ie,yd),w(nn,Ie,null),u(o,tr,m),u(o,$t,m),e($t,Yt),e(Yt,oa),w(sn,oa,null),e($t,bd),e($t,na),e(na,kd),u
(o,or,m),u(o,Pe,m),w(an,Pe,null),e(Pe,Ld),e(Pe,Zt),e(Zt,vd),e(Zt,sa),e(sa,wd),e(Zt,is),e(is,Td),e(is,aa),e(aa,Md),e(Zt,xd),e(Pe,$d),e(Pe,rn),e(rn,jd),e(rn,ln),e(ln,Fd),e(rn,zd),e(Pe,Ed),e(Pe,dn),e(dn,qd),e(dn,cn),e(cn,Cd),e(dn,Pd),e(Pe,Nd),e(Pe,De),w(pn,De,null),e(De,Sd),e(De,jt),e(jt,Id),e(jt,ls),e(ls,Dd),e(jt,Ad),e(jt,ra),e(ra,Od),e(jt,Ud),e(De,Wd),w(Gt,De,null),e(De,Bd),e(De,ia),e(ia,Hd),e(De,Rd),w(un,De,null),u(o,nr,m),u(o,Ft,m),e(Ft,Jt),e(Jt,la),w(hn,la,null),e(Ft,Kd),e(Ft,da),e(da,Vd),u(o,sr,m),u(o,Le,m),w(mn,Le,null),e(Le,Xd),e(Le,ca),e(ca,Yd),e(Le,Zd),e(Le,fn),e(fn,Gd),e(fn,ds),e(ds,Jd),e(fn,Qd),e(Le,ec),e(Le,gn),e(gn,tc),e(gn,_n),e(_n,oc),e(gn,nc),e(Le,sc),w(Qt,Le,null),e(Le,ac),e(Le,Ae),w(yn,Ae,null),e(Ae,rc),e(Ae,zt),e(zt,ic),e(zt,cs),e(cs,lc),e(zt,dc),e(zt,pa),e(pa,cc),e(zt,pc),e(Ae,uc),w(eo,Ae,null),e(Ae,hc),e(Ae,ua),e(ua,mc),e(Ae,fc),w(bn,Ae,null),u(o,ar,m),u(o,Et,m),e(Et,to),e(to,ha),w(kn,ha,null),e(Et,gc),e(Et,ma),e(ma,_c),u(o,rr,m),u(o,ve,m),w(Ln,ve,null),e(ve,yc),e(ve,vn),e(vn,bc),e(vn,fa),e(fa,kc),e(vn,Lc),e(ve,vc),e(ve,wn),e(wn,wc),e(wn,ps),e(ps,Tc),e(wn,Mc),e(ve,xc),e(ve,Tn),e(Tn,$c),e(Tn,Mn),e(Mn,jc),e(Tn,Fc),e(ve,zc),w(oo,ve,null),e(ve,Ec),e(ve,Oe),w(xn,Oe,null),e(Oe,qc),e(Oe,qt),e(qt,Cc),e(qt,us),e(us,Pc),e(qt,Nc),e(qt,ga),e(ga,Sc),e(qt,Ic),e(Oe,Dc),w(no,Oe,null),e(Oe,Ac),e(Oe,_a),e(_a,Oc),e(Oe,Uc),w($n,Oe,null),u(o,ir,m),u(o,Ct,m),e(Ct,so),e(so,ya),w(jn,ya,null),e(Ct,Wc),e(Ct,ba),e(ba,Bc),u(o,lr,m),u(o,we,m),w(Fn,we,null),e(we,Hc),e(we,ka),e(ka,Rc),e(we,Kc),e(we,zn),e(zn,Vc),e(zn,hs),e(hs,Xc),e(zn,Yc),e(we,Zc),e(we,En),e(En,Gc),e(En,qn),e(qn,Jc),e(En,Qc),e(we,ep),w(ao,we,null),e(we,tp),e(we,Ue),w(Cn,Ue,null),e(Ue,op),e(Ue,Pt),e(Pt,np),e(Pt,ms),e(ms,sp),e(Pt,ap),e(Pt,La),e(La,rp),e(Pt,ip),e(Ue,lp),w(ro,Ue,null),e(Ue,dp),e(Ue,va),e(va,cp),e(Ue,pp),w(Pn,Ue,null),u(o,dr,m),u(o,Nt,m),e(Nt,io),e(io,wa),w(Nn,wa,null),e(Nt,up),e(Nt,Ta),e(Ta,hp),u(o,cr,m),u(o,Te,m),w(Sn,Te,null),e(Te,mp),e(Te,Ma),e(Ma,fp),e(Te,gp),e(Te,In),e(In,_p),e(In,fs),e(fs,yp),e(In,bp),e(Te,kp),e(Te,Dn),e(Dn,Lp),e(Dn,An),e(An,vp),e(Dn,wp),e(Te,Tp),w(lo,Te,null),e(Te,Mp),e(Te,We),w(On,We,null),e(We,xp),e(We,St),e(St,$p),e(St,gs),e(gs,jp),e(St,Fp),e(St,xa),e(xa,zp),e(St,Ep),e(We,qp),w(co,We,null),e(We,Cp),e(We,$a),e($a,Pp),e(We,Np),w(Un,We,null),pr=!0},p(o,[m]){const Wn={};m&2&&(Wn.$$scope={dirty:m,ctx:o}),Ht.$set(Wn);const ja={};m&2&&(ja.$$scope={dirty:m,ctx:o}),Kt.$set(ja);const Fa={};m&2&&(Fa.$$scope={dirty:m,ctx:o}),Xt.$set(Fa);const za={};m&2&&(za.$$scope={dirty:m,ctx:o}),Gt.$set(za);const Bn={};m&2&&(Bn.$$scope={dirty:m,ctx:o}),Qt.$set(Bn);const Ea={};m&2&&(Ea.$$scope={dirty:m,ctx:o}),eo.$set(Ea);const qa={};m&2&&(qa.$$scope={dirty:m,ctx:o}),oo.$set(qa);const Ca={};m&2&&(Ca.$$scope={dirty:m,ctx:o}),no.$set(Ca);const Hn={};m&2&&(Hn.$$scope={dirty:m,ctx:o}),ao.$set(Hn);const Pa={};m&2&&(Pa.$$scope={dirty:m,ctx:o}),ro.$set(Pa);const It={};m&2&&(It.$$scope={dirty:m,ctx:o}),lo.$set(It);const 
Rn={};m&2&&(Rn.$$scope={dirty:m,ctx:o}),co.$set(Rn)},i(o){pr||(T(y.$$.fragment,o),T(A.$$.fragment,o),T(yo.$$.fragment,o),T(bo.$$.fragment,o),T(To.$$.fragment,o),T(Mo.$$.fragment,o),T($o.$$.fragment,o),T(jo.$$.fragment,o),T(Fo.$$.fragment,o),T(Eo.$$.fragment,o),T(qo.$$.fragment,o),T(Po.$$.fragment,o),T(No.$$.fragment,o),T(Oo.$$.fragment,o),T(Ht.$$.fragment,o),T(Uo.$$.fragment,o),T(Wo.$$.fragment,o),T(Bo.$$.fragment,o),T(Vo.$$.fragment,o),T(Kt.$$.fragment,o),T(Xo.$$.fragment,o),T(Yo.$$.fragment,o),T(Zo.$$.fragment,o),T(on.$$.fragment,o),T(Xt.$$.fragment,o),T(nn.$$.fragment,o),T(sn.$$.fragment,o),T(an.$$.fragment,o),T(pn.$$.fragment,o),T(Gt.$$.fragment,o),T(un.$$.fragment,o),T(hn.$$.fragment,o),T(mn.$$.fragment,o),T(Qt.$$.fragment,o),T(yn.$$.fragment,o),T(eo.$$.fragment,o),T(bn.$$.fragment,o),T(kn.$$.fragment,o),T(Ln.$$.fragment,o),T(oo.$$.fragment,o),T(xn.$$.fragment,o),T(no.$$.fragment,o),T($n.$$.fragment,o),T(jn.$$.fragment,o),T(Fn.$$.fragment,o),T(ao.$$.fragment,o),T(Cn.$$.fragment,o),T(ro.$$.fragment,o),T(Pn.$$.fragment,o),T(Nn.$$.fragment,o),T(Sn.$$.fragment,o),T(lo.$$.fragment,o),T(On.$$.fragment,o),T(co.$$.fragment,o),T(Un.$$.fragment,o),pr=!0)},o(o){M(y.$$.fragment,o),M(A.$$.fragment,o),M(yo.$$.fragment,o),M(bo.$$.fragment,o),M(To.$$.fragment,o),M(Mo.$$.fragment,o),M($o.$$.fragment,o),M(jo.$$.fragment,o),M(Fo.$$.fragment,o),M(Eo.$$.fragment,o),M(qo.$$.fragment,o),M(Po.$$.fragment,o),M(No.$$.fragment,o),M(Oo.$$.fragment,o),M(Ht.$$.fragment,o),M(Uo.$$.fragment,o),M(Wo.$$.fragment,o),M(Bo.$$.fragment,o),M(Vo.$$.fragment,o),M(Kt.$$.fragment,o),M(Xo.$$.fragment,o),M(Yo.$$.fragment,o),M(Zo.$$.fragment,o),M(on.$$.fragment,o),M(Xt.$$.fragment,o),M(nn.$$.fragment,o),M(sn.$$.fragment,o),M(an.$$.fragment,o),M(pn.$$.fragment,o),M(Gt.$$.fragment,o),M(un.$$.fragment,o),M(hn.$$.fragment,o),M(mn.$$.fragment,o),M(Qt.$$.fragment,o),M(yn.$$.fragment,o),M(eo.$$.fragment,o),M(bn.$$.fragment,o),M(kn.$$.fragment,o),M(Ln.$$.fragment,o),M(oo.$$.fragment,o),M(xn.$$.fragment,o),M(no.$$.fragment,o),M($n.$$.fragment,o),M(jn.$$.fragment,o),M(Fn.$$.fragment,o),M(ao.$$.fragment,o),M(Cn.$$.fragment,o),M(ro.$$.fragment,o),M(Pn.$$.fragment,o),M(Nn.$$.fragment,o),M(Sn.$$.fragment,o),M(lo.$$.fragment,o),M(On.$$.fragment,o),M(co.$$.fragment,o),M(Un.$$.fragment,o),pr=!1},d(o){t(h),o&&t($),o&&t(f),x(y),o&&t(Y),o&&t(z),o&&t(ae),o&&t(I),x(A),o&&t(D),o&&t(G),o&&t(W),o&&t(V),o&&t(k),o&&t(X),o&&t(Me),o&&t(N),o&&t(xe),o&&t(S),o&&t($e),o&&t(ye),o&&t(Da),x(yo,o),o&&t(Aa),o&&t(ot),o&&t(Oa),x(bo,o),o&&t(Ua),o&&t(Vn),o&&t(Wa),o&&t(nt),o&&t(Ba),o&&t(ft),x(To),o&&t(Ha),o&&t(qe),x(Mo),x($o),o&&t(Ra),o&&t(yt),x(jo),o&&t(Ka),o&&t(Be),x(Fo),o&&t(Va),o&&t(bt),x(Eo),o&&t(Xa),o&&t(He),x(qo),o&&t(Ya),o&&t(kt),x(Po),o&&t(Za),o&&t(Re),x(No),x(Oo),x(Ht),x(Uo),o&&t(Ga),o&&t(vt),x(Wo),o&&t(Ja),o&&t(Ke),x(Bo),x(Vo),x(Kt),x(Xo),o&&t(Qa),o&&t(Mt),x(Yo),o&&t(er),o&&t(Ce),x(Zo),x(on),x(Xt),x(nn),o&&t(tr),o&&t($t),x(sn),o&&t(or),o&&t(Pe),x(an),x(pn),x(Gt),x(un),o&&t(nr),o&&t(Ft),x(hn),o&&t(sr),o&&t(Le),x(mn),x(Qt),x(yn),x(eo),x(bn),o&&t(ar),o&&t(Et),x(kn),o&&t(rr),o&&t(ve),x(Ln),x(oo),x(xn),x(no),x($n),o&&t(ir),o&&t(Ct),x(jn),o&&t(lr),o&&t(we),x(Fn),x(ao),x(Cn),x(ro),x(Pn),o&&t(dr),o&&t(Nt),x(Nn),o&&t(cr),o&&t(Te),x(Sn),x(lo),x(On),x(co),x(Un)}}}const 
im={local:"layoutlm",sections:[{local:"overview",title:"Overview"},{local:"transformers.LayoutLMConfig",title:"LayoutLMConfig"},{local:"transformers.LayoutLMTokenizer",title:"LayoutLMTokenizer"},{local:"transformers.LayoutLMTokenizerFast",title:"LayoutLMTokenizerFast"},{local:"transformers.LayoutLMModel",title:"LayoutLMModel"},{local:"transformers.LayoutLMForMaskedLM",title:"LayoutLMForMaskedLM"},{local:"transformers.LayoutLMForSequenceClassification",title:"LayoutLMForSequenceClassification"},{local:"transformers.LayoutLMForTokenClassification",title:"LayoutLMForTokenClassification"},{local:"transformers.TFLayoutLMModel",title:"TFLayoutLMModel"},{local:"transformers.TFLayoutLMForMaskedLM",title:"TFLayoutLMForMaskedLM"},{local:"transformers.TFLayoutLMForSequenceClassification",title:"TFLayoutLMForSequenceClassification"},{local:"transformers.TFLayoutLMForTokenClassification",title:"TFLayoutLMForTokenClassification"}],title:"LayoutLM"};function lm(B,h,$){let{fw:f}=h;return B.$$set=g=>{"fw"in g&&$(0,f=g.fw)},[f]}class fm extends Hh{constructor(h){super();Rh(this,h,lm,rm,Kh,{fw:0})}}export{fm as default,im as metadata};
9,967
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/qdqbert.mdx-c08d7079.js
import{S as Ru,i as Uu,s as Vu,e as r,k as d,w as _,t as n,L as Xu,c as a,d as o,m as c,a as i,x as b,h as s,b as l,J as e,g as u,y as v,q as k,o as Q,B as w}from"../../chunks/vendor-b1433968.js";import{T as ut}from"../../chunks/Tip-c3840994.js";import{D as x}from"../../chunks/Docstring-ff504c58.js";import{C as Z}from"../../chunks/CodeBlock-a320dbd7.js";import{I as K}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ju(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function Gu(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function Ku(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function Zu(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function Yu(D){let 
h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function em(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function tm(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function om(D){let h,y,m,T,$;return{c(){h=r("p"),y=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),T=n("Module"),$=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=a(g,"P",{});var f=i(h);y=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(f,"CODE",{});var q=i(m);T=s(q,"Module"),q.forEach(o),$=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){u(g,h,f),e(h,y),e(h,m),e(m,T),e(h,$)},d(g){g&&o(h)}}}function nm(D){let 
h,y,m,T,$,g,f,q,_a,Zs,_e,Ie,zn,mt,ba,Bn,va,Ys,Oe,ka,ft,Qa,wa,er,Ro,Ta,tr,Uo,Mn,ya,or,Vo,$a,nr,Y,xn,En,qa,Da,Fn,We,za,gt,Ba,Ma,Pn,xa,Ea,jn,_t,Fa,Cn,Pa,ja,Ca,An,bt,Aa,Xo,Na,Sa,sr,He,La,vt,Ia,Oa,rr,be,Re,Nn,kt,Wa,Sn,Ha,ar,E,Ra,Ln,Ua,Va,Qt,Xa,Ja,In,Ga,Ka,On,Za,Ya,wt,ei,ti,ir,Ue,oi,Wn,ni,si,lr,Tt,dr,ve,Ve,Hn,yt,ri,Rn,ai,cr,Jo,ii,pr,$t,hr,ke,Xe,Un,qt,li,Vn,di,ur,te,ci,Dt,pi,hi,zt,ui,mi,mr,Bt,fr,Qe,Je,Xn,Mt,fi,Jn,gi,gr,F,xt,_i,we,bi,Go,vi,ki,Et,Qi,wi,Ti,Te,yi,Ko,$i,qi,Zo,Di,zi,Bi,Gn,Mi,xi,Ft,_r,ye,Ge,Kn,Pt,Ei,Zn,Fi,br,z,jt,Pi,Yn,ji,Ci,Ct,Ai,Yo,Ni,Si,Li,At,Ii,Nt,Oi,Wi,Hi,St,Ri,Lt,Ui,Vi,Xi,M,Ji,es,Gi,Ki,ts,Zi,Yi,os,el,tl,ns,ol,nl,ss,sl,rl,rs,al,il,ll,I,It,dl,$e,cl,en,pl,hl,as,ul,ml,fl,Ke,gl,is,_l,bl,Ot,vr,qe,Ze,ls,Wt,vl,ds,kl,kr,P,Ht,Ql,Rt,wl,cs,Tl,yl,$l,Ut,ql,tn,Dl,zl,Bl,Vt,Ml,Xt,xl,El,Fl,O,Jt,Pl,De,jl,on,Cl,Al,ps,Nl,Sl,Ll,Ye,Il,hs,Ol,Wl,Gt,Qr,ze,et,us,Kt,Hl,ms,Rl,wr,j,Zt,Ul,Yt,Vl,fs,Xl,Jl,Gl,eo,Kl,nn,Zl,Yl,ed,to,td,oo,od,nd,sd,W,no,rd,Be,ad,sn,id,ld,gs,dd,cd,pd,tt,hd,_s,ud,md,so,Tr,Me,ot,bs,ro,fd,vs,gd,yr,C,ao,_d,ks,bd,vd,io,kd,rn,Qd,wd,Td,lo,yd,co,$d,qd,Dd,B,po,zd,xe,Bd,an,Md,xd,Qs,Ed,Fd,Pd,nt,jd,ws,Cd,Ad,ho,Nd,Ts,Sd,Ld,uo,$r,Ee,st,ys,mo,Id,$s,Od,qr,A,fo,Wd,go,Hd,qs,Rd,Ud,Vd,_o,Xd,ln,Jd,Gd,Kd,bo,Zd,vo,Yd,ec,tc,H,ko,oc,Fe,nc,dn,sc,rc,Ds,ac,ic,lc,rt,dc,zs,cc,pc,Qo,Dr,Pe,at,Bs,wo,hc,Ms,uc,zr,N,To,mc,xs,fc,gc,yo,_c,cn,bc,vc,kc,$o,Qc,qo,wc,Tc,yc,R,Do,$c,je,qc,pn,Dc,zc,Es,Bc,Mc,xc,it,Ec,Fs,Fc,Pc,zo,Br,Ce,lt,Ps,Bo,jc,js,Cc,Mr,S,Mo,Ac,Cs,Nc,Sc,xo,Lc,hn,Ic,Oc,Wc,Eo,Hc,Fo,Rc,Uc,Vc,U,Po,Xc,Ae,Jc,un,Gc,Kc,As,Zc,Yc,ep,dt,tp,Ns,op,np,jo,xr,Ne,ct,Ss,Co,sp,Ls,rp,Er,L,Ao,ap,Se,ip,Is,lp,dp,Os,cp,pp,hp,No,up,mn,mp,fp,gp,So,_p,Lo,bp,vp,kp,V,Io,Qp,Le,wp,fn,Tp,yp,Ws,$p,qp,Dp,pt,zp,Hs,Bp,Mp,Oo,Fr;return g=new K({}),mt=new K({}),kt=new K({}),Tt=new Z({props:{code:`import pytorch_quantization.nn as quant_nn from pytorch_quantization.tensor_quant import QuantDescriptor # The default tensor quantizer is set to use Max calibration method input_desc = QuantDescriptor(num_bits=8, calib_method="max") # The default tensor quantizer is set to be per-channel quantization for weights weight_desc = QuantDescriptor(num_bits=8, axis=((0,))) quant_nn.QuantLinear.set_default_quant_desc_input(input_desc) quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> pytorch_quantization.nn <span class="hljs-keyword">as</span> quant_nn</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> pytorch_quantization.tensor_quant <span class="hljs-keyword">import</span> QuantDescriptor</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># The default tensor quantizer is set to use Max calibration method</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">input_desc = QuantDescriptor(num_bits=<span class="hljs-number">8</span>, calib_method=<span class="hljs-string">&quot;max&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># The default tensor quantizer is set to be per-channel quantization for weights</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">weight_desc = QuantDescriptor(num_bits=<span class="hljs-number">8</span>, axis=((<span class="hljs-number">0</span>,)))</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span 
class="language-python">quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)</span>`}}),yt=new K({}),$t=new Z({props:{code:`# Find the TensorQuantizer and enable calibration for name, module in model.named_modules(): if name.endswith('_input_quantizer'): module.enable_calib() module.disable_quant() # Use full precision data to calibrate # Feeding data samples model(x) # ... # Finalize calibration for name, module in model.named_modules(): if name.endswith('_input_quantizer'): module.load_calib_amax() module.enable_quant() # If running on GPU, it needs to call .cuda() again because new tensors will be created by calibration process model.cuda() # Keep running the quantized model # ...,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># Find the TensorQuantizer and enable calibration</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">for</span> name, module <span class="hljs-keyword">in</span> model.named_modules():</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"> <span class="hljs-keyword">if</span> name.endswith(<span class="hljs-string">&#x27;_input_quantizer&#x27;</span>):</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"> module.enable_calib()</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"> module.disable_quant() <span class="hljs-comment"># Use full precision data to calibrate</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># Feeding data samples</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">model(x)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># ...</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># Finalize calibration</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">for</span> name, module <span class="hljs-keyword">in</span> model.named_modules():</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"> <span class="hljs-keyword">if</span> name.endswith(<span class="hljs-string">&#x27;_input_quantizer&#x27;</span>):</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"> module.load_calib_amax()</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"> module.enable_quant()</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># If running on GPU, it needs to call .cuda() again because new tensors will be created by calibration process</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">model.cuda()</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># Keep running the quantized model</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># ...</span></span>`}}),qt=new K({}),Bt=new Z({props:{code:`from pytorch_quantization.nn import TensorQuantizer TensorQuantizer.use_fb_fake_quant = True # Load the calibrated model ... 
# ONNX export torch.onnx.export(...),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> pytorch_quantization.nn <span class="hljs-keyword">import</span> TensorQuantizer</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">TensorQuantizer.use_fb_fake_quant = <span class="hljs-literal">True</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># Load the calibrated model</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">...</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># ONNX export</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">torch.onnx.export(...)</span>`}}),Mt=new K({}),xt=new x({props:{name:"class transformers.QDQBertConfig",anchor:"transformers.QDQBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = False"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/configuration_qdqbert.py#L29",parametersDescription:[{anchor:"transformers.QDQBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the QDQBERT model. 
Defines the number of different tokens that can be represented by the <code>input_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertModel">QDQBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.QDQBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.QDQBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.QDQBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.QDQBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.QDQBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.QDQBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.QDQBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.QDQBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.QDQBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertModel">QDQBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.QDQBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.QDQBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.QDQBertConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"}]}}),Ft=new Z({props:{code:`from transformers import QDQBertModel, QDQBertConfig # Initializing a QDQBERT bert-base-uncased style configuration configuration = QDQBertConfig() # Initializing a model from the bert-base-uncased style configuration model = QDQBertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> QDQBertModel, QDQBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a QDQBERT bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = QDQBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the bert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Pt=new K({}),jt=new x({props:{name:"class transformers.QDQBertModel",anchor:"transformers.QDQBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L831",parametersDescription:[{anchor:"transformers.QDQBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),It=new x({props:{name:"forward",anchor:"transformers.QDQBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L872",parametersDescription:[{anchor:"transformers.QDQBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.QDQBertModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.QDQBertModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.QDQBertModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ke=new ut({props:{$$slots:{default:[Ju]},$$scope:{ctx:D}}}),Ot=new Z({props:{code:`from transformers import BertTokenizer, QDQBertModel import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertModel.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Wt=new K({}),Ht=new x({props:{name:"class transformers.QDQBertLMHeadModel",anchor:"transformers.QDQBertLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1013",parametersDescription:[{anchor:"transformers.QDQBertLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jt=new x({props:{name:"forward",anchor:"transformers.QDQBertLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1036",parametersDescription:[{anchor:"transformers.QDQBertLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertLMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertLMHeadModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.QDQBertLMHeadModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.QDQBertLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels n <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.QDQBertLMHeadModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.QDQBertLMHeadModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each 
tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ye=new ut({props:{$$slots:{default:[Gu]},$$scope:{ctx:D}}}),Gt=new Z({props:{code:`from transformers import BertTokenizer, QDQBertLMHeadModel, QDQBertConfig import torch tokenizer = BertTokenizer.from_pretrained('bert-base-cased') config = QDQBertConfig.from_pretrained("bert-base-cased") config.is_decoder = True model = QDQBertLMHeadModel.from_pretrained('bert-base-cased', config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertLMHeadModel, QDQBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = QDQBertConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;bert-base-cased&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),Kt=new K({}),Zt=new x({props:{name:"class transformers.QDQBertForMaskedLM",anchor:"transformers.QDQBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1161",parametersDescription:[{anchor:"transformers.QDQBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),no=new x({props:{name:"forward",anchor:"transformers.QDQBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1187",parametersDescription:[{anchor:"transformers.QDQBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tt=new ut({props:{$$slots:{default:[Ku]},$$scope:{ctx:D}}}),so=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForMaskedLM import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForMaskedLM.from_pretrained('bert-base-uncased') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ro=new K({}),ao=new x({props:{name:"class transformers.QDQBertForSequenceClassification",anchor:"transformers.QDQBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1374",parametersDescription:[{anchor:"transformers.QDQBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),po=new x({props:{name:"forward",anchor:"transformers.QDQBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1386",parametersDescription:[{anchor:"transformers.QDQBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nt=new ut({props:{$$slots:{default:[Zu]},$$scope:{ctx:D}}}),ho=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForSequenceClassification.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is 
cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),uo=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForSequenceClassification.from_pretrained('bert-base-uncased', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),mo=new K({}),fo=new x({props:{name:"class 
transformers.QDQBertForNextSentencePrediction",anchor:"transformers.QDQBertForNextSentencePrediction",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1271",parametersDescription:[{anchor:"transformers.QDQBertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ko=new x({props:{name:"forward",anchor:"transformers.QDQBertForNextSentencePrediction.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1281",parametersDescription:[{anchor:"transformers.QDQBertForNextSentencePrediction.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertForNextSentencePrediction.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring). 
Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) \u2014 Next sequence prediction (classification) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),rt=new ut({props:{$$slots:{default:[Yu]},$$scope:{ctx:D}}}),Qo=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForNextSentencePrediction import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForNextSentencePrediction.from_pretrained('bert-base-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
encoding = tokenizer(prompt, next_sentence, return_tensors='pt') outputs = model(**encoding, labels=torch.LongTensor([1])) logits = outputs.logits assert logits[0, 0] < logits[0, 1] # next sentence was random,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=torch.LongTensor([<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># next sentence was random</span>`}}),wo=new K({}),To=new x({props:{name:"class transformers.QDQBertForMultipleChoice",anchor:"transformers.QDQBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1471",parametersDescription:[{anchor:"transformers.QDQBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Do=new x({props:{name:"forward",anchor:"transformers.QDQBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1482",parametersDescription:[{anchor:"transformers.QDQBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),it=new ut({props:{$$slots:{default:[em]},$$scope:{ctx:D}}}),zo=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForMultipleChoice import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForMultipleChoice.from_pretrained('bert-base-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Bo=new K({}),Mo=new x({props:{name:"class transformers.QDQBertForTokenClassification",anchor:"transformers.QDQBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1562",parametersDescription:[{anchor:"transformers.QDQBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Po=new x({props:{name:"forward",anchor:"transformers.QDQBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1577",parametersDescription:[{anchor:"transformers.QDQBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),dt=new ut({props:{$$slots:{default:[tm]},$$scope:{ctx:D}}}),jo=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForTokenClassification import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForTokenClassification.from_pretrained('bert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Co=new K({}),Ao=new x({props:{name:"class transformers.QDQBertForQuestionAnswering",anchor:"transformers.QDQBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1653",parametersDescription:[{anchor:"transformers.QDQBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig">QDQBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Io=new x({props:{name:"forward",anchor:"transformers.QDQBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/qdqbert/modeling_qdqbert.py#L1667",parametersDescription:[{anchor:"transformers.QDQBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.QDQBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertConfig" >QDQBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute 
the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),pt=new ut({props:{$$slots:{default:[om]},$$scope:{ctx:D}}}),Oo=new Z({props:{code:`from transformers import BertTokenizer, QDQBertForQuestionAnswering import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = QDQBertForQuestionAnswering.from_pretrained('bert-base-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, QDQBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = QDQBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){h=r("meta"),y=d(),m=r("h1"),T=r("a"),$=r("span"),_(g.$$.fragment),f=d(),q=r("span"),_a=n("QDQBERT"),Zs=d(),_e=r("h2"),Ie=r("a"),zn=r("span"),_(mt.$$.fragment),ba=d(),Bn=r("span"),va=n("Overview"),Ys=d(),Oe=r("p"),ka=n("The QDQBERT model can be referenced in "),ft=r("a"),Qa=n(`Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation`),wa=n(` by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.`),er=d(),Ro=r("p"),Ta=n("The abstract from the paper is the following:"),tr=d(),Uo=r("p"),Mn=r("em"),ya=n(`Quantization techniques can reduce the size of Deep Neural Networks and improve inference latency and throughput by taking advantage of high throughput integer instructions. In this paper we review the mathematical aspects of quantization parameters and evaluate their choices on a wide range of neural network models for different application domains, including vision, speech, and language. 
We focus on quantization techniques that are amenable to acceleration by processors with high-throughput integer math pipelines. We also present a workflow for 8-bit quantization that is able to maintain accuracy within 1% of the floating-point baseline on all networks studied, including models that are more difficult to quantize, such as MobileNets and BERT-large.`),or=d(),Vo=r("p"),$a=n("Tips:"),nr=d(),Y=r("ul"),xn=r("li"),En=r("p"),qa=n(`QDQBERT model adds fake quantization operations (pair of QuantizeLinear/DequantizeLinear ops) to (i) linear layer inputs and weights, (ii) matmul inputs, (iii) residual add inputs, in BERT model.`),Da=d(),Fn=r("li"),We=r("p"),za=n("QDQBERT requires the dependency of "),gt=r("a"),Ba=n("Pytorch Quantization Toolkit"),Ma=n(". To install "),Pn=r("code"),xa=n("pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com"),Ea=d(),jn=r("li"),_t=r("p"),Fa=n("QDQBERT model can be loaded from any checkpoint of HuggingFace BERT model (for example "),Cn=r("em"),Pa=n("bert-base-uncased"),ja=n(`), and perform Quantization Aware Training/Post Training Quantization.`),Ca=d(),An=r("li"),bt=r("p"),Aa=n(`A complete example of using QDQBERT model to perform Quatization Aware Training and Post Training Quantization for SQUAD task can be found at `),Xo=r("a"),Na=n("transformers/examples/research_projects/quantization-qdqbert/"),Sa=n("."),sr=d(),He=r("p"),La=n("This model was contributed by "),vt=r("a"),Ia=n("shangz"),Oa=n("."),rr=d(),be=r("h3"),Re=r("a"),Nn=r("span"),_(kt.$$.fragment),Wa=d(),Sn=r("span"),Ha=n("Set default quantizers"),ar=d(),E=r("p"),Ra=n(`QDQBERT model adds fake quantization operations (pair of QuantizeLinear/DequantizeLinear ops) to BERT by `),Ln=r("code"),Ua=n("TensorQuantizer"),Va=n(" in "),Qt=r("a"),Xa=n("Pytorch Quantization Toolkit"),Ja=n(". "),In=r("code"),Ga=n("TensorQuantizer"),Ka=n(` is the module for quantizing tensors, with `),On=r("code"),Za=n("QuantDescriptor"),Ya=n(" defining how the tensor should be quantized. Refer to "),wt=r("a"),ei=n(`Pytorch Quantization Toolkit userguide`),ti=n(" for more details."),ir=d(),Ue=r("p"),oi=n("Before creating QDQBERT model, one has to set the default "),Wn=r("code"),ni=n("QuantDescriptor"),si=n(` defining default tensor quantizers. Example:`),lr=d(),_(Tt.$$.fragment),dr=d(),ve=r("h3"),Ve=r("a"),Hn=r("span"),_(yt.$$.fragment),ri=d(),Rn=r("span"),ai=n("Calibration"),cr=d(),Jo=r("p"),ii=n(`Calibration is the terminology of passing data samples to the quantizer and deciding the best scaling factors for tensors. After setting up the tensor quantizers, one can use the following example to calibrate the model:`),pr=d(),_($t.$$.fragment),hr=d(),ke=r("h3"),Xe=r("a"),Un=r("span"),_(qt.$$.fragment),li=d(),Vn=r("span"),di=n("Export to ONNX"),ur=d(),te=r("p"),ci=n("The goal of exporting to ONNX is to deploy inference by "),Dt=r("a"),pi=n("TensorRT"),hi=n(`. Fake quantization will be broken into a pair of QuantizeLinear/DequantizeLinear ONNX ops. After setting static member of TensorQuantizer to use Pytorch\u2019s own fake quantization functions, fake quantized model can be exported to ONNX, follow the instructions in `),zt=r("a"),ui=n("torch.onnx"),mi=n(". Example:"),mr=d(),_(Bt.$$.fragment),fr=d(),Qe=r("h2"),Je=r("a"),Xn=r("span"),_(Mt.$$.fragment),fi=d(),Jn=r("span"),gi=n("QDQBertConfig"),gr=d(),F=r("div"),_(xt.$$.fragment),_i=d(),we=r("p"),bi=n("This is the configuration class to store the configuration of a "),Go=r("a"),vi=n("QDQBertModel"),ki=n(`. 
It is used to instantiate an QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BERT `),Et=r("a"),Qi=n("bert-base-uncased"),wi=n(" architecture."),Ti=d(),Te=r("p"),yi=n("Configuration objects inherit from "),Ko=r("a"),$i=n("PretrainedConfig"),qi=n(` and can be used to control the model outputs. Read the documentation from `),Zo=r("a"),Di=n("PretrainedConfig"),zi=n(" for more information."),Bi=d(),Gn=r("p"),Mi=n("Examples:"),xi=d(),_(Ft.$$.fragment),_r=d(),ye=r("h2"),Ge=r("a"),Kn=r("span"),_(Pt.$$.fragment),Ei=d(),Zn=r("span"),Fi=n("QDQBertModel"),br=d(),z=r("div"),_(jt.$$.fragment),Pi=d(),Yn=r("p"),ji=n("The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top."),Ci=d(),Ct=r("p"),Ai=n("This model inherits from "),Yo=r("a"),Ni=n("PreTrainedModel"),Si=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Li=d(),At=r("p"),Ii=n("This model is also a PyTorch "),Nt=r("a"),Oi=n("torch.nn.Module"),Wi=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hi=d(),St=r("p"),Ri=n(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Lt=r("a"),Ui=n(`Attention is all you need`),Vi=n(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Xi=d(),M=r("p"),Ji=n("To behave as an decoder the model needs to be initialized with the "),es=r("code"),Gi=n("is_decoder"),Ki=n(` argument of the configuration set to `),ts=r("code"),Zi=n("True"),Yi=n(". To be used in a Seq2Seq model, the model needs to initialized with both "),os=r("code"),el=n("is_decoder"),tl=n(` argument and `),ns=r("code"),ol=n("add_cross_attention"),nl=n(" set to "),ss=r("code"),sl=n("True"),rl=n("; an "),rs=r("code"),al=n("encoder_hidden_states"),il=n(` is then expected as an input to the forward pass.`),ll=d(),I=r("div"),_(It.$$.fragment),dl=d(),$e=r("p"),cl=n("The "),en=r("a"),pl=n("QDQBertModel"),hl=n(" forward method, overrides the "),as=r("code"),ul=n("__call__"),ml=n(" special method."),fl=d(),_(Ke.$$.fragment),gl=d(),is=r("p"),_l=n("Example:"),bl=d(),_(Ot.$$.fragment),vr=d(),qe=r("h2"),Ze=r("a"),ls=r("span"),_(Wt.$$.fragment),vl=d(),ds=r("span"),kl=n("QDQBertLMHeadModel"),kr=d(),P=r("div"),_(Ht.$$.fragment),Ql=d(),Rt=r("p"),wl=n("QDQBERT Model with a "),cs=r("code"),Tl=n("language modeling"),yl=n(" head on top for CLM fine-tuning."),$l=d(),Ut=r("p"),ql=n("This model inherits from "),tn=r("a"),Dl=n("PreTrainedModel"),zl=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bl=d(),Vt=r("p"),Ml=n("This model is also a PyTorch "),Xt=r("a"),xl=n("torch.nn.Module"),El=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fl=d(),O=r("div"),_(Jt.$$.fragment),Pl=d(),De=r("p"),jl=n("The "),on=r("a"),Cl=n("QDQBertLMHeadModel"),Al=n(" forward method, overrides the "),ps=r("code"),Nl=n("__call__"),Sl=n(" special method."),Ll=d(),_(Ye.$$.fragment),Il=d(),hs=r("p"),Ol=n("Example:"),Wl=d(),_(Gt.$$.fragment),Qr=d(),ze=r("h2"),et=r("a"),us=r("span"),_(Kt.$$.fragment),Hl=d(),ms=r("span"),Rl=n("QDQBertForMaskedLM"),wr=d(),j=r("div"),_(Zt.$$.fragment),Ul=d(),Yt=r("p"),Vl=n("QDQBERT Model with a "),fs=r("code"),Xl=n("language modeling"),Jl=n(" head on top."),Gl=d(),eo=r("p"),Kl=n("This model inherits from "),nn=r("a"),Zl=n("PreTrainedModel"),Yl=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ed=d(),to=r("p"),td=n("This model is also a PyTorch "),oo=r("a"),od=n("torch.nn.Module"),nd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sd=d(),W=r("div"),_(no.$$.fragment),rd=d(),Be=r("p"),ad=n("The "),sn=r("a"),id=n("QDQBertForMaskedLM"),ld=n(" forward method, overrides the "),gs=r("code"),dd=n("__call__"),cd=n(" special method."),pd=d(),_(tt.$$.fragment),hd=d(),_s=r("p"),ud=n("Example:"),md=d(),_(so.$$.fragment),Tr=d(),Me=r("h2"),ot=r("a"),bs=r("span"),_(ro.$$.fragment),fd=d(),vs=r("span"),gd=n("QDQBertForSequenceClassification"),yr=d(),C=r("div"),_(ao.$$.fragment),_d=d(),ks=r("p"),bd=n(`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),vd=d(),io=r("p"),kd=n("This model inherits from "),rn=r("a"),Qd=n("PreTrainedModel"),wd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Td=d(),lo=r("p"),yd=n("This model is also a PyTorch "),co=r("a"),$d=n("torch.nn.Module"),qd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dd=d(),B=r("div"),_(po.$$.fragment),zd=d(),xe=r("p"),Bd=n("The "),an=r("a"),Md=n("QDQBertForSequenceClassification"),xd=n(" forward method, overrides the "),Qs=r("code"),Ed=n("__call__"),Fd=n(" special method."),Pd=d(),_(nt.$$.fragment),jd=d(),ws=r("p"),Cd=n("Example of single-label classification:"),Ad=d(),_(ho.$$.fragment),Nd=d(),Ts=r("p"),Sd=n("Example of multi-label classification:"),Ld=d(),_(uo.$$.fragment),$r=d(),Ee=r("h2"),st=r("a"),ys=r("span"),_(mo.$$.fragment),Id=d(),$s=r("span"),Od=n("QDQBertForNextSentencePrediction"),qr=d(),A=r("div"),_(fo.$$.fragment),Wd=d(),go=r("p"),Hd=n("Bert Model with a "),qs=r("code"),Rd=n("next sentence prediction (classification)"),Ud=n(" head on top."),Vd=d(),_o=r("p"),Xd=n("This model inherits from "),ln=r("a"),Jd=n("PreTrainedModel"),Gd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kd=d(),bo=r("p"),Zd=n("This model is also a PyTorch "),vo=r("a"),Yd=n("torch.nn.Module"),ec=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),tc=d(),H=r("div"),_(ko.$$.fragment),oc=d(),Fe=r("p"),nc=n("The "),dn=r("a"),sc=n("QDQBertForNextSentencePrediction"),rc=n(" forward method, overrides the "),Ds=r("code"),ac=n("__call__"),ic=n(" special method."),lc=d(),_(rt.$$.fragment),dc=d(),zs=r("p"),cc=n("Example:"),pc=d(),_(Qo.$$.fragment),Dr=d(),Pe=r("h2"),at=r("a"),Bs=r("span"),_(wo.$$.fragment),hc=d(),Ms=r("span"),uc=n("QDQBertForMultipleChoice"),zr=d(),N=r("div"),_(To.$$.fragment),mc=d(),xs=r("p"),fc=n(`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),gc=d(),yo=r("p"),_c=n("This model inherits from "),cn=r("a"),bc=n("PreTrainedModel"),vc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kc=d(),$o=r("p"),Qc=n("This model is also a PyTorch "),qo=r("a"),wc=n("torch.nn.Module"),Tc=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yc=d(),R=r("div"),_(Do.$$.fragment),$c=d(),je=r("p"),qc=n("The "),pn=r("a"),Dc=n("QDQBertForMultipleChoice"),zc=n(" forward method, overrides the "),Es=r("code"),Bc=n("__call__"),Mc=n(" special method."),xc=d(),_(it.$$.fragment),Ec=d(),Fs=r("p"),Fc=n("Example:"),Pc=d(),_(zo.$$.fragment),Br=d(),Ce=r("h2"),lt=r("a"),Ps=r("span"),_(Bo.$$.fragment),jc=d(),js=r("span"),Cc=n("QDQBertForTokenClassification"),Mr=d(),S=r("div"),_(Mo.$$.fragment),Ac=d(),Cs=r("p"),Nc=n(`QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Sc=d(),xo=r("p"),Lc=n("This model inherits from "),hn=r("a"),Ic=n("PreTrainedModel"),Oc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wc=d(),Eo=r("p"),Hc=n("This model is also a PyTorch "),Fo=r("a"),Rc=n("torch.nn.Module"),Uc=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vc=d(),U=r("div"),_(Po.$$.fragment),Xc=d(),Ae=r("p"),Jc=n("The "),un=r("a"),Gc=n("QDQBertForTokenClassification"),Kc=n(" forward method, overrides the "),As=r("code"),Zc=n("__call__"),Yc=n(" special method."),ep=d(),_(dt.$$.fragment),tp=d(),Ns=r("p"),op=n("Example:"),np=d(),_(jo.$$.fragment),xr=d(),Ne=r("h2"),ct=r("a"),Ss=r("span"),_(Co.$$.fragment),sp=d(),Ls=r("span"),rp=n("QDQBertForQuestionAnswering"),Er=d(),L=r("div"),_(Ao.$$.fragment),ap=d(),Se=r("p"),ip=n(`QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Is=r("code"),lp=n("span start logits"),dp=n(" and "),Os=r("code"),cp=n("span end logits"),pp=n(")."),hp=d(),No=r("p"),up=n("This model inherits from "),mn=r("a"),mp=n("PreTrainedModel"),fp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gp=d(),So=r("p"),_p=n("This model is also a PyTorch "),Lo=r("a"),bp=n("torch.nn.Module"),vp=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kp=d(),V=r("div"),_(Io.$$.fragment),Qp=d(),Le=r("p"),wp=n("The "),fn=r("a"),Tp=n("QDQBertForQuestionAnswering"),yp=n(" forward method, overrides the "),Ws=r("code"),$p=n("__call__"),qp=n(" special method."),Dp=d(),_(pt.$$.fragment),zp=d(),Hs=r("p"),Bp=n("Example:"),Mp=d(),_(Oo.$$.fragment),this.h()},l(t){const p=Xu('[data-svelte="svelte-1phssyn"]',document.head);h=a(p,"META",{name:!0,content:!0}),p.forEach(o),y=c(t),m=a(t,"H1",{class:!0});var Wo=i(m);T=a(Wo,"A",{id:!0,class:!0,href:!0});var Rs=i(T);$=a(Rs,"SPAN",{});var Us=i($);b(g.$$.fragment,Us),Us.forEach(o),Rs.forEach(o),f=c(Wo),q=a(Wo,"SPAN",{});var Vs=i(q);_a=s(Vs,"QDQBERT"),Vs.forEach(o),Wo.forEach(o),Zs=c(t),_e=a(t,"H2",{class:!0});var Ho=i(_e);Ie=a(Ho,"A",{id:!0,class:!0,href:!0});var Xs=i(Ie);zn=a(Xs,"SPAN",{});var Js=i(zn);b(mt.$$.fragment,Js),Js.forEach(o),Xs.forEach(o),ba=c(Ho),Bn=a(Ho,"SPAN",{});var Gs=i(Bn);va=s(Gs,"Overview"),Gs.forEach(o),Ho.forEach(o),Ys=c(t),Oe=a(t,"P",{});var Pr=i(Oe);ka=s(Pr,"The QDQBERT model can be referenced in "),ft=a(Pr,"A",{href:!0,rel:!0});var xp=i(ft);Qa=s(xp,`Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation`),xp.forEach(o),wa=s(Pr,` by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.`),Pr.forEach(o),er=c(t),Ro=a(t,"P",{});var Ep=i(Ro);Ta=s(Ep,"The abstract from the paper is the following:"),Ep.forEach(o),tr=c(t),Uo=a(t,"P",{});var Fp=i(Uo);Mn=a(Fp,"EM",{});var Pp=i(Mn);ya=s(Pp,`Quantization techniques can reduce the size of Deep Neural Networks and improve inference latency and throughput by taking advantage of high throughput integer instructions. In this paper we review the mathematical aspects of quantization parameters and evaluate their choices on a wide range of neural network models for different application domains, including vision, speech, and language. We focus on quantization techniques that are amenable to acceleration by processors with high-throughput integer math pipelines. We also present a workflow for 8-bit quantization that is able to maintain accuracy within 1% of the floating-point baseline on all networks studied, including models that are more difficult to quantize, such as MobileNets and BERT-large.`),Pp.forEach(o),Fp.forEach(o),or=c(t),Vo=a(t,"P",{});var jp=i(Vo);$a=s(jp,"Tips:"),jp.forEach(o),nr=c(t),Y=a(t,"UL",{});var ht=i(Y);xn=a(ht,"LI",{});var Cp=i(xn);En=a(Cp,"P",{});var Ap=i(En);qa=s(Ap,`QDQBERT model adds fake quantization operations (pair of QuantizeLinear/DequantizeLinear ops) to (i) linear layer inputs and weights, (ii) matmul inputs, (iii) residual add inputs, in BERT model.`),Ap.forEach(o),Cp.forEach(o),Da=c(ht),Fn=a(ht,"LI",{});var Np=i(Fn);We=a(Np,"P",{});var Ks=i(We);za=s(Ks,"QDQBERT requires the dependency of "),gt=a(Ks,"A",{href:!0,rel:!0});var Sp=i(gt);Ba=s(Sp,"Pytorch Quantization Toolkit"),Sp.forEach(o),Ma=s(Ks,". 
To install "),Pn=a(Ks,"CODE",{});var Lp=i(Pn);xa=s(Lp,"pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com"),Lp.forEach(o),Ks.forEach(o),Np.forEach(o),Ea=c(ht),jn=a(ht,"LI",{});var Ip=i(jn);_t=a(Ip,"P",{});var jr=i(_t);Fa=s(jr,"QDQBERT model can be loaded from any checkpoint of HuggingFace BERT model (for example "),Cn=a(jr,"EM",{});var Op=i(Cn);Pa=s(Op,"bert-base-uncased"),Op.forEach(o),ja=s(jr,`), and perform Quantization Aware Training/Post Training Quantization.`),jr.forEach(o),Ip.forEach(o),Ca=c(ht),An=a(ht,"LI",{});var Wp=i(An);bt=a(Wp,"P",{});var Cr=i(bt);Aa=s(Cr,`A complete example of using QDQBERT model to perform Quatization Aware Training and Post Training Quantization for SQUAD task can be found at `),Xo=a(Cr,"A",{href:!0});var Hp=i(Xo);Na=s(Hp,"transformers/examples/research_projects/quantization-qdqbert/"),Hp.forEach(o),Sa=s(Cr,"."),Cr.forEach(o),Wp.forEach(o),ht.forEach(o),sr=c(t),He=a(t,"P",{});var Ar=i(He);La=s(Ar,"This model was contributed by "),vt=a(Ar,"A",{href:!0,rel:!0});var Rp=i(vt);Ia=s(Rp,"shangz"),Rp.forEach(o),Oa=s(Ar,"."),Ar.forEach(o),rr=c(t),be=a(t,"H3",{class:!0});var Nr=i(be);Re=a(Nr,"A",{id:!0,class:!0,href:!0});var Up=i(Re);Nn=a(Up,"SPAN",{});var Vp=i(Nn);b(kt.$$.fragment,Vp),Vp.forEach(o),Up.forEach(o),Wa=c(Nr),Sn=a(Nr,"SPAN",{});var Xp=i(Sn);Ha=s(Xp,"Set default quantizers"),Xp.forEach(o),Nr.forEach(o),ar=c(t),E=a(t,"P",{});var ee=i(E);Ra=s(ee,`QDQBERT model adds fake quantization operations (pair of QuantizeLinear/DequantizeLinear ops) to BERT by `),Ln=a(ee,"CODE",{});var Jp=i(Ln);Ua=s(Jp,"TensorQuantizer"),Jp.forEach(o),Va=s(ee," in "),Qt=a(ee,"A",{href:!0,rel:!0});var Gp=i(Qt);Xa=s(Gp,"Pytorch Quantization Toolkit"),Gp.forEach(o),Ja=s(ee,". "),In=a(ee,"CODE",{});var Kp=i(In);Ga=s(Kp,"TensorQuantizer"),Kp.forEach(o),Ka=s(ee,` is the module for quantizing tensors, with `),On=a(ee,"CODE",{});var Zp=i(On);Za=s(Zp,"QuantDescriptor"),Zp.forEach(o),Ya=s(ee," defining how the tensor should be quantized. Refer to "),wt=a(ee,"A",{href:!0,rel:!0});var Yp=i(wt);ei=s(Yp,`Pytorch Quantization Toolkit userguide`),Yp.forEach(o),ti=s(ee," for more details."),ee.forEach(o),ir=c(t),Ue=a(t,"P",{});var Sr=i(Ue);oi=s(Sr,"Before creating QDQBERT model, one has to set the default "),Wn=a(Sr,"CODE",{});var eh=i(Wn);ni=s(eh,"QuantDescriptor"),eh.forEach(o),si=s(Sr,` defining default tensor quantizers. Example:`),Sr.forEach(o),lr=c(t),b(Tt.$$.fragment,t),dr=c(t),ve=a(t,"H3",{class:!0});var Lr=i(ve);Ve=a(Lr,"A",{id:!0,class:!0,href:!0});var th=i(Ve);Hn=a(th,"SPAN",{});var oh=i(Hn);b(yt.$$.fragment,oh),oh.forEach(o),th.forEach(o),ri=c(Lr),Rn=a(Lr,"SPAN",{});var nh=i(Rn);ai=s(nh,"Calibration"),nh.forEach(o),Lr.forEach(o),cr=c(t),Jo=a(t,"P",{});var sh=i(Jo);ii=s(sh,`Calibration is the terminology of passing data samples to the quantizer and deciding the best scaling factors for tensors. After setting up the tensor quantizers, one can use the following example to calibrate the model:`),sh.forEach(o),pr=c(t),b($t.$$.fragment,t),hr=c(t),ke=a(t,"H3",{class:!0});var Ir=i(ke);Xe=a(Ir,"A",{id:!0,class:!0,href:!0});var rh=i(Xe);Un=a(rh,"SPAN",{});var ah=i(Un);b(qt.$$.fragment,ah),ah.forEach(o),rh.forEach(o),li=c(Ir),Vn=a(Ir,"SPAN",{});var ih=i(Vn);di=s(ih,"Export to ONNX"),ih.forEach(o),Ir.forEach(o),ur=c(t),te=a(t,"P",{});var gn=i(te);ci=s(gn,"The goal of exporting to ONNX is to deploy inference by "),Dt=a(gn,"A",{href:!0,rel:!0});var lh=i(Dt);pi=s(lh,"TensorRT"),lh.forEach(o),hi=s(gn,`. 
Fake quantization will be broken into a pair of QuantizeLinear/DequantizeLinear ONNX ops. After setting static member of TensorQuantizer to use Pytorch\u2019s own fake quantization functions, fake quantized model can be exported to ONNX, follow the instructions in `),zt=a(gn,"A",{href:!0,rel:!0});var dh=i(zt);ui=s(dh,"torch.onnx"),dh.forEach(o),mi=s(gn,". Example:"),gn.forEach(o),mr=c(t),b(Bt.$$.fragment,t),fr=c(t),Qe=a(t,"H2",{class:!0});var Or=i(Qe);Je=a(Or,"A",{id:!0,class:!0,href:!0});var ch=i(Je);Xn=a(ch,"SPAN",{});var ph=i(Xn);b(Mt.$$.fragment,ph),ph.forEach(o),ch.forEach(o),fi=c(Or),Jn=a(Or,"SPAN",{});var hh=i(Jn);gi=s(hh,"QDQBertConfig"),hh.forEach(o),Or.forEach(o),gr=c(t),F=a(t,"DIV",{class:!0});var oe=i(F);b(xt.$$.fragment,oe),_i=c(oe),we=a(oe,"P",{});var _n=i(we);bi=s(_n,"This is the configuration class to store the configuration of a "),Go=a(_n,"A",{href:!0});var uh=i(Go);vi=s(uh,"QDQBertModel"),uh.forEach(o),ki=s(_n,`. It is used to instantiate an QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BERT `),Et=a(_n,"A",{href:!0,rel:!0});var mh=i(Et);Qi=s(mh,"bert-base-uncased"),mh.forEach(o),wi=s(_n," architecture."),_n.forEach(o),Ti=c(oe),Te=a(oe,"P",{});var bn=i(Te);yi=s(bn,"Configuration objects inherit from "),Ko=a(bn,"A",{href:!0});var fh=i(Ko);$i=s(fh,"PretrainedConfig"),fh.forEach(o),qi=s(bn,` and can be used to control the model outputs. Read the documentation from `),Zo=a(bn,"A",{href:!0});var gh=i(Zo);Di=s(gh,"PretrainedConfig"),gh.forEach(o),zi=s(bn," for more information."),bn.forEach(o),Bi=c(oe),Gn=a(oe,"P",{});var _h=i(Gn);Mi=s(_h,"Examples:"),_h.forEach(o),xi=c(oe),b(Ft.$$.fragment,oe),oe.forEach(o),_r=c(t),ye=a(t,"H2",{class:!0});var Wr=i(ye);Ge=a(Wr,"A",{id:!0,class:!0,href:!0});var bh=i(Ge);Kn=a(bh,"SPAN",{});var vh=i(Kn);b(Pt.$$.fragment,vh),vh.forEach(o),bh.forEach(o),Ei=c(Wr),Zn=a(Wr,"SPAN",{});var kh=i(Zn);Fi=s(kh,"QDQBertModel"),kh.forEach(o),Wr.forEach(o),br=c(t),z=a(t,"DIV",{class:!0});var X=i(z);b(jt.$$.fragment,X),Pi=c(X),Yn=a(X,"P",{});var Qh=i(Yn);ji=s(Qh,"The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top."),Qh.forEach(o),Ci=c(X),Ct=a(X,"P",{});var Hr=i(Ct);Ai=s(Hr,"This model inherits from "),Yo=a(Hr,"A",{href:!0});var wh=i(Yo);Ni=s(wh,"PreTrainedModel"),wh.forEach(o),Si=s(Hr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hr.forEach(o),Li=c(X),At=a(X,"P",{});var Rr=i(At);Ii=s(Rr,"This model is also a PyTorch "),Nt=a(Rr,"A",{href:!0,rel:!0});var Th=i(Nt);Oi=s(Th,"torch.nn.Module"),Th.forEach(o),Wi=s(Rr,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rr.forEach(o),Hi=c(X),St=a(X,"P",{});var Ur=i(St);Ri=s(Ur,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Lt=a(Ur,"A",{href:!0,rel:!0});var yh=i(Lt);Ui=s(yh,`Attention is all you need`),yh.forEach(o),Vi=s(Ur,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Ur.forEach(o),Xi=c(X),M=a(X,"P",{});var J=i(M);Ji=s(J,"To behave as an decoder the model needs to be initialized with the "),es=a(J,"CODE",{});var $h=i(es);Gi=s($h,"is_decoder"),$h.forEach(o),Ki=s(J,` argument of the configuration set to `),ts=a(J,"CODE",{});var qh=i(ts);Zi=s(qh,"True"),qh.forEach(o),Yi=s(J,". To be used in a Seq2Seq model, the model needs to initialized with both "),os=a(J,"CODE",{});var Dh=i(os);el=s(Dh,"is_decoder"),Dh.forEach(o),tl=s(J,` argument and `),ns=a(J,"CODE",{});var zh=i(ns);ol=s(zh,"add_cross_attention"),zh.forEach(o),nl=s(J," set to "),ss=a(J,"CODE",{});var Bh=i(ss);sl=s(Bh,"True"),Bh.forEach(o),rl=s(J,"; an "),rs=a(J,"CODE",{});var Mh=i(rs);al=s(Mh,"encoder_hidden_states"),Mh.forEach(o),il=s(J,` is then expected as an input to the forward pass.`),J.forEach(o),ll=c(X),I=a(X,"DIV",{class:!0});var ne=i(I);b(It.$$.fragment,ne),dl=c(ne),$e=a(ne,"P",{});var vn=i($e);cl=s(vn,"The "),en=a(vn,"A",{href:!0});var xh=i(en);pl=s(xh,"QDQBertModel"),xh.forEach(o),hl=s(vn," forward method, overrides the "),as=a(vn,"CODE",{});var Eh=i(as);ul=s(Eh,"__call__"),Eh.forEach(o),ml=s(vn," special method."),vn.forEach(o),fl=c(ne),b(Ke.$$.fragment,ne),gl=c(ne),is=a(ne,"P",{});var Fh=i(is);_l=s(Fh,"Example:"),Fh.forEach(o),bl=c(ne),b(Ot.$$.fragment,ne),ne.forEach(o),X.forEach(o),vr=c(t),qe=a(t,"H2",{class:!0});var Vr=i(qe);Ze=a(Vr,"A",{id:!0,class:!0,href:!0});var Ph=i(Ze);ls=a(Ph,"SPAN",{});var jh=i(ls);b(Wt.$$.fragment,jh),jh.forEach(o),Ph.forEach(o),vl=c(Vr),ds=a(Vr,"SPAN",{});var Ch=i(ds);kl=s(Ch,"QDQBertLMHeadModel"),Ch.forEach(o),Vr.forEach(o),kr=c(t),P=a(t,"DIV",{class:!0});var se=i(P);b(Ht.$$.fragment,se),Ql=c(se),Rt=a(se,"P",{});var Xr=i(Rt);wl=s(Xr,"QDQBERT Model with a "),cs=a(Xr,"CODE",{});var Ah=i(cs);Tl=s(Ah,"language modeling"),Ah.forEach(o),yl=s(Xr," head on top for CLM fine-tuning."),Xr.forEach(o),$l=c(se),Ut=a(se,"P",{});var Jr=i(Ut);ql=s(Jr,"This model inherits from "),tn=a(Jr,"A",{href:!0});var Nh=i(tn);Dl=s(Nh,"PreTrainedModel"),Nh.forEach(o),zl=s(Jr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jr.forEach(o),Bl=c(se),Vt=a(se,"P",{});var Gr=i(Vt);Ml=s(Gr,"This model is also a PyTorch "),Xt=a(Gr,"A",{href:!0,rel:!0});var Sh=i(Xt);xl=s(Sh,"torch.nn.Module"),Sh.forEach(o),El=s(Gr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gr.forEach(o),Fl=c(se),O=a(se,"DIV",{class:!0});var re=i(O);b(Jt.$$.fragment,re),Pl=c(re),De=a(re,"P",{});var kn=i(De);jl=s(kn,"The "),on=a(kn,"A",{href:!0});var Lh=i(on);Cl=s(Lh,"QDQBertLMHeadModel"),Lh.forEach(o),Al=s(kn," forward method, overrides the "),ps=a(kn,"CODE",{});var Ih=i(ps);Nl=s(Ih,"__call__"),Ih.forEach(o),Sl=s(kn," special method."),kn.forEach(o),Ll=c(re),b(Ye.$$.fragment,re),Il=c(re),hs=a(re,"P",{});var Oh=i(hs);Ol=s(Oh,"Example:"),Oh.forEach(o),Wl=c(re),b(Gt.$$.fragment,re),re.forEach(o),se.forEach(o),Qr=c(t),ze=a(t,"H2",{class:!0});var Kr=i(ze);et=a(Kr,"A",{id:!0,class:!0,href:!0});var Wh=i(et);us=a(Wh,"SPAN",{});var Hh=i(us);b(Kt.$$.fragment,Hh),Hh.forEach(o),Wh.forEach(o),Hl=c(Kr),ms=a(Kr,"SPAN",{});var Rh=i(ms);Rl=s(Rh,"QDQBertForMaskedLM"),Rh.forEach(o),Kr.forEach(o),wr=c(t),j=a(t,"DIV",{class:!0});var ae=i(j);b(Zt.$$.fragment,ae),Ul=c(ae),Yt=a(ae,"P",{});var Zr=i(Yt);Vl=s(Zr,"QDQBERT Model with a "),fs=a(Zr,"CODE",{});var Uh=i(fs);Xl=s(Uh,"language modeling"),Uh.forEach(o),Jl=s(Zr," head on top."),Zr.forEach(o),Gl=c(ae),eo=a(ae,"P",{});var Yr=i(eo);Kl=s(Yr,"This model inherits from "),nn=a(Yr,"A",{href:!0});var Vh=i(nn);Zl=s(Vh,"PreTrainedModel"),Vh.forEach(o),Yl=s(Yr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yr.forEach(o),ed=c(ae),to=a(ae,"P",{});var ea=i(to);td=s(ea,"This model is also a PyTorch "),oo=a(ea,"A",{href:!0,rel:!0});var Xh=i(oo);od=s(Xh,"torch.nn.Module"),Xh.forEach(o),nd=s(ea,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ea.forEach(o),sd=c(ae),W=a(ae,"DIV",{class:!0});var ie=i(W);b(no.$$.fragment,ie),rd=c(ie),Be=a(ie,"P",{});var Qn=i(Be);ad=s(Qn,"The "),sn=a(Qn,"A",{href:!0});var Jh=i(sn);id=s(Jh,"QDQBertForMaskedLM"),Jh.forEach(o),ld=s(Qn," forward method, overrides the "),gs=a(Qn,"CODE",{});var Gh=i(gs);dd=s(Gh,"__call__"),Gh.forEach(o),cd=s(Qn," special method."),Qn.forEach(o),pd=c(ie),b(tt.$$.fragment,ie),hd=c(ie),_s=a(ie,"P",{});var Kh=i(_s);ud=s(Kh,"Example:"),Kh.forEach(o),md=c(ie),b(so.$$.fragment,ie),ie.forEach(o),ae.forEach(o),Tr=c(t),Me=a(t,"H2",{class:!0});var ta=i(Me);ot=a(ta,"A",{id:!0,class:!0,href:!0});var Zh=i(ot);bs=a(Zh,"SPAN",{});var Yh=i(bs);b(ro.$$.fragment,Yh),Yh.forEach(o),Zh.forEach(o),fd=c(ta),vs=a(ta,"SPAN",{});var eu=i(vs);gd=s(eu,"QDQBertForSequenceClassification"),eu.forEach(o),ta.forEach(o),yr=c(t),C=a(t,"DIV",{class:!0});var le=i(C);b(ao.$$.fragment,le),_d=c(le),ks=a(le,"P",{});var tu=i(ks);bd=s(tu,`Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),tu.forEach(o),vd=c(le),io=a(le,"P",{});var oa=i(io);kd=s(oa,"This model inherits from "),rn=a(oa,"A",{href:!0});var ou=i(rn);Qd=s(ou,"PreTrainedModel"),ou.forEach(o),wd=s(oa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oa.forEach(o),Td=c(le),lo=a(le,"P",{});var na=i(lo);yd=s(na,"This model is also a PyTorch "),co=a(na,"A",{href:!0,rel:!0});var nu=i(co);$d=s(nu,"torch.nn.Module"),nu.forEach(o),qd=s(na,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),na.forEach(o),Dd=c(le),B=a(le,"DIV",{class:!0});var G=i(B);b(po.$$.fragment,G),zd=c(G),xe=a(G,"P",{});var wn=i(xe);Bd=s(wn,"The "),an=a(wn,"A",{href:!0});var su=i(an);Md=s(su,"QDQBertForSequenceClassification"),su.forEach(o),xd=s(wn," forward method, overrides the "),Qs=a(wn,"CODE",{});var ru=i(Qs);Ed=s(ru,"__call__"),ru.forEach(o),Fd=s(wn," special method."),wn.forEach(o),Pd=c(G),b(nt.$$.fragment,G),jd=c(G),ws=a(G,"P",{});var au=i(ws);Cd=s(au,"Example of single-label classification:"),au.forEach(o),Ad=c(G),b(ho.$$.fragment,G),Nd=c(G),Ts=a(G,"P",{});var iu=i(Ts);Sd=s(iu,"Example of multi-label classification:"),iu.forEach(o),Ld=c(G),b(uo.$$.fragment,G),G.forEach(o),le.forEach(o),$r=c(t),Ee=a(t,"H2",{class:!0});var sa=i(Ee);st=a(sa,"A",{id:!0,class:!0,href:!0});var lu=i(st);ys=a(lu,"SPAN",{});var du=i(ys);b(mo.$$.fragment,du),du.forEach(o),lu.forEach(o),Id=c(sa),$s=a(sa,"SPAN",{});var cu=i($s);Od=s(cu,"QDQBertForNextSentencePrediction"),cu.forEach(o),sa.forEach(o),qr=c(t),A=a(t,"DIV",{class:!0});var de=i(A);b(fo.$$.fragment,de),Wd=c(de),go=a(de,"P",{});var ra=i(go);Hd=s(ra,"Bert Model with a "),qs=a(ra,"CODE",{});var pu=i(qs);Rd=s(pu,"next sentence prediction (classification)"),pu.forEach(o),Ud=s(ra," head on top."),ra.forEach(o),Vd=c(de),_o=a(de,"P",{});var aa=i(_o);Xd=s(aa,"This model inherits from "),ln=a(aa,"A",{href:!0});var hu=i(ln);Jd=s(hu,"PreTrainedModel"),hu.forEach(o),Gd=s(aa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),aa.forEach(o),Kd=c(de),bo=a(de,"P",{});var ia=i(bo);Zd=s(ia,"This model is also a PyTorch "),vo=a(ia,"A",{href:!0,rel:!0});var uu=i(vo);Yd=s(uu,"torch.nn.Module"),uu.forEach(o),ec=s(ia,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ia.forEach(o),tc=c(de),H=a(de,"DIV",{class:!0});var ce=i(H);b(ko.$$.fragment,ce),oc=c(ce),Fe=a(ce,"P",{});var Tn=i(Fe);nc=s(Tn,"The "),dn=a(Tn,"A",{href:!0});var mu=i(dn);sc=s(mu,"QDQBertForNextSentencePrediction"),mu.forEach(o),rc=s(Tn," forward method, overrides the "),Ds=a(Tn,"CODE",{});var fu=i(Ds);ac=s(fu,"__call__"),fu.forEach(o),ic=s(Tn," special method."),Tn.forEach(o),lc=c(ce),b(rt.$$.fragment,ce),dc=c(ce),zs=a(ce,"P",{});var gu=i(zs);cc=s(gu,"Example:"),gu.forEach(o),pc=c(ce),b(Qo.$$.fragment,ce),ce.forEach(o),de.forEach(o),Dr=c(t),Pe=a(t,"H2",{class:!0});var la=i(Pe);at=a(la,"A",{id:!0,class:!0,href:!0});var _u=i(at);Bs=a(_u,"SPAN",{});var bu=i(Bs);b(wo.$$.fragment,bu),bu.forEach(o),_u.forEach(o),hc=c(la),Ms=a(la,"SPAN",{});var vu=i(Ms);uc=s(vu,"QDQBertForMultipleChoice"),vu.forEach(o),la.forEach(o),zr=c(t),N=a(t,"DIV",{class:!0});var pe=i(N);b(To.$$.fragment,pe),mc=c(pe),xs=a(pe,"P",{});var ku=i(xs);fc=s(ku,`Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ku.forEach(o),gc=c(pe),yo=a(pe,"P",{});var da=i(yo);_c=s(da,"This model inherits from "),cn=a(da,"A",{href:!0});var Qu=i(cn);bc=s(Qu,"PreTrainedModel"),Qu.forEach(o),vc=s(da,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),da.forEach(o),kc=c(pe),$o=a(pe,"P",{});var ca=i($o);Qc=s(ca,"This model is also a PyTorch "),qo=a(ca,"A",{href:!0,rel:!0});var wu=i(qo);wc=s(wu,"torch.nn.Module"),wu.forEach(o),Tc=s(ca,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ca.forEach(o),yc=c(pe),R=a(pe,"DIV",{class:!0});var he=i(R);b(Do.$$.fragment,he),$c=c(he),je=a(he,"P",{});var yn=i(je);qc=s(yn,"The "),pn=a(yn,"A",{href:!0});var Tu=i(pn);Dc=s(Tu,"QDQBertForMultipleChoice"),Tu.forEach(o),zc=s(yn," forward method, overrides the "),Es=a(yn,"CODE",{});var yu=i(Es);Bc=s(yu,"__call__"),yu.forEach(o),Mc=s(yn," special method."),yn.forEach(o),xc=c(he),b(it.$$.fragment,he),Ec=c(he),Fs=a(he,"P",{});var $u=i(Fs);Fc=s($u,"Example:"),$u.forEach(o),Pc=c(he),b(zo.$$.fragment,he),he.forEach(o),pe.forEach(o),Br=c(t),Ce=a(t,"H2",{class:!0});var pa=i(Ce);lt=a(pa,"A",{id:!0,class:!0,href:!0});var qu=i(lt);Ps=a(qu,"SPAN",{});var Du=i(Ps);b(Bo.$$.fragment,Du),Du.forEach(o),qu.forEach(o),jc=c(pa),js=a(pa,"SPAN",{});var zu=i(js);Cc=s(zu,"QDQBertForTokenClassification"),zu.forEach(o),pa.forEach(o),Mr=c(t),S=a(t,"DIV",{class:!0});var ue=i(S);b(Mo.$$.fragment,ue),Ac=c(ue),Cs=a(ue,"P",{});var Bu=i(Cs);Nc=s(Bu,`QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Bu.forEach(o),Sc=c(ue),xo=a(ue,"P",{});var ha=i(xo);Lc=s(ha,"This model inherits from "),hn=a(ha,"A",{href:!0});var Mu=i(hn);Ic=s(Mu,"PreTrainedModel"),Mu.forEach(o),Oc=s(ha,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ha.forEach(o),Wc=c(ue),Eo=a(ue,"P",{});var ua=i(Eo);Hc=s(ua,"This model is also a PyTorch "),Fo=a(ua,"A",{href:!0,rel:!0});var xu=i(Fo);Rc=s(xu,"torch.nn.Module"),xu.forEach(o),Uc=s(ua,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ua.forEach(o),Vc=c(ue),U=a(ue,"DIV",{class:!0});var me=i(U);b(Po.$$.fragment,me),Xc=c(me),Ae=a(me,"P",{});var $n=i(Ae);Jc=s($n,"The "),un=a($n,"A",{href:!0});var Eu=i(un);Gc=s(Eu,"QDQBertForTokenClassification"),Eu.forEach(o),Kc=s($n," forward method, overrides the "),As=a($n,"CODE",{});var Fu=i(As);Zc=s(Fu,"__call__"),Fu.forEach(o),Yc=s($n," special method."),$n.forEach(o),ep=c(me),b(dt.$$.fragment,me),tp=c(me),Ns=a(me,"P",{});var Pu=i(Ns);op=s(Pu,"Example:"),Pu.forEach(o),np=c(me),b(jo.$$.fragment,me),me.forEach(o),ue.forEach(o),xr=c(t),Ne=a(t,"H2",{class:!0});var ma=i(Ne);ct=a(ma,"A",{id:!0,class:!0,href:!0});var ju=i(ct);Ss=a(ju,"SPAN",{});var Cu=i(Ss);b(Co.$$.fragment,Cu),Cu.forEach(o),ju.forEach(o),sp=c(ma),Ls=a(ma,"SPAN",{});var Au=i(Ls);rp=s(Au,"QDQBertForQuestionAnswering"),Au.forEach(o),ma.forEach(o),Er=c(t),L=a(t,"DIV",{class:!0});var fe=i(L);b(Ao.$$.fragment,fe),ap=c(fe),Se=a(fe,"P",{});var qn=i(Se);ip=s(qn,`QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Is=a(qn,"CODE",{});var Nu=i(Is);lp=s(Nu,"span start logits"),Nu.forEach(o),dp=s(qn," and "),Os=a(qn,"CODE",{});var Su=i(Os);cp=s(Su,"span end logits"),Su.forEach(o),pp=s(qn,")."),qn.forEach(o),hp=c(fe),No=a(fe,"P",{});var fa=i(No);up=s(fa,"This model inherits from "),mn=a(fa,"A",{href:!0});var Lu=i(mn);mp=s(Lu,"PreTrainedModel"),Lu.forEach(o),fp=s(fa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fa.forEach(o),gp=c(fe),So=a(fe,"P",{});var ga=i(So);_p=s(ga,"This model is also a PyTorch "),Lo=a(ga,"A",{href:!0,rel:!0});var Iu=i(Lo);bp=s(Iu,"torch.nn.Module"),Iu.forEach(o),vp=s(ga,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ga.forEach(o),kp=c(fe),V=a(fe,"DIV",{class:!0});var ge=i(V);b(Io.$$.fragment,ge),Qp=c(ge),Le=a(ge,"P",{});var Dn=i(Le);wp=s(Dn,"The "),fn=a(Dn,"A",{href:!0});var Ou=i(fn);Tp=s(Ou,"QDQBertForQuestionAnswering"),Ou.forEach(o),yp=s(Dn," forward method, overrides the "),Ws=a(Dn,"CODE",{});var Wu=i(Ws);$p=s(Wu,"__call__"),Wu.forEach(o),qp=s(Dn," special method."),Dn.forEach(o),Dp=c(ge),b(pt.$$.fragment,ge),zp=c(ge),Hs=a(ge,"P",{});var Hu=i(Hs);Bp=s(Hu,"Example:"),Hu.forEach(o),Mp=c(ge),b(Oo.$$.fragment,ge),ge.forEach(o),fe.forEach(o),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(sm)),l(T,"id","qdqbert"),l(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(T,"href","#qdqbert"),l(m,"class","relative group"),l(Ie,"id","overview"),l(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ie,"href","#overview"),l(_e,"class","relative group"),l(ft,"href","https://arxiv.org/abs/2004.09602"),l(ft,"rel","nofollow"),l(gt,"href","https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization"),l(gt,"rel","nofollow"),l(Xo,"href","examples/research_projects/quantization-qdqbert/"),l(vt,"href","https://huggingface.co/shangz"),l(vt,"rel","nofollow"),l(Re,"id","set-default-quantizers"),l(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Re,"href","#set-default-quantizers"),l(be,"class","relative group"),l(Qt,"href","https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization"),l(Qt,"rel","nofollow"),l(wt,"href","https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/userguide.html"),l(wt,"rel","nofollow"),l(Ve,"id","calibration"),l(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ve,"href","#calibration"),l(ve,"class","relative group"),l(Xe,"id","export-to-onnx"),l(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Xe,"href","#export-to-onnx"),l(ke,"class","relative group"),l(Dt,"href","https://developer.nvidia.com/tensorrt"),l(Dt,"rel","nofollow"),l(zt,"href","https://pytorch.org/docs/stable/onnx.html"),l(zt,"rel","nofollow"),l(Je,"id","transformers.QDQBertConfig"),l(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Je,"href","#transformers.QDQBertConfig"),l(Qe,"class","relative group"),l(Go,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertModel"),l(Et,"href","https://huggingface.co/bert-base-uncased"),l(Et,"rel","nofollow"),l(Ko,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Zo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(F,"class","docstring"),l(Ge,"id","transformers.QDQBertModel"),l(Ge,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ge,"href","#transformers.QDQBertModel"),l(ye,"class","relative group"),l(Yo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Nt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Nt,"rel","nofollow"),l(Lt,"href","https://arxiv.org/abs/1706.03762"),l(Lt,"rel","nofollow"),l(en,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertModel"),l(I,"class","docstring"),l(z,"class","docstring"),l(Ze,"id","transformers.QDQBertLMHeadModel"),l(Ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ze,"href","#transformers.QDQBertLMHeadModel"),l(qe,"class","relative group"),l(tn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Xt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Xt,"rel","nofollow"),l(on,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertLMHeadModel"),l(O,"class","docstring"),l(P,"class","docstring"),l(et,"id","transformers.QDQBertForMaskedLM"),l(et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(et,"href","#transformers.QDQBertForMaskedLM"),l(ze,"class","relative group"),l(nn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(oo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(oo,"rel","nofollow"),l(sn,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForMaskedLM"),l(W,"class","docstring"),l(j,"class","docstring"),l(ot,"id","transformers.QDQBertForSequenceClassification"),l(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ot,"href","#transformers.QDQBertForSequenceClassification"),l(Me,"class","relative group"),l(rn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(co,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(co,"rel","nofollow"),l(an,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForSequenceClassification"),l(B,"class","docstring"),l(C,"class","docstring"),l(st,"id","transformers.QDQBertForNextSentencePrediction"),l(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(st,"href","#transformers.QDQBertForNextSentencePrediction"),l(Ee,"class","relative group"),l(ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(vo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(vo,"rel","nofollow"),l(dn,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForNextSentencePrediction"),l(H,"class","docstring"),l(A,"class","docstring"),l(at,"id","transformers.QDQBertForMultipleChoice"),l(at,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(at,"href","#transformers.QDQBertForMultipleChoice"),l(Pe,"class","relative 
group"),l(cn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(qo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(qo,"rel","nofollow"),l(pn,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForMultipleChoice"),l(R,"class","docstring"),l(N,"class","docstring"),l(lt,"id","transformers.QDQBertForTokenClassification"),l(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(lt,"href","#transformers.QDQBertForTokenClassification"),l(Ce,"class","relative group"),l(hn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Fo,"rel","nofollow"),l(un,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForTokenClassification"),l(U,"class","docstring"),l(S,"class","docstring"),l(ct,"id","transformers.QDQBertForQuestionAnswering"),l(ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ct,"href","#transformers.QDQBertForQuestionAnswering"),l(Ne,"class","relative group"),l(mn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Lo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Lo,"rel","nofollow"),l(fn,"href","/docs/transformers/v4.15.0/en/model_doc/qdqbert#transformers.QDQBertForQuestionAnswering"),l(V,"class","docstring"),l(L,"class","docstring")},m(t,p){e(document.head,h),u(t,y,p),u(t,m,p),e(m,T),e(T,$),v(g,$,null),e(m,f),e(m,q),e(q,_a),u(t,Zs,p),u(t,_e,p),e(_e,Ie),e(Ie,zn),v(mt,zn,null),e(_e,ba),e(_e,Bn),e(Bn,va),u(t,Ys,p),u(t,Oe,p),e(Oe,ka),e(Oe,ft),e(ft,Qa),e(Oe,wa),u(t,er,p),u(t,Ro,p),e(Ro,Ta),u(t,tr,p),u(t,Uo,p),e(Uo,Mn),e(Mn,ya),u(t,or,p),u(t,Vo,p),e(Vo,$a),u(t,nr,p),u(t,Y,p),e(Y,xn),e(xn,En),e(En,qa),e(Y,Da),e(Y,Fn),e(Fn,We),e(We,za),e(We,gt),e(gt,Ba),e(We,Ma),e(We,Pn),e(Pn,xa),e(Y,Ea),e(Y,jn),e(jn,_t),e(_t,Fa),e(_t,Cn),e(Cn,Pa),e(_t,ja),e(Y,Ca),e(Y,An),e(An,bt),e(bt,Aa),e(bt,Xo),e(Xo,Na),e(bt,Sa),u(t,sr,p),u(t,He,p),e(He,La),e(He,vt),e(vt,Ia),e(He,Oa),u(t,rr,p),u(t,be,p),e(be,Re),e(Re,Nn),v(kt,Nn,null),e(be,Wa),e(be,Sn),e(Sn,Ha),u(t,ar,p),u(t,E,p),e(E,Ra),e(E,Ln),e(Ln,Ua),e(E,Va),e(E,Qt),e(Qt,Xa),e(E,Ja),e(E,In),e(In,Ga),e(E,Ka),e(E,On),e(On,Za),e(E,Ya),e(E,wt),e(wt,ei),e(E,ti),u(t,ir,p),u(t,Ue,p),e(Ue,oi),e(Ue,Wn),e(Wn,ni),e(Ue,si),u(t,lr,p),v(Tt,t,p),u(t,dr,p),u(t,ve,p),e(ve,Ve),e(Ve,Hn),v(yt,Hn,null),e(ve,ri),e(ve,Rn),e(Rn,ai),u(t,cr,p),u(t,Jo,p),e(Jo,ii),u(t,pr,p),v($t,t,p),u(t,hr,p),u(t,ke,p),e(ke,Xe),e(Xe,Un),v(qt,Un,null),e(ke,li),e(ke,Vn),e(Vn,di),u(t,ur,p),u(t,te,p),e(te,ci),e(te,Dt),e(Dt,pi),e(te,hi),e(te,zt),e(zt,ui),e(te,mi),u(t,mr,p),v(Bt,t,p),u(t,fr,p),u(t,Qe,p),e(Qe,Je),e(Je,Xn),v(Mt,Xn,null),e(Qe,fi),e(Qe,Jn),e(Jn,gi),u(t,gr,p),u(t,F,p),v(xt,F,null),e(F,_i),e(F,we),e(we,bi),e(we,Go),e(Go,vi),e(we,ki),e(we,Et),e(Et,Qi),e(we,wi),e(F,Ti),e(F,Te),e(Te,yi),e(Te,Ko),e(Ko,$i),e(Te,qi),e(Te,Zo),e(Zo,Di),e(Te,zi),e(F,Bi),e(F,Gn),e(Gn,Mi),e(F,xi),v(Ft,F,null),u(t,_r,p),u(t,ye,p),e(ye,Ge),e(Ge,Kn),v(Pt,Kn,null),e(ye,Ei),e(ye,Zn),e(Zn,Fi),u(t,br,p),u(t,z,p),v(jt,z,null),e(z,Pi),e(z,Yn),e(Yn,ji),e(z,Ci),e(z,Ct),e(Ct,Ai),e(Ct,Yo),e(Yo,Ni),e(Ct,Si),e(z,Li),e(z,At),e(At,Ii),e(At,Nt),e(Nt,Oi),e(At,Wi),e(z,Hi),e(z,St),e(St,Ri),e(St,Lt),e(Lt,Ui),e(St,Vi),e(z,Xi),e(z,M),e(M,Ji),e(M,
es),e(es,Gi),e(M,Ki),e(M,ts),e(ts,Zi),e(M,Yi),e(M,os),e(os,el),e(M,tl),e(M,ns),e(ns,ol),e(M,nl),e(M,ss),e(ss,sl),e(M,rl),e(M,rs),e(rs,al),e(M,il),e(z,ll),e(z,I),v(It,I,null),e(I,dl),e(I,$e),e($e,cl),e($e,en),e(en,pl),e($e,hl),e($e,as),e(as,ul),e($e,ml),e(I,fl),v(Ke,I,null),e(I,gl),e(I,is),e(is,_l),e(I,bl),v(Ot,I,null),u(t,vr,p),u(t,qe,p),e(qe,Ze),e(Ze,ls),v(Wt,ls,null),e(qe,vl),e(qe,ds),e(ds,kl),u(t,kr,p),u(t,P,p),v(Ht,P,null),e(P,Ql),e(P,Rt),e(Rt,wl),e(Rt,cs),e(cs,Tl),e(Rt,yl),e(P,$l),e(P,Ut),e(Ut,ql),e(Ut,tn),e(tn,Dl),e(Ut,zl),e(P,Bl),e(P,Vt),e(Vt,Ml),e(Vt,Xt),e(Xt,xl),e(Vt,El),e(P,Fl),e(P,O),v(Jt,O,null),e(O,Pl),e(O,De),e(De,jl),e(De,on),e(on,Cl),e(De,Al),e(De,ps),e(ps,Nl),e(De,Sl),e(O,Ll),v(Ye,O,null),e(O,Il),e(O,hs),e(hs,Ol),e(O,Wl),v(Gt,O,null),u(t,Qr,p),u(t,ze,p),e(ze,et),e(et,us),v(Kt,us,null),e(ze,Hl),e(ze,ms),e(ms,Rl),u(t,wr,p),u(t,j,p),v(Zt,j,null),e(j,Ul),e(j,Yt),e(Yt,Vl),e(Yt,fs),e(fs,Xl),e(Yt,Jl),e(j,Gl),e(j,eo),e(eo,Kl),e(eo,nn),e(nn,Zl),e(eo,Yl),e(j,ed),e(j,to),e(to,td),e(to,oo),e(oo,od),e(to,nd),e(j,sd),e(j,W),v(no,W,null),e(W,rd),e(W,Be),e(Be,ad),e(Be,sn),e(sn,id),e(Be,ld),e(Be,gs),e(gs,dd),e(Be,cd),e(W,pd),v(tt,W,null),e(W,hd),e(W,_s),e(_s,ud),e(W,md),v(so,W,null),u(t,Tr,p),u(t,Me,p),e(Me,ot),e(ot,bs),v(ro,bs,null),e(Me,fd),e(Me,vs),e(vs,gd),u(t,yr,p),u(t,C,p),v(ao,C,null),e(C,_d),e(C,ks),e(ks,bd),e(C,vd),e(C,io),e(io,kd),e(io,rn),e(rn,Qd),e(io,wd),e(C,Td),e(C,lo),e(lo,yd),e(lo,co),e(co,$d),e(lo,qd),e(C,Dd),e(C,B),v(po,B,null),e(B,zd),e(B,xe),e(xe,Bd),e(xe,an),e(an,Md),e(xe,xd),e(xe,Qs),e(Qs,Ed),e(xe,Fd),e(B,Pd),v(nt,B,null),e(B,jd),e(B,ws),e(ws,Cd),e(B,Ad),v(ho,B,null),e(B,Nd),e(B,Ts),e(Ts,Sd),e(B,Ld),v(uo,B,null),u(t,$r,p),u(t,Ee,p),e(Ee,st),e(st,ys),v(mo,ys,null),e(Ee,Id),e(Ee,$s),e($s,Od),u(t,qr,p),u(t,A,p),v(fo,A,null),e(A,Wd),e(A,go),e(go,Hd),e(go,qs),e(qs,Rd),e(go,Ud),e(A,Vd),e(A,_o),e(_o,Xd),e(_o,ln),e(ln,Jd),e(_o,Gd),e(A,Kd),e(A,bo),e(bo,Zd),e(bo,vo),e(vo,Yd),e(bo,ec),e(A,tc),e(A,H),v(ko,H,null),e(H,oc),e(H,Fe),e(Fe,nc),e(Fe,dn),e(dn,sc),e(Fe,rc),e(Fe,Ds),e(Ds,ac),e(Fe,ic),e(H,lc),v(rt,H,null),e(H,dc),e(H,zs),e(zs,cc),e(H,pc),v(Qo,H,null),u(t,Dr,p),u(t,Pe,p),e(Pe,at),e(at,Bs),v(wo,Bs,null),e(Pe,hc),e(Pe,Ms),e(Ms,uc),u(t,zr,p),u(t,N,p),v(To,N,null),e(N,mc),e(N,xs),e(xs,fc),e(N,gc),e(N,yo),e(yo,_c),e(yo,cn),e(cn,bc),e(yo,vc),e(N,kc),e(N,$o),e($o,Qc),e($o,qo),e(qo,wc),e($o,Tc),e(N,yc),e(N,R),v(Do,R,null),e(R,$c),e(R,je),e(je,qc),e(je,pn),e(pn,Dc),e(je,zc),e(je,Es),e(Es,Bc),e(je,Mc),e(R,xc),v(it,R,null),e(R,Ec),e(R,Fs),e(Fs,Fc),e(R,Pc),v(zo,R,null),u(t,Br,p),u(t,Ce,p),e(Ce,lt),e(lt,Ps),v(Bo,Ps,null),e(Ce,jc),e(Ce,js),e(js,Cc),u(t,Mr,p),u(t,S,p),v(Mo,S,null),e(S,Ac),e(S,Cs),e(Cs,Nc),e(S,Sc),e(S,xo),e(xo,Lc),e(xo,hn),e(hn,Ic),e(xo,Oc),e(S,Wc),e(S,Eo),e(Eo,Hc),e(Eo,Fo),e(Fo,Rc),e(Eo,Uc),e(S,Vc),e(S,U),v(Po,U,null),e(U,Xc),e(U,Ae),e(Ae,Jc),e(Ae,un),e(un,Gc),e(Ae,Kc),e(Ae,As),e(As,Zc),e(Ae,Yc),e(U,ep),v(dt,U,null),e(U,tp),e(U,Ns),e(Ns,op),e(U,np),v(jo,U,null),u(t,xr,p),u(t,Ne,p),e(Ne,ct),e(ct,Ss),v(Co,Ss,null),e(Ne,sp),e(Ne,Ls),e(Ls,rp),u(t,Er,p),u(t,L,p),v(Ao,L,null),e(L,ap),e(L,Se),e(Se,ip),e(Se,Is),e(Is,lp),e(Se,dp),e(Se,Os),e(Os,cp),e(Se,pp),e(L,hp),e(L,No),e(No,up),e(No,mn),e(mn,mp),e(No,fp),e(L,gp),e(L,So),e(So,_p),e(So,Lo),e(Lo,bp),e(So,vp),e(L,kp),e(L,V),v(Io,V,null),e(V,Qp),e(V,Le),e(Le,wp),e(Le,fn),e(fn,Tp),e(Le,yp),e(Le,Ws),e(Ws,$p),e(Le,qp),e(V,Dp),v(pt,V,null),e(V,zp),e(V,Hs),e(Hs,Bp),e(V,Mp),v(Oo,V,null),Fr=!0},p(t,[p]){const Wo={};p&2&&(Wo.$$scope={dirty:p,ctx:t}),Ke.$set(Wo);const Rs={};p&2&&(Rs.$$scope={dirty:p,ctx:t}),Ye.$set(Rs);const 
Us={};p&2&&(Us.$$scope={dirty:p,ctx:t}),tt.$set(Us);const Vs={};p&2&&(Vs.$$scope={dirty:p,ctx:t}),nt.$set(Vs);const Ho={};p&2&&(Ho.$$scope={dirty:p,ctx:t}),rt.$set(Ho);const Xs={};p&2&&(Xs.$$scope={dirty:p,ctx:t}),it.$set(Xs);const Js={};p&2&&(Js.$$scope={dirty:p,ctx:t}),dt.$set(Js);const Gs={};p&2&&(Gs.$$scope={dirty:p,ctx:t}),pt.$set(Gs)},i(t){Fr||(k(g.$$.fragment,t),k(mt.$$.fragment,t),k(kt.$$.fragment,t),k(Tt.$$.fragment,t),k(yt.$$.fragment,t),k($t.$$.fragment,t),k(qt.$$.fragment,t),k(Bt.$$.fragment,t),k(Mt.$$.fragment,t),k(xt.$$.fragment,t),k(Ft.$$.fragment,t),k(Pt.$$.fragment,t),k(jt.$$.fragment,t),k(It.$$.fragment,t),k(Ke.$$.fragment,t),k(Ot.$$.fragment,t),k(Wt.$$.fragment,t),k(Ht.$$.fragment,t),k(Jt.$$.fragment,t),k(Ye.$$.fragment,t),k(Gt.$$.fragment,t),k(Kt.$$.fragment,t),k(Zt.$$.fragment,t),k(no.$$.fragment,t),k(tt.$$.fragment,t),k(so.$$.fragment,t),k(ro.$$.fragment,t),k(ao.$$.fragment,t),k(po.$$.fragment,t),k(nt.$$.fragment,t),k(ho.$$.fragment,t),k(uo.$$.fragment,t),k(mo.$$.fragment,t),k(fo.$$.fragment,t),k(ko.$$.fragment,t),k(rt.$$.fragment,t),k(Qo.$$.fragment,t),k(wo.$$.fragment,t),k(To.$$.fragment,t),k(Do.$$.fragment,t),k(it.$$.fragment,t),k(zo.$$.fragment,t),k(Bo.$$.fragment,t),k(Mo.$$.fragment,t),k(Po.$$.fragment,t),k(dt.$$.fragment,t),k(jo.$$.fragment,t),k(Co.$$.fragment,t),k(Ao.$$.fragment,t),k(Io.$$.fragment,t),k(pt.$$.fragment,t),k(Oo.$$.fragment,t),Fr=!0)},o(t){Q(g.$$.fragment,t),Q(mt.$$.fragment,t),Q(kt.$$.fragment,t),Q(Tt.$$.fragment,t),Q(yt.$$.fragment,t),Q($t.$$.fragment,t),Q(qt.$$.fragment,t),Q(Bt.$$.fragment,t),Q(Mt.$$.fragment,t),Q(xt.$$.fragment,t),Q(Ft.$$.fragment,t),Q(Pt.$$.fragment,t),Q(jt.$$.fragment,t),Q(It.$$.fragment,t),Q(Ke.$$.fragment,t),Q(Ot.$$.fragment,t),Q(Wt.$$.fragment,t),Q(Ht.$$.fragment,t),Q(Jt.$$.fragment,t),Q(Ye.$$.fragment,t),Q(Gt.$$.fragment,t),Q(Kt.$$.fragment,t),Q(Zt.$$.fragment,t),Q(no.$$.fragment,t),Q(tt.$$.fragment,t),Q(so.$$.fragment,t),Q(ro.$$.fragment,t),Q(ao.$$.fragment,t),Q(po.$$.fragment,t),Q(nt.$$.fragment,t),Q(ho.$$.fragment,t),Q(uo.$$.fragment,t),Q(mo.$$.fragment,t),Q(fo.$$.fragment,t),Q(ko.$$.fragment,t),Q(rt.$$.fragment,t),Q(Qo.$$.fragment,t),Q(wo.$$.fragment,t),Q(To.$$.fragment,t),Q(Do.$$.fragment,t),Q(it.$$.fragment,t),Q(zo.$$.fragment,t),Q(Bo.$$.fragment,t),Q(Mo.$$.fragment,t),Q(Po.$$.fragment,t),Q(dt.$$.fragment,t),Q(jo.$$.fragment,t),Q(Co.$$.fragment,t),Q(Ao.$$.fragment,t),Q(Io.$$.fragment,t),Q(pt.$$.fragment,t),Q(Oo.$$.fragment,t),Fr=!1},d(t){o(h),t&&o(y),t&&o(m),w(g),t&&o(Zs),t&&o(_e),w(mt),t&&o(Ys),t&&o(Oe),t&&o(er),t&&o(Ro),t&&o(tr),t&&o(Uo),t&&o(or),t&&o(Vo),t&&o(nr),t&&o(Y),t&&o(sr),t&&o(He),t&&o(rr),t&&o(be),w(kt),t&&o(ar),t&&o(E),t&&o(ir),t&&o(Ue),t&&o(lr),w(Tt,t),t&&o(dr),t&&o(ve),w(yt),t&&o(cr),t&&o(Jo),t&&o(pr),w($t,t),t&&o(hr),t&&o(ke),w(qt),t&&o(ur),t&&o(te),t&&o(mr),w(Bt,t),t&&o(fr),t&&o(Qe),w(Mt),t&&o(gr),t&&o(F),w(xt),w(Ft),t&&o(_r),t&&o(ye),w(Pt),t&&o(br),t&&o(z),w(jt),w(It),w(Ke),w(Ot),t&&o(vr),t&&o(qe),w(Wt),t&&o(kr),t&&o(P),w(Ht),w(Jt),w(Ye),w(Gt),t&&o(Qr),t&&o(ze),w(Kt),t&&o(wr),t&&o(j),w(Zt),w(no),w(tt),w(so),t&&o(Tr),t&&o(Me),w(ro),t&&o(yr),t&&o(C),w(ao),w(po),w(nt),w(ho),w(uo),t&&o($r),t&&o(Ee),w(mo),t&&o(qr),t&&o(A),w(fo),w(ko),w(rt),w(Qo),t&&o(Dr),t&&o(Pe),w(wo),t&&o(zr),t&&o(N),w(To),w(Do),w(it),w(zo),t&&o(Br),t&&o(Ce),w(Bo),t&&o(Mr),t&&o(S),w(Mo),w(Po),w(dt),w(jo),t&&o(xr),t&&o(Ne),w(Co),t&&o(Er),t&&o(L),w(Ao),w(Io),w(pt),w(Oo)}}}const sm={local:"qdqbert",sections:[{local:"overview",sections:[{local:"set-default-quantizers",title:"Set default 
quantizers"},{local:"calibration",title:"Calibration"},{local:"export-to-onnx",title:"Export to ONNX"}],title:"Overview"},{local:"transformers.QDQBertConfig",title:"QDQBertConfig"},{local:"transformers.QDQBertModel",title:"QDQBertModel"},{local:"transformers.QDQBertLMHeadModel",title:"QDQBertLMHeadModel"},{local:"transformers.QDQBertForMaskedLM",title:"QDQBertForMaskedLM"},{local:"transformers.QDQBertForSequenceClassification",title:"QDQBertForSequenceClassification"},{local:"transformers.QDQBertForNextSentencePrediction",title:"QDQBertForNextSentencePrediction"},{local:"transformers.QDQBertForMultipleChoice",title:"QDQBertForMultipleChoice"},{local:"transformers.QDQBertForTokenClassification",title:"QDQBertForTokenClassification"},{local:"transformers.QDQBertForQuestionAnswering",title:"QDQBertForQuestionAnswering"}],title:"QDQBERT"};function rm(D,h,y){let{fw:m}=h;return D.$$set=T=>{"fw"in T&&y(0,m=T.fw)},[m]}class hm extends Ru{constructor(h){super();Uu(this,h,rm,nm,Vu,{fw:0})}}export{hm as default,sm as metadata};
9,968
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/phobert.mdx-46ecb08c.js
import{S as hn,i as dn,s as mn,e as o,k as l,w as v,t as a,L as fn,c as s,d as n,m as p,a as r,x as T,h as i,b as c,J as t,g as m,y as w,K as un,q as y,o as E,B as P}from"../../chunks/vendor-b1433968.js";import{D as ae}from"../../chunks/Docstring-ff504c58.js";import{C as gn}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Ct}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function _n(We){let _,Y,u,g,ie,j,Fe,le,Ue,we,$,z,pe,N,Xe,ce,He,ye,x,Ge,I,Je,Qe,Ee,K,Ye,Pe,Z,he,Ke,$e,ee,Ze,qe,C,ze,k,et,M,tt,nt,O,ot,st,xe,q,A,de,S,rt,me,at,Ae,d,V,it,fe,lt,pt,W,ct,te,ht,dt,mt,L,F,ft,ue,ut,gt,b,U,_t,ge,kt,bt,X,ne,vt,_e,Tt,wt,oe,yt,ke,Et,Pt,D,H,$t,be,qt,zt,B,G,xt,ve,At,Lt,R,J,Dt,Q,Bt,Te,Rt,jt,Le;return j=new Ct({}),N=new Ct({}),C=new gn({props:{code:`import torch from transformers import AutoModel, AutoTokenizer phobert = AutoModel.from_pretrained("vinai/phobert-base") tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base") # INPUT TEXT MUST BE ALREADY WORD-SEGMENTED! line = "T\xF4i l\xE0 sinh_vi\xEAn tr\u01B0\u1EDDng \u0111\u1EA1i_h\u1ECDc C\xF4ng_ngh\u1EC7 ." input_ids = torch.tensor([tokenizer.encode(line)]) with torch.no_grad(): features = phobert(input_ids) # Models outputs are now tuples # With TensorFlow 2.0+: # from transformers import TFAutoModel # phobert = TFAutoModel.from_pretrained("vinai/phobert-base"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">import</span> torch</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoTokenizer</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">phobert = AutoModel.from_pretrained(<span class="hljs-string">&quot;vinai/phobert-base&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;vinai/phobert-base&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># INPUT TEXT MUST BE ALREADY WORD-SEGMENTED!</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">line = <span class="hljs-string">&quot;T\xF4i l\xE0 sinh_vi\xEAn tr\u01B0\u1EDDng \u0111\u1EA1i_h\u1ECDc C\xF4ng_ngh\u1EC7 .&quot;</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">input_ids = torch.tensor([tokenizer.encode(line)])</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">with</span> torch.no_grad():</span> <span class="hljs-meta">...</span> <span class="language-python"> features = phobert(input_ids) <span class="hljs-comment"># Models outputs are now tuples</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># With TensorFlow 2.0+:</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># from transformers import TFAutoModel</span></span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-comment"># phobert = TFAutoModel.from_pretrained(&quot;vinai/phobert-base&quot;)</span></span>`}}),S=new Ct({}),V=new ae({props:{name:"class 
transformers.PhobertTokenizer",anchor:"transformers.PhobertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/phobert/tokenization_phobert.py#L68",parametersDescription:[{anchor:"transformers.PhobertTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.PhobertTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.PhobertTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>st</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.PhobertTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.PhobertTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.PhobertTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.PhobertTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.PhobertTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.PhobertTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"}]}}),F=new ae({props:{name:"add_from_file",anchor:"transformers.PhobertTokenizer.add_from_file",parameters:[{name:"f",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/phobert/tokenization_phobert.py#L341"}}),U=new ae({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.PhobertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/phobert/tokenization_phobert.py#L164",parametersDescription:[{anchor:"transformers.PhobertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.PhobertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),H=new ae({props:{name:"convert_tokens_to_string",anchor:"transformers.PhobertTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/phobert/tokenization_phobert.py#L311"}}),G=new ae({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PhobertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/phobert/tokenization_phobert.py#L218",parametersDescription:[{anchor:"transformers.PhobertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.PhobertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),J=new ae({props:{name:"get_special_tokens_mask",anchor:"transformers.PhobertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": 
typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/phobert/tokenization_phobert.py#L190",parametersDescription:[{anchor:"transformers.PhobertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.PhobertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.PhobertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){_=o("meta"),Y=l(),u=o("h1"),g=o("a"),ie=o("span"),v(j.$$.fragment),Fe=l(),le=o("span"),Ue=a("PhoBERT"),we=l(),$=o("h2"),z=o("a"),pe=o("span"),v(N.$$.fragment),Xe=l(),ce=o("span"),He=a("Overview"),ye=l(),x=o("p"),Ge=a("The PhoBERT model was proposed in "),I=o("a"),Je=a("PhoBERT: Pre-trained language models for Vietnamese"),Qe=a(" by Dat Quoc Nguyen, Anh Tuan Nguyen."),Ee=l(),K=o("p"),Ye=a("The abstract from the paper is the following:"),Pe=l(),Z=o("p"),he=o("em"),Ke=a(`We present PhoBERT with two versions, PhoBERT-base and PhoBERT-large, the first public large-scale monolingual language models pre-trained for Vietnamese. Experimental results show that PhoBERT consistently outperforms the recent best pre-trained multilingual model XLM-R (Conneau et al., 2020) and improves the state-of-the-art in multiple Vietnamese-specific NLP tasks including Part-of-speech tagging, Dependency parsing, Named-entity recognition and Natural language inference.`),$e=l(),ee=o("p"),Ze=a("Example of use:"),qe=l(),v(C.$$.fragment),ze=l(),k=o("p"),et=a("This model was contributed by "),M=o("a"),tt=a("dqnguyen"),nt=a(". The original code can be found "),O=o("a"),ot=a("here"),st=a("."),xe=l(),q=o("h2"),A=o("a"),de=o("span"),v(S.$$.fragment),rt=l(),me=o("span"),at=a("PhobertTokenizer"),Ae=l(),d=o("div"),v(V.$$.fragment),it=l(),fe=o("p"),lt=a("Construct a PhoBERT tokenizer. Based on Byte-Pair-Encoding."),pt=l(),W=o("p"),ct=a("This tokenizer inherits from "),te=o("a"),ht=a("PreTrainedTokenizer"),dt=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),mt=l(),L=o("div"),v(F.$$.fragment),ft=l(),ue=o("p"),ut=a("Loads a pre-existing dictionary from a text file and adds its symbols to this instance."),gt=l(),b=o("div"),v(U.$$.fragment),_t=l(),ge=o("p"),kt=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A PhoBERT sequence has the following format:`),bt=l(),X=o("ul"),ne=o("li"),vt=a("single sequence: "),_e=o("code"),Tt=a("<s> X </s>"),wt=l(),oe=o("li"),yt=a("pair of sequences: "),ke=o("code"),Et=a("<s> A </s></s> B </s>"),Pt=l(),D=o("div"),v(H.$$.fragment),$t=l(),be=o("p"),qt=a("Converts a sequence of tokens (string) in a single string."),zt=l(),B=o("div"),v(G.$$.fragment),xt=l(),ve=o("p"),At=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not make use of token type ids, therefore a list of zeros is returned.`),Lt=l(),R=o("div"),v(J.$$.fragment),Dt=l(),Q=o("p"),Bt=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Te=o("code"),Rt=a("prepare_for_model"),jt=a(" method."),this.h()},l(e){const h=fn('[data-svelte="svelte-1phssyn"]',document.head);_=s(h,"META",{name:!0,content:!0}),h.forEach(n),Y=p(e),u=s(e,"H1",{class:!0});var De=r(u);g=s(De,"A",{id:!0,class:!0,href:!0});var Mt=r(g);ie=s(Mt,"SPAN",{});var Ot=r(ie);T(j.$$.fragment,Ot),Ot.forEach(n),Mt.forEach(n),Fe=p(De),le=s(De,"SPAN",{});var St=r(le);Ue=i(St,"PhoBERT"),St.forEach(n),De.forEach(n),we=p(e),$=s(e,"H2",{class:!0});var Be=r($);z=s(Be,"A",{id:!0,class:!0,href:!0});var Vt=r(z);pe=s(Vt,"SPAN",{});var Wt=r(pe);T(N.$$.fragment,Wt),Wt.forEach(n),Vt.forEach(n),Xe=p(Be),ce=s(Be,"SPAN",{});var Ft=r(ce);He=i(Ft,"Overview"),Ft.forEach(n),Be.forEach(n),ye=p(e),x=s(e,"P",{});var Re=r(x);Ge=i(Re,"The PhoBERT model was proposed in "),I=s(Re,"A",{href:!0,rel:!0});var Ut=r(I);Je=i(Ut,"PhoBERT: Pre-trained language models for Vietnamese"),Ut.forEach(n),Qe=i(Re," by Dat Quoc Nguyen, Anh Tuan Nguyen."),Re.forEach(n),Ee=p(e),K=s(e,"P",{});var Xt=r(K);Ye=i(Xt,"The abstract from the paper is the following:"),Xt.forEach(n),Pe=p(e),Z=s(e,"P",{});var Ht=r(Z);he=s(Ht,"EM",{});var Gt=r(he);Ke=i(Gt,`We present PhoBERT with two versions, PhoBERT-base and PhoBERT-large, the first public large-scale monolingual language models pre-trained for Vietnamese. Experimental results show that PhoBERT consistently outperforms the recent best pre-trained multilingual model XLM-R (Conneau et al., 2020) and improves the state-of-the-art in multiple Vietnamese-specific NLP tasks including Part-of-speech tagging, Dependency parsing, Named-entity recognition and Natural language inference.`),Gt.forEach(n),Ht.forEach(n),$e=p(e),ee=s(e,"P",{});var Jt=r(ee);Ze=i(Jt,"Example of use:"),Jt.forEach(n),qe=p(e),T(C.$$.fragment,e),ze=p(e),k=s(e,"P",{});var se=r(k);et=i(se,"This model was contributed by "),M=s(se,"A",{href:!0,rel:!0});var Qt=r(M);tt=i(Qt,"dqnguyen"),Qt.forEach(n),nt=i(se,". The original code can be found "),O=s(se,"A",{href:!0,rel:!0});var Yt=r(O);ot=i(Yt,"here"),Yt.forEach(n),st=i(se,"."),se.forEach(n),xe=p(e),q=s(e,"H2",{class:!0});var je=r(q);A=s(je,"A",{id:!0,class:!0,href:!0});var Kt=r(A);de=s(Kt,"SPAN",{});var Zt=r(de);T(S.$$.fragment,Zt),Zt.forEach(n),Kt.forEach(n),rt=p(je),me=s(je,"SPAN",{});var en=r(me);at=i(en,"PhobertTokenizer"),en.forEach(n),je.forEach(n),Ae=p(e),d=s(e,"DIV",{class:!0});var f=r(d);T(V.$$.fragment,f),it=p(f),fe=s(f,"P",{});var tn=r(fe);lt=i(tn,"Construct a PhoBERT tokenizer. Based on Byte-Pair-Encoding."),tn.forEach(n),pt=p(f),W=s(f,"P",{});var Ne=r(W);ct=i(Ne,"This tokenizer inherits from "),te=s(Ne,"A",{href:!0});var nn=r(te);ht=i(nn,"PreTrainedTokenizer"),nn.forEach(n),dt=i(Ne,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ne.forEach(n),mt=p(f),L=s(f,"DIV",{class:!0});var Ie=r(L);T(F.$$.fragment,Ie),ft=p(Ie),ue=s(Ie,"P",{});var on=r(ue);ut=i(on,"Loads a pre-existing dictionary from a text file and adds its symbols to this instance."),on.forEach(n),Ie.forEach(n),gt=p(f),b=s(f,"DIV",{class:!0});var re=r(b);T(U.$$.fragment,re),_t=p(re),ge=s(re,"P",{});var sn=r(ge);kt=i(sn,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A PhoBERT sequence has the following format:`),sn.forEach(n),bt=p(re),X=s(re,"UL",{});var Ce=r(X);ne=s(Ce,"LI",{});var Nt=r(ne);vt=i(Nt,"single sequence: "),_e=s(Nt,"CODE",{});var rn=r(_e);Tt=i(rn,"<s> X </s>"),rn.forEach(n),Nt.forEach(n),wt=p(Ce),oe=s(Ce,"LI",{});var It=r(oe);yt=i(It,"pair of sequences: "),ke=s(It,"CODE",{});var an=r(ke);Et=i(an,"<s> A </s></s> B </s>"),an.forEach(n),It.forEach(n),Ce.forEach(n),re.forEach(n),Pt=p(f),D=s(f,"DIV",{class:!0});var Me=r(D);T(H.$$.fragment,Me),$t=p(Me),be=s(Me,"P",{});var ln=r(be);qt=i(ln,"Converts a sequence of tokens (string) in a single string."),ln.forEach(n),Me.forEach(n),zt=p(f),B=s(f,"DIV",{class:!0});var Oe=r(B);T(G.$$.fragment,Oe),xt=p(Oe),ve=s(Oe,"P",{});var pn=r(ve);At=i(pn,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not make use of token type ids, therefore a list of zeros is returned.`),pn.forEach(n),Oe.forEach(n),Lt=p(f),R=s(f,"DIV",{class:!0});var Se=r(R);T(J.$$.fragment,Se),Dt=p(Se),Q=s(Se,"P",{});var Ve=r(Q);Bt=i(Ve,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Te=s(Ve,"CODE",{});var cn=r(Te);Rt=i(cn,"prepare_for_model"),cn.forEach(n),jt=i(Ve," method."),Ve.forEach(n),Se.forEach(n),f.forEach(n),this.h()},h(){c(_,"name","hf:doc:metadata"),c(_,"content",JSON.stringify(kn)),c(g,"id","phobert"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#phobert"),c(u,"class","relative group"),c(z,"id","overview"),c(z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(z,"href","#overview"),c($,"class","relative group"),c(I,"href","https://www.aclweb.org/anthology/2020.findings-emnlp.92.pdf"),c(I,"rel","nofollow"),c(M,"href","https://huggingface.co/dqnguyen"),c(M,"rel","nofollow"),c(O,"href","https://github.com/VinAIResearch/PhoBERT"),c(O,"rel","nofollow"),c(A,"id","transformers.PhobertTokenizer"),c(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(A,"href","#transformers.PhobertTokenizer"),c(q,"class","relative 
group"),c(te,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(L,"class","docstring"),c(b,"class","docstring"),c(D,"class","docstring"),c(B,"class","docstring"),c(R,"class","docstring"),c(d,"class","docstring")},m(e,h){t(document.head,_),m(e,Y,h),m(e,u,h),t(u,g),t(g,ie),w(j,ie,null),t(u,Fe),t(u,le),t(le,Ue),m(e,we,h),m(e,$,h),t($,z),t(z,pe),w(N,pe,null),t($,Xe),t($,ce),t(ce,He),m(e,ye,h),m(e,x,h),t(x,Ge),t(x,I),t(I,Je),t(x,Qe),m(e,Ee,h),m(e,K,h),t(K,Ye),m(e,Pe,h),m(e,Z,h),t(Z,he),t(he,Ke),m(e,$e,h),m(e,ee,h),t(ee,Ze),m(e,qe,h),w(C,e,h),m(e,ze,h),m(e,k,h),t(k,et),t(k,M),t(M,tt),t(k,nt),t(k,O),t(O,ot),t(k,st),m(e,xe,h),m(e,q,h),t(q,A),t(A,de),w(S,de,null),t(q,rt),t(q,me),t(me,at),m(e,Ae,h),m(e,d,h),w(V,d,null),t(d,it),t(d,fe),t(fe,lt),t(d,pt),t(d,W),t(W,ct),t(W,te),t(te,ht),t(W,dt),t(d,mt),t(d,L),w(F,L,null),t(L,ft),t(L,ue),t(ue,ut),t(d,gt),t(d,b),w(U,b,null),t(b,_t),t(b,ge),t(ge,kt),t(b,bt),t(b,X),t(X,ne),t(ne,vt),t(ne,_e),t(_e,Tt),t(X,wt),t(X,oe),t(oe,yt),t(oe,ke),t(ke,Et),t(d,Pt),t(d,D),w(H,D,null),t(D,$t),t(D,be),t(be,qt),t(d,zt),t(d,B),w(G,B,null),t(B,xt),t(B,ve),t(ve,At),t(d,Lt),t(d,R),w(J,R,null),t(R,Dt),t(R,Q),t(Q,Bt),t(Q,Te),t(Te,Rt),t(Q,jt),Le=!0},p:un,i(e){Le||(y(j.$$.fragment,e),y(N.$$.fragment,e),y(C.$$.fragment,e),y(S.$$.fragment,e),y(V.$$.fragment,e),y(F.$$.fragment,e),y(U.$$.fragment,e),y(H.$$.fragment,e),y(G.$$.fragment,e),y(J.$$.fragment,e),Le=!0)},o(e){E(j.$$.fragment,e),E(N.$$.fragment,e),E(C.$$.fragment,e),E(S.$$.fragment,e),E(V.$$.fragment,e),E(F.$$.fragment,e),E(U.$$.fragment,e),E(H.$$.fragment,e),E(G.$$.fragment,e),E(J.$$.fragment,e),Le=!1},d(e){n(_),e&&n(Y),e&&n(u),P(j),e&&n(we),e&&n($),P(N),e&&n(ye),e&&n(x),e&&n(Ee),e&&n(K),e&&n(Pe),e&&n(Z),e&&n($e),e&&n(ee),e&&n(qe),P(C,e),e&&n(ze),e&&n(k),e&&n(xe),e&&n(q),P(S),e&&n(Ae),e&&n(d),P(V),P(F),P(U),P(H),P(G),P(J)}}}const kn={local:"phobert",sections:[{local:"overview",title:"Overview"},{local:"transformers.PhobertTokenizer",title:"PhobertTokenizer"}],title:"PhoBERT"};function bn(We,_,Y){let{fw:u}=_;return We.$$set=g=>{"fw"in g&&Y(0,u=g.fw)},[u]}class Pn extends hn{constructor(_){super();dn(this,_,bn,_n,mn,{fw:0})}}export{Pn as default,kn as metadata};
9,969
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/speechencoderdecoder.mdx-5d5cff9d.js
import{S as ur,i as gr,s as _r,e as a,k as i,w as k,t,L as vr,c as s,d as o,m as l,a as d,x as T,h as r,b as c,J as e,g as _,y as D,q as $,o as M,B as x}from"../../chunks/vendor-b1433968.js";import{T as br}from"../../chunks/Tip-c3840994.js";import{D as Ve}from"../../chunks/Docstring-ff504c58.js";import{C as Tt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Dt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function wr(Be){let f,j,g,v,I;return{c(){f=a("p"),j=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a("code"),v=t("Module"),I=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){f=s(S,"P",{});var C=d(f);j=r(C,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(C,"CODE",{});var J=d(g);v=r(J,"Module"),J.forEach(o),I=r(C,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),C.forEach(o)},m(S,C){_(S,f,C),e(f,j),e(f,g),e(g,v),e(f,I)},d(S){S&&o(f)}}}function yr(Be){let f,j,g,v,I,S,C,J,Mo,ho,b,xo,ye,jo,Co,Oe,Po,qo,Ee,zo,Ao,Se,Lo,Fo,po,P,Io,Re,No,Wo,X,Vo,Bo,mo,q,Oo,ke,Ro,Ho,Te,Jo,Uo,fo,N,U,He,ee,Go,Je,Yo,uo,u,oe,Zo,G,De,Ko,Qo,$e,Xo,en,on,W,nn,Me,tn,rn,xe,an,sn,dn,Ue,cn,ln,ne,hn,Y,te,pn,re,mn,je,fn,un,gn,Z,ae,_n,V,vn,Ge,bn,wn,Ye,yn,En,go,B,K,Ze,se,Sn,Ke,kn,_o,h,de,Tn,O,Dn,Qe,$n,Mn,Xe,xn,jn,Cn,ce,Pn,ie,qn,zn,An,le,Ln,he,Fn,In,Nn,eo,Wn,Vn,pe,Bn,Ce,On,Rn,Hn,me,Jn,fe,Un,Gn,Yn,z,Pe,Zn,Kn,oo,Qn,Xn,no,et,ot,nt,w,ue,tt,R,rt,qe,at,st,to,dt,ct,it,Q,lt,ro,ht,pt,ge,mt,y,_e,ft,ao,ut,gt,H,_t,so,vt,bt,co,wt,yt,Et,io,St,kt,ve,vo;return S=new Dt({}),ee=new Dt({}),oe=new Ve({props:{name:"class transformers.SpeechEncoderDecoderConfig",anchor:"transformers.SpeechEncoderDecoderConfig",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py#L27",parametersDescription:[{anchor:"transformers.SpeechEncoderDecoderConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments. 
Notably:</p> <ul> <li><strong>encoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the encoder config.</li> <li><strong>decoder</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the decoder config.</li> </ul>`,name:"kwargs"}]}}),ne=new Tt({props:{code:`from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel # Initializing a Wav2Vec2 & BERT style configuration config_encoder = Wav2Vec2Config() config_decoder = BertConfig() config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) # Initializing a Wav2Vec2Bert model from a Wav2Vec2 & bert-base-uncased style configurations model = SpeechEncoderDecoderModel(config=config) # Accessing the model configuration config_encoder = model.config.encoder config_decoder = model.config.decoder # set decoder config to causal lm config_decoder.is_decoder = True config_decoder.add_cross_attention = True # Saving the model, including its configuration model.save_pretrained('my-model') # loading model and config from pretrained folder encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained('my-model') model = SpeechEncoderDecoderModel.from_pretrained('my-model', config=encoder_decoder_config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Wav2Vec2 &amp; BERT style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = Wav2Vec2Config() <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Wav2Vec2Bert model from a Wav2Vec2 &amp; bert-base-uncased style configurations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = model.config.encoder <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = model.config.decoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set decoder config to causal lm</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.add_cross_attention = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>, config=encoder_decoder_config)`}}),te=new Ve({props:{name:"from_encoder_decoder_configs",anchor:"transformers.SpeechEncoderDecoderConfig.from_encoder_decoder_configs",parameters:[{name:"encoder_config",val:": PretrainedConfig"},{name:"decoder_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py#L92",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig" >SpeechEncoderDecoderConfig</a></p> `}}),ae=new Ve({props:{name:"to_dict",anchor:"transformers.SpeechEncoderDecoderConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py#L109",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),se=new Dt({}),de=new Ve({props:{name:"class transformers.SpeechEncoderDecoderModel",anchor:"transformers.SpeechEncoderDecoderModel",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"decoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py#L170",parametersDescription:[{anchor:"transformers.SpeechEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig">SpeechEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ue=new Ve({props:{name:"forward",anchor:"transformers.SpeechEncoderDecoderModel.forward",parameters:[{name:"input_values",val:" = None"},{name:"input_features",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py#L416",parametersDescription:[{anchor:"transformers.SpeechEncoderDecoderModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.input_features",description:`<strong>input_features</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, feature_size)</code>, <em>optional</em>) &#x2014; Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a <code>.flac</code> or <code>.wav</code> audio file into an array of type <code>List[float]</code> or a <code>numpy.ndarray</code>, <em>e.g.</em> via the soundfile library (<code>pip install soundfile</code>). To prepare the array into <code>input_features</code>, the <a href="/docs/transformers/v4.15.0/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer">Speech2TextTokenizer</a> should be used for extracting the fbank features, padding and conversion into a tensor of type <code>torch.FloatTensor</code>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a>`,name:"input_features"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For training, <code>decoder_input_ids</code> are automatically created by the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.`,name:"decoder_input_ids"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"decoder_inputs_embeds"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SpeechEncoderDecoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple. kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul>`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig" >SpeechEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention 
softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Q=new br({props:{$$slots:{default:[wr]},$$scope:{ctx:Be}}}),ge=new Tt({props:{code:`from transformers import SpeechEncoderDecoderModel, Speech2Text2Processor from datasets import load_dataset import torch processor = Speech2Text2Processor.from_pretrained('facebook/s2t-wav2vec2-large-en-de') model = SpeechEncoderDecoderModel.from_pretrained('facebook/s2t-wav2vec2-large-en-de') ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]) outputs = model(input_values=input_values, decoder_input_ids=decoder_input_ids) # inference (generation) generated = model.generate(input_values) translation = processor.batch_decode(generated),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SpeechEncoderDecoderModel, Speech2Text2Processor <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Speech2Text2Processor.from_pretrained(<span class="hljs-string">&#x27;facebook/s2t-wav2vec2-large-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&#x27;facebook/s2t-wav2vec2-large-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>input_values = processor(ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_values=input_values, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference (generation)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(input_values) <span class="hljs-meta">&gt;&gt;&gt; </span>translation = processor.batch_decode(generated)`}}),_e=new Ve({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": str = None"},{name:"decoder_pretrained_model_name_or_path",val:": str = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py#L269",parametersDescription:[{anchor:"transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (:obj: <em>str</em>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),ve=new Tt({props:{code:`from transformers import SpeechEncoderDecoderModel # initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained('facebook/wav2vec2-base-960h', 'bert-base-uncased') # saving model after fine-tuning model.save_pretrained("./wav2vec2bert") # load fine-tuned model model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2bert"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SpeechEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(<span class="hljs-string">&#x27;facebook/wav2vec2-base-960h&#x27;</span>, <span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./wav2vec2bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./wav2vec2bert&quot;</span>)`}}),{c(){f=a("meta"),j=i(),g=a("h1"),v=a("a"),I=a("span"),k(S.$$.fragment),C=i(),J=a("span"),Mo=t("Speech Encoder Decoder Models"),ho=i(),b=a("p"),xo=t("The "),ye=a("a"),jo=t("SpeechEncoderDecoderModel"),Co=t(` can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder (`),Oe=a("em"),Po=t("e.g."),qo=i(),Ee=a("a"),zo=t("Wav2Vec2"),Ao=t(", "),Se=a("a"),Lo=t("Hubert"),Fo=t(") and any pretrained autoregressive model as the decoder."),po=i(),P=a("p"),Io=t(`The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech recognition and speech translation has `),Re=a("em"),No=t("e.g."),Wo=t(" been shown in "),X=a("a"),Vo=t(`Large-Scale Self- and Semi-Supervised Learning for Speech Translation`),Bo=t(` by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.`),mo=i(),q=a("p"),Oo=t("An example of how to use a "),ke=a("a"),Ro=t("SpeechEncoderDecoderModel"),Ho=t(` for inference can be seen in `),Te=a("a"),Jo=t("Speech2Text2"),Uo=t("."),fo=i(),N=a("h2"),U=a("a"),He=a("span"),k(ee.$$.fragment),Go=i(),Je=a("span"),Yo=t("SpeechEncoderDecoderConfig"),uo=i(),u=a("div"),k(oe.$$.fragment),Zo=i(),G=a("p"),De=a("a"),Ko=t("SpeechEncoderDecoderConfig"),Qo=t(` is the configuration class to store the configuration of a `),$e=a("a"),Xo=t("SpeechEncoderDecoderModel"),en=t(`. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs.`),on=i(),W=a("p"),nn=t("Configuration objects inherit from "),Me=a("a"),tn=t("PretrainedConfig"),rn=t(` and can be used to control the model outputs. Read the documentation from `),xe=a("a"),an=t("PretrainedConfig"),sn=t(" for more information."),dn=i(),Ue=a("p"),cn=t("Examples:"),ln=i(),k(ne.$$.fragment),hn=i(),Y=a("div"),k(te.$$.fragment),pn=i(),re=a("p"),mn=t("Instantiate a "),je=a("a"),fn=t("SpeechEncoderDecoderConfig"),un=t(` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),gn=i(),Z=a("div"),k(ae.$$.fragment),_n=i(),V=a("p"),vn=t("Serializes this instance to a Python dictionary. Override the default "),Ge=a("em"),bn=t("to_dict()"),wn=t(" from "),Ye=a("em"),yn=t("PretrainedConfig"),En=t("."),go=i(),B=a("h2"),K=a("a"),Ze=a("span"),k(se.$$.fragment),Sn=i(),Ke=a("span"),kn=t("SpeechEncoderDecoderModel"),_o=i(),h=a("div"),k(de.$$.fragment),Tn=i(),O=a("p"),Dn=t(`This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. 
The encoder is loaded via `),Qe=a("code"),$n=t("from_pretrained()"),Mn=t(` function and the decoder is loaded via `),Xe=a("code"),xn=t("from_pretrained()"),jn=t(` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),Cn=i(),ce=a("p"),Pn=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),ie=a("a"),qn=t("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),zn=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),An=i(),le=a("p"),Ln=t("Additionally, in "),he=a("a"),Fn=t("Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),In=t(` it is shown how leveraging large pretrained speech models for speech translation yields a significant performance improvement.`),Nn=i(),eo=a("p"),Wn=t(`After such an Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Vn=i(),pe=a("p"),Bn=t("This model inherits from "),Ce=a("a"),On=t("PreTrainedModel"),Rn=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hn=i(),me=a("p"),Jn=t("This model is also a PyTorch "),fe=a("a"),Un=t("torch.nn.Module"),Gn=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yn=i(),z=a("p"),Pe=a("a"),Zn=t("SpeechEncoderDecoderModel"),Kn=t(` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth`),oo=a("em"),Qn=t("~transformers.AutoModel.from_pretrained"),Xn=t(` class method for the encoder and :meth`),no=a("em"),et=t("~transformers.AutoModelForCausalLM.from_pretrained"),ot=t(" class method for the decoder."),nt=i(),w=a("div"),k(ue.$$.fragment),tt=i(),R=a("p"),rt=t("The "),qe=a("a"),at=t("SpeechEncoderDecoderModel"),st=t(" forward method, overrides the "),to=a("code"),dt=t("__call__"),ct=t(" special method."),it=i(),k(Q.$$.fragment),lt=i(),ro=a("p"),ht=t("Examples:"),pt=i(),k(ge.$$.fragment),mt=i(),y=a("div"),k(_e.$$.fragment),ft=i(),ao=a("p"),ut=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),gt=i(),H=a("p"),_t=t("The model is set in evaluation mode by default using "),so=a("code"),vt=t("model.eval()"),bt=t(` (Dropout modules are deactivated). 
To train the model, you need to first set it back in training mode with `),co=a("code"),wt=t("model.train()"),yt=t("."),Et=i(),io=a("p"),St=t("Example:"),kt=i(),k(ve.$$.fragment),this.h()},l(n){const p=vr('[data-svelte="svelte-1phssyn"]',document.head);f=s(p,"META",{name:!0,content:!0}),p.forEach(o),j=l(n),g=s(n,"H1",{class:!0});var be=d(g);v=s(be,"A",{id:!0,class:!0,href:!0});var $t=d(v);I=s($t,"SPAN",{});var Mt=d(I);T(S.$$.fragment,Mt),Mt.forEach(o),$t.forEach(o),C=l(be),J=s(be,"SPAN",{});var xt=d(J);Mo=r(xt,"Speech Encoder Decoder Models"),xt.forEach(o),be.forEach(o),ho=l(n),b=s(n,"P",{});var A=d(b);xo=r(A,"The "),ye=s(A,"A",{href:!0});var jt=d(ye);jo=r(jt,"SpeechEncoderDecoderModel"),jt.forEach(o),Co=r(A,` can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder (`),Oe=s(A,"EM",{});var Ct=d(Oe);Po=r(Ct,"e.g."),Ct.forEach(o),qo=l(A),Ee=s(A,"A",{href:!0});var Pt=d(Ee);zo=r(Pt,"Wav2Vec2"),Pt.forEach(o),Ao=r(A,", "),Se=s(A,"A",{href:!0});var qt=d(Se);Lo=r(qt,"Hubert"),qt.forEach(o),Fo=r(A,") and any pretrained autoregressive model as the decoder."),A.forEach(o),po=l(n),P=s(n,"P",{});var ze=d(P);Io=r(ze,`The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech recognition and speech translation has `),Re=s(ze,"EM",{});var zt=d(Re);No=r(zt,"e.g."),zt.forEach(o),Wo=r(ze," been shown in "),X=s(ze,"A",{href:!0,rel:!0});var At=d(X);Vo=r(At,`Large-Scale Self- and Semi-Supervised Learning for Speech Translation`),At.forEach(o),Bo=r(ze,` by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.`),ze.forEach(o),mo=l(n),q=s(n,"P",{});var Ae=d(q);Oo=r(Ae,"An example of how to use a "),ke=s(Ae,"A",{href:!0});var Lt=d(ke);Ro=r(Lt,"SpeechEncoderDecoderModel"),Lt.forEach(o),Ho=r(Ae,` for inference can be seen in `),Te=s(Ae,"A",{href:!0});var Ft=d(Te);Jo=r(Ft,"Speech2Text2"),Ft.forEach(o),Uo=r(Ae,"."),Ae.forEach(o),fo=l(n),N=s(n,"H2",{class:!0});var bo=d(N);U=s(bo,"A",{id:!0,class:!0,href:!0});var It=d(U);He=s(It,"SPAN",{});var Nt=d(He);T(ee.$$.fragment,Nt),Nt.forEach(o),It.forEach(o),Go=l(bo),Je=s(bo,"SPAN",{});var Wt=d(Je);Yo=r(Wt,"SpeechEncoderDecoderConfig"),Wt.forEach(o),bo.forEach(o),uo=l(n),u=s(n,"DIV",{class:!0});var E=d(u);T(oe.$$.fragment,E),Zo=l(E),G=s(E,"P",{});var lo=d(G);De=s(lo,"A",{href:!0});var Vt=d(De);Ko=r(Vt,"SpeechEncoderDecoderConfig"),Vt.forEach(o),Qo=r(lo,` is the configuration class to store the configuration of a `),$e=s(lo,"A",{href:!0});var Bt=d($e);Xo=r(Bt,"SpeechEncoderDecoderModel"),Bt.forEach(o),en=r(lo,`. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs.`),lo.forEach(o),on=l(E),W=s(E,"P",{});var Le=d(W);nn=r(Le,"Configuration objects inherit from "),Me=s(Le,"A",{href:!0});var Ot=d(Me);tn=r(Ot,"PretrainedConfig"),Ot.forEach(o),rn=r(Le,` and can be used to control the model outputs. 
Read the documentation from `),xe=s(Le,"A",{href:!0});var Rt=d(xe);an=r(Rt,"PretrainedConfig"),Rt.forEach(o),sn=r(Le," for more information."),Le.forEach(o),dn=l(E),Ue=s(E,"P",{});var Ht=d(Ue);cn=r(Ht,"Examples:"),Ht.forEach(o),ln=l(E),T(ne.$$.fragment,E),hn=l(E),Y=s(E,"DIV",{class:!0});var wo=d(Y);T(te.$$.fragment,wo),pn=l(wo),re=s(wo,"P",{});var yo=d(re);mn=r(yo,"Instantiate a "),je=s(yo,"A",{href:!0});var Jt=d(je);fn=r(Jt,"SpeechEncoderDecoderConfig"),Jt.forEach(o),un=r(yo,` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),yo.forEach(o),wo.forEach(o),gn=l(E),Z=s(E,"DIV",{class:!0});var Eo=d(Z);T(ae.$$.fragment,Eo),_n=l(Eo),V=s(Eo,"P",{});var Fe=d(V);vn=r(Fe,"Serializes this instance to a Python dictionary. Override the default "),Ge=s(Fe,"EM",{});var Ut=d(Ge);bn=r(Ut,"to_dict()"),Ut.forEach(o),wn=r(Fe," from "),Ye=s(Fe,"EM",{});var Gt=d(Ye);yn=r(Gt,"PretrainedConfig"),Gt.forEach(o),En=r(Fe,"."),Fe.forEach(o),Eo.forEach(o),E.forEach(o),go=l(n),B=s(n,"H2",{class:!0});var So=d(B);K=s(So,"A",{id:!0,class:!0,href:!0});var Yt=d(K);Ze=s(Yt,"SPAN",{});var Zt=d(Ze);T(se.$$.fragment,Zt),Zt.forEach(o),Yt.forEach(o),Sn=l(So),Ke=s(So,"SPAN",{});var Kt=d(Ke);kn=r(Kt,"SpeechEncoderDecoderModel"),Kt.forEach(o),So.forEach(o),_o=l(n),h=s(n,"DIV",{class:!0});var m=d(h);T(de.$$.fragment,m),Tn=l(m),O=s(m,"P",{});var Ie=d(O);Dn=r(Ie,`This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),Qe=s(Ie,"CODE",{});var Qt=d(Qe);$n=r(Qt,"from_pretrained()"),Qt.forEach(o),Mn=r(Ie,` function and the decoder is loaded via `),Xe=s(Ie,"CODE",{});var Xt=d(Xe);xn=r(Xt,"from_pretrained()"),Xt.forEach(o),jn=r(Ie,` function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.`),Ie.forEach(o),Cn=l(m),ce=s(m,"P",{});var ko=d(ce);Pn=r(ko,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),ie=s(ko,"A",{href:!0,rel:!0});var er=d(ie);qn=r(er,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),er.forEach(o),zn=r(ko,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),ko.forEach(o),An=l(m),le=s(m,"P",{});var To=d(le);Ln=r(To,"Additionally, in "),he=s(To,"A",{href:!0,rel:!0});var or=d(he);Fn=r(or,"Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),or.forEach(o),In=r(To,` it is shown how leveraging large pretrained speech models for speech translation yields a significant performance improvement.`),To.forEach(o),Nn=l(m),eo=s(m,"P",{});var nr=d(eo);Wn=r(nr,`After such an Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),nr.forEach(o),Vn=l(m),pe=s(m,"P",{});var Do=d(pe);Bn=r(Do,"This model inherits from "),Ce=s(Do,"A",{href:!0});var tr=d(Ce);On=r(tr,"PreTrainedModel"),tr.forEach(o),Rn=r(Do,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Do.forEach(o),Hn=l(m),me=s(m,"P",{});var $o=d(me);Jn=r($o,"This model is also a PyTorch "),fe=s($o,"A",{href:!0,rel:!0});var rr=d(fe);Un=r(rr,"torch.nn.Module"),rr.forEach(o),Gn=r($o,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$o.forEach(o),Yn=l(m),z=s(m,"P",{});var we=d(z);Pe=s(we,"A",{href:!0});var ar=d(Pe);Zn=r(ar,"SpeechEncoderDecoderModel"),ar.forEach(o),Kn=r(we,` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth`),oo=s(we,"EM",{});var sr=d(oo);Qn=r(sr,"~transformers.AutoModel.from_pretrained"),sr.forEach(o),Xn=r(we,` class method for the encoder and :meth`),no=s(we,"EM",{});var dr=d(no);et=r(dr,"~transformers.AutoModelForCausalLM.from_pretrained"),dr.forEach(o),ot=r(we," class method for the decoder."),we.forEach(o),nt=l(m),w=s(m,"DIV",{class:!0});var L=d(w);T(ue.$$.fragment,L),tt=l(L),R=s(L,"P",{});var Ne=d(R);rt=r(Ne,"The "),qe=s(Ne,"A",{href:!0});var cr=d(qe);at=r(cr,"SpeechEncoderDecoderModel"),cr.forEach(o),st=r(Ne," forward method, overrides the "),to=s(Ne,"CODE",{});var ir=d(to);dt=r(ir,"__call__"),ir.forEach(o),ct=r(Ne," special method."),Ne.forEach(o),it=l(L),T(Q.$$.fragment,L),lt=l(L),ro=s(L,"P",{});var lr=d(ro);ht=r(lr,"Examples:"),lr.forEach(o),pt=l(L),T(ge.$$.fragment,L),L.forEach(o),mt=l(m),y=s(m,"DIV",{class:!0});var F=d(y);T(_e.$$.fragment,F),ft=l(F),ao=s(F,"P",{});var hr=d(ao);ut=r(hr,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),hr.forEach(o),gt=l(F),H=s(F,"P",{});var We=d(H);_t=r(We,"The model is set in evaluation mode by default using "),so=s(We,"CODE",{});var pr=d(so);vt=r(pr,"model.eval()"),pr.forEach(o),bt=r(We,` (Dropout modules are deactivated). 
To train the model, you need to first set it back in training mode with `),co=s(We,"CODE",{});var mr=d(co);wt=r(mr,"model.train()"),mr.forEach(o),yt=r(We,"."),We.forEach(o),Et=l(F),io=s(F,"P",{});var fr=d(io);St=r(fr,"Example:"),fr.forEach(o),kt=l(F),T(ve.$$.fragment,F),F.forEach(o),m.forEach(o),this.h()},h(){c(f,"name","hf:doc:metadata"),c(f,"content",JSON.stringify(Er)),c(v,"id","speech-encoder-decoder-models"),c(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(v,"href","#speech-encoder-decoder-models"),c(g,"class","relative group"),c(ye,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),c(Ee,"href","/docs/transformers/v4.15.0/en/wav2vec2"),c(Se,"href","/docs/transformers/v4.15.0/en/hubert"),c(X,"href","https://arxiv.org/abs/2104.06678"),c(X,"rel","nofollow"),c(ke,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),c(Te,"href","/docs/transformers/v4.15.0/en/speech_to_text_2"),c(U,"id","transformers.SpeechEncoderDecoderConfig"),c(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(U,"href","#transformers.SpeechEncoderDecoderConfig"),c(N,"class","relative group"),c(De,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig"),c($e,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),c(Me,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(xe,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(je,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderConfig"),c(Y,"class","docstring"),c(Z,"class","docstring"),c(u,"class","docstring"),c(K,"id","transformers.SpeechEncoderDecoderModel"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#transformers.SpeechEncoderDecoderModel"),c(B,"class","relative 
group"),c(ie,"href","https://arxiv.org/abs/1907.12461"),c(ie,"rel","nofollow"),c(he,"href","https://arxiv.org/abs/2104.06678"),c(he,"rel","nofollow"),c(Ce,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(fe,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(fe,"rel","nofollow"),c(Pe,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),c(qe,"href","/docs/transformers/v4.15.0/en/model_doc/speechencoderdecoder#transformers.SpeechEncoderDecoderModel"),c(w,"class","docstring"),c(y,"class","docstring"),c(h,"class","docstring")},m(n,p){e(document.head,f),_(n,j,p),_(n,g,p),e(g,v),e(v,I),D(S,I,null),e(g,C),e(g,J),e(J,Mo),_(n,ho,p),_(n,b,p),e(b,xo),e(b,ye),e(ye,jo),e(b,Co),e(b,Oe),e(Oe,Po),e(b,qo),e(b,Ee),e(Ee,zo),e(b,Ao),e(b,Se),e(Se,Lo),e(b,Fo),_(n,po,p),_(n,P,p),e(P,Io),e(P,Re),e(Re,No),e(P,Wo),e(P,X),e(X,Vo),e(P,Bo),_(n,mo,p),_(n,q,p),e(q,Oo),e(q,ke),e(ke,Ro),e(q,Ho),e(q,Te),e(Te,Jo),e(q,Uo),_(n,fo,p),_(n,N,p),e(N,U),e(U,He),D(ee,He,null),e(N,Go),e(N,Je),e(Je,Yo),_(n,uo,p),_(n,u,p),D(oe,u,null),e(u,Zo),e(u,G),e(G,De),e(De,Ko),e(G,Qo),e(G,$e),e($e,Xo),e(G,en),e(u,on),e(u,W),e(W,nn),e(W,Me),e(Me,tn),e(W,rn),e(W,xe),e(xe,an),e(W,sn),e(u,dn),e(u,Ue),e(Ue,cn),e(u,ln),D(ne,u,null),e(u,hn),e(u,Y),D(te,Y,null),e(Y,pn),e(Y,re),e(re,mn),e(re,je),e(je,fn),e(re,un),e(u,gn),e(u,Z),D(ae,Z,null),e(Z,_n),e(Z,V),e(V,vn),e(V,Ge),e(Ge,bn),e(V,wn),e(V,Ye),e(Ye,yn),e(V,En),_(n,go,p),_(n,B,p),e(B,K),e(K,Ze),D(se,Ze,null),e(B,Sn),e(B,Ke),e(Ke,kn),_(n,_o,p),_(n,h,p),D(de,h,null),e(h,Tn),e(h,O),e(O,Dn),e(O,Qe),e(Qe,$n),e(O,Mn),e(O,Xe),e(Xe,xn),e(O,jn),e(h,Cn),e(h,ce),e(ce,Pn),e(ce,ie),e(ie,qn),e(ce,zn),e(h,An),e(h,le),e(le,Ln),e(le,he),e(he,Fn),e(le,In),e(h,Nn),e(h,eo),e(eo,Wn),e(h,Vn),e(h,pe),e(pe,Bn),e(pe,Ce),e(Ce,On),e(pe,Rn),e(h,Hn),e(h,me),e(me,Jn),e(me,fe),e(fe,Un),e(me,Gn),e(h,Yn),e(h,z),e(z,Pe),e(Pe,Zn),e(z,Kn),e(z,oo),e(oo,Qn),e(z,Xn),e(z,no),e(no,et),e(z,ot),e(h,nt),e(h,w),D(ue,w,null),e(w,tt),e(w,R),e(R,rt),e(R,qe),e(qe,at),e(R,st),e(R,to),e(to,dt),e(R,ct),e(w,it),D(Q,w,null),e(w,lt),e(w,ro),e(ro,ht),e(w,pt),D(ge,w,null),e(h,mt),e(h,y),D(_e,y,null),e(y,ft),e(y,ao),e(ao,ut),e(y,gt),e(y,H),e(H,_t),e(H,so),e(so,vt),e(H,bt),e(H,co),e(co,wt),e(H,yt),e(y,Et),e(y,io),e(io,St),e(y,kt),D(ve,y,null),vo=!0},p(n,[p]){const be={};p&2&&(be.$$scope={dirty:p,ctx:n}),Q.$set(be)},i(n){vo||($(S.$$.fragment,n),$(ee.$$.fragment,n),$(oe.$$.fragment,n),$(ne.$$.fragment,n),$(te.$$.fragment,n),$(ae.$$.fragment,n),$(se.$$.fragment,n),$(de.$$.fragment,n),$(ue.$$.fragment,n),$(Q.$$.fragment,n),$(ge.$$.fragment,n),$(_e.$$.fragment,n),$(ve.$$.fragment,n),vo=!0)},o(n){M(S.$$.fragment,n),M(ee.$$.fragment,n),M(oe.$$.fragment,n),M(ne.$$.fragment,n),M(te.$$.fragment,n),M(ae.$$.fragment,n),M(se.$$.fragment,n),M(de.$$.fragment,n),M(ue.$$.fragment,n),M(Q.$$.fragment,n),M(ge.$$.fragment,n),M(_e.$$.fragment,n),M(ve.$$.fragment,n),vo=!1},d(n){o(f),n&&o(j),n&&o(g),x(S),n&&o(ho),n&&o(b),n&&o(po),n&&o(P),n&&o(mo),n&&o(q),n&&o(fo),n&&o(N),x(ee),n&&o(uo),n&&o(u),x(oe),x(ne),x(te),x(ae),n&&o(go),n&&o(B),x(se),n&&o(_o),n&&o(h),x(de),x(ue),x(Q),x(ge),x(_e),x(ve)}}}const Er={local:"speech-encoder-decoder-models",sections:[{local:"transformers.SpeechEncoderDecoderConfig",title:"SpeechEncoderDecoderConfig"},{local:"transformers.SpeechEncoderDecoderModel",title:"SpeechEncoderDecoderModel"}],title:"Speech Encoder Decoder Models"};function Sr(Be,f,j){let{fw:g}=f;return Be.$$set=v=>{"fw"in v&&j(0,g=v.fw)},[g]}class jr extends 
ur{constructor(f){super();gr(this,f,Sr,yr,_r,{fw:0})}}export{jr as default,Er as metadata};
9,970
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/gpt2.mdx-f1491089.js
import{S as bb,i as kb,s as yb,e as n,k as l,w as v,t as a,L as wb,c as s,d as t,m as d,a as i,x as b,h as r,b as c,J as e,g as h,y as k,q as y,o as w,B as P}from"../../chunks/vendor-b1433968.js";import{T as Le}from"../../chunks/Tip-c3840994.js";import{D as H}from"../../chunks/Docstring-ff504c58.js";import{C as Me}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Pb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("When used with "),f=n("code"),T=a("is_split_into_words=True"),$=a(`, this tokenizer will add a space before each word (even the first one).`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"When used with "),f=s(g,"CODE",{});var x=i(f);T=r(x,"is_split_into_words=True"),x.forEach(t),$=r(g,`, this tokenizer will add a space before each word (even the first one).`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function $b(q){let p,M,f,T,$,_,g,x;return{c(){p=n("p"),M=a("When used with "),f=n("code"),T=a("is_split_into_words=True"),$=a(`, this tokenizer needs to be instantiated with `),_=n("code"),g=a("add_prefix_space=True"),x=a(".")},l(Q){p=s(Q,"P",{});var C=i(p);M=r(C,"When used with "),f=s(C,"CODE",{});var z=i(f);T=r(z,"is_split_into_words=True"),z.forEach(t),$=r(C,`, this tokenizer needs to be instantiated with `),_=s(C,"CODE",{});var R=i(_);g=r(R,"add_prefix_space=True"),R.forEach(t),x=r(C,"."),C.forEach(t)},m(Q,C){h(Q,p,C),e(p,M),e(p,f),e(f,T),e(p,$),e(p,_),e(_,g),e(p,x)},d(Q){Q&&t(p)}}}function Mb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Gb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function xb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function zb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Fb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function jb(q){let p,M,f,T,$,_,g,x,Q,C,z,R,S,Z,ue,A,me,ce,W,L,ee,te,F,E,se,V,pe,ae,O,fe,he,j,ge,B,U,re,J,_e,oe,N,ie,K,Te;return{c(){p=n("p"),M=a("TF 2.0 models accepts two formats as inputs:"),f=l(),T=n("ul"),$=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),x=n("li"),Q=a("having all inputs as a list, tuple or dict in the first positional arguments."),C=l(),z=n("p"),R=a("This second option is useful when using "),S=n("code"),Z=a("tf.keras.Model.fit"),ue=a(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),me=a("model(inputs)"),ce=a("."),W=l(),L=n("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),te=l(),F=n("ul"),E=n("li"),se=a("a single Tensor with "),V=n("code"),pe=a("input_ids"),ae=a(" only and nothing else: "),O=n("code"),fe=a("model(inputs_ids)"),he=l(),j=n("li"),ge=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),U=a("model([input_ids, attention_mask])"),re=a(" or "),J=n("code"),_e=a("model([input_ids, attention_mask, token_type_ids])"),oe=l(),N=n("li"),ie=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=n("code"),Te=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var G=i(p);M=r(G,"TF 2.0 models accepts two formats as inputs:"),G.forEach(t),f=d(u),T=s(u,"UL",{});var ne=i(T);$=s(ne,"LI",{});var Fe=i($);_=r(Fe,"having all inputs as keyword arguments (like PyTorch models), 
or"),Fe.forEach(t),g=d(ne),x=s(ne,"LI",{});var Se=i(x);Q=r(Se,"having all inputs as a list, tuple or dict in the first positional arguments."),Se.forEach(t),ne.forEach(t),C=d(u),z=s(u,"P",{});var I=i(z);R=r(I,"This second option is useful when using "),S=s(I,"CODE",{});var je=i(S);Z=r(je,"tf.keras.Model.fit"),je.forEach(t),ue=r(I,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(I,"CODE",{});var Ae=i(A);me=r(Ae,"model(inputs)"),Ae.forEach(t),ce=r(I,"."),I.forEach(t),W=d(u),L=s(u,"P",{});var Oe=i(L);ee=r(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),te=d(u),F=s(u,"UL",{});var D=i(F);E=s(D,"LI",{});var X=i(E);se=r(X,"a single Tensor with "),V=s(X,"CODE",{});var We=i(V);pe=r(We,"input_ids"),We.forEach(t),ae=r(X," only and nothing else: "),O=s(X,"CODE",{});var Ee=i(O);fe=r(Ee,"model(inputs_ids)"),Ee.forEach(t),X.forEach(t),he=d(D),j=s(D,"LI",{});var Y=i(j);ge=r(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(Y,"CODE",{});var Be=i(B);U=r(Be,"model([input_ids, attention_mask])"),Be.forEach(t),re=r(Y," or "),J=s(Y,"CODE",{});var qe=i(J);_e=r(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(t),Y.forEach(t),oe=d(D),N=s(D,"LI",{});var ve=i(N);ie=r(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=s(ve,"CODE",{});var Ue=i(K);Te=r(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),ve.forEach(t),D.forEach(t)},m(u,G){h(u,p,G),e(p,M),h(u,f,G),h(u,T,G),e(T,$),e($,_),e(T,g),e(T,x),e(x,Q),h(u,C,G),h(u,z,G),e(z,R),e(z,S),e(S,Z),e(z,ue),e(z,A),e(A,me),e(z,ce),h(u,W,G),h(u,L,G),e(L,ee),h(u,te,G),h(u,F,G),e(F,E),e(E,se),e(E,V),e(V,pe),e(E,ae),e(E,O),e(O,fe),e(F,he),e(F,j),e(j,ge),e(j,B),e(B,U),e(j,re),e(j,J),e(J,_e),e(F,oe),e(F,N),e(N,ie),e(N,K),e(K,Te)},d(u){u&&t(p),u&&t(f),u&&t(T),u&&t(C),u&&t(z),u&&t(W),u&&t(L),u&&t(te),u&&t(F)}}}function Eb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function qb(q){let p,M,f,T,$,_,g,x,Q,C,z,R,S,Z,ue,A,me,ce,W,L,ee,te,F,E,se,V,pe,ae,O,fe,he,j,ge,B,U,re,J,_e,oe,N,ie,K,Te;return{c(){p=n("p"),M=a("TF 2.0 models accepts two formats as inputs:"),f=l(),T=n("ul"),$=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),x=n("li"),Q=a("having all inputs as a list, tuple or dict in the first positional arguments."),C=l(),z=n("p"),R=a("This second option is useful when using "),S=n("code"),Z=a("tf.keras.Model.fit"),ue=a(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),me=a("model(inputs)"),ce=a("."),W=l(),L=n("p"),ee=a(`If you choose this 
second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),te=l(),F=n("ul"),E=n("li"),se=a("a single Tensor with "),V=n("code"),pe=a("input_ids"),ae=a(" only and nothing else: "),O=n("code"),fe=a("model(inputs_ids)"),he=l(),j=n("li"),ge=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),U=a("model([input_ids, attention_mask])"),re=a(" or "),J=n("code"),_e=a("model([input_ids, attention_mask, token_type_ids])"),oe=l(),N=n("li"),ie=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=n("code"),Te=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var G=i(p);M=r(G,"TF 2.0 models accepts two formats as inputs:"),G.forEach(t),f=d(u),T=s(u,"UL",{});var ne=i(T);$=s(ne,"LI",{});var Fe=i($);_=r(Fe,"having all inputs as keyword arguments (like PyTorch models), or"),Fe.forEach(t),g=d(ne),x=s(ne,"LI",{});var Se=i(x);Q=r(Se,"having all inputs as a list, tuple or dict in the first positional arguments."),Se.forEach(t),ne.forEach(t),C=d(u),z=s(u,"P",{});var I=i(z);R=r(I,"This second option is useful when using "),S=s(I,"CODE",{});var je=i(S);Z=r(je,"tf.keras.Model.fit"),je.forEach(t),ue=r(I,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(I,"CODE",{});var Ae=i(A);me=r(Ae,"model(inputs)"),Ae.forEach(t),ce=r(I,"."),I.forEach(t),W=d(u),L=s(u,"P",{});var Oe=i(L);ee=r(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),te=d(u),F=s(u,"UL",{});var D=i(F);E=s(D,"LI",{});var X=i(E);se=r(X,"a single Tensor with "),V=s(X,"CODE",{});var We=i(V);pe=r(We,"input_ids"),We.forEach(t),ae=r(X," only and nothing else: "),O=s(X,"CODE",{});var Ee=i(O);fe=r(Ee,"model(inputs_ids)"),Ee.forEach(t),X.forEach(t),he=d(D),j=s(D,"LI",{});var Y=i(j);ge=r(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(Y,"CODE",{});var Be=i(B);U=r(Be,"model([input_ids, attention_mask])"),Be.forEach(t),re=r(Y," or "),J=s(Y,"CODE",{});var qe=i(J);_e=r(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(t),Y.forEach(t),oe=d(D),N=s(D,"LI",{});var ve=i(N);ie=r(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=s(ve,"CODE",{});var Ue=i(K);Te=r(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),ve.forEach(t),D.forEach(t)},m(u,G){h(u,p,G),e(p,M),h(u,f,G),h(u,T,G),e(T,$),e($,_),e(T,g),e(T,x),e(x,Q),h(u,C,G),h(u,z,G),e(z,R),e(z,S),e(S,Z),e(z,ue),e(z,A),e(A,me),e(z,ce),h(u,W,G),h(u,L,G),e(L,ee),h(u,te,G),h(u,F,G),e(F,E),e(E,se),e(E,V),e(V,pe),e(E,ae),e(E,O),e(O,fe),e(F,he),e(F,j),e(j,ge),e(j,B),e(B,U),e(j,re),e(j,J),e(J,_e),e(F,oe),e(F,N),e(N,ie),e(N,K),e(K,Te)},d(u){u&&t(p),u&&t(f),u&&t(T),u&&t(C),u&&t(z),u&&t(W),u&&t(L),u&&t(te),u&&t(F)}}}function Cb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call 
the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Db(q){let p,M,f,T,$,_,g,x,Q,C,z,R,S,Z,ue,A,me,ce,W,L,ee,te,F,E,se,V,pe,ae,O,fe,he,j,ge,B,U,re,J,_e,oe,N,ie,K,Te;return{c(){p=n("p"),M=a("TF 2.0 models accepts two formats as inputs:"),f=l(),T=n("ul"),$=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),x=n("li"),Q=a("having all inputs as a list, tuple or dict in the first positional arguments."),C=l(),z=n("p"),R=a("This second option is useful when using "),S=n("code"),Z=a("tf.keras.Model.fit"),ue=a(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),me=a("model(inputs)"),ce=a("."),W=l(),L=n("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),te=l(),F=n("ul"),E=n("li"),se=a("a single Tensor with "),V=n("code"),pe=a("input_ids"),ae=a(" only and nothing else: "),O=n("code"),fe=a("model(inputs_ids)"),he=l(),j=n("li"),ge=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),U=a("model([input_ids, attention_mask])"),re=a(" or "),J=n("code"),_e=a("model([input_ids, attention_mask, token_type_ids])"),oe=l(),N=n("li"),ie=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=n("code"),Te=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var G=i(p);M=r(G,"TF 2.0 models accepts two formats as inputs:"),G.forEach(t),f=d(u),T=s(u,"UL",{});var ne=i(T);$=s(ne,"LI",{});var Fe=i($);_=r(Fe,"having all inputs as keyword arguments (like PyTorch models), or"),Fe.forEach(t),g=d(ne),x=s(ne,"LI",{});var Se=i(x);Q=r(Se,"having all inputs as a list, tuple or dict in the first positional arguments."),Se.forEach(t),ne.forEach(t),C=d(u),z=s(u,"P",{});var I=i(z);R=r(I,"This second option is useful when using "),S=s(I,"CODE",{});var je=i(S);Z=r(je,"tf.keras.Model.fit"),je.forEach(t),ue=r(I,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(I,"CODE",{});var Ae=i(A);me=r(Ae,"model(inputs)"),Ae.forEach(t),ce=r(I,"."),I.forEach(t),W=d(u),L=s(u,"P",{});var Oe=i(L);ee=r(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),te=d(u),F=s(u,"UL",{});var D=i(F);E=s(D,"LI",{});var X=i(E);se=r(X,"a single Tensor with "),V=s(X,"CODE",{});var We=i(V);pe=r(We,"input_ids"),We.forEach(t),ae=r(X," only and nothing else: "),O=s(X,"CODE",{});var Ee=i(O);fe=r(Ee,"model(inputs_ids)"),Ee.forEach(t),X.forEach(t),he=d(D),j=s(D,"LI",{});var Y=i(j);ge=r(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(Y,"CODE",{});var Be=i(B);U=r(Be,"model([input_ids, attention_mask])"),Be.forEach(t),re=r(Y," or "),J=s(Y,"CODE",{});var qe=i(J);_e=r(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(t),Y.forEach(t),oe=d(D),N=s(D,"LI",{});var ve=i(N);ie=r(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=s(ve,"CODE",{});var 
Ue=i(K);Te=r(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),ve.forEach(t),D.forEach(t)},m(u,G){h(u,p,G),e(p,M),h(u,f,G),h(u,T,G),e(T,$),e($,_),e(T,g),e(T,x),e(x,Q),h(u,C,G),h(u,z,G),e(z,R),e(z,S),e(S,Z),e(z,ue),e(z,A),e(A,me),e(z,ce),h(u,W,G),h(u,L,G),e(L,ee),h(u,te,G),h(u,F,G),e(F,E),e(E,se),e(E,V),e(V,pe),e(E,ae),e(E,O),e(O,fe),e(F,he),e(F,j),e(j,ge),e(j,B),e(B,U),e(j,re),e(j,J),e(J,_e),e(F,oe),e(F,N),e(N,ie),e(N,K),e(K,Te)},d(u){u&&t(p),u&&t(f),u&&t(T),u&&t(C),u&&t(z),u&&t(W),u&&t(L),u&&t(te),u&&t(F)}}}function Hb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Lb(q){let p,M,f,T,$,_,g,x,Q,C,z,R,S,Z,ue,A,me,ce,W,L,ee,te,F,E,se,V,pe,ae,O,fe,he,j,ge,B,U,re,J,_e,oe,N,ie,K,Te;return{c(){p=n("p"),M=a("TF 2.0 models accepts two formats as inputs:"),f=l(),T=n("ul"),$=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),g=l(),x=n("li"),Q=a("having all inputs as a list, tuple or dict in the first positional arguments."),C=l(),z=n("p"),R=a("This second option is useful when using "),S=n("code"),Z=a("tf.keras.Model.fit"),ue=a(` method which currently requires having all the tensors in the first argument of the model call function: `),A=n("code"),me=a("model(inputs)"),ce=a("."),W=l(),L=n("p"),ee=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),te=l(),F=n("ul"),E=n("li"),se=a("a single Tensor with "),V=n("code"),pe=a("input_ids"),ae=a(" only and nothing else: "),O=n("code"),fe=a("model(inputs_ids)"),he=l(),j=n("li"),ge=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=n("code"),U=a("model([input_ids, attention_mask])"),re=a(" or "),J=n("code"),_e=a("model([input_ids, attention_mask, token_type_ids])"),oe=l(),N=n("li"),ie=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=n("code"),Te=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var G=i(p);M=r(G,"TF 2.0 models accepts two formats as inputs:"),G.forEach(t),f=d(u),T=s(u,"UL",{});var ne=i(T);$=s(ne,"LI",{});var Fe=i($);_=r(Fe,"having all inputs as keyword arguments (like PyTorch models), or"),Fe.forEach(t),g=d(ne),x=s(ne,"LI",{});var Se=i(x);Q=r(Se,"having all inputs as a list, tuple or dict in the first positional arguments."),Se.forEach(t),ne.forEach(t),C=d(u),z=s(u,"P",{});var I=i(z);R=r(I,"This second option is useful when using "),S=s(I,"CODE",{});var je=i(S);Z=r(je,"tf.keras.Model.fit"),je.forEach(t),ue=r(I,` method which currently requires having all the tensors in the first argument of the model call function: `),A=s(I,"CODE",{});var 
Ae=i(A);me=r(Ae,"model(inputs)"),Ae.forEach(t),ce=r(I,"."),I.forEach(t),W=d(u),L=s(u,"P",{});var Oe=i(L);ee=r(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),te=d(u),F=s(u,"UL",{});var D=i(F);E=s(D,"LI",{});var X=i(E);se=r(X,"a single Tensor with "),V=s(X,"CODE",{});var We=i(V);pe=r(We,"input_ids"),We.forEach(t),ae=r(X," only and nothing else: "),O=s(X,"CODE",{});var Ee=i(O);fe=r(Ee,"model(inputs_ids)"),Ee.forEach(t),X.forEach(t),he=d(D),j=s(D,"LI",{});var Y=i(j);ge=r(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(Y,"CODE",{});var Be=i(B);U=r(Be,"model([input_ids, attention_mask])"),Be.forEach(t),re=r(Y," or "),J=s(Y,"CODE",{});var qe=i(J);_e=r(qe,"model([input_ids, attention_mask, token_type_ids])"),qe.forEach(t),Y.forEach(t),oe=d(D),N=s(D,"LI",{});var ve=i(N);ie=r(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),K=s(ve,"CODE",{});var Ue=i(K);Te=r(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),ve.forEach(t),D.forEach(t)},m(u,G){h(u,p,G),e(p,M),h(u,f,G),h(u,T,G),e(T,$),e($,_),e(T,g),e(T,x),e(x,Q),h(u,C,G),h(u,z,G),e(z,R),e(z,S),e(S,Z),e(z,ue),e(z,A),e(A,me),e(z,ce),h(u,W,G),h(u,L,G),e(L,ee),h(u,te,G),h(u,F,G),e(F,E),e(E,se),e(E,V),e(V,pe),e(E,ae),e(E,O),e(O,fe),e(F,he),e(F,j),e(j,ge),e(j,B),e(B,U),e(j,re),e(j,J),e(J,_e),e(F,oe),e(F,N),e(N,ie),e(N,K),e(K,Te)},d(u){u&&t(p),u&&t(f),u&&t(T),u&&t(C),u&&t(z),u&&t(W),u&&t(L),u&&t(te),u&&t(F)}}}function Nb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Ib(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Sb(q){let p,M,f,T,$;return{c(){p=n("p"),M=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var g=i(p);M=r(g,"Although 
the recipe for forward pass needs to be defined within this function, one should call the "),f=s(g,"CODE",{});var x=i(f);T=r(x,"Module"),x.forEach(t),$=r(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){h(_,p,g),e(p,M),e(p,f),e(f,T),e(p,$)},d(_){_&&t(p)}}}function Ab(q){let p,M,f,T,$,_,g,x,Q,C,z,R,S,Z,ue,A,me,ce,W,L,ee,te,F,E,se,V,pe,ae,O,fe,he,j,ge,B,U,re,J,_e,oe,N,ie,K,Te,u,G,ne,Fe,Se,I,je,Ae,Oe,D,X,We,Ee,Y,Be,qe,ve,Ue,wa,sc,ac,mr,rc,ic,Pa,lc,dc,cc,kt,pc,fr,hc,uc,gr,mc,fc,dn,gc,_c,Fl,Bt,cn,Tc,vc,_r,bc,kc,jl,Mt,yc,pn,wc,Pc,hn,$c,Mc,El,Ut,wo,Tr,un,Gc,vr,xc,ql,Re,mn,zc,yt,Fc,$a,jc,Ec,Ma,qc,Cc,fn,Dc,Hc,Lc,Rt,Nc,Ga,Ic,Sc,xa,Ac,Oc,Wc,br,Bc,Uc,gn,Cl,Vt,Po,kr,_n,Rc,yr,Vc,Dl,le,Tn,Jc,wr,Kc,Xc,Pr,Yc,Qc,vn,Zc,bn,ep,$r,tp,op,np,$o,sp,kn,ap,za,rp,ip,lp,Mr,Hl,Jt,Mo,Gr,yn,dp,xr,cp,Ll,be,wn,pp,Pn,hp,zr,up,mp,fp,Fr,gp,_p,$n,Tp,Mn,vp,jr,bp,kp,yp,Go,wp,Gn,Pp,Fa,$p,Mp,Nl,Kt,xo,Er,xn,Gp,qr,xp,Il,Xt,zn,zp,Cr,Fp,Sl,Yt,Fn,jp,Dr,Ep,Al,Qt,zo,Hr,jn,qp,Lr,Cp,Ol,ke,En,Dp,Nr,Hp,Lp,qn,Np,ja,Ip,Sp,Ap,Cn,Op,Dn,Wp,Bp,Up,Ke,Hn,Rp,Zt,Vp,Ea,Jp,Kp,Ir,Xp,Yp,Qp,Fo,Zp,Sr,eh,th,Ln,oh,Xe,Nn,nh,Ar,sh,ah,Or,rh,ih,Wr,lh,dh,In,ch,gt,Sn,ph,Br,hh,uh,Ur,mh,fh,An,Wl,eo,jo,Rr,On,gh,Vr,_h,Bl,ye,Wn,Th,Jr,vh,bh,Bn,kh,qa,yh,wh,Ph,Un,$h,Rn,Mh,Gh,xh,Ye,Vn,zh,to,Fh,Ca,jh,Eh,Kr,qh,Ch,Dh,Eo,Hh,Xr,Lh,Nh,Jn,Ih,Qe,Kn,Sh,Yr,Ah,Oh,Qr,Wh,Bh,Zr,Uh,Rh,Xn,Vh,_t,Yn,Jh,ei,Kh,Xh,ti,Yh,Qh,Qn,Ul,oo,qo,oi,Zn,Zh,ni,eu,Rl,Ve,es,tu,si,ou,nu,ts,su,Da,au,ru,iu,os,lu,ns,du,cu,pu,Ze,ss,hu,no,uu,Ha,mu,fu,ai,gu,_u,Tu,Co,vu,ri,bu,ku,as,Vl,so,Do,ii,rs,yu,li,wu,Jl,we,is,Pu,di,$u,Mu,La,Na,Gu,xu,zu,mt,Fu,ci,ju,Eu,pi,qu,Cu,hi,Du,Hu,ui,Lu,Nu,Iu,ls,Su,Ia,Au,Ou,Wu,ds,Bu,cs,Uu,Ru,Vu,Ge,ps,Ju,ao,Ku,Sa,Xu,Yu,mi,Qu,Zu,em,Ho,tm,fi,om,nm,hs,sm,gi,am,rm,us,Kl,ro,Lo,_i,ms,im,Ti,lm,Xl,Je,fs,dm,vi,cm,pm,gs,hm,Aa,um,mm,fm,_s,gm,Ts,_m,Tm,vm,et,vs,bm,io,km,Oa,ym,wm,bi,Pm,$m,Mm,No,Gm,ki,xm,zm,bs,Yl,lo,Io,yi,ks,Fm,wi,jm,Ql,Ce,ys,Em,Pi,qm,Cm,ws,Dm,Wa,Hm,Lm,Nm,Ps,Im,$s,Sm,Am,Om,So,Wm,tt,Ms,Bm,co,Um,Ba,Rm,Vm,$i,Jm,Km,Xm,Ao,Ym,Mi,Qm,Zm,Gs,Zl,po,Oo,Gi,xs,ef,xi,tf,ed,De,zs,of,zi,nf,sf,Fs,af,Ua,rf,lf,df,js,cf,Es,pf,hf,uf,Wo,mf,ot,qs,ff,ho,gf,Ra,_f,Tf,Fi,vf,bf,kf,Bo,yf,ji,wf,Pf,Cs,td,uo,Uo,Ei,Ds,$f,qi,Mf,od,He,Hs,Gf,Ci,xf,zf,Ls,Ff,Va,jf,Ef,qf,Ns,Cf,Is,Df,Hf,Lf,Ro,Nf,nt,Ss,If,mo,Sf,Ja,Af,Of,Di,Wf,Bf,Uf,Vo,Rf,Hi,Vf,Jf,As,nd,fo,Jo,Li,Os,Kf,Ni,Xf,sd,de,Ws,Yf,Ii,Qf,Zf,Ka,Xa,eg,tg,og,ft,ng,Si,sg,ag,Ai,rg,ig,Oi,lg,dg,Wi,cg,pg,hg,Bs,ug,Ya,mg,fg,gg,Us,_g,Rs,Tg,vg,bg,Ko,kg,st,Vs,yg,go,wg,Qa,Pg,$g,Bi,Mg,Gg,xg,Xo,zg,Ui,Fg,jg,Js,ad,_o,Yo,Ri,Ks,Eg,Vi,qg,rd,To,Xs,Cg,Ji,Dg,id,vo,Qo,Ki,Ys,Hg,Xi,Lg,ld,Pe,Qs,Ng,Yi,Ig,Sg,Zs,Ag,Za,Og,Wg,Bg,ea,Ug,ta,Rg,Vg,Jg,Qi,Kg,Xg,wt,Zi,oa,Yg,Qg,el,na,Zg,e_,tl,sa,t_,o_,ol,aa,n_,s_,at,ra,a_,bo,r_,nl,i_,l_,sl,d_,c_,p_,Zo,h_,al,u_,m_,ia,dd,ko,en,rl,la,f_,il,g_,cd,$e,da,__,ll,T_,v_,ca,b_,er,k_,y_,w_,pa,P_,ha,$_,M_,G_,dl,x_,z_,Pt,cl,ua,F_,j_,pl,ma,E_,q_,hl,fa,C_,D_,ul,ga,H_,L_,rt,_a,N_,yo,I_,ml,S_,A_,fl,O_,W_,B_,tn,U_,gl,R_,V_,Ta,pd;return _=new ze({}),Z=new ze({}),un=new ze({}),mn=new H({props:{name:"class transformers.GPT2Config",anchor:"transformers.GPT2Config",parameters:[{name:"vocab_size",val:" = 50257"},{name:"n_positions",val:" = 1024"},{name:"n_embd",val:" = 768"},{name:"n_layer",val:" = 12"},{name:"n_head",val:" = 12"},{name:"n_inner",val:" = None"},{name:"activation_function",val:" = 'gelu_new'"},{name:"resid_pdrop",val:" = 0.1"},{name:"embd_pdrop",val:" = 0.1"},{name:"attn_pdrop",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 
1e-05"},{name:"initializer_range",val:" = 0.02"},{name:"summary_type",val:" = 'cls_index'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = None"},{name:"summary_proj_to_labels",val:" = True"},{name:"summary_first_dropout",val:" = 0.1"},{name:"scale_attn_weights",val:" = True"},{name:"use_cache",val:" = True"},{name:"bos_token_id",val:" = 50256"},{name:"eos_token_id",val:" = 50256"},{name:"scale_attn_by_inverse_layer_idx",val:" = False"},{name:"reorder_and_upcast_attn",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/configuration_gpt2.py#L38",parametersDescription:[{anchor:"transformers.GPT2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50257) &#x2014; Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Model">GPT2Model</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2Model">TFGPT2Model</a>.`,name:"vocab_size"},{anchor:"transformers.GPT2Config.n_positions",description:`<strong>n_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"n_positions"},{anchor:"transformers.GPT2Config.n_embd",description:`<strong>n_embd</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the embeddings and hidden states.`,name:"n_embd"},{anchor:"transformers.GPT2Config.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.GPT2Config.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.GPT2Config.n_inner",description:`<strong>n_inner</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; Dimensionality of the inner feed-forward layers. 
<code>None</code> will set it to 4 times n_embd`,name:"n_inner"},{anchor:"transformers.GPT2Config.activation_function",description:`<strong>activation_function</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; Activation function, to be selected in the list <code>[&quot;relu&quot;, &quot;silu&quot;, &quot;gelu&quot;, &quot;tanh&quot;, &quot;gelu_new&quot;]</code>.`,name:"activation_function"},{anchor:"transformers.GPT2Config.resid_pdrop",description:`<strong>resid_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"resid_pdrop"},{anchor:"transformers.GPT2Config.embd_pdrop",description:`<strong>embd_pdrop</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the embeddings.`,name:"embd_pdrop"},{anchor:"transformers.GPT2Config.attn_pdrop",description:`<strong>attn_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention.`,name:"attn_pdrop"},{anchor:"transformers.GPT2Config.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon to use in the layer normalization layers.`,name:"layer_norm_epsilon"},{anchor:"transformers.GPT2Config.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.GPT2Config.summary_type",description:`<strong>summary_type</strong> (<code>string</code>, <em>optional</em>, defaults to <code>&quot;cls_index&quot;</code>) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2DoubleHeadsModel">GPT2DoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2DoubleHeadsModel">TFGPT2DoubleHeadsModel</a>.</p> <p>Has to be one of the following options:</p> <ul> <li><code>&quot;last&quot;</code>: Take the last token hidden state (like XLNet).</li> <li><code>&quot;first&quot;</code>: Take the first token hidden state (like BERT).</li> <li><code>&quot;mean&quot;</code>: Take the mean of all tokens hidden states.</li> <li><code>&quot;cls_index&quot;</code>: Supply a Tensor of classification token position (like GPT/GPT-2).</li> <li><code>&quot;attn&quot;</code>: Not implemented now, use multi-head attention.</li> </ul>`,name:"summary_type"},{anchor:"transformers.GPT2Config.summary_use_proj",description:`<strong>summary_use_proj</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2DoubleHeadsModel">GPT2DoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2DoubleHeadsModel">TFGPT2DoubleHeadsModel</a>.</p> <p>Whether or not to add a projection after the vector extraction.`,name:"summary_use_proj"},{anchor:"transformers.GPT2Config.summary_activation",description:`<strong>summary_activation</strong> (<code>str</code>, <em>optional</em>) &#x2014; Argument used when doing sequence summary. 
Used in for the multiple choice head in <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2DoubleHeadsModel">GPT2DoubleHeadsModel</a>.</p> <p>Pass <code>&quot;tanh&quot;</code> for a tanh activation to the output, any other value will result in no activation.`,name:"summary_activation"},{anchor:"transformers.GPT2Config.summary_proj_to_labels",description:`<strong>summary_proj_to_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2DoubleHeadsModel">GPT2DoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2DoubleHeadsModel">TFGPT2DoubleHeadsModel</a>.</p> <p>Whether the projection outputs should have <code>config.num_labels</code> or <code>config.hidden_size</code> classes.`,name:"summary_proj_to_labels"},{anchor:"transformers.GPT2Config.summary_first_dropout",description:`<strong>summary_first_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Argument used when doing sequence summary, used in the models <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2DoubleHeadsModel">GPT2DoubleHeadsModel</a> and <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2DoubleHeadsModel">TFGPT2DoubleHeadsModel</a>.</p> <p>The dropout ratio to be used after the projection and activation.`,name:"summary_first_dropout"},{anchor:"transformers.GPT2Config.scale_attn_weights",description:`<strong>scale_attn_weights</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Scale attention weights by dividing by sqrt(hidden_size)..`,name:"scale_attn_weights"},{anchor:"transformers.GPT2Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.GPT2Config.scale_attn_by_inverse_layer_idx",description:`<strong>scale_attn_by_inverse_layer_idx</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to additionally scale attention weights by <code>1 / layer_idx + 1</code>.`,name:"scale_attn_by_inverse_layer_idx"},{anchor:"transformers.GPT2Config.reorder_and_upcast_attn",description:`<strong>reorder_and_upcast_attn</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention dot-product/softmax to float() when training with mixed precision.`,name:"reorder_and_upcast_attn"}]}}),gn=new Me({props:{code:`from transformers import GPT2Model, GPT2Config # Initializing a GPT2 configuration configuration = GPT2Config() # Initializing a model from the configuration model = GPT2Model(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Model, GPT2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a GPT2 configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = GPT2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the 
configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2Model(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),_n=new ze({}),Tn=new H({props:{name:"class transformers.GPT2Tokenizer",anchor:"transformers.GPT2Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|endoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/tokenization_gpt2.py#L104",parametersDescription:[{anchor:"transformers.GPT2Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.GPT2Tokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.GPT2Tokenizer.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.GPT2Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.GPT2Tokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The beginning of sequence token.`,name:"bos_token"},{anchor:"transformers.GPT2Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.GPT2Tokenizer.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. 
(GPT2 tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"}]}}),vn=new Me({props:{code:`from transformers import GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer("Hello world")['input_ids'] tokenizer(" Hello world")['input_ids'],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [15496, 995] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [18435, 995]`}}),$o=new Le({props:{$$slots:{default:[Pb]},$$scope:{ctx:q}}}),yn=new ze({}),wn=new H({props:{name:"class transformers.GPT2TokenizerFast",anchor:"transformers.GPT2TokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|endoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/tokenization_gpt2_fast.py#L70",parametersDescription:[{anchor:"transformers.GPT2TokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.GPT2TokenizerFast.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.GPT2TokenizerFast.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.GPT2TokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.GPT2TokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The beginning of sequence token.`,name:"bos_token"},{anchor:"transformers.GPT2TokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.GPT2TokenizerFast.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. 
(GPT2 tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"},{anchor:"transformers.GPT2TokenizerFast.trim_offsets",description:`<strong>trim_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the post-processing step should trim offsets to avoid including whitespaces.`,name:"trim_offsets"}]}}),$n=new Me({props:{code:`from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") tokenizer("Hello world")['input_ids'] tokenizer(" Hello world")['input_ids'],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2TokenizerFast</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = GPT2TokenizerFast.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [15496, 995] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [18435, 995]`}}),Go=new Le({props:{$$slots:{default:[$b]},$$scope:{ctx:q}}}),xn=new ze({}),zn=new H({props:{name:"class transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput",anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"mc_loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"mc_logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L492",parametersDescription:[{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.mc_loss",description:`<strong>mc_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>mc_labels</code> is provided) &#x2014; Multiple choice classification loss.`,name:"mc_loss"},{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.mc_logits",description:`<strong>mc_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; Prediction scores of the multiple choice classification head (scores 
for each choice before SoftMax).`,name:"mc_logits"},{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of length <code>config.n_layers</code>, containing tuples of tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>GPT2Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Fn=new H({props:{name:"class transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput",anchor:"transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput",parameters:[{name:"logits",val:": Tensor = None"},{name:"mc_logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L588",parametersDescription:[{anchor:"transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput.mc_logits",description:`<strong>mc_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).`,name:"mc_logits"},{anchor:"transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, 
<em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),jn=new ze({}),En=new H({props:{name:"class transformers.GPT2Model",anchor:"transformers.GPT2Model",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L665",parametersDescription:[{anchor:"transformers.GPT2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Hn=new H({props:{name:"forward",anchor:"transformers.GPT2Model.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L734",parametersDescription:[{anchor:"transformers.GPT2Model.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPT2Model.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPT2Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPT2Model.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPT2Model.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPT2Model.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPT2Model.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPT2Model.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPT2Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPT2Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPT2Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for 
each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fo=new Le({props:{$$slots:{default:[Mb]},$$scope:{ctx:q}}}),Ln=new Me({props:{code:`from transformers import GPT2Tokenizer, GPT2Model import torch tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2Model.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Nn=new H({props:{name:"parallelize",anchor:"transformers.GPT2Model.parallelize",parameters:[{name:"device_map",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L688",parametersDescription:[{anchor:"transformers.GPT2Model.parallelize.device_map",description:`<strong>device_map</strong> (<code>Dict[int, list]</code>, optional, defaults to None) &#x2014; A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. 
For reference, the gpt2 models have the following number of attention modules:</p> <ul> <li>gpt2: 12</li> <li>gpt2-medium: 24</li> <li>gpt2-large: 36</li> <li>gpt2-xl: 48</li> </ul>`,name:"device_map"}]}}),In=new Me({props:{code:`# Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: model = GPT2LMHeadModel.from_pretrained('gpt2-xl') device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8], 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]} model.parallelize(device_map),`,highlighted:`<span class="hljs-comment"># Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:</span> model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2-xl&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>, <span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">22</span>, <span class="hljs-number">23</span>, <span class="hljs-number">24</span>, <span class="hljs-number">25</span>, <span class="hljs-number">26</span>, <span class="hljs-number">27</span>, <span class="hljs-number">28</span>, <span class="hljs-number">29</span>, <span class="hljs-number">30</span>, <span class="hljs-number">31</span>, <span class="hljs-number">32</span>, <span class="hljs-number">33</span>, <span class="hljs-number">34</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">35</span>, <span class="hljs-number">36</span>, <span class="hljs-number">37</span>, <span class="hljs-number">38</span>, <span class="hljs-number">39</span>, <span class="hljs-number">40</span>, <span class="hljs-number">41</span>, <span class="hljs-number">42</span>, <span class="hljs-number">43</span>, <span class="hljs-number">44</span>, <span class="hljs-number">45</span>, <span class="hljs-number">46</span>, <span class="hljs-number">47</span>]} model.parallelize(device_map)`}}),Sn=new H({props:{name:"deparallelize",anchor:"transformers.GPT2Model.deparallelize",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L708"}}),An=new Me({props:{code:`# On a 4 GPU machine with gpt2-large: model = GPT2LMHeadModel.from_pretrained('gpt2-large') device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7], 1: [8, 9, 10, 11, 12, 13, 14, 15], 2: [16, 17, 18, 19, 20, 21, 22, 23], 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling 
torch.cuda.empty_cache(),`,highlighted:`<span class="hljs-comment"># On a 4 GPU machine with gpt2-large:</span> model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2-large&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">8</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">16</span>, <span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">24</span>, <span class="hljs-number">25</span>, <span class="hljs-number">26</span>, <span class="hljs-number">27</span>, <span class="hljs-number">28</span>, <span class="hljs-number">29</span>, <span class="hljs-number">30</span>, <span class="hljs-number">31</span>, <span class="hljs-number">32</span>, <span class="hljs-number">33</span>, <span class="hljs-number">34</span>, <span class="hljs-number">35</span>]} model.parallelize(device_map) <span class="hljs-comment"># Splits the model across several devices</span> model.deparallelize() <span class="hljs-comment"># Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()</span>`}}),On=new ze({}),Wn=new H({props:{name:"class transformers.GPT2LMHeadModel",anchor:"transformers.GPT2LMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L943",parametersDescription:[{anchor:"transformers.GPT2LMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vn=new H({props:{name:"forward",anchor:"transformers.GPT2LMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1012",parametersDescription:[{anchor:"transformers.GPT2LMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPT2LMHeadModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPT2LMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPT2LMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPT2LMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPT2LMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPT2LMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPT2LMHeadModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPT2LMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPT2LMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPT2LMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPT2LMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Eo=new Le({props:{$$slots:{default:[Gb]},$$scope:{ctx:q}}}),Jn=new Me({props:{code:`import torch from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2LMHeadModel.from_pretrained('gpt2') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2LMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Kn=new H({props:{name:"parallelize",anchor:"transformers.GPT2LMHeadModel.parallelize",parameters:[{name:"device_map",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L958",parametersDescription:[{anchor:"transformers.GPT2LMHeadModel.parallelize.device_map",description:`<strong>device_map</strong> (<code>Dict[int, list]</code>, optional, defaults to None) &#x2014; A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. 
For reference, the gpt2 models have the following number of attention modules:</p> <ul> <li>gpt2: 12</li> <li>gpt2-medium: 24</li> <li>gpt2-large: 36</li> <li>gpt2-xl: 48</li> </ul>`,name:"device_map"}]}}),Xn=new Me({props:{code:`# Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: model = GPT2LMHeadModel.from_pretrained('gpt2-xl') device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8], 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]} model.parallelize(device_map),`,highlighted:`<span class="hljs-comment"># Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:</span> model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2-xl&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>, <span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">22</span>, <span class="hljs-number">23</span>, <span class="hljs-number">24</span>, <span class="hljs-number">25</span>, <span class="hljs-number">26</span>, <span class="hljs-number">27</span>, <span class="hljs-number">28</span>, <span class="hljs-number">29</span>, <span class="hljs-number">30</span>, <span class="hljs-number">31</span>, <span class="hljs-number">32</span>, <span class="hljs-number">33</span>, <span class="hljs-number">34</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">35</span>, <span class="hljs-number">36</span>, <span class="hljs-number">37</span>, <span class="hljs-number">38</span>, <span class="hljs-number">39</span>, <span class="hljs-number">40</span>, <span class="hljs-number">41</span>, <span class="hljs-number">42</span>, <span class="hljs-number">43</span>, <span class="hljs-number">44</span>, <span class="hljs-number">45</span>, <span class="hljs-number">46</span>, <span class="hljs-number">47</span>]} model.parallelize(device_map)`}}),Yn=new H({props:{name:"deparallelize",anchor:"transformers.GPT2LMHeadModel.deparallelize",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L970"}}),Qn=new Me({props:{code:`# On a 4 GPU machine with gpt2-large: model = GPT2LMHeadModel.from_pretrained('gpt2-large') device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7], 1: [8, 9, 10, 11, 12, 13, 14, 15], 2: [16, 17, 18, 19, 20, 21, 22, 23], 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling 
torch.cuda.empty_cache(),`,highlighted:`<span class="hljs-comment"># On a 4 GPU machine with gpt2-large:</span> model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2-large&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">8</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">16</span>, <span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">24</span>, <span class="hljs-number">25</span>, <span class="hljs-number">26</span>, <span class="hljs-number">27</span>, <span class="hljs-number">28</span>, <span class="hljs-number">29</span>, <span class="hljs-number">30</span>, <span class="hljs-number">31</span>, <span class="hljs-number">32</span>, <span class="hljs-number">33</span>, <span class="hljs-number">34</span>, <span class="hljs-number">35</span>]} model.parallelize(device_map) <span class="hljs-comment"># Splits the model across several devices</span> model.deparallelize() <span class="hljs-comment"># Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()</span>`}}),Zn=new ze({}),es=new H({props:{name:"class transformers.GPT2DoubleHeadsModel",anchor:"transformers.GPT2DoubleHeadsModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1112",parametersDescription:[{anchor:"transformers.GPT2DoubleHeadsModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ss=new H({props:{name:"forward",anchor:"transformers.GPT2DoubleHeadsModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"mc_token_ids",val:" = None"},{name:"labels",val:" = None"},{name:"mc_labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1186",parametersDescription:[{anchor:"transformers.GPT2DoubleHeadsModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.mc_token_ids",description:`<strong>mc_token_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices)</code>, <em>optional</em>, default to index of the last token of the input) &#x2014; Index of the classification token in each input sequence. Selected in the range <code>[0, input_ids.size(-1) - 1[</code>.`,name:"mc_token_ids"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>`,name:"labels"},{anchor:"transformers.GPT2DoubleHeadsModel.forward.mc_labels",description:`<strong>mc_labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <em>num_choices</em> is the size of the second dimension of the input tensors. 
(see <em>input_ids</em> above)`,name:"mc_labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput" >transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>mc_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>mc_labels</code> is provided) \u2014 Multiple choice classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>mc_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of length <code>config.n_layers</code>, containing tuples of tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>GPT2Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput" >transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Co=new Le({props:{$$slots:{default:[xb]},$$scope:{ctx:q}}}),as=new Me({props:{code:`import torch from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = 
GPT2DoubleHeadsModel.from_pretrained('gpt2') # Add a [CLS] to the vocabulary (we should train it also!) num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'}) embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] encoded_choices = [tokenizer.encode(s) for s in choices] cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_logits = outputs.logits mc_logits = outputs.mc_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2DoubleHeadsModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2DoubleHeadsModel.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Add a [CLS] to the vocabulary (we should train it also!)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_added_tokens = tokenizer.add_special_tokens({<span class="hljs-string">&#x27;cls_token&#x27;</span>: <span class="hljs-string">&#x27;[CLS]&#x27;</span>}) <span class="hljs-meta">&gt;&gt;&gt; </span>embedding_layer = model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-comment"># Update the model embeddings with the new vocabulary size</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choices = [<span class="hljs-string">&quot;Hello, my dog is cute [CLS]&quot;</span>, <span class="hljs-string">&quot;Hello, my cat is cute [CLS]&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_choices = [tokenizer.encode(s) <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> choices] <span class="hljs-meta">&gt;&gt;&gt; </span>cls_token_location = [tokens.index(tokenizer.cls_token_id) <span class="hljs-keyword">for</span> tokens <span class="hljs-keyword">in</span> encoded_choices] <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(encoded_choices).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size: 1, number of choices: 2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>mc_token_ids = torch.tensor([cls_token_location]) <span class="hljs-comment"># Batch size: 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, mc_token_ids=mc_token_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>lm_logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>mc_logits = outputs.mc_logits`}}),rs=new ze({}),is=new H({props:{name:"class transformers.GPT2ForSequenceClassification",anchor:"transformers.GPT2ForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1325",parametersDescription:[{anchor:"transformers.GPT2ForSequenceClassification.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ps=new H({props:{name:"forward",anchor:"transformers.GPT2ForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1341",parametersDescription:[{anchor:"transformers.GPT2ForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPT2ForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPT2ForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPT2ForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPT2ForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPT2ForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPT2ForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPT2ForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPT2ForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPT2ForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPT2ForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPT2ForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),Ho=new Le({props:{$$slots:{default:[zb]},$$scope:{ctx:q}}}),hs=new Me({props:{code:`from transformers import GPT2Tokenizer, GPT2ForSequenceClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialogRPT-updown') model = GPT2ForSequenceClassification.from_pretrained('microsoft/DialogRPT-updown') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),us=new Me({props:{code:`from transformers import GPT2Tokenizer, GPT2ForSequenceClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialogRPT-updown') model = GPT2ForSequenceClassification.from_pretrained('microsoft/DialogRPT-updown', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ms=new ze({}),fs=new H({props:{name:"class transformers.GPT2ForTokenClassification",anchor:"transformers.GPT2ForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1450",parametersDescription:[{anchor:"transformers.GPT2ForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),vs=new H({props:{name:"forward",anchor:"transformers.GPT2ForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_gpt2.py#L1472",parametersDescription:[{anchor:"transformers.GPT2ForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.GPT2ForTokenClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. 
The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.GPT2ForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.GPT2ForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, input_ids_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.GPT2ForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.GPT2ForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.GPT2ForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).`,name:"inputs_embeds"},{anchor:"transformers.GPT2ForTokenClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.GPT2ForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.GPT2ForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.GPT2ForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.GPT2ForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),No=new Le({props:{$$slots:{default:[Fb]},$$scope:{ctx:q}}}),bs=new Me({props:{code:`from transformers import GPT2Tokenizer, GPT2ForTokenClassification import torch tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialogRPT-updown') model = GPT2ForTokenClassification.from_pretrained('microsoft/DialogRPT-updown') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2ForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = GPT2ForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = 
tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ks=new ze({}),ys=new H({props:{name:"class transformers.TFGPT2Model",anchor:"transformers.TFGPT2Model",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L726",parametersDescription:[{anchor:"transformers.TFGPT2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),So=new Le({props:{$$slots:{default:[jb]},$$scope:{ctx:q}}}),Ms=new H({props:{name:"call",anchor:"transformers.TFGPT2Model.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L731",parametersDescription:[{anchor:"transformers.TFGPT2Model.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFGPT2Model.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFGPT2Model.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFGPT2Model.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFGPT2Model.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFGPT2Model.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFGPT2Model.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFGPT2Model.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFGPT2Model.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFGPT2Model.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFGPT2Model.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFGPT2Model.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFGPT2Model.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFGPT2Model.call.past",description:`<strong>past</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past"},{anchor:"transformers.TFGPT2Model.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ao=new Le({props:{$$slots:{default:[Eb]},$$scope:{ctx:q}}}),Gs=new Me({props:{code:`from transformers import GPT2Tokenizer, TFGPT2Model import tensorflow as tf tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, TFGPT2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFGPT2Model.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),xs=new ze({}),zs=new H({props:{name:"class transformers.TFGPT2LMHeadModel",anchor:"transformers.TFGPT2LMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L842",parametersDescription:[{anchor:"transformers.TFGPT2LMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wo=new Le({props:{$$slots:{default:[qb]},$$scope:{ctx:q}}}),qs=new H({props:{name:"call",anchor:"transformers.TFGPT2LMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L860",parametersDescription:[{anchor:"transformers.TFGPT2LMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFGPT2LMHeadModel.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFGPT2LMHeadModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFGPT2LMHeadModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFGPT2LMHeadModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFGPT2LMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFGPT2LMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFGPT2LMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFGPT2LMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFGPT2LMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFGPT2LMHeadModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFGPT2LMHeadModel.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFGPT2LMHeadModel.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFGPT2LMHeadModel.call.past",description:`<strong>past</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past"},{anchor:"transformers.TFGPT2LMHeadModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFGPT2LMHeadModel.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. 
Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Bo=new Le({props:{$$slots:{default:[Cb]},$$scope:{ctx:q}}}),Cs=new Me({props:{code:`from transformers import 
GPT2Tokenizer, TFGPT2LMHeadModel import tensorflow as tf tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2LMHeadModel.from_pretrained('gpt2') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, TFGPT2LMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFGPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ds=new ze({}),Hs=new H({props:{name:"class transformers.TFGPT2DoubleHeadsModel",anchor:"transformers.TFGPT2DoubleHeadsModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L993",parametersDescription:[{anchor:"transformers.TFGPT2DoubleHeadsModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ro=new Le({props:{$$slots:{default:[Db]},$$scope:{ctx:q}}}),Ss=new H({props:{name:"call",anchor:"transformers.TFGPT2DoubleHeadsModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"mc_token_ids",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L1002",parametersDescription:[{anchor:"transformers.TFGPT2DoubleHeadsModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). 
Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFGPT2DoubleHeadsModel.call.mc_token_ids",description:`<strong>mc_token_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, num_choices)</code>, <em>optional</em>, default to index of the last token of the input) &#x2014; Index of the classification token in each input sequence. 
Selected in the range <code>[0, input_ids.size(-1) - 1[</code>.`,name:"mc_token_ids"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput" >transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>mc_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput" >transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Vo=new Le({props:{$$slots:{default:[Hb]},$$scope:{ctx:q}}}),As=new Me({props:{code:`import tensorflow as tf from transformers import GPT2Tokenizer, TFGPT2DoubleHeadsModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2') # Add a [CLS] to the vocabulary (we should train it also!) 
num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'}) embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] encoded_choices = [tokenizer.encode(s) for s in choices] cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2 mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1 outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_prediction_scores, mc_prediction_scores = outputs[:2],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, TFGPT2DoubleHeadsModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFGPT2DoubleHeadsModel.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Add a [CLS] to the vocabulary (we should train it also!)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_added_tokens = tokenizer.add_special_tokens({<span class="hljs-string">&#x27;cls_token&#x27;</span>: <span class="hljs-string">&#x27;[CLS]&#x27;</span>}) <span class="hljs-meta">&gt;&gt;&gt; </span>embedding_layer = model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-comment"># Update the model embeddings with the new vocabulary size</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choices = [<span class="hljs-string">&quot;Hello, my dog is cute [CLS]&quot;</span>, <span class="hljs-string">&quot;Hello, my cat is cute [CLS]&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_choices = [tokenizer.encode(s) <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> choices] <span class="hljs-meta">&gt;&gt;&gt; </span>cls_token_location = [tokens.index(tokenizer.cls_token_id) <span class="hljs-keyword">for</span> tokens <span class="hljs-keyword">in</span> encoded_choices] <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tf.constant(encoded_choices)[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># Batch size: 1, number of choices: 2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>mc_token_ids = tf.constant([cls_token_location]) <span class="hljs-comment"># Batch size: 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, mc_token_ids=mc_token_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>lm_prediction_scores, mc_prediction_scores = outputs[:<span class="hljs-number">2</span>]`}}),Os=new ze({}),Ws=new H({props:{name:"class transformers.TFGPT2ForSequenceClassification",anchor:"transformers.TFGPT2ForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L1162",parametersDescription:[{anchor:"transformers.TFGPT2ForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) 
&#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ko=new Le({props:{$$slots:{default:[Lb]},$$scope:{ctx:q}}}),Vs=new H({props:{name:"call",anchor:"transformers.TFGPT2ForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_tf_gpt2.py#L1174",parametersDescription:[{anchor:"transformers.TFGPT2ForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFGPT2ForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast" >transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each 
layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast" >transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast</a> or <code>tuple(tf.Tensor)</code></p> `}}),Xo=new Le({props:{$$slots:{default:[Nb]},$$scope:{ctx:q}}}),Js=new Me({props:{code:`from transformers import GPT2Tokenizer, TFGPT2ForSequenceClassification import tensorflow as tf tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialogRPT-updown') model = TFGPT2ForSequenceClassification.from_pretrained('microsoft/DialogRPT-updown') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, TFGPT2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFGPT2ForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/DialogRPT-updown&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ks=new ze({}),Xs=new H({props:{name:"class transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast",anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/modeling_tf_outputs.py#L717",parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) 
loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ys=new ze({}),Qs=new H({props:{name:"class transformers.FlaxGPT2Model",anchor:"transformers.FlaxGPT2Model",parameters:[{name:"config",val:": GPT2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_flax_gpt2.py#L654",parametersDescription:[{anchor:"transformers.FlaxGPT2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxGPT2Model.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. 
Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ra=new H({props:{name:"__call__",anchor:"transformers.FlaxGPT2PreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"encoder_hidden_states",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_flax_gpt2.py#L447",parametersDescription:[{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code>. Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when 
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Zo=new Le({props:{$$slots:{default:[Ib]},$$scope:{ctx:q}}}),ia=new Me({props:{code:`from transformers import GPT2Tokenizer, FlaxGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = FlaxGPT2Model.from_pretrained('gpt2') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, FlaxGPT2Model <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxGPT2Model.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),la=new ze({}),da=new H({props:{name:"class transformers.FlaxGPT2LMHeadModel",anchor:"transformers.FlaxGPT2LMHeadModel",parameters:[{name:"config",val:": GPT2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_flax_gpt2.py#L732",parametersDescription:[{anchor:"transformers.FlaxGPT2LMHeadModel.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config">GPT2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxGPT2LMHeadModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),_a=new H({props:{name:"__call__",anchor:"transformers.FlaxGPT2PreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"encoder_hidden_states",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/gpt2/modeling_flax_gpt2.py#L447",parametersDescription:[{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code>. Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxGPT2PreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Config" >GPT2Config</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> 
(<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tn=new Le({props:{$$slots:{default:[Sb]},$$scope:{ctx:q}}}),Ta=new Me({props:{code:`from transformers import GPT2Tokenizer, FlaxGPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = FlaxGPT2LMHeadModel.from_pretrained('gpt2') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) # retrieve logts for next token next_token_logits = outputs.logits[:, -1],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, FlaxGPT2LMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxGPT2LMHeadModel.from_pretrained(<span class="hljs-string">&#x27;gpt2&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve logts for next token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs.logits[:, -<span class="hljs-number">1</span>]`}}),{c(){p=n("meta"),M=l(),f=n("h1"),T=n("a"),$=n("span"),v(_.$$.fragment),g=l(),x=n("span"),Q=a("OpenAI GPT2"),C=l(),z=n("h2"),R=n("a"),S=n("span"),v(Z.$$.fragment),ue=l(),A=n("span"),me=a("Overview"),ce=l(),W=n("p"),L=a("OpenAI GPT-2 model was proposed in "),ee=n("a"),te=a("Language Models 
are Unsupervised Multitask Learners"),F=a(` by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever. It\u2019s a causal (unidirectional) transformer pretrained using language modeling on a very large corpus of ~40 GB of text data.`),E=l(),se=n("p"),V=a("The abstract from the paper is the following:"),pe=l(),ae=n("p"),O=n("em"),fe=a(`GPT-2 is a large transformer-based language model with 1.5 billion parameters, trained on a dataset[1] of 8 million web pages. GPT-2 is trained with a simple objective: predict the next word, given all of the previous words within some text. The diversity of the dataset causes this simple goal to contain naturally occurring demonstrations of many tasks across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X the parameters and trained on more than 10X the amount of data.`),he=l(),j=n("p"),ge=a("Tips:"),B=l(),U=n("ul"),re=n("li"),J=a(`GPT-2 is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),_e=l(),oe=n("li"),N=a(`GPT-2 was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the `),ie=n("em"),K=a("run_generation.py"),Te=a(" example script."),u=l(),G=n("li"),ne=a("The model can take the "),Fe=n("em"),Se=a("past_key_values"),I=a(" (for PyTorch) or "),je=n("em"),Ae=a("past"),Oe=a(` (for TF) as input, which is the previously computed key/value attention pairs. Using this (`),D=n("em"),X=a("past_key_values"),We=a(" or "),Ee=n("em"),Y=a("past"),Be=a(`) value prevents the model from re-computing pre-computed values in the context of text generation. For PyTorch, see `),qe=n("em"),ve=a("past_key_values"),Ue=a(` argument of the `),wa=n("a"),sc=a("GPT2Model.forward()"),ac=a(" method, or for TF the "),mr=n("em"),rc=a("past"),ic=a(` argument of the `),Pa=n("a"),lc=a("TFGPT2Model.call()"),dc=a(" method for more information on its usage."),cc=l(),kt=n("li"),pc=a("Enabling the "),fr=n("em"),hc=a("scale_attn_by_inverse_layer_idx"),uc=a(" and "),gr=n("em"),mc=a("reorder_and_upcast_attn"),fc=a(` flags will apply the training stability improvements from `),dn=n("a"),gc=a("Mistral"),_c=a(" (for PyTorch only)."),Fl=l(),Bt=n("p"),cn=n("a"),Tc=a("Write With Transformer"),vc=a(` is a webapp created and hosted by Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five different sizes: small, medium, large, xl and a distilled version of the small checkpoint: `),_r=n("em"),bc=a("distilgpt-2"),kc=a("."),jl=l(),Mt=n("p"),yc=a("This model was contributed by "),pn=n("a"),wc=a("thomwolf"),Pc=a(". The original code can be found "),hn=n("a"),$c=a("here"),Mc=a("."),El=l(),Ut=n("h2"),wo=n("a"),Tr=n("span"),v(un.$$.fragment),Gc=l(),vr=n("span"),xc=a("GPT2Config"),ql=l(),Re=n("div"),v(mn.$$.fragment),zc=l(),yt=n("p"),Fc=a("This is the configuration class to store the configuration of a "),$a=n("a"),jc=a("GPT2Model"),Ec=a(` or a `),Ma=n("a"),qc=a("TFGPT2Model"),Cc=a(`. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-2 `),fn=n("a"),Dc=a("small"),Hc=a(" architecture."),Lc=l(),Rt=n("p"),Nc=a("Configuration objects inherit from "),Ga=n("a"),Ic=a("PretrainedConfig"),Sc=a(` and can be used to control the model outputs. Read the documentation from `),xa=n("a"),Ac=a("PretrainedConfig"),Oc=a(" for more information."),Wc=l(),br=n("p"),Bc=a("Example:"),Uc=l(),v(gn.$$.fragment),Cl=l(),Vt=n("h2"),Po=n("a"),kr=n("span"),v(_n.$$.fragment),Rc=l(),yr=n("span"),Vc=a("GPT2Tokenizer"),Dl=l(),le=n("div"),v(Tn.$$.fragment),Jc=l(),wr=n("p"),Kc=a("Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding."),Xc=l(),Pr=n("p"),Yc=a(`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),Qc=l(),v(vn.$$.fragment),Zc=l(),bn=n("p"),ep=a("You can get around that behavior by passing "),$r=n("code"),tp=a("add_prefix_space=True"),op=a(` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),np=l(),v($o.$$.fragment),sp=l(),kn=n("p"),ap=a("This tokenizer inherits from "),za=n("a"),rp=a("PreTrainedTokenizer"),ip=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),lp=l(),Mr=n("div"),Hl=l(),Jt=n("h2"),Mo=n("a"),Gr=n("span"),v(yn.$$.fragment),dp=l(),xr=n("span"),cp=a("GPT2TokenizerFast"),Ll=l(),be=n("div"),v(wn.$$.fragment),pp=l(),Pn=n("p"),hp=a("Construct a \u201Cfast\u201D GPT-2 tokenizer (backed by HuggingFace\u2019s "),zr=n("em"),up=a("tokenizers"),mp=a(` library). Based on byte-level Byte-Pair-Encoding.`),fp=l(),Fr=n("p"),gp=a(`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),_p=l(),v($n.$$.fragment),Tp=l(),Mn=n("p"),vp=a("You can get around that behavior by passing "),jr=n("code"),bp=a("add_prefix_space=True"),kp=a(` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance.`),yp=l(),v(Go.$$.fragment),wp=l(),Gn=n("p"),Pp=a("This tokenizer inherits from "),Fa=n("a"),$p=a("PreTrainedTokenizerFast"),Mp=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Nl=l(),Kt=n("h2"),xo=n("a"),Er=n("span"),v(xn.$$.fragment),Gp=l(),qr=n("span"),xp=a("GPT2 specific outputs"),Il=l(),Xt=n("div"),v(zn.$$.fragment),zp=l(),Cr=n("p"),Fp=a("Base class for outputs of models predicting if two sentences are consecutive or not."),Sl=l(),Yt=n("div"),v(Fn.$$.fragment),jp=l(),Dr=n("p"),Ep=a("Base class for outputs of models predicting if two sentences are consecutive or not."),Al=l(),Qt=n("h2"),zo=n("a"),Hr=n("span"),v(jn.$$.fragment),qp=l(),Lr=n("span"),Cp=a("GPT2Model"),Ol=l(),ke=n("div"),v(En.$$.fragment),Dp=l(),Nr=n("p"),Hp=a("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top."),Lp=l(),qn=n("p"),Np=a("This model inherits from "),ja=n("a"),Ip=a("PreTrainedModel"),Sp=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ap=l(),Cn=n("p"),Op=a("This model is also a PyTorch "),Dn=n("a"),Wp=a("torch.nn.Module"),Bp=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Up=l(),Ke=n("div"),v(Hn.$$.fragment),Rp=l(),Zt=n("p"),Vp=a("The "),Ea=n("a"),Jp=a("GPT2Model"),Kp=a(" forward method, overrides the "),Ir=n("code"),Xp=a("__call__"),Yp=a(" special method."),Qp=l(),v(Fo.$$.fragment),Zp=l(),Sr=n("p"),eh=a("Example:"),th=l(),v(Ln.$$.fragment),oh=l(),Xe=n("div"),v(Nn.$$.fragment),nh=l(),Ar=n("p"),sh=a("This is an experimental feature and is a subject to change at a moment\u2019s notice."),ah=l(),Or=n("p"),rh=a(`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),ih=l(),Wr=n("p"),lh=a("Example:"),dh=l(),v(In.$$.fragment),ch=l(),gt=n("div"),v(Sn.$$.fragment),ph=l(),Br=n("p"),hh=a("Moves the model to cpu from a model parallel state."),uh=l(),Ur=n("p"),mh=a("Example:"),fh=l(),v(An.$$.fragment),Wl=l(),eo=n("h2"),jo=n("a"),Rr=n("span"),v(On.$$.fragment),gh=l(),Vr=n("span"),_h=a("GPT2LMHeadModel"),Bl=l(),ye=n("div"),v(Wn.$$.fragment),Th=l(),Jr=n("p"),vh=a(`The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),bh=l(),Bn=n("p"),kh=a("This model inherits from "),qa=n("a"),yh=a("PreTrainedModel"),wh=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ph=l(),Un=n("p"),$h=a("This model is also a PyTorch "),Rn=n("a"),Mh=a("torch.nn.Module"),Gh=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xh=l(),Ye=n("div"),v(Vn.$$.fragment),zh=l(),to=n("p"),Fh=a("The "),Ca=n("a"),jh=a("GPT2LMHeadModel"),Eh=a(" forward method, overrides the "),Kr=n("code"),qh=a("__call__"),Ch=a(" special method."),Dh=l(),v(Eo.$$.fragment),Hh=l(),Xr=n("p"),Lh=a("Example:"),Nh=l(),v(Jn.$$.fragment),Ih=l(),Qe=n("div"),v(Kn.$$.fragment),Sh=l(),Yr=n("p"),Ah=a("This is an experimental feature and is a subject to change at a moment\u2019s notice."),Oh=l(),Qr=n("p"),Wh=a(`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),Bh=l(),Zr=n("p"),Uh=a("Example:"),Rh=l(),v(Xn.$$.fragment),Vh=l(),_t=n("div"),v(Yn.$$.fragment),Jh=l(),ei=n("p"),Kh=a("Moves the model to cpu from a model parallel state."),Xh=l(),ti=n("p"),Yh=a("Example:"),Qh=l(),v(Qn.$$.fragment),Ul=l(),oo=n("h2"),qo=n("a"),oi=n("span"),v(Zn.$$.fragment),Zh=l(),ni=n("span"),eu=a("GPT2DoubleHeadsModel"),Rl=l(),Ve=n("div"),v(es.$$.fragment),tu=l(),si=n("p"),ou=a(`The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),nu=l(),ts=n("p"),su=a("This model inherits from "),Da=n("a"),au=a("PreTrainedModel"),ru=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),iu=l(),os=n("p"),lu=a("This model is also a PyTorch "),ns=n("a"),du=a("torch.nn.Module"),cu=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pu=l(),Ze=n("div"),v(ss.$$.fragment),hu=l(),no=n("p"),uu=a("The "),Ha=n("a"),mu=a("GPT2DoubleHeadsModel"),fu=a(" forward method, overrides the "),ai=n("code"),gu=a("__call__"),_u=a(" special method."),Tu=l(),v(Co.$$.fragment),vu=l(),ri=n("p"),bu=a("Example:"),ku=l(),v(as.$$.fragment),Vl=l(),so=n("h2"),Do=n("a"),ii=n("span"),v(rs.$$.fragment),yu=l(),li=n("span"),wu=a("GPT2ForSequenceClassification"),Jl=l(),we=n("div"),v(is.$$.fragment),Pu=l(),di=n("p"),$u=a("The GPT2 Model transformer with a sequence classification head on top (linear layer)."),Mu=l(),La=n("p"),Na=n("a"),Gu=a("GPT2ForSequenceClassification"),xu=a(` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),zu=l(),mt=n("p"),Fu=a(`Since it does classification on the last token, it requires to know the position of the last token. If a `),ci=n("code"),ju=a("pad_token_id"),Eu=a(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),pi=n("code"),qu=a("pad_token_id"),Cu=a(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),hi=n("code"),Du=a("inputs_embeds"),Hu=a(" are passed instead of "),ui=n("code"),Lu=a("input_ids"),Nu=a(`, it does the same (take the last value in each row of the batch).`),Iu=l(),ls=n("p"),Su=a("This model inherits from "),Ia=n("a"),Au=a("PreTrainedModel"),Ou=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wu=l(),ds=n("p"),Bu=a("This model is also a PyTorch "),cs=n("a"),Uu=a("torch.nn.Module"),Ru=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vu=l(),Ge=n("div"),v(ps.$$.fragment),Ju=l(),ao=n("p"),Ku=a("The "),Sa=n("a"),Xu=a("GPT2ForSequenceClassification"),Yu=a(" forward method, overrides the "),mi=n("code"),Qu=a("__call__"),Zu=a(" special method."),em=l(),v(Ho.$$.fragment),tm=l(),fi=n("p"),om=a("Example of single-label classification:"),nm=l(),v(hs.$$.fragment),sm=l(),gi=n("p"),am=a("Example of multi-label classification:"),rm=l(),v(us.$$.fragment),Kl=l(),ro=n("h2"),Lo=n("a"),_i=n("span"),v(ms.$$.fragment),im=l(),Ti=n("span"),lm=a("GPT2ForTokenClassification"),Xl=l(),Je=n("div"),v(fs.$$.fragment),dm=l(),vi=n("p"),cm=a(`GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),pm=l(),gs=n("p"),hm=a("This model inherits from "),Aa=n("a"),um=a("PreTrainedModel"),mm=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fm=l(),_s=n("p"),gm=a("This model is also a PyTorch "),Ts=n("a"),_m=a("torch.nn.Module"),Tm=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vm=l(),et=n("div"),v(vs.$$.fragment),bm=l(),io=n("p"),km=a("The "),Oa=n("a"),ym=a("GPT2ForTokenClassification"),wm=a(" forward method, overrides the "),bi=n("code"),Pm=a("__call__"),$m=a(" special method."),Mm=l(),v(No.$$.fragment),Gm=l(),ki=n("p"),xm=a("Example:"),zm=l(),v(bs.$$.fragment),Yl=l(),lo=n("h2"),Io=n("a"),yi=n("span"),v(ks.$$.fragment),Fm=l(),wi=n("span"),jm=a("TFGPT2Model"),Ql=l(),Ce=n("div"),v(ys.$$.fragment),Em=l(),Pi=n("p"),qm=a("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top."),Cm=l(),ws=n("p"),Dm=a("This model inherits from "),Wa=n("a"),Hm=a("TFPreTrainedModel"),Lm=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nm=l(),Ps=n("p"),Im=a("This model is also a "),$s=n("a"),Sm=a("tf.keras.Model"),Am=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Om=l(),v(So.$$.fragment),Wm=l(),tt=n("div"),v(Ms.$$.fragment),Bm=l(),co=n("p"),Um=a("The "),Ba=n("a"),Rm=a("TFGPT2Model"),Vm=a(" forward method, overrides the "),$i=n("code"),Jm=a("__call__"),Km=a(" special method."),Xm=l(),v(Ao.$$.fragment),Ym=l(),Mi=n("p"),Qm=a("Example:"),Zm=l(),v(Gs.$$.fragment),Zl=l(),po=n("h2"),Oo=n("a"),Gi=n("span"),v(xs.$$.fragment),ef=l(),xi=n("span"),tf=a("TFGPT2LMHeadModel"),ed=l(),De=n("div"),v(zs.$$.fragment),of=l(),zi=n("p"),nf=a(`The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),sf=l(),Fs=n("p"),af=a("This model inherits from "),Ua=n("a"),rf=a("TFPreTrainedModel"),lf=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),df=l(),js=n("p"),cf=a("This model is also a "),Es=n("a"),pf=a("tf.keras.Model"),hf=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),uf=l(),v(Wo.$$.fragment),mf=l(),ot=n("div"),v(qs.$$.fragment),ff=l(),ho=n("p"),gf=a("The "),Ra=n("a"),_f=a("TFGPT2LMHeadModel"),Tf=a(" forward method, overrides the "),Fi=n("code"),vf=a("__call__"),bf=a(" special method."),kf=l(),v(Bo.$$.fragment),yf=l(),ji=n("p"),wf=a("Example:"),Pf=l(),v(Cs.$$.fragment),td=l(),uo=n("h2"),Uo=n("a"),Ei=n("span"),v(Ds.$$.fragment),$f=l(),qi=n("span"),Mf=a("TFGPT2DoubleHeadsModel"),od=l(),He=n("div"),v(Hs.$$.fragment),Gf=l(),Ci=n("p"),xf=a(`The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),zf=l(),Ls=n("p"),Ff=a("This model inherits from "),Va=n("a"),jf=a("TFPreTrainedModel"),Ef=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qf=l(),Ns=n("p"),Cf=a("This model is also a "),Is=n("a"),Df=a("tf.keras.Model"),Hf=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Lf=l(),v(Ro.$$.fragment),Nf=l(),nt=n("div"),v(Ss.$$.fragment),If=l(),mo=n("p"),Sf=a("The "),Ja=n("a"),Af=a("TFGPT2DoubleHeadsModel"),Of=a(" forward method, overrides the "),Di=n("code"),Wf=a("__call__"),Bf=a(" special method."),Uf=l(),v(Vo.$$.fragment),Rf=l(),Hi=n("p"),Vf=a("Examples:"),Jf=l(),v(As.$$.fragment),nd=l(),fo=n("h2"),Jo=n("a"),Li=n("span"),v(Os.$$.fragment),Kf=l(),Ni=n("span"),Xf=a("TFGPT2ForSequenceClassification"),sd=l(),de=n("div"),v(Ws.$$.fragment),Yf=l(),Ii=n("p"),Qf=a("The GPT2 Model transformer with a sequence classification head on top (linear layer)."),Zf=l(),Ka=n("p"),Xa=n("a"),eg=a("TFGPT2ForSequenceClassification"),tg=a(` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),og=l(),ft=n("p"),ng=a(`Since it does classification on the last token, it requires to know the position of the last token. If a `),Si=n("code"),sg=a("pad_token_id"),ag=a(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Ai=n("code"),rg=a("pad_token_id"),ig=a(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Oi=n("code"),lg=a("inputs_embeds"),dg=a(" are passed instead of "),Wi=n("code"),cg=a("input_ids"),pg=a(`, it does the same (take the last value in each row of the batch).`),hg=l(),Bs=n("p"),ug=a("This model inherits from "),Ya=n("a"),mg=a("TFPreTrainedModel"),fg=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gg=l(),Us=n("p"),_g=a("This model is also a "),Rs=n("a"),Tg=a("tf.keras.Model"),vg=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bg=l(),v(Ko.$$.fragment),kg=l(),st=n("div"),v(Vs.$$.fragment),yg=l(),go=n("p"),wg=a("The "),Qa=n("a"),Pg=a("TFGPT2ForSequenceClassification"),$g=a(" forward method, overrides the "),Bi=n("code"),Mg=a("__call__"),Gg=a(" special method."),xg=l(),v(Xo.$$.fragment),zg=l(),Ui=n("p"),Fg=a("Example:"),jg=l(),v(Js.$$.fragment),ad=l(),_o=n("h2"),Yo=n("a"),Ri=n("span"),v(Ks.$$.fragment),Eg=l(),Vi=n("span"),qg=a("TFSequenceClassifierOutputWithPast"),rd=l(),To=n("div"),v(Xs.$$.fragment),Cg=l(),Ji=n("p"),Dg=a("Base class for outputs of sentence classification models."),id=l(),vo=n("h2"),Qo=n("a"),Ki=n("span"),v(Ys.$$.fragment),Hg=l(),Xi=n("span"),Lg=a("FlaxGPT2Model"),ld=l(),Pe=n("div"),v(Qs.$$.fragment),Ng=l(),Yi=n("p"),Ig=a("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top."),Sg=l(),Zs=n("p"),Ag=a("This model inherits from "),Za=n("a"),Og=a("FlaxPreTrainedModel"),Wg=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bg=l(),ea=n("p"),Ug=a("This model is also a Flax Linen "),ta=n("a"),Rg=a("flax.nn.Module"),Vg=a(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Jg=l(),Qi=n("p"),Kg=a("Finally, this model supports inherent JAX features such as:"),Xg=l(),wt=n("ul"),Zi=n("li"),oa=n("a"),Yg=a("Just-In-Time (JIT) compilation"),Qg=l(),el=n("li"),na=n("a"),Zg=a("Automatic Differentiation"),e_=l(),tl=n("li"),sa=n("a"),t_=a("Vectorization"),o_=l(),ol=n("li"),aa=n("a"),n_=a("Parallelization"),s_=l(),at=n("div"),v(ra.$$.fragment),a_=l(),bo=n("p"),r_=a("The "),nl=n("code"),i_=a("FlaxGPT2PreTrainedModel"),l_=a(" forward method, overrides the "),sl=n("code"),d_=a("__call__"),c_=a(" special method."),p_=l(),v(Zo.$$.fragment),h_=l(),al=n("p"),u_=a("Example:"),m_=l(),v(ia.$$.fragment),dd=l(),ko=n("h2"),en=n("a"),rl=n("span"),v(la.$$.fragment),f_=l(),il=n("span"),g_=a("FlaxGPT2LMHeadModel"),cd=l(),$e=n("div"),v(da.$$.fragment),__=l(),ll=n("p"),T_=a(`The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),v_=l(),ca=n("p"),b_=a("This model inherits from "),er=n("a"),k_=a("FlaxPreTrainedModel"),y_=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),w_=l(),pa=n("p"),P_=a("This model is also a Flax Linen "),ha=n("a"),$_=a("flax.nn.Module"),M_=a(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),G_=l(),dl=n("p"),x_=a("Finally, this model supports inherent JAX features such as:"),z_=l(),Pt=n("ul"),cl=n("li"),ua=n("a"),F_=a("Just-In-Time (JIT) compilation"),j_=l(),pl=n("li"),ma=n("a"),E_=a("Automatic Differentiation"),q_=l(),hl=n("li"),fa=n("a"),C_=a("Vectorization"),D_=l(),ul=n("li"),ga=n("a"),H_=a("Parallelization"),L_=l(),rt=n("div"),v(_a.$$.fragment),N_=l(),yo=n("p"),I_=a("The "),ml=n("code"),S_=a("FlaxGPT2PreTrainedModel"),A_=a(" forward method, overrides the "),fl=n("code"),O_=a("__call__"),W_=a(" special method."),B_=l(),v(tn.$$.fragment),U_=l(),gl=n("p"),R_=a("Example:"),V_=l(),v(Ta.$$.fragment),this.h()},l(o){const m=wb('[data-svelte="svelte-1phssyn"]',document.head);p=s(m,"META",{name:!0,content:!0}),m.forEach(t),M=d(o),f=s(o,"H1",{class:!0});var va=i(f);T=s(va,"A",{id:!0,class:!0,href:!0});var _l=i(T);$=s(_l,"SPAN",{});var Tl=i($);b(_.$$.fragment,Tl),Tl.forEach(t),_l.forEach(t),g=d(va),x=s(va,"SPAN",{});var vl=i(x);Q=r(vl,"OpenAI GPT2"),vl.forEach(t),va.forEach(t),C=d(o),z=s(o,"H2",{class:!0});var ba=i(z);R=s(ba,"A",{id:!0,class:!0,href:!0});var bl=i(R);S=s(bl,"SPAN",{});var kl=i(S);b(Z.$$.fragment,kl),kl.forEach(t),bl.forEach(t),ue=d(ba),A=s(ba,"SPAN",{});var yl=i(A);me=r(yl,"Overview"),yl.forEach(t),ba.forEach(t),ce=d(o),W=s(o,"P",{});var ka=i(W);L=r(ka,"OpenAI GPT-2 model was proposed in "),ee=s(ka,"A",{href:!0,rel:!0});var wl=i(ee);te=r(wl,"Language Models are Unsupervised Multitask Learners"),wl.forEach(t),F=r(ka,` by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever. It\u2019s a causal (unidirectional) transformer pretrained using language modeling on a very large corpus of ~40 GB of text data.`),ka.forEach(t),E=d(o),se=s(o,"P",{});var Pl=i(se);V=r(Pl,"The abstract from the paper is the following:"),Pl.forEach(t),pe=d(o),ae=s(o,"P",{});var $l=i(ae);O=s($l,"EM",{});var Ml=i(O);fe=r(Ml,`GPT-2 is a large transformer-based language model with 1.5 billion parameters, trained on a dataset[1] of 8 million web pages. 
GPT-2 is trained with a simple objective: predict the next word, given all of the previous words within some text. The diversity of the dataset causes this simple goal to contain naturally occurring demonstrations of many tasks across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X the parameters and trained on more than 10X the amount of data.`),Ml.forEach(t),$l.forEach(t),he=d(o),j=s(o,"P",{});var Gl=i(j);ge=r(Gl,"Tips:"),Gl.forEach(t),B=d(o),U=s(o,"UL",{});var $t=i(U);re=s($t,"LI",{});var xl=i(re);J=r(xl,`GPT-2 is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),xl.forEach(t),_e=d($t),oe=s($t,"LI",{});var ya=i(oe);N=r(ya,`GPT-2 was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the `),ie=s(ya,"EM",{});var X_=i(ie);K=r(X_,"run_generation.py"),X_.forEach(t),Te=r(ya," example script."),ya.forEach(t),u=d($t),G=s($t,"LI",{});var xe=i(G);ne=r(xe,"The model can take the "),Fe=s(xe,"EM",{});var Y_=i(Fe);Se=r(Y_,"past_key_values"),Y_.forEach(t),I=r(xe," (for PyTorch) or "),je=s(xe,"EM",{});var Q_=i(je);Ae=r(Q_,"past"),Q_.forEach(t),Oe=r(xe,` (for TF) as input, which is the previously computed key/value attention pairs. Using this (`),D=s(xe,"EM",{});var Z_=i(D);X=r(Z_,"past_key_values"),Z_.forEach(t),We=r(xe," or "),Ee=s(xe,"EM",{});var eT=i(Ee);Y=r(eT,"past"),eT.forEach(t),Be=r(xe,`) value prevents the model from re-computing pre-computed values in the context of text generation. For PyTorch, see `),qe=s(xe,"EM",{});var tT=i(qe);ve=r(tT,"past_key_values"),tT.forEach(t),Ue=r(xe,` argument of the `),wa=s(xe,"A",{href:!0});var oT=i(wa);sc=r(oT,"GPT2Model.forward()"),oT.forEach(t),ac=r(xe," method, or for TF the "),mr=s(xe,"EM",{});var nT=i(mr);rc=r(nT,"past"),nT.forEach(t),ic=r(xe,` argument of the `),Pa=s(xe,"A",{href:!0});var sT=i(Pa);lc=r(sT,"TFGPT2Model.call()"),sT.forEach(t),dc=r(xe," method for more information on its usage."),xe.forEach(t),cc=d($t),kt=s($t,"LI",{});var on=i(kt);pc=r(on,"Enabling the "),fr=s(on,"EM",{});var aT=i(fr);hc=r(aT,"scale_attn_by_inverse_layer_idx"),aT.forEach(t),uc=r(on," and "),gr=s(on,"EM",{});var rT=i(gr);mc=r(rT,"reorder_and_upcast_attn"),rT.forEach(t),fc=r(on,` flags will apply the training stability improvements from `),dn=s(on,"A",{href:!0,rel:!0});var iT=i(dn);gc=r(iT,"Mistral"),iT.forEach(t),_c=r(on," (for PyTorch only)."),on.forEach(t),$t.forEach(t),Fl=d(o),Bt=s(o,"P",{});var zl=i(Bt);cn=s(zl,"A",{href:!0,rel:!0});var lT=i(cn);Tc=r(lT,"Write With Transformer"),lT.forEach(t),vc=r(zl,` is a webapp created and hosted by Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five different sizes: small, medium, large, xl and a distilled version of the small checkpoint: `),_r=s(zl,"EM",{});var dT=i(_r);bc=r(dT,"distilgpt-2"),dT.forEach(t),kc=r(zl,"."),zl.forEach(t),jl=d(o),Mt=s(o,"P",{});var tr=i(Mt);yc=r(tr,"This model was contributed by "),pn=s(tr,"A",{href:!0,rel:!0});var cT=i(pn);wc=r(cT,"thomwolf"),cT.forEach(t),Pc=r(tr,". 
The original code can be found "),hn=s(tr,"A",{href:!0,rel:!0});var pT=i(hn);$c=r(pT,"here"),pT.forEach(t),Mc=r(tr,"."),tr.forEach(t),El=d(o),Ut=s(o,"H2",{class:!0});var hd=i(Ut);wo=s(hd,"A",{id:!0,class:!0,href:!0});var hT=i(wo);Tr=s(hT,"SPAN",{});var uT=i(Tr);b(un.$$.fragment,uT),uT.forEach(t),hT.forEach(t),Gc=d(hd),vr=s(hd,"SPAN",{});var mT=i(vr);xc=r(mT,"GPT2Config"),mT.forEach(t),hd.forEach(t),ql=d(o),Re=s(o,"DIV",{class:!0});var Gt=i(Re);b(mn.$$.fragment,Gt),zc=d(Gt),yt=s(Gt,"P",{});var nn=i(yt);Fc=r(nn,"This is the configuration class to store the configuration of a "),$a=s(nn,"A",{href:!0});var fT=i($a);jc=r(fT,"GPT2Model"),fT.forEach(t),Ec=r(nn,` or a `),Ma=s(nn,"A",{href:!0});var gT=i(Ma);qc=r(gT,"TFGPT2Model"),gT.forEach(t),Cc=r(nn,`. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-2 `),fn=s(nn,"A",{href:!0,rel:!0});var _T=i(fn);Dc=r(_T,"small"),_T.forEach(t),Hc=r(nn," architecture."),nn.forEach(t),Lc=d(Gt),Rt=s(Gt,"P",{});var or=i(Rt);Nc=r(or,"Configuration objects inherit from "),Ga=s(or,"A",{href:!0});var TT=i(Ga);Ic=r(TT,"PretrainedConfig"),TT.forEach(t),Sc=r(or,` and can be used to control the model outputs. Read the documentation from `),xa=s(or,"A",{href:!0});var vT=i(xa);Ac=r(vT,"PretrainedConfig"),vT.forEach(t),Oc=r(or," for more information."),or.forEach(t),Wc=d(Gt),br=s(Gt,"P",{});var bT=i(br);Bc=r(bT,"Example:"),bT.forEach(t),Uc=d(Gt),b(gn.$$.fragment,Gt),Gt.forEach(t),Cl=d(o),Vt=s(o,"H2",{class:!0});var ud=i(Vt);Po=s(ud,"A",{id:!0,class:!0,href:!0});var kT=i(Po);kr=s(kT,"SPAN",{});var yT=i(kr);b(_n.$$.fragment,yT),yT.forEach(t),kT.forEach(t),Rc=d(ud),yr=s(ud,"SPAN",{});var wT=i(yr);Vc=r(wT,"GPT2Tokenizer"),wT.forEach(t),ud.forEach(t),Dl=d(o),le=s(o,"DIV",{class:!0});var Ne=i(le);b(Tn.$$.fragment,Ne),Jc=d(Ne),wr=s(Ne,"P",{});var PT=i(wr);Kc=r(PT,"Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding."),PT.forEach(t),Xc=d(Ne),Pr=s(Ne,"P",{});var $T=i(Pr);Yc=r($T,`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),$T.forEach(t),Qc=d(Ne),b(vn.$$.fragment,Ne),Zc=d(Ne),bn=s(Ne,"P",{});var md=i(bn);ep=r(md,"You can get around that behavior by passing "),$r=s(md,"CODE",{});var MT=i($r);tp=r(MT,"add_prefix_space=True"),MT.forEach(t),op=r(md,` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),md.forEach(t),np=d(Ne),b($o.$$.fragment,Ne),sp=d(Ne),kn=s(Ne,"P",{});var fd=i(kn);ap=r(fd,"This tokenizer inherits from "),za=s(fd,"A",{href:!0});var GT=i(za);rp=r(GT,"PreTrainedTokenizer"),GT.forEach(t),ip=r(fd,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),fd.forEach(t),lp=d(Ne),Mr=s(Ne,"DIV",{class:!0}),i(Mr).forEach(t),Ne.forEach(t),Hl=d(o),Jt=s(o,"H2",{class:!0});var gd=i(Jt);Mo=s(gd,"A",{id:!0,class:!0,href:!0});var xT=i(Mo);Gr=s(xT,"SPAN",{});var zT=i(Gr);b(yn.$$.fragment,zT),zT.forEach(t),xT.forEach(t),dp=d(gd),xr=s(gd,"SPAN",{});var FT=i(xr);cp=r(FT,"GPT2TokenizerFast"),FT.forEach(t),gd.forEach(t),Ll=d(o),be=s(o,"DIV",{class:!0});var it=i(be);b(wn.$$.fragment,it),pp=d(it),Pn=s(it,"P",{});var _d=i(Pn);hp=r(_d,"Construct a \u201Cfast\u201D GPT-2 tokenizer (backed by HuggingFace\u2019s "),zr=s(_d,"EM",{});var jT=i(zr);up=r(jT,"tokenizers"),jT.forEach(t),mp=r(_d,` library). Based on byte-level Byte-Pair-Encoding.`),_d.forEach(t),fp=d(it),Fr=s(it,"P",{});var ET=i(Fr);gp=r(ET,`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),ET.forEach(t),_p=d(it),b($n.$$.fragment,it),Tp=d(it),Mn=s(it,"P",{});var Td=i(Mn);vp=r(Td,"You can get around that behavior by passing "),jr=s(Td,"CODE",{});var qT=i(jr);bp=r(qT,"add_prefix_space=True"),qT.forEach(t),kp=r(Td,` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance.`),Td.forEach(t),yp=d(it),b(Go.$$.fragment,it),wp=d(it),Gn=s(it,"P",{});var vd=i(Gn);Pp=r(vd,"This tokenizer inherits from "),Fa=s(vd,"A",{href:!0});var CT=i(Fa);$p=r(CT,"PreTrainedTokenizerFast"),CT.forEach(t),Mp=r(vd,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),vd.forEach(t),it.forEach(t),Nl=d(o),Kt=s(o,"H2",{class:!0});var bd=i(Kt);xo=s(bd,"A",{id:!0,class:!0,href:!0});var DT=i(xo);Er=s(DT,"SPAN",{});var HT=i(Er);b(xn.$$.fragment,HT),HT.forEach(t),DT.forEach(t),Gp=d(bd),qr=s(bd,"SPAN",{});var LT=i(qr);xp=r(LT,"GPT2 specific outputs"),LT.forEach(t),bd.forEach(t),Il=d(o),Xt=s(o,"DIV",{class:!0});var kd=i(Xt);b(zn.$$.fragment,kd),zp=d(kd),Cr=s(kd,"P",{});var NT=i(Cr);Fp=r(NT,"Base class for outputs of models predicting if two sentences are consecutive or not."),NT.forEach(t),kd.forEach(t),Sl=d(o),Yt=s(o,"DIV",{class:!0});var yd=i(Yt);b(Fn.$$.fragment,yd),jp=d(yd),Dr=s(yd,"P",{});var IT=i(Dr);Ep=r(IT,"Base class for outputs of models predicting if two sentences are consecutive or not."),IT.forEach(t),yd.forEach(t),Al=d(o),Qt=s(o,"H2",{class:!0});var wd=i(Qt);zo=s(wd,"A",{id:!0,class:!0,href:!0});var ST=i(zo);Hr=s(ST,"SPAN",{});var AT=i(Hr);b(jn.$$.fragment,AT),AT.forEach(t),ST.forEach(t),qp=d(wd),Lr=s(wd,"SPAN",{});var OT=i(Lr);Cp=r(OT,"GPT2Model"),OT.forEach(t),wd.forEach(t),Ol=d(o),ke=s(o,"DIV",{class:!0});var lt=i(ke);b(En.$$.fragment,lt),Dp=d(lt),Nr=s(lt,"P",{});var WT=i(Nr);Hp=r(WT,"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top."),WT.forEach(t),Lp=d(lt),qn=s(lt,"P",{});var Pd=i(qn);Np=r(Pd,"This model inherits from "),ja=s(Pd,"A",{href:!0});var BT=i(ja);Ip=r(BT,"PreTrainedModel"),BT.forEach(t),Sp=r(Pd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pd.forEach(t),Ap=d(lt),Cn=s(lt,"P",{});var $d=i(Cn);Op=r($d,"This model is also a PyTorch "),Dn=s($d,"A",{href:!0,rel:!0});var UT=i(Dn);Wp=r(UT,"torch.nn.Module"),UT.forEach(t),Bp=r($d,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$d.forEach(t),Up=d(lt),Ke=s(lt,"DIV",{class:!0});var xt=i(Ke);b(Hn.$$.fragment,xt),Rp=d(xt),Zt=s(xt,"P",{});var nr=i(Zt);Vp=r(nr,"The "),Ea=s(nr,"A",{href:!0});var RT=i(Ea);Jp=r(RT,"GPT2Model"),RT.forEach(t),Kp=r(nr," forward method, overrides the "),Ir=s(nr,"CODE",{});var VT=i(Ir);Xp=r(VT,"__call__"),VT.forEach(t),Yp=r(nr," special method."),nr.forEach(t),Qp=d(xt),b(Fo.$$.fragment,xt),Zp=d(xt),Sr=s(xt,"P",{});var JT=i(Sr);eh=r(JT,"Example:"),JT.forEach(t),th=d(xt),b(Ln.$$.fragment,xt),xt.forEach(t),oh=d(lt),Xe=s(lt,"DIV",{class:!0});var zt=i(Xe);b(Nn.$$.fragment,zt),nh=d(zt),Ar=s(zt,"P",{});var KT=i(Ar);sh=r(KT,"This is an experimental feature and is a subject to change at a moment\u2019s notice."),KT.forEach(t),ah=d(zt),Or=s(zt,"P",{});var XT=i(Or);rh=r(XT,`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),XT.forEach(t),ih=d(zt),Wr=s(zt,"P",{});var YT=i(Wr);lh=r(YT,"Example:"),YT.forEach(t),dh=d(zt),b(In.$$.fragment,zt),zt.forEach(t),ch=d(lt),gt=s(lt,"DIV",{class:!0});var sn=i(gt);b(Sn.$$.fragment,sn),ph=d(sn),Br=s(sn,"P",{});var QT=i(Br);hh=r(QT,"Moves the model to cpu from a model parallel state."),QT.forEach(t),uh=d(sn),Ur=s(sn,"P",{});var ZT=i(Ur);mh=r(ZT,"Example:"),ZT.forEach(t),fh=d(sn),b(An.$$.fragment,sn),sn.forEach(t),lt.forEach(t),Wl=d(o),eo=s(o,"H2",{class:!0});var Md=i(eo);jo=s(Md,"A",{id:!0,class:!0,href:!0});var e2=i(jo);Rr=s(e2,"SPAN",{});var t2=i(Rr);b(On.$$.fragment,t2),t2.forEach(t),e2.forEach(t),gh=d(Md),Vr=s(Md,"SPAN",{});var o2=i(Vr);_h=r(o2,"GPT2LMHeadModel"),o2.forEach(t),Md.forEach(t),Bl=d(o),ye=s(o,"DIV",{class:!0});var dt=i(ye);b(Wn.$$.fragment,dt),Th=d(dt),Jr=s(dt,"P",{});var n2=i(Jr);vh=r(n2,`The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),n2.forEach(t),bh=d(dt),Bn=s(dt,"P",{});var Gd=i(Bn);kh=r(Gd,"This model inherits from "),qa=s(Gd,"A",{href:!0});var s2=i(qa);yh=r(s2,"PreTrainedModel"),s2.forEach(t),wh=r(Gd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gd.forEach(t),Ph=d(dt),Un=s(dt,"P",{});var xd=i(Un);$h=r(xd,"This model is also a PyTorch "),Rn=s(xd,"A",{href:!0,rel:!0});var a2=i(Rn);Mh=r(a2,"torch.nn.Module"),a2.forEach(t),Gh=r(xd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xd.forEach(t),xh=d(dt),Ye=s(dt,"DIV",{class:!0});var Ft=i(Ye);b(Vn.$$.fragment,Ft),zh=d(Ft),to=s(Ft,"P",{});var sr=i(to);Fh=r(sr,"The "),Ca=s(sr,"A",{href:!0});var r2=i(Ca);jh=r(r2,"GPT2LMHeadModel"),r2.forEach(t),Eh=r(sr," forward method, overrides the "),Kr=s(sr,"CODE",{});var i2=i(Kr);qh=r(i2,"__call__"),i2.forEach(t),Ch=r(sr," special method."),sr.forEach(t),Dh=d(Ft),b(Eo.$$.fragment,Ft),Hh=d(Ft),Xr=s(Ft,"P",{});var l2=i(Xr);Lh=r(l2,"Example:"),l2.forEach(t),Nh=d(Ft),b(Jn.$$.fragment,Ft),Ft.forEach(t),Ih=d(dt),Qe=s(dt,"DIV",{class:!0});var jt=i(Qe);b(Kn.$$.fragment,jt),Sh=d(jt),Yr=s(jt,"P",{});var d2=i(Yr);Ah=r(d2,"This is an experimental feature and is a subject to change at a moment\u2019s notice."),d2.forEach(t),Oh=d(jt),Qr=s(jt,"P",{});var c2=i(Qr);Wh=r(c2,`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),c2.forEach(t),Bh=d(jt),Zr=s(jt,"P",{});var p2=i(Zr);Uh=r(p2,"Example:"),p2.forEach(t),Rh=d(jt),b(Xn.$$.fragment,jt),jt.forEach(t),Vh=d(dt),_t=s(dt,"DIV",{class:!0});var an=i(_t);b(Yn.$$.fragment,an),Jh=d(an),ei=s(an,"P",{});var h2=i(ei);Kh=r(h2,"Moves the model to cpu from a model parallel state."),h2.forEach(t),Xh=d(an),ti=s(an,"P",{});var u2=i(ti);Yh=r(u2,"Example:"),u2.forEach(t),Qh=d(an),b(Qn.$$.fragment,an),an.forEach(t),dt.forEach(t),Ul=d(o),oo=s(o,"H2",{class:!0});var zd=i(oo);qo=s(zd,"A",{id:!0,class:!0,href:!0});var m2=i(qo);oi=s(m2,"SPAN",{});var f2=i(oi);b(Zn.$$.fragment,f2),f2.forEach(t),m2.forEach(t),Zh=d(zd),ni=s(zd,"SPAN",{});var g2=i(ni);eu=r(g2,"GPT2DoubleHeadsModel"),g2.forEach(t),zd.forEach(t),Rl=d(o),Ve=s(o,"DIV",{class:!0});var Et=i(Ve);b(es.$$.fragment,Et),tu=d(Et),si=s(Et,"P",{});var _2=i(si);ou=r(_2,`The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),_2.forEach(t),nu=d(Et),ts=s(Et,"P",{});var Fd=i(ts);su=r(Fd,"This model inherits from "),Da=s(Fd,"A",{href:!0});var T2=i(Da);au=r(T2,"PreTrainedModel"),T2.forEach(t),ru=r(Fd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fd.forEach(t),iu=d(Et),os=s(Et,"P",{});var jd=i(os);lu=r(jd,"This model is also a PyTorch "),ns=s(jd,"A",{href:!0,rel:!0});var v2=i(ns);du=r(v2,"torch.nn.Module"),v2.forEach(t),cu=r(jd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jd.forEach(t),pu=d(Et),Ze=s(Et,"DIV",{class:!0});var qt=i(Ze);b(ss.$$.fragment,qt),hu=d(qt),no=s(qt,"P",{});var ar=i(no);uu=r(ar,"The "),Ha=s(ar,"A",{href:!0});var b2=i(Ha);mu=r(b2,"GPT2DoubleHeadsModel"),b2.forEach(t),fu=r(ar," forward method, overrides the "),ai=s(ar,"CODE",{});var k2=i(ai);gu=r(k2,"__call__"),k2.forEach(t),_u=r(ar," special method."),ar.forEach(t),Tu=d(qt),b(Co.$$.fragment,qt),vu=d(qt),ri=s(qt,"P",{});var y2=i(ri);bu=r(y2,"Example:"),y2.forEach(t),ku=d(qt),b(as.$$.fragment,qt),qt.forEach(t),Et.forEach(t),Vl=d(o),so=s(o,"H2",{class:!0});var Ed=i(so);Do=s(Ed,"A",{id:!0,class:!0,href:!0});var w2=i(Do);ii=s(w2,"SPAN",{});var P2=i(ii);b(rs.$$.fragment,P2),P2.forEach(t),w2.forEach(t),yu=d(Ed),li=s(Ed,"SPAN",{});var $2=i(li);wu=r($2,"GPT2ForSequenceClassification"),$2.forEach(t),Ed.forEach(t),Jl=d(o),we=s(o,"DIV",{class:!0});var ct=i(we);b(is.$$.fragment,ct),Pu=d(ct),di=s(ct,"P",{});var M2=i(di);$u=r(M2,"The GPT2 Model transformer with a sequence classification head on top (linear layer)."),M2.forEach(t),Mu=d(ct),La=s(ct,"P",{});var J_=i(La);Na=s(J_,"A",{href:!0});var G2=i(Na);Gu=r(G2,"GPT2ForSequenceClassification"),G2.forEach(t),xu=r(J_,` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),J_.forEach(t),zu=d(ct),mt=s(ct,"P",{});var Ct=i(mt);Fu=r(Ct,`Since it does classification on the last token, it requires to know the position of the last token. If a `),ci=s(Ct,"CODE",{});var x2=i(ci);ju=r(x2,"pad_token_id"),x2.forEach(t),Eu=r(Ct,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),pi=s(Ct,"CODE",{});var z2=i(pi);qu=r(z2,"pad_token_id"),z2.forEach(t),Cu=r(Ct,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),hi=s(Ct,"CODE",{});var F2=i(hi);Du=r(F2,"inputs_embeds"),F2.forEach(t),Hu=r(Ct," are passed instead of "),ui=s(Ct,"CODE",{});var j2=i(ui);Lu=r(j2,"input_ids"),j2.forEach(t),Nu=r(Ct,`, it does the same (take the last value in each row of the batch).`),Ct.forEach(t),Iu=d(ct),ls=s(ct,"P",{});var qd=i(ls);Su=r(qd,"This model inherits from "),Ia=s(qd,"A",{href:!0});var E2=i(Ia);Au=r(E2,"PreTrainedModel"),E2.forEach(t),Ou=r(qd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qd.forEach(t),Wu=d(ct),ds=s(ct,"P",{});var Cd=i(ds);Bu=r(Cd,"This model is also a PyTorch "),cs=s(Cd,"A",{href:!0,rel:!0});var q2=i(cs);Uu=r(q2,"torch.nn.Module"),q2.forEach(t),Ru=r(Cd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cd.forEach(t),Vu=d(ct),Ge=s(ct,"DIV",{class:!0});var pt=i(Ge);b(ps.$$.fragment,pt),Ju=d(pt),ao=s(pt,"P",{});var rr=i(ao);Ku=r(rr,"The "),Sa=s(rr,"A",{href:!0});var C2=i(Sa);Xu=r(C2,"GPT2ForSequenceClassification"),C2.forEach(t),Yu=r(rr," forward method, overrides the "),mi=s(rr,"CODE",{});var D2=i(mi);Qu=r(D2,"__call__"),D2.forEach(t),Zu=r(rr," special method."),rr.forEach(t),em=d(pt),b(Ho.$$.fragment,pt),tm=d(pt),fi=s(pt,"P",{});var H2=i(fi);om=r(H2,"Example of single-label classification:"),H2.forEach(t),nm=d(pt),b(hs.$$.fragment,pt),sm=d(pt),gi=s(pt,"P",{});var L2=i(gi);am=r(L2,"Example of multi-label classification:"),L2.forEach(t),rm=d(pt),b(us.$$.fragment,pt),pt.forEach(t),ct.forEach(t),Kl=d(o),ro=s(o,"H2",{class:!0});var Dd=i(ro);Lo=s(Dd,"A",{id:!0,class:!0,href:!0});var N2=i(Lo);_i=s(N2,"SPAN",{});var I2=i(_i);b(ms.$$.fragment,I2),I2.forEach(t),N2.forEach(t),im=d(Dd),Ti=s(Dd,"SPAN",{});var S2=i(Ti);lm=r(S2,"GPT2ForTokenClassification"),S2.forEach(t),Dd.forEach(t),Xl=d(o),Je=s(o,"DIV",{class:!0});var Dt=i(Je);b(fs.$$.fragment,Dt),dm=d(Dt),vi=s(Dt,"P",{});var A2=i(vi);cm=r(A2,`GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),A2.forEach(t),pm=d(Dt),gs=s(Dt,"P",{});var Hd=i(gs);hm=r(Hd,"This model inherits from "),Aa=s(Hd,"A",{href:!0});var O2=i(Aa);um=r(O2,"PreTrainedModel"),O2.forEach(t),mm=r(Hd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hd.forEach(t),fm=d(Dt),_s=s(Dt,"P",{});var Ld=i(_s);gm=r(Ld,"This model is also a PyTorch "),Ts=s(Ld,"A",{href:!0,rel:!0});var W2=i(Ts);_m=r(W2,"torch.nn.Module"),W2.forEach(t),Tm=r(Ld,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ld.forEach(t),vm=d(Dt),et=s(Dt,"DIV",{class:!0});var Ht=i(et);b(vs.$$.fragment,Ht),bm=d(Ht),io=s(Ht,"P",{});var ir=i(io);km=r(ir,"The "),Oa=s(ir,"A",{href:!0});var B2=i(Oa);ym=r(B2,"GPT2ForTokenClassification"),B2.forEach(t),wm=r(ir," forward method, overrides the "),bi=s(ir,"CODE",{});var U2=i(bi);Pm=r(U2,"__call__"),U2.forEach(t),$m=r(ir," special method."),ir.forEach(t),Mm=d(Ht),b(No.$$.fragment,Ht),Gm=d(Ht),ki=s(Ht,"P",{});var R2=i(ki);xm=r(R2,"Example:"),R2.forEach(t),zm=d(Ht),b(bs.$$.fragment,Ht),Ht.forEach(t),Dt.forEach(t),Yl=d(o),lo=s(o,"H2",{class:!0});var Nd=i(lo);Io=s(Nd,"A",{id:!0,class:!0,href:!0});var V2=i(Io);yi=s(V2,"SPAN",{});var J2=i(yi);b(ks.$$.fragment,J2),J2.forEach(t),V2.forEach(t),Fm=d(Nd),wi=s(Nd,"SPAN",{});var K2=i(wi);jm=r(K2,"TFGPT2Model"),K2.forEach(t),Nd.forEach(t),Ql=d(o),Ce=s(o,"DIV",{class:!0});var Tt=i(Ce);b(ys.$$.fragment,Tt),Em=d(Tt),Pi=s(Tt,"P",{});var X2=i(Pi);qm=r(X2,"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top."),X2.forEach(t),Cm=d(Tt),ws=s(Tt,"P",{});var Id=i(ws);Dm=r(Id,"This model inherits from "),Wa=s(Id,"A",{href:!0});var Y2=i(Wa);Hm=r(Y2,"TFPreTrainedModel"),Y2.forEach(t),Lm=r(Id,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Id.forEach(t),Nm=d(Tt),Ps=s(Tt,"P",{});var Sd=i(Ps);Im=r(Sd,"This model is also a "),$s=s(Sd,"A",{href:!0,rel:!0});var Q2=i($s);Sm=r(Q2,"tf.keras.Model"),Q2.forEach(t),Am=r(Sd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sd.forEach(t),Om=d(Tt),b(So.$$.fragment,Tt),Wm=d(Tt),tt=s(Tt,"DIV",{class:!0});var Lt=i(tt);b(Ms.$$.fragment,Lt),Bm=d(Lt),co=s(Lt,"P",{});var lr=i(co);Um=r(lr,"The "),Ba=s(lr,"A",{href:!0});var Z2=i(Ba);Rm=r(Z2,"TFGPT2Model"),Z2.forEach(t),Vm=r(lr," forward method, overrides the "),$i=s(lr,"CODE",{});var ev=i($i);Jm=r(ev,"__call__"),ev.forEach(t),Km=r(lr," special method."),lr.forEach(t),Xm=d(Lt),b(Ao.$$.fragment,Lt),Ym=d(Lt),Mi=s(Lt,"P",{});var tv=i(Mi);Qm=r(tv,"Example:"),tv.forEach(t),Zm=d(Lt),b(Gs.$$.fragment,Lt),Lt.forEach(t),Tt.forEach(t),Zl=d(o),po=s(o,"H2",{class:!0});var Ad=i(po);Oo=s(Ad,"A",{id:!0,class:!0,href:!0});var ov=i(Oo);Gi=s(ov,"SPAN",{});var nv=i(Gi);b(xs.$$.fragment,nv),nv.forEach(t),ov.forEach(t),ef=d(Ad),xi=s(Ad,"SPAN",{});var sv=i(xi);tf=r(sv,"TFGPT2LMHeadModel"),sv.forEach(t),Ad.forEach(t),ed=d(o),De=s(o,"DIV",{class:!0});var vt=i(De);b(zs.$$.fragment,vt),of=d(vt),zi=s(vt,"P",{});var av=i(zi);nf=r(av,`The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),av.forEach(t),sf=d(vt),Fs=s(vt,"P",{});var Od=i(Fs);af=r(Od,"This model inherits from "),Ua=s(Od,"A",{href:!0});var rv=i(Ua);rf=r(rv,"TFPreTrainedModel"),rv.forEach(t),lf=r(Od,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Od.forEach(t),df=d(vt),js=s(vt,"P",{});var Wd=i(js);cf=r(Wd,"This model is also a "),Es=s(Wd,"A",{href:!0,rel:!0});var iv=i(Es);pf=r(iv,"tf.keras.Model"),iv.forEach(t),hf=r(Wd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Wd.forEach(t),uf=d(vt),b(Wo.$$.fragment,vt),mf=d(vt),ot=s(vt,"DIV",{class:!0});var Nt=i(ot);b(qs.$$.fragment,Nt),ff=d(Nt),ho=s(Nt,"P",{});var dr=i(ho);gf=r(dr,"The "),Ra=s(dr,"A",{href:!0});var lv=i(Ra);_f=r(lv,"TFGPT2LMHeadModel"),lv.forEach(t),Tf=r(dr," forward method, overrides the "),Fi=s(dr,"CODE",{});var dv=i(Fi);vf=r(dv,"__call__"),dv.forEach(t),bf=r(dr," special method."),dr.forEach(t),kf=d(Nt),b(Bo.$$.fragment,Nt),yf=d(Nt),ji=s(Nt,"P",{});var cv=i(ji);wf=r(cv,"Example:"),cv.forEach(t),Pf=d(Nt),b(Cs.$$.fragment,Nt),Nt.forEach(t),vt.forEach(t),td=d(o),uo=s(o,"H2",{class:!0});var Bd=i(uo);Uo=s(Bd,"A",{id:!0,class:!0,href:!0});var pv=i(Uo);Ei=s(pv,"SPAN",{});var hv=i(Ei);b(Ds.$$.fragment,hv),hv.forEach(t),pv.forEach(t),$f=d(Bd),qi=s(Bd,"SPAN",{});var uv=i(qi);Mf=r(uv,"TFGPT2DoubleHeadsModel"),uv.forEach(t),Bd.forEach(t),od=d(o),He=s(o,"DIV",{class:!0});var bt=i(He);b(Hs.$$.fragment,bt),Gf=d(bt),Ci=s(bt,"P",{});var mv=i(Ci);xf=r(mv,`The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. 
The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence).`),mv.forEach(t),zf=d(bt),Ls=s(bt,"P",{});var Ud=i(Ls);Ff=r(Ud,"This model inherits from "),Va=s(Ud,"A",{href:!0});var fv=i(Va);jf=r(fv,"TFPreTrainedModel"),fv.forEach(t),Ef=r(Ud,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ud.forEach(t),qf=d(bt),Ns=s(bt,"P",{});var Rd=i(Ns);Cf=r(Rd,"This model is also a "),Is=s(Rd,"A",{href:!0,rel:!0});var gv=i(Is);Df=r(gv,"tf.keras.Model"),gv.forEach(t),Hf=r(Rd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Rd.forEach(t),Lf=d(bt),b(Ro.$$.fragment,bt),Nf=d(bt),nt=s(bt,"DIV",{class:!0});var It=i(nt);b(Ss.$$.fragment,It),If=d(It),mo=s(It,"P",{});var cr=i(mo);Sf=r(cr,"The "),Ja=s(cr,"A",{href:!0});var _v=i(Ja);Af=r(_v,"TFGPT2DoubleHeadsModel"),_v.forEach(t),Of=r(cr," forward method, overrides the "),Di=s(cr,"CODE",{});var Tv=i(Di);Wf=r(Tv,"__call__"),Tv.forEach(t),Bf=r(cr," special method."),cr.forEach(t),Uf=d(It),b(Vo.$$.fragment,It),Rf=d(It),Hi=s(It,"P",{});var vv=i(Hi);Vf=r(vv,"Examples:"),vv.forEach(t),Jf=d(It),b(As.$$.fragment,It),It.forEach(t),bt.forEach(t),nd=d(o),fo=s(o,"H2",{class:!0});var Vd=i(fo);Jo=s(Vd,"A",{id:!0,class:!0,href:!0});var bv=i(Jo);Li=s(bv,"SPAN",{});var kv=i(Li);b(Os.$$.fragment,kv),kv.forEach(t),bv.forEach(t),Kf=d(Vd),Ni=s(Vd,"SPAN",{});var yv=i(Ni);Xf=r(yv,"TFGPT2ForSequenceClassification"),yv.forEach(t),Vd.forEach(t),sd=d(o),de=s(o,"DIV",{class:!0});var Ie=i(de);b(Ws.$$.fragment,Ie),Yf=d(Ie),Ii=s(Ie,"P",{});var wv=i(Ii);Qf=r(wv,"The GPT2 Model transformer with a sequence classification head on top (linear layer)."),wv.forEach(t),Zf=d(Ie),Ka=s(Ie,"P",{});var K_=i(Ka);Xa=s(K_,"A",{href:!0});var Pv=i(Xa);eg=r(Pv,"TFGPT2ForSequenceClassification"),Pv.forEach(t),tg=r(K_,` uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do.`),K_.forEach(t),og=d(Ie),ft=s(Ie,"P",{});var St=i(ft);ng=r(St,`Since it does classification on the last token, it requires to know the position of the last token. If a `),Si=s(St,"CODE",{});var $v=i(Si);sg=r($v,"pad_token_id"),$v.forEach(t),ag=r(St,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Ai=s(St,"CODE",{});var Mv=i(Ai);rg=r(Mv,"pad_token_id"),Mv.forEach(t),ig=r(St,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Oi=s(St,"CODE",{});var Gv=i(Oi);lg=r(Gv,"inputs_embeds"),Gv.forEach(t),dg=r(St," are passed instead of "),Wi=s(St,"CODE",{});var xv=i(Wi);cg=r(xv,"input_ids"),xv.forEach(t),pg=r(St,`, it does the same (take the last value in each row of the batch).`),St.forEach(t),hg=d(Ie),Bs=s(Ie,"P",{});var Jd=i(Bs);ug=r(Jd,"This model inherits from "),Ya=s(Jd,"A",{href:!0});var zv=i(Ya);mg=r(zv,"TFPreTrainedModel"),zv.forEach(t),fg=r(Jd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jd.forEach(t),gg=d(Ie),Us=s(Ie,"P",{});var Kd=i(Us);_g=r(Kd,"This model is also a "),Rs=s(Kd,"A",{href:!0,rel:!0});var Fv=i(Rs);Tg=r(Fv,"tf.keras.Model"),Fv.forEach(t),vg=r(Kd,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Kd.forEach(t),bg=d(Ie),b(Ko.$$.fragment,Ie),kg=d(Ie),st=s(Ie,"DIV",{class:!0});var At=i(st);b(Vs.$$.fragment,At),yg=d(At),go=s(At,"P",{});var pr=i(go);wg=r(pr,"The "),Qa=s(pr,"A",{href:!0});var jv=i(Qa);Pg=r(jv,"TFGPT2ForSequenceClassification"),jv.forEach(t),$g=r(pr," forward method, overrides the "),Bi=s(pr,"CODE",{});var Ev=i(Bi);Mg=r(Ev,"__call__"),Ev.forEach(t),Gg=r(pr," special method."),pr.forEach(t),xg=d(At),b(Xo.$$.fragment,At),zg=d(At),Ui=s(At,"P",{});var qv=i(Ui);Fg=r(qv,"Example:"),qv.forEach(t),jg=d(At),b(Js.$$.fragment,At),At.forEach(t),Ie.forEach(t),ad=d(o),_o=s(o,"H2",{class:!0});var Xd=i(_o);Yo=s(Xd,"A",{id:!0,class:!0,href:!0});var Cv=i(Yo);Ri=s(Cv,"SPAN",{});var Dv=i(Ri);b(Ks.$$.fragment,Dv),Dv.forEach(t),Cv.forEach(t),Eg=d(Xd),Vi=s(Xd,"SPAN",{});var Hv=i(Vi);qg=r(Hv,"TFSequenceClassifierOutputWithPast"),Hv.forEach(t),Xd.forEach(t),rd=d(o),To=s(o,"DIV",{class:!0});var Yd=i(To);b(Xs.$$.fragment,Yd),Cg=d(Yd),Ji=s(Yd,"P",{});var Lv=i(Ji);Dg=r(Lv,"Base class for outputs of sentence classification models."),Lv.forEach(t),Yd.forEach(t),id=d(o),vo=s(o,"H2",{class:!0});var Qd=i(vo);Qo=s(Qd,"A",{id:!0,class:!0,href:!0});var Nv=i(Qo);Ki=s(Nv,"SPAN",{});var Iv=i(Ki);b(Ys.$$.fragment,Iv),Iv.forEach(t),Nv.forEach(t),Hg=d(Qd),Xi=s(Qd,"SPAN",{});var Sv=i(Xi);Lg=r(Sv,"FlaxGPT2Model"),Sv.forEach(t),Qd.forEach(t),ld=d(o),Pe=s(o,"DIV",{class:!0});var ht=i(Pe);b(Qs.$$.fragment,ht),Ng=d(ht),Yi=s(ht,"P",{});var Av=i(Yi);Ig=r(Av,"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top."),Av.forEach(t),Sg=d(ht),Zs=s(ht,"P",{});var Zd=i(Zs);Ag=r(Zd,"This model inherits from "),Za=s(Zd,"A",{href:!0});var Ov=i(Za);Og=r(Ov,"FlaxPreTrainedModel"),Ov.forEach(t),Wg=r(Zd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zd.forEach(t),Bg=d(ht),ea=s(ht,"P",{});var ec=i(ea);Ug=r(ec,"This model is also a Flax Linen "),ta=s(ec,"A",{href:!0,rel:!0});var Wv=i(ta);Rg=r(Wv,"flax.nn.Module"),Wv.forEach(t),Vg=r(ec,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ec.forEach(t),Jg=d(ht),Qi=s(ht,"P",{});var Bv=i(Qi);Kg=r(Bv,"Finally, this model supports inherent JAX features such as:"),Bv.forEach(t),Xg=d(ht),wt=s(ht,"UL",{});var rn=i(wt);Zi=s(rn,"LI",{});var Uv=i(Zi);oa=s(Uv,"A",{href:!0,rel:!0});var Rv=i(oa);Yg=r(Rv,"Just-In-Time (JIT) compilation"),Rv.forEach(t),Uv.forEach(t),Qg=d(rn),el=s(rn,"LI",{});var Vv=i(el);na=s(Vv,"A",{href:!0,rel:!0});var Jv=i(na);Zg=r(Jv,"Automatic Differentiation"),Jv.forEach(t),Vv.forEach(t),e_=d(rn),tl=s(rn,"LI",{});var Kv=i(tl);sa=s(Kv,"A",{href:!0,rel:!0});var Xv=i(sa);t_=r(Xv,"Vectorization"),Xv.forEach(t),Kv.forEach(t),o_=d(rn),ol=s(rn,"LI",{});var Yv=i(ol);aa=s(Yv,"A",{href:!0,rel:!0});var Qv=i(aa);n_=r(Qv,"Parallelization"),Qv.forEach(t),Yv.forEach(t),rn.forEach(t),s_=d(ht),at=s(ht,"DIV",{class:!0});var Ot=i(at);b(ra.$$.fragment,Ot),a_=d(Ot),bo=s(Ot,"P",{});var hr=i(bo);r_=r(hr,"The "),nl=s(hr,"CODE",{});var Zv=i(nl);i_=r(Zv,"FlaxGPT2PreTrainedModel"),Zv.forEach(t),l_=r(hr," forward method, overrides the "),sl=s(hr,"CODE",{});var eb=i(sl);d_=r(eb,"__call__"),eb.forEach(t),c_=r(hr," special method."),hr.forEach(t),p_=d(Ot),b(Zo.$$.fragment,Ot),h_=d(Ot),al=s(Ot,"P",{});var tb=i(al);u_=r(tb,"Example:"),tb.forEach(t),m_=d(Ot),b(ia.$$.fragment,Ot),Ot.forEach(t),ht.forEach(t),dd=d(o),ko=s(o,"H2",{class:!0});var tc=i(ko);en=s(tc,"A",{id:!0,class:!0,href:!0});var ob=i(en);rl=s(ob,"SPAN",{});var nb=i(rl);b(la.$$.fragment,nb),nb.forEach(t),ob.forEach(t),f_=d(tc),il=s(tc,"SPAN",{});var sb=i(il);g_=r(sb,"FlaxGPT2LMHeadModel"),sb.forEach(t),tc.forEach(t),cd=d(o),$e=s(o,"DIV",{class:!0});var ut=i($e);b(da.$$.fragment,ut),__=d(ut),ll=s(ut,"P",{});var ab=i(ll);T_=r(ab,`The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),ab.forEach(t),v_=d(ut),ca=s(ut,"P",{});var oc=i(ca);b_=r(oc,"This model inherits from "),er=s(oc,"A",{href:!0});var rb=i(er);k_=r(rb,"FlaxPreTrainedModel"),rb.forEach(t),y_=r(oc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oc.forEach(t),w_=d(ut),pa=s(ut,"P",{});var nc=i(pa);P_=r(nc,"This model is also a Flax Linen "),ha=s(nc,"A",{href:!0,rel:!0});var ib=i(ha);$_=r(ib,"flax.nn.Module"),ib.forEach(t),M_=r(nc,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),nc.forEach(t),G_=d(ut),dl=s(ut,"P",{});var lb=i(dl);x_=r(lb,"Finally, this model supports inherent JAX features such as:"),lb.forEach(t),z_=d(ut),Pt=s(ut,"UL",{});var ln=i(Pt);cl=s(ln,"LI",{});var db=i(cl);ua=s(db,"A",{href:!0,rel:!0});var cb=i(ua);F_=r(cb,"Just-In-Time (JIT) compilation"),cb.forEach(t),db.forEach(t),j_=d(ln),pl=s(ln,"LI",{});var pb=i(pl);ma=s(pb,"A",{href:!0,rel:!0});var hb=i(ma);E_=r(hb,"Automatic Differentiation"),hb.forEach(t),pb.forEach(t),q_=d(ln),hl=s(ln,"LI",{});var ub=i(hl);fa=s(ub,"A",{href:!0,rel:!0});var mb=i(fa);C_=r(mb,"Vectorization"),mb.forEach(t),ub.forEach(t),D_=d(ln),ul=s(ln,"LI",{});var fb=i(ul);ga=s(fb,"A",{href:!0,rel:!0});var gb=i(ga);H_=r(gb,"Parallelization"),gb.forEach(t),fb.forEach(t),ln.forEach(t),L_=d(ut),rt=s(ut,"DIV",{class:!0});var Wt=i(rt);b(_a.$$.fragment,Wt),N_=d(Wt),yo=s(Wt,"P",{});var ur=i(yo);I_=r(ur,"The "),ml=s(ur,"CODE",{});var _b=i(ml);S_=r(_b,"FlaxGPT2PreTrainedModel"),_b.forEach(t),A_=r(ur," forward method, overrides the "),fl=s(ur,"CODE",{});var Tb=i(fl);O_=r(Tb,"__call__"),Tb.forEach(t),W_=r(ur," special method."),ur.forEach(t),B_=d(Wt),b(tn.$$.fragment,Wt),U_=d(Wt),gl=s(Wt,"P",{});var vb=i(gl);R_=r(vb,"Example:"),vb.forEach(t),V_=d(Wt),b(Ta.$$.fragment,Wt),Wt.forEach(t),ut.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(Ob)),c(T,"id","openai-gpt2"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#openai-gpt2"),c(f,"class","relative group"),c(R,"id","overview"),c(R,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(R,"href","#overview"),c(z,"class","relative group"),c(ee,"href","https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf"),c(ee,"rel","nofollow"),c(wa,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Model.forward"),c(Pa,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2Model.call"),c(dn,"href","https://github.com/stanford-crfm/mistral/"),c(dn,"rel","nofollow"),c(cn,"href","https://transformer.huggingface.co/doc/gpt2-large"),c(cn,"rel","nofollow"),c(pn,"href","https://huggingface.co/thomwolf"),c(pn,"rel","nofollow"),c(hn,"href","https://openai.com/blog/better-language-models/"),c(hn,"rel","nofollow"),c(wo,"id","transformers.GPT2Config"),c(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wo,"href","#transformers.GPT2Config"),c(Ut,"class","relative group"),c($a,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Model"),c(Ma,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2Model"),c(fn,"href","https://huggingface.co/gpt2"),c(fn,"rel","nofollow"),c(Ga,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(xa,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Re,"class","docstring"),c(Po,"id","transformers.GPT2Tokenizer"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.GPT2Tokenizer"),c(Vt,"class","relative group"),c(za,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Mr,"class","docstring"),c(le,"class","docstring"),c(Mo,"id","transformers.GPT2TokenizerFast"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mo,"href","#transformers.GPT2TokenizerFast"),c(Jt,"class","relative group"),c(Fa,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(be,"class","docstring"),c(xo,"id","transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput"),c(xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xo,"href","#transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput"),c(Kt,"class","relative group"),c(Xt,"class","docstring"),c(Yt,"class","docstring"),c(zo,"id","transformers.GPT2Model"),c(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zo,"href","#transformers.GPT2Model"),c(Qt,"class","relative group"),c(ja,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Dn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Dn,"rel","nofollow"),c(Ea,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2Model"),c(Ke,"class","docstring"),c(Xe,"class","docstring"),c(gt,"class","docstring"),c(ke,"class","docstring"),c(jo,"id","transformers.GPT2LMHeadModel"),c(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jo,"href","#transformers.GPT2LMHeadModel"),c(eo,"class","relative group"),c(qa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Rn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Rn,"rel","nofollow"),c(Ca,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2LMHeadModel"),c(Ye,"class","docstring"),c(Qe,"class","docstring"),c(_t,"class","docstring"),c(ye,"class","docstring"),c(qo,"id","transformers.GPT2DoubleHeadsModel"),c(qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qo,"href","#transformers.GPT2DoubleHeadsModel"),c(oo,"class","relative group"),c(Da,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ns,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ns,"rel","nofollow"),c(Ha,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2DoubleHeadsModel"),c(Ze,"class","docstring"),c(Ve,"class","docstring"),c(Do,"id","transformers.GPT2ForSequenceClassification"),c(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Do,"href","#transformers.GPT2ForSequenceClassification"),c(so,"class","relative 
group"),c(Na,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForSequenceClassification"),c(Ia,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(cs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(cs,"rel","nofollow"),c(Sa,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForSequenceClassification"),c(Ge,"class","docstring"),c(we,"class","docstring"),c(Lo,"id","transformers.GPT2ForTokenClassification"),c(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lo,"href","#transformers.GPT2ForTokenClassification"),c(ro,"class","relative group"),c(Aa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ts,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ts,"rel","nofollow"),c(Oa,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.GPT2ForTokenClassification"),c(et,"class","docstring"),c(Je,"class","docstring"),c(Io,"id","transformers.TFGPT2Model"),c(Io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Io,"href","#transformers.TFGPT2Model"),c(lo,"class","relative group"),c(Wa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c($s,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c($s,"rel","nofollow"),c(Ba,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2Model"),c(tt,"class","docstring"),c(Ce,"class","docstring"),c(Oo,"id","transformers.TFGPT2LMHeadModel"),c(Oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oo,"href","#transformers.TFGPT2LMHeadModel"),c(po,"class","relative group"),c(Ua,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Es,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Es,"rel","nofollow"),c(Ra,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2LMHeadModel"),c(ot,"class","docstring"),c(De,"class","docstring"),c(Uo,"id","transformers.TFGPT2DoubleHeadsModel"),c(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Uo,"href","#transformers.TFGPT2DoubleHeadsModel"),c(uo,"class","relative group"),c(Va,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Is,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Is,"rel","nofollow"),c(Ja,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2DoubleHeadsModel"),c(nt,"class","docstring"),c(He,"class","docstring"),c(Jo,"id","transformers.TFGPT2ForSequenceClassification"),c(Jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jo,"href","#transformers.TFGPT2ForSequenceClassification"),c(fo,"class","relative 
group"),c(Xa,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2ForSequenceClassification"),c(Ya,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Rs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Rs,"rel","nofollow"),c(Qa,"href","/docs/transformers/v4.15.0/en/model_doc/gpt2#transformers.TFGPT2ForSequenceClassification"),c(st,"class","docstring"),c(de,"class","docstring"),c(Yo,"id","transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast"),c(Yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yo,"href","#transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast"),c(_o,"class","relative group"),c(To,"class","docstring"),c(Qo,"id","transformers.FlaxGPT2Model"),c(Qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qo,"href","#transformers.FlaxGPT2Model"),c(vo,"class","relative group"),c(Za,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ta,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(ta,"rel","nofollow"),c(oa,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(oa,"rel","nofollow"),c(na,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(na,"rel","nofollow"),c(sa,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(sa,"rel","nofollow"),c(aa,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(aa,"rel","nofollow"),c(at,"class","docstring"),c(Pe,"class","docstring"),c(en,"id","transformers.FlaxGPT2LMHeadModel"),c(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(en,"href","#transformers.FlaxGPT2LMHeadModel"),c(ko,"class","relative 
group"),c(er,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ha,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(ha,"rel","nofollow"),c(ua,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(ua,"rel","nofollow"),c(ma,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(ma,"rel","nofollow"),c(fa,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(fa,"rel","nofollow"),c(ga,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(ga,"rel","nofollow"),c(rt,"class","docstring"),c($e,"class","docstring")},m(o,m){e(document.head,p),h(o,M,m),h(o,f,m),e(f,T),e(T,$),k(_,$,null),e(f,g),e(f,x),e(x,Q),h(o,C,m),h(o,z,m),e(z,R),e(R,S),k(Z,S,null),e(z,ue),e(z,A),e(A,me),h(o,ce,m),h(o,W,m),e(W,L),e(W,ee),e(ee,te),e(W,F),h(o,E,m),h(o,se,m),e(se,V),h(o,pe,m),h(o,ae,m),e(ae,O),e(O,fe),h(o,he,m),h(o,j,m),e(j,ge),h(o,B,m),h(o,U,m),e(U,re),e(re,J),e(U,_e),e(U,oe),e(oe,N),e(oe,ie),e(ie,K),e(oe,Te),e(U,u),e(U,G),e(G,ne),e(G,Fe),e(Fe,Se),e(G,I),e(G,je),e(je,Ae),e(G,Oe),e(G,D),e(D,X),e(G,We),e(G,Ee),e(Ee,Y),e(G,Be),e(G,qe),e(qe,ve),e(G,Ue),e(G,wa),e(wa,sc),e(G,ac),e(G,mr),e(mr,rc),e(G,ic),e(G,Pa),e(Pa,lc),e(G,dc),e(U,cc),e(U,kt),e(kt,pc),e(kt,fr),e(fr,hc),e(kt,uc),e(kt,gr),e(gr,mc),e(kt,fc),e(kt,dn),e(dn,gc),e(kt,_c),h(o,Fl,m),h(o,Bt,m),e(Bt,cn),e(cn,Tc),e(Bt,vc),e(Bt,_r),e(_r,bc),e(Bt,kc),h(o,jl,m),h(o,Mt,m),e(Mt,yc),e(Mt,pn),e(pn,wc),e(Mt,Pc),e(Mt,hn),e(hn,$c),e(Mt,Mc),h(o,El,m),h(o,Ut,m),e(Ut,wo),e(wo,Tr),k(un,Tr,null),e(Ut,Gc),e(Ut,vr),e(vr,xc),h(o,ql,m),h(o,Re,m),k(mn,Re,null),e(Re,zc),e(Re,yt),e(yt,Fc),e(yt,$a),e($a,jc),e(yt,Ec),e(yt,Ma),e(Ma,qc),e(yt,Cc),e(yt,fn),e(fn,Dc),e(yt,Hc),e(Re,Lc),e(Re,Rt),e(Rt,Nc),e(Rt,Ga),e(Ga,Ic),e(Rt,Sc),e(Rt,xa),e(xa,Ac),e(Rt,Oc),e(Re,Wc),e(Re,br),e(br,Bc),e(Re,Uc),k(gn,Re,null),h(o,Cl,m),h(o,Vt,m),e(Vt,Po),e(Po,kr),k(_n,kr,null),e(Vt,Rc),e(Vt,yr),e(yr,Vc),h(o,Dl,m),h(o,le,m),k(Tn,le,null),e(le,Jc),e(le,wr),e(wr,Kc),e(le,Xc),e(le,Pr),e(Pr,Yc),e(le,Qc),k(vn,le,null),e(le,Zc),e(le,bn),e(bn,ep),e(bn,$r),e($r,tp),e(bn,op),e(le,np),k($o,le,null),e(le,sp),e(le,kn),e(kn,ap),e(kn,za),e(za,rp),e(kn,ip),e(le,lp),e(le,Mr),h(o,Hl,m),h(o,Jt,m),e(Jt,Mo),e(Mo,Gr),k(yn,Gr,null),e(Jt,dp),e(Jt,xr),e(xr,cp),h(o,Ll,m),h(o,be,m),k(wn,be,null),e(be,pp),e(be,Pn),e(Pn,hp),e(Pn,zr),e(zr,up),e(Pn,mp),e(be,fp),e(be,Fr),e(Fr,gp),e(be,_p),k($n,be,null),e(be,Tp),e(be,Mn),e(Mn,vp),e(Mn,jr),e(jr,bp),e(Mn,kp),e(be,yp),k(Go,be,null),e(be,wp),e(be,Gn),e(Gn,Pp),e(Gn,Fa),e(Fa,$p),e(Gn,Mp),h(o,Nl,m),h(o,Kt,m),e(Kt,xo),e(xo,Er),k(xn,Er,null),e(Kt,Gp),e(Kt,qr),e(qr,xp),h(o,Il,m),h(o,Xt,m),k(zn,Xt,null),e(Xt,zp),e(Xt,Cr),e(Cr,Fp),h(o,Sl,m),h(o,Yt,m),k(Fn,Yt,null),e(Yt,jp),e(Yt,Dr),e(Dr,Ep),h(o,Al,m),h(o,Qt,m),e(Qt,zo),e(zo,Hr),k(jn,Hr,null),e(Qt,qp),e(Qt,Lr),e(Lr,Cp),h(o,Ol,m),h(o,ke,m),k(En,ke,null),e(ke,Dp),e(ke,Nr),e(Nr,Hp),e(ke,Lp),e(ke,qn),e(qn,Np),e(qn,ja),e(ja,Ip),e(qn,Sp),e(ke,Ap),e(ke,Cn),e(Cn,Op),e(Cn,Dn),e(Dn,Wp),e(Cn,Bp),e(ke,Up),e(ke,Ke),k(Hn,Ke,null),e(Ke,Rp),e(Ke,Zt),e(Zt,Vp),e(Zt,Ea),e(Ea,Jp),e(Zt,Kp),e(Zt,Ir),e(Ir,Xp),e(Zt,Yp),e(Ke,Qp),k(Fo,Ke,null),e(Ke,Zp),e(Ke,Sr),e(Sr,eh),e(Ke,th),k(Ln,Ke,null),e(ke,oh),e(ke,Xe),k(Nn,Xe,null),e(Xe,nh),e(Xe,Ar),e(Ar,sh),e(Xe,ah),e(Xe,Or),e(Or,rh),e(Xe,ih),e(Xe,Wr),e(Wr,lh),e(Xe,dh),k(In,Xe,null),e(ke,ch),e(ke,gt),k(Sn,gt,null),e(gt,ph),e(gt,Br),e(Br,hh),e(gt,uh),e(gt,Ur),e(Ur,mh),e(gt,fh),k(An,gt,null),h(o,Wl,m),h(o,eo,m),e(eo,jo),e(jo,Rr),k(On,Rr,null),e(eo,gh),e(eo,Vr),e(Vr,_
h),h(o,Bl,m),h(o,ye,m),k(Wn,ye,null),e(ye,Th),e(ye,Jr),e(Jr,vh),e(ye,bh),e(ye,Bn),e(Bn,kh),e(Bn,qa),e(qa,yh),e(Bn,wh),e(ye,Ph),e(ye,Un),e(Un,$h),e(Un,Rn),e(Rn,Mh),e(Un,Gh),e(ye,xh),e(ye,Ye),k(Vn,Ye,null),e(Ye,zh),e(Ye,to),e(to,Fh),e(to,Ca),e(Ca,jh),e(to,Eh),e(to,Kr),e(Kr,qh),e(to,Ch),e(Ye,Dh),k(Eo,Ye,null),e(Ye,Hh),e(Ye,Xr),e(Xr,Lh),e(Ye,Nh),k(Jn,Ye,null),e(ye,Ih),e(ye,Qe),k(Kn,Qe,null),e(Qe,Sh),e(Qe,Yr),e(Yr,Ah),e(Qe,Oh),e(Qe,Qr),e(Qr,Wh),e(Qe,Bh),e(Qe,Zr),e(Zr,Uh),e(Qe,Rh),k(Xn,Qe,null),e(ye,Vh),e(ye,_t),k(Yn,_t,null),e(_t,Jh),e(_t,ei),e(ei,Kh),e(_t,Xh),e(_t,ti),e(ti,Yh),e(_t,Qh),k(Qn,_t,null),h(o,Ul,m),h(o,oo,m),e(oo,qo),e(qo,oi),k(Zn,oi,null),e(oo,Zh),e(oo,ni),e(ni,eu),h(o,Rl,m),h(o,Ve,m),k(es,Ve,null),e(Ve,tu),e(Ve,si),e(si,ou),e(Ve,nu),e(Ve,ts),e(ts,su),e(ts,Da),e(Da,au),e(ts,ru),e(Ve,iu),e(Ve,os),e(os,lu),e(os,ns),e(ns,du),e(os,cu),e(Ve,pu),e(Ve,Ze),k(ss,Ze,null),e(Ze,hu),e(Ze,no),e(no,uu),e(no,Ha),e(Ha,mu),e(no,fu),e(no,ai),e(ai,gu),e(no,_u),e(Ze,Tu),k(Co,Ze,null),e(Ze,vu),e(Ze,ri),e(ri,bu),e(Ze,ku),k(as,Ze,null),h(o,Vl,m),h(o,so,m),e(so,Do),e(Do,ii),k(rs,ii,null),e(so,yu),e(so,li),e(li,wu),h(o,Jl,m),h(o,we,m),k(is,we,null),e(we,Pu),e(we,di),e(di,$u),e(we,Mu),e(we,La),e(La,Na),e(Na,Gu),e(La,xu),e(we,zu),e(we,mt),e(mt,Fu),e(mt,ci),e(ci,ju),e(mt,Eu),e(mt,pi),e(pi,qu),e(mt,Cu),e(mt,hi),e(hi,Du),e(mt,Hu),e(mt,ui),e(ui,Lu),e(mt,Nu),e(we,Iu),e(we,ls),e(ls,Su),e(ls,Ia),e(Ia,Au),e(ls,Ou),e(we,Wu),e(we,ds),e(ds,Bu),e(ds,cs),e(cs,Uu),e(ds,Ru),e(we,Vu),e(we,Ge),k(ps,Ge,null),e(Ge,Ju),e(Ge,ao),e(ao,Ku),e(ao,Sa),e(Sa,Xu),e(ao,Yu),e(ao,mi),e(mi,Qu),e(ao,Zu),e(Ge,em),k(Ho,Ge,null),e(Ge,tm),e(Ge,fi),e(fi,om),e(Ge,nm),k(hs,Ge,null),e(Ge,sm),e(Ge,gi),e(gi,am),e(Ge,rm),k(us,Ge,null),h(o,Kl,m),h(o,ro,m),e(ro,Lo),e(Lo,_i),k(ms,_i,null),e(ro,im),e(ro,Ti),e(Ti,lm),h(o,Xl,m),h(o,Je,m),k(fs,Je,null),e(Je,dm),e(Je,vi),e(vi,cm),e(Je,pm),e(Je,gs),e(gs,hm),e(gs,Aa),e(Aa,um),e(gs,mm),e(Je,fm),e(Je,_s),e(_s,gm),e(_s,Ts),e(Ts,_m),e(_s,Tm),e(Je,vm),e(Je,et),k(vs,et,null),e(et,bm),e(et,io),e(io,km),e(io,Oa),e(Oa,ym),e(io,wm),e(io,bi),e(bi,Pm),e(io,$m),e(et,Mm),k(No,et,null),e(et,Gm),e(et,ki),e(ki,xm),e(et,zm),k(bs,et,null),h(o,Yl,m),h(o,lo,m),e(lo,Io),e(Io,yi),k(ks,yi,null),e(lo,Fm),e(lo,wi),e(wi,jm),h(o,Ql,m),h(o,Ce,m),k(ys,Ce,null),e(Ce,Em),e(Ce,Pi),e(Pi,qm),e(Ce,Cm),e(Ce,ws),e(ws,Dm),e(ws,Wa),e(Wa,Hm),e(ws,Lm),e(Ce,Nm),e(Ce,Ps),e(Ps,Im),e(Ps,$s),e($s,Sm),e(Ps,Am),e(Ce,Om),k(So,Ce,null),e(Ce,Wm),e(Ce,tt),k(Ms,tt,null),e(tt,Bm),e(tt,co),e(co,Um),e(co,Ba),e(Ba,Rm),e(co,Vm),e(co,$i),e($i,Jm),e(co,Km),e(tt,Xm),k(Ao,tt,null),e(tt,Ym),e(tt,Mi),e(Mi,Qm),e(tt,Zm),k(Gs,tt,null),h(o,Zl,m),h(o,po,m),e(po,Oo),e(Oo,Gi),k(xs,Gi,null),e(po,ef),e(po,xi),e(xi,tf),h(o,ed,m),h(o,De,m),k(zs,De,null),e(De,of),e(De,zi),e(zi,nf),e(De,sf),e(De,Fs),e(Fs,af),e(Fs,Ua),e(Ua,rf),e(Fs,lf),e(De,df),e(De,js),e(js,cf),e(js,Es),e(Es,pf),e(js,hf),e(De,uf),k(Wo,De,null),e(De,mf),e(De,ot),k(qs,ot,null),e(ot,ff),e(ot,ho),e(ho,gf),e(ho,Ra),e(Ra,_f),e(ho,Tf),e(ho,Fi),e(Fi,vf),e(ho,bf),e(ot,kf),k(Bo,ot,null),e(ot,yf),e(ot,ji),e(ji,wf),e(ot,Pf),k(Cs,ot,null),h(o,td,m),h(o,uo,m),e(uo,Uo),e(Uo,Ei),k(Ds,Ei,null),e(uo,$f),e(uo,qi),e(qi,Mf),h(o,od,m),h(o,He,m),k(Hs,He,null),e(He,Gf),e(He,Ci),e(Ci,xf),e(He,zf),e(He,Ls),e(Ls,Ff),e(Ls,Va),e(Va,jf),e(Ls,Ef),e(He,qf),e(He,Ns),e(Ns,Cf),e(Ns,Is),e(Is,Df),e(Ns,Hf),e(He,Lf),k(Ro,He,null),e(He,Nf),e(He,nt),k(Ss,nt,null),e(nt,If),e(nt,mo),e(mo,Sf),e(mo,Ja),e(Ja,Af),e(mo,Of),e(mo,Di),e(Di,Wf),e(mo,Bf),e(nt,Uf),k(Vo,nt,null),e(nt,Rf),e(nt,Hi),e(Hi,Vf),e(nt,Jf),k(As,nt,null),h(o,nd,m),h(o,fo,m),e(fo,Jo),e(Jo,Li),k(Os,Li,nul
l),e(fo,Kf),e(fo,Ni),e(Ni,Xf),h(o,sd,m),h(o,de,m),k(Ws,de,null),e(de,Yf),e(de,Ii),e(Ii,Qf),e(de,Zf),e(de,Ka),e(Ka,Xa),e(Xa,eg),e(Ka,tg),e(de,og),e(de,ft),e(ft,ng),e(ft,Si),e(Si,sg),e(ft,ag),e(ft,Ai),e(Ai,rg),e(ft,ig),e(ft,Oi),e(Oi,lg),e(ft,dg),e(ft,Wi),e(Wi,cg),e(ft,pg),e(de,hg),e(de,Bs),e(Bs,ug),e(Bs,Ya),e(Ya,mg),e(Bs,fg),e(de,gg),e(de,Us),e(Us,_g),e(Us,Rs),e(Rs,Tg),e(Us,vg),e(de,bg),k(Ko,de,null),e(de,kg),e(de,st),k(Vs,st,null),e(st,yg),e(st,go),e(go,wg),e(go,Qa),e(Qa,Pg),e(go,$g),e(go,Bi),e(Bi,Mg),e(go,Gg),e(st,xg),k(Xo,st,null),e(st,zg),e(st,Ui),e(Ui,Fg),e(st,jg),k(Js,st,null),h(o,ad,m),h(o,_o,m),e(_o,Yo),e(Yo,Ri),k(Ks,Ri,null),e(_o,Eg),e(_o,Vi),e(Vi,qg),h(o,rd,m),h(o,To,m),k(Xs,To,null),e(To,Cg),e(To,Ji),e(Ji,Dg),h(o,id,m),h(o,vo,m),e(vo,Qo),e(Qo,Ki),k(Ys,Ki,null),e(vo,Hg),e(vo,Xi),e(Xi,Lg),h(o,ld,m),h(o,Pe,m),k(Qs,Pe,null),e(Pe,Ng),e(Pe,Yi),e(Yi,Ig),e(Pe,Sg),e(Pe,Zs),e(Zs,Ag),e(Zs,Za),e(Za,Og),e(Zs,Wg),e(Pe,Bg),e(Pe,ea),e(ea,Ug),e(ea,ta),e(ta,Rg),e(ea,Vg),e(Pe,Jg),e(Pe,Qi),e(Qi,Kg),e(Pe,Xg),e(Pe,wt),e(wt,Zi),e(Zi,oa),e(oa,Yg),e(wt,Qg),e(wt,el),e(el,na),e(na,Zg),e(wt,e_),e(wt,tl),e(tl,sa),e(sa,t_),e(wt,o_),e(wt,ol),e(ol,aa),e(aa,n_),e(Pe,s_),e(Pe,at),k(ra,at,null),e(at,a_),e(at,bo),e(bo,r_),e(bo,nl),e(nl,i_),e(bo,l_),e(bo,sl),e(sl,d_),e(bo,c_),e(at,p_),k(Zo,at,null),e(at,h_),e(at,al),e(al,u_),e(at,m_),k(ia,at,null),h(o,dd,m),h(o,ko,m),e(ko,en),e(en,rl),k(la,rl,null),e(ko,f_),e(ko,il),e(il,g_),h(o,cd,m),h(o,$e,m),k(da,$e,null),e($e,__),e($e,ll),e(ll,T_),e($e,v_),e($e,ca),e(ca,b_),e(ca,er),e(er,k_),e(ca,y_),e($e,w_),e($e,pa),e(pa,P_),e(pa,ha),e(ha,$_),e(pa,M_),e($e,G_),e($e,dl),e(dl,x_),e($e,z_),e($e,Pt),e(Pt,cl),e(cl,ua),e(ua,F_),e(Pt,j_),e(Pt,pl),e(pl,ma),e(ma,E_),e(Pt,q_),e(Pt,hl),e(hl,fa),e(fa,C_),e(Pt,D_),e(Pt,ul),e(ul,ga),e(ga,H_),e($e,L_),e($e,rt),k(_a,rt,null),e(rt,N_),e(rt,yo),e(yo,I_),e(yo,ml),e(ml,S_),e(yo,A_),e(yo,fl),e(fl,O_),e(yo,W_),e(rt,B_),k(tn,rt,null),e(rt,U_),e(rt,gl),e(gl,R_),e(rt,V_),k(Ta,rt,null),pd=!0},p(o,[m]){const va={};m&2&&(va.$$scope={dirty:m,ctx:o}),$o.$set(va);const _l={};m&2&&(_l.$$scope={dirty:m,ctx:o}),Go.$set(_l);const Tl={};m&2&&(Tl.$$scope={dirty:m,ctx:o}),Fo.$set(Tl);const vl={};m&2&&(vl.$$scope={dirty:m,ctx:o}),Eo.$set(vl);const ba={};m&2&&(ba.$$scope={dirty:m,ctx:o}),Co.$set(ba);const bl={};m&2&&(bl.$$scope={dirty:m,ctx:o}),Ho.$set(bl);const kl={};m&2&&(kl.$$scope={dirty:m,ctx:o}),No.$set(kl);const yl={};m&2&&(yl.$$scope={dirty:m,ctx:o}),So.$set(yl);const ka={};m&2&&(ka.$$scope={dirty:m,ctx:o}),Ao.$set(ka);const wl={};m&2&&(wl.$$scope={dirty:m,ctx:o}),Wo.$set(wl);const Pl={};m&2&&(Pl.$$scope={dirty:m,ctx:o}),Bo.$set(Pl);const $l={};m&2&&($l.$$scope={dirty:m,ctx:o}),Ro.$set($l);const Ml={};m&2&&(Ml.$$scope={dirty:m,ctx:o}),Vo.$set(Ml);const Gl={};m&2&&(Gl.$$scope={dirty:m,ctx:o}),Ko.$set(Gl);const $t={};m&2&&($t.$$scope={dirty:m,ctx:o}),Xo.$set($t);const xl={};m&2&&(xl.$$scope={dirty:m,ctx:o}),Zo.$set(xl);const 
ya={};m&2&&(ya.$$scope={dirty:m,ctx:o}),tn.$set(ya)},i(o){pd||(y(_.$$.fragment,o),y(Z.$$.fragment,o),y(un.$$.fragment,o),y(mn.$$.fragment,o),y(gn.$$.fragment,o),y(_n.$$.fragment,o),y(Tn.$$.fragment,o),y(vn.$$.fragment,o),y($o.$$.fragment,o),y(yn.$$.fragment,o),y(wn.$$.fragment,o),y($n.$$.fragment,o),y(Go.$$.fragment,o),y(xn.$$.fragment,o),y(zn.$$.fragment,o),y(Fn.$$.fragment,o),y(jn.$$.fragment,o),y(En.$$.fragment,o),y(Hn.$$.fragment,o),y(Fo.$$.fragment,o),y(Ln.$$.fragment,o),y(Nn.$$.fragment,o),y(In.$$.fragment,o),y(Sn.$$.fragment,o),y(An.$$.fragment,o),y(On.$$.fragment,o),y(Wn.$$.fragment,o),y(Vn.$$.fragment,o),y(Eo.$$.fragment,o),y(Jn.$$.fragment,o),y(Kn.$$.fragment,o),y(Xn.$$.fragment,o),y(Yn.$$.fragment,o),y(Qn.$$.fragment,o),y(Zn.$$.fragment,o),y(es.$$.fragment,o),y(ss.$$.fragment,o),y(Co.$$.fragment,o),y(as.$$.fragment,o),y(rs.$$.fragment,o),y(is.$$.fragment,o),y(ps.$$.fragment,o),y(Ho.$$.fragment,o),y(hs.$$.fragment,o),y(us.$$.fragment,o),y(ms.$$.fragment,o),y(fs.$$.fragment,o),y(vs.$$.fragment,o),y(No.$$.fragment,o),y(bs.$$.fragment,o),y(ks.$$.fragment,o),y(ys.$$.fragment,o),y(So.$$.fragment,o),y(Ms.$$.fragment,o),y(Ao.$$.fragment,o),y(Gs.$$.fragment,o),y(xs.$$.fragment,o),y(zs.$$.fragment,o),y(Wo.$$.fragment,o),y(qs.$$.fragment,o),y(Bo.$$.fragment,o),y(Cs.$$.fragment,o),y(Ds.$$.fragment,o),y(Hs.$$.fragment,o),y(Ro.$$.fragment,o),y(Ss.$$.fragment,o),y(Vo.$$.fragment,o),y(As.$$.fragment,o),y(Os.$$.fragment,o),y(Ws.$$.fragment,o),y(Ko.$$.fragment,o),y(Vs.$$.fragment,o),y(Xo.$$.fragment,o),y(Js.$$.fragment,o),y(Ks.$$.fragment,o),y(Xs.$$.fragment,o),y(Ys.$$.fragment,o),y(Qs.$$.fragment,o),y(ra.$$.fragment,o),y(Zo.$$.fragment,o),y(ia.$$.fragment,o),y(la.$$.fragment,o),y(da.$$.fragment,o),y(_a.$$.fragment,o),y(tn.$$.fragment,o),y(Ta.$$.fragment,o),pd=!0)},o(o){w(_.$$.fragment,o),w(Z.$$.fragment,o),w(un.$$.fragment,o),w(mn.$$.fragment,o),w(gn.$$.fragment,o),w(_n.$$.fragment,o),w(Tn.$$.fragment,o),w(vn.$$.fragment,o),w($o.$$.fragment,o),w(yn.$$.fragment,o),w(wn.$$.fragment,o),w($n.$$.fragment,o),w(Go.$$.fragment,o),w(xn.$$.fragment,o),w(zn.$$.fragment,o),w(Fn.$$.fragment,o),w(jn.$$.fragment,o),w(En.$$.fragment,o),w(Hn.$$.fragment,o),w(Fo.$$.fragment,o),w(Ln.$$.fragment,o),w(Nn.$$.fragment,o),w(In.$$.fragment,o),w(Sn.$$.fragment,o),w(An.$$.fragment,o),w(On.$$.fragment,o),w(Wn.$$.fragment,o),w(Vn.$$.fragment,o),w(Eo.$$.fragment,o),w(Jn.$$.fragment,o),w(Kn.$$.fragment,o),w(Xn.$$.fragment,o),w(Yn.$$.fragment,o),w(Qn.$$.fragment,o),w(Zn.$$.fragment,o),w(es.$$.fragment,o),w(ss.$$.fragment,o),w(Co.$$.fragment,o),w(as.$$.fragment,o),w(rs.$$.fragment,o),w(is.$$.fragment,o),w(ps.$$.fragment,o),w(Ho.$$.fragment,o),w(hs.$$.fragment,o),w(us.$$.fragment,o),w(ms.$$.fragment,o),w(fs.$$.fragment,o),w(vs.$$.fragment,o),w(No.$$.fragment,o),w(bs.$$.fragment,o),w(ks.$$.fragment,o),w(ys.$$.fragment,o),w(So.$$.fragment,o),w(Ms.$$.fragment,o),w(Ao.$$.fragment,o),w(Gs.$$.fragment,o),w(xs.$$.fragment,o),w(zs.$$.fragment,o),w(Wo.$$.fragment,o),w(qs.$$.fragment,o),w(Bo.$$.fragment,o),w(Cs.$$.fragment,o),w(Ds.$$.fragment,o),w(Hs.$$.fragment,o),w(Ro.$$.fragment,o),w(Ss.$$.fragment,o),w(Vo.$$.fragment,o),w(As.$$.fragment,o),w(Os.$$.fragment,o),w(Ws.$$.fragment,o),w(Ko.$$.fragment,o),w(Vs.$$.fragment,o),w(Xo.$$.fragment,o),w(Js.$$.fragment,o),w(Ks.$$.fragment,o),w(Xs.$$.fragment,o),w(Ys.$$.fragment,o),w(Qs.$$.fragment,o),w(ra.$$.fragment,o),w(Zo.$$.fragment,o),w(ia.$$.fragment,o),w(la.$$.fragment,o),w(da.$$.fragment,o),w(_a.$$.fragment,o),w(tn.$$.fragment,o),w(Ta.$$.fragment,o),pd=!1},d(o){t(p),o&&t(M),o&&t(f),P(_),o&&t(
C),o&&t(z),P(Z),o&&t(ce),o&&t(W),o&&t(E),o&&t(se),o&&t(pe),o&&t(ae),o&&t(he),o&&t(j),o&&t(B),o&&t(U),o&&t(Fl),o&&t(Bt),o&&t(jl),o&&t(Mt),o&&t(El),o&&t(Ut),P(un),o&&t(ql),o&&t(Re),P(mn),P(gn),o&&t(Cl),o&&t(Vt),P(_n),o&&t(Dl),o&&t(le),P(Tn),P(vn),P($o),o&&t(Hl),o&&t(Jt),P(yn),o&&t(Ll),o&&t(be),P(wn),P($n),P(Go),o&&t(Nl),o&&t(Kt),P(xn),o&&t(Il),o&&t(Xt),P(zn),o&&t(Sl),o&&t(Yt),P(Fn),o&&t(Al),o&&t(Qt),P(jn),o&&t(Ol),o&&t(ke),P(En),P(Hn),P(Fo),P(Ln),P(Nn),P(In),P(Sn),P(An),o&&t(Wl),o&&t(eo),P(On),o&&t(Bl),o&&t(ye),P(Wn),P(Vn),P(Eo),P(Jn),P(Kn),P(Xn),P(Yn),P(Qn),o&&t(Ul),o&&t(oo),P(Zn),o&&t(Rl),o&&t(Ve),P(es),P(ss),P(Co),P(as),o&&t(Vl),o&&t(so),P(rs),o&&t(Jl),o&&t(we),P(is),P(ps),P(Ho),P(hs),P(us),o&&t(Kl),o&&t(ro),P(ms),o&&t(Xl),o&&t(Je),P(fs),P(vs),P(No),P(bs),o&&t(Yl),o&&t(lo),P(ks),o&&t(Ql),o&&t(Ce),P(ys),P(So),P(Ms),P(Ao),P(Gs),o&&t(Zl),o&&t(po),P(xs),o&&t(ed),o&&t(De),P(zs),P(Wo),P(qs),P(Bo),P(Cs),o&&t(td),o&&t(uo),P(Ds),o&&t(od),o&&t(He),P(Hs),P(Ro),P(Ss),P(Vo),P(As),o&&t(nd),o&&t(fo),P(Os),o&&t(sd),o&&t(de),P(Ws),P(Ko),P(Vs),P(Xo),P(Js),o&&t(ad),o&&t(_o),P(Ks),o&&t(rd),o&&t(To),P(Xs),o&&t(id),o&&t(vo),P(Ys),o&&t(ld),o&&t(Pe),P(Qs),P(ra),P(Zo),P(ia),o&&t(dd),o&&t(ko),P(la),o&&t(cd),o&&t($e),P(da),P(_a),P(tn),P(Ta)}}}const Ob={local:"openai-gpt2",sections:[{local:"overview",title:"Overview"},{local:"transformers.GPT2Config",title:"GPT2Config"},{local:"transformers.GPT2Tokenizer",title:"GPT2Tokenizer"},{local:"transformers.GPT2TokenizerFast",title:"GPT2TokenizerFast"},{local:"transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput",title:"GPT2 specific outputs"},{local:"transformers.GPT2Model",title:"GPT2Model"},{local:"transformers.GPT2LMHeadModel",title:"GPT2LMHeadModel"},{local:"transformers.GPT2DoubleHeadsModel",title:"GPT2DoubleHeadsModel"},{local:"transformers.GPT2ForSequenceClassification",title:"GPT2ForSequenceClassification"},{local:"transformers.GPT2ForTokenClassification",title:"GPT2ForTokenClassification"},{local:"transformers.TFGPT2Model",title:"TFGPT2Model"},{local:"transformers.TFGPT2LMHeadModel",title:"TFGPT2LMHeadModel"},{local:"transformers.TFGPT2DoubleHeadsModel",title:"TFGPT2DoubleHeadsModel"},{local:"transformers.TFGPT2ForSequenceClassification",title:"TFGPT2ForSequenceClassification"},{local:"transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast",title:"TFSequenceClassifierOutputWithPast"},{local:"transformers.FlaxGPT2Model",title:"FlaxGPT2Model"},{local:"transformers.FlaxGPT2LMHeadModel",title:"FlaxGPT2LMHeadModel"}],title:"OpenAI GPT2"};function Wb(q,p,M){let{fw:f}=p;return q.$$set=T=>{"fw"in T&&M(0,f=T.fw)},[f]}class Xb extends bb{constructor(p){super();kb(this,p,Wb,Ab,yb,{fw:0})}}export{Xb as default,Ob as metadata};
9,971
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/vision_text_dual_encoder.mdx-f69ef3f1.js
import{S as vi,i as xi,s as Ti,e as a,k as d,w as v,t as n,L as Ei,c as s,d as t,m as c,a as i,x,h as r,b as l,J as e,g as u,y as T,q as E,o as b,B as $}from"../../chunks/vendor-b1433968.js";import{T as on}from"../../chunks/Tip-c3840994.js";import{D as R}from"../../chunks/Docstring-ff504c58.js";import{C as ns}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Bt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function bi(J){let m,w,f,g,k,h,_,P;return{c(){m=a("p"),w=n(`This class method is simply calling AutoFeatureExtractor\u2019s `),f=a("code"),g=n("from_pretrained"),k=n(` and AutoTokenizer\u2019s `),h=a("code"),_=n("from_pretrained"),P=n(`. Please refer to the docstrings of the methods above for more information.`)},l(I){m=s(I,"P",{});var D=i(m);w=r(D,`This class method is simply calling AutoFeatureExtractor\u2019s `),f=s(D,"CODE",{});var A=i(f);g=r(A,"from_pretrained"),A.forEach(t),k=r(D,` and AutoTokenizer\u2019s `),h=s(D,"CODE",{});var L=i(h);_=r(L,"from_pretrained"),L.forEach(t),P=r(D,`. Please refer to the docstrings of the methods above for more information.`),D.forEach(t)},m(I,D){u(I,m,D),e(m,w),e(m,f),e(f,g),e(m,k),e(m,h),e(h,_),e(m,P)},d(I){I&&t(m)}}}function $i(J){let m,w,f,g,k,h,_,P;return{c(){m=a("p"),w=n("This class method is simply calling "),f=a("code"),g=n("save_pretrained"),k=n(` and `),h=a("code"),_=n("save_pretrained"),P=n(`. Please refer to the docstrings of the methods above for more information.`)},l(I){m=s(I,"P",{});var D=i(m);w=r(D,"This class method is simply calling "),f=s(D,"CODE",{});var A=i(f);g=r(A,"save_pretrained"),A.forEach(t),k=r(D,` and `),h=s(D,"CODE",{});var L=i(h);_=r(L,"save_pretrained"),L.forEach(t),P=r(D,`. Please refer to the docstrings of the methods above for more information.`),D.forEach(t)},m(I,D){u(I,m,D),e(m,w),e(m,f),e(f,g),e(m,k),e(m,h),e(h,_),e(m,P)},d(I){I&&t(m)}}}function yi(J){let m,w,f,g,k;return{c(){m=a("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(h){m=s(h,"P",{});var _=i(m);w=r(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var P=i(f);g=r(P,"Module"),P.forEach(t),k=r(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(h,_){u(h,m,_),e(m,w),e(m,f),e(f,g),e(m,k)},d(h){h&&t(m)}}}function wi(J){let m,w,f,g,k;return{c(){m=a("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),g=n("Module"),k=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(h){m=s(h,"P",{});var _=i(m);w=r(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var P=i(f);g=r(P,"Module"),P.forEach(t),k=r(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(h,_){u(h,m,_),e(m,w),e(m,f),e(f,g),e(m,k)},d(h){h&&t(m)}}}function Di(J){let 
m,w,f,g,k,h,_,P,I,D,A,L,Wt,Ee,nn,Rt,rn,bo,V,an,mt,sn,ln,Jt,dn,cn,ft,mn,fn,pt,pn,hn,ht,un,gn,Ut,_n,vn,ut,xn,Tn,gt,En,bn,$o,ie,$n,be,yn,wn,yo,Y,le,Ht,$e,Dn,Zt,Vn,wo,j,ye,kn,Z,_t,Pn,jn,vt,zn,Mn,xt,Cn,Fn,In,ee,An,Tt,Ln,qn,Et,Sn,On,Nn,Gt,Bn,Wn,we,Rn,de,De,Jn,Ve,Un,bt,Hn,Zn,Gn,ce,ke,Xn,Pe,Kn,$t,Qn,Yn,Do,te,me,Xt,je,er,Kt,tr,Vo,z,ze,or,Qt,nr,rr,q,yt,ar,sr,wt,ir,lr,Dt,dr,cr,Yt,mr,fr,Vt,pr,hr,ur,fe,Me,gr,Ce,_r,kt,vr,xr,Tr,pe,Fe,Er,Ie,br,Pt,$r,yr,wr,G,Ae,Dr,Le,Vr,jt,kr,Pr,jr,he,zr,X,qe,Mr,oe,Cr,eo,Fr,Ir,zt,Ar,Lr,qr,ue,ko,ne,ge,to,Se,Sr,oo,Or,Po,M,Oe,Nr,Ne,Br,no,Wr,Rr,Jr,Be,Ur,We,Hr,Zr,Gr,ro,Xr,Kr,Re,Qr,Mt,Yr,ea,ta,Je,oa,Ue,na,ra,aa,S,He,sa,re,ia,Ct,la,da,ao,ca,ma,fa,_e,pa,so,ha,ua,Ze,jo,ae,ve,io,Ge,ga,lo,_a,zo,y,Xe,va,Ke,xa,co,Ta,Ea,ba,Qe,$a,Ye,ya,wa,Da,mo,Va,ka,et,Pa,Ft,ja,za,Ma,tt,Ca,ot,Fa,Ia,Aa,fo,La,qa,U,po,nt,Sa,Oa,ho,rt,Na,Ba,uo,at,Wa,Ra,go,st,Ja,Ua,O,it,Ha,se,Za,It,Ga,Xa,_o,Ka,Qa,Ya,xe,es,vo,ts,os,lt,Mo;return h=new Bt({}),Ee=new Bt({}),$e=new Bt({}),ye=new R({props:{name:"class transformers.VisionTextDualEncoderConfig",anchor:"transformers.VisionTextDualEncoderConfig",parameters:[{name:"projection_dim",val:" = 512"},{name:"logit_scale_init_value",val:" = 2.6592"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L28",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderConfig.text_config_dict",description:`<strong>text_config_dict</strong> (<code>dict</code>) &#x2014; Dictionary of configuration options that defines text model config.`,name:"text_config_dict"},{anchor:"transformers.VisionTextDualEncoderConfig.vision_config_dict",description:`<strong>vision_config_dict</strong> (<code>dict</code>) &#x2014; Dictionary of configuration options that defines vison model config.`,name:"vision_config_dict"},{anchor:"transformers.VisionTextDualEncoderConfig.projection_dim",description:`<strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimentionality of text and vision projection layers.`,name:"projection_dim"},{anchor:"transformers.VisionTextDualEncoderConfig.logit_scale_init_value",description:`<strong>logit_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 2.6592) &#x2014; The inital value of the <em>logit_scale</em> paramter. 
Default is used as per the original CLIP implementation.`,name:"logit_scale_init_value"},{anchor:"transformers.VisionTextDualEncoderConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments.`,name:"kwargs"}]}}),we=new ns({props:{code:`from transformers import ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel # Initializing a BERT and ViT configuration config_vision = ViTConfig() config_text = BertConfig() config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=512) # Initializing a BERT and ViT model model = VisionTextDualEncoderModel(config=config) # Accessing the model configuration config_vision = model.config.vision_config config_text = model.config.text_config # Saving the model, including its configuration model.save_pretrained('my-model') # loading model and config from pretrained folder vision_text_config = VisionTextDualEncoderConfig.from_pretrained('vit-bert') model = VisionTextDualEncoderModel.from_pretrained('vit-bert', config=vision_text_config),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT and ViT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_vision = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_text = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=<span class="hljs-number">512</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT and ViT model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_vision = model.config.vision_config <span class="hljs-meta">&gt;&gt;&gt; </span>config_text = model.config.text_config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&#x27;my-model&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>vision_text_config = VisionTextDualEncoderConfig.from_pretrained(<span class="hljs-string">&#x27;vit-bert&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&#x27;vit-bert&#x27;</span>, config=vision_text_config)`}}),De=new R({props:{name:"from_vision_text_configs",anchor:"transformers.VisionTextDualEncoderConfig.from_vision_text_configs",parameters:[{name:"vision_config",val:": PretrainedConfig"},{name:"text_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L106",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a></p> `}}),ke=new R({props:{name:"to_dict",anchor:"transformers.VisionTextDualEncoderConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L118",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),je=new Bt({}),ze=new R({props:{name:"class transformers.VisionTextDualEncoderProcessor",anchor:"transformers.VisionTextDualEncoderProcessor",parameters:[{name:"feature_extractor",val:": FeatureExtractionMixin"},{name:"tokenizer",val:": typing.Union[transformers.tokenization_utils.PreTrainedTokenizer, transformers.tokenization_utils_fast.PreTrainedTokenizerFast]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L28",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderProcessor.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>) &#x2014; The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.VisionTextDualEncoderProcessor.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer is a required input.`,name:"tokenizer"}]}}),Me=new R({props:{name:"batch_decode",anchor:"transformers.VisionTextDualEncoderProcessor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L175"}}),Fe=new R({props:{name:"decode",anchor:"transformers.VisionTextDualEncoderProcessor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L183"}}),Ae=new R({props:{name:"from_pretrained",anchor:"transformers.VisionTextDualEncoderProcessor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L84",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderProcessor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <code>save_pretrained</code> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>.</li> </ul>`,name:"pretrained_model_name_or_path"}]}}),he=new on({props:{$$slots:{default:[bi]},$$scope:{ctx:J}}}),qe=new R({props:{name:"save_pretrained",anchor:"transformers.VisionTextDualEncoderProcessor.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L61",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderProcessor.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"}]}}),ue=new on({props:{$$slots:{default:[$i]},$$scope:{ctx:J}}}),Se=new Bt({}),Oe=new R({props:{name:"class transformers.VisionTextDualEncoderModel",anchor:"transformers.VisionTextDualEncoderModel",parameters:[{name:"config",val:": typing.Optional[transformers.models.vision_text_dual_encoder.configuration_vision_text_dual_encoder.VisionTextDualEncoderConfig] = None"},{name:"vision_model",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"text_model",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py#L166",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),He=new R({props:{name:"forward",anchor:"transformers.VisionTextDualEncoderModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"pixel_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"return_loss",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py#L296",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisionTextDualEncoderModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisionTextDualEncoderModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisionTextDualEncoderModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.VisionTextDualEncoderModel.forward.return_loss",description:`<strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.`,name:"return_loss"},{anchor:"transformers.VisionTextDualEncoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisionTextDualEncoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisionTextDualEncoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_clip.CLIPOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>return_loss</code> is <code>True</code>) \u2014 Contrastive loss for image-text similarity.</li> <li><strong>logits_per_image:(<code>torch.FloatTensor</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>torch.FloatTensor</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>image_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_clip.CLIPOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),_e=new on({props:{$$slots:{default:[yi]},$$scope:{ctx:J}}}),Ze=new ns({props:{code:`from PIL import Image import requests from transformers import VisionTextDualEncoderModel, VisionTextDualEncoderProcessor, ViTFeatureExtractor, BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) model = VisionTextDualEncoderModel.from_vision_text_pretrained("google/vit-base-patch16-224", "bert-base-uncased") # contrastive training urls = ["http://images.cocodataset.org/val2017/000000039769.jpg", 
"https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg] images = [Image.open(requests.get(url, stream=True).raw) for url in urls] inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=images, return_tensors="pt", padding=True) outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, pixel_values=inputs.pixel_values, return_loss=True) loss, logits_per_image = outputs.loss, outputs.logits_per_imag # this is the image-text similarity score # save and load from pretrained model.save_pretrained("vit-bert") model = VisionTextDualEncoderModel.from_pretrained("vit-bert") # inference outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisionTextDualEncoderModel, VisionTextDualEncoderProcessor, ViTFeatureExtractor, BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_vision_text_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># contrastive training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>urls = [<span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>, <span class="hljs-string">&quot;https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg] &gt;&gt;&gt; images = [Image.open(requests.get(url, stream=True).raw) for url in urls] &gt;&gt;&gt; inputs = processor(text=[&quot;</span>a photo of a cat<span class="hljs-string">&quot;, &quot;</span>a photo of a dog<span class="hljs-string">&quot;], images=images, return_tensors=&quot;</span>pt<span class="hljs-string">&quot;, padding=True) &gt;&gt;&gt; outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, pixel_values=inputs.pixel_values, return_loss=True) &gt;&gt;&gt; loss, logits_per_image = outputs.loss, outputs.logits_per_imag # this is the image-text similarity score &gt;&gt;&gt; # save and load from pretrained &gt;&gt;&gt; model.save_pretrained(&quot;</span>vit-bert<span class="hljs-string">&quot;) &gt;&gt;&gt; model = VisionTextDualEncoderModel.from_pretrained(&quot;</span>vit-bert<span class="hljs-string">&quot;) &gt;&gt;&gt; # inference &gt;&gt;&gt; outputs = model(**inputs) &gt;&gt;&gt; logits_per_image = outputs.logits_per_image # this is the image-text similarity score &gt;&gt;&gt; probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities</span>`}}),Ge=new Bt({}),Xe=new R({props:{name:"class 
transformers.FlaxVisionTextDualEncoderModel",anchor:"transformers.FlaxVisionTextDualEncoderModel",parameters:[{name:"config",val:": VisionTextDualEncoderConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py#L220",parametersDescription:[{anchor:"transformers.FlaxVisionTextDualEncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),it=new R({props:{name:"__call__",anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__",parameters:[{name:"input_ids",val:""},{name:"pixel_values",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py#L252",parametersDescription:[{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a>) and inputs.</p> <ul> <li><strong>logits_per_image:(<code>jnp.ndarray</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>jnp.ndarray</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>image_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/v4.15.0/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),xe=new on({props:{$$slots:{default:[wi]},$$scope:{ctx:J}}}),lt=new ns({props:{code:`from PIL import Image import requests import jax from transformers import FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor, ViTFeatureExtractor, BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained("google/vit-base-patch16-224", "bert-base-uncased") # contrastive training urls = ["http://images.cocodataset.org/val2017/000000039769.jpg", "https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg] images = [Image.open(requests.get(url, stream=True).raw) for url in urls] inputs = processor(text=["a 
photo of a cat", "a photo of a dog"], images=images, return_tensors="np", padding=True) outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, pixel_values=inputs.pixel_values, return_loss=True) loss, logits_per_image = outputs.loss, outputs.logits_per_imag # this is the image-text similarity score # save and load from pretrained model.save_pretrained("vit-bert") model = FlaxVisionTextDualEncoderModel.from_pretrained("vit-bert") # inference outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor, ViTFeatureExtractor, BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># contrastive training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>urls = [<span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>, <span class="hljs-string">&quot;https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg] &gt;&gt;&gt; images = [Image.open(requests.get(url, stream=True).raw) for url in urls] &gt;&gt;&gt; inputs = processor(text=[&quot;</span>a photo of a cat<span class="hljs-string">&quot;, &quot;</span>a photo of a dog<span class="hljs-string">&quot;], images=images, return_tensors=&quot;</span>np<span class="hljs-string">&quot;, padding=True) &gt;&gt;&gt; outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, pixel_values=inputs.pixel_values, return_loss=True) &gt;&gt;&gt; loss, logits_per_image = outputs.loss, outputs.logits_per_imag # this is the image-text similarity score &gt;&gt;&gt; # save and load from pretrained &gt;&gt;&gt; model.save_pretrained(&quot;</span>vit-bert<span class="hljs-string">&quot;) &gt;&gt;&gt; model = FlaxVisionTextDualEncoderModel.from_pretrained(&quot;</span>vit-bert<span class="hljs-string">&quot;) &gt;&gt;&gt; # inference &gt;&gt;&gt; outputs = model(**inputs) &gt;&gt;&gt; logits_per_image = outputs.logits_per_image # this is the image-text similarity score &gt;&gt;&gt; probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label 
probabilities</span>`}}),{c(){m=a("meta"),w=d(),f=a("h1"),g=a("a"),k=a("span"),v(h.$$.fragment),_=d(),P=a("span"),I=n("VisionTextDualEncoder"),D=d(),A=a("h2"),L=a("a"),Wt=a("span"),v(Ee.$$.fragment),nn=d(),Rt=a("span"),rn=n("Overview"),bo=d(),V=a("p"),an=n("The "),mt=a("a"),sn=n("VisionTextDualEncoderModel"),ln=n(` can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (`),Jt=a("em"),dn=n("e.g."),cn=d(),ft=a("a"),mn=n("ViT"),fn=n(", "),pt=a("a"),pn=n("BEiT"),hn=n(", "),ht=a("a"),un=n("DeiT"),gn=n(") and any pretrained text autoencoding model as the text encoder ("),Ut=a("em"),_n=n("e.g."),vn=d(),ut=a("a"),xn=n("RoBERTa"),Tn=n(", "),gt=a("a"),En=n("BERT"),bn=n(`). Two projection layers are added on top of both the vision and text encoder to project the output embeddings to a shared latent space. The projection layers are randomly initialized so the model should be fine-tuned on a downstream task. This model can be used to align the vision-text embeddings using CLIP like contrastive image-text training and then can be used for zero-shot vision tasks such image-classification or retrieval.`),$o=d(),ie=a("p"),$n=n("In "),be=a("a"),yn=n("LiT: Zero-Shot Transfer with Locked-image Text Tuning"),wn=n(` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),yo=d(),Y=a("h2"),le=a("a"),Ht=a("span"),v($e.$$.fragment),Dn=d(),Zt=a("span"),Vn=n("VisionTextDualEncoderConfig"),wo=d(),j=a("div"),v(ye.$$.fragment),kn=d(),Z=a("p"),_t=a("a"),Pn=n("VisionTextDualEncoderConfig"),jn=n(` is the configuration class to store the configuration of a `),vt=a("a"),zn=n("VisionTextDualEncoderModel"),Mn=n(`. It is used to instantiate `),xt=a("a"),Cn=n("VisionTextDualEncoderModel"),Fn=n(` model according to the specified arguments, defining the text model and vision model configs.`),In=d(),ee=a("p"),An=n("Configuration objects inherit from "),Tt=a("a"),Ln=n("PretrainedConfig"),qn=n(` and can be used to control the model outputs. Read the documentation from `),Et=a("a"),Sn=n("PretrainedConfig"),On=n(" for more information."),Nn=d(),Gt=a("p"),Bn=n("Examples:"),Wn=d(),v(we.$$.fragment),Rn=d(),de=a("div"),v(De.$$.fragment),Jn=d(),Ve=a("p"),Un=n("Instantiate a "),bt=a("a"),Hn=n("VisionTextDualEncoderConfig"),Zn=n(` (or a derived class) from text model configuration and vision model configuration.`),Gn=d(),ce=a("div"),v(ke.$$.fragment),Xn=d(),Pe=a("p"),Kn=n(`Serializes this instance to a Python dictionary. Override the default `),$t=a("a"),Qn=n("to_dict()"),Yn=n("."),Do=d(),te=a("h2"),me=a("a"),Xt=a("span"),v(je.$$.fragment),er=d(),Kt=a("span"),tr=n("VisionTextDualEncoderProcessor"),Vo=d(),z=a("div"),v(ze.$$.fragment),or=d(),Qt=a("p"),nr=n(`Constructs a VisionTextDualEncoder processor which wraps a vision feature extractor and a tokenizer into a single processor.`),rr=d(),q=a("p"),yt=a("a"),ar=n("VisionTextDualEncoderProcessor"),sr=n(` offers all the functionalities of `),wt=a("a"),ir=n("AutoFeatureExtractor"),lr=n(" and "),Dt=a("a"),dr=n("AutoTokenizer"),cr=n(`. See the `),Yt=a("code"),mr=n("__call__()"),fr=n(` and `),Vt=a("a"),pr=n("decode()"),hr=n(" for more information."),ur=d(),fe=a("div"),v(Me.$$.fragment),gr=d(),Ce=a("p"),_r=n(`This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s `),kt=a("a"),vr=n("batch_decode()"),xr=n(`. 
Please refer to the docstring of this method for more information.`),Tr=d(),pe=a("div"),v(Fe.$$.fragment),Er=d(),Ie=a("p"),br=n(`This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s `),Pt=a("a"),$r=n("decode()"),yr=n(`. Please refer to the docstring of this method for more information.`),wr=d(),G=a("div"),v(Ae.$$.fragment),Dr=d(),Le=a("p"),Vr=n("Instantiate a "),jt=a("a"),kr=n("VisionTextDualEncoderProcessor"),Pr=n(` from a pretrained VisionTextDualEncoder processor.`),jr=d(),v(he.$$.fragment),zr=d(),X=a("div"),v(qe.$$.fragment),Mr=d(),oe=a("p"),Cr=n(`Save a VisionTextDualEncoder feature extractor object and VisionTextDualEncoder tokenizer object to the directory `),eo=a("code"),Fr=n("save_directory"),Ir=n(`, so that it can be re-loaded using the `),zt=a("a"),Ar=n("from_pretrained()"),Lr=n(" class method."),qr=d(),v(ue.$$.fragment),ko=d(),ne=a("h2"),ge=a("a"),to=a("span"),v(Se.$$.fragment),Sr=d(),oo=a("span"),Or=n("VisionTextDualEncoderModel"),Po=d(),M=a("div"),v(Oe.$$.fragment),Nr=d(),Ne=a("p"),Br=n(`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),no=a("code"),Wr=n("from_pretrained()"),Rr=n(` method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),Jr=d(),Be=a("p"),Ur=n("In "),We=a("a"),Hr=n("LiT: Zero-Shot Transfer with Locked-image Text Tuning"),Zr=n(` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Gr=d(),ro=a("p"),Xr=n(`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Kr=d(),Re=a("p"),Qr=n("This model inherits from "),Mt=a("a"),Yr=n("PreTrainedModel"),ea=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ta=d(),Je=a("p"),oa=n("This model is also a PyTorch "),Ue=a("a"),na=n("torch.nn.Module"),ra=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),aa=d(),S=a("div"),v(He.$$.fragment),sa=d(),re=a("p"),ia=n("The "),Ct=a("a"),la=n("VisionTextDualEncoderModel"),da=n(" forward method, overrides the "),ao=a("code"),ca=n("__call__"),ma=n(" special method."),fa=d(),v(_e.$$.fragment),pa=d(),so=a("p"),ha=n("Examples:"),ua=d(),v(Ze.$$.fragment),jo=d(),ae=a("h2"),ve=a("a"),io=a("span"),v(Ge.$$.fragment),ga=d(),lo=a("span"),_a=n("FlaxVisionTextDualEncoderModel"),zo=d(),y=a("div"),v(Xe.$$.fragment),va=d(),Ke=a("p"),xa=n(`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),co=a("code"),Ta=n("from_pretrained()"),Ea=n(` method. 
The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),ba=d(),Qe=a("p"),$a=n("In "),Ye=a("a"),ya=n("LiT: Zero-Shot Transfer with Locked-image Text Tuning"),wa=n(` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Da=d(),mo=a("p"),Va=n(`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),ka=d(),et=a("p"),Pa=n("This model inherits from "),Ft=a("a"),ja=n("PreTrainedModel"),za=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ma=d(),tt=a("p"),Ca=n("This model is also a Flax Linen "),ot=a("a"),Fa=n("flax.linen.Module"),Ia=n(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Aa=d(),fo=a("p"),La=n("Finally, this model supports inherent JAX features such as:"),qa=d(),U=a("ul"),po=a("li"),nt=a("a"),Sa=n("Just-In-Time (JIT) compilation"),Oa=d(),ho=a("li"),rt=a("a"),Na=n("Automatic Differentiation"),Ba=d(),uo=a("li"),at=a("a"),Wa=n("Vectorization"),Ra=d(),go=a("li"),st=a("a"),Ja=n("Parallelization"),Ua=d(),O=a("div"),v(it.$$.fragment),Ha=d(),se=a("p"),Za=n("The "),It=a("a"),Ga=n("FlaxVisionTextDualEncoderModel"),Xa=n(" forward method, overrides the "),_o=a("code"),Ka=n("__call__"),Qa=n(" special method."),Ya=d(),v(xe.$$.fragment),es=d(),vo=a("p"),ts=n("Examples:"),os=d(),v(lt.$$.fragment),this.h()},l(o){const p=Ei('[data-svelte="svelte-1phssyn"]',document.head);m=s(p,"META",{name:!0,content:!0}),p.forEach(t),w=c(o),f=s(o,"H1",{class:!0});var dt=i(f);g=s(dt,"A",{id:!0,class:!0,href:!0});var xo=i(g);k=s(xo,"SPAN",{});var To=i(k);x(h.$$.fragment,To),To.forEach(t),xo.forEach(t),_=c(dt),P=s(dt,"SPAN",{});var Eo=i(P);I=r(Eo,"VisionTextDualEncoder"),Eo.forEach(t),dt.forEach(t),D=c(o),A=s(o,"H2",{class:!0});var Co=i(A);L=s(Co,"A",{id:!0,class:!0,href:!0});var rs=i(L);Wt=s(rs,"SPAN",{});var as=i(Wt);x(Ee.$$.fragment,as),as.forEach(t),rs.forEach(t),nn=c(Co),Rt=s(Co,"SPAN",{});var ss=i(Rt);rn=r(ss,"Overview"),ss.forEach(t),Co.forEach(t),bo=c(o),V=s(o,"P",{});var C=i(V);an=r(C,"The "),mt=s(C,"A",{href:!0});var is=i(mt);sn=r(is,"VisionTextDualEncoderModel"),is.forEach(t),ln=r(C,` can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (`),Jt=s(C,"EM",{});var ls=i(Jt);dn=r(ls,"e.g."),ls.forEach(t),cn=c(C),ft=s(C,"A",{href:!0});var ds=i(ft);mn=r(ds,"ViT"),ds.forEach(t),fn=r(C,", "),pt=s(C,"A",{href:!0});var cs=i(pt);pn=r(cs,"BEiT"),cs.forEach(t),hn=r(C,", "),ht=s(C,"A",{href:!0});var ms=i(ht);un=r(ms,"DeiT"),ms.forEach(t),gn=r(C,") and any pretrained text autoencoding model as the text encoder ("),Ut=s(C,"EM",{});var fs=i(Ut);_n=r(fs,"e.g."),fs.forEach(t),vn=c(C),ut=s(C,"A",{href:!0});var ps=i(ut);xn=r(ps,"RoBERTa"),ps.forEach(t),Tn=r(C,", "),gt=s(C,"A",{href:!0});var hs=i(gt);En=r(hs,"BERT"),hs.forEach(t),bn=r(C,`). Two projection layers are added on top of both the vision and text encoder to project the output embeddings to a shared latent space. The projection layers are randomly initialized so the model should be fine-tuned on a downstream task. 
This model can be used to align the vision-text embeddings using CLIP like contrastive image-text training and then can be used for zero-shot vision tasks such image-classification or retrieval.`),C.forEach(t),$o=c(o),ie=s(o,"P",{});var Fo=i(ie);$n=r(Fo,"In "),be=s(Fo,"A",{href:!0,rel:!0});var us=i(be);yn=r(us,"LiT: Zero-Shot Transfer with Locked-image Text Tuning"),us.forEach(t),wn=r(Fo,` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Fo.forEach(t),yo=c(o),Y=s(o,"H2",{class:!0});var Io=i(Y);le=s(Io,"A",{id:!0,class:!0,href:!0});var gs=i(le);Ht=s(gs,"SPAN",{});var _s=i(Ht);x($e.$$.fragment,_s),_s.forEach(t),gs.forEach(t),Dn=c(Io),Zt=s(Io,"SPAN",{});var vs=i(Zt);Vn=r(vs,"VisionTextDualEncoderConfig"),vs.forEach(t),Io.forEach(t),wo=c(o),j=s(o,"DIV",{class:!0});var N=i(j);x(ye.$$.fragment,N),kn=c(N),Z=s(N,"P",{});var ct=i(Z);_t=s(ct,"A",{href:!0});var xs=i(_t);Pn=r(xs,"VisionTextDualEncoderConfig"),xs.forEach(t),jn=r(ct,` is the configuration class to store the configuration of a `),vt=s(ct,"A",{href:!0});var Ts=i(vt);zn=r(Ts,"VisionTextDualEncoderModel"),Ts.forEach(t),Mn=r(ct,`. It is used to instantiate `),xt=s(ct,"A",{href:!0});var Es=i(xt);Cn=r(Es,"VisionTextDualEncoderModel"),Es.forEach(t),Fn=r(ct,` model according to the specified arguments, defining the text model and vision model configs.`),ct.forEach(t),In=c(N),ee=s(N,"P",{});var At=i(ee);An=r(At,"Configuration objects inherit from "),Tt=s(At,"A",{href:!0});var bs=i(Tt);Ln=r(bs,"PretrainedConfig"),bs.forEach(t),qn=r(At,` and can be used to control the model outputs. Read the documentation from `),Et=s(At,"A",{href:!0});var $s=i(Et);Sn=r($s,"PretrainedConfig"),$s.forEach(t),On=r(At," for more information."),At.forEach(t),Nn=c(N),Gt=s(N,"P",{});var ys=i(Gt);Bn=r(ys,"Examples:"),ys.forEach(t),Wn=c(N),x(we.$$.fragment,N),Rn=c(N),de=s(N,"DIV",{class:!0});var Ao=i(de);x(De.$$.fragment,Ao),Jn=c(Ao),Ve=s(Ao,"P",{});var Lo=i(Ve);Un=r(Lo,"Instantiate a "),bt=s(Lo,"A",{href:!0});var ws=i(bt);Hn=r(ws,"VisionTextDualEncoderConfig"),ws.forEach(t),Zn=r(Lo,` (or a derived class) from text model configuration and vision model configuration.`),Lo.forEach(t),Ao.forEach(t),Gn=c(N),ce=s(N,"DIV",{class:!0});var qo=i(ce);x(ke.$$.fragment,qo),Xn=c(qo),Pe=s(qo,"P",{});var So=i(Pe);Kn=r(So,`Serializes this instance to a Python dictionary. Override the default `),$t=s(So,"A",{href:!0});var Ds=i($t);Qn=r(Ds,"to_dict()"),Ds.forEach(t),Yn=r(So,"."),So.forEach(t),qo.forEach(t),N.forEach(t),Do=c(o),te=s(o,"H2",{class:!0});var Oo=i(te);me=s(Oo,"A",{id:!0,class:!0,href:!0});var Vs=i(me);Xt=s(Vs,"SPAN",{});var ks=i(Xt);x(je.$$.fragment,ks),ks.forEach(t),Vs.forEach(t),er=c(Oo),Kt=s(Oo,"SPAN",{});var Ps=i(Kt);tr=r(Ps,"VisionTextDualEncoderProcessor"),Ps.forEach(t),Oo.forEach(t),Vo=c(o),z=s(o,"DIV",{class:!0});var B=i(z);x(ze.$$.fragment,B),or=c(B),Qt=s(B,"P",{});var js=i(Qt);nr=r(js,`Constructs a VisionTextDualEncoder processor which wraps a vision feature extractor and a tokenizer into a single processor.`),js.forEach(t),rr=c(B),q=s(B,"P",{});var H=i(q);yt=s(H,"A",{href:!0});var zs=i(yt);ar=r(zs,"VisionTextDualEncoderProcessor"),zs.forEach(t),sr=r(H,` offers all the functionalities of `),wt=s(H,"A",{href:!0});var Ms=i(wt);ir=r(Ms,"AutoFeatureExtractor"),Ms.forEach(t),lr=r(H," and "),Dt=s(H,"A",{href:!0});var Cs=i(Dt);dr=r(Cs,"AutoTokenizer"),Cs.forEach(t),cr=r(H,`. 
See the `),Yt=s(H,"CODE",{});var Fs=i(Yt);mr=r(Fs,"__call__()"),Fs.forEach(t),fr=r(H,` and `),Vt=s(H,"A",{href:!0});var Is=i(Vt);pr=r(Is,"decode()"),Is.forEach(t),hr=r(H," for more information."),H.forEach(t),ur=c(B),fe=s(B,"DIV",{class:!0});var No=i(fe);x(Me.$$.fragment,No),gr=c(No),Ce=s(No,"P",{});var Bo=i(Ce);_r=r(Bo,`This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s `),kt=s(Bo,"A",{href:!0});var As=i(kt);vr=r(As,"batch_decode()"),As.forEach(t),xr=r(Bo,`. Please refer to the docstring of this method for more information.`),Bo.forEach(t),No.forEach(t),Tr=c(B),pe=s(B,"DIV",{class:!0});var Wo=i(pe);x(Fe.$$.fragment,Wo),Er=c(Wo),Ie=s(Wo,"P",{});var Ro=i(Ie);br=r(Ro,`This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s `),Pt=s(Ro,"A",{href:!0});var Ls=i(Pt);$r=r(Ls,"decode()"),Ls.forEach(t),yr=r(Ro,`. Please refer to the docstring of this method for more information.`),Ro.forEach(t),Wo.forEach(t),wr=c(B),G=s(B,"DIV",{class:!0});var Lt=i(G);x(Ae.$$.fragment,Lt),Dr=c(Lt),Le=s(Lt,"P",{});var Jo=i(Le);Vr=r(Jo,"Instantiate a "),jt=s(Jo,"A",{href:!0});var qs=i(jt);kr=r(qs,"VisionTextDualEncoderProcessor"),qs.forEach(t),Pr=r(Jo,` from a pretrained VisionTextDualEncoder processor.`),Jo.forEach(t),jr=c(Lt),x(he.$$.fragment,Lt),Lt.forEach(t),zr=c(B),X=s(B,"DIV",{class:!0});var qt=i(X);x(qe.$$.fragment,qt),Mr=c(qt),oe=s(qt,"P",{});var St=i(oe);Cr=r(St,`Save a VisionTextDualEncoder feature extractor object and VisionTextDualEncoder tokenizer object to the directory `),eo=s(St,"CODE",{});var Ss=i(eo);Fr=r(Ss,"save_directory"),Ss.forEach(t),Ir=r(St,`, so that it can be re-loaded using the `),zt=s(St,"A",{href:!0});var Os=i(zt);Ar=r(Os,"from_pretrained()"),Os.forEach(t),Lr=r(St," class method."),St.forEach(t),qr=c(qt),x(ue.$$.fragment,qt),qt.forEach(t),B.forEach(t),ko=c(o),ne=s(o,"H2",{class:!0});var Uo=i(ne);ge=s(Uo,"A",{id:!0,class:!0,href:!0});var Ns=i(ge);to=s(Ns,"SPAN",{});var Bs=i(to);x(Se.$$.fragment,Bs),Bs.forEach(t),Ns.forEach(t),Sr=c(Uo),oo=s(Uo,"SPAN",{});var Ws=i(oo);Or=r(Ws,"VisionTextDualEncoderModel"),Ws.forEach(t),Uo.forEach(t),Po=c(o),M=s(o,"DIV",{class:!0});var W=i(M);x(Oe.$$.fragment,W),Nr=c(W),Ne=s(W,"P",{});var Ho=i(Ne);Br=r(Ho,`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),no=s(Ho,"CODE",{});var Rs=i(no);Wr=r(Rs,"from_pretrained()"),Rs.forEach(t),Rr=r(Ho,` method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),Ho.forEach(t),Jr=c(W),Be=s(W,"P",{});var Zo=i(Be);Ur=r(Zo,"In "),We=s(Zo,"A",{href:!0,rel:!0});var Js=i(We);Hr=r(Js,"LiT: Zero-Shot Transfer with Locked-image Text Tuning"),Js.forEach(t),Zr=r(Zo,` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Zo.forEach(t),Gr=c(W),ro=s(W,"P",{});var Us=i(ro);Xr=r(Us,`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Us.forEach(t),Kr=c(W),Re=s(W,"P",{});var Go=i(Re);Qr=r(Go,"This model inherits from "),Mt=s(Go,"A",{href:!0});var Hs=i(Mt);Yr=r(Hs,"PreTrainedModel"),Hs.forEach(t),ea=r(Go,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Go.forEach(t),ta=c(W),Je=s(W,"P",{});var Xo=i(Je);oa=r(Xo,"This model is also a PyTorch "),Ue=s(Xo,"A",{href:!0,rel:!0});var Zs=i(Ue);na=r(Zs,"torch.nn.Module"),Zs.forEach(t),ra=r(Xo,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xo.forEach(t),aa=c(W),S=s(W,"DIV",{class:!0});var K=i(S);x(He.$$.fragment,K),sa=c(K),re=s(K,"P",{});var Ot=i(re);ia=r(Ot,"The "),Ct=s(Ot,"A",{href:!0});var Gs=i(Ct);la=r(Gs,"VisionTextDualEncoderModel"),Gs.forEach(t),da=r(Ot," forward method, overrides the "),ao=s(Ot,"CODE",{});var Xs=i(ao);ca=r(Xs,"__call__"),Xs.forEach(t),ma=r(Ot," special method."),Ot.forEach(t),fa=c(K),x(_e.$$.fragment,K),pa=c(K),so=s(K,"P",{});var Ks=i(so);ha=r(Ks,"Examples:"),Ks.forEach(t),ua=c(K),x(Ze.$$.fragment,K),K.forEach(t),W.forEach(t),jo=c(o),ae=s(o,"H2",{class:!0});var Ko=i(ae);ve=s(Ko,"A",{id:!0,class:!0,href:!0});var Qs=i(ve);io=s(Qs,"SPAN",{});var Ys=i(io);x(Ge.$$.fragment,Ys),Ys.forEach(t),Qs.forEach(t),ga=c(Ko),lo=s(Ko,"SPAN",{});var ei=i(lo);_a=r(ei,"FlaxVisionTextDualEncoderModel"),ei.forEach(t),Ko.forEach(t),zo=c(o),y=s(o,"DIV",{class:!0});var F=i(y);x(Xe.$$.fragment,F),va=c(F),Ke=s(F,"P",{});var Qo=i(Ke);xa=r(Qo,`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),co=s(Qo,"CODE",{});var ti=i(co);Ta=r(ti,"from_pretrained()"),ti.forEach(t),Ea=r(Qo,` method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),Qo.forEach(t),ba=c(F),Qe=s(F,"P",{});var Yo=i(Qe);$a=r(Yo,"In "),Ye=s(Yo,"A",{href:!0,rel:!0});var oi=i(Ye);ya=r(oi,"LiT: Zero-Shot Transfer with Locked-image Text Tuning"),oi.forEach(t),wa=r(Yo,` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Yo.forEach(t),Da=c(F),mo=s(F,"P",{});var ni=i(mo);Va=r(ni,`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),ni.forEach(t),ka=c(F),et=s(F,"P",{});var en=i(et);Pa=r(en,"This model inherits from "),Ft=s(en,"A",{href:!0});var ri=i(Ft);ja=r(ri,"PreTrainedModel"),ri.forEach(t),za=r(en,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),en.forEach(t),Ma=c(F),tt=s(F,"P",{});var tn=i(tt);Ca=r(tn,"This model is also a Flax Linen "),ot=s(tn,"A",{href:!0,rel:!0});var ai=i(ot);Fa=r(ai,"flax.linen.Module"),ai.forEach(t),Ia=r(tn,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),tn.forEach(t),Aa=c(F),fo=s(F,"P",{});var si=i(fo);La=r(si,"Finally, this model supports inherent JAX features such as:"),si.forEach(t),qa=c(F),U=s(F,"UL",{});var Te=i(U);po=s(Te,"LI",{});var ii=i(po);nt=s(ii,"A",{href:!0,rel:!0});var li=i(nt);Sa=r(li,"Just-In-Time (JIT) compilation"),li.forEach(t),ii.forEach(t),Oa=c(Te),ho=s(Te,"LI",{});var di=i(ho);rt=s(di,"A",{href:!0,rel:!0});var ci=i(rt);Na=r(ci,"Automatic Differentiation"),ci.forEach(t),di.forEach(t),Ba=c(Te),uo=s(Te,"LI",{});var mi=i(uo);at=s(mi,"A",{href:!0,rel:!0});var fi=i(at);Wa=r(fi,"Vectorization"),fi.forEach(t),mi.forEach(t),Ra=c(Te),go=s(Te,"LI",{});var pi=i(go);st=s(pi,"A",{href:!0,rel:!0});var hi=i(st);Ja=r(hi,"Parallelization"),hi.forEach(t),pi.forEach(t),Te.forEach(t),Ua=c(F),O=s(F,"DIV",{class:!0});var Q=i(O);x(it.$$.fragment,Q),Ha=c(Q),se=s(Q,"P",{});var Nt=i(se);Za=r(Nt,"The "),It=s(Nt,"A",{href:!0});var ui=i(It);Ga=r(ui,"FlaxVisionTextDualEncoderModel"),ui.forEach(t),Xa=r(Nt," forward method, overrides the "),_o=s(Nt,"CODE",{});var gi=i(_o);Ka=r(gi,"__call__"),gi.forEach(t),Qa=r(Nt," special method."),Nt.forEach(t),Ya=c(Q),x(xe.$$.fragment,Q),es=c(Q),vo=s(Q,"P",{});var _i=i(vo);ts=r(_i,"Examples:"),_i.forEach(t),os=c(Q),x(lt.$$.fragment,Q),Q.forEach(t),F.forEach(t),this.h()},h(){l(m,"name","hf:doc:metadata"),l(m,"content",JSON.stringify(Vi)),l(g,"id","visiontextdualencoder"),l(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(g,"href","#visiontextdualencoder"),l(f,"class","relative group"),l(L,"id","overview"),l(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(L,"href","#overview"),l(A,"class","relative group"),l(mt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderModel"),l(ft,"href","/docs/transformers/v4.15.0/en/vit"),l(pt,"href","/docs/transformers/v4.15.0/en/beit"),l(ht,"href","/docs/transformers/v4.15.0/en/deit"),l(ut,"href","/docs/transformers/v4.15.0/en/roberta"),l(gt,"href","/docs/transformers/v4.15.0/en/bert"),l(be,"href","https://arxiv.org/abs/2111.07991"),l(be,"rel","nofollow"),l(le,"id","transformers.VisionTextDualEncoderConfig"),l(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(le,"href","#transformers.VisionTextDualEncoderConfig"),l(Y,"class","relative 
group"),l(_t,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig"),l(vt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderModel"),l(xt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderModel"),l(Tt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Et,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(bt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderConfig"),l(de,"class","docstring"),l($t,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig.to_dict"),l(ce,"class","docstring"),l(j,"class","docstring"),l(me,"id","transformers.VisionTextDualEncoderProcessor"),l(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(me,"href","#transformers.VisionTextDualEncoderProcessor"),l(te,"class","relative group"),l(yt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderProcessor"),l(wt,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoFeatureExtractor"),l(Dt,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoTokenizer"),l(Vt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderProcessor.decode"),l(kt,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),l(fe,"class","docstring"),l(Pt,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),l(pe,"class","docstring"),l(jt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderProcessor"),l(G,"class","docstring"),l(zt,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderProcessor.from_pretrained"),l(X,"class","docstring"),l(z,"class","docstring"),l(ge,"id","transformers.VisionTextDualEncoderModel"),l(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ge,"href","#transformers.VisionTextDualEncoderModel"),l(ne,"class","relative group"),l(We,"href","https://arxiv.org/abs/2111.07991"),l(We,"rel","nofollow"),l(Mt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ue,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ue,"rel","nofollow"),l(Ct,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.VisionTextDualEncoderModel"),l(S,"class","docstring"),l(M,"class","docstring"),l(ve,"id","transformers.FlaxVisionTextDualEncoderModel"),l(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ve,"href","#transformers.FlaxVisionTextDualEncoderModel"),l(ae,"class","relative 
group"),l(Ye,"href","https://arxiv.org/abs/2111.07991"),l(Ye,"rel","nofollow"),l(Ft,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(ot,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(ot,"rel","nofollow"),l(nt,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(nt,"rel","nofollow"),l(rt,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(rt,"rel","nofollow"),l(at,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(at,"rel","nofollow"),l(st,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(st,"rel","nofollow"),l(It,"href","/docs/transformers/v4.15.0/en/model_doc/vision_text_dual_encoder#transformers.FlaxVisionTextDualEncoderModel"),l(O,"class","docstring"),l(y,"class","docstring")},m(o,p){e(document.head,m),u(o,w,p),u(o,f,p),e(f,g),e(g,k),T(h,k,null),e(f,_),e(f,P),e(P,I),u(o,D,p),u(o,A,p),e(A,L),e(L,Wt),T(Ee,Wt,null),e(A,nn),e(A,Rt),e(Rt,rn),u(o,bo,p),u(o,V,p),e(V,an),e(V,mt),e(mt,sn),e(V,ln),e(V,Jt),e(Jt,dn),e(V,cn),e(V,ft),e(ft,mn),e(V,fn),e(V,pt),e(pt,pn),e(V,hn),e(V,ht),e(ht,un),e(V,gn),e(V,Ut),e(Ut,_n),e(V,vn),e(V,ut),e(ut,xn),e(V,Tn),e(V,gt),e(gt,En),e(V,bn),u(o,$o,p),u(o,ie,p),e(ie,$n),e(ie,be),e(be,yn),e(ie,wn),u(o,yo,p),u(o,Y,p),e(Y,le),e(le,Ht),T($e,Ht,null),e(Y,Dn),e(Y,Zt),e(Zt,Vn),u(o,wo,p),u(o,j,p),T(ye,j,null),e(j,kn),e(j,Z),e(Z,_t),e(_t,Pn),e(Z,jn),e(Z,vt),e(vt,zn),e(Z,Mn),e(Z,xt),e(xt,Cn),e(Z,Fn),e(j,In),e(j,ee),e(ee,An),e(ee,Tt),e(Tt,Ln),e(ee,qn),e(ee,Et),e(Et,Sn),e(ee,On),e(j,Nn),e(j,Gt),e(Gt,Bn),e(j,Wn),T(we,j,null),e(j,Rn),e(j,de),T(De,de,null),e(de,Jn),e(de,Ve),e(Ve,Un),e(Ve,bt),e(bt,Hn),e(Ve,Zn),e(j,Gn),e(j,ce),T(ke,ce,null),e(ce,Xn),e(ce,Pe),e(Pe,Kn),e(Pe,$t),e($t,Qn),e(Pe,Yn),u(o,Do,p),u(o,te,p),e(te,me),e(me,Xt),T(je,Xt,null),e(te,er),e(te,Kt),e(Kt,tr),u(o,Vo,p),u(o,z,p),T(ze,z,null),e(z,or),e(z,Qt),e(Qt,nr),e(z,rr),e(z,q),e(q,yt),e(yt,ar),e(q,sr),e(q,wt),e(wt,ir),e(q,lr),e(q,Dt),e(Dt,dr),e(q,cr),e(q,Yt),e(Yt,mr),e(q,fr),e(q,Vt),e(Vt,pr),e(q,hr),e(z,ur),e(z,fe),T(Me,fe,null),e(fe,gr),e(fe,Ce),e(Ce,_r),e(Ce,kt),e(kt,vr),e(Ce,xr),e(z,Tr),e(z,pe),T(Fe,pe,null),e(pe,Er),e(pe,Ie),e(Ie,br),e(Ie,Pt),e(Pt,$r),e(Ie,yr),e(z,wr),e(z,G),T(Ae,G,null),e(G,Dr),e(G,Le),e(Le,Vr),e(Le,jt),e(jt,kr),e(Le,Pr),e(G,jr),T(he,G,null),e(z,zr),e(z,X),T(qe,X,null),e(X,Mr),e(X,oe),e(oe,Cr),e(oe,eo),e(eo,Fr),e(oe,Ir),e(oe,zt),e(zt,Ar),e(oe,Lr),e(X,qr),T(ue,X,null),u(o,ko,p),u(o,ne,p),e(ne,ge),e(ge,to),T(Se,to,null),e(ne,Sr),e(ne,oo),e(oo,Or),u(o,Po,p),u(o,M,p),T(Oe,M,null),e(M,Nr),e(M,Ne),e(Ne,Br),e(Ne,no),e(no,Wr),e(Ne,Rr),e(M,Jr),e(M,Be),e(Be,Ur),e(Be,We),e(We,Hr),e(Be,Zr),e(M,Gr),e(M,ro),e(ro,Xr),e(M,Kr),e(M,Re),e(Re,Qr),e(Re,Mt),e(Mt,Yr),e(Re,ea),e(M,ta),e(M,Je),e(Je,oa),e(Je,Ue),e(Ue,na),e(Je,ra),e(M,aa),e(M,S),T(He,S,null),e(S,sa),e(S,re),e(re,ia),e(re,Ct),e(Ct,la),e(re,da),e(re,ao),e(ao,ca),e(re,ma),e(S,fa),T(_e,S,null),e(S,pa),e(S,so),e(so,ha),e(S,ua),T(Ze,S,null),u(o,jo,p),u(o,ae,p),e(ae,ve),e(ve,io),T(Ge,io,null),e(ae,ga),e(ae,lo),e(lo,_a),u(o,zo,p),u(o,y,p),T(Xe,y,null),e(y,va),e(y,Ke),e(Ke,xa),e(Ke,co),e(co,Ta),e(Ke,Ea),e(y,ba),e(y,Qe),e(Qe,$a),e(Qe,Ye),e(Ye,ya),e(Qe,wa),e(y,Da),e(y,mo),e(mo,Va),e(y,ka),e(y,et),e(et,Pa),e(et,Ft),e(Ft,ja),e(et,za),e(y,Ma),e(y,tt),e(tt,Ca),e(tt,ot),e(ot,Fa),e(tt,Ia),e(y,Aa),e(y,fo),e(fo,La),e(y,qa),e(y,U),e(U,po),e(po,nt),e(nt,Sa),e(U,Oa),e(U,ho),e(ho,rt),e(rt,Na),e(U,Ba),e(U,uo),e(uo,at),e(at,Wa),e(U,Ra),e(U,go),e(go,st),e(st,Ja),e(y,Ua),e(y,O),T(it,O,null),e(O,Ha),e(O
,se),e(se,Za),e(se,It),e(It,Ga),e(se,Xa),e(se,_o),e(_o,Ka),e(se,Qa),e(O,Ya),T(xe,O,null),e(O,es),e(O,vo),e(vo,ts),e(O,os),T(lt,O,null),Mo=!0},p(o,[p]){const dt={};p&2&&(dt.$$scope={dirty:p,ctx:o}),he.$set(dt);const xo={};p&2&&(xo.$$scope={dirty:p,ctx:o}),ue.$set(xo);const To={};p&2&&(To.$$scope={dirty:p,ctx:o}),_e.$set(To);const Eo={};p&2&&(Eo.$$scope={dirty:p,ctx:o}),xe.$set(Eo)},i(o){Mo||(E(h.$$.fragment,o),E(Ee.$$.fragment,o),E($e.$$.fragment,o),E(ye.$$.fragment,o),E(we.$$.fragment,o),E(De.$$.fragment,o),E(ke.$$.fragment,o),E(je.$$.fragment,o),E(ze.$$.fragment,o),E(Me.$$.fragment,o),E(Fe.$$.fragment,o),E(Ae.$$.fragment,o),E(he.$$.fragment,o),E(qe.$$.fragment,o),E(ue.$$.fragment,o),E(Se.$$.fragment,o),E(Oe.$$.fragment,o),E(He.$$.fragment,o),E(_e.$$.fragment,o),E(Ze.$$.fragment,o),E(Ge.$$.fragment,o),E(Xe.$$.fragment,o),E(it.$$.fragment,o),E(xe.$$.fragment,o),E(lt.$$.fragment,o),Mo=!0)},o(o){b(h.$$.fragment,o),b(Ee.$$.fragment,o),b($e.$$.fragment,o),b(ye.$$.fragment,o),b(we.$$.fragment,o),b(De.$$.fragment,o),b(ke.$$.fragment,o),b(je.$$.fragment,o),b(ze.$$.fragment,o),b(Me.$$.fragment,o),b(Fe.$$.fragment,o),b(Ae.$$.fragment,o),b(he.$$.fragment,o),b(qe.$$.fragment,o),b(ue.$$.fragment,o),b(Se.$$.fragment,o),b(Oe.$$.fragment,o),b(He.$$.fragment,o),b(_e.$$.fragment,o),b(Ze.$$.fragment,o),b(Ge.$$.fragment,o),b(Xe.$$.fragment,o),b(it.$$.fragment,o),b(xe.$$.fragment,o),b(lt.$$.fragment,o),Mo=!1},d(o){t(m),o&&t(w),o&&t(f),$(h),o&&t(D),o&&t(A),$(Ee),o&&t(bo),o&&t(V),o&&t($o),o&&t(ie),o&&t(yo),o&&t(Y),$($e),o&&t(wo),o&&t(j),$(ye),$(we),$(De),$(ke),o&&t(Do),o&&t(te),$(je),o&&t(Vo),o&&t(z),$(ze),$(Me),$(Fe),$(Ae),$(he),$(qe),$(ue),o&&t(ko),o&&t(ne),$(Se),o&&t(Po),o&&t(M),$(Oe),$(He),$(_e),$(Ze),o&&t(jo),o&&t(ae),$(Ge),o&&t(zo),o&&t(y),$(Xe),$(it),$(xe),$(lt)}}}const Vi={local:"visiontextdualencoder",sections:[{local:"overview",title:"Overview"},{local:"transformers.VisionTextDualEncoderConfig",title:"VisionTextDualEncoderConfig"},{local:"transformers.VisionTextDualEncoderProcessor",title:"VisionTextDualEncoderProcessor"},{local:"transformers.VisionTextDualEncoderModel",title:"VisionTextDualEncoderModel"},{local:"transformers.FlaxVisionTextDualEncoderModel",title:"FlaxVisionTextDualEncoderModel"}],title:"VisionTextDualEncoder"};function ki(J,m,w){let{fw:f}=m;return J.$$set=g=>{"fw"in g&&w(0,f=g.fw)},[f]}class Ii extends vi{constructor(m){super();xi(this,m,ki,Di,Ti,{fw:0})}}export{Ii as default,Vi as metadata};
9,972
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/sew.mdx-fffd17ee.js
import{S as vs,i as ws,s as bs,e as n,k as c,w as v,t as r,L as Es,c as a,d as o,m as d,a as s,x as w,h as i,b as l,J as e,g as m,y as b,q as E,o as y,B as S}from"../../chunks/vendor-b1433968.js";import{T as ya}from"../../chunks/Tip-c3840994.js";import{D as Ue}from"../../chunks/Docstring-ff504c58.js";import{C as uo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as pt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ys(K){let f,W,h,_,C;return{c(){f=n("p"),W=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),_=r("Module"),C=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){f=a(u,"P",{});var g=s(f);W=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=a(g,"CODE",{});var q=s(h);_=i(q,"Module"),q.forEach(o),C=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,f,g),e(f,W),e(f,h),e(h,_),e(f,C)},d(u){u&&o(f)}}}function Ss(K){let f,W,h,_,C;return{c(){f=n("p"),W=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),_=r("Module"),C=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){f=a(u,"P",{});var g=s(f);W=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=a(g,"CODE",{});var q=s(h);_=i(q,"Module"),q.forEach(o),C=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,f,g),e(f,W),e(f,h),e(h,_),e(f,C)},d(u){u&&o(f)}}}function Ws(K){let f,W,h,_,C;return{c(){f=n("p"),W=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),_=r("Module"),C=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){f=a(u,"P",{});var g=s(f);W=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=a(g,"CODE",{});var q=s(h);_=i(q,"Module"),q.forEach(o),C=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(o)},m(u,g){m(u,f,g),e(f,W),e(f,h),e(h,_),e(f,C)},d(u){u&&o(f)}}}function Cs(K){let f,W,h,_,C,u,g,q,go,zt,L,Z,ft,de,_o,mt,vo,At,ee,wo,pe,bo,Eo,Dt,Re,yo,Vt,Je,ht,So,Nt,Be,Wo,Ot,te,ut,Co,ko,fe,To,Qe,$o,xo,Kt,oe,qo,me,Po,jo,Lt,I,ne,gt,he,Fo,_t,Mo,It,T,ue,zo,H,Ao,Ye,Do,Vo,ge,No,Oo,Ko,U,Lo,Ge,Io,Ho,Xe,Uo,Ro,Jo,vt,Bo,Qo,_e,Ht,R,ae,wt,ve,Yo,bt,Go,Ut,$,we,Xo,be,Zo,Ee,en,tn,on,ye,nn,Ze,an,sn,rn,Se,ln,We,cn,dn,pn,P,Ce,fn,J,mn,et,hn,un,Et,gn,_n,vn,se,wn,yt,bn,En,ke,Rt,B,re,St,Te,yn,Wt,Sn,Jt,x,$e,Wn,Q,Cn,Ct,kn,Tn,xe,$n,xn,qn,qe,Pn,tt,jn,Fn,Mn,Pe,zn,je,An,Dn,Vn,j,Fe,Nn,Y,On,ot,Kn,Ln,kt,In,Hn,Un,ie,Rn,Tt,Jn,Bn,Me,Bt,G,le,$t,ze,Qn,xt,Yn,Qt,k,Ae,Gn,qt,Xn,Zn,De,ea,Ve,ta,oa,na,Ne,aa,nt,sa,ra,ia,Oe,la,Ke,ca,da,pa,F,Le,fa,X,ma,at,ha,ua,Pt,ga,_a,va,ce,wa,jt,ba,Ea,Ie,Yt;return u=new pt({}),de=new pt({}),he=new pt({}),ue=new Ue({props:{name:"class 
transformers.SEWConfig",anchor:"transformers.SEWConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"squeeze_factor",val:" = 2"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)"},{name:"conv_kernel",val:" = (10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"ctc_loss_reduction",val:" = 'mean'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/configuration_sew.py#L29",parametersDescription:[{anchor:"transformers.SEWConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the SEW model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>SEW</code>.`,name:"vocab_size"},{anchor:"transformers.SEWConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.SEWConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.SEWConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SEWConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.SEWConfig.squeeze_factor",description:`<strong>squeeze_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Sequence length downsampling factor after the encoder and upsampling factor after the transformer.`,name:"squeeze_factor"},{anchor:"transformers.SEWConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SEWConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.SEWConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.SEWConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.SEWConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SEWConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.SEWConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in 
feature extractor. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.SEWConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.SEWConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.SEWConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.SEWConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.SEWConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.SEWConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.SEWConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.SEWConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.SEWConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. 
For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.SEWConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.SEWConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.SEWConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.SEWConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.SEWConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.SEWConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.SEWConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. 
Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.SEWConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.SEWConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification">Wav2Vec2ForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.SEWConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"}]}}),_e=new uo({props:{code:`from transformers import SEWModel, SEWConfig # Initializing a SEW asapp/sew-tiny-100k style configuration configuration = SEWConfig() # Initializing a model from the asapp/sew-tiny-100k style configuration model = SEWModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SEWModel, SEWConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SEW asapp/sew-tiny-100k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SEWConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the asapp/sew-tiny-100k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),ve=new pt({}),we=new Ue({props:{name:"class transformers.SEWModel",anchor:"transformers.SEWModel",parameters:[{name:"config",val:": SEWConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/modeling_sew.py#L796",parametersDescription:[{anchor:"transformers.SEWModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ce=new Ue({props:{name:"forward",anchor:"transformers.SEWModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/modeling_sew.py#L862",parametersDescription:[{anchor:"transformers.SEWModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SEWModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SEWModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SEWModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SEWModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig" >SEWConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),se=new ya({props:{$$slots:{default:[ys]},$$scope:{ctx:K}}}),ke=new uo({props:{code:`from transformers import Wav2Vec2Processor, SEWModel from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('asapp/sew-tiny-100k') model = SEWModel.from_pretrained('asapp/sew-tiny-100k') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, SEWModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span 
class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWModel.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Te=new pt({}),$e=new Ue({props:{name:"class transformers.SEWForCTC",anchor:"transformers.SEWForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/modeling_sew.py#L924",parametersDescription:[{anchor:"transformers.SEWForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fe=new Ue({props:{name:"forward",anchor:"transformers.SEWForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/modeling_sew.py#L950",parametersDescription:[{anchor:"transformers.SEWForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SEWForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SEWForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SEWForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SEWForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SEWForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig" >SEWConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ie=new ya({props:{$$slots:{default:[Ss]},$$scope:{ctx:K}}}),Me=new uo({props:{code:`from transformers import Wav2Vec2Processor, SEWForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('asapp/sew-tiny-100k') model = SEWForCTC.from_pretrained('asapp/sew-tiny-100k') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, SEWForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span 
class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWForCTC.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),ze=new pt({}),Ae=new Ue({props:{name:"class transformers.SEWForSequenceClassification",anchor:"transformers.SEWForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/modeling_sew.py#L1036",parametersDescription:[{anchor:"transformers.SEWForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Le=new Ue({props:{name:"forward",anchor:"transformers.SEWForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/sew/modeling_sew.py#L1065",parametersDescription:[{anchor:"transformers.SEWForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.SEWForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SEWForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SEWForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SEWForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SEWForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWConfig" >SEWConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ce=new ya({props:{$$slots:{default:[Ws]},$$scope:{ctx:K}}}),Ie=new uo({props:{code:`from transformers import Wav2Vec2FeatureExtractor, SEWForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('asapp/sew-tiny-100k') model = SEWForSequenceClassification.from_pretrained('asapp/sew-tiny-100k') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. 
"down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, SEWForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;asapp/sew-tiny-100k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. &quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),{c(){f=n("meta"),W=c(),h=n("h1"),_=n("a"),C=n("span"),v(u.$$.fragment),g=c(),q=n("span"),go=r("SEW"),zt=c(),L=n("h2"),Z=n("a"),ft=n("span"),v(de.$$.fragment),_o=c(),mt=n("span"),vo=r("Overview"),At=c(),ee=n("p"),wo=r("SEW (Squeezed and Efficient Wav2Vec) was proposed in "),pe=n("a"),bo=r(`Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition`),Eo=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),Dt=c(),Re=n("p"),yo=r("The abstract from the paper is the following:"),Vt=c(),Je=n("p"),ht=n("em"),So=r(`This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. 
Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.`),Nt=c(),Be=n("p"),Wo=r("Tips:"),Ot=c(),te=n("ul"),ut=n("li"),Co=r("SEW is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ko=c(),fe=n("li"),To=r(`SEWForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Qe=n("a"),$o=r("Wav2Vec2CTCTokenizer"),xo=r("."),Kt=c(),oe=n("p"),qo=r("This model was contributed by "),me=n("a"),Po=r("anton-l"),jo=r("."),Lt=c(),I=n("h2"),ne=n("a"),gt=n("span"),v(he.$$.fragment),Fo=c(),_t=n("span"),Mo=r("SEWConfig"),It=c(),T=n("div"),v(ue.$$.fragment),zo=c(),H=n("p"),Ao=r("This is the configuration class to store the configuration of a "),Ye=n("a"),Do=r("SEWModel"),Vo=r(`. It is used to instantiate a SEW model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW `),ge=n("a"),No=r("asapp/sew-tiny-100k"),Oo=r(" architecture."),Ko=c(),U=n("p"),Lo=r("Configuration objects inherit from "),Ge=n("a"),Io=r("PretrainedConfig"),Ho=r(` and can be used to control the model outputs. Read the documentation from `),Xe=n("a"),Uo=r("PretrainedConfig"),Ro=r(" for more information."),Jo=c(),vt=n("p"),Bo=r("Example:"),Qo=c(),v(_e.$$.fragment),Ht=c(),R=n("h2"),ae=n("a"),wt=n("span"),v(ve.$$.fragment),Yo=c(),bt=n("span"),Go=r("SEWModel"),Ut=c(),$=n("div"),v(we.$$.fragment),Xo=c(),be=n("p"),Zo=r(`The bare SEW Model transformer outputting raw hidden-states without any specific head on top. SEW was proposed in `),Ee=n("a"),en=r("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),tn=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),on=c(),ye=n("p"),nn=r("This model inherits from "),Ze=n("a"),an=r("PreTrainedModel"),sn=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),rn=c(),Se=n("p"),ln=r("This model is a PyTorch "),We=n("a"),cn=r("torch.nn.Module"),dn=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pn=c(),P=n("div"),v(Ce.$$.fragment),fn=c(),J=n("p"),mn=r("The "),et=n("a"),hn=r("SEWModel"),un=r(" forward method, overrides the "),Et=n("code"),gn=r("__call__"),_n=r(" special method."),vn=c(),v(se.$$.fragment),wn=c(),yt=n("p"),bn=r("Example:"),En=c(),v(ke.$$.fragment),Rt=c(),B=n("h2"),re=n("a"),St=n("span"),v(Te.$$.fragment),yn=c(),Wt=n("span"),Sn=r("SEWForCTC"),Jt=c(),x=n("div"),v($e.$$.fragment),Wn=c(),Q=n("p"),Cn=r("SEW Model with a "),Ct=n("code"),kn=r("language modeling"),Tn=r(` head on top for Connectionist Temporal Classification (CTC). SEW was proposed in `),xe=n("a"),$n=r("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),xn=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi.`),qn=c(),qe=n("p"),Pn=r("This model inherits from "),tt=n("a"),jn=r("PreTrainedModel"),Fn=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Mn=c(),Pe=n("p"),zn=r("This model is a PyTorch "),je=n("a"),An=r("torch.nn.Module"),Dn=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vn=c(),j=n("div"),v(Fe.$$.fragment),Nn=c(),Y=n("p"),On=r("The "),ot=n("a"),Kn=r("SEWForCTC"),Ln=r(" forward method, overrides the "),kt=n("code"),In=r("__call__"),Hn=r(" special method."),Un=c(),v(ie.$$.fragment),Rn=c(),Tt=n("p"),Jn=r("Example:"),Bn=c(),v(Me.$$.fragment),Bt=c(),G=n("h2"),le=n("a"),$t=n("span"),v(ze.$$.fragment),Qn=c(),xt=n("span"),Yn=r("SEWForSequenceClassification"),Qt=c(),k=n("div"),v(Ae.$$.fragment),Gn=c(),qt=n("p"),Xn=r(`SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),Zn=c(),De=n("p"),ea=r("SEW was proposed in "),Ve=n("a"),ta=r("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),oa=r(` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),na=c(),Ne=n("p"),aa=r("This model inherits from "),nt=n("a"),sa=r("PreTrainedModel"),ra=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),ia=c(),Oe=n("p"),la=r("This model is a PyTorch "),Ke=n("a"),ca=r("torch.nn.Module"),da=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pa=c(),F=n("div"),v(Le.$$.fragment),fa=c(),X=n("p"),ma=r("The "),at=n("a"),ha=r("SEWForSequenceClassification"),ua=r(" forward method, overrides the "),Pt=n("code"),ga=r("__call__"),_a=r(" special method."),va=c(),v(ce.$$.fragment),wa=c(),jt=n("p"),ba=r("Example:"),Ea=c(),v(Ie.$$.fragment),this.h()},l(t){const p=Es('[data-svelte="svelte-1phssyn"]',document.head);f=a(p,"META",{name:!0,content:!0}),p.forEach(o),W=d(t),h=a(t,"H1",{class:!0});var He=s(h);_=a(He,"A",{id:!0,class:!0,href:!0});var Ft=s(_);C=a(Ft,"SPAN",{});var Mt=s(C);w(u.$$.fragment,Mt),Mt.forEach(o),Ft.forEach(o),g=d(He),q=a(He,"SPAN",{});var Sa=s(q);go=i(Sa,"SEW"),Sa.forEach(o),He.forEach(o),zt=d(t),L=a(t,"H2",{class:!0});var Gt=s(L);Z=a(Gt,"A",{id:!0,class:!0,href:!0});var Wa=s(Z);ft=a(Wa,"SPAN",{});var Ca=s(ft);w(de.$$.fragment,Ca),Ca.forEach(o),Wa.forEach(o),_o=d(Gt),mt=a(Gt,"SPAN",{});var ka=s(mt);vo=i(ka,"Overview"),ka.forEach(o),Gt.forEach(o),At=d(t),ee=a(t,"P",{});var Xt=s(ee);wo=i(Xt,"SEW (Squeezed and Efficient Wav2Vec) was proposed in "),pe=a(Xt,"A",{href:!0,rel:!0});var Ta=s(pe);bo=i(Ta,`Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition`),Ta.forEach(o),Eo=i(Xt,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),Xt.forEach(o),Dt=d(t),Re=a(t,"P",{});var $a=s(Re);yo=i($a,"The abstract from the paper is the following:"),$a.forEach(o),Vt=d(t),Je=a(t,"P",{});var xa=s(Je);ht=a(xa,"EM",{});var qa=s(ht);So=i(qa,`This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. 
Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.`),qa.forEach(o),xa.forEach(o),Nt=d(t),Be=a(t,"P",{});var Pa=s(Be);Wo=i(Pa,"Tips:"),Pa.forEach(o),Ot=d(t),te=a(t,"UL",{});var Zt=s(te);ut=a(Zt,"LI",{});var ja=s(ut);Co=i(ja,"SEW is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ja.forEach(o),ko=d(Zt),fe=a(Zt,"LI",{});var eo=s(fe);To=i(eo,`SEWForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Qe=a(eo,"A",{href:!0});var Fa=s(Qe);$o=i(Fa,"Wav2Vec2CTCTokenizer"),Fa.forEach(o),xo=i(eo,"."),eo.forEach(o),Zt.forEach(o),Kt=d(t),oe=a(t,"P",{});var to=s(oe);qo=i(to,"This model was contributed by "),me=a(to,"A",{href:!0,rel:!0});var Ma=s(me);Po=i(Ma,"anton-l"),Ma.forEach(o),jo=i(to,"."),to.forEach(o),Lt=d(t),I=a(t,"H2",{class:!0});var oo=s(I);ne=a(oo,"A",{id:!0,class:!0,href:!0});var za=s(ne);gt=a(za,"SPAN",{});var Aa=s(gt);w(he.$$.fragment,Aa),Aa.forEach(o),za.forEach(o),Fo=d(oo),_t=a(oo,"SPAN",{});var Da=s(_t);Mo=i(Da,"SEWConfig"),Da.forEach(o),oo.forEach(o),It=d(t),T=a(t,"DIV",{class:!0});var z=s(T);w(ue.$$.fragment,z),zo=d(z),H=a(z,"P",{});var st=s(H);Ao=i(st,"This is the configuration class to store the configuration of a "),Ye=a(st,"A",{href:!0});var Va=s(Ye);Do=i(Va,"SEWModel"),Va.forEach(o),Vo=i(st,`. It is used to instantiate a SEW model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW `),ge=a(st,"A",{href:!0,rel:!0});var Na=s(ge);No=i(Na,"asapp/sew-tiny-100k"),Na.forEach(o),Oo=i(st," architecture."),st.forEach(o),Ko=d(z),U=a(z,"P",{});var rt=s(U);Lo=i(rt,"Configuration objects inherit from "),Ge=a(rt,"A",{href:!0});var Oa=s(Ge);Io=i(Oa,"PretrainedConfig"),Oa.forEach(o),Ho=i(rt,` and can be used to control the model outputs. Read the documentation from `),Xe=a(rt,"A",{href:!0});var Ka=s(Xe);Uo=i(Ka,"PretrainedConfig"),Ka.forEach(o),Ro=i(rt," for more information."),rt.forEach(o),Jo=d(z),vt=a(z,"P",{});var La=s(vt);Bo=i(La,"Example:"),La.forEach(o),Qo=d(z),w(_e.$$.fragment,z),z.forEach(o),Ht=d(t),R=a(t,"H2",{class:!0});var no=s(R);ae=a(no,"A",{id:!0,class:!0,href:!0});var Ia=s(ae);wt=a(Ia,"SPAN",{});var Ha=s(wt);w(ve.$$.fragment,Ha),Ha.forEach(o),Ia.forEach(o),Yo=d(no),bt=a(no,"SPAN",{});var Ua=s(bt);Go=i(Ua,"SEWModel"),Ua.forEach(o),no.forEach(o),Ut=d(t),$=a(t,"DIV",{class:!0});var A=s($);w(we.$$.fragment,A),Xo=d(A),be=a(A,"P",{});var ao=s(be);Zo=i(ao,`The bare SEW Model transformer outputting raw hidden-states without any specific head on top. SEW was proposed in `),Ee=a(ao,"A",{href:!0,rel:!0});var Ra=s(Ee);en=i(Ra,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),Ra.forEach(o),tn=i(ao,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),ao.forEach(o),on=d(A),ye=a(A,"P",{});var so=s(ye);nn=i(so,"This model inherits from "),Ze=a(so,"A",{href:!0});var Ja=s(Ze);an=i(Ja,"PreTrainedModel"),Ja.forEach(o),sn=i(so,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),so.forEach(o),rn=d(A),Se=a(A,"P",{});var ro=s(Se);ln=i(ro,"This model is a PyTorch "),We=a(ro,"A",{href:!0,rel:!0});var Ba=s(We);cn=i(Ba,"torch.nn.Module"),Ba.forEach(o),dn=i(ro,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ro.forEach(o),pn=d(A),P=a(A,"DIV",{class:!0});var D=s(P);w(Ce.$$.fragment,D),fn=d(D),J=a(D,"P",{});var it=s(J);mn=i(it,"The "),et=a(it,"A",{href:!0});var Qa=s(et);hn=i(Qa,"SEWModel"),Qa.forEach(o),un=i(it," forward method, overrides the "),Et=a(it,"CODE",{});var Ya=s(Et);gn=i(Ya,"__call__"),Ya.forEach(o),_n=i(it," special method."),it.forEach(o),vn=d(D),w(se.$$.fragment,D),wn=d(D),yt=a(D,"P",{});var Ga=s(yt);bn=i(Ga,"Example:"),Ga.forEach(o),En=d(D),w(ke.$$.fragment,D),D.forEach(o),A.forEach(o),Rt=d(t),B=a(t,"H2",{class:!0});var io=s(B);re=a(io,"A",{id:!0,class:!0,href:!0});var Xa=s(re);St=a(Xa,"SPAN",{});var Za=s(St);w(Te.$$.fragment,Za),Za.forEach(o),Xa.forEach(o),yn=d(io),Wt=a(io,"SPAN",{});var es=s(Wt);Sn=i(es,"SEWForCTC"),es.forEach(o),io.forEach(o),Jt=d(t),x=a(t,"DIV",{class:!0});var V=s(x);w($e.$$.fragment,V),Wn=d(V),Q=a(V,"P",{});var lt=s(Q);Cn=i(lt,"SEW Model with a "),Ct=a(lt,"CODE",{});var ts=s(Ct);kn=i(ts,"language modeling"),ts.forEach(o),Tn=i(lt,` head on top for Connectionist Temporal Classification (CTC). SEW was proposed in `),xe=a(lt,"A",{href:!0,rel:!0});var os=s(xe);$n=i(os,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),os.forEach(o),xn=i(lt,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),lt.forEach(o),qn=d(V),qe=a(V,"P",{});var lo=s(qe);Pn=i(lo,"This model inherits from "),tt=a(lo,"A",{href:!0});var ns=s(tt);jn=i(ns,"PreTrainedModel"),ns.forEach(o),Fn=i(lo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),lo.forEach(o),Mn=d(V),Pe=a(V,"P",{});var co=s(Pe);zn=i(co,"This model is a PyTorch "),je=a(co,"A",{href:!0,rel:!0});var as=s(je);An=i(as,"torch.nn.Module"),as.forEach(o),Dn=i(co,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),co.forEach(o),Vn=d(V),j=a(V,"DIV",{class:!0});var N=s(j);w(Fe.$$.fragment,N),Nn=d(N),Y=a(N,"P",{});var ct=s(Y);On=i(ct,"The "),ot=a(ct,"A",{href:!0});var ss=s(ot);Kn=i(ss,"SEWForCTC"),ss.forEach(o),Ln=i(ct," forward method, overrides the "),kt=a(ct,"CODE",{});var rs=s(kt);In=i(rs,"__call__"),rs.forEach(o),Hn=i(ct," special method."),ct.forEach(o),Un=d(N),w(ie.$$.fragment,N),Rn=d(N),Tt=a(N,"P",{});var is=s(Tt);Jn=i(is,"Example:"),is.forEach(o),Bn=d(N),w(Me.$$.fragment,N),N.forEach(o),V.forEach(o),Bt=d(t),G=a(t,"H2",{class:!0});var po=s(G);le=a(po,"A",{id:!0,class:!0,href:!0});var ls=s(le);$t=a(ls,"SPAN",{});var cs=s($t);w(ze.$$.fragment,cs),cs.forEach(o),ls.forEach(o),Qn=d(po),xt=a(po,"SPAN",{});var ds=s(xt);Yn=i(ds,"SEWForSequenceClassification"),ds.forEach(o),po.forEach(o),Qt=d(t),k=a(t,"DIV",{class:!0});var M=s(k);w(Ae.$$.fragment,M),Gn=d(M),qt=a(M,"P",{});var ps=s(qt);Xn=i(ps,`SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),ps.forEach(o),Zn=d(M),De=a(M,"P",{});var fo=s(De);ea=i(fo,"SEW was proposed in "),Ve=a(fo,"A",{href:!0,rel:!0});var fs=s(Ve);ta=i(fs,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),fs.forEach(o),oa=i(fo,` by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.`),fo.forEach(o),na=d(M),Ne=a(M,"P",{});var mo=s(Ne);aa=i(mo,"This model inherits from "),nt=a(mo,"A",{href:!0});var ms=s(nt);sa=i(ms,"PreTrainedModel"),ms.forEach(o),ra=i(mo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),mo.forEach(o),ia=d(M),Oe=a(M,"P",{});var ho=s(Oe);la=i(ho,"This model is a PyTorch "),Ke=a(ho,"A",{href:!0,rel:!0});var hs=s(Ke);ca=i(hs,"torch.nn.Module"),hs.forEach(o),da=i(ho,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ho.forEach(o),pa=d(M),F=a(M,"DIV",{class:!0});var O=s(F);w(Le.$$.fragment,O),fa=d(O),X=a(O,"P",{});var dt=s(X);ma=i(dt,"The "),at=a(dt,"A",{href:!0});var us=s(at);ha=i(us,"SEWForSequenceClassification"),us.forEach(o),ua=i(dt," forward method, overrides the "),Pt=a(dt,"CODE",{});var gs=s(Pt);ga=i(gs,"__call__"),gs.forEach(o),_a=i(dt," special method."),dt.forEach(o),va=d(O),w(ce.$$.fragment,O),wa=d(O),jt=a(O,"P",{});var _s=s(jt);ba=i(_s,"Example:"),_s.forEach(o),Ea=d(O),w(Ie.$$.fragment,O),O.forEach(o),M.forEach(o),this.h()},h(){l(f,"name","hf:doc:metadata"),l(f,"content",JSON.stringify(ks)),l(_,"id","sew"),l(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_,"href","#sew"),l(h,"class","relative group"),l(Z,"id","overview"),l(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Z,"href","#overview"),l(L,"class","relative group"),l(pe,"href","https://arxiv.org/abs/2109.06870"),l(pe,"rel","nofollow"),l(Qe,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),l(me,"href","https://huggingface.co/anton-l"),l(me,"rel","nofollow"),l(ne,"id","transformers.SEWConfig"),l(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ne,"href","#transformers.SEWConfig"),l(I,"class","relative group"),l(Ye,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWModel"),l(ge,"href","https://huggingface.co/asapp/sew-tiny-100k"),l(ge,"rel","nofollow"),l(Ge,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Xe,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(T,"class","docstring"),l(ae,"id","transformers.SEWModel"),l(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ae,"href","#transformers.SEWModel"),l(R,"class","relative group"),l(Ee,"href","https://arxiv.org/abs/2109.06870"),l(Ee,"rel","nofollow"),l(Ze,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(We,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(We,"rel","nofollow"),l(et,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWModel"),l(P,"class","docstring"),l($,"class","docstring"),l(re,"id","transformers.SEWForCTC"),l(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(re,"href","#transformers.SEWForCTC"),l(B,"class","relative 
group"),l(xe,"href","https://arxiv.org/abs/2109.06870"),l(xe,"rel","nofollow"),l(tt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(je,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(je,"rel","nofollow"),l(ot,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForCTC"),l(j,"class","docstring"),l(x,"class","docstring"),l(le,"id","transformers.SEWForSequenceClassification"),l(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(le,"href","#transformers.SEWForSequenceClassification"),l(G,"class","relative group"),l(Ve,"href","https://arxiv.org/abs/2109.06870"),l(Ve,"rel","nofollow"),l(nt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ke,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ke,"rel","nofollow"),l(at,"href","/docs/transformers/v4.15.0/en/model_doc/sew#transformers.SEWForSequenceClassification"),l(F,"class","docstring"),l(k,"class","docstring")},m(t,p){e(document.head,f),m(t,W,p),m(t,h,p),e(h,_),e(_,C),b(u,C,null),e(h,g),e(h,q),e(q,go),m(t,zt,p),m(t,L,p),e(L,Z),e(Z,ft),b(de,ft,null),e(L,_o),e(L,mt),e(mt,vo),m(t,At,p),m(t,ee,p),e(ee,wo),e(ee,pe),e(pe,bo),e(ee,Eo),m(t,Dt,p),m(t,Re,p),e(Re,yo),m(t,Vt,p),m(t,Je,p),e(Je,ht),e(ht,So),m(t,Nt,p),m(t,Be,p),e(Be,Wo),m(t,Ot,p),m(t,te,p),e(te,ut),e(ut,Co),e(te,ko),e(te,fe),e(fe,To),e(fe,Qe),e(Qe,$o),e(fe,xo),m(t,Kt,p),m(t,oe,p),e(oe,qo),e(oe,me),e(me,Po),e(oe,jo),m(t,Lt,p),m(t,I,p),e(I,ne),e(ne,gt),b(he,gt,null),e(I,Fo),e(I,_t),e(_t,Mo),m(t,It,p),m(t,T,p),b(ue,T,null),e(T,zo),e(T,H),e(H,Ao),e(H,Ye),e(Ye,Do),e(H,Vo),e(H,ge),e(ge,No),e(H,Oo),e(T,Ko),e(T,U),e(U,Lo),e(U,Ge),e(Ge,Io),e(U,Ho),e(U,Xe),e(Xe,Uo),e(U,Ro),e(T,Jo),e(T,vt),e(vt,Bo),e(T,Qo),b(_e,T,null),m(t,Ht,p),m(t,R,p),e(R,ae),e(ae,wt),b(ve,wt,null),e(R,Yo),e(R,bt),e(bt,Go),m(t,Ut,p),m(t,$,p),b(we,$,null),e($,Xo),e($,be),e(be,Zo),e(be,Ee),e(Ee,en),e(be,tn),e($,on),e($,ye),e(ye,nn),e(ye,Ze),e(Ze,an),e(ye,sn),e($,rn),e($,Se),e(Se,ln),e(Se,We),e(We,cn),e(Se,dn),e($,pn),e($,P),b(Ce,P,null),e(P,fn),e(P,J),e(J,mn),e(J,et),e(et,hn),e(J,un),e(J,Et),e(Et,gn),e(J,_n),e(P,vn),b(se,P,null),e(P,wn),e(P,yt),e(yt,bn),e(P,En),b(ke,P,null),m(t,Rt,p),m(t,B,p),e(B,re),e(re,St),b(Te,St,null),e(B,yn),e(B,Wt),e(Wt,Sn),m(t,Jt,p),m(t,x,p),b($e,x,null),e(x,Wn),e(x,Q),e(Q,Cn),e(Q,Ct),e(Ct,kn),e(Q,Tn),e(Q,xe),e(xe,$n),e(Q,xn),e(x,qn),e(x,qe),e(qe,Pn),e(qe,tt),e(tt,jn),e(qe,Fn),e(x,Mn),e(x,Pe),e(Pe,zn),e(Pe,je),e(je,An),e(Pe,Dn),e(x,Vn),e(x,j),b(Fe,j,null),e(j,Nn),e(j,Y),e(Y,On),e(Y,ot),e(ot,Kn),e(Y,Ln),e(Y,kt),e(kt,In),e(Y,Hn),e(j,Un),b(ie,j,null),e(j,Rn),e(j,Tt),e(Tt,Jn),e(j,Bn),b(Me,j,null),m(t,Bt,p),m(t,G,p),e(G,le),e(le,$t),b(ze,$t,null),e(G,Qn),e(G,xt),e(xt,Yn),m(t,Qt,p),m(t,k,p),b(Ae,k,null),e(k,Gn),e(k,qt),e(qt,Xn),e(k,Zn),e(k,De),e(De,ea),e(De,Ve),e(Ve,ta),e(De,oa),e(k,na),e(k,Ne),e(Ne,aa),e(Ne,nt),e(nt,sa),e(Ne,ra),e(k,ia),e(k,Oe),e(Oe,la),e(Oe,Ke),e(Ke,ca),e(Oe,da),e(k,pa),e(k,F),b(Le,F,null),e(F,fa),e(F,X),e(X,ma),e(X,at),e(at,ha),e(X,ua),e(X,Pt),e(Pt,ga),e(X,_a),e(F,va),b(ce,F,null),e(F,wa),e(F,jt),e(jt,ba),e(F,Ea),b(Ie,F,null),Yt=!0},p(t,[p]){const He={};p&2&&(He.$$scope={dirty:p,ctx:t}),se.$set(He);const Ft={};p&2&&(Ft.$$scope={dirty:p,ctx:t}),ie.$set(Ft);const 
Mt={};p&2&&(Mt.$$scope={dirty:p,ctx:t}),ce.$set(Mt)},i(t){Yt||(E(u.$$.fragment,t),E(de.$$.fragment,t),E(he.$$.fragment,t),E(ue.$$.fragment,t),E(_e.$$.fragment,t),E(ve.$$.fragment,t),E(we.$$.fragment,t),E(Ce.$$.fragment,t),E(se.$$.fragment,t),E(ke.$$.fragment,t),E(Te.$$.fragment,t),E($e.$$.fragment,t),E(Fe.$$.fragment,t),E(ie.$$.fragment,t),E(Me.$$.fragment,t),E(ze.$$.fragment,t),E(Ae.$$.fragment,t),E(Le.$$.fragment,t),E(ce.$$.fragment,t),E(Ie.$$.fragment,t),Yt=!0)},o(t){y(u.$$.fragment,t),y(de.$$.fragment,t),y(he.$$.fragment,t),y(ue.$$.fragment,t),y(_e.$$.fragment,t),y(ve.$$.fragment,t),y(we.$$.fragment,t),y(Ce.$$.fragment,t),y(se.$$.fragment,t),y(ke.$$.fragment,t),y(Te.$$.fragment,t),y($e.$$.fragment,t),y(Fe.$$.fragment,t),y(ie.$$.fragment,t),y(Me.$$.fragment,t),y(ze.$$.fragment,t),y(Ae.$$.fragment,t),y(Le.$$.fragment,t),y(ce.$$.fragment,t),y(Ie.$$.fragment,t),Yt=!1},d(t){o(f),t&&o(W),t&&o(h),S(u),t&&o(zt),t&&o(L),S(de),t&&o(At),t&&o(ee),t&&o(Dt),t&&o(Re),t&&o(Vt),t&&o(Je),t&&o(Nt),t&&o(Be),t&&o(Ot),t&&o(te),t&&o(Kt),t&&o(oe),t&&o(Lt),t&&o(I),S(he),t&&o(It),t&&o(T),S(ue),S(_e),t&&o(Ht),t&&o(R),S(ve),t&&o(Ut),t&&o($),S(we),S(Ce),S(se),S(ke),t&&o(Rt),t&&o(B),S(Te),t&&o(Jt),t&&o(x),S($e),S(Fe),S(ie),S(Me),t&&o(Bt),t&&o(G),S(ze),t&&o(Qt),t&&o(k),S(Ae),S(Le),S(ce),S(Ie)}}}const ks={local:"sew",sections:[{local:"overview",title:"Overview"},{local:"transformers.SEWConfig",title:"SEWConfig"},{local:"transformers.SEWModel",title:"SEWModel"},{local:"transformers.SEWForCTC",title:"SEWForCTC"},{local:"transformers.SEWForSequenceClassification",title:"SEWForSequenceClassification"}],title:"SEW"};function Ts(K,f,W){let{fw:h}=f;return K.$$set=_=>{"fw"in _&&W(0,h=_.fw)},[h]}class Ms extends vs{constructor(f){super();ws(this,f,Ts,Cs,bs,{fw:0})}}export{Ms as default,ks as metadata};
9,973
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/mobilebert.mdx-c97f219a.js
import{S as rM,i as aM,s as iM,e as r,k as l,w,t as n,L as lM,c as a,d as t,m as d,a as i,x as y,h as s,b as p,J as e,g as m,y as $,q as F,o as B,B as E}from"../../chunks/vendor-b1433968.js";import{T as Ee}from"../../chunks/Tip-c3840994.js";import{D as ie}from"../../chunks/Docstring-ff504c58.js";import{C as Pe}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ze}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function dM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function cM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function pM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function hM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function mM(R){let 
h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function uM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function fM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function gM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function _M(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using 
"),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function bM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function vM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, 
token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function TM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function kM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method 
which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function MM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function wM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one 
or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function yM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function $M(R){let 
h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function FM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function BM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function EM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function zM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors 
associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function xM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function PM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be;return{c(){h=r("p"),k=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),v=r("li"),b=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),M=r("li"),he=n("having all 
inputs as a list, tuple or dict in the first positional arguments."),K=l(),z=r("p"),Z=n("This second option is useful when using "),D=r("code"),ee=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),O=r("code"),ue=n("model(inputs)"),le=n("."),V=l(),j=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),G=l(),x=r("ul"),C=r("li"),ne=n("a single Tensor with "),H=r("code"),de=n("input_ids"),se=n(" only and nothing else: "),L=r("code"),fe=n("model(inputs_ids)"),ce=l(),P=r("li"),ge=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=r("code"),X=n("model([input_ids, attention_mask])"),re=n(" or "),Q=r("code"),_e=n("model([input_ids, attention_mask, token_type_ids])"),ae=l(),N=r("li"),pe=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r("code"),be=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){h=a(c,"P",{});var T=i(h);k=s(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),f=d(c),g=a(c,"UL",{});var J=i(g);v=a(J,"LI",{});var ke=i(v);b=s(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),_=d(J),M=a(J,"LI",{});var $e=i(M);he=s($e,"having all inputs as a list, tuple or dict in the first positional arguments."),$e.forEach(t),J.forEach(t),K=d(c),z=a(c,"P",{});var S=i(z);Z=s(S,"This second option is useful when using "),D=a(S,"CODE",{});var we=i(D);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),me=s(S,` method which currently requires having all the tensors in the first argument of the model call function: `),O=a(S,"CODE",{});var ve=i(O);ue=s(ve,"model(inputs)"),ve.forEach(t),le=s(S,"."),S.forEach(t),V=d(c),j=a(c,"P",{});var Te=i(j);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),G=d(c),x=a(c,"UL",{});var q=i(x);C=a(q,"LI",{});var I=i(C);ne=s(I,"a single Tensor with "),H=a(I,"CODE",{});var Fe=i(H);de=s(Fe,"input_ids"),Fe.forEach(t),se=s(I," only and nothing else: "),L=a(I,"CODE",{});var Me=i(L);fe=s(Me,"model(inputs_ids)"),Me.forEach(t),I.forEach(t),ce=d(q),P=a(q,"LI",{});var U=i(P);ge=s(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=a(U,"CODE",{});var ye=i(W);X=s(ye,"model([input_ids, attention_mask])"),ye.forEach(t),re=s(U," or "),Q=a(U,"CODE",{});var Y=i(Q);_e=s(Y,"model([input_ids, attention_mask, token_type_ids])"),Y.forEach(t),U.forEach(t),ae=d(q),N=a(q,"LI",{});var oe=i(N);pe=s(oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a(oe,"CODE",{});var Be=i(A);be=s(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(t),oe.forEach(t),q.forEach(t)},m(c,T){m(c,h,T),e(h,k),m(c,f,T),m(c,g,T),e(g,v),e(v,b),e(g,_),e(g,M),e(M,he),m(c,K,T),m(c,z,T),e(z,Z),e(z,D),e(D,ee),e(z,me),e(z,O),e(O,ue),e(z,le),m(c,V,T),m(c,j,T),e(j,te),m(c,G,T),m(c,x,T),e(x,C),e(C,ne),e(C,H),e(H,de),e(C,se),e(C,L),e(L,fe),e(x,ce),e(x,P),e(P,ge),e(P,W),e(W,X),e(P,re),e(P,Q),e(Q,_e),e(x,ae),e(x,N),e(N,pe),e(N,A),e(A,be)},d(c){c&&t(h),c&&t(f),c&&t(g),c&&t(K),c&&t(z),c&&t(V),c&&t(j),c&&t(G),c&&t(x)}}}function CM(R){let h,k,f,g,v;return{c(){h=r("p"),k=n("Although the recipe for forward pass needs to be defined 
within this function, one should call the "),f=r("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){h=a(b,"P",{});var _=i(h);k=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var M=i(f);g=s(M,"Module"),M.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(b,_){m(b,h,_),e(h,k),e(h,f),e(f,g),e(h,v)},d(b){b&&t(h)}}}function qM(R){let h,k,f,g,v,b,_,M,he,K,z,Z,D,ee,me,O,ue,le,V,j,te,G,x,C,ne,H,de,se,L,fe,ce,P,ge,W,X,re,Q,_e,ae,N,pe,A,be,c,T,J,ke,$e,S,we,ve,Te,q,I,Fe,Me,U,ye,Y,oe,Be,Ht,_p,na,bp,vp,sa,Tp,kp,Mp,Qt,wp,ra,yp,$p,aa,Fp,Bp,Ep,fi,zp,xp,zn,Pp,gi,Cp,wd,Ut,No,_i,xn,qp,bi,jp,yd,ct,Pn,Np,vi,Sp,Ap,So,ia,Ip,Dp,la,Op,Lp,Wp,Cn,Rp,da,Hp,Qp,$d,Vt,Ao,Ti,qn,Up,ki,Vp,Fd,pt,jn,Kp,Nn,Gp,Mi,Jp,Yp,Zp,Io,ca,Xp,eh,pa,th,oh,nh,Sn,sh,ha,rh,ah,Bd,Kt,Do,wi,An,ih,yi,lh,Ed,Gt,In,dh,Dn,ch,ma,ph,hh,zd,Jt,On,mh,Ln,uh,ua,fh,gh,xd,Yt,Oo,$i,Wn,_h,Fi,bh,Pd,Ce,Rn,vh,Bi,Th,kh,Hn,Mh,fa,wh,yh,$h,Qn,Fh,Un,Bh,Eh,zh,Ei,Vn,xh,Ph,Ke,Kn,Ch,Zt,qh,ga,jh,Nh,zi,Sh,Ah,Ih,Lo,Dh,xi,Oh,Lh,Gn,Cd,Xt,Wo,Pi,Jn,Wh,Ci,Rh,qd,Le,Yn,Hh,eo,Qh,qi,Uh,Vh,ji,Kh,Gh,Jh,Zn,Yh,_a,Zh,Xh,em,Xn,tm,es,om,nm,sm,Ge,ts,rm,to,am,ba,im,lm,Ni,dm,cm,pm,Ro,hm,Si,mm,um,os,jd,oo,Ho,Ai,ns,fm,Ii,gm,Nd,We,ss,_m,rs,bm,Di,vm,Tm,km,as,Mm,va,wm,ym,$m,is,Fm,ls,Bm,Em,zm,Je,ds,xm,no,Pm,Ta,Cm,qm,Oi,jm,Nm,Sm,Qo,Am,Li,Im,Dm,cs,Sd,so,Uo,Wi,ps,Om,Ri,Lm,Ad,Re,hs,Wm,ms,Rm,Hi,Hm,Qm,Um,us,Vm,ka,Km,Gm,Jm,fs,Ym,gs,Zm,Xm,eu,Ye,_s,tu,ro,ou,Ma,nu,su,Qi,ru,au,iu,Vo,lu,Ui,du,cu,bs,Id,ao,Ko,Vi,vs,pu,Ki,hu,Dd,He,Ts,mu,Gi,uu,fu,ks,gu,wa,_u,bu,vu,Ms,Tu,ws,ku,Mu,wu,xe,ys,yu,io,$u,ya,Fu,Bu,Ji,Eu,zu,xu,Go,Pu,Yi,Cu,qu,$s,ju,Zi,Nu,Su,Fs,Od,lo,Jo,Xi,Bs,Au,el,Iu,Ld,Qe,Es,Du,tl,Ou,Lu,zs,Wu,$a,Ru,Hu,Qu,xs,Uu,Ps,Vu,Ku,Gu,Ze,Cs,Ju,co,Yu,Fa,Zu,Xu,ol,ef,tf,of,Yo,nf,nl,sf,rf,qs,Wd,po,Zo,sl,js,af,rl,lf,Rd,Ue,Ns,df,al,cf,pf,Ss,hf,Ba,mf,uf,ff,As,gf,Is,_f,bf,vf,Xe,Ds,Tf,ho,kf,Ea,Mf,wf,il,yf,$f,Ff,Xo,Bf,ll,Ef,zf,Os,Hd,mo,en,dl,Ls,xf,cl,Pf,Qd,Ve,Ws,Cf,uo,qf,pl,jf,Nf,hl,Sf,Af,If,Rs,Df,za,Of,Lf,Wf,Hs,Rf,Qs,Hf,Qf,Uf,et,Us,Vf,fo,Kf,xa,Gf,Jf,ml,Yf,Zf,Xf,tn,eg,ul,tg,og,Vs,Ud,go,on,fl,Ks,ng,gl,sg,Vd,qe,Gs,rg,_l,ag,ig,Js,lg,Pa,dg,cg,pg,Ys,hg,Zs,mg,ug,fg,nn,gg,tt,Xs,_g,_o,bg,Ca,vg,Tg,bl,kg,Mg,wg,sn,yg,vl,$g,Fg,er,Kd,bo,rn,Tl,tr,Bg,kl,Eg,Gd,je,or,zg,vo,xg,Ml,Pg,Cg,wl,qg,jg,Ng,nr,Sg,qa,Ag,Ig,Dg,sr,Og,rr,Lg,Wg,Rg,an,Hg,ot,ar,Qg,To,Ug,ja,Vg,Kg,yl,Gg,Jg,Yg,ln,Zg,$l,Xg,e_,ir,Jd,ko,dn,Fl,lr,t_,Bl,o_,Yd,Ne,dr,n_,cr,s_,El,r_,a_,i_,pr,l_,Na,d_,c_,p_,hr,h_,mr,m_,u_,f_,cn,g_,nt,ur,__,Mo,b_,Sa,v_,T_,zl,k_,M_,w_,pn,y_,xl,$_,F_,fr,Zd,wo,hn,Pl,gr,B_,Cl,E_,Xd,Se,_r,z_,br,x_,ql,P_,C_,q_,vr,j_,Aa,N_,S_,A_,Tr,I_,kr,D_,O_,L_,mn,W_,st,Mr,R_,yo,H_,Ia,Q_,U_,jl,V_,K_,G_,un,J_,Nl,Y_,Z_,wr,ec,$o,fn,Sl,yr,X_,Al,eb,tc,Ae,$r,tb,Il,ob,nb,Fr,sb,Da,rb,ab,ib,Br,lb,Er,db,cb,pb,gn,hb,rt,zr,mb,Fo,ub,Oa,fb,gb,Dl,_b,bb,vb,_n,Tb,Ol,kb,Mb,xr,oc,Bo,bn,Ll,Pr,wb,Wl,yb,nc,Ie,Cr,$b,Rl,Fb,Bb,qr,Eb,La,zb,xb,Pb,jr,Cb,Nr,qb,jb,Nb,vn,Sb,at,Sr,Ab,Eo,Ib,Wa,Db,Ob,Hl,Lb,Wb,Rb,Tn,Hb,Ql,Qb,Ub,Ar,sc,zo,kn,Ul,Ir,Vb,Vl,Kb,rc,De,Dr,Gb,Kl,Jb,Yb,Or,Zb,Ra,Xb,ev,tv,Lr,ov,Wr,nv,sv,rv,Mn,av,it,Rr,iv,xo,lv,Ha,dv,cv,Gl,pv,hv,mv,wn,uv,Jl,fv,gv,Hr,ac,Po,yn,Yl,Qr,_v,Zl,bv,ic,Oe,Ur,vv,Co,Tv,Xl,kv,Mv,ed,wv,yv,$v,Vr,Fv,Qa,Bv,Ev,zv,Kr,xv,Gr,Pv,Cv,qv,$n,jv,lt,Jr,Nv,qo,Sv,Ua,Av,Iv,td,Dv,Ov,Lv,Fn,Wv,od,Rv,Hv,Yr,lc;return b=new ze({}),ee=new ze({}),I=new 
ze({}),oe=new ie({props:{name:"class transformers.MobileBertConfig",anchor:"transformers.MobileBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 512"},{name:"num_hidden_layers",val:" = 24"},{name:"num_attention_heads",val:" = 4"},{name:"intermediate_size",val:" = 512"},{name:"hidden_act",val:" = 'relu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"embedding_size",val:" = 128"},{name:"trigram_input",val:" = True"},{name:"use_bottleneck",val:" = True"},{name:"intra_bottleneck_size",val:" = 128"},{name:"use_bottleneck_attention",val:" = False"},{name:"key_query_shared_bottleneck",val:" = True"},{name:"num_feedforward_networks",val:" = 4"},{name:"normalization_type",val:" = 'no_norm'"},{name:"classifier_activation",val:" = True"},{name:"classifier_dropout",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/configuration_mobilebert.py#L28",parametersDescription:[{anchor:"transformers.MobileBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the MobileBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertModel">MobileBertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertModel">TFMobileBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.MobileBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.MobileBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.MobileBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.MobileBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.MobileBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.MobileBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.MobileBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.MobileBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.MobileBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertModel">MobileBertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertModel">TFMobileBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.MobileBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.MobileBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.MobileBertConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The ID of the token in the word embedding to use as padding.`,name:"pad_token_id"},{anchor:"transformers.MobileBertConfig.embedding_size",description:`<strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The dimension of the word embedding vectors.`,name:"embedding_size"},{anchor:"transformers.MobileBertConfig.trigram_input",description:`<strong>trigram_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Use a convolution of trigram as input.`,name:"trigram_input"},{anchor:"transformers.MobileBertConfig.use_bottleneck",description:`<strong>use_bottleneck</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use bottleneck in BERT.`,name:"use_bottleneck"},{anchor:"transformers.MobileBertConfig.intra_bottleneck_size",description:`<strong>intra_bottleneck_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Size of bottleneck layer output.`,name:"intra_bottleneck_size"},{anchor:"transformers.MobileBertConfig.use_bottleneck_attention",description:`<strong>use_bottleneck_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; 
Whether to use attention inputs from the bottleneck transformation.`,name:"use_bottleneck_attention"},{anchor:"transformers.MobileBertConfig.key_query_shared_bottleneck",description:`<strong>key_query_shared_bottleneck</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use the same linear transformation for query&amp;key in the bottleneck.`,name:"key_query_shared_bottleneck"},{anchor:"transformers.MobileBertConfig.num_feedforward_networks",description:`<strong>num_feedforward_networks</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; Number of FFNs in a block.`,name:"num_feedforward_networks"},{anchor:"transformers.MobileBertConfig.normalization_type",description:`<strong>normalization_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;no_norm&quot;</code>) &#x2014; The normalization type in MobileBERT.`,name:"normalization_type"},{anchor:"transformers.MobileBertConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),zn=new Pe({props:{code:`from transformers import MobileBertModel, MobileBertConfig # Initializing a MobileBERT configuration configuration = MobileBertConfig() # Initializing a model from the configuration above model = MobileBertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertModel, MobileBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a MobileBERT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MobileBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration above</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),xn=new ze({}),Pn=new ie({props:{name:"class transformers.MobileBertTokenizer",anchor:"transformers.MobileBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/tokenization_mobilebert.py#L36"}}),qn=new ze({}),jn=new ie({props:{name:"class transformers.MobileBertTokenizerFast",anchor:"transformers.MobileBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py#L40"}}),An=new ze({}),In=new ie({props:{name:"class transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput",anchor:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prediction_logits",val:": FloatTensor = None"},{name:"seq_relationship_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L684",parametersDescription:[{anchor:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput.seq_relationship_logits",description:`<strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"seq_relationship_logits"},{anchor:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),On=new ie({props:{name:"class 
transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput",anchor:"transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"prediction_logits",val:": Tensor = None"},{name:"seq_relationship_logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L800",parametersDescription:[{anchor:"transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput.seq_relationship_logits",description:`<strong>seq_relationship_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"seq_relationship_logits"},{anchor:"transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Wn=new ze({}),Rn=new ie({props:{name:"class transformers.MobileBertModel",anchor:"transformers.MobileBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L786",parametersDescription:[{anchor:"transformers.MobileBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kn=new ie({props:{name:"forward",anchor:"transformers.MobileBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_attentions",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L816",parametersDescription:[{anchor:"transformers.MobileBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Lo=new Ee({props:{$$slots:{default:[dM]},$$scope:{ctx:R}}}),Gn=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertModel import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertModel.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertModel.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Jn=new ze({}),Yn=new ie({props:{name:"class transformers.MobileBertForPreTraining",anchor:"transformers.MobileBertForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L902",parametersDescription:[{anchor:"transformers.MobileBertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ts=new ie({props:{name:"forward",anchor:"transformers.MobileBertForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"next_sentence_label",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L925",parametersDescription:[{anchor:"transformers.MobileBertForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.MobileBertForPreTraining.forward.next_sentence_label",description:`<strong>next_sentence_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"next_sentence_label"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput" >transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</p> </li> <li> <p><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput" >transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ro=new Ee({props:{$$slots:{default:[cM]},$$scope:{ctx:R}}}),os=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForPreTraining import torch tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased") model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased") input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids) prediction_logits = outputs.prediction_logits seq_relationship_logits = 
outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&quot;google/mobilebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForPreTraining.from_pretrained(<span class="hljs-string">&quot;google/mobilebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),ns=new ze({}),ss=new ie({props:{name:"class transformers.MobileBertForMaskedLM",anchor:"transformers.MobileBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1006",parametersDescription:[{anchor:"transformers.MobileBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ds=new ie({props:{name:"forward",anchor:"transformers.MobileBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1032",parametersDescription:[{anchor:"transformers.MobileBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qo=new 
Ee({props:{$$slots:{default:[pM]},$$scope:{ctx:R}}}),cs=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForMaskedLM import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForMaskedLM.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ps=new ze({}),hs=new ie({props:{name:"class transformers.MobileBertForNextSentencePrediction",anchor:"transformers.MobileBertForNextSentencePrediction",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1105",parametersDescription:[{anchor:"transformers.MobileBertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),_s=new ie({props:{name:"forward",anchor:"transformers.MobileBertForNextSentencePrediction.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1115",parametersDescription:[{anchor:"transformers.MobileBertForNextSentencePrediction.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForNextSentencePrediction.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>.</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) \u2014 Next sequence prediction (classification) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Vo=new Ee({props:{$$slots:{default:[hM]},$$scope:{ctx:R}}}),bs=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
encoding = tokenizer(prompt, next_sentence, return_tensors='pt') outputs = model(**encoding, labels=torch.LongTensor([1])) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=torch.LongTensor([<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),vs=new ze({}),Ts=new ie({props:{name:"class transformers.MobileBertForSequenceClassification",anchor:"transformers.MobileBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1208",parametersDescription:[{anchor:"transformers.MobileBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ys=new ie({props:{name:"forward",anchor:"transformers.MobileBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1224",parametersDescription:[{anchor:"transformers.MobileBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),Go=new Ee({props:{$$slots:{default:[mM]},$$scope:{ctx:R}}}),$s=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForSequenceClassification import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForSequenceClassification.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Fs=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForSequenceClassification import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForSequenceClassification.from_pretrained('google/mobilebert-uncased', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span 
class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Bs=new ze({}),Es=new ie({props:{name:"class transformers.MobileBertForMultipleChoice",anchor:"transformers.MobileBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1415",parametersDescription:[{anchor:"transformers.MobileBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Cs=new ie({props:{name:"forward",anchor:"transformers.MobileBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1429",parametersDescription:[{anchor:"transformers.MobileBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Yo=new Ee({props:{$$slots:{default:[uM]},$$scope:{ctx:R}}}),qs=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForMultipleChoice import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForMultipleChoice.from_pretrained('google/mobilebert-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),js=new ze({}),Ns=new ie({props:{name:"class transformers.MobileBertForTokenClassification",anchor:"transformers.MobileBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1513",parametersDescription:[{anchor:"transformers.MobileBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ds=new ie({props:{name:"forward",anchor:"transformers.MobileBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1531",parametersDescription:[{anchor:"transformers.MobileBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xo=new Ee({props:{$$slots:{default:[fM]},$$scope:{ctx:R}}}),Os=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForTokenClassification import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForTokenClassification.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog 
is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ls=new ze({}),Ws=new ie({props:{name:"class transformers.MobileBertForQuestionAnswering",anchor:"transformers.MobileBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1311",parametersDescription:[{anchor:"transformers.MobileBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Us=new ie({props:{name:"forward",anchor:"transformers.MobileBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_mobilebert.py#L1325",parametersDescription:[{anchor:"transformers.MobileBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.MobileBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tn=new Ee({props:{$$slots:{default:[gM]},$$scope:{ctx:R}}}),Vs=new Pe({props:{code:`from transformers import MobileBertTokenizer, MobileBertForQuestionAnswering import torch tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = MobileBertForQuestionAnswering.from_pretrained('google/mobilebert-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, MobileBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MobileBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Ks=new ze({}),Gs=new ie({props:{name:"class transformers.TFMobileBertModel",anchor:"transformers.TFMobileBertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L926",parametersDescription:[{anchor:"transformers.TFMobileBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),nn=new Ee({props:{$$slots:{default:[_M]},$$scope:{ctx:R}}}),Xs=new ie({props:{name:"call",anchor:"transformers.TFMobileBertModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L931",parametersDescription:[{anchor:"transformers.TFMobileBertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),sn=new Ee({props:{$$slots:{default:[bM]},$$scope:{ctx:R}}}),er=new Pe({props:{code:`from transformers import MobileBertTokenizer, TFMobileBertModel import tensorflow as tf tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertModel.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMobileBertModel.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),tr=new ze({}),or=new ie({props:{name:"class transformers.TFMobileBertForPreTraining",anchor:"transformers.TFMobileBertForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1001",parametersDescription:[{anchor:"transformers.TFMobileBertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),an=new Ee({props:{$$slots:{default:[vM]},$$scope:{ctx:R}}}),ar=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForPreTraining.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1015",parametersDescription:[{anchor:"transformers.TFMobileBertForPreTraining.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForPreTraining.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForPreTraining.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForPreTraining.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForPreTraining.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForPreTraining.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput" >transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput" >transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ln=new Ee({props:{$$slots:{default:[TM]},$$scope:{ctx:R}}}),ir=new Pe({props:{code:`import tensorflow as tf from transformers import MobileBertTokenizer, TFMobileBertForPreTraining tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores, seq_relationship_scores = outputs[:2],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMobileBertForPreTraining.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tf.constant(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>))[<span class="hljs-literal">None</span>, :] <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_scores, seq_relationship_scores = outputs[:<span class="hljs-number">2</span>]`}}),lr=new ze({}),dr=new ie({props:{name:"class transformers.TFMobileBertForMaskedLM",anchor:"transformers.TFMobileBertForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1101",parametersDescription:[{anchor:"transformers.TFMobileBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),cn=new Ee({props:{$$slots:{default:[kM]},$$scope:{ctx:R}}}),ur=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1122",parametersDescription:[{anchor:"transformers.TFMobileBertForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMobileBertForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),pn=new Ee({props:{$$slots:{default:[MM]},$$scope:{ctx:R}}}),fr=new Pe({props:{code:`from transformers import MobileBertTokenizer, TFMobileBertForMaskedLM import tensorflow as tf tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForMaskedLM.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>model = TFMobileBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),gr=new ze({}),_r=new ie({props:{name:"class transformers.TFMobileBertForNextSentencePrediction",anchor:"transformers.TFMobileBertForNextSentencePrediction",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1215",parametersDescription:[{anchor:"transformers.TFMobileBertForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mn=new Ee({props:{$$slots:{default:[wM]},$$scope:{ctx:R}}}),Mr=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForNextSentencePrediction.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"next_sentence_label",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1225",parametersDescription:[{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForNextSentencePrediction.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput" >transformers.modeling_tf_outputs.TFNextSentencePredictorOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>next_sentence_label</code> is provided) \u2014 Next sentence prediction loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput" 
>transformers.modeling_tf_outputs.TFNextSentencePredictorOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),un=new Ee({props:{$$slots:{default:[yM]},$$scope:{ctx:R}}}),wr=new Pe({props:{code:`import tensorflow as tf from transformers import MobileBertTokenizer, TFMobileBertForNextSentencePrediction tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." encoding = tokenizer(prompt, next_sentence, return_tensors='tf') logits = model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'])[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMobileBertForNextSentencePrediction.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(encoding[<span class="hljs-string">&#x27;input_ids&#x27;</span>], token_type_ids=encoding[<span class="hljs-string">&#x27;token_type_ids&#x27;</span>])[<span class="hljs-number">0</span>]`}}),yr=new ze({}),$r=new ie({props:{name:"class transformers.TFMobileBertForSequenceClassification",anchor:"transformers.TFMobileBertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1323",parametersDescription:[{anchor:"transformers.TFMobileBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gn=new Ee({props:{$$slots:{default:[$M]},$$scope:{ctx:R}}}),zr=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1346",parametersDescription:[{anchor:"transformers.TFMobileBertForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMobileBertForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),_n=new Ee({props:{$$slots:{default:[FM]},$$scope:{ctx:R}}}),xr=new Pe({props:{code:`from transformers import MobileBertTokenizer, TFMobileBertForSequenceClassification import tensorflow as tf tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForSequenceClassification.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
TFMobileBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pr=new ze({}),Cr=new ie({props:{name:"class transformers.TFMobileBertForMultipleChoice",anchor:"transformers.TFMobileBertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1557",parametersDescription:[{anchor:"transformers.TFMobileBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),vn=new Ee({props:{$$slots:{default:[BM]},$$scope:{ctx:R}}}),Sr=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1586",parametersDescription:[{anchor:"transformers.TFMobileBertForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMobileBertForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Tn=new Ee({props:{$$slots:{default:[EM]},$$scope:{ctx:R}}}),Ar=new Pe({props:{code:`from transformers import MobileBertTokenizer, TFMobileBertForMultipleChoice import tensorflow as tf tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForMultipleChoice.from_pretrained('google/mobilebert-uncased') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMobileBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ir=new ze({}),Dr=new ie({props:{name:"class transformers.TFMobileBertForTokenClassification",anchor:"transformers.TFMobileBertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1714",parametersDescription:[{anchor:"transformers.TFMobileBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mn=new Ee({props:{$$slots:{default:[zM]},$$scope:{ctx:R}}}),Rr=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1738",parametersDescription:[{anchor:"transformers.TFMobileBertForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMobileBertForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),wn=new Ee({props:{$$slots:{default:[xM]},$$scope:{ctx:R}}}),Hr=new Pe({props:{code:`from transformers import MobileBertTokenizer, TFMobileBertForTokenClassification import tensorflow as tf tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForTokenClassification.from_pretrained('google/mobilebert-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMobileBertForTokenClassification.from_pretrained(<span 
class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Qr=new ze({}),Ur=new ie({props:{name:"class transformers.TFMobileBertForQuestionAnswering",anchor:"transformers.TFMobileBertForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1434",parametersDescription:[{anchor:"transformers.TFMobileBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig">MobileBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$n=new Ee({props:{$$slots:{default:[PM]},$$scope:{ctx:R}}}),Jr=new ie({props:{name:"call",anchor:"transformers.TFMobileBertForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mobilebert/modeling_tf_mobilebert.py#L1453",parametersDescription:[{anchor:"transformers.TFMobileBertForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer">MobileBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFMobileBertForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertConfig" >MobileBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Fn=new Ee({props:{$$slots:{default:[CM]},$$scope:{ctx:R}}}),Yr=new Pe({props:{code:`from transformers import MobileBertTokenizer, TFMobileBertForQuestionAnswering import tensorflow as tf tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') model = TFMobileBertForQuestionAnswering.from_pretrained('google/mobilebert-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MobileBertTokenizer, TFMobileBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MobileBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMobileBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/mobilebert-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){h=r("meta"),k=l(),f=r("h1"),g=r("a"),v=r("span"),w(b.$$.fragment),_=l(),M=r("span"),he=n("MobileBERT"),K=l(),z=r("h2"),Z=r("a"),D=r("span"),w(ee.$$.fragment),me=l(),O=r("span"),ue=n("Overview"),le=l(),V=r("p"),j=n("The MobileBERT model was proposed in "),te=r("a"),G=n("MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices"),x=n(` by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. It\u2019s a bidirectional transformer based on the BERT model, which is compressed and accelerated using several approaches.`),C=l(),ne=r("p"),H=n("The abstract from the paper is the following:"),de=l(),se=r("p"),L=r("em"),fe=n(`Natural Language Processing (NLP) has recently achieved great success by using huge pre-trained models with hundreds of millions of parameters. However, these models suffer from heavy model sizes and high latency such that they cannot be deployed to resource-limited mobile devices. In this paper, we propose MobileBERT for compressing and accelerating the popular BERT model. Like the original BERT, MobileBERT is task-agnostic, that is, it can be generically applied to various downstream NLP tasks via simple fine-tuning. Basically, MobileBERT is a thin version of BERT_LARGE, while equipped with bottleneck structures and a carefully designed balance between self-attentions and feed-forward networks. To train MobileBERT, we first train a specially designed teacher model, an inverted-bottleneck incorporated BERT_LARGE model. Then, we conduct knowledge transfer from this teacher to MobileBERT. Empirical studies show that MobileBERT is 4.3x smaller and 5.5x faster than BERT_BASE while achieving competitive results on well-known benchmarks. On the natural language inference tasks of GLUE, MobileBERT achieves a GLUEscore o 77.7 (0.6 lower than BERT_BASE), and 62 ms latency on a Pixel 4 phone. 
On the SQuAD v1.1/v2.0 question answering task, MobileBERT achieves a dev F1 score of 90.0/79.2 (1.5/2.1 higher than BERT_BASE).`),ce=l(),P=r("p"),ge=n("Tips:"),W=l(),X=r("ul"),re=r("li"),Q=n(`MobileBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),_e=l(),ae=r("li"),N=n(`MobileBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.`),pe=l(),A=r("p"),be=n("This model was contributed by "),c=r("a"),T=n("vshampor"),J=n(". The original code can be found "),ke=r("a"),$e=n("here"),S=n("."),we=l(),ve=r("h2"),Te=r("a"),q=r("span"),w(I.$$.fragment),Fe=l(),Me=r("span"),U=n("MobileBertConfig"),ye=l(),Y=r("div"),w(oe.$$.fragment),Be=l(),Ht=r("p"),_p=n("This is the configuration class to store the configuration of a "),na=r("a"),bp=n("MobileBertModel"),vp=n(` or a `),sa=r("a"),Tp=n("TFMobileBertModel"),kp=n(`. It is used to instantiate a MobileBERT model according to the specified arguments, defining the model architecture.`),Mp=l(),Qt=r("p"),wp=n("Configuration objects inherit from "),ra=r("a"),yp=n("PretrainedConfig"),$p=n(` and can be used to control the model outputs. Read the documentation from `),aa=r("a"),Fp=n("PretrainedConfig"),Bp=n(" for more information."),Ep=l(),fi=r("p"),zp=n("Examples:"),xp=l(),w(zn.$$.fragment),Pp=l(),gi=r("p"),Cp=n(`Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints.`),wd=l(),Ut=r("h2"),No=r("a"),_i=r("span"),w(xn.$$.fragment),qp=l(),bi=r("span"),jp=n("MobileBertTokenizer"),yd=l(),ct=r("div"),w(Pn.$$.fragment),Np=l(),vi=r("p"),Sp=n("Construct a MobileBERT tokenizer."),Ap=l(),So=r("p"),ia=r("a"),Ip=n("MobileBertTokenizer"),Dp=n(" is identical to "),la=r("a"),Op=n("BertTokenizer"),Lp=n(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Wp=l(),Cn=r("p"),Rp=n("Refer to superclass "),da=r("a"),Hp=n("BertTokenizer"),Qp=n(` for usage examples and documentation concerning parameters.`),$d=l(),Vt=r("h2"),Ao=r("a"),Ti=r("span"),w(qn.$$.fragment),Up=l(),ki=r("span"),Vp=n("MobileBertTokenizerFast"),Fd=l(),pt=r("div"),w(jn.$$.fragment),Kp=l(),Nn=r("p"),Gp=n("Construct a \u201Cfast\u201D MobileBERT tokenizer (backed by HuggingFace\u2019s "),Mi=r("em"),Jp=n("tokenizers"),Yp=n(" library)."),Zp=l(),Io=r("p"),ca=r("a"),Xp=n("MobileBertTokenizerFast"),eh=n(" is identical to "),pa=r("a"),th=n("BertTokenizerFast"),oh=n(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),nh=l(),Sn=r("p"),sh=n("Refer to superclass "),ha=r("a"),rh=n("BertTokenizerFast"),ah=n(` for usage examples and documentation concerning parameters.`),Bd=l(),Kt=r("h2"),Do=r("a"),wi=r("span"),w(An.$$.fragment),ih=l(),yi=r("span"),lh=n("MobileBert specific outputs"),Ed=l(),Gt=r("div"),w(In.$$.fragment),dh=l(),Dn=r("p"),ch=n("Output type of "),ma=r("a"),ph=n("MobileBertForPreTraining"),hh=n("."),zd=l(),Jt=r("div"),w(On.$$.fragment),mh=l(),Ln=r("p"),uh=n("Output type of "),ua=r("a"),fh=n("TFMobileBertForPreTraining"),gh=n("."),xd=l(),Yt=r("h2"),Oo=r("a"),$i=r("span"),w(Wn.$$.fragment),_h=l(),Fi=r("span"),bh=n("MobileBertModel"),Pd=l(),Ce=r("div"),w(Rn.$$.fragment),vh=l(),Bi=r("p"),Th=n("The bare MobileBert Model transformer outputting raw hidden-states without any specific head on 
top."),kh=l(),Hn=r("p"),Mh=n("This model inherits from "),fa=r("a"),wh=n("PreTrainedModel"),yh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$h=l(),Qn=r("p"),Fh=n("This model is also a PyTorch "),Un=r("a"),Bh=n("torch.nn.Module"),Eh=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zh=l(),Ei=r("p"),Vn=r("a"),xh=n("https://arxiv.org/pdf/2004.02984.pdf"),Ph=l(),Ke=r("div"),w(Kn.$$.fragment),Ch=l(),Zt=r("p"),qh=n("The "),ga=r("a"),jh=n("MobileBertModel"),Nh=n(" forward method, overrides the "),zi=r("code"),Sh=n("__call__"),Ah=n(" special method."),Ih=l(),w(Lo.$$.fragment),Dh=l(),xi=r("p"),Oh=n("Example:"),Lh=l(),w(Gn.$$.fragment),Cd=l(),Xt=r("h2"),Wo=r("a"),Pi=r("span"),w(Jn.$$.fragment),Wh=l(),Ci=r("span"),Rh=n("MobileBertForPreTraining"),qd=l(),Le=r("div"),w(Yn.$$.fragment),Hh=l(),eo=r("p"),Qh=n("MobileBert Model with two heads on top as done during the pretraining: a "),qi=r("code"),Uh=n("masked language modeling"),Vh=n(` head and a `),ji=r("code"),Kh=n("next sentence prediction (classification)"),Gh=n(" head."),Jh=l(),Zn=r("p"),Yh=n("This model inherits from "),_a=r("a"),Zh=n("PreTrainedModel"),Xh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),em=l(),Xn=r("p"),tm=n("This model is also a PyTorch "),es=r("a"),om=n("torch.nn.Module"),nm=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),sm=l(),Ge=r("div"),w(ts.$$.fragment),rm=l(),to=r("p"),am=n("The "),ba=r("a"),im=n("MobileBertForPreTraining"),lm=n(" forward method, overrides the "),Ni=r("code"),dm=n("__call__"),cm=n(" special method."),pm=l(),w(Ro.$$.fragment),hm=l(),Si=r("p"),mm=n("Examples:"),um=l(),w(os.$$.fragment),jd=l(),oo=r("h2"),Ho=r("a"),Ai=r("span"),w(ns.$$.fragment),fm=l(),Ii=r("span"),gm=n("MobileBertForMaskedLM"),Nd=l(),We=r("div"),w(ss.$$.fragment),_m=l(),rs=r("p"),bm=n("MobileBert Model with a "),Di=r("code"),vm=n("language modeling"),Tm=n(" head on top."),km=l(),as=r("p"),Mm=n("This model inherits from "),va=r("a"),wm=n("PreTrainedModel"),ym=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$m=l(),is=r("p"),Fm=n("This model is also a PyTorch "),ls=r("a"),Bm=n("torch.nn.Module"),Em=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zm=l(),Je=r("div"),w(ds.$$.fragment),xm=l(),no=r("p"),Pm=n("The "),Ta=r("a"),Cm=n("MobileBertForMaskedLM"),qm=n(" forward method, overrides the "),Oi=r("code"),jm=n("__call__"),Nm=n(" special method."),Sm=l(),w(Qo.$$.fragment),Am=l(),Li=r("p"),Im=n("Example:"),Dm=l(),w(cs.$$.fragment),Sd=l(),so=r("h2"),Uo=r("a"),Wi=r("span"),w(ps.$$.fragment),Om=l(),Ri=r("span"),Lm=n("MobileBertForNextSentencePrediction"),Ad=l(),Re=r("div"),w(hs.$$.fragment),Wm=l(),ms=r("p"),Rm=n("MobileBert Model with a "),Hi=r("code"),Hm=n("next sentence prediction (classification)"),Qm=n(" head on top."),Um=l(),us=r("p"),Vm=n("This model inherits from "),ka=r("a"),Km=n("PreTrainedModel"),Gm=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jm=l(),fs=r("p"),Ym=n("This model is also a PyTorch "),gs=r("a"),Zm=n("torch.nn.Module"),Xm=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),eu=l(),Ye=r("div"),w(_s.$$.fragment),tu=l(),ro=r("p"),ou=n("The "),Ma=r("a"),nu=n("MobileBertForNextSentencePrediction"),su=n(" forward method, overrides the "),Qi=r("code"),ru=n("__call__"),au=n(" special method."),iu=l(),w(Vo.$$.fragment),lu=l(),Ui=r("p"),du=n("Examples:"),cu=l(),w(bs.$$.fragment),Id=l(),ao=r("h2"),Ko=r("a"),Vi=r("span"),w(vs.$$.fragment),pu=l(),Ki=r("span"),hu=n("MobileBertForSequenceClassification"),Dd=l(),He=r("div"),w(Ts.$$.fragment),mu=l(),Gi=r("p"),uu=n(`MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),fu=l(),ks=r("p"),gu=n("This model inherits from "),wa=r("a"),_u=n("PreTrainedModel"),bu=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vu=l(),Ms=r("p"),Tu=n("This model is also a PyTorch "),ws=r("a"),ku=n("torch.nn.Module"),Mu=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wu=l(),xe=r("div"),w(ys.$$.fragment),yu=l(),io=r("p"),$u=n("The "),ya=r("a"),Fu=n("MobileBertForSequenceClassification"),Bu=n(" forward method, overrides the "),Ji=r("code"),Eu=n("__call__"),zu=n(" special method."),xu=l(),w(Go.$$.fragment),Pu=l(),Yi=r("p"),Cu=n("Example of single-label classification:"),qu=l(),w($s.$$.fragment),ju=l(),Zi=r("p"),Nu=n("Example of multi-label classification:"),Su=l(),w(Fs.$$.fragment),Od=l(),lo=r("h2"),Jo=r("a"),Xi=r("span"),w(Bs.$$.fragment),Au=l(),el=r("span"),Iu=n("MobileBertForMultipleChoice"),Ld=l(),Qe=r("div"),w(Es.$$.fragment),Du=l(),tl=r("p"),Ou=n(`MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Lu=l(),zs=r("p"),Wu=n("This model inherits from "),$a=r("a"),Ru=n("PreTrainedModel"),Hu=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qu=l(),xs=r("p"),Uu=n("This model is also a PyTorch "),Ps=r("a"),Vu=n("torch.nn.Module"),Ku=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gu=l(),Ze=r("div"),w(Cs.$$.fragment),Ju=l(),co=r("p"),Yu=n("The "),Fa=r("a"),Zu=n("MobileBertForMultipleChoice"),Xu=n(" forward method, overrides the "),ol=r("code"),ef=n("__call__"),tf=n(" special method."),of=l(),w(Yo.$$.fragment),nf=l(),nl=r("p"),sf=n("Example:"),rf=l(),w(qs.$$.fragment),Wd=l(),po=r("h2"),Zo=r("a"),sl=r("span"),w(js.$$.fragment),af=l(),rl=r("span"),lf=n("MobileBertForTokenClassification"),Rd=l(),Ue=r("div"),w(Ns.$$.fragment),df=l(),al=r("p"),cf=n(`MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),pf=l(),Ss=r("p"),hf=n("This model inherits from "),Ba=r("a"),mf=n("PreTrainedModel"),uf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ff=l(),As=r("p"),gf=n("This model is also a PyTorch "),Is=r("a"),_f=n("torch.nn.Module"),bf=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vf=l(),Xe=r("div"),w(Ds.$$.fragment),Tf=l(),ho=r("p"),kf=n("The "),Ea=r("a"),Mf=n("MobileBertForTokenClassification"),wf=n(" forward method, overrides the "),il=r("code"),yf=n("__call__"),$f=n(" special method."),Ff=l(),w(Xo.$$.fragment),Bf=l(),ll=r("p"),Ef=n("Example:"),zf=l(),w(Os.$$.fragment),Hd=l(),mo=r("h2"),en=r("a"),dl=r("span"),w(Ls.$$.fragment),xf=l(),cl=r("span"),Pf=n("MobileBertForQuestionAnswering"),Qd=l(),Ve=r("div"),w(Ws.$$.fragment),Cf=l(),uo=r("p"),qf=n(`MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),pl=r("code"),jf=n("span start logits"),Nf=n(" and "),hl=r("code"),Sf=n("span end logits"),Af=n(")."),If=l(),Rs=r("p"),Df=n("This model inherits from "),za=r("a"),Of=n("PreTrainedModel"),Lf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wf=l(),Hs=r("p"),Rf=n("This model is also a PyTorch "),Qs=r("a"),Hf=n("torch.nn.Module"),Qf=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Uf=l(),et=r("div"),w(Us.$$.fragment),Vf=l(),fo=r("p"),Kf=n("The "),xa=r("a"),Gf=n("MobileBertForQuestionAnswering"),Jf=n(" forward method, overrides the "),ml=r("code"),Yf=n("__call__"),Zf=n(" special method."),Xf=l(),w(tn.$$.fragment),eg=l(),ul=r("p"),tg=n("Example:"),og=l(),w(Vs.$$.fragment),Ud=l(),go=r("h2"),on=r("a"),fl=r("span"),w(Ks.$$.fragment),ng=l(),gl=r("span"),sg=n("TFMobileBertModel"),Vd=l(),qe=r("div"),w(Gs.$$.fragment),rg=l(),_l=r("p"),ag=n("The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top."),ig=l(),Js=r("p"),lg=n("This model inherits from "),Pa=r("a"),dg=n("TFPreTrainedModel"),cg=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pg=l(),Ys=r("p"),hg=n("This model is also a "),Zs=r("a"),mg=n("tf.keras.Model"),ug=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fg=l(),w(nn.$$.fragment),gg=l(),tt=r("div"),w(Xs.$$.fragment),_g=l(),_o=r("p"),bg=n("The "),Ca=r("a"),vg=n("TFMobileBertModel"),Tg=n(" forward method, overrides the "),bl=r("code"),kg=n("__call__"),Mg=n(" special method."),wg=l(),w(sn.$$.fragment),yg=l(),vl=r("p"),$g=n("Example:"),Fg=l(),w(er.$$.fragment),Kd=l(),bo=r("h2"),rn=r("a"),Tl=r("span"),w(tr.$$.fragment),Bg=l(),kl=r("span"),Eg=n("TFMobileBertForPreTraining"),Gd=l(),je=r("div"),w(or.$$.fragment),zg=l(),vo=r("p"),xg=n("MobileBert Model with two heads on top as done during the pretraining: a "),Ml=r("code"),Pg=n("masked language modeling"),Cg=n(` head and a `),wl=r("code"),qg=n("next sentence prediction (classification)"),jg=n(" head."),Ng=l(),nr=r("p"),Sg=n("This model inherits from "),qa=r("a"),Ag=n("TFPreTrainedModel"),Ig=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Dg=l(),sr=r("p"),Og=n("This model is also a "),rr=r("a"),Lg=n("tf.keras.Model"),Wg=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Rg=l(),w(an.$$.fragment),Hg=l(),ot=r("div"),w(ar.$$.fragment),Qg=l(),To=r("p"),Ug=n("The "),ja=r("a"),Vg=n("TFMobileBertForPreTraining"),Kg=n(" forward method, overrides the "),yl=r("code"),Gg=n("__call__"),Jg=n(" special method."),Yg=l(),w(ln.$$.fragment),Zg=l(),$l=r("p"),Xg=n("Examples:"),e_=l(),w(ir.$$.fragment),Jd=l(),ko=r("h2"),dn=r("a"),Fl=r("span"),w(lr.$$.fragment),t_=l(),Bl=r("span"),o_=n("TFMobileBertForMaskedLM"),Yd=l(),Ne=r("div"),w(dr.$$.fragment),n_=l(),cr=r("p"),s_=n("MobileBert Model with a "),El=r("code"),r_=n("language modeling"),a_=n(" head on top."),i_=l(),pr=r("p"),l_=n("This model inherits from "),Na=r("a"),d_=n("TFPreTrainedModel"),c_=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),p_=l(),hr=r("p"),h_=n("This model is also a "),mr=r("a"),m_=n("tf.keras.Model"),u_=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),f_=l(),w(cn.$$.fragment),g_=l(),nt=r("div"),w(ur.$$.fragment),__=l(),Mo=r("p"),b_=n("The "),Sa=r("a"),v_=n("TFMobileBertForMaskedLM"),T_=n(" forward method, overrides the "),zl=r("code"),k_=n("__call__"),M_=n(" special method."),w_=l(),w(pn.$$.fragment),y_=l(),xl=r("p"),$_=n("Example:"),F_=l(),w(fr.$$.fragment),Zd=l(),wo=r("h2"),hn=r("a"),Pl=r("span"),w(gr.$$.fragment),B_=l(),Cl=r("span"),E_=n("TFMobileBertForNextSentencePrediction"),Xd=l(),Se=r("div"),w(_r.$$.fragment),z_=l(),br=r("p"),x_=n("MobileBert Model with a "),ql=r("code"),P_=n("next sentence prediction (classification)"),C_=n(" head on top."),q_=l(),vr=r("p"),j_=n("This model inherits from "),Aa=r("a"),N_=n("TFPreTrainedModel"),S_=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),A_=l(),Tr=r("p"),I_=n("This model is also a "),kr=r("a"),D_=n("tf.keras.Model"),O_=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),L_=l(),w(mn.$$.fragment),W_=l(),st=r("div"),w(Mr.$$.fragment),R_=l(),yo=r("p"),H_=n("The "),Ia=r("a"),Q_=n("TFMobileBertForNextSentencePrediction"),U_=n(" forward method, overrides the "),jl=r("code"),V_=n("__call__"),K_=n(" special method."),G_=l(),w(un.$$.fragment),J_=l(),Nl=r("p"),Y_=n("Examples:"),Z_=l(),w(wr.$$.fragment),ec=l(),$o=r("h2"),fn=r("a"),Sl=r("span"),w(yr.$$.fragment),X_=l(),Al=r("span"),eb=n("TFMobileBertForSequenceClassification"),tc=l(),Ae=r("div"),w($r.$$.fragment),tb=l(),Il=r("p"),ob=n(`MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),nb=l(),Fr=r("p"),sb=n("This model inherits from "),Da=r("a"),rb=n("TFPreTrainedModel"),ab=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ib=l(),Br=r("p"),lb=n("This model is also a "),Er=r("a"),db=n("tf.keras.Model"),cb=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),pb=l(),w(gn.$$.fragment),hb=l(),rt=r("div"),w(zr.$$.fragment),mb=l(),Fo=r("p"),ub=n("The "),Oa=r("a"),fb=n("TFMobileBertForSequenceClassification"),gb=n(" forward method, overrides the "),Dl=r("code"),_b=n("__call__"),bb=n(" special method."),vb=l(),w(_n.$$.fragment),Tb=l(),Ol=r("p"),kb=n("Example:"),Mb=l(),w(xr.$$.fragment),oc=l(),Bo=r("h2"),bn=r("a"),Ll=r("span"),w(Pr.$$.fragment),wb=l(),Wl=r("span"),yb=n("TFMobileBertForMultipleChoice"),nc=l(),Ie=r("div"),w(Cr.$$.fragment),$b=l(),Rl=r("p"),Fb=n(`MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Bb=l(),qr=r("p"),Eb=n("This model inherits from "),La=r("a"),zb=n("TFPreTrainedModel"),xb=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pb=l(),jr=r("p"),Cb=n("This model is also a "),Nr=r("a"),qb=n("tf.keras.Model"),jb=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Nb=l(),w(vn.$$.fragment),Sb=l(),at=r("div"),w(Sr.$$.fragment),Ab=l(),Eo=r("p"),Ib=n("The "),Wa=r("a"),Db=n("TFMobileBertForMultipleChoice"),Ob=n(" forward method, overrides the "),Hl=r("code"),Lb=n("__call__"),Wb=n(" special method."),Rb=l(),w(Tn.$$.fragment),Hb=l(),Ql=r("p"),Qb=n("Example:"),Ub=l(),w(Ar.$$.fragment),sc=l(),zo=r("h2"),kn=r("a"),Ul=r("span"),w(Ir.$$.fragment),Vb=l(),Vl=r("span"),Kb=n("TFMobileBertForTokenClassification"),rc=l(),De=r("div"),w(Dr.$$.fragment),Gb=l(),Kl=r("p"),Jb=n(`MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Yb=l(),Or=r("p"),Zb=n("This model inherits from "),Ra=r("a"),Xb=n("TFPreTrainedModel"),ev=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tv=l(),Lr=r("p"),ov=n("This model is also a "),Wr=r("a"),nv=n("tf.keras.Model"),sv=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),rv=l(),w(Mn.$$.fragment),av=l(),it=r("div"),w(Rr.$$.fragment),iv=l(),xo=r("p"),lv=n("The "),Ha=r("a"),dv=n("TFMobileBertForTokenClassification"),cv=n(" forward method, overrides the "),Gl=r("code"),pv=n("__call__"),hv=n(" special method."),mv=l(),w(wn.$$.fragment),uv=l(),Jl=r("p"),fv=n("Example:"),gv=l(),w(Hr.$$.fragment),ac=l(),Po=r("h2"),yn=r("a"),Yl=r("span"),w(Qr.$$.fragment),_v=l(),Zl=r("span"),bv=n("TFMobileBertForQuestionAnswering"),ic=l(),Oe=r("div"),w(Ur.$$.fragment),vv=l(),Co=r("p"),Tv=n(`MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Xl=r("code"),kv=n("span start logits"),Mv=n(" and "),ed=r("code"),wv=n("span end logits"),yv=n(")."),$v=l(),Vr=r("p"),Fv=n("This model inherits from "),Qa=r("a"),Bv=n("TFPreTrainedModel"),Ev=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zv=l(),Kr=r("p"),xv=n("This model is also a "),Gr=r("a"),Pv=n("tf.keras.Model"),Cv=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),qv=l(),w($n.$$.fragment),jv=l(),lt=r("div"),w(Jr.$$.fragment),Nv=l(),qo=r("p"),Sv=n("The "),Ua=r("a"),Av=n("TFMobileBertForQuestionAnswering"),Iv=n(" forward method, overrides the "),td=r("code"),Dv=n("__call__"),Ov=n(" special method."),Lv=l(),w(Fn.$$.fragment),Wv=l(),od=r("p"),Rv=n("Example:"),Hv=l(),w(Yr.$$.fragment),this.h()},l(o){const u=lM('[data-svelte="svelte-1phssyn"]',document.head);h=a(u,"META",{name:!0,content:!0}),u.forEach(t),k=d(o),f=a(o,"H1",{class:!0});var Zr=i(f);g=a(Zr,"A",{id:!0,class:!0,href:!0});var nd=i(g);v=a(nd,"SPAN",{});var sd=i(v);y(b.$$.fragment,sd),sd.forEach(t),nd.forEach(t),_=d(Zr),M=a(Zr,"SPAN",{});var rd=i(M);he=s(rd,"MobileBERT"),rd.forEach(t),Zr.forEach(t),K=d(o),z=a(o,"H2",{class:!0});var Xr=i(z);Z=a(Xr,"A",{id:!0,class:!0,href:!0});var ad=i(Z);D=a(ad,"SPAN",{});var id=i(D);y(ee.$$.fragment,id),id.forEach(t),ad.forEach(t),me=d(Xr),O=a(Xr,"SPAN",{});var ld=i(O);ue=s(ld,"Overview"),ld.forEach(t),Xr.forEach(t),le=d(o),V=a(o,"P",{});var ea=i(V);j=s(ea,"The MobileBERT model was proposed in "),te=a(ea,"A",{href:!0,rel:!0});var dd=i(te);G=s(dd,"MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices"),dd.forEach(t),x=s(ea,` by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. It\u2019s a bidirectional transformer based on the BERT model, which is compressed and accelerated using several approaches.`),ea.forEach(t),C=d(o),ne=a(o,"P",{});var cd=i(ne);H=s(cd,"The abstract from the paper is the following:"),cd.forEach(t),de=d(o),se=a(o,"P",{});var pd=i(se);L=a(pd,"EM",{});var hd=i(L);fe=s(hd,`Natural Language Processing (NLP) has recently achieved great success by using huge pre-trained models with hundreds of millions of parameters. However, these models suffer from heavy model sizes and high latency such that they cannot be deployed to resource-limited mobile devices. In this paper, we propose MobileBERT for compressing and accelerating the popular BERT model. Like the original BERT, MobileBERT is task-agnostic, that is, it can be generically applied to various downstream NLP tasks via simple fine-tuning. 
Basically, MobileBERT is a thin version of BERT_LARGE, while equipped with bottleneck structures and a carefully designed balance between self-attentions and feed-forward networks. To train MobileBERT, we first train a specially designed teacher model, an inverted-bottleneck incorporated BERT_LARGE model. Then, we conduct knowledge transfer from this teacher to MobileBERT. Empirical studies show that MobileBERT is 4.3x smaller and 5.5x faster than BERT_BASE while achieving competitive results on well-known benchmarks. On the natural language inference tasks of GLUE, MobileBERT achieves a GLUEscore o 77.7 (0.6 lower than BERT_BASE), and 62 ms latency on a Pixel 4 phone. On the SQuAD v1.1/v2.0 question answering task, MobileBERT achieves a dev F1 score of 90.0/79.2 (1.5/2.1 higher than BERT_BASE).`),hd.forEach(t),pd.forEach(t),ce=d(o),P=a(o,"P",{});var md=i(P);ge=s(md,"Tips:"),md.forEach(t),W=d(o),X=a(o,"UL",{});var ta=i(X);re=a(ta,"LI",{});var ud=i(re);Q=s(ud,`MobileBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),ud.forEach(t),_e=d(ta),ae=a(ta,"LI",{});var fd=i(ae);N=s(fd,`MobileBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.`),fd.forEach(t),ta.forEach(t),pe=d(o),A=a(o,"P",{});var jo=i(A);be=s(jo,"This model was contributed by "),c=a(jo,"A",{href:!0,rel:!0});var gd=i(c);T=s(gd,"vshampor"),gd.forEach(t),J=s(jo,". The original code can be found "),ke=a(jo,"A",{href:!0,rel:!0});var _d=i(ke);$e=s(_d,"here"),_d.forEach(t),S=s(jo,"."),jo.forEach(t),we=d(o),ve=a(o,"H2",{class:!0});var oa=i(ve);Te=a(oa,"A",{id:!0,class:!0,href:!0});var bd=i(Te);q=a(bd,"SPAN",{});var vd=i(q);y(I.$$.fragment,vd),vd.forEach(t),bd.forEach(t),Fe=d(oa),Me=a(oa,"SPAN",{});var Td=i(Me);U=s(Td,"MobileBertConfig"),Td.forEach(t),oa.forEach(t),ye=d(o),Y=a(o,"DIV",{class:!0});var ht=i(Y);y(oe.$$.fragment,ht),Be=d(ht),Ht=a(ht,"P",{});var Va=i(Ht);_p=s(Va,"This is the configuration class to store the configuration of a "),na=a(Va,"A",{href:!0});var Qv=i(na);bp=s(Qv,"MobileBertModel"),Qv.forEach(t),vp=s(Va,` or a `),sa=a(Va,"A",{href:!0});var Uv=i(sa);Tp=s(Uv,"TFMobileBertModel"),Uv.forEach(t),kp=s(Va,`. It is used to instantiate a MobileBERT model according to the specified arguments, defining the model architecture.`),Va.forEach(t),Mp=d(ht),Qt=a(ht,"P",{});var Ka=i(Qt);wp=s(Ka,"Configuration objects inherit from "),ra=a(Ka,"A",{href:!0});var Vv=i(ra);yp=s(Vv,"PretrainedConfig"),Vv.forEach(t),$p=s(Ka,` and can be used to control the model outputs. 
Read the documentation from `),aa=a(Ka,"A",{href:!0});var Kv=i(aa);Fp=s(Kv,"PretrainedConfig"),Kv.forEach(t),Bp=s(Ka," for more information."),Ka.forEach(t),Ep=d(ht),fi=a(ht,"P",{});var Gv=i(fi);zp=s(Gv,"Examples:"),Gv.forEach(t),xp=d(ht),y(zn.$$.fragment,ht),Pp=d(ht),gi=a(ht,"P",{});var Jv=i(gi);Cp=s(Jv,`Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints.`),Jv.forEach(t),ht.forEach(t),wd=d(o),Ut=a(o,"H2",{class:!0});var dc=i(Ut);No=a(dc,"A",{id:!0,class:!0,href:!0});var Yv=i(No);_i=a(Yv,"SPAN",{});var Zv=i(_i);y(xn.$$.fragment,Zv),Zv.forEach(t),Yv.forEach(t),qp=d(dc),bi=a(dc,"SPAN",{});var Xv=i(bi);jp=s(Xv,"MobileBertTokenizer"),Xv.forEach(t),dc.forEach(t),yd=d(o),ct=a(o,"DIV",{class:!0});var Bn=i(ct);y(Pn.$$.fragment,Bn),Np=d(Bn),vi=a(Bn,"P",{});var eT=i(vi);Sp=s(eT,"Construct a MobileBERT tokenizer."),eT.forEach(t),Ap=d(Bn),So=a(Bn,"P",{});var kd=i(So);ia=a(kd,"A",{href:!0});var tT=i(ia);Ip=s(tT,"MobileBertTokenizer"),tT.forEach(t),Dp=s(kd," is identical to "),la=a(kd,"A",{href:!0});var oT=i(la);Op=s(oT,"BertTokenizer"),oT.forEach(t),Lp=s(kd,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),kd.forEach(t),Wp=d(Bn),Cn=a(Bn,"P",{});var cc=i(Cn);Rp=s(cc,"Refer to superclass "),da=a(cc,"A",{href:!0});var nT=i(da);Hp=s(nT,"BertTokenizer"),nT.forEach(t),Qp=s(cc,` for usage examples and documentation concerning parameters.`),cc.forEach(t),Bn.forEach(t),$d=d(o),Vt=a(o,"H2",{class:!0});var pc=i(Vt);Ao=a(pc,"A",{id:!0,class:!0,href:!0});var sT=i(Ao);Ti=a(sT,"SPAN",{});var rT=i(Ti);y(qn.$$.fragment,rT),rT.forEach(t),sT.forEach(t),Up=d(pc),ki=a(pc,"SPAN",{});var aT=i(ki);Vp=s(aT,"MobileBertTokenizerFast"),aT.forEach(t),pc.forEach(t),Fd=d(o),pt=a(o,"DIV",{class:!0});var En=i(pt);y(jn.$$.fragment,En),Kp=d(En),Nn=a(En,"P",{});var hc=i(Nn);Gp=s(hc,"Construct a \u201Cfast\u201D MobileBERT tokenizer (backed by HuggingFace\u2019s "),Mi=a(hc,"EM",{});var iT=i(Mi);Jp=s(iT,"tokenizers"),iT.forEach(t),Yp=s(hc," library)."),hc.forEach(t),Zp=d(En),Io=a(En,"P",{});var Md=i(Io);ca=a(Md,"A",{href:!0});var lT=i(ca);Xp=s(lT,"MobileBertTokenizerFast"),lT.forEach(t),eh=s(Md," is identical to "),pa=a(Md,"A",{href:!0});var dT=i(pa);th=s(dT,"BertTokenizerFast"),dT.forEach(t),oh=s(Md,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Md.forEach(t),nh=d(En),Sn=a(En,"P",{});var mc=i(Sn);sh=s(mc,"Refer to superclass "),ha=a(mc,"A",{href:!0});var cT=i(ha);rh=s(cT,"BertTokenizerFast"),cT.forEach(t),ah=s(mc,` for usage examples and documentation concerning parameters.`),mc.forEach(t),En.forEach(t),Bd=d(o),Kt=a(o,"H2",{class:!0});var uc=i(Kt);Do=a(uc,"A",{id:!0,class:!0,href:!0});var pT=i(Do);wi=a(pT,"SPAN",{});var hT=i(wi);y(An.$$.fragment,hT),hT.forEach(t),pT.forEach(t),ih=d(uc),yi=a(uc,"SPAN",{});var mT=i(yi);lh=s(mT,"MobileBert specific outputs"),mT.forEach(t),uc.forEach(t),Ed=d(o),Gt=a(o,"DIV",{class:!0});var fc=i(Gt);y(In.$$.fragment,fc),dh=d(fc),Dn=a(fc,"P",{});var gc=i(Dn);ch=s(gc,"Output type of "),ma=a(gc,"A",{href:!0});var uT=i(ma);ph=s(uT,"MobileBertForPreTraining"),uT.forEach(t),hh=s(gc,"."),gc.forEach(t),fc.forEach(t),zd=d(o),Jt=a(o,"DIV",{class:!0});var _c=i(Jt);y(On.$$.fragment,_c),mh=d(_c),Ln=a(_c,"P",{});var bc=i(Ln);uh=s(bc,"Output type of "),ua=a(bc,"A",{href:!0});var fT=i(ua);fh=s(fT,"TFMobileBertForPreTraining"),fT.forEach(t),gh=s(bc,"."),bc.forEach(t),_c.forEach(t),xd=d(o),Yt=a(o,"H2",{class:!0});var vc=i(Yt);Oo=a(vc,"A",{id:!0,class:!0,href:!0});var gT=i(Oo);$i=a(gT,"SPAN",{});var 
_T=i($i);y(Wn.$$.fragment,_T),_T.forEach(t),gT.forEach(t),_h=d(vc),Fi=a(vc,"SPAN",{});var bT=i(Fi);bh=s(bT,"MobileBertModel"),bT.forEach(t),vc.forEach(t),Pd=d(o),Ce=a(o,"DIV",{class:!0});var mt=i(Ce);y(Rn.$$.fragment,mt),vh=d(mt),Bi=a(mt,"P",{});var vT=i(Bi);Th=s(vT,"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top."),vT.forEach(t),kh=d(mt),Hn=a(mt,"P",{});var Tc=i(Hn);Mh=s(Tc,"This model inherits from "),fa=a(Tc,"A",{href:!0});var TT=i(fa);wh=s(TT,"PreTrainedModel"),TT.forEach(t),yh=s(Tc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tc.forEach(t),$h=d(mt),Qn=a(mt,"P",{});var kc=i(Qn);Fh=s(kc,"This model is also a PyTorch "),Un=a(kc,"A",{href:!0,rel:!0});var kT=i(Un);Bh=s(kT,"torch.nn.Module"),kT.forEach(t),Eh=s(kc,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kc.forEach(t),zh=d(mt),Ei=a(mt,"P",{});var MT=i(Ei);Vn=a(MT,"A",{href:!0,rel:!0});var wT=i(Vn);xh=s(wT,"https://arxiv.org/pdf/2004.02984.pdf"),wT.forEach(t),MT.forEach(t),Ph=d(mt),Ke=a(mt,"DIV",{class:!0});var Mt=i(Ke);y(Kn.$$.fragment,Mt),Ch=d(Mt),Zt=a(Mt,"P",{});var Ga=i(Zt);qh=s(Ga,"The "),ga=a(Ga,"A",{href:!0});var yT=i(ga);jh=s(yT,"MobileBertModel"),yT.forEach(t),Nh=s(Ga," forward method, overrides the "),zi=a(Ga,"CODE",{});var $T=i(zi);Sh=s($T,"__call__"),$T.forEach(t),Ah=s(Ga," special method."),Ga.forEach(t),Ih=d(Mt),y(Lo.$$.fragment,Mt),Dh=d(Mt),xi=a(Mt,"P",{});var FT=i(xi);Oh=s(FT,"Example:"),FT.forEach(t),Lh=d(Mt),y(Gn.$$.fragment,Mt),Mt.forEach(t),mt.forEach(t),Cd=d(o),Xt=a(o,"H2",{class:!0});var Mc=i(Xt);Wo=a(Mc,"A",{id:!0,class:!0,href:!0});var BT=i(Wo);Pi=a(BT,"SPAN",{});var ET=i(Pi);y(Jn.$$.fragment,ET),ET.forEach(t),BT.forEach(t),Wh=d(Mc),Ci=a(Mc,"SPAN",{});var zT=i(Ci);Rh=s(zT,"MobileBertForPreTraining"),zT.forEach(t),Mc.forEach(t),qd=d(o),Le=a(o,"DIV",{class:!0});var wt=i(Le);y(Yn.$$.fragment,wt),Hh=d(wt),eo=a(wt,"P",{});var Ja=i(eo);Qh=s(Ja,"MobileBert Model with two heads on top as done during the pretraining: a "),qi=a(Ja,"CODE",{});var xT=i(qi);Uh=s(xT,"masked language modeling"),xT.forEach(t),Vh=s(Ja,` head and a `),ji=a(Ja,"CODE",{});var PT=i(ji);Kh=s(PT,"next sentence prediction (classification)"),PT.forEach(t),Gh=s(Ja," head."),Ja.forEach(t),Jh=d(wt),Zn=a(wt,"P",{});var wc=i(Zn);Yh=s(wc,"This model inherits from "),_a=a(wc,"A",{href:!0});var CT=i(_a);Zh=s(CT,"PreTrainedModel"),CT.forEach(t),Xh=s(wc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wc.forEach(t),em=d(wt),Xn=a(wt,"P",{});var yc=i(Xn);tm=s(yc,"This model is also a PyTorch "),es=a(yc,"A",{href:!0,rel:!0});var qT=i(es);om=s(qT,"torch.nn.Module"),qT.forEach(t),nm=s(yc,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yc.forEach(t),sm=d(wt),Ge=a(wt,"DIV",{class:!0});var yt=i(Ge);y(ts.$$.fragment,yt),rm=d(yt),to=a(yt,"P",{});var Ya=i(to);am=s(Ya,"The "),ba=a(Ya,"A",{href:!0});var jT=i(ba);im=s(jT,"MobileBertForPreTraining"),jT.forEach(t),lm=s(Ya," forward method, overrides the "),Ni=a(Ya,"CODE",{});var NT=i(Ni);dm=s(NT,"__call__"),NT.forEach(t),cm=s(Ya," special method."),Ya.forEach(t),pm=d(yt),y(Ro.$$.fragment,yt),hm=d(yt),Si=a(yt,"P",{});var ST=i(Si);mm=s(ST,"Examples:"),ST.forEach(t),um=d(yt),y(os.$$.fragment,yt),yt.forEach(t),wt.forEach(t),jd=d(o),oo=a(o,"H2",{class:!0});var $c=i(oo);Ho=a($c,"A",{id:!0,class:!0,href:!0});var AT=i(Ho);Ai=a(AT,"SPAN",{});var IT=i(Ai);y(ns.$$.fragment,IT),IT.forEach(t),AT.forEach(t),fm=d($c),Ii=a($c,"SPAN",{});var DT=i(Ii);gm=s(DT,"MobileBertForMaskedLM"),DT.forEach(t),$c.forEach(t),Nd=d(o),We=a(o,"DIV",{class:!0});var $t=i(We);y(ss.$$.fragment,$t),_m=d($t),rs=a($t,"P",{});var Fc=i(rs);bm=s(Fc,"MobileBert Model with a "),Di=a(Fc,"CODE",{});var OT=i(Di);vm=s(OT,"language modeling"),OT.forEach(t),Tm=s(Fc," head on top."),Fc.forEach(t),km=d($t),as=a($t,"P",{});var Bc=i(as);Mm=s(Bc,"This model inherits from "),va=a(Bc,"A",{href:!0});var LT=i(va);wm=s(LT,"PreTrainedModel"),LT.forEach(t),ym=s(Bc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bc.forEach(t),$m=d($t),is=a($t,"P",{});var Ec=i(is);Fm=s(Ec,"This model is also a PyTorch "),ls=a(Ec,"A",{href:!0,rel:!0});var WT=i(ls);Bm=s(WT,"torch.nn.Module"),WT.forEach(t),Em=s(Ec,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ec.forEach(t),zm=d($t),Je=a($t,"DIV",{class:!0});var Ft=i(Je);y(ds.$$.fragment,Ft),xm=d(Ft),no=a(Ft,"P",{});var Za=i(no);Pm=s(Za,"The "),Ta=a(Za,"A",{href:!0});var RT=i(Ta);Cm=s(RT,"MobileBertForMaskedLM"),RT.forEach(t),qm=s(Za," forward method, overrides the "),Oi=a(Za,"CODE",{});var HT=i(Oi);jm=s(HT,"__call__"),HT.forEach(t),Nm=s(Za," special method."),Za.forEach(t),Sm=d(Ft),y(Qo.$$.fragment,Ft),Am=d(Ft),Li=a(Ft,"P",{});var QT=i(Li);Im=s(QT,"Example:"),QT.forEach(t),Dm=d(Ft),y(cs.$$.fragment,Ft),Ft.forEach(t),$t.forEach(t),Sd=d(o),so=a(o,"H2",{class:!0});var zc=i(so);Uo=a(zc,"A",{id:!0,class:!0,href:!0});var UT=i(Uo);Wi=a(UT,"SPAN",{});var VT=i(Wi);y(ps.$$.fragment,VT),VT.forEach(t),UT.forEach(t),Om=d(zc),Ri=a(zc,"SPAN",{});var KT=i(Ri);Lm=s(KT,"MobileBertForNextSentencePrediction"),KT.forEach(t),zc.forEach(t),Ad=d(o),Re=a(o,"DIV",{class:!0});var Bt=i(Re);y(hs.$$.fragment,Bt),Wm=d(Bt),ms=a(Bt,"P",{});var xc=i(ms);Rm=s(xc,"MobileBert Model with a "),Hi=a(xc,"CODE",{});var GT=i(Hi);Hm=s(GT,"next sentence prediction (classification)"),GT.forEach(t),Qm=s(xc," head on top."),xc.forEach(t),Um=d(Bt),us=a(Bt,"P",{});var Pc=i(us);Vm=s(Pc,"This model inherits from "),ka=a(Pc,"A",{href:!0});var JT=i(ka);Km=s(JT,"PreTrainedModel"),JT.forEach(t),Gm=s(Pc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pc.forEach(t),Jm=d(Bt),fs=a(Bt,"P",{});var Cc=i(fs);Ym=s(Cc,"This model is also a PyTorch "),gs=a(Cc,"A",{href:!0,rel:!0});var YT=i(gs);Zm=s(YT,"torch.nn.Module"),YT.forEach(t),Xm=s(Cc,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cc.forEach(t),eu=d(Bt),Ye=a(Bt,"DIV",{class:!0});var Et=i(Ye);y(_s.$$.fragment,Et),tu=d(Et),ro=a(Et,"P",{});var Xa=i(ro);ou=s(Xa,"The "),Ma=a(Xa,"A",{href:!0});var ZT=i(Ma);nu=s(ZT,"MobileBertForNextSentencePrediction"),ZT.forEach(t),su=s(Xa," forward method, overrides the "),Qi=a(Xa,"CODE",{});var XT=i(Qi);ru=s(XT,"__call__"),XT.forEach(t),au=s(Xa," special method."),Xa.forEach(t),iu=d(Et),y(Vo.$$.fragment,Et),lu=d(Et),Ui=a(Et,"P",{});var ek=i(Ui);du=s(ek,"Examples:"),ek.forEach(t),cu=d(Et),y(bs.$$.fragment,Et),Et.forEach(t),Bt.forEach(t),Id=d(o),ao=a(o,"H2",{class:!0});var qc=i(ao);Ko=a(qc,"A",{id:!0,class:!0,href:!0});var tk=i(Ko);Vi=a(tk,"SPAN",{});var ok=i(Vi);y(vs.$$.fragment,ok),ok.forEach(t),tk.forEach(t),pu=d(qc),Ki=a(qc,"SPAN",{});var nk=i(Ki);hu=s(nk,"MobileBertForSequenceClassification"),nk.forEach(t),qc.forEach(t),Dd=d(o),He=a(o,"DIV",{class:!0});var zt=i(He);y(Ts.$$.fragment,zt),mu=d(zt),Gi=a(zt,"P",{});var sk=i(Gi);uu=s(sk,`MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),sk.forEach(t),fu=d(zt),ks=a(zt,"P",{});var jc=i(ks);gu=s(jc,"This model inherits from "),wa=a(jc,"A",{href:!0});var rk=i(wa);_u=s(rk,"PreTrainedModel"),rk.forEach(t),bu=s(jc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jc.forEach(t),vu=d(zt),Ms=a(zt,"P",{});var Nc=i(Ms);Tu=s(Nc,"This model is also a PyTorch "),ws=a(Nc,"A",{href:!0,rel:!0});var ak=i(ws);ku=s(ak,"torch.nn.Module"),ak.forEach(t),Mu=s(Nc,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nc.forEach(t),wu=d(zt),xe=a(zt,"DIV",{class:!0});var dt=i(xe);y(ys.$$.fragment,dt),yu=d(dt),io=a(dt,"P",{});var ei=i(io);$u=s(ei,"The "),ya=a(ei,"A",{href:!0});var ik=i(ya);Fu=s(ik,"MobileBertForSequenceClassification"),ik.forEach(t),Bu=s(ei," forward method, overrides the "),Ji=a(ei,"CODE",{});var lk=i(Ji);Eu=s(lk,"__call__"),lk.forEach(t),zu=s(ei," special method."),ei.forEach(t),xu=d(dt),y(Go.$$.fragment,dt),Pu=d(dt),Yi=a(dt,"P",{});var dk=i(Yi);Cu=s(dk,"Example of single-label classification:"),dk.forEach(t),qu=d(dt),y($s.$$.fragment,dt),ju=d(dt),Zi=a(dt,"P",{});var ck=i(Zi);Nu=s(ck,"Example of multi-label classification:"),ck.forEach(t),Su=d(dt),y(Fs.$$.fragment,dt),dt.forEach(t),zt.forEach(t),Od=d(o),lo=a(o,"H2",{class:!0});var Sc=i(lo);Jo=a(Sc,"A",{id:!0,class:!0,href:!0});var pk=i(Jo);Xi=a(pk,"SPAN",{});var hk=i(Xi);y(Bs.$$.fragment,hk),hk.forEach(t),pk.forEach(t),Au=d(Sc),el=a(Sc,"SPAN",{});var mk=i(el);Iu=s(mk,"MobileBertForMultipleChoice"),mk.forEach(t),Sc.forEach(t),Ld=d(o),Qe=a(o,"DIV",{class:!0});var xt=i(Qe);y(Es.$$.fragment,xt),Du=d(xt),tl=a(xt,"P",{});var uk=i(tl);Ou=s(uk,`MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),uk.forEach(t),Lu=d(xt),zs=a(xt,"P",{});var Ac=i(zs);Wu=s(Ac,"This model inherits from "),$a=a(Ac,"A",{href:!0});var fk=i($a);Ru=s(fk,"PreTrainedModel"),fk.forEach(t),Hu=s(Ac,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ac.forEach(t),Qu=d(xt),xs=a(xt,"P",{});var Ic=i(xs);Uu=s(Ic,"This model is also a PyTorch "),Ps=a(Ic,"A",{href:!0,rel:!0});var gk=i(Ps);Vu=s(gk,"torch.nn.Module"),gk.forEach(t),Ku=s(Ic,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ic.forEach(t),Gu=d(xt),Ze=a(xt,"DIV",{class:!0});var Pt=i(Ze);y(Cs.$$.fragment,Pt),Ju=d(Pt),co=a(Pt,"P",{});var ti=i(co);Yu=s(ti,"The "),Fa=a(ti,"A",{href:!0});var _k=i(Fa);Zu=s(_k,"MobileBertForMultipleChoice"),_k.forEach(t),Xu=s(ti," forward method, overrides the "),ol=a(ti,"CODE",{});var bk=i(ol);ef=s(bk,"__call__"),bk.forEach(t),tf=s(ti," special method."),ti.forEach(t),of=d(Pt),y(Yo.$$.fragment,Pt),nf=d(Pt),nl=a(Pt,"P",{});var vk=i(nl);sf=s(vk,"Example:"),vk.forEach(t),rf=d(Pt),y(qs.$$.fragment,Pt),Pt.forEach(t),xt.forEach(t),Wd=d(o),po=a(o,"H2",{class:!0});var Dc=i(po);Zo=a(Dc,"A",{id:!0,class:!0,href:!0});var Tk=i(Zo);sl=a(Tk,"SPAN",{});var kk=i(sl);y(js.$$.fragment,kk),kk.forEach(t),Tk.forEach(t),af=d(Dc),rl=a(Dc,"SPAN",{});var Mk=i(rl);lf=s(Mk,"MobileBertForTokenClassification"),Mk.forEach(t),Dc.forEach(t),Rd=d(o),Ue=a(o,"DIV",{class:!0});var Ct=i(Ue);y(Ns.$$.fragment,Ct),df=d(Ct),al=a(Ct,"P",{});var wk=i(al);cf=s(wk,`MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),wk.forEach(t),pf=d(Ct),Ss=a(Ct,"P",{});var Oc=i(Ss);hf=s(Oc,"This model inherits from "),Ba=a(Oc,"A",{href:!0});var yk=i(Ba);mf=s(yk,"PreTrainedModel"),yk.forEach(t),uf=s(Oc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Oc.forEach(t),ff=d(Ct),As=a(Ct,"P",{});var Lc=i(As);gf=s(Lc,"This model is also a PyTorch "),Is=a(Lc,"A",{href:!0,rel:!0});var $k=i(Is);_f=s($k,"torch.nn.Module"),$k.forEach(t),bf=s(Lc,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lc.forEach(t),vf=d(Ct),Xe=a(Ct,"DIV",{class:!0});var qt=i(Xe);y(Ds.$$.fragment,qt),Tf=d(qt),ho=a(qt,"P",{});var oi=i(ho);kf=s(oi,"The "),Ea=a(oi,"A",{href:!0});var Fk=i(Ea);Mf=s(Fk,"MobileBertForTokenClassification"),Fk.forEach(t),wf=s(oi," forward method, overrides the "),il=a(oi,"CODE",{});var Bk=i(il);yf=s(Bk,"__call__"),Bk.forEach(t),$f=s(oi," special method."),oi.forEach(t),Ff=d(qt),y(Xo.$$.fragment,qt),Bf=d(qt),ll=a(qt,"P",{});var Ek=i(ll);Ef=s(Ek,"Example:"),Ek.forEach(t),zf=d(qt),y(Os.$$.fragment,qt),qt.forEach(t),Ct.forEach(t),Hd=d(o),mo=a(o,"H2",{class:!0});var Wc=i(mo);en=a(Wc,"A",{id:!0,class:!0,href:!0});var zk=i(en);dl=a(zk,"SPAN",{});var xk=i(dl);y(Ls.$$.fragment,xk),xk.forEach(t),zk.forEach(t),xf=d(Wc),cl=a(Wc,"SPAN",{});var Pk=i(cl);Pf=s(Pk,"MobileBertForQuestionAnswering"),Pk.forEach(t),Wc.forEach(t),Qd=d(o),Ve=a(o,"DIV",{class:!0});var jt=i(Ve);y(Ws.$$.fragment,jt),Cf=d(jt),uo=a(jt,"P",{});var ni=i(uo);qf=s(ni,`MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),pl=a(ni,"CODE",{});var Ck=i(pl);jf=s(Ck,"span start logits"),Ck.forEach(t),Nf=s(ni," and "),hl=a(ni,"CODE",{});var qk=i(hl);Sf=s(qk,"span end logits"),qk.forEach(t),Af=s(ni,")."),ni.forEach(t),If=d(jt),Rs=a(jt,"P",{});var Rc=i(Rs);Df=s(Rc,"This model inherits from "),za=a(Rc,"A",{href:!0});var jk=i(za);Of=s(jk,"PreTrainedModel"),jk.forEach(t),Lf=s(Rc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rc.forEach(t),Wf=d(jt),Hs=a(jt,"P",{});var Hc=i(Hs);Rf=s(Hc,"This model is also a PyTorch "),Qs=a(Hc,"A",{href:!0,rel:!0});var Nk=i(Qs);Hf=s(Nk,"torch.nn.Module"),Nk.forEach(t),Qf=s(Hc,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hc.forEach(t),Uf=d(jt),et=a(jt,"DIV",{class:!0});var Nt=i(et);y(Us.$$.fragment,Nt),Vf=d(Nt),fo=a(Nt,"P",{});var si=i(fo);Kf=s(si,"The "),xa=a(si,"A",{href:!0});var Sk=i(xa);Gf=s(Sk,"MobileBertForQuestionAnswering"),Sk.forEach(t),Jf=s(si," forward method, overrides the "),ml=a(si,"CODE",{});var Ak=i(ml);Yf=s(Ak,"__call__"),Ak.forEach(t),Zf=s(si," special method."),si.forEach(t),Xf=d(Nt),y(tn.$$.fragment,Nt),eg=d(Nt),ul=a(Nt,"P",{});var Ik=i(ul);tg=s(Ik,"Example:"),Ik.forEach(t),og=d(Nt),y(Vs.$$.fragment,Nt),Nt.forEach(t),jt.forEach(t),Ud=d(o),go=a(o,"H2",{class:!0});var Qc=i(go);on=a(Qc,"A",{id:!0,class:!0,href:!0});var Dk=i(on);fl=a(Dk,"SPAN",{});var Ok=i(fl);y(Ks.$$.fragment,Ok),Ok.forEach(t),Dk.forEach(t),ng=d(Qc),gl=a(Qc,"SPAN",{});var Lk=i(gl);sg=s(Lk,"TFMobileBertModel"),Lk.forEach(t),Qc.forEach(t),Vd=d(o),qe=a(o,"DIV",{class:!0});var ut=i(qe);y(Gs.$$.fragment,ut),rg=d(ut),_l=a(ut,"P",{});var Wk=i(_l);ag=s(Wk,"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top."),Wk.forEach(t),ig=d(ut),Js=a(ut,"P",{});var Uc=i(Js);lg=s(Uc,"This model inherits from "),Pa=a(Uc,"A",{href:!0});var Rk=i(Pa);dg=s(Rk,"TFPreTrainedModel"),Rk.forEach(t),cg=s(Uc,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Uc.forEach(t),pg=d(ut),Ys=a(ut,"P",{});var Vc=i(Ys);hg=s(Vc,"This model is also a "),Zs=a(Vc,"A",{href:!0,rel:!0});var Hk=i(Zs);mg=s(Hk,"tf.keras.Model"),Hk.forEach(t),ug=s(Vc,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vc.forEach(t),fg=d(ut),y(nn.$$.fragment,ut),gg=d(ut),tt=a(ut,"DIV",{class:!0});var St=i(tt);y(Xs.$$.fragment,St),_g=d(St),_o=a(St,"P",{});var ri=i(_o);bg=s(ri,"The "),Ca=a(ri,"A",{href:!0});var Qk=i(Ca);vg=s(Qk,"TFMobileBertModel"),Qk.forEach(t),Tg=s(ri," forward method, overrides the "),bl=a(ri,"CODE",{});var Uk=i(bl);kg=s(Uk,"__call__"),Uk.forEach(t),Mg=s(ri," special method."),ri.forEach(t),wg=d(St),y(sn.$$.fragment,St),yg=d(St),vl=a(St,"P",{});var Vk=i(vl);$g=s(Vk,"Example:"),Vk.forEach(t),Fg=d(St),y(er.$$.fragment,St),St.forEach(t),ut.forEach(t),Kd=d(o),bo=a(o,"H2",{class:!0});var Kc=i(bo);rn=a(Kc,"A",{id:!0,class:!0,href:!0});var Kk=i(rn);Tl=a(Kk,"SPAN",{});var Gk=i(Tl);y(tr.$$.fragment,Gk),Gk.forEach(t),Kk.forEach(t),Bg=d(Kc),kl=a(Kc,"SPAN",{});var Jk=i(kl);Eg=s(Jk,"TFMobileBertForPreTraining"),Jk.forEach(t),Kc.forEach(t),Gd=d(o),je=a(o,"DIV",{class:!0});var ft=i(je);y(or.$$.fragment,ft),zg=d(ft),vo=a(ft,"P",{});var ai=i(vo);xg=s(ai,"MobileBert Model with two heads on top as done during the pretraining: a "),Ml=a(ai,"CODE",{});var Yk=i(Ml);Pg=s(Yk,"masked language modeling"),Yk.forEach(t),Cg=s(ai,` head and a `),wl=a(ai,"CODE",{});var Zk=i(wl);qg=s(Zk,"next sentence prediction (classification)"),Zk.forEach(t),jg=s(ai," head."),ai.forEach(t),Ng=d(ft),nr=a(ft,"P",{});var Gc=i(nr);Sg=s(Gc,"This model inherits from "),qa=a(Gc,"A",{href:!0});var Xk=i(qa);Ag=s(Xk,"TFPreTrainedModel"),Xk.forEach(t),Ig=s(Gc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gc.forEach(t),Dg=d(ft),sr=a(ft,"P",{});var Jc=i(sr);Og=s(Jc,"This model is also a "),rr=a(Jc,"A",{href:!0,rel:!0});var e1=i(rr);Lg=s(e1,"tf.keras.Model"),e1.forEach(t),Wg=s(Jc,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Jc.forEach(t),Rg=d(ft),y(an.$$.fragment,ft),Hg=d(ft),ot=a(ft,"DIV",{class:!0});var At=i(ot);y(ar.$$.fragment,At),Qg=d(At),To=a(At,"P",{});var ii=i(To);Ug=s(ii,"The "),ja=a(ii,"A",{href:!0});var t1=i(ja);Vg=s(t1,"TFMobileBertForPreTraining"),t1.forEach(t),Kg=s(ii," forward method, overrides the "),yl=a(ii,"CODE",{});var o1=i(yl);Gg=s(o1,"__call__"),o1.forEach(t),Jg=s(ii," special method."),ii.forEach(t),Yg=d(At),y(ln.$$.fragment,At),Zg=d(At),$l=a(At,"P",{});var n1=i($l);Xg=s(n1,"Examples:"),n1.forEach(t),e_=d(At),y(ir.$$.fragment,At),At.forEach(t),ft.forEach(t),Jd=d(o),ko=a(o,"H2",{class:!0});var Yc=i(ko);dn=a(Yc,"A",{id:!0,class:!0,href:!0});var s1=i(dn);Fl=a(s1,"SPAN",{});var r1=i(Fl);y(lr.$$.fragment,r1),r1.forEach(t),s1.forEach(t),t_=d(Yc),Bl=a(Yc,"SPAN",{});var a1=i(Bl);o_=s(a1,"TFMobileBertForMaskedLM"),a1.forEach(t),Yc.forEach(t),Yd=d(o),Ne=a(o,"DIV",{class:!0});var gt=i(Ne);y(dr.$$.fragment,gt),n_=d(gt),cr=a(gt,"P",{});var Zc=i(cr);s_=s(Zc,"MobileBert Model with a "),El=a(Zc,"CODE",{});var i1=i(El);r_=s(i1,"language modeling"),i1.forEach(t),a_=s(Zc," head on top."),Zc.forEach(t),i_=d(gt),pr=a(gt,"P",{});var Xc=i(pr);l_=s(Xc,"This model inherits from "),Na=a(Xc,"A",{href:!0});var l1=i(Na);d_=s(l1,"TFPreTrainedModel"),l1.forEach(t),c_=s(Xc,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xc.forEach(t),p_=d(gt),hr=a(gt,"P",{});var ep=i(hr);h_=s(ep,"This model is also a "),mr=a(ep,"A",{href:!0,rel:!0});var d1=i(mr);m_=s(d1,"tf.keras.Model"),d1.forEach(t),u_=s(ep,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ep.forEach(t),f_=d(gt),y(cn.$$.fragment,gt),g_=d(gt),nt=a(gt,"DIV",{class:!0});var It=i(nt);y(ur.$$.fragment,It),__=d(It),Mo=a(It,"P",{});var li=i(Mo);b_=s(li,"The "),Sa=a(li,"A",{href:!0});var c1=i(Sa);v_=s(c1,"TFMobileBertForMaskedLM"),c1.forEach(t),T_=s(li," forward method, overrides the "),zl=a(li,"CODE",{});var p1=i(zl);k_=s(p1,"__call__"),p1.forEach(t),M_=s(li," special method."),li.forEach(t),w_=d(It),y(pn.$$.fragment,It),y_=d(It),xl=a(It,"P",{});var h1=i(xl);$_=s(h1,"Example:"),h1.forEach(t),F_=d(It),y(fr.$$.fragment,It),It.forEach(t),gt.forEach(t),Zd=d(o),wo=a(o,"H2",{class:!0});var tp=i(wo);hn=a(tp,"A",{id:!0,class:!0,href:!0});var m1=i(hn);Pl=a(m1,"SPAN",{});var u1=i(Pl);y(gr.$$.fragment,u1),u1.forEach(t),m1.forEach(t),B_=d(tp),Cl=a(tp,"SPAN",{});var f1=i(Cl);E_=s(f1,"TFMobileBertForNextSentencePrediction"),f1.forEach(t),tp.forEach(t),Xd=d(o),Se=a(o,"DIV",{class:!0});var _t=i(Se);y(_r.$$.fragment,_t),z_=d(_t),br=a(_t,"P",{});var op=i(br);x_=s(op,"MobileBert Model with a "),ql=a(op,"CODE",{});var g1=i(ql);P_=s(g1,"next sentence prediction (classification)"),g1.forEach(t),C_=s(op," head on top."),op.forEach(t),q_=d(_t),vr=a(_t,"P",{});var np=i(vr);j_=s(np,"This model inherits from "),Aa=a(np,"A",{href:!0});var _1=i(Aa);N_=s(_1,"TFPreTrainedModel"),_1.forEach(t),S_=s(np,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),np.forEach(t),A_=d(_t),Tr=a(_t,"P",{});var sp=i(Tr);I_=s(sp,"This model is also a "),kr=a(sp,"A",{href:!0,rel:!0});var b1=i(kr);D_=s(b1,"tf.keras.Model"),b1.forEach(t),O_=s(sp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sp.forEach(t),L_=d(_t),y(mn.$$.fragment,_t),W_=d(_t),st=a(_t,"DIV",{class:!0});var Dt=i(st);y(Mr.$$.fragment,Dt),R_=d(Dt),yo=a(Dt,"P",{});var di=i(yo);H_=s(di,"The "),Ia=a(di,"A",{href:!0});var v1=i(Ia);Q_=s(v1,"TFMobileBertForNextSentencePrediction"),v1.forEach(t),U_=s(di," forward method, overrides the "),jl=a(di,"CODE",{});var T1=i(jl);V_=s(T1,"__call__"),T1.forEach(t),K_=s(di," special method."),di.forEach(t),G_=d(Dt),y(un.$$.fragment,Dt),J_=d(Dt),Nl=a(Dt,"P",{});var k1=i(Nl);Y_=s(k1,"Examples:"),k1.forEach(t),Z_=d(Dt),y(wr.$$.fragment,Dt),Dt.forEach(t),_t.forEach(t),ec=d(o),$o=a(o,"H2",{class:!0});var rp=i($o);fn=a(rp,"A",{id:!0,class:!0,href:!0});var M1=i(fn);Sl=a(M1,"SPAN",{});var w1=i(Sl);y(yr.$$.fragment,w1),w1.forEach(t),M1.forEach(t),X_=d(rp),Al=a(rp,"SPAN",{});var y1=i(Al);eb=s(y1,"TFMobileBertForSequenceClassification"),y1.forEach(t),rp.forEach(t),tc=d(o),Ae=a(o,"DIV",{class:!0});var bt=i(Ae);y($r.$$.fragment,bt),tb=d(bt),Il=a(bt,"P",{});var $1=i(Il);ob=s($1,`MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),$1.forEach(t),nb=d(bt),Fr=a(bt,"P",{});var ap=i(Fr);sb=s(ap,"This model inherits from "),Da=a(ap,"A",{href:!0});var F1=i(Da);rb=s(F1,"TFPreTrainedModel"),F1.forEach(t),ab=s(ap,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ap.forEach(t),ib=d(bt),Br=a(bt,"P",{});var ip=i(Br);lb=s(ip,"This model is also a "),Er=a(ip,"A",{href:!0,rel:!0});var B1=i(Er);db=s(B1,"tf.keras.Model"),B1.forEach(t),cb=s(ip,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ip.forEach(t),pb=d(bt),y(gn.$$.fragment,bt),hb=d(bt),rt=a(bt,"DIV",{class:!0});var Ot=i(rt);y(zr.$$.fragment,Ot),mb=d(Ot),Fo=a(Ot,"P",{});var ci=i(Fo);ub=s(ci,"The "),Oa=a(ci,"A",{href:!0});var E1=i(Oa);fb=s(E1,"TFMobileBertForSequenceClassification"),E1.forEach(t),gb=s(ci," forward method, overrides the "),Dl=a(ci,"CODE",{});var z1=i(Dl);_b=s(z1,"__call__"),z1.forEach(t),bb=s(ci," special method."),ci.forEach(t),vb=d(Ot),y(_n.$$.fragment,Ot),Tb=d(Ot),Ol=a(Ot,"P",{});var x1=i(Ol);kb=s(x1,"Example:"),x1.forEach(t),Mb=d(Ot),y(xr.$$.fragment,Ot),Ot.forEach(t),bt.forEach(t),oc=d(o),Bo=a(o,"H2",{class:!0});var lp=i(Bo);bn=a(lp,"A",{id:!0,class:!0,href:!0});var P1=i(bn);Ll=a(P1,"SPAN",{});var C1=i(Ll);y(Pr.$$.fragment,C1),C1.forEach(t),P1.forEach(t),wb=d(lp),Wl=a(lp,"SPAN",{});var q1=i(Wl);yb=s(q1,"TFMobileBertForMultipleChoice"),q1.forEach(t),lp.forEach(t),nc=d(o),Ie=a(o,"DIV",{class:!0});var vt=i(Ie);y(Cr.$$.fragment,vt),$b=d(vt),Rl=a(vt,"P",{});var j1=i(Rl);Fb=s(j1,`MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.`),j1.forEach(t),Bb=d(vt),qr=a(vt,"P",{});var dp=i(qr);Eb=s(dp,"This model inherits from "),La=a(dp,"A",{href:!0});var N1=i(La);zb=s(N1,"TFPreTrainedModel"),N1.forEach(t),xb=s(dp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dp.forEach(t),Pb=d(vt),jr=a(vt,"P",{});var cp=i(jr);Cb=s(cp,"This model is also a "),Nr=a(cp,"A",{href:!0,rel:!0});var S1=i(Nr);qb=s(S1,"tf.keras.Model"),S1.forEach(t),jb=s(cp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cp.forEach(t),Nb=d(vt),y(vn.$$.fragment,vt),Sb=d(vt),at=a(vt,"DIV",{class:!0});var Lt=i(at);y(Sr.$$.fragment,Lt),Ab=d(Lt),Eo=a(Lt,"P",{});var pi=i(Eo);Ib=s(pi,"The "),Wa=a(pi,"A",{href:!0});var A1=i(Wa);Db=s(A1,"TFMobileBertForMultipleChoice"),A1.forEach(t),Ob=s(pi," forward method, overrides the "),Hl=a(pi,"CODE",{});var I1=i(Hl);Lb=s(I1,"__call__"),I1.forEach(t),Wb=s(pi," special method."),pi.forEach(t),Rb=d(Lt),y(Tn.$$.fragment,Lt),Hb=d(Lt),Ql=a(Lt,"P",{});var D1=i(Ql);Qb=s(D1,"Example:"),D1.forEach(t),Ub=d(Lt),y(Ar.$$.fragment,Lt),Lt.forEach(t),vt.forEach(t),sc=d(o),zo=a(o,"H2",{class:!0});var pp=i(zo);kn=a(pp,"A",{id:!0,class:!0,href:!0});var O1=i(kn);Ul=a(O1,"SPAN",{});var L1=i(Ul);y(Ir.$$.fragment,L1),L1.forEach(t),O1.forEach(t),Vb=d(pp),Vl=a(pp,"SPAN",{});var W1=i(Vl);Kb=s(W1,"TFMobileBertForTokenClassification"),W1.forEach(t),pp.forEach(t),rc=d(o),De=a(o,"DIV",{class:!0});var Tt=i(De);y(Dr.$$.fragment,Tt),Gb=d(Tt),Kl=a(Tt,"P",{});var R1=i(Kl);Jb=s(R1,`MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),R1.forEach(t),Yb=d(Tt),Or=a(Tt,"P",{});var hp=i(Or);Zb=s(hp,"This model inherits from "),Ra=a(hp,"A",{href:!0});var H1=i(Ra);Xb=s(H1,"TFPreTrainedModel"),H1.forEach(t),ev=s(hp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hp.forEach(t),tv=d(Tt),Lr=a(Tt,"P",{});var mp=i(Lr);ov=s(mp,"This model is also a "),Wr=a(mp,"A",{href:!0,rel:!0});var Q1=i(Wr);nv=s(Q1,"tf.keras.Model"),Q1.forEach(t),sv=s(mp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),mp.forEach(t),rv=d(Tt),y(Mn.$$.fragment,Tt),av=d(Tt),it=a(Tt,"DIV",{class:!0});var Wt=i(it);y(Rr.$$.fragment,Wt),iv=d(Wt),xo=a(Wt,"P",{});var hi=i(xo);lv=s(hi,"The "),Ha=a(hi,"A",{href:!0});var U1=i(Ha);dv=s(U1,"TFMobileBertForTokenClassification"),U1.forEach(t),cv=s(hi," forward method, overrides the "),Gl=a(hi,"CODE",{});var V1=i(Gl);pv=s(V1,"__call__"),V1.forEach(t),hv=s(hi," special method."),hi.forEach(t),mv=d(Wt),y(wn.$$.fragment,Wt),uv=d(Wt),Jl=a(Wt,"P",{});var K1=i(Jl);fv=s(K1,"Example:"),K1.forEach(t),gv=d(Wt),y(Hr.$$.fragment,Wt),Wt.forEach(t),Tt.forEach(t),ac=d(o),Po=a(o,"H2",{class:!0});var up=i(Po);yn=a(up,"A",{id:!0,class:!0,href:!0});var G1=i(yn);Yl=a(G1,"SPAN",{});var J1=i(Yl);y(Qr.$$.fragment,J1),J1.forEach(t),G1.forEach(t),_v=d(up),Zl=a(up,"SPAN",{});var Y1=i(Zl);bv=s(Y1,"TFMobileBertForQuestionAnswering"),Y1.forEach(t),up.forEach(t),ic=d(o),Oe=a(o,"DIV",{class:!0});var kt=i(Oe);y(Ur.$$.fragment,kt),vv=d(kt),Co=a(kt,"P",{});var mi=i(Co);Tv=s(mi,`MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Xl=a(mi,"CODE",{});var Z1=i(Xl);kv=s(Z1,"span start logits"),Z1.forEach(t),Mv=s(mi," and "),ed=a(mi,"CODE",{});var X1=i(ed);wv=s(X1,"span end logits"),X1.forEach(t),yv=s(mi,")."),mi.forEach(t),$v=d(kt),Vr=a(kt,"P",{});var fp=i(Vr);Fv=s(fp,"This model inherits from "),Qa=a(fp,"A",{href:!0});var eM=i(Qa);Bv=s(eM,"TFPreTrainedModel"),eM.forEach(t),Ev=s(fp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fp.forEach(t),zv=d(kt),Kr=a(kt,"P",{});var gp=i(Kr);xv=s(gp,"This model is also a "),Gr=a(gp,"A",{href:!0,rel:!0});var tM=i(Gr);Pv=s(tM,"tf.keras.Model"),tM.forEach(t),Cv=s(gp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),gp.forEach(t),qv=d(kt),y($n.$$.fragment,kt),jv=d(kt),lt=a(kt,"DIV",{class:!0});var Rt=i(lt);y(Jr.$$.fragment,Rt),Nv=d(Rt),qo=a(Rt,"P",{});var ui=i(qo);Sv=s(ui,"The "),Ua=a(ui,"A",{href:!0});var oM=i(Ua);Av=s(oM,"TFMobileBertForQuestionAnswering"),oM.forEach(t),Iv=s(ui," forward method, overrides the "),td=a(ui,"CODE",{});var nM=i(td);Dv=s(nM,"__call__"),nM.forEach(t),Ov=s(ui," special method."),ui.forEach(t),Lv=d(Rt),y(Fn.$$.fragment,Rt),Wv=d(Rt),od=a(Rt,"P",{});var sM=i(od);Rv=s(sM,"Example:"),sM.forEach(t),Hv=d(Rt),y(Yr.$$.fragment,Rt),Rt.forEach(t),kt.forEach(t),this.h()},h(){p(h,"name","hf:doc:metadata"),p(h,"content",JSON.stringify(jM)),p(g,"id","mobilebert"),p(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(g,"href","#mobilebert"),p(f,"class","relative group"),p(Z,"id","overview"),p(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Z,"href","#overview"),p(z,"class","relative group"),p(te,"href","https://arxiv.org/abs/2004.02984"),p(te,"rel","nofollow"),p(c,"href","https://huggingface.co/vshampor"),p(c,"rel","nofollow"),p(ke,"href","https://github.com/google-research/mobilebert"),p(ke,"rel","nofollow"),p(Te,"id","transformers.MobileBertConfig"),p(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Te,"href","#transformers.MobileBertConfig"),p(ve,"class","relative group"),p(na,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertModel"),p(sa,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertModel"),p(ra,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(aa,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),p(Y,"class","docstring"),p(No,"id","transformers.MobileBertTokenizer"),p(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(No,"href","#transformers.MobileBertTokenizer"),p(Ut,"class","relative group"),p(ia,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizer"),p(la,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),p(da,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),p(ct,"class","docstring"),p(Ao,"id","transformers.MobileBertTokenizerFast"),p(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ao,"href","#transformers.MobileBertTokenizerFast"),p(Vt,"class","relative 
group"),p(ca,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertTokenizerFast"),p(pa,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),p(ha,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),p(pt,"class","docstring"),p(Do,"id","transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput"),p(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Do,"href","#transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput"),p(Kt,"class","relative group"),p(ma,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForPreTraining"),p(Gt,"class","docstring"),p(ua,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForPreTraining"),p(Jt,"class","docstring"),p(Oo,"id","transformers.MobileBertModel"),p(Oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Oo,"href","#transformers.MobileBertModel"),p(Yt,"class","relative group"),p(fa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Un,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Un,"rel","nofollow"),p(Vn,"href","https://arxiv.org/pdf/2004.02984.pdf"),p(Vn,"rel","nofollow"),p(ga,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertModel"),p(Ke,"class","docstring"),p(Ce,"class","docstring"),p(Wo,"id","transformers.MobileBertForPreTraining"),p(Wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Wo,"href","#transformers.MobileBertForPreTraining"),p(Xt,"class","relative group"),p(_a,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(es,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(es,"rel","nofollow"),p(ba,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForPreTraining"),p(Ge,"class","docstring"),p(Le,"class","docstring"),p(Ho,"id","transformers.MobileBertForMaskedLM"),p(Ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ho,"href","#transformers.MobileBertForMaskedLM"),p(oo,"class","relative group"),p(va,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(ls,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(ls,"rel","nofollow"),p(Ta,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForMaskedLM"),p(Je,"class","docstring"),p(We,"class","docstring"),p(Uo,"id","transformers.MobileBertForNextSentencePrediction"),p(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Uo,"href","#transformers.MobileBertForNextSentencePrediction"),p(so,"class","relative 
group"),p(ka,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(gs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(gs,"rel","nofollow"),p(Ma,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForNextSentencePrediction"),p(Ye,"class","docstring"),p(Re,"class","docstring"),p(Ko,"id","transformers.MobileBertForSequenceClassification"),p(Ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Ko,"href","#transformers.MobileBertForSequenceClassification"),p(ao,"class","relative group"),p(wa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(ws,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(ws,"rel","nofollow"),p(ya,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForSequenceClassification"),p(xe,"class","docstring"),p(He,"class","docstring"),p(Jo,"id","transformers.MobileBertForMultipleChoice"),p(Jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Jo,"href","#transformers.MobileBertForMultipleChoice"),p(lo,"class","relative group"),p($a,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Ps,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Ps,"rel","nofollow"),p(Fa,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForMultipleChoice"),p(Ze,"class","docstring"),p(Qe,"class","docstring"),p(Zo,"id","transformers.MobileBertForTokenClassification"),p(Zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Zo,"href","#transformers.MobileBertForTokenClassification"),p(po,"class","relative group"),p(Ba,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Is,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Is,"rel","nofollow"),p(Ea,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForTokenClassification"),p(Xe,"class","docstring"),p(Ue,"class","docstring"),p(en,"id","transformers.MobileBertForQuestionAnswering"),p(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(en,"href","#transformers.MobileBertForQuestionAnswering"),p(mo,"class","relative group"),p(za,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),p(Qs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Qs,"rel","nofollow"),p(xa,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.MobileBertForQuestionAnswering"),p(et,"class","docstring"),p(Ve,"class","docstring"),p(on,"id","transformers.TFMobileBertModel"),p(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(on,"href","#transformers.TFMobileBertModel"),p(go,"class","relative 
group"),p(Pa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Zs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Zs,"rel","nofollow"),p(Ca,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertModel"),p(tt,"class","docstring"),p(qe,"class","docstring"),p(rn,"id","transformers.TFMobileBertForPreTraining"),p(rn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(rn,"href","#transformers.TFMobileBertForPreTraining"),p(bo,"class","relative group"),p(qa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(rr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(rr,"rel","nofollow"),p(ja,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForPreTraining"),p(ot,"class","docstring"),p(je,"class","docstring"),p(dn,"id","transformers.TFMobileBertForMaskedLM"),p(dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(dn,"href","#transformers.TFMobileBertForMaskedLM"),p(ko,"class","relative group"),p(Na,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(mr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(mr,"rel","nofollow"),p(Sa,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForMaskedLM"),p(nt,"class","docstring"),p(Ne,"class","docstring"),p(hn,"id","transformers.TFMobileBertForNextSentencePrediction"),p(hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(hn,"href","#transformers.TFMobileBertForNextSentencePrediction"),p(wo,"class","relative group"),p(Aa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(kr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(kr,"rel","nofollow"),p(Ia,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForNextSentencePrediction"),p(st,"class","docstring"),p(Se,"class","docstring"),p(fn,"id","transformers.TFMobileBertForSequenceClassification"),p(fn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(fn,"href","#transformers.TFMobileBertForSequenceClassification"),p($o,"class","relative group"),p(Da,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Er,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Er,"rel","nofollow"),p(Oa,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForSequenceClassification"),p(rt,"class","docstring"),p(Ae,"class","docstring"),p(bn,"id","transformers.TFMobileBertForMultipleChoice"),p(bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(bn,"href","#transformers.TFMobileBertForMultipleChoice"),p(Bo,"class","relative 
group"),p(La,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Nr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Nr,"rel","nofollow"),p(Wa,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForMultipleChoice"),p(at,"class","docstring"),p(Ie,"class","docstring"),p(kn,"id","transformers.TFMobileBertForTokenClassification"),p(kn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(kn,"href","#transformers.TFMobileBertForTokenClassification"),p(zo,"class","relative group"),p(Ra,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Wr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Wr,"rel","nofollow"),p(Ha,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForTokenClassification"),p(it,"class","docstring"),p(De,"class","docstring"),p(yn,"id","transformers.TFMobileBertForQuestionAnswering"),p(yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(yn,"href","#transformers.TFMobileBertForQuestionAnswering"),p(Po,"class","relative group"),p(Qa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),p(Gr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Gr,"rel","nofollow"),p(Ua,"href","/docs/transformers/v4.15.0/en/model_doc/mobilebert#transformers.TFMobileBertForQuestionAnswering"),p(lt,"class","docstring"),p(Oe,"class","docstring")},m(o,u){e(document.head,h),m(o,k,u),m(o,f,u),e(f,g),e(g,v),$(b,v,null),e(f,_),e(f,M),e(M,he),m(o,K,u),m(o,z,u),e(z,Z),e(Z,D),$(ee,D,null),e(z,me),e(z,O),e(O,ue),m(o,le,u),m(o,V,u),e(V,j),e(V,te),e(te,G),e(V,x),m(o,C,u),m(o,ne,u),e(ne,H),m(o,de,u),m(o,se,u),e(se,L),e(L,fe),m(o,ce,u),m(o,P,u),e(P,ge),m(o,W,u),m(o,X,u),e(X,re),e(re,Q),e(X,_e),e(X,ae),e(ae,N),m(o,pe,u),m(o,A,u),e(A,be),e(A,c),e(c,T),e(A,J),e(A,ke),e(ke,$e),e(A,S),m(o,we,u),m(o,ve,u),e(ve,Te),e(Te,q),$(I,q,null),e(ve,Fe),e(ve,Me),e(Me,U),m(o,ye,u),m(o,Y,u),$(oe,Y,null),e(Y,Be),e(Y,Ht),e(Ht,_p),e(Ht,na),e(na,bp),e(Ht,vp),e(Ht,sa),e(sa,Tp),e(Ht,kp),e(Y,Mp),e(Y,Qt),e(Qt,wp),e(Qt,ra),e(ra,yp),e(Qt,$p),e(Qt,aa),e(aa,Fp),e(Qt,Bp),e(Y,Ep),e(Y,fi),e(fi,zp),e(Y,xp),$(zn,Y,null),e(Y,Pp),e(Y,gi),e(gi,Cp),m(o,wd,u),m(o,Ut,u),e(Ut,No),e(No,_i),$(xn,_i,null),e(Ut,qp),e(Ut,bi),e(bi,jp),m(o,yd,u),m(o,ct,u),$(Pn,ct,null),e(ct,Np),e(ct,vi),e(vi,Sp),e(ct,Ap),e(ct,So),e(So,ia),e(ia,Ip),e(So,Dp),e(So,la),e(la,Op),e(So,Lp),e(ct,Wp),e(ct,Cn),e(Cn,Rp),e(Cn,da),e(da,Hp),e(Cn,Qp),m(o,$d,u),m(o,Vt,u),e(Vt,Ao),e(Ao,Ti),$(qn,Ti,null),e(Vt,Up),e(Vt,ki),e(ki,Vp),m(o,Fd,u),m(o,pt,u),$(jn,pt,null),e(pt,Kp),e(pt,Nn),e(Nn,Gp),e(Nn,Mi),e(Mi,Jp),e(Nn,Yp),e(pt,Zp),e(pt,Io),e(Io,ca),e(ca,Xp),e(Io,eh),e(Io,pa),e(pa,th),e(Io,oh),e(pt,nh),e(pt,Sn),e(Sn,sh),e(Sn,ha),e(ha,rh),e(Sn,ah),m(o,Bd,u),m(o,Kt,u),e(Kt,Do),e(Do,wi),$(An,wi,null),e(Kt,ih),e(Kt,yi),e(yi,lh),m(o,Ed,u),m(o,Gt,u),$(In,Gt,null),e(Gt,dh),e(Gt,Dn),e(Dn,ch),e(Dn,ma),e(ma,ph),e(Dn,hh),m(o,zd,u),m(o,Jt,u),$(On,Jt,null),e(Jt,mh),e(Jt,Ln),e(Ln,uh),e(Ln,ua),e(ua,fh),e(Ln,gh),m(o,xd,u),m(o,Yt,u),e(Yt,Oo),e(Oo,$i),$(Wn,$i,null),e(Yt,_h),e(Yt,Fi),e(Fi,bh),m(o,Pd,u),m(o,Ce,u),$(Rn,Ce,null),e(Ce,vh),e(Ce,Bi),e(Bi,Th),e(Ce,kh),e(Ce,Hn),e(Hn,Mh),e(Hn,fa),e(fa,wh),e(Hn,yh),e(Ce,$h),e(Ce,Qn),e(Qn,Fh),e(Qn,Un),e(Un,
Bh),e(Qn,Eh),e(Ce,zh),e(Ce,Ei),e(Ei,Vn),e(Vn,xh),e(Ce,Ph),e(Ce,Ke),$(Kn,Ke,null),e(Ke,Ch),e(Ke,Zt),e(Zt,qh),e(Zt,ga),e(ga,jh),e(Zt,Nh),e(Zt,zi),e(zi,Sh),e(Zt,Ah),e(Ke,Ih),$(Lo,Ke,null),e(Ke,Dh),e(Ke,xi),e(xi,Oh),e(Ke,Lh),$(Gn,Ke,null),m(o,Cd,u),m(o,Xt,u),e(Xt,Wo),e(Wo,Pi),$(Jn,Pi,null),e(Xt,Wh),e(Xt,Ci),e(Ci,Rh),m(o,qd,u),m(o,Le,u),$(Yn,Le,null),e(Le,Hh),e(Le,eo),e(eo,Qh),e(eo,qi),e(qi,Uh),e(eo,Vh),e(eo,ji),e(ji,Kh),e(eo,Gh),e(Le,Jh),e(Le,Zn),e(Zn,Yh),e(Zn,_a),e(_a,Zh),e(Zn,Xh),e(Le,em),e(Le,Xn),e(Xn,tm),e(Xn,es),e(es,om),e(Xn,nm),e(Le,sm),e(Le,Ge),$(ts,Ge,null),e(Ge,rm),e(Ge,to),e(to,am),e(to,ba),e(ba,im),e(to,lm),e(to,Ni),e(Ni,dm),e(to,cm),e(Ge,pm),$(Ro,Ge,null),e(Ge,hm),e(Ge,Si),e(Si,mm),e(Ge,um),$(os,Ge,null),m(o,jd,u),m(o,oo,u),e(oo,Ho),e(Ho,Ai),$(ns,Ai,null),e(oo,fm),e(oo,Ii),e(Ii,gm),m(o,Nd,u),m(o,We,u),$(ss,We,null),e(We,_m),e(We,rs),e(rs,bm),e(rs,Di),e(Di,vm),e(rs,Tm),e(We,km),e(We,as),e(as,Mm),e(as,va),e(va,wm),e(as,ym),e(We,$m),e(We,is),e(is,Fm),e(is,ls),e(ls,Bm),e(is,Em),e(We,zm),e(We,Je),$(ds,Je,null),e(Je,xm),e(Je,no),e(no,Pm),e(no,Ta),e(Ta,Cm),e(no,qm),e(no,Oi),e(Oi,jm),e(no,Nm),e(Je,Sm),$(Qo,Je,null),e(Je,Am),e(Je,Li),e(Li,Im),e(Je,Dm),$(cs,Je,null),m(o,Sd,u),m(o,so,u),e(so,Uo),e(Uo,Wi),$(ps,Wi,null),e(so,Om),e(so,Ri),e(Ri,Lm),m(o,Ad,u),m(o,Re,u),$(hs,Re,null),e(Re,Wm),e(Re,ms),e(ms,Rm),e(ms,Hi),e(Hi,Hm),e(ms,Qm),e(Re,Um),e(Re,us),e(us,Vm),e(us,ka),e(ka,Km),e(us,Gm),e(Re,Jm),e(Re,fs),e(fs,Ym),e(fs,gs),e(gs,Zm),e(fs,Xm),e(Re,eu),e(Re,Ye),$(_s,Ye,null),e(Ye,tu),e(Ye,ro),e(ro,ou),e(ro,Ma),e(Ma,nu),e(ro,su),e(ro,Qi),e(Qi,ru),e(ro,au),e(Ye,iu),$(Vo,Ye,null),e(Ye,lu),e(Ye,Ui),e(Ui,du),e(Ye,cu),$(bs,Ye,null),m(o,Id,u),m(o,ao,u),e(ao,Ko),e(Ko,Vi),$(vs,Vi,null),e(ao,pu),e(ao,Ki),e(Ki,hu),m(o,Dd,u),m(o,He,u),$(Ts,He,null),e(He,mu),e(He,Gi),e(Gi,uu),e(He,fu),e(He,ks),e(ks,gu),e(ks,wa),e(wa,_u),e(ks,bu),e(He,vu),e(He,Ms),e(Ms,Tu),e(Ms,ws),e(ws,ku),e(Ms,Mu),e(He,wu),e(He,xe),$(ys,xe,null),e(xe,yu),e(xe,io),e(io,$u),e(io,ya),e(ya,Fu),e(io,Bu),e(io,Ji),e(Ji,Eu),e(io,zu),e(xe,xu),$(Go,xe,null),e(xe,Pu),e(xe,Yi),e(Yi,Cu),e(xe,qu),$($s,xe,null),e(xe,ju),e(xe,Zi),e(Zi,Nu),e(xe,Su),$(Fs,xe,null),m(o,Od,u),m(o,lo,u),e(lo,Jo),e(Jo,Xi),$(Bs,Xi,null),e(lo,Au),e(lo,el),e(el,Iu),m(o,Ld,u),m(o,Qe,u),$(Es,Qe,null),e(Qe,Du),e(Qe,tl),e(tl,Ou),e(Qe,Lu),e(Qe,zs),e(zs,Wu),e(zs,$a),e($a,Ru),e(zs,Hu),e(Qe,Qu),e(Qe,xs),e(xs,Uu),e(xs,Ps),e(Ps,Vu),e(xs,Ku),e(Qe,Gu),e(Qe,Ze),$(Cs,Ze,null),e(Ze,Ju),e(Ze,co),e(co,Yu),e(co,Fa),e(Fa,Zu),e(co,Xu),e(co,ol),e(ol,ef),e(co,tf),e(Ze,of),$(Yo,Ze,null),e(Ze,nf),e(Ze,nl),e(nl,sf),e(Ze,rf),$(qs,Ze,null),m(o,Wd,u),m(o,po,u),e(po,Zo),e(Zo,sl),$(js,sl,null),e(po,af),e(po,rl),e(rl,lf),m(o,Rd,u),m(o,Ue,u),$(Ns,Ue,null),e(Ue,df),e(Ue,al),e(al,cf),e(Ue,pf),e(Ue,Ss),e(Ss,hf),e(Ss,Ba),e(Ba,mf),e(Ss,uf),e(Ue,ff),e(Ue,As),e(As,gf),e(As,Is),e(Is,_f),e(As,bf),e(Ue,vf),e(Ue,Xe),$(Ds,Xe,null),e(Xe,Tf),e(Xe,ho),e(ho,kf),e(ho,Ea),e(Ea,Mf),e(ho,wf),e(ho,il),e(il,yf),e(ho,$f),e(Xe,Ff),$(Xo,Xe,null),e(Xe,Bf),e(Xe,ll),e(ll,Ef),e(Xe,zf),$(Os,Xe,null),m(o,Hd,u),m(o,mo,u),e(mo,en),e(en,dl),$(Ls,dl,null),e(mo,xf),e(mo,cl),e(cl,Pf),m(o,Qd,u),m(o,Ve,u),$(Ws,Ve,null),e(Ve,Cf),e(Ve,uo),e(uo,qf),e(uo,pl),e(pl,jf),e(uo,Nf),e(uo,hl),e(hl,Sf),e(uo,Af),e(Ve,If),e(Ve,Rs),e(Rs,Df),e(Rs,za),e(za,Of),e(Rs,Lf),e(Ve,Wf),e(Ve,Hs),e(Hs,Rf),e(Hs,Qs),e(Qs,Hf),e(Hs,Qf),e(Ve,Uf),e(Ve,et),$(Us,et,null),e(et,Vf),e(et,fo),e(fo,Kf),e(fo,xa),e(xa,Gf),e(fo,Jf),e(fo,ml),e(ml,Yf),e(fo,Zf),e(et,Xf),$(tn,et,null),e(et,eg),e(et,ul),e(ul,tg),e(et,og),$(Vs,et,null),m(o,Ud,u),m(o,go,u),e(go,on),e(on,fl),$(Ks,fl,null),e(go,ng),e(go,gl),
e(gl,sg),m(o,Vd,u),m(o,qe,u),$(Gs,qe,null),e(qe,rg),e(qe,_l),e(_l,ag),e(qe,ig),e(qe,Js),e(Js,lg),e(Js,Pa),e(Pa,dg),e(Js,cg),e(qe,pg),e(qe,Ys),e(Ys,hg),e(Ys,Zs),e(Zs,mg),e(Ys,ug),e(qe,fg),$(nn,qe,null),e(qe,gg),e(qe,tt),$(Xs,tt,null),e(tt,_g),e(tt,_o),e(_o,bg),e(_o,Ca),e(Ca,vg),e(_o,Tg),e(_o,bl),e(bl,kg),e(_o,Mg),e(tt,wg),$(sn,tt,null),e(tt,yg),e(tt,vl),e(vl,$g),e(tt,Fg),$(er,tt,null),m(o,Kd,u),m(o,bo,u),e(bo,rn),e(rn,Tl),$(tr,Tl,null),e(bo,Bg),e(bo,kl),e(kl,Eg),m(o,Gd,u),m(o,je,u),$(or,je,null),e(je,zg),e(je,vo),e(vo,xg),e(vo,Ml),e(Ml,Pg),e(vo,Cg),e(vo,wl),e(wl,qg),e(vo,jg),e(je,Ng),e(je,nr),e(nr,Sg),e(nr,qa),e(qa,Ag),e(nr,Ig),e(je,Dg),e(je,sr),e(sr,Og),e(sr,rr),e(rr,Lg),e(sr,Wg),e(je,Rg),$(an,je,null),e(je,Hg),e(je,ot),$(ar,ot,null),e(ot,Qg),e(ot,To),e(To,Ug),e(To,ja),e(ja,Vg),e(To,Kg),e(To,yl),e(yl,Gg),e(To,Jg),e(ot,Yg),$(ln,ot,null),e(ot,Zg),e(ot,$l),e($l,Xg),e(ot,e_),$(ir,ot,null),m(o,Jd,u),m(o,ko,u),e(ko,dn),e(dn,Fl),$(lr,Fl,null),e(ko,t_),e(ko,Bl),e(Bl,o_),m(o,Yd,u),m(o,Ne,u),$(dr,Ne,null),e(Ne,n_),e(Ne,cr),e(cr,s_),e(cr,El),e(El,r_),e(cr,a_),e(Ne,i_),e(Ne,pr),e(pr,l_),e(pr,Na),e(Na,d_),e(pr,c_),e(Ne,p_),e(Ne,hr),e(hr,h_),e(hr,mr),e(mr,m_),e(hr,u_),e(Ne,f_),$(cn,Ne,null),e(Ne,g_),e(Ne,nt),$(ur,nt,null),e(nt,__),e(nt,Mo),e(Mo,b_),e(Mo,Sa),e(Sa,v_),e(Mo,T_),e(Mo,zl),e(zl,k_),e(Mo,M_),e(nt,w_),$(pn,nt,null),e(nt,y_),e(nt,xl),e(xl,$_),e(nt,F_),$(fr,nt,null),m(o,Zd,u),m(o,wo,u),e(wo,hn),e(hn,Pl),$(gr,Pl,null),e(wo,B_),e(wo,Cl),e(Cl,E_),m(o,Xd,u),m(o,Se,u),$(_r,Se,null),e(Se,z_),e(Se,br),e(br,x_),e(br,ql),e(ql,P_),e(br,C_),e(Se,q_),e(Se,vr),e(vr,j_),e(vr,Aa),e(Aa,N_),e(vr,S_),e(Se,A_),e(Se,Tr),e(Tr,I_),e(Tr,kr),e(kr,D_),e(Tr,O_),e(Se,L_),$(mn,Se,null),e(Se,W_),e(Se,st),$(Mr,st,null),e(st,R_),e(st,yo),e(yo,H_),e(yo,Ia),e(Ia,Q_),e(yo,U_),e(yo,jl),e(jl,V_),e(yo,K_),e(st,G_),$(un,st,null),e(st,J_),e(st,Nl),e(Nl,Y_),e(st,Z_),$(wr,st,null),m(o,ec,u),m(o,$o,u),e($o,fn),e(fn,Sl),$(yr,Sl,null),e($o,X_),e($o,Al),e(Al,eb),m(o,tc,u),m(o,Ae,u),$($r,Ae,null),e(Ae,tb),e(Ae,Il),e(Il,ob),e(Ae,nb),e(Ae,Fr),e(Fr,sb),e(Fr,Da),e(Da,rb),e(Fr,ab),e(Ae,ib),e(Ae,Br),e(Br,lb),e(Br,Er),e(Er,db),e(Br,cb),e(Ae,pb),$(gn,Ae,null),e(Ae,hb),e(Ae,rt),$(zr,rt,null),e(rt,mb),e(rt,Fo),e(Fo,ub),e(Fo,Oa),e(Oa,fb),e(Fo,gb),e(Fo,Dl),e(Dl,_b),e(Fo,bb),e(rt,vb),$(_n,rt,null),e(rt,Tb),e(rt,Ol),e(Ol,kb),e(rt,Mb),$(xr,rt,null),m(o,oc,u),m(o,Bo,u),e(Bo,bn),e(bn,Ll),$(Pr,Ll,null),e(Bo,wb),e(Bo,Wl),e(Wl,yb),m(o,nc,u),m(o,Ie,u),$(Cr,Ie,null),e(Ie,$b),e(Ie,Rl),e(Rl,Fb),e(Ie,Bb),e(Ie,qr),e(qr,Eb),e(qr,La),e(La,zb),e(qr,xb),e(Ie,Pb),e(Ie,jr),e(jr,Cb),e(jr,Nr),e(Nr,qb),e(jr,jb),e(Ie,Nb),$(vn,Ie,null),e(Ie,Sb),e(Ie,at),$(Sr,at,null),e(at,Ab),e(at,Eo),e(Eo,Ib),e(Eo,Wa),e(Wa,Db),e(Eo,Ob),e(Eo,Hl),e(Hl,Lb),e(Eo,Wb),e(at,Rb),$(Tn,at,null),e(at,Hb),e(at,Ql),e(Ql,Qb),e(at,Ub),$(Ar,at,null),m(o,sc,u),m(o,zo,u),e(zo,kn),e(kn,Ul),$(Ir,Ul,null),e(zo,Vb),e(zo,Vl),e(Vl,Kb),m(o,rc,u),m(o,De,u),$(Dr,De,null),e(De,Gb),e(De,Kl),e(Kl,Jb),e(De,Yb),e(De,Or),e(Or,Zb),e(Or,Ra),e(Ra,Xb),e(Or,ev),e(De,tv),e(De,Lr),e(Lr,ov),e(Lr,Wr),e(Wr,nv),e(Lr,sv),e(De,rv),$(Mn,De,null),e(De,av),e(De,it),$(Rr,it,null),e(it,iv),e(it,xo),e(xo,lv),e(xo,Ha),e(Ha,dv),e(xo,cv),e(xo,Gl),e(Gl,pv),e(xo,hv),e(it,mv),$(wn,it,null),e(it,uv),e(it,Jl),e(Jl,fv),e(it,gv),$(Hr,it,null),m(o,ac,u),m(o,Po,u),e(Po,yn),e(yn,Yl),$(Qr,Yl,null),e(Po,_v),e(Po,Zl),e(Zl,bv),m(o,ic,u),m(o,Oe,u),$(Ur,Oe,null),e(Oe,vv),e(Oe,Co),e(Co,Tv),e(Co,Xl),e(Xl,kv),e(Co,Mv),e(Co,ed),e(ed,wv),e(Co,yv),e(Oe,$v),e(Oe,Vr),e(Vr,Fv),e(Vr,Qa),e(Qa,Bv),e(Vr,Ev),e(Oe,zv),e(Oe,Kr),e(Kr,xv),e(Kr,Gr),e(Gr,Pv),e(Kr,Cv),e(Oe,qv),$($n,Oe
,null),e(Oe,jv),e(Oe,lt),$(Jr,lt,null),e(lt,Nv),e(lt,qo),e(qo,Sv),e(qo,Ua),e(Ua,Av),e(qo,Iv),e(qo,td),e(td,Dv),e(qo,Ov),e(lt,Lv),$(Fn,lt,null),e(lt,Wv),e(lt,od),e(od,Rv),e(lt,Hv),$(Yr,lt,null),lc=!0},p(o,[u]){const Zr={};u&2&&(Zr.$$scope={dirty:u,ctx:o}),Lo.$set(Zr);const nd={};u&2&&(nd.$$scope={dirty:u,ctx:o}),Ro.$set(nd);const sd={};u&2&&(sd.$$scope={dirty:u,ctx:o}),Qo.$set(sd);const rd={};u&2&&(rd.$$scope={dirty:u,ctx:o}),Vo.$set(rd);const Xr={};u&2&&(Xr.$$scope={dirty:u,ctx:o}),Go.$set(Xr);const ad={};u&2&&(ad.$$scope={dirty:u,ctx:o}),Yo.$set(ad);const id={};u&2&&(id.$$scope={dirty:u,ctx:o}),Xo.$set(id);const ld={};u&2&&(ld.$$scope={dirty:u,ctx:o}),tn.$set(ld);const ea={};u&2&&(ea.$$scope={dirty:u,ctx:o}),nn.$set(ea);const dd={};u&2&&(dd.$$scope={dirty:u,ctx:o}),sn.$set(dd);const cd={};u&2&&(cd.$$scope={dirty:u,ctx:o}),an.$set(cd);const pd={};u&2&&(pd.$$scope={dirty:u,ctx:o}),ln.$set(pd);const hd={};u&2&&(hd.$$scope={dirty:u,ctx:o}),cn.$set(hd);const md={};u&2&&(md.$$scope={dirty:u,ctx:o}),pn.$set(md);const ta={};u&2&&(ta.$$scope={dirty:u,ctx:o}),mn.$set(ta);const ud={};u&2&&(ud.$$scope={dirty:u,ctx:o}),un.$set(ud);const fd={};u&2&&(fd.$$scope={dirty:u,ctx:o}),gn.$set(fd);const jo={};u&2&&(jo.$$scope={dirty:u,ctx:o}),_n.$set(jo);const gd={};u&2&&(gd.$$scope={dirty:u,ctx:o}),vn.$set(gd);const _d={};u&2&&(_d.$$scope={dirty:u,ctx:o}),Tn.$set(_d);const oa={};u&2&&(oa.$$scope={dirty:u,ctx:o}),Mn.$set(oa);const bd={};u&2&&(bd.$$scope={dirty:u,ctx:o}),wn.$set(bd);const vd={};u&2&&(vd.$$scope={dirty:u,ctx:o}),$n.$set(vd);const Td={};u&2&&(Td.$$scope={dirty:u,ctx:o}),Fn.$set(Td)},i(o){lc||(F(b.$$.fragment,o),F(ee.$$.fragment,o),F(I.$$.fragment,o),F(oe.$$.fragment,o),F(zn.$$.fragment,o),F(xn.$$.fragment,o),F(Pn.$$.fragment,o),F(qn.$$.fragment,o),F(jn.$$.fragment,o),F(An.$$.fragment,o),F(In.$$.fragment,o),F(On.$$.fragment,o),F(Wn.$$.fragment,o),F(Rn.$$.fragment,o),F(Kn.$$.fragment,o),F(Lo.$$.fragment,o),F(Gn.$$.fragment,o),F(Jn.$$.fragment,o),F(Yn.$$.fragment,o),F(ts.$$.fragment,o),F(Ro.$$.fragment,o),F(os.$$.fragment,o),F(ns.$$.fragment,o),F(ss.$$.fragment,o),F(ds.$$.fragment,o),F(Qo.$$.fragment,o),F(cs.$$.fragment,o),F(ps.$$.fragment,o),F(hs.$$.fragment,o),F(_s.$$.fragment,o),F(Vo.$$.fragment,o),F(bs.$$.fragment,o),F(vs.$$.fragment,o),F(Ts.$$.fragment,o),F(ys.$$.fragment,o),F(Go.$$.fragment,o),F($s.$$.fragment,o),F(Fs.$$.fragment,o),F(Bs.$$.fragment,o),F(Es.$$.fragment,o),F(Cs.$$.fragment,o),F(Yo.$$.fragment,o),F(qs.$$.fragment,o),F(js.$$.fragment,o),F(Ns.$$.fragment,o),F(Ds.$$.fragment,o),F(Xo.$$.fragment,o),F(Os.$$.fragment,o),F(Ls.$$.fragment,o),F(Ws.$$.fragment,o),F(Us.$$.fragment,o),F(tn.$$.fragment,o),F(Vs.$$.fragment,o),F(Ks.$$.fragment,o),F(Gs.$$.fragment,o),F(nn.$$.fragment,o),F(Xs.$$.fragment,o),F(sn.$$.fragment,o),F(er.$$.fragment,o),F(tr.$$.fragment,o),F(or.$$.fragment,o),F(an.$$.fragment,o),F(ar.$$.fragment,o),F(ln.$$.fragment,o),F(ir.$$.fragment,o),F(lr.$$.fragment,o),F(dr.$$.fragment,o),F(cn.$$.fragment,o),F(ur.$$.fragment,o),F(pn.$$.fragment,o),F(fr.$$.fragment,o),F(gr.$$.fragment,o),F(_r.$$.fragment,o),F(mn.$$.fragment,o),F(Mr.$$.fragment,o),F(un.$$.fragment,o),F(wr.$$.fragment,o),F(yr.$$.fragment,o),F($r.$$.fragment,o),F(gn.$$.fragment,o),F(zr.$$.fragment,o),F(_n.$$.fragment,o),F(xr.$$.fragment,o),F(Pr.$$.fragment,o),F(Cr.$$.fragment,o),F(vn.$$.fragment,o),F(Sr.$$.fragment,o),F(Tn.$$.fragment,o),F(Ar.$$.fragment,o),F(Ir.$$.fragment,o),F(Dr.$$.fragment,o),F(Mn.$$.fragment,o),F(Rr.$$.fragment,o),F(wn.$$.fragment,o),F(Hr.$$.fragment,o),F(Qr.$$.fragment,o),F(Ur.$$.fragment,o),F($n.
$$.fragment,o),F(Jr.$$.fragment,o),F(Fn.$$.fragment,o),F(Yr.$$.fragment,o),lc=!0)},o(o){B(b.$$.fragment,o),B(ee.$$.fragment,o),B(I.$$.fragment,o),B(oe.$$.fragment,o),B(zn.$$.fragment,o),B(xn.$$.fragment,o),B(Pn.$$.fragment,o),B(qn.$$.fragment,o),B(jn.$$.fragment,o),B(An.$$.fragment,o),B(In.$$.fragment,o),B(On.$$.fragment,o),B(Wn.$$.fragment,o),B(Rn.$$.fragment,o),B(Kn.$$.fragment,o),B(Lo.$$.fragment,o),B(Gn.$$.fragment,o),B(Jn.$$.fragment,o),B(Yn.$$.fragment,o),B(ts.$$.fragment,o),B(Ro.$$.fragment,o),B(os.$$.fragment,o),B(ns.$$.fragment,o),B(ss.$$.fragment,o),B(ds.$$.fragment,o),B(Qo.$$.fragment,o),B(cs.$$.fragment,o),B(ps.$$.fragment,o),B(hs.$$.fragment,o),B(_s.$$.fragment,o),B(Vo.$$.fragment,o),B(bs.$$.fragment,o),B(vs.$$.fragment,o),B(Ts.$$.fragment,o),B(ys.$$.fragment,o),B(Go.$$.fragment,o),B($s.$$.fragment,o),B(Fs.$$.fragment,o),B(Bs.$$.fragment,o),B(Es.$$.fragment,o),B(Cs.$$.fragment,o),B(Yo.$$.fragment,o),B(qs.$$.fragment,o),B(js.$$.fragment,o),B(Ns.$$.fragment,o),B(Ds.$$.fragment,o),B(Xo.$$.fragment,o),B(Os.$$.fragment,o),B(Ls.$$.fragment,o),B(Ws.$$.fragment,o),B(Us.$$.fragment,o),B(tn.$$.fragment,o),B(Vs.$$.fragment,o),B(Ks.$$.fragment,o),B(Gs.$$.fragment,o),B(nn.$$.fragment,o),B(Xs.$$.fragment,o),B(sn.$$.fragment,o),B(er.$$.fragment,o),B(tr.$$.fragment,o),B(or.$$.fragment,o),B(an.$$.fragment,o),B(ar.$$.fragment,o),B(ln.$$.fragment,o),B(ir.$$.fragment,o),B(lr.$$.fragment,o),B(dr.$$.fragment,o),B(cn.$$.fragment,o),B(ur.$$.fragment,o),B(pn.$$.fragment,o),B(fr.$$.fragment,o),B(gr.$$.fragment,o),B(_r.$$.fragment,o),B(mn.$$.fragment,o),B(Mr.$$.fragment,o),B(un.$$.fragment,o),B(wr.$$.fragment,o),B(yr.$$.fragment,o),B($r.$$.fragment,o),B(gn.$$.fragment,o),B(zr.$$.fragment,o),B(_n.$$.fragment,o),B(xr.$$.fragment,o),B(Pr.$$.fragment,o),B(Cr.$$.fragment,o),B(vn.$$.fragment,o),B(Sr.$$.fragment,o),B(Tn.$$.fragment,o),B(Ar.$$.fragment,o),B(Ir.$$.fragment,o),B(Dr.$$.fragment,o),B(Mn.$$.fragment,o),B(Rr.$$.fragment,o),B(wn.$$.fragment,o),B(Hr.$$.fragment,o),B(Qr.$$.fragment,o),B(Ur.$$.fragment,o),B($n.$$.fragment,o),B(Jr.$$.fragment,o),B(Fn.$$.fragment,o),B(Yr.$$.fragment,o),lc=!1},d(o){t(h),o&&t(k),o&&t(f),E(b),o&&t(K),o&&t(z),E(ee),o&&t(le),o&&t(V),o&&t(C),o&&t(ne),o&&t(de),o&&t(se),o&&t(ce),o&&t(P),o&&t(W),o&&t(X),o&&t(pe),o&&t(A),o&&t(we),o&&t(ve),E(I),o&&t(ye),o&&t(Y),E(oe),E(zn),o&&t(wd),o&&t(Ut),E(xn),o&&t(yd),o&&t(ct),E(Pn),o&&t($d),o&&t(Vt),E(qn),o&&t(Fd),o&&t(pt),E(jn),o&&t(Bd),o&&t(Kt),E(An),o&&t(Ed),o&&t(Gt),E(In),o&&t(zd),o&&t(Jt),E(On),o&&t(xd),o&&t(Yt),E(Wn),o&&t(Pd),o&&t(Ce),E(Rn),E(Kn),E(Lo),E(Gn),o&&t(Cd),o&&t(Xt),E(Jn),o&&t(qd),o&&t(Le),E(Yn),E(ts),E(Ro),E(os),o&&t(jd),o&&t(oo),E(ns),o&&t(Nd),o&&t(We),E(ss),E(ds),E(Qo),E(cs),o&&t(Sd),o&&t(so),E(ps),o&&t(Ad),o&&t(Re),E(hs),E(_s),E(Vo),E(bs),o&&t(Id),o&&t(ao),E(vs),o&&t(Dd),o&&t(He),E(Ts),E(ys),E(Go),E($s),E(Fs),o&&t(Od),o&&t(lo),E(Bs),o&&t(Ld),o&&t(Qe),E(Es),E(Cs),E(Yo),E(qs),o&&t(Wd),o&&t(po),E(js),o&&t(Rd),o&&t(Ue),E(Ns),E(Ds),E(Xo),E(Os),o&&t(Hd),o&&t(mo),E(Ls),o&&t(Qd),o&&t(Ve),E(Ws),E(Us),E(tn),E(Vs),o&&t(Ud),o&&t(go),E(Ks),o&&t(Vd),o&&t(qe),E(Gs),E(nn),E(Xs),E(sn),E(er),o&&t(Kd),o&&t(bo),E(tr),o&&t(Gd),o&&t(je),E(or),E(an),E(ar),E(ln),E(ir),o&&t(Jd),o&&t(ko),E(lr),o&&t(Yd),o&&t(Ne),E(dr),E(cn),E(ur),E(pn),E(fr),o&&t(Zd),o&&t(wo),E(gr),o&&t(Xd),o&&t(Se),E(_r),E(mn),E(Mr),E(un),E(wr),o&&t(ec),o&&t($o),E(yr),o&&t(tc),o&&t(Ae),E($r),E(gn),E(zr),E(_n),E(xr),o&&t(oc),o&&t(Bo),E(Pr),o&&t(nc),o&&t(Ie),E(Cr),E(vn),E(Sr),E(Tn),E(Ar),o&&t(sc),o&&t(zo),E(Ir),o&&t(rc),o&&t(De),E(Dr),E(Mn),E(Rr),E(wn),E(Hr),o&&t(ac),o&&t(Po),E(Qr),o&&t(
ic),o&&t(Oe),E(Ur),E($n),E(Jr),E(Fn),E(Yr)}}}const jM={local:"mobilebert",sections:[{local:"overview",title:"Overview"},{local:"transformers.MobileBertConfig",title:"MobileBertConfig"},{local:"transformers.MobileBertTokenizer",title:"MobileBertTokenizer"},{local:"transformers.MobileBertTokenizerFast",title:"MobileBertTokenizerFast"},{local:"transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput",title:"MobileBert specific outputs"},{local:"transformers.MobileBertModel",title:"MobileBertModel"},{local:"transformers.MobileBertForPreTraining",title:"MobileBertForPreTraining"},{local:"transformers.MobileBertForMaskedLM",title:"MobileBertForMaskedLM"},{local:"transformers.MobileBertForNextSentencePrediction",title:"MobileBertForNextSentencePrediction"},{local:"transformers.MobileBertForSequenceClassification",title:"MobileBertForSequenceClassification"},{local:"transformers.MobileBertForMultipleChoice",title:"MobileBertForMultipleChoice"},{local:"transformers.MobileBertForTokenClassification",title:"MobileBertForTokenClassification"},{local:"transformers.MobileBertForQuestionAnswering",title:"MobileBertForQuestionAnswering"},{local:"transformers.TFMobileBertModel",title:"TFMobileBertModel"},{local:"transformers.TFMobileBertForPreTraining",title:"TFMobileBertForPreTraining"},{local:"transformers.TFMobileBertForMaskedLM",title:"TFMobileBertForMaskedLM"},{local:"transformers.TFMobileBertForNextSentencePrediction",title:"TFMobileBertForNextSentencePrediction"},{local:"transformers.TFMobileBertForSequenceClassification",title:"TFMobileBertForSequenceClassification"},{local:"transformers.TFMobileBertForMultipleChoice",title:"TFMobileBertForMultipleChoice"},{local:"transformers.TFMobileBertForTokenClassification",title:"TFMobileBertForTokenClassification"},{local:"transformers.TFMobileBertForQuestionAnswering",title:"TFMobileBertForQuestionAnswering"}],title:"MobileBERT"};function NM(R,h,k){let{fw:f}=h;return R.$$set=g=>{"fw"in g&&k(0,f=g.fw)},[f]}class WM extends rM{constructor(h){super();aM(this,h,NM,qM,iM,{fw:0})}}export{WM as default,jM as metadata};
9,974
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/convbert.mdx-6cbc27cd.js
import{S as Gg,i as Zg,s as Xg,e as r,k as l,w as b,t as n,L as e_,c as a,d as t,m as d,a as i,x as C,h as s,b as h,J as e,g as u,y as w,q as $,o as y,B}from"../../chunks/vendor-b1433968.js";import{T as Me}from"../../chunks/Tip-c3840994.js";import{D as re}from"../../chunks/Docstring-ff504c58.js";import{C as Ie}from"../../chunks/CodeBlock-a320dbd7.js";import{I as De}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function t_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function o_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function n_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function s_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function r_(H){let 
p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function a_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function i_(H){let p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z;return{c(){p=r("p"),F=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),T=r("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),M=r("p"),X=n("This second option is useful when using "),S=r("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),L=r("code"),pe=n("model(inputs)"),ae=n("."),U=l(),A=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=r("ul"),j=r("li"),ne=n("a single Tensor with "),Q=r("code"),ie=n("input_ids"),se=n(" only and nothing else: "),O=r("code"),he=n("model(inputs_ids)"),le=l(),q=r("li"),ue=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r("code"),G=n("model([input_ids, attention_mask])"),fe=n(" or "),I=r("code"),me=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),x=r("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=r("code"),Z=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=a(c,"P",{});var k=i(p);F=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(c),g=a(c,"UL",{});var V=i(g);T=a(V,"LI",{});var $e=i(T);v=s($e,"having all inputs as keyword arguments (like PyTorch models), or"),$e.forEach(t),_=d(V),E=a(V,"LI",{});var Ce=i(E);de=s(Ce,"having all inputs as a list, tuple or dict in the first positional arguments."),Ce.forEach(t),V.forEach(t),K=d(c),M=a(c,"P",{});var N=i(M);X=s(N,"This second option is useful when using "),S=a(N,"CODE",{});var we=i(S);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),ce=s(N,` method which currently 
requires having all the tensors in the first argument of the model call function: `),L=a(N,"CODE",{});var oe=i(L);pe=s(oe,"model(inputs)"),oe.forEach(t),ae=s(N,"."),N.forEach(t),U=d(c),A=a(c,"P",{});var Te=i(A);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),J=d(c),z=a(c,"UL",{});var D=i(z);j=a(D,"LI",{});var P=i(j);ne=s(P,"a single Tensor with "),Q=a(P,"CODE",{});var ye=i(Q);ie=s(ye,"input_ids"),ye.forEach(t),se=s(P," only and nothing else: "),O=a(P,"CODE",{});var be=i(O);he=s(be,"model(inputs_ids)"),be.forEach(t),P.forEach(t),le=d(D),q=a(D,"LI",{});var Y=i(q);ue=s(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=a(Y,"CODE",{});var Be=i(R);G=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),fe=s(Y," or "),I=a(Y,"CODE",{});var ke=i(I);me=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Y.forEach(t),ge=d(D),x=a(D,"LI",{});var ve=i(x);_e=s(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=a(ve,"CODE",{});var Fe=i(W);Z=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ve.forEach(t),D.forEach(t)},m(c,k){u(c,p,k),e(p,F),u(c,f,k),u(c,g,k),e(g,T),e(T,v),e(g,_),e(g,E),e(E,de),u(c,K,k),u(c,M,k),e(M,X),e(M,S),e(S,ee),e(M,ce),e(M,L),e(L,pe),e(M,ae),u(c,U,k),u(c,A,k),e(A,te),u(c,J,k),u(c,z,k),e(z,j),e(j,ne),e(j,Q),e(Q,ie),e(j,se),e(j,O),e(O,he),e(z,le),e(z,q),e(q,ue),e(q,R),e(R,G),e(q,fe),e(q,I),e(I,me),e(z,ge),e(z,x),e(x,_e),e(x,W),e(W,Z)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(K),c&&t(M),c&&t(U),c&&t(A),c&&t(J),c&&t(z)}}}function l_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function d_(H){let p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z;return{c(){p=r("p"),F=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),T=r("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),M=r("p"),X=n("This second option is useful when using "),S=r("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),L=r("code"),pe=n("model(inputs)"),ae=n("."),U=l(),A=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=r("ul"),j=r("li"),ne=n("a single Tensor with "),Q=r("code"),ie=n("input_ids"),se=n(" only and nothing else: "),O=r("code"),he=n("model(inputs_ids)"),le=l(),q=r("li"),ue=n(`a list of varying length with one or several input 
Tensors IN THE ORDER given in the docstring: `),R=r("code"),G=n("model([input_ids, attention_mask])"),fe=n(" or "),I=r("code"),me=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),x=r("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=r("code"),Z=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=a(c,"P",{});var k=i(p);F=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(c),g=a(c,"UL",{});var V=i(g);T=a(V,"LI",{});var $e=i(T);v=s($e,"having all inputs as keyword arguments (like PyTorch models), or"),$e.forEach(t),_=d(V),E=a(V,"LI",{});var Ce=i(E);de=s(Ce,"having all inputs as a list, tuple or dict in the first positional arguments."),Ce.forEach(t),V.forEach(t),K=d(c),M=a(c,"P",{});var N=i(M);X=s(N,"This second option is useful when using "),S=a(N,"CODE",{});var we=i(S);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),ce=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),L=a(N,"CODE",{});var oe=i(L);pe=s(oe,"model(inputs)"),oe.forEach(t),ae=s(N,"."),N.forEach(t),U=d(c),A=a(c,"P",{});var Te=i(A);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),J=d(c),z=a(c,"UL",{});var D=i(z);j=a(D,"LI",{});var P=i(j);ne=s(P,"a single Tensor with "),Q=a(P,"CODE",{});var ye=i(Q);ie=s(ye,"input_ids"),ye.forEach(t),se=s(P," only and nothing else: "),O=a(P,"CODE",{});var be=i(O);he=s(be,"model(inputs_ids)"),be.forEach(t),P.forEach(t),le=d(D),q=a(D,"LI",{});var Y=i(q);ue=s(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=a(Y,"CODE",{});var Be=i(R);G=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),fe=s(Y," or "),I=a(Y,"CODE",{});var ke=i(I);me=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Y.forEach(t),ge=d(D),x=a(D,"LI",{});var ve=i(x);_e=s(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=a(ve,"CODE",{});var Fe=i(W);Z=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ve.forEach(t),D.forEach(t)},m(c,k){u(c,p,k),e(p,F),u(c,f,k),u(c,g,k),e(g,T),e(T,v),e(g,_),e(g,E),e(E,de),u(c,K,k),u(c,M,k),e(M,X),e(M,S),e(S,ee),e(M,ce),e(M,L),e(L,pe),e(M,ae),u(c,U,k),u(c,A,k),e(A,te),u(c,J,k),u(c,z,k),e(z,j),e(j,ne),e(j,Q),e(Q,ie),e(j,se),e(j,O),e(O,he),e(z,le),e(z,q),e(q,ue),e(q,R),e(R,G),e(q,fe),e(q,I),e(I,me),e(z,ge),e(z,x),e(x,_e),e(x,W),e(W,Z)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(K),c&&t(M),c&&t(U),c&&t(A),c&&t(J),c&&t(z)}}}function c_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function p_(H){let 
p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z;return{c(){p=r("p"),F=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),T=r("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),M=r("p"),X=n("This second option is useful when using "),S=r("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),L=r("code"),pe=n("model(inputs)"),ae=n("."),U=l(),A=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=r("ul"),j=r("li"),ne=n("a single Tensor with "),Q=r("code"),ie=n("input_ids"),se=n(" only and nothing else: "),O=r("code"),he=n("model(inputs_ids)"),le=l(),q=r("li"),ue=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r("code"),G=n("model([input_ids, attention_mask])"),fe=n(" or "),I=r("code"),me=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),x=r("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=r("code"),Z=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=a(c,"P",{});var k=i(p);F=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(c),g=a(c,"UL",{});var V=i(g);T=a(V,"LI",{});var $e=i(T);v=s($e,"having all inputs as keyword arguments (like PyTorch models), or"),$e.forEach(t),_=d(V),E=a(V,"LI",{});var Ce=i(E);de=s(Ce,"having all inputs as a list, tuple or dict in the first positional arguments."),Ce.forEach(t),V.forEach(t),K=d(c),M=a(c,"P",{});var N=i(M);X=s(N,"This second option is useful when using "),S=a(N,"CODE",{});var we=i(S);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),ce=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),L=a(N,"CODE",{});var oe=i(L);pe=s(oe,"model(inputs)"),oe.forEach(t),ae=s(N,"."),N.forEach(t),U=d(c),A=a(c,"P",{});var Te=i(A);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),J=d(c),z=a(c,"UL",{});var D=i(z);j=a(D,"LI",{});var P=i(j);ne=s(P,"a single Tensor with "),Q=a(P,"CODE",{});var ye=i(Q);ie=s(ye,"input_ids"),ye.forEach(t),se=s(P," only and nothing else: "),O=a(P,"CODE",{});var be=i(O);he=s(be,"model(inputs_ids)"),be.forEach(t),P.forEach(t),le=d(D),q=a(D,"LI",{});var Y=i(q);ue=s(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=a(Y,"CODE",{});var Be=i(R);G=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),fe=s(Y," or "),I=a(Y,"CODE",{});var ke=i(I);me=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Y.forEach(t),ge=d(D),x=a(D,"LI",{});var ve=i(x);_e=s(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=a(ve,"CODE",{});var Fe=i(W);Z=s(Fe,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Fe.forEach(t),ve.forEach(t),D.forEach(t)},m(c,k){u(c,p,k),e(p,F),u(c,f,k),u(c,g,k),e(g,T),e(T,v),e(g,_),e(g,E),e(E,de),u(c,K,k),u(c,M,k),e(M,X),e(M,S),e(S,ee),e(M,ce),e(M,L),e(L,pe),e(M,ae),u(c,U,k),u(c,A,k),e(A,te),u(c,J,k),u(c,z,k),e(z,j),e(j,ne),e(j,Q),e(Q,ie),e(j,se),e(j,O),e(O,he),e(z,le),e(z,q),e(q,ue),e(q,R),e(R,G),e(q,fe),e(q,I),e(I,me),e(z,ge),e(z,x),e(x,_e),e(x,W),e(W,Z)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(K),c&&t(M),c&&t(U),c&&t(A),c&&t(J),c&&t(z)}}}function h_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function u_(H){let p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z;return{c(){p=r("p"),F=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),T=r("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),M=r("p"),X=n("This second option is useful when using "),S=r("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),L=r("code"),pe=n("model(inputs)"),ae=n("."),U=l(),A=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=r("ul"),j=r("li"),ne=n("a single Tensor with "),Q=r("code"),ie=n("input_ids"),se=n(" only and nothing else: "),O=r("code"),he=n("model(inputs_ids)"),le=l(),q=r("li"),ue=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r("code"),G=n("model([input_ids, attention_mask])"),fe=n(" or "),I=r("code"),me=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),x=r("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=r("code"),Z=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=a(c,"P",{});var k=i(p);F=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(c),g=a(c,"UL",{});var V=i(g);T=a(V,"LI",{});var $e=i(T);v=s($e,"having all inputs as keyword arguments (like PyTorch models), or"),$e.forEach(t),_=d(V),E=a(V,"LI",{});var Ce=i(E);de=s(Ce,"having all inputs as a list, tuple or dict in the first positional arguments."),Ce.forEach(t),V.forEach(t),K=d(c),M=a(c,"P",{});var N=i(M);X=s(N,"This second option is useful when using "),S=a(N,"CODE",{});var we=i(S);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),ce=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),L=a(N,"CODE",{});var oe=i(L);pe=s(oe,"model(inputs)"),oe.forEach(t),ae=s(N,"."),N.forEach(t),U=d(c),A=a(c,"P",{});var Te=i(A);te=s(Te,`If you choose this second option, there are three 
possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),J=d(c),z=a(c,"UL",{});var D=i(z);j=a(D,"LI",{});var P=i(j);ne=s(P,"a single Tensor with "),Q=a(P,"CODE",{});var ye=i(Q);ie=s(ye,"input_ids"),ye.forEach(t),se=s(P," only and nothing else: "),O=a(P,"CODE",{});var be=i(O);he=s(be,"model(inputs_ids)"),be.forEach(t),P.forEach(t),le=d(D),q=a(D,"LI",{});var Y=i(q);ue=s(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=a(Y,"CODE",{});var Be=i(R);G=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),fe=s(Y," or "),I=a(Y,"CODE",{});var ke=i(I);me=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Y.forEach(t),ge=d(D),x=a(D,"LI",{});var ve=i(x);_e=s(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=a(ve,"CODE",{});var Fe=i(W);Z=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ve.forEach(t),D.forEach(t)},m(c,k){u(c,p,k),e(p,F),u(c,f,k),u(c,g,k),e(g,T),e(T,v),e(g,_),e(g,E),e(E,de),u(c,K,k),u(c,M,k),e(M,X),e(M,S),e(S,ee),e(M,ce),e(M,L),e(L,pe),e(M,ae),u(c,U,k),u(c,A,k),e(A,te),u(c,J,k),u(c,z,k),e(z,j),e(j,ne),e(j,Q),e(Q,ie),e(j,se),e(j,O),e(O,he),e(z,le),e(z,q),e(q,ue),e(q,R),e(R,G),e(q,fe),e(q,I),e(I,me),e(z,ge),e(z,x),e(x,_e),e(x,W),e(W,Z)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(K),c&&t(M),c&&t(U),c&&t(A),c&&t(J),c&&t(z)}}}function f_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function m_(H){let p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z;return{c(){p=r("p"),F=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),T=r("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),K=l(),M=r("p"),X=n("This second option is useful when using "),S=r("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),L=r("code"),pe=n("model(inputs)"),ae=n("."),U=l(),A=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=r("ul"),j=r("li"),ne=n("a single Tensor with "),Q=r("code"),ie=n("input_ids"),se=n(" only and nothing else: "),O=r("code"),he=n("model(inputs_ids)"),le=l(),q=r("li"),ue=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r("code"),G=n("model([input_ids, attention_mask])"),fe=n(" or "),I=r("code"),me=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),x=r("li"),_e=n(`a dictionary with one or several input Tensors associated to 
the input names given in the docstring: `),W=r("code"),Z=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=a(c,"P",{});var k=i(p);F=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(c),g=a(c,"UL",{});var V=i(g);T=a(V,"LI",{});var $e=i(T);v=s($e,"having all inputs as keyword arguments (like PyTorch models), or"),$e.forEach(t),_=d(V),E=a(V,"LI",{});var Ce=i(E);de=s(Ce,"having all inputs as a list, tuple or dict in the first positional arguments."),Ce.forEach(t),V.forEach(t),K=d(c),M=a(c,"P",{});var N=i(M);X=s(N,"This second option is useful when using "),S=a(N,"CODE",{});var we=i(S);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),ce=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),L=a(N,"CODE",{});var oe=i(L);pe=s(oe,"model(inputs)"),oe.forEach(t),ae=s(N,"."),N.forEach(t),U=d(c),A=a(c,"P",{});var Te=i(A);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),J=d(c),z=a(c,"UL",{});var D=i(z);j=a(D,"LI",{});var P=i(j);ne=s(P,"a single Tensor with "),Q=a(P,"CODE",{});var ye=i(Q);ie=s(ye,"input_ids"),ye.forEach(t),se=s(P," only and nothing else: "),O=a(P,"CODE",{});var be=i(O);he=s(be,"model(inputs_ids)"),be.forEach(t),P.forEach(t),le=d(D),q=a(D,"LI",{});var Y=i(q);ue=s(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=a(Y,"CODE",{});var Be=i(R);G=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),fe=s(Y," or "),I=a(Y,"CODE",{});var ke=i(I);me=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Y.forEach(t),ge=d(D),x=a(D,"LI",{});var ve=i(x);_e=s(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=a(ve,"CODE",{});var Fe=i(W);Z=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ve.forEach(t),D.forEach(t)},m(c,k){u(c,p,k),e(p,F),u(c,f,k),u(c,g,k),e(g,T),e(T,v),e(g,_),e(g,E),e(E,de),u(c,K,k),u(c,M,k),e(M,X),e(M,S),e(S,ee),e(M,ce),e(M,L),e(L,pe),e(M,ae),u(c,U,k),u(c,A,k),e(A,te),u(c,J,k),u(c,z,k),e(z,j),e(j,ne),e(j,Q),e(Q,ie),e(j,se),e(j,O),e(O,he),e(z,le),e(z,q),e(q,ue),e(q,R),e(R,G),e(q,fe),e(q,I),e(I,me),e(z,ge),e(z,x),e(x,_e),e(x,W),e(W,Z)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(K),c&&t(M),c&&t(U),c&&t(A),c&&t(J),c&&t(z)}}}function g_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function __(H){let p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z;return{c(){p=r("p"),F=n("TF 2.0 models accepts two formats as inputs:"),f=l(),g=r("ul"),T=r("li"),v=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),E=r("li"),de=n("having all inputs as a 
list, tuple or dict in the first positional arguments."),K=l(),M=r("p"),X=n("This second option is useful when using "),S=r("code"),ee=n("tf.keras.Model.fit"),ce=n(` method which currently requires having all the tensors in the first argument of the model call function: `),L=r("code"),pe=n("model(inputs)"),ae=n("."),U=l(),A=r("p"),te=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=r("ul"),j=r("li"),ne=n("a single Tensor with "),Q=r("code"),ie=n("input_ids"),se=n(" only and nothing else: "),O=r("code"),he=n("model(inputs_ids)"),le=l(),q=r("li"),ue=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=r("code"),G=n("model([input_ids, attention_mask])"),fe=n(" or "),I=r("code"),me=n("model([input_ids, attention_mask, token_type_ids])"),ge=l(),x=r("li"),_e=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=r("code"),Z=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=a(c,"P",{});var k=i(p);F=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),f=d(c),g=a(c,"UL",{});var V=i(g);T=a(V,"LI",{});var $e=i(T);v=s($e,"having all inputs as keyword arguments (like PyTorch models), or"),$e.forEach(t),_=d(V),E=a(V,"LI",{});var Ce=i(E);de=s(Ce,"having all inputs as a list, tuple or dict in the first positional arguments."),Ce.forEach(t),V.forEach(t),K=d(c),M=a(c,"P",{});var N=i(M);X=s(N,"This second option is useful when using "),S=a(N,"CODE",{});var we=i(S);ee=s(we,"tf.keras.Model.fit"),we.forEach(t),ce=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),L=a(N,"CODE",{});var oe=i(L);pe=s(oe,"model(inputs)"),oe.forEach(t),ae=s(N,"."),N.forEach(t),U=d(c),A=a(c,"P",{});var Te=i(A);te=s(Te,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Te.forEach(t),J=d(c),z=a(c,"UL",{});var D=i(z);j=a(D,"LI",{});var P=i(j);ne=s(P,"a single Tensor with "),Q=a(P,"CODE",{});var ye=i(Q);ie=s(ye,"input_ids"),ye.forEach(t),se=s(P," only and nothing else: "),O=a(P,"CODE",{});var be=i(O);he=s(be,"model(inputs_ids)"),be.forEach(t),P.forEach(t),le=d(D),q=a(D,"LI",{});var Y=i(q);ue=s(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),R=a(Y,"CODE",{});var Be=i(R);G=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),fe=s(Y," or "),I=a(Y,"CODE",{});var ke=i(I);me=s(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(t),Y.forEach(t),ge=d(D),x=a(D,"LI",{});var ve=i(x);_e=s(ve,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=a(ve,"CODE",{});var Fe=i(W);Z=s(Fe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Fe.forEach(t),ve.forEach(t),D.forEach(t)},m(c,k){u(c,p,k),e(p,F),u(c,f,k),u(c,g,k),e(g,T),e(T,v),e(g,_),e(g,E),e(E,de),u(c,K,k),u(c,M,k),e(M,X),e(M,S),e(S,ee),e(M,ce),e(M,L),e(L,pe),e(M,ae),u(c,U,k),u(c,A,k),e(A,te),u(c,J,k),u(c,z,k),e(z,j),e(j,ne),e(j,Q),e(Q,ie),e(j,se),e(j,O),e(O,he),e(z,le),e(z,q),e(q,ue),e(q,R),e(R,G),e(q,fe),e(q,I),e(I,me),e(z,ge),e(z,x),e(x,_e),e(x,W),e(W,Z)},d(c){c&&t(p),c&&t(f),c&&t(g),c&&t(K),c&&t(M),c&&t(U),c&&t(A),c&&t(J),c&&t(z)}}}function v_(H){let p,F,f,g,T;return{c(){p=r("p"),F=n("Although the recipe for forward pass needs to be defined within this 
function, one should call the "),f=r("code"),g=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=a(v,"P",{});var _=i(p);F=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(_,"CODE",{});var E=i(f);g=s(E,"Module"),E.forEach(t),T=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){u(v,p,_),e(p,F),e(p,f),e(f,g),e(p,T)},d(v){v&&t(p)}}}function T_(H){let p,F,f,g,T,v,_,E,de,K,M,X,S,ee,ce,L,pe,ae,U,A,te,J,z,j,ne,Q,ie,se,O,he,le,q,ue,R,G,fe,I,me,ge,x,_e,W,Z,c,k,V,$e,Ce,N,we,oe,Te,D,P,ye,be,Y,Be,ke,ve,Fe,As,Cl,wl,Ns,$l,yl,Bl,Er,Fl,El,Yo,ci,Bt,oo,Mr,Uo,Ml,zr,zl,pi,ze,Vo,ql,ct,jl,Ds,xl,Pl,Is,Al,Nl,Ss,Dl,Il,Sl,ut,Ko,Ll,qr,Ol,Rl,Jo,Ls,Wl,jr,Hl,Ql,Os,Yl,xr,Ul,Vl,no,Go,Kl,Zo,Jl,Pr,Gl,Zl,Xl,ot,Xo,ed,Ar,td,od,en,nd,Ft,sd,Nr,rd,ad,Dr,id,ld,dd,Ir,hi,Et,so,Sr,tn,cd,Lr,pd,ui,Ge,on,hd,nn,ud,Or,fd,md,gd,ro,Rs,_d,vd,Ws,Td,kd,bd,sn,Cd,Hs,wd,$d,fi,Mt,ao,Rr,rn,yd,Wr,Bd,mi,pt,an,Fd,ln,Ed,dn,Md,zd,qd,Se,cn,jd,zt,xd,Qs,Pd,Ad,Hr,Nd,Dd,Id,io,Sd,Qr,Ld,Od,pn,gi,qt,lo,Yr,hn,Rd,Ur,Wd,_i,ht,un,Hd,jt,Qd,Vr,Yd,Ud,fn,Vd,Kd,Jd,Le,mn,Gd,xt,Zd,Ys,Xd,ec,Kr,tc,oc,nc,co,sc,Jr,rc,ac,gn,vi,Pt,po,Gr,_n,ic,Zr,lc,Ti,Ze,vn,dc,Xr,cc,pc,Tn,hc,kn,uc,fc,mc,Ee,bn,gc,At,_c,Us,vc,Tc,ea,kc,bc,Cc,ho,wc,ta,$c,yc,Cn,Bc,oa,Fc,Ec,wn,ki,Nt,uo,na,$n,Mc,sa,zc,bi,Xe,yn,qc,ra,jc,xc,Bn,Pc,Fn,Ac,Nc,Dc,Oe,En,Ic,Dt,Sc,Vs,Lc,Oc,aa,Rc,Wc,Hc,fo,Qc,ia,Yc,Uc,Mn,Ci,It,mo,la,zn,Vc,da,Kc,wi,et,qn,Jc,ca,Gc,Zc,jn,Xc,xn,ep,tp,op,Re,Pn,np,St,sp,Ks,rp,ap,pa,ip,lp,dp,go,cp,ha,pp,hp,An,$i,Lt,_o,ua,Nn,up,fa,fp,yi,tt,Dn,mp,Ot,gp,ma,_p,vp,ga,Tp,kp,bp,In,Cp,Sn,wp,$p,yp,We,Ln,Bp,Rt,Fp,Js,Ep,Mp,_a,zp,qp,jp,vo,xp,va,Pp,Ap,On,Bi,Wt,To,Ta,Rn,Np,ka,Dp,Fi,qe,Wn,Ip,ba,Sp,Lp,Hn,Op,Gs,Rp,Wp,Hp,Qn,Qp,Yn,Yp,Up,Vp,ko,Kp,He,Un,Jp,Ht,Gp,Zs,Zp,Xp,Ca,eh,th,oh,bo,nh,wa,sh,rh,Vn,Ei,Qt,Co,$a,Kn,ah,ya,ih,Mi,je,Jn,lh,Gn,dh,Ba,ch,ph,hh,Zn,uh,Xs,fh,mh,gh,Xn,_h,es,vh,Th,kh,wo,bh,Qe,ts,Ch,Yt,wh,er,$h,yh,Fa,Bh,Fh,Eh,$o,Mh,Ea,zh,qh,os,zi,Ut,yo,Ma,ns,jh,za,xh,qi,xe,ss,Ph,qa,Ah,Nh,rs,Dh,tr,Ih,Sh,Lh,as,Oh,is,Rh,Wh,Hh,Bo,Qh,Ye,ls,Yh,Vt,Uh,or,Vh,Kh,ja,Jh,Gh,Zh,Fo,Xh,xa,eu,tu,ds,ji,Kt,Eo,Pa,cs,ou,Aa,nu,xi,Pe,ps,su,Na,ru,au,hs,iu,nr,lu,du,cu,us,pu,fs,hu,uu,fu,Mo,mu,Ue,ms,gu,Jt,_u,sr,vu,Tu,Da,ku,bu,Cu,zo,wu,Ia,$u,yu,gs,Pi,Gt,qo,Sa,_s,Bu,La,Fu,Ai,Ae,vs,Eu,Oa,Mu,zu,Ts,qu,rr,ju,xu,Pu,ks,Au,bs,Nu,Du,Iu,jo,Su,Ve,Cs,Lu,Zt,Ou,ar,Ru,Wu,Ra,Hu,Qu,Yu,xo,Uu,Wa,Vu,Ku,ws,Ni,Xt,Po,Ha,$s,Ju,Qa,Gu,Di,Ne,ys,Zu,eo,Xu,Ya,ef,tf,Ua,of,nf,sf,Bs,rf,ir,af,lf,df,Fs,cf,Es,pf,hf,uf,Ao,ff,Ke,Ms,mf,to,gf,lr,_f,vf,Va,Tf,kf,bf,No,Cf,Ka,wf,$f,zs,Ii;return v=new De({}),ee=new De({}),V=new De({}),Te=new re({props:{name:"class transformers.ConvBertConfig",anchor:"transformers.ConvBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"is_encoder_decoder",val:" = False"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"embedding_size",val:" = 
768"},{name:"head_ratio",val:" = 2"},{name:"conv_kernel_size",val:" = 9"},{name:"num_groups",val:" = 1"},{name:"classifier_dropout",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/configuration_convbert.py#L31",parametersDescription:[{anchor:"transformers.ConvBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertModel">TFConvBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.ConvBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.ConvBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.ConvBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.ConvBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.ConvBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.ConvBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.ConvBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.ConvBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.ConvBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertModel">TFConvBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.ConvBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.ConvBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.ConvBertConfig.head_ratio",description:`<strong>head_ratio</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Ratio gamma to reduce the number of attention heads.`,name:"head_ratio"},{anchor:"transformers.ConvBertConfig.num_groups",description:`<strong>num_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of groups for grouped linear layers for ConvBert model`,name:"num_groups"},{anchor:"transformers.ConvBertConfig.conv_kernel_size",description:`<strong>conv_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 9) &#x2014; The size of the convolutional kernel.`,name:"conv_kernel_size"},{anchor:"transformers.ConvBertConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),Yo=new Ie({props:{code:`from transformers import ConvBertModel, ConvBertConfig # Initializing a ConvBERT convbert-base-uncased style configuration configuration = ConvBertConfig() # Initializing a model from the convbert-base-uncased style configuration model = ConvBertModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertModel, ConvBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ConvBERT convbert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ConvBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the convbert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Uo=new De({}),Vo=new re({props:{name:"class transformers.ConvBertTokenizer",anchor:"transformers.ConvBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = 
'[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/tokenization_convbert.py#L46"}}),Ko=new re({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L247",parametersDescription:[{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Go=new re({props:{name:"get_special_tokens_mask",anchor:"transformers.BertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L272",parametersDescription:[{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Xo=new re({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L300",parametersDescription:[{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> 
(<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),en=new Ie({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),tn=new De({}),on=new re({props:{name:"class transformers.ConvBertTokenizerFast",anchor:"transformers.ConvBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/tokenization_convbert_fast.py#L47"}}),rn=new De({}),an=new re({props:{name:"class transformers.ConvBertModel",anchor:"transformers.ConvBertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L766",parametersDescription:[{anchor:"transformers.ConvBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),cn=new re({props:{name:"forward",anchor:"transformers.ConvBertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L793",parametersDescription:[{anchor:"transformers.ConvBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConvBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:`,name:"attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),io=new Me({props:{$$slots:{default:[t_]},$$scope:{ctx:H}}}),pn=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertModel import torch tokenizer 
= ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertModel.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertModel.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),hn=new De({}),un=new re({props:{name:"class transformers.ConvBertForMaskedLM",anchor:"transformers.ConvBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L880",parametersDescription:[{anchor:"transformers.ConvBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mn=new re({props:{name:"forward",anchor:"transformers.ConvBertForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L897",parametersDescription:[{anchor:"transformers.ConvBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConvBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:`,name:"attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),co=new Me({props:{$$slots:{default:[o_]},$$scope:{ctx:H}}}),gn=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertForMaskedLM import torch tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertForMaskedLM.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),_n=new De({}),vn=new re({props:{name:"class transformers.ConvBertForSequenceClassification",anchor:"transformers.ConvBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L989",parametersDescription:[{anchor:"transformers.ConvBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bn=new re({props:{name:"forward",anchor:"transformers.ConvBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1000",parametersDescription:[{anchor:"transformers.ConvBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConvBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:`,name:"attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ho=new Me({props:{$$slots:{default:[n_]},$$scope:{ctx:H}}}),Cn=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertForSequenceClassification import torch tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertForSequenceClassification.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = 
outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),wn=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertForSequenceClassification import torch tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertForSequenceClassification.from_pretrained('YituTech/conv-bert-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$n=new De({}),yn=new re({props:{name:"class 
transformers.ConvBertForMultipleChoice",anchor:"transformers.ConvBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1084",parametersDescription:[{anchor:"transformers.ConvBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),En=new re({props:{name:"forward",anchor:"transformers.ConvBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1095",parametersDescription:[{anchor:"transformers.ConvBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConvBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:`,name:"attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),fo=new Me({props:{$$slots:{default:[s_]},$$scope:{ctx:H}}}),Mn=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertForMultipleChoice import torch tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertForMultipleChoice.from_pretrained('YituTech/conv-bert-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),zn=new De({}),qn=new re({props:{name:"class transformers.ConvBertForTokenClassification",anchor:"transformers.ConvBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1177",parametersDescription:[{anchor:"transformers.ConvBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Pn=new re({props:{name:"forward",anchor:"transformers.ConvBertForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1192",parametersDescription:[{anchor:"transformers.ConvBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConvBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:`,name:"attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),go=new Me({props:{$$slots:{default:[r_]},$$scope:{ctx:H}}}),An=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertForTokenClassification import torch tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertForTokenClassification.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Nn=new De({}),Dn=new re({props:{name:"class transformers.ConvBertForQuestionAnswering",anchor:"transformers.ConvBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1268",parametersDescription:[{anchor:"transformers.ConvBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ln=new re({props:{name:"forward",anchor:"transformers.ConvBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_convbert.py#L1279",parametersDescription:[{anchor:"transformers.ConvBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConvBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:`,name:"attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vo=new Me({props:{$$slots:{default:[a_]},$$scope:{ctx:H}}}),On=new Ie({props:{code:`from transformers import ConvBertTokenizer, ConvBertForQuestionAnswering import torch tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = ConvBertForQuestionAnswering.from_pretrained('YituTech/conv-bert-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Rn=new De({}),Wn=new re({props:{name:"class transformers.TFConvBertModel",anchor:"transformers.TFConvBertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L747",parametersDescription:[{anchor:"transformers.TFConvBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ko=new Me({props:{$$slots:{default:[i_]},$$scope:{ctx:H}}}),Un=new re({props:{name:"call",anchor:"transformers.TFConvBertModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L753",parametersDescription:[{anchor:"transformers.TFConvBertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFConvBertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFConvBertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFConvBertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFConvBertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFConvBertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFConvBertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFConvBertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFConvBertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFConvBertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),bo=new Me({props:{$$slots:{default:[l_]},$$scope:{ctx:H}}}),Vn=new Ie({props:{code:`from transformers import ConvBertTokenizer, TFConvBertModel import tensorflow as tf tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = 
TFConvBertModel.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertModel.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Kn=new De({}),Jn=new re({props:{name:"class transformers.TFConvBertForMaskedLM",anchor:"transformers.TFConvBertForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L864",parametersDescription:[{anchor:"transformers.TFConvBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),wo=new Me({props:{$$slots:{default:[d_]},$$scope:{ctx:H}}}),ts=new re({props:{name:"call",anchor:"transformers.TFConvBertForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L885",parametersDescription:[{anchor:"transformers.TFConvBertForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFConvBertForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFConvBertForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFConvBertForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFConvBertForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFConvBertForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFConvBertForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFConvBertForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFConvBertForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFConvBertForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFConvBertForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),$o=new Me({props:{$$slots:{default:[c_]},$$scope:{ctx:H}}}),os=new Ie({props:{code:`from transformers import ConvBertTokenizer, TFConvBertForMaskedLM import tensorflow as tf tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = TFConvBertForMaskedLM.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ns=new De({}),ss=new re({props:{name:"class transformers.TFConvBertForSequenceClassification",anchor:"transformers.TFConvBertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1001",parametersDescription:[{anchor:"transformers.TFConvBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bo=new Me({props:{$$slots:{default:[p_]},$$scope:{ctx:H}}}),ls=new re({props:{name:"call",anchor:"transformers.TFConvBertForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1008",parametersDescription:[{anchor:"transformers.TFConvBertForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFConvBertForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFConvBertForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFConvBertForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFConvBertForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFConvBertForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFConvBertForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFConvBertForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFConvBertForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFConvBertForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFConvBertForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Fo=new Me({props:{$$slots:{default:[h_]},$$scope:{ctx:H}}}),ds=new Ie({props:{code:`from transformers import ConvBertTokenizer, TFConvBertForSequenceClassification import tensorflow as tf tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = TFConvBertForSequenceClassification.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),cs=new De({}),ps=new re({props:{name:"class transformers.TFConvBertForMultipleChoice",anchor:"transformers.TFConvBertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1092",parametersDescription:[{anchor:"transformers.TFConvBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mo=new Me({props:{$$slots:{default:[u_]},$$scope:{ctx:H}}}),ms=new re({props:{name:"call",anchor:"transformers.TFConvBertForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1114",parametersDescription:[{anchor:"transformers.TFConvBertForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFConvBertForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFConvBertForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFConvBertForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFConvBertForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFConvBertForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFConvBertForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFConvBertForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFConvBertForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFConvBertForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFConvBertForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),zo=new Me({props:{$$slots:{default:[f_]},$$scope:{ctx:H}}}),gs=new Ie({props:{code:`from transformers import ConvBertTokenizer, TFConvBertForMultipleChoice import tensorflow as tf tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = TFConvBertForMultipleChoice.from_pretrained('YituTech/conv-bert-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),_s=new De({}),vs=new re({props:{name:"class transformers.TFConvBertForTokenClassification",anchor:"transformers.TFConvBertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1239",parametersDescription:[{anchor:"transformers.TFConvBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),jo=new Me({props:{$$slots:{default:[m_]},$$scope:{ctx:H}}}),Cs=new re({props:{name:"call",anchor:"transformers.TFConvBertForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1253",parametersDescription:[{anchor:"transformers.TFConvBertForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFConvBertForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFConvBertForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFConvBertForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFConvBertForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFConvBertForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFConvBertForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFConvBertForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFConvBertForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFConvBertForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFConvBertForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),xo=new Me({props:{$$slots:{default:[g_]},$$scope:{ctx:H}}}),ws=new Ie({props:{code:`from transformers import ConvBertTokenizer, TFConvBertForTokenClassification import tensorflow as tf tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = TFConvBertForTokenClassification.from_pretrained('YituTech/conv-bert-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$s=new De({}),ys=new re({props:{name:"class transformers.TFConvBertForQuestionAnswering",anchor:"transformers.TFConvBertForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1337",parametersDescription:[{anchor:"transformers.TFConvBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ao=new Me({props:{$$slots:{default:[__]},$$scope:{ctx:H}}}),Ms=new re({props:{name:"call",anchor:"transformers.TFConvBertForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/convbert/modeling_tf_convbert.py#L1347",parametersDescription:[{anchor:"transformers.TFConvBertForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFConvBertForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),No=new Me({props:{$$slots:{default:[v_]},$$scope:{ctx:H}}}),zs=new Ie({props:{code:`from transformers import ConvBertTokenizer, TFConvBertForQuestionAnswering import tensorflow as tf tokenizer = ConvBertTokenizer.from_pretrained('YituTech/conv-bert-base') model = TFConvBertForQuestionAnswering.from_pretrained('YituTech/conv-bert-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow 
<span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;YituTech/conv-bert-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){p=r("meta"),F=l(),f=r("h1"),g=r("a"),T=r("span"),b(v.$$.fragment),_=l(),E=r("span"),de=n("ConvBERT"),K=l(),M=r("h2"),X=r("a"),S=r("span"),b(ee.$$.fragment),ce=l(),L=r("span"),pe=n("Overview"),ae=l(),U=r("p"),A=n("The ConvBERT model was proposed in "),te=r("a"),J=n("ConvBERT: Improving BERT with Span-based Dynamic Convolution"),z=n(` by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.`),j=l(),ne=r("p"),Q=n("The abstract from the paper is the following:"),ie=l(),se=r("p"),O=r("em"),he=n(`Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost. Code and pre-trained models will be released.`),le=l(),q=r("p"),ue=n("ConvBERT training tips are similar to those of BERT."),R=l(),G=r("p"),fe=n("This model was contributed by "),I=r("a"),me=n("abhishek"),ge=n(`. 
The original implementation can be found here: `),x=r("a"),_e=n("https://github.com/yitu-opensource/ConvBert"),W=l(),Z=r("h2"),c=r("a"),k=r("span"),b(V.$$.fragment),$e=l(),Ce=r("span"),N=n("ConvBertConfig"),we=l(),oe=r("div"),b(Te.$$.fragment),D=l(),P=r("p"),ye=n("This is the configuration class to store the configuration of a "),be=r("a"),Y=n("ConvBertModel"),Be=n(`. It is used to instantiate an ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvBERT `),ke=r("a"),ve=n("conv-bert-base"),Fe=n(` architecture. Configuration objects inherit from `),As=r("a"),Cl=n("PretrainedConfig"),wl=n(` and can be used to control the model outputs. Read the documentation from `),Ns=r("a"),$l=n("PretrainedConfig"),yl=n(" for more information."),Bl=l(),Er=r("p"),Fl=n("Example:"),El=l(),b(Yo.$$.fragment),ci=l(),Bt=r("h2"),oo=r("a"),Mr=r("span"),b(Uo.$$.fragment),Ml=l(),zr=r("span"),zl=n("ConvBertTokenizer"),pi=l(),ze=r("div"),b(Vo.$$.fragment),ql=l(),ct=r("p"),jl=n("Construct a ConvBERT tokenizer. "),Ds=r("a"),xl=n("ConvBertTokenizer"),Pl=n(` is identical to `),Is=r("a"),Al=n("BertTokenizer"),Nl=n(` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass `),Ss=r("a"),Dl=n("BertTokenizer"),Il=n(" for usage examples and documentation concerning parameters."),Sl=l(),ut=r("div"),b(Ko.$$.fragment),Ll=l(),qr=r("p"),Ol=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),Rl=l(),Jo=r("ul"),Ls=r("li"),Wl=n("single sequence: "),jr=r("code"),Hl=n("[CLS] X [SEP]"),Ql=l(),Os=r("li"),Yl=n("pair of sequences: "),xr=r("code"),Ul=n("[CLS] A [SEP] B [SEP]"),Vl=l(),no=r("div"),b(Go.$$.fragment),Kl=l(),Zo=r("p"),Jl=n(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Pr=r("code"),Gl=n("prepare_for_model"),Zl=n(" method."),Xl=l(),ot=r("div"),b(Xo.$$.fragment),ed=l(),Ar=r("p"),td=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:`),od=l(),b(en.$$.fragment),nd=l(),Ft=r("p"),sd=n("If "),Nr=r("code"),rd=n("token_ids_1"),ad=n(" is "),Dr=r("code"),id=n("None"),ld=n(", this method only returns the first portion of the mask (0s)."),dd=l(),Ir=r("div"),hi=l(),Et=r("h2"),so=r("a"),Sr=r("span"),b(tn.$$.fragment),cd=l(),Lr=r("span"),pd=n("ConvBertTokenizerFast"),ui=l(),Ge=r("div"),b(on.$$.fragment),hd=l(),nn=r("p"),ud=n("Construct a \u201Cfast\u201D ConvBERT tokenizer (backed by HuggingFace\u2019s "),Or=r("em"),fd=n("tokenizers"),md=n(" library)."),gd=l(),ro=r("p"),Rs=r("a"),_d=n("ConvBertTokenizerFast"),vd=n(" is identical to "),Ws=r("a"),Td=n("BertTokenizerFast"),kd=n(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),bd=l(),sn=r("p"),Cd=n("Refer to superclass "),Hs=r("a"),wd=n("BertTokenizerFast"),$d=n(` for usage examples and documentation concerning parameters.`),fi=l(),Mt=r("h2"),ao=r("a"),Rr=r("span"),b(rn.$$.fragment),yd=l(),Wr=r("span"),Bd=n("ConvBertModel"),mi=l(),pt=r("div"),b(an.$$.fragment),Fd=l(),ln=r("p"),Ed=n(`The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),dn=r("a"),Md=n("torch.nn.Module"),zd=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qd=l(),Se=r("div"),b(cn.$$.fragment),jd=l(),zt=r("p"),xd=n("The "),Qs=r("a"),Pd=n("ConvBertModel"),Ad=n(" forward method, overrides the "),Hr=r("code"),Nd=n("__call__"),Dd=n(" special method."),Id=l(),b(io.$$.fragment),Sd=l(),Qr=r("p"),Ld=n("Example:"),Od=l(),b(pn.$$.fragment),gi=l(),qt=r("h2"),lo=r("a"),Yr=r("span"),b(hn.$$.fragment),Rd=l(),Ur=r("span"),Wd=n("ConvBertForMaskedLM"),_i=l(),ht=r("div"),b(un.$$.fragment),Hd=l(),jt=r("p"),Qd=n("ConvBERT Model with a "),Vr=r("code"),Yd=n("language modeling"),Ud=n(` head on top. This model is a PyTorch `),fn=r("a"),Vd=n("torch.nn.Module"),Kd=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Jd=l(),Le=r("div"),b(mn.$$.fragment),Gd=l(),xt=r("p"),Zd=n("The "),Ys=r("a"),Xd=n("ConvBertForMaskedLM"),ec=n(" forward method, overrides the "),Kr=r("code"),tc=n("__call__"),oc=n(" special method."),nc=l(),b(co.$$.fragment),sc=l(),Jr=r("p"),rc=n("Example:"),ac=l(),b(gn.$$.fragment),vi=l(),Pt=r("h2"),po=r("a"),Gr=r("span"),b(_n.$$.fragment),ic=l(),Zr=r("span"),lc=n("ConvBertForSequenceClassification"),Ti=l(),Ze=r("div"),b(vn.$$.fragment),dc=l(),Xr=r("p"),cc=n(`ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),pc=l(),Tn=r("p"),hc=n("This model is a PyTorch "),kn=r("a"),uc=n("torch.nn.Module"),fc=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mc=l(),Ee=r("div"),b(bn.$$.fragment),gc=l(),At=r("p"),_c=n("The "),Us=r("a"),vc=n("ConvBertForSequenceClassification"),Tc=n(" forward method, overrides the "),ea=r("code"),kc=n("__call__"),bc=n(" special method."),Cc=l(),b(ho.$$.fragment),wc=l(),ta=r("p"),$c=n("Example of single-label classification:"),yc=l(),b(Cn.$$.fragment),Bc=l(),oa=r("p"),Fc=n("Example of multi-label classification:"),Ec=l(),b(wn.$$.fragment),ki=l(),Nt=r("h2"),uo=r("a"),na=r("span"),b($n.$$.fragment),Mc=l(),sa=r("span"),zc=n("ConvBertForMultipleChoice"),bi=l(),Xe=r("div"),b(yn.$$.fragment),qc=l(),ra=r("p"),jc=n(`ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),xc=l(),Bn=r("p"),Pc=n("This model is a PyTorch "),Fn=r("a"),Ac=n("torch.nn.Module"),Nc=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dc=l(),Oe=r("div"),b(En.$$.fragment),Ic=l(),Dt=r("p"),Sc=n("The "),Vs=r("a"),Lc=n("ConvBertForMultipleChoice"),Oc=n(" forward method, overrides the "),aa=r("code"),Rc=n("__call__"),Wc=n(" special method."),Hc=l(),b(fo.$$.fragment),Qc=l(),ia=r("p"),Yc=n("Example:"),Uc=l(),b(Mn.$$.fragment),Ci=l(),It=r("h2"),mo=r("a"),la=r("span"),b(zn.$$.fragment),Vc=l(),da=r("span"),Kc=n("ConvBertForTokenClassification"),wi=l(),et=r("div"),b(qn.$$.fragment),Jc=l(),ca=r("p"),Gc=n(`ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zc=l(),jn=r("p"),Xc=n("This model is a PyTorch "),xn=r("a"),ep=n("torch.nn.Module"),tp=n(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),op=l(),Re=r("div"),b(Pn.$$.fragment),np=l(),St=r("p"),sp=n("The "),Ks=r("a"),rp=n("ConvBertForTokenClassification"),ap=n(" forward method, overrides the "),pa=r("code"),ip=n("__call__"),lp=n(" special method."),dp=l(),b(go.$$.fragment),cp=l(),ha=r("p"),pp=n("Example:"),hp=l(),b(An.$$.fragment),$i=l(),Lt=r("h2"),_o=r("a"),ua=r("span"),b(Nn.$$.fragment),up=l(),fa=r("span"),fp=n("ConvBertForQuestionAnswering"),yi=l(),tt=r("div"),b(Dn.$$.fragment),mp=l(),Ot=r("p"),gp=n(`ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),ma=r("code"),_p=n("span start logits"),vp=n(" and "),ga=r("code"),Tp=n("span end logits"),kp=n(")."),bp=l(),In=r("p"),Cp=n("This model is a PyTorch "),Sn=r("a"),wp=n("torch.nn.Module"),$p=n(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yp=l(),We=r("div"),b(Ln.$$.fragment),Bp=l(),Rt=r("p"),Fp=n("The "),Js=r("a"),Ep=n("ConvBertForQuestionAnswering"),Mp=n(" forward method, overrides the "),_a=r("code"),zp=n("__call__"),qp=n(" special method."),jp=l(),b(vo.$$.fragment),xp=l(),va=r("p"),Pp=n("Example:"),Ap=l(),b(On.$$.fragment),Bi=l(),Wt=r("h2"),To=r("a"),Ta=r("span"),b(Rn.$$.fragment),Np=l(),ka=r("span"),Dp=n("TFConvBertModel"),Fi=l(),qe=r("div"),b(Wn.$$.fragment),Ip=l(),ba=r("p"),Sp=n("The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top."),Lp=l(),Hn=r("p"),Op=n("This model inherits from "),Gs=r("a"),Rp=n("TFPreTrainedModel"),Wp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hp=l(),Qn=r("p"),Qp=n("This model is also a "),Yn=r("a"),Yp=n("tf.keras.Model"),Up=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vp=l(),b(ko.$$.fragment),Kp=l(),He=r("div"),b(Un.$$.fragment),Jp=l(),Ht=r("p"),Gp=n("The "),Zs=r("a"),Zp=n("TFConvBertModel"),Xp=n(" forward method, overrides the "),Ca=r("code"),eh=n("__call__"),th=n(" special method."),oh=l(),b(bo.$$.fragment),nh=l(),wa=r("p"),sh=n("Example:"),rh=l(),b(Vn.$$.fragment),Ei=l(),Qt=r("h2"),Co=r("a"),$a=r("span"),b(Kn.$$.fragment),ah=l(),ya=r("span"),ih=n("TFConvBertForMaskedLM"),Mi=l(),je=r("div"),b(Jn.$$.fragment),lh=l(),Gn=r("p"),dh=n("ConvBERT Model with a "),Ba=r("code"),ch=n("language modeling"),ph=n(" head on top."),hh=l(),Zn=r("p"),uh=n("This model inherits from "),Xs=r("a"),fh=n("TFPreTrainedModel"),mh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gh=l(),Xn=r("p"),_h=n("This model is also a "),es=r("a"),vh=n("tf.keras.Model"),Th=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),kh=l(),b(wo.$$.fragment),bh=l(),Qe=r("div"),b(ts.$$.fragment),Ch=l(),Yt=r("p"),wh=n("The "),er=r("a"),$h=n("TFConvBertForMaskedLM"),yh=n(" forward method, overrides the "),Fa=r("code"),Bh=n("__call__"),Fh=n(" special method."),Eh=l(),b($o.$$.fragment),Mh=l(),Ea=r("p"),zh=n("Example:"),qh=l(),b(os.$$.fragment),zi=l(),Ut=r("h2"),yo=r("a"),Ma=r("span"),b(ns.$$.fragment),jh=l(),za=r("span"),xh=n("TFConvBertForSequenceClassification"),qi=l(),xe=r("div"),b(ss.$$.fragment),Ph=l(),qa=r("p"),Ah=n("ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks."),Nh=l(),rs=r("p"),Dh=n("This model inherits from "),tr=r("a"),Ih=n("TFPreTrainedModel"),Sh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lh=l(),as=r("p"),Oh=n("This model is also a "),is=r("a"),Rh=n("tf.keras.Model"),Wh=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hh=l(),b(Bo.$$.fragment),Qh=l(),Ye=r("div"),b(ls.$$.fragment),Yh=l(),Vt=r("p"),Uh=n("The "),or=r("a"),Vh=n("TFConvBertForSequenceClassification"),Kh=n(" forward method, overrides the "),ja=r("code"),Jh=n("__call__"),Gh=n(" special method."),Zh=l(),b(Fo.$$.fragment),Xh=l(),xa=r("p"),eu=n("Example:"),tu=l(),b(ds.$$.fragment),ji=l(),Kt=r("h2"),Eo=r("a"),Pa=r("span"),b(cs.$$.fragment),ou=l(),Aa=r("span"),nu=n("TFConvBertForMultipleChoice"),xi=l(),Pe=r("div"),b(ps.$$.fragment),su=l(),Na=r("p"),ru=n(`ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),au=l(),hs=r("p"),iu=n("This model inherits from "),nr=r("a"),lu=n("TFPreTrainedModel"),du=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cu=l(),us=r("p"),pu=n("This model is also a "),fs=r("a"),hu=n("tf.keras.Model"),uu=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fu=l(),b(Mo.$$.fragment),mu=l(),Ue=r("div"),b(ms.$$.fragment),gu=l(),Jt=r("p"),_u=n("The "),sr=r("a"),vu=n("TFConvBertForMultipleChoice"),Tu=n(" forward method, overrides the "),Da=r("code"),ku=n("__call__"),bu=n(" special method."),Cu=l(),b(zo.$$.fragment),wu=l(),Ia=r("p"),$u=n("Example:"),yu=l(),b(gs.$$.fragment),Pi=l(),Gt=r("h2"),qo=r("a"),Sa=r("span"),b(_s.$$.fragment),Bu=l(),La=r("span"),Fu=n("TFConvBertForTokenClassification"),Ai=l(),Ae=r("div"),b(vs.$$.fragment),Eu=l(),Oa=r("p"),Mu=n(`ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),zu=l(),Ts=r("p"),qu=n("This model inherits from "),rr=r("a"),ju=n("TFPreTrainedModel"),xu=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pu=l(),ks=r("p"),Au=n("This model is also a "),bs=r("a"),Nu=n("tf.keras.Model"),Du=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Iu=l(),b(jo.$$.fragment),Su=l(),Ve=r("div"),b(Cs.$$.fragment),Lu=l(),Zt=r("p"),Ou=n("The "),ar=r("a"),Ru=n("TFConvBertForTokenClassification"),Wu=n(" forward method, overrides the "),Ra=r("code"),Hu=n("__call__"),Qu=n(" special method."),Yu=l(),b(xo.$$.fragment),Uu=l(),Wa=r("p"),Vu=n("Example:"),Ku=l(),b(ws.$$.fragment),Ni=l(),Xt=r("h2"),Po=r("a"),Ha=r("span"),b($s.$$.fragment),Ju=l(),Qa=r("span"),Gu=n("TFConvBertForQuestionAnswering"),Di=l(),Ne=r("div"),b(ys.$$.fragment),Zu=l(),eo=r("p"),Xu=n(`ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Ya=r("code"),ef=n("span start logits"),tf=n(" and "),Ua=r("code"),of=n("span end logits"),nf=n(")."),sf=l(),Bs=r("p"),rf=n("This model inherits from "),ir=r("a"),af=n("TFPreTrainedModel"),lf=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),df=l(),Fs=r("p"),cf=n("This model is also a "),Es=r("a"),pf=n("tf.keras.Model"),hf=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),uf=l(),b(Ao.$$.fragment),ff=l(),Ke=r("div"),b(Ms.$$.fragment),mf=l(),to=r("p"),gf=n("The "),lr=r("a"),_f=n("TFConvBertForQuestionAnswering"),vf=n(" forward method, overrides the "),Va=r("code"),Tf=n("__call__"),kf=n(" special method."),bf=l(),b(No.$$.fragment),Cf=l(),Ka=r("p"),wf=n("Example:"),$f=l(),b(zs.$$.fragment),this.h()},l(o){const m=e_('[data-svelte="svelte-1phssyn"]',document.head);p=a(m,"META",{name:!0,content:!0}),m.forEach(t),F=d(o),f=a(o,"H1",{class:!0});var qs=i(f);g=a(qs,"A",{id:!0,class:!0,href:!0});var Ja=i(g);T=a(Ja,"SPAN",{});var Ga=i(T);C(v.$$.fragment,Ga),Ga.forEach(t),Ja.forEach(t),_=d(qs),E=a(qs,"SPAN",{});var Za=i(E);de=s(Za,"ConvBERT"),Za.forEach(t),qs.forEach(t),K=d(o),M=a(o,"H2",{class:!0});var js=i(M);X=a(js,"A",{id:!0,class:!0,href:!0});var Xa=i(X);S=a(Xa,"SPAN",{});var ei=i(S);C(ee.$$.fragment,ei),ei.forEach(t),Xa.forEach(t),ce=d(js),L=a(js,"SPAN",{});var ti=i(L);pe=s(ti,"Overview"),ti.forEach(t),js.forEach(t),ae=d(o),U=a(o,"P",{});var xs=i(U);A=s(xs,"The ConvBERT model was proposed in "),te=a(xs,"A",{href:!0,rel:!0});var oi=i(te);J=s(oi,"ConvBERT: Improving BERT with Span-based Dynamic Convolution"),oi.forEach(t),z=s(xs,` by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.`),xs.forEach(t),j=d(o),ne=a(o,"P",{});var ni=i(ne);Q=s(ni,"The abstract from the paper is the following:"),ni.forEach(t),ie=d(o),se=a(o,"P",{});var si=i(se);O=a(si,"EM",{});var ri=i(O);he=s(ri,`Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. 
The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost. Code and pre-trained models will be released.`),ri.forEach(t),si.forEach(t),le=d(o),q=a(o,"P",{});var ai=i(q);ue=s(ai,"ConvBERT training tips are similar to those of BERT."),ai.forEach(t),R=d(o),G=a(o,"P",{});var Do=i(G);fe=s(Do,"This model was contributed by "),I=a(Do,"A",{href:!0,rel:!0});var ii=i(I);me=s(ii,"abhishek"),ii.forEach(t),ge=s(Do,`. The original implementation can be found here: `),x=a(Do,"A",{href:!0,rel:!0});var li=i(x);_e=s(li,"https://github.com/yitu-opensource/ConvBert"),li.forEach(t),Do.forEach(t),W=d(o),Z=a(o,"H2",{class:!0});var Ps=i(Z);c=a(Ps,"A",{id:!0,class:!0,href:!0});var Ff=i(c);k=a(Ff,"SPAN",{});var Ef=i(k);C(V.$$.fragment,Ef),Ef.forEach(t),Ff.forEach(t),$e=d(Ps),Ce=a(Ps,"SPAN",{});var Mf=i(Ce);N=s(Mf,"ConvBertConfig"),Mf.forEach(t),Ps.forEach(t),we=d(o),oe=a(o,"DIV",{class:!0});var Io=i(oe);C(Te.$$.fragment,Io),D=d(Io),P=a(Io,"P",{});var ft=i(P);ye=s(ft,"This is the configuration class to store the configuration of a "),be=a(ft,"A",{href:!0});var zf=i(be);Y=s(zf,"ConvBertModel"),zf.forEach(t),Be=s(ft,`. It is used to instantiate an ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvBERT `),ke=a(ft,"A",{href:!0,rel:!0});var qf=i(ke);ve=s(qf,"conv-bert-base"),qf.forEach(t),Fe=s(ft,` architecture. Configuration objects inherit from `),As=a(ft,"A",{href:!0});var jf=i(As);Cl=s(jf,"PretrainedConfig"),jf.forEach(t),wl=s(ft,` and can be used to control the model outputs. Read the documentation from `),Ns=a(ft,"A",{href:!0});var xf=i(Ns);$l=s(xf,"PretrainedConfig"),xf.forEach(t),yl=s(ft," for more information."),ft.forEach(t),Bl=d(Io),Er=a(Io,"P",{});var Pf=i(Er);Fl=s(Pf,"Example:"),Pf.forEach(t),El=d(Io),C(Yo.$$.fragment,Io),Io.forEach(t),ci=d(o),Bt=a(o,"H2",{class:!0});var Si=i(Bt);oo=a(Si,"A",{id:!0,class:!0,href:!0});var Af=i(oo);Mr=a(Af,"SPAN",{});var Nf=i(Mr);C(Uo.$$.fragment,Nf),Nf.forEach(t),Af.forEach(t),Ml=d(Si),zr=a(Si,"SPAN",{});var Df=i(zr);zl=s(Df,"ConvBertTokenizer"),Df.forEach(t),Si.forEach(t),pi=d(o),ze=a(o,"DIV",{class:!0});var nt=i(ze);C(Vo.$$.fragment,nt),ql=d(nt),ct=a(nt,"P",{});var So=i(ct);jl=s(So,"Construct a ConvBERT tokenizer. "),Ds=a(So,"A",{href:!0});var If=i(Ds);xl=s(If,"ConvBertTokenizer"),If.forEach(t),Pl=s(So,` is identical to `),Is=a(So,"A",{href:!0});var Sf=i(Is);Al=s(Sf,"BertTokenizer"),Sf.forEach(t),Nl=s(So,` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass `),Ss=a(So,"A",{href:!0});var Lf=i(Ss);Dl=s(Lf,"BertTokenizer"),Lf.forEach(t),Il=s(So," for usage examples and documentation concerning parameters."),So.forEach(t),Sl=d(nt),ut=a(nt,"DIV",{class:!0});var dr=i(ut);C(Ko.$$.fragment,dr),Ll=d(dr),qr=a(dr,"P",{});var Of=i(qr);Ol=s(Of,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A BERT sequence has the following format:`),Of.forEach(t),Rl=d(dr),Jo=a(dr,"UL",{});var Li=i(Jo);Ls=a(Li,"LI",{});var yf=i(Ls);Wl=s(yf,"single sequence: "),jr=a(yf,"CODE",{});var Rf=i(jr);Hl=s(Rf,"[CLS] X [SEP]"),Rf.forEach(t),yf.forEach(t),Ql=d(Li),Os=a(Li,"LI",{});var Bf=i(Os);Yl=s(Bf,"pair of sequences: "),xr=a(Bf,"CODE",{});var Wf=i(xr);Ul=s(Wf,"[CLS] A [SEP] B [SEP]"),Wf.forEach(t),Bf.forEach(t),Li.forEach(t),dr.forEach(t),Vl=d(nt),no=a(nt,"DIV",{class:!0});var Oi=i(no);C(Go.$$.fragment,Oi),Kl=d(Oi),Zo=a(Oi,"P",{});var Ri=i(Zo);Jl=s(Ri,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Pr=a(Ri,"CODE",{});var Hf=i(Pr);Gl=s(Hf,"prepare_for_model"),Hf.forEach(t),Zl=s(Ri," method."),Ri.forEach(t),Oi.forEach(t),Xl=d(nt),ot=a(nt,"DIV",{class:!0});var Lo=i(ot);C(Xo.$$.fragment,Lo),ed=d(Lo),Ar=a(Lo,"P",{});var Qf=i(Ar);td=s(Qf,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:`),Qf.forEach(t),od=d(Lo),C(en.$$.fragment,Lo),nd=d(Lo),Ft=a(Lo,"P",{});var cr=i(Ft);sd=s(cr,"If "),Nr=a(cr,"CODE",{});var Yf=i(Nr);rd=s(Yf,"token_ids_1"),Yf.forEach(t),ad=s(cr," is "),Dr=a(cr,"CODE",{});var Uf=i(Dr);id=s(Uf,"None"),Uf.forEach(t),ld=s(cr,", this method only returns the first portion of the mask (0s)."),cr.forEach(t),Lo.forEach(t),dd=d(nt),Ir=a(nt,"DIV",{class:!0}),i(Ir).forEach(t),nt.forEach(t),hi=d(o),Et=a(o,"H2",{class:!0});var Wi=i(Et);so=a(Wi,"A",{id:!0,class:!0,href:!0});var Vf=i(so);Sr=a(Vf,"SPAN",{});var Kf=i(Sr);C(tn.$$.fragment,Kf),Kf.forEach(t),Vf.forEach(t),cd=d(Wi),Lr=a(Wi,"SPAN",{});var Jf=i(Lr);pd=s(Jf,"ConvBertTokenizerFast"),Jf.forEach(t),Wi.forEach(t),ui=d(o),Ge=a(o,"DIV",{class:!0});var Oo=i(Ge);C(on.$$.fragment,Oo),hd=d(Oo),nn=a(Oo,"P",{});var Hi=i(nn);ud=s(Hi,"Construct a \u201Cfast\u201D ConvBERT tokenizer (backed by HuggingFace\u2019s "),Or=a(Hi,"EM",{});var Gf=i(Or);fd=s(Gf,"tokenizers"),Gf.forEach(t),md=s(Hi," library)."),Hi.forEach(t),gd=d(Oo),ro=a(Oo,"P",{});var di=i(ro);Rs=a(di,"A",{href:!0});var Zf=i(Rs);_d=s(Zf,"ConvBertTokenizerFast"),Zf.forEach(t),vd=s(di," is identical to "),Ws=a(di,"A",{href:!0});var Xf=i(Ws);Td=s(Xf,"BertTokenizerFast"),Xf.forEach(t),kd=s(di,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),di.forEach(t),bd=d(Oo),sn=a(Oo,"P",{});var Qi=i(sn);Cd=s(Qi,"Refer to superclass "),Hs=a(Qi,"A",{href:!0});var em=i(Hs);wd=s(em,"BertTokenizerFast"),em.forEach(t),$d=s(Qi,` for usage examples and documentation concerning parameters.`),Qi.forEach(t),Oo.forEach(t),fi=d(o),Mt=a(o,"H2",{class:!0});var Yi=i(Mt);ao=a(Yi,"A",{id:!0,class:!0,href:!0});var tm=i(ao);Rr=a(tm,"SPAN",{});var om=i(Rr);C(rn.$$.fragment,om),om.forEach(t),tm.forEach(t),yd=d(Yi),Wr=a(Yi,"SPAN",{});var nm=i(Wr);Bd=s(nm,"ConvBertModel"),nm.forEach(t),Yi.forEach(t),mi=d(o),pt=a(o,"DIV",{class:!0});var pr=i(pt);C(an.$$.fragment,pr),Fd=d(pr),ln=a(pr,"P",{});var Ui=i(ln);Ed=s(Ui,`The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),dn=a(Ui,"A",{href:!0,rel:!0});var sm=i(dn);Md=s(sm,"torch.nn.Module"),sm.forEach(t),zd=s(Ui,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ui.forEach(t),qd=d(pr),Se=a(pr,"DIV",{class:!0});var mt=i(Se);C(cn.$$.fragment,mt),jd=d(mt),zt=a(mt,"P",{});var hr=i(zt);xd=s(hr,"The "),Qs=a(hr,"A",{href:!0});var rm=i(Qs);Pd=s(rm,"ConvBertModel"),rm.forEach(t),Ad=s(hr," forward method, overrides the "),Hr=a(hr,"CODE",{});var am=i(Hr);Nd=s(am,"__call__"),am.forEach(t),Dd=s(hr," special method."),hr.forEach(t),Id=d(mt),C(io.$$.fragment,mt),Sd=d(mt),Qr=a(mt,"P",{});var im=i(Qr);Ld=s(im,"Example:"),im.forEach(t),Od=d(mt),C(pn.$$.fragment,mt),mt.forEach(t),pr.forEach(t),gi=d(o),qt=a(o,"H2",{class:!0});var Vi=i(qt);lo=a(Vi,"A",{id:!0,class:!0,href:!0});var lm=i(lo);Yr=a(lm,"SPAN",{});var dm=i(Yr);C(hn.$$.fragment,dm),dm.forEach(t),lm.forEach(t),Rd=d(Vi),Ur=a(Vi,"SPAN",{});var cm=i(Ur);Wd=s(cm,"ConvBertForMaskedLM"),cm.forEach(t),Vi.forEach(t),_i=d(o),ht=a(o,"DIV",{class:!0});var ur=i(ht);C(un.$$.fragment,ur),Hd=d(ur),jt=a(ur,"P",{});var fr=i(jt);Qd=s(fr,"ConvBERT Model with a "),Vr=a(fr,"CODE",{});var pm=i(Vr);Yd=s(pm,"language modeling"),pm.forEach(t),Ud=s(fr,` head on top. This model is a PyTorch `),fn=a(fr,"A",{href:!0,rel:!0});var hm=i(fn);Vd=s(hm,"torch.nn.Module"),hm.forEach(t),Kd=s(fr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fr.forEach(t),Jd=d(ur),Le=a(ur,"DIV",{class:!0});var gt=i(Le);C(mn.$$.fragment,gt),Gd=d(gt),xt=a(gt,"P",{});var mr=i(xt);Zd=s(mr,"The "),Ys=a(mr,"A",{href:!0});var um=i(Ys);Xd=s(um,"ConvBertForMaskedLM"),um.forEach(t),ec=s(mr," forward method, overrides the "),Kr=a(mr,"CODE",{});var fm=i(Kr);tc=s(fm,"__call__"),fm.forEach(t),oc=s(mr," special method."),mr.forEach(t),nc=d(gt),C(co.$$.fragment,gt),sc=d(gt),Jr=a(gt,"P",{});var mm=i(Jr);rc=s(mm,"Example:"),mm.forEach(t),ac=d(gt),C(gn.$$.fragment,gt),gt.forEach(t),ur.forEach(t),vi=d(o),Pt=a(o,"H2",{class:!0});var Ki=i(Pt);po=a(Ki,"A",{id:!0,class:!0,href:!0});var gm=i(po);Gr=a(gm,"SPAN",{});var _m=i(Gr);C(_n.$$.fragment,_m),_m.forEach(t),gm.forEach(t),ic=d(Ki),Zr=a(Ki,"SPAN",{});var vm=i(Zr);lc=s(vm,"ConvBertForSequenceClassification"),vm.forEach(t),Ki.forEach(t),Ti=d(o),Ze=a(o,"DIV",{class:!0});var Ro=i(Ze);C(vn.$$.fragment,Ro),dc=d(Ro),Xr=a(Ro,"P",{});var Tm=i(Xr);cc=s(Tm,`ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Tm.forEach(t),pc=d(Ro),Tn=a(Ro,"P",{});var Ji=i(Tn);hc=s(Ji,"This model is a PyTorch "),kn=a(Ji,"A",{href:!0,rel:!0});var km=i(kn);uc=s(km,"torch.nn.Module"),km.forEach(t),fc=s(Ji,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ji.forEach(t),mc=d(Ro),Ee=a(Ro,"DIV",{class:!0});var Je=i(Ee);C(bn.$$.fragment,Je),gc=d(Je),At=a(Je,"P",{});var gr=i(At);_c=s(gr,"The "),Us=a(gr,"A",{href:!0});var bm=i(Us);vc=s(bm,"ConvBertForSequenceClassification"),bm.forEach(t),Tc=s(gr," forward method, overrides the "),ea=a(gr,"CODE",{});var Cm=i(ea);kc=s(Cm,"__call__"),Cm.forEach(t),bc=s(gr," special method."),gr.forEach(t),Cc=d(Je),C(ho.$$.fragment,Je),wc=d(Je),ta=a(Je,"P",{});var wm=i(ta);$c=s(wm,"Example of single-label classification:"),wm.forEach(t),yc=d(Je),C(Cn.$$.fragment,Je),Bc=d(Je),oa=a(Je,"P",{});var $m=i(oa);Fc=s($m,"Example of multi-label classification:"),$m.forEach(t),Ec=d(Je),C(wn.$$.fragment,Je),Je.forEach(t),Ro.forEach(t),ki=d(o),Nt=a(o,"H2",{class:!0});var Gi=i(Nt);uo=a(Gi,"A",{id:!0,class:!0,href:!0});var ym=i(uo);na=a(ym,"SPAN",{});var Bm=i(na);C($n.$$.fragment,Bm),Bm.forEach(t),ym.forEach(t),Mc=d(Gi),sa=a(Gi,"SPAN",{});var Fm=i(sa);zc=s(Fm,"ConvBertForMultipleChoice"),Fm.forEach(t),Gi.forEach(t),bi=d(o),Xe=a(o,"DIV",{class:!0});var Wo=i(Xe);C(yn.$$.fragment,Wo),qc=d(Wo),ra=a(Wo,"P",{});var Em=i(ra);jc=s(Em,`ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Em.forEach(t),xc=d(Wo),Bn=a(Wo,"P",{});var Zi=i(Bn);Pc=s(Zi,"This model is a PyTorch "),Fn=a(Zi,"A",{href:!0,rel:!0});var Mm=i(Fn);Ac=s(Mm,"torch.nn.Module"),Mm.forEach(t),Nc=s(Zi,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zi.forEach(t),Dc=d(Wo),Oe=a(Wo,"DIV",{class:!0});var _t=i(Oe);C(En.$$.fragment,_t),Ic=d(_t),Dt=a(_t,"P",{});var _r=i(Dt);Sc=s(_r,"The "),Vs=a(_r,"A",{href:!0});var zm=i(Vs);Lc=s(zm,"ConvBertForMultipleChoice"),zm.forEach(t),Oc=s(_r," forward method, overrides the "),aa=a(_r,"CODE",{});var qm=i(aa);Rc=s(qm,"__call__"),qm.forEach(t),Wc=s(_r," special method."),_r.forEach(t),Hc=d(_t),C(fo.$$.fragment,_t),Qc=d(_t),ia=a(_t,"P",{});var jm=i(ia);Yc=s(jm,"Example:"),jm.forEach(t),Uc=d(_t),C(Mn.$$.fragment,_t),_t.forEach(t),Wo.forEach(t),Ci=d(o),It=a(o,"H2",{class:!0});var Xi=i(It);mo=a(Xi,"A",{id:!0,class:!0,href:!0});var xm=i(mo);la=a(xm,"SPAN",{});var Pm=i(la);C(zn.$$.fragment,Pm),Pm.forEach(t),xm.forEach(t),Vc=d(Xi),da=a(Xi,"SPAN",{});var Am=i(da);Kc=s(Am,"ConvBertForTokenClassification"),Am.forEach(t),Xi.forEach(t),wi=d(o),et=a(o,"DIV",{class:!0});var Ho=i(et);C(qn.$$.fragment,Ho),Jc=d(Ho),ca=a(Ho,"P",{});var Nm=i(ca);Gc=s(Nm,`ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Nm.forEach(t),Zc=d(Ho),jn=a(Ho,"P",{});var el=i(jn);Xc=s(el,"This model is a PyTorch "),xn=a(el,"A",{href:!0,rel:!0});var Dm=i(xn);ep=s(Dm,"torch.nn.Module"),Dm.forEach(t),tp=s(el,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),el.forEach(t),op=d(Ho),Re=a(Ho,"DIV",{class:!0});var vt=i(Re);C(Pn.$$.fragment,vt),np=d(vt),St=a(vt,"P",{});var vr=i(St);sp=s(vr,"The "),Ks=a(vr,"A",{href:!0});var Im=i(Ks);rp=s(Im,"ConvBertForTokenClassification"),Im.forEach(t),ap=s(vr," forward method, overrides the "),pa=a(vr,"CODE",{});var Sm=i(pa);ip=s(Sm,"__call__"),Sm.forEach(t),lp=s(vr," special method."),vr.forEach(t),dp=d(vt),C(go.$$.fragment,vt),cp=d(vt),ha=a(vt,"P",{});var Lm=i(ha);pp=s(Lm,"Example:"),Lm.forEach(t),hp=d(vt),C(An.$$.fragment,vt),vt.forEach(t),Ho.forEach(t),$i=d(o),Lt=a(o,"H2",{class:!0});var tl=i(Lt);_o=a(tl,"A",{id:!0,class:!0,href:!0});var Om=i(_o);ua=a(Om,"SPAN",{});var Rm=i(ua);C(Nn.$$.fragment,Rm),Rm.forEach(t),Om.forEach(t),up=d(tl),fa=a(tl,"SPAN",{});var Wm=i(fa);fp=s(Wm,"ConvBertForQuestionAnswering"),Wm.forEach(t),tl.forEach(t),yi=d(o),tt=a(o,"DIV",{class:!0});var Qo=i(tt);C(Dn.$$.fragment,Qo),mp=d(Qo),Ot=a(Qo,"P",{});var Tr=i(Ot);gp=s(Tr,`ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),ma=a(Tr,"CODE",{});var Hm=i(ma);_p=s(Hm,"span start logits"),Hm.forEach(t),vp=s(Tr," and "),ga=a(Tr,"CODE",{});var Qm=i(ga);Tp=s(Qm,"span end logits"),Qm.forEach(t),kp=s(Tr,")."),Tr.forEach(t),bp=d(Qo),In=a(Qo,"P",{});var ol=i(In);Cp=s(ol,"This model is a PyTorch "),Sn=a(ol,"A",{href:!0,rel:!0});var Ym=i(Sn);wp=s(Ym,"torch.nn.Module"),Ym.forEach(t),$p=s(ol,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ol.forEach(t),yp=d(Qo),We=a(Qo,"DIV",{class:!0});var Tt=i(We);C(Ln.$$.fragment,Tt),Bp=d(Tt),Rt=a(Tt,"P",{});var kr=i(Rt);Fp=s(kr,"The "),Js=a(kr,"A",{href:!0});var Um=i(Js);Ep=s(Um,"ConvBertForQuestionAnswering"),Um.forEach(t),Mp=s(kr," forward method, overrides the "),_a=a(kr,"CODE",{});var Vm=i(_a);zp=s(Vm,"__call__"),Vm.forEach(t),qp=s(kr," special method."),kr.forEach(t),jp=d(Tt),C(vo.$$.fragment,Tt),xp=d(Tt),va=a(Tt,"P",{});var Km=i(va);Pp=s(Km,"Example:"),Km.forEach(t),Ap=d(Tt),C(On.$$.fragment,Tt),Tt.forEach(t),Qo.forEach(t),Bi=d(o),Wt=a(o,"H2",{class:!0});var nl=i(Wt);To=a(nl,"A",{id:!0,class:!0,href:!0});var Jm=i(To);Ta=a(Jm,"SPAN",{});var Gm=i(Ta);C(Rn.$$.fragment,Gm),Gm.forEach(t),Jm.forEach(t),Np=d(nl),ka=a(nl,"SPAN",{});var Zm=i(ka);Dp=s(Zm,"TFConvBertModel"),Zm.forEach(t),nl.forEach(t),Fi=d(o),qe=a(o,"DIV",{class:!0});var st=i(qe);C(Wn.$$.fragment,st),Ip=d(st),ba=a(st,"P",{});var Xm=i(ba);Sp=s(Xm,"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top."),Xm.forEach(t),Lp=d(st),Hn=a(st,"P",{});var sl=i(Hn);Op=s(sl,"This model inherits from "),Gs=a(sl,"A",{href:!0});var eg=i(Gs);Rp=s(eg,"TFPreTrainedModel"),eg.forEach(t),Wp=s(sl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sl.forEach(t),Hp=d(st),Qn=a(st,"P",{});var rl=i(Qn);Qp=s(rl,"This model is also a "),Yn=a(rl,"A",{href:!0,rel:!0});var tg=i(Yn);Yp=s(tg,"tf.keras.Model"),tg.forEach(t),Up=s(rl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),rl.forEach(t),Vp=d(st),C(ko.$$.fragment,st),Kp=d(st),He=a(st,"DIV",{class:!0});var kt=i(He);C(Un.$$.fragment,kt),Jp=d(kt),Ht=a(kt,"P",{});var br=i(Ht);Gp=s(br,"The "),Zs=a(br,"A",{href:!0});var og=i(Zs);Zp=s(og,"TFConvBertModel"),og.forEach(t),Xp=s(br," forward method, overrides the "),Ca=a(br,"CODE",{});var ng=i(Ca);eh=s(ng,"__call__"),ng.forEach(t),th=s(br," special method."),br.forEach(t),oh=d(kt),C(bo.$$.fragment,kt),nh=d(kt),wa=a(kt,"P",{});var sg=i(wa);sh=s(sg,"Example:"),sg.forEach(t),rh=d(kt),C(Vn.$$.fragment,kt),kt.forEach(t),st.forEach(t),Ei=d(o),Qt=a(o,"H2",{class:!0});var al=i(Qt);Co=a(al,"A",{id:!0,class:!0,href:!0});var rg=i(Co);$a=a(rg,"SPAN",{});var ag=i($a);C(Kn.$$.fragment,ag),ag.forEach(t),rg.forEach(t),ah=d(al),ya=a(al,"SPAN",{});var ig=i(ya);ih=s(ig,"TFConvBertForMaskedLM"),ig.forEach(t),al.forEach(t),Mi=d(o),je=a(o,"DIV",{class:!0});var rt=i(je);C(Jn.$$.fragment,rt),lh=d(rt),Gn=a(rt,"P",{});var il=i(Gn);dh=s(il,"ConvBERT Model with a "),Ba=a(il,"CODE",{});var lg=i(Ba);ch=s(lg,"language modeling"),lg.forEach(t),ph=s(il," head on top."),il.forEach(t),hh=d(rt),Zn=a(rt,"P",{});var ll=i(Zn);uh=s(ll,"This model inherits from "),Xs=a(ll,"A",{href:!0});var dg=i(Xs);fh=s(dg,"TFPreTrainedModel"),dg.forEach(t),mh=s(ll,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ll.forEach(t),gh=d(rt),Xn=a(rt,"P",{});var dl=i(Xn);_h=s(dl,"This model is also a "),es=a(dl,"A",{href:!0,rel:!0});var cg=i(es);vh=s(cg,"tf.keras.Model"),cg.forEach(t),Th=s(dl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),dl.forEach(t),kh=d(rt),C(wo.$$.fragment,rt),bh=d(rt),Qe=a(rt,"DIV",{class:!0});var bt=i(Qe);C(ts.$$.fragment,bt),Ch=d(bt),Yt=a(bt,"P",{});var Cr=i(Yt);wh=s(Cr,"The "),er=a(Cr,"A",{href:!0});var pg=i(er);$h=s(pg,"TFConvBertForMaskedLM"),pg.forEach(t),yh=s(Cr," forward method, overrides the "),Fa=a(Cr,"CODE",{});var hg=i(Fa);Bh=s(hg,"__call__"),hg.forEach(t),Fh=s(Cr," special method."),Cr.forEach(t),Eh=d(bt),C($o.$$.fragment,bt),Mh=d(bt),Ea=a(bt,"P",{});var ug=i(Ea);zh=s(ug,"Example:"),ug.forEach(t),qh=d(bt),C(os.$$.fragment,bt),bt.forEach(t),rt.forEach(t),zi=d(o),Ut=a(o,"H2",{class:!0});var cl=i(Ut);yo=a(cl,"A",{id:!0,class:!0,href:!0});var fg=i(yo);Ma=a(fg,"SPAN",{});var mg=i(Ma);C(ns.$$.fragment,mg),mg.forEach(t),fg.forEach(t),jh=d(cl),za=a(cl,"SPAN",{});var gg=i(za);xh=s(gg,"TFConvBertForSequenceClassification"),gg.forEach(t),cl.forEach(t),qi=d(o),xe=a(o,"DIV",{class:!0});var at=i(xe);C(ss.$$.fragment,at),Ph=d(at),qa=a(at,"P",{});var _g=i(qa);Ah=s(_g,"ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks."),_g.forEach(t),Nh=d(at),rs=a(at,"P",{});var pl=i(rs);Dh=s(pl,"This model inherits from "),tr=a(pl,"A",{href:!0});var vg=i(tr);Ih=s(vg,"TFPreTrainedModel"),vg.forEach(t),Sh=s(pl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pl.forEach(t),Lh=d(at),as=a(at,"P",{});var hl=i(as);Oh=s(hl,"This model is also a "),is=a(hl,"A",{href:!0,rel:!0});var Tg=i(is);Rh=s(Tg,"tf.keras.Model"),Tg.forEach(t),Wh=s(hl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hl.forEach(t),Hh=d(at),C(Bo.$$.fragment,at),Qh=d(at),Ye=a(at,"DIV",{class:!0});var Ct=i(Ye);C(ls.$$.fragment,Ct),Yh=d(Ct),Vt=a(Ct,"P",{});var wr=i(Vt);Uh=s(wr,"The "),or=a(wr,"A",{href:!0});var kg=i(or);Vh=s(kg,"TFConvBertForSequenceClassification"),kg.forEach(t),Kh=s(wr," forward method, overrides the "),ja=a(wr,"CODE",{});var bg=i(ja);Jh=s(bg,"__call__"),bg.forEach(t),Gh=s(wr," special method."),wr.forEach(t),Zh=d(Ct),C(Fo.$$.fragment,Ct),Xh=d(Ct),xa=a(Ct,"P",{});var Cg=i(xa);eu=s(Cg,"Example:"),Cg.forEach(t),tu=d(Ct),C(ds.$$.fragment,Ct),Ct.forEach(t),at.forEach(t),ji=d(o),Kt=a(o,"H2",{class:!0});var ul=i(Kt);Eo=a(ul,"A",{id:!0,class:!0,href:!0});var wg=i(Eo);Pa=a(wg,"SPAN",{});var $g=i(Pa);C(cs.$$.fragment,$g),$g.forEach(t),wg.forEach(t),ou=d(ul),Aa=a(ul,"SPAN",{});var yg=i(Aa);nu=s(yg,"TFConvBertForMultipleChoice"),yg.forEach(t),ul.forEach(t),xi=d(o),Pe=a(o,"DIV",{class:!0});var it=i(Pe);C(ps.$$.fragment,it),su=d(it),Na=a(it,"P",{});var Bg=i(Na);ru=s(Bg,`ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Bg.forEach(t),au=d(it),hs=a(it,"P",{});var fl=i(hs);iu=s(fl,"This model inherits from "),nr=a(fl,"A",{href:!0});var Fg=i(nr);lu=s(Fg,"TFPreTrainedModel"),Fg.forEach(t),du=s(fl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fl.forEach(t),cu=d(it),us=a(it,"P",{});var ml=i(us);pu=s(ml,"This model is also a "),fs=a(ml,"A",{href:!0,rel:!0});var Eg=i(fs);hu=s(Eg,"tf.keras.Model"),Eg.forEach(t),uu=s(ml,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ml.forEach(t),fu=d(it),C(Mo.$$.fragment,it),mu=d(it),Ue=a(it,"DIV",{class:!0});var wt=i(Ue);C(ms.$$.fragment,wt),gu=d(wt),Jt=a(wt,"P",{});var $r=i(Jt);_u=s($r,"The "),sr=a($r,"A",{href:!0});var Mg=i(sr);vu=s(Mg,"TFConvBertForMultipleChoice"),Mg.forEach(t),Tu=s($r," forward method, overrides the "),Da=a($r,"CODE",{});var zg=i(Da);ku=s(zg,"__call__"),zg.forEach(t),bu=s($r," special method."),$r.forEach(t),Cu=d(wt),C(zo.$$.fragment,wt),wu=d(wt),Ia=a(wt,"P",{});var qg=i(Ia);$u=s(qg,"Example:"),qg.forEach(t),yu=d(wt),C(gs.$$.fragment,wt),wt.forEach(t),it.forEach(t),Pi=d(o),Gt=a(o,"H2",{class:!0});var gl=i(Gt);qo=a(gl,"A",{id:!0,class:!0,href:!0});var jg=i(qo);Sa=a(jg,"SPAN",{});var xg=i(Sa);C(_s.$$.fragment,xg),xg.forEach(t),jg.forEach(t),Bu=d(gl),La=a(gl,"SPAN",{});var Pg=i(La);Fu=s(Pg,"TFConvBertForTokenClassification"),Pg.forEach(t),gl.forEach(t),Ai=d(o),Ae=a(o,"DIV",{class:!0});var lt=i(Ae);C(vs.$$.fragment,lt),Eu=d(lt),Oa=a(lt,"P",{});var Ag=i(Oa);Mu=s(Ag,`ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ag.forEach(t),zu=d(lt),Ts=a(lt,"P",{});var _l=i(Ts);qu=s(_l,"This model inherits from "),rr=a(_l,"A",{href:!0});var Ng=i(rr);ju=s(Ng,"TFPreTrainedModel"),Ng.forEach(t),xu=s(_l,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_l.forEach(t),Pu=d(lt),ks=a(lt,"P",{});var vl=i(ks);Au=s(vl,"This model is also a "),bs=a(vl,"A",{href:!0,rel:!0});var Dg=i(bs);Nu=s(Dg,"tf.keras.Model"),Dg.forEach(t),Du=s(vl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vl.forEach(t),Iu=d(lt),C(jo.$$.fragment,lt),Su=d(lt),Ve=a(lt,"DIV",{class:!0});var $t=i(Ve);C(Cs.$$.fragment,$t),Lu=d($t),Zt=a($t,"P",{});var yr=i(Zt);Ou=s(yr,"The "),ar=a(yr,"A",{href:!0});var Ig=i(ar);Ru=s(Ig,"TFConvBertForTokenClassification"),Ig.forEach(t),Wu=s(yr," forward method, overrides the "),Ra=a(yr,"CODE",{});var Sg=i(Ra);Hu=s(Sg,"__call__"),Sg.forEach(t),Qu=s(yr," special method."),yr.forEach(t),Yu=d($t),C(xo.$$.fragment,$t),Uu=d($t),Wa=a($t,"P",{});var Lg=i(Wa);Vu=s(Lg,"Example:"),Lg.forEach(t),Ku=d($t),C(ws.$$.fragment,$t),$t.forEach(t),lt.forEach(t),Ni=d(o),Xt=a(o,"H2",{class:!0});var Tl=i(Xt);Po=a(Tl,"A",{id:!0,class:!0,href:!0});var Og=i(Po);Ha=a(Og,"SPAN",{});var Rg=i(Ha);C($s.$$.fragment,Rg),Rg.forEach(t),Og.forEach(t),Ju=d(Tl),Qa=a(Tl,"SPAN",{});var Wg=i(Qa);Gu=s(Wg,"TFConvBertForQuestionAnswering"),Wg.forEach(t),Tl.forEach(t),Di=d(o),Ne=a(o,"DIV",{class:!0});var dt=i(Ne);C(ys.$$.fragment,dt),Zu=d(dt),eo=a(dt,"P",{});var Br=i(eo);Xu=s(Br,`ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Ya=a(Br,"CODE",{});var Hg=i(Ya);ef=s(Hg,"span start logits"),Hg.forEach(t),tf=s(Br," and "),Ua=a(Br,"CODE",{});var Qg=i(Ua);of=s(Qg,"span end logits"),Qg.forEach(t),nf=s(Br,")."),Br.forEach(t),sf=d(dt),Bs=a(dt,"P",{});var kl=i(Bs);rf=s(kl,"This model inherits from "),ir=a(kl,"A",{href:!0});var Yg=i(ir);af=s(Yg,"TFPreTrainedModel"),Yg.forEach(t),lf=s(kl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kl.forEach(t),df=d(dt),Fs=a(dt,"P",{});var bl=i(Fs);cf=s(bl,"This model is also a "),Es=a(bl,"A",{href:!0,rel:!0});var Ug=i(Es);pf=s(Ug,"tf.keras.Model"),Ug.forEach(t),hf=s(bl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bl.forEach(t),uf=d(dt),C(Ao.$$.fragment,dt),ff=d(dt),Ke=a(dt,"DIV",{class:!0});var yt=i(Ke);C(Ms.$$.fragment,yt),mf=d(yt),to=a(yt,"P",{});var Fr=i(to);gf=s(Fr,"The "),lr=a(Fr,"A",{href:!0});var Vg=i(lr);_f=s(Vg,"TFConvBertForQuestionAnswering"),Vg.forEach(t),vf=s(Fr," forward method, overrides the "),Va=a(Fr,"CODE",{});var Kg=i(Va);Tf=s(Kg,"__call__"),Kg.forEach(t),kf=s(Fr," special method."),Fr.forEach(t),bf=d(yt),C(No.$$.fragment,yt),Cf=d(yt),Ka=a(yt,"P",{});var Jg=i(Ka);wf=s(Jg,"Example:"),Jg.forEach(t),$f=d(yt),C(zs.$$.fragment,yt),yt.forEach(t),dt.forEach(t),this.h()},h(){h(p,"name","hf:doc:metadata"),h(p,"content",JSON.stringify(k_)),h(g,"id","convbert"),h(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(g,"href","#convbert"),h(f,"class","relative group"),h(X,"id","overview"),h(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(X,"href","#overview"),h(M,"class","relative group"),h(te,"href","https://arxiv.org/abs/2008.02496"),h(te,"rel","nofollow"),h(I,"href","https://huggingface.co/abhishek"),h(I,"rel","nofollow"),h(x,"href","https://github.com/yitu-opensource/ConvBert"),h(x,"rel","nofollow"),h(c,"id","transformers.ConvBertConfig"),h(c,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(c,"href","#transformers.ConvBertConfig"),h(Z,"class","relative group"),h(be,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertModel"),h(ke,"href","https://huggingface.co/YituTech/conv-bert-base"),h(ke,"rel","nofollow"),h(As,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(Ns,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(oe,"class","docstring"),h(oo,"id","transformers.ConvBertTokenizer"),h(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(oo,"href","#transformers.ConvBertTokenizer"),h(Bt,"class","relative group"),h(Ds,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizer"),h(Is,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),h(Ss,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),h(ut,"class","docstring"),h(no,"class","docstring"),h(ot,"class","docstring"),h(Ir,"class","docstring"),h(ze,"class","docstring"),h(so,"id","transformers.ConvBertTokenizerFast"),h(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(so,"href","#transformers.ConvBertTokenizerFast"),h(Et,"class","relative 
group"),h(Rs,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertTokenizerFast"),h(Ws,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),h(Hs,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),h(Ge,"class","docstring"),h(ao,"id","transformers.ConvBertModel"),h(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ao,"href","#transformers.ConvBertModel"),h(Mt,"class","relative group"),h(dn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(dn,"rel","nofollow"),h(Qs,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertModel"),h(Se,"class","docstring"),h(pt,"class","docstring"),h(lo,"id","transformers.ConvBertForMaskedLM"),h(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(lo,"href","#transformers.ConvBertForMaskedLM"),h(qt,"class","relative group"),h(fn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(fn,"rel","nofollow"),h(Ys,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForMaskedLM"),h(Le,"class","docstring"),h(ht,"class","docstring"),h(po,"id","transformers.ConvBertForSequenceClassification"),h(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(po,"href","#transformers.ConvBertForSequenceClassification"),h(Pt,"class","relative group"),h(kn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(kn,"rel","nofollow"),h(Us,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForSequenceClassification"),h(Ee,"class","docstring"),h(Ze,"class","docstring"),h(uo,"id","transformers.ConvBertForMultipleChoice"),h(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(uo,"href","#transformers.ConvBertForMultipleChoice"),h(Nt,"class","relative group"),h(Fn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Fn,"rel","nofollow"),h(Vs,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForMultipleChoice"),h(Oe,"class","docstring"),h(Xe,"class","docstring"),h(mo,"id","transformers.ConvBertForTokenClassification"),h(mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(mo,"href","#transformers.ConvBertForTokenClassification"),h(It,"class","relative group"),h(xn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(xn,"rel","nofollow"),h(Ks,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForTokenClassification"),h(Re,"class","docstring"),h(et,"class","docstring"),h(_o,"id","transformers.ConvBertForQuestionAnswering"),h(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(_o,"href","#transformers.ConvBertForQuestionAnswering"),h(Lt,"class","relative 
group"),h(Sn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Sn,"rel","nofollow"),h(Js,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.ConvBertForQuestionAnswering"),h(We,"class","docstring"),h(tt,"class","docstring"),h(To,"id","transformers.TFConvBertModel"),h(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(To,"href","#transformers.TFConvBertModel"),h(Wt,"class","relative group"),h(Gs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Yn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Yn,"rel","nofollow"),h(Zs,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertModel"),h(He,"class","docstring"),h(qe,"class","docstring"),h(Co,"id","transformers.TFConvBertForMaskedLM"),h(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Co,"href","#transformers.TFConvBertForMaskedLM"),h(Qt,"class","relative group"),h(Xs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(es,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(es,"rel","nofollow"),h(er,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForMaskedLM"),h(Qe,"class","docstring"),h(je,"class","docstring"),h(yo,"id","transformers.TFConvBertForSequenceClassification"),h(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(yo,"href","#transformers.TFConvBertForSequenceClassification"),h(Ut,"class","relative group"),h(tr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(is,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(is,"rel","nofollow"),h(or,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForSequenceClassification"),h(Ye,"class","docstring"),h(xe,"class","docstring"),h(Eo,"id","transformers.TFConvBertForMultipleChoice"),h(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Eo,"href","#transformers.TFConvBertForMultipleChoice"),h(Kt,"class","relative group"),h(nr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(fs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(fs,"rel","nofollow"),h(sr,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForMultipleChoice"),h(Ue,"class","docstring"),h(Pe,"class","docstring"),h(qo,"id","transformers.TFConvBertForTokenClassification"),h(qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(qo,"href","#transformers.TFConvBertForTokenClassification"),h(Gt,"class","relative 
group"),h(rr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(bs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(bs,"rel","nofollow"),h(ar,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForTokenClassification"),h(Ve,"class","docstring"),h(Ae,"class","docstring"),h(Po,"id","transformers.TFConvBertForQuestionAnswering"),h(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Po,"href","#transformers.TFConvBertForQuestionAnswering"),h(Xt,"class","relative group"),h(ir,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Es,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Es,"rel","nofollow"),h(lr,"href","/docs/transformers/v4.15.0/en/model_doc/convbert#transformers.TFConvBertForQuestionAnswering"),h(Ke,"class","docstring"),h(Ne,"class","docstring")},m(o,m){e(document.head,p),u(o,F,m),u(o,f,m),e(f,g),e(g,T),w(v,T,null),e(f,_),e(f,E),e(E,de),u(o,K,m),u(o,M,m),e(M,X),e(X,S),w(ee,S,null),e(M,ce),e(M,L),e(L,pe),u(o,ae,m),u(o,U,m),e(U,A),e(U,te),e(te,J),e(U,z),u(o,j,m),u(o,ne,m),e(ne,Q),u(o,ie,m),u(o,se,m),e(se,O),e(O,he),u(o,le,m),u(o,q,m),e(q,ue),u(o,R,m),u(o,G,m),e(G,fe),e(G,I),e(I,me),e(G,ge),e(G,x),e(x,_e),u(o,W,m),u(o,Z,m),e(Z,c),e(c,k),w(V,k,null),e(Z,$e),e(Z,Ce),e(Ce,N),u(o,we,m),u(o,oe,m),w(Te,oe,null),e(oe,D),e(oe,P),e(P,ye),e(P,be),e(be,Y),e(P,Be),e(P,ke),e(ke,ve),e(P,Fe),e(P,As),e(As,Cl),e(P,wl),e(P,Ns),e(Ns,$l),e(P,yl),e(oe,Bl),e(oe,Er),e(Er,Fl),e(oe,El),w(Yo,oe,null),u(o,ci,m),u(o,Bt,m),e(Bt,oo),e(oo,Mr),w(Uo,Mr,null),e(Bt,Ml),e(Bt,zr),e(zr,zl),u(o,pi,m),u(o,ze,m),w(Vo,ze,null),e(ze,ql),e(ze,ct),e(ct,jl),e(ct,Ds),e(Ds,xl),e(ct,Pl),e(ct,Is),e(Is,Al),e(ct,Nl),e(ct,Ss),e(Ss,Dl),e(ct,Il),e(ze,Sl),e(ze,ut),w(Ko,ut,null),e(ut,Ll),e(ut,qr),e(qr,Ol),e(ut,Rl),e(ut,Jo),e(Jo,Ls),e(Ls,Wl),e(Ls,jr),e(jr,Hl),e(Jo,Ql),e(Jo,Os),e(Os,Yl),e(Os,xr),e(xr,Ul),e(ze,Vl),e(ze,no),w(Go,no,null),e(no,Kl),e(no,Zo),e(Zo,Jl),e(Zo,Pr),e(Pr,Gl),e(Zo,Zl),e(ze,Xl),e(ze,ot),w(Xo,ot,null),e(ot,ed),e(ot,Ar),e(Ar,td),e(ot,od),w(en,ot,null),e(ot,nd),e(ot,Ft),e(Ft,sd),e(Ft,Nr),e(Nr,rd),e(Ft,ad),e(Ft,Dr),e(Dr,id),e(Ft,ld),e(ze,dd),e(ze,Ir),u(o,hi,m),u(o,Et,m),e(Et,so),e(so,Sr),w(tn,Sr,null),e(Et,cd),e(Et,Lr),e(Lr,pd),u(o,ui,m),u(o,Ge,m),w(on,Ge,null),e(Ge,hd),e(Ge,nn),e(nn,ud),e(nn,Or),e(Or,fd),e(nn,md),e(Ge,gd),e(Ge,ro),e(ro,Rs),e(Rs,_d),e(ro,vd),e(ro,Ws),e(Ws,Td),e(ro,kd),e(Ge,bd),e(Ge,sn),e(sn,Cd),e(sn,Hs),e(Hs,wd),e(sn,$d),u(o,fi,m),u(o,Mt,m),e(Mt,ao),e(ao,Rr),w(rn,Rr,null),e(Mt,yd),e(Mt,Wr),e(Wr,Bd),u(o,mi,m),u(o,pt,m),w(an,pt,null),e(pt,Fd),e(pt,ln),e(ln,Ed),e(ln,dn),e(dn,Md),e(ln,zd),e(pt,qd),e(pt,Se),w(cn,Se,null),e(Se,jd),e(Se,zt),e(zt,xd),e(zt,Qs),e(Qs,Pd),e(zt,Ad),e(zt,Hr),e(Hr,Nd),e(zt,Dd),e(Se,Id),w(io,Se,null),e(Se,Sd),e(Se,Qr),e(Qr,Ld),e(Se,Od),w(pn,Se,null),u(o,gi,m),u(o,qt,m),e(qt,lo),e(lo,Yr),w(hn,Yr,null),e(qt,Rd),e(qt,Ur),e(Ur,Wd),u(o,_i,m),u(o,ht,m),w(un,ht,null),e(ht,Hd),e(ht,jt),e(jt,Qd),e(jt,Vr),e(Vr,Yd),e(jt,Ud),e(jt,fn),e(fn,Vd),e(jt,Kd),e(ht,Jd),e(ht,Le),w(mn,Le,null),e(Le,Gd),e(Le,xt),e(xt,Zd),e(xt,Ys),e(Ys,Xd),e(xt,ec),e(xt,Kr),e(Kr,tc),e(xt,oc),e(Le,nc),w(co,Le,null),e(Le,sc),e(Le,Jr),e(Jr,rc),e(Le,ac),w(gn,Le,null),u(o,vi,m),u(o,Pt,m),e(Pt,po),e(po,Gr),w(_n,Gr,null),e(Pt,ic),e(Pt,Zr),e(Zr,lc),u(o,Ti,m),u(o,Ze,m),w(vn,Ze,null),e(Ze,dc),e(Ze,Xr),e(Xr,cc),e(Ze,pc),e(Ze,Tn),e(Tn,hc),e(Tn,kn),e(kn,uc),e(Tn,fc),e(Ze,mc),e
(Ze,Ee),w(bn,Ee,null),e(Ee,gc),e(Ee,At),e(At,_c),e(At,Us),e(Us,vc),e(At,Tc),e(At,ea),e(ea,kc),e(At,bc),e(Ee,Cc),w(ho,Ee,null),e(Ee,wc),e(Ee,ta),e(ta,$c),e(Ee,yc),w(Cn,Ee,null),e(Ee,Bc),e(Ee,oa),e(oa,Fc),e(Ee,Ec),w(wn,Ee,null),u(o,ki,m),u(o,Nt,m),e(Nt,uo),e(uo,na),w($n,na,null),e(Nt,Mc),e(Nt,sa),e(sa,zc),u(o,bi,m),u(o,Xe,m),w(yn,Xe,null),e(Xe,qc),e(Xe,ra),e(ra,jc),e(Xe,xc),e(Xe,Bn),e(Bn,Pc),e(Bn,Fn),e(Fn,Ac),e(Bn,Nc),e(Xe,Dc),e(Xe,Oe),w(En,Oe,null),e(Oe,Ic),e(Oe,Dt),e(Dt,Sc),e(Dt,Vs),e(Vs,Lc),e(Dt,Oc),e(Dt,aa),e(aa,Rc),e(Dt,Wc),e(Oe,Hc),w(fo,Oe,null),e(Oe,Qc),e(Oe,ia),e(ia,Yc),e(Oe,Uc),w(Mn,Oe,null),u(o,Ci,m),u(o,It,m),e(It,mo),e(mo,la),w(zn,la,null),e(It,Vc),e(It,da),e(da,Kc),u(o,wi,m),u(o,et,m),w(qn,et,null),e(et,Jc),e(et,ca),e(ca,Gc),e(et,Zc),e(et,jn),e(jn,Xc),e(jn,xn),e(xn,ep),e(jn,tp),e(et,op),e(et,Re),w(Pn,Re,null),e(Re,np),e(Re,St),e(St,sp),e(St,Ks),e(Ks,rp),e(St,ap),e(St,pa),e(pa,ip),e(St,lp),e(Re,dp),w(go,Re,null),e(Re,cp),e(Re,ha),e(ha,pp),e(Re,hp),w(An,Re,null),u(o,$i,m),u(o,Lt,m),e(Lt,_o),e(_o,ua),w(Nn,ua,null),e(Lt,up),e(Lt,fa),e(fa,fp),u(o,yi,m),u(o,tt,m),w(Dn,tt,null),e(tt,mp),e(tt,Ot),e(Ot,gp),e(Ot,ma),e(ma,_p),e(Ot,vp),e(Ot,ga),e(ga,Tp),e(Ot,kp),e(tt,bp),e(tt,In),e(In,Cp),e(In,Sn),e(Sn,wp),e(In,$p),e(tt,yp),e(tt,We),w(Ln,We,null),e(We,Bp),e(We,Rt),e(Rt,Fp),e(Rt,Js),e(Js,Ep),e(Rt,Mp),e(Rt,_a),e(_a,zp),e(Rt,qp),e(We,jp),w(vo,We,null),e(We,xp),e(We,va),e(va,Pp),e(We,Ap),w(On,We,null),u(o,Bi,m),u(o,Wt,m),e(Wt,To),e(To,Ta),w(Rn,Ta,null),e(Wt,Np),e(Wt,ka),e(ka,Dp),u(o,Fi,m),u(o,qe,m),w(Wn,qe,null),e(qe,Ip),e(qe,ba),e(ba,Sp),e(qe,Lp),e(qe,Hn),e(Hn,Op),e(Hn,Gs),e(Gs,Rp),e(Hn,Wp),e(qe,Hp),e(qe,Qn),e(Qn,Qp),e(Qn,Yn),e(Yn,Yp),e(Qn,Up),e(qe,Vp),w(ko,qe,null),e(qe,Kp),e(qe,He),w(Un,He,null),e(He,Jp),e(He,Ht),e(Ht,Gp),e(Ht,Zs),e(Zs,Zp),e(Ht,Xp),e(Ht,Ca),e(Ca,eh),e(Ht,th),e(He,oh),w(bo,He,null),e(He,nh),e(He,wa),e(wa,sh),e(He,rh),w(Vn,He,null),u(o,Ei,m),u(o,Qt,m),e(Qt,Co),e(Co,$a),w(Kn,$a,null),e(Qt,ah),e(Qt,ya),e(ya,ih),u(o,Mi,m),u(o,je,m),w(Jn,je,null),e(je,lh),e(je,Gn),e(Gn,dh),e(Gn,Ba),e(Ba,ch),e(Gn,ph),e(je,hh),e(je,Zn),e(Zn,uh),e(Zn,Xs),e(Xs,fh),e(Zn,mh),e(je,gh),e(je,Xn),e(Xn,_h),e(Xn,es),e(es,vh),e(Xn,Th),e(je,kh),w(wo,je,null),e(je,bh),e(je,Qe),w(ts,Qe,null),e(Qe,Ch),e(Qe,Yt),e(Yt,wh),e(Yt,er),e(er,$h),e(Yt,yh),e(Yt,Fa),e(Fa,Bh),e(Yt,Fh),e(Qe,Eh),w($o,Qe,null),e(Qe,Mh),e(Qe,Ea),e(Ea,zh),e(Qe,qh),w(os,Qe,null),u(o,zi,m),u(o,Ut,m),e(Ut,yo),e(yo,Ma),w(ns,Ma,null),e(Ut,jh),e(Ut,za),e(za,xh),u(o,qi,m),u(o,xe,m),w(ss,xe,null),e(xe,Ph),e(xe,qa),e(qa,Ah),e(xe,Nh),e(xe,rs),e(rs,Dh),e(rs,tr),e(tr,Ih),e(rs,Sh),e(xe,Lh),e(xe,as),e(as,Oh),e(as,is),e(is,Rh),e(as,Wh),e(xe,Hh),w(Bo,xe,null),e(xe,Qh),e(xe,Ye),w(ls,Ye,null),e(Ye,Yh),e(Ye,Vt),e(Vt,Uh),e(Vt,or),e(or,Vh),e(Vt,Kh),e(Vt,ja),e(ja,Jh),e(Vt,Gh),e(Ye,Zh),w(Fo,Ye,null),e(Ye,Xh),e(Ye,xa),e(xa,eu),e(Ye,tu),w(ds,Ye,null),u(o,ji,m),u(o,Kt,m),e(Kt,Eo),e(Eo,Pa),w(cs,Pa,null),e(Kt,ou),e(Kt,Aa),e(Aa,nu),u(o,xi,m),u(o,Pe,m),w(ps,Pe,null),e(Pe,su),e(Pe,Na),e(Na,ru),e(Pe,au),e(Pe,hs),e(hs,iu),e(hs,nr),e(nr,lu),e(hs,du),e(Pe,cu),e(Pe,us),e(us,pu),e(us,fs),e(fs,hu),e(us,uu),e(Pe,fu),w(Mo,Pe,null),e(Pe,mu),e(Pe,Ue),w(ms,Ue,null),e(Ue,gu),e(Ue,Jt),e(Jt,_u),e(Jt,sr),e(sr,vu),e(Jt,Tu),e(Jt,Da),e(Da,ku),e(Jt,bu),e(Ue,Cu),w(zo,Ue,null),e(Ue,wu),e(Ue,Ia),e(Ia,$u),e(Ue,yu),w(gs,Ue,null),u(o,Pi,m),u(o,Gt,m),e(Gt,qo),e(qo,Sa),w(_s,Sa,null),e(Gt,Bu),e(Gt,La),e(La,Fu),u(o,Ai,m),u(o,Ae,m),w(vs,Ae,null),e(Ae,Eu),e(Ae,Oa),e(Oa,Mu),e(Ae,zu),e(Ae,Ts),e(Ts,qu),e(Ts,rr),e(rr,ju),e(Ts,xu),e(Ae,Pu),e(Ae,ks),e(ks,Au),e(ks,bs),e(bs,Nu),e(ks,Du),e(Ae,Iu),w(jo,Ae,null),e(Ae,Su),
e(Ae,Ve),w(Cs,Ve,null),e(Ve,Lu),e(Ve,Zt),e(Zt,Ou),e(Zt,ar),e(ar,Ru),e(Zt,Wu),e(Zt,Ra),e(Ra,Hu),e(Zt,Qu),e(Ve,Yu),w(xo,Ve,null),e(Ve,Uu),e(Ve,Wa),e(Wa,Vu),e(Ve,Ku),w(ws,Ve,null),u(o,Ni,m),u(o,Xt,m),e(Xt,Po),e(Po,Ha),w($s,Ha,null),e(Xt,Ju),e(Xt,Qa),e(Qa,Gu),u(o,Di,m),u(o,Ne,m),w(ys,Ne,null),e(Ne,Zu),e(Ne,eo),e(eo,Xu),e(eo,Ya),e(Ya,ef),e(eo,tf),e(eo,Ua),e(Ua,of),e(eo,nf),e(Ne,sf),e(Ne,Bs),e(Bs,rf),e(Bs,ir),e(ir,af),e(Bs,lf),e(Ne,df),e(Ne,Fs),e(Fs,cf),e(Fs,Es),e(Es,pf),e(Fs,hf),e(Ne,uf),w(Ao,Ne,null),e(Ne,ff),e(Ne,Ke),w(Ms,Ke,null),e(Ke,mf),e(Ke,to),e(to,gf),e(to,lr),e(lr,_f),e(to,vf),e(to,Va),e(Va,Tf),e(to,kf),e(Ke,bf),w(No,Ke,null),e(Ke,Cf),e(Ke,Ka),e(Ka,wf),e(Ke,$f),w(zs,Ke,null),Ii=!0},p(o,[m]){const qs={};m&2&&(qs.$$scope={dirty:m,ctx:o}),io.$set(qs);const Ja={};m&2&&(Ja.$$scope={dirty:m,ctx:o}),co.$set(Ja);const Ga={};m&2&&(Ga.$$scope={dirty:m,ctx:o}),ho.$set(Ga);const Za={};m&2&&(Za.$$scope={dirty:m,ctx:o}),fo.$set(Za);const js={};m&2&&(js.$$scope={dirty:m,ctx:o}),go.$set(js);const Xa={};m&2&&(Xa.$$scope={dirty:m,ctx:o}),vo.$set(Xa);const ei={};m&2&&(ei.$$scope={dirty:m,ctx:o}),ko.$set(ei);const ti={};m&2&&(ti.$$scope={dirty:m,ctx:o}),bo.$set(ti);const xs={};m&2&&(xs.$$scope={dirty:m,ctx:o}),wo.$set(xs);const oi={};m&2&&(oi.$$scope={dirty:m,ctx:o}),$o.$set(oi);const ni={};m&2&&(ni.$$scope={dirty:m,ctx:o}),Bo.$set(ni);const si={};m&2&&(si.$$scope={dirty:m,ctx:o}),Fo.$set(si);const ri={};m&2&&(ri.$$scope={dirty:m,ctx:o}),Mo.$set(ri);const ai={};m&2&&(ai.$$scope={dirty:m,ctx:o}),zo.$set(ai);const Do={};m&2&&(Do.$$scope={dirty:m,ctx:o}),jo.$set(Do);const ii={};m&2&&(ii.$$scope={dirty:m,ctx:o}),xo.$set(ii);const li={};m&2&&(li.$$scope={dirty:m,ctx:o}),Ao.$set(li);const Ps={};m&2&&(Ps.$$scope={dirty:m,ctx:o}),No.$set(Ps)},i(o){Ii||($(v.$$.fragment,o),$(ee.$$.fragment,o),$(V.$$.fragment,o),$(Te.$$.fragment,o),$(Yo.$$.fragment,o),$(Uo.$$.fragment,o),$(Vo.$$.fragment,o),$(Ko.$$.fragment,o),$(Go.$$.fragment,o),$(Xo.$$.fragment,o),$(en.$$.fragment,o),$(tn.$$.fragment,o),$(on.$$.fragment,o),$(rn.$$.fragment,o),$(an.$$.fragment,o),$(cn.$$.fragment,o),$(io.$$.fragment,o),$(pn.$$.fragment,o),$(hn.$$.fragment,o),$(un.$$.fragment,o),$(mn.$$.fragment,o),$(co.$$.fragment,o),$(gn.$$.fragment,o),$(_n.$$.fragment,o),$(vn.$$.fragment,o),$(bn.$$.fragment,o),$(ho.$$.fragment,o),$(Cn.$$.fragment,o),$(wn.$$.fragment,o),$($n.$$.fragment,o),$(yn.$$.fragment,o),$(En.$$.fragment,o),$(fo.$$.fragment,o),$(Mn.$$.fragment,o),$(zn.$$.fragment,o),$(qn.$$.fragment,o),$(Pn.$$.fragment,o),$(go.$$.fragment,o),$(An.$$.fragment,o),$(Nn.$$.fragment,o),$(Dn.$$.fragment,o),$(Ln.$$.fragment,o),$(vo.$$.fragment,o),$(On.$$.fragment,o),$(Rn.$$.fragment,o),$(Wn.$$.fragment,o),$(ko.$$.fragment,o),$(Un.$$.fragment,o),$(bo.$$.fragment,o),$(Vn.$$.fragment,o),$(Kn.$$.fragment,o),$(Jn.$$.fragment,o),$(wo.$$.fragment,o),$(ts.$$.fragment,o),$($o.$$.fragment,o),$(os.$$.fragment,o),$(ns.$$.fragment,o),$(ss.$$.fragment,o),$(Bo.$$.fragment,o),$(ls.$$.fragment,o),$(Fo.$$.fragment,o),$(ds.$$.fragment,o),$(cs.$$.fragment,o),$(ps.$$.fragment,o),$(Mo.$$.fragment,o),$(ms.$$.fragment,o),$(zo.$$.fragment,o),$(gs.$$.fragment,o),$(_s.$$.fragment,o),$(vs.$$.fragment,o),$(jo.$$.fragment,o),$(Cs.$$.fragment,o),$(xo.$$.fragment,o),$(ws.$$.fragment,o),$($s.$$.fragment,o),$(ys.$$.fragment,o),$(Ao.$$.fragment,o),$(Ms.$$.fragment,o),$(No.$$.fragment,o),$(zs.$$.fragment,o),Ii=!0)},o(o){y(v.$$.fragment,o),y(ee.$$.fragment,o),y(V.$$.fragment,o),y(Te.$$.fragment,o),y(Yo.$$.fragment,o),y(Uo.$$.fragment,o),y(Vo.$$.fragment,o),y(Ko.$$.fragment,o),y(Go.$$.fragment,o),y(Xo.$$
.fragment,o),y(en.$$.fragment,o),y(tn.$$.fragment,o),y(on.$$.fragment,o),y(rn.$$.fragment,o),y(an.$$.fragment,o),y(cn.$$.fragment,o),y(io.$$.fragment,o),y(pn.$$.fragment,o),y(hn.$$.fragment,o),y(un.$$.fragment,o),y(mn.$$.fragment,o),y(co.$$.fragment,o),y(gn.$$.fragment,o),y(_n.$$.fragment,o),y(vn.$$.fragment,o),y(bn.$$.fragment,o),y(ho.$$.fragment,o),y(Cn.$$.fragment,o),y(wn.$$.fragment,o),y($n.$$.fragment,o),y(yn.$$.fragment,o),y(En.$$.fragment,o),y(fo.$$.fragment,o),y(Mn.$$.fragment,o),y(zn.$$.fragment,o),y(qn.$$.fragment,o),y(Pn.$$.fragment,o),y(go.$$.fragment,o),y(An.$$.fragment,o),y(Nn.$$.fragment,o),y(Dn.$$.fragment,o),y(Ln.$$.fragment,o),y(vo.$$.fragment,o),y(On.$$.fragment,o),y(Rn.$$.fragment,o),y(Wn.$$.fragment,o),y(ko.$$.fragment,o),y(Un.$$.fragment,o),y(bo.$$.fragment,o),y(Vn.$$.fragment,o),y(Kn.$$.fragment,o),y(Jn.$$.fragment,o),y(wo.$$.fragment,o),y(ts.$$.fragment,o),y($o.$$.fragment,o),y(os.$$.fragment,o),y(ns.$$.fragment,o),y(ss.$$.fragment,o),y(Bo.$$.fragment,o),y(ls.$$.fragment,o),y(Fo.$$.fragment,o),y(ds.$$.fragment,o),y(cs.$$.fragment,o),y(ps.$$.fragment,o),y(Mo.$$.fragment,o),y(ms.$$.fragment,o),y(zo.$$.fragment,o),y(gs.$$.fragment,o),y(_s.$$.fragment,o),y(vs.$$.fragment,o),y(jo.$$.fragment,o),y(Cs.$$.fragment,o),y(xo.$$.fragment,o),y(ws.$$.fragment,o),y($s.$$.fragment,o),y(ys.$$.fragment,o),y(Ao.$$.fragment,o),y(Ms.$$.fragment,o),y(No.$$.fragment,o),y(zs.$$.fragment,o),Ii=!1},d(o){t(p),o&&t(F),o&&t(f),B(v),o&&t(K),o&&t(M),B(ee),o&&t(ae),o&&t(U),o&&t(j),o&&t(ne),o&&t(ie),o&&t(se),o&&t(le),o&&t(q),o&&t(R),o&&t(G),o&&t(W),o&&t(Z),B(V),o&&t(we),o&&t(oe),B(Te),B(Yo),o&&t(ci),o&&t(Bt),B(Uo),o&&t(pi),o&&t(ze),B(Vo),B(Ko),B(Go),B(Xo),B(en),o&&t(hi),o&&t(Et),B(tn),o&&t(ui),o&&t(Ge),B(on),o&&t(fi),o&&t(Mt),B(rn),o&&t(mi),o&&t(pt),B(an),B(cn),B(io),B(pn),o&&t(gi),o&&t(qt),B(hn),o&&t(_i),o&&t(ht),B(un),B(mn),B(co),B(gn),o&&t(vi),o&&t(Pt),B(_n),o&&t(Ti),o&&t(Ze),B(vn),B(bn),B(ho),B(Cn),B(wn),o&&t(ki),o&&t(Nt),B($n),o&&t(bi),o&&t(Xe),B(yn),B(En),B(fo),B(Mn),o&&t(Ci),o&&t(It),B(zn),o&&t(wi),o&&t(et),B(qn),B(Pn),B(go),B(An),o&&t($i),o&&t(Lt),B(Nn),o&&t(yi),o&&t(tt),B(Dn),B(Ln),B(vo),B(On),o&&t(Bi),o&&t(Wt),B(Rn),o&&t(Fi),o&&t(qe),B(Wn),B(ko),B(Un),B(bo),B(Vn),o&&t(Ei),o&&t(Qt),B(Kn),o&&t(Mi),o&&t(je),B(Jn),B(wo),B(ts),B($o),B(os),o&&t(zi),o&&t(Ut),B(ns),o&&t(qi),o&&t(xe),B(ss),B(Bo),B(ls),B(Fo),B(ds),o&&t(ji),o&&t(Kt),B(cs),o&&t(xi),o&&t(Pe),B(ps),B(Mo),B(ms),B(zo),B(gs),o&&t(Pi),o&&t(Gt),B(_s),o&&t(Ai),o&&t(Ae),B(vs),B(jo),B(Cs),B(xo),B(ws),o&&t(Ni),o&&t(Xt),B($s),o&&t(Di),o&&t(Ne),B(ys),B(Ao),B(Ms),B(No),B(zs)}}}const 
k_={local:"convbert",sections:[{local:"overview",title:"Overview"},{local:"transformers.ConvBertConfig",title:"ConvBertConfig"},{local:"transformers.ConvBertTokenizer",title:"ConvBertTokenizer"},{local:"transformers.ConvBertTokenizerFast",title:"ConvBertTokenizerFast"},{local:"transformers.ConvBertModel",title:"ConvBertModel"},{local:"transformers.ConvBertForMaskedLM",title:"ConvBertForMaskedLM"},{local:"transformers.ConvBertForSequenceClassification",title:"ConvBertForSequenceClassification"},{local:"transformers.ConvBertForMultipleChoice",title:"ConvBertForMultipleChoice"},{local:"transformers.ConvBertForTokenClassification",title:"ConvBertForTokenClassification"},{local:"transformers.ConvBertForQuestionAnswering",title:"ConvBertForQuestionAnswering"},{local:"transformers.TFConvBertModel",title:"TFConvBertModel"},{local:"transformers.TFConvBertForMaskedLM",title:"TFConvBertForMaskedLM"},{local:"transformers.TFConvBertForSequenceClassification",title:"TFConvBertForSequenceClassification"},{local:"transformers.TFConvBertForMultipleChoice",title:"TFConvBertForMultipleChoice"},{local:"transformers.TFConvBertForTokenClassification",title:"TFConvBertForTokenClassification"},{local:"transformers.TFConvBertForQuestionAnswering",title:"TFConvBertForQuestionAnswering"}],title:"ConvBERT"};function b_(H,p,F){let{fw:f}=p;return H.$$set=g=>{"fw"in g&&F(0,f=g.fw)},[f]}class E_ extends Gg{constructor(p){super();Zg(this,p,b_,T_,Xg,{fw:0})}}export{E_ as default,k_ as metadata};
9,975
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/blenderbot_small.mdx-864a5190.js
import{S as pf,i as hf,s as uf,e as t,k as l,w as f,t as r,L as mf,c as n,d as o,m as i,a,x as _,h as d,b as c,J as e,g as p,y as g,q as b,o as k,B as v}from"../../chunks/vendor-b1433968.js";import{T as Sn}from"../../chunks/Tip-c3840994.js";import{D as P}from"../../chunks/Docstring-ff504c58.js";import{C as qo}from"../../chunks/CodeBlock-a320dbd7.js";import{I as qe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ff(H){let u,z,y,T,x;return{c(){u=t("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=t("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=n(S,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(w,"CODE",{});var F=a(y);T=d(F,"Module"),F.forEach(o),x=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(S,w){p(S,u,w),e(u,z),e(u,y),e(y,T),e(u,x)},d(S){S&&o(u)}}}function _f(H){let u,z,y,T,x;return{c(){u=t("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=t("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=n(S,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(w,"CODE",{});var F=a(y);T=d(F,"Module"),F.forEach(o),x=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(S,w){p(S,u,w),e(u,z),e(u,y),e(y,T),e(u,x)},d(S){S&&o(u)}}}function gf(H){let u,z,y,T,x,S,w,F,Pe,fe,B,je,U,Oe,Le,W,Ne,Ae,Q,V,Ie,Z,M,O,_e,se,Ee,J,I,Te,re,L,we,de,$e,ee,le,ie,De,X,Me,R,Ge;return{c(){u=t("p"),z=r("TF 2.0 models accepts two formats as inputs:"),y=l(),T=t("ul"),x=t("li"),S=r("having all inputs as keyword arguments (like PyTorch models), or"),w=l(),F=t("li"),Pe=r("having all inputs as a list, tuple or dict in the first positional arguments."),fe=l(),B=t("p"),je=r("This second option is useful when using "),U=t("code"),Oe=r("tf.keras.Model.fit"),Le=r(` method which currently requires having all the tensors in the first argument of the model call function: `),W=t("code"),Ne=r("model(inputs)"),Ae=r("."),Q=l(),V=t("p"),Ie=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),M=t("ul"),O=t("li"),_e=r("a single Tensor with "),se=t("code"),Ee=r("input_ids"),J=r(" only and nothing else: "),I=t("code"),Te=r("model(input_ids)"),re=l(),L=t("li"),we=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=t("code"),$e=r("model([input_ids, attention_mask])"),ee=r(" or "),le=t("code"),ie=r("model([input_ids, attention_mask, token_type_ids])"),De=l(),X=t("li"),Me=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=t("code"),Ge=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=n(m,"P",{});var q=a(u);z=d(q,"TF 2.0 models accepts two formats as 
inputs:"),q.forEach(o),y=i(m),T=n(m,"UL",{});var ce=a(T);x=n(ce,"LI",{});var ro=a(x);S=d(ro,"having all inputs as keyword arguments (like PyTorch models), or"),ro.forEach(o),w=i(ce),F=n(ce,"LI",{});var Ke=a(F);Pe=d(Ke,"having all inputs as a list, tuple or dict in the first positional arguments."),Ke.forEach(o),ce.forEach(o),fe=i(m),B=n(m,"P",{});var C=a(B);je=d(C,"This second option is useful when using "),U=n(C,"CODE",{});var lo=a(U);Oe=d(lo,"tf.keras.Model.fit"),lo.forEach(o),Le=d(C,` method which currently requires having all the tensors in the first argument of the model call function: `),W=n(C,"CODE",{});var Se=a(W);Ne=d(Se,"model(inputs)"),Se.forEach(o),Ae=d(C,"."),C.forEach(o),Q=i(m),V=n(m,"P",{});var io=a(V);Ie=d(io,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),io.forEach(o),Z=i(m),M=n(m,"UL",{});var Y=a(M);O=n(Y,"LI",{});var D=a(O);_e=d(D,"a single Tensor with "),se=n(D,"CODE",{});var co=a(se);Ee=d(co,"input_ids"),co.forEach(o),J=d(D," only and nothing else: "),I=n(D,"CODE",{});var po=a(I);Te=d(po,"model(input_ids)"),po.forEach(o),D.forEach(o),re=i(Y),L=n(Y,"LI",{});var oe=a(L);we=d(oe,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=n(oe,"CODE",{});var pe=a(de);$e=d(pe,"model([input_ids, attention_mask])"),pe.forEach(o),ee=d(oe," or "),le=n(oe,"CODE",{});var ge=a(le);ie=d(ge,"model([input_ids, attention_mask, token_type_ids])"),ge.forEach(o),oe.forEach(o),De=i(Y),X=n(Y,"LI",{});var xe=a(X);Me=d(xe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=n(xe,"CODE",{});var Be=a(R);Ge=d(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(o),xe.forEach(o),Y.forEach(o)},m(m,q){p(m,u,q),e(u,z),p(m,y,q),p(m,T,q),e(T,x),e(x,S),e(T,w),e(T,F),e(F,Pe),p(m,fe,q),p(m,B,q),e(B,je),e(B,U),e(U,Oe),e(B,Le),e(B,W),e(W,Ne),e(B,Ae),p(m,Q,q),p(m,V,q),e(V,Ie),p(m,Z,q),p(m,M,q),e(M,O),e(O,_e),e(O,se),e(se,Ee),e(O,J),e(O,I),e(I,Te),e(M,re),e(M,L),e(L,we),e(L,de),e(de,$e),e(L,ee),e(L,le),e(le,ie),e(M,De),e(M,X),e(X,Me),e(X,R),e(R,Ge)},d(m){m&&o(u),m&&o(y),m&&o(T),m&&o(fe),m&&o(B),m&&o(Q),m&&o(V),m&&o(Z),m&&o(M)}}}function bf(H){let u,z,y,T,x;return{c(){u=t("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=t("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=n(S,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(w,"CODE",{});var F=a(y);T=d(F,"Module"),F.forEach(o),x=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(S,w){p(S,u,w),e(u,z),e(u,y),e(y,T),e(u,x)},d(S){S&&o(u)}}}function kf(H){let u,z,y,T,x,S,w,F,Pe,fe,B,je,U,Oe,Le,W,Ne,Ae,Q,V,Ie,Z,M,O,_e,se,Ee,J,I,Te,re,L,we,de,$e,ee,le,ie,De,X,Me,R,Ge;return{c(){u=t("p"),z=r("TF 2.0 models accepts two formats as inputs:"),y=l(),T=t("ul"),x=t("li"),S=r("having all inputs as keyword arguments (like PyTorch models), or"),w=l(),F=t("li"),Pe=r("having all inputs as a list, tuple or dict in the first positional arguments."),fe=l(),B=t("p"),je=r("This second option is useful when using "),U=t("code"),Oe=r("tf.keras.Model.fit"),Le=r(` 
method which currently requires having all the tensors in the first argument of the model call function: `),W=t("code"),Ne=r("model(inputs)"),Ae=r("."),Q=l(),V=t("p"),Ie=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),M=t("ul"),O=t("li"),_e=r("a single Tensor with "),se=t("code"),Ee=r("input_ids"),J=r(" only and nothing else: "),I=t("code"),Te=r("model(input_ids)"),re=l(),L=t("li"),we=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=t("code"),$e=r("model([input_ids, attention_mask])"),ee=r(" or "),le=t("code"),ie=r("model([input_ids, attention_mask, token_type_ids])"),De=l(),X=t("li"),Me=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=t("code"),Ge=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=n(m,"P",{});var q=a(u);z=d(q,"TF 2.0 models accepts two formats as inputs:"),q.forEach(o),y=i(m),T=n(m,"UL",{});var ce=a(T);x=n(ce,"LI",{});var ro=a(x);S=d(ro,"having all inputs as keyword arguments (like PyTorch models), or"),ro.forEach(o),w=i(ce),F=n(ce,"LI",{});var Ke=a(F);Pe=d(Ke,"having all inputs as a list, tuple or dict in the first positional arguments."),Ke.forEach(o),ce.forEach(o),fe=i(m),B=n(m,"P",{});var C=a(B);je=d(C,"This second option is useful when using "),U=n(C,"CODE",{});var lo=a(U);Oe=d(lo,"tf.keras.Model.fit"),lo.forEach(o),Le=d(C,` method which currently requires having all the tensors in the first argument of the model call function: `),W=n(C,"CODE",{});var Se=a(W);Ne=d(Se,"model(inputs)"),Se.forEach(o),Ae=d(C,"."),C.forEach(o),Q=i(m),V=n(m,"P",{});var io=a(V);Ie=d(io,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),io.forEach(o),Z=i(m),M=n(m,"UL",{});var Y=a(M);O=n(Y,"LI",{});var D=a(O);_e=d(D,"a single Tensor with "),se=n(D,"CODE",{});var co=a(se);Ee=d(co,"input_ids"),co.forEach(o),J=d(D," only and nothing else: "),I=n(D,"CODE",{});var po=a(I);Te=d(po,"model(input_ids)"),po.forEach(o),D.forEach(o),re=i(Y),L=n(Y,"LI",{});var oe=a(L);we=d(oe,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=n(oe,"CODE",{});var pe=a(de);$e=d(pe,"model([input_ids, attention_mask])"),pe.forEach(o),ee=d(oe," or "),le=n(oe,"CODE",{});var ge=a(le);ie=d(ge,"model([input_ids, attention_mask, token_type_ids])"),ge.forEach(o),oe.forEach(o),De=i(Y),X=n(Y,"LI",{});var xe=a(X);Me=d(xe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=n(xe,"CODE",{});var Be=a(R);Ge=d(Be,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Be.forEach(o),xe.forEach(o),Y.forEach(o)},m(m,q){p(m,u,q),e(u,z),p(m,y,q),p(m,T,q),e(T,x),e(x,S),e(T,w),e(T,F),e(F,Pe),p(m,fe,q),p(m,B,q),e(B,je),e(B,U),e(U,Oe),e(B,Le),e(B,W),e(W,Ne),e(B,Ae),p(m,Q,q),p(m,V,q),e(V,Ie),p(m,Z,q),p(m,M,q),e(M,O),e(O,_e),e(O,se),e(se,Ee),e(O,J),e(O,I),e(I,Te),e(M,re),e(M,L),e(L,we),e(L,de),e(de,$e),e(L,ee),e(L,le),e(le,ie),e(M,De),e(M,X),e(X,Me),e(X,R),e(R,Ge)},d(m){m&&o(u),m&&o(y),m&&o(T),m&&o(fe),m&&o(B),m&&o(Q),m&&o(V),m&&o(Z),m&&o(M)}}}function vf(H){let u,z,y,T,x;return{c(){u=t("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=t("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of 
running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=n(S,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(w,"CODE",{});var F=a(y);T=d(F,"Module"),F.forEach(o),x=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(S,w){p(S,u,w),e(u,z),e(u,y),e(y,T),e(u,x)},d(S){S&&o(u)}}}function yf(H){let u,z,y,T,x;return{c(){u=t("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),y=t("code"),T=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(S){u=n(S,"P",{});var w=a(u);z=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),y=n(w,"CODE",{});var F=a(y);T=d(F,"Module"),F.forEach(o),x=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(S,w){p(S,u,w),e(u,z),e(u,y),e(y,T),e(u,x)},d(S){S&&o(u)}}}function Tf(H){let u,z,y,T,x,S,w,F,Pe,fe,B,je,U,Oe,Le,W,Ne,Ae,Q,V,Ie,Z,M,O,_e,se,Ee,J,I,Te,re,L,we,de,$e,ee,le,ie,De,X,Me,R,Ge,m,q,ce,ro,Ke,C,lo,Se,io,Y,D,co,po,oe,pe,ge,xe,Be,bd,sa,kd,vr,he,et,vd,ho,yd,xn,Td,wd,ot,Sd,xd,Bd,uo,zd,Bn,Fd,qd,zn,Ed,$d,Md,ra,Cd,Pd,tt,yr,mo,Eo,da,nt,jd,la,Od,Tr,G,at,Ld,ia,Nd,Ad,st,Id,Fn,Dd,Gd,Ud,He,rt,Wd,ca,Rd,Kd,pa,Hd,Qd,$o,dt,Vd,fo,Jd,ha,Xd,Yd,ua,Zd,el,ol,Qe,lt,tl,qn,nl,En,al,sl,ma,rl,dl,fa,wr,_o,Mo,_a,it,ll,ga,il,Sr,Ue,ct,cl,pt,pl,ba,hl,ul,ml,Co,ht,fl,ka,_l,xr,go,Po,va,ut,gl,ya,bl,Br,ze,mt,kl,ft,vl,$n,yl,Tl,wl,_t,Sl,gt,xl,Bl,zl,be,bt,Fl,bo,ql,Mn,El,$l,Ta,Ml,Cl,Pl,jo,jl,wa,Ol,Ll,kt,zr,ko,Oo,Sa,vt,Nl,xa,Al,Fr,Fe,yt,Il,Tt,Dl,Cn,Gl,Ul,Wl,wt,Rl,St,Kl,Hl,Ql,te,xt,Vl,vo,Jl,Pn,Xl,Yl,Ba,Zl,ei,oi,Lo,ti,za,ni,ai,Fa,qa,Ea,$a,si,ri,Ma,Ca,Pa,Bt,di,ja,li,ii,qr,yo,No,Oa,zt,ci,La,pi,Er,Ft,Ve,qt,hi,Na,ui,mi,Et,$r,To,Ao,Aa,$t,fi,Ia,_i,Mr,ue,Mt,gi,Ct,bi,jn,ki,vi,yi,Pt,Ti,jt,wi,Si,xi,Io,Bi,ke,Ot,zi,wo,Fi,On,qi,Ei,Da,$i,Mi,Ci,Do,Pi,Ga,ji,Oi,Lt,Cr,So,Go,Ua,Nt,Li,Wa,Ni,Pr,me,At,Ai,It,Ii,Ln,Di,Gi,Ui,Dt,Wi,Gt,Ri,Ki,Hi,Uo,Qi,j,Ut,Vi,xo,Ji,Nn,Xi,Yi,Ra,Zi,ec,oc,Wo,tc,Ka,nc,ac,Ha,Qa,Va,Ja,sc,rc,Xa,Ya,Za,es,dc,lc,os,ts,ns,as,ic,cc,ss,rs,ds,Wt,pc,ls,hc,uc,mc,is,cs,ps,hs,fc,jr,Bo,Ro,us,Rt,_c,ms,gc,Or,N,Kt,bc,Ht,kc,An,vc,yc,Tc,Qt,wc,Vt,Sc,xc,Bc,fs,zc,Fc,We,_s,Jt,qc,Ec,gs,Xt,$c,Mc,bs,Yt,Cc,Pc,ks,Zt,jc,Oc,Je,en,Lc,vs,Nc,Ac,on,Ic,Xe,tn,Dc,ys,Gc,Uc,nn,Wc,Ye,an,Rc,Ts,Kc,Hc,sn,Lr,zo,Ko,ws,rn,Qc,Ss,Vc,Nr,A,dn,Jc,ln,Xc,In,Yc,Zc,ep,cn,op,pn,tp,np,ap,xs,sp,rp,Re,Bs,hn,dp,lp,zs,un,ip,cp,Fs,mn,pp,hp,qs,fn,up,mp,E,_n,fp,Fo,_p,Es,gp,bp,$s,kp,vp,yp,Ho,Tp,Ms,wp,Sp,Cs,Ps,js,Os,xp,Bp,Ls,Ns,As,Is,zp,Fp,Ds,Gs,Us,Ws,qp,Ep,Rs,Ks,gn,Qo,Vo,Hs,bn,$p,Qs,Mp,Cp,Vs,Pp,jp,Js,Op,Lp,Xs,Ys,Zs,er,Np,Ap,or,tr,nr,ar,Ip,Dp,sr,rr,dr,lr,Gp,Up,ir,cr,pr,hr,Wp,Rp,Ze,kn,Kp,ur,Hp,Qp,vn,Vp,eo,yn,Jp,mr,Xp,Yp,Tn,Ar;return S=new qe({}),re=new qe({}),Be=new qe({}),et=new P({props:{name:"class transformers.BlenderbotSmallConfig",anchor:"transformers.BlenderbotSmallConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"max_position_embeddings",val:" = 512"},{name:"encoder_layers",val:" = 8"},{name:"encoder_ffn_dim",val:" = 2048"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 8"},{name:"decoder_ffn_dim",val:" = 
2048"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 512"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 1"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"forced_eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py#L29",parametersDescription:[{anchor:"transformers.BlenderbotSmallConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallModel">BlenderbotSmallModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallModel">TFBlenderbotSmallModel</a>.`,name:"vocab_size"},{anchor:"transformers.BlenderbotSmallConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.BlenderbotSmallConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.BlenderbotSmallConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.BlenderbotSmallConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.BlenderbotSmallConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.BlenderbotSmallConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.BlenderbotSmallConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.BlenderbotSmallConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; 
The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.BlenderbotSmallConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.BlenderbotSmallConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.BlenderbotSmallConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.BlenderbotSmallConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.BlenderbotSmallConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.BlenderbotSmallConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.BlenderbotSmallConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.BlenderbotSmallConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.BlenderbotSmallConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),tt=new qo({props:{code:`from transformers import BlenderbotSmallModel, BlenderbotSmallConfig # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration configuration = BlenderbotSmallConfig() # Initializing a model from the facebook/blenderbot_small-90M style configuration model = BlenderbotSmallModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallModel, BlenderbotSmallConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BlenderbotSmallConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/blenderbot_small-90M style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotSmallModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),nt=new qe({}),at=new P({props:{name:"class transformers.BlenderbotSmallTokenizer",anchor:"transformers.BlenderbotSmallTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"bos_token",val:" = '__start__'"},{name:"eos_token",val:" = '__end__'"},{name:"unk_token",val:" = '__unk__'"},{name:"pad_token",val:" = '__null__'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py#L67",parametersDescription:[{anchor:"transformers.BlenderbotSmallTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.BlenderbotSmallTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.BlenderbotSmallTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;__start__&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.BlenderbotSmallTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;__end__&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.BlenderbotSmallTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;__unk__&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BlenderbotSmallTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;__pad__&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths. 
**kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>`,name:"pad_token"}]}}),rt=new P({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2837",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The model input with special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),dt=new P({props:{name:"get_special_tokens_mask",anchor:"transformers.PreTrainedTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List"},{name:"token_ids_1",val:": typing.Optional[typing.List] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L836",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids of the first sequence.`,name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of ids of the second sequence.`,name:"token_ids_1"},{anchor:"transformers.PreTrainedTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>1 for a special token, 0 for a sequence token.</p> `,returnType:` <p>A list of integers in the range [0, 1]</p> `}}),lt=new P({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2818",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The token type ids.</p> `,returnType:` <p><code>List[int]</code></p> `}}),it=new qe({}),ct=new 
P({props:{name:"class transformers.BlenderbotSmallTokenizerFast",anchor:"transformers.BlenderbotSmallTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|endoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py#L50",parametersDescription:[{anchor:"transformers.BlenderbotSmallTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"}]}}),ht=new P({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BlenderbotSmallTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py#L96",parametersDescription:[{anchor:"transformers.BlenderbotSmallTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BlenderbotSmallTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ut=new qe({}),mt=new P({props:{name:"class transformers.BlenderbotSmallModel",anchor:"transformers.BlenderbotSmallModel",parameters:[{name:"config",val:": BlenderbotSmallConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py#L1068",parametersDescription:[{anchor:"transformers.BlenderbotSmallModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bt=new P({props:{name:"forward",anchor:"transformers.BlenderbotSmallModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py#L1095",parametersDescription:[{anchor:"transformers.BlenderbotSmallModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotSmallModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotSmallModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>BlenderbotSmall uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.BlenderbotSmallModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.BlenderbotSmallModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotSmallModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BlenderbotSmallModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotSmallModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BlenderbotSmallModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BlenderbotSmallModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BlenderbotSmallModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BlenderbotSmallModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotSmallModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotSmallModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights 
of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),jo=new Sn({props:{$$slots:{default:[ff]},$$scope:{ctx:H}}}),kt=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, BlenderbotSmallModel model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M") tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M") input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, BlenderbotSmallModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotSmallModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot_small-90M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot_small-90M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, 
decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),vt=new qe({}),yt=new P({props:{name:"class transformers.BlenderbotSmallForConditionalGeneration",anchor:"transformers.BlenderbotSmallForConditionalGeneration",parameters:[{name:"config",val:": BlenderbotSmallConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py#L1192",parametersDescription:[{anchor:"transformers.BlenderbotSmallForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xt=new P({props:{name:"forward",anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py#L1236",parametersDescription:[{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>BlenderbotSmall uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BlenderbotSmallForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Lo=new Sn({props:{$$slots:{default:[_f]},$$scope:{ctx:H}}}),zt=new qe({}),qt=new P({props:{name:"forward",anchor:"transformers.BlenderbotSmallForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py#L1399",parametersDescription:[{anchor:"transformers.BlenderbotSmallForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotSmallForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Et=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForCausalLM tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/bart-large') model = BlenderbotSmallForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, BlenderbotSmallForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotSmallForCausalLM.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$t=new qe({}),Mt=new P({props:{name:"class transformers.TFBlenderbotSmallModel",anchor:"transformers.TFBlenderbotSmallModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py#L1180",parametersDescription:[{anchor:"transformers.TFBlenderbotSmallModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Io=new Sn({props:{$$slots:{default:[gf]},$$scope:{ctx:H}}}),Ot=new P({props:{name:"call",anchor:"transformers.TFBlenderbotSmallModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py#L1192",parametersDescription:[{anchor:"transformers.TFBlenderbotSmallModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBlenderbotSmallModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBlenderbotSmallModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>BlenderbotSmall uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFBlenderbotSmallModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBlenderbotSmallModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBlenderbotSmallModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBlenderbotSmallModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBlenderbotSmallModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBlenderbotSmallModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBlenderbotSmallModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBlenderbotSmallModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBlenderbotSmallModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBlenderbotSmallModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBlenderbotSmallModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of 
shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Do=new Sn({props:{$$slots:{default:[bf]},$$scope:{ctx:H}}}),Lt=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallModel import tensorflow as tf tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M') model = TFBlenderbotSmallModel.from_pretrained('facebook/blenderbot_small-90M') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, TFBlenderbotSmallModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBlenderbotSmallModel.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Nt=new qe({}),At=new P({props:{name:"class transformers.TFBlenderbotSmallForConditionalGeneration",anchor:"transformers.TFBlenderbotSmallForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py#L1287",parametersDescription:[{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Uo=new Sn({props:{$$slots:{default:[kf]},$$scope:{ctx:H}}}),Ut=new P({props:{name:"call",anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py#L1320",parametersDescription:[{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>BlenderbotSmall uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBlenderbotSmallForConditionalGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> 
<p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Wo=new Sn({props:{$$slots:{default:[vf]},$$scope:{ctx:H}}}),Rt=new qe({}),Kt=new P({props:{name:"class transformers.FlaxBlenderbotSmallModel",anchor:"transformers.FlaxBlenderbotSmallModel",parameters:[{name:"config",val:": BlenderbotSmallConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L1217",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBlenderbotSmallModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),en=new P({props:{name:"__call__",anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L1153",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> 
(one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),on=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallModel tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M') model = FlaxBlenderbotSmallModel.from_pretrained('facebook/blenderbot_small-90M') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, FlaxBlenderbotSmallModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotSmallModel.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),tn=new P({props:{name:"encode",anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L975",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot_small.configuration_blenderbot_small.BlenderbotSmallConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nn=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M') tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),an=new P({props:{name:"decode",anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L1038",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot_small.configuration_blenderbot_small.BlenderbotSmallConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted 
average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),sn=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M') tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),rn=new qe({}),dn=new P({props:{name:"class transformers.FlaxBlenderbotSmallForConditionalGeneration",anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration",parameters:[{name:"config",val:": BlenderbotSmallConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": 
dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L1305",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig">BlenderbotSmallConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),_n=new P({props:{name:"__call__",anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L1153",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallConfig" >BlenderbotSmallConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of 
the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ho=new Sn({props:{$$slots:{default:[yf]},$$scope:{ctx:H}}}),bn=new qe({}),kn=new P({props:{name:"encode",anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L975",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotSmallPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot_small.configuration_blenderbot_small.BlenderbotSmallConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vn=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M') tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),yn=new P({props:{name:"decode",anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"deterministic",val:": bool = True"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py#L1309",parametersDescription:[{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallTokenizer">BlenderbotSmallTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotSmallForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot_small.configuration_blenderbot_small.BlenderbotSmallConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Tn=new qo({props:{code:`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M') tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='np') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotSmallTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/blenderbot_small-90M&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){u=t("meta"),z=l(),y=t("h1"),T=t("a"),x=t("span"),f(S.$$.fragment),w=l(),F=t("span"),Pe=r("Blenderbot Small"),fe=l(),B=t("p"),je=r("Note that "),U=t("a"),Oe=r("BlenderbotSmallModel"),Le=r(` and `),W=t("a"),Ne=r("BlenderbotSmallForConditionalGeneration"),Ae=r(` are only used in combination with the checkpoint `),Q=t("a"),V=r("facebook/blenderbot-90M"),Ie=r(`. 
Larger Blenderbot checkpoints should instead be used with `),Z=t("a"),M=r("BlenderbotModel"),O=r(` and `),_e=t("a"),se=r("BlenderbotForConditionalGeneration"),Ee=l(),J=t("h2"),I=t("a"),Te=t("span"),f(re.$$.fragment),L=l(),we=t("span"),de=r("Overview"),$e=l(),ee=t("p"),le=r("The Blender chatbot model was proposed in "),ie=t("a"),De=r("Recipes for building an open-domain chatbot"),X=r(` Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.`),Me=l(),R=t("p"),Ge=r("The abstract of the paper is the following:"),m=l(),q=t("p"),ce=t("em"),ro=r(`Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.`),Ke=l(),C=t("p"),lo=r("This model was contributed by "),Se=t("a"),io=r("patrickvonplaten"),Y=r(`. The authors\u2019 code can be found `),D=t("a"),co=r("here"),po=r(" ."),oe=l(),pe=t("h2"),ge=t("a"),xe=t("span"),f(Be.$$.fragment),bd=l(),sa=t("span"),kd=r("BlenderbotSmallConfig"),vr=l(),he=t("div"),f(et.$$.fragment),vd=l(),ho=t("p"),yd=r("This is the configuration class to store the configuration of a "),xn=t("a"),Td=r("BlenderbotSmallModel"),wd=r(`. It is used to instantiate an BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall `),ot=t("a"),Sd=r("facebook/blenderbot_small-90M"),xd=r(" architecture."),Bd=l(),uo=t("p"),zd=r("Configuration objects inherit from "),Bn=t("a"),Fd=r("PretrainedConfig"),qd=r(` and can be used to control the model outputs. Read the documentation from `),zn=t("a"),Ed=r("PretrainedConfig"),$d=r(" for more information."),Md=l(),ra=t("p"),Cd=r("Example:"),Pd=l(),f(tt.$$.fragment),yr=l(),mo=t("h2"),Eo=t("a"),da=t("span"),f(nt.$$.fragment),jd=l(),la=t("span"),Od=r("BlenderbotSmallTokenizer"),Tr=l(),G=t("div"),f(at.$$.fragment),Ld=l(),ia=t("p"),Nd=r("Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)"),Ad=l(),st=t("p"),Id=r("This tokenizer inherits from "),Fn=t("a"),Dd=r("PreTrainedTokenizer"),Gd=r(` which contains most of the main methods. 
Users should refer to the superclass for more information regarding methods.`),Ud=l(),He=t("div"),f(rt.$$.fragment),Wd=l(),ca=t("p"),Rd=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.`),Kd=l(),pa=t("p"),Hd=r("This implementation does not add special tokens and this method should be overridden in a subclass."),Qd=l(),$o=t("div"),f(dt.$$.fragment),Vd=l(),fo=t("p"),Jd=r(`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ha=t("code"),Xd=r("prepare_for_model"),Yd=r(" or "),ua=t("code"),Zd=r("encode_plus"),el=r(" methods."),ol=l(),Qe=t("div"),f(lt.$$.fragment),tl=l(),qn=t("p"),nl=r("Create the token type IDs corresponding to the sequences passed. "),En=t("a"),al=r("What are token type IDs?"),sl=l(),ma=t("p"),rl=r("Should be overridden in a subclass if the model has a special way of building those."),dl=l(),fa=t("div"),wr=l(),_o=t("h2"),Mo=t("a"),_a=t("span"),f(it.$$.fragment),ll=l(),ga=t("span"),il=r("BlenderbotSmallTokenizerFast"),Sr=l(),Ue=t("div"),f(ct.$$.fragment),cl=l(),pt=t("p"),pl=r("Construct a \u201Cfast\u201D BlenderbotSmall tokenizer (backed by HuggingFace\u2019s "),ba=t("em"),hl=r("tokenizers"),ul=r(" library)."),ml=l(),Co=t("div"),f(ht.$$.fragment),fl=l(),ka=t("p"),_l=r(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall does not make use of token type ids, therefore a list of zeros is returned.`),xr=l(),go=t("h2"),Po=t("a"),va=t("span"),f(ut.$$.fragment),gl=l(),ya=t("span"),bl=r("BlenderbotSmallModel"),Br=l(),ze=t("div"),f(mt.$$.fragment),kl=l(),ft=t("p"),vl=r(`The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top. This model inherits from `),$n=t("a"),yl=r("PreTrainedModel"),Tl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wl=l(),_t=t("p"),Sl=r("This model is also a PyTorch "),gt=t("a"),xl=r("torch.nn.Module"),Bl=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zl=l(),be=t("div"),f(bt.$$.fragment),Fl=l(),bo=t("p"),ql=r("The "),Mn=t("a"),El=r("BlenderbotSmallModel"),$l=r(" forward method, overrides the "),Ta=t("code"),Ml=r("__call__"),Cl=r(" special method."),Pl=l(),f(jo.$$.fragment),jl=l(),wa=t("p"),Ol=r("Example:"),Ll=l(),f(kt.$$.fragment),zr=l(),ko=t("h2"),Oo=t("a"),Sa=t("span"),f(vt.$$.fragment),Nl=l(),xa=t("span"),Al=r("BlenderbotSmallForConditionalGeneration"),Fr=l(),Fe=t("div"),f(yt.$$.fragment),Il=l(),Tt=t("p"),Dl=r(`The BlenderbotSmall Model with a language modeling head. Can be used for summarization. This model inherits from `),Cn=t("a"),Gl=r("PreTrainedModel"),Ul=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wl=l(),wt=t("p"),Rl=r("This model is also a PyTorch "),St=t("a"),Kl=r("torch.nn.Module"),Hl=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ql=l(),te=t("div"),f(xt.$$.fragment),Vl=l(),vo=t("p"),Jl=r("The "),Pn=t("a"),Xl=r("BlenderbotSmallForConditionalGeneration"),Yl=r(" forward method, overrides the "),Ba=t("code"),Zl=r("__call__"),ei=r(" special method."),oi=l(),f(Lo.$$.fragment),ti=l(),za=t("p"),ni=r("Conversation example::"),ai=l(),Fa=t("blockquote"),qa=t("blockquote"),Ea=t("blockquote"),$a=t("p"),si=r(`from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForConditionalGeneration mname = \u2018facebook/blenderbot_small-90M\u2019 model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname) UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018pt\u2019) reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) what kind of carbs do they eat? i don\u2019t know much about carbs.`),ri=l(),Ma=t("blockquote"),Ca=t("blockquote"),Pa=t("blockquote"),Bt=t("p"),di=r(`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> \u201D \u2026 \u201D`),ja=t("s"),li=r("what kind of carbs do they eat? i don\u2019t know much about carbs."),ii=r(` \u201D \u2026 \u201D<s>I\u2019m not sure.\u201D \u2026 ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018pt\u2019) inputs.pop(\u201Ctoken_type_ids\u201D) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),qr=l(),yo=t("h2"),No=t("a"),Oa=t("span"),f(zt.$$.fragment),ci=l(),La=t("span"),pi=r("BlenderbotSmallForCausalLM"),Er=l(),Ft=t("div"),Ve=t("div"),f(qt.$$.fragment),hi=l(),Na=t("p"),ui=r("Example:"),mi=l(),f(Et.$$.fragment),$r=l(),To=t("h2"),Ao=t("a"),Aa=t("span"),f($t.$$.fragment),fi=l(),Ia=t("span"),_i=r("TFBlenderbotSmallModel"),Mr=l(),ue=t("div"),f(Mt.$$.fragment),gi=l(),Ct=t("p"),bi=r(`The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top. This model inherits from `),jn=t("a"),ki=r("TFPreTrainedModel"),vi=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yi=l(),Pt=t("p"),Ti=r("This model is also a "),jt=t("a"),wi=r("tf.keras.Model"),Si=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),xi=l(),f(Io.$$.fragment),Bi=l(),ke=t("div"),f(Ot.$$.fragment),zi=l(),wo=t("p"),Fi=r("The "),On=t("a"),qi=r("TFBlenderbotSmallModel"),Ei=r(" forward method, overrides the "),Da=t("code"),$i=r("__call__"),Mi=r(" special method."),Ci=l(),f(Do.$$.fragment),Pi=l(),Ga=t("p"),ji=r("Example:"),Oi=l(),f(Lt.$$.fragment),Cr=l(),So=t("h2"),Go=t("a"),Ua=t("span"),f(Nt.$$.fragment),Li=l(),Wa=t("span"),Ni=r("TFBlenderbotSmallForConditionalGeneration"),Pr=l(),me=t("div"),f(At.$$.fragment),Ai=l(),It=t("p"),Ii=r(`The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization. This model inherits from `),Ln=t("a"),Di=r("TFPreTrainedModel"),Gi=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ui=l(),Dt=t("p"),Wi=r("This model is also a "),Gt=t("a"),Ri=r("tf.keras.Model"),Ki=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hi=l(),f(Uo.$$.fragment),Qi=l(),j=t("div"),f(Ut.$$.fragment),Vi=l(),xo=t("p"),Ji=r("The "),Nn=t("a"),Xi=r("TFBlenderbotSmallForConditionalGeneration"),Yi=r(" forward method, overrides the "),Ra=t("code"),Zi=r("__call__"),ec=r(" special method."),oc=l(),f(Wo.$$.fragment),tc=l(),Ka=t("p"),nc=r("Conversation example::"),ac=l(),Ha=t("blockquote"),Qa=t("blockquote"),Va=t("blockquote"),Ja=t("p"),sc=r(`from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration mname = \u2018facebook/blenderbot_small-90M\u2019 model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)`),rc=l(),Xa=t("blockquote"),Ya=t("blockquote"),Za=t("blockquote"),es=t("p"),dc=r(`UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018tf\u2019)`),lc=l(),os=t("blockquote"),ts=t("blockquote"),ns=t("blockquote"),as=t("p"),ic=r(`reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) what kind of carbs do they eat? i don\u2019t know much about carbs.`),cc=l(),ss=t("blockquote"),rs=t("blockquote"),ds=t("blockquote"),Wt=t("p"),pc=r(`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> \u201D \u2026 \u201D`),ls=t("s"),hc=r("what kind of carbs do they eat? i don\u2019t know much about carbs."),uc=r(` \u201D \u2026 \u201D<s>I\u2019m not sure.\u201D \u2026 )`),mc=l(),is=t("blockquote"),cs=t("blockquote"),ps=t("blockquote"),hs=t("p"),fc=r(`inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018tf\u2019) inputs.pop(\u201Ctoken_type_ids\u201D) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),jr=l(),Bo=t("h2"),Ro=t("a"),us=t("span"),f(Rt.$$.fragment),_c=l(),ms=t("span"),gc=r("FlaxBlenderbotSmallModel"),Or=l(),N=t("div"),f(Kt.$$.fragment),bc=l(),Ht=t("p"),kc=r(`The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),An=t("a"),vc=r("FlaxPreTrainedModel"),yc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tc=l(),Qt=t("p"),wc=r("This model is also a Flax Linen "),Vt=t("a"),Sc=r("flax.nn.Module"),xc=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Bc=l(),fs=t("p"),zc=r("Finally, this model supports inherent JAX features such as:"),Fc=l(),We=t("ul"),_s=t("li"),Jt=t("a"),qc=r("Just-In-Time (JIT) compilation"),Ec=l(),gs=t("li"),Xt=t("a"),$c=r("Automatic Differentiation"),Mc=l(),bs=t("li"),Yt=t("a"),Cc=r("Vectorization"),Pc=l(),ks=t("li"),Zt=t("a"),jc=r("Parallelization"),Oc=l(),Je=t("div"),f(en.$$.fragment),Lc=l(),vs=t("p"),Nc=r("Example:"),Ac=l(),f(on.$$.fragment),Ic=l(),Xe=t("div"),f(tn.$$.fragment),Dc=l(),ys=t("p"),Gc=r("Example:"),Uc=l(),f(nn.$$.fragment),Wc=l(),Ye=t("div"),f(an.$$.fragment),Rc=l(),Ts=t("p"),Kc=r("Example:"),Hc=l(),f(sn.$$.fragment),Lr=l(),zo=t("h2"),Ko=t("a"),ws=t("span"),f(rn.$$.fragment),Qc=l(),Ss=t("span"),Vc=r("FlaxBlenderbotForConditionalGeneration"),Nr=l(),A=t("div"),f(dn.$$.fragment),Jc=l(),ln=t("p"),Xc=r(`The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization. This model inherits from `),In=t("a"),Yc=r("FlaxPreTrainedModel"),Zc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ep=l(),cn=t("p"),op=r("This model is also a Flax Linen "),pn=t("a"),tp=r("flax.nn.Module"),np=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ap=l(),xs=t("p"),sp=r("Finally, this model supports inherent JAX features such as:"),rp=l(),Re=t("ul"),Bs=t("li"),hn=t("a"),dp=r("Just-In-Time (JIT) compilation"),lp=l(),zs=t("li"),un=t("a"),ip=r("Automatic Differentiation"),cp=l(),Fs=t("li"),mn=t("a"),pp=r("Vectorization"),hp=l(),qs=t("li"),fn=t("a"),up=r("Parallelization"),mp=l(),E=t("div"),f(_n.$$.fragment),fp=l(),Fo=t("p"),_p=r("The "),Es=t("code"),gp=r("FlaxBlenderbotSmallPreTrainedModel"),bp=r(" forward method, overrides the "),$s=t("code"),kp=r("__call__"),vp=r(" special method."),yp=l(),f(Ho.$$.fragment),Tp=l(),Ms=t("p"),wp=r("Summarization example::"),Sp=l(),Cs=t("blockquote"),Ps=t("blockquote"),js=t("blockquote"),Os=t("p"),xp=r("from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration"),Bp=l(),Ls=t("blockquote"),Ns=t("blockquote"),As=t("blockquote"),Is=t("p"),zp=r(`model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019) tokenizer = BlenderbotSmallTokenizer.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019)`),Fp=l(),Ds=t("blockquote"),Gs=t("blockquote"),Us=t("blockquote"),Ws=t("p"),qp=r(`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018np\u2019)`),Ep=l(),Rs=t("blockquote"),Ks=t("blockquote"),gn=t("blockquote"),Qo=t("h1"),Vo=t("a"),Hs=t("span"),f(bn.$$.fragment),$p=l(),Qs=t("span"),Mp=r("Generate Summary"),Cp=l(),Vs=t("p"),Pp=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019]).sequences print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))`),jp=l(),Js=t("p"),Op=r("Mask filling example::"),Lp=l(),Xs=t("blockquote"),Ys=t("blockquote"),Zs=t("blockquote"),er=t("p"),Np=r(`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration tokenizer = BlenderbotSmallTokenizer.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019) TXT = \u201CMy friends are 
<mask> but they eat too many carbs.\u201D`),Ap=l(),or=t("blockquote"),tr=t("blockquote"),nr=t("blockquote"),ar=t("p"),Ip=r(`model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018np\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),Dp=l(),sr=t("blockquote"),rr=t("blockquote"),dr=t("blockquote"),lr=t("p"),Gp=r(`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = jax.nn.softmax(logits[0, masked_index], axis=0) values, predictions = jax.lax.top_k(probs)`),Up=l(),ir=t("blockquote"),cr=t("blockquote"),pr=t("blockquote"),hr=t("p"),Wp=r("tokenizer.decode(predictions).split()"),Rp=l(),Ze=t("div"),f(kn.$$.fragment),Kp=l(),ur=t("p"),Hp=r("Example:"),Qp=l(),f(vn.$$.fragment),Vp=l(),eo=t("div"),f(yn.$$.fragment),Jp=l(),mr=t("p"),Xp=r("Example:"),Yp=l(),f(Tn.$$.fragment),this.h()},l(s){const h=mf('[data-svelte="svelte-1phssyn"]',document.head);u=n(h,"META",{name:!0,content:!0}),h.forEach(o),z=i(s),y=n(s,"H1",{class:!0});var wn=a(y);T=n(wn,"A",{id:!0,class:!0,href:!0});var fr=a(T);x=n(fr,"SPAN",{});var _r=a(x);_(S.$$.fragment,_r),_r.forEach(o),fr.forEach(o),w=i(wn),F=n(wn,"SPAN",{});var gr=a(F);Pe=d(gr,"Blenderbot Small"),gr.forEach(o),wn.forEach(o),fe=i(s),B=n(s,"P",{});var ve=a(B);je=d(ve,"Note that "),U=n(ve,"A",{href:!0});var br=a(U);Oe=d(br,"BlenderbotSmallModel"),br.forEach(o),Le=d(ve,` and `),W=n(ve,"A",{href:!0});var kr=a(W);Ne=d(kr,"BlenderbotSmallForConditionalGeneration"),kr.forEach(o),Ae=d(ve,` are only used in combination with the checkpoint `),Q=n(ve,"A",{href:!0,rel:!0});var eh=a(Q);V=d(eh,"facebook/blenderbot-90M"),eh.forEach(o),Ie=d(ve,`. Larger Blenderbot checkpoints should instead be used with `),Z=n(ve,"A",{href:!0});var oh=a(Z);M=d(oh,"BlenderbotModel"),oh.forEach(o),O=d(ve,` and `),_e=n(ve,"A",{href:!0});var th=a(_e);se=d(th,"BlenderbotForConditionalGeneration"),th.forEach(o),ve.forEach(o),Ee=i(s),J=n(s,"H2",{class:!0});var Ir=a(J);I=n(Ir,"A",{id:!0,class:!0,href:!0});var nh=a(I);Te=n(nh,"SPAN",{});var ah=a(Te);_(re.$$.fragment,ah),ah.forEach(o),nh.forEach(o),L=i(Ir),we=n(Ir,"SPAN",{});var sh=a(we);de=d(sh,"Overview"),sh.forEach(o),Ir.forEach(o),$e=i(s),ee=n(s,"P",{});var Dr=a(ee);le=d(Dr,"The Blender chatbot model was proposed in "),ie=n(Dr,"A",{href:!0,rel:!0});var rh=a(ie);De=d(rh,"Recipes for building an open-domain chatbot"),rh.forEach(o),X=d(Dr,` Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.`),Dr.forEach(o),Me=i(s),R=n(s,"P",{});var dh=a(R);Ge=d(dh,"The abstract of the paper is the following:"),dh.forEach(o),m=i(s),q=n(s,"P",{});var lh=a(q);ce=n(lh,"EM",{});var ih=a(ce);ro=d(ih,`Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. 
We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.`),ih.forEach(o),lh.forEach(o),Ke=i(s),C=n(s,"P",{});var Dn=a(C);lo=d(Dn,"This model was contributed by "),Se=n(Dn,"A",{href:!0,rel:!0});var ch=a(Se);io=d(ch,"patrickvonplaten"),ch.forEach(o),Y=d(Dn,`. The authors\u2019 code can be found `),D=n(Dn,"A",{href:!0,rel:!0});var ph=a(D);co=d(ph,"here"),ph.forEach(o),po=d(Dn," ."),Dn.forEach(o),oe=i(s),pe=n(s,"H2",{class:!0});var Gr=a(pe);ge=n(Gr,"A",{id:!0,class:!0,href:!0});var hh=a(ge);xe=n(hh,"SPAN",{});var uh=a(xe);_(Be.$$.fragment,uh),uh.forEach(o),hh.forEach(o),bd=i(Gr),sa=n(Gr,"SPAN",{});var mh=a(sa);kd=d(mh,"BlenderbotSmallConfig"),mh.forEach(o),Gr.forEach(o),vr=i(s),he=n(s,"DIV",{class:!0});var oo=a(he);_(et.$$.fragment,oo),vd=i(oo),ho=n(oo,"P",{});var Gn=a(ho);yd=d(Gn,"This is the configuration class to store the configuration of a "),xn=n(Gn,"A",{href:!0});var fh=a(xn);Td=d(fh,"BlenderbotSmallModel"),fh.forEach(o),wd=d(Gn,`. It is used to instantiate an BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall `),ot=n(Gn,"A",{href:!0,rel:!0});var _h=a(ot);Sd=d(_h,"facebook/blenderbot_small-90M"),_h.forEach(o),xd=d(Gn," architecture."),Gn.forEach(o),Bd=i(oo),uo=n(oo,"P",{});var Un=a(uo);zd=d(Un,"Configuration objects inherit from "),Bn=n(Un,"A",{href:!0});var gh=a(Bn);Fd=d(gh,"PretrainedConfig"),gh.forEach(o),qd=d(Un,` and can be used to control the model outputs. Read the documentation from `),zn=n(Un,"A",{href:!0});var bh=a(zn);Ed=d(bh,"PretrainedConfig"),bh.forEach(o),$d=d(Un," for more information."),Un.forEach(o),Md=i(oo),ra=n(oo,"P",{});var kh=a(ra);Cd=d(kh,"Example:"),kh.forEach(o),Pd=i(oo),_(tt.$$.fragment,oo),oo.forEach(o),yr=i(s),mo=n(s,"H2",{class:!0});var Ur=a(mo);Eo=n(Ur,"A",{id:!0,class:!0,href:!0});var vh=a(Eo);da=n(vh,"SPAN",{});var yh=a(da);_(nt.$$.fragment,yh),yh.forEach(o),vh.forEach(o),jd=i(Ur),la=n(Ur,"SPAN",{});var Th=a(la);Od=d(Th,"BlenderbotSmallTokenizer"),Th.forEach(o),Ur.forEach(o),Tr=i(s),G=n(s,"DIV",{class:!0});var ye=a(G);_(at.$$.fragment,ye),Ld=i(ye),ia=n(ye,"P",{});var wh=a(ia);Nd=d(wh,"Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)"),wh.forEach(o),Ad=i(ye),st=n(ye,"P",{});var Wr=a(st);Id=d(Wr,"This tokenizer inherits from "),Fn=n(Wr,"A",{href:!0});var Sh=a(Fn);Dd=d(Sh,"PreTrainedTokenizer"),Sh.forEach(o),Gd=d(Wr,` which contains most of the main methods. Users should refer to the superclass for more information regarding methods.`),Wr.forEach(o),Ud=i(ye),He=n(ye,"DIV",{class:!0});var Wn=a(He);_(rt.$$.fragment,Wn),Wd=i(Wn),ca=n(Wn,"P",{});var xh=a(ca);Rd=d(xh,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.`),xh.forEach(o),Kd=i(Wn),pa=n(Wn,"P",{});var Bh=a(pa);Hd=d(Bh,"This implementation does not add special tokens and this method should be overridden in a subclass."),Bh.forEach(o),Wn.forEach(o),Qd=i(ye),$o=n(ye,"DIV",{class:!0});var Rr=a($o);_(dt.$$.fragment,Rr),Vd=i(Rr),fo=n(Rr,"P",{});var Rn=a(fo);Jd=d(Rn,`Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),ha=n(Rn,"CODE",{});var zh=a(ha);Xd=d(zh,"prepare_for_model"),zh.forEach(o),Yd=d(Rn," or "),ua=n(Rn,"CODE",{});var Fh=a(ua);Zd=d(Fh,"encode_plus"),Fh.forEach(o),el=d(Rn," methods."),Rn.forEach(o),Rr.forEach(o),ol=i(ye),Qe=n(ye,"DIV",{class:!0});var Kn=a(Qe);_(lt.$$.fragment,Kn),tl=i(Kn),qn=n(Kn,"P",{});var Zp=a(qn);nl=d(Zp,"Create the token type IDs corresponding to the sequences passed. "),En=n(Zp,"A",{href:!0});var qh=a(En);al=d(qh,"What are token type IDs?"),qh.forEach(o),Zp.forEach(o),sl=i(Kn),ma=n(Kn,"P",{});var Eh=a(ma);rl=d(Eh,"Should be overridden in a subclass if the model has a special way of building those."),Eh.forEach(o),Kn.forEach(o),dl=i(ye),fa=n(ye,"DIV",{class:!0}),a(fa).forEach(o),ye.forEach(o),wr=i(s),_o=n(s,"H2",{class:!0});var Kr=a(_o);Mo=n(Kr,"A",{id:!0,class:!0,href:!0});var $h=a(Mo);_a=n($h,"SPAN",{});var Mh=a(_a);_(it.$$.fragment,Mh),Mh.forEach(o),$h.forEach(o),ll=i(Kr),ga=n(Kr,"SPAN",{});var Ch=a(ga);il=d(Ch,"BlenderbotSmallTokenizerFast"),Ch.forEach(o),Kr.forEach(o),Sr=i(s),Ue=n(s,"DIV",{class:!0});var Hn=a(Ue);_(ct.$$.fragment,Hn),cl=i(Hn),pt=n(Hn,"P",{});var Hr=a(pt);pl=d(Hr,"Construct a \u201Cfast\u201D BlenderbotSmall tokenizer (backed by HuggingFace\u2019s "),ba=n(Hr,"EM",{});var Ph=a(ba);hl=d(Ph,"tokenizers"),Ph.forEach(o),ul=d(Hr," library)."),Hr.forEach(o),ml=i(Hn),Co=n(Hn,"DIV",{class:!0});var Qr=a(Co);_(ht.$$.fragment,Qr),fl=i(Qr),ka=n(Qr,"P",{});var jh=a(ka);_l=d(jh,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall does not make use of token type ids, therefore a list of zeros is returned.`),jh.forEach(o),Qr.forEach(o),Hn.forEach(o),xr=i(s),go=n(s,"H2",{class:!0});var Vr=a(go);Po=n(Vr,"A",{id:!0,class:!0,href:!0});var Oh=a(Po);va=n(Oh,"SPAN",{});var Lh=a(va);_(ut.$$.fragment,Lh),Lh.forEach(o),Oh.forEach(o),gl=i(Vr),ya=n(Vr,"SPAN",{});var Nh=a(ya);bl=d(Nh,"BlenderbotSmallModel"),Nh.forEach(o),Vr.forEach(o),Br=i(s),ze=n(s,"DIV",{class:!0});var Jo=a(ze);_(mt.$$.fragment,Jo),kl=i(Jo),ft=n(Jo,"P",{});var Jr=a(ft);vl=d(Jr,`The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top. This model inherits from `),$n=n(Jr,"A",{href:!0});var Ah=a($n);yl=d(Ah,"PreTrainedModel"),Ah.forEach(o),Tl=d(Jr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jr.forEach(o),wl=i(Jo),_t=n(Jo,"P",{});var Xr=a(_t);Sl=d(Xr,"This model is also a PyTorch "),gt=n(Xr,"A",{href:!0,rel:!0});var Ih=a(gt);xl=d(Ih,"torch.nn.Module"),Ih.forEach(o),Bl=d(Xr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xr.forEach(o),zl=i(Jo),be=n(Jo,"DIV",{class:!0});var to=a(be);_(bt.$$.fragment,to),Fl=i(to),bo=n(to,"P",{});var Qn=a(bo);ql=d(Qn,"The "),Mn=n(Qn,"A",{href:!0});var Dh=a(Mn);El=d(Dh,"BlenderbotSmallModel"),Dh.forEach(o),$l=d(Qn," forward method, overrides the "),Ta=n(Qn,"CODE",{});var Gh=a(Ta);Ml=d(Gh,"__call__"),Gh.forEach(o),Cl=d(Qn," special method."),Qn.forEach(o),Pl=i(to),_(jo.$$.fragment,to),jl=i(to),wa=n(to,"P",{});var Uh=a(wa);Ol=d(Uh,"Example:"),Uh.forEach(o),Ll=i(to),_(kt.$$.fragment,to),to.forEach(o),Jo.forEach(o),zr=i(s),ko=n(s,"H2",{class:!0});var Yr=a(ko);Oo=n(Yr,"A",{id:!0,class:!0,href:!0});var Wh=a(Oo);Sa=n(Wh,"SPAN",{});var Rh=a(Sa);_(vt.$$.fragment,Rh),Rh.forEach(o),Wh.forEach(o),Nl=i(Yr),xa=n(Yr,"SPAN",{});var Kh=a(xa);Al=d(Kh,"BlenderbotSmallForConditionalGeneration"),Kh.forEach(o),Yr.forEach(o),Fr=i(s),Fe=n(s,"DIV",{class:!0});var Xo=a(Fe);_(yt.$$.fragment,Xo),Il=i(Xo),Tt=n(Xo,"P",{});var Zr=a(Tt);Dl=d(Zr,`The BlenderbotSmall Model with a language modeling head. Can be used for summarization. This model inherits from `),Cn=n(Zr,"A",{href:!0});var Hh=a(Cn);Gl=d(Hh,"PreTrainedModel"),Hh.forEach(o),Ul=d(Zr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zr.forEach(o),Wl=i(Xo),wt=n(Xo,"P",{});var ed=a(wt);Rl=d(ed,"This model is also a PyTorch "),St=n(ed,"A",{href:!0,rel:!0});var Qh=a(St);Kl=d(Qh,"torch.nn.Module"),Qh.forEach(o),Hl=d(ed,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ed.forEach(o),Ql=i(Xo),te=n(Xo,"DIV",{class:!0});var Ce=a(te);_(xt.$$.fragment,Ce),Vl=i(Ce),vo=n(Ce,"P",{});var Vn=a(vo);Jl=d(Vn,"The "),Pn=n(Vn,"A",{href:!0});var Vh=a(Pn);Xl=d(Vh,"BlenderbotSmallForConditionalGeneration"),Vh.forEach(o),Yl=d(Vn," forward method, overrides the "),Ba=n(Vn,"CODE",{});var Jh=a(Ba);Zl=d(Jh,"__call__"),Jh.forEach(o),ei=d(Vn," special method."),Vn.forEach(o),oi=i(Ce),_(Lo.$$.fragment,Ce),ti=i(Ce),za=n(Ce,"P",{});var Xh=a(za);ni=d(Xh,"Conversation example::"),Xh.forEach(o),ai=i(Ce),Fa=n(Ce,"BLOCKQUOTE",{});var Yh=a(Fa);qa=n(Yh,"BLOCKQUOTE",{});var Zh=a(qa);Ea=n(Zh,"BLOCKQUOTE",{});var eu=a(Ea);$a=n(eu,"P",{});var ou=a($a);si=d(ou,`from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForConditionalGeneration mname = \u2018facebook/blenderbot_small-90M\u2019 model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname) UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018pt\u2019) reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) what kind of carbs do they eat? 
i don\u2019t know much about carbs.`),ou.forEach(o),eu.forEach(o),Zh.forEach(o),Yh.forEach(o),ri=i(Ce),Ma=n(Ce,"BLOCKQUOTE",{});var tu=a(Ma);Ca=n(tu,"BLOCKQUOTE",{});var nu=a(Ca);Pa=n(nu,"BLOCKQUOTE",{});var au=a(Pa);Bt=n(au,"P",{});var od=a(Bt);di=d(od,`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> \u201D \u2026 \u201D`),ja=n(od,"S",{});var su=a(ja);li=d(su,"what kind of carbs do they eat? i don\u2019t know much about carbs."),su.forEach(o),ii=d(od,` \u201D \u2026 \u201D<s>I\u2019m not sure.\u201D \u2026 ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018pt\u2019) inputs.pop(\u201Ctoken_type_ids\u201D) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),od.forEach(o),au.forEach(o),nu.forEach(o),tu.forEach(o),Ce.forEach(o),Xo.forEach(o),qr=i(s),yo=n(s,"H2",{class:!0});var td=a(yo);No=n(td,"A",{id:!0,class:!0,href:!0});var ru=a(No);Oa=n(ru,"SPAN",{});var du=a(Oa);_(zt.$$.fragment,du),du.forEach(o),ru.forEach(o),ci=i(td),La=n(td,"SPAN",{});var lu=a(La);pi=d(lu,"BlenderbotSmallForCausalLM"),lu.forEach(o),td.forEach(o),Er=i(s),Ft=n(s,"DIV",{class:!0});var iu=a(Ft);Ve=n(iu,"DIV",{class:!0});var Jn=a(Ve);_(qt.$$.fragment,Jn),hi=i(Jn),Na=n(Jn,"P",{});var cu=a(Na);ui=d(cu,"Example:"),cu.forEach(o),mi=i(Jn),_(Et.$$.fragment,Jn),Jn.forEach(o),iu.forEach(o),$r=i(s),To=n(s,"H2",{class:!0});var nd=a(To);Ao=n(nd,"A",{id:!0,class:!0,href:!0});var pu=a(Ao);Aa=n(pu,"SPAN",{});var hu=a(Aa);_($t.$$.fragment,hu),hu.forEach(o),pu.forEach(o),fi=i(nd),Ia=n(nd,"SPAN",{});var uu=a(Ia);_i=d(uu,"TFBlenderbotSmallModel"),uu.forEach(o),nd.forEach(o),Mr=i(s),ue=n(s,"DIV",{class:!0});var no=a(ue);_(Mt.$$.fragment,no),gi=i(no),Ct=n(no,"P",{});var ad=a(Ct);bi=d(ad,`The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top. This model inherits from `),jn=n(ad,"A",{href:!0});var mu=a(jn);ki=d(mu,"TFPreTrainedModel"),mu.forEach(o),vi=d(ad,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ad.forEach(o),yi=i(no),Pt=n(no,"P",{});var sd=a(Pt);Ti=d(sd,"This model is also a "),jt=n(sd,"A",{href:!0,rel:!0});var fu=a(jt);wi=d(fu,"tf.keras.Model"),fu.forEach(o),Si=d(sd,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sd.forEach(o),xi=i(no),_(Io.$$.fragment,no),Bi=i(no),ke=n(no,"DIV",{class:!0});var ao=a(ke);_(Ot.$$.fragment,ao),zi=i(ao),wo=n(ao,"P",{});var Xn=a(wo);Fi=d(Xn,"The "),On=n(Xn,"A",{href:!0});var _u=a(On);qi=d(_u,"TFBlenderbotSmallModel"),_u.forEach(o),Ei=d(Xn," forward method, overrides the "),Da=n(Xn,"CODE",{});var gu=a(Da);$i=d(gu,"__call__"),gu.forEach(o),Mi=d(Xn," special method."),Xn.forEach(o),Ci=i(ao),_(Do.$$.fragment,ao),Pi=i(ao),Ga=n(ao,"P",{});var bu=a(Ga);ji=d(bu,"Example:"),bu.forEach(o),Oi=i(ao),_(Lt.$$.fragment,ao),ao.forEach(o),no.forEach(o),Cr=i(s),So=n(s,"H2",{class:!0});var rd=a(So);Go=n(rd,"A",{id:!0,class:!0,href:!0});var ku=a(Go);Ua=n(ku,"SPAN",{});var vu=a(Ua);_(Nt.$$.fragment,vu),vu.forEach(o),ku.forEach(o),Li=i(rd),Wa=n(rd,"SPAN",{});var yu=a(Wa);Ni=d(yu,"TFBlenderbotSmallForConditionalGeneration"),yu.forEach(o),rd.forEach(o),Pr=i(s),me=n(s,"DIV",{class:!0});var so=a(me);_(At.$$.fragment,so),Ai=i(so),It=n(so,"P",{});var dd=a(It);Ii=d(dd,`The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization. This model inherits from `),Ln=n(dd,"A",{href:!0});var Tu=a(Ln);Di=d(Tu,"TFPreTrainedModel"),Tu.forEach(o),Gi=d(dd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dd.forEach(o),Ui=i(so),Dt=n(so,"P",{});var ld=a(Dt);Wi=d(ld,"This model is also a "),Gt=n(ld,"A",{href:!0,rel:!0});var wu=a(Gt);Ri=d(wu,"tf.keras.Model"),wu.forEach(o),Ki=d(ld,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ld.forEach(o),Hi=i(so),_(Uo.$$.fragment,so),Qi=i(so),j=n(so,"DIV",{class:!0});var K=a(j);_(Ut.$$.fragment,K),Vi=i(K),xo=n(K,"P",{});var Yn=a(xo);Ji=d(Yn,"The "),Nn=n(Yn,"A",{href:!0});var Su=a(Nn);Xi=d(Su,"TFBlenderbotSmallForConditionalGeneration"),Su.forEach(o),Yi=d(Yn," forward method, overrides the "),Ra=n(Yn,"CODE",{});var xu=a(Ra);Zi=d(xu,"__call__"),xu.forEach(o),ec=d(Yn," special method."),Yn.forEach(o),oc=i(K),_(Wo.$$.fragment,K),tc=i(K),Ka=n(K,"P",{});var Bu=a(Ka);nc=d(Bu,"Conversation example::"),Bu.forEach(o),ac=i(K),Ha=n(K,"BLOCKQUOTE",{});var zu=a(Ha);Qa=n(zu,"BLOCKQUOTE",{});var Fu=a(Qa);Va=n(Fu,"BLOCKQUOTE",{});var qu=a(Va);Ja=n(qu,"P",{});var Eu=a(Ja);sc=d(Eu,`from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration mname = \u2018facebook/blenderbot_small-90M\u2019 model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)`),Eu.forEach(o),qu.forEach(o),Fu.forEach(o),zu.forEach(o),rc=i(K),Xa=n(K,"BLOCKQUOTE",{});var $u=a(Xa);Ya=n($u,"BLOCKQUOTE",{});var Mu=a(Ya);Za=n(Mu,"BLOCKQUOTE",{});var Cu=a(Za);es=n(Cu,"P",{});var Pu=a(es);dc=d(Pu,`UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D print(\u201CHuman: \u201D, UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors=\u2018tf\u2019)`),Pu.forEach(o),Cu.forEach(o),Mu.forEach(o),$u.forEach(o),lc=i(K),os=n(K,"BLOCKQUOTE",{});var ju=a(os);ts=n(ju,"BLOCKQUOTE",{});var Ou=a(ts);ns=n(Ou,"BLOCKQUOTE",{});var Lu=a(ns);as=n(Lu,"P",{});var Nu=a(as);ic=d(Nu,`reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) what kind of carbs do they eat? 
i don\u2019t know much about carbs.`),Nu.forEach(o),Lu.forEach(o),Ou.forEach(o),ju.forEach(o),cc=i(K),ss=n(K,"BLOCKQUOTE",{});var Au=a(ss);rs=n(Au,"BLOCKQUOTE",{});var Iu=a(rs);ds=n(Iu,"BLOCKQUOTE",{});var Du=a(ds);Wt=n(Du,"P",{});var id=a(Wt);pc=d(id,`REPLY = \u201CI\u2019m not sure\u201D print(\u201CHuman: \u201D, REPLY) NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> \u201D \u2026 \u201D`),ls=n(id,"S",{});var Gu=a(ls);hc=d(Gu,"what kind of carbs do they eat? i don\u2019t know much about carbs."),Gu.forEach(o),uc=d(id,` \u201D \u2026 \u201D<s>I\u2019m not sure.\u201D \u2026 )`),id.forEach(o),Du.forEach(o),Iu.forEach(o),Au.forEach(o),mc=i(K),is=n(K,"BLOCKQUOTE",{});var Uu=a(is);cs=n(Uu,"BLOCKQUOTE",{});var Wu=a(cs);ps=n(Wu,"BLOCKQUOTE",{});var Ru=a(ps);hs=n(Ru,"P",{});var Ku=a(hs);fc=d(Ku,`inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018tf\u2019) inputs.pop(\u201Ctoken_type_ids\u201D) next_reply_ids = model.generate(**inputs) print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),Ku.forEach(o),Ru.forEach(o),Wu.forEach(o),Uu.forEach(o),K.forEach(o),so.forEach(o),jr=i(s),Bo=n(s,"H2",{class:!0});var cd=a(Bo);Ro=n(cd,"A",{id:!0,class:!0,href:!0});var Hu=a(Ro);us=n(Hu,"SPAN",{});var Qu=a(us);_(Rt.$$.fragment,Qu),Qu.forEach(o),Hu.forEach(o),_c=i(cd),ms=n(cd,"SPAN",{});var Vu=a(ms);gc=d(Vu,"FlaxBlenderbotSmallModel"),Vu.forEach(o),cd.forEach(o),Or=i(s),N=n(s,"DIV",{class:!0});var ne=a(N);_(Kt.$$.fragment,ne),bc=i(ne),Ht=n(ne,"P",{});var pd=a(Ht);kc=d(pd,`The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),An=n(pd,"A",{href:!0});var Ju=a(An);vc=d(Ju,"FlaxPreTrainedModel"),Ju.forEach(o),yc=d(pd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pd.forEach(o),Tc=i(ne),Qt=n(ne,"P",{});var hd=a(Qt);wc=d(hd,"This model is also a Flax Linen "),Vt=n(hd,"A",{href:!0,rel:!0});var Xu=a(Vt);Sc=d(Xu,"flax.nn.Module"),Xu.forEach(o),xc=d(hd,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),hd.forEach(o),Bc=i(ne),fs=n(ne,"P",{});var Yu=a(fs);zc=d(Yu,"Finally, this model supports inherent JAX features such as:"),Yu.forEach(o),Fc=i(ne),We=n(ne,"UL",{});var Yo=a(We);_s=n(Yo,"LI",{});var Zu=a(_s);Jt=n(Zu,"A",{href:!0,rel:!0});var em=a(Jt);qc=d(em,"Just-In-Time (JIT) compilation"),em.forEach(o),Zu.forEach(o),Ec=i(Yo),gs=n(Yo,"LI",{});var om=a(gs);Xt=n(om,"A",{href:!0,rel:!0});var tm=a(Xt);$c=d(tm,"Automatic Differentiation"),tm.forEach(o),om.forEach(o),Mc=i(Yo),bs=n(Yo,"LI",{});var nm=a(bs);Yt=n(nm,"A",{href:!0,rel:!0});var am=a(Yt);Cc=d(am,"Vectorization"),am.forEach(o),nm.forEach(o),Pc=i(Yo),ks=n(Yo,"LI",{});var sm=a(ks);Zt=n(sm,"A",{href:!0,rel:!0});var rm=a(Zt);jc=d(rm,"Parallelization"),rm.forEach(o),sm.forEach(o),Yo.forEach(o),Oc=i(ne),Je=n(ne,"DIV",{class:!0});var Zn=a(Je);_(en.$$.fragment,Zn),Lc=i(Zn),vs=n(Zn,"P",{});var dm=a(vs);Nc=d(dm,"Example:"),dm.forEach(o),Ac=i(Zn),_(on.$$.fragment,Zn),Zn.forEach(o),Ic=i(ne),Xe=n(ne,"DIV",{class:!0});var ea=a(Xe);_(tn.$$.fragment,ea),Dc=i(ea),ys=n(ea,"P",{});var lm=a(ys);Gc=d(lm,"Example:"),lm.forEach(o),Uc=i(ea),_(nn.$$.fragment,ea),ea.forEach(o),Wc=i(ne),Ye=n(ne,"DIV",{class:!0});var oa=a(Ye);_(an.$$.fragment,oa),Rc=i(oa),Ts=n(oa,"P",{});var im=a(Ts);Kc=d(im,"Example:"),im.forEach(o),Hc=i(oa),_(sn.$$.fragment,oa),oa.forEach(o),ne.forEach(o),Lr=i(s),zo=n(s,"H2",{class:!0});var ud=a(zo);Ko=n(ud,"A",{id:!0,class:!0,href:!0});var cm=a(Ko);ws=n(cm,"SPAN",{});var pm=a(ws);_(rn.$$.fragment,pm),pm.forEach(o),cm.forEach(o),Qc=i(ud),Ss=n(ud,"SPAN",{});var hm=a(Ss);Vc=d(hm,"FlaxBlenderbotForConditionalGeneration"),hm.forEach(o),ud.forEach(o),Nr=i(s),A=n(s,"DIV",{class:!0});var ae=a(A);_(dn.$$.fragment,ae),Jc=i(ae),ln=n(ae,"P",{});var md=a(ln);Xc=d(md,`The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization. This model inherits from `),In=n(md,"A",{href:!0});var um=a(In);Yc=d(um,"FlaxPreTrainedModel"),um.forEach(o),Zc=d(md,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),md.forEach(o),ep=i(ae),cn=n(ae,"P",{});var fd=a(cn);op=d(fd,"This model is also a Flax Linen "),pn=n(fd,"A",{href:!0,rel:!0});var mm=a(pn);tp=d(mm,"flax.nn.Module"),mm.forEach(o),np=d(fd,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),fd.forEach(o),ap=i(ae),xs=n(ae,"P",{});var fm=a(xs);sp=d(fm,"Finally, this model supports inherent JAX features such as:"),fm.forEach(o),rp=i(ae),Re=n(ae,"UL",{});var Zo=a(Re);Bs=n(Zo,"LI",{});var _m=a(Bs);hn=n(_m,"A",{href:!0,rel:!0});var gm=a(hn);dp=d(gm,"Just-In-Time (JIT) compilation"),gm.forEach(o),_m.forEach(o),lp=i(Zo),zs=n(Zo,"LI",{});var bm=a(zs);un=n(bm,"A",{href:!0,rel:!0});var km=a(un);ip=d(km,"Automatic Differentiation"),km.forEach(o),bm.forEach(o),cp=i(Zo),Fs=n(Zo,"LI",{});var vm=a(Fs);mn=n(vm,"A",{href:!0,rel:!0});var ym=a(mn);pp=d(ym,"Vectorization"),ym.forEach(o),vm.forEach(o),hp=i(Zo),qs=n(Zo,"LI",{});var Tm=a(qs);fn=n(Tm,"A",{href:!0,rel:!0});var wm=a(fn);up=d(wm,"Parallelization"),wm.forEach(o),Tm.forEach(o),Zo.forEach(o),mp=i(ae),E=n(ae,"DIV",{class:!0});var $=a(E);_(_n.$$.fragment,$),fp=i($),Fo=n($,"P",{});var ta=a(Fo);_p=d(ta,"The "),Es=n(ta,"CODE",{});var Sm=a(Es);gp=d(Sm,"FlaxBlenderbotSmallPreTrainedModel"),Sm.forEach(o),bp=d(ta," forward method, overrides the "),$s=n(ta,"CODE",{});var xm=a($s);kp=d(xm,"__call__"),xm.forEach(o),vp=d(ta," special method."),ta.forEach(o),yp=i($),_(Ho.$$.fragment,$),Tp=i($),Ms=n($,"P",{});var Bm=a(Ms);wp=d(Bm,"Summarization example::"),Bm.forEach(o),Sp=i($),Cs=n($,"BLOCKQUOTE",{});var zm=a(Cs);Ps=n(zm,"BLOCKQUOTE",{});var Fm=a(Ps);js=n(Fm,"BLOCKQUOTE",{});var qm=a(js);Os=n(qm,"P",{});var Em=a(Os);xp=d(Em,"from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration"),Em.forEach(o),qm.forEach(o),Fm.forEach(o),zm.forEach(o),Bp=i($),Ls=n($,"BLOCKQUOTE",{});var $m=a(Ls);Ns=n($m,"BLOCKQUOTE",{});var Mm=a(Ns);As=n(Mm,"BLOCKQUOTE",{});var Cm=a(As);Is=n(Cm,"P",{});var Pm=a(Is);zp=d(Pm,`model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019) tokenizer = BlenderbotSmallTokenizer.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019)`),Pm.forEach(o),Cm.forEach(o),Mm.forEach(o),$m.forEach(o),Fp=i($),Ds=n($,"BLOCKQUOTE",{});var jm=a(Ds);Gs=n(jm,"BLOCKQUOTE",{});var Om=a(Gs);Us=n(Om,"BLOCKQUOTE",{});var Lm=a(Us);Ws=n(Lm,"P",{});var Nm=a(Ws);qp=d(Nm,`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018np\u2019)`),Nm.forEach(o),Lm.forEach(o),Om.forEach(o),jm.forEach(o),Ep=i($),Rs=n($,"BLOCKQUOTE",{});var Am=a(Rs);Ks=n(Am,"BLOCKQUOTE",{});var Im=a(Ks);gn=n(Im,"BLOCKQUOTE",{});var _d=a(gn);Qo=n(_d,"H1",{class:!0});var gd=a(Qo);Vo=n(gd,"A",{id:!0,class:!0,href:!0});var Dm=a(Vo);Hs=n(Dm,"SPAN",{});var Gm=a(Hs);_(bn.$$.fragment,Gm),Gm.forEach(o),Dm.forEach(o),$p=i(gd),Qs=n(gd,"SPAN",{});var Um=a(Qs);Mp=d(Um,"Generate Summary"),Um.forEach(o),gd.forEach(o),Cp=i(_d),Vs=n(_d,"P",{});var Wm=a(Vs);Pp=d(Wm,`summary_ids = model.generate(inputs[\u2018input_ids\u2019]).sequences print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))`),Wm.forEach(o),_d.forEach(o),Im.forEach(o),Am.forEach(o),jp=i($),Js=n($,"P",{});var Rm=a(Js);Op=d(Rm,"Mask filling example::"),Rm.forEach(o),Lp=i($),Xs=n($,"BLOCKQUOTE",{});var Km=a(Xs);Ys=n(Km,"BLOCKQUOTE",{});var Hm=a(Ys);Zs=n(Hm,"BLOCKQUOTE",{});var Qm=a(Zs);er=n(Qm,"P",{});var Vm=a(er);Np=d(Vm,`from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration tokenizer = 
BlenderbotSmallTokenizer.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019) TXT = \u201CMy friends are <mask> but they eat too many carbs.\u201D`),Vm.forEach(o),Qm.forEach(o),Hm.forEach(o),Km.forEach(o),Ap=i($),or=n($,"BLOCKQUOTE",{});var Jm=a(or);tr=n(Jm,"BLOCKQUOTE",{});var Xm=a(tr);nr=n(Xm,"BLOCKQUOTE",{});var Ym=a(nr);ar=n(Ym,"P",{});var Zm=a(ar);Ip=d(Zm,`model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot_small-90M\u2019) input_ids = tokenizer([TXT], return_tensors=\u2018np\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),Zm.forEach(o),Ym.forEach(o),Xm.forEach(o),Jm.forEach(o),Dp=i($),sr=n($,"BLOCKQUOTE",{});var ef=a(sr);rr=n(ef,"BLOCKQUOTE",{});var of=a(rr);dr=n(of,"BLOCKQUOTE",{});var tf=a(dr);lr=n(tf,"P",{});var nf=a(lr);Gp=d(nf,`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = jax.nn.softmax(logits[0, masked_index], axis=0) values, predictions = jax.lax.top_k(probs)`),nf.forEach(o),tf.forEach(o),of.forEach(o),ef.forEach(o),Up=i($),ir=n($,"BLOCKQUOTE",{});var af=a(ir);cr=n(af,"BLOCKQUOTE",{});var sf=a(cr);pr=n(sf,"BLOCKQUOTE",{});var rf=a(pr);hr=n(rf,"P",{});var df=a(hr);Wp=d(df,"tokenizer.decode(predictions).split()"),df.forEach(o),rf.forEach(o),sf.forEach(o),af.forEach(o),$.forEach(o),Rp=i(ae),Ze=n(ae,"DIV",{class:!0});var na=a(Ze);_(kn.$$.fragment,na),Kp=i(na),ur=n(na,"P",{});var lf=a(ur);Hp=d(lf,"Example:"),lf.forEach(o),Qp=i(na),_(vn.$$.fragment,na),na.forEach(o),Vp=i(ae),eo=n(ae,"DIV",{class:!0});var aa=a(eo);_(yn.$$.fragment,aa),Jp=i(aa),mr=n(aa,"P",{});var cf=a(mr);Xp=d(cf,"Example:"),cf.forEach(o),Yp=i(aa),_(Tn.$$.fragment,aa),aa.forEach(o),ae.forEach(o),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(wf)),c(T,"id","blenderbot-small"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#blenderbot-small"),c(y,"class","relative group"),c(U,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallModel"),c(W,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallForConditionalGeneration"),c(Q,"href","https://huggingface.co/facebook/blenderbot-90M"),c(Q,"rel","nofollow"),c(Z,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotModel"),c(_e,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot#transformers.BlenderbotForConditionalGeneration"),c(I,"id","overview"),c(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(I,"href","#overview"),c(J,"class","relative group"),c(ie,"href","https://arxiv.org/pdf/2004.13637.pdf"),c(ie,"rel","nofollow"),c(Se,"href","https://huggingface.co/patrickvonplaten"),c(Se,"rel","nofollow"),c(D,"href","https://github.com/facebookresearch/ParlAI"),c(D,"rel","nofollow"),c(ge,"id","transformers.BlenderbotSmallConfig"),c(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ge,"href","#transformers.BlenderbotSmallConfig"),c(pe,"class","relative 
group"),c(xn,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallModel"),c(ot,"href","https://huggingface.co/facebook/blenderbot_small-90M"),c(ot,"rel","nofollow"),c(Bn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(zn,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(he,"class","docstring"),c(Eo,"id","transformers.BlenderbotSmallTokenizer"),c(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Eo,"href","#transformers.BlenderbotSmallTokenizer"),c(mo,"class","relative group"),c(Fn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(He,"class","docstring"),c($o,"class","docstring"),c(En,"href","../glossary#token-type-ids"),c(Qe,"class","docstring"),c(fa,"class","docstring"),c(G,"class","docstring"),c(Mo,"id","transformers.BlenderbotSmallTokenizerFast"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mo,"href","#transformers.BlenderbotSmallTokenizerFast"),c(_o,"class","relative group"),c(Co,"class","docstring"),c(Ue,"class","docstring"),c(Po,"id","transformers.BlenderbotSmallModel"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.BlenderbotSmallModel"),c(go,"class","relative group"),c($n,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(gt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(gt,"rel","nofollow"),c(Mn,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallModel"),c(be,"class","docstring"),c(ze,"class","docstring"),c(Oo,"id","transformers.BlenderbotSmallForConditionalGeneration"),c(Oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oo,"href","#transformers.BlenderbotSmallForConditionalGeneration"),c(ko,"class","relative group"),c(Cn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(St,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(St,"rel","nofollow"),c(Pn,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.BlenderbotSmallForConditionalGeneration"),c(te,"class","docstring"),c(Fe,"class","docstring"),c(No,"id","transformers.BlenderbotSmallForCausalLM"),c(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(No,"href","#transformers.BlenderbotSmallForCausalLM"),c(yo,"class","relative group"),c(Ve,"class","docstring"),c(Ft,"class","docstring"),c(Ao,"id","transformers.TFBlenderbotSmallModel"),c(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ao,"href","#transformers.TFBlenderbotSmallModel"),c(To,"class","relative 
group"),c(jn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(jt,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(jt,"rel","nofollow"),c(On,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallModel"),c(ke,"class","docstring"),c(ue,"class","docstring"),c(Go,"id","transformers.TFBlenderbotSmallForConditionalGeneration"),c(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Go,"href","#transformers.TFBlenderbotSmallForConditionalGeneration"),c(So,"class","relative group"),c(Ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Gt,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Gt,"rel","nofollow"),c(Nn,"href","/docs/transformers/v4.15.0/en/model_doc/blenderbot_small#transformers.TFBlenderbotSmallForConditionalGeneration"),c(j,"class","docstring"),c(me,"class","docstring"),c(Ro,"id","transformers.FlaxBlenderbotSmallModel"),c(Ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ro,"href","#transformers.FlaxBlenderbotSmallModel"),c(Bo,"class","relative group"),c(An,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Vt,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Vt,"rel","nofollow"),c(Jt,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Jt,"rel","nofollow"),c(Xt,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Xt,"rel","nofollow"),c(Yt,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Yt,"rel","nofollow"),c(Zt,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Zt,"rel","nofollow"),c(Je,"class","docstring"),c(Xe,"class","docstring"),c(Ye,"class","docstring"),c(N,"class","docstring"),c(Ko,"id","transformers.FlaxBlenderbotSmallForConditionalGeneration"),c(Ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ko,"href","#transformers.FlaxBlenderbotSmallForConditionalGeneration"),c(zo,"class","relative group"),c(In,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(pn,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(pn,"rel","nofollow"),c(hn,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(hn,"rel","nofollow"),c(un,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(un,"rel","nofollow"),c(mn,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(mn,"rel","nofollow"),c(fn,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(fn,"rel","nofollow"),c(Vo,"id","generate-summary"),c(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vo,"href","#generate-summary"),c(Qo,"class","relative 
group"),c(E,"class","docstring"),c(Ze,"class","docstring"),c(eo,"class","docstring"),c(A,"class","docstring")},m(s,h){e(document.head,u),p(s,z,h),p(s,y,h),e(y,T),e(T,x),g(S,x,null),e(y,w),e(y,F),e(F,Pe),p(s,fe,h),p(s,B,h),e(B,je),e(B,U),e(U,Oe),e(B,Le),e(B,W),e(W,Ne),e(B,Ae),e(B,Q),e(Q,V),e(B,Ie),e(B,Z),e(Z,M),e(B,O),e(B,_e),e(_e,se),p(s,Ee,h),p(s,J,h),e(J,I),e(I,Te),g(re,Te,null),e(J,L),e(J,we),e(we,de),p(s,$e,h),p(s,ee,h),e(ee,le),e(ee,ie),e(ie,De),e(ee,X),p(s,Me,h),p(s,R,h),e(R,Ge),p(s,m,h),p(s,q,h),e(q,ce),e(ce,ro),p(s,Ke,h),p(s,C,h),e(C,lo),e(C,Se),e(Se,io),e(C,Y),e(C,D),e(D,co),e(C,po),p(s,oe,h),p(s,pe,h),e(pe,ge),e(ge,xe),g(Be,xe,null),e(pe,bd),e(pe,sa),e(sa,kd),p(s,vr,h),p(s,he,h),g(et,he,null),e(he,vd),e(he,ho),e(ho,yd),e(ho,xn),e(xn,Td),e(ho,wd),e(ho,ot),e(ot,Sd),e(ho,xd),e(he,Bd),e(he,uo),e(uo,zd),e(uo,Bn),e(Bn,Fd),e(uo,qd),e(uo,zn),e(zn,Ed),e(uo,$d),e(he,Md),e(he,ra),e(ra,Cd),e(he,Pd),g(tt,he,null),p(s,yr,h),p(s,mo,h),e(mo,Eo),e(Eo,da),g(nt,da,null),e(mo,jd),e(mo,la),e(la,Od),p(s,Tr,h),p(s,G,h),g(at,G,null),e(G,Ld),e(G,ia),e(ia,Nd),e(G,Ad),e(G,st),e(st,Id),e(st,Fn),e(Fn,Dd),e(st,Gd),e(G,Ud),e(G,He),g(rt,He,null),e(He,Wd),e(He,ca),e(ca,Rd),e(He,Kd),e(He,pa),e(pa,Hd),e(G,Qd),e(G,$o),g(dt,$o,null),e($o,Vd),e($o,fo),e(fo,Jd),e(fo,ha),e(ha,Xd),e(fo,Yd),e(fo,ua),e(ua,Zd),e(fo,el),e(G,ol),e(G,Qe),g(lt,Qe,null),e(Qe,tl),e(Qe,qn),e(qn,nl),e(qn,En),e(En,al),e(Qe,sl),e(Qe,ma),e(ma,rl),e(G,dl),e(G,fa),p(s,wr,h),p(s,_o,h),e(_o,Mo),e(Mo,_a),g(it,_a,null),e(_o,ll),e(_o,ga),e(ga,il),p(s,Sr,h),p(s,Ue,h),g(ct,Ue,null),e(Ue,cl),e(Ue,pt),e(pt,pl),e(pt,ba),e(ba,hl),e(pt,ul),e(Ue,ml),e(Ue,Co),g(ht,Co,null),e(Co,fl),e(Co,ka),e(ka,_l),p(s,xr,h),p(s,go,h),e(go,Po),e(Po,va),g(ut,va,null),e(go,gl),e(go,ya),e(ya,bl),p(s,Br,h),p(s,ze,h),g(mt,ze,null),e(ze,kl),e(ze,ft),e(ft,vl),e(ft,$n),e($n,yl),e(ft,Tl),e(ze,wl),e(ze,_t),e(_t,Sl),e(_t,gt),e(gt,xl),e(_t,Bl),e(ze,zl),e(ze,be),g(bt,be,null),e(be,Fl),e(be,bo),e(bo,ql),e(bo,Mn),e(Mn,El),e(bo,$l),e(bo,Ta),e(Ta,Ml),e(bo,Cl),e(be,Pl),g(jo,be,null),e(be,jl),e(be,wa),e(wa,Ol),e(be,Ll),g(kt,be,null),p(s,zr,h),p(s,ko,h),e(ko,Oo),e(Oo,Sa),g(vt,Sa,null),e(ko,Nl),e(ko,xa),e(xa,Al),p(s,Fr,h),p(s,Fe,h),g(yt,Fe,null),e(Fe,Il),e(Fe,Tt),e(Tt,Dl),e(Tt,Cn),e(Cn,Gl),e(Tt,Ul),e(Fe,Wl),e(Fe,wt),e(wt,Rl),e(wt,St),e(St,Kl),e(wt,Hl),e(Fe,Ql),e(Fe,te),g(xt,te,null),e(te,Vl),e(te,vo),e(vo,Jl),e(vo,Pn),e(Pn,Xl),e(vo,Yl),e(vo,Ba),e(Ba,Zl),e(vo,ei),e(te,oi),g(Lo,te,null),e(te,ti),e(te,za),e(za,ni),e(te,ai),e(te,Fa),e(Fa,qa),e(qa,Ea),e(Ea,$a),e($a,si),e(te,ri),e(te,Ma),e(Ma,Ca),e(Ca,Pa),e(Pa,Bt),e(Bt,di),e(Bt,ja),e(ja,li),e(Bt,ii),p(s,qr,h),p(s,yo,h),e(yo,No),e(No,Oa),g(zt,Oa,null),e(yo,ci),e(yo,La),e(La,pi),p(s,Er,h),p(s,Ft,h),e(Ft,Ve),g(qt,Ve,null),e(Ve,hi),e(Ve,Na),e(Na,ui),e(Ve,mi),g(Et,Ve,null),p(s,$r,h),p(s,To,h),e(To,Ao),e(Ao,Aa),g($t,Aa,null),e(To,fi),e(To,Ia),e(Ia,_i),p(s,Mr,h),p(s,ue,h),g(Mt,ue,null),e(ue,gi),e(ue,Ct),e(Ct,bi),e(Ct,jn),e(jn,ki),e(Ct,vi),e(ue,yi),e(ue,Pt),e(Pt,Ti),e(Pt,jt),e(jt,wi),e(Pt,Si),e(ue,xi),g(Io,ue,null),e(ue,Bi),e(ue,ke),g(Ot,ke,null),e(ke,zi),e(ke,wo),e(wo,Fi),e(wo,On),e(On,qi),e(wo,Ei),e(wo,Da),e(Da,$i),e(wo,Mi),e(ke,Ci),g(Do,ke,null),e(ke,Pi),e(ke,Ga),e(Ga,ji),e(ke,Oi),g(Lt,ke,null),p(s,Cr,h),p(s,So,h),e(So,Go),e(Go,Ua),g(Nt,Ua,null),e(So,Li),e(So,Wa),e(Wa,Ni),p(s,Pr,h),p(s,me,h),g(At,me,null),e(me,Ai),e(me,It),e(It,Ii),e(It,Ln),e(Ln,Di),e(It,Gi),e(me,Ui),e(me,Dt),e(Dt,Wi),e(Dt,Gt),e(Gt,Ri),e(Dt,Ki),e(me,Hi),g(Uo,me,null),e(me,Qi),e(me,j),g(Ut,j,null),e(j,Vi),e(j,xo),e(xo,Ji),e(xo,Nn),e(Nn,Xi),e(xo,Yi),e(xo,Ra),e(Ra,Zi),e(xo,ec),e(j,oc),g(Wo,j,null),e(
j,tc),e(j,Ka),e(Ka,nc),e(j,ac),e(j,Ha),e(Ha,Qa),e(Qa,Va),e(Va,Ja),e(Ja,sc),e(j,rc),e(j,Xa),e(Xa,Ya),e(Ya,Za),e(Za,es),e(es,dc),e(j,lc),e(j,os),e(os,ts),e(ts,ns),e(ns,as),e(as,ic),e(j,cc),e(j,ss),e(ss,rs),e(rs,ds),e(ds,Wt),e(Wt,pc),e(Wt,ls),e(ls,hc),e(Wt,uc),e(j,mc),e(j,is),e(is,cs),e(cs,ps),e(ps,hs),e(hs,fc),p(s,jr,h),p(s,Bo,h),e(Bo,Ro),e(Ro,us),g(Rt,us,null),e(Bo,_c),e(Bo,ms),e(ms,gc),p(s,Or,h),p(s,N,h),g(Kt,N,null),e(N,bc),e(N,Ht),e(Ht,kc),e(Ht,An),e(An,vc),e(Ht,yc),e(N,Tc),e(N,Qt),e(Qt,wc),e(Qt,Vt),e(Vt,Sc),e(Qt,xc),e(N,Bc),e(N,fs),e(fs,zc),e(N,Fc),e(N,We),e(We,_s),e(_s,Jt),e(Jt,qc),e(We,Ec),e(We,gs),e(gs,Xt),e(Xt,$c),e(We,Mc),e(We,bs),e(bs,Yt),e(Yt,Cc),e(We,Pc),e(We,ks),e(ks,Zt),e(Zt,jc),e(N,Oc),e(N,Je),g(en,Je,null),e(Je,Lc),e(Je,vs),e(vs,Nc),e(Je,Ac),g(on,Je,null),e(N,Ic),e(N,Xe),g(tn,Xe,null),e(Xe,Dc),e(Xe,ys),e(ys,Gc),e(Xe,Uc),g(nn,Xe,null),e(N,Wc),e(N,Ye),g(an,Ye,null),e(Ye,Rc),e(Ye,Ts),e(Ts,Kc),e(Ye,Hc),g(sn,Ye,null),p(s,Lr,h),p(s,zo,h),e(zo,Ko),e(Ko,ws),g(rn,ws,null),e(zo,Qc),e(zo,Ss),e(Ss,Vc),p(s,Nr,h),p(s,A,h),g(dn,A,null),e(A,Jc),e(A,ln),e(ln,Xc),e(ln,In),e(In,Yc),e(ln,Zc),e(A,ep),e(A,cn),e(cn,op),e(cn,pn),e(pn,tp),e(cn,np),e(A,ap),e(A,xs),e(xs,sp),e(A,rp),e(A,Re),e(Re,Bs),e(Bs,hn),e(hn,dp),e(Re,lp),e(Re,zs),e(zs,un),e(un,ip),e(Re,cp),e(Re,Fs),e(Fs,mn),e(mn,pp),e(Re,hp),e(Re,qs),e(qs,fn),e(fn,up),e(A,mp),e(A,E),g(_n,E,null),e(E,fp),e(E,Fo),e(Fo,_p),e(Fo,Es),e(Es,gp),e(Fo,bp),e(Fo,$s),e($s,kp),e(Fo,vp),e(E,yp),g(Ho,E,null),e(E,Tp),e(E,Ms),e(Ms,wp),e(E,Sp),e(E,Cs),e(Cs,Ps),e(Ps,js),e(js,Os),e(Os,xp),e(E,Bp),e(E,Ls),e(Ls,Ns),e(Ns,As),e(As,Is),e(Is,zp),e(E,Fp),e(E,Ds),e(Ds,Gs),e(Gs,Us),e(Us,Ws),e(Ws,qp),e(E,Ep),e(E,Rs),e(Rs,Ks),e(Ks,gn),e(gn,Qo),e(Qo,Vo),e(Vo,Hs),g(bn,Hs,null),e(Qo,$p),e(Qo,Qs),e(Qs,Mp),e(gn,Cp),e(gn,Vs),e(Vs,Pp),e(E,jp),e(E,Js),e(Js,Op),e(E,Lp),e(E,Xs),e(Xs,Ys),e(Ys,Zs),e(Zs,er),e(er,Np),e(E,Ap),e(E,or),e(or,tr),e(tr,nr),e(nr,ar),e(ar,Ip),e(E,Dp),e(E,sr),e(sr,rr),e(rr,dr),e(dr,lr),e(lr,Gp),e(E,Up),e(E,ir),e(ir,cr),e(cr,pr),e(pr,hr),e(hr,Wp),e(A,Rp),e(A,Ze),g(kn,Ze,null),e(Ze,Kp),e(Ze,ur),e(ur,Hp),e(Ze,Qp),g(vn,Ze,null),e(A,Vp),e(A,eo),g(yn,eo,null),e(eo,Jp),e(eo,mr),e(mr,Xp),e(eo,Yp),g(Tn,eo,null),Ar=!0},p(s,[h]){const wn={};h&2&&(wn.$$scope={dirty:h,ctx:s}),jo.$set(wn);const fr={};h&2&&(fr.$$scope={dirty:h,ctx:s}),Lo.$set(fr);const _r={};h&2&&(_r.$$scope={dirty:h,ctx:s}),Io.$set(_r);const gr={};h&2&&(gr.$$scope={dirty:h,ctx:s}),Do.$set(gr);const ve={};h&2&&(ve.$$scope={dirty:h,ctx:s}),Uo.$set(ve);const br={};h&2&&(br.$$scope={dirty:h,ctx:s}),Wo.$set(br);const 
kr={};h&2&&(kr.$$scope={dirty:h,ctx:s}),Ho.$set(kr)},i(s){Ar||(b(S.$$.fragment,s),b(re.$$.fragment,s),b(Be.$$.fragment,s),b(et.$$.fragment,s),b(tt.$$.fragment,s),b(nt.$$.fragment,s),b(at.$$.fragment,s),b(rt.$$.fragment,s),b(dt.$$.fragment,s),b(lt.$$.fragment,s),b(it.$$.fragment,s),b(ct.$$.fragment,s),b(ht.$$.fragment,s),b(ut.$$.fragment,s),b(mt.$$.fragment,s),b(bt.$$.fragment,s),b(jo.$$.fragment,s),b(kt.$$.fragment,s),b(vt.$$.fragment,s),b(yt.$$.fragment,s),b(xt.$$.fragment,s),b(Lo.$$.fragment,s),b(zt.$$.fragment,s),b(qt.$$.fragment,s),b(Et.$$.fragment,s),b($t.$$.fragment,s),b(Mt.$$.fragment,s),b(Io.$$.fragment,s),b(Ot.$$.fragment,s),b(Do.$$.fragment,s),b(Lt.$$.fragment,s),b(Nt.$$.fragment,s),b(At.$$.fragment,s),b(Uo.$$.fragment,s),b(Ut.$$.fragment,s),b(Wo.$$.fragment,s),b(Rt.$$.fragment,s),b(Kt.$$.fragment,s),b(en.$$.fragment,s),b(on.$$.fragment,s),b(tn.$$.fragment,s),b(nn.$$.fragment,s),b(an.$$.fragment,s),b(sn.$$.fragment,s),b(rn.$$.fragment,s),b(dn.$$.fragment,s),b(_n.$$.fragment,s),b(Ho.$$.fragment,s),b(bn.$$.fragment,s),b(kn.$$.fragment,s),b(vn.$$.fragment,s),b(yn.$$.fragment,s),b(Tn.$$.fragment,s),Ar=!0)},o(s){k(S.$$.fragment,s),k(re.$$.fragment,s),k(Be.$$.fragment,s),k(et.$$.fragment,s),k(tt.$$.fragment,s),k(nt.$$.fragment,s),k(at.$$.fragment,s),k(rt.$$.fragment,s),k(dt.$$.fragment,s),k(lt.$$.fragment,s),k(it.$$.fragment,s),k(ct.$$.fragment,s),k(ht.$$.fragment,s),k(ut.$$.fragment,s),k(mt.$$.fragment,s),k(bt.$$.fragment,s),k(jo.$$.fragment,s),k(kt.$$.fragment,s),k(vt.$$.fragment,s),k(yt.$$.fragment,s),k(xt.$$.fragment,s),k(Lo.$$.fragment,s),k(zt.$$.fragment,s),k(qt.$$.fragment,s),k(Et.$$.fragment,s),k($t.$$.fragment,s),k(Mt.$$.fragment,s),k(Io.$$.fragment,s),k(Ot.$$.fragment,s),k(Do.$$.fragment,s),k(Lt.$$.fragment,s),k(Nt.$$.fragment,s),k(At.$$.fragment,s),k(Uo.$$.fragment,s),k(Ut.$$.fragment,s),k(Wo.$$.fragment,s),k(Rt.$$.fragment,s),k(Kt.$$.fragment,s),k(en.$$.fragment,s),k(on.$$.fragment,s),k(tn.$$.fragment,s),k(nn.$$.fragment,s),k(an.$$.fragment,s),k(sn.$$.fragment,s),k(rn.$$.fragment,s),k(dn.$$.fragment,s),k(_n.$$.fragment,s),k(Ho.$$.fragment,s),k(bn.$$.fragment,s),k(kn.$$.fragment,s),k(vn.$$.fragment,s),k(yn.$$.fragment,s),k(Tn.$$.fragment,s),Ar=!1},d(s){o(u),s&&o(z),s&&o(y),v(S),s&&o(fe),s&&o(B),s&&o(Ee),s&&o(J),v(re),s&&o($e),s&&o(ee),s&&o(Me),s&&o(R),s&&o(m),s&&o(q),s&&o(Ke),s&&o(C),s&&o(oe),s&&o(pe),v(Be),s&&o(vr),s&&o(he),v(et),v(tt),s&&o(yr),s&&o(mo),v(nt),s&&o(Tr),s&&o(G),v(at),v(rt),v(dt),v(lt),s&&o(wr),s&&o(_o),v(it),s&&o(Sr),s&&o(Ue),v(ct),v(ht),s&&o(xr),s&&o(go),v(ut),s&&o(Br),s&&o(ze),v(mt),v(bt),v(jo),v(kt),s&&o(zr),s&&o(ko),v(vt),s&&o(Fr),s&&o(Fe),v(yt),v(xt),v(Lo),s&&o(qr),s&&o(yo),v(zt),s&&o(Er),s&&o(Ft),v(qt),v(Et),s&&o($r),s&&o(To),v($t),s&&o(Mr),s&&o(ue),v(Mt),v(Io),v(Ot),v(Do),v(Lt),s&&o(Cr),s&&o(So),v(Nt),s&&o(Pr),s&&o(me),v(At),v(Uo),v(Ut),v(Wo),s&&o(jr),s&&o(Bo),v(Rt),s&&o(Or),s&&o(N),v(Kt),v(en),v(on),v(tn),v(nn),v(an),v(sn),s&&o(Lr),s&&o(zo),v(rn),s&&o(Nr),s&&o(A),v(dn),v(_n),v(Ho),v(bn),v(kn),v(vn),v(yn),v(Tn)}}}const 
wf={local:"blenderbot-small",sections:[{local:"overview",title:"Overview"},{local:"transformers.BlenderbotSmallConfig",title:"BlenderbotSmallConfig"},{local:"transformers.BlenderbotSmallTokenizer",title:"BlenderbotSmallTokenizer"},{local:"transformers.BlenderbotSmallTokenizerFast",title:"BlenderbotSmallTokenizerFast"},{local:"transformers.BlenderbotSmallModel",title:"BlenderbotSmallModel"},{local:"transformers.BlenderbotSmallForConditionalGeneration",title:"BlenderbotSmallForConditionalGeneration"},{local:"transformers.BlenderbotSmallForCausalLM",title:"BlenderbotSmallForCausalLM"},{local:"transformers.TFBlenderbotSmallModel",title:"TFBlenderbotSmallModel"},{local:"transformers.TFBlenderbotSmallForConditionalGeneration",title:"TFBlenderbotSmallForConditionalGeneration"},{local:"transformers.FlaxBlenderbotSmallModel",title:"FlaxBlenderbotSmallModel"},{local:"transformers.FlaxBlenderbotSmallForConditionalGeneration",title:"FlaxBlenderbotForConditionalGeneration"}],title:"Blenderbot Small"};function Sf(H,u,z){let{fw:y}=u;return H.$$set=T=>{"fw"in T&&z(0,y=T.fw)},[y]}class $f extends pf{constructor(u){super();hf(this,u,Sf,Tf,uf,{fw:0})}}export{$f as default,wf as metadata};
9,976
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/t5.mdx-42578e18.js
import{S as Vw,i as Kw,s as Yw,e as o,k as d,w as f,t as a,L as Jw,c as s,d as t,m as l,a as i,x as _,h as r,b as c,J as e,g as p,y as g,q as T,o as v,B as k}from"../../chunks/vendor-b1433968.js";import{T as bt}from"../../chunks/Tip-c3840994.js";import{D as P}from"../../chunks/Docstring-ff504c58.js";import{C as R}from"../../chunks/CodeBlock-a320dbd7.js";import{I as $e}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Zw(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function Xw(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function Qw(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function ey(N){let u,$,b,y,z,x,w,E,Ee,ae,j,te,O,re,je,G,qe,Te,B,I,ie,pe,F,C,he,K,ve,ue,U,Fe,ke,M,Me,H,V,fe,A,Pe,_e,D,Ce,W,Ae;return{c(){u=o("p"),$=a("TF 2.0 models accepts two formats as inputs:"),b=d(),y=o("ul"),z=o("li"),x=a("having all inputs as keyword arguments (like PyTorch models), or"),w=d(),E=o("li"),Ee=a("having all inputs as a list, tuple or dict in the first positional arguments."),ae=d(),j=o("p"),te=a("This second option is useful when using "),O=o("code"),re=a("tf.keras.Model.fit"),je=a(` method which currently requires having all the tensors in the first argument of the model call function: `),G=o("code"),qe=a("model(inputs)"),Te=a("."),B=d(),I=o("p"),ie=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors 
in the first positional argument :`),pe=d(),F=o("ul"),C=o("li"),he=a("a single Tensor with "),K=o("code"),ve=a("input_ids"),ue=a(" only and nothing else: "),U=o("code"),Fe=a("model(inputs_ids)"),ke=d(),M=o("li"),Me=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),H=o("code"),V=a("model([input_ids, attention_mask])"),fe=a(" or "),A=o("code"),Pe=a("model([input_ids, attention_mask, token_type_ids])"),_e=d(),D=o("li"),Ce=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=o("code"),Ae=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=s(m,"P",{});var q=i(u);$=r(q,"TF 2.0 models accepts two formats as inputs:"),q.forEach(t),b=l(m),y=s(m,"UL",{});var ne=i(y);z=s(ne,"LI",{});var Oe=i(z);x=r(Oe,"having all inputs as keyword arguments (like PyTorch models), or"),Oe.forEach(t),w=l(ne),E=s(ne,"LI",{});var rt=i(E);Ee=r(rt,"having all inputs as a list, tuple or dict in the first positional arguments."),rt.forEach(t),ne.forEach(t),ae=l(m),j=s(m,"P",{});var S=i(j);te=r(S,"This second option is useful when using "),O=s(S,"CODE",{});var Ge=i(O);re=r(Ge,"tf.keras.Model.fit"),Ge.forEach(t),je=r(S,` method which currently requires having all the tensors in the first argument of the model call function: `),G=s(S,"CODE",{});var oe=i(G);qe=r(oe,"model(inputs)"),oe.forEach(t),Te=r(S,"."),S.forEach(t),B=l(m),I=s(m,"P",{});var it=i(I);ie=r(it,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),it.forEach(t),pe=l(m),F=s(m,"UL",{});var L=i(F);C=s(L,"LI",{});var Y=i(C);he=r(Y,"a single Tensor with "),K=s(Y,"CODE",{});var dt=i(K);ve=r(dt,"input_ids"),dt.forEach(t),ue=r(Y," only and nothing else: "),U=s(Y,"CODE",{});var Le=i(U);Fe=r(Le,"model(inputs_ids)"),Le.forEach(t),Y.forEach(t),ke=l(L),M=s(L,"LI",{});var J=i(M);Me=r(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),H=s(J,"CODE",{});var lt=i(H);V=r(lt,"model([input_ids, attention_mask])"),lt.forEach(t),fe=r(J," or "),A=s(J,"CODE",{});var Ne=i(A);Pe=r(Ne,"model([input_ids, attention_mask, token_type_ids])"),Ne.forEach(t),J.forEach(t),_e=l(L),D=s(L,"LI",{});var Ie=i(D);Ce=r(Ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=s(Ie,"CODE",{});var ct=i(W);Ae=r(ct,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ct.forEach(t),Ie.forEach(t),L.forEach(t)},m(m,q){p(m,u,q),e(u,$),p(m,b,q),p(m,y,q),e(y,z),e(z,x),e(y,w),e(y,E),e(E,Ee),p(m,ae,q),p(m,j,q),e(j,te),e(j,O),e(O,re),e(j,je),e(j,G),e(G,qe),e(j,Te),p(m,B,q),p(m,I,q),e(I,ie),p(m,pe,q),p(m,F,q),e(F,C),e(C,he),e(C,K),e(K,ve),e(C,ue),e(C,U),e(U,Fe),e(F,ke),e(F,M),e(M,Me),e(M,H),e(H,V),e(M,fe),e(M,A),e(A,Pe),e(F,_e),e(F,D),e(D,Ce),e(D,W),e(W,Ae)},d(m){m&&t(u),m&&t(b),m&&t(y),m&&t(ae),m&&t(j),m&&t(B),m&&t(I),m&&t(pe),m&&t(F)}}}function ty(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance 
afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function ny(N){let u,$,b,y,z,x,w,E,Ee,ae,j,te,O,re,je,G,qe,Te,B,I,ie,pe,F,C,he,K,ve,ue,U,Fe,ke,M,Me,H,V,fe,A,Pe,_e,D,Ce,W,Ae;return{c(){u=o("p"),$=a("TF 2.0 models accepts two formats as inputs:"),b=d(),y=o("ul"),z=o("li"),x=a("having all inputs as keyword arguments (like PyTorch models), or"),w=d(),E=o("li"),Ee=a("having all inputs as a list, tuple or dict in the first positional arguments."),ae=d(),j=o("p"),te=a("This second option is useful when using "),O=o("code"),re=a("tf.keras.Model.fit"),je=a(` method which currently requires having all the tensors in the first argument of the model call function: `),G=o("code"),qe=a("model(inputs)"),Te=a("."),B=d(),I=o("p"),ie=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),pe=d(),F=o("ul"),C=o("li"),he=a("a single Tensor with "),K=o("code"),ve=a("input_ids"),ue=a(" only and nothing else: "),U=o("code"),Fe=a("model(inputs_ids)"),ke=d(),M=o("li"),Me=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),H=o("code"),V=a("model([input_ids, attention_mask])"),fe=a(" or "),A=o("code"),Pe=a("model([input_ids, attention_mask, token_type_ids])"),_e=d(),D=o("li"),Ce=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=o("code"),Ae=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=s(m,"P",{});var q=i(u);$=r(q,"TF 2.0 models accepts two formats as inputs:"),q.forEach(t),b=l(m),y=s(m,"UL",{});var ne=i(y);z=s(ne,"LI",{});var Oe=i(z);x=r(Oe,"having all inputs as keyword arguments (like PyTorch models), or"),Oe.forEach(t),w=l(ne),E=s(ne,"LI",{});var rt=i(E);Ee=r(rt,"having all inputs as a list, tuple or dict in the first positional arguments."),rt.forEach(t),ne.forEach(t),ae=l(m),j=s(m,"P",{});var S=i(j);te=r(S,"This second option is useful when using "),O=s(S,"CODE",{});var Ge=i(O);re=r(Ge,"tf.keras.Model.fit"),Ge.forEach(t),je=r(S,` method which currently requires having all the tensors in the first argument of the model call function: `),G=s(S,"CODE",{});var oe=i(G);qe=r(oe,"model(inputs)"),oe.forEach(t),Te=r(S,"."),S.forEach(t),B=l(m),I=s(m,"P",{});var it=i(I);ie=r(it,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),it.forEach(t),pe=l(m),F=s(m,"UL",{});var L=i(F);C=s(L,"LI",{});var Y=i(C);he=r(Y,"a single Tensor with "),K=s(Y,"CODE",{});var dt=i(K);ve=r(dt,"input_ids"),dt.forEach(t),ue=r(Y," only and nothing else: "),U=s(Y,"CODE",{});var Le=i(U);Fe=r(Le,"model(inputs_ids)"),Le.forEach(t),Y.forEach(t),ke=l(L),M=s(L,"LI",{});var J=i(M);Me=r(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),H=s(J,"CODE",{});var lt=i(H);V=r(lt,"model([input_ids, attention_mask])"),lt.forEach(t),fe=r(J," or "),A=s(J,"CODE",{});var Ne=i(A);Pe=r(Ne,"model([input_ids, attention_mask, token_type_ids])"),Ne.forEach(t),J.forEach(t),_e=l(L),D=s(L,"LI",{});var Ie=i(D);Ce=r(Ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=s(Ie,"CODE",{});var ct=i(W);Ae=r(ct,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),ct.forEach(t),Ie.forEach(t),L.forEach(t)},m(m,q){p(m,u,q),e(u,$),p(m,b,q),p(m,y,q),e(y,z),e(z,x),e(y,w),e(y,E),e(E,Ee),p(m,ae,q),p(m,j,q),e(j,te),e(j,O),e(O,re),e(j,je),e(j,G),e(G,qe),e(j,Te),p(m,B,q),p(m,I,q),e(I,ie),p(m,pe,q),p(m,F,q),e(F,C),e(C,he),e(C,K),e(K,ve),e(C,ue),e(C,U),e(U,Fe),e(F,ke),e(F,M),e(M,Me),e(M,H),e(H,V),e(M,fe),e(M,A),e(A,Pe),e(F,_e),e(F,D),e(D,Ce),e(D,W),e(W,Ae)},d(m){m&&t(u),m&&t(b),m&&t(y),m&&t(ae),m&&t(j),m&&t(B),m&&t(I),m&&t(pe),m&&t(F)}}}function oy(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function sy(N){let u,$,b,y,z,x,w,E,Ee,ae,j,te,O,re,je,G,qe,Te,B,I,ie,pe,F,C,he,K,ve,ue,U,Fe,ke,M,Me,H,V,fe,A,Pe,_e,D,Ce,W,Ae;return{c(){u=o("p"),$=a("TF 2.0 models accepts two formats as inputs:"),b=d(),y=o("ul"),z=o("li"),x=a("having all inputs as keyword arguments (like PyTorch models), or"),w=d(),E=o("li"),Ee=a("having all inputs as a list, tuple or dict in the first positional arguments."),ae=d(),j=o("p"),te=a("This second option is useful when using "),O=o("code"),re=a("tf.keras.Model.fit"),je=a(` method which currently requires having all the tensors in the first argument of the model call function: `),G=o("code"),qe=a("model(inputs)"),Te=a("."),B=d(),I=o("p"),ie=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),pe=d(),F=o("ul"),C=o("li"),he=a("a single Tensor with "),K=o("code"),ve=a("input_ids"),ue=a(" only and nothing else: "),U=o("code"),Fe=a("model(inputs_ids)"),ke=d(),M=o("li"),Me=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),H=o("code"),V=a("model([input_ids, attention_mask])"),fe=a(" or "),A=o("code"),Pe=a("model([input_ids, attention_mask, token_type_ids])"),_e=d(),D=o("li"),Ce=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=o("code"),Ae=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=s(m,"P",{});var q=i(u);$=r(q,"TF 2.0 models accepts two formats as inputs:"),q.forEach(t),b=l(m),y=s(m,"UL",{});var ne=i(y);z=s(ne,"LI",{});var Oe=i(z);x=r(Oe,"having all inputs as keyword arguments (like PyTorch models), or"),Oe.forEach(t),w=l(ne),E=s(ne,"LI",{});var rt=i(E);Ee=r(rt,"having all inputs as a list, tuple or dict in the first positional arguments."),rt.forEach(t),ne.forEach(t),ae=l(m),j=s(m,"P",{});var S=i(j);te=r(S,"This second option is useful when using "),O=s(S,"CODE",{});var Ge=i(O);re=r(Ge,"tf.keras.Model.fit"),Ge.forEach(t),je=r(S,` method which currently requires having all the tensors in the first argument of the model call function: `),G=s(S,"CODE",{});var oe=i(G);qe=r(oe,"model(inputs)"),oe.forEach(t),Te=r(S,"."),S.forEach(t),B=l(m),I=s(m,"P",{});var it=i(I);ie=r(it,`If you choose this second 
option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),it.forEach(t),pe=l(m),F=s(m,"UL",{});var L=i(F);C=s(L,"LI",{});var Y=i(C);he=r(Y,"a single Tensor with "),K=s(Y,"CODE",{});var dt=i(K);ve=r(dt,"input_ids"),dt.forEach(t),ue=r(Y," only and nothing else: "),U=s(Y,"CODE",{});var Le=i(U);Fe=r(Le,"model(inputs_ids)"),Le.forEach(t),Y.forEach(t),ke=l(L),M=s(L,"LI",{});var J=i(M);Me=r(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),H=s(J,"CODE",{});var lt=i(H);V=r(lt,"model([input_ids, attention_mask])"),lt.forEach(t),fe=r(J," or "),A=s(J,"CODE",{});var Ne=i(A);Pe=r(Ne,"model([input_ids, attention_mask, token_type_ids])"),Ne.forEach(t),J.forEach(t),_e=l(L),D=s(L,"LI",{});var Ie=i(D);Ce=r(Ie,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),W=s(Ie,"CODE",{});var ct=i(W);Ae=r(ct,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ct.forEach(t),Ie.forEach(t),L.forEach(t)},m(m,q){p(m,u,q),e(u,$),p(m,b,q),p(m,y,q),e(y,z),e(z,x),e(y,w),e(y,E),e(E,Ee),p(m,ae,q),p(m,j,q),e(j,te),e(j,O),e(O,re),e(j,je),e(j,G),e(G,qe),e(j,Te),p(m,B,q),p(m,I,q),e(I,ie),p(m,pe,q),p(m,F,q),e(F,C),e(C,he),e(C,K),e(K,ve),e(C,ue),e(C,U),e(U,Fe),e(F,ke),e(F,M),e(M,Me),e(M,H),e(H,V),e(M,fe),e(M,A),e(A,Pe),e(F,_e),e(F,D),e(D,Ce),e(D,W),e(W,Ae)},d(m){m&&t(u),m&&t(b),m&&t(y),m&&t(ae),m&&t(j),m&&t(B),m&&t(I),m&&t(pe),m&&t(F)}}}function ay(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function ry(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function iy(N){let u,$,b,y,z;return{c(){u=o("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),b=o("code"),y=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var w=i(u);$=r(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),b=s(w,"CODE",{});var 
E=i(b);y=r(E,"Module"),E.forEach(t),z=r(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(x,w){p(x,u,w),e(u,$),e(u,b),e(b,y),e(u,z)},d(x){x&&t(u)}}}function dy(N){let u,$,b,y,z,x,w,E,Ee,ae,j,te,O,re,je,G,qe,Te,B,I,ie,pe,F,C,he,K,ve,ue,U,Fe,ke,M,Me,H,V,fe,A,Pe,_e,D,Ce,W,Ae,m,q,ne,Oe,rt,S,Ge,oe,it,L,Y,dt,Le,J,lt,Ne,Ie,ct,Dl,Sa,Lp,Sl,Be,ei,ti,co,Np,Ip,ni,oi,po,Dp,Sp,si,ai,ho,Op,Gp,ri,ii,uo,Bp,Up,di,Oa,mo,Wp,Rp,Ol,Ga,Hp,Gl,wt,li,bn,ci,Vp,Kp,Ba,Yp,Jp,Zp,pi,wn,hi,Xp,Qp,Ua,eh,th,nh,ui,yn,mi,oh,sh,Wa,ah,rh,Bl,xn,ih,fo,dh,lh,Ul,yt,ch,_o,ph,hh,go,uh,mh,Wl,Ra,Rl,Ht,zn,fi,To,fh,_i,_h,Hl,pt,gh,gi,Th,vh,Ti,kh,bh,vi,wh,yh,Vl,$n,xh,Ha,zh,$h,Kl,Va,Vt,ki,Eh,jh,ge,qh,bi,Fh,Mh,wi,Ph,Ch,yi,Ah,Lh,xi,Nh,Ih,zi,Dh,Sh,Ka,Oh,Gh,Bh,$i,Uh,Yl,vo,Jl,En,Wh,ko,Rh,Hh,Zl,Ya,bo,Ei,Vh,Kh,ji,Yh,Xl,wo,Ql,se,Jh,qi,Zh,Xh,Fi,Qh,eu,Mi,tu,nu,Pi,ou,su,Ci,au,ru,Ai,iu,du,Li,lu,cu,ec,xt,pu,Ni,hu,uu,Ii,mu,fu,tc,me,_u,Di,gu,Tu,Si,vu,ku,Oi,bu,wu,Gi,yu,xu,yo,zu,$u,Bi,Eu,ju,nc,xo,oc,Ja,qu,sc,zt,Ui,zo,Fu,Wi,Mu,Pu,Cu,Ri,Kt,Au,$o,Lu,Nu,Eo,Iu,Du,Su,Hi,jo,Ou,Vi,Gu,Bu,ac,Za,rc,Yt,jn,Ki,qo,Uu,Yi,Wu,ic,ht,Ru,Xa,Hu,Vu,Fo,Ku,Yu,Mo,Ju,Zu,dc,Po,lc,Ue,Xu,Ji,Qu,em,Zi,tm,nm,Qa,om,sm,Xi,am,rm,cc,er,im,pc,Co,hc,tr,uc,Jt,qn,Qi,Ao,dm,ed,lm,mc,nr,cm,fc,Fn,td,Zt,pm,Lo,hm,um,No,mm,fm,_m,nd,De,gm,Io,Tm,vm,Do,km,bm,So,wm,ym,Oo,xm,zm,Go,$m,Em,_c,Xt,Mn,od,Bo,jm,sd,qm,gc,gt,Uo,Fm,Tt,Mm,or,Pm,Cm,sr,Am,Lm,Wo,Nm,Im,Dm,Qt,Sm,ar,Om,Gm,rr,Bm,Um,Tc,en,Pn,ad,Ro,Wm,rd,Rm,vc,Z,Ho,Hm,Vo,Vm,Ko,Km,Ym,Jm,Yo,Zm,ir,Xm,Qm,ef,tn,tf,id,nf,of,dd,sf,af,rf,$t,Jo,df,ld,lf,cf,Zo,dr,pf,cd,hf,uf,lr,mf,pd,ff,_f,Cn,Xo,gf,Qo,Tf,hd,vf,kf,bf,An,es,wf,ud,yf,xf,md,kc,nn,Ln,fd,ts,zf,_d,$f,bc,Se,ns,Ef,on,jf,gd,qf,Ff,os,Mf,Pf,Cf,ss,Af,cr,Lf,Nf,If,Et,as,Df,Td,Sf,Of,rs,pr,Gf,vd,Bf,Uf,hr,Wf,kd,Rf,Hf,Nn,is,Vf,bd,Kf,wc,sn,In,wd,ds,Yf,yd,Jf,yc,X,ls,Zf,xd,Xf,Qf,cs,e_,ps,t_,n_,o_,hs,s_,ur,a_,r_,i_,us,d_,ms,l_,c_,p_,We,fs,h_,an,u_,mr,m_,f_,zd,__,g_,T_,Dn,v_,$d,k_,b_,_s,w_,Re,gs,y_,Ed,x_,z_,jd,$_,E_,qd,j_,q_,Ts,F_,ut,vs,M_,Fd,P_,C_,Md,A_,L_,ks,xc,rn,Sn,Pd,bs,N_,Cd,I_,zc,Q,ws,D_,ys,S_,Ad,O_,G_,B_,xs,U_,zs,W_,R_,H_,$s,V_,fr,K_,Y_,J_,Es,Z_,js,X_,Q_,eg,He,qs,tg,dn,ng,_r,og,sg,Ld,ag,rg,ig,On,dg,Nd,lg,cg,Fs,pg,Ve,Ms,hg,Id,ug,mg,Dd,fg,_g,Sd,gg,Tg,Ps,vg,mt,Cs,kg,Od,bg,wg,Gd,yg,xg,As,$c,ln,Gn,Bd,Ls,zg,Ud,$g,Ec,ee,Ns,Eg,Wd,jg,qg,Is,Fg,Ds,Mg,Pg,Cg,Ss,Ag,gr,Lg,Ng,Ig,Os,Dg,Gs,Sg,Og,Gg,Ke,Bs,Bg,cn,Ug,Tr,Wg,Rg,Rd,Hg,Vg,Kg,Bn,Yg,Hd,Jg,Zg,Us,Xg,Ye,Ws,Qg,Vd,eT,tT,Kd,nT,oT,Yd,sT,aT,Rs,rT,ft,Hs,iT,Jd,dT,lT,Zd,cT,pT,Vs,jc,pn,Un,Xd,Ks,hT,Qd,uT,qc,de,Ys,mT,el,fT,_T,Js,gT,Zs,TT,vT,kT,Xs,bT,vr,wT,yT,xT,Qs,zT,ea,$T,ET,jT,Wn,qT,Je,ta,FT,hn,MT,kr,PT,CT,tl,AT,LT,NT,Rn,IT,nl,DT,ST,na,Fc,un,Hn,ol,oa,OT,sl,GT,Mc,le,sa,BT,aa,UT,al,WT,RT,HT,ra,VT,ia,KT,YT,JT,da,ZT,br,XT,QT,ev,la,tv,ca,nv,ov,sv,Vn,av,Ze,pa,rv,mn,iv,wr,dv,lv,rl,cv,pv,hv,Kn,uv,il,mv,fv,ha,Pc,fn,Yn,dl,ua,_v,ll,gv,Cc,ce,ma,Tv,cl,vv,kv,fa,bv,_a,wv,yv,xv,ga,zv,yr,$v,Ev,jv,Ta,qv,va,Fv,Mv,Pv,Jn,Cv,Xe,ka,Av,_n,Lv,xr,Nv,Iv,pl,Dv,Sv,Ov,Zn,Gv,hl,Bv,Uv,ba,Ac,gn,Xn,ul,wa,Wv,ml,Rv,Lc,vt,Qe,ya,Hv,Tn,Vv,fl,Kv,Yv,_l,Jv,Zv,Xv,Qn,Qv,gl,ek,tk,xa,nk,jt,za,ok,Tl,sk,ak,$a,rk,qt,Ea,ik,vl,dk,lk,ja,Nc,vn,eo,kl,qa,ck,bl,pk,Ic,kt,et,Fa,hk,kn,uk,wl,mk,fk,yl,_k,gk,Tk,to,vk,xl,kk,bk,Ma,wk,Ft,Pa,yk,zl,xk,zk,Ca,$k,Mt,Aa,Ek,$l,jk,qk,La,Dc;return x=new $e({}),re=new $e({}),To=new $e({}),vo=new R({props:{code:`from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-small") model = T5ForConditionalGeneration.from_pretrained("t5-small") input_ids = 
tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids # the forward function automatically creates the correct decoder_input_ids loss = model(input_ids=input_ids, labels=labels).loss,`,highlighted:`<span class="hljs-keyword">from</span> transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) input_ids = tokenizer(<span class="hljs-string">&#x27;The &lt;extra_id_0&gt; walks in &lt;extra_id_1&gt; park&#x27;</span>, <span class="hljs-attribute">return_tensors</span>=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids labels = tokenizer(<span class="hljs-string">&#x27;&lt;extra_id_0&gt; cute dog &lt;extra_id_1&gt; the &lt;extra_id_2&gt;&#x27;</span>, <span class="hljs-attribute">return_tensors</span>=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids <span class="hljs-comment"># the forward function automatically creates the correct decoder_input_ids</span> loss = model(<span class="hljs-attribute">input_ids</span>=input_ids, <span class="hljs-attribute">labels</span>=labels).loss`}}),wo=new R({props:{code:`from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-small") model = T5ForConditionalGeneration.from_pretrained("t5-small") input_ids = tokenizer('translate English to German: The house is wonderful.', return_tensors='pt').input_ids labels = tokenizer('Das Haus ist wunderbar.', return_tensors='pt').input_ids # the forward function automatically creates the correct decoder_input_ids loss = model(input_ids=input_ids, labels=labels).loss,`,highlighted:`<span class="hljs-keyword">from</span> transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) input_ids = tokenizer(<span class="hljs-string">&#x27;translate English to German: The house is wonderful.&#x27;</span>, <span class="hljs-attribute">return_tensors</span>=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids labels = tokenizer(<span class="hljs-string">&#x27;Das Haus ist wunderbar.&#x27;</span>, <span class="hljs-attribute">return_tensors</span>=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids <span class="hljs-comment"># the forward function automatically creates the correct decoder_input_ids</span> loss = model(<span class="hljs-attribute">input_ids</span>=input_ids, <span class="hljs-attribute">labels</span>=labels).loss`}}),xo=new R({props:{code:`from transformers import T5Tokenizer, T5ForConditionalGeneration import torch tokenizer = T5Tokenizer.from_pretrained("t5-small") model = T5ForConditionalGeneration.from_pretrained("t5-small") # the following 2 hyperparameters are task-specific max_source_length = 512 max_target_length = 128 # Suppose we have the following 2 training examples: input_sequence_1 = "Welcome to NYC" output_sequence_1 = "Bienvenue \xE0 NYC" input_sequence_2 = "HuggingFace is a company" output_sequence_2 = "HuggingFace est une entreprise" # encode the inputs task_prefix = "translate English to French: " input_sequences = [input_sequence_1, input_sequence_2] encoding = 
tokenizer([task_prefix + sequence for sequence in input_sequences], padding='longest', max_length=max_source_length, truncation=True, return_tensors="pt") input_ids, attention_mask = encoding.input_ids, encoding.attention_mask # encode the targets target_encoding = tokenizer([output_sequence_1, output_sequence_2], padding='longest', max_length=max_target_length, truncation=True) labels = target_encoding.input_ids # replace padding token id's of the labels by -100 labels = torch.tensor(labels) labels[labels == tokenizer.pad_token_id] = -100 # forward pass loss = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).loss,`,highlighted:`from transformers <span class="hljs-built_in">import</span> T5Tokenizer, T5ForConditionalGeneration <span class="hljs-built_in">import</span> torch <span class="hljs-attr">tokenizer</span> = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) <span class="hljs-attr">model</span> = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) <span class="hljs-comment"># the following 2 hyperparameters are task-specific</span> <span class="hljs-attr">max_source_length</span> = <span class="hljs-number">512</span> <span class="hljs-attr">max_target_length</span> = <span class="hljs-number">128</span> <span class="hljs-comment"># Suppose we have the following 2 training examples:</span> <span class="hljs-attr">input_sequence_1</span> = <span class="hljs-string">&quot;Welcome to NYC&quot;</span> <span class="hljs-attr">output_sequence_1</span> = <span class="hljs-string">&quot;Bienvenue \xE0 NYC&quot;</span> <span class="hljs-attr">input_sequence_2</span> = <span class="hljs-string">&quot;HuggingFace is a company&quot;</span> <span class="hljs-attr">output_sequence_2</span> = <span class="hljs-string">&quot;HuggingFace est une entreprise&quot;</span> <span class="hljs-comment"># encode the inputs</span> <span class="hljs-attr">task_prefix</span> = <span class="hljs-string">&quot;translate English to French: &quot;</span> <span class="hljs-attr">input_sequences</span> = [input_sequence_1, input_sequence_2] <span class="hljs-attr">encoding</span> = tokenizer([task_prefix + sequence for sequence <span class="hljs-keyword">in</span> input_sequences], <span class="hljs-attr">padding=&#x27;longest&#x27;,</span> <span class="hljs-attr">max_length=max_source_length,</span> <span class="hljs-attr">truncation=True,</span> <span class="hljs-attr">return_tensors=&quot;pt&quot;)</span> input_ids, <span class="hljs-attr">attention_mask</span> = encoding.input_ids, encoding.attention_mask <span class="hljs-comment"># encode the targets</span> <span class="hljs-attr">target_encoding</span> = tokenizer([output_sequence_1, output_sequence_2], <span class="hljs-attr">padding=&#x27;longest&#x27;,</span> <span class="hljs-attr">max_length=max_target_length,</span> <span class="hljs-attr">truncation=True)</span> <span class="hljs-attr">labels</span> = target_encoding.input_ids <span class="hljs-comment"># replace padding token id&#x27;s of the labels by -100</span> <span class="hljs-attr">labels</span> = torch.tensor(labels) labels[<span class="hljs-attr">labels</span> == tokenizer.pad_token_id] = -<span class="hljs-number">100</span> <span class="hljs-comment"># forward pass</span> <span class="hljs-attr">loss</span> = model(<span class="hljs-attr">input_ids=input_ids,</span> <span class="hljs-attr">attention_mask=attention_mask,</span> <span class="hljs-attr">labels=labels).loss</span>`}}),qo=new 
$e({}),Po=new R({props:{code:`from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-small") model = T5ForConditionalGeneration.from_pretrained("t5-small") input_ids = tokenizer('translate English to German: The house is wonderful.', return_tensors='pt').input_ids outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) # Das Haus ist wunderbar.,`,highlighted:`<span class="hljs-keyword">from</span> transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) input_ids = tokenizer(<span class="hljs-string">&#x27;translate English to German: The house is wonderful.&#x27;</span>, <span class="hljs-attribute">return_tensors</span>=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids outputs = model.generate(input_ids) <span class="hljs-built_in">print</span>(tokenizer.decode(outputs[0], <span class="hljs-attribute">skip_special_tokens</span>=<span class="hljs-literal">True</span>)) <span class="hljs-comment"># Das Haus ist wunderbar.</span>`}}),Co=new R({props:{code:`from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-small") model = T5ForConditionalGeneration.from_pretrained("t5-small") # when generating, we will use the logits of right-most token to predict the next token # so the padding should be on the left tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token # to avoid an error task_prefix = 'translate English to German: ' sentences = ['The house is wonderful.', 'I like to work in NYC.'] # use different length sentences to test batching inputs = tokenizer([task_prefix + sentence for sentence in sentences], return_tensors="pt", padding=True) output_sequences = model.generate( input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], do_sample=False, # disable sampling to test if batching affects output ) print(tokenizer.batch_decode(output_sequences, skip_special_tokens=True)) # ['Das Haus ist wunderbar.', 'Ich arbeite gerne in NYC.'],`,highlighted:`<span class="hljs-keyword">from</span> transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) <span class="hljs-comment"># when generating, we will use the logits of right-most token to predict the next token</span> <span class="hljs-comment"># so the padding should be on the left</span> tokenizer.padding_side = <span class="hljs-string">&quot;left&quot;</span> tokenizer.pad_token = tokenizer.eos_token # <span class="hljs-keyword">to</span> avoid an <span class="hljs-built_in">error</span> task_prefix = <span class="hljs-string">&#x27;translate English to German: &#x27;</span> sentences = [<span class="hljs-string">&#x27;The house is wonderful.&#x27;</span>, <span class="hljs-string">&#x27;I like to work in NYC.&#x27;</span>] # use different length sentences <span class="hljs-keyword">to</span> test batching inputs = tokenizer([task_prefix + sentence <span class="hljs-keyword">for</span> sentence <span class="hljs-keyword">in</span> sentences], <span class="hljs-attribute">return_tensors</span>=<span class="hljs-string">&quot;pt&quot;</span>, <span 
class="hljs-attribute">padding</span>=<span class="hljs-literal">True</span>) output_sequences = model.generate( <span class="hljs-attribute">input_ids</span>=inputs[<span class="hljs-string">&#x27;input_ids&#x27;</span>], <span class="hljs-attribute">attention_mask</span>=inputs[<span class="hljs-string">&#x27;attention_mask&#x27;</span>], <span class="hljs-attribute">do_sample</span>=<span class="hljs-literal">False</span>, # <span class="hljs-built_in">disable</span> sampling <span class="hljs-keyword">to</span> test <span class="hljs-keyword">if</span> batching affects output ) <span class="hljs-built_in">print</span>(tokenizer.batch_decode(output_sequences, <span class="hljs-attribute">skip_special_tokens</span>=<span class="hljs-literal">True</span>)) <span class="hljs-comment"># [&#x27;Das Haus ist wunderbar.&#x27;, &#x27;Ich arbeite gerne in NYC.&#x27;]</span>`}}),Ao=new $e({}),Bo=new $e({}),Uo=new P({props:{name:"class transformers.T5Config",anchor:"transformers.T5Config",parameters:[{name:"vocab_size",val:" = 32128"},{name:"d_model",val:" = 512"},{name:"d_kv",val:" = 64"},{name:"d_ff",val:" = 2048"},{name:"num_layers",val:" = 6"},{name:"num_decoder_layers",val:" = None"},{name:"num_heads",val:" = 8"},{name:"relative_attention_num_buckets",val:" = 32"},{name:"dropout_rate",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 1e-06"},{name:"initializer_factor",val:" = 1.0"},{name:"feed_forward_proj",val:" = 'relu'"},{name:"is_encoder_decoder",val:" = True"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 0"},{name:"eos_token_id",val:" = 1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/configuration_t5.py#L34",parametersDescription:[{anchor:"transformers.T5Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32128) &#x2014; Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model">T5Model</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5Model">TFT5Model</a>.`,name:"vocab_size"},{anchor:"transformers.T5Config.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Size of the encoder layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.T5Config.d_kv",description:`<strong>d_kv</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Size of the key, query, value projections per attention head. <code>d_kv</code> has to be equal to <code>d_model // num_heads</code>.`,name:"d_kv"},{anchor:"transformers.T5Config.d_ff",description:`<strong>d_ff</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Size of the intermediate feed forward layer in each <code>T5Block</code>.`,name:"d_ff"},{anchor:"transformers.T5Config.num_layers",description:`<strong>num_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 6) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_layers"},{anchor:"transformers.T5Config.num_decoder_layers",description:`<strong>num_decoder_layers</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of hidden layers in the Transformer decoder. 
Will use the same value as <code>num_layers</code> if not set.`,name:"num_decoder_layers"},{anchor:"transformers.T5Config.num_heads",description:`<strong>num_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_heads"},{anchor:"transformers.T5Config.relative_attention_num_buckets",description:`<strong>relative_attention_num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer.`,name:"relative_attention_num_buckets"},{anchor:"transformers.T5Config.dropout_rate",description:`<strong>dropout_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The ratio for all dropout layers.`,name:"dropout_rate"},{anchor:"transformers.T5Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.T5Config.initializer_factor",description:`<strong>initializer_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"},{anchor:"transformers.T5Config.feed_forward_proj",description:`<strong>feed_forward_proj</strong> (<code>string</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; Type of feed forward layer to be used. Should be one of <code>&quot;relu&quot;</code> or <code>&quot;gated-gelu&quot;</code>. T5v1.1 uses the <code>&quot;gated-gelu&quot;</code> feed forward projection. Original T5 uses <code>&quot;relu&quot;</code>.`,name:"feed_forward_proj"},{anchor:"transformers.T5Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),Ro=new $e({}),Ho=new P({props:{name:"class transformers.T5Tokenizer",anchor:"transformers.T5Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"extra_ids",val:" = 100"},{name:"additional_special_tokens",val:" = None"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L53",parametersDescription:[{anchor:"transformers.T5Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.T5Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token 
that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.T5Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.T5Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.T5Tokenizer.extra_ids",description:`<strong>extra_ids</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as &#x201C;<extra<em>id{%d}&gt;&#x201D; where &#x201D;{%d}&#x201D; is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginning (&#x201C;<extra_id_0>&#x201D; is the last token in the vocabulary like in T5 preprocessing see <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117" rel="nofollow">here</a>).</extra_id_0></extra<em>`,name:"extra_ids"},{anchor:"transformers.T5Tokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.T5Tokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Jo=new P({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.T5Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L220",parametersDescription:[{anchor:"transformers.T5Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.T5Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Xo=new P({props:{name:"get_special_tokens_mask",anchor:"transformers.T5Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L160",parametersDescription:[{anchor:"transformers.T5Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.T5Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.T5Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),es=new P({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.T5Tokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5.py#L198",parametersDescription:[{anchor:"transformers.T5Tokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.T5Tokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ts=new $e({}),ns=new P({props:{name:"class transformers.T5TokenizerFast",anchor:"transformers.T5TokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"extra_ids",val:" = 100"},{name:"additional_special_tokens",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5_fast.py#L63",parametersDescription:[{anchor:"transformers.T5TokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.T5TokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.T5TokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.T5TokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.T5TokenizerFast.extra_ids",description:`<strong>extra_ids</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as &#x201C;<extra<em>id{%d}&gt;&#x201D; where &#x201D;{%d}&#x201D; is a number between 0 and extra_ids-1. 
Extra tokens are indexed from the end of the vocabulary up to beginning (&#x201C;<extra_id_0>&#x201D; is the last token in the vocabulary like in T5 preprocessing see <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117" rel="nofollow">here</a>).</extra_id_0></extra<em>`,name:"extra_ids"},{anchor:"transformers.T5TokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),as=new P({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.T5TokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5_fast.py#L164",parametersDescription:[{anchor:"transformers.T5TokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.T5TokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),is=new P({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.T5TokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/tokenization_t5_fast.py#L190",parametersDescription:[{anchor:"transformers.T5TokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.T5TokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ds=new $e({}),ls=new P({props:{name:"class transformers.T5Model",anchor:"transformers.T5Model",parameters:[{name:"config",val:": T5Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1245",parametersDescription:[{anchor:"transformers.T5Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fs=new P({props:{name:"forward",anchor:"transformers.T5Model.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1321",parametersDescription:[{anchor:"transformers.T5Model.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.T5Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.T5Model.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>T5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.T5Model.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.T5Model.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.T5Model.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.T5Model.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.T5Model.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.T5Model.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.T5Model.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.T5Model.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.T5Model.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.T5Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.T5Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.T5Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the 
attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Dn=new bt({props:{$$slots:{default:[Zw]},$$scope:{ctx:N}}}),_s=new R({props:{code:`from transformers import T5Tokenizer, T5Model tokenizer = T5Tokenizer.from_pretrained('t5-small') model = T5Model.from_pretrained('t5-small') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 # forward pass outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, T5Model <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = T5Model.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),gs=new 
P({props:{name:"parallelize",anchor:"transformers.T5Model.parallelize",parameters:[{name:"device_map",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1277",parametersDescription:[{anchor:"transformers.T5Model.parallelize.device_map",description:`<strong>device_map</strong> (<code>Dict[int, list]</code>, optional, defaults to None) &#x2014; A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the t5 models have the following number of attention modules:</p> <ul> <li>t5-small: 6</li> <li>t5-base: 12</li> <li>t5-large: 24</li> <li>t5-3b: 24</li> <li>t5-11b: 24</li> </ul>`,name:"device_map"}]}}),Ts=new R({props:{code:`# Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map),`,highlighted:`<span class="hljs-comment"># Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:</span> model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-3b&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>]} model.parallelize(device_map)`}}),vs=new P({props:{name:"deparallelize",anchor:"transformers.T5Model.deparallelize",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1289"}}),ks=new R({props:{code:`# On a 4 GPU machine with t5-3b: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache(),`,highlighted:`<span class="hljs-comment"># On a 4 GPU machine with t5-3b:</span> model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-3b&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span 
class="hljs-number">2</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>]} model.parallelize(device_map) <span class="hljs-comment"># Splits the model across several devices</span> model.deparallelize() <span class="hljs-comment"># Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()</span>`}}),bs=new $e({}),ws=new P({props:{name:"class transformers.T5ForConditionalGeneration",anchor:"transformers.T5ForConditionalGeneration",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1432",parametersDescription:[{anchor:"transformers.T5ForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qs=new P({props:{name:"forward",anchor:"transformers.T5ForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1513",parametersDescription:[{anchor:"transformers.T5ForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.T5ForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.T5ForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>T5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.T5ForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.T5ForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.T5ForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.T5ForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.T5ForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.T5ForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.T5ForConditionalGeneration.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.T5ForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.T5ForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.T5ForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.T5ForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.T5ForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.T5ForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new bt({props:{$$slots:{default:[Xw]},$$scope:{ctx:N}}}),Fs=new R({props:{code:`from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained('t5-small') model = T5ForConditionalGeneration.from_pretrained('t5-small') # training input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids outputs = model(input_ids=input_ids, labels=labels) loss = outputs.loss logits = outputs.logits # inference input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) # studies have shown that owning a dog is good for you.,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, T5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&#x27;The &lt;extra_id_0&gt; walks in &lt;extra_id_1&gt; park&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&#x27;&lt;extra_id_0&gt; cute dog &lt;extra_id_1&gt; the &lt;extra_id_2&gt;&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; 
</span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;summarize: studies have shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># studies have shown that owning a dog is good for you.</span>`}}),Ms=new P({props:{name:"parallelize",anchor:"transformers.T5ForConditionalGeneration.parallelize",parameters:[{name:"device_map",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1469",parametersDescription:[{anchor:"transformers.T5ForConditionalGeneration.parallelize.device_map",description:`<strong>device_map</strong> (<code>Dict[int, list]</code>, optional, defaults to None) &#x2014; A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the t5 models have the following number of attention modules:</p> <ul> <li>t5-small: 6</li> <li>t5-base: 12</li> <li>t5-large: 24</li> <li>t5-3b: 24</li> <li>t5-11b: 24</li> </ul>`,name:"device_map"}]}}),Ps=new R({props:{code:`# Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map),`,highlighted:`<span class="hljs-comment"># Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:</span> model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-3b&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>]} model.parallelize(device_map)`}}),Cs=new 
P({props:{name:"deparallelize",anchor:"transformers.T5ForConditionalGeneration.deparallelize",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1482"}}),As=new R({props:{code:`# On a 4 GPU machine with t5-3b: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache(),`,highlighted:`<span class="hljs-comment"># On a 4 GPU machine with t5-3b:</span> model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-3b&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>]} model.parallelize(device_map) <span class="hljs-comment"># Splits the model across several devices</span> model.deparallelize() <span class="hljs-comment"># Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()</span>`}}),Ls=new $e({}),Ns=new P({props:{name:"class transformers.T5EncoderModel",anchor:"transformers.T5EncoderModel",parameters:[{name:"config",val:": T5Config"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1723",parametersDescription:[{anchor:"transformers.T5EncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Bs=new P({props:{name:"forward",anchor:"transformers.T5EncoderModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1781",parametersDescription:[{anchor:"transformers.T5EncoderModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.T5EncoderModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.T5EncoderModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.T5EncoderModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.T5EncoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.T5EncoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.T5EncoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Bn=new bt({props:{$$slots:{default:[Qw]},$$scope:{ctx:N}}}),Us=new R({props:{code:`from transformers import T5Tokenizer, T5EncoderModel tokenizer = T5Tokenizer.from_pretrained('t5-small') model = T5EncoderModel.from_pretrained('t5-small') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, T5EncoderModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
T5EncoderModel.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ws=new P({props:{name:"parallelize",anchor:"transformers.T5EncoderModel.parallelize",parameters:[{name:"device_map",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1744",parametersDescription:[{anchor:"transformers.T5EncoderModel.parallelize.device_map",description:`<strong>device_map</strong> (<code>Dict[int, list]</code>, optional, defaults to None) &#x2014; A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the t5 models have the following number of attention modules:</p> <ul> <li>t5-small: 6</li> <li>t5-base: 12</li> <li>t5-large: 24</li> <li>t5-3b: 24</li> <li>t5-11b: 24</li> </ul>`,name:"device_map"}]}}),Rs=new R({props:{code:`# Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map),`,highlighted:`<span class="hljs-comment"># Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:</span> model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-3b&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>]} model.parallelize(device_map)`}}),Hs=new P({props:{name:"deparallelize",anchor:"transformers.T5EncoderModel.deparallelize",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_t5.py#L1755"}}),Vs=new R({props:{code:`# On a 4 GPU machine with t5-3b: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: 
[3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache(),`,highlighted:`<span class="hljs-comment"># On a 4 GPU machine with t5-3b:</span> model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-3b&#x27;</span>) device_map = {<span class="hljs-number">0</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-number">1</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>], <span class="hljs-number">2</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>], <span class="hljs-number">3</span>: [<span class="hljs-number">17</span>, <span class="hljs-number">18</span>, <span class="hljs-number">19</span>, <span class="hljs-number">20</span>, <span class="hljs-number">21</span>, <span class="hljs-number">22</span>, <span class="hljs-number">23</span>]} model.parallelize(device_map) <span class="hljs-comment"># Splits the model across several devices</span> model.deparallelize() <span class="hljs-comment"># Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()</span>`}}),Ks=new $e({}),Ys=new P({props:{name:"class transformers.TFT5Model",anchor:"transformers.TFT5Model",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_tf_t5.py#L1125",parametersDescription:[{anchor:"transformers.TFT5Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wn=new bt({props:{$$slots:{default:[ey]},$$scope:{ctx:N}}}),ta=new P({props:{name:"call",anchor:"transformers.TFT5Model.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_tf_t5.py#L1151",parametersDescription:[{anchor:"transformers.TFT5Model.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on the right or the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>inputs</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.TFT5Model.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for sequence to sequence training. T5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.TFT5Model.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFT5Model.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default. head_mask &#x2014; (<code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> 
<p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Rn=new bt({props:{$$slots:{default:[ty]},$$scope:{ctx:N}}}),na=new R({props:{code:`from transformers import T5Tokenizer, TFT5Model tokenizer = T5Tokenizer.from_pretrained('t5-small') model = TFT5Model.from_pretrained('t5-small') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="tf").input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="tf").input_ids # Batch size 1 # forward pass outputs = model(input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, TFT5Model <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFT5Model.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids 
<span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),oa=new $e({}),sa=new P({props:{name:"class transformers.TFT5ForConditionalGeneration",anchor:"transformers.TFT5ForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_tf_t5.py#L1289",parametersDescription:[{anchor:"transformers.TFT5ForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vn=new bt({props:{$$slots:{default:[ny]},$$scope:{ctx:N}}}),pa=new P({props:{name:"call",anchor:"transformers.TFT5ForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_tf_t5.py#L1344",parametersDescription:[{anchor:"transformers.TFT5ForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on the right or the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>inputs</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.TFT5ForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for sequence to sequence training. 
T5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.TFT5ForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFT5ForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default. head_mask &#x2014; (<code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_attention_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 
Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Kn=new bt({props:{$$slots:{default:[oy]},$$scope:{ctx:N}}}),ha=new R({props:{code:`from transformers import T5Tokenizer, TFT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained('t5-small') model = TFT5ForConditionalGeneration.from_pretrained('t5-small') # training inputs = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='tf').input_ids labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='tf').input_ids outputs = model(inputs, labels=labels) loss = outputs.loss logits = outputs.logits # inference inputs = tokenizer("summarize: studies have shown that owning a dog is good for you", return_tensors="tf").input_ids # Batch size 1 outputs = model.generate(inputs) 
print(tokenizer.decode(outputs[0], skip_special_tokens=True)) # studies have shown that owning a dog is good for you,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, TFT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&#x27;The &lt;extra_id_0&gt; walks in &lt;extra_id_1&gt; park&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&#x27;&lt;extra_id_0&gt; cute dog &lt;extra_id_1&gt; the &lt;extra_id_2&gt;&#x27;</span>, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;summarize: studies have shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># studies have shown that owning a dog is good for you</span>`}}),ua=new $e({}),ma=new P({props:{name:"class transformers.TFT5EncoderModel",anchor:"transformers.TFT5EncoderModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_tf_t5.py#L1595",parametersDescription:[{anchor:"transformers.TFT5EncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config">T5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jn=new bt({props:{$$slots:{default:[sy]},$$scope:{ctx:N}}}),ka=new P({props:{name:"call",anchor:"transformers.TFT5EncoderModel.call",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_tf_t5.py#L1613",parametersDescription:[{anchor:"transformers.TFT5EncoderModel.call.inputs",description:`<strong>inputs</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on the right or the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p>To know more on how to prepare <code>inputs</code> for pre-training take a look at <a href="./t5#training">T5 Training</a>.`,name:"inputs"},{anchor:"transformers.TFT5EncoderModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFT5EncoderModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix. head_mask &#x2014; (<code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>): Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"inputs_embeds"},{anchor:"transformers.TFT5EncoderModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFT5EncoderModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFT5EncoderModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFT5EncoderModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Zn=new bt({props:{$$slots:{default:[ay]},$$scope:{ctx:N}}}),ba=new R({props:{code:`from transformers import T5Tokenizer, TFT5EncoderModel tokenizer = T5Tokenizer.from_pretrained('t5-small') model = TFT5EncoderModel.from_pretrained('t5-small') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="tf").input_ids # Batch size 1 outputs = model(input_ids),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> T5Tokenizer, TFT5EncoderModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFT5EncoderModel.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids)`}}),wa=new $e({}),ya=new P({props:{name:"__call__",anchor:"transformers.FlaxT5PreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": ndarray = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_flax_t5.py#L957",parametersDescription:[{anchor:"transformers.FlaxT5PreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>T5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 
sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qn=new bt({props:{$$slots:{default:[ry]},$$scope:{ctx:N}}}),xa=new R({props:{code:`from transformers import T5Tokenizer, FlaxT5Model tokenizer = T5Tokenizer.from_pretrained('t5-small') model = FlaxT5Model.from_pretrained('t5-small') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="np").input_ids decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids # forward pass outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxT5Model <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxT5Model.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),za=new P({props:{name:"encode",anchor:"transformers.FlaxT5PreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = 
None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_flax_t5.py#L1041",parametersDescription:[{anchor:"transformers.FlaxT5PreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.t5.configuration_t5.T5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$a=new R({props:{code:`from transformers import T5Tokenizer, FlaxT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained('t5-small') model = FlaxT5ForConditionalGeneration.from_pretrained('t5-small') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, return_tensors='np') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),Ea=new P({props:{name:"decode",anchor:"transformers.FlaxT5PreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_flax_t5.py#L1099",parametersDescription:[{anchor:"transformers.FlaxT5PreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For training, <code>decoder_input_ids</code> should be provided.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxT5PreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.t5.configuration_t5.T5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, 
num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ja=new R({props:{code:`from transformers import T5Tokenizer, FlaxT5ForConditionalGeneration import jax.numpy as jnp tokenizer = T5Tokenizer.from_pretrained('t5-small') model = FlaxT5ForConditionalGeneration.from_pretrained('t5-small') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, return_tensors='np') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qa=new $e({}),Fa=new P({props:{name:"__call__",anchor:"transformers.FlaxT5PreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": ndarray = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_flax_t5.py#L957",parametersDescription:[{anchor:"transformers.FlaxT5PreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>T5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./t5#training">T5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. 
Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxT5PreTrainedModel.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Config" >T5Config</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, 
returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),to=new bt({props:{$$slots:{default:[iy]},$$scope:{ctx:N}}}),Ma=new R({props:{code:`from transformers import T5Tokenizer, FlaxT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained('t5-small') model = FlaxT5ForConditionalGeneration.from_pretrained('t5-small') ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs." 
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors='np') # Generate Summary summary_ids = model.generate(inputs['input_ids']).sequences print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE_TO_SUMMARIZE = <span class="hljs-string">&quot;summarize: My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Generate Summary</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary_ids = model.generate(inputs[<span class="hljs-string">&#x27;input_ids&#x27;</span>]).sequences <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(summary_ids[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">False</span>))`}}),Pa=new P({props:{name:"encode",anchor:"transformers.FlaxT5PreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_flax_t5.py#L1041",parametersDescription:[{anchor:"transformers.FlaxT5PreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./t5#training">T5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxT5PreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.t5.configuration_t5.T5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ca=new R({props:{code:`from transformers import T5Tokenizer, FlaxT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained('t5-small') model = FlaxT5ForConditionalGeneration.from_pretrained('t5-small') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, return_tensors='np') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),Aa=new P({props:{name:"decode",anchor:"transformers.FlaxT5ForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/t5/modeling_flax_t5.py#L1464",parametersDescription:[{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For training, <code>decoder_input_ids</code> should be provided.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxT5ForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.t5.configuration_t5.T5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),La=new R({props:{code:`from transformers import T5Tokenizer, FlaxT5ForConditionalGeneration import jax.numpy as jnp tokenizer = T5Tokenizer.from_pretrained('t5-small') model = FlaxT5ForConditionalGeneration.from_pretrained('t5-small') text = "summarize: My friends are cool but they eat too many carbs." inputs = tokenizer(text, return_tensors='np') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;t5-small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;summarize: My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&#x27;np&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){u=o("meta"),$=d(),b=o("h1"),y=o("a"),z=o("span"),f(x.$$.fragment),w=d(),E=o("span"),Ee=a("T5"),ae=d(),j=o("h2"),te=o("a"),O=o("span"),f(re.$$.fragment),je=d(),G=o("span"),qe=a("Overview"),Te=d(),B=o("p"),I=a("The T5 model was presented in "),ie=o("a"),pe=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),F=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),C=d(),he=o("p"),K=a("The abstract from the paper is the following:"),ve=d(),ue=o("p"),U=o("em"),Fe=a(`Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). 
The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pretraining objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new \u201CColossal Clean Crawled Corpus\u201D, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.`),ke=d(),M=o("p"),Me=a("Tips:"),H=d(),V=o("ul"),fe=o("li"),A=o("p"),Pe=a(`T5 is an encoder-decoder model pre-trained on a multi-task mixture of unsupervised and supervised tasks and for which each task is converted into a text-to-text format. T5 works well on a variety of tasks out-of-the-box by prepending a different prefix to the input corresponding to each task, e.g., for translation: `),_e=o("em"),D=a("translate English to German: \u2026"),Ce=a(`, for summarization: `),W=o("em"),Ae=a("summarize: \u2026"),m=a("."),q=d(),ne=o("li"),Oe=o("p"),rt=a("T5 uses relative scalar embeddings. Encoder input padding can be done on the left and on the right."),S=d(),Ge=o("li"),oe=o("p"),it=a("See the "),L=o("a"),Y=a("training"),dt=a(", "),Le=o("a"),J=a("inference"),lt=a(" and "),Ne=o("a"),Ie=a("scripts"),ct=a(" sections below for all details regarding usage."),Dl=d(),Sa=o("p"),Lp=a("T5 comes in different sizes:"),Sl=d(),Be=o("ul"),ei=o("li"),ti=o("p"),co=o("a"),Np=a("t5-small"),Ip=d(),ni=o("li"),oi=o("p"),po=o("a"),Dp=a("t5-base"),Sp=d(),si=o("li"),ai=o("p"),ho=o("a"),Op=a("t5-large"),Gp=d(),ri=o("li"),ii=o("p"),uo=o("a"),Bp=a("t5-3b"),Up=d(),di=o("li"),Oa=o("p"),mo=o("a"),Wp=a("t5-11b"),Rp=a("."),Ol=d(),Ga=o("p"),Hp=a("Based on the original T5 model, Google has released some follow-up works:"),Gl=d(),wt=o("ul"),li=o("li"),bn=o("p"),ci=o("strong"),Vp=a("T5v1.1"),Kp=a(`: T5v1.1 is an improved version of T5 with some architectural tweaks, and is pre-trained on C4 only without mixing in the supervised tasks. Refer to the documentation of T5v1.1 which can be found `),Ba=o("a"),Yp=a("here"),Jp=a("."),Zp=d(),pi=o("li"),wn=o("p"),hi=o("strong"),Xp=a("mT5"),Qp=a(`: mT5 is a multilingual T5 model. It is pre-trained on the mC4 corpus, which includes 101 languages. Refer to the documentation of mT5 which can be found `),Ua=o("a"),eh=a("here"),th=a("."),nh=d(),ui=o("li"),yn=o("p"),mi=o("strong"),oh=a("byT5"),sh=a(`: byT5 is a T5 model pre-trained on byte sequences rather than SentencePiece subword token sequences. Refer to the documentation of byT5 which can be found `),Wa=o("a"),ah=a("here"),rh=a("."),Bl=d(),xn=o("p"),ih=a("All checkpoints can be found on the "),fo=o("a"),dh=a("hub"),lh=a("."),Ul=d(),yt=o("p"),ch=a("This model was contributed by "),_o=o("a"),ph=a("thomwolf"),hh=a(". The original code can be found "),go=o("a"),uh=a("here"),mh=a("."),Wl=d(),Ra=o("a"),Rl=d(),Ht=o("h2"),zn=o("a"),fi=o("span"),f(To.$$.fragment),fh=d(),_i=o("span"),_h=a("Training"),Hl=d(),pt=o("p"),gh=a(`T5 is an encoder-decoder model and converts all NLP problems into a text-to-text format. It is trained using teacher forcing. This means that for training, we always need an input sequence and a corresponding target sequence. 
The input sequence is fed to the model using `),gi=o("code"),Th=a("input_ids"),vh=a(`. The target sequence is shifted to the right, i.e., prepended by a start-sequence token and fed to the decoder using the `),Ti=o("code"),kh=a("decoder_input_ids"),bh=a(`. In teacher-forcing style, the target sequence is then appended by the EOS token and corresponds to the `),vi=o("code"),wh=a("labels"),yh=a(`. The PAD token is hereby used as the start-sequence token. T5 can be trained / fine-tuned both in a supervised and unsupervised fashion.`),Vl=d(),$n=o("p"),xh=a("One can use "),Ha=o("a"),zh=a("T5ForConditionalGeneration"),$h=a(` (or the Tensorflow/Flax variant), which includes the language modeling head on top of the decoder.`),Kl=d(),Va=o("ul"),Vt=o("li"),ki=o("p"),Eh=a("Unsupervised denoising training"),jh=d(),ge=o("p"),qh=a("In this setup, spans of the input sequence are masked by so-called sentinel tokens ("),bi=o("em"),Fh=a("a.k.a"),Mh=a(` unique mask tokens) and the output sequence is formed as a concatenation of the same sentinel tokens and the `),wi=o("em"),Ph=a("real"),Ch=a(` masked tokens. Each sentinel token represents a unique mask token for this sentence and should start with `),yi=o("code"),Ah=a("<extra_id_0>"),Lh=a(`, `),xi=o("code"),Nh=a("<extra_id_1>"),Ih=a(", \u2026 up to "),zi=o("code"),Dh=a("<extra_id_99>"),Sh=a(`. As a default, 100 sentinel tokens are available in `),Ka=o("a"),Oh=a("T5Tokenizer"),Gh=a("."),Bh=d(),$i=o("p"),Uh=a(`For instance, the sentence \u201CThe cute dog walks in the park\u201D with the masks put on \u201Ccute dog\u201D and \u201Cthe\u201D should be processed as follows:`),Yl=d(),f(vo.$$.fragment),Jl=d(),En=o("p"),Wh=a("If you\u2019re interested in pre-training T5 on a new corpus, check out the "),ko=o("a"),Rh=a("run_t5_mlm_flax.py"),Hh=a(` script in the Examples directory.`),Zl=d(),Ya=o("ul"),bo=o("li"),Ei=o("p"),Vh=a("Supervised training"),Kh=d(),ji=o("p"),Yh=a(`In this setup, the input sequence and output sequence are a standard sequence-to-sequence input-output mapping. Suppose that we want to fine-tune the model for translation for example, and we have a training example: the input sequence \u201CThe house is wonderful.\u201D and output sequence \u201CDas Haus ist wunderbar.\u201D, then they should be prepared for the model as follows:`),Xl=d(),f(wo.$$.fragment),Ql=d(),se=o("p"),Jh=a("As you can see, only 2 inputs are required for the model in order to compute a loss: "),qi=o("code"),Zh=a("input_ids"),Xh=a(` (which are the `),Fi=o("code"),Qh=a("input_ids"),eu=a(" of the encoded input sequence) and "),Mi=o("code"),tu=a("labels"),nu=a(" (which are the "),Pi=o("code"),ou=a("input_ids"),su=a(` of the encoded target sequence). The model will automatically create the `),Ci=o("code"),au=a("decoder_input_ids"),ru=a(" based on the "),Ai=o("code"),iu=a("labels"),du=a(`, by shifting them one position to the right and prepending the `),Li=o("code"),lu=a("config.decoder_start_token_id"),cu=a(`, which for T5 is equal to 0 (i.e. the id of the pad token). Also note the task prefix: we prepend the input sequence with \u2018translate English to German: \u2019 before encoding it. This will help in improving the performance, as this task prefix was used during T5\u2019s pre-training.`),ec=d(),xt=o("p"),pu=a(`However, the example above only shows a single training example. In practice, one trains deep learning models in batches. This entails that we must pad/truncate examples to the same length. 
For encoder-decoder models, one typically defines a `),Ni=o("code"),hu=a("max_source_length"),uu=a(" and "),Ii=o("code"),mu=a("max_target_length"),fu=a(`, which determine the maximum length of the input and output sequences respectively (otherwise they are truncated). These should be carefully set depending on the task.`),tc=d(),me=o("p"),_u=a("In addition, we must make sure that padding token id\u2019s of the "),Di=o("code"),gu=a("labels"),Tu=a(` are not taken into account by the loss function. In PyTorch and Tensorflow, this can be done by replacing them with -100, which is the `),Si=o("code"),vu=a("ignore_index"),ku=a(` of the `),Oi=o("code"),bu=a("CrossEntropyLoss"),wu=a(". In Flax, one can use the "),Gi=o("code"),yu=a("decoder_attention_mask"),xu=a(` to ignore padded tokens from the loss (see the `),yo=o("a"),zu=a("Flax summarization script"),$u=a(` for details). We also pass `),Bi=o("code"),Eu=a("attention_mask"),ju=a(` as additional input to the model, which makes sure that padding tokens of the inputs are ignored. The code example below illustrates all of this.`),nc=d(),f(xo.$$.fragment),oc=d(),Ja=o("p"),qu=a("Additional training tips:"),sc=d(),zt=o("ul"),Ui=o("li"),zo=o("p"),Fu=a("T5 models need a slightly higher learning rate than the default one set in the "),Wi=o("code"),Mu=a("Trainer"),Pu=a(` when using the AdamW optimizer. Typically, 1e-4 and 3e-4 work well for most problems (classification, summarization, translation, question answering, question generation). Note that T5 was pre-trained using the AdaFactor optimizer.`),Cu=d(),Ri=o("li"),Kt=o("p"),Au=a("According to "),$o=o("a"),Lu=a("this forum post"),Nu=a(`, task prefixes matter when (1) doing multi-task training (2) your task is similar or related to one of the supervised tasks used in T5\u2019s pre-training mixture (see Appendix D of the `),Eo=o("a"),Iu=a("paper"),Du=a(` for the task prefixes used).`),Su=d(),Hi=o("li"),jo=o("p"),Ou=a(`If training on TPU, it is recommended to pad all examples of the dataset to the same length or make use of `),Vi=o("em"),Gu=a("pad_to_multiple_of"),Bu=a(` to have a small number of predefined bucket sizes to fit all examples in. Dynamically padding batches to the longest example is not recommended on TPU as it triggers a recompilation for every batch shape that is encountered during training thus significantly slowing down the training. only padding up to the longest example in a batch) leads to very slow training on TPU.`),ac=d(),Za=o("a"),rc=d(),Yt=o("h2"),jn=o("a"),Ki=o("span"),f(qo.$$.fragment),Uu=d(),Yi=o("span"),Wu=a("Inference"),ic=d(),ht=o("p"),Ru=a("At inference time, it is recommended to use "),Xa=o("a"),Hu=a("generate()"),Vu=a(`. This method takes care of encoding the input and feeding the encoded hidden states via cross-attention layers to the decoder and auto-regressively generates the decoder output. Check out `),Fo=o("a"),Ku=a("this blog post"),Yu=a(` to know all the details about generating text with Transformers. There\u2019s also `),Mo=o("a"),Ju=a("this blog post"),Zu=a(` which explains how generation works in general in encoder-decoder models.`),dc=d(),f(Po.$$.fragment),lc=d(),Ue=o("p"),Xu=a("Note that T5 uses the "),Ji=o("code"),Qu=a("pad_token_id"),em=a(" as the "),Zi=o("code"),tm=a("decoder_start_token_id"),nm=a(`, so when doing generation without using `),Qa=o("a"),om=a("generate()"),sm=a(", make sure you start it with the "),Xi=o("code"),am=a("pad_token_id"),rm=a("."),cc=d(),er=o("p"),im=a("The example above only shows a single example. 
You can also do batched inference, like so:"),pc=d(),f(Co.$$.fragment),hc=d(),tr=o("a"),uc=d(),Jt=o("h2"),qn=o("a"),Qi=o("span"),f(Ao.$$.fragment),dm=d(),ed=o("span"),lm=a("Example scripts"),mc=d(),nr=o("p"),cm=a("T5 is supported by several example scripts, both for pre-training and fine-tuning."),fc=d(),Fn=o("ul"),td=o("li"),Zt=o("p"),pm=a("pre-training: the "),Lo=o("a"),hm=a("run_t5_mlm_flax.py"),um=a(` script allows you to further pre-train T5 or pre-train T5 from scratch on your own data. The `),No=o("a"),mm=a("t5_tokenizer_model.py"),fm=a(` script allows you to further train a T5 tokenizer or train a T5 Tokenizer from scratch on your own data. Note that Flax (a neural network library on top of JAX) is particularly useful to train on TPU hardware.`),_m=d(),nd=o("li"),De=o("p"),gm=a("fine-tuning: T5 is supported by the official summarization scripts ("),Io=o("a"),Tm=a("PyTorch"),vm=a(", "),Do=o("a"),km=a("Tensorflow"),bm=a(", and "),So=o("a"),wm=a("Flax"),ym=a(`) and translation scripts (`),Oo=o("a"),xm=a("PyTorch"),zm=a(" and "),Go=o("a"),$m=a("Tensorflow"),Em=a(`). These scripts allow you to easily fine-tune T5 on custom data for summarization/translation.`),_c=d(),Xt=o("h2"),Mn=o("a"),od=o("span"),f(Bo.$$.fragment),jm=d(),sd=o("span"),qm=a("T5Config"),gc=d(),gt=o("div"),f(Uo.$$.fragment),Fm=d(),Tt=o("p"),Mm=a("This is the configuration class to store the configuration of a "),or=o("a"),Pm=a("T5Model"),Cm=a(` or a `),sr=o("a"),Am=a("TFT5Model"),Lm=a(`. It is used to instantiate a T5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the T5 `),Wo=o("a"),Nm=a("t5-small"),Im=a(" architecture."),Dm=d(),Qt=o("p"),Sm=a("Configuration objects inherit from "),ar=o("a"),Om=a("PretrainedConfig"),Gm=a(` and can be used to control the model outputs. Read the documentation from `),rr=o("a"),Bm=a("PretrainedConfig"),Um=a(" for more information."),Tc=d(),en=o("h2"),Pn=o("a"),ad=o("span"),f(Ro.$$.fragment),Wm=d(),rd=o("span"),Rm=a("T5Tokenizer"),vc=d(),Z=o("div"),f(Ho.$$.fragment),Hm=d(),Vo=o("p"),Vm=a("Construct a T5 tokenizer. Based on "),Ko=o("a"),Km=a("SentencePiece"),Ym=a("."),Jm=d(),Yo=o("p"),Zm=a("This tokenizer inherits from "),ir=o("a"),Xm=a("PreTrainedTokenizer"),Qm=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ef=d(),tn=o("p"),tf=a(`Attributes: sp_model (`),id=o("code"),nf=a("SentencePieceProcessor"),of=a(`): The `),dd=o("em"),sf=a("SentencePiece"),af=a(" processor that is used for every conversion (string, tokens and IDs)."),rf=d(),$t=o("div"),f(Jo.$$.fragment),df=d(),ld=o("p"),lf=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),cf=d(),Zo=o("ul"),dr=o("li"),pf=a("single sequence: "),cd=o("code"),hf=a("X </s>"),uf=d(),lr=o("li"),mf=a("pair of sequences: "),pd=o("code"),ff=a("A </s> B </s>"),_f=d(),Cn=o("div"),f(Xo.$$.fragment),gf=d(),Qo=o("p"),Tf=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),hd=o("code"),vf=a("prepare_for_model"),kf=a(" method."),bf=d(),An=o("div"),f(es.$$.fragment),wf=d(),ud=o("p"),yf=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
T5 does not make use of token type ids, therefore a list of zeros is returned.`),xf=d(),md=o("div"),kc=d(),nn=o("h2"),Ln=o("a"),fd=o("span"),f(ts.$$.fragment),zf=d(),_d=o("span"),$f=a("T5TokenizerFast"),bc=d(),Se=o("div"),f(ns.$$.fragment),Ef=d(),on=o("p"),jf=a("Construct a \u201Cfast\u201D T5 tokenizer (backed by HuggingFace\u2019s "),gd=o("em"),qf=a("tokenizers"),Ff=a(" library). Based on "),os=o("a"),Mf=a("Unigram"),Pf=a("."),Cf=d(),ss=o("p"),Af=a("This tokenizer inherits from "),cr=o("a"),Lf=a("PreTrainedTokenizerFast"),Nf=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),If=d(),Et=o("div"),f(as.$$.fragment),Df=d(),Td=o("p"),Sf=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Of=d(),rs=o("ul"),pr=o("li"),Gf=a("single sequence: "),vd=o("code"),Bf=a("X </s>"),Uf=d(),hr=o("li"),Wf=a("pair of sequences: "),kd=o("code"),Rf=a("A </s> B </s>"),Hf=d(),Nn=o("div"),f(is.$$.fragment),Vf=d(),bd=o("p"),Kf=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned.`),wc=d(),sn=o("h2"),In=o("a"),wd=o("span"),f(ds.$$.fragment),Yf=d(),yd=o("span"),Jf=a("T5Model"),yc=d(),X=o("div"),f(ls.$$.fragment),Zf=d(),xd=o("p"),Xf=a("The bare T5 Model transformer outputting raw hidden-states without any specific head on top."),Qf=d(),cs=o("p"),e_=a("The T5 model was proposed in "),ps=o("a"),t_=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),n_=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),o_=d(),hs=o("p"),s_=a("This model inherits from "),ur=o("a"),a_=a("PreTrainedModel"),r_=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),i_=d(),us=o("p"),d_=a("This model is also a PyTorch "),ms=o("a"),l_=a("torch.nn.Module"),c_=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),p_=d(),We=o("div"),f(fs.$$.fragment),h_=d(),an=o("p"),u_=a("The "),mr=o("a"),m_=a("T5Model"),f_=a(" forward method, overrides the "),zd=o("code"),__=a("__call__"),g_=a(" special method."),T_=d(),f(Dn.$$.fragment),v_=d(),$d=o("p"),k_=a("Example:"),b_=d(),f(_s.$$.fragment),w_=d(),Re=o("div"),f(gs.$$.fragment),y_=d(),Ed=o("p"),x_=a("This is an experimental feature and is a subject to change at a moment\u2019s notice."),z_=d(),jd=o("p"),$_=a(`Uses a device map to distribute attention modules of the model across several devices. 
If no device map is given, it will evenly distribute blocks across all devices.`),E_=d(),qd=o("p"),j_=a("Example:"),q_=d(),f(Ts.$$.fragment),F_=d(),ut=o("div"),f(vs.$$.fragment),M_=d(),Fd=o("p"),P_=a("Moves the model to cpu from a model parallel state."),C_=d(),Md=o("p"),A_=a("Example:"),L_=d(),f(ks.$$.fragment),xc=d(),rn=o("h2"),Sn=o("a"),Pd=o("span"),f(bs.$$.fragment),N_=d(),Cd=o("span"),I_=a("T5ForConditionalGeneration"),zc=d(),Q=o("div"),f(ws.$$.fragment),D_=d(),ys=o("p"),S_=a("T5 Model with a "),Ad=o("code"),O_=a("language modeling"),G_=a(" head on top."),B_=d(),xs=o("p"),U_=a("The T5 model was proposed in "),zs=o("a"),W_=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),R_=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),H_=d(),$s=o("p"),V_=a("This model inherits from "),fr=o("a"),K_=a("PreTrainedModel"),Y_=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),J_=d(),Es=o("p"),Z_=a("This model is also a PyTorch "),js=o("a"),X_=a("torch.nn.Module"),Q_=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),eg=d(),He=o("div"),f(qs.$$.fragment),tg=d(),dn=o("p"),ng=a("The "),_r=o("a"),og=a("T5ForConditionalGeneration"),sg=a(" forward method, overrides the "),Ld=o("code"),ag=a("__call__"),rg=a(" special method."),ig=d(),f(On.$$.fragment),dg=d(),Nd=o("p"),lg=a("Examples:"),cg=d(),f(Fs.$$.fragment),pg=d(),Ve=o("div"),f(Ms.$$.fragment),hg=d(),Id=o("p"),ug=a("This is an experimental feature and is a subject to change at a moment\u2019s notice."),mg=d(),Dd=o("p"),fg=a(`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),_g=d(),Sd=o("p"),gg=a("Example:"),Tg=d(),f(Ps.$$.fragment),vg=d(),mt=o("div"),f(Cs.$$.fragment),kg=d(),Od=o("p"),bg=a("Moves the model to cpu from a model parallel state."),wg=d(),Gd=o("p"),yg=a("Example:"),xg=d(),f(As.$$.fragment),$c=d(),ln=o("h2"),Gn=o("a"),Bd=o("span"),f(Ls.$$.fragment),zg=d(),Ud=o("span"),$g=a("T5EncoderModel"),Ec=d(),ee=o("div"),f(Ns.$$.fragment),Eg=d(),Wd=o("p"),jg=a("The bare T5 Model transformer outputting encoder\u2019s raw hidden-states without any specific head on top."),qg=d(),Is=o("p"),Fg=a("The T5 model was proposed in "),Ds=o("a"),Mg=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),Pg=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),Cg=d(),Ss=o("p"),Ag=a("This model inherits from "),gr=o("a"),Lg=a("PreTrainedModel"),Ng=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ig=d(),Os=o("p"),Dg=a("This model is also a PyTorch "),Gs=o("a"),Sg=a("torch.nn.Module"),Og=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gg=d(),Ke=o("div"),f(Bs.$$.fragment),Bg=d(),cn=o("p"),Ug=a("The "),Tr=o("a"),Wg=a("T5EncoderModel"),Rg=a(" forward method, overrides the "),Rd=o("code"),Hg=a("__call__"),Vg=a(" special method."),Kg=d(),f(Bn.$$.fragment),Yg=d(),Hd=o("p"),Jg=a("Example:"),Zg=d(),f(Us.$$.fragment),Xg=d(),Ye=o("div"),f(Ws.$$.fragment),Qg=d(),Vd=o("p"),eT=a("This is an experimental feature and is a subject to change at a moment\u2019s notice."),tT=d(),Kd=o("p"),nT=a(`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),oT=d(),Yd=o("p"),sT=a("Example:"),aT=d(),f(Rs.$$.fragment),rT=d(),ft=o("div"),f(Hs.$$.fragment),iT=d(),Jd=o("p"),dT=a("Moves the model to cpu from a model parallel state."),lT=d(),Zd=o("p"),cT=a("Example:"),pT=d(),f(Vs.$$.fragment),jc=d(),pn=o("h2"),Un=o("a"),Xd=o("span"),f(Ks.$$.fragment),hT=d(),Qd=o("span"),uT=a("TFT5Model"),qc=d(),de=o("div"),f(Ys.$$.fragment),mT=d(),el=o("p"),fT=a("The bare T5 Model transformer outputting raw hidden-stateswithout any specific head on top."),_T=d(),Js=o("p"),gT=a("The T5 model was proposed in "),Zs=o("a"),TT=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),vT=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),kT=d(),Xs=o("p"),bT=a("This model inherits from "),vr=o("a"),wT=a("TFPreTrainedModel"),yT=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xT=d(),Qs=o("p"),zT=a("This model is also a "),ea=o("a"),$T=a("tf.keras.Model"),ET=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jT=d(),f(Wn.$$.fragment),qT=d(),Je=o("div"),f(ta.$$.fragment),FT=d(),hn=o("p"),MT=a("The "),kr=o("a"),PT=a("TFT5Model"),CT=a(" forward method, overrides the "),tl=o("code"),AT=a("__call__"),LT=a(" special method."),NT=d(),f(Rn.$$.fragment),IT=d(),nl=o("p"),DT=a("Examples:"),ST=d(),f(na.$$.fragment),Fc=d(),un=o("h2"),Hn=o("a"),ol=o("span"),f(oa.$$.fragment),OT=d(),sl=o("span"),GT=a("TFT5ForConditionalGeneration"),Mc=d(),le=o("div"),f(sa.$$.fragment),BT=d(),aa=o("p"),UT=a("T5 Model with a "),al=o("code"),WT=a("language modeling"),RT=a(" head on top."),HT=d(),ra=o("p"),VT=a("The T5 model was proposed in "),ia=o("a"),KT=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),YT=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),JT=d(),da=o("p"),ZT=a("This model inherits from "),br=o("a"),XT=a("TFPreTrainedModel"),QT=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ev=d(),la=o("p"),tv=a("This model is also a "),ca=o("a"),nv=a("tf.keras.Model"),ov=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sv=d(),f(Vn.$$.fragment),av=d(),Ze=o("div"),f(pa.$$.fragment),rv=d(),mn=o("p"),iv=a("The "),wr=o("a"),dv=a("TFT5ForConditionalGeneration"),lv=a(" forward method, overrides the "),rl=o("code"),cv=a("__call__"),pv=a(" special method."),hv=d(),f(Kn.$$.fragment),uv=d(),il=o("p"),mv=a("Examples:"),fv=d(),f(ha.$$.fragment),Pc=d(),fn=o("h2"),Yn=o("a"),dl=o("span"),f(ua.$$.fragment),_v=d(),ll=o("span"),gv=a("TFT5EncoderModel"),Cc=d(),ce=o("div"),f(ma.$$.fragment),Tv=d(),cl=o("p"),vv=a("The bare T5 Model transformer outputting encoder\u2019s raw hidden-stateswithout any specific head on top."),kv=d(),fa=o("p"),bv=a("The T5 model was proposed in "),_a=o("a"),wv=a("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),yv=a(` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),xv=d(),ga=o("p"),zv=a("This model inherits from "),yr=o("a"),$v=a("TFPreTrainedModel"),Ev=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jv=d(),Ta=o("p"),qv=a("This model is also a "),va=o("a"),Fv=a("tf.keras.Model"),Mv=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Pv=d(),f(Jn.$$.fragment),Cv=d(),Xe=o("div"),f(ka.$$.fragment),Av=d(),_n=o("p"),Lv=a("The "),xr=o("a"),Nv=a("TFT5EncoderModel"),Iv=a(" forward method, overrides the "),pl=o("code"),Dv=a("__call__"),Sv=a(" special method."),Ov=d(),f(Zn.$$.fragment),Gv=d(),hl=o("p"),Bv=a("Examples:"),Uv=d(),f(ba.$$.fragment),Ac=d(),gn=o("h2"),Xn=o("a"),ul=o("span"),f(wa.$$.fragment),Wv=d(),ml=o("span"),Rv=a("FlaxT5Model"),Lc=d(),vt=o("div"),Qe=o("div"),f(ya.$$.fragment),Hv=d(),Tn=o("p"),Vv=a("The "),fl=o("code"),Kv=a("FlaxT5PreTrainedModel"),Yv=a(" forward method, overrides the "),_l=o("code"),Jv=a("__call__"),Zv=a(" special method."),Xv=d(),f(Qn.$$.fragment),Qv=d(),gl=o("p"),ek=a("Example:"),tk=d(),f(xa.$$.fragment),nk=d(),jt=o("div"),f(za.$$.fragment),ok=d(),Tl=o("p"),sk=a("Example:"),ak=d(),f($a.$$.fragment),rk=d(),qt=o("div"),f(Ea.$$.fragment),ik=d(),vl=o("p"),dk=a("Example:"),lk=d(),f(ja.$$.fragment),Nc=d(),vn=o("h2"),eo=o("a"),kl=o("span"),f(qa.$$.fragment),ck=d(),bl=o("span"),pk=a("FlaxT5ForConditionalGeneration"),Ic=d(),kt=o("div"),et=o("div"),f(Fa.$$.fragment),hk=d(),kn=o("p"),uk=a("The "),wl=o("code"),mk=a("FlaxT5PreTrainedModel"),fk=a(" forward method, overrides the "),yl=o("code"),_k=a("__call__"),gk=a(" special method."),Tk=d(),f(to.$$.fragment),vk=d(),xl=o("p"),kk=a("Example:"),bk=d(),f(Ma.$$.fragment),wk=d(),Ft=o("div"),f(Pa.$$.fragment),yk=d(),zl=o("p"),xk=a("Example:"),zk=d(),f(Ca.$$.fragment),$k=d(),Mt=o("div"),f(Aa.$$.fragment),Ek=d(),$l=o("p"),jk=a("Example:"),qk=d(),f(La.$$.fragment),this.h()},l(n){const h=Jw('[data-svelte="svelte-1phssyn"]',document.head);u=s(h,"META",{name:!0,content:!0}),h.forEach(t),$=l(n),b=s(n,"H1",{class:!0});var Na=i(b);y=s(Na,"A",{id:!0,class:!0,href:!0});var El=i(y);z=s(El,"SPAN",{});var jl=i(z);_(x.$$.fragment,jl),jl.forEach(t),El.forEach(t),w=l(Na),E=s(Na,"SPAN",{});var ql=i(E);Ee=r(ql,"T5"),ql.forEach(t),Na.forEach(t),ae=l(n),j=s(n,"H2",{class:!0});var 
Ia=i(j);te=s(Ia,"A",{id:!0,class:!0,href:!0});var Fl=i(te);O=s(Fl,"SPAN",{});var Ml=i(O);_(re.$$.fragment,Ml),Ml.forEach(t),Fl.forEach(t),je=l(Ia),G=s(Ia,"SPAN",{});var Pl=i(G);qe=r(Pl,"Overview"),Pl.forEach(t),Ia.forEach(t),Te=l(n),B=s(n,"P",{});var Da=i(B);I=r(Da,"The T5 model was presented in "),ie=s(Da,"A",{href:!0,rel:!0});var Cl=i(ie);pe=r(Cl,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),Cl.forEach(t),F=r(Da,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Da.forEach(t),C=l(n),he=s(n,"P",{});var Al=i(he);K=r(Al,"The abstract from the paper is the following:"),Al.forEach(t),ve=l(n),ue=s(n,"P",{});var Lk=i(ue);U=s(Lk,"EM",{});var Nk=i(U);Fe=r(Nk,`Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pretraining objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new \u201CColossal Clean Crawled Corpus\u201D, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.`),Nk.forEach(t),Lk.forEach(t),ke=l(n),M=s(n,"P",{});var Ik=i(M);Me=r(Ik,"Tips:"),Ik.forEach(t),H=l(n),V=s(n,"UL",{});var zr=i(V);fe=s(zr,"LI",{});var Dk=i(fe);A=s(Dk,"P",{});var $r=i(A);Pe=r($r,`T5 is an encoder-decoder model pre-trained on a multi-task mixture of unsupervised and supervised tasks and for which each task is converted into a text-to-text format. T5 works well on a variety of tasks out-of-the-box by prepending a different prefix to the input corresponding to each task, e.g., for translation: `),_e=s($r,"EM",{});var Sk=i(_e);D=r(Sk,"translate English to German: \u2026"),Sk.forEach(t),Ce=r($r,`, for summarization: `),W=s($r,"EM",{});var Ok=i(W);Ae=r(Ok,"summarize: \u2026"),Ok.forEach(t),m=r($r,"."),$r.forEach(t),Dk.forEach(t),q=l(zr),ne=s(zr,"LI",{});var Gk=i(ne);Oe=s(Gk,"P",{});var Bk=i(Oe);rt=r(Bk,"T5 uses relative scalar embeddings. 
Encoder input padding can be done on the left and on the right."),Bk.forEach(t),Gk.forEach(t),S=l(zr),Ge=s(zr,"LI",{});var Uk=i(Ge);oe=s(Uk,"P",{});var no=i(oe);it=r(no,"See the "),L=s(no,"A",{href:!0});var Wk=i(L);Y=r(Wk,"training"),Wk.forEach(t),dt=r(no,", "),Le=s(no,"A",{href:!0});var Rk=i(Le);J=r(Rk,"inference"),Rk.forEach(t),lt=r(no," and "),Ne=s(no,"A",{href:!0});var Hk=i(Ne);Ie=r(Hk,"scripts"),Hk.forEach(t),ct=r(no," sections below for all details regarding usage."),no.forEach(t),Uk.forEach(t),zr.forEach(t),Dl=l(n),Sa=s(n,"P",{});var Vk=i(Sa);Lp=r(Vk,"T5 comes in different sizes:"),Vk.forEach(t),Sl=l(n),Be=s(n,"UL",{});var Pt=i(Be);ei=s(Pt,"LI",{});var Kk=i(ei);ti=s(Kk,"P",{});var Yk=i(ti);co=s(Yk,"A",{href:!0,rel:!0});var Jk=i(co);Np=r(Jk,"t5-small"),Jk.forEach(t),Yk.forEach(t),Kk.forEach(t),Ip=l(Pt),ni=s(Pt,"LI",{});var Zk=i(ni);oi=s(Zk,"P",{});var Xk=i(oi);po=s(Xk,"A",{href:!0,rel:!0});var Qk=i(po);Dp=r(Qk,"t5-base"),Qk.forEach(t),Xk.forEach(t),Zk.forEach(t),Sp=l(Pt),si=s(Pt,"LI",{});var e5=i(si);ai=s(e5,"P",{});var t5=i(ai);ho=s(t5,"A",{href:!0,rel:!0});var n5=i(ho);Op=r(n5,"t5-large"),n5.forEach(t),t5.forEach(t),e5.forEach(t),Gp=l(Pt),ri=s(Pt,"LI",{});var o5=i(ri);ii=s(o5,"P",{});var s5=i(ii);uo=s(s5,"A",{href:!0,rel:!0});var a5=i(uo);Bp=r(a5,"t5-3b"),a5.forEach(t),s5.forEach(t),o5.forEach(t),Up=l(Pt),di=s(Pt,"LI",{});var r5=i(di);Oa=s(r5,"P",{});var Fk=i(Oa);mo=s(Fk,"A",{href:!0,rel:!0});var i5=i(mo);Wp=r(i5,"t5-11b"),i5.forEach(t),Rp=r(Fk,"."),Fk.forEach(t),r5.forEach(t),Pt.forEach(t),Ol=l(n),Ga=s(n,"P",{});var d5=i(Ga);Hp=r(d5,"Based on the original T5 model, Google has released some follow-up works:"),d5.forEach(t),Gl=l(n),wt=s(n,"UL",{});var Er=i(wt);li=s(Er,"LI",{});var l5=i(li);bn=s(l5,"P",{});var Ll=i(bn);ci=s(Ll,"STRONG",{});var c5=i(ci);Vp=r(c5,"T5v1.1"),c5.forEach(t),Kp=r(Ll,`: T5v1.1 is an improved version of T5 with some architectural tweaks, and is pre-trained on C4 only without mixing in the supervised tasks. Refer to the documentation of T5v1.1 which can be found `),Ba=s(Ll,"A",{href:!0});var p5=i(Ba);Yp=r(p5,"here"),p5.forEach(t),Jp=r(Ll,"."),Ll.forEach(t),l5.forEach(t),Zp=l(Er),pi=s(Er,"LI",{});var h5=i(pi);wn=s(h5,"P",{});var Nl=i(wn);hi=s(Nl,"STRONG",{});var u5=i(hi);Xp=r(u5,"mT5"),u5.forEach(t),Qp=r(Nl,`: mT5 is a multilingual T5 model. It is pre-trained on the mC4 corpus, which includes 101 languages. Refer to the documentation of mT5 which can be found `),Ua=s(Nl,"A",{href:!0});var m5=i(Ua);eh=r(m5,"here"),m5.forEach(t),th=r(Nl,"."),Nl.forEach(t),h5.forEach(t),nh=l(Er),ui=s(Er,"LI",{});var f5=i(ui);yn=s(f5,"P",{});var Il=i(yn);mi=s(Il,"STRONG",{});var _5=i(mi);oh=r(_5,"byT5"),_5.forEach(t),sh=r(Il,`: byT5 is a T5 model pre-trained on byte sequences rather than SentencePiece subword token sequences. Refer to the documentation of byT5 which can be found `),Wa=s(Il,"A",{href:!0});var g5=i(Wa);ah=r(g5,"here"),g5.forEach(t),rh=r(Il,"."),Il.forEach(t),f5.forEach(t),Er.forEach(t),Bl=l(n),xn=s(n,"P",{});var Sc=i(xn);ih=r(Sc,"All checkpoints can be found on the "),fo=s(Sc,"A",{href:!0,rel:!0});var T5=i(fo);dh=r(T5,"hub"),T5.forEach(t),lh=r(Sc,"."),Sc.forEach(t),Ul=l(n),yt=s(n,"P",{});var jr=i(yt);ch=r(jr,"This model was contributed by "),_o=s(jr,"A",{href:!0,rel:!0});var v5=i(_o);ph=r(v5,"thomwolf"),v5.forEach(t),hh=r(jr,". 
The original code can be found "),go=s(jr,"A",{href:!0,rel:!0});var k5=i(go);uh=r(k5,"here"),k5.forEach(t),mh=r(jr,"."),jr.forEach(t),Wl=l(n),Ra=s(n,"A",{id:!0}),i(Ra).forEach(t),Rl=l(n),Ht=s(n,"H2",{class:!0});var Oc=i(Ht);zn=s(Oc,"A",{id:!0,class:!0,href:!0});var b5=i(zn);fi=s(b5,"SPAN",{});var w5=i(fi);_(To.$$.fragment,w5),w5.forEach(t),b5.forEach(t),fh=l(Oc),_i=s(Oc,"SPAN",{});var y5=i(_i);_h=r(y5,"Training"),y5.forEach(t),Oc.forEach(t),Hl=l(n),pt=s(n,"P",{});var oo=i(pt);gh=r(oo,`T5 is an encoder-decoder model and converts all NLP problems into a text-to-text format. It is trained using teacher forcing. This means that for training, we always need an input sequence and a corresponding target sequence. The input sequence is fed to the model using `),gi=s(oo,"CODE",{});var x5=i(gi);Th=r(x5,"input_ids"),x5.forEach(t),vh=r(oo,`. The target sequence is shifted to the right, i.e., prepended by a start-sequence token and fed to the decoder using the `),Ti=s(oo,"CODE",{});var z5=i(Ti);kh=r(z5,"decoder_input_ids"),z5.forEach(t),bh=r(oo,`. In teacher-forcing style, the target sequence is then appended by the EOS token and corresponds to the `),vi=s(oo,"CODE",{});var $5=i(vi);wh=r($5,"labels"),$5.forEach(t),yh=r(oo,`. The PAD token is hereby used as the start-sequence token. T5 can be trained / fine-tuned both in a supervised and unsupervised fashion.`),oo.forEach(t),Vl=l(n),$n=s(n,"P",{});var Gc=i($n);xh=r(Gc,"One can use "),Ha=s(Gc,"A",{href:!0});var E5=i(Ha);zh=r(E5,"T5ForConditionalGeneration"),E5.forEach(t),$h=r(Gc,` (or the Tensorflow/Flax variant), which includes the language modeling head on top of the decoder.`),Gc.forEach(t),Kl=l(n),Va=s(n,"UL",{});var j5=i(Va);Vt=s(j5,"LI",{});var qr=i(Vt);ki=s(qr,"P",{});var q5=i(ki);Eh=r(q5,"Unsupervised denoising training"),q5.forEach(t),jh=l(qr),ge=s(qr,"P",{});var tt=i(ge);qh=r(tt,"In this setup, spans of the input sequence are masked by so-called sentinel tokens ("),bi=s(tt,"EM",{});var F5=i(bi);Fh=r(F5,"a.k.a"),F5.forEach(t),Mh=r(tt,` unique mask tokens) and the output sequence is formed as a concatenation of the same sentinel tokens and the `),wi=s(tt,"EM",{});var M5=i(wi);Ph=r(M5,"real"),M5.forEach(t),Ch=r(tt,` masked tokens. Each sentinel token represents a unique mask token for this sentence and should start with `),yi=s(tt,"CODE",{});var P5=i(yi);Ah=r(P5,"<extra_id_0>"),P5.forEach(t),Lh=r(tt,`, `),xi=s(tt,"CODE",{});var C5=i(xi);Nh=r(C5,"<extra_id_1>"),C5.forEach(t),Ih=r(tt,", \u2026 up to "),zi=s(tt,"CODE",{});var A5=i(zi);Dh=r(A5,"<extra_id_99>"),A5.forEach(t),Sh=r(tt,`. 
As a default, 100 sentinel tokens are available in `),Ka=s(tt,"A",{href:!0});var L5=i(Ka);Oh=r(L5,"T5Tokenizer"),L5.forEach(t),Gh=r(tt,"."),tt.forEach(t),Bh=l(qr),$i=s(qr,"P",{});var N5=i($i);Uh=r(N5,`For instance, the sentence \u201CThe cute dog walks in the park\u201D with the masks put on \u201Ccute dog\u201D and \u201Cthe\u201D should be processed as follows:`),N5.forEach(t),qr.forEach(t),j5.forEach(t),Yl=l(n),_(vo.$$.fragment,n),Jl=l(n),En=s(n,"P",{});var Bc=i(En);Wh=r(Bc,"If you\u2019re interested in pre-training T5 on a new corpus, check out the "),ko=s(Bc,"A",{href:!0,rel:!0});var I5=i(ko);Rh=r(I5,"run_t5_mlm_flax.py"),I5.forEach(t),Hh=r(Bc,` script in the Examples directory.`),Bc.forEach(t),Zl=l(n),Ya=s(n,"UL",{});var D5=i(Ya);bo=s(D5,"LI",{});var Uc=i(bo);Ei=s(Uc,"P",{});var S5=i(Ei);Vh=r(S5,"Supervised training"),S5.forEach(t),Kh=l(Uc),ji=s(Uc,"P",{});var O5=i(ji);Yh=r(O5,`In this setup, the input sequence and output sequence are a standard sequence-to-sequence input-output mapping. Suppose that we want to fine-tune the model for translation for example, and we have a training example: the input sequence \u201CThe house is wonderful.\u201D and output sequence \u201CDas Haus ist wunderbar.\u201D, then they should be prepared for the model as follows:`),O5.forEach(t),Uc.forEach(t),D5.forEach(t),Xl=l(n),_(wo.$$.fragment,n),Ql=l(n),se=s(n,"P",{});var be=i(se);Jh=r(be,"As you can see, only 2 inputs are required for the model in order to compute a loss: "),qi=s(be,"CODE",{});var G5=i(qi);Zh=r(G5,"input_ids"),G5.forEach(t),Xh=r(be,` (which are the `),Fi=s(be,"CODE",{});var B5=i(Fi);Qh=r(B5,"input_ids"),B5.forEach(t),eu=r(be," of the encoded input sequence) and "),Mi=s(be,"CODE",{});var U5=i(Mi);tu=r(U5,"labels"),U5.forEach(t),nu=r(be," (which are the "),Pi=s(be,"CODE",{});var W5=i(Pi);ou=r(W5,"input_ids"),W5.forEach(t),su=r(be,` of the encoded target sequence). The model will automatically create the `),Ci=s(be,"CODE",{});var R5=i(Ci);au=r(R5,"decoder_input_ids"),R5.forEach(t),ru=r(be," based on the "),Ai=s(be,"CODE",{});var H5=i(Ai);iu=r(H5,"labels"),H5.forEach(t),du=r(be,`, by shifting them one position to the right and prepending the `),Li=s(be,"CODE",{});var V5=i(Li);lu=r(V5,"config.decoder_start_token_id"),V5.forEach(t),cu=r(be,`, which for T5 is equal to 0 (i.e. the id of the pad token). Also note the task prefix: we prepend the input sequence with \u2018translate English to German: \u2019 before encoding it. This will help in improving the performance, as this task prefix was used during T5\u2019s pre-training.`),be.forEach(t),ec=l(n),xt=s(n,"P",{});var Fr=i(xt);pu=r(Fr,`However, the example above only shows a single training example. In practice, one trains deep learning models in batches. This entails that we must pad/truncate examples to the same length. For encoder-decoder models, one typically defines a `),Ni=s(Fr,"CODE",{});var K5=i(Ni);hu=r(K5,"max_source_length"),K5.forEach(t),uu=r(Fr," and "),Ii=s(Fr,"CODE",{});var Y5=i(Ii);mu=r(Y5,"max_target_length"),Y5.forEach(t),fu=r(Fr,`, which determine the maximum length of the input and output sequences respectively (otherwise they are truncated). These should be carefully set depending on the task.`),Fr.forEach(t),tc=l(n),me=s(n,"P",{});var nt=i(me);_u=r(nt,"In addition, we must make sure that padding token id\u2019s of the "),Di=s(nt,"CODE",{});var J5=i(Di);gu=r(J5,"labels"),J5.forEach(t),Tu=r(nt,` are not taken into account by the loss function. 
In PyTorch and Tensorflow, this can be done by replacing them with -100, which is the `),Si=s(nt,"CODE",{});var Z5=i(Si);vu=r(Z5,"ignore_index"),Z5.forEach(t),ku=r(nt,` of the `),Oi=s(nt,"CODE",{});var X5=i(Oi);bu=r(X5,"CrossEntropyLoss"),X5.forEach(t),wu=r(nt,". In Flax, one can use the "),Gi=s(nt,"CODE",{});var Q5=i(Gi);yu=r(Q5,"decoder_attention_mask"),Q5.forEach(t),xu=r(nt,` to ignore padded tokens from the loss (see the `),yo=s(nt,"A",{href:!0,rel:!0});var eb=i(yo);zu=r(eb,"Flax summarization script"),eb.forEach(t),$u=r(nt,` for details). We also pass `),Bi=s(nt,"CODE",{});var tb=i(Bi);Eu=r(tb,"attention_mask"),tb.forEach(t),ju=r(nt,` as additional input to the model, which makes sure that padding tokens of the inputs are ignored. The code example below illustrates all of this.`),nt.forEach(t),nc=l(n),_(xo.$$.fragment,n),oc=l(n),Ja=s(n,"P",{});var nb=i(Ja);qu=r(nb,"Additional training tips:"),nb.forEach(t),sc=l(n),zt=s(n,"UL",{});var Mr=i(zt);Ui=s(Mr,"LI",{});var ob=i(Ui);zo=s(ob,"P",{});var Wc=i(zo);Fu=r(Wc,"T5 models need a slightly higher learning rate than the default one set in the "),Wi=s(Wc,"CODE",{});var sb=i(Wi);Mu=r(sb,"Trainer"),sb.forEach(t),Pu=r(Wc,` when using the AdamW optimizer. Typically, 1e-4 and 3e-4 work well for most problems (classification, summarization, translation, question answering, question generation). Note that T5 was pre-trained using the AdaFactor optimizer.`),Wc.forEach(t),ob.forEach(t),Cu=l(Mr),Ri=s(Mr,"LI",{});var ab=i(Ri);Kt=s(ab,"P",{});var Pr=i(Kt);Au=r(Pr,"According to "),$o=s(Pr,"A",{href:!0,rel:!0});var rb=i($o);Lu=r(rb,"this forum post"),rb.forEach(t),Nu=r(Pr,`, task prefixes matter when (1) doing multi-task training (2) your task is similar or related to one of the supervised tasks used in T5\u2019s pre-training mixture (see Appendix D of the `),Eo=s(Pr,"A",{href:!0,rel:!0});var ib=i(Eo);Iu=r(ib,"paper"),ib.forEach(t),Du=r(Pr,` for the task prefixes used).`),Pr.forEach(t),ab.forEach(t),Su=l(Mr),Hi=s(Mr,"LI",{});var db=i(Hi);jo=s(db,"P",{});var Rc=i(jo);Ou=r(Rc,`If training on TPU, it is recommended to pad all examples of the dataset to the same length or make use of `),Vi=s(Rc,"EM",{});var lb=i(Vi);Gu=r(lb,"pad_to_multiple_of"),lb.forEach(t),Bu=r(Rc,` to have a small number of predefined bucket sizes to fit all examples in. Dynamically padding batches to the longest example is not recommended on TPU as it triggers a recompilation for every batch shape that is encountered during training thus significantly slowing down the training. only padding up to the longest example in a batch) leads to very slow training on TPU.`),Rc.forEach(t),db.forEach(t),Mr.forEach(t),ac=l(n),Za=s(n,"A",{id:!0}),i(Za).forEach(t),rc=l(n),Yt=s(n,"H2",{class:!0});var Hc=i(Yt);jn=s(Hc,"A",{id:!0,class:!0,href:!0});var cb=i(jn);Ki=s(cb,"SPAN",{});var pb=i(Ki);_(qo.$$.fragment,pb),pb.forEach(t),cb.forEach(t),Uu=l(Hc),Yi=s(Hc,"SPAN",{});var hb=i(Yi);Wu=r(hb,"Inference"),hb.forEach(t),Hc.forEach(t),ic=l(n),ht=s(n,"P",{});var so=i(ht);Ru=r(so,"At inference time, it is recommended to use "),Xa=s(so,"A",{href:!0});var ub=i(Xa);Hu=r(ub,"generate()"),ub.forEach(t),Vu=r(so,`. This method takes care of encoding the input and feeding the encoded hidden states via cross-attention layers to the decoder and auto-regressively generates the decoder output. Check out `),Fo=s(so,"A",{href:!0,rel:!0});var mb=i(Fo);Ku=r(mb,"this blog post"),mb.forEach(t),Yu=r(so,` to know all the details about generating text with Transformers. 
There\u2019s also `),Mo=s(so,"A",{href:!0,rel:!0});var fb=i(Mo);Ju=r(fb,"this blog post"),fb.forEach(t),Zu=r(so,` which explains how generation works in general in encoder-decoder models.`),so.forEach(t),dc=l(n),_(Po.$$.fragment,n),lc=l(n),Ue=s(n,"P",{});var Ct=i(Ue);Xu=r(Ct,"Note that T5 uses the "),Ji=s(Ct,"CODE",{});var _b=i(Ji);Qu=r(_b,"pad_token_id"),_b.forEach(t),em=r(Ct," as the "),Zi=s(Ct,"CODE",{});var gb=i(Zi);tm=r(gb,"decoder_start_token_id"),gb.forEach(t),nm=r(Ct,`, so when doing generation without using `),Qa=s(Ct,"A",{href:!0});var Tb=i(Qa);om=r(Tb,"generate()"),Tb.forEach(t),sm=r(Ct,", make sure you start it with the "),Xi=s(Ct,"CODE",{});var vb=i(Xi);am=r(vb,"pad_token_id"),vb.forEach(t),rm=r(Ct,"."),Ct.forEach(t),cc=l(n),er=s(n,"P",{});var kb=i(er);im=r(kb,"The example above only shows a single example. You can also do batched inference, like so:"),kb.forEach(t),pc=l(n),_(Co.$$.fragment,n),hc=l(n),tr=s(n,"A",{id:!0}),i(tr).forEach(t),uc=l(n),Jt=s(n,"H2",{class:!0});var Vc=i(Jt);qn=s(Vc,"A",{id:!0,class:!0,href:!0});var bb=i(qn);Qi=s(bb,"SPAN",{});var wb=i(Qi);_(Ao.$$.fragment,wb),wb.forEach(t),bb.forEach(t),dm=l(Vc),ed=s(Vc,"SPAN",{});var yb=i(ed);lm=r(yb,"Example scripts"),yb.forEach(t),Vc.forEach(t),mc=l(n),nr=s(n,"P",{});var xb=i(nr);cm=r(xb,"T5 is supported by several example scripts, both for pre-training and fine-tuning."),xb.forEach(t),fc=l(n),Fn=s(n,"UL",{});var Kc=i(Fn);td=s(Kc,"LI",{});var zb=i(td);Zt=s(zb,"P",{});var Cr=i(Zt);pm=r(Cr,"pre-training: the "),Lo=s(Cr,"A",{href:!0,rel:!0});var $b=i(Lo);hm=r($b,"run_t5_mlm_flax.py"),$b.forEach(t),um=r(Cr,` script allows you to further pre-train T5 or pre-train T5 from scratch on your own data. The `),No=s(Cr,"A",{href:!0,rel:!0});var Eb=i(No);mm=r(Eb,"t5_tokenizer_model.py"),Eb.forEach(t),fm=r(Cr,` script allows you to further train a T5 tokenizer or train a T5 Tokenizer from scratch on your own data. Note that Flax (a neural network library on top of JAX) is particularly useful to train on TPU hardware.`),Cr.forEach(t),zb.forEach(t),_m=l(Kc),nd=s(Kc,"LI",{});var jb=i(nd);De=s(jb,"P",{});var _t=i(De);gm=r(_t,"fine-tuning: T5 is supported by the official summarization scripts ("),Io=s(_t,"A",{href:!0,rel:!0});var qb=i(Io);Tm=r(qb,"PyTorch"),qb.forEach(t),vm=r(_t,", "),Do=s(_t,"A",{href:!0,rel:!0});var Fb=i(Do);km=r(Fb,"Tensorflow"),Fb.forEach(t),bm=r(_t,", and "),So=s(_t,"A",{href:!0,rel:!0});var Mb=i(So);wm=r(Mb,"Flax"),Mb.forEach(t),ym=r(_t,`) and translation scripts (`),Oo=s(_t,"A",{href:!0,rel:!0});var Pb=i(Oo);xm=r(Pb,"PyTorch"),Pb.forEach(t),zm=r(_t," and "),Go=s(_t,"A",{href:!0,rel:!0});var Cb=i(Go);$m=r(Cb,"Tensorflow"),Cb.forEach(t),Em=r(_t,`). These scripts allow you to easily fine-tune T5 on custom data for summarization/translation.`),_t.forEach(t),jb.forEach(t),Kc.forEach(t),_c=l(n),Xt=s(n,"H2",{class:!0});var Yc=i(Xt);Mn=s(Yc,"A",{id:!0,class:!0,href:!0});var Ab=i(Mn);od=s(Ab,"SPAN",{});var Lb=i(od);_(Bo.$$.fragment,Lb),Lb.forEach(t),Ab.forEach(t),jm=l(Yc),sd=s(Yc,"SPAN",{});var Nb=i(sd);qm=r(Nb,"T5Config"),Nb.forEach(t),Yc.forEach(t),gc=l(n),gt=s(n,"DIV",{class:!0});var Ar=i(gt);_(Uo.$$.fragment,Ar),Fm=l(Ar),Tt=s(Ar,"P",{});var ao=i(Tt);Mm=r(ao,"This is the configuration class to store the configuration of a "),or=s(ao,"A",{href:!0});var Ib=i(or);Pm=r(Ib,"T5Model"),Ib.forEach(t),Cm=r(ao,` or a `),sr=s(ao,"A",{href:!0});var Db=i(sr);Am=r(Db,"TFT5Model"),Db.forEach(t),Lm=r(ao,`. It is used to instantiate a T5 model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the T5 `),Wo=s(ao,"A",{href:!0,rel:!0});var Sb=i(Wo);Nm=r(Sb,"t5-small"),Sb.forEach(t),Im=r(ao," architecture."),ao.forEach(t),Dm=l(Ar),Qt=s(Ar,"P",{});var Lr=i(Qt);Sm=r(Lr,"Configuration objects inherit from "),ar=s(Lr,"A",{href:!0});var Ob=i(ar);Om=r(Ob,"PretrainedConfig"),Ob.forEach(t),Gm=r(Lr,` and can be used to control the model outputs. Read the documentation from `),rr=s(Lr,"A",{href:!0});var Gb=i(rr);Bm=r(Gb,"PretrainedConfig"),Gb.forEach(t),Um=r(Lr," for more information."),Lr.forEach(t),Ar.forEach(t),Tc=l(n),en=s(n,"H2",{class:!0});var Jc=i(en);Pn=s(Jc,"A",{id:!0,class:!0,href:!0});var Bb=i(Pn);ad=s(Bb,"SPAN",{});var Ub=i(ad);_(Ro.$$.fragment,Ub),Ub.forEach(t),Bb.forEach(t),Wm=l(Jc),rd=s(Jc,"SPAN",{});var Wb=i(rd);Rm=r(Wb,"T5Tokenizer"),Wb.forEach(t),Jc.forEach(t),vc=l(n),Z=s(n,"DIV",{class:!0});var we=i(Z);_(Ho.$$.fragment,we),Hm=l(we),Vo=s(we,"P",{});var Zc=i(Vo);Vm=r(Zc,"Construct a T5 tokenizer. Based on "),Ko=s(Zc,"A",{href:!0,rel:!0});var Rb=i(Ko);Km=r(Rb,"SentencePiece"),Rb.forEach(t),Ym=r(Zc,"."),Zc.forEach(t),Jm=l(we),Yo=s(we,"P",{});var Xc=i(Yo);Zm=r(Xc,"This tokenizer inherits from "),ir=s(Xc,"A",{href:!0});var Hb=i(ir);Xm=r(Hb,"PreTrainedTokenizer"),Hb.forEach(t),Qm=r(Xc,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Xc.forEach(t),ef=l(we),tn=s(we,"P",{});var Nr=i(tn);tf=r(Nr,`Attributes: sp_model (`),id=s(Nr,"CODE",{});var Vb=i(id);nf=r(Vb,"SentencePieceProcessor"),Vb.forEach(t),of=r(Nr,`): The `),dd=s(Nr,"EM",{});var Kb=i(dd);sf=r(Kb,"SentencePiece"),Kb.forEach(t),af=r(Nr," processor that is used for every conversion (string, tokens and IDs)."),Nr.forEach(t),rf=l(we),$t=s(we,"DIV",{class:!0});var Ir=i($t);_(Jo.$$.fragment,Ir),df=l(Ir),ld=s(Ir,"P",{});var Yb=i(ld);lf=r(Yb,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Yb.forEach(t),cf=l(Ir),Zo=s(Ir,"UL",{});var Qc=i(Zo);dr=s(Qc,"LI",{});var Mk=i(dr);pf=r(Mk,"single sequence: "),cd=s(Mk,"CODE",{});var Jb=i(cd);hf=r(Jb,"X </s>"),Jb.forEach(t),Mk.forEach(t),uf=l(Qc),lr=s(Qc,"LI",{});var Pk=i(lr);mf=r(Pk,"pair of sequences: "),pd=s(Pk,"CODE",{});var Zb=i(pd);ff=r(Zb,"A </s> B </s>"),Zb.forEach(t),Pk.forEach(t),Qc.forEach(t),Ir.forEach(t),_f=l(we),Cn=s(we,"DIV",{class:!0});var ep=i(Cn);_(Xo.$$.fragment,ep),gf=l(ep),Qo=s(ep,"P",{});var tp=i(Qo);Tf=r(tp,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),hd=s(tp,"CODE",{});var Xb=i(hd);vf=r(Xb,"prepare_for_model"),Xb.forEach(t),kf=r(tp," method."),tp.forEach(t),ep.forEach(t),bf=l(we),An=s(we,"DIV",{class:!0});var np=i(An);_(es.$$.fragment,np),wf=l(np),ud=s(np,"P",{});var Qb=i(ud);yf=r(Qb,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
T5 does not make use of token type ids, therefore a list of zeros is returned.`),Qb.forEach(t),np.forEach(t),xf=l(we),md=s(we,"DIV",{class:!0}),i(md).forEach(t),we.forEach(t),kc=l(n),nn=s(n,"H2",{class:!0});var op=i(nn);Ln=s(op,"A",{id:!0,class:!0,href:!0});var e1=i(Ln);fd=s(e1,"SPAN",{});var t1=i(fd);_(ts.$$.fragment,t1),t1.forEach(t),e1.forEach(t),zf=l(op),_d=s(op,"SPAN",{});var n1=i(_d);$f=r(n1,"T5TokenizerFast"),n1.forEach(t),op.forEach(t),bc=l(n),Se=s(n,"DIV",{class:!0});var At=i(Se);_(ns.$$.fragment,At),Ef=l(At),on=s(At,"P",{});var Dr=i(on);jf=r(Dr,"Construct a \u201Cfast\u201D T5 tokenizer (backed by HuggingFace\u2019s "),gd=s(Dr,"EM",{});var o1=i(gd);qf=r(o1,"tokenizers"),o1.forEach(t),Ff=r(Dr," library). Based on "),os=s(Dr,"A",{href:!0,rel:!0});var s1=i(os);Mf=r(s1,"Unigram"),s1.forEach(t),Pf=r(Dr,"."),Dr.forEach(t),Cf=l(At),ss=s(At,"P",{});var sp=i(ss);Af=r(sp,"This tokenizer inherits from "),cr=s(sp,"A",{href:!0});var a1=i(cr);Lf=r(a1,"PreTrainedTokenizerFast"),a1.forEach(t),Nf=r(sp,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),sp.forEach(t),If=l(At),Et=s(At,"DIV",{class:!0});var Sr=i(Et);_(as.$$.fragment,Sr),Df=l(Sr),Td=s(Sr,"P",{});var r1=i(Td);Sf=r(r1,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),r1.forEach(t),Of=l(Sr),rs=s(Sr,"UL",{});var ap=i(rs);pr=s(ap,"LI",{});var Ck=i(pr);Gf=r(Ck,"single sequence: "),vd=s(Ck,"CODE",{});var i1=i(vd);Bf=r(i1,"X </s>"),i1.forEach(t),Ck.forEach(t),Uf=l(ap),hr=s(ap,"LI",{});var Ak=i(hr);Wf=r(Ak,"pair of sequences: "),kd=s(Ak,"CODE",{});var d1=i(kd);Rf=r(d1,"A </s> B </s>"),d1.forEach(t),Ak.forEach(t),ap.forEach(t),Sr.forEach(t),Hf=l(At),Nn=s(At,"DIV",{class:!0});var rp=i(Nn);_(is.$$.fragment,rp),Vf=l(rp),bd=s(rp,"P",{});var l1=i(bd);Kf=r(l1,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned.`),l1.forEach(t),rp.forEach(t),At.forEach(t),wc=l(n),sn=s(n,"H2",{class:!0});var ip=i(sn);In=s(ip,"A",{id:!0,class:!0,href:!0});var c1=i(In);wd=s(c1,"SPAN",{});var p1=i(wd);_(ds.$$.fragment,p1),p1.forEach(t),c1.forEach(t),Yf=l(ip),yd=s(ip,"SPAN",{});var h1=i(yd);Jf=r(h1,"T5Model"),h1.forEach(t),ip.forEach(t),yc=l(n),X=s(n,"DIV",{class:!0});var ye=i(X);_(ls.$$.fragment,ye),Zf=l(ye),xd=s(ye,"P",{});var u1=i(xd);Xf=r(u1,"The bare T5 Model transformer outputting raw hidden-states without any specific head on top."),u1.forEach(t),Qf=l(ye),cs=s(ye,"P",{});var dp=i(cs);e_=r(dp,"The T5 model was proposed in "),ps=s(dp,"A",{href:!0,rel:!0});var m1=i(ps);t_=r(m1,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),m1.forEach(t),n_=r(dp,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),dp.forEach(t),o_=l(ye),hs=s(ye,"P",{});var lp=i(hs);s_=r(lp,"This model inherits from "),ur=s(lp,"A",{href:!0});var f1=i(ur);a_=r(f1,"PreTrainedModel"),f1.forEach(t),r_=r(lp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lp.forEach(t),i_=l(ye),us=s(ye,"P",{});var cp=i(us);d_=r(cp,"This model is also a PyTorch "),ms=s(cp,"A",{href:!0,rel:!0});var _1=i(ms);l_=r(_1,"torch.nn.Module"),_1.forEach(t),c_=r(cp,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cp.forEach(t),p_=l(ye),We=s(ye,"DIV",{class:!0});var Lt=i(We);_(fs.$$.fragment,Lt),h_=l(Lt),an=s(Lt,"P",{});var Or=i(an);u_=r(Or,"The "),mr=s(Or,"A",{href:!0});var g1=i(mr);m_=r(g1,"T5Model"),g1.forEach(t),f_=r(Or," forward method, overrides the "),zd=s(Or,"CODE",{});var T1=i(zd);__=r(T1,"__call__"),T1.forEach(t),g_=r(Or," special method."),Or.forEach(t),T_=l(Lt),_(Dn.$$.fragment,Lt),v_=l(Lt),$d=s(Lt,"P",{});var v1=i($d);k_=r(v1,"Example:"),v1.forEach(t),b_=l(Lt),_(_s.$$.fragment,Lt),Lt.forEach(t),w_=l(ye),Re=s(ye,"DIV",{class:!0});var Nt=i(Re);_(gs.$$.fragment,Nt),y_=l(Nt),Ed=s(Nt,"P",{});var k1=i(Ed);x_=r(k1,"This is an experimental feature and is a subject to change at a moment\u2019s notice."),k1.forEach(t),z_=l(Nt),jd=s(Nt,"P",{});var b1=i(jd);$_=r(b1,`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),b1.forEach(t),E_=l(Nt),qd=s(Nt,"P",{});var w1=i(qd);j_=r(w1,"Example:"),w1.forEach(t),q_=l(Nt),_(Ts.$$.fragment,Nt),Nt.forEach(t),F_=l(ye),ut=s(ye,"DIV",{class:!0});var ro=i(ut);_(vs.$$.fragment,ro),M_=l(ro),Fd=s(ro,"P",{});var y1=i(Fd);P_=r(y1,"Moves the model to cpu from a model parallel state."),y1.forEach(t),C_=l(ro),Md=s(ro,"P",{});var x1=i(Md);A_=r(x1,"Example:"),x1.forEach(t),L_=l(ro),_(ks.$$.fragment,ro),ro.forEach(t),ye.forEach(t),xc=l(n),rn=s(n,"H2",{class:!0});var pp=i(rn);Sn=s(pp,"A",{id:!0,class:!0,href:!0});var z1=i(Sn);Pd=s(z1,"SPAN",{});var $1=i(Pd);_(bs.$$.fragment,$1),$1.forEach(t),z1.forEach(t),N_=l(pp),Cd=s(pp,"SPAN",{});var E1=i(Cd);I_=r(E1,"T5ForConditionalGeneration"),E1.forEach(t),pp.forEach(t),zc=l(n),Q=s(n,"DIV",{class:!0});var xe=i(Q);_(ws.$$.fragment,xe),D_=l(xe),ys=s(xe,"P",{});var hp=i(ys);S_=r(hp,"T5 Model with a "),Ad=s(hp,"CODE",{});var j1=i(Ad);O_=r(j1,"language modeling"),j1.forEach(t),G_=r(hp," head on top."),hp.forEach(t),B_=l(xe),xs=s(xe,"P",{});var up=i(xs);U_=r(up,"The T5 model was proposed in "),zs=s(up,"A",{href:!0,rel:!0});var q1=i(zs);W_=r(q1,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),q1.forEach(t),R_=r(up,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),up.forEach(t),H_=l(xe),$s=s(xe,"P",{});var mp=i($s);V_=r(mp,"This model inherits from "),fr=s(mp,"A",{href:!0});var F1=i(fr);K_=r(F1,"PreTrainedModel"),F1.forEach(t),Y_=r(mp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mp.forEach(t),J_=l(xe),Es=s(xe,"P",{});var fp=i(Es);Z_=r(fp,"This model is also a PyTorch "),js=s(fp,"A",{href:!0,rel:!0});var M1=i(js);X_=r(M1,"torch.nn.Module"),M1.forEach(t),Q_=r(fp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fp.forEach(t),eg=l(xe),He=s(xe,"DIV",{class:!0});var It=i(He);_(qs.$$.fragment,It),tg=l(It),dn=s(It,"P",{});var Gr=i(dn);ng=r(Gr,"The "),_r=s(Gr,"A",{href:!0});var P1=i(_r);og=r(P1,"T5ForConditionalGeneration"),P1.forEach(t),sg=r(Gr," forward method, overrides the "),Ld=s(Gr,"CODE",{});var C1=i(Ld);ag=r(C1,"__call__"),C1.forEach(t),rg=r(Gr," special method."),Gr.forEach(t),ig=l(It),_(On.$$.fragment,It),dg=l(It),Nd=s(It,"P",{});var A1=i(Nd);lg=r(A1,"Examples:"),A1.forEach(t),cg=l(It),_(Fs.$$.fragment,It),It.forEach(t),pg=l(xe),Ve=s(xe,"DIV",{class:!0});var Dt=i(Ve);_(Ms.$$.fragment,Dt),hg=l(Dt),Id=s(Dt,"P",{});var L1=i(Id);ug=r(L1,"This is an experimental feature and is a subject to change at a moment\u2019s notice."),L1.forEach(t),mg=l(Dt),Dd=s(Dt,"P",{});var N1=i(Dd);fg=r(N1,`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),N1.forEach(t),_g=l(Dt),Sd=s(Dt,"P",{});var I1=i(Sd);gg=r(I1,"Example:"),I1.forEach(t),Tg=l(Dt),_(Ps.$$.fragment,Dt),Dt.forEach(t),vg=l(xe),mt=s(xe,"DIV",{class:!0});var io=i(mt);_(Cs.$$.fragment,io),kg=l(io),Od=s(io,"P",{});var D1=i(Od);bg=r(D1,"Moves the model to cpu from a model parallel state."),D1.forEach(t),wg=l(io),Gd=s(io,"P",{});var S1=i(Gd);yg=r(S1,"Example:"),S1.forEach(t),xg=l(io),_(As.$$.fragment,io),io.forEach(t),xe.forEach(t),$c=l(n),ln=s(n,"H2",{class:!0});var _p=i(ln);Gn=s(_p,"A",{id:!0,class:!0,href:!0});var O1=i(Gn);Bd=s(O1,"SPAN",{});var G1=i(Bd);_(Ls.$$.fragment,G1),G1.forEach(t),O1.forEach(t),zg=l(_p),Ud=s(_p,"SPAN",{});var B1=i(Ud);$g=r(B1,"T5EncoderModel"),B1.forEach(t),_p.forEach(t),Ec=l(n),ee=s(n,"DIV",{class:!0});var ze=i(ee);_(Ns.$$.fragment,ze),Eg=l(ze),Wd=s(ze,"P",{});var U1=i(Wd);jg=r(U1,"The bare T5 Model transformer outputting encoder\u2019s raw hidden-states without any specific head on top."),U1.forEach(t),qg=l(ze),Is=s(ze,"P",{});var gp=i(Is);Fg=r(gp,"The T5 model was proposed in "),Ds=s(gp,"A",{href:!0,rel:!0});var W1=i(Ds);Mg=r(W1,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),W1.forEach(t),Pg=r(gp,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),gp.forEach(t),Cg=l(ze),Ss=s(ze,"P",{});var Tp=i(Ss);Ag=r(Tp,"This model inherits from "),gr=s(Tp,"A",{href:!0});var R1=i(gr);Lg=r(R1,"PreTrainedModel"),R1.forEach(t),Ng=r(Tp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tp.forEach(t),Ig=l(ze),Os=s(ze,"P",{});var vp=i(Os);Dg=r(vp,"This model is also a PyTorch "),Gs=s(vp,"A",{href:!0,rel:!0});var H1=i(Gs);Sg=r(H1,"torch.nn.Module"),H1.forEach(t),Og=r(vp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vp.forEach(t),Gg=l(ze),Ke=s(ze,"DIV",{class:!0});var St=i(Ke);_(Bs.$$.fragment,St),Bg=l(St),cn=s(St,"P",{});var Br=i(cn);Ug=r(Br,"The "),Tr=s(Br,"A",{href:!0});var V1=i(Tr);Wg=r(V1,"T5EncoderModel"),V1.forEach(t),Rg=r(Br," forward method, overrides the "),Rd=s(Br,"CODE",{});var K1=i(Rd);Hg=r(K1,"__call__"),K1.forEach(t),Vg=r(Br," special method."),Br.forEach(t),Kg=l(St),_(Bn.$$.fragment,St),Yg=l(St),Hd=s(St,"P",{});var Y1=i(Hd);Jg=r(Y1,"Example:"),Y1.forEach(t),Zg=l(St),_(Us.$$.fragment,St),St.forEach(t),Xg=l(ze),Ye=s(ze,"DIV",{class:!0});var Ot=i(Ye);_(Ws.$$.fragment,Ot),Qg=l(Ot),Vd=s(Ot,"P",{});var J1=i(Vd);eT=r(J1,"This is an experimental feature and is a subject to change at a moment\u2019s notice."),J1.forEach(t),tT=l(Ot),Kd=s(Ot,"P",{});var Z1=i(Kd);nT=r(Z1,`Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices.`),Z1.forEach(t),oT=l(Ot),Yd=s(Ot,"P",{});var X1=i(Yd);sT=r(X1,"Example:"),X1.forEach(t),aT=l(Ot),_(Rs.$$.fragment,Ot),Ot.forEach(t),rT=l(ze),ft=s(ze,"DIV",{class:!0});var lo=i(ft);_(Hs.$$.fragment,lo),iT=l(lo),Jd=s(lo,"P",{});var Q1=i(Jd);dT=r(Q1,"Moves the model to cpu from a model parallel state."),Q1.forEach(t),lT=l(lo),Zd=s(lo,"P",{});var ew=i(Zd);cT=r(ew,"Example:"),ew.forEach(t),pT=l(lo),_(Vs.$$.fragment,lo),lo.forEach(t),ze.forEach(t),jc=l(n),pn=s(n,"H2",{class:!0});var kp=i(pn);Un=s(kp,"A",{id:!0,class:!0,href:!0});var tw=i(Un);Xd=s(tw,"SPAN",{});var nw=i(Xd);_(Ks.$$.fragment,nw),nw.forEach(t),tw.forEach(t),hT=l(kp),Qd=s(kp,"SPAN",{});var ow=i(Qd);uT=r(ow,"TFT5Model"),ow.forEach(t),kp.forEach(t),qc=l(n),de=s(n,"DIV",{class:!0});var ot=i(de);_(Ys.$$.fragment,ot),mT=l(ot),el=s(ot,"P",{});var sw=i(el);fT=r(sw,"The bare T5 Model transformer outputting raw hidden-stateswithout any specific head on top."),sw.forEach(t),_T=l(ot),Js=s(ot,"P",{});var bp=i(Js);gT=r(bp,"The T5 model was proposed in "),Zs=s(bp,"A",{href:!0,rel:!0});var aw=i(Zs);TT=r(aw,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),aw.forEach(t),vT=r(bp,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),bp.forEach(t),kT=l(ot),Xs=s(ot,"P",{});var wp=i(Xs);bT=r(wp,"This model inherits from "),vr=s(wp,"A",{href:!0});var rw=i(vr);wT=r(rw,"TFPreTrainedModel"),rw.forEach(t),yT=r(wp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wp.forEach(t),xT=l(ot),Qs=s(ot,"P",{});var yp=i(Qs);zT=r(yp,"This model is also a "),ea=s(yp,"A",{href:!0,rel:!0});var iw=i(ea);$T=r(iw,"tf.keras.Model"),iw.forEach(t),ET=r(yp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),yp.forEach(t),jT=l(ot),_(Wn.$$.fragment,ot),qT=l(ot),Je=s(ot,"DIV",{class:!0});var Gt=i(Je);_(ta.$$.fragment,Gt),FT=l(Gt),hn=s(Gt,"P",{});var Ur=i(hn);MT=r(Ur,"The "),kr=s(Ur,"A",{href:!0});var dw=i(kr);PT=r(dw,"TFT5Model"),dw.forEach(t),CT=r(Ur," forward method, overrides the "),tl=s(Ur,"CODE",{});var lw=i(tl);AT=r(lw,"__call__"),lw.forEach(t),LT=r(Ur," special method."),Ur.forEach(t),NT=l(Gt),_(Rn.$$.fragment,Gt),IT=l(Gt),nl=s(Gt,"P",{});var cw=i(nl);DT=r(cw,"Examples:"),cw.forEach(t),ST=l(Gt),_(na.$$.fragment,Gt),Gt.forEach(t),ot.forEach(t),Fc=l(n),un=s(n,"H2",{class:!0});var xp=i(un);Hn=s(xp,"A",{id:!0,class:!0,href:!0});var pw=i(Hn);ol=s(pw,"SPAN",{});var hw=i(ol);_(oa.$$.fragment,hw),hw.forEach(t),pw.forEach(t),OT=l(xp),sl=s(xp,"SPAN",{});var uw=i(sl);GT=r(uw,"TFT5ForConditionalGeneration"),uw.forEach(t),xp.forEach(t),Mc=l(n),le=s(n,"DIV",{class:!0});var st=i(le);_(sa.$$.fragment,st),BT=l(st),aa=s(st,"P",{});var zp=i(aa);UT=r(zp,"T5 Model with a "),al=s(zp,"CODE",{});var mw=i(al);WT=r(mw,"language modeling"),mw.forEach(t),RT=r(zp," head on top."),zp.forEach(t),HT=l(st),ra=s(st,"P",{});var $p=i(ra);VT=r($p,"The T5 model was proposed in "),ia=s($p,"A",{href:!0,rel:!0});var fw=i(ia);KT=r(fw,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),fw.forEach(t),YT=r($p,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),$p.forEach(t),JT=l(st),da=s(st,"P",{});var Ep=i(da);ZT=r(Ep,"This model inherits from "),br=s(Ep,"A",{href:!0});var _w=i(br);XT=r(_w,"TFPreTrainedModel"),_w.forEach(t),QT=r(Ep,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ep.forEach(t),ev=l(st),la=s(st,"P",{});var jp=i(la);tv=r(jp,"This model is also a "),ca=s(jp,"A",{href:!0,rel:!0});var gw=i(ca);nv=r(gw,"tf.keras.Model"),gw.forEach(t),ov=r(jp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jp.forEach(t),sv=l(st),_(Vn.$$.fragment,st),av=l(st),Ze=s(st,"DIV",{class:!0});var Bt=i(Ze);_(pa.$$.fragment,Bt),rv=l(Bt),mn=s(Bt,"P",{});var Wr=i(mn);iv=r(Wr,"The "),wr=s(Wr,"A",{href:!0});var Tw=i(wr);dv=r(Tw,"TFT5ForConditionalGeneration"),Tw.forEach(t),lv=r(Wr," forward method, overrides the "),rl=s(Wr,"CODE",{});var vw=i(rl);cv=r(vw,"__call__"),vw.forEach(t),pv=r(Wr," special method."),Wr.forEach(t),hv=l(Bt),_(Kn.$$.fragment,Bt),uv=l(Bt),il=s(Bt,"P",{});var kw=i(il);mv=r(kw,"Examples:"),kw.forEach(t),fv=l(Bt),_(ha.$$.fragment,Bt),Bt.forEach(t),st.forEach(t),Pc=l(n),fn=s(n,"H2",{class:!0});var qp=i(fn);Yn=s(qp,"A",{id:!0,class:!0,href:!0});var bw=i(Yn);dl=s(bw,"SPAN",{});var ww=i(dl);_(ua.$$.fragment,ww),ww.forEach(t),bw.forEach(t),_v=l(qp),ll=s(qp,"SPAN",{});var yw=i(ll);gv=r(yw,"TFT5EncoderModel"),yw.forEach(t),qp.forEach(t),Cc=l(n),ce=s(n,"DIV",{class:!0});var at=i(ce);_(ma.$$.fragment,at),Tv=l(at),cl=s(at,"P",{});var xw=i(cl);vv=r(xw,"The bare T5 Model transformer outputting encoder\u2019s raw hidden-stateswithout any specific head on top."),xw.forEach(t),kv=l(at),fa=s(at,"P",{});var Fp=i(fa);bv=r(Fp,"The T5 model was proposed in "),_a=s(Fp,"A",{href:!0,rel:!0});var zw=i(_a);wv=r(zw,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),zw.forEach(t),yv=r(Fp,` by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It\u2019s an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.`),Fp.forEach(t),xv=l(at),ga=s(at,"P",{});var Mp=i(ga);zv=r(Mp,"This model inherits from "),yr=s(Mp,"A",{href:!0});var $w=i(yr);$v=r($w,"TFPreTrainedModel"),$w.forEach(t),Ev=r(Mp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mp.forEach(t),jv=l(at),Ta=s(at,"P",{});var Pp=i(Ta);qv=r(Pp,"This model is also a "),va=s(Pp,"A",{href:!0,rel:!0});var Ew=i(va);Fv=r(Ew,"tf.keras.Model"),Ew.forEach(t),Mv=r(Pp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Pp.forEach(t),Pv=l(at),_(Jn.$$.fragment,at),Cv=l(at),Xe=s(at,"DIV",{class:!0});var Ut=i(Xe);_(ka.$$.fragment,Ut),Av=l(Ut),_n=s(Ut,"P",{});var Rr=i(_n);Lv=r(Rr,"The "),xr=s(Rr,"A",{href:!0});var jw=i(xr);Nv=r(jw,"TFT5EncoderModel"),jw.forEach(t),Iv=r(Rr," forward method, overrides the "),pl=s(Rr,"CODE",{});var qw=i(pl);Dv=r(qw,"__call__"),qw.forEach(t),Sv=r(Rr," special method."),Rr.forEach(t),Ov=l(Ut),_(Zn.$$.fragment,Ut),Gv=l(Ut),hl=s(Ut,"P",{});var Fw=i(hl);Bv=r(Fw,"Examples:"),Fw.forEach(t),Uv=l(Ut),_(ba.$$.fragment,Ut),Ut.forEach(t),at.forEach(t),Ac=l(n),gn=s(n,"H2",{class:!0});var Cp=i(gn);Xn=s(Cp,"A",{id:!0,class:!0,href:!0});var Mw=i(Xn);ul=s(Mw,"SPAN",{});var Pw=i(ul);_(wa.$$.fragment,Pw),Pw.forEach(t),Mw.forEach(t),Wv=l(Cp),ml=s(Cp,"SPAN",{});var Cw=i(ml);Rv=r(Cw,"FlaxT5Model"),Cw.forEach(t),Cp.forEach(t),Lc=l(n),vt=s(n,"DIV",{class:!0});var Hr=i(vt);Qe=s(Hr,"DIV",{class:!0});var Wt=i(Qe);_(ya.$$.fragment,Wt),Hv=l(Wt),Tn=s(Wt,"P",{});var Vr=i(Tn);Vv=r(Vr,"The "),fl=s(Vr,"CODE",{});var Aw=i(fl);Kv=r(Aw,"FlaxT5PreTrainedModel"),Aw.forEach(t),Yv=r(Vr," forward method, overrides the "),_l=s(Vr,"CODE",{});var Lw=i(_l);Jv=r(Lw,"__call__"),Lw.forEach(t),Zv=r(Vr," special method."),Vr.forEach(t),Xv=l(Wt),_(Qn.$$.fragment,Wt),Qv=l(Wt),gl=s(Wt,"P",{});var Nw=i(gl);ek=r(Nw,"Example:"),Nw.forEach(t),tk=l(Wt),_(xa.$$.fragment,Wt),Wt.forEach(t),nk=l(Hr),jt=s(Hr,"DIV",{class:!0});var Kr=i(jt);_(za.$$.fragment,Kr),ok=l(Kr),Tl=s(Kr,"P",{});var Iw=i(Tl);sk=r(Iw,"Example:"),Iw.forEach(t),ak=l(Kr),_($a.$$.fragment,Kr),Kr.forEach(t),rk=l(Hr),qt=s(Hr,"DIV",{class:!0});var Yr=i(qt);_(Ea.$$.fragment,Yr),ik=l(Yr),vl=s(Yr,"P",{});var Dw=i(vl);dk=r(Dw,"Example:"),Dw.forEach(t),lk=l(Yr),_(ja.$$.fragment,Yr),Yr.forEach(t),Hr.forEach(t),Nc=l(n),vn=s(n,"H2",{class:!0});var Ap=i(vn);eo=s(Ap,"A",{id:!0,class:!0,href:!0});var Sw=i(eo);kl=s(Sw,"SPAN",{});var Ow=i(kl);_(qa.$$.fragment,Ow),Ow.forEach(t),Sw.forEach(t),ck=l(Ap),bl=s(Ap,"SPAN",{});var Gw=i(bl);pk=r(Gw,"FlaxT5ForConditionalGeneration"),Gw.forEach(t),Ap.forEach(t),Ic=l(n),kt=s(n,"DIV",{class:!0});var Jr=i(kt);et=s(Jr,"DIV",{class:!0});var Rt=i(et);_(Fa.$$.fragment,Rt),hk=l(Rt),kn=s(Rt,"P",{});var Zr=i(kn);uk=r(Zr,"The "),wl=s(Zr,"CODE",{});var Bw=i(wl);mk=r(Bw,"FlaxT5PreTrainedModel"),Bw.forEach(t),fk=r(Zr," forward method, overrides the "),yl=s(Zr,"CODE",{});var Uw=i(yl);_k=r(Uw,"__call__"),Uw.forEach(t),gk=r(Zr," special method."),Zr.forEach(t),Tk=l(Rt),_(to.$$.fragment,Rt),vk=l(Rt),xl=s(Rt,"P",{});var Ww=i(xl);kk=r(Ww,"Example:"),Ww.forEach(t),bk=l(Rt),_(Ma.$$.fragment,Rt),Rt.forEach(t),wk=l(Jr),Ft=s(Jr,"DIV",{class:!0});var Xr=i(Ft);_(Pa.$$.fragment,Xr),yk=l(Xr),zl=s(Xr,"P",{});var Rw=i(zl);xk=r(Rw,"Example:"),Rw.forEach(t),zk=l(Xr),_(Ca.$$.fragment,Xr),Xr.forEach(t),$k=l(Jr),Mt=s(Jr,"DIV",{class:!0});var Qr=i(Mt);_(Aa.$$.fragment,Qr),Ek=l(Qr),$l=s(Qr,"P",{});var Hw=i($l);jk=r(Hw,"Example:"),Hw.forEach(t),qk=l(Qr),_(La.$$.fragment,Qr),Qr.forEach(t),Jr.forEach(t),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(ly)),c(y,"id","t5"),c(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(y,"href","#t5"),c(b,"class","relative group"),c(te,"id","overview"),c(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(te,"href","#overview"),c(j,"class","relative group"),c(ie,"href","https://arxiv.org/pdf/1910.10683.pdf"),c(ie,"rel","nofollow"),c(L,"href","/docs/transformers/v4.15.0/en/model_doc/t5#training"),c(Le,"href","/docs/transformers/v4.15.0/en/model_doc/t5#inference"),c(Ne,"href","/docs/transformers/v4.15.0/en/model_doc/t5#scripts"),c(co,"href","https://huggingface.co/t5-small"),c(co,"rel","nofollow"),c(po,"href","https://huggingface.co/t5-base"),c(po,"rel","nofollow"),c(ho,"href","https://huggingface.co/t5-large"),c(ho,"rel","nofollow"),c(uo,"href","https://huggingface.co/t5-3b"),c(uo,"rel","nofollow"),c(mo,"href","https://huggingface.co/t5-11b"),c(mo,"rel","nofollow"),c(Ba,"href","/docs/transformers/v4.15.0/en/t5v1.1"),c(Ua,"href","/docs/transformers/v4.15.0/en/mt5"),c(Wa,"href","/docs/transformers/v4.15.0/en/byt5"),c(fo,"href","https://huggingface.co/models?search=t5"),c(fo,"rel","nofollow"),c(_o,"href","https://huggingface.co/thomwolf"),c(_o,"rel","nofollow"),c(go,"href","https://github.com/google-research/text-to-text-transfer-transformer"),c(go,"rel","nofollow"),c(Ra,"id","training"),c(zn,"id","training"),c(zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zn,"href","#training"),c(Ht,"class","relative group"),c(Ha,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),c(Ka,"href","/docs/transformers/v4.15.0/en/model_doc/mt5#transformers.T5Tokenizer"),c(ko,"href","https://github.com/huggingface/transformers/tree/master/examples/flax/language-modeling"),c(ko,"rel","nofollow"),c(yo,"href","https://github.com/huggingface/transformers/tree/master/examples/flax/summarization"),c(yo,"rel","nofollow"),c($o,"href","https://discuss.huggingface.co/t/t5-finetuning-tips/684"),c($o,"rel","nofollow"),c(Eo,"href","https://arxiv.org/pdf/1910.10683.pdf"),c(Eo,"rel","nofollow"),c(Za,"id","inference"),c(jn,"id","inference"),c(jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jn,"href","#inference"),c(Yt,"class","relative group"),c(Xa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),c(Fo,"href","https://huggingface.co/blog/how-to-generate"),c(Fo,"rel","nofollow"),c(Mo,"href","https://huggingface.co/blog/encoder-decoder#encoder-decoder"),c(Mo,"rel","nofollow"),c(Qa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate"),c(tr,"id","scripts"),c(qn,"id","example-scripts"),c(qn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qn,"href","#example-scripts"),c(Jt,"class","relative 
group"),c(Lo,"href","https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/run_t5_mlm_flax.py"),c(Lo,"rel","nofollow"),c(No,"href","https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/t5_tokenizer_model.py"),c(No,"rel","nofollow"),c(Io,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization"),c(Io,"rel","nofollow"),c(Do,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization"),c(Do,"rel","nofollow"),c(So,"href","https://github.com/huggingface/transformers/tree/master/examples/flax/summarization"),c(So,"rel","nofollow"),c(Oo,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/translation"),c(Oo,"rel","nofollow"),c(Go,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/translation"),c(Go,"rel","nofollow"),c(Mn,"id","transformers.T5Config"),c(Mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mn,"href","#transformers.T5Config"),c(Xt,"class","relative group"),c(or,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model"),c(sr,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5Model"),c(Wo,"href","https://huggingface.co/t5-small"),c(Wo,"rel","nofollow"),c(ar,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(rr,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(gt,"class","docstring"),c(Pn,"id","transformers.T5Tokenizer"),c(Pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Pn,"href","#transformers.T5Tokenizer"),c(en,"class","relative group"),c(Ko,"href","https://github.com/google/sentencepiece"),c(Ko,"rel","nofollow"),c(ir,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c($t,"class","docstring"),c(Cn,"class","docstring"),c(An,"class","docstring"),c(md,"class","docstring"),c(Z,"class","docstring"),c(Ln,"id","transformers.T5TokenizerFast"),c(Ln,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ln,"href","#transformers.T5TokenizerFast"),c(nn,"class","relative group"),c(os,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),c(os,"rel","nofollow"),c(cr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Et,"class","docstring"),c(Nn,"class","docstring"),c(Se,"class","docstring"),c(In,"id","transformers.T5Model"),c(In,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(In,"href","#transformers.T5Model"),c(sn,"class","relative 
group"),c(ps,"href","https://arxiv.org/abs/1910.10683"),c(ps,"rel","nofollow"),c(ur,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ms,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ms,"rel","nofollow"),c(mr,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5Model"),c(We,"class","docstring"),c(Re,"class","docstring"),c(ut,"class","docstring"),c(X,"class","docstring"),c(Sn,"id","transformers.T5ForConditionalGeneration"),c(Sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Sn,"href","#transformers.T5ForConditionalGeneration"),c(rn,"class","relative group"),c(zs,"href","https://arxiv.org/abs/1910.10683"),c(zs,"rel","nofollow"),c(fr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(js,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(js,"rel","nofollow"),c(_r,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5ForConditionalGeneration"),c(He,"class","docstring"),c(Ve,"class","docstring"),c(mt,"class","docstring"),c(Q,"class","docstring"),c(Gn,"id","transformers.T5EncoderModel"),c(Gn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gn,"href","#transformers.T5EncoderModel"),c(ln,"class","relative group"),c(Ds,"href","https://arxiv.org/abs/1910.10683"),c(Ds,"rel","nofollow"),c(gr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Gs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Gs,"rel","nofollow"),c(Tr,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.T5EncoderModel"),c(Ke,"class","docstring"),c(Ye,"class","docstring"),c(ft,"class","docstring"),c(ee,"class","docstring"),c(Un,"id","transformers.TFT5Model"),c(Un,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Un,"href","#transformers.TFT5Model"),c(pn,"class","relative group"),c(Zs,"href","https://arxiv.org/abs/1910.10683"),c(Zs,"rel","nofollow"),c(vr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ea,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ea,"rel","nofollow"),c(kr,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5Model"),c(Je,"class","docstring"),c(de,"class","docstring"),c(Hn,"id","transformers.TFT5ForConditionalGeneration"),c(Hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Hn,"href","#transformers.TFT5ForConditionalGeneration"),c(un,"class","relative group"),c(ia,"href","https://arxiv.org/abs/1910.10683"),c(ia,"rel","nofollow"),c(br,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ca,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ca,"rel","nofollow"),c(wr,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5ForConditionalGeneration"),c(Ze,"class","docstring"),c(le,"class","docstring"),c(Yn,"id","transformers.TFT5EncoderModel"),c(Yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yn,"href","#transformers.TFT5EncoderModel"),c(fn,"class","relative group"),c(_a,"href","https://arxiv.org/abs/1910.10683"),c(_a,"rel","nofollow"),c(yr,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(va,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(va,"rel","nofollow"),c(xr,"href","/docs/transformers/v4.15.0/en/model_doc/t5#transformers.TFT5EncoderModel"),c(Xe,"class","docstring"),c(ce,"class","docstring"),c(Xn,"id","transformers.FlaxT5Model"),c(Xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xn,"href","#transformers.FlaxT5Model"),c(gn,"class","relative group"),c(Qe,"class","docstring"),c(jt,"class","docstring"),c(qt,"class","docstring"),c(vt,"class","docstring"),c(eo,"id","transformers.FlaxT5ForConditionalGeneration"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.FlaxT5ForConditionalGeneration"),c(vn,"class","relative group"),c(et,"class","docstring"),c(Ft,"class","docstring"),c(Mt,"class","docstring"),c(kt,"class","docstring")},m(n,h){e(document.head,u),p(n,$,h),p(n,b,h),e(b,y),e(y,z),g(x,z,null),e(b,w),e(b,E),e(E,Ee),p(n,ae,h),p(n,j,h),e(j,te),e(te,O),g(re,O,null),e(j,je),e(j,G),e(G,qe),p(n,Te,h),p(n,B,h),e(B,I),e(B,ie),e(ie,pe),e(B,F),p(n,C,h),p(n,he,h),e(he,K),p(n,ve,h),p(n,ue,h),e(ue,U),e(U,Fe),p(n,ke,h),p(n,M,h),e(M,Me),p(n,H,h),p(n,V,h),e(V,fe),e(fe,A),e(A,Pe),e(A,_e),e(_e,D),e(A,Ce),e(A,W),e(W,Ae),e(A,m),e(V,q),e(V,ne),e(ne,Oe),e(Oe,rt),e(V,S),e(V,Ge),e(Ge,oe),e(oe,it),e(oe,L),e(L,Y),e(oe,dt),e(oe,Le),e(Le,J),e(oe,lt),e(oe,Ne),e(Ne,Ie),e(oe,ct),p(n,Dl,h),p(n,Sa,h),e(Sa,Lp),p(n,Sl,h),p(n,Be,h),e(Be,ei),e(ei,ti),e(ti,co),e(co,Np),e(Be,Ip),e(Be,ni),e(ni,oi),e(oi,po),e(po,Dp),e(Be,Sp),e(Be,si),e(si,ai),e(ai,ho),e(ho,Op),e(Be,Gp),e(Be,ri),e(ri,ii),e(ii,uo),e(uo,Bp),e(Be,Up),e(Be,di),e(di,Oa),e(Oa,mo),e(mo,Wp),e(Oa,Rp),p(n,Ol,h),p(n,Ga,h),e(Ga,Hp),p(n,Gl,h),p(n,wt,h),e(wt,li),e(li,bn),e(bn,ci),e(ci,Vp),e(bn,Kp),e(bn,Ba),e(Ba,Yp),e(bn,Jp),e(wt,Zp),e(wt,pi),e(pi,wn),e(wn,hi),e(hi,Xp),e(wn,Qp),e(wn,Ua),e(Ua,eh),e(wn,th),e(wt,nh),e(wt,ui),e(ui,yn),e(yn,mi),e(mi,oh),e(yn,sh),e(yn,Wa),e(Wa,ah),e(yn,rh),p(n,Bl,h),p(n,xn,h),e(xn,ih),e(xn,fo),e(fo,dh),e(xn,lh),p(n,Ul,h),p(n,yt,h),e(yt,ch),e(yt,_o),e(_o,ph),e(yt,hh),e(yt,go),e(go,uh),e(yt,mh),p(n,Wl,h),p(n,Ra,h),p(n,Rl,h),p(n,Ht,h),e(Ht,zn),e(zn,fi),g(To,fi,null),e(Ht,fh),e(Ht,_i),e(_i,_h),p(n,Hl,h),p(n,pt,h),e(pt,gh),e(pt,gi),e(gi,Th),e(pt,vh),e(pt,Ti),e(Ti,kh),e(pt,bh),e(pt,vi),e(vi,wh),e(pt,yh),p(n,Vl,h),p(n,$n,h),e($n,xh),e($n,Ha),e(Ha,zh),e($n,$h),p(n,Kl,h),p(n,Va,h),e(Va,Vt),e(Vt,ki),e(ki,Eh),e(Vt,jh),e(Vt,ge),e(ge,qh),e(ge,bi),e(bi,Fh),e(ge,Mh),e(ge,wi),e(wi,Ph),e(ge,Ch),e(ge,yi),e(yi,Ah),e(ge,Lh),e(ge,xi),e(xi,Nh),e(ge,Ih),e(ge,zi),e(zi,Dh),e(ge,Sh),e(ge,Ka),e(Ka,Oh),e(ge,Gh),e(Vt,Bh),e(Vt,$i),e($i,Uh),p(n,Yl,h),g(vo,n,h),p(n,Jl,h),p(n,En,h),e(En,Wh),e(En,ko),e(ko,Rh),e(En,Hh),p(n,Zl,h),p(n,Ya,h),e(Ya,bo),e(bo,Ei),e(Ei,Vh),e(bo,Kh),e(bo,ji),e(ji,Yh),p(n,Xl,h),g(wo,n,h),p(n,Ql,h),p(n,se,h),e(se,Jh),e(se,qi),e(qi,Zh),e(se,Xh),e(se,Fi),e(Fi,Qh),e(se,eu),e(se,Mi),e(Mi,tu),e(se,nu),e(se,Pi),e(Pi,ou),e(se,su),e(se,Ci),e(Ci,au),e(se,ru),e(se,Ai),e(Ai,iu),e(se,du),e(se,Li),e(Li,lu),e(se,cu),p(n,ec,h),p(n,xt,h),e(xt,pu),e(xt,Ni),e(Ni,h
u),e(xt,uu),e(xt,Ii),e(Ii,mu),e(xt,fu),p(n,tc,h),p(n,me,h),e(me,_u),e(me,Di),e(Di,gu),e(me,Tu),e(me,Si),e(Si,vu),e(me,ku),e(me,Oi),e(Oi,bu),e(me,wu),e(me,Gi),e(Gi,yu),e(me,xu),e(me,yo),e(yo,zu),e(me,$u),e(me,Bi),e(Bi,Eu),e(me,ju),p(n,nc,h),g(xo,n,h),p(n,oc,h),p(n,Ja,h),e(Ja,qu),p(n,sc,h),p(n,zt,h),e(zt,Ui),e(Ui,zo),e(zo,Fu),e(zo,Wi),e(Wi,Mu),e(zo,Pu),e(zt,Cu),e(zt,Ri),e(Ri,Kt),e(Kt,Au),e(Kt,$o),e($o,Lu),e(Kt,Nu),e(Kt,Eo),e(Eo,Iu),e(Kt,Du),e(zt,Su),e(zt,Hi),e(Hi,jo),e(jo,Ou),e(jo,Vi),e(Vi,Gu),e(jo,Bu),p(n,ac,h),p(n,Za,h),p(n,rc,h),p(n,Yt,h),e(Yt,jn),e(jn,Ki),g(qo,Ki,null),e(Yt,Uu),e(Yt,Yi),e(Yi,Wu),p(n,ic,h),p(n,ht,h),e(ht,Ru),e(ht,Xa),e(Xa,Hu),e(ht,Vu),e(ht,Fo),e(Fo,Ku),e(ht,Yu),e(ht,Mo),e(Mo,Ju),e(ht,Zu),p(n,dc,h),g(Po,n,h),p(n,lc,h),p(n,Ue,h),e(Ue,Xu),e(Ue,Ji),e(Ji,Qu),e(Ue,em),e(Ue,Zi),e(Zi,tm),e(Ue,nm),e(Ue,Qa),e(Qa,om),e(Ue,sm),e(Ue,Xi),e(Xi,am),e(Ue,rm),p(n,cc,h),p(n,er,h),e(er,im),p(n,pc,h),g(Co,n,h),p(n,hc,h),p(n,tr,h),p(n,uc,h),p(n,Jt,h),e(Jt,qn),e(qn,Qi),g(Ao,Qi,null),e(Jt,dm),e(Jt,ed),e(ed,lm),p(n,mc,h),p(n,nr,h),e(nr,cm),p(n,fc,h),p(n,Fn,h),e(Fn,td),e(td,Zt),e(Zt,pm),e(Zt,Lo),e(Lo,hm),e(Zt,um),e(Zt,No),e(No,mm),e(Zt,fm),e(Fn,_m),e(Fn,nd),e(nd,De),e(De,gm),e(De,Io),e(Io,Tm),e(De,vm),e(De,Do),e(Do,km),e(De,bm),e(De,So),e(So,wm),e(De,ym),e(De,Oo),e(Oo,xm),e(De,zm),e(De,Go),e(Go,$m),e(De,Em),p(n,_c,h),p(n,Xt,h),e(Xt,Mn),e(Mn,od),g(Bo,od,null),e(Xt,jm),e(Xt,sd),e(sd,qm),p(n,gc,h),p(n,gt,h),g(Uo,gt,null),e(gt,Fm),e(gt,Tt),e(Tt,Mm),e(Tt,or),e(or,Pm),e(Tt,Cm),e(Tt,sr),e(sr,Am),e(Tt,Lm),e(Tt,Wo),e(Wo,Nm),e(Tt,Im),e(gt,Dm),e(gt,Qt),e(Qt,Sm),e(Qt,ar),e(ar,Om),e(Qt,Gm),e(Qt,rr),e(rr,Bm),e(Qt,Um),p(n,Tc,h),p(n,en,h),e(en,Pn),e(Pn,ad),g(Ro,ad,null),e(en,Wm),e(en,rd),e(rd,Rm),p(n,vc,h),p(n,Z,h),g(Ho,Z,null),e(Z,Hm),e(Z,Vo),e(Vo,Vm),e(Vo,Ko),e(Ko,Km),e(Vo,Ym),e(Z,Jm),e(Z,Yo),e(Yo,Zm),e(Yo,ir),e(ir,Xm),e(Yo,Qm),e(Z,ef),e(Z,tn),e(tn,tf),e(tn,id),e(id,nf),e(tn,of),e(tn,dd),e(dd,sf),e(tn,af),e(Z,rf),e(Z,$t),g(Jo,$t,null),e($t,df),e($t,ld),e(ld,lf),e($t,cf),e($t,Zo),e(Zo,dr),e(dr,pf),e(dr,cd),e(cd,hf),e(Zo,uf),e(Zo,lr),e(lr,mf),e(lr,pd),e(pd,ff),e(Z,_f),e(Z,Cn),g(Xo,Cn,null),e(Cn,gf),e(Cn,Qo),e(Qo,Tf),e(Qo,hd),e(hd,vf),e(Qo,kf),e(Z,bf),e(Z,An),g(es,An,null),e(An,wf),e(An,ud),e(ud,yf),e(Z,xf),e(Z,md),p(n,kc,h),p(n,nn,h),e(nn,Ln),e(Ln,fd),g(ts,fd,null),e(nn,zf),e(nn,_d),e(_d,$f),p(n,bc,h),p(n,Se,h),g(ns,Se,null),e(Se,Ef),e(Se,on),e(on,jf),e(on,gd),e(gd,qf),e(on,Ff),e(on,os),e(os,Mf),e(on,Pf),e(Se,Cf),e(Se,ss),e(ss,Af),e(ss,cr),e(cr,Lf),e(ss,Nf),e(Se,If),e(Se,Et),g(as,Et,null),e(Et,Df),e(Et,Td),e(Td,Sf),e(Et,Of),e(Et,rs),e(rs,pr),e(pr,Gf),e(pr,vd),e(vd,Bf),e(rs,Uf),e(rs,hr),e(hr,Wf),e(hr,kd),e(kd,Rf),e(Se,Hf),e(Se,Nn),g(is,Nn,null),e(Nn,Vf),e(Nn,bd),e(bd,Kf),p(n,wc,h),p(n,sn,h),e(sn,In),e(In,wd),g(ds,wd,null),e(sn,Yf),e(sn,yd),e(yd,Jf),p(n,yc,h),p(n,X,h),g(ls,X,null),e(X,Zf),e(X,xd),e(xd,Xf),e(X,Qf),e(X,cs),e(cs,e_),e(cs,ps),e(ps,t_),e(cs,n_),e(X,o_),e(X,hs),e(hs,s_),e(hs,ur),e(ur,a_),e(hs,r_),e(X,i_),e(X,us),e(us,d_),e(us,ms),e(ms,l_),e(us,c_),e(X,p_),e(X,We),g(fs,We,null),e(We,h_),e(We,an),e(an,u_),e(an,mr),e(mr,m_),e(an,f_),e(an,zd),e(zd,__),e(an,g_),e(We,T_),g(Dn,We,null),e(We,v_),e(We,$d),e($d,k_),e(We,b_),g(_s,We,null),e(X,w_),e(X,Re),g(gs,Re,null),e(Re,y_),e(Re,Ed),e(Ed,x_),e(Re,z_),e(Re,jd),e(jd,$_),e(Re,E_),e(Re,qd),e(qd,j_),e(Re,q_),g(Ts,Re,null),e(X,F_),e(X,ut),g(vs,ut,null),e(ut,M_),e(ut,Fd),e(Fd,P_),e(ut,C_),e(ut,Md),e(Md,A_),e(ut,L_),g(ks,ut,null),p(n,xc,h),p(n,rn,h),e(rn,Sn),e(Sn,Pd),g(bs,Pd,null),e(rn,N_),e(rn,Cd),e(Cd,I_),p(n,zc,h),p(n,Q,h),g(ws,Q,null),e(Q,D_),e(Q,ys),e(ys,S_),e(ys,Ad
),e(Ad,O_),e(ys,G_),e(Q,B_),e(Q,xs),e(xs,U_),e(xs,zs),e(zs,W_),e(xs,R_),e(Q,H_),e(Q,$s),e($s,V_),e($s,fr),e(fr,K_),e($s,Y_),e(Q,J_),e(Q,Es),e(Es,Z_),e(Es,js),e(js,X_),e(Es,Q_),e(Q,eg),e(Q,He),g(qs,He,null),e(He,tg),e(He,dn),e(dn,ng),e(dn,_r),e(_r,og),e(dn,sg),e(dn,Ld),e(Ld,ag),e(dn,rg),e(He,ig),g(On,He,null),e(He,dg),e(He,Nd),e(Nd,lg),e(He,cg),g(Fs,He,null),e(Q,pg),e(Q,Ve),g(Ms,Ve,null),e(Ve,hg),e(Ve,Id),e(Id,ug),e(Ve,mg),e(Ve,Dd),e(Dd,fg),e(Ve,_g),e(Ve,Sd),e(Sd,gg),e(Ve,Tg),g(Ps,Ve,null),e(Q,vg),e(Q,mt),g(Cs,mt,null),e(mt,kg),e(mt,Od),e(Od,bg),e(mt,wg),e(mt,Gd),e(Gd,yg),e(mt,xg),g(As,mt,null),p(n,$c,h),p(n,ln,h),e(ln,Gn),e(Gn,Bd),g(Ls,Bd,null),e(ln,zg),e(ln,Ud),e(Ud,$g),p(n,Ec,h),p(n,ee,h),g(Ns,ee,null),e(ee,Eg),e(ee,Wd),e(Wd,jg),e(ee,qg),e(ee,Is),e(Is,Fg),e(Is,Ds),e(Ds,Mg),e(Is,Pg),e(ee,Cg),e(ee,Ss),e(Ss,Ag),e(Ss,gr),e(gr,Lg),e(Ss,Ng),e(ee,Ig),e(ee,Os),e(Os,Dg),e(Os,Gs),e(Gs,Sg),e(Os,Og),e(ee,Gg),e(ee,Ke),g(Bs,Ke,null),e(Ke,Bg),e(Ke,cn),e(cn,Ug),e(cn,Tr),e(Tr,Wg),e(cn,Rg),e(cn,Rd),e(Rd,Hg),e(cn,Vg),e(Ke,Kg),g(Bn,Ke,null),e(Ke,Yg),e(Ke,Hd),e(Hd,Jg),e(Ke,Zg),g(Us,Ke,null),e(ee,Xg),e(ee,Ye),g(Ws,Ye,null),e(Ye,Qg),e(Ye,Vd),e(Vd,eT),e(Ye,tT),e(Ye,Kd),e(Kd,nT),e(Ye,oT),e(Ye,Yd),e(Yd,sT),e(Ye,aT),g(Rs,Ye,null),e(ee,rT),e(ee,ft),g(Hs,ft,null),e(ft,iT),e(ft,Jd),e(Jd,dT),e(ft,lT),e(ft,Zd),e(Zd,cT),e(ft,pT),g(Vs,ft,null),p(n,jc,h),p(n,pn,h),e(pn,Un),e(Un,Xd),g(Ks,Xd,null),e(pn,hT),e(pn,Qd),e(Qd,uT),p(n,qc,h),p(n,de,h),g(Ys,de,null),e(de,mT),e(de,el),e(el,fT),e(de,_T),e(de,Js),e(Js,gT),e(Js,Zs),e(Zs,TT),e(Js,vT),e(de,kT),e(de,Xs),e(Xs,bT),e(Xs,vr),e(vr,wT),e(Xs,yT),e(de,xT),e(de,Qs),e(Qs,zT),e(Qs,ea),e(ea,$T),e(Qs,ET),e(de,jT),g(Wn,de,null),e(de,qT),e(de,Je),g(ta,Je,null),e(Je,FT),e(Je,hn),e(hn,MT),e(hn,kr),e(kr,PT),e(hn,CT),e(hn,tl),e(tl,AT),e(hn,LT),e(Je,NT),g(Rn,Je,null),e(Je,IT),e(Je,nl),e(nl,DT),e(Je,ST),g(na,Je,null),p(n,Fc,h),p(n,un,h),e(un,Hn),e(Hn,ol),g(oa,ol,null),e(un,OT),e(un,sl),e(sl,GT),p(n,Mc,h),p(n,le,h),g(sa,le,null),e(le,BT),e(le,aa),e(aa,UT),e(aa,al),e(al,WT),e(aa,RT),e(le,HT),e(le,ra),e(ra,VT),e(ra,ia),e(ia,KT),e(ra,YT),e(le,JT),e(le,da),e(da,ZT),e(da,br),e(br,XT),e(da,QT),e(le,ev),e(le,la),e(la,tv),e(la,ca),e(ca,nv),e(la,ov),e(le,sv),g(Vn,le,null),e(le,av),e(le,Ze),g(pa,Ze,null),e(Ze,rv),e(Ze,mn),e(mn,iv),e(mn,wr),e(wr,dv),e(mn,lv),e(mn,rl),e(rl,cv),e(mn,pv),e(Ze,hv),g(Kn,Ze,null),e(Ze,uv),e(Ze,il),e(il,mv),e(Ze,fv),g(ha,Ze,null),p(n,Pc,h),p(n,fn,h),e(fn,Yn),e(Yn,dl),g(ua,dl,null),e(fn,_v),e(fn,ll),e(ll,gv),p(n,Cc,h),p(n,ce,h),g(ma,ce,null),e(ce,Tv),e(ce,cl),e(cl,vv),e(ce,kv),e(ce,fa),e(fa,bv),e(fa,_a),e(_a,wv),e(fa,yv),e(ce,xv),e(ce,ga),e(ga,zv),e(ga,yr),e(yr,$v),e(ga,Ev),e(ce,jv),e(ce,Ta),e(Ta,qv),e(Ta,va),e(va,Fv),e(Ta,Mv),e(ce,Pv),g(Jn,ce,null),e(ce,Cv),e(ce,Xe),g(ka,Xe,null),e(Xe,Av),e(Xe,_n),e(_n,Lv),e(_n,xr),e(xr,Nv),e(_n,Iv),e(_n,pl),e(pl,Dv),e(_n,Sv),e(Xe,Ov),g(Zn,Xe,null),e(Xe,Gv),e(Xe,hl),e(hl,Bv),e(Xe,Uv),g(ba,Xe,null),p(n,Ac,h),p(n,gn,h),e(gn,Xn),e(Xn,ul),g(wa,ul,null),e(gn,Wv),e(gn,ml),e(ml,Rv),p(n,Lc,h),p(n,vt,h),e(vt,Qe),g(ya,Qe,null),e(Qe,Hv),e(Qe,Tn),e(Tn,Vv),e(Tn,fl),e(fl,Kv),e(Tn,Yv),e(Tn,_l),e(_l,Jv),e(Tn,Zv),e(Qe,Xv),g(Qn,Qe,null),e(Qe,Qv),e(Qe,gl),e(gl,ek),e(Qe,tk),g(xa,Qe,null),e(vt,nk),e(vt,jt),g(za,jt,null),e(jt,ok),e(jt,Tl),e(Tl,sk),e(jt,ak),g($a,jt,null),e(vt,rk),e(vt,qt),g(Ea,qt,null),e(qt,ik),e(qt,vl),e(vl,dk),e(qt,lk),g(ja,qt,null),p(n,Nc,h),p(n,vn,h),e(vn,eo),e(eo,kl),g(qa,kl,null),e(vn,ck),e(vn,bl),e(bl,pk),p(n,Ic,h),p(n,kt,h),e(kt,et),g(Fa,et,null),e(et,hk),e(et,kn),e(kn,uk),e(kn,wl),e(wl,mk),e(kn,fk),e(kn,yl),e(yl,_k),e(kn,gk),e(et,Tk),g(to,et,n
ull),e(et,vk),e(et,xl),e(xl,kk),e(et,bk),g(Ma,et,null),e(kt,wk),e(kt,Ft),g(Pa,Ft,null),e(Ft,yk),e(Ft,zl),e(zl,xk),e(Ft,zk),g(Ca,Ft,null),e(kt,$k),e(kt,Mt),g(Aa,Mt,null),e(Mt,Ek),e(Mt,$l),e($l,jk),e(Mt,qk),g(La,Mt,null),Dc=!0},p(n,[h]){const Na={};h&2&&(Na.$$scope={dirty:h,ctx:n}),Dn.$set(Na);const El={};h&2&&(El.$$scope={dirty:h,ctx:n}),On.$set(El);const jl={};h&2&&(jl.$$scope={dirty:h,ctx:n}),Bn.$set(jl);const ql={};h&2&&(ql.$$scope={dirty:h,ctx:n}),Wn.$set(ql);const Ia={};h&2&&(Ia.$$scope={dirty:h,ctx:n}),Rn.$set(Ia);const Fl={};h&2&&(Fl.$$scope={dirty:h,ctx:n}),Vn.$set(Fl);const Ml={};h&2&&(Ml.$$scope={dirty:h,ctx:n}),Kn.$set(Ml);const Pl={};h&2&&(Pl.$$scope={dirty:h,ctx:n}),Jn.$set(Pl);const Da={};h&2&&(Da.$$scope={dirty:h,ctx:n}),Zn.$set(Da);const Cl={};h&2&&(Cl.$$scope={dirty:h,ctx:n}),Qn.$set(Cl);const Al={};h&2&&(Al.$$scope={dirty:h,ctx:n}),to.$set(Al)},i(n){Dc||(T(x.$$.fragment,n),T(re.$$.fragment,n),T(To.$$.fragment,n),T(vo.$$.fragment,n),T(wo.$$.fragment,n),T(xo.$$.fragment,n),T(qo.$$.fragment,n),T(Po.$$.fragment,n),T(Co.$$.fragment,n),T(Ao.$$.fragment,n),T(Bo.$$.fragment,n),T(Uo.$$.fragment,n),T(Ro.$$.fragment,n),T(Ho.$$.fragment,n),T(Jo.$$.fragment,n),T(Xo.$$.fragment,n),T(es.$$.fragment,n),T(ts.$$.fragment,n),T(ns.$$.fragment,n),T(as.$$.fragment,n),T(is.$$.fragment,n),T(ds.$$.fragment,n),T(ls.$$.fragment,n),T(fs.$$.fragment,n),T(Dn.$$.fragment,n),T(_s.$$.fragment,n),T(gs.$$.fragment,n),T(Ts.$$.fragment,n),T(vs.$$.fragment,n),T(ks.$$.fragment,n),T(bs.$$.fragment,n),T(ws.$$.fragment,n),T(qs.$$.fragment,n),T(On.$$.fragment,n),T(Fs.$$.fragment,n),T(Ms.$$.fragment,n),T(Ps.$$.fragment,n),T(Cs.$$.fragment,n),T(As.$$.fragment,n),T(Ls.$$.fragment,n),T(Ns.$$.fragment,n),T(Bs.$$.fragment,n),T(Bn.$$.fragment,n),T(Us.$$.fragment,n),T(Ws.$$.fragment,n),T(Rs.$$.fragment,n),T(Hs.$$.fragment,n),T(Vs.$$.fragment,n),T(Ks.$$.fragment,n),T(Ys.$$.fragment,n),T(Wn.$$.fragment,n),T(ta.$$.fragment,n),T(Rn.$$.fragment,n),T(na.$$.fragment,n),T(oa.$$.fragment,n),T(sa.$$.fragment,n),T(Vn.$$.fragment,n),T(pa.$$.fragment,n),T(Kn.$$.fragment,n),T(ha.$$.fragment,n),T(ua.$$.fragment,n),T(ma.$$.fragment,n),T(Jn.$$.fragment,n),T(ka.$$.fragment,n),T(Zn.$$.fragment,n),T(ba.$$.fragment,n),T(wa.$$.fragment,n),T(ya.$$.fragment,n),T(Qn.$$.fragment,n),T(xa.$$.fragment,n),T(za.$$.fragment,n),T($a.$$.fragment,n),T(Ea.$$.fragment,n),T(ja.$$.fragment,n),T(qa.$$.fragment,n),T(Fa.$$.fragment,n),T(to.$$.fragment,n),T(Ma.$$.fragment,n),T(Pa.$$.fragment,n),T(Ca.$$.fragment,n),T(Aa.$$.fragment,n),T(La.$$.fragment,n),Dc=!0)},o(n){v(x.$$.fragment,n),v(re.$$.fragment,n),v(To.$$.fragment,n),v(vo.$$.fragment,n),v(wo.$$.fragment,n),v(xo.$$.fragment,n),v(qo.$$.fragment,n),v(Po.$$.fragment,n),v(Co.$$.fragment,n),v(Ao.$$.fragment,n),v(Bo.$$.fragment,n),v(Uo.$$.fragment,n),v(Ro.$$.fragment,n),v(Ho.$$.fragment,n),v(Jo.$$.fragment,n),v(Xo.$$.fragment,n),v(es.$$.fragment,n),v(ts.$$.fragment,n),v(ns.$$.fragment,n),v(as.$$.fragment,n),v(is.$$.fragment,n),v(ds.$$.fragment,n),v(ls.$$.fragment,n),v(fs.$$.fragment,n),v(Dn.$$.fragment,n),v(_s.$$.fragment,n),v(gs.$$.fragment,n),v(Ts.$$.fragment,n),v(vs.$$.fragment,n),v(ks.$$.fragment,n),v(bs.$$.fragment,n),v(ws.$$.fragment,n),v(qs.$$.fragment,n),v(On.$$.fragment,n),v(Fs.$$.fragment,n),v(Ms.$$.fragment,n),v(Ps.$$.fragment,n),v(Cs.$$.fragment,n),v(As.$$.fragment,n),v(Ls.$$.fragment,n),v(Ns.$$.fragment,n),v(Bs.$$.fragment,n),v(Bn.$$.fragment,n),v(Us.$$.fragment,n),v(Ws.$$.fragment,n),v(Rs.$$.fragment,n),v(Hs.$$.fragment,n),v(Vs.$$.fragment,n),v(Ks.$$.fragment,n),v(Ys.$$.fragment,n),v(Wn.$$.fragment,n),
v(ta.$$.fragment,n),v(Rn.$$.fragment,n),v(na.$$.fragment,n),v(oa.$$.fragment,n),v(sa.$$.fragment,n),v(Vn.$$.fragment,n),v(pa.$$.fragment,n),v(Kn.$$.fragment,n),v(ha.$$.fragment,n),v(ua.$$.fragment,n),v(ma.$$.fragment,n),v(Jn.$$.fragment,n),v(ka.$$.fragment,n),v(Zn.$$.fragment,n),v(ba.$$.fragment,n),v(wa.$$.fragment,n),v(ya.$$.fragment,n),v(Qn.$$.fragment,n),v(xa.$$.fragment,n),v(za.$$.fragment,n),v($a.$$.fragment,n),v(Ea.$$.fragment,n),v(ja.$$.fragment,n),v(qa.$$.fragment,n),v(Fa.$$.fragment,n),v(to.$$.fragment,n),v(Ma.$$.fragment,n),v(Pa.$$.fragment,n),v(Ca.$$.fragment,n),v(Aa.$$.fragment,n),v(La.$$.fragment,n),Dc=!1},d(n){t(u),n&&t($),n&&t(b),k(x),n&&t(ae),n&&t(j),k(re),n&&t(Te),n&&t(B),n&&t(C),n&&t(he),n&&t(ve),n&&t(ue),n&&t(ke),n&&t(M),n&&t(H),n&&t(V),n&&t(Dl),n&&t(Sa),n&&t(Sl),n&&t(Be),n&&t(Ol),n&&t(Ga),n&&t(Gl),n&&t(wt),n&&t(Bl),n&&t(xn),n&&t(Ul),n&&t(yt),n&&t(Wl),n&&t(Ra),n&&t(Rl),n&&t(Ht),k(To),n&&t(Hl),n&&t(pt),n&&t(Vl),n&&t($n),n&&t(Kl),n&&t(Va),n&&t(Yl),k(vo,n),n&&t(Jl),n&&t(En),n&&t(Zl),n&&t(Ya),n&&t(Xl),k(wo,n),n&&t(Ql),n&&t(se),n&&t(ec),n&&t(xt),n&&t(tc),n&&t(me),n&&t(nc),k(xo,n),n&&t(oc),n&&t(Ja),n&&t(sc),n&&t(zt),n&&t(ac),n&&t(Za),n&&t(rc),n&&t(Yt),k(qo),n&&t(ic),n&&t(ht),n&&t(dc),k(Po,n),n&&t(lc),n&&t(Ue),n&&t(cc),n&&t(er),n&&t(pc),k(Co,n),n&&t(hc),n&&t(tr),n&&t(uc),n&&t(Jt),k(Ao),n&&t(mc),n&&t(nr),n&&t(fc),n&&t(Fn),n&&t(_c),n&&t(Xt),k(Bo),n&&t(gc),n&&t(gt),k(Uo),n&&t(Tc),n&&t(en),k(Ro),n&&t(vc),n&&t(Z),k(Ho),k(Jo),k(Xo),k(es),n&&t(kc),n&&t(nn),k(ts),n&&t(bc),n&&t(Se),k(ns),k(as),k(is),n&&t(wc),n&&t(sn),k(ds),n&&t(yc),n&&t(X),k(ls),k(fs),k(Dn),k(_s),k(gs),k(Ts),k(vs),k(ks),n&&t(xc),n&&t(rn),k(bs),n&&t(zc),n&&t(Q),k(ws),k(qs),k(On),k(Fs),k(Ms),k(Ps),k(Cs),k(As),n&&t($c),n&&t(ln),k(Ls),n&&t(Ec),n&&t(ee),k(Ns),k(Bs),k(Bn),k(Us),k(Ws),k(Rs),k(Hs),k(Vs),n&&t(jc),n&&t(pn),k(Ks),n&&t(qc),n&&t(de),k(Ys),k(Wn),k(ta),k(Rn),k(na),n&&t(Fc),n&&t(un),k(oa),n&&t(Mc),n&&t(le),k(sa),k(Vn),k(pa),k(Kn),k(ha),n&&t(Pc),n&&t(fn),k(ua),n&&t(Cc),n&&t(ce),k(ma),k(Jn),k(ka),k(Zn),k(ba),n&&t(Ac),n&&t(gn),k(wa),n&&t(Lc),n&&t(vt),k(ya),k(Qn),k(xa),k(za),k($a),k(Ea),k(ja),n&&t(Nc),n&&t(vn),k(qa),n&&t(Ic),n&&t(kt),k(Fa),k(to),k(Ma),k(Pa),k(Ca),k(Aa),k(La)}}}const ly={local:"t5",sections:[{local:"overview",title:"Overview"},{local:"training",title:"Training"},{local:"inference",title:"Inference"},{local:"example-scripts",title:"Example scripts"},{local:"transformers.T5Config",title:"T5Config"},{local:"transformers.T5Tokenizer",title:"T5Tokenizer"},{local:"transformers.T5TokenizerFast",title:"T5TokenizerFast"},{local:"transformers.T5Model",title:"T5Model"},{local:"transformers.T5ForConditionalGeneration",title:"T5ForConditionalGeneration"},{local:"transformers.T5EncoderModel",title:"T5EncoderModel"},{local:"transformers.TFT5Model",title:"TFT5Model"},{local:"transformers.TFT5ForConditionalGeneration",title:"TFT5ForConditionalGeneration"},{local:"transformers.TFT5EncoderModel",title:"TFT5EncoderModel"},{local:"transformers.FlaxT5Model",title:"FlaxT5Model"},{local:"transformers.FlaxT5ForConditionalGeneration",title:"FlaxT5ForConditionalGeneration"}],title:"T5"};function cy(N,u,$){let{fw:b}=u;return N.$$set=y=>{"fw"in y&&$(0,b=y.fw)},[b]}class gy extends Vw{constructor(u){super();Kw(this,u,cy,dy,Yw,{fw:0})}}export{gy as default,ly as metadata};
9,977
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/mbart.mdx-3c95c19b.js
import{S as s$,i as r$,s as i$,e as o,k as d,w as m,t as r,L as d$,c as n,d as t,m as l,a,x as f,h as i,b as c,J as e,g as u,y as _,q as g,o as k,B as b}from"../../chunks/vendor-b1433968.js";import{T as at}from"../../chunks/Tip-c3840994.js";import{D as q}from"../../chunks/Docstring-ff504c58.js";import{C as I}from"../../chunks/CodeBlock-a320dbd7.js";import{I as C}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function l$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function c$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function p$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function u$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function h$(P){let 
h,z,v,M,x,T,y,B,st,Pe,F,We,be,rt,ve,ye,it,Je,ee,W,Qe,ie,N,D,dt,he,me,lt,te,ct,pt,Q,Oe,Te,Ze,de,Me,we,ut,le,xe,ze,Ye;return{c(){h=o("p"),z=r("TF 2.0 models accepts two formats as inputs:"),v=d(),M=o("ul"),x=o("li"),T=r("having all inputs as keyword arguments (like PyTorch models), or"),y=d(),B=o("li"),st=r("having all inputs as a list, tuple or dict in the first positional arguments."),Pe=d(),F=o("p"),We=r("This second option is useful when using "),be=o("code"),rt=r("tf.keras.Model.fit"),ve=r(` method which currently requires having all the tensors in the first argument of the model call function: `),ye=o("code"),it=r("model(inputs)"),Je=r("."),ee=d(),W=o("p"),Qe=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ie=d(),N=o("ul"),D=o("li"),dt=r("a single Tensor with "),he=o("code"),me=r("input_ids"),lt=r(" only and nothing else: "),te=o("code"),ct=r("model(input_ids)"),pt=d(),Q=o("li"),Oe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Te=o("code"),Ze=r("model([input_ids, attention_mask])"),de=r(" or "),Me=o("code"),we=r("model([input_ids, attention_mask, token_type_ids])"),ut=d(),le=o("li"),xe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ze=o("code"),Ye=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(w){h=n(w,"P",{});var $=a(h);z=i($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),v=l(w),M=n(w,"UL",{});var Be=a(M);x=n(Be,"LI",{});var Xe=a(x);T=i(Xe,"having all inputs as keyword arguments (like PyTorch models), or"),Xe.forEach(t),y=l(Be),B=n(Be,"LI",{});var Wt=a(B);st=i(Wt,"having all inputs as a list, tuple or dict in the first positional arguments."),Wt.forEach(t),Be.forEach(t),Pe=l(w),F=n(w,"P",{});var oe=a(F);We=i(oe,"This second option is useful when using "),be=n(oe,"CODE",{});var Qt=a(be);rt=i(Qt,"tf.keras.Model.fit"),Qt.forEach(t),ve=i(oe,` method which currently requires having all the tensors in the first argument of the model call function: `),ye=n(oe,"CODE",{});var gt=a(ye);it=i(gt,"model(inputs)"),gt.forEach(t),Je=i(oe,"."),oe.forEach(t),ee=l(w),W=n(w,"P",{});var Z=a(W);Qe=i(Z,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z.forEach(t),ie=l(w),N=n(w,"UL",{});var ce=a(N);D=n(ce,"LI",{});var pe=a(D);dt=i(pe,"a single Tensor with "),he=n(pe,"CODE",{});var Xt=a(he);me=i(Xt,"input_ids"),Xt.forEach(t),lt=i(pe," only and nothing else: "),te=n(pe,"CODE",{});var Kt=a(te);ct=i(Kt,"model(input_ids)"),Kt.forEach(t),pe.forEach(t),pt=l(ce),Q=n(ce,"LI",{});var ue=a(Q);Oe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Te=n(ue,"CODE",{});var Ht=a(Te);Ze=i(Ht,"model([input_ids, attention_mask])"),Ht.forEach(t),de=i(ue," or "),Me=n(ue,"CODE",{});var Vt=a(Me);we=i(Vt,"model([input_ids, attention_mask, token_type_ids])"),Vt.forEach(t),ue.forEach(t),ut=l(ce),le=n(ce,"LI",{});var Ke=a(le);xe=i(Ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ze=n(Ke,"CODE",{});var Jt=a(ze);Ye=i(Jt,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Jt.forEach(t),Ke.forEach(t),ce.forEach(t)},m(w,$){u(w,h,$),e(h,z),u(w,v,$),u(w,M,$),e(M,x),e(x,T),e(M,y),e(M,B),e(B,st),u(w,Pe,$),u(w,F,$),e(F,We),e(F,be),e(be,rt),e(F,ve),e(F,ye),e(ye,it),e(F,Je),u(w,ee,$),u(w,W,$),e(W,Qe),u(w,ie,$),u(w,N,$),e(N,D),e(D,dt),e(D,he),e(he,me),e(D,lt),e(D,te),e(te,ct),e(N,pt),e(N,Q),e(Q,Oe),e(Q,Te),e(Te,Ze),e(Q,de),e(Q,Me),e(Me,we),e(N,ut),e(N,le),e(le,xe),e(le,ze),e(ze,Ye)},d(w){w&&t(h),w&&t(v),w&&t(M),w&&t(Pe),w&&t(F),w&&t(ee),w&&t(W),w&&t(ie),w&&t(N)}}}function m$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function f$(P){let h,z,v,M,x,T,y,B,st,Pe,F,We,be,rt,ve,ye,it,Je,ee,W,Qe,ie,N,D,dt,he,me,lt,te,ct,pt,Q,Oe,Te,Ze,de,Me,we,ut,le,xe,ze,Ye;return{c(){h=o("p"),z=r("TF 2.0 models accepts two formats as inputs:"),v=d(),M=o("ul"),x=o("li"),T=r("having all inputs as keyword arguments (like PyTorch models), or"),y=d(),B=o("li"),st=r("having all inputs as a list, tuple or dict in the first positional arguments."),Pe=d(),F=o("p"),We=r("This second option is useful when using "),be=o("code"),rt=r("tf.keras.Model.fit"),ve=r(` method which currently requires having all the tensors in the first argument of the model call function: `),ye=o("code"),it=r("model(inputs)"),Je=r("."),ee=d(),W=o("p"),Qe=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ie=d(),N=o("ul"),D=o("li"),dt=r("a single Tensor with "),he=o("code"),me=r("input_ids"),lt=r(" only and nothing else: "),te=o("code"),ct=r("model(input_ids)"),pt=d(),Q=o("li"),Oe=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Te=o("code"),Ze=r("model([input_ids, attention_mask])"),de=r(" or "),Me=o("code"),we=r("model([input_ids, attention_mask, token_type_ids])"),ut=d(),le=o("li"),xe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ze=o("code"),Ye=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(w){h=n(w,"P",{});var $=a(h);z=i($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),v=l(w),M=n(w,"UL",{});var Be=a(M);x=n(Be,"LI",{});var Xe=a(x);T=i(Xe,"having all inputs as keyword arguments (like PyTorch models), or"),Xe.forEach(t),y=l(Be),B=n(Be,"LI",{});var Wt=a(B);st=i(Wt,"having all inputs as a list, tuple or dict in the first positional arguments."),Wt.forEach(t),Be.forEach(t),Pe=l(w),F=n(w,"P",{});var oe=a(F);We=i(oe,"This second option is useful when using "),be=n(oe,"CODE",{});var Qt=a(be);rt=i(Qt,"tf.keras.Model.fit"),Qt.forEach(t),ve=i(oe,` method which currently requires having all the tensors in the first argument of the model call function: `),ye=n(oe,"CODE",{});var 
gt=a(ye);it=i(gt,"model(inputs)"),gt.forEach(t),Je=i(oe,"."),oe.forEach(t),ee=l(w),W=n(w,"P",{});var Z=a(W);Qe=i(Z,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z.forEach(t),ie=l(w),N=n(w,"UL",{});var ce=a(N);D=n(ce,"LI",{});var pe=a(D);dt=i(pe,"a single Tensor with "),he=n(pe,"CODE",{});var Xt=a(he);me=i(Xt,"input_ids"),Xt.forEach(t),lt=i(pe," only and nothing else: "),te=n(pe,"CODE",{});var Kt=a(te);ct=i(Kt,"model(input_ids)"),Kt.forEach(t),pe.forEach(t),pt=l(ce),Q=n(ce,"LI",{});var ue=a(Q);Oe=i(ue,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Te=n(ue,"CODE",{});var Ht=a(Te);Ze=i(Ht,"model([input_ids, attention_mask])"),Ht.forEach(t),de=i(ue," or "),Me=n(ue,"CODE",{});var Vt=a(Me);we=i(Vt,"model([input_ids, attention_mask, token_type_ids])"),Vt.forEach(t),ue.forEach(t),ut=l(ce),le=n(ce,"LI",{});var Ke=a(le);xe=i(Ke,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ze=n(Ke,"CODE",{});var Jt=a(ze);Ye=i(Jt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Jt.forEach(t),Ke.forEach(t),ce.forEach(t)},m(w,$){u(w,h,$),e(h,z),u(w,v,$),u(w,M,$),e(M,x),e(x,T),e(M,y),e(M,B),e(B,st),u(w,Pe,$),u(w,F,$),e(F,We),e(F,be),e(be,rt),e(F,ve),e(F,ye),e(ye,it),e(F,Je),u(w,ee,$),u(w,W,$),e(W,Qe),u(w,ie,$),u(w,N,$),e(N,D),e(D,dt),e(D,he),e(he,me),e(D,lt),e(D,te),e(te,ct),e(N,pt),e(N,Q),e(Q,Oe),e(Q,Te),e(Te,Ze),e(Q,de),e(Q,Me),e(Me,we),e(N,ut),e(N,le),e(le,xe),e(le,ze),e(ze,Ye)},d(w){w&&t(h),w&&t(v),w&&t(M),w&&t(Pe),w&&t(F),w&&t(ee),w&&t(W),w&&t(ie),w&&t(N)}}}function _$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function g$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function k$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently 
ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function b$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function v$(P){let h,z,v,M,x;return{c(){h=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),v=o("code"),M=r("Module"),x=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=n(T,"P",{});var y=a(h);z=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),v=n(y,"CODE",{});var B=a(v);M=i(B,"Module"),B.forEach(t),x=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(T,y){u(T,h,y),e(h,z),e(h,v),e(v,M),e(h,x)},d(T){T&&t(h)}}}function y$(P){let 
h,z,v,M,x,T,y,B,st,Pe,F,We,be,rt,ve,ye,it,Je,ee,W,Qe,ie,N,D,dt,he,me,lt,te,ct,pt,Q,Oe,Te,Ze,de,Me,we,ut,le,xe,ze,Ye,w,$,Be,Xe,Wt,oe,Qt,gt,Z,ce,pe,Xt,Kt,ue,Ht,Vt,Ke,Jt,e_,$d,t_,o_,qh,kt,n_,Kn,Fd,a_,s_,r_,_i,i_,d_,$h,gi,Ed,l_,Fh,Hn,Eh,ki,Vn,jd,c_,p_,Zt,u_,Cd,h_,m_,Pd,f_,__,jh,Jn,Ch,Yt,Ao,Od,Zn,g_,Sd,k_,Ph,bt,b_,bi,v_,Yn,y_,T_,Ad,M_,w_,Oh,vi,x_,Sh,yi,Id,z_,Ah,eo,Io,Nd,ea,B_,Ld,q_,Ih,et,$_,Dd,F_,E_,Gd,j_,C_,Ud,P_,O_,Nh,No,S_,Ti,A_,I_,Lh,Mi,Rd,N_,Dh,ta,Gh,wi,oa,Wd,L_,D_,qe,G_,Qd,U_,R_,Xd,W_,Q_,Kd,X_,K_,Hd,H_,V_,Vd,J_,Z_,Uh,na,Rh,to,Lo,Jd,aa,Y_,Zd,eg,Wh,$e,sa,tg,oo,og,xi,ng,ag,ra,sg,rg,ig,no,dg,zi,lg,cg,Bi,pg,ug,hg,Yd,mg,fg,ia,Qh,ao,Do,el,da,_g,tl,gg,Xh,K,la,kg,ol,bg,vg,vt,qi,yg,Tg,$i,Mg,wg,Fi,xg,zg,Bg,ca,qg,nl,$g,Fg,Eg,al,jg,Cg,pa,Pg,Go,ua,Og,sl,Sg,Ag,tt,ha,Ig,ma,Ng,rl,Lg,Dg,Gg,fa,_a,il,Ug,Rg,dl,Wg,Qg,ga,ll,Xg,Kg,cl,Hg,Vg,pl,Jg,Kh,so,Uo,ul,ka,Zg,hl,Yg,Hh,G,ba,ek,ro,tk,ml,ok,nk,va,ak,sk,rk,yt,Ei,ik,dk,ji,lk,ck,Ci,pk,uk,hk,ya,mk,fl,fk,_k,gk,_l,kk,bk,Ta,vk,Se,Ma,yk,gl,Tk,Mk,wa,wk,kl,xk,zk,Bk,xa,za,bl,qk,$k,vl,Fk,Ek,Ba,yl,jk,Ck,Tl,Pk,Ok,Ml,Sk,Ak,Ro,qa,Ik,wl,Nk,Lk,Wo,$a,Dk,xl,Gk,Vh,io,Qo,zl,Fa,Uk,Bl,Rk,Jh,L,Ea,Wk,ja,Qk,Ca,Xk,Kk,Hk,Pa,Vk,Pi,Jk,Zk,Yk,ql,eb,tb,Oa,ob,ot,Sa,nb,Aa,ab,$l,sb,rb,ib,Ia,Na,Fl,db,lb,El,cb,pb,La,jl,ub,hb,Cl,mb,fb,Pl,_b,gb,Xo,Da,kb,Ol,bb,vb,Ko,Ga,yb,Ua,Tb,Sl,Mb,wb,xb,Ho,Ra,zb,Al,Bb,qb,Vo,Wa,$b,Il,Fb,Zh,lo,Jo,Nl,Qa,Eb,Ll,jb,Yh,H,Xa,Cb,co,Pb,Dl,Ob,Sb,Ka,Ab,Ib,Nb,Ha,Lb,Oi,Db,Gb,Ub,Gl,Rb,Wb,Va,Qb,Ae,Ja,Xb,Ul,Kb,Hb,Za,Vb,Rl,Jb,Zb,Yb,Ya,es,Wl,ev,tv,Ql,ov,nv,ts,Xl,av,sv,Kl,rv,iv,Hl,dv,lv,Zo,os,cv,Vl,pv,uv,Yo,ns,hv,Jl,mv,em,po,en,Zl,as,fv,Yl,_v,tm,He,ss,gv,rs,kv,Si,bv,vv,yv,is,Tv,ds,Mv,wv,xv,Ie,ls,zv,uo,Bv,Ai,qv,$v,ec,Fv,Ev,jv,tn,Cv,tc,Pv,Ov,cs,om,ho,on,oc,ps,Sv,nc,Av,nm,Ve,us,Iv,hs,Nv,Ii,Lv,Dv,Gv,ms,Uv,fs,Rv,Wv,Qv,E,_s,Xv,mo,Kv,Ni,Hv,Vv,ac,Jv,Zv,Yv,nn,ey,sc,ty,oy,rc,ic,dc,lc,ny,ay,cc,pc,uc,hc,sy,ry,mc,fc,_c,gc,iy,dy,kc,bc,gs,an,sn,vc,ks,ly,yc,cy,py,Tc,uy,hy,Mc,my,fy,wc,xc,fo,zc,_y,gy,rn,dn,Bc,bs,ky,qc,by,vy,$c,yy,Ty,Fc,Ec,jc,Cc,My,wy,Pc,Oc,Sc,Ac,xy,zy,Ic,Nc,Lc,Dc,By,am,_o,ln,Gc,vs,qy,Uc,$y,sm,Fe,ys,Fy,go,Ey,Rc,jy,Cy,Wc,Py,Oy,Sy,Ts,Ay,Li,Iy,Ny,Ly,Ms,Dy,ws,Gy,Uy,Ry,Ne,xs,Wy,ko,Qy,Di,Xy,Ky,Qc,Hy,Vy,Jy,cn,Zy,Xc,Yy,eT,zs,rm,bo,pn,Kc,Bs,tT,Hc,oT,im,Ee,qs,nT,Vc,aT,sT,$s,rT,Gi,iT,dT,lT,Fs,cT,Es,pT,uT,hT,ne,js,mT,vo,fT,Ui,_T,gT,Jc,kT,bT,vT,un,yT,Zc,TT,MT,Cs,wT,Yc,xT,zT,Ps,dm,yo,hn,ep,Os,BT,tp,qT,lm,Ss,Tt,As,$T,op,FT,ET,Is,cm,To,mn,np,Ns,jT,ap,CT,pm,je,Ls,PT,Ds,OT,Ri,ST,AT,IT,Gs,NT,Us,LT,DT,GT,fn,UT,Le,Rs,RT,Mo,WT,Wi,QT,XT,sp,KT,HT,VT,_n,JT,rp,ZT,YT,Ws,um,wo,gn,ip,Qs,e1,dp,t1,hm,Ce,Xs,o1,Ks,n1,Qi,a1,s1,r1,Hs,i1,Vs,d1,l1,c1,kn,p1,O,Js,u1,xo,h1,Xi,m1,f1,lp,_1,g1,k1,bn,b1,cp,v1,y1,pp,up,hp,mp,T1,M1,fp,_p,gp,kp,w1,x1,bp,vp,yp,Tp,z1,B1,Mp,wp,Zs,vn,yn,xp,Ys,q1,zp,$1,F1,Bp,E1,j1,qp,C1,P1,$p,Fp,zo,Ep,O1,S1,Tn,Mn,jp,er,A1,Cp,I1,N1,Pp,L1,D1,Op,Sp,tr,Ap,G1,U1,wn,xn,Ip,or,R1,Np,W1,mm,Bo,zn,Lp,nr,Q1,Dp,X1,fm,V,ar,K1,sr,H1,Ki,V1,J1,Z1,rr,Y1,ir,eM,tM,oM,Gp,nM,aM,ht,Up,dr,sM,rM,Rp,lr,iM,dM,Wp,cr,lM,cM,Qp,pr,pM,uM,De,ur,hM,qo,mM,Xp,fM,_M,Kp,gM,kM,bM,Bn,vM,Hp,yM,TM,hr,MM,Mt,mr,wM,Vp,xM,zM,fr,BM,wt,_r,qM,Jp,$M,FM,gr,_m,$o,qn,Zp,kr,EM,Yp,jM,gm,J,br,CM,vr,PM,Hi,OM,SM,AM,yr,IM,Tr,NM,LM,DM,eu,GM,UM,mt,tu,Mr,RM,WM,ou,wr,QM,XM,nu,xr,KM,HM,au,zr,VM,JM,j,Br,ZM,Fo,YM,su,e0,t0,ru,o0,n0,a0,$n,s0,iu,r0,i0,du,lu,cu,pu,d0,l0,uu,hu,mu,fu,c0,p0,_u,gu,ku,bu,u0,h0,vu,yu,qr,Fn,En,Tu,$r,m0,Mu,f0,_0,wu,g0,k0,xu,b0,v0,zu,Bu,Eo,qu,y0,T0,jn,Cn,$u,Fr,M0,Fu,w0,x0,Eu,z0,B0,ju,Cu,Pu,Ou,q0,$0,Su,Au,Iu,Nu,F0,E0,Lu,Du,Gu,Uu,j0,C0,xt,Er,P0,Ru,O0,S0,jr,A0,zt,Cr,I0,Wu,N0,L0,Pr,km,jo,Pn,Qu,Or,D0,Xu,G0,bm,U,Sr,U0,Ku,R0,W0,Ar,Q0,Vi,X0,K0,H0,Ir,V0,Nr,J0
,Z0,Y0,Hu,ew,tw,ft,Vu,Lr,ow,nw,Ju,Dr,aw,sw,Zu,Gr,rw,iw,Yu,Ur,dw,lw,Ge,Rr,cw,Co,pw,eh,uw,hw,th,mw,fw,_w,On,gw,oh,kw,bw,Wr,vw,Bt,Qr,yw,nh,Tw,Mw,Xr,ww,qt,Kr,xw,ah,zw,Bw,Hr,vm,Po,Sn,sh,Vr,qw,rh,$w,ym,R,Jr,Fw,Oo,Ew,ih,jw,Cw,dh,Pw,Ow,Sw,Zr,Aw,Ji,Iw,Nw,Lw,Yr,Dw,ei,Gw,Uw,Rw,lh,Ww,Qw,_t,ch,ti,Xw,Kw,ph,oi,Hw,Vw,uh,ni,Jw,Zw,hh,ai,Yw,ex,Ue,si,tx,So,ox,mh,nx,ax,fh,sx,rx,ix,An,dx,_h,lx,cx,ri,px,$t,ii,ux,gh,hx,mx,di,fx,Ft,li,_x,kh,gx,kx,ci,Tm;return T=new C({}),ie=new C({}),Xe=new C({}),Hn=new I({props:{code:`from transformers import MBartForConditionalGeneration, MBartTokenizer tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO") example_english_phrase = "UN Chief Says There Is No Military Solution in Syria" expected_translation_romanian = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" inputs = tokenizer(example_english_phrase, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(expected_translation_romanian, return_tensors="pt") model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro") # forward pass model(**inputs, labels=batch['labels']),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot;UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>expected_translation_romanian = <span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(expected_translation_romanian, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model(**inputs, labels=batch[<span class="hljs-string">&#x27;labels&#x27;</span>])`}}),Jn=new I({props:{code:`from transformers import MBartForConditionalGeneration, MBartTokenizer tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX") article = "UN Chief Says There Is No Military Solution in Syria" inputs = tokenizer(article, return_tensors="pt") translated_tokens = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"]) tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>translated_tokens = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;ro_RO&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(translated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span>`}}),Zn=new C({}),ea=new C({}),ta=new I({props:{code:`from transformers import MBartForConditionalGeneration, MBart50TokenizerFast model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO") src_text = " UN Chief Says There Is No Military Solution in Syria" tgt_text = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" model_inputs = tokenizer(src_text, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors="pt").input_ids model(**model_inputs, labels=labels) # forward pass,`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBart50TokenizerFast model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>) tokenizer = MBart50TokenizerFast.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) src_text = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> tgt_text = 
<span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span> model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids model(**model_inputs, labels=labels) <span class="hljs-comment"># forward pass</span>`}}),na=new I({props:{code:`from transformers import MBartForConditionalGeneration, MBart50TokenizerFast article_hi = "\u0938\u0902\u092F\u0941\u0915\u094D\u0924 \u0930\u093E\u0937\u094D\u091F\u094D\u0930 \u0915\u0947 \u092A\u094D\u0930\u092E\u0941\u0916 \u0915\u093E \u0915\u0939\u0928\u093E \u0939\u0948 \u0915\u093F \u0938\u0940\u0930\u093F\u092F\u093E \u092E\u0947\u0902 \u0915\u094B\u0908 \u0938\u0948\u0928\u094D\u092F \u0938\u092E\u093E\u0927\u093E\u0928 \u0928\u0939\u0940\u0902 \u0939\u0948" article_ar = "\u0627\u0644\u0623\u0645\u064A\u0646 \u0627\u0644\u0639\u0627\u0645 \u0644\u0644\u0623\u0645\u0645 \u0627\u0644\u0645\u062A\u062D\u062F\u0629 \u064A\u0642\u0648\u0644 \u0625\u0646\u0647 \u0644\u0627 \u064A\u0648\u062C\u062F \u062D\u0644 \u0639\u0633\u0643\u0631\u064A \u0641\u064A \u0633\u0648\u0631\u064A\u0627." model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") # translate Hindi to French tokenizer.src_lang = "hi_IN" encoded_hi = tokenizer(article_hi, return_tensors="pt") generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"]) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) # => "Le chef de l 'ONU affirme qu 'il n 'y a pas de solution militaire en Syria." 
# translate Arabic to English tokenizer.src_lang = "ar_AR" encoded_ar = tokenizer(article_ar, return_tensors="pt") generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"]) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) # => "The Secretary-General of the United Nations says there is no military solution in Syria.",`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBart50TokenizerFast article_hi = <span class="hljs-string">&quot;\u0938\u0902\u092F\u0941\u0915\u094D\u0924 \u0930\u093E\u0937\u094D\u091F\u094D\u0930 \u0915\u0947 \u092A\u094D\u0930\u092E\u0941\u0916 \u0915\u093E \u0915\u0939\u0928\u093E \u0939\u0948 \u0915\u093F \u0938\u0940\u0930\u093F\u092F\u093E \u092E\u0947\u0902 \u0915\u094B\u0908 \u0938\u0948\u0928\u094D\u092F \u0938\u092E\u093E\u0927\u093E\u0928 \u0928\u0939\u0940\u0902 \u0939\u0948&quot;</span> article_ar = <span class="hljs-string">&quot;\u0627\u0644\u0623\u0645\u064A\u0646 \u0627\u0644\u0639\u0627\u0645 \u0644\u0644\u0623\u0645\u0645 \u0627\u0644\u0645\u062A\u062D\u062F\u0629 \u064A\u0642\u0648\u0644 \u0625\u0646\u0647 \u0644\u0627 \u064A\u0648\u062C\u062F \u062D\u0644 \u0639\u0633\u0643\u0631\u064A \u0641\u064A \u0633\u0648\u0631\u064A\u0627.&quot;</span> model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>) tokenizer = MBart50TokenizerFast.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>) <span class="hljs-comment"># translate Hindi to French</span> tokenizer.src_lang = <span class="hljs-string">&quot;hi_IN&quot;</span> encoded_hi = tokenizer(article_hi, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;fr_XX&quot;</span>]) tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-comment"># =&gt; &quot;Le chef de l &#x27;ONU affirme qu &#x27;il n &#x27;y a pas de solution militaire en Syria.&quot;</span> <span class="hljs-comment"># translate Arabic to English</span> tokenizer.src_lang = <span class="hljs-string">&quot;ar_AR&quot;</span> encoded_ar = tokenizer(article_ar, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;en_XX&quot;</span>]) tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-comment"># =&gt; &quot;The Secretary-General of the United Nations says there is no military solution in Syria.&quot;</span>`}}),aa=new C({}),sa=new q({props:{name:"class transformers.MBartConfig",anchor:"transformers.MBartConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 
1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"forced_eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/configuration_mbart.py#L35",parametersDescription:[{anchor:"transformers.MBartConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartModel">MBartModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartModel">TFMBartModel</a>.`,name:"vocab_size"},{anchor:"transformers.MBartConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.MBartConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.MBartConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.MBartConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.MBartConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.MBartConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.MBartConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.MBartConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.MBartConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.MBartConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.MBartConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.MBartConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.MBartConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.MBartConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.MBartConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.MBartConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.MBartConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),ia=new I({props:{code:`from transformers import MBartModel, MBartConfig # Initializing a MBART facebook/mbart-large-cc25 style configuration configuration = MBartConfig() # Initializing a model from the facebook/mbart-large-cc25 style configuration model = MBartModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartModel, MBartConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a MBART facebook/mbart-large-cc25 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MBartConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/mbart-large-cc25 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),da=new C({}),la=new q({props:{name:"class transformers.MBartTokenizer",anchor:"transformers.MBartTokenizer",parameters:[{name:"*args",val:""},{name:"tokenizer_file",val:" = None"},{name:"src_lang",val:" = None"},{name:"tgt_lang",val:" = None"},{name:"additional_special_tokens",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart.py#L70"}}),pa=new I({props:{code:`from transformers import MBartTokenizer tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro', src_lang="en_XX", tgt_lang="ro_RO") example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" expected_translation_romanian = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" inputs = tokenizer(example_english_phrase, return_tensors="pt) with tokenizer.as_target_tokenizer(): labels = tokenizer(expected_translation_romanian, return_tensors="pt") inputs["labels"] = labels["input_ids"],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-en-ro&#x27;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>expected_translation_romanian = <span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, return_tensors=<span class="hljs-string">&quot;pt) &gt;&gt;&gt; with tokenizer.as_target_tokenizer(): ... 
labels = tokenizer(expected_translation_romanian, return_tensors=&quot;</span>pt<span class="hljs-string">&quot;) &gt;&gt;&gt; inputs[&quot;</span>labels<span class="hljs-string">&quot;] = labels[&quot;</span>input_ids<span class="hljs-string">&quot;]</span>`}}),ua=new q({props:{name:"as_target_tokenizer",anchor:"transformers.MBartTokenizer.as_target_tokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart.py#L229"}}),ha=new q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.MBartTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart.py#L178",parametersDescription:[{anchor:"transformers.MBartTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.MBartTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ka=new C({}),ba=new q({props:{name:"class transformers.MBartTokenizerFast",anchor:"transformers.MBartTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"src_lang",val:" = None"},{name:"tgt_lang",val:" = None"},{name:"additional_special_tokens",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart_fast.py#L83"}}),Ta=new I({props:{code:`from transformers import MBartTokenizerFast tokenizer = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro', src_lang="en_XX", tgt_lang="ro_RO") example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" expected_translation_romanian = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" inputs = tokenizer(example_english_phrase, return_tensors="pt) with tokenizer.as_target_tokenizer(): labels = tokenizer(expected_translation_romanian, return_tensors="pt") inputs["labels"] = labels["input_ids"],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizerFast.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-en-ro&#x27;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>expected_translation_romanian = <span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, 
return_tensors=<span class="hljs-string">&quot;pt) &gt;&gt;&gt; with tokenizer.as_target_tokenizer(): ... labels = tokenizer(expected_translation_romanian, return_tensors=&quot;</span>pt<span class="hljs-string">&quot;) &gt;&gt;&gt; inputs[&quot;</span>labels<span class="hljs-string">&quot;] = labels[&quot;</span>input_ids<span class="hljs-string">&quot;]</span>`}}),Ma=new q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.MBartTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart_fast.py#L160",parametersDescription:[{anchor:"transformers.MBartTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.MBartTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),qa=new q({props:{name:"set_src_lang_special_tokens",anchor:"transformers.MBartTokenizerFast.set_src_lang_special_tokens",parameters:[{name:"src_lang",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart_fast.py#L223"}}),$a=new q({props:{name:"set_tgt_lang_special_tokens",anchor:"transformers.MBartTokenizerFast.set_tgt_lang_special_tokens",parameters:[{name:"lang",val:": str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/tokenization_mbart_fast.py#L238"}}),Fa=new C({}),Ea=new q({props:{name:"class transformers.MBart50Tokenizer",anchor:"transformers.MBart50Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"src_lang",val:" = None"},{name:"tgt_lang",val:" = None"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50.py#L48",parametersDescription:[{anchor:"transformers.MBart50Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.MBart50Tokenizer.src_lang",description:`<strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.`,name:"src_lang"},{anchor:"transformers.MBart50Tokenizer.tgt_lang",description:`<strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.`,name:"tgt_lang"},{anchor:"transformers.MBart50Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence 
token.`,name:"eos_token"},{anchor:"transformers.MBart50Tokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.MBart50Tokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.MBart50Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MBart50Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MBart50Tokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.MBart50Tokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Oa=new I({props:{code:`from transformers import MBart50Tokenizer tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO") src_text = " UN Chief Says There Is No Military Solution in Syria" tgt_text = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" model_inputs = tokenizer(src_text, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors="pt").input_ids # model(**model_inputs, labels=labels) should work,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBart50Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBart50Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = <span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model(**model_inputs, labels=labels) should work</span>`}}),Sa=new q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.MBart50Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50.py#L280",parametersDescription:[{anchor:"transformers.MBart50Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.MBart50Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Da=new q({props:{name:"convert_tokens_to_string",anchor:"transformers.MBart50Tokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:": typing.List[str]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50.py#L233"}}),Ga=new q({props:{name:"get_special_tokens_mask",anchor:"transformers.MBart50Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50.py#L250",parametersDescription:[{anchor:"transformers.MBart50Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.MBart50Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.MBart50Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ra=new q({props:{name:"set_src_lang_special_tokens",anchor:"transformers.MBart50Tokenizer.set_src_lang_special_tokens",parameters:[{name:"src_lang",val:": str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50.py#L341"}}),Wa=new q({props:{name:"set_tgt_lang_special_tokens",anchor:"transformers.MBart50Tokenizer.set_tgt_lang_special_tokens",parameters:[{name:"tgt_lang",val:": 
str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50.py#L347"}}),Qa=new C({}),Xa=new q({props:{name:"class transformers.MBart50TokenizerFast",anchor:"transformers.MBart50TokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"src_lang",val:" = None"},{name:"tgt_lang",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L57",parametersDescription:[{anchor:"transformers.MBart50TokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.MBart50TokenizerFast.src_lang",description:`<strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.`,name:"src_lang"},{anchor:"transformers.MBart50TokenizerFast.tgt_lang",description:`<strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.`,name:"tgt_lang"},{anchor:"transformers.MBart50TokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.MBart50TokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.MBart50TokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.MBart50TokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MBart50TokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MBart50TokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),Va=new I({props:{code:`from transformers import MBart50TokenizerFast tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO") src_text = " UN Chief Says There Is No Military Solution in Syria" tgt_text = "\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria" model_inputs = tokenizer(src_text, return_tensors="pt") with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors="pt").input_ids # model(**model_inputs, labels=labels) should work,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBart50TokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBart50TokenizerFast.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = <span class="hljs-string">&quot;\u015Eeful ONU declar\u0103 c\u0103 nu exist\u0103 o solu\u0163ie militar\u0103 \xEEn Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model(**model_inputs, labels=labels) should work</span>`}}),Ja=new q({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.MBart50TokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L168",parametersDescription:[{anchor:"transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),os=new q({props:{name:"set_src_lang_special_tokens",anchor:"transformers.MBart50TokenizerFast.set_src_lang_special_tokens",parameters:[{name:"src_lang",val:": str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L219"}}),ns=new q({props:{name:"set_tgt_lang_special_tokens",anchor:"transformers.MBart50TokenizerFast.set_tgt_lang_special_tokens",parameters:[{name:"tgt_lang",val:": 
str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L234"}}),as=new C({}),ss=new q({props:{name:"class transformers.MBartModel",anchor:"transformers.MBartModel",parameters:[{name:"config",val:": MBartConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1116",parametersDescription:[{anchor:"transformers.MBartModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ls=new q({props:{name:"forward",anchor:"transformers.MBartModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1143",parametersDescription:[{anchor:"transformers.MBartModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MBartModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MBartModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.MBartModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MBartModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MBartModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MBartModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MBartModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MBartModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.MBartModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MBartModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MBartModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MBartModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MBartModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tn=new at({props:{$$slots:{default:[l$]},$$scope:{ctx:P}}}),cs=new I({props:{code:`from transformers import MBartTokenizer, MBartModel import torch tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = MBartModel.from_pretrained('facebook/mbart-large-cc25') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartModel <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartModel.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ps=new C({}),us=new q({props:{name:"class transformers.MBartForConditionalGeneration",anchor:"transformers.MBartForConditionalGeneration",parameters:[{name:"config",val:": MBartConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1232",parametersDescription:[{anchor:"transformers.MBartForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),_s=new q({props:{name:"forward",anchor:"transformers.MBartForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1276",parametersDescription:[{anchor:"transformers.MBartForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MBartForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MBartForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.MBartForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MBartForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MBartForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MBartForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MBartForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MBartForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.MBartForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MBartForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MBartForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MBartForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MBartForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MBartForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),nn=new at({props:{$$slots:{default:[c$]},$$scope:{ctx:P}}}),ks=new C({}),bs=new C({}),vs=new C({}),ys=new q({props:{name:"class transformers.MBartForQuestionAnswering",anchor:"transformers.MBartForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1526",parametersDescription:[{anchor:"transformers.MBartForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xs=new q({props:{name:"forward",anchor:"transformers.MBartForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1538",parametersDescription:[{anchor:"transformers.MBartForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MBartForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MBartForQuestionAnswering.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.MBartForQuestionAnswering.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MBartForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MBartForQuestionAnswering.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MBartForQuestionAnswering.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MBartForQuestionAnswering.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MBartForQuestionAnswering.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.MBartForQuestionAnswering.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MBartForQuestionAnswering.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MBartForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MBartForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MBartForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MBartForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.MBartForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the 
cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),cn=new at({props:{$$slots:{default:[p$]},$$scope:{ctx:P}}}),zs=new I({props:{code:`from transformers import MBartTokenizer, MBartForQuestionAnswering import torch tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = MBartForQuestionAnswering.from_pretrained('facebook/mbart-large-cc25') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, 
start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Bs=new C({}),qs=new q({props:{name:"class transformers.MBartForSequenceClassification",anchor:"transformers.MBartForSequenceClassification",parameters:[{name:"config",val:": MBartConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1401",parametersDescription:[{anchor:"transformers.MBartForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),js=new q({props:{name:"forward",anchor:"transformers.MBartForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1414",parametersDescription:[{anchor:"transformers.MBartForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MBartForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MBartForSequenceClassification.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.MBartForSequenceClassification.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MBartForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MBartForSequenceClassification.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MBartForSequenceClassification.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MBartForSequenceClassification.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MBartForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.MBartForSequenceClassification.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MBartForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MBartForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MBartForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MBartForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MBartForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),un=new at({props:{$$slots:{default:[u$]},$$scope:{ctx:P}}}),Cs=new I({props:{code:`from transformers import MBartTokenizer, MBartForSequenceClassification import torch tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = MBartForSequenceClassification.from_pretrained('facebook/mbart-large-cc25') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ps=new I({props:{code:`from transformers import MBartTokenizer, MBartForSequenceClassification import torch tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = MBartForSequenceClassification.from_pretrained('facebook/mbart-large-cc25', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", 
return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Os=new C({}),As=new q({props:{name:"forward",anchor:"transformers.MBartForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_mbart.py#L1688",parametersDescription:[{anchor:"transformers.MBartForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MBartForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MBartForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.MBartForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.MBartForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MBartForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MBartForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MBartForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.MBartForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.MBartForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MBartForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MBartForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Is=new I({props:{code:`from transformers import MBartTokenizer, MBartForCausalLM tokenizer = MBartTokenizer.from_pretrained('facebook/bart-large') model = MBartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForCausalLM.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ns=new C({}),Ls=new q({props:{name:"class transformers.TFMBartModel",anchor:"transformers.TFMBartModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_tf_mbart.py#L1195",parametersDescription:[{anchor:"transformers.TFMBartModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fn=new at({props:{$$slots:{default:[h$]},$$scope:{ctx:P}}}),Rs=new q({props:{name:"call",anchor:"transformers.TFMBartModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_tf_mbart.py#L1207",parametersDescription:[{anchor:"transformers.TFMBartModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMBartModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMBartModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.TFMBartModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFMBartModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMBartModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFMBartModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFMBartModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
The expected shape is <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"encoder_outputs"},{anchor:"transformers.TFMBartModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFMBartModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation.`,name:"use_cache"},{anchor:"transformers.TFMBartModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMBartModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMBartModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMBartModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 
sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),_n=new at({props:{$$slots:{default:[m$]},$$scope:{ctx:P}}}),Ws=new I({props:{code:`from transformers import MBartTokenizer, TFMBartModel import tensorflow as tf tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = TFMBartModel.from_pretrained('facebook/mbart-large-cc25') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, TFMBartModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMBartModel.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Qs=new C({}),Xs=new q({props:{name:"class transformers.TFMBartForConditionalGeneration",anchor:"transformers.TFMBartForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_tf_mbart.py#L1302",parametersDescription:[{anchor:"transformers.TFMBartForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kn=new at({props:{$$slots:{default:[f$]},$$scope:{ctx:P}}}),Js=new q({props:{name:"call",anchor:"transformers.TFMBartForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_tf_mbart.py#L1335",parametersDescription:[{anchor:"transformers.TFMBartForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMBartForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMBartForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.TFMBartForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFMBartForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMBartForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFMBartForConditionalGeneration.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFMBartForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
The expected shape is <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"encoder_outputs"},{anchor:"transformers.TFMBartForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFMBartForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation.`,name:"use_cache"},{anchor:"transformers.TFMBartForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMBartForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMBartForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMBartForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMBartForConditionalGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring).
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> 
<p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),bn=new at({props:{$$slots:{default:[_$]},$$scope:{ctx:P}}}),Ys=new C({}),er=new C({}),or=new C({}),nr=new C({}),ar=new q({props:{name:"class transformers.FlaxMBartModel",anchor:"transformers.FlaxMBartModel",parameters:[{name:"config",val:": MBartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1266",parametersDescription:[{anchor:"transformers.FlaxMBartModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMBartModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ur=new q({props:{name:"__call__",anchor:"transformers.FlaxMBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1203",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the 
attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Bn=new at({props:{$$slots:{default:[g$]},$$scope:{ctx:P}}}),hr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartModel tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = FlaxMBartModel.from_pretrained('facebook/mbart-large-cc25') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartModel.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),mr=new q({props:{name:"encode",anchor:"transformers.FlaxMBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1027",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),fr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),_r=new q({props:{name:"decode",anchor:"transformers.FlaxMBartPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1090",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),gr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),kr=new C({}),br=new q({props:{name:"class transformers.FlaxMBartForConditionalGeneration",anchor:"transformers.FlaxMBartForConditionalGeneration",parameters:[{name:"config",val:": MBartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1353",parametersDescription:[{anchor:"transformers.FlaxMBartForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMBartForConditionalGeneration.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Br=new q({props:{name:"__call__",anchor:"transformers.FlaxMBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1203",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$n=new at({props:{$$slots:{default:[k$]},$$scope:{ctx:P}}}),$r=new C({}),Fr=new C({}),Er=new q({props:{name:"encode",anchor:"transformers.FlaxMBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1027",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" 
>transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),jr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),Cr=new q({props:{name:"decode",anchor:"transformers.FlaxMBartForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1357",parametersDescription:[{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Pr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Or=new C({}),Sr=new q({props:{name:"class transformers.FlaxMBartForSequenceClassification",anchor:"transformers.FlaxMBartForSequenceClassification",parameters:[{name:"config",val:": MBartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1661",parametersDescription:[{anchor:"transformers.FlaxMBartForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMBartForSequenceClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Rr=new q({props:{name:"__call__",anchor:"transformers.FlaxMBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1203",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new at({props:{$$slots:{default:[b$]},$$scope:{ctx:P}}}),Wr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForSequenceClassification tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = FlaxMBartForSequenceClassification.from_pretrained('facebook/mbart-large-cc25') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Qr=new q({props:{name:"encode",anchor:"transformers.FlaxMBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1027",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),Kr=new q({props:{name:"decode",anchor:"transformers.FlaxMBartPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1090",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Hr=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),Vr=new C({}),Jr=new q({props:{name:"class transformers.FlaxMBartForQuestionAnswering",anchor:"transformers.FlaxMBartForQuestionAnswering",parameters:[{name:"config",val:": MBartConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1749",parametersDescription:[{anchor:"transformers.FlaxMBartForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMBartForQuestionAnswering.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),si=new q({props:{name:"__call__",anchor:"transformers.FlaxMBartPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1203",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to 
compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),An=new at({props:{$$slots:{default:[v$]},$$scope:{ctx:P}}}),ri=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForQuestionAnswering tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') model = FlaxMBartForQuestionAnswering.from_pretrained('facebook/mbart-large-cc25') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='jax') outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),ii=new q({props:{name:"encode",anchor:"transformers.FlaxMBartPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": 
typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1027",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),di=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),li=new q({props:{name:"decode",anchor:"transformers.FlaxMBartPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/mbart/modeling_flax_mbart.py#L1090",parametersDescription:[{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMBartPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ci=new I({props:{code:`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration model = FlaxMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25') tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25') text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors='jax') encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/mbart-large-cc25&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),{c(){h=o("meta"),z=d(),v=o("h1"),M=o("a"),x=o("span"),m(T.$$.fragment),y=d(),B=o("span"),st=r("MBart and MBart-50"),Pe=d(),F=o("p"),We=o("strong"),be=r("DISCLAIMER:"),rt=r(" If you see something strange, file a "),ve=o("a"),ye=r("Github Issue"),it=r(` and assign @patrickvonplaten`),Je=d(),ee=o("h2"),W=o("a"),Qe=o("span"),m(ie.$$.fragment),N=d(),D=o("span"),dt=r("Overview of MBart"),he=d(),me=o("p"),lt=r("The MBart model was presented in "),te=o("a"),ct=r("Multilingual Denoising Pre-training for Neural Machine Translation"),pt=r(` by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.`),Q=d(),Oe=o("p"),Te=r(`According to the abstract, MBART is a sequence-to-sequence denoising auto-encoder pretrained on large-scale monolingual 
corpora in many languages using the BART objective. mBART is one of the first methods for pretraining a complete sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only on the encoder, decoder, or reconstructing parts of the text.`),Ze=d(),de=o("p"),Me=r("This model was contributed by "),we=o("a"),ut=r("valhalla"),le=r(". The Authors\u2019 code can be found "),xe=o("a"),ze=r("here"),Ye=d(),w=o("h3"),$=o("a"),Be=o("span"),m(Xe.$$.fragment),Wt=d(),oe=o("span"),Qt=r("Training of MBart"),gt=d(),Z=o("p"),ce=r(`MBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for translation task. As the model is multilingual it expects the sequences in a different format. A special language id token is added in both the source and target text. The source text format is `),pe=o("code"),Xt=r("X [eos, src_lang_code]"),Kt=r(" where "),ue=o("code"),Ht=r("X"),Vt=r(` is the source text. The target text format is `),Ke=o("code"),Jt=r("[tgt_lang_code] X [eos]"),e_=r(". "),$d=o("code"),t_=r("bos"),o_=r(" is never used."),qh=d(),kt=o("p"),n_=r("The regular "),Kn=o("a"),Fd=o("strong"),a_=r("call"),s_=r("()"),r_=r(` will encode source text format, and it should be wrapped inside the context manager `),_i=o("a"),i_=r("as_target_tokenizer()"),d_=r(" to encode target text format."),$h=d(),gi=o("ul"),Ed=o("li"),l_=r("Supervised training"),Fh=d(),m(Hn.$$.fragment),Eh=d(),ki=o("ul"),Vn=o("li"),jd=o("p"),c_=r("Generation"),p_=d(),Zt=o("p"),u_=r("While generating the target text set the "),Cd=o("code"),h_=r("decoder_start_token_id"),m_=r(` to the target language id. The following example shows how to translate English to Romanian using the `),Pd=o("em"),f_=r("facebook/mbart-large-en-ro"),__=r(" model."),jh=d(),m(Jn.$$.fragment),Ch=d(),Yt=o("h2"),Ao=o("a"),Od=o("span"),m(Zn.$$.fragment),g_=d(),Sd=o("span"),k_=r("Overview of MBart-50"),Ph=d(),bt=o("p"),b_=r("MBart-50 was introduced in the "),bi=o("em"),v_=r(`Multilingual Translation with Extensible Multilingual Pretraining and Finetuning <`),Yn=o("a"),y_=r("https://arxiv.org/abs/2008.00401>"),T_=r(` paper by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. MBart-50 is created using the original `),Ad=o("em"),M_=r("mbart-large-cc25"),w_=r(` checkpoint by extendeding its embedding layers with randomly initialized vectors for an extra set of 25 language tokens and then pretrained on 50 languages.`),Oh=d(),vi=o("p"),x_=r("According to the abstract"),Sh=d(),yi=o("p"),Id=o("em"),z_=r(`Multilingual translation models can be created through multilingual finetuning. Instead of finetuning on one direction, a pretrained model is finetuned on many directions at the same time. It demonstrates that pretrained models can be extended to incorporate additional languages without loss of performance. Multilingual finetuning improves on average 1 BLEU over the strongest baselines (being either multilingual from scratch or bilingual finetuning) while improving 9.3 BLEU on average over bilingual baselines from scratch.`),Ah=d(),eo=o("h3"),Io=o("a"),Nd=o("span"),m(ea.$$.fragment),B_=d(),Ld=o("span"),q_=r("Training of MBart-50"),Ih=d(),et=o("p"),$_=r(`The text format for MBart-50 is slightly different from mBART. 
For MBart-50 the language id token is used as a prefix for both source and target text i.e the text format is `),Dd=o("code"),F_=r("[lang_code] X [eos]"),E_=r(", where "),Gd=o("code"),j_=r("lang_code"),C_=r(` is source language id for source text and target language id for target text, with `),Ud=o("code"),P_=r("X"),O_=r(` being the source or target text respectively.`),Nh=d(),No=o("p"),S_=r("MBart-50 has its own tokenizer "),Ti=o("a"),A_=r("MBart50Tokenizer"),I_=r("."),Lh=d(),Mi=o("ul"),Rd=o("li"),N_=r("Supervised training"),Dh=d(),m(ta.$$.fragment),Gh=d(),wi=o("ul"),oa=o("li"),Wd=o("p"),L_=r("Generation"),D_=d(),qe=o("p"),G_=r("To generate using the mBART-50 multilingual translation models, "),Qd=o("code"),U_=r("eos_token_id"),R_=r(` is used as the `),Xd=o("code"),W_=r("decoder_start_token_id"),Q_=r(` and the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the `),Kd=o("em"),X_=r("forced_bos_token_id"),K_=r(" parameter to the "),Hd=o("em"),H_=r("generate"),V_=r(` method. The following example shows how to translate between Hindi to French and Arabic to English using the `),Vd=o("em"),J_=r("facebook/mbart-50-large-many-to-many"),Z_=r(" checkpoint."),Uh=d(),m(na.$$.fragment),Rh=d(),to=o("h2"),Lo=o("a"),Jd=o("span"),m(aa.$$.fragment),Y_=d(),Zd=o("span"),eg=r("MBartConfig"),Wh=d(),$e=o("div"),m(sa.$$.fragment),tg=d(),oo=o("p"),og=r("This is the configuration class to store the configuration of a "),xi=o("a"),ng=r("MBartModel"),ag=r(`. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART `),ra=o("a"),sg=r("facebook/mbart-large-cc25"),rg=r(" architecture."),ig=d(),no=o("p"),dg=r("Configuration objects inherit from "),zi=o("a"),lg=r("PretrainedConfig"),cg=r(` and can be used to control the model outputs. Read the documentation from `),Bi=o("a"),pg=r("PretrainedConfig"),ug=r(" for more information."),hg=d(),Yd=o("p"),mg=r("Example:"),fg=d(),m(ia.$$.fragment),Qh=d(),ao=o("h2"),Do=o("a"),el=o("span"),m(da.$$.fragment),_g=d(),tl=o("span"),gg=r("MBartTokenizer"),Xh=d(),K=o("div"),m(la.$$.fragment),kg=d(),ol=o("p"),bg=r("Construct an MBART tokenizer."),vg=d(),vt=o("p"),qi=o("a"),yg=r("MBartTokenizer"),Tg=r(" is a subclass of "),$i=o("a"),Mg=r("XLMRobertaTokenizer"),wg=r(`. Refer to superclass `),Fi=o("a"),xg=r("XLMRobertaTokenizer"),zg=r(` for usage examples and documentation concerning the initialization parameters and other methods.`),Bg=d(),ca=o("p"),qg=r("The tokenization method is "),nl=o("code"),$g=r("<tokens> <eos> <language code>"),Fg=r(" for source language documents, and \u201C<language code>\n<tokens> <eos>``` for target language documents."),Eg=d(),al=o("p"),jg=r("Examples:"),Cg=d(),m(pa.$$.fragment),Pg=d(),Go=o("div"),m(ua.$$.fragment),Og=d(),sl=o("p"),Sg=r(`Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Ag=d(),tt=o("div"),m(ha.$$.fragment),Ig=d(),ma=o("p"),Ng=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An MBART sequence has the following format, where `),rl=o("code"),Lg=r("X"),Dg=r(" represents the sequence:"),Gg=d(),fa=o("ul"),_a=o("li"),il=o("code"),Ug=r("input_ids"),Rg=r(" (for encoder) "),dl=o("code"),Wg=r("X [eos, src_lang_code]"),Qg=d(),ga=o("li"),ll=o("code"),Xg=r("decoder_input_ids"),Kg=r(": (for decoder) "),cl=o("code"),Hg=r("X [eos, tgt_lang_code]"),Vg=d(),pl=o("p"),Jg=r(`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),Kh=d(),so=o("h2"),Uo=o("a"),ul=o("span"),m(ka.$$.fragment),Zg=d(),hl=o("span"),Yg=r("MBartTokenizerFast"),Hh=d(),G=o("div"),m(ba.$$.fragment),ek=d(),ro=o("p"),tk=r("Construct a \u201Cfast\u201D MBART tokenizer (backed by HuggingFace\u2019s "),ml=o("em"),ok=r("tokenizers"),nk=r(" library). Based on "),va=o("a"),ak=r("BPE"),sk=r("."),rk=d(),yt=o("p"),Ei=o("a"),ik=r("MBartTokenizerFast"),dk=r(" is a subclass of "),ji=o("a"),lk=r("XLMRobertaTokenizerFast"),ck=r(`. Refer to superclass `),Ci=o("a"),pk=r("XLMRobertaTokenizerFast"),uk=r(` for usage examples and documentation concerning the initialization parameters and other methods.`),hk=d(),ya=o("p"),mk=r("The tokenization method is "),fl=o("code"),fk=r("<tokens> <eos> <language code>"),_k=r(" for source language documents, and \u201C<language code>\n<tokens> <eos>``` for target language documents."),gk=d(),_l=o("p"),kk=r("Examples:"),bk=d(),m(Ta.$$.fragment),vk=d(),Se=o("div"),m(Ma.$$.fragment),yk=d(),gl=o("p"),Tk=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang.`),Mk=d(),wa=o("p"),wk=r("An MBART sequence has the following format, where "),kl=o("code"),xk=r("X"),zk=r(" represents the sequence:"),Bk=d(),xa=o("ul"),za=o("li"),bl=o("code"),qk=r("input_ids"),$k=r(" (for encoder) "),vl=o("code"),Fk=r("X [eos, src_lang_code]"),Ek=d(),Ba=o("li"),yl=o("code"),jk=r("decoder_input_ids"),Ck=r(": (for decoder) "),Tl=o("code"),Pk=r("X [eos, tgt_lang_code]"),Ok=d(),Ml=o("p"),Sk=r(`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),Ak=d(),Ro=o("div"),m(qa.$$.fragment),Ik=d(),wl=o("p"),Nk=r("Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."),Lk=d(),Wo=o("div"),m($a.$$.fragment),Dk=d(),xl=o("p"),Gk=r("Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."),Vh=d(),io=o("h2"),Qo=o("a"),zl=o("span"),m(Fa.$$.fragment),Uk=d(),Bl=o("span"),Rk=r("MBart50Tokenizer"),Jh=d(),L=o("div"),m(Ea.$$.fragment),Wk=d(),ja=o("p"),Qk=r("Construct a MBart50 tokenizer. Based on "),Ca=o("a"),Xk=r("SentencePiece"),Kk=r("."),Hk=d(),Pa=o("p"),Vk=r("This tokenizer inherits from "),Pi=o("a"),Jk=r("PreTrainedTokenizer"),Zk=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Yk=d(),ql=o("p"),eb=r("Examples:"),tb=d(),m(Oa.$$.fragment),ob=d(),ot=o("div"),m(Sa.$$.fragment),nb=d(),Aa=o("p"),ab=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An MBART-50 sequence has the following format, where `),$l=o("code"),sb=r("X"),rb=r(" represents the sequence:"),ib=d(),Ia=o("ul"),Na=o("li"),Fl=o("code"),db=r("input_ids"),lb=r(" (for encoder) "),El=o("code"),cb=r("[src_lang_code] X [eos]"),pb=d(),La=o("li"),jl=o("code"),ub=r("labels"),hb=r(": (for decoder) "),Cl=o("code"),mb=r("[tgt_lang_code] X [eos]"),fb=d(),Pl=o("p"),_b=r(`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),gb=d(),Xo=o("div"),m(Da.$$.fragment),kb=d(),Ol=o("p"),bb=r("Converts a sequence of tokens (strings for sub-words) in a single string."),vb=d(),Ko=o("div"),m(Ga.$$.fragment),yb=d(),Ua=o("p"),Tb=r(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Sl=o("code"),Mb=r("prepare_for_model"),wb=r(" method."),xb=d(),Ho=o("div"),m(Ra.$$.fragment),zb=d(),Al=o("p"),Bb=r("Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."),qb=d(),Vo=o("div"),m(Wa.$$.fragment),$b=d(),Il=o("p"),Fb=r("Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."),Zh=d(),lo=o("h2"),Jo=o("a"),Nl=o("span"),m(Qa.$$.fragment),Eb=d(),Ll=o("span"),jb=r("MBart50TokenizerFast"),Yh=d(),H=o("div"),m(Xa.$$.fragment),Cb=d(),co=o("p"),Pb=r("Construct a \u201Cfast\u201D MBART tokenizer for mBART-50 (backed by HuggingFace\u2019s "),Dl=o("em"),Ob=r("tokenizers"),Sb=r(" library). Based on "),Ka=o("a"),Ab=r("BPE"),Ib=r("."),Nb=d(),Ha=o("p"),Lb=r("This tokenizer inherits from "),Oi=o("a"),Db=r("PreTrainedTokenizerFast"),Gb=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ub=d(),Gl=o("p"),Rb=r("Examples:"),Wb=d(),m(Va.$$.fragment),Qb=d(),Ae=o("div"),m(Ja.$$.fragment),Xb=d(),Ul=o("p"),Kb=r(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang.`),Hb=d(),Za=o("p"),Vb=r("An MBART-50 sequence has the following format, where "),Rl=o("code"),Jb=r("X"),Zb=r(" represents the sequence:"),Yb=d(),Ya=o("ul"),es=o("li"),Wl=o("code"),ev=r("input_ids"),tv=r(" (for encoder) "),Ql=o("code"),ov=r("[src_lang_code] X [eos]"),nv=d(),ts=o("li"),Xl=o("code"),av=r("labels"),sv=r(": (for decoder) "),Kl=o("code"),rv=r("[tgt_lang_code] X [eos]"),iv=d(),Hl=o("p"),dv=r(`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),lv=d(),Zo=o("div"),m(os.$$.fragment),cv=d(),Vl=o("p"),pv=r("Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."),uv=d(),Yo=o("div"),m(ns.$$.fragment),hv=d(),Jl=o("p"),mv=r("Reset the special tokens to the target language setting. prefix=[src_lang_code] and suffix=[eos]."),em=d(),po=o("h2"),en=o("a"),Zl=o("span"),m(as.$$.fragment),fv=d(),Yl=o("span"),_v=r("MBartModel"),tm=d(),He=o("div"),m(ss.$$.fragment),gv=d(),rs=o("p"),kv=r(`The bare MBART Model outputting raw hidden-states without any specific head on top. This model inherits from `),Si=o("a"),bv=r("PreTrainedModel"),vv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yv=d(),is=o("p"),Tv=r("This model is also a PyTorch "),ds=o("a"),Mv=r("torch.nn.Module"),wv=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xv=d(),Ie=o("div"),m(ls.$$.fragment),zv=d(),uo=o("p"),Bv=r("The "),Ai=o("a"),qv=r("MBartModel"),$v=r(" forward method, overrides the "),ec=o("code"),Fv=r("__call__"),Ev=r(" special method."),jv=d(),m(tn.$$.fragment),Cv=d(),tc=o("p"),Pv=r("Example:"),Ov=d(),m(cs.$$.fragment),om=d(),ho=o("h2"),on=o("a"),oc=o("span"),m(ps.$$.fragment),Sv=d(),nc=o("span"),Av=r("MBartForConditionalGeneration"),nm=d(),Ve=o("div"),m(us.$$.fragment),Iv=d(),hs=o("p"),Nv=r(`The MBART Model with a language modeling head. Can be used for summarization. This model inherits from `),Ii=o("a"),Lv=r("PreTrainedModel"),Dv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gv=d(),ms=o("p"),Uv=r("This model is also a PyTorch "),fs=o("a"),Rv=r("torch.nn.Module"),Wv=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qv=d(),E=o("div"),m(_s.$$.fragment),Xv=d(),mo=o("p"),Kv=r("The "),Ni=o("a"),Hv=r("MBartForConditionalGeneration"),Vv=r(" forward method, overrides the "),ac=o("code"),Jv=r("__call__"),Zv=r(" special method."),Yv=d(),m(nn.$$.fragment),ey=d(),sc=o("p"),ty=r("Summarization example::"),oy=d(),rc=o("blockquote"),ic=o("blockquote"),dc=o("blockquote"),lc=o("p"),ny=r("from transformers import MBartTokenizer, MBartForConditionalGeneration, MBartConfig"),ay=d(),cc=o("blockquote"),pc=o("blockquote"),uc=o("blockquote"),hc=o("p"),sy=r(`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),ry=d(),mc=o("blockquote"),fc=o("blockquote"),_c=o("blockquote"),gc=o("p"),iy=r(`ARTICLE_TO_SUMMARIZE = \u201CMeine Freunde sind cool, aber sie essen zu viel Kuchen.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018pt\u2019)`),dy=d(),kc=o("blockquote"),bc=o("blockquote"),gs=o("blockquote"),an=o("h1"),sn=o("a"),vc=o("span"),m(ks.$$.fragment),ly=d(),yc=o("span"),cy=r("Generate Summary"),py=d(),Tc=o("p"),uy=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),hy=d(),Mc=o("p"),my=r("Mask filling example::"),fy=d(),wc=o("blockquote"),xc=o("blockquote"),fo=o("blockquote"),zc=o("p"),_y=r(`from transformers import MBartTokenizer, MBartForConditionalGeneration tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),gy=d(),rn=o("h1"),dn=o("a"),Bc=o("span"),m(bs.$$.fragment),ky=d(),qc=o("span"),by=r("de_DE is the language symbol id <LID> for German"),vy=d(),$c=o("p"),yy=r("TXT = \u201D</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. 
</s> de_DE\u201D"),Ty=d(),Fc=o("blockquote"),Ec=o("blockquote"),jc=o("blockquote"),Cc=o("p"),My=r(`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors=\u2018pt\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),wy=d(),Pc=o("blockquote"),Oc=o("blockquote"),Sc=o("blockquote"),Ac=o("p"),xy=r(`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5)`),zy=d(),Ic=o("blockquote"),Nc=o("blockquote"),Lc=o("blockquote"),Dc=o("p"),By=r("tokenizer.decode(predictions).split()"),am=d(),_o=o("h2"),ln=o("a"),Gc=o("span"),m(vs.$$.fragment),qy=d(),Uc=o("span"),$y=r("MBartForQuestionAnswering"),sm=d(),Fe=o("div"),m(ys.$$.fragment),Fy=d(),go=o("p"),Ey=r(`MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Rc=o("code"),jy=r("span start logits"),Cy=r(" and "),Wc=o("code"),Py=r("span end logits"),Oy=r(")."),Sy=d(),Ts=o("p"),Ay=r("This model inherits from "),Li=o("a"),Iy=r("PreTrainedModel"),Ny=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ly=d(),Ms=o("p"),Dy=r("This model is also a PyTorch "),ws=o("a"),Gy=r("torch.nn.Module"),Uy=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ry=d(),Ne=o("div"),m(xs.$$.fragment),Wy=d(),ko=o("p"),Qy=r("The "),Di=o("a"),Xy=r("MBartForQuestionAnswering"),Ky=r(" forward method, overrides the "),Qc=o("code"),Hy=r("__call__"),Vy=r(" special method."),Jy=d(),m(cn.$$.fragment),Zy=d(),Xc=o("p"),Yy=r("Example:"),eT=d(),m(zs.$$.fragment),rm=d(),bo=o("h2"),pn=o("a"),Kc=o("span"),m(Bs.$$.fragment),tT=d(),Hc=o("span"),oT=r("MBartForSequenceClassification"),im=d(),Ee=o("div"),m(qs.$$.fragment),nT=d(),Vc=o("p"),aT=r(`MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),sT=d(),$s=o("p"),rT=r("This model inherits from "),Gi=o("a"),iT=r("PreTrainedModel"),dT=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),lT=d(),Fs=o("p"),cT=r("This model is also a PyTorch "),Es=o("a"),pT=r("torch.nn.Module"),uT=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hT=d(),ne=o("div"),m(js.$$.fragment),mT=d(),vo=o("p"),fT=r("The "),Ui=o("a"),_T=r("MBartForSequenceClassification"),gT=r(" forward method, overrides the "),Jc=o("code"),kT=r("__call__"),bT=r(" special method."),vT=d(),m(un.$$.fragment),yT=d(),Zc=o("p"),TT=r("Example of single-label classification:"),MT=d(),m(Cs.$$.fragment),wT=d(),Yc=o("p"),xT=r("Example of multi-label classification:"),zT=d(),m(Ps.$$.fragment),dm=d(),yo=o("h2"),hn=o("a"),ep=o("span"),m(Os.$$.fragment),BT=d(),tp=o("span"),qT=r("MBartForCausalLM"),lm=d(),Ss=o("div"),Tt=o("div"),m(As.$$.fragment),$T=d(),op=o("p"),FT=r("Example:"),ET=d(),m(Is.$$.fragment),cm=d(),To=o("h2"),mn=o("a"),np=o("span"),m(Ns.$$.fragment),jT=d(),ap=o("span"),CT=r("TFMBartModel"),pm=d(),je=o("div"),m(Ls.$$.fragment),PT=d(),Ds=o("p"),OT=r(`The bare MBART Model outputting raw hidden-states without any specific head on top. This model inherits from `),Ri=o("a"),ST=r("TFPreTrainedModel"),AT=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),IT=d(),Gs=o("p"),NT=r("This model is also a "),Us=o("a"),LT=r("tf.keras.Model"),DT=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),GT=d(),m(fn.$$.fragment),UT=d(),Le=o("div"),m(Rs.$$.fragment),RT=d(),Mo=o("p"),WT=r("The "),Wi=o("a"),QT=r("TFMBartModel"),XT=r(" forward method, overrides the "),sp=o("code"),KT=r("__call__"),HT=r(" special method."),VT=d(),m(_n.$$.fragment),JT=d(),rp=o("p"),ZT=r("Example:"),YT=d(),m(Ws.$$.fragment),um=d(),wo=o("h2"),gn=o("a"),ip=o("span"),m(Qs.$$.fragment),e1=d(),dp=o("span"),t1=r("TFMBartForConditionalGeneration"),hm=d(),Ce=o("div"),m(Xs.$$.fragment),o1=d(),Ks=o("p"),n1=r(`The MBART Model with a language modeling head. Can be used for summarization. This model inherits from `),Qi=o("a"),a1=r("TFPreTrainedModel"),s1=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),r1=d(),Hs=o("p"),i1=r("This model is also a "),Vs=o("a"),d1=r("tf.keras.Model"),l1=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),c1=d(),m(kn.$$.fragment),p1=d(),O=o("div"),m(Js.$$.fragment),u1=d(),xo=o("p"),h1=r("The "),Xi=o("a"),m1=r("TFMBartForConditionalGeneration"),f1=r(" forward method, overrides the "),lp=o("code"),_1=r("__call__"),g1=r(" special method."),k1=d(),m(bn.$$.fragment),b1=d(),cp=o("p"),v1=r("Summarization example::"),y1=d(),pp=o("blockquote"),up=o("blockquote"),hp=o("blockquote"),mp=o("p"),T1=r("from transformers import MBartTokenizer, TFMBartForConditionalGeneration, MBartConfig"),M1=d(),fp=o("blockquote"),_p=o("blockquote"),gp=o("blockquote"),kp=o("p"),w1=r(`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),x1=d(),bp=o("blockquote"),vp=o("blockquote"),yp=o("blockquote"),Tp=o("p"),z1=r(`ARTICLE_TO_SUMMARIZE = \u201CMeine Freunde sind cool, aber sie essen zu viel Kuchen.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018tf\u2019)`),B1=d(),Mp=o("blockquote"),wp=o("blockquote"),Zs=o("blockquote"),vn=o("h1"),yn=o("a"),xp=o("span"),m(Ys.$$.fragment),q1=d(),zp=o("span"),$1=r("Generate Summary"),F1=d(),Bp=o("p"),E1=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),j1=d(),qp=o("p"),C1=r("Mask filling example::"),P1=d(),$p=o("blockquote"),Fp=o("blockquote"),zo=o("blockquote"),Ep=o("p"),O1=r(`from transformers import MBartTokenizer, TFMBartForConditionalGeneration tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),S1=d(),Tn=o("h1"),Mn=o("a"),jp=o("span"),m(er.$$.fragment),A1=d(),Cp=o("span"),I1=r("de_DE is the language symbol id <LID> for German"),N1=d(),Pp=o("p"),L1=r("TXT = \u201D</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE\u201D"),D1=d(),Op=o("blockquote"),Sp=o("blockquote"),tr=o("blockquote"),Ap=o("p"),G1=r(`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors=\u2018tf\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits probs = tf.nn.softmax(logits[0])`),U1=d(),wn=o("h1"),xn=o("a"),Ip=o("span"),m(or.$$.fragment),R1=d(),Np=o("span"),W1=r("probs[5] is associated with the mask token"),mm=d(),Bo=o("h2"),zn=o("a"),Lp=o("span"),m(nr.$$.fragment),Q1=d(),Dp=o("span"),X1=r("FlaxMBartModel"),fm=d(),V=o("div"),m(ar.$$.fragment),K1=d(),sr=o("p"),H1=r(`The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Ki=o("a"),V1=r("FlaxPreTrainedModel"),J1=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Z1=d(),rr=o("p"),Y1=r("This model is also a Flax Linen "),ir=o("a"),eM=r("flax.nn.Module"),tM=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),oM=d(),Gp=o("p"),nM=r("Finally, this model supports inherent JAX features such as:"),aM=d(),ht=o("ul"),Up=o("li"),dr=o("a"),sM=r("Just-In-Time (JIT) compilation"),rM=d(),Rp=o("li"),lr=o("a"),iM=r("Automatic Differentiation"),dM=d(),Wp=o("li"),cr=o("a"),lM=r("Vectorization"),cM=d(),Qp=o("li"),pr=o("a"),pM=r("Parallelization"),uM=d(),De=o("div"),m(ur.$$.fragment),hM=d(),qo=o("p"),mM=r("The "),Xp=o("code"),fM=r("FlaxMBartPreTrainedModel"),_M=r(" forward method, overrides the "),Kp=o("code"),gM=r("__call__"),kM=r(" special method."),bM=d(),m(Bn.$$.fragment),vM=d(),Hp=o("p"),yM=r("Example:"),TM=d(),m(hr.$$.fragment),MM=d(),Mt=o("div"),m(mr.$$.fragment),wM=d(),Vp=o("p"),xM=r("Example:"),zM=d(),m(fr.$$.fragment),BM=d(),wt=o("div"),m(_r.$$.fragment),qM=d(),Jp=o("p"),$M=r("Example:"),FM=d(),m(gr.$$.fragment),_m=d(),$o=o("h2"),qn=o("a"),Zp=o("span"),m(kr.$$.fragment),EM=d(),Yp=o("span"),jM=r("FlaxMBartForConditionalGeneration"),gm=d(),J=o("div"),m(br.$$.fragment),CM=d(),vr=o("p"),PM=r(`The MMBart Model with a language modeling head. Can be used for summarization. This model inherits from `),Hi=o("a"),OM=r("FlaxPreTrainedModel"),SM=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),AM=d(),yr=o("p"),IM=r("This model is also a Flax Linen "),Tr=o("a"),NM=r("flax.nn.Module"),LM=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),DM=d(),eu=o("p"),GM=r("Finally, this model supports inherent JAX features such as:"),UM=d(),mt=o("ul"),tu=o("li"),Mr=o("a"),RM=r("Just-In-Time (JIT) compilation"),WM=d(),ou=o("li"),wr=o("a"),QM=r("Automatic Differentiation"),XM=d(),nu=o("li"),xr=o("a"),KM=r("Vectorization"),HM=d(),au=o("li"),zr=o("a"),VM=r("Parallelization"),JM=d(),j=o("div"),m(Br.$$.fragment),ZM=d(),Fo=o("p"),YM=r("The "),su=o("code"),e0=r("FlaxMBartPreTrainedModel"),t0=r(" forward method, overrides the "),ru=o("code"),o0=r("__call__"),n0=r(" special method."),a0=d(),m($n.$$.fragment),s0=d(),iu=o("p"),r0=r("Summarization example::"),i0=d(),du=o("blockquote"),lu=o("blockquote"),cu=o("blockquote"),pu=o("p"),d0=r("from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration, MBartConfig"),l0=d(),uu=o("blockquote"),hu=o("blockquote"),mu=o("blockquote"),fu=o("p"),c0=r(`model = FlaxMBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),p0=d(),_u=o("blockquote"),gu=o("blockquote"),ku=o("blockquote"),bu=o("p"),u0=r(`ARTICLE_TO_SUMMARIZE = \u201CMeine Freunde sind cool, aber sie essen zu viel Kuchen.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018np\u2019)`),h0=d(),vu=o("blockquote"),yu=o("blockquote"),qr=o("blockquote"),Fn=o("h1"),En=o("a"),Tu=o("span"),m($r.$$.fragment),m0=d(),Mu=o("span"),f0=r("Generate Summary"),_0=d(),wu=o("p"),g0=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True).sequences print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),k0=d(),xu=o("p"),b0=r("Mask filling example::"),v0=d(),zu=o("blockquote"),Bu=o("blockquote"),Eo=o("blockquote"),qu=o("p"),y0=r(`from transformers 
import MBartTokenizer, FlaxMBartForConditionalGeneration tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),T0=d(),jn=o("h1"),Cn=o("a"),$u=o("span"),m(Fr.$$.fragment),M0=d(),Fu=o("span"),w0=r("de_DE is the language symbol id <LID> for German"),x0=d(),Eu=o("p"),z0=r("TXT = \u201D</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE\u201D"),B0=d(),ju=o("blockquote"),Cu=o("blockquote"),Pu=o("blockquote"),Ou=o("p"),q0=r(`model = FlaxMBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors=\u2018np\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),$0=d(),Su=o("blockquote"),Au=o("blockquote"),Iu=o("blockquote"),Nu=o("p"),F0=r(`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5)`),E0=d(),Lu=o("blockquote"),Du=o("blockquote"),Gu=o("blockquote"),Uu=o("p"),j0=r("tokenizer.decode(predictions).split()"),C0=d(),xt=o("div"),m(Er.$$.fragment),P0=d(),Ru=o("p"),O0=r("Example:"),S0=d(),m(jr.$$.fragment),A0=d(),zt=o("div"),m(Cr.$$.fragment),I0=d(),Wu=o("p"),N0=r("Example:"),L0=d(),m(Pr.$$.fragment),km=d(),jo=o("h2"),Pn=o("a"),Qu=o("span"),m(Or.$$.fragment),D0=d(),Xu=o("span"),G0=r("FlaxMBartForSequenceClassification"),bm=d(),U=o("div"),m(Sr.$$.fragment),U0=d(),Ku=o("p"),R0=r(`MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),W0=d(),Ar=o("p"),Q0=r("This model inherits from "),Vi=o("a"),X0=r("FlaxPreTrainedModel"),K0=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),H0=d(),Ir=o("p"),V0=r("This model is also a Flax Linen "),Nr=o("a"),J0=r("flax.nn.Module"),Z0=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Y0=d(),Hu=o("p"),ew=r("Finally, this model supports inherent JAX features such as:"),tw=d(),ft=o("ul"),Vu=o("li"),Lr=o("a"),ow=r("Just-In-Time (JIT) compilation"),nw=d(),Ju=o("li"),Dr=o("a"),aw=r("Automatic Differentiation"),sw=d(),Zu=o("li"),Gr=o("a"),rw=r("Vectorization"),iw=d(),Yu=o("li"),Ur=o("a"),dw=r("Parallelization"),lw=d(),Ge=o("div"),m(Rr.$$.fragment),cw=d(),Co=o("p"),pw=r("The "),eh=o("code"),uw=r("FlaxMBartPreTrainedModel"),hw=r(" forward method, overrides the "),th=o("code"),mw=r("__call__"),fw=r(" special method."),_w=d(),m(On.$$.fragment),gw=d(),oh=o("p"),kw=r("Example:"),bw=d(),m(Wr.$$.fragment),vw=d(),Bt=o("div"),m(Qr.$$.fragment),yw=d(),nh=o("p"),Tw=r("Example:"),Mw=d(),m(Xr.$$.fragment),ww=d(),qt=o("div"),m(Kr.$$.fragment),xw=d(),ah=o("p"),zw=r("Example:"),Bw=d(),m(Hr.$$.fragment),vm=d(),Po=o("h2"),Sn=o("a"),sh=o("span"),m(Vr.$$.fragment),qw=d(),rh=o("span"),$w=r("FlaxMBartForQuestionAnswering"),ym=d(),R=o("div"),m(Jr.$$.fragment),Fw=d(),Oo=o("p"),Ew=r(`MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),ih=o("code"),jw=r("span start logits"),Cw=r(" and "),dh=o("code"),Pw=r("span end logits"),Ow=r(")."),Sw=d(),Zr=o("p"),Aw=r("This model inherits from "),Ji=o("a"),Iw=r("FlaxPreTrainedModel"),Nw=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lw=d(),Yr=o("p"),Dw=r("This model is also a Flax Linen "),ei=o("a"),Gw=r("flax.nn.Module"),Uw=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Rw=d(),lh=o("p"),Ww=r("Finally, this model supports inherent JAX features such as:"),Qw=d(),_t=o("ul"),ch=o("li"),ti=o("a"),Xw=r("Just-In-Time (JIT) compilation"),Kw=d(),ph=o("li"),oi=o("a"),Hw=r("Automatic Differentiation"),Vw=d(),uh=o("li"),ni=o("a"),Jw=r("Vectorization"),Zw=d(),hh=o("li"),ai=o("a"),Yw=r("Parallelization"),ex=d(),Ue=o("div"),m(si.$$.fragment),tx=d(),So=o("p"),ox=r("The "),mh=o("code"),nx=r("FlaxMBartPreTrainedModel"),ax=r(" forward method, overrides the "),fh=o("code"),sx=r("__call__"),rx=r(" special method."),ix=d(),m(An.$$.fragment),dx=d(),_h=o("p"),lx=r("Example:"),cx=d(),m(ri.$$.fragment),px=d(),$t=o("div"),m(ii.$$.fragment),ux=d(),gh=o("p"),hx=r("Example:"),mx=d(),m(di.$$.fragment),fx=d(),Ft=o("div"),m(li.$$.fragment),_x=d(),kh=o("p"),gx=r("Example:"),kx=d(),m(ci.$$.fragment),this.h()},l(s){const p=d$('[data-svelte="svelte-1phssyn"]',document.head);h=n(p,"META",{name:!0,content:!0}),p.forEach(t),z=l(s),v=n(s,"H1",{class:!0});var pi=a(v);M=n(pi,"A",{id:!0,class:!0,href:!0});var bh=a(M);x=n(bh,"SPAN",{});var vh=a(x);f(T.$$.fragment,vh),vh.forEach(t),bh.forEach(t),y=l(pi),B=n(pi,"SPAN",{});var yh=a(B);st=i(yh,"MBart and MBart-50"),yh.forEach(t),pi.forEach(t),Pe=l(s),F=n(s,"P",{});var In=a(F);We=n(In,"STRONG",{});var Th=a(We);be=i(Th,"DISCLAIMER:"),Th.forEach(t),rt=i(In," If you see something strange, file a "),ve=n(In,"A",{href:!0,rel:!0});var Mh=a(ve);ye=i(Mh,"Github Issue"),Mh.forEach(t),it=i(In,` and assign @patrickvonplaten`),In.forEach(t),Je=l(s),ee=n(s,"H2",{class:!0});var ui=a(ee);W=n(ui,"A",{id:!0,class:!0,href:!0});var wh=a(W);Qe=n(wh,"SPAN",{});var xh=a(Qe);f(ie.$$.fragment,xh),xh.forEach(t),wh.forEach(t),N=l(ui),D=n(ui,"SPAN",{});var zh=a(D);dt=i(zh,"Overview of MBart"),zh.forEach(t),ui.forEach(t),he=l(s),me=n(s,"P",{});var hi=a(me);lt=i(hi,"The MBart model was presented in "),te=n(hi,"A",{href:!0,rel:!0});var yx=a(te);ct=i(yx,"Multilingual Denoising Pre-training for Neural Machine Translation"),yx.forEach(t),pt=i(hi,` by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.`),hi.forEach(t),Q=l(s),Oe=n(s,"P",{});var Tx=a(Oe);Te=i(Tx,`According to the abstract, MBART is a sequence-to-sequence denoising auto-encoder pretrained on large-scale monolingual corpora in many languages using the BART objective. mBART is one of the first methods for pretraining a complete sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only on the encoder, decoder, or reconstructing parts of the text.`),Tx.forEach(t),Ze=l(s),de=n(s,"P",{});var Bh=a(de);Me=i(Bh,"This model was contributed by "),we=n(Bh,"A",{href:!0,rel:!0});var Mx=a(we);ut=i(Mx,"valhalla"),Mx.forEach(t),le=i(Bh,". 
The Authors\u2019 code can be found "),xe=n(Bh,"A",{href:!0,rel:!0});var wx=a(xe);ze=i(wx,"here"),wx.forEach(t),Bh.forEach(t),Ye=l(s),w=n(s,"H3",{class:!0});var Mm=a(w);$=n(Mm,"A",{id:!0,class:!0,href:!0});var xx=a($);Be=n(xx,"SPAN",{});var zx=a(Be);f(Xe.$$.fragment,zx),zx.forEach(t),xx.forEach(t),Wt=l(Mm),oe=n(Mm,"SPAN",{});var Bx=a(oe);Qt=i(Bx,"Training of MBart"),Bx.forEach(t),Mm.forEach(t),gt=l(s),Z=n(s,"P",{});var Et=a(Z);ce=i(Et,`MBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for translation task. As the model is multilingual it expects the sequences in a different format. A special language id token is added in both the source and target text. The source text format is `),pe=n(Et,"CODE",{});var qx=a(pe);Xt=i(qx,"X [eos, src_lang_code]"),qx.forEach(t),Kt=i(Et," where "),ue=n(Et,"CODE",{});var $x=a(ue);Ht=i($x,"X"),$x.forEach(t),Vt=i(Et,` is the source text. The target text format is `),Ke=n(Et,"CODE",{});var Fx=a(Ke);Jt=i(Fx,"[tgt_lang_code] X [eos]"),Fx.forEach(t),e_=i(Et,". "),$d=n(Et,"CODE",{});var Ex=a($d);t_=i(Ex,"bos"),Ex.forEach(t),o_=i(Et," is never used."),Et.forEach(t),qh=l(s),kt=n(s,"P",{});var Zi=a(kt);n_=i(Zi,"The regular "),Kn=n(Zi,"A",{href:!0});var bx=a(Kn);Fd=n(bx,"STRONG",{});var jx=a(Fd);a_=i(jx,"call"),jx.forEach(t),s_=i(bx,"()"),bx.forEach(t),r_=i(Zi,` will encode source text format, and it should be wrapped inside the context manager `),_i=n(Zi,"A",{href:!0});var Cx=a(_i);i_=i(Cx,"as_target_tokenizer()"),Cx.forEach(t),d_=i(Zi," to encode target text format."),Zi.forEach(t),$h=l(s),gi=n(s,"UL",{});var Px=a(gi);Ed=n(Px,"LI",{});var Ox=a(Ed);l_=i(Ox,"Supervised training"),Ox.forEach(t),Px.forEach(t),Fh=l(s),f(Hn.$$.fragment,s),Eh=l(s),ki=n(s,"UL",{});var Sx=a(ki);Vn=n(Sx,"LI",{});var wm=a(Vn);jd=n(wm,"P",{});var Ax=a(jd);c_=i(Ax,"Generation"),Ax.forEach(t),p_=l(wm),Zt=n(wm,"P",{});var Yi=a(Zt);u_=i(Yi,"While generating the target text set the "),Cd=n(Yi,"CODE",{});var Ix=a(Cd);h_=i(Ix,"decoder_start_token_id"),Ix.forEach(t),m_=i(Yi,` to the target language id. The following example shows how to translate English to Romanian using the `),Pd=n(Yi,"EM",{});var Nx=a(Pd);f_=i(Nx,"facebook/mbart-large-en-ro"),Nx.forEach(t),__=i(Yi," model."),Yi.forEach(t),wm.forEach(t),Sx.forEach(t),jh=l(s),f(Jn.$$.fragment,s),Ch=l(s),Yt=n(s,"H2",{class:!0});var xm=a(Yt);Ao=n(xm,"A",{id:!0,class:!0,href:!0});var Lx=a(Ao);Od=n(Lx,"SPAN",{});var Dx=a(Od);f(Zn.$$.fragment,Dx),Dx.forEach(t),Lx.forEach(t),g_=l(xm),Sd=n(xm,"SPAN",{});var Gx=a(Sd);k_=i(Gx,"Overview of MBart-50"),Gx.forEach(t),xm.forEach(t),Ph=l(s),bt=n(s,"P",{});var ed=a(bt);b_=i(ed,"MBart-50 was introduced in the "),bi=n(ed,"EM",{});var vx=a(bi);v_=i(vx,`Multilingual Translation with Extensible Multilingual Pretraining and Finetuning <`),Yn=n(vx,"A",{href:!0,rel:!0});var Ux=a(Yn);y_=i(Ux,"https://arxiv.org/abs/2008.00401>"),Ux.forEach(t),vx.forEach(t),T_=i(ed,` paper by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. 
MBart-50 is created using the original `),Ad=n(ed,"EM",{});var Rx=a(Ad);M_=i(Rx,"mbart-large-cc25"),Rx.forEach(t),w_=i(ed,` checkpoint by extendeding its embedding layers with randomly initialized vectors for an extra set of 25 language tokens and then pretrained on 50 languages.`),ed.forEach(t),Oh=l(s),vi=n(s,"P",{});var Wx=a(vi);x_=i(Wx,"According to the abstract"),Wx.forEach(t),Sh=l(s),yi=n(s,"P",{});var Qx=a(yi);Id=n(Qx,"EM",{});var Xx=a(Id);z_=i(Xx,`Multilingual translation models can be created through multilingual finetuning. Instead of finetuning on one direction, a pretrained model is finetuned on many directions at the same time. It demonstrates that pretrained models can be extended to incorporate additional languages without loss of performance. Multilingual finetuning improves on average 1 BLEU over the strongest baselines (being either multilingual from scratch or bilingual finetuning) while improving 9.3 BLEU on average over bilingual baselines from scratch.`),Xx.forEach(t),Qx.forEach(t),Ah=l(s),eo=n(s,"H3",{class:!0});var zm=a(eo);Io=n(zm,"A",{id:!0,class:!0,href:!0});var Kx=a(Io);Nd=n(Kx,"SPAN",{});var Hx=a(Nd);f(ea.$$.fragment,Hx),Hx.forEach(t),Kx.forEach(t),B_=l(zm),Ld=n(zm,"SPAN",{});var Vx=a(Ld);q_=i(Vx,"Training of MBart-50"),Vx.forEach(t),zm.forEach(t),Ih=l(s),et=n(s,"P",{});var Nn=a(et);$_=i(Nn,`The text format for MBart-50 is slightly different from mBART. For MBart-50 the language id token is used as a prefix for both source and target text i.e the text format is `),Dd=n(Nn,"CODE",{});var Jx=a(Dd);F_=i(Jx,"[lang_code] X [eos]"),Jx.forEach(t),E_=i(Nn,", where "),Gd=n(Nn,"CODE",{});var Zx=a(Gd);j_=i(Zx,"lang_code"),Zx.forEach(t),C_=i(Nn,` is source language id for source text and target language id for target text, with `),Ud=n(Nn,"CODE",{});var Yx=a(Ud);P_=i(Yx,"X"),Yx.forEach(t),O_=i(Nn,` being the source or target text respectively.`),Nn.forEach(t),Nh=l(s),No=n(s,"P",{});var Bm=a(No);S_=i(Bm,"MBart-50 has its own tokenizer "),Ti=n(Bm,"A",{href:!0});var e2=a(Ti);A_=i(e2,"MBart50Tokenizer"),e2.forEach(t),I_=i(Bm,"."),Bm.forEach(t),Lh=l(s),Mi=n(s,"UL",{});var t2=a(Mi);Rd=n(t2,"LI",{});var o2=a(Rd);N_=i(o2,"Supervised training"),o2.forEach(t),t2.forEach(t),Dh=l(s),f(ta.$$.fragment,s),Gh=l(s),wi=n(s,"UL",{});var n2=a(wi);oa=n(n2,"LI",{});var qm=a(oa);Wd=n(qm,"P",{});var a2=a(Wd);L_=i(a2,"Generation"),a2.forEach(t),D_=l(qm),qe=n(qm,"P",{});var nt=a(qe);G_=i(nt,"To generate using the mBART-50 multilingual translation models, "),Qd=n(nt,"CODE",{});var s2=a(Qd);U_=i(s2,"eos_token_id"),s2.forEach(t),R_=i(nt,` is used as the `),Xd=n(nt,"CODE",{});var r2=a(Xd);W_=i(r2,"decoder_start_token_id"),r2.forEach(t),Q_=i(nt,` and the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the `),Kd=n(nt,"EM",{});var i2=a(Kd);X_=i(i2,"forced_bos_token_id"),i2.forEach(t),K_=i(nt," parameter to the "),Hd=n(nt,"EM",{});var d2=a(Hd);H_=i(d2,"generate"),d2.forEach(t),V_=i(nt,` method. 
The following example shows how to translate between Hindi to French and Arabic to English using the `),Vd=n(nt,"EM",{});var l2=a(Vd);J_=i(l2,"facebook/mbart-50-large-many-to-many"),l2.forEach(t),Z_=i(nt," checkpoint."),nt.forEach(t),qm.forEach(t),n2.forEach(t),Uh=l(s),f(na.$$.fragment,s),Rh=l(s),to=n(s,"H2",{class:!0});var $m=a(to);Lo=n($m,"A",{id:!0,class:!0,href:!0});var c2=a(Lo);Jd=n(c2,"SPAN",{});var p2=a(Jd);f(aa.$$.fragment,p2),p2.forEach(t),c2.forEach(t),Y_=l($m),Zd=n($m,"SPAN",{});var u2=a(Zd);eg=i(u2,"MBartConfig"),u2.forEach(t),$m.forEach(t),Wh=l(s),$e=n(s,"DIV",{class:!0});var jt=a($e);f(sa.$$.fragment,jt),tg=l(jt),oo=n(jt,"P",{});var td=a(oo);og=i(td,"This is the configuration class to store the configuration of a "),xi=n(td,"A",{href:!0});var h2=a(xi);ng=i(h2,"MBartModel"),h2.forEach(t),ag=i(td,`. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART `),ra=n(td,"A",{href:!0,rel:!0});var m2=a(ra);sg=i(m2,"facebook/mbart-large-cc25"),m2.forEach(t),rg=i(td," architecture."),td.forEach(t),ig=l(jt),no=n(jt,"P",{});var od=a(no);dg=i(od,"Configuration objects inherit from "),zi=n(od,"A",{href:!0});var f2=a(zi);lg=i(f2,"PretrainedConfig"),f2.forEach(t),cg=i(od,` and can be used to control the model outputs. Read the documentation from `),Bi=n(od,"A",{href:!0});var _2=a(Bi);pg=i(_2,"PretrainedConfig"),_2.forEach(t),ug=i(od," for more information."),od.forEach(t),hg=l(jt),Yd=n(jt,"P",{});var g2=a(Yd);mg=i(g2,"Example:"),g2.forEach(t),fg=l(jt),f(ia.$$.fragment,jt),jt.forEach(t),Qh=l(s),ao=n(s,"H2",{class:!0});var Fm=a(ao);Do=n(Fm,"A",{id:!0,class:!0,href:!0});var k2=a(Do);el=n(k2,"SPAN",{});var b2=a(el);f(da.$$.fragment,b2),b2.forEach(t),k2.forEach(t),_g=l(Fm),tl=n(Fm,"SPAN",{});var v2=a(tl);gg=i(v2,"MBartTokenizer"),v2.forEach(t),Fm.forEach(t),Xh=l(s),K=n(s,"DIV",{class:!0});var fe=a(K);f(la.$$.fragment,fe),kg=l(fe),ol=n(fe,"P",{});var y2=a(ol);bg=i(y2,"Construct an MBART tokenizer."),y2.forEach(t),vg=l(fe),vt=n(fe,"P",{});var mi=a(vt);qi=n(mi,"A",{href:!0});var T2=a(qi);yg=i(T2,"MBartTokenizer"),T2.forEach(t),Tg=i(mi," is a subclass of "),$i=n(mi,"A",{href:!0});var M2=a($i);Mg=i(M2,"XLMRobertaTokenizer"),M2.forEach(t),wg=i(mi,`. Refer to superclass `),Fi=n(mi,"A",{href:!0});var w2=a(Fi);xg=i(w2,"XLMRobertaTokenizer"),w2.forEach(t),zg=i(mi,` for usage examples and documentation concerning the initialization parameters and other methods.`),mi.forEach(t),Bg=l(fe),ca=n(fe,"P",{});var Em=a(ca);qg=i(Em,"The tokenization method is "),nl=n(Em,"CODE",{});var x2=a(nl);$g=i(x2,"<tokens> <eos> <language code>"),x2.forEach(t),Fg=i(Em," for source language documents, and \u201C<language code>\n<tokens> <eos>``` for target language documents."),Em.forEach(t),Eg=l(fe),al=n(fe,"P",{});var z2=a(al);jg=i(z2,"Examples:"),z2.forEach(t),Cg=l(fe),f(pa.$$.fragment,fe),Pg=l(fe),Go=n(fe,"DIV",{class:!0});var jm=a(Go);f(ua.$$.fragment,jm),Og=l(jm),sl=n(jm,"P",{});var B2=a(sl);Sg=i(B2,`Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),B2.forEach(t),jm.forEach(t),Ag=l(fe),tt=n(fe,"DIV",{class:!0});var Ln=a(tt);f(ha.$$.fragment,Ln),Ig=l(Ln),ma=n(Ln,"P",{});var Cm=a(ma);Ng=i(Cm,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An MBART sequence has the following format, where `),rl=n(Cm,"CODE",{});var q2=a(rl);Lg=i(q2,"X"),q2.forEach(t),Dg=i(Cm," represents the sequence:"),Cm.forEach(t),Gg=l(Ln),fa=n(Ln,"UL",{});var Pm=a(fa);_a=n(Pm,"LI",{});var Om=a(_a);il=n(Om,"CODE",{});var $2=a(il);Ug=i($2,"input_ids"),$2.forEach(t),Rg=i(Om," (for encoder) "),dl=n(Om,"CODE",{});var F2=a(dl);Wg=i(F2,"X [eos, src_lang_code]"),F2.forEach(t),Om.forEach(t),Qg=l(Pm),ga=n(Pm,"LI",{});var Sm=a(ga);ll=n(Sm,"CODE",{});var E2=a(ll);Xg=i(E2,"decoder_input_ids"),E2.forEach(t),Kg=i(Sm,": (for decoder) "),cl=n(Sm,"CODE",{});var j2=a(cl);Hg=i(j2,"X [eos, tgt_lang_code]"),j2.forEach(t),Sm.forEach(t),Pm.forEach(t),Vg=l(Ln),pl=n(Ln,"P",{});var C2=a(pl);Jg=i(C2,`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),C2.forEach(t),Ln.forEach(t),fe.forEach(t),Kh=l(s),so=n(s,"H2",{class:!0});var Am=a(so);Uo=n(Am,"A",{id:!0,class:!0,href:!0});var P2=a(Uo);ul=n(P2,"SPAN",{});var O2=a(ul);f(ka.$$.fragment,O2),O2.forEach(t),P2.forEach(t),Zg=l(Am),hl=n(Am,"SPAN",{});var S2=a(hl);Yg=i(S2,"MBartTokenizerFast"),S2.forEach(t),Am.forEach(t),Hh=l(s),G=n(s,"DIV",{class:!0});var ae=a(G);f(ba.$$.fragment,ae),ek=l(ae),ro=n(ae,"P",{});var nd=a(ro);tk=i(nd,"Construct a \u201Cfast\u201D MBART tokenizer (backed by HuggingFace\u2019s "),ml=n(nd,"EM",{});var A2=a(ml);ok=i(A2,"tokenizers"),A2.forEach(t),nk=i(nd," library). Based on "),va=n(nd,"A",{href:!0,rel:!0});var I2=a(va);ak=i(I2,"BPE"),I2.forEach(t),sk=i(nd,"."),nd.forEach(t),rk=l(ae),yt=n(ae,"P",{});var fi=a(yt);Ei=n(fi,"A",{href:!0});var N2=a(Ei);ik=i(N2,"MBartTokenizerFast"),N2.forEach(t),dk=i(fi," is a subclass of "),ji=n(fi,"A",{href:!0});var L2=a(ji);lk=i(L2,"XLMRobertaTokenizerFast"),L2.forEach(t),ck=i(fi,`. Refer to superclass `),Ci=n(fi,"A",{href:!0});var D2=a(Ci);pk=i(D2,"XLMRobertaTokenizerFast"),D2.forEach(t),uk=i(fi,` for usage examples and documentation concerning the initialization parameters and other methods.`),fi.forEach(t),hk=l(ae),ya=n(ae,"P",{});var Im=a(ya);mk=i(Im,"The tokenization method is "),fl=n(Im,"CODE",{});var G2=a(fl);fk=i(G2,"<tokens> <eos> <language code>"),G2.forEach(t),_k=i(Im," for source language documents, and \u201C<language code>\n<tokens> <eos>``` for target language documents."),Im.forEach(t),gk=l(ae),_l=n(ae,"P",{});var U2=a(_l);kk=i(U2,"Examples:"),U2.forEach(t),bk=l(ae),f(Ta.$$.fragment,ae),vk=l(ae),Se=n(ae,"DIV",{class:!0});var Ct=a(Se);f(Ma.$$.fragment,Ct),yk=l(Ct),gl=n(Ct,"P",{});var R2=a(gl);Tk=i(R2,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang.`),R2.forEach(t),Mk=l(Ct),wa=n(Ct,"P",{});var Nm=a(wa);wk=i(Nm,"An MBART sequence has the following format, where "),kl=n(Nm,"CODE",{});var W2=a(kl);xk=i(W2,"X"),W2.forEach(t),zk=i(Nm," represents the sequence:"),Nm.forEach(t),Bk=l(Ct),xa=n(Ct,"UL",{});var Lm=a(xa);za=n(Lm,"LI",{});var Dm=a(za);bl=n(Dm,"CODE",{});var Q2=a(bl);qk=i(Q2,"input_ids"),Q2.forEach(t),$k=i(Dm," (for encoder) "),vl=n(Dm,"CODE",{});var X2=a(vl);Fk=i(X2,"X [eos, src_lang_code]"),X2.forEach(t),Dm.forEach(t),Ek=l(Lm),Ba=n(Lm,"LI",{});var Gm=a(Ba);yl=n(Gm,"CODE",{});var K2=a(yl);jk=i(K2,"decoder_input_ids"),K2.forEach(t),Ck=i(Gm,": (for decoder) "),Tl=n(Gm,"CODE",{});var H2=a(Tl);Pk=i(H2,"X [eos, tgt_lang_code]"),H2.forEach(t),Gm.forEach(t),Lm.forEach(t),Ok=l(Ct),Ml=n(Ct,"P",{});var V2=a(Ml);Sk=i(V2,`BOS is never used. 
Pairs of sequences are not the expected use case, but they will be handled without a separator.`),V2.forEach(t),Ct.forEach(t),Ak=l(ae),Ro=n(ae,"DIV",{class:!0});var Um=a(Ro);f(qa.$$.fragment,Um),Ik=l(Um),wl=n(Um,"P",{});var J2=a(wl);Nk=i(J2,"Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."),J2.forEach(t),Um.forEach(t),Lk=l(ae),Wo=n(ae,"DIV",{class:!0});var Rm=a(Wo);f($a.$$.fragment,Rm),Dk=l(Rm),xl=n(Rm,"P",{});var Z2=a(xl);Gk=i(Z2,"Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."),Z2.forEach(t),Rm.forEach(t),ae.forEach(t),Vh=l(s),io=n(s,"H2",{class:!0});var Wm=a(io);Qo=n(Wm,"A",{id:!0,class:!0,href:!0});var Y2=a(Qo);zl=n(Y2,"SPAN",{});var ez=a(zl);f(Fa.$$.fragment,ez),ez.forEach(t),Y2.forEach(t),Uk=l(Wm),Bl=n(Wm,"SPAN",{});var tz=a(Bl);Rk=i(tz,"MBart50Tokenizer"),tz.forEach(t),Wm.forEach(t),Jh=l(s),L=n(s,"DIV",{class:!0});var Y=a(L);f(Ea.$$.fragment,Y),Wk=l(Y),ja=n(Y,"P",{});var Qm=a(ja);Qk=i(Qm,"Construct a MBart50 tokenizer. Based on "),Ca=n(Qm,"A",{href:!0,rel:!0});var oz=a(Ca);Xk=i(oz,"SentencePiece"),oz.forEach(t),Kk=i(Qm,"."),Qm.forEach(t),Hk=l(Y),Pa=n(Y,"P",{});var Xm=a(Pa);Vk=i(Xm,"This tokenizer inherits from "),Pi=n(Xm,"A",{href:!0});var nz=a(Pi);Jk=i(nz,"PreTrainedTokenizer"),nz.forEach(t),Zk=i(Xm,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Xm.forEach(t),Yk=l(Y),ql=n(Y,"P",{});var az=a(ql);eb=i(az,"Examples:"),az.forEach(t),tb=l(Y),f(Oa.$$.fragment,Y),ob=l(Y),ot=n(Y,"DIV",{class:!0});var Dn=a(ot);f(Sa.$$.fragment,Dn),nb=l(Dn),Aa=n(Dn,"P",{});var Km=a(Aa);ab=i(Km,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART-50 sequence has the following format, where `),$l=n(Km,"CODE",{});var sz=a($l);sb=i(sz,"X"),sz.forEach(t),rb=i(Km," represents the sequence:"),Km.forEach(t),ib=l(Dn),Ia=n(Dn,"UL",{});var Hm=a(Ia);Na=n(Hm,"LI",{});var Vm=a(Na);Fl=n(Vm,"CODE",{});var rz=a(Fl);db=i(rz,"input_ids"),rz.forEach(t),lb=i(Vm," (for encoder) "),El=n(Vm,"CODE",{});var iz=a(El);cb=i(iz,"[src_lang_code] X [eos]"),iz.forEach(t),Vm.forEach(t),pb=l(Hm),La=n(Hm,"LI",{});var Jm=a(La);jl=n(Jm,"CODE",{});var dz=a(jl);ub=i(dz,"labels"),dz.forEach(t),hb=i(Jm,": (for decoder) "),Cl=n(Jm,"CODE",{});var lz=a(Cl);mb=i(lz,"[tgt_lang_code] X [eos]"),lz.forEach(t),Jm.forEach(t),Hm.forEach(t),fb=l(Dn),Pl=n(Dn,"P",{});var cz=a(Pl);_b=i(cz,`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),cz.forEach(t),Dn.forEach(t),gb=l(Y),Xo=n(Y,"DIV",{class:!0});var Zm=a(Xo);f(Da.$$.fragment,Zm),kb=l(Zm),Ol=n(Zm,"P",{});var pz=a(Ol);bb=i(pz,"Converts a sequence of tokens (strings for sub-words) in a single string."),pz.forEach(t),Zm.forEach(t),vb=l(Y),Ko=n(Y,"DIV",{class:!0});var Ym=a(Ko);f(Ga.$$.fragment,Ym),yb=l(Ym),Ua=n(Ym,"P",{});var ef=a(Ua);Tb=i(ef,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Sl=n(ef,"CODE",{});var uz=a(Sl);Mb=i(uz,"prepare_for_model"),uz.forEach(t),wb=i(ef," method."),ef.forEach(t),Ym.forEach(t),xb=l(Y),Ho=n(Y,"DIV",{class:!0});var tf=a(Ho);f(Ra.$$.fragment,tf),zb=l(tf),Al=n(tf,"P",{});var hz=a(Al);Bb=i(hz,"Reset the special tokens to the source lang setting. 
prefix=[src_lang_code] and suffix=[eos]."),hz.forEach(t),tf.forEach(t),qb=l(Y),Vo=n(Y,"DIV",{class:!0});var of=a(Vo);f(Wa.$$.fragment,of),$b=l(of),Il=n(of,"P",{});var mz=a(Il);Fb=i(mz,"Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."),mz.forEach(t),of.forEach(t),Y.forEach(t),Zh=l(s),lo=n(s,"H2",{class:!0});var nf=a(lo);Jo=n(nf,"A",{id:!0,class:!0,href:!0});var fz=a(Jo);Nl=n(fz,"SPAN",{});var _z=a(Nl);f(Qa.$$.fragment,_z),_z.forEach(t),fz.forEach(t),Eb=l(nf),Ll=n(nf,"SPAN",{});var gz=a(Ll);jb=i(gz,"MBart50TokenizerFast"),gz.forEach(t),nf.forEach(t),Yh=l(s),H=n(s,"DIV",{class:!0});var _e=a(H);f(Xa.$$.fragment,_e),Cb=l(_e),co=n(_e,"P",{});var ad=a(co);Pb=i(ad,"Construct a \u201Cfast\u201D MBART tokenizer for mBART-50 (backed by HuggingFace\u2019s "),Dl=n(ad,"EM",{});var kz=a(Dl);Ob=i(kz,"tokenizers"),kz.forEach(t),Sb=i(ad," library). Based on "),Ka=n(ad,"A",{href:!0,rel:!0});var bz=a(Ka);Ab=i(bz,"BPE"),bz.forEach(t),Ib=i(ad,"."),ad.forEach(t),Nb=l(_e),Ha=n(_e,"P",{});var af=a(Ha);Lb=i(af,"This tokenizer inherits from "),Oi=n(af,"A",{href:!0});var vz=a(Oi);Db=i(vz,"PreTrainedTokenizerFast"),vz.forEach(t),Gb=i(af,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),af.forEach(t),Ub=l(_e),Gl=n(_e,"P",{});var yz=a(Gl);Rb=i(yz,"Examples:"),yz.forEach(t),Wb=l(_e),f(Va.$$.fragment,_e),Qb=l(_e),Ae=n(_e,"DIV",{class:!0});var Pt=a(Ae);f(Ja.$$.fragment,Pt),Xb=l(Pt),Ul=n(Pt,"P",{});var Tz=a(Ul);Kb=i(Tz,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang.`),Tz.forEach(t),Hb=l(Pt),Za=n(Pt,"P",{});var sf=a(Za);Vb=i(sf,"An MBART-50 sequence has the following format, where "),Rl=n(sf,"CODE",{});var Mz=a(Rl);Jb=i(Mz,"X"),Mz.forEach(t),Zb=i(sf," represents the sequence:"),sf.forEach(t),Yb=l(Pt),Ya=n(Pt,"UL",{});var rf=a(Ya);es=n(rf,"LI",{});var df=a(es);Wl=n(df,"CODE",{});var wz=a(Wl);ev=i(wz,"input_ids"),wz.forEach(t),tv=i(df," (for encoder) "),Ql=n(df,"CODE",{});var xz=a(Ql);ov=i(xz,"[src_lang_code] X [eos]"),xz.forEach(t),df.forEach(t),nv=l(rf),ts=n(rf,"LI",{});var lf=a(ts);Xl=n(lf,"CODE",{});var zz=a(Xl);av=i(zz,"labels"),zz.forEach(t),sv=i(lf,": (for decoder) "),Kl=n(lf,"CODE",{});var Bz=a(Kl);rv=i(Bz,"[tgt_lang_code] X [eos]"),Bz.forEach(t),lf.forEach(t),rf.forEach(t),iv=l(Pt),Hl=n(Pt,"P",{});var qz=a(Hl);dv=i(qz,`BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.`),qz.forEach(t),Pt.forEach(t),lv=l(_e),Zo=n(_e,"DIV",{class:!0});var cf=a(Zo);f(os.$$.fragment,cf),cv=l(cf),Vl=n(cf,"P",{});var $z=a(Vl);pv=i($z,"Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."),$z.forEach(t),cf.forEach(t),uv=l(_e),Yo=n(_e,"DIV",{class:!0});var pf=a(Yo);f(ns.$$.fragment,pf),hv=l(pf),Jl=n(pf,"P",{});var Fz=a(Jl);mv=i(Fz,"Reset the special tokens to the target language setting. 
prefix=[src_lang_code] and suffix=[eos]."),Fz.forEach(t),pf.forEach(t),_e.forEach(t),em=l(s),po=n(s,"H2",{class:!0});var uf=a(po);en=n(uf,"A",{id:!0,class:!0,href:!0});var Ez=a(en);Zl=n(Ez,"SPAN",{});var jz=a(Zl);f(as.$$.fragment,jz),jz.forEach(t),Ez.forEach(t),fv=l(uf),Yl=n(uf,"SPAN",{});var Cz=a(Yl);_v=i(Cz,"MBartModel"),Cz.forEach(t),uf.forEach(t),tm=l(s),He=n(s,"DIV",{class:!0});var Gn=a(He);f(ss.$$.fragment,Gn),gv=l(Gn),rs=n(Gn,"P",{});var hf=a(rs);kv=i(hf,`The bare MBART Model outputting raw hidden-states without any specific head on top. This model inherits from `),Si=n(hf,"A",{href:!0});var Pz=a(Si);bv=i(Pz,"PreTrainedModel"),Pz.forEach(t),vv=i(hf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hf.forEach(t),yv=l(Gn),is=n(Gn,"P",{});var mf=a(is);Tv=i(mf,"This model is also a PyTorch "),ds=n(mf,"A",{href:!0,rel:!0});var Oz=a(ds);Mv=i(Oz,"torch.nn.Module"),Oz.forEach(t),wv=i(mf,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mf.forEach(t),xv=l(Gn),Ie=n(Gn,"DIV",{class:!0});var Ot=a(Ie);f(ls.$$.fragment,Ot),zv=l(Ot),uo=n(Ot,"P",{});var sd=a(uo);Bv=i(sd,"The "),Ai=n(sd,"A",{href:!0});var Sz=a(Ai);qv=i(Sz,"MBartModel"),Sz.forEach(t),$v=i(sd," forward method, overrides the "),ec=n(sd,"CODE",{});var Az=a(ec);Fv=i(Az,"__call__"),Az.forEach(t),Ev=i(sd," special method."),sd.forEach(t),jv=l(Ot),f(tn.$$.fragment,Ot),Cv=l(Ot),tc=n(Ot,"P",{});var Iz=a(tc);Pv=i(Iz,"Example:"),Iz.forEach(t),Ov=l(Ot),f(cs.$$.fragment,Ot),Ot.forEach(t),Gn.forEach(t),om=l(s),ho=n(s,"H2",{class:!0});var ff=a(ho);on=n(ff,"A",{id:!0,class:!0,href:!0});var Nz=a(on);oc=n(Nz,"SPAN",{});var Lz=a(oc);f(ps.$$.fragment,Lz),Lz.forEach(t),Nz.forEach(t),Sv=l(ff),nc=n(ff,"SPAN",{});var Dz=a(nc);Av=i(Dz,"MBartForConditionalGeneration"),Dz.forEach(t),ff.forEach(t),nm=l(s),Ve=n(s,"DIV",{class:!0});var Un=a(Ve);f(us.$$.fragment,Un),Iv=l(Un),hs=n(Un,"P",{});var _f=a(hs);Nv=i(_f,`The MBART Model with a language modeling head. Can be used for summarization. This model inherits from `),Ii=n(_f,"A",{href:!0});var Gz=a(Ii);Lv=i(Gz,"PreTrainedModel"),Gz.forEach(t),Dv=i(_f,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_f.forEach(t),Gv=l(Un),ms=n(Un,"P",{});var gf=a(ms);Uv=i(gf,"This model is also a PyTorch "),fs=n(gf,"A",{href:!0,rel:!0});var Uz=a(fs);Rv=i(Uz,"torch.nn.Module"),Uz.forEach(t),Wv=i(gf,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gf.forEach(t),Qv=l(Un),E=n(Un,"DIV",{class:!0});var S=a(E);f(_s.$$.fragment,S),Xv=l(S),mo=n(S,"P",{});var rd=a(mo);Kv=i(rd,"The "),Ni=n(rd,"A",{href:!0});var Rz=a(Ni);Hv=i(Rz,"MBartForConditionalGeneration"),Rz.forEach(t),Vv=i(rd," forward method, overrides the "),ac=n(rd,"CODE",{});var Wz=a(ac);Jv=i(Wz,"__call__"),Wz.forEach(t),Zv=i(rd," special method."),rd.forEach(t),Yv=l(S),f(nn.$$.fragment,S),ey=l(S),sc=n(S,"P",{});var Qz=a(sc);ty=i(Qz,"Summarization example::"),Qz.forEach(t),oy=l(S),rc=n(S,"BLOCKQUOTE",{});var Xz=a(rc);ic=n(Xz,"BLOCKQUOTE",{});var Kz=a(ic);dc=n(Kz,"BLOCKQUOTE",{});var Hz=a(dc);lc=n(Hz,"P",{});var Vz=a(lc);ny=i(Vz,"from transformers import MBartTokenizer, MBartForConditionalGeneration, MBartConfig"),Vz.forEach(t),Hz.forEach(t),Kz.forEach(t),Xz.forEach(t),ay=l(S),cc=n(S,"BLOCKQUOTE",{});var Jz=a(cc);pc=n(Jz,"BLOCKQUOTE",{});var Zz=a(pc);uc=n(Zz,"BLOCKQUOTE",{});var Yz=a(uc);hc=n(Yz,"P",{});var eB=a(hc);sy=i(eB,`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),eB.forEach(t),Yz.forEach(t),Zz.forEach(t),Jz.forEach(t),ry=l(S),mc=n(S,"BLOCKQUOTE",{});var tB=a(mc);fc=n(tB,"BLOCKQUOTE",{});var oB=a(fc);_c=n(oB,"BLOCKQUOTE",{});var nB=a(_c);gc=n(nB,"P",{});var aB=a(gc);iy=i(aB,`ARTICLE_TO_SUMMARIZE = \u201CMeine Freunde sind cool, aber sie essen zu viel Kuchen.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018pt\u2019)`),aB.forEach(t),nB.forEach(t),oB.forEach(t),tB.forEach(t),dy=l(S),kc=n(S,"BLOCKQUOTE",{});var sB=a(kc);bc=n(sB,"BLOCKQUOTE",{});var rB=a(bc);gs=n(rB,"BLOCKQUOTE",{});var kf=a(gs);an=n(kf,"H1",{class:!0});var bf=a(an);sn=n(bf,"A",{id:!0,class:!0,href:!0});var iB=a(sn);vc=n(iB,"SPAN",{});var dB=a(vc);f(ks.$$.fragment,dB),dB.forEach(t),iB.forEach(t),ly=l(bf),yc=n(bf,"SPAN",{});var lB=a(yc);cy=i(lB,"Generate Summary"),lB.forEach(t),bf.forEach(t),py=l(kf),Tc=n(kf,"P",{});var cB=a(Tc);uy=i(cB,`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),cB.forEach(t),kf.forEach(t),rB.forEach(t),sB.forEach(t),hy=l(S),Mc=n(S,"P",{});var pB=a(Mc);my=i(pB,"Mask filling example::"),pB.forEach(t),fy=l(S),wc=n(S,"BLOCKQUOTE",{});var uB=a(wc);xc=n(uB,"BLOCKQUOTE",{});var hB=a(xc);fo=n(hB,"BLOCKQUOTE",{});var id=a(fo);zc=n(id,"P",{});var mB=a(zc);_y=i(mB,`from transformers import MBartTokenizer, MBartForConditionalGeneration tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),mB.forEach(t),gy=l(id),rn=n(id,"H1",{class:!0});var vf=a(rn);dn=n(vf,"A",{id:!0,class:!0,href:!0});var fB=a(dn);Bc=n(fB,"SPAN",{});var _B=a(Bc);f(bs.$$.fragment,_B),_B.forEach(t),fB.forEach(t),ky=l(vf),qc=n(vf,"SPAN",{});var gB=a(qc);by=i(gB,"de_DE is the language symbol id <LID> for German"),gB.forEach(t),vf.forEach(t),vy=l(id),$c=n(id,"P",{});var kB=a($c);yy=i(kB,"TXT = \u201D</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. 
</s> de_DE\u201D"),kB.forEach(t),id.forEach(t),hB.forEach(t),uB.forEach(t),Ty=l(S),Fc=n(S,"BLOCKQUOTE",{});var bB=a(Fc);Ec=n(bB,"BLOCKQUOTE",{});var vB=a(Ec);jc=n(vB,"BLOCKQUOTE",{});var yB=a(jc);Cc=n(yB,"P",{});var TB=a(Cc);My=i(TB,`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors=\u2018pt\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),TB.forEach(t),yB.forEach(t),vB.forEach(t),bB.forEach(t),wy=l(S),Pc=n(S,"BLOCKQUOTE",{});var MB=a(Pc);Oc=n(MB,"BLOCKQUOTE",{});var wB=a(Oc);Sc=n(wB,"BLOCKQUOTE",{});var xB=a(Sc);Ac=n(xB,"P",{});var zB=a(Ac);xy=i(zB,`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5)`),zB.forEach(t),xB.forEach(t),wB.forEach(t),MB.forEach(t),zy=l(S),Ic=n(S,"BLOCKQUOTE",{});var BB=a(Ic);Nc=n(BB,"BLOCKQUOTE",{});var qB=a(Nc);Lc=n(qB,"BLOCKQUOTE",{});var $B=a(Lc);Dc=n($B,"P",{});var FB=a(Dc);By=i(FB,"tokenizer.decode(predictions).split()"),FB.forEach(t),$B.forEach(t),qB.forEach(t),BB.forEach(t),S.forEach(t),Un.forEach(t),am=l(s),_o=n(s,"H2",{class:!0});var yf=a(_o);ln=n(yf,"A",{id:!0,class:!0,href:!0});var EB=a(ln);Gc=n(EB,"SPAN",{});var jB=a(Gc);f(vs.$$.fragment,jB),jB.forEach(t),EB.forEach(t),qy=l(yf),Uc=n(yf,"SPAN",{});var CB=a(Uc);$y=i(CB,"MBartForQuestionAnswering"),CB.forEach(t),yf.forEach(t),sm=l(s),Fe=n(s,"DIV",{class:!0});var St=a(Fe);f(ys.$$.fragment,St),Fy=l(St),go=n(St,"P",{});var dd=a(go);Ey=i(dd,`MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),Rc=n(dd,"CODE",{});var PB=a(Rc);jy=i(PB,"span start logits"),PB.forEach(t),Cy=i(dd," and "),Wc=n(dd,"CODE",{});var OB=a(Wc);Py=i(OB,"span end logits"),OB.forEach(t),Oy=i(dd,")."),dd.forEach(t),Sy=l(St),Ts=n(St,"P",{});var Tf=a(Ts);Ay=i(Tf,"This model inherits from "),Li=n(Tf,"A",{href:!0});var SB=a(Li);Iy=i(SB,"PreTrainedModel"),SB.forEach(t),Ny=i(Tf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tf.forEach(t),Ly=l(St),Ms=n(St,"P",{});var Mf=a(Ms);Dy=i(Mf,"This model is also a PyTorch "),ws=n(Mf,"A",{href:!0,rel:!0});var AB=a(ws);Gy=i(AB,"torch.nn.Module"),AB.forEach(t),Uy=i(Mf,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Mf.forEach(t),Ry=l(St),Ne=n(St,"DIV",{class:!0});var At=a(Ne);f(xs.$$.fragment,At),Wy=l(At),ko=n(At,"P",{});var ld=a(ko);Qy=i(ld,"The "),Di=n(ld,"A",{href:!0});var IB=a(Di);Xy=i(IB,"MBartForQuestionAnswering"),IB.forEach(t),Ky=i(ld," forward method, overrides the "),Qc=n(ld,"CODE",{});var NB=a(Qc);Hy=i(NB,"__call__"),NB.forEach(t),Vy=i(ld," special method."),ld.forEach(t),Jy=l(At),f(cn.$$.fragment,At),Zy=l(At),Xc=n(At,"P",{});var LB=a(Xc);Yy=i(LB,"Example:"),LB.forEach(t),eT=l(At),f(zs.$$.fragment,At),At.forEach(t),St.forEach(t),rm=l(s),bo=n(s,"H2",{class:!0});var wf=a(bo);pn=n(wf,"A",{id:!0,class:!0,href:!0});var DB=a(pn);Kc=n(DB,"SPAN",{});var GB=a(Kc);f(Bs.$$.fragment,GB),GB.forEach(t),DB.forEach(t),tT=l(wf),Hc=n(wf,"SPAN",{});var UB=a(Hc);oT=i(UB,"MBartForSequenceClassification"),UB.forEach(t),wf.forEach(t),im=l(s),Ee=n(s,"DIV",{class:!0});var It=a(Ee);f(qs.$$.fragment,It),nT=l(It),Vc=n(It,"P",{});var RB=a(Vc);aT=i(RB,`MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),RB.forEach(t),sT=l(It),$s=n(It,"P",{});var xf=a($s);rT=i(xf,"This model inherits from "),Gi=n(xf,"A",{href:!0});var WB=a(Gi);iT=i(WB,"PreTrainedModel"),WB.forEach(t),dT=i(xf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xf.forEach(t),lT=l(It),Fs=n(It,"P",{});var zf=a(Fs);cT=i(zf,"This model is also a PyTorch "),Es=n(zf,"A",{href:!0,rel:!0});var QB=a(Es);pT=i(QB,"torch.nn.Module"),QB.forEach(t),uT=i(zf,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zf.forEach(t),hT=l(It),ne=n(It,"DIV",{class:!0});var Re=a(ne);f(js.$$.fragment,Re),mT=l(Re),vo=n(Re,"P",{});var cd=a(vo);fT=i(cd,"The "),Ui=n(cd,"A",{href:!0});var XB=a(Ui);_T=i(XB,"MBartForSequenceClassification"),XB.forEach(t),gT=i(cd," forward method, overrides the "),Jc=n(cd,"CODE",{});var KB=a(Jc);kT=i(KB,"__call__"),KB.forEach(t),bT=i(cd," special method."),cd.forEach(t),vT=l(Re),f(un.$$.fragment,Re),yT=l(Re),Zc=n(Re,"P",{});var HB=a(Zc);TT=i(HB,"Example of single-label classification:"),HB.forEach(t),MT=l(Re),f(Cs.$$.fragment,Re),wT=l(Re),Yc=n(Re,"P",{});var VB=a(Yc);xT=i(VB,"Example of multi-label classification:"),VB.forEach(t),zT=l(Re),f(Ps.$$.fragment,Re),Re.forEach(t),It.forEach(t),dm=l(s),yo=n(s,"H2",{class:!0});var Bf=a(yo);hn=n(Bf,"A",{id:!0,class:!0,href:!0});var JB=a(hn);ep=n(JB,"SPAN",{});var ZB=a(ep);f(Os.$$.fragment,ZB),ZB.forEach(t),JB.forEach(t),BT=l(Bf),tp=n(Bf,"SPAN",{});var YB=a(tp);qT=i(YB,"MBartForCausalLM"),YB.forEach(t),Bf.forEach(t),lm=l(s),Ss=n(s,"DIV",{class:!0});var e4=a(Ss);Tt=n(e4,"DIV",{class:!0});var pd=a(Tt);f(As.$$.fragment,pd),$T=l(pd),op=n(pd,"P",{});var t4=a(op);FT=i(t4,"Example:"),t4.forEach(t),ET=l(pd),f(Is.$$.fragment,pd),pd.forEach(t),e4.forEach(t),cm=l(s),To=n(s,"H2",{class:!0});var qf=a(To);mn=n(qf,"A",{id:!0,class:!0,href:!0});var o4=a(mn);np=n(o4,"SPAN",{});var n4=a(np);f(Ns.$$.fragment,n4),n4.forEach(t),o4.forEach(t),jT=l(qf),ap=n(qf,"SPAN",{});var a4=a(ap);CT=i(a4,"TFMBartModel"),a4.forEach(t),qf.forEach(t),pm=l(s),je=n(s,"DIV",{class:!0});var Nt=a(je);f(Ls.$$.fragment,Nt),PT=l(Nt),Ds=n(Nt,"P",{});var $f=a(Ds);OT=i($f,`The bare MBART Model outputting raw hidden-states without any specific head on 
top. This model inherits from `),Ri=n($f,"A",{href:!0});var s4=a(Ri);ST=i(s4,"TFPreTrainedModel"),s4.forEach(t),AT=i($f,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$f.forEach(t),IT=l(Nt),Gs=n(Nt,"P",{});var Ff=a(Gs);NT=i(Ff,"This model is also a "),Us=n(Ff,"A",{href:!0,rel:!0});var r4=a(Us);LT=i(r4,"tf.keras.Model"),r4.forEach(t),DT=i(Ff,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ff.forEach(t),GT=l(Nt),f(fn.$$.fragment,Nt),UT=l(Nt),Le=n(Nt,"DIV",{class:!0});var Lt=a(Le);f(Rs.$$.fragment,Lt),RT=l(Lt),Mo=n(Lt,"P",{});var ud=a(Mo);WT=i(ud,"The "),Wi=n(ud,"A",{href:!0});var i4=a(Wi);QT=i(i4,"TFMBartModel"),i4.forEach(t),XT=i(ud," forward method, overrides the "),sp=n(ud,"CODE",{});var d4=a(sp);KT=i(d4,"__call__"),d4.forEach(t),HT=i(ud," special method."),ud.forEach(t),VT=l(Lt),f(_n.$$.fragment,Lt),JT=l(Lt),rp=n(Lt,"P",{});var l4=a(rp);ZT=i(l4,"Example:"),l4.forEach(t),YT=l(Lt),f(Ws.$$.fragment,Lt),Lt.forEach(t),Nt.forEach(t),um=l(s),wo=n(s,"H2",{class:!0});var Ef=a(wo);gn=n(Ef,"A",{id:!0,class:!0,href:!0});var c4=a(gn);ip=n(c4,"SPAN",{});var p4=a(ip);f(Qs.$$.fragment,p4),p4.forEach(t),c4.forEach(t),e1=l(Ef),dp=n(Ef,"SPAN",{});var u4=a(dp);t1=i(u4,"TFMBartForConditionalGeneration"),u4.forEach(t),Ef.forEach(t),hm=l(s),Ce=n(s,"DIV",{class:!0});var Dt=a(Ce);f(Xs.$$.fragment,Dt),o1=l(Dt),Ks=n(Dt,"P",{});var jf=a(Ks);n1=i(jf,`The MBART Model with a language modeling head. Can be used for summarization. This model inherits from `),Qi=n(jf,"A",{href:!0});var h4=a(Qi);a1=i(h4,"TFPreTrainedModel"),h4.forEach(t),s1=i(jf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jf.forEach(t),r1=l(Dt),Hs=n(Dt,"P",{});var Cf=a(Hs);i1=i(Cf,"This model is also a "),Vs=n(Cf,"A",{href:!0,rel:!0});var m4=a(Vs);d1=i(m4,"tf.keras.Model"),m4.forEach(t),l1=i(Cf,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cf.forEach(t),c1=l(Dt),f(kn.$$.fragment,Dt),p1=l(Dt),O=n(Dt,"DIV",{class:!0});var X=a(O);f(Js.$$.fragment,X),u1=l(X),xo=n(X,"P",{});var hd=a(xo);h1=i(hd,"The "),Xi=n(hd,"A",{href:!0});var f4=a(Xi);m1=i(f4,"TFMBartForConditionalGeneration"),f4.forEach(t),f1=i(hd," forward method, overrides the "),lp=n(hd,"CODE",{});var _4=a(lp);_1=i(_4,"__call__"),_4.forEach(t),g1=i(hd," special method."),hd.forEach(t),k1=l(X),f(bn.$$.fragment,X),b1=l(X),cp=n(X,"P",{});var g4=a(cp);v1=i(g4,"Summarization example::"),g4.forEach(t),y1=l(X),pp=n(X,"BLOCKQUOTE",{});var k4=a(pp);up=n(k4,"BLOCKQUOTE",{});var b4=a(up);hp=n(b4,"BLOCKQUOTE",{});var v4=a(hp);mp=n(v4,"P",{});var y4=a(mp);T1=i(y4,"from transformers import MBartTokenizer, TFMBartForConditionalGeneration, MBartConfig"),y4.forEach(t),v4.forEach(t),b4.forEach(t),k4.forEach(t),M1=l(X),fp=n(X,"BLOCKQUOTE",{});var T4=a(fp);_p=n(T4,"BLOCKQUOTE",{});var M4=a(_p);gp=n(M4,"BLOCKQUOTE",{});var w4=a(gp);kp=n(w4,"P",{});var x4=a(kp);w1=i(x4,`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),x4.forEach(t),w4.forEach(t),M4.forEach(t),T4.forEach(t),x1=l(X),bp=n(X,"BLOCKQUOTE",{});var z4=a(bp);vp=n(z4,"BLOCKQUOTE",{});var B4=a(vp);yp=n(B4,"BLOCKQUOTE",{});var q4=a(yp);Tp=n(q4,"P",{});var $4=a(Tp);z1=i($4,`ARTICLE_TO_SUMMARIZE = \u201CMeine Freunde sind cool, aber sie essen zu viel Kuchen.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018tf\u2019)`),$4.forEach(t),q4.forEach(t),B4.forEach(t),z4.forEach(t),B1=l(X),Mp=n(X,"BLOCKQUOTE",{});var F4=a(Mp);wp=n(F4,"BLOCKQUOTE",{});var E4=a(wp);Zs=n(E4,"BLOCKQUOTE",{});var Pf=a(Zs);vn=n(Pf,"H1",{class:!0});var Of=a(vn);yn=n(Of,"A",{id:!0,class:!0,href:!0});var j4=a(yn);xp=n(j4,"SPAN",{});var C4=a(xp);f(Ys.$$.fragment,C4),C4.forEach(t),j4.forEach(t),q1=l(Of),zp=n(Of,"SPAN",{});var P4=a(zp);$1=i(P4,"Generate Summary"),P4.forEach(t),Of.forEach(t),F1=l(Pf),Bp=n(Pf,"P",{});var O4=a(Bp);E1=i(O4,`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),O4.forEach(t),Pf.forEach(t),E4.forEach(t),F4.forEach(t),j1=l(X),qp=n(X,"P",{});var S4=a(qp);C1=i(S4,"Mask filling example::"),S4.forEach(t),P1=l(X),$p=n(X,"BLOCKQUOTE",{});var A4=a($p);Fp=n(A4,"BLOCKQUOTE",{});var I4=a(Fp);zo=n(I4,"BLOCKQUOTE",{});var md=a(zo);Ep=n(md,"P",{});var N4=a(Ep);O1=i(N4,`from transformers import MBartTokenizer, TFMBartForConditionalGeneration tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),N4.forEach(t),S1=l(md),Tn=n(md,"H1",{class:!0});var Sf=a(Tn);Mn=n(Sf,"A",{id:!0,class:!0,href:!0});var L4=a(Mn);jp=n(L4,"SPAN",{});var D4=a(jp);f(er.$$.fragment,D4),D4.forEach(t),L4.forEach(t),A1=l(Sf),Cp=n(Sf,"SPAN",{});var G4=a(Cp);I1=i(G4,"de_DE is the language symbol id <LID> for German"),G4.forEach(t),Sf.forEach(t),N1=l(md),Pp=n(md,"P",{});var U4=a(Pp);L1=i(U4,"TXT = \u201D</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. 
</s> de_DE\u201D"),U4.forEach(t),md.forEach(t),I4.forEach(t),A4.forEach(t),D1=l(X),Op=n(X,"BLOCKQUOTE",{});var R4=a(Op);Sp=n(R4,"BLOCKQUOTE",{});var W4=a(Sp);tr=n(W4,"BLOCKQUOTE",{});var Af=a(tr);Ap=n(Af,"P",{});var Q4=a(Ap);G1=i(Q4,`model = MBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors=\u2018tf\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits probs = tf.nn.softmax(logits[0])`),Q4.forEach(t),U1=l(Af),wn=n(Af,"H1",{class:!0});var If=a(wn);xn=n(If,"A",{id:!0,class:!0,href:!0});var X4=a(xn);Ip=n(X4,"SPAN",{});var K4=a(Ip);f(or.$$.fragment,K4),K4.forEach(t),X4.forEach(t),R1=l(If),Np=n(If,"SPAN",{});var H4=a(Np);W1=i(H4,"probs[5] is associated with the mask token"),H4.forEach(t),If.forEach(t),Af.forEach(t),W4.forEach(t),R4.forEach(t),X.forEach(t),Dt.forEach(t),mm=l(s),Bo=n(s,"H2",{class:!0});var Nf=a(Bo);zn=n(Nf,"A",{id:!0,class:!0,href:!0});var V4=a(zn);Lp=n(V4,"SPAN",{});var J4=a(Lp);f(nr.$$.fragment,J4),J4.forEach(t),V4.forEach(t),Q1=l(Nf),Dp=n(Nf,"SPAN",{});var Z4=a(Dp);X1=i(Z4,"FlaxMBartModel"),Z4.forEach(t),Nf.forEach(t),fm=l(s),V=n(s,"DIV",{class:!0});var ge=a(V);f(ar.$$.fragment,ge),K1=l(ge),sr=n(ge,"P",{});var Lf=a(sr);H1=i(Lf,`The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Ki=n(Lf,"A",{href:!0});var Y4=a(Ki);V1=i(Y4,"FlaxPreTrainedModel"),Y4.forEach(t),J1=i(Lf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lf.forEach(t),Z1=l(ge),rr=n(ge,"P",{});var Df=a(rr);Y1=i(Df,"This model is also a Flax Linen "),ir=n(Df,"A",{href:!0,rel:!0});var eq=a(ir);eM=i(eq,"flax.nn.Module"),eq.forEach(t),tM=i(Df,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Df.forEach(t),oM=l(ge),Gp=n(ge,"P",{});var tq=a(Gp);nM=i(tq,"Finally, this model supports inherent JAX features such as:"),tq.forEach(t),aM=l(ge),ht=n(ge,"UL",{});var Rn=a(ht);Up=n(Rn,"LI",{});var oq=a(Up);dr=n(oq,"A",{href:!0,rel:!0});var nq=a(dr);sM=i(nq,"Just-In-Time (JIT) compilation"),nq.forEach(t),oq.forEach(t),rM=l(Rn),Rp=n(Rn,"LI",{});var aq=a(Rp);lr=n(aq,"A",{href:!0,rel:!0});var sq=a(lr);iM=i(sq,"Automatic Differentiation"),sq.forEach(t),aq.forEach(t),dM=l(Rn),Wp=n(Rn,"LI",{});var rq=a(Wp);cr=n(rq,"A",{href:!0,rel:!0});var iq=a(cr);lM=i(iq,"Vectorization"),iq.forEach(t),rq.forEach(t),cM=l(Rn),Qp=n(Rn,"LI",{});var dq=a(Qp);pr=n(dq,"A",{href:!0,rel:!0});var lq=a(pr);pM=i(lq,"Parallelization"),lq.forEach(t),dq.forEach(t),Rn.forEach(t),uM=l(ge),De=n(ge,"DIV",{class:!0});var Gt=a(De);f(ur.$$.fragment,Gt),hM=l(Gt),qo=n(Gt,"P",{});var fd=a(qo);mM=i(fd,"The "),Xp=n(fd,"CODE",{});var cq=a(Xp);fM=i(cq,"FlaxMBartPreTrainedModel"),cq.forEach(t),_M=i(fd," forward method, overrides the "),Kp=n(fd,"CODE",{});var pq=a(Kp);gM=i(pq,"__call__"),pq.forEach(t),kM=i(fd," special method."),fd.forEach(t),bM=l(Gt),f(Bn.$$.fragment,Gt),vM=l(Gt),Hp=n(Gt,"P",{});var uq=a(Hp);yM=i(uq,"Example:"),uq.forEach(t),TM=l(Gt),f(hr.$$.fragment,Gt),Gt.forEach(t),MM=l(ge),Mt=n(ge,"DIV",{class:!0});var _d=a(Mt);f(mr.$$.fragment,_d),wM=l(_d),Vp=n(_d,"P",{});var hq=a(Vp);xM=i(hq,"Example:"),hq.forEach(t),zM=l(_d),f(fr.$$.fragment,_d),_d.forEach(t),BM=l(ge),wt=n(ge,"DIV",{class:!0});var gd=a(wt);f(_r.$$.fragment,gd),qM=l(gd),Jp=n(gd,"P",{});var mq=a(Jp);$M=i(mq,"Example:"),mq.forEach(t),FM=l(gd),f(gr.$$.fragment,gd),gd.forEach(t),ge.forEach(t),_m=l(s),$o=n(s,"H2",{class:!0});var Gf=a($o);qn=n(Gf,"A",{id:!0,class:!0,href:!0});var fq=a(qn);Zp=n(fq,"SPAN",{});var _q=a(Zp);f(kr.$$.fragment,_q),_q.forEach(t),fq.forEach(t),EM=l(Gf),Yp=n(Gf,"SPAN",{});var gq=a(Yp);jM=i(gq,"FlaxMBartForConditionalGeneration"),gq.forEach(t),Gf.forEach(t),gm=l(s),J=n(s,"DIV",{class:!0});var ke=a(J);f(br.$$.fragment,ke),CM=l(ke),vr=n(ke,"P",{});var Uf=a(vr);PM=i(Uf,`The MMBart Model with a language modeling head. Can be used for summarization. This model inherits from `),Hi=n(Uf,"A",{href:!0});var kq=a(Hi);OM=i(kq,"FlaxPreTrainedModel"),kq.forEach(t),SM=i(Uf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Uf.forEach(t),AM=l(ke),yr=n(ke,"P",{});var Rf=a(yr);IM=i(Rf,"This model is also a Flax Linen "),Tr=n(Rf,"A",{href:!0,rel:!0});var bq=a(Tr);NM=i(bq,"flax.nn.Module"),bq.forEach(t),LM=i(Rf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Rf.forEach(t),DM=l(ke),eu=n(ke,"P",{});var vq=a(eu);GM=i(vq,"Finally, this model supports inherent JAX features such as:"),vq.forEach(t),UM=l(ke),mt=n(ke,"UL",{});var Wn=a(mt);tu=n(Wn,"LI",{});var yq=a(tu);Mr=n(yq,"A",{href:!0,rel:!0});var Tq=a(Mr);RM=i(Tq,"Just-In-Time (JIT) compilation"),Tq.forEach(t),yq.forEach(t),WM=l(Wn),ou=n(Wn,"LI",{});var Mq=a(ou);wr=n(Mq,"A",{href:!0,rel:!0});var wq=a(wr);QM=i(wq,"Automatic Differentiation"),wq.forEach(t),Mq.forEach(t),XM=l(Wn),nu=n(Wn,"LI",{});var xq=a(nu);xr=n(xq,"A",{href:!0,rel:!0});var zq=a(xr);KM=i(zq,"Vectorization"),zq.forEach(t),xq.forEach(t),HM=l(Wn),au=n(Wn,"LI",{});var Bq=a(au);zr=n(Bq,"A",{href:!0,rel:!0});var qq=a(zr);VM=i(qq,"Parallelization"),qq.forEach(t),Bq.forEach(t),Wn.forEach(t),JM=l(ke),j=n(ke,"DIV",{class:!0});var A=a(j);f(Br.$$.fragment,A),ZM=l(A),Fo=n(A,"P",{});var kd=a(Fo);YM=i(kd,"The "),su=n(kd,"CODE",{});var $q=a(su);e0=i($q,"FlaxMBartPreTrainedModel"),$q.forEach(t),t0=i(kd," forward method, overrides the "),ru=n(kd,"CODE",{});var Fq=a(ru);o0=i(Fq,"__call__"),Fq.forEach(t),n0=i(kd," special method."),kd.forEach(t),a0=l(A),f($n.$$.fragment,A),s0=l(A),iu=n(A,"P",{});var Eq=a(iu);r0=i(Eq,"Summarization example::"),Eq.forEach(t),i0=l(A),du=n(A,"BLOCKQUOTE",{});var jq=a(du);lu=n(jq,"BLOCKQUOTE",{});var Cq=a(lu);cu=n(Cq,"BLOCKQUOTE",{});var Pq=a(cu);pu=n(Pq,"P",{});var Oq=a(pu);d0=i(Oq,"from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration, MBartConfig"),Oq.forEach(t),Pq.forEach(t),Cq.forEach(t),jq.forEach(t),l0=l(A),uu=n(A,"BLOCKQUOTE",{});var Sq=a(uu);hu=n(Sq,"BLOCKQUOTE",{});var Aq=a(hu);mu=n(Aq,"BLOCKQUOTE",{});var Iq=a(mu);fu=n(Iq,"P",{});var Nq=a(fu);c0=i(Nq,`model = FlaxMBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) tokenizer = MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),Nq.forEach(t),Iq.forEach(t),Aq.forEach(t),Sq.forEach(t),p0=l(A),_u=n(A,"BLOCKQUOTE",{});var Lq=a(_u);gu=n(Lq,"BLOCKQUOTE",{});var Dq=a(gu);ku=n(Dq,"BLOCKQUOTE",{});var Gq=a(ku);bu=n(Gq,"P",{});var Uq=a(bu);u0=i(Uq,`ARTICLE_TO_SUMMARIZE = \u201CMeine Freunde sind cool, aber sie essen zu viel Kuchen.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors=\u2018np\u2019)`),Uq.forEach(t),Gq.forEach(t),Dq.forEach(t),Lq.forEach(t),h0=l(A),vu=n(A,"BLOCKQUOTE",{});var Rq=a(vu);yu=n(Rq,"BLOCKQUOTE",{});var Wq=a(yu);qr=n(Wq,"BLOCKQUOTE",{});var Wf=a(qr);Fn=n(Wf,"H1",{class:!0});var Qf=a(Fn);En=n(Qf,"A",{id:!0,class:!0,href:!0});var Qq=a(En);Tu=n(Qq,"SPAN",{});var Xq=a(Tu);f($r.$$.fragment,Xq),Xq.forEach(t),Qq.forEach(t),m0=l(Qf),Mu=n(Qf,"SPAN",{});var Kq=a(Mu);f0=i(Kq,"Generate Summary"),Kq.forEach(t),Qf.forEach(t),_0=l(Wf),wu=n(Wf,"P",{});var Hq=a(wu);g0=i(Hq,`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True).sequences print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),Hq.forEach(t),Wf.forEach(t),Wq.forEach(t),Rq.forEach(t),k0=l(A),xu=n(A,"P",{});var Vq=a(xu);b0=i(Vq,"Mask filling example::"),Vq.forEach(t),v0=l(A),zu=n(A,"BLOCKQUOTE",{});var Jq=a(zu);Bu=n(Jq,"BLOCKQUOTE",{});var Zq=a(Bu);Eo=n(Zq,"BLOCKQUOTE",{});var bd=a(Eo);qu=n(bd,"P",{});var Yq=a(qu);y0=i(Yq,`from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration tokenizer = 
MBartTokenizer.from_pretrained(\u2018facebook/mbart-large-cc25\u2019)`),Yq.forEach(t),T0=l(bd),jn=n(bd,"H1",{class:!0});var Xf=a(jn);Cn=n(Xf,"A",{id:!0,class:!0,href:!0});var e5=a(Cn);$u=n(e5,"SPAN",{});var t5=a($u);f(Fr.$$.fragment,t5),t5.forEach(t),e5.forEach(t),M0=l(Xf),Fu=n(Xf,"SPAN",{});var o5=a(Fu);w0=i(o5,"de_DE is the language symbol id <LID> for German"),o5.forEach(t),Xf.forEach(t),x0=l(bd),Eu=n(bd,"P",{});var n5=a(Eu);z0=i(n5,"TXT = \u201D</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE\u201D"),n5.forEach(t),bd.forEach(t),Zq.forEach(t),Jq.forEach(t),B0=l(A),ju=n(A,"BLOCKQUOTE",{});var a5=a(ju);Cu=n(a5,"BLOCKQUOTE",{});var s5=a(Cu);Pu=n(s5,"BLOCKQUOTE",{});var r5=a(Pu);Ou=n(r5,"P",{});var i5=a(Ou);q0=i(i5,`model = FlaxMBartForConditionalGeneration.from_pretrained(\u2018facebook/mbart-large-cc25\u2019) input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors=\u2018np\u2019)[\u2018input_ids\u2019] logits = model(input_ids).logits`),i5.forEach(t),r5.forEach(t),s5.forEach(t),a5.forEach(t),$0=l(A),Su=n(A,"BLOCKQUOTE",{});var d5=a(Su);Au=n(d5,"BLOCKQUOTE",{});var l5=a(Au);Iu=n(l5,"BLOCKQUOTE",{});var c5=a(Iu);Nu=n(c5,"P",{});var p5=a(Nu);F0=i(p5,`masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item() probs = logits[0, masked_index].softmax(dim=0) values, predictions = probs.topk(5)`),p5.forEach(t),c5.forEach(t),l5.forEach(t),d5.forEach(t),E0=l(A),Lu=n(A,"BLOCKQUOTE",{});var u5=a(Lu);Du=n(u5,"BLOCKQUOTE",{});var h5=a(Du);Gu=n(h5,"BLOCKQUOTE",{});var m5=a(Gu);Uu=n(m5,"P",{});var f5=a(Uu);j0=i(f5,"tokenizer.decode(predictions).split()"),f5.forEach(t),m5.forEach(t),h5.forEach(t),u5.forEach(t),A.forEach(t),C0=l(ke),xt=n(ke,"DIV",{class:!0});var vd=a(xt);f(Er.$$.fragment,vd),P0=l(vd),Ru=n(vd,"P",{});var _5=a(Ru);O0=i(_5,"Example:"),_5.forEach(t),S0=l(vd),f(jr.$$.fragment,vd),vd.forEach(t),A0=l(ke),zt=n(ke,"DIV",{class:!0});var yd=a(zt);f(Cr.$$.fragment,yd),I0=l(yd),Wu=n(yd,"P",{});var g5=a(Wu);N0=i(g5,"Example:"),g5.forEach(t),L0=l(yd),f(Pr.$$.fragment,yd),yd.forEach(t),ke.forEach(t),km=l(s),jo=n(s,"H2",{class:!0});var Kf=a(jo);Pn=n(Kf,"A",{id:!0,class:!0,href:!0});var k5=a(Pn);Qu=n(k5,"SPAN",{});var b5=a(Qu);f(Or.$$.fragment,b5),b5.forEach(t),k5.forEach(t),D0=l(Kf),Xu=n(Kf,"SPAN",{});var v5=a(Xu);G0=i(v5,"FlaxMBartForSequenceClassification"),v5.forEach(t),Kf.forEach(t),bm=l(s),U=n(s,"DIV",{class:!0});var se=a(U);f(Sr.$$.fragment,se),U0=l(se),Ku=n(se,"P",{});var y5=a(Ku);R0=i(y5,`MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),y5.forEach(t),W0=l(se),Ar=n(se,"P",{});var Hf=a(Ar);Q0=i(Hf,"This model inherits from "),Vi=n(Hf,"A",{href:!0});var T5=a(Vi);X0=i(T5,"FlaxPreTrainedModel"),T5.forEach(t),K0=i(Hf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hf.forEach(t),H0=l(se),Ir=n(se,"P",{});var Vf=a(Ir);V0=i(Vf,"This model is also a Flax Linen "),Nr=n(Vf,"A",{href:!0,rel:!0});var M5=a(Nr);J0=i(M5,"flax.nn.Module"),M5.forEach(t),Z0=i(Vf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Vf.forEach(t),Y0=l(se),Hu=n(se,"P",{});var w5=a(Hu);ew=i(w5,"Finally, this model supports inherent JAX features such as:"),w5.forEach(t),tw=l(se),ft=n(se,"UL",{});var Qn=a(ft);Vu=n(Qn,"LI",{});var x5=a(Vu);Lr=n(x5,"A",{href:!0,rel:!0});var z5=a(Lr);ow=i(z5,"Just-In-Time (JIT) compilation"),z5.forEach(t),x5.forEach(t),nw=l(Qn),Ju=n(Qn,"LI",{});var B5=a(Ju);Dr=n(B5,"A",{href:!0,rel:!0});var q5=a(Dr);aw=i(q5,"Automatic Differentiation"),q5.forEach(t),B5.forEach(t),sw=l(Qn),Zu=n(Qn,"LI",{});var $5=a(Zu);Gr=n($5,"A",{href:!0,rel:!0});var F5=a(Gr);rw=i(F5,"Vectorization"),F5.forEach(t),$5.forEach(t),iw=l(Qn),Yu=n(Qn,"LI",{});var E5=a(Yu);Ur=n(E5,"A",{href:!0,rel:!0});var j5=a(Ur);dw=i(j5,"Parallelization"),j5.forEach(t),E5.forEach(t),Qn.forEach(t),lw=l(se),Ge=n(se,"DIV",{class:!0});var Ut=a(Ge);f(Rr.$$.fragment,Ut),cw=l(Ut),Co=n(Ut,"P",{});var Td=a(Co);pw=i(Td,"The "),eh=n(Td,"CODE",{});var C5=a(eh);uw=i(C5,"FlaxMBartPreTrainedModel"),C5.forEach(t),hw=i(Td," forward method, overrides the "),th=n(Td,"CODE",{});var P5=a(th);mw=i(P5,"__call__"),P5.forEach(t),fw=i(Td," special method."),Td.forEach(t),_w=l(Ut),f(On.$$.fragment,Ut),gw=l(Ut),oh=n(Ut,"P",{});var O5=a(oh);kw=i(O5,"Example:"),O5.forEach(t),bw=l(Ut),f(Wr.$$.fragment,Ut),Ut.forEach(t),vw=l(se),Bt=n(se,"DIV",{class:!0});var Md=a(Bt);f(Qr.$$.fragment,Md),yw=l(Md),nh=n(Md,"P",{});var S5=a(nh);Tw=i(S5,"Example:"),S5.forEach(t),Mw=l(Md),f(Xr.$$.fragment,Md),Md.forEach(t),ww=l(se),qt=n(se,"DIV",{class:!0});var wd=a(qt);f(Kr.$$.fragment,wd),xw=l(wd),ah=n(wd,"P",{});var A5=a(ah);zw=i(A5,"Example:"),A5.forEach(t),Bw=l(wd),f(Hr.$$.fragment,wd),wd.forEach(t),se.forEach(t),vm=l(s),Po=n(s,"H2",{class:!0});var Jf=a(Po);Sn=n(Jf,"A",{id:!0,class:!0,href:!0});var I5=a(Sn);sh=n(I5,"SPAN",{});var N5=a(sh);f(Vr.$$.fragment,N5),N5.forEach(t),I5.forEach(t),qw=l(Jf),rh=n(Jf,"SPAN",{});var L5=a(rh);$w=i(L5,"FlaxMBartForQuestionAnswering"),L5.forEach(t),Jf.forEach(t),ym=l(s),R=n(s,"DIV",{class:!0});var re=a(R);f(Jr.$$.fragment,re),Fw=l(re),Oo=n(re,"P",{});var xd=a(Oo);Ew=i(xd,`MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),ih=n(xd,"CODE",{});var D5=a(ih);jw=i(D5,"span start logits"),D5.forEach(t),Cw=i(xd," and "),dh=n(xd,"CODE",{});var G5=a(dh);Pw=i(G5,"span end logits"),G5.forEach(t),Ow=i(xd,")."),xd.forEach(t),Sw=l(re),Zr=n(re,"P",{});var Zf=a(Zr);Aw=i(Zf,"This model inherits from "),Ji=n(Zf,"A",{href:!0});var U5=a(Ji);Iw=i(U5,"FlaxPreTrainedModel"),U5.forEach(t),Nw=i(Zf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zf.forEach(t),Lw=l(re),Yr=n(re,"P",{});var Yf=a(Yr);Dw=i(Yf,"This model is also a Flax Linen "),ei=n(Yf,"A",{href:!0,rel:!0});var R5=a(ei);Gw=i(R5,"flax.nn.Module"),R5.forEach(t),Uw=i(Yf,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Yf.forEach(t),Rw=l(re),lh=n(re,"P",{});var W5=a(lh);Ww=i(W5,"Finally, this model supports inherent JAX features such as:"),W5.forEach(t),Qw=l(re),_t=n(re,"UL",{});var Xn=a(_t);ch=n(Xn,"LI",{});var Q5=a(ch);ti=n(Q5,"A",{href:!0,rel:!0});var X5=a(ti);Xw=i(X5,"Just-In-Time (JIT) compilation"),X5.forEach(t),Q5.forEach(t),Kw=l(Xn),ph=n(Xn,"LI",{});var K5=a(ph);oi=n(K5,"A",{href:!0,rel:!0});var H5=a(oi);Hw=i(H5,"Automatic Differentiation"),H5.forEach(t),K5.forEach(t),Vw=l(Xn),uh=n(Xn,"LI",{});var V5=a(uh);ni=n(V5,"A",{href:!0,rel:!0});var J5=a(ni);Jw=i(J5,"Vectorization"),J5.forEach(t),V5.forEach(t),Zw=l(Xn),hh=n(Xn,"LI",{});var Z5=a(hh);ai=n(Z5,"A",{href:!0,rel:!0});var Y5=a(ai);Yw=i(Y5,"Parallelization"),Y5.forEach(t),Z5.forEach(t),Xn.forEach(t),ex=l(re),Ue=n(re,"DIV",{class:!0});var Rt=a(Ue);f(si.$$.fragment,Rt),tx=l(Rt),So=n(Rt,"P",{});var zd=a(So);ox=i(zd,"The "),mh=n(zd,"CODE",{});var e$=a(mh);nx=i(e$,"FlaxMBartPreTrainedModel"),e$.forEach(t),ax=i(zd," forward method, overrides the "),fh=n(zd,"CODE",{});var t$=a(fh);sx=i(t$,"__call__"),t$.forEach(t),rx=i(zd," special method."),zd.forEach(t),ix=l(Rt),f(An.$$.fragment,Rt),dx=l(Rt),_h=n(Rt,"P",{});var o$=a(_h);lx=i(o$,"Example:"),o$.forEach(t),cx=l(Rt),f(ri.$$.fragment,Rt),Rt.forEach(t),px=l(re),$t=n(re,"DIV",{class:!0});var Bd=a($t);f(ii.$$.fragment,Bd),ux=l(Bd),gh=n(Bd,"P",{});var n$=a(gh);hx=i(n$,"Example:"),n$.forEach(t),mx=l(Bd),f(di.$$.fragment,Bd),Bd.forEach(t),fx=l(re),Ft=n(re,"DIV",{class:!0});var qd=a(Ft);f(li.$$.fragment,qd),_x=l(qd),kh=n(qd,"P",{});var a$=a(kh);gx=i(a$,"Example:"),a$.forEach(t),kx=l(qd),f(ci.$$.fragment,qd),qd.forEach(t),re.forEach(t),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(T$)),c(M,"id","mbart-and-mbart50"),c(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(M,"href","#mbart-and-mbart50"),c(v,"class","relative group"),c(ve,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),c(ve,"rel","nofollow"),c(W,"id","overview-of-mbart"),c(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(W,"href","#overview-of-mbart"),c(ee,"class","relative group"),c(te,"href","https://arxiv.org/abs/2001.08210"),c(te,"rel","nofollow"),c(we,"href","https://huggingface.co/valhalla"),c(we,"rel","nofollow"),c(xe,"href","https://github.com/pytorch/fairseq/tree/master/examples/mbart"),c(xe,"rel","nofollow"),c($,"id","training-of-mbart"),c($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c($,"href","#training-of-mbart"),c(w,"class","relative group"),c(Kn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),c(_i,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer.as_target_tokenizer"),c(Ao,"id","overview-of-mbart50"),c(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ao,"href","#overview-of-mbart50"),c(Yt,"class","relative 
group"),c(Yn,"href","https://arxiv.org/abs/2008.00401%3E"),c(Yn,"rel","nofollow"),c(Io,"id","training-of-mbart50"),c(Io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Io,"href","#training-of-mbart50"),c(eo,"class","relative group"),c(Ti,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBart50Tokenizer"),c(Lo,"id","transformers.MBartConfig"),c(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lo,"href","#transformers.MBartConfig"),c(to,"class","relative group"),c(xi,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartModel"),c(ra,"href","https://huggingface.co/facebook/mbart-large-cc25"),c(ra,"rel","nofollow"),c(zi,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Bi,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c($e,"class","docstring"),c(Do,"id","transformers.MBartTokenizer"),c(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Do,"href","#transformers.MBartTokenizer"),c(ao,"class","relative group"),c(qi,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizer"),c($i,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizer"),c(Fi,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizer"),c(Go,"class","docstring"),c(tt,"class","docstring"),c(K,"class","docstring"),c(Uo,"id","transformers.MBartTokenizerFast"),c(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Uo,"href","#transformers.MBartTokenizerFast"),c(so,"class","relative group"),c(va,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models"),c(va,"rel","nofollow"),c(Ei,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartTokenizerFast"),c(ji,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizerFast"),c(Ci,"href","/docs/transformers/v4.15.0/en/model_doc/xlmroberta#transformers.XLMRobertaTokenizerFast"),c(Se,"class","docstring"),c(Ro,"class","docstring"),c(Wo,"class","docstring"),c(G,"class","docstring"),c(Qo,"id","transformers.MBart50Tokenizer"),c(Qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qo,"href","#transformers.MBart50Tokenizer"),c(io,"class","relative group"),c(Ca,"href","https://github.com/google/sentencepiece"),c(Ca,"rel","nofollow"),c(Pi,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(ot,"class","docstring"),c(Xo,"class","docstring"),c(Ko,"class","docstring"),c(Ho,"class","docstring"),c(Vo,"class","docstring"),c(L,"class","docstring"),c(Jo,"id","transformers.MBart50TokenizerFast"),c(Jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Jo,"href","#transformers.MBart50TokenizerFast"),c(lo,"class","relative group"),c(Ka,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models"),c(Ka,"rel","nofollow"),c(Oi,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Ae,"class","docstring"),c(Zo,"class","docstring"),c(Yo,"class","docstring"),c(H,"class","docstring"),c(en,"id","transformers.MBartModel"),c(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(en,"href","#transformers.MBartModel"),c(po,"class","relative group"),c(Si,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ds,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ds,"rel","nofollow"),c(Ai,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartModel"),c(Ie,"class","docstring"),c(He,"class","docstring"),c(on,"id","transformers.MBartForConditionalGeneration"),c(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(on,"href","#transformers.MBartForConditionalGeneration"),c(ho,"class","relative group"),c(Ii,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(fs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(fs,"rel","nofollow"),c(Ni,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForConditionalGeneration"),c(sn,"id","generate-summary"),c(sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(sn,"href","#generate-summary"),c(an,"class","relative group"),c(dn,"id","de_de-is-the-language-symbol-id-<lid>-for-german"),c(dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(dn,"href","#de_de-is-the-language-symbol-id-<lid>-for-german"),c(rn,"class","relative group"),c(E,"class","docstring"),c(Ve,"class","docstring"),c(ln,"id","transformers.MBartForQuestionAnswering"),c(ln,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ln,"href","#transformers.MBartForQuestionAnswering"),c(_o,"class","relative group"),c(Li,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ws,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ws,"rel","nofollow"),c(Di,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForQuestionAnswering"),c(Ne,"class","docstring"),c(Fe,"class","docstring"),c(pn,"id","transformers.MBartForSequenceClassification"),c(pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(pn,"href","#transformers.MBartForSequenceClassification"),c(bo,"class","relative 
group"),c(Gi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Es,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Es,"rel","nofollow"),c(Ui,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.MBartForSequenceClassification"),c(ne,"class","docstring"),c(Ee,"class","docstring"),c(hn,"id","transformers.MBartForCausalLM"),c(hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(hn,"href","#transformers.MBartForCausalLM"),c(yo,"class","relative group"),c(Tt,"class","docstring"),c(Ss,"class","docstring"),c(mn,"id","transformers.TFMBartModel"),c(mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(mn,"href","#transformers.TFMBartModel"),c(To,"class","relative group"),c(Ri,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Us,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Us,"rel","nofollow"),c(Wi,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartModel"),c(Le,"class","docstring"),c(je,"class","docstring"),c(gn,"id","transformers.TFMBartForConditionalGeneration"),c(gn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(gn,"href","#transformers.TFMBartForConditionalGeneration"),c(wo,"class","relative group"),c(Qi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Vs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Vs,"rel","nofollow"),c(Xi,"href","/docs/transformers/v4.15.0/en/model_doc/mbart#transformers.TFMBartForConditionalGeneration"),c(yn,"id","generate-summary"),c(yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(yn,"href","#generate-summary"),c(vn,"class","relative group"),c(Mn,"id","de_de-is-the-language-symbol-id-<lid>-for-german"),c(Mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mn,"href","#de_de-is-the-language-symbol-id-<lid>-for-german"),c(Tn,"class","relative group"),c(xn,"id","probs[5]-is-associated-with-the-mask-token"),c(xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xn,"href","#probs[5]-is-associated-with-the-mask-token"),c(wn,"class","relative group"),c(O,"class","docstring"),c(Ce,"class","docstring"),c(zn,"id","transformers.FlaxMBartModel"),c(zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zn,"href","#transformers.FlaxMBartModel"),c(Bo,"class","relative 
group"),c(Ki,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ir,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(ir,"rel","nofollow"),c(dr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(dr,"rel","nofollow"),c(lr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(lr,"rel","nofollow"),c(cr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(cr,"rel","nofollow"),c(pr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(pr,"rel","nofollow"),c(De,"class","docstring"),c(Mt,"class","docstring"),c(wt,"class","docstring"),c(V,"class","docstring"),c(qn,"id","transformers.FlaxMBartForConditionalGeneration"),c(qn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qn,"href","#transformers.FlaxMBartForConditionalGeneration"),c($o,"class","relative group"),c(Hi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Tr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Tr,"rel","nofollow"),c(Mr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Mr,"rel","nofollow"),c(wr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(wr,"rel","nofollow"),c(xr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(xr,"rel","nofollow"),c(zr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(zr,"rel","nofollow"),c(En,"id","generate-summary"),c(En,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(En,"href","#generate-summary"),c(Fn,"class","relative group"),c(Cn,"id","de_de-is-the-language-symbol-id-<lid>-for-german"),c(Cn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Cn,"href","#de_de-is-the-language-symbol-id-<lid>-for-german"),c(jn,"class","relative group"),c(j,"class","docstring"),c(xt,"class","docstring"),c(zt,"class","docstring"),c(J,"class","docstring"),c(Pn,"id","transformers.FlaxMBartForSequenceClassification"),c(Pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Pn,"href","#transformers.FlaxMBartForSequenceClassification"),c(jo,"class","relative 
group"),c(Vi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Nr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Nr,"rel","nofollow"),c(Lr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Lr,"rel","nofollow"),c(Dr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Dr,"rel","nofollow"),c(Gr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Gr,"rel","nofollow"),c(Ur,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Ur,"rel","nofollow"),c(Ge,"class","docstring"),c(Bt,"class","docstring"),c(qt,"class","docstring"),c(U,"class","docstring"),c(Sn,"id","transformers.FlaxMBartForQuestionAnswering"),c(Sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Sn,"href","#transformers.FlaxMBartForQuestionAnswering"),c(Po,"class","relative group"),c(Ji,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ei,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(ei,"rel","nofollow"),c(ti,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(ti,"rel","nofollow"),c(oi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(oi,"rel","nofollow"),c(ni,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(ni,"rel","nofollow"),c(ai,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(ai,"rel","nofollow"),c(Ue,"class","docstring"),c($t,"class","docstring"),c(Ft,"class","docstring"),c(R,"class","docstring")},m(s,p){e(document.head,h),u(s,z,p),u(s,v,p),e(v,M),e(M,x),_(T,x,null),e(v,y),e(v,B),e(B,st),u(s,Pe,p),u(s,F,p),e(F,We),e(We,be),e(F,rt),e(F,ve),e(ve,ye),e(F,it),u(s,Je,p),u(s,ee,p),e(ee,W),e(W,Qe),_(ie,Qe,null),e(ee,N),e(ee,D),e(D,dt),u(s,he,p),u(s,me,p),e(me,lt),e(me,te),e(te,ct),e(me,pt),u(s,Q,p),u(s,Oe,p),e(Oe,Te),u(s,Ze,p),u(s,de,p),e(de,Me),e(de,we),e(we,ut),e(de,le),e(de,xe),e(xe,ze),u(s,Ye,p),u(s,w,p),e(w,$),e($,Be),_(Xe,Be,null),e(w,Wt),e(w,oe),e(oe,Qt),u(s,gt,p),u(s,Z,p),e(Z,ce),e(Z,pe),e(pe,Xt),e(Z,Kt),e(Z,ue),e(ue,Ht),e(Z,Vt),e(Z,Ke),e(Ke,Jt),e(Z,e_),e(Z,$d),e($d,t_),e(Z,o_),u(s,qh,p),u(s,kt,p),e(kt,n_),e(kt,Kn),e(Kn,Fd),e(Fd,a_),e(Kn,s_),e(kt,r_),e(kt,_i),e(_i,i_),e(kt,d_),u(s,$h,p),u(s,gi,p),e(gi,Ed),e(Ed,l_),u(s,Fh,p),_(Hn,s,p),u(s,Eh,p),u(s,ki,p),e(ki,Vn),e(Vn,jd),e(jd,c_),e(Vn,p_),e(Vn,Zt),e(Zt,u_),e(Zt,Cd),e(Cd,h_),e(Zt,m_),e(Zt,Pd),e(Pd,f_),e(Zt,__),u(s,jh,p),_(Jn,s,p),u(s,Ch,p),u(s,Yt,p),e(Yt,Ao),e(Ao,Od),_(Zn,Od,null),e(Yt,g_),e(Yt,Sd),e(Sd,k_),u(s,Ph,p),u(s,bt,p),e(bt,b_),e(bt,bi),e(bi,v_),e(bi,Yn),e(Yn,y_),e(bt,T_),e(bt,Ad),e(Ad,M_),e(bt,w_),u(s,Oh,p),u(s,vi,p),e(vi,x_),u(s,Sh,p),u(s,yi,p),e(yi,Id),e(Id,z_),u(s,Ah,p),u(s,eo,p),e(eo,Io),e(Io,Nd),_(ea,Nd,null),e(eo,B_),e(eo,Ld),e(Ld,q_),u(s,Ih,p),u(s,et,p),e(et,$_),e(et,Dd),e(Dd,F_),e(et,E_),e(et,Gd),e(Gd,j_),e(et,C_),e(et,Ud),e(Ud,P_),e(et,O_),u(s,Nh,p),u(s,No,p),e(No,S_),e(No,Ti),e(Ti,A_),e(No,I_),u(s,Lh,p),u(s,Mi,p),e(Mi,Rd),e(Rd,N_),u(s,Dh,p),_(ta,s,p),u(s,Gh,p),u(s,wi,p),e(wi,oa),e(oa,Wd),e(Wd,L_),e(oa,D_),e(oa,qe),e(qe,G_),e(qe,Qd),e(Qd,U_),e(qe,R_),e(qe,Xd),e(Xd,W_),e(qe,Q_),e(qe,Kd),e(Kd,X_),e(qe,K_),e(qe,Hd),e(Hd,H_),e(qe,V_),e(qe,Vd),e(Vd,J_),e(qe,Z_),u(s,Uh,p),_(na,s,p),u(s,Rh,p),u(s,to,p),e(to,Lo),e(Lo,Jd),_(aa,Jd,null),e(to,Y_),
e(to,Zd),e(Zd,eg),u(s,Wh,p),u(s,$e,p),_(sa,$e,null),e($e,tg),e($e,oo),e(oo,og),e(oo,xi),e(xi,ng),e(oo,ag),e(oo,ra),e(ra,sg),e(oo,rg),e($e,ig),e($e,no),e(no,dg),e(no,zi),e(zi,lg),e(no,cg),e(no,Bi),e(Bi,pg),e(no,ug),e($e,hg),e($e,Yd),e(Yd,mg),e($e,fg),_(ia,$e,null),u(s,Qh,p),u(s,ao,p),e(ao,Do),e(Do,el),_(da,el,null),e(ao,_g),e(ao,tl),e(tl,gg),u(s,Xh,p),u(s,K,p),_(la,K,null),e(K,kg),e(K,ol),e(ol,bg),e(K,vg),e(K,vt),e(vt,qi),e(qi,yg),e(vt,Tg),e(vt,$i),e($i,Mg),e(vt,wg),e(vt,Fi),e(Fi,xg),e(vt,zg),e(K,Bg),e(K,ca),e(ca,qg),e(ca,nl),e(nl,$g),e(ca,Fg),e(K,Eg),e(K,al),e(al,jg),e(K,Cg),_(pa,K,null),e(K,Pg),e(K,Go),_(ua,Go,null),e(Go,Og),e(Go,sl),e(sl,Sg),e(K,Ag),e(K,tt),_(ha,tt,null),e(tt,Ig),e(tt,ma),e(ma,Ng),e(ma,rl),e(rl,Lg),e(ma,Dg),e(tt,Gg),e(tt,fa),e(fa,_a),e(_a,il),e(il,Ug),e(_a,Rg),e(_a,dl),e(dl,Wg),e(fa,Qg),e(fa,ga),e(ga,ll),e(ll,Xg),e(ga,Kg),e(ga,cl),e(cl,Hg),e(tt,Vg),e(tt,pl),e(pl,Jg),u(s,Kh,p),u(s,so,p),e(so,Uo),e(Uo,ul),_(ka,ul,null),e(so,Zg),e(so,hl),e(hl,Yg),u(s,Hh,p),u(s,G,p),_(ba,G,null),e(G,ek),e(G,ro),e(ro,tk),e(ro,ml),e(ml,ok),e(ro,nk),e(ro,va),e(va,ak),e(ro,sk),e(G,rk),e(G,yt),e(yt,Ei),e(Ei,ik),e(yt,dk),e(yt,ji),e(ji,lk),e(yt,ck),e(yt,Ci),e(Ci,pk),e(yt,uk),e(G,hk),e(G,ya),e(ya,mk),e(ya,fl),e(fl,fk),e(ya,_k),e(G,gk),e(G,_l),e(_l,kk),e(G,bk),_(Ta,G,null),e(G,vk),e(G,Se),_(Ma,Se,null),e(Se,yk),e(Se,gl),e(gl,Tk),e(Se,Mk),e(Se,wa),e(wa,wk),e(wa,kl),e(kl,xk),e(wa,zk),e(Se,Bk),e(Se,xa),e(xa,za),e(za,bl),e(bl,qk),e(za,$k),e(za,vl),e(vl,Fk),e(xa,Ek),e(xa,Ba),e(Ba,yl),e(yl,jk),e(Ba,Ck),e(Ba,Tl),e(Tl,Pk),e(Se,Ok),e(Se,Ml),e(Ml,Sk),e(G,Ak),e(G,Ro),_(qa,Ro,null),e(Ro,Ik),e(Ro,wl),e(wl,Nk),e(G,Lk),e(G,Wo),_($a,Wo,null),e(Wo,Dk),e(Wo,xl),e(xl,Gk),u(s,Vh,p),u(s,io,p),e(io,Qo),e(Qo,zl),_(Fa,zl,null),e(io,Uk),e(io,Bl),e(Bl,Rk),u(s,Jh,p),u(s,L,p),_(Ea,L,null),e(L,Wk),e(L,ja),e(ja,Qk),e(ja,Ca),e(Ca,Xk),e(ja,Kk),e(L,Hk),e(L,Pa),e(Pa,Vk),e(Pa,Pi),e(Pi,Jk),e(Pa,Zk),e(L,Yk),e(L,ql),e(ql,eb),e(L,tb),_(Oa,L,null),e(L,ob),e(L,ot),_(Sa,ot,null),e(ot,nb),e(ot,Aa),e(Aa,ab),e(Aa,$l),e($l,sb),e(Aa,rb),e(ot,ib),e(ot,Ia),e(Ia,Na),e(Na,Fl),e(Fl,db),e(Na,lb),e(Na,El),e(El,cb),e(Ia,pb),e(Ia,La),e(La,jl),e(jl,ub),e(La,hb),e(La,Cl),e(Cl,mb),e(ot,fb),e(ot,Pl),e(Pl,_b),e(L,gb),e(L,Xo),_(Da,Xo,null),e(Xo,kb),e(Xo,Ol),e(Ol,bb),e(L,vb),e(L,Ko),_(Ga,Ko,null),e(Ko,yb),e(Ko,Ua),e(Ua,Tb),e(Ua,Sl),e(Sl,Mb),e(Ua,wb),e(L,xb),e(L,Ho),_(Ra,Ho,null),e(Ho,zb),e(Ho,Al),e(Al,Bb),e(L,qb),e(L,Vo),_(Wa,Vo,null),e(Vo,$b),e(Vo,Il),e(Il,Fb),u(s,Zh,p),u(s,lo,p),e(lo,Jo),e(Jo,Nl),_(Qa,Nl,null),e(lo,Eb),e(lo,Ll),e(Ll,jb),u(s,Yh,p),u(s,H,p),_(Xa,H,null),e(H,Cb),e(H,co),e(co,Pb),e(co,Dl),e(Dl,Ob),e(co,Sb),e(co,Ka),e(Ka,Ab),e(co,Ib),e(H,Nb),e(H,Ha),e(Ha,Lb),e(Ha,Oi),e(Oi,Db),e(Ha,Gb),e(H,Ub),e(H,Gl),e(Gl,Rb),e(H,Wb),_(Va,H,null),e(H,Qb),e(H,Ae),_(Ja,Ae,null),e(Ae,Xb),e(Ae,Ul),e(Ul,Kb),e(Ae,Hb),e(Ae,Za),e(Za,Vb),e(Za,Rl),e(Rl,Jb),e(Za,Zb),e(Ae,Yb),e(Ae,Ya),e(Ya,es),e(es,Wl),e(Wl,ev),e(es,tv),e(es,Ql),e(Ql,ov),e(Ya,nv),e(Ya,ts),e(ts,Xl),e(Xl,av),e(ts,sv),e(ts,Kl),e(Kl,rv),e(Ae,iv),e(Ae,Hl),e(Hl,dv),e(H,lv),e(H,Zo),_(os,Zo,null),e(Zo,cv),e(Zo,Vl),e(Vl,pv),e(H,uv),e(H,Yo),_(ns,Yo,null),e(Yo,hv),e(Yo,Jl),e(Jl,mv),u(s,em,p),u(s,po,p),e(po,en),e(en,Zl),_(as,Zl,null),e(po,fv),e(po,Yl),e(Yl,_v),u(s,tm,p),u(s,He,p),_(ss,He,null),e(He,gv),e(He,rs),e(rs,kv),e(rs,Si),e(Si,bv),e(rs,vv),e(He,yv),e(He,is),e(is,Tv),e(is,ds),e(ds,Mv),e(is,wv),e(He,xv),e(He,Ie),_(ls,Ie,null),e(Ie,zv),e(Ie,uo),e(uo,Bv),e(uo,Ai),e(Ai,qv),e(uo,$v),e(uo,ec),e(ec,Fv),e(uo,Ev),e(Ie,jv),_(tn,Ie,null),e(Ie,Cv),e(Ie,tc),e(tc,Pv),e(Ie,Ov),_(cs,Ie,null),u(s,om,p),u(s,ho,p),e(ho,on),e(on,oc),_(p
s,oc,null),e(ho,Sv),e(ho,nc),e(nc,Av),u(s,nm,p),u(s,Ve,p),_(us,Ve,null),e(Ve,Iv),e(Ve,hs),e(hs,Nv),e(hs,Ii),e(Ii,Lv),e(hs,Dv),e(Ve,Gv),e(Ve,ms),e(ms,Uv),e(ms,fs),e(fs,Rv),e(ms,Wv),e(Ve,Qv),e(Ve,E),_(_s,E,null),e(E,Xv),e(E,mo),e(mo,Kv),e(mo,Ni),e(Ni,Hv),e(mo,Vv),e(mo,ac),e(ac,Jv),e(mo,Zv),e(E,Yv),_(nn,E,null),e(E,ey),e(E,sc),e(sc,ty),e(E,oy),e(E,rc),e(rc,ic),e(ic,dc),e(dc,lc),e(lc,ny),e(E,ay),e(E,cc),e(cc,pc),e(pc,uc),e(uc,hc),e(hc,sy),e(E,ry),e(E,mc),e(mc,fc),e(fc,_c),e(_c,gc),e(gc,iy),e(E,dy),e(E,kc),e(kc,bc),e(bc,gs),e(gs,an),e(an,sn),e(sn,vc),_(ks,vc,null),e(an,ly),e(an,yc),e(yc,cy),e(gs,py),e(gs,Tc),e(Tc,uy),e(E,hy),e(E,Mc),e(Mc,my),e(E,fy),e(E,wc),e(wc,xc),e(xc,fo),e(fo,zc),e(zc,_y),e(fo,gy),e(fo,rn),e(rn,dn),e(dn,Bc),_(bs,Bc,null),e(rn,ky),e(rn,qc),e(qc,by),e(fo,vy),e(fo,$c),e($c,yy),e(E,Ty),e(E,Fc),e(Fc,Ec),e(Ec,jc),e(jc,Cc),e(Cc,My),e(E,wy),e(E,Pc),e(Pc,Oc),e(Oc,Sc),e(Sc,Ac),e(Ac,xy),e(E,zy),e(E,Ic),e(Ic,Nc),e(Nc,Lc),e(Lc,Dc),e(Dc,By),u(s,am,p),u(s,_o,p),e(_o,ln),e(ln,Gc),_(vs,Gc,null),e(_o,qy),e(_o,Uc),e(Uc,$y),u(s,sm,p),u(s,Fe,p),_(ys,Fe,null),e(Fe,Fy),e(Fe,go),e(go,Ey),e(go,Rc),e(Rc,jy),e(go,Cy),e(go,Wc),e(Wc,Py),e(go,Oy),e(Fe,Sy),e(Fe,Ts),e(Ts,Ay),e(Ts,Li),e(Li,Iy),e(Ts,Ny),e(Fe,Ly),e(Fe,Ms),e(Ms,Dy),e(Ms,ws),e(ws,Gy),e(Ms,Uy),e(Fe,Ry),e(Fe,Ne),_(xs,Ne,null),e(Ne,Wy),e(Ne,ko),e(ko,Qy),e(ko,Di),e(Di,Xy),e(ko,Ky),e(ko,Qc),e(Qc,Hy),e(ko,Vy),e(Ne,Jy),_(cn,Ne,null),e(Ne,Zy),e(Ne,Xc),e(Xc,Yy),e(Ne,eT),_(zs,Ne,null),u(s,rm,p),u(s,bo,p),e(bo,pn),e(pn,Kc),_(Bs,Kc,null),e(bo,tT),e(bo,Hc),e(Hc,oT),u(s,im,p),u(s,Ee,p),_(qs,Ee,null),e(Ee,nT),e(Ee,Vc),e(Vc,aT),e(Ee,sT),e(Ee,$s),e($s,rT),e($s,Gi),e(Gi,iT),e($s,dT),e(Ee,lT),e(Ee,Fs),e(Fs,cT),e(Fs,Es),e(Es,pT),e(Fs,uT),e(Ee,hT),e(Ee,ne),_(js,ne,null),e(ne,mT),e(ne,vo),e(vo,fT),e(vo,Ui),e(Ui,_T),e(vo,gT),e(vo,Jc),e(Jc,kT),e(vo,bT),e(ne,vT),_(un,ne,null),e(ne,yT),e(ne,Zc),e(Zc,TT),e(ne,MT),_(Cs,ne,null),e(ne,wT),e(ne,Yc),e(Yc,xT),e(ne,zT),_(Ps,ne,null),u(s,dm,p),u(s,yo,p),e(yo,hn),e(hn,ep),_(Os,ep,null),e(yo,BT),e(yo,tp),e(tp,qT),u(s,lm,p),u(s,Ss,p),e(Ss,Tt),_(As,Tt,null),e(Tt,$T),e(Tt,op),e(op,FT),e(Tt,ET),_(Is,Tt,null),u(s,cm,p),u(s,To,p),e(To,mn),e(mn,np),_(Ns,np,null),e(To,jT),e(To,ap),e(ap,CT),u(s,pm,p),u(s,je,p),_(Ls,je,null),e(je,PT),e(je,Ds),e(Ds,OT),e(Ds,Ri),e(Ri,ST),e(Ds,AT),e(je,IT),e(je,Gs),e(Gs,NT),e(Gs,Us),e(Us,LT),e(Gs,DT),e(je,GT),_(fn,je,null),e(je,UT),e(je,Le),_(Rs,Le,null),e(Le,RT),e(Le,Mo),e(Mo,WT),e(Mo,Wi),e(Wi,QT),e(Mo,XT),e(Mo,sp),e(sp,KT),e(Mo,HT),e(Le,VT),_(_n,Le,null),e(Le,JT),e(Le,rp),e(rp,ZT),e(Le,YT),_(Ws,Le,null),u(s,um,p),u(s,wo,p),e(wo,gn),e(gn,ip),_(Qs,ip,null),e(wo,e1),e(wo,dp),e(dp,t1),u(s,hm,p),u(s,Ce,p),_(Xs,Ce,null),e(Ce,o1),e(Ce,Ks),e(Ks,n1),e(Ks,Qi),e(Qi,a1),e(Ks,s1),e(Ce,r1),e(Ce,Hs),e(Hs,i1),e(Hs,Vs),e(Vs,d1),e(Hs,l1),e(Ce,c1),_(kn,Ce,null),e(Ce,p1),e(Ce,O),_(Js,O,null),e(O,u1),e(O,xo),e(xo,h1),e(xo,Xi),e(Xi,m1),e(xo,f1),e(xo,lp),e(lp,_1),e(xo,g1),e(O,k1),_(bn,O,null),e(O,b1),e(O,cp),e(cp,v1),e(O,y1),e(O,pp),e(pp,up),e(up,hp),e(hp,mp),e(mp,T1),e(O,M1),e(O,fp),e(fp,_p),e(_p,gp),e(gp,kp),e(kp,w1),e(O,x1),e(O,bp),e(bp,vp),e(vp,yp),e(yp,Tp),e(Tp,z1),e(O,B1),e(O,Mp),e(Mp,wp),e(wp,Zs),e(Zs,vn),e(vn,yn),e(yn,xp),_(Ys,xp,null),e(vn,q1),e(vn,zp),e(zp,$1),e(Zs,F1),e(Zs,Bp),e(Bp,E1),e(O,j1),e(O,qp),e(qp,C1),e(O,P1),e(O,$p),e($p,Fp),e(Fp,zo),e(zo,Ep),e(Ep,O1),e(zo,S1),e(zo,Tn),e(Tn,Mn),e(Mn,jp),_(er,jp,null),e(Tn,A1),e(Tn,Cp),e(Cp,I1),e(zo,N1),e(zo,Pp),e(Pp,L1),e(O,D1),e(O,Op),e(Op,Sp),e(Sp,tr),e(tr,Ap),e(Ap,G1),e(tr,U1),e(tr,wn),e(wn,xn),e(xn,Ip),_(or,Ip,null),e(wn,R1),e(wn,Np),e(Np,W1),u(s,mm,p),u(s,Bo,p),e(Bo,zn),e
(zn,Lp),_(nr,Lp,null),e(Bo,Q1),e(Bo,Dp),e(Dp,X1),u(s,fm,p),u(s,V,p),_(ar,V,null),e(V,K1),e(V,sr),e(sr,H1),e(sr,Ki),e(Ki,V1),e(sr,J1),e(V,Z1),e(V,rr),e(rr,Y1),e(rr,ir),e(ir,eM),e(rr,tM),e(V,oM),e(V,Gp),e(Gp,nM),e(V,aM),e(V,ht),e(ht,Up),e(Up,dr),e(dr,sM),e(ht,rM),e(ht,Rp),e(Rp,lr),e(lr,iM),e(ht,dM),e(ht,Wp),e(Wp,cr),e(cr,lM),e(ht,cM),e(ht,Qp),e(Qp,pr),e(pr,pM),e(V,uM),e(V,De),_(ur,De,null),e(De,hM),e(De,qo),e(qo,mM),e(qo,Xp),e(Xp,fM),e(qo,_M),e(qo,Kp),e(Kp,gM),e(qo,kM),e(De,bM),_(Bn,De,null),e(De,vM),e(De,Hp),e(Hp,yM),e(De,TM),_(hr,De,null),e(V,MM),e(V,Mt),_(mr,Mt,null),e(Mt,wM),e(Mt,Vp),e(Vp,xM),e(Mt,zM),_(fr,Mt,null),e(V,BM),e(V,wt),_(_r,wt,null),e(wt,qM),e(wt,Jp),e(Jp,$M),e(wt,FM),_(gr,wt,null),u(s,_m,p),u(s,$o,p),e($o,qn),e(qn,Zp),_(kr,Zp,null),e($o,EM),e($o,Yp),e(Yp,jM),u(s,gm,p),u(s,J,p),_(br,J,null),e(J,CM),e(J,vr),e(vr,PM),e(vr,Hi),e(Hi,OM),e(vr,SM),e(J,AM),e(J,yr),e(yr,IM),e(yr,Tr),e(Tr,NM),e(yr,LM),e(J,DM),e(J,eu),e(eu,GM),e(J,UM),e(J,mt),e(mt,tu),e(tu,Mr),e(Mr,RM),e(mt,WM),e(mt,ou),e(ou,wr),e(wr,QM),e(mt,XM),e(mt,nu),e(nu,xr),e(xr,KM),e(mt,HM),e(mt,au),e(au,zr),e(zr,VM),e(J,JM),e(J,j),_(Br,j,null),e(j,ZM),e(j,Fo),e(Fo,YM),e(Fo,su),e(su,e0),e(Fo,t0),e(Fo,ru),e(ru,o0),e(Fo,n0),e(j,a0),_($n,j,null),e(j,s0),e(j,iu),e(iu,r0),e(j,i0),e(j,du),e(du,lu),e(lu,cu),e(cu,pu),e(pu,d0),e(j,l0),e(j,uu),e(uu,hu),e(hu,mu),e(mu,fu),e(fu,c0),e(j,p0),e(j,_u),e(_u,gu),e(gu,ku),e(ku,bu),e(bu,u0),e(j,h0),e(j,vu),e(vu,yu),e(yu,qr),e(qr,Fn),e(Fn,En),e(En,Tu),_($r,Tu,null),e(Fn,m0),e(Fn,Mu),e(Mu,f0),e(qr,_0),e(qr,wu),e(wu,g0),e(j,k0),e(j,xu),e(xu,b0),e(j,v0),e(j,zu),e(zu,Bu),e(Bu,Eo),e(Eo,qu),e(qu,y0),e(Eo,T0),e(Eo,jn),e(jn,Cn),e(Cn,$u),_(Fr,$u,null),e(jn,M0),e(jn,Fu),e(Fu,w0),e(Eo,x0),e(Eo,Eu),e(Eu,z0),e(j,B0),e(j,ju),e(ju,Cu),e(Cu,Pu),e(Pu,Ou),e(Ou,q0),e(j,$0),e(j,Su),e(Su,Au),e(Au,Iu),e(Iu,Nu),e(Nu,F0),e(j,E0),e(j,Lu),e(Lu,Du),e(Du,Gu),e(Gu,Uu),e(Uu,j0),e(J,C0),e(J,xt),_(Er,xt,null),e(xt,P0),e(xt,Ru),e(Ru,O0),e(xt,S0),_(jr,xt,null),e(J,A0),e(J,zt),_(Cr,zt,null),e(zt,I0),e(zt,Wu),e(Wu,N0),e(zt,L0),_(Pr,zt,null),u(s,km,p),u(s,jo,p),e(jo,Pn),e(Pn,Qu),_(Or,Qu,null),e(jo,D0),e(jo,Xu),e(Xu,G0),u(s,bm,p),u(s,U,p),_(Sr,U,null),e(U,U0),e(U,Ku),e(Ku,R0),e(U,W0),e(U,Ar),e(Ar,Q0),e(Ar,Vi),e(Vi,X0),e(Ar,K0),e(U,H0),e(U,Ir),e(Ir,V0),e(Ir,Nr),e(Nr,J0),e(Ir,Z0),e(U,Y0),e(U,Hu),e(Hu,ew),e(U,tw),e(U,ft),e(ft,Vu),e(Vu,Lr),e(Lr,ow),e(ft,nw),e(ft,Ju),e(Ju,Dr),e(Dr,aw),e(ft,sw),e(ft,Zu),e(Zu,Gr),e(Gr,rw),e(ft,iw),e(ft,Yu),e(Yu,Ur),e(Ur,dw),e(U,lw),e(U,Ge),_(Rr,Ge,null),e(Ge,cw),e(Ge,Co),e(Co,pw),e(Co,eh),e(eh,uw),e(Co,hw),e(Co,th),e(th,mw),e(Co,fw),e(Ge,_w),_(On,Ge,null),e(Ge,gw),e(Ge,oh),e(oh,kw),e(Ge,bw),_(Wr,Ge,null),e(U,vw),e(U,Bt),_(Qr,Bt,null),e(Bt,yw),e(Bt,nh),e(nh,Tw),e(Bt,Mw),_(Xr,Bt,null),e(U,ww),e(U,qt),_(Kr,qt,null),e(qt,xw),e(qt,ah),e(ah,zw),e(qt,Bw),_(Hr,qt,null),u(s,vm,p),u(s,Po,p),e(Po,Sn),e(Sn,sh),_(Vr,sh,null),e(Po,qw),e(Po,rh),e(rh,$w),u(s,ym,p),u(s,R,p),_(Jr,R,null),e(R,Fw),e(R,Oo),e(Oo,Ew),e(Oo,ih),e(ih,jw),e(Oo,Cw),e(Oo,dh),e(dh,Pw),e(Oo,Ow),e(R,Sw),e(R,Zr),e(Zr,Aw),e(Zr,Ji),e(Ji,Iw),e(Zr,Nw),e(R,Lw),e(R,Yr),e(Yr,Dw),e(Yr,ei),e(ei,Gw),e(Yr,Uw),e(R,Rw),e(R,lh),e(lh,Ww),e(R,Qw),e(R,_t),e(_t,ch),e(ch,ti),e(ti,Xw),e(_t,Kw),e(_t,ph),e(ph,oi),e(oi,Hw),e(_t,Vw),e(_t,uh),e(uh,ni),e(ni,Jw),e(_t,Zw),e(_t,hh),e(hh,ai),e(ai,Yw),e(R,ex),e(R,Ue),_(si,Ue,null),e(Ue,tx),e(Ue,So),e(So,ox),e(So,mh),e(mh,nx),e(So,ax),e(So,fh),e(fh,sx),e(So,rx),e(Ue,ix),_(An,Ue,null),e(Ue,dx),e(Ue,_h),e(_h,lx),e(Ue,cx),_(ri,Ue,null),e(R,px),e(R,$t),_(ii,$t,null),e($t,ux),e($t,gh),e(gh,hx),e($t,mx),_(di,$t,null),e(R,fx),e(R,Ft),_(li,Ft,null),
e(Ft,_x),e(Ft,kh),e(kh,gx),e(Ft,kx),_(ci,Ft,null),Tm=!0},p(s,[p]){const pi={};p&2&&(pi.$$scope={dirty:p,ctx:s}),tn.$set(pi);const bh={};p&2&&(bh.$$scope={dirty:p,ctx:s}),nn.$set(bh);const vh={};p&2&&(vh.$$scope={dirty:p,ctx:s}),cn.$set(vh);const yh={};p&2&&(yh.$$scope={dirty:p,ctx:s}),un.$set(yh);const In={};p&2&&(In.$$scope={dirty:p,ctx:s}),fn.$set(In);const Th={};p&2&&(Th.$$scope={dirty:p,ctx:s}),_n.$set(Th);const Mh={};p&2&&(Mh.$$scope={dirty:p,ctx:s}),kn.$set(Mh);const ui={};p&2&&(ui.$$scope={dirty:p,ctx:s}),bn.$set(ui);const wh={};p&2&&(wh.$$scope={dirty:p,ctx:s}),Bn.$set(wh);const xh={};p&2&&(xh.$$scope={dirty:p,ctx:s}),$n.$set(xh);const zh={};p&2&&(zh.$$scope={dirty:p,ctx:s}),On.$set(zh);const hi={};p&2&&(hi.$$scope={dirty:p,ctx:s}),An.$set(hi)},i(s){Tm||(g(T.$$.fragment,s),g(ie.$$.fragment,s),g(Xe.$$.fragment,s),g(Hn.$$.fragment,s),g(Jn.$$.fragment,s),g(Zn.$$.fragment,s),g(ea.$$.fragment,s),g(ta.$$.fragment,s),g(na.$$.fragment,s),g(aa.$$.fragment,s),g(sa.$$.fragment,s),g(ia.$$.fragment,s),g(da.$$.fragment,s),g(la.$$.fragment,s),g(pa.$$.fragment,s),g(ua.$$.fragment,s),g(ha.$$.fragment,s),g(ka.$$.fragment,s),g(ba.$$.fragment,s),g(Ta.$$.fragment,s),g(Ma.$$.fragment,s),g(qa.$$.fragment,s),g($a.$$.fragment,s),g(Fa.$$.fragment,s),g(Ea.$$.fragment,s),g(Oa.$$.fragment,s),g(Sa.$$.fragment,s),g(Da.$$.fragment,s),g(Ga.$$.fragment,s),g(Ra.$$.fragment,s),g(Wa.$$.fragment,s),g(Qa.$$.fragment,s),g(Xa.$$.fragment,s),g(Va.$$.fragment,s),g(Ja.$$.fragment,s),g(os.$$.fragment,s),g(ns.$$.fragment,s),g(as.$$.fragment,s),g(ss.$$.fragment,s),g(ls.$$.fragment,s),g(tn.$$.fragment,s),g(cs.$$.fragment,s),g(ps.$$.fragment,s),g(us.$$.fragment,s),g(_s.$$.fragment,s),g(nn.$$.fragment,s),g(ks.$$.fragment,s),g(bs.$$.fragment,s),g(vs.$$.fragment,s),g(ys.$$.fragment,s),g(xs.$$.fragment,s),g(cn.$$.fragment,s),g(zs.$$.fragment,s),g(Bs.$$.fragment,s),g(qs.$$.fragment,s),g(js.$$.fragment,s),g(un.$$.fragment,s),g(Cs.$$.fragment,s),g(Ps.$$.fragment,s),g(Os.$$.fragment,s),g(As.$$.fragment,s),g(Is.$$.fragment,s),g(Ns.$$.fragment,s),g(Ls.$$.fragment,s),g(fn.$$.fragment,s),g(Rs.$$.fragment,s),g(_n.$$.fragment,s),g(Ws.$$.fragment,s),g(Qs.$$.fragment,s),g(Xs.$$.fragment,s),g(kn.$$.fragment,s),g(Js.$$.fragment,s),g(bn.$$.fragment,s),g(Ys.$$.fragment,s),g(er.$$.fragment,s),g(or.$$.fragment,s),g(nr.$$.fragment,s),g(ar.$$.fragment,s),g(ur.$$.fragment,s),g(Bn.$$.fragment,s),g(hr.$$.fragment,s),g(mr.$$.fragment,s),g(fr.$$.fragment,s),g(_r.$$.fragment,s),g(gr.$$.fragment,s),g(kr.$$.fragment,s),g(br.$$.fragment,s),g(Br.$$.fragment,s),g($n.$$.fragment,s),g($r.$$.fragment,s),g(Fr.$$.fragment,s),g(Er.$$.fragment,s),g(jr.$$.fragment,s),g(Cr.$$.fragment,s),g(Pr.$$.fragment,s),g(Or.$$.fragment,s),g(Sr.$$.fragment,s),g(Rr.$$.fragment,s),g(On.$$.fragment,s),g(Wr.$$.fragment,s),g(Qr.$$.fragment,s),g(Xr.$$.fragment,s),g(Kr.$$.fragment,s),g(Hr.$$.fragment,s),g(Vr.$$.fragment,s),g(Jr.$$.fragment,s),g(si.$$.fragment,s),g(An.$$.fragment,s),g(ri.$$.fragment,s),g(ii.$$.fragment,s),g(di.$$.fragment,s),g(li.$$.fragment,s),g(ci.$$.fragment,s),Tm=!0)},o(s){k(T.$$.fragment,s),k(ie.$$.fragment,s),k(Xe.$$.fragment,s),k(Hn.$$.fragment,s),k(Jn.$$.fragment,s),k(Zn.$$.fragment,s),k(ea.$$.fragment,s),k(ta.$$.fragment,s),k(na.$$.fragment,s),k(aa.$$.fragment,s),k(sa.$$.fragment,s),k(ia.$$.fragment,s),k(da.$$.fragment,s),k(la.$$.fragment,s),k(pa.$$.fragment,s),k(ua.$$.fragment,s),k(ha.$$.fragment,s),k(ka.$$.fragment,s),k(ba.$$.fragment,s),k(Ta.$$.fragment,s),k(Ma.$$.fragment,s),k(qa.$$.fragment,s),k($a.$$.fragment,s),k(Fa.$$.fragment,s),k(Ea.$$.fragment,s),k(Oa.$$.fra
gment,s),k(Sa.$$.fragment,s),k(Da.$$.fragment,s),k(Ga.$$.fragment,s),k(Ra.$$.fragment,s),k(Wa.$$.fragment,s),k(Qa.$$.fragment,s),k(Xa.$$.fragment,s),k(Va.$$.fragment,s),k(Ja.$$.fragment,s),k(os.$$.fragment,s),k(ns.$$.fragment,s),k(as.$$.fragment,s),k(ss.$$.fragment,s),k(ls.$$.fragment,s),k(tn.$$.fragment,s),k(cs.$$.fragment,s),k(ps.$$.fragment,s),k(us.$$.fragment,s),k(_s.$$.fragment,s),k(nn.$$.fragment,s),k(ks.$$.fragment,s),k(bs.$$.fragment,s),k(vs.$$.fragment,s),k(ys.$$.fragment,s),k(xs.$$.fragment,s),k(cn.$$.fragment,s),k(zs.$$.fragment,s),k(Bs.$$.fragment,s),k(qs.$$.fragment,s),k(js.$$.fragment,s),k(un.$$.fragment,s),k(Cs.$$.fragment,s),k(Ps.$$.fragment,s),k(Os.$$.fragment,s),k(As.$$.fragment,s),k(Is.$$.fragment,s),k(Ns.$$.fragment,s),k(Ls.$$.fragment,s),k(fn.$$.fragment,s),k(Rs.$$.fragment,s),k(_n.$$.fragment,s),k(Ws.$$.fragment,s),k(Qs.$$.fragment,s),k(Xs.$$.fragment,s),k(kn.$$.fragment,s),k(Js.$$.fragment,s),k(bn.$$.fragment,s),k(Ys.$$.fragment,s),k(er.$$.fragment,s),k(or.$$.fragment,s),k(nr.$$.fragment,s),k(ar.$$.fragment,s),k(ur.$$.fragment,s),k(Bn.$$.fragment,s),k(hr.$$.fragment,s),k(mr.$$.fragment,s),k(fr.$$.fragment,s),k(_r.$$.fragment,s),k(gr.$$.fragment,s),k(kr.$$.fragment,s),k(br.$$.fragment,s),k(Br.$$.fragment,s),k($n.$$.fragment,s),k($r.$$.fragment,s),k(Fr.$$.fragment,s),k(Er.$$.fragment,s),k(jr.$$.fragment,s),k(Cr.$$.fragment,s),k(Pr.$$.fragment,s),k(Or.$$.fragment,s),k(Sr.$$.fragment,s),k(Rr.$$.fragment,s),k(On.$$.fragment,s),k(Wr.$$.fragment,s),k(Qr.$$.fragment,s),k(Xr.$$.fragment,s),k(Kr.$$.fragment,s),k(Hr.$$.fragment,s),k(Vr.$$.fragment,s),k(Jr.$$.fragment,s),k(si.$$.fragment,s),k(An.$$.fragment,s),k(ri.$$.fragment,s),k(ii.$$.fragment,s),k(di.$$.fragment,s),k(li.$$.fragment,s),k(ci.$$.fragment,s),Tm=!1},d(s){t(h),s&&t(z),s&&t(v),b(T),s&&t(Pe),s&&t(F),s&&t(Je),s&&t(ee),b(ie),s&&t(he),s&&t(me),s&&t(Q),s&&t(Oe),s&&t(Ze),s&&t(de),s&&t(Ye),s&&t(w),b(Xe),s&&t(gt),s&&t(Z),s&&t(qh),s&&t(kt),s&&t($h),s&&t(gi),s&&t(Fh),b(Hn,s),s&&t(Eh),s&&t(ki),s&&t(jh),b(Jn,s),s&&t(Ch),s&&t(Yt),b(Zn),s&&t(Ph),s&&t(bt),s&&t(Oh),s&&t(vi),s&&t(Sh),s&&t(yi),s&&t(Ah),s&&t(eo),b(ea),s&&t(Ih),s&&t(et),s&&t(Nh),s&&t(No),s&&t(Lh),s&&t(Mi),s&&t(Dh),b(ta,s),s&&t(Gh),s&&t(wi),s&&t(Uh),b(na,s),s&&t(Rh),s&&t(to),b(aa),s&&t(Wh),s&&t($e),b(sa),b(ia),s&&t(Qh),s&&t(ao),b(da),s&&t(Xh),s&&t(K),b(la),b(pa),b(ua),b(ha),s&&t(Kh),s&&t(so),b(ka),s&&t(Hh),s&&t(G),b(ba),b(Ta),b(Ma),b(qa),b($a),s&&t(Vh),s&&t(io),b(Fa),s&&t(Jh),s&&t(L),b(Ea),b(Oa),b(Sa),b(Da),b(Ga),b(Ra),b(Wa),s&&t(Zh),s&&t(lo),b(Qa),s&&t(Yh),s&&t(H),b(Xa),b(Va),b(Ja),b(os),b(ns),s&&t(em),s&&t(po),b(as),s&&t(tm),s&&t(He),b(ss),b(ls),b(tn),b(cs),s&&t(om),s&&t(ho),b(ps),s&&t(nm),s&&t(Ve),b(us),b(_s),b(nn),b(ks),b(bs),s&&t(am),s&&t(_o),b(vs),s&&t(sm),s&&t(Fe),b(ys),b(xs),b(cn),b(zs),s&&t(rm),s&&t(bo),b(Bs),s&&t(im),s&&t(Ee),b(qs),b(js),b(un),b(Cs),b(Ps),s&&t(dm),s&&t(yo),b(Os),s&&t(lm),s&&t(Ss),b(As),b(Is),s&&t(cm),s&&t(To),b(Ns),s&&t(pm),s&&t(je),b(Ls),b(fn),b(Rs),b(_n),b(Ws),s&&t(um),s&&t(wo),b(Qs),s&&t(hm),s&&t(Ce),b(Xs),b(kn),b(Js),b(bn),b(Ys),b(er),b(or),s&&t(mm),s&&t(Bo),b(nr),s&&t(fm),s&&t(V),b(ar),b(ur),b(Bn),b(hr),b(mr),b(fr),b(_r),b(gr),s&&t(_m),s&&t($o),b(kr),s&&t(gm),s&&t(J),b(br),b(Br),b($n),b($r),b(Fr),b(Er),b(jr),b(Cr),b(Pr),s&&t(km),s&&t(jo),b(Or),s&&t(bm),s&&t(U),b(Sr),b(Rr),b(On),b(Wr),b(Qr),b(Xr),b(Kr),b(Hr),s&&t(vm),s&&t(Po),b(Vr),s&&t(ym),s&&t(R),b(Jr),b(si),b(An),b(ri),b(ii),b(di),b(li),b(ci)}}}const T$={local:"mbart-and-mbart50",sections:[{local:"overview-of-mbart",sections:[{local:"training-of-mbart",title:"Training of 
MBart"}],title:"Overview of MBart"},{local:"overview-of-mbart50",sections:[{local:"training-of-mbart50",title:"Training of MBart-50"}],title:"Overview of MBart-50"},{local:"transformers.MBartConfig",title:"MBartConfig"},{local:"transformers.MBartTokenizer",title:"MBartTokenizer"},{local:"transformers.MBartTokenizerFast",title:"MBartTokenizerFast"},{local:"transformers.MBart50Tokenizer",title:"MBart50Tokenizer"},{local:"transformers.MBart50TokenizerFast",title:"MBart50TokenizerFast"},{local:"transformers.MBartModel",title:"MBartModel"},{local:"transformers.MBartForConditionalGeneration",title:"MBartForConditionalGeneration"},{local:"transformers.MBartForQuestionAnswering",title:"MBartForQuestionAnswering"},{local:"transformers.MBartForSequenceClassification",title:"MBartForSequenceClassification"},{local:"transformers.MBartForCausalLM",title:"MBartForCausalLM"},{local:"transformers.TFMBartModel",title:"TFMBartModel"},{local:"transformers.TFMBartForConditionalGeneration",title:"TFMBartForConditionalGeneration"},{local:"transformers.FlaxMBartModel",title:"FlaxMBartModel"},{local:"transformers.FlaxMBartForConditionalGeneration",title:"FlaxMBartForConditionalGeneration"},{local:"transformers.FlaxMBartForSequenceClassification",title:"FlaxMBartForSequenceClassification"},{local:"transformers.FlaxMBartForQuestionAnswering",title:"FlaxMBartForQuestionAnswering"}],title:"MBart and MBart-50"};function M$(P,h,z){let{fw:v}=h;return P.$$set=M=>{"fw"in M&&z(0,v=M.fw)},[v]}class F$ extends s${constructor(h){super();r$(this,h,M$,y$,i$,{fw:0})}}export{F$ as default,T$ as metadata};
9,978
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/flaubert.mdx-a2820a92.js
import{S as Eb,i as zb,s as Cb,e as a,k as l,w as y,t as o,L as qb,c as r,d as t,m as d,a as i,x as F,h as n,b as h,J as e,g as u,y as M,q as $,o as x,B as L}from"../../chunks/vendor-b1433968.js";import{T as Le}from"../../chunks/Tip-c3840994.js";import{D as ie}from"../../chunks/Docstring-ff504c58.js";import{C as nt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Oe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Pb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function jb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Xb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Ab(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function 
Sb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Nb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Ib(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Db(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne;return{c(){p=a("p"),k=o("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),w=a("li"),le=o("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),E=a("p"),G=o("This second option is useful when using "),H=a("code"),Z=o("tf.keras.Model.fit"),de=o(` method which currently requires having all the tensors in the first argument of the model call function: `),Q=a("code"),ce=o("model(inputs)"),se=o("."),U=l(),X=a("p"),ee=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),te=o("a single Tensor with "),R=a("code"),ae=o("input_ids"),oe=o(" only and nothing else: "),B=a("code"),pe=o("model(inputs_ids)"),re=l(),C=a("li"),he=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=a("code"),ue=o("model([input_ids, attention_mask])"),me=o(" or "),I=a("code"),fe=o("model([input_ids, attention_mask, 
token_type_ids])"),ge=l(),j=a("li"),K=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ne=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=r(c,"P",{});var T=i(p);k=n(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),m=d(c),g=r(c,"UL",{});var V=i(g);v=r(V,"LI",{});var we=i(v);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),f=d(V),w=r(V,"LI",{});var Te=i(w);le=n(Te,"having all inputs as a list, tuple or dict in the first positional arguments."),Te.forEach(t),V.forEach(t),Y=d(c),E=r(c,"P",{});var P=i(E);G=n(P,"This second option is useful when using "),H=r(P,"CODE",{});var ke=i(H);Z=n(ke,"tf.keras.Model.fit"),ke.forEach(t),de=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),Q=r(P,"CODE",{});var ye=i(Q);ce=n(ye,"model(inputs)"),ye.forEach(t),se=n(P,"."),P.forEach(t),U=d(c),X=r(c,"P",{});var _e=i(X);ee=n(_e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e.forEach(t),J=d(c),z=r(c,"UL",{});var A=i(z);q=r(A,"LI",{});var D=i(q);te=n(D,"a single Tensor with "),R=r(D,"CODE",{});var Fe=i(R);ae=n(Fe,"input_ids"),Fe.forEach(t),oe=n(D," only and nothing else: "),B=r(D,"CODE",{});var Me=i(B);pe=n(Me,"model(inputs_ids)"),Me.forEach(t),D.forEach(t),re=d(A),C=r(A,"LI",{});var W=i(C);he=n(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=r(W,"CODE",{});var $e=i(N);ue=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),me=n(W," or "),I=r(W,"CODE",{});var xe=i(I);fe=n(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),W.forEach(t),ge=d(A),j=r(A,"LI",{});var be=i(j);K=n(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(be,"CODE",{});var ve=i(S);ne=n(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),be.forEach(t),A.forEach(t)},m(c,T){u(c,p,T),e(p,k),u(c,m,T),u(c,g,T),e(g,v),e(v,b),e(g,f),e(g,w),e(w,le),u(c,Y,T),u(c,E,T),e(E,G),e(E,H),e(H,Z),e(E,de),e(E,Q),e(Q,ce),e(E,se),u(c,U,T),u(c,X,T),e(X,ee),u(c,J,T),u(c,z,T),e(z,q),e(q,te),e(q,R),e(R,ae),e(q,oe),e(q,B),e(B,pe),e(z,re),e(z,C),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(z,ge),e(z,j),e(j,K),e(j,S),e(S,ne)},d(c){c&&t(p),c&&t(m),c&&t(g),c&&t(Y),c&&t(E),c&&t(U),c&&t(X),c&&t(J),c&&t(z)}}}function Wb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Ob(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne;return{c(){p=a("p"),k=o("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=o("having all 
inputs as keyword arguments (like PyTorch models), or"),f=l(),w=a("li"),le=o("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),E=a("p"),G=o("This second option is useful when using "),H=a("code"),Z=o("tf.keras.Model.fit"),de=o(` method which currently requires having all the tensors in the first argument of the model call function: `),Q=a("code"),ce=o("model(inputs)"),se=o("."),U=l(),X=a("p"),ee=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),te=o("a single Tensor with "),R=a("code"),ae=o("input_ids"),oe=o(" only and nothing else: "),B=a("code"),pe=o("model(inputs_ids)"),re=l(),C=a("li"),he=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=a("code"),ue=o("model([input_ids, attention_mask])"),me=o(" or "),I=a("code"),fe=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),j=a("li"),K=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ne=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=r(c,"P",{});var T=i(p);k=n(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),m=d(c),g=r(c,"UL",{});var V=i(g);v=r(V,"LI",{});var we=i(v);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),f=d(V),w=r(V,"LI",{});var Te=i(w);le=n(Te,"having all inputs as a list, tuple or dict in the first positional arguments."),Te.forEach(t),V.forEach(t),Y=d(c),E=r(c,"P",{});var P=i(E);G=n(P,"This second option is useful when using "),H=r(P,"CODE",{});var ke=i(H);Z=n(ke,"tf.keras.Model.fit"),ke.forEach(t),de=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),Q=r(P,"CODE",{});var ye=i(Q);ce=n(ye,"model(inputs)"),ye.forEach(t),se=n(P,"."),P.forEach(t),U=d(c),X=r(c,"P",{});var _e=i(X);ee=n(_e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e.forEach(t),J=d(c),z=r(c,"UL",{});var A=i(z);q=r(A,"LI",{});var D=i(q);te=n(D,"a single Tensor with "),R=r(D,"CODE",{});var Fe=i(R);ae=n(Fe,"input_ids"),Fe.forEach(t),oe=n(D," only and nothing else: "),B=r(D,"CODE",{});var Me=i(B);pe=n(Me,"model(inputs_ids)"),Me.forEach(t),D.forEach(t),re=d(A),C=r(A,"LI",{});var W=i(C);he=n(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=r(W,"CODE",{});var $e=i(N);ue=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),me=n(W," or "),I=r(W,"CODE",{});var xe=i(I);fe=n(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),W.forEach(t),ge=d(A),j=r(A,"LI",{});var be=i(j);K=n(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(be,"CODE",{});var ve=i(S);ne=n(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),be.forEach(t),A.forEach(t)},m(c,T){u(c,p,T),e(p,k),u(c,m,T),u(c,g,T),e(g,v),e(v,b),e(g,f),e(g,w),e(w,le),u(c,Y,T),u(c,E,T),e(E,G),e(E,H),e(H,Z),e(E,de),e(E,Q),e(Q,ce),e(E,se),u(c,U,T),u(c,X,T),e(X,ee),u(c,J,T),u(c,z,T),e(z,q),e(q,te),e(q,R),e(R,ae),e(q,oe),e(q,B),e(B,pe),e(z,re),e(z,C),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(z,ge),e(z,j),e(j,K),e(j,S),e(S,ne)},d(c){c&&t(p),c&&t(m),c&&t(g),c&&t(Y),c&&t(E),c&&t(U),c&&t(X),c&&t(J),c&&t(z)}}}function Hb(O){let 
p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Qb(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne;return{c(){p=a("p"),k=o("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),w=a("li"),le=o("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),E=a("p"),G=o("This second option is useful when using "),H=a("code"),Z=o("tf.keras.Model.fit"),de=o(` method which currently requires having all the tensors in the first argument of the model call function: `),Q=a("code"),ce=o("model(inputs)"),se=o("."),U=l(),X=a("p"),ee=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),te=o("a single Tensor with "),R=a("code"),ae=o("input_ids"),oe=o(" only and nothing else: "),B=a("code"),pe=o("model(inputs_ids)"),re=l(),C=a("li"),he=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=a("code"),ue=o("model([input_ids, attention_mask])"),me=o(" or "),I=a("code"),fe=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),j=a("li"),K=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ne=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=r(c,"P",{});var T=i(p);k=n(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),m=d(c),g=r(c,"UL",{});var V=i(g);v=r(V,"LI",{});var we=i(v);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),f=d(V),w=r(V,"LI",{});var Te=i(w);le=n(Te,"having all inputs as a list, tuple or dict in the first positional arguments."),Te.forEach(t),V.forEach(t),Y=d(c),E=r(c,"P",{});var P=i(E);G=n(P,"This second option is useful when using "),H=r(P,"CODE",{});var ke=i(H);Z=n(ke,"tf.keras.Model.fit"),ke.forEach(t),de=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),Q=r(P,"CODE",{});var ye=i(Q);ce=n(ye,"model(inputs)"),ye.forEach(t),se=n(P,"."),P.forEach(t),U=d(c),X=r(c,"P",{});var _e=i(X);ee=n(_e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e.forEach(t),J=d(c),z=r(c,"UL",{});var A=i(z);q=r(A,"LI",{});var D=i(q);te=n(D,"a single Tensor with "),R=r(D,"CODE",{});var Fe=i(R);ae=n(Fe,"input_ids"),Fe.forEach(t),oe=n(D," only and nothing else: "),B=r(D,"CODE",{});var Me=i(B);pe=n(Me,"model(inputs_ids)"),Me.forEach(t),D.forEach(t),re=d(A),C=r(A,"LI",{});var W=i(C);he=n(W,`a list of varying length with one or several input Tensors IN THE ORDER 
given in the docstring: `),N=r(W,"CODE",{});var $e=i(N);ue=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),me=n(W," or "),I=r(W,"CODE",{});var xe=i(I);fe=n(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),W.forEach(t),ge=d(A),j=r(A,"LI",{});var be=i(j);K=n(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(be,"CODE",{});var ve=i(S);ne=n(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),be.forEach(t),A.forEach(t)},m(c,T){u(c,p,T),e(p,k),u(c,m,T),u(c,g,T),e(g,v),e(v,b),e(g,f),e(g,w),e(w,le),u(c,Y,T),u(c,E,T),e(E,G),e(E,H),e(H,Z),e(E,de),e(E,Q),e(Q,ce),e(E,se),u(c,U,T),u(c,X,T),e(X,ee),u(c,J,T),u(c,z,T),e(z,q),e(q,te),e(q,R),e(R,ae),e(q,oe),e(q,B),e(B,pe),e(z,re),e(z,C),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(z,ge),e(z,j),e(j,K),e(j,S),e(S,ne)},d(c){c&&t(p),c&&t(m),c&&t(g),c&&t(Y),c&&t(E),c&&t(U),c&&t(X),c&&t(J),c&&t(z)}}}function Bb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Rb(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne;return{c(){p=a("p"),k=o("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),w=a("li"),le=o("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),E=a("p"),G=o("This second option is useful when using "),H=a("code"),Z=o("tf.keras.Model.fit"),de=o(` method which currently requires having all the tensors in the first argument of the model call function: `),Q=a("code"),ce=o("model(inputs)"),se=o("."),U=l(),X=a("p"),ee=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),te=o("a single Tensor with "),R=a("code"),ae=o("input_ids"),oe=o(" only and nothing else: "),B=a("code"),pe=o("model(inputs_ids)"),re=l(),C=a("li"),he=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=a("code"),ue=o("model([input_ids, attention_mask])"),me=o(" or "),I=a("code"),fe=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),j=a("li"),K=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ne=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=r(c,"P",{});var T=i(p);k=n(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),m=d(c),g=r(c,"UL",{});var V=i(g);v=r(V,"LI",{});var we=i(v);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),f=d(V),w=r(V,"LI",{});var Te=i(w);le=n(Te,"having all inputs as a list, tuple or dict in the first positional 
arguments."),Te.forEach(t),V.forEach(t),Y=d(c),E=r(c,"P",{});var P=i(E);G=n(P,"This second option is useful when using "),H=r(P,"CODE",{});var ke=i(H);Z=n(ke,"tf.keras.Model.fit"),ke.forEach(t),de=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),Q=r(P,"CODE",{});var ye=i(Q);ce=n(ye,"model(inputs)"),ye.forEach(t),se=n(P,"."),P.forEach(t),U=d(c),X=r(c,"P",{});var _e=i(X);ee=n(_e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e.forEach(t),J=d(c),z=r(c,"UL",{});var A=i(z);q=r(A,"LI",{});var D=i(q);te=n(D,"a single Tensor with "),R=r(D,"CODE",{});var Fe=i(R);ae=n(Fe,"input_ids"),Fe.forEach(t),oe=n(D," only and nothing else: "),B=r(D,"CODE",{});var Me=i(B);pe=n(Me,"model(inputs_ids)"),Me.forEach(t),D.forEach(t),re=d(A),C=r(A,"LI",{});var W=i(C);he=n(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=r(W,"CODE",{});var $e=i(N);ue=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),me=n(W," or "),I=r(W,"CODE",{});var xe=i(I);fe=n(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),W.forEach(t),ge=d(A),j=r(A,"LI",{});var be=i(j);K=n(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(be,"CODE",{});var ve=i(S);ne=n(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),be.forEach(t),A.forEach(t)},m(c,T){u(c,p,T),e(p,k),u(c,m,T),u(c,g,T),e(g,v),e(v,b),e(g,f),e(g,w),e(w,le),u(c,Y,T),u(c,E,T),e(E,G),e(E,H),e(H,Z),e(E,de),e(E,Q),e(Q,ce),e(E,se),u(c,U,T),u(c,X,T),e(X,ee),u(c,J,T),u(c,z,T),e(z,q),e(q,te),e(q,R),e(R,ae),e(q,oe),e(q,B),e(B,pe),e(z,re),e(z,C),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(z,ge),e(z,j),e(j,K),e(j,S),e(S,ne)},d(c){c&&t(p),c&&t(m),c&&t(g),c&&t(Y),c&&t(E),c&&t(U),c&&t(X),c&&t(J),c&&t(z)}}}function Ub(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Vb(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne;return{c(){p=a("p"),k=o("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),w=a("li"),le=o("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),E=a("p"),G=o("This second option is useful when using "),H=a("code"),Z=o("tf.keras.Model.fit"),de=o(` method which currently requires having all the tensors in the first argument of the model call function: `),Q=a("code"),ce=o("model(inputs)"),se=o("."),U=l(),X=a("p"),ee=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument 
:`),J=l(),z=a("ul"),q=a("li"),te=o("a single Tensor with "),R=a("code"),ae=o("input_ids"),oe=o(" only and nothing else: "),B=a("code"),pe=o("model(inputs_ids)"),re=l(),C=a("li"),he=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=a("code"),ue=o("model([input_ids, attention_mask])"),me=o(" or "),I=a("code"),fe=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),j=a("li"),K=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ne=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=r(c,"P",{});var T=i(p);k=n(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),m=d(c),g=r(c,"UL",{});var V=i(g);v=r(V,"LI",{});var we=i(v);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),f=d(V),w=r(V,"LI",{});var Te=i(w);le=n(Te,"having all inputs as a list, tuple or dict in the first positional arguments."),Te.forEach(t),V.forEach(t),Y=d(c),E=r(c,"P",{});var P=i(E);G=n(P,"This second option is useful when using "),H=r(P,"CODE",{});var ke=i(H);Z=n(ke,"tf.keras.Model.fit"),ke.forEach(t),de=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),Q=r(P,"CODE",{});var ye=i(Q);ce=n(ye,"model(inputs)"),ye.forEach(t),se=n(P,"."),P.forEach(t),U=d(c),X=r(c,"P",{});var _e=i(X);ee=n(_e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e.forEach(t),J=d(c),z=r(c,"UL",{});var A=i(z);q=r(A,"LI",{});var D=i(q);te=n(D,"a single Tensor with "),R=r(D,"CODE",{});var Fe=i(R);ae=n(Fe,"input_ids"),Fe.forEach(t),oe=n(D," only and nothing else: "),B=r(D,"CODE",{});var Me=i(B);pe=n(Me,"model(inputs_ids)"),Me.forEach(t),D.forEach(t),re=d(A),C=r(A,"LI",{});var W=i(C);he=n(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=r(W,"CODE",{});var $e=i(N);ue=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),me=n(W," or "),I=r(W,"CODE",{});var xe=i(I);fe=n(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),W.forEach(t),ge=d(A),j=r(A,"LI",{});var be=i(j);K=n(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(be,"CODE",{});var ve=i(S);ne=n(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),be.forEach(t),A.forEach(t)},m(c,T){u(c,p,T),e(p,k),u(c,m,T),u(c,g,T),e(g,v),e(v,b),e(g,f),e(g,w),e(w,le),u(c,Y,T),u(c,E,T),e(E,G),e(E,H),e(H,Z),e(E,de),e(E,Q),e(Q,ce),e(E,se),u(c,U,T),u(c,X,T),e(X,ee),u(c,J,T),u(c,z,T),e(z,q),e(q,te),e(q,R),e(R,ae),e(q,oe),e(q,B),e(B,pe),e(z,re),e(z,C),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(z,ge),e(z,j),e(j,K),e(j,S),e(S,ne)},d(c){c&&t(p),c&&t(m),c&&t(g),c&&t(Y),c&&t(E),c&&t(U),c&&t(X),c&&t(J),c&&t(z)}}}function Yb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes 
care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Jb(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne;return{c(){p=a("p"),k=o("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),b=o("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),w=a("li"),le=o("having all inputs as a list, tuple or dict in the first positional arguments."),Y=l(),E=a("p"),G=o("This second option is useful when using "),H=a("code"),Z=o("tf.keras.Model.fit"),de=o(` method which currently requires having all the tensors in the first argument of the model call function: `),Q=a("code"),ce=o("model(inputs)"),se=o("."),U=l(),X=a("p"),ee=o(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),J=l(),z=a("ul"),q=a("li"),te=o("a single Tensor with "),R=a("code"),ae=o("input_ids"),oe=o(" only and nothing else: "),B=a("code"),pe=o("model(inputs_ids)"),re=l(),C=a("li"),he=o(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=a("code"),ue=o("model([input_ids, attention_mask])"),me=o(" or "),I=a("code"),fe=o("model([input_ids, attention_mask, token_type_ids])"),ge=l(),j=a("li"),K=o(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=a("code"),ne=o('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(c){p=r(c,"P",{});var T=i(p);k=n(T,"TF 2.0 models accepts two formats as inputs:"),T.forEach(t),m=d(c),g=r(c,"UL",{});var V=i(g);v=r(V,"LI",{});var we=i(v);b=n(we,"having all inputs as keyword arguments (like PyTorch models), or"),we.forEach(t),f=d(V),w=r(V,"LI",{});var Te=i(w);le=n(Te,"having all inputs as a list, tuple or dict in the first positional arguments."),Te.forEach(t),V.forEach(t),Y=d(c),E=r(c,"P",{});var P=i(E);G=n(P,"This second option is useful when using "),H=r(P,"CODE",{});var ke=i(H);Z=n(ke,"tf.keras.Model.fit"),ke.forEach(t),de=n(P,` method which currently requires having all the tensors in the first argument of the model call function: `),Q=r(P,"CODE",{});var ye=i(Q);ce=n(ye,"model(inputs)"),ye.forEach(t),se=n(P,"."),P.forEach(t),U=d(c),X=r(c,"P",{});var _e=i(X);ee=n(_e,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),_e.forEach(t),J=d(c),z=r(c,"UL",{});var A=i(z);q=r(A,"LI",{});var D=i(q);te=n(D,"a single Tensor with "),R=r(D,"CODE",{});var Fe=i(R);ae=n(Fe,"input_ids"),Fe.forEach(t),oe=n(D," only and nothing else: "),B=r(D,"CODE",{});var Me=i(B);pe=n(Me,"model(inputs_ids)"),Me.forEach(t),D.forEach(t),re=d(A),C=r(A,"LI",{});var W=i(C);he=n(W,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),N=r(W,"CODE",{});var $e=i(N);ue=n($e,"model([input_ids, attention_mask])"),$e.forEach(t),me=n(W," or "),I=r(W,"CODE",{});var xe=i(I);fe=n(xe,"model([input_ids, attention_mask, token_type_ids])"),xe.forEach(t),W.forEach(t),ge=d(A),j=r(A,"LI",{});var be=i(j);K=n(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),S=r(be,"CODE",{});var ve=i(S);ne=n(ve,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),ve.forEach(t),be.forEach(t),A.forEach(t)},m(c,T){u(c,p,T),e(p,k),u(c,m,T),u(c,g,T),e(g,v),e(v,b),e(g,f),e(g,w),e(w,le),u(c,Y,T),u(c,E,T),e(E,G),e(E,H),e(H,Z),e(E,de),e(E,Q),e(Q,ce),e(E,se),u(c,U,T),u(c,X,T),e(X,ee),u(c,J,T),u(c,z,T),e(z,q),e(q,te),e(q,R),e(R,ae),e(q,oe),e(q,B),e(B,pe),e(z,re),e(z,C),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(z,ge),e(z,j),e(j,K),e(j,S),e(S,ne)},d(c){c&&t(p),c&&t(m),c&&t(g),c&&t(Y),c&&t(E),c&&t(U),c&&t(X),c&&t(J),c&&t(z)}}}function Kb(O){let p,k,m,g,v;return{c(){p=a("p"),k=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=o("Module"),v=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(b){p=r(b,"P",{});var f=i(p);k=n(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var w=i(m);g=n(w,"Module"),w.forEach(t),v=n(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(b,f){u(b,p,f),e(p,k),e(p,m),e(m,g),e(p,v)},d(b){b&&t(p)}}}function Gb(O){let p,k,m,g,v,b,f,w,le,Y,E,G,H,Z,de,Q,ce,se,U,X,ee,J,z,q,te,R,ae,oe,B,pe,re,C,he,N,ue,me,I,fe,ge,j,K,S,ne,c,T,V,we,Te,P,ke,ye,_e,A,D,Fe,Me,W,$e,xe,be,ve,Gl,Bs,Zl,ed,Rs,td,od,Ei,qt,co,Ba,Ro,nd,Ra,sd,zi,st,Uo,ad,Ua,rd,id,_t,Va,ld,dd,Ya,cd,pd,bt,hd,Ja,ud,md,Ka,fd,gd,Ga,_d,bd,vd,Vo,kd,Za,Td,wd,yd,Yo,Fd,Us,Md,$d,Ci,Pt,po,er,Jo,xd,tr,Ld,qi,He,Ko,Ed,or,zd,Cd,Go,qd,Vs,Pd,jd,Xd,Zo,Ad,en,Sd,Nd,Id,Qe,tn,Dd,jt,Wd,Ys,Od,Hd,nr,Qd,Bd,Rd,ho,Ud,sr,Vd,Yd,on,Pi,Xt,uo,ar,nn,Jd,rr,Kd,ji,ze,sn,Gd,ir,Zd,ec,an,tc,Js,oc,nc,sc,rn,ac,ln,rc,ic,lc,dn,dc,Ks,cc,pc,hc,Be,cn,uc,At,mc,Gs,fc,gc,lr,_c,bc,vc,mo,kc,dr,Tc,wc,pn,Xi,St,fo,cr,hn,yc,pr,Fc,Ai,Ce,un,Mc,hr,$c,xc,mn,Lc,Zs,Ec,zc,Cc,fn,qc,gn,Pc,jc,Xc,_n,Ac,ea,Sc,Nc,Ic,Ee,bn,Dc,Nt,Wc,ta,Oc,Hc,ur,Qc,Bc,Rc,go,Uc,mr,Vc,Yc,vn,Jc,fr,Kc,Gc,kn,Si,It,_o,gr,Tn,Zc,_r,ep,Ni,qe,wn,tp,br,op,np,yn,sp,oa,ap,rp,ip,Fn,lp,Mn,dp,cp,pp,$n,hp,na,up,mp,fp,Re,xn,gp,Dt,_p,sa,bp,vp,vr,kp,Tp,wp,bo,yp,kr,Fp,Mp,Ln,Ii,Wt,vo,Tr,En,$p,wr,xp,Di,Pe,zn,Lp,yr,Ep,zp,Cn,Cp,aa,qp,Pp,jp,qn,Xp,Pn,Ap,Sp,Np,jn,Ip,ra,Dp,Wp,Op,Ue,Xn,Hp,Ot,Qp,ia,Bp,Rp,Fr,Up,Vp,Yp,ko,Jp,Mr,Kp,Gp,An,Wi,Ht,To,$r,Sn,Zp,xr,eh,Oi,je,Nn,th,Qt,oh,Lr,nh,sh,Er,ah,rh,ih,In,lh,la,dh,ch,ph,Dn,hh,Wn,uh,mh,fh,On,gh,da,_h,bh,vh,Ve,Hn,kh,Bt,Th,ca,wh,yh,zr,Fh,Mh,$h,wo,xh,Cr,Lh,Eh,Qn,Hi,Rt,yo,qr,Bn,zh,Pr,Ch,Qi,Xe,Rn,qh,Ut,Ph,jr,jh,Xh,Xr,Ah,Sh,Nh,Un,Ih,pa,Dh,Wh,Oh,Vn,Hh,Yn,Qh,Bh,Rh,Jn,Uh,ha,Vh,Yh,Jh,Ye,Kn,Kh,Vt,Gh,ua,Zh,eu,Ar,tu,ou,nu,Fo,su,Sr,au,ru,Gn,Bi,Yt,Mo,Nr,Zn,iu,Ir,lu,Ri,Ae,es,du,Dr,cu,pu,ts,hu,ma,uu,mu,fu,os,gu,ns,_u,bu,vu,$o,ku,Je,ss,Tu,Jt,wu,fa,yu,Fu,Wr,Mu,$u,xu,xo,Lu,Or,Eu,zu,as,Ui,Kt,Lo,Hr,rs,Cu,Qr,qu,Vi,Se,is,Pu,Br,ju,Xu,ls,Au,ga,Su,Nu,Iu,ds,Du,cs,Wu,Ou,Hu,Eo,Qu,Ke,ps,Bu,Gt,Ru,_a,Uu,Vu,Rr,Yu,Ju,Ku,zo,Gu,Ur,Zu,em,hs,Yi,Zt,Co,Vr,us,tm,Yr,om,Ji,Ne,ms,nm,Jr,sm,am,fs,rm,ba,im,lm,dm,gs,cm,_s,pm,hm,um,qo,mm,Ge,bs,fm,eo,gm,va,_m,bm,Kr,vm,km,Tm,Po,wm,Gr,ym,Fm,vs,Ki,to,jo,Zr,ks,Mm,ei,$m,Gi,Ie,Ts,xm,ti,Lm,Em,ws,zm,ka,Cm,qm,Pm,ys,jm,Fs,Xm,Am,Sm,Xo,Nm,Ze,Ms,Im,oo,Dm,Ta,Wm,Om,oi,Hm,Qm,Bm,Ao,Rm,ni,Um,Vm,$s,Zi,no,So,si,xs,Ym,ai,Jm,el,De,Ls,Km,ri,Gm,Zm,Es,ef,wa,tf,of,nf,zs,sf,Cs,af,rf,lf,No,df,et,qs,cf,so,pf,ya,hf,uf,ii,mf,ff,gf,Io,_f,li,bf,vf,Ps,tl,ao,Do,di,js,kf,ci,Tf,ol,We,Xs,wf,ro,yf,pi,Ff,Mf,hi,$f,xf,Lf,As,Ef,Fa,zf,Cf,qf,Ss,Pf,Ns,jf,Xf,Af,Wo,Sf,tt,Is,Nf,io,If,Ma,Df,Wf,ui,Of,Hf,Qf,Oo,Bf,mi,Rf,Uf,Ds,nl;return 
b=new Oe({}),Z=new Oe({}),c=new Oe({}),ke=new ie({props:{name:"class transformers.FlaubertConfig",anchor:"transformers.FlaubertConfig",parameters:[{name:"layerdrop",val:" = 0.0"},{name:"pre_norm",val:" = False"},{name:"pad_token_id",val:" = 2"},{name:"bos_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/configuration_flaubert.py#L31",parametersDescription:[{anchor:"transformers.FlaubertConfig.pre_norm",description:`<strong>pre_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply the layer normalization before or after the feed forward layer following the attention in each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)`,name:"pre_norm"},{anchor:"transformers.FlaubertConfig.layerdrop",description:`<strong>layerdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with Structured Dropout. ICLR 2020)`,name:"layerdrop"},{anchor:"transformers.FlaubertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30145) &#x2014; Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertModel">FlaubertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertModel">TFFlaubertModel</a>.`,name:"vocab_size"},{anchor:"transformers.FlaubertConfig.emb_dim",description:`<strong>emb_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"emb_dim"},{anchor:"transformers.FlaubertConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.FlaubertConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.FlaubertConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.FlaubertConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the attention mechanism`,name:"attention_dropout"},{anchor:"transformers.FlaubertConfig.gelu_activation",description:`<strong>gelu_activation</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a <em>gelu</em> activation instead of <em>relu</em>.`,name:"gelu_activation"},{anchor:"transformers.FlaubertConfig.sinusoidal_embeddings",description:`<strong>sinusoidal_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.`,name:"sinusoidal_embeddings"},{anchor:"transformers.FlaubertConfig.causal",description:`<strong>causal</strong> 
(<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in order to only attend to the left-side context instead if a bidirectional context.`,name:"causal"},{anchor:"transformers.FlaubertConfig.asm",description:`<strong>asm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction layer.`,name:"asm"},{anchor:"transformers.FlaubertConfig.n_langs",description:`<strong>n_langs</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of languages the model handles. Set to 1 for monolingual models.`,name:"n_langs"},{anchor:"transformers.FlaubertConfig.use_lang_emb",description:`<strong>use_lang_emb</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use language embeddings. Some models use additional language embeddings, see <a href="http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings" rel="nofollow">the multilingual models page</a> for information on how to use them.`,name:"use_lang_emb"},{anchor:"transformers.FlaubertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.FlaubertConfig.embed_init_std",description:`<strong>embed_init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 2048^-0.5) &#x2014; The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.`,name:"embed_init_std"},{anchor:"transformers.FlaubertConfig.init_std",description:`<strong>init_std</strong> (<code>int</code>, <em>optional</em>, defaults to 50257) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the embedding matrices.`,name:"init_std"},{anchor:"transformers.FlaubertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.FlaubertConfig.bos_index",description:`<strong>bos_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index of the beginning of sentence token in the vocabulary.`,name:"bos_index"},{anchor:"transformers.FlaubertConfig.eos_index",description:`<strong>eos_index</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The index of the end of sentence token in the vocabulary.`,name:"eos_index"},{anchor:"transformers.FlaubertConfig.pad_index",description:`<strong>pad_index</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The index of the padding token in the vocabulary.`,name:"pad_index"},{anchor:"transformers.FlaubertConfig.unk_index",description:`<strong>unk_index</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The index of the unknown token in the vocabulary.`,name:"unk_index"},{anchor:"transformers.FlaubertConfig.mask_index",description:`<strong>mask_index</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The index of the masking token in the 
vocabulary.`,name:"mask_index"},{anchor:"transformers.FlaubertConfig.is_encoder(bool,",description:`<strong>is_encoder(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.`,name:"is_encoder(bool,"},{anchor:"transformers.FlaubertConfig.summary_type",description:`<strong>summary_type</strong> (<code>string</code>, <em>optional</em>, defaults to &#x201C;first&#x201D;) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Has to be one of the following options:</p> <ul> <li><code>&quot;last&quot;</code>: Take the last token hidden state (like XLNet).</li> <li><code>&quot;first&quot;</code>: Take the first token hidden state (like BERT).</li> <li><code>&quot;mean&quot;</code>: Take the mean of all tokens hidden states.</li> <li><code>&quot;cls_index&quot;</code>: Supply a Tensor of classification token position (like GPT/GPT-2).</li> <li><code>&quot;attn&quot;</code>: Not implemented now, use multi-head attention.</li> </ul>`,name:"summary_type"},{anchor:"transformers.FlaubertConfig.summary_use_proj",description:`<strong>summary_use_proj</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Whether or not to add a projection after the vector extraction.`,name:"summary_use_proj"},{anchor:"transformers.FlaubertConfig.summary_activation",description:`<strong>summary_activation</strong> (<code>str</code>, <em>optional</em>) &#x2014; Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.</p> <p>Pass <code>&quot;tanh&quot;</code> for a tanh activation to the output, any other value will result in no activation.`,name:"summary_activation"},{anchor:"transformers.FlaubertConfig.summary_proj_to_labels",description:`<strong>summary_proj_to_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Used in the sequence classification and multiple choice models.</p> <p>Whether the projection outputs should have <code>config.num_labels</code> or <code>config.hidden_size</code> classes.`,name:"summary_proj_to_labels"},{anchor:"transformers.FlaubertConfig.summary_first_dropout",description:`<strong>summary_first_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Used in the sequence classification and multiple choice models.</p> <p>The dropout ratio to be used after the projection and activation.`,name:"summary_first_dropout"},{anchor:"transformers.FlaubertConfig.start_n_top",description:`<strong>start_n_top</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Used in the SQuAD evaluation script.`,name:"start_n_top"},{anchor:"transformers.FlaubertConfig.end_n_top",description:`<strong>end_n_top</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Used in the SQuAD evaluation script.`,name:"end_n_top"},{anchor:"transformers.FlaubertConfig.mask_token_id",description:`<strong>mask_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Model agnostic parameter to identify masked tokens when generating text in an MLM context.`,name:"mask_token_id"},{anchor:"transformers.FlaubertConfig.lang_id",description:`<strong>lang_id</strong> (<code>int</code>, <em>optional</em>, defaults to 1) 
&#x2014; The ID of the language used by the model. This parameter is used when generating text in a given language.`,name:"lang_id"}]}}),Ro=new Oe({}),Uo=new ie({props:{name:"class transformers.FlaubertTokenizer",anchor:"transformers.FlaubertTokenizer",parameters:[{name:"do_lowercase",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/tokenization_flaubert.py#L79"}}),Jo=new Oe({}),Ko=new ie({props:{name:"class transformers.FlaubertModel",anchor:"transformers.FlaubertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L134",parametersDescription:[{anchor:"transformers.FlaubertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),tn=new ie({props:{name:"forward",anchor:"transformers.FlaubertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L147",parametersDescription:[{anchor:"transformers.FlaubertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertTokenizer">FlaubertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaubertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaubertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaubertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaubertModel.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <code>attention_mask</code> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>:`,name:"lengths"},{anchor:"transformers.FlaubertModel.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary strings to <code>torch.FloatTensor</code> that contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding. The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.FlaubertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaubertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FlaubertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaubertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaubertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig" >FlaubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ho=new Le({props:{$$slots:{default:[Pb]},$$scope:{ctx:O}}}),on=new nt({props:{code:`from transformers import FlaubertTokenizer, FlaubertModel import torch tokenizer = FlaubertTokenizer.from_pretrained('flaubert/flaubert_base_cased') model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaubertTokenizer, FlaubertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FlaubertTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;flaubert/flaubert_base_cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaubertModel.from_pretrained(<span class="hljs-string">&#x27;flaubert/flaubert_base_cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),nn=new Oe({}),sn=new ie({props:{name:"class transformers.FlaubertWithLMHeadModel",anchor:"transformers.FlaubertWithLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L326",parametersDescription:[{anchor:"transformers.FlaubertWithLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),cn=new ie({props:{name:"forward",anchor:"transformers.XLMWithLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L710",parametersDescription:[{anchor:"transformers.XLMWithLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMWithLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMWithLMHeadModel.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMWithLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMWithLMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMWithLMHeadModel.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMWithLMHeadModel.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMWithLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMWithLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMWithLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMWithLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMWithLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMWithLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),mo=new Le({props:{$$slots:{default:[jb]},$$scope:{ctx:O}}}),pn=new nt({props:{code:`from transformers import XLMTokenizer, XLMWithLMHeadModel import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("The capital of France is <special1>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) 
<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;special1&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),hn=new Oe({}),un=new ie({props:{name:"class transformers.FlaubertForSequenceClassification",anchor:"transformers.FlaubertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L348",parametersDescription:[{anchor:"transformers.FlaubertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bn=new ie({props:{name:"forward",anchor:"transformers.XLMForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L790",parametersDescription:[{anchor:"transformers.XLMForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForSequenceClassification.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForSequenceClassification.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForSequenceClassification.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),go=new Le({props:{$$slots:{default:[Xb]},$$scope:{ctx:O}}}),vn=new nt({props:{code:`from transformers import XLMTokenizer, XLMForSequenceClassification import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = 
tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),kn=new nt({props:{code:`from transformers import XLMTokenizer, XLMForSequenceClassification import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Tn=new Oe({}),wn=new ie({props:{name:"class transformers.FlaubertForMultipleChoice",anchor:"transformers.FlaubertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L436",parametersDescription:[{anchor:"transformers.FlaubertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xn=new ie({props:{name:"forward",anchor:"transformers.XLMForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L1209",parametersDescription:[{anchor:"transformers.XLMForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForMultipleChoice.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForMultipleChoice.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForMultipleChoice.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),bo=new Le({props:{$$slots:{default:[Ab]},$$scope:{ctx:O}}}),Ln=new nt({props:{code:`from transformers import XLMTokenizer, XLMForMultipleChoice import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForMultipleChoice.from_pretrained('xlm-mlm-en-2048') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." 
choice1 = "It is eaten while held in the hand." labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),En=new Oe({}),zn=new ie({props:{name:"class transformers.FlaubertForTokenClassification",anchor:"transformers.FlaubertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L370",parametersDescription:[{anchor:"transformers.FlaubertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xn=new ie({props:{name:"forward",anchor:"transformers.XLMForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L1116",parametersDescription:[{anchor:"transformers.XLMForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForTokenClassification.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForTokenClassification.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForTokenClassification.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ko=new Le({props:{$$slots:{default:[Sb]},$$scope:{ctx:O}}}),An=new nt({props:{code:`from transformers import XLMTokenizer, XLMForTokenClassification import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForTokenClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Sn=new Oe({}),Nn=new ie({props:{name:"class transformers.FlaubertForQuestionAnsweringSimple",anchor:"transformers.FlaubertForQuestionAnsweringSimple",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L392",parametersDescription:[{anchor:"transformers.FlaubertForQuestionAnsweringSimple.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Hn=new ie({props:{name:"forward",anchor:"transformers.XLMForQuestionAnsweringSimple.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L890",parametersDescription:[{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. 
Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.XLMForQuestionAnsweringSimple.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),wo=new Le({props:{$$slots:{default:[Nb]},$$scope:{ctx:O}}}),Qn=new nt({props:{code:`from transformers import XLMTokenizer, XLMForQuestionAnsweringSimple import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, 
start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForQuestionAnsweringSimple <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForQuestionAnsweringSimple.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Bn=new Oe({}),Rn=new ie({props:{name:"class transformers.FlaubertForQuestionAnswering",anchor:"transformers.FlaubertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_flaubert.py#L414",parametersDescription:[{anchor:"transformers.FlaubertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kn=new ie({props:{name:"forward",anchor:"transformers.XLMForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"is_impossible",val:" = None"},{name:"cls_index",val:" = None"},{name:"p_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_xlm.py#L995",parametersDescription:[{anchor:"transformers.XLMForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer">XLMTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.XLMForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.XLMForQuestionAnswering.forward.langs",description:`<strong>langs</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.XLMForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.XLMForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.XLMForQuestionAnswering.forward.lengths",description:`<strong>lengths</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.XLMForQuestionAnswering.forward.cache",description:`<strong>cache</strong> (<code>Dict[str, torch.FloatTensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.XLMForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.XLMForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.XLMForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.XLMForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.XLMForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.XLMForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.XLMForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"},{anchor:"transformers.XLMForQuestionAnswering.forward.is_impossible",description:`<strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels whether a question has an answer or no answer (SQuAD 2.0)`,name:"is_impossible"},{anchor:"transformers.XLMForQuestionAnswering.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the classification token to use as input for computing plausibility of the answer.`,name:"cls_index"},{anchor:"transformers.XLMForQuestionAnswering.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Optional mask of tokens which can&#x2019;t be in answers (e.g. [CLS], [PAD], &#x2026;). 1.0 means token should be masked. 
0.0 mean token is not masked.`,name:"p_mask"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput" >transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</p> </li> <li> <p><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top config.start_n_top start token possibilities (beam-search).</p> </li> <li> <p><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top config.start_n_top start token possibilities (beam-search).</p> </li> <li> <p><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</p> </li> <li> <p><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</p> </li> <li> <p><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the <code>is_impossible</code> label of the answers.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights 
after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput" >transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fo=new Le({props:{$$slots:{default:[Ib]},$$scope:{ctx:O}}}),Gn=new nt({props:{code:`from transformers import XLMTokenizer, XLMForQuestionAnswering import torch tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor(tokenizer.encode(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, add_special_tokens=<span class="hljs-literal">True</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss`}}),Zn=new Oe({}),es=new ie({props:{name:"class transformers.TFFlaubertModel",anchor:"transformers.TFFlaubertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L232",parametersDescription:[{anchor:"transformers.TFFlaubertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$o=new Le({props:{$$slots:{default:[Db]},$$scope:{ctx:O}}}),ss=new ie({props:{name:"call",anchor:"transformers.TFFlaubertModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L237",parametersDescription:[{anchor:"transformers.TFFlaubertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertTokenizer">FlaubertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFlaubertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li><code>1</code> for tokens that are <strong>not masked</strong>,</li> <li><code>0</code> for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFlaubertModel.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFFlaubertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li><code>0</code> corresponds to a <em>sentence A</em> token,</li> <li><code>1</code> corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFlaubertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFFlaubertModel.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility Indices selected in <code>[0, ..., input_ids.size(-1)]</code>:`,name:"lengths"},{anchor:"transformers.TFFlaubertModel.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>tf.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFFlaubertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li><code>1</code> indicates the head is <strong>not masked</strong>,</li> <li><code>0</code> indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFFlaubertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFlaubertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFlaubertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFlaubertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFlaubertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig" >FlaubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),xo=new Le({props:{$$slots:{default:[Wb]},$$scope:{ctx:O}}}),as=new nt({props:{code:`from transformers import FlaubertTokenizer, TFFlaubertModel import tensorflow as tf tokenizer = FlaubertTokenizer.from_pretrained('flaubert/flaubert_base_cased') model = TFFlaubertModel.from_pretrained('flaubert/flaubert_base_cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaubertTokenizer, TFFlaubertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FlaubertTokenizer.from_pretrained(<span class="hljs-string">&#x27;flaubert/flaubert_base_cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFlaubertModel.from_pretrained(<span class="hljs-string">&#x27;flaubert/flaubert_base_cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),rs=new Oe({}),is=new ie({props:{name:"class transformers.TFFlaubertWithLMHeadModel",anchor:"transformers.TFFlaubertWithLMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L793",parametersDescription:[{anchor:"transformers.TFFlaubertWithLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Eo=new Le({props:{$$slots:{default:[Ob]},$$scope:{ctx:O}}}),ps=new ie({props:{name:"call",anchor:"transformers.TFFlaubertWithLMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L820",parametersDescription:[{anchor:"transformers.TFFlaubertWithLMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertTokenizer">FlaubertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li><code>1</code> for tokens that are <strong>not masked</strong>,</li> <li><code>0</code> for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li><code>0</code> corresponds to a <em>sentence A</em> token,</li> <li><code>1</code> corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility Indices selected in <code>[0, ..., input_ids.size(-1)]</code>:`,name:"lengths"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>tf.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li><code>1</code> indicates the head is <strong>not masked</strong>,</li> <li><code>0</code> indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFlaubertWithLMHeadModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <code>transformers.models.flaubert.modeling_tf_flaubert.TFFlaubertWithLMHeadModelOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig" >FlaubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` 
<p><code>transformers.models.flaubert.modeling_tf_flaubert.TFFlaubertWithLMHeadModelOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),zo=new Le({props:{$$slots:{default:[Hb]},$$scope:{ctx:O}}}),hs=new nt({props:{code:`from transformers import FlaubertTokenizer, TFFlaubertWithLMHeadModel import tensorflow as tf tokenizer = FlaubertTokenizer.from_pretrained('flaubert/flaubert_base_cased') model = TFFlaubertWithLMHeadModel.from_pretrained('flaubert/flaubert_base_cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaubertTokenizer, TFFlaubertWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FlaubertTokenizer.from_pretrained(<span class="hljs-string">&#x27;flaubert/flaubert_base_cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFlaubertWithLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;flaubert/flaubert_base_cased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),us=new Oe({}),ms=new ie({props:{name:"class transformers.TFFlaubertForSequenceClassification",anchor:"transformers.TFFlaubertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L901",parametersDescription:[{anchor:"transformers.TFFlaubertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qo=new Le({props:{$$slots:{default:[Qb]},$$scope:{ctx:O}}}),bs=new ie({props:{name:"call",anchor:"transformers.TFXLMForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L946",parametersDescription:[{anchor:"transformers.TFXLMForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForSequenceClassification.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForSequenceClassification.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForSequenceClassification.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFXLMForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Po=new Le({props:{$$slots:{default:[Bb]},$$scope:{ctx:O}}}),vs=new nt({props:{code:`from transformers import XLMTokenizer, TFXLMForSequenceClassification import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ks=new Oe({}),Ts=new ie({props:{name:"class transformers.TFFlaubertForMultipleChoice",anchor:"transformers.TFFlaubertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L946",parametersDescription:[{anchor:"transformers.TFFlaubertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xo=new Le({props:{$$slots:{default:[Rb]},$$scope:{ctx:O}}}),Ms=new ie({props:{name:"call",anchor:"transformers.TFXLMForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1071",parametersDescription:[{anchor:"transformers.TFXLMForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForMultipleChoice.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForMultipleChoice.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. 
Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForMultipleChoice.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ao=new Le({props:{$$slots:{default:[Ub]},$$scope:{ctx:O}}}),$s=new nt({props:{code:`from transformers import XLMTokenizer, TFXLMForMultipleChoice import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForMultipleChoice.from_pretrained('xlm-mlm-en-2048') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),xs=new Oe({}),Ls=new ie({props:{name:"class transformers.TFFlaubertForTokenClassification",anchor:"transformers.TFFlaubertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L931",parametersDescription:[{anchor:"transformers.TFFlaubertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),No=new Le({props:{$$slots:{default:[Vb]},$$scope:{ctx:O}}}),qs=new ie({props:{name:"call",anchor:"transformers.TFXLMForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1221",parametersDescription:[{anchor:"transformers.TFXLMForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForTokenClassification.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForTokenClassification.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForTokenClassification.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFXLMForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Io=new Le({props:{$$slots:{default:[Yb]},$$scope:{ctx:O}}}),Ps=new nt({props:{code:`from transformers import XLMTokenizer, TFXLMForTokenClassification import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForTokenClassification.from_pretrained('xlm-mlm-en-2048') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is 
cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),js=new Oe({}),Xs=new ie({props:{name:"class transformers.TFFlaubertForQuestionAnsweringSimple",anchor:"transformers.TFFlaubertForQuestionAnsweringSimple",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/flaubert/modeling_tf_flaubert.py#L916",parametersDescription:[{anchor:"transformers.TFFlaubertForQuestionAnsweringSimple.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertConfig">FlaubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wo=new Le({props:{$$slots:{default:[Jb]},$$scope:{ctx:O}}}),Is=new ie({props:{name:"call",anchor:"transformers.TFXLMForQuestionAnsweringSimple.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"langs",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"lengths",val:" = None"},{name:"cache",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/xlm/modeling_tf_xlm.py#L1325",parametersDescription:[{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.langs",description:`<strong>langs</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the <em>language name to language id</em> mapping is in <code>model.config.lang2id</code> (which is a dictionary string to int) and the <em>language id to language name</em> mapping is in <code>model.config.id2lang</code> (dictionary int to string).</p> <p>See usage examples detailed in the <a href="../multilingual">multilingual documentation</a>.`,name:"langs"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.lengths",description:`<strong>lengths</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use <em>attention_mask</em> for the same result (see above), kept here for compatibility. 
Indices selected in <code>[0, ..., input_ids.size(-1)]</code>.`,name:"lengths"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.cache",description:`<strong>cache</strong> (<code>Dict[str, tf.Tensor]</code>, <em>optional</em>) &#x2014; Dictionary string to <code>torch.FloatTensor</code> that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see <code>cache</code> output below). Can be used to speed up sequential decoding.</p> <p>The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.`,name:"cache"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFXLMForQuestionAnsweringSimple.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMConfig" >XLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Oo=new Le({props:{$$slots:{default:[Kb]},$$scope:{ctx:O}}}),Ds=new nt({props:{code:`from transformers import XLMTokenizer, TFXLMForQuestionAnsweringSimple import tensorflow as tf tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = TFXLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, TFXLMForQuestionAnsweringSimple <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFXLMForQuestionAnsweringSimple.from_pretrained(<span class="hljs-string">&#x27;xlm-mlm-en-2048&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){p=a("meta"),k=l(),m=a("h1"),g=a("a"),v=a("span"),y(b.$$.fragment),f=l(),w=a("span"),le=o("FlauBERT"),Y=l(),E=a("h2"),G=a("a"),H=a("span"),y(Z.$$.fragment),de=l(),Q=a("span"),ce=o("Overview"),se=l(),U=a("p"),X=o("The FlauBERT model was proposed in the paper "),ee=a("a"),J=o("FlauBERT: Unsupervised Language Model Pre-training for French"),z=o(` by Hang Le et al. It\u2019s a transformer model pretrained using a masked language modeling (MLM) objective (like BERT).`),q=l(),te=a("p"),R=o("The abstract from the paper is the following:"),ae=l(),oe=a("p"),B=a("em"),pe=o(`Language models have become a key step to achieve state-of-the art results in many different Natural Language Processing (NLP) tasks. 
Leveraging the huge amount of unlabeled texts nowadays available, they provide an efficient way to pre-train continuous word representations that can be fine-tuned for a downstream task, along with their contextualization at the sentence level. This has been widely demonstrated for English using contextualized representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018; Radford et al., 2018; Devlin et al., 2019; Yang et al., 2019b). In this paper, we introduce and share FlauBERT, a model learned on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most of the time they outperform other pretraining approaches. Different versions of FlauBERT as well as a unified evaluation protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared to the research community for further reproducible experiments in French NLP.`),re=l(),C=a("p"),he=o("This model was contributed by "),N=a("a"),ue=o("formiel"),me=o(". The original code can be found "),I=a("a"),fe=o("here"),ge=o("."),j=l(),K=a("h2"),S=a("a"),ne=a("span"),y(c.$$.fragment),T=l(),V=a("span"),we=o("FlaubertConfig"),Te=l(),P=a("div"),y(ke.$$.fragment),ye=l(),_e=a("p"),A=o("This is the configuration class to store the configuration of a "),D=a("a"),Fe=o("FlaubertModel"),Me=o(` or a `),W=a("a"),$e=o("TFFlaubertModel"),xe=o(`. It is used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.`),be=l(),ve=a("p"),Gl=o("Configuration objects inherit from "),Bs=a("a"),Zl=o("PretrainedConfig"),ed=o(` and can be used to control the model outputs. Read the documentation from `),Rs=a("a"),td=o("PretrainedConfig"),od=o(" for more information."),Ei=l(),qt=a("h2"),co=a("a"),Ba=a("span"),y(Ro.$$.fragment),nd=l(),Ra=a("span"),sd=o("FlaubertTokenizer"),zi=l(),st=a("div"),y(Uo.$$.fragment),ad=l(),Ua=a("p"),rd=o("Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:"),id=l(),_t=a("ul"),Va=a("li"),ld=o("Moses preprocessing and tokenization."),dd=l(),Ya=a("li"),cd=o("Normalizing all inputs text."),pd=l(),bt=a("li"),hd=o("The arguments "),Ja=a("code"),ud=o("special_tokens"),md=o(" and the function "),Ka=a("code"),fd=o("set_special_tokens"),gd=o(`, can be used to add additional symbols (like \u201D`),Ga=a("strong"),_d=o("classify"),bd=o("\u201D) to a vocabulary."),vd=l(),Vo=a("li"),kd=o("The argument "),Za=a("code"),Td=o("do_lowercase"),wd=o(" controls lower casing (automatically set for pretrained vocabularies)."),yd=l(),Yo=a("p"),Fd=o("This tokenizer inherits from "),Us=a("a"),Md=o("XLMTokenizer"),$d=o(`. Please check the superclass for usage examples and documentation regarding arguments.`),Ci=l(),Pt=a("h2"),po=a("a"),er=a("span"),y(Jo.$$.fragment),xd=l(),tr=a("span"),Ld=o("FlaubertModel"),qi=l(),He=a("div"),y(Ko.$$.fragment),Ed=l(),or=a("p"),zd=o("The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top."),Cd=l(),Go=a("p"),qd=o("This model inherits from "),Vs=a("a"),Pd=o("PreTrainedModel"),jd=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xd=l(),Zo=a("p"),Ad=o("This model is also a PyTorch "),en=a("a"),Sd=o("torch.nn.Module"),Nd=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Id=l(),Qe=a("div"),y(tn.$$.fragment),Dd=l(),jt=a("p"),Wd=o("The "),Ys=a("a"),Od=o("FlaubertModel"),Hd=o(" forward method, overrides the "),nr=a("code"),Qd=o("__call__"),Bd=o(" special method."),Rd=l(),y(ho.$$.fragment),Ud=l(),sr=a("p"),Vd=o("Example:"),Yd=l(),y(on.$$.fragment),Pi=l(),Xt=a("h2"),uo=a("a"),ar=a("span"),y(nn.$$.fragment),Jd=l(),rr=a("span"),Kd=o("FlaubertWithLMHeadModel"),ji=l(),ze=a("div"),y(sn.$$.fragment),Gd=l(),ir=a("p"),Zd=o(`The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),ec=l(),an=a("p"),tc=o("This model inherits from "),Js=a("a"),oc=o("PreTrainedModel"),nc=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sc=l(),rn=a("p"),ac=o("This model is also a PyTorch "),ln=a("a"),rc=o("torch.nn.Module"),ic=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),lc=l(),dn=a("p"),dc=o("This class overrides "),Ks=a("a"),cc=o("XLMWithLMHeadModel"),pc=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),hc=l(),Be=a("div"),y(cn.$$.fragment),uc=l(),At=a("p"),mc=o("The "),Gs=a("a"),fc=o("XLMWithLMHeadModel"),gc=o(" forward method, overrides the "),lr=a("code"),_c=o("__call__"),bc=o(" special method."),vc=l(),y(mo.$$.fragment),kc=l(),dr=a("p"),Tc=o("Example:"),wc=l(),y(pn.$$.fragment),Xi=l(),St=a("h2"),fo=a("a"),cr=a("span"),y(hn.$$.fragment),yc=l(),pr=a("span"),Fc=o("FlaubertForSequenceClassification"),Ai=l(),Ce=a("div"),y(un.$$.fragment),Mc=l(),hr=a("p"),$c=o(`Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),xc=l(),mn=a("p"),Lc=o("This model inherits from "),Zs=a("a"),Ec=o("PreTrainedModel"),zc=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cc=l(),fn=a("p"),qc=o("This model is also a PyTorch "),gn=a("a"),Pc=o("torch.nn.Module"),jc=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xc=l(),_n=a("p"),Ac=o("This class overrides "),ea=a("a"),Sc=o("XLMForSequenceClassification"),Nc=o(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ic=l(),Ee=a("div"),y(bn.$$.fragment),Dc=l(),Nt=a("p"),Wc=o("The "),ta=a("a"),Oc=o("XLMForSequenceClassification"),Hc=o(" forward method, overrides the "),ur=a("code"),Qc=o("__call__"),Bc=o(" special method."),Rc=l(),y(go.$$.fragment),Uc=l(),mr=a("p"),Vc=o("Example of single-label classification:"),Yc=l(),y(vn.$$.fragment),Jc=l(),fr=a("p"),Kc=o("Example of multi-label classification:"),Gc=l(),y(kn.$$.fragment),Si=l(),It=a("h2"),_o=a("a"),gr=a("span"),y(Tn.$$.fragment),Zc=l(),_r=a("span"),ep=o("FlaubertForMultipleChoice"),Ni=l(),qe=a("div"),y(wn.$$.fragment),tp=l(),br=a("p"),op=o(`Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),np=l(),yn=a("p"),sp=o("This model inherits from "),oa=a("a"),ap=o("PreTrainedModel"),rp=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ip=l(),Fn=a("p"),lp=o("This model is also a PyTorch "),Mn=a("a"),dp=o("torch.nn.Module"),cp=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pp=l(),$n=a("p"),hp=o("This class overrides "),na=a("a"),up=o("XLMForMultipleChoice"),mp=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),fp=l(),Re=a("div"),y(xn.$$.fragment),gp=l(),Dt=a("p"),_p=o("The "),sa=a("a"),bp=o("XLMForMultipleChoice"),vp=o(" forward method, overrides the "),vr=a("code"),kp=o("__call__"),Tp=o(" special method."),wp=l(),y(bo.$$.fragment),yp=l(),kr=a("p"),Fp=o("Example:"),Mp=l(),y(Ln.$$.fragment),Ii=l(),Wt=a("h2"),vo=a("a"),Tr=a("span"),y(En.$$.fragment),$p=l(),wr=a("span"),xp=o("FlaubertForTokenClassification"),Di=l(),Pe=a("div"),y(zn.$$.fragment),Lp=l(),yr=a("p"),Ep=o(`Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),zp=l(),Cn=a("p"),Cp=o("This model inherits from "),aa=a("a"),qp=o("PreTrainedModel"),Pp=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jp=l(),qn=a("p"),Xp=o("This model is also a PyTorch "),Pn=a("a"),Ap=o("torch.nn.Module"),Sp=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Np=l(),jn=a("p"),Ip=o("This class overrides "),ra=a("a"),Dp=o("XLMForTokenClassification"),Wp=o(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Op=l(),Ue=a("div"),y(Xn.$$.fragment),Hp=l(),Ot=a("p"),Qp=o("The "),ia=a("a"),Bp=o("XLMForTokenClassification"),Rp=o(" forward method, overrides the "),Fr=a("code"),Up=o("__call__"),Vp=o(" special method."),Yp=l(),y(ko.$$.fragment),Jp=l(),Mr=a("p"),Kp=o("Example:"),Gp=l(),y(An.$$.fragment),Wi=l(),Ht=a("h2"),To=a("a"),$r=a("span"),y(Sn.$$.fragment),Zp=l(),xr=a("span"),eh=o("FlaubertForQuestionAnsweringSimple"),Oi=l(),je=a("div"),y(Nn.$$.fragment),th=l(),Qt=a("p"),oh=o(`Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Lr=a("code"),nh=o("span start logits"),sh=o(" and "),Er=a("code"),ah=o("span end logits"),rh=o(")."),ih=l(),In=a("p"),lh=o("This model inherits from "),la=a("a"),dh=o("PreTrainedModel"),ch=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ph=l(),Dn=a("p"),hh=o("This model is also a PyTorch "),Wn=a("a"),uh=o("torch.nn.Module"),mh=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fh=l(),On=a("p"),gh=o("This class overrides "),da=a("a"),_h=o("XLMForQuestionAnsweringSimple"),bh=o(`. Please check the superclass for the appropriate documentation alongside usage examples.`),vh=l(),Ve=a("div"),y(Hn.$$.fragment),kh=l(),Bt=a("p"),Th=o("The "),ca=a("a"),wh=o("XLMForQuestionAnsweringSimple"),yh=o(" forward method, overrides the "),zr=a("code"),Fh=o("__call__"),Mh=o(" special method."),$h=l(),y(wo.$$.fragment),xh=l(),Cr=a("p"),Lh=o("Example:"),Eh=l(),y(Qn.$$.fragment),Hi=l(),Rt=a("h2"),yo=a("a"),qr=a("span"),y(Bn.$$.fragment),zh=l(),Pr=a("span"),Ch=o("FlaubertForQuestionAnswering"),Qi=l(),Xe=a("div"),y(Rn.$$.fragment),qh=l(),Ut=a("p"),Ph=o(`Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),jr=a("code"),jh=o("span start logits"),Xh=o(" and "),Xr=a("code"),Ah=o("span end logits"),Sh=o(")."),Nh=l(),Un=a("p"),Ih=o("This model inherits from "),pa=a("a"),Dh=o("PreTrainedModel"),Wh=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Oh=l(),Vn=a("p"),Hh=o("This model is also a PyTorch "),Yn=a("a"),Qh=o("torch.nn.Module"),Bh=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rh=l(),Jn=a("p"),Uh=o("This class overrides "),ha=a("a"),Vh=o("XLMForQuestionAnswering"),Yh=o(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Jh=l(),Ye=a("div"),y(Kn.$$.fragment),Kh=l(),Vt=a("p"),Gh=o("The "),ua=a("a"),Zh=o("XLMForQuestionAnswering"),eu=o(" forward method, overrides the "),Ar=a("code"),tu=o("__call__"),ou=o(" special method."),nu=l(),y(Fo.$$.fragment),su=l(),Sr=a("p"),au=o("Example:"),ru=l(),y(Gn.$$.fragment),Bi=l(),Yt=a("h2"),Mo=a("a"),Nr=a("span"),y(Zn.$$.fragment),iu=l(),Ir=a("span"),lu=o("TFFlaubertModel"),Ri=l(),Ae=a("div"),y(es.$$.fragment),du=l(),Dr=a("p"),cu=o("The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top."),pu=l(),ts=a("p"),hu=o("This model inherits from "),ma=a("a"),uu=o("TFPreTrainedModel"),mu=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fu=l(),os=a("p"),gu=o("This model is also a "),ns=a("a"),_u=o("tf.keras.Model"),bu=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),vu=l(),y($o.$$.fragment),ku=l(),Je=a("div"),y(ss.$$.fragment),Tu=l(),Jt=a("p"),wu=o("The "),fa=a("a"),yu=o("TFFlaubertModel"),Fu=o(" forward method, overrides the "),Wr=a("code"),Mu=o("__call__"),$u=o(" special method."),xu=l(),y(xo.$$.fragment),Lu=l(),Or=a("p"),Eu=o("Example:"),zu=l(),y(as.$$.fragment),Ui=l(),Kt=a("h2"),Lo=a("a"),Hr=a("span"),y(rs.$$.fragment),Cu=l(),Qr=a("span"),qu=o("TFFlaubertWithLMHeadModel"),Vi=l(),Se=a("div"),y(is.$$.fragment),Pu=l(),Br=a("p"),ju=o(`The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Xu=l(),ls=a("p"),Au=o("This model inherits from "),ga=a("a"),Su=o("TFPreTrainedModel"),Nu=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Iu=l(),ds=a("p"),Du=o("This model is also a "),cs=a("a"),Wu=o("tf.keras.Model"),Ou=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hu=l(),y(Eo.$$.fragment),Qu=l(),Ke=a("div"),y(ps.$$.fragment),Bu=l(),Gt=a("p"),Ru=o("The "),_a=a("a"),Uu=o("TFFlaubertWithLMHeadModel"),Vu=o(" forward method, overrides the "),Rr=a("code"),Yu=o("__call__"),Ju=o(" special method."),Ku=l(),y(zo.$$.fragment),Gu=l(),Ur=a("p"),Zu=o("Example:"),em=l(),y(hs.$$.fragment),Yi=l(),Zt=a("h2"),Co=a("a"),Vr=a("span"),y(us.$$.fragment),tm=l(),Yr=a("span"),om=o("TFFlaubertForSequenceClassification"),Ji=l(),Ne=a("div"),y(ms.$$.fragment),nm=l(),Jr=a("p"),sm=o(`Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),am=l(),fs=a("p"),rm=o("This model inherits from "),ba=a("a"),im=o("TFPreTrainedModel"),lm=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dm=l(),gs=a("p"),cm=o("This model is also a "),_s=a("a"),pm=o("tf.keras.Model"),hm=o(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),um=l(),y(qo.$$.fragment),mm=l(),Ge=a("div"),y(bs.$$.fragment),fm=l(),eo=a("p"),gm=o("The "),va=a("a"),_m=o("TFXLMForSequenceClassification"),bm=o(" forward method, overrides the "),Kr=a("code"),vm=o("__call__"),km=o(" special method."),Tm=l(),y(Po.$$.fragment),wm=l(),Gr=a("p"),ym=o("Example:"),Fm=l(),y(vs.$$.fragment),Ki=l(),to=a("h2"),jo=a("a"),Zr=a("span"),y(ks.$$.fragment),Mm=l(),ei=a("span"),$m=o("TFFlaubertForMultipleChoice"),Gi=l(),Ie=a("div"),y(Ts.$$.fragment),xm=l(),ti=a("p"),Lm=o(`Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Em=l(),ws=a("p"),zm=o("This model inherits from "),ka=a("a"),Cm=o("TFPreTrainedModel"),qm=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pm=l(),ys=a("p"),jm=o("This model is also a "),Fs=a("a"),Xm=o("tf.keras.Model"),Am=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sm=l(),y(Xo.$$.fragment),Nm=l(),Ze=a("div"),y(Ms.$$.fragment),Im=l(),oo=a("p"),Dm=o("The "),Ta=a("a"),Wm=o("TFXLMForMultipleChoice"),Om=o(" forward method, overrides the "),oi=a("code"),Hm=o("__call__"),Qm=o(" special method."),Bm=l(),y(Ao.$$.fragment),Rm=l(),ni=a("p"),Um=o("Example:"),Vm=l(),y($s.$$.fragment),Zi=l(),no=a("h2"),So=a("a"),si=a("span"),y(xs.$$.fragment),Ym=l(),ai=a("span"),Jm=o("TFFlaubertForTokenClassification"),el=l(),De=a("div"),y(Ls.$$.fragment),Km=l(),ri=a("p"),Gm=o(`Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zm=l(),Es=a("p"),ef=o("This model inherits from "),wa=a("a"),tf=o("TFPreTrainedModel"),of=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nf=l(),zs=a("p"),sf=o("This model is also a "),Cs=a("a"),af=o("tf.keras.Model"),rf=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),lf=l(),y(No.$$.fragment),df=l(),et=a("div"),y(qs.$$.fragment),cf=l(),so=a("p"),pf=o("The "),ya=a("a"),hf=o("TFXLMForTokenClassification"),uf=o(" forward method, overrides the "),ii=a("code"),mf=o("__call__"),ff=o(" special method."),gf=l(),y(Io.$$.fragment),_f=l(),li=a("p"),bf=o("Example:"),vf=l(),y(Ps.$$.fragment),tl=l(),ao=a("h2"),Do=a("a"),di=a("span"),y(js.$$.fragment),kf=l(),ci=a("span"),Tf=o("TFFlaubertForQuestionAnsweringSimple"),ol=l(),We=a("div"),y(Xs.$$.fragment),wf=l(),ro=a("p"),yf=o(`Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),pi=a("code"),Ff=o("span start logits"),Mf=o(" and "),hi=a("code"),$f=o("span end logits"),xf=o(")."),Lf=l(),As=a("p"),Ef=o("This model inherits from "),Fa=a("a"),zf=o("TFPreTrainedModel"),Cf=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qf=l(),Ss=a("p"),Pf=o("This model is also a "),Ns=a("a"),jf=o("tf.keras.Model"),Xf=o(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Af=l(),y(Wo.$$.fragment),Sf=l(),tt=a("div"),y(Is.$$.fragment),Nf=l(),io=a("p"),If=o("The "),Ma=a("a"),Df=o("TFXLMForQuestionAnsweringSimple"),Wf=o(" forward method, overrides the "),ui=a("code"),Of=o("__call__"),Hf=o(" special method."),Qf=l(),y(Oo.$$.fragment),Bf=l(),mi=a("p"),Rf=o("Example:"),Uf=l(),y(Ds.$$.fragment),this.h()},l(s){const _=qb('[data-svelte="svelte-1phssyn"]',document.head);p=r(_,"META",{name:!0,content:!0}),_.forEach(t),k=d(s),m=r(s,"H1",{class:!0});var Ws=i(m);g=r(Ws,"A",{id:!0,class:!0,href:!0});var fi=i(g);v=r(fi,"SPAN",{});var gi=i(v);F(b.$$.fragment,gi),gi.forEach(t),fi.forEach(t),f=d(Ws),w=r(Ws,"SPAN",{});var _i=i(w);le=n(_i,"FlauBERT"),_i.forEach(t),Ws.forEach(t),Y=d(s),E=r(s,"H2",{class:!0});var Os=i(E);G=r(Os,"A",{id:!0,class:!0,href:!0});var bi=i(G);H=r(bi,"SPAN",{});var vi=i(H);F(Z.$$.fragment,vi),vi.forEach(t),bi.forEach(t),de=d(Os),Q=r(Os,"SPAN",{});var ki=i(Q);ce=n(ki,"Overview"),ki.forEach(t),Os.forEach(t),se=d(s),U=r(s,"P",{});var Hs=i(U);X=n(Hs,"The FlauBERT model was proposed in the paper "),ee=r(Hs,"A",{href:!0,rel:!0});var Ti=i(ee);J=n(Ti,"FlauBERT: Unsupervised Language Model Pre-training for French"),Ti.forEach(t),z=n(Hs,` by Hang Le et al. It\u2019s a transformer model pretrained using a masked language modeling (MLM) objective (like BERT).`),Hs.forEach(t),q=d(s),te=r(s,"P",{});var wi=i(te);R=n(wi,"The abstract from the paper is the following:"),wi.forEach(t),ae=d(s),oe=r(s,"P",{});var yi=i(oe);B=r(yi,"EM",{});var Fi=i(B);pe=n(Fi,`Language models have become a key step to achieve state-of-the art results in many different Natural Language Processing (NLP) tasks. Leveraging the huge amount of unlabeled texts nowadays available, they provide an efficient way to pre-train continuous word representations that can be fine-tuned for a downstream task, along with their contextualization at the sentence level. This has been widely demonstrated for English using contextualized representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018; Radford et al., 2018; Devlin et al., 2019; Yang et al., 2019b). In this paper, we introduce and share FlauBERT, a model learned on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most of the time they outperform other pretraining approaches. Different versions of FlauBERT as well as a unified evaluation protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared to the research community for further reproducible experiments in French NLP.`),Fi.forEach(t),yi.forEach(t),re=d(s),C=r(s,"P",{});var lo=i(C);he=n(lo,"This model was contributed by "),N=r(lo,"A",{href:!0,rel:!0});var Mi=i(N);ue=n(Mi,"formiel"),Mi.forEach(t),me=n(lo,". 
The original code can be found "),I=r(lo,"A",{href:!0,rel:!0});var $i=i(I);fe=n($i,"here"),$i.forEach(t),ge=n(lo,"."),lo.forEach(t),j=d(s),K=r(s,"H2",{class:!0});var Qs=i(K);S=r(Qs,"A",{id:!0,class:!0,href:!0});var xi=i(S);ne=r(xi,"SPAN",{});var Li=i(ne);F(c.$$.fragment,Li),Li.forEach(t),xi.forEach(t),T=d(Qs),V=r(Qs,"SPAN",{});var Vf=i(V);we=n(Vf,"FlaubertConfig"),Vf.forEach(t),Qs.forEach(t),Te=d(s),P=r(s,"DIV",{class:!0});var $a=i(P);F(ke.$$.fragment,$a),ye=d($a),_e=r($a,"P",{});var xa=i(_e);A=n(xa,"This is the configuration class to store the configuration of a "),D=r(xa,"A",{href:!0});var Yf=i(D);Fe=n(Yf,"FlaubertModel"),Yf.forEach(t),Me=n(xa,` or a `),W=r(xa,"A",{href:!0});var Jf=i(W);$e=n(Jf,"TFFlaubertModel"),Jf.forEach(t),xe=n(xa,`. It is used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.`),xa.forEach(t),be=d($a),ve=r($a,"P",{});var La=i(ve);Gl=n(La,"Configuration objects inherit from "),Bs=r(La,"A",{href:!0});var Kf=i(Bs);Zl=n(Kf,"PretrainedConfig"),Kf.forEach(t),ed=n(La,` and can be used to control the model outputs. Read the documentation from `),Rs=r(La,"A",{href:!0});var Gf=i(Rs);td=n(Gf,"PretrainedConfig"),Gf.forEach(t),od=n(La," for more information."),La.forEach(t),$a.forEach(t),Ei=d(s),qt=r(s,"H2",{class:!0});var sl=i(qt);co=r(sl,"A",{id:!0,class:!0,href:!0});var Zf=i(co);Ba=r(Zf,"SPAN",{});var eg=i(Ba);F(Ro.$$.fragment,eg),eg.forEach(t),Zf.forEach(t),nd=d(sl),Ra=r(sl,"SPAN",{});var tg=i(Ra);sd=n(tg,"FlaubertTokenizer"),tg.forEach(t),sl.forEach(t),zi=d(s),st=r(s,"DIV",{class:!0});var Ho=i(st);F(Uo.$$.fragment,Ho),ad=d(Ho),Ua=r(Ho,"P",{});var og=i(Ua);rd=n(og,"Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:"),og.forEach(t),id=d(Ho),_t=r(Ho,"UL",{});var Qo=i(_t);Va=r(Qo,"LI",{});var ng=i(Va);ld=n(ng,"Moses preprocessing and tokenization."),ng.forEach(t),dd=d(Qo),Ya=r(Qo,"LI",{});var sg=i(Ya);cd=n(sg,"Normalizing all inputs text."),sg.forEach(t),pd=d(Qo),bt=r(Qo,"LI",{});var Bo=i(bt);hd=n(Bo,"The arguments "),Ja=r(Bo,"CODE",{});var ag=i(Ja);ud=n(ag,"special_tokens"),ag.forEach(t),md=n(Bo," and the function "),Ka=r(Bo,"CODE",{});var rg=i(Ka);fd=n(rg,"set_special_tokens"),rg.forEach(t),gd=n(Bo,`, can be used to add additional symbols (like \u201D`),Ga=r(Bo,"STRONG",{});var ig=i(Ga);_d=n(ig,"classify"),ig.forEach(t),bd=n(Bo,"\u201D) to a vocabulary."),Bo.forEach(t),vd=d(Qo),Vo=r(Qo,"LI",{});var al=i(Vo);kd=n(al,"The argument "),Za=r(al,"CODE",{});var lg=i(Za);Td=n(lg,"do_lowercase"),lg.forEach(t),wd=n(al," controls lower casing (automatically set for pretrained vocabularies)."),al.forEach(t),Qo.forEach(t),yd=d(Ho),Yo=r(Ho,"P",{});var rl=i(Yo);Fd=n(rl,"This tokenizer inherits from "),Us=r(rl,"A",{href:!0});var dg=i(Us);Md=n(dg,"XLMTokenizer"),dg.forEach(t),$d=n(rl,`. 
Please check the superclass for usage examples and documentation regarding arguments.`),rl.forEach(t),Ho.forEach(t),Ci=d(s),Pt=r(s,"H2",{class:!0});var il=i(Pt);po=r(il,"A",{id:!0,class:!0,href:!0});var cg=i(po);er=r(cg,"SPAN",{});var pg=i(er);F(Jo.$$.fragment,pg),pg.forEach(t),cg.forEach(t),xd=d(il),tr=r(il,"SPAN",{});var hg=i(tr);Ld=n(hg,"FlaubertModel"),hg.forEach(t),il.forEach(t),qi=d(s),He=r(s,"DIV",{class:!0});var vt=i(He);F(Ko.$$.fragment,vt),Ed=d(vt),or=r(vt,"P",{});var ug=i(or);zd=n(ug,"The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top."),ug.forEach(t),Cd=d(vt),Go=r(vt,"P",{});var ll=i(Go);qd=n(ll,"This model inherits from "),Vs=r(ll,"A",{href:!0});var mg=i(Vs);Pd=n(mg,"PreTrainedModel"),mg.forEach(t),jd=n(ll,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ll.forEach(t),Xd=d(vt),Zo=r(vt,"P",{});var dl=i(Zo);Ad=n(dl,"This model is also a PyTorch "),en=r(dl,"A",{href:!0,rel:!0});var fg=i(en);Sd=n(fg,"torch.nn.Module"),fg.forEach(t),Nd=n(dl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dl.forEach(t),Id=d(vt),Qe=r(vt,"DIV",{class:!0});var kt=i(Qe);F(tn.$$.fragment,kt),Dd=d(kt),jt=r(kt,"P",{});var Ea=i(jt);Wd=n(Ea,"The "),Ys=r(Ea,"A",{href:!0});var gg=i(Ys);Od=n(gg,"FlaubertModel"),gg.forEach(t),Hd=n(Ea," forward method, overrides the "),nr=r(Ea,"CODE",{});var _g=i(nr);Qd=n(_g,"__call__"),_g.forEach(t),Bd=n(Ea," special method."),Ea.forEach(t),Rd=d(kt),F(ho.$$.fragment,kt),Ud=d(kt),sr=r(kt,"P",{});var bg=i(sr);Vd=n(bg,"Example:"),bg.forEach(t),Yd=d(kt),F(on.$$.fragment,kt),kt.forEach(t),vt.forEach(t),Pi=d(s),Xt=r(s,"H2",{class:!0});var cl=i(Xt);uo=r(cl,"A",{id:!0,class:!0,href:!0});var vg=i(uo);ar=r(vg,"SPAN",{});var kg=i(ar);F(nn.$$.fragment,kg),kg.forEach(t),vg.forEach(t),Jd=d(cl),rr=r(cl,"SPAN",{});var Tg=i(rr);Kd=n(Tg,"FlaubertWithLMHeadModel"),Tg.forEach(t),cl.forEach(t),ji=d(s),ze=r(s,"DIV",{class:!0});var at=i(ze);F(sn.$$.fragment,at),Gd=d(at),ir=r(at,"P",{});var wg=i(ir);Zd=n(wg,`The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),wg.forEach(t),ec=d(at),an=r(at,"P",{});var pl=i(an);tc=n(pl,"This model inherits from "),Js=r(pl,"A",{href:!0});var yg=i(Js);oc=n(yg,"PreTrainedModel"),yg.forEach(t),nc=n(pl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pl.forEach(t),sc=d(at),rn=r(at,"P",{});var hl=i(rn);ac=n(hl,"This model is also a PyTorch "),ln=r(hl,"A",{href:!0,rel:!0});var Fg=i(ln);rc=n(Fg,"torch.nn.Module"),Fg.forEach(t),ic=n(hl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hl.forEach(t),lc=d(at),dn=r(at,"P",{});var ul=i(dn);dc=n(ul,"This class overrides "),Ks=r(ul,"A",{href:!0});var Mg=i(Ks);cc=n(Mg,"XLMWithLMHeadModel"),Mg.forEach(t),pc=n(ul,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),ul.forEach(t),hc=d(at),Be=r(at,"DIV",{class:!0});var Tt=i(Be);F(cn.$$.fragment,Tt),uc=d(Tt),At=r(Tt,"P",{});var za=i(At);mc=n(za,"The "),Gs=r(za,"A",{href:!0});var $g=i(Gs);fc=n($g,"XLMWithLMHeadModel"),$g.forEach(t),gc=n(za," forward method, overrides the "),lr=r(za,"CODE",{});var xg=i(lr);_c=n(xg,"__call__"),xg.forEach(t),bc=n(za," special method."),za.forEach(t),vc=d(Tt),F(mo.$$.fragment,Tt),kc=d(Tt),dr=r(Tt,"P",{});var Lg=i(dr);Tc=n(Lg,"Example:"),Lg.forEach(t),wc=d(Tt),F(pn.$$.fragment,Tt),Tt.forEach(t),at.forEach(t),Xi=d(s),St=r(s,"H2",{class:!0});var ml=i(St);fo=r(ml,"A",{id:!0,class:!0,href:!0});var Eg=i(fo);cr=r(Eg,"SPAN",{});var zg=i(cr);F(hn.$$.fragment,zg),zg.forEach(t),Eg.forEach(t),yc=d(ml),pr=r(ml,"SPAN",{});var Cg=i(pr);Fc=n(Cg,"FlaubertForSequenceClassification"),Cg.forEach(t),ml.forEach(t),Ai=d(s),Ce=r(s,"DIV",{class:!0});var rt=i(Ce);F(un.$$.fragment,rt),Mc=d(rt),hr=r(rt,"P",{});var qg=i(hr);$c=n(qg,`Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),qg.forEach(t),xc=d(rt),mn=r(rt,"P",{});var fl=i(mn);Lc=n(fl,"This model inherits from "),Zs=r(fl,"A",{href:!0});var Pg=i(Zs);Ec=n(Pg,"PreTrainedModel"),Pg.forEach(t),zc=n(fl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fl.forEach(t),Cc=d(rt),fn=r(rt,"P",{});var gl=i(fn);qc=n(gl,"This model is also a PyTorch "),gn=r(gl,"A",{href:!0,rel:!0});var jg=i(gn);Pc=n(jg,"torch.nn.Module"),jg.forEach(t),jc=n(gl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gl.forEach(t),Xc=d(rt),_n=r(rt,"P",{});var _l=i(_n);Ac=n(_l,"This class overrides "),ea=r(_l,"A",{href:!0});var Xg=i(ea);Sc=n(Xg,"XLMForSequenceClassification"),Xg.forEach(t),Nc=n(_l,`. Please check the superclass for the appropriate documentation alongside usage examples.`),_l.forEach(t),Ic=d(rt),Ee=r(rt,"DIV",{class:!0});var ot=i(Ee);F(bn.$$.fragment,ot),Dc=d(ot),Nt=r(ot,"P",{});var Ca=i(Nt);Wc=n(Ca,"The "),ta=r(Ca,"A",{href:!0});var Ag=i(ta);Oc=n(Ag,"XLMForSequenceClassification"),Ag.forEach(t),Hc=n(Ca," forward method, overrides the "),ur=r(Ca,"CODE",{});var Sg=i(ur);Qc=n(Sg,"__call__"),Sg.forEach(t),Bc=n(Ca," special method."),Ca.forEach(t),Rc=d(ot),F(go.$$.fragment,ot),Uc=d(ot),mr=r(ot,"P",{});var Ng=i(mr);Vc=n(Ng,"Example of single-label classification:"),Ng.forEach(t),Yc=d(ot),F(vn.$$.fragment,ot),Jc=d(ot),fr=r(ot,"P",{});var Ig=i(fr);Kc=n(Ig,"Example of multi-label classification:"),Ig.forEach(t),Gc=d(ot),F(kn.$$.fragment,ot),ot.forEach(t),rt.forEach(t),Si=d(s),It=r(s,"H2",{class:!0});var bl=i(It);_o=r(bl,"A",{id:!0,class:!0,href:!0});var Dg=i(_o);gr=r(Dg,"SPAN",{});var Wg=i(gr);F(Tn.$$.fragment,Wg),Wg.forEach(t),Dg.forEach(t),Zc=d(bl),_r=r(bl,"SPAN",{});var Og=i(_r);ep=n(Og,"FlaubertForMultipleChoice"),Og.forEach(t),bl.forEach(t),Ni=d(s),qe=r(s,"DIV",{class:!0});var it=i(qe);F(wn.$$.fragment,it),tp=d(it),br=r(it,"P",{});var Hg=i(br);op=n(Hg,`Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.`),Hg.forEach(t),np=d(it),yn=r(it,"P",{});var vl=i(yn);sp=n(vl,"This model inherits from "),oa=r(vl,"A",{href:!0});var Qg=i(oa);ap=n(Qg,"PreTrainedModel"),Qg.forEach(t),rp=n(vl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vl.forEach(t),ip=d(it),Fn=r(it,"P",{});var kl=i(Fn);lp=n(kl,"This model is also a PyTorch "),Mn=r(kl,"A",{href:!0,rel:!0});var Bg=i(Mn);dp=n(Bg,"torch.nn.Module"),Bg.forEach(t),cp=n(kl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kl.forEach(t),pp=d(it),$n=r(it,"P",{});var Tl=i($n);hp=n(Tl,"This class overrides "),na=r(Tl,"A",{href:!0});var Rg=i(na);up=n(Rg,"XLMForMultipleChoice"),Rg.forEach(t),mp=n(Tl,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Tl.forEach(t),fp=d(it),Re=r(it,"DIV",{class:!0});var wt=i(Re);F(xn.$$.fragment,wt),gp=d(wt),Dt=r(wt,"P",{});var qa=i(Dt);_p=n(qa,"The "),sa=r(qa,"A",{href:!0});var Ug=i(sa);bp=n(Ug,"XLMForMultipleChoice"),Ug.forEach(t),vp=n(qa," forward method, overrides the "),vr=r(qa,"CODE",{});var Vg=i(vr);kp=n(Vg,"__call__"),Vg.forEach(t),Tp=n(qa," special method."),qa.forEach(t),wp=d(wt),F(bo.$$.fragment,wt),yp=d(wt),kr=r(wt,"P",{});var Yg=i(kr);Fp=n(Yg,"Example:"),Yg.forEach(t),Mp=d(wt),F(Ln.$$.fragment,wt),wt.forEach(t),it.forEach(t),Ii=d(s),Wt=r(s,"H2",{class:!0});var wl=i(Wt);vo=r(wl,"A",{id:!0,class:!0,href:!0});var Jg=i(vo);Tr=r(Jg,"SPAN",{});var Kg=i(Tr);F(En.$$.fragment,Kg),Kg.forEach(t),Jg.forEach(t),$p=d(wl),wr=r(wl,"SPAN",{});var Gg=i(wr);xp=n(Gg,"FlaubertForTokenClassification"),Gg.forEach(t),wl.forEach(t),Di=d(s),Pe=r(s,"DIV",{class:!0});var lt=i(Pe);F(zn.$$.fragment,lt),Lp=d(lt),yr=r(lt,"P",{});var Zg=i(yr);Ep=n(Zg,`Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zg.forEach(t),zp=d(lt),Cn=r(lt,"P",{});var yl=i(Cn);Cp=n(yl,"This model inherits from "),aa=r(yl,"A",{href:!0});var e_=i(aa);qp=n(e_,"PreTrainedModel"),e_.forEach(t),Pp=n(yl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yl.forEach(t),jp=d(lt),qn=r(lt,"P",{});var Fl=i(qn);Xp=n(Fl,"This model is also a PyTorch "),Pn=r(Fl,"A",{href:!0,rel:!0});var t_=i(Pn);Ap=n(t_,"torch.nn.Module"),t_.forEach(t),Sp=n(Fl,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fl.forEach(t),Np=d(lt),jn=r(lt,"P",{});var Ml=i(jn);Ip=n(Ml,"This class overrides "),ra=r(Ml,"A",{href:!0});var o_=i(ra);Dp=n(o_,"XLMForTokenClassification"),o_.forEach(t),Wp=n(Ml,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ml.forEach(t),Op=d(lt),Ue=r(lt,"DIV",{class:!0});var yt=i(Ue);F(Xn.$$.fragment,yt),Hp=d(yt),Ot=r(yt,"P",{});var Pa=i(Ot);Qp=n(Pa,"The "),ia=r(Pa,"A",{href:!0});var n_=i(ia);Bp=n(n_,"XLMForTokenClassification"),n_.forEach(t),Rp=n(Pa," forward method, overrides the "),Fr=r(Pa,"CODE",{});var s_=i(Fr);Up=n(s_,"__call__"),s_.forEach(t),Vp=n(Pa," special method."),Pa.forEach(t),Yp=d(yt),F(ko.$$.fragment,yt),Jp=d(yt),Mr=r(yt,"P",{});var a_=i(Mr);Kp=n(a_,"Example:"),a_.forEach(t),Gp=d(yt),F(An.$$.fragment,yt),yt.forEach(t),lt.forEach(t),Wi=d(s),Ht=r(s,"H2",{class:!0});var $l=i(Ht);To=r($l,"A",{id:!0,class:!0,href:!0});var r_=i(To);$r=r(r_,"SPAN",{});var i_=i($r);F(Sn.$$.fragment,i_),i_.forEach(t),r_.forEach(t),Zp=d($l),xr=r($l,"SPAN",{});var l_=i(xr);eh=n(l_,"FlaubertForQuestionAnsweringSimple"),l_.forEach(t),$l.forEach(t),Oi=d(s),je=r(s,"DIV",{class:!0});var dt=i(je);F(Nn.$$.fragment,dt),th=d(dt),Qt=r(dt,"P",{});var ja=i(Qt);oh=n(ja,`Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Lr=r(ja,"CODE",{});var d_=i(Lr);nh=n(d_,"span start logits"),d_.forEach(t),sh=n(ja," and "),Er=r(ja,"CODE",{});var c_=i(Er);ah=n(c_,"span end logits"),c_.forEach(t),rh=n(ja,")."),ja.forEach(t),ih=d(dt),In=r(dt,"P",{});var xl=i(In);lh=n(xl,"This model inherits from "),la=r(xl,"A",{href:!0});var p_=i(la);dh=n(p_,"PreTrainedModel"),p_.forEach(t),ch=n(xl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xl.forEach(t),ph=d(dt),Dn=r(dt,"P",{});var Ll=i(Dn);hh=n(Ll,"This model is also a PyTorch "),Wn=r(Ll,"A",{href:!0,rel:!0});var h_=i(Wn);uh=n(h_,"torch.nn.Module"),h_.forEach(t),mh=n(Ll,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ll.forEach(t),fh=d(dt),On=r(dt,"P",{});var El=i(On);gh=n(El,"This class overrides "),da=r(El,"A",{href:!0});var u_=i(da);_h=n(u_,"XLMForQuestionAnsweringSimple"),u_.forEach(t),bh=n(El,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),El.forEach(t),vh=d(dt),Ve=r(dt,"DIV",{class:!0});var Ft=i(Ve);F(Hn.$$.fragment,Ft),kh=d(Ft),Bt=r(Ft,"P",{});var Xa=i(Bt);Th=n(Xa,"The "),ca=r(Xa,"A",{href:!0});var m_=i(ca);wh=n(m_,"XLMForQuestionAnsweringSimple"),m_.forEach(t),yh=n(Xa," forward method, overrides the "),zr=r(Xa,"CODE",{});var f_=i(zr);Fh=n(f_,"__call__"),f_.forEach(t),Mh=n(Xa," special method."),Xa.forEach(t),$h=d(Ft),F(wo.$$.fragment,Ft),xh=d(Ft),Cr=r(Ft,"P",{});var g_=i(Cr);Lh=n(g_,"Example:"),g_.forEach(t),Eh=d(Ft),F(Qn.$$.fragment,Ft),Ft.forEach(t),dt.forEach(t),Hi=d(s),Rt=r(s,"H2",{class:!0});var zl=i(Rt);yo=r(zl,"A",{id:!0,class:!0,href:!0});var __=i(yo);qr=r(__,"SPAN",{});var b_=i(qr);F(Bn.$$.fragment,b_),b_.forEach(t),__.forEach(t),zh=d(zl),Pr=r(zl,"SPAN",{});var v_=i(Pr);Ch=n(v_,"FlaubertForQuestionAnswering"),v_.forEach(t),zl.forEach(t),Qi=d(s),Xe=r(s,"DIV",{class:!0});var ct=i(Xe);F(Rn.$$.fragment,ct),qh=d(ct),Ut=r(ct,"P",{});var Aa=i(Ut);Ph=n(Aa,`Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),jr=r(Aa,"CODE",{});var k_=i(jr);jh=n(k_,"span start logits"),k_.forEach(t),Xh=n(Aa," and "),Xr=r(Aa,"CODE",{});var T_=i(Xr);Ah=n(T_,"span end logits"),T_.forEach(t),Sh=n(Aa,")."),Aa.forEach(t),Nh=d(ct),Un=r(ct,"P",{});var Cl=i(Un);Ih=n(Cl,"This model inherits from "),pa=r(Cl,"A",{href:!0});var w_=i(pa);Dh=n(w_,"PreTrainedModel"),w_.forEach(t),Wh=n(Cl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cl.forEach(t),Oh=d(ct),Vn=r(ct,"P",{});var ql=i(Vn);Hh=n(ql,"This model is also a PyTorch "),Yn=r(ql,"A",{href:!0,rel:!0});var y_=i(Yn);Qh=n(y_,"torch.nn.Module"),y_.forEach(t),Bh=n(ql,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ql.forEach(t),Rh=d(ct),Jn=r(ct,"P",{});var Pl=i(Jn);Uh=n(Pl,"This class overrides "),ha=r(Pl,"A",{href:!0});var F_=i(ha);Vh=n(F_,"XLMForQuestionAnswering"),F_.forEach(t),Yh=n(Pl,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Pl.forEach(t),Jh=d(ct),Ye=r(ct,"DIV",{class:!0});var Mt=i(Ye);F(Kn.$$.fragment,Mt),Kh=d(Mt),Vt=r(Mt,"P",{});var Sa=i(Vt);Gh=n(Sa,"The "),ua=r(Sa,"A",{href:!0});var M_=i(ua);Zh=n(M_,"XLMForQuestionAnswering"),M_.forEach(t),eu=n(Sa," forward method, overrides the "),Ar=r(Sa,"CODE",{});var $_=i(Ar);tu=n($_,"__call__"),$_.forEach(t),ou=n(Sa," special method."),Sa.forEach(t),nu=d(Mt),F(Fo.$$.fragment,Mt),su=d(Mt),Sr=r(Mt,"P",{});var x_=i(Sr);au=n(x_,"Example:"),x_.forEach(t),ru=d(Mt),F(Gn.$$.fragment,Mt),Mt.forEach(t),ct.forEach(t),Bi=d(s),Yt=r(s,"H2",{class:!0});var jl=i(Yt);Mo=r(jl,"A",{id:!0,class:!0,href:!0});var L_=i(Mo);Nr=r(L_,"SPAN",{});var E_=i(Nr);F(Zn.$$.fragment,E_),E_.forEach(t),L_.forEach(t),iu=d(jl),Ir=r(jl,"SPAN",{});var z_=i(Ir);lu=n(z_,"TFFlaubertModel"),z_.forEach(t),jl.forEach(t),Ri=d(s),Ae=r(s,"DIV",{class:!0});var pt=i(Ae);F(es.$$.fragment,pt),du=d(pt),Dr=r(pt,"P",{});var C_=i(Dr);cu=n(C_,"The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top."),C_.forEach(t),pu=d(pt),ts=r(pt,"P",{});var Xl=i(ts);hu=n(Xl,"This model inherits from "),ma=r(Xl,"A",{href:!0});var q_=i(ma);uu=n(q_,"TFPreTrainedModel"),q_.forEach(t),mu=n(Xl,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xl.forEach(t),fu=d(pt),os=r(pt,"P",{});var Al=i(os);gu=n(Al,"This model is also a "),ns=r(Al,"A",{href:!0,rel:!0});var P_=i(ns);_u=n(P_,"tf.keras.Model"),P_.forEach(t),bu=n(Al,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Al.forEach(t),vu=d(pt),F($o.$$.fragment,pt),ku=d(pt),Je=r(pt,"DIV",{class:!0});var $t=i(Je);F(ss.$$.fragment,$t),Tu=d($t),Jt=r($t,"P",{});var Na=i(Jt);wu=n(Na,"The "),fa=r(Na,"A",{href:!0});var j_=i(fa);yu=n(j_,"TFFlaubertModel"),j_.forEach(t),Fu=n(Na," forward method, overrides the "),Wr=r(Na,"CODE",{});var X_=i(Wr);Mu=n(X_,"__call__"),X_.forEach(t),$u=n(Na," special method."),Na.forEach(t),xu=d($t),F(xo.$$.fragment,$t),Lu=d($t),Or=r($t,"P",{});var A_=i(Or);Eu=n(A_,"Example:"),A_.forEach(t),zu=d($t),F(as.$$.fragment,$t),$t.forEach(t),pt.forEach(t),Ui=d(s),Kt=r(s,"H2",{class:!0});var Sl=i(Kt);Lo=r(Sl,"A",{id:!0,class:!0,href:!0});var S_=i(Lo);Hr=r(S_,"SPAN",{});var N_=i(Hr);F(rs.$$.fragment,N_),N_.forEach(t),S_.forEach(t),Cu=d(Sl),Qr=r(Sl,"SPAN",{});var I_=i(Qr);qu=n(I_,"TFFlaubertWithLMHeadModel"),I_.forEach(t),Sl.forEach(t),Vi=d(s),Se=r(s,"DIV",{class:!0});var ht=i(Se);F(is.$$.fragment,ht),Pu=d(ht),Br=r(ht,"P",{});var D_=i(Br);ju=n(D_,`The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),D_.forEach(t),Xu=d(ht),ls=r(ht,"P",{});var Nl=i(ls);Au=n(Nl,"This model inherits from "),ga=r(Nl,"A",{href:!0});var W_=i(ga);Su=n(W_,"TFPreTrainedModel"),W_.forEach(t),Nu=n(Nl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nl.forEach(t),Iu=d(ht),ds=r(ht,"P",{});var Il=i(ds);Du=n(Il,"This model is also a "),cs=r(Il,"A",{href:!0,rel:!0});var O_=i(cs);Wu=n(O_,"tf.keras.Model"),O_.forEach(t),Ou=n(Il,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Il.forEach(t),Hu=d(ht),F(Eo.$$.fragment,ht),Qu=d(ht),Ke=r(ht,"DIV",{class:!0});var xt=i(Ke);F(ps.$$.fragment,xt),Bu=d(xt),Gt=r(xt,"P",{});var Ia=i(Gt);Ru=n(Ia,"The "),_a=r(Ia,"A",{href:!0});var H_=i(_a);Uu=n(H_,"TFFlaubertWithLMHeadModel"),H_.forEach(t),Vu=n(Ia," forward method, overrides the "),Rr=r(Ia,"CODE",{});var Q_=i(Rr);Yu=n(Q_,"__call__"),Q_.forEach(t),Ju=n(Ia," special method."),Ia.forEach(t),Ku=d(xt),F(zo.$$.fragment,xt),Gu=d(xt),Ur=r(xt,"P",{});var B_=i(Ur);Zu=n(B_,"Example:"),B_.forEach(t),em=d(xt),F(hs.$$.fragment,xt),xt.forEach(t),ht.forEach(t),Yi=d(s),Zt=r(s,"H2",{class:!0});var Dl=i(Zt);Co=r(Dl,"A",{id:!0,class:!0,href:!0});var R_=i(Co);Vr=r(R_,"SPAN",{});var U_=i(Vr);F(us.$$.fragment,U_),U_.forEach(t),R_.forEach(t),tm=d(Dl),Yr=r(Dl,"SPAN",{});var V_=i(Yr);om=n(V_,"TFFlaubertForSequenceClassification"),V_.forEach(t),Dl.forEach(t),Ji=d(s),Ne=r(s,"DIV",{class:!0});var ut=i(Ne);F(ms.$$.fragment,ut),nm=d(ut),Jr=r(ut,"P",{});var Y_=i(Jr);sm=n(Y_,`Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.`),Y_.forEach(t),am=d(ut),fs=r(ut,"P",{});var Wl=i(fs);rm=n(Wl,"This model inherits from "),ba=r(Wl,"A",{href:!0});var J_=i(ba);im=n(J_,"TFPreTrainedModel"),J_.forEach(t),lm=n(Wl,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wl.forEach(t),dm=d(ut),gs=r(ut,"P",{});var Ol=i(gs);cm=n(Ol,"This model is also a "),_s=r(Ol,"A",{href:!0,rel:!0});var K_=i(_s);pm=n(K_,"tf.keras.Model"),K_.forEach(t),hm=n(Ol,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ol.forEach(t),um=d(ut),F(qo.$$.fragment,ut),mm=d(ut),Ge=r(ut,"DIV",{class:!0});var Lt=i(Ge);F(bs.$$.fragment,Lt),fm=d(Lt),eo=r(Lt,"P",{});var Da=i(eo);gm=n(Da,"The "),va=r(Da,"A",{href:!0});var G_=i(va);_m=n(G_,"TFXLMForSequenceClassification"),G_.forEach(t),bm=n(Da," forward method, overrides the "),Kr=r(Da,"CODE",{});var Z_=i(Kr);vm=n(Z_,"__call__"),Z_.forEach(t),km=n(Da," special method."),Da.forEach(t),Tm=d(Lt),F(Po.$$.fragment,Lt),wm=d(Lt),Gr=r(Lt,"P",{});var eb=i(Gr);ym=n(eb,"Example:"),eb.forEach(t),Fm=d(Lt),F(vs.$$.fragment,Lt),Lt.forEach(t),ut.forEach(t),Ki=d(s),to=r(s,"H2",{class:!0});var Hl=i(to);jo=r(Hl,"A",{id:!0,class:!0,href:!0});var tb=i(jo);Zr=r(tb,"SPAN",{});var ob=i(Zr);F(ks.$$.fragment,ob),ob.forEach(t),tb.forEach(t),Mm=d(Hl),ei=r(Hl,"SPAN",{});var nb=i(ei);$m=n(nb,"TFFlaubertForMultipleChoice"),nb.forEach(t),Hl.forEach(t),Gi=d(s),Ie=r(s,"DIV",{class:!0});var mt=i(Ie);F(Ts.$$.fragment,mt),xm=d(mt),ti=r(mt,"P",{});var sb=i(ti);Lm=n(sb,`Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),sb.forEach(t),Em=d(mt),ws=r(mt,"P",{});var Ql=i(ws);zm=n(Ql,"This model inherits from "),ka=r(Ql,"A",{href:!0});var ab=i(ka);Cm=n(ab,"TFPreTrainedModel"),ab.forEach(t),qm=n(Ql,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ql.forEach(t),Pm=d(mt),ys=r(mt,"P",{});var Bl=i(ys);jm=n(Bl,"This model is also a "),Fs=r(Bl,"A",{href:!0,rel:!0});var rb=i(Fs);Xm=n(rb,"tf.keras.Model"),rb.forEach(t),Am=n(Bl,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Bl.forEach(t),Sm=d(mt),F(Xo.$$.fragment,mt),Nm=d(mt),Ze=r(mt,"DIV",{class:!0});var Et=i(Ze);F(Ms.$$.fragment,Et),Im=d(Et),oo=r(Et,"P",{});var Wa=i(oo);Dm=n(Wa,"The "),Ta=r(Wa,"A",{href:!0});var ib=i(Ta);Wm=n(ib,"TFXLMForMultipleChoice"),ib.forEach(t),Om=n(Wa," forward method, overrides the "),oi=r(Wa,"CODE",{});var lb=i(oi);Hm=n(lb,"__call__"),lb.forEach(t),Qm=n(Wa," special method."),Wa.forEach(t),Bm=d(Et),F(Ao.$$.fragment,Et),Rm=d(Et),ni=r(Et,"P",{});var db=i(ni);Um=n(db,"Example:"),db.forEach(t),Vm=d(Et),F($s.$$.fragment,Et),Et.forEach(t),mt.forEach(t),Zi=d(s),no=r(s,"H2",{class:!0});var Rl=i(no);So=r(Rl,"A",{id:!0,class:!0,href:!0});var cb=i(So);si=r(cb,"SPAN",{});var pb=i(si);F(xs.$$.fragment,pb),pb.forEach(t),cb.forEach(t),Ym=d(Rl),ai=r(Rl,"SPAN",{});var hb=i(ai);Jm=n(hb,"TFFlaubertForTokenClassification"),hb.forEach(t),Rl.forEach(t),el=d(s),De=r(s,"DIV",{class:!0});var ft=i(De);F(Ls.$$.fragment,ft),Km=d(ft),ri=r(ft,"P",{});var ub=i(ri);Gm=n(ub,`Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ub.forEach(t),Zm=d(ft),Es=r(ft,"P",{});var Ul=i(Es);ef=n(Ul,"This model inherits from "),wa=r(Ul,"A",{href:!0});var mb=i(wa);tf=n(mb,"TFPreTrainedModel"),mb.forEach(t),of=n(Ul,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ul.forEach(t),nf=d(ft),zs=r(ft,"P",{});var Vl=i(zs);sf=n(Vl,"This model is also a "),Cs=r(Vl,"A",{href:!0,rel:!0});var fb=i(Cs);af=n(fb,"tf.keras.Model"),fb.forEach(t),rf=n(Vl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vl.forEach(t),lf=d(ft),F(No.$$.fragment,ft),df=d(ft),et=r(ft,"DIV",{class:!0});var zt=i(et);F(qs.$$.fragment,zt),cf=d(zt),so=r(zt,"P",{});var Oa=i(so);pf=n(Oa,"The "),ya=r(Oa,"A",{href:!0});var gb=i(ya);hf=n(gb,"TFXLMForTokenClassification"),gb.forEach(t),uf=n(Oa," forward method, overrides the "),ii=r(Oa,"CODE",{});var _b=i(ii);mf=n(_b,"__call__"),_b.forEach(t),ff=n(Oa," special method."),Oa.forEach(t),gf=d(zt),F(Io.$$.fragment,zt),_f=d(zt),li=r(zt,"P",{});var bb=i(li);bf=n(bb,"Example:"),bb.forEach(t),vf=d(zt),F(Ps.$$.fragment,zt),zt.forEach(t),ft.forEach(t),tl=d(s),ao=r(s,"H2",{class:!0});var Yl=i(ao);Do=r(Yl,"A",{id:!0,class:!0,href:!0});var vb=i(Do);di=r(vb,"SPAN",{});var kb=i(di);F(js.$$.fragment,kb),kb.forEach(t),vb.forEach(t),kf=d(Yl),ci=r(Yl,"SPAN",{});var Tb=i(ci);Tf=n(Tb,"TFFlaubertForQuestionAnsweringSimple"),Tb.forEach(t),Yl.forEach(t),ol=d(s),We=r(s,"DIV",{class:!0});var gt=i(We);F(Xs.$$.fragment,gt),wf=d(gt),ro=r(gt,"P",{});var Ha=i(ro);yf=n(Ha,`Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),pi=r(Ha,"CODE",{});var wb=i(pi);Ff=n(wb,"span start logits"),wb.forEach(t),Mf=n(Ha," and "),hi=r(Ha,"CODE",{});var yb=i(hi);$f=n(yb,"span end logits"),yb.forEach(t),xf=n(Ha,")."),Ha.forEach(t),Lf=d(gt),As=r(gt,"P",{});var Jl=i(As);Ef=n(Jl,"This model inherits from "),Fa=r(Jl,"A",{href:!0});var Fb=i(Fa);zf=n(Fb,"TFPreTrainedModel"),Fb.forEach(t),Cf=n(Jl,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jl.forEach(t),qf=d(gt),Ss=r(gt,"P",{});var Kl=i(Ss);Pf=n(Kl,"This model is also a "),Ns=r(Kl,"A",{href:!0,rel:!0});var Mb=i(Ns);jf=n(Mb,"tf.keras.Model"),Mb.forEach(t),Xf=n(Kl,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Kl.forEach(t),Af=d(gt),F(Wo.$$.fragment,gt),Sf=d(gt),tt=r(gt,"DIV",{class:!0});var Ct=i(tt);F(Is.$$.fragment,Ct),Nf=d(Ct),io=r(Ct,"P",{});var Qa=i(io);If=n(Qa,"The "),Ma=r(Qa,"A",{href:!0});var $b=i(Ma);Df=n($b,"TFXLMForQuestionAnsweringSimple"),$b.forEach(t),Wf=n(Qa," forward method, overrides the "),ui=r(Qa,"CODE",{});var xb=i(ui);Of=n(xb,"__call__"),xb.forEach(t),Hf=n(Qa," special method."),Qa.forEach(t),Qf=d(Ct),F(Oo.$$.fragment,Ct),Bf=d(Ct),mi=r(Ct,"P",{});var Lb=i(mi);Rf=n(Lb,"Example:"),Lb.forEach(t),Uf=d(Ct),F(Ds.$$.fragment,Ct),Ct.forEach(t),gt.forEach(t),this.h()},h(){h(p,"name","hf:doc:metadata"),h(p,"content",JSON.stringify(Zb)),h(g,"id","flaubert"),h(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(g,"href","#flaubert"),h(m,"class","relative group"),h(G,"id","overview"),h(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(G,"href","#overview"),h(E,"class","relative group"),h(ee,"href","https://arxiv.org/abs/1912.05372"),h(ee,"rel","nofollow"),h(N,"href","https://huggingface.co/formiel"),h(N,"rel","nofollow"),h(I,"href","https://github.com/getalp/Flaubert"),h(I,"rel","nofollow"),h(S,"id","transformers.FlaubertConfig"),h(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(S,"href","#transformers.FlaubertConfig"),h(K,"class","relative group"),h(D,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertModel"),h(W,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertModel"),h(Bs,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(Rs,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(P,"class","docstring"),h(co,"id","transformers.FlaubertTokenizer"),h(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(co,"href","#transformers.FlaubertTokenizer"),h(qt,"class","relative group"),h(Us,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMTokenizer"),h(st,"class","docstring"),h(po,"id","transformers.FlaubertModel"),h(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(po,"href","#transformers.FlaubertModel"),h(Pt,"class","relative 
group"),h(Vs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(en,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(en,"rel","nofollow"),h(Ys,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.FlaubertModel"),h(Qe,"class","docstring"),h(He,"class","docstring"),h(uo,"id","transformers.FlaubertWithLMHeadModel"),h(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(uo,"href","#transformers.FlaubertWithLMHeadModel"),h(Xt,"class","relative group"),h(Js,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(ln,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(ln,"rel","nofollow"),h(Ks,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel"),h(Gs,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMWithLMHeadModel"),h(Be,"class","docstring"),h(ze,"class","docstring"),h(fo,"id","transformers.FlaubertForSequenceClassification"),h(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(fo,"href","#transformers.FlaubertForSequenceClassification"),h(St,"class","relative group"),h(Zs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(gn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(gn,"rel","nofollow"),h(ea,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForSequenceClassification"),h(ta,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForSequenceClassification"),h(Ee,"class","docstring"),h(Ce,"class","docstring"),h(_o,"id","transformers.FlaubertForMultipleChoice"),h(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(_o,"href","#transformers.FlaubertForMultipleChoice"),h(It,"class","relative group"),h(oa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Mn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Mn,"rel","nofollow"),h(na,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForMultipleChoice"),h(sa,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForMultipleChoice"),h(Re,"class","docstring"),h(qe,"class","docstring"),h(vo,"id","transformers.FlaubertForTokenClassification"),h(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(vo,"href","#transformers.FlaubertForTokenClassification"),h(Wt,"class","relative group"),h(aa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Pn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Pn,"rel","nofollow"),h(ra,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForTokenClassification"),h(ia,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForTokenClassification"),h(Ue,"class","docstring"),h(Pe,"class","docstring"),h(To,"id","transformers.FlaubertForQuestionAnsweringSimple"),h(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),h(To,"href","#transformers.FlaubertForQuestionAnsweringSimple"),h(Ht,"class","relative group"),h(la,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Wn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Wn,"rel","nofollow"),h(da,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnsweringSimple"),h(ca,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnsweringSimple"),h(Ve,"class","docstring"),h(je,"class","docstring"),h(yo,"id","transformers.FlaubertForQuestionAnswering"),h(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(yo,"href","#transformers.FlaubertForQuestionAnswering"),h(Rt,"class","relative group"),h(pa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),h(Yn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Yn,"rel","nofollow"),h(ha,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnswering"),h(ua,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.XLMForQuestionAnswering"),h(Ye,"class","docstring"),h(Xe,"class","docstring"),h(Mo,"id","transformers.TFFlaubertModel"),h(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Mo,"href","#transformers.TFFlaubertModel"),h(Yt,"class","relative group"),h(ma,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(ns,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(ns,"rel","nofollow"),h(fa,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertModel"),h(Je,"class","docstring"),h(Ae,"class","docstring"),h(Lo,"id","transformers.TFFlaubertWithLMHeadModel"),h(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Lo,"href","#transformers.TFFlaubertWithLMHeadModel"),h(Kt,"class","relative group"),h(ga,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(cs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(cs,"rel","nofollow"),h(_a,"href","/docs/transformers/v4.15.0/en/model_doc/flaubert#transformers.TFFlaubertWithLMHeadModel"),h(Ke,"class","docstring"),h(Se,"class","docstring"),h(Co,"id","transformers.TFFlaubertForSequenceClassification"),h(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Co,"href","#transformers.TFFlaubertForSequenceClassification"),h(Zt,"class","relative group"),h(ba,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(_s,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(_s,"rel","nofollow"),h(va,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForSequenceClassification"),h(Ge,"class","docstring"),h(Ne,"class","docstring"),h(jo,"id","transformers.TFFlaubertForMultipleChoice"),h(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),h(jo,"href","#transformers.TFFlaubertForMultipleChoice"),h(to,"class","relative group"),h(ka,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Fs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Fs,"rel","nofollow"),h(Ta,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForMultipleChoice"),h(Ze,"class","docstring"),h(Ie,"class","docstring"),h(So,"id","transformers.TFFlaubertForTokenClassification"),h(So,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(So,"href","#transformers.TFFlaubertForTokenClassification"),h(no,"class","relative group"),h(wa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Cs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Cs,"rel","nofollow"),h(ya,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForTokenClassification"),h(et,"class","docstring"),h(De,"class","docstring"),h(Do,"id","transformers.TFFlaubertForQuestionAnsweringSimple"),h(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Do,"href","#transformers.TFFlaubertForQuestionAnsweringSimple"),h(ao,"class","relative group"),h(Fa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),h(Ns,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Ns,"rel","nofollow"),h(Ma,"href","/docs/transformers/v4.15.0/en/model_doc/xlm#transformers.TFXLMForQuestionAnsweringSimple"),h(tt,"class","docstring"),h(We,"class","docstring")},m(s,_){e(document.head,p),u(s,k,_),u(s,m,_),e(m,g),e(g,v),M(b,v,null),e(m,f),e(m,w),e(w,le),u(s,Y,_),u(s,E,_),e(E,G),e(G,H),M(Z,H,null),e(E,de),e(E,Q),e(Q,ce),u(s,se,_),u(s,U,_),e(U,X),e(U,ee),e(ee,J),e(U,z),u(s,q,_),u(s,te,_),e(te,R),u(s,ae,_),u(s,oe,_),e(oe,B),e(B,pe),u(s,re,_),u(s,C,_),e(C,he),e(C,N),e(N,ue),e(C,me),e(C,I),e(I,fe),e(C,ge),u(s,j,_),u(s,K,_),e(K,S),e(S,ne),M(c,ne,null),e(K,T),e(K,V),e(V,we),u(s,Te,_),u(s,P,_),M(ke,P,null),e(P,ye),e(P,_e),e(_e,A),e(_e,D),e(D,Fe),e(_e,Me),e(_e,W),e(W,$e),e(_e,xe),e(P,be),e(P,ve),e(ve,Gl),e(ve,Bs),e(Bs,Zl),e(ve,ed),e(ve,Rs),e(Rs,td),e(ve,od),u(s,Ei,_),u(s,qt,_),e(qt,co),e(co,Ba),M(Ro,Ba,null),e(qt,nd),e(qt,Ra),e(Ra,sd),u(s,zi,_),u(s,st,_),M(Uo,st,null),e(st,ad),e(st,Ua),e(Ua,rd),e(st,id),e(st,_t),e(_t,Va),e(Va,ld),e(_t,dd),e(_t,Ya),e(Ya,cd),e(_t,pd),e(_t,bt),e(bt,hd),e(bt,Ja),e(Ja,ud),e(bt,md),e(bt,Ka),e(Ka,fd),e(bt,gd),e(bt,Ga),e(Ga,_d),e(bt,bd),e(_t,vd),e(_t,Vo),e(Vo,kd),e(Vo,Za),e(Za,Td),e(Vo,wd),e(st,yd),e(st,Yo),e(Yo,Fd),e(Yo,Us),e(Us,Md),e(Yo,$d),u(s,Ci,_),u(s,Pt,_),e(Pt,po),e(po,er),M(Jo,er,null),e(Pt,xd),e(Pt,tr),e(tr,Ld),u(s,qi,_),u(s,He,_),M(Ko,He,null),e(He,Ed),e(He,or),e(or,zd),e(He,Cd),e(He,Go),e(Go,qd),e(Go,Vs),e(Vs,Pd),e(Go,jd),e(He,Xd),e(He,Zo),e(Zo,Ad),e(Zo,en),e(en,Sd),e(Zo,Nd),e(He,Id),e(He,Qe),M(tn,Qe,null),e(Qe,Dd),e(Qe,jt),e(jt,Wd),e(jt,Ys),e(Ys,Od),e(jt,Hd),e(jt,nr),e(nr,Qd),e(jt,Bd),e(Qe,Rd),M(ho,Qe,null),e(Qe,Ud),e(Qe,sr),e(sr,Vd),e(Qe,Yd),M(on,Qe,null),u(s,Pi,_),u(s,Xt,_),e(Xt,uo),e(uo,ar),M(nn,ar,null),e(Xt,Jd),e(Xt,rr),e(rr,Kd),u(s,ji,_),u(s,ze,_),M(sn,ze,null),e(ze,Gd),e(ze,ir),e(ir,Zd),e(ze,ec),e(ze,an),e(an,tc),e(an,Js),e(Js,oc),e(an,nc),e(ze,sc),e(ze,rn),e(rn,ac),e(rn,ln),e(ln,rc),e(rn,ic),e(ze,lc),e(ze,dn),e(dn,dc),e(dn,Ks),e(Ks,cc)
,e(dn,pc),e(ze,hc),e(ze,Be),M(cn,Be,null),e(Be,uc),e(Be,At),e(At,mc),e(At,Gs),e(Gs,fc),e(At,gc),e(At,lr),e(lr,_c),e(At,bc),e(Be,vc),M(mo,Be,null),e(Be,kc),e(Be,dr),e(dr,Tc),e(Be,wc),M(pn,Be,null),u(s,Xi,_),u(s,St,_),e(St,fo),e(fo,cr),M(hn,cr,null),e(St,yc),e(St,pr),e(pr,Fc),u(s,Ai,_),u(s,Ce,_),M(un,Ce,null),e(Ce,Mc),e(Ce,hr),e(hr,$c),e(Ce,xc),e(Ce,mn),e(mn,Lc),e(mn,Zs),e(Zs,Ec),e(mn,zc),e(Ce,Cc),e(Ce,fn),e(fn,qc),e(fn,gn),e(gn,Pc),e(fn,jc),e(Ce,Xc),e(Ce,_n),e(_n,Ac),e(_n,ea),e(ea,Sc),e(_n,Nc),e(Ce,Ic),e(Ce,Ee),M(bn,Ee,null),e(Ee,Dc),e(Ee,Nt),e(Nt,Wc),e(Nt,ta),e(ta,Oc),e(Nt,Hc),e(Nt,ur),e(ur,Qc),e(Nt,Bc),e(Ee,Rc),M(go,Ee,null),e(Ee,Uc),e(Ee,mr),e(mr,Vc),e(Ee,Yc),M(vn,Ee,null),e(Ee,Jc),e(Ee,fr),e(fr,Kc),e(Ee,Gc),M(kn,Ee,null),u(s,Si,_),u(s,It,_),e(It,_o),e(_o,gr),M(Tn,gr,null),e(It,Zc),e(It,_r),e(_r,ep),u(s,Ni,_),u(s,qe,_),M(wn,qe,null),e(qe,tp),e(qe,br),e(br,op),e(qe,np),e(qe,yn),e(yn,sp),e(yn,oa),e(oa,ap),e(yn,rp),e(qe,ip),e(qe,Fn),e(Fn,lp),e(Fn,Mn),e(Mn,dp),e(Fn,cp),e(qe,pp),e(qe,$n),e($n,hp),e($n,na),e(na,up),e($n,mp),e(qe,fp),e(qe,Re),M(xn,Re,null),e(Re,gp),e(Re,Dt),e(Dt,_p),e(Dt,sa),e(sa,bp),e(Dt,vp),e(Dt,vr),e(vr,kp),e(Dt,Tp),e(Re,wp),M(bo,Re,null),e(Re,yp),e(Re,kr),e(kr,Fp),e(Re,Mp),M(Ln,Re,null),u(s,Ii,_),u(s,Wt,_),e(Wt,vo),e(vo,Tr),M(En,Tr,null),e(Wt,$p),e(Wt,wr),e(wr,xp),u(s,Di,_),u(s,Pe,_),M(zn,Pe,null),e(Pe,Lp),e(Pe,yr),e(yr,Ep),e(Pe,zp),e(Pe,Cn),e(Cn,Cp),e(Cn,aa),e(aa,qp),e(Cn,Pp),e(Pe,jp),e(Pe,qn),e(qn,Xp),e(qn,Pn),e(Pn,Ap),e(qn,Sp),e(Pe,Np),e(Pe,jn),e(jn,Ip),e(jn,ra),e(ra,Dp),e(jn,Wp),e(Pe,Op),e(Pe,Ue),M(Xn,Ue,null),e(Ue,Hp),e(Ue,Ot),e(Ot,Qp),e(Ot,ia),e(ia,Bp),e(Ot,Rp),e(Ot,Fr),e(Fr,Up),e(Ot,Vp),e(Ue,Yp),M(ko,Ue,null),e(Ue,Jp),e(Ue,Mr),e(Mr,Kp),e(Ue,Gp),M(An,Ue,null),u(s,Wi,_),u(s,Ht,_),e(Ht,To),e(To,$r),M(Sn,$r,null),e(Ht,Zp),e(Ht,xr),e(xr,eh),u(s,Oi,_),u(s,je,_),M(Nn,je,null),e(je,th),e(je,Qt),e(Qt,oh),e(Qt,Lr),e(Lr,nh),e(Qt,sh),e(Qt,Er),e(Er,ah),e(Qt,rh),e(je,ih),e(je,In),e(In,lh),e(In,la),e(la,dh),e(In,ch),e(je,ph),e(je,Dn),e(Dn,hh),e(Dn,Wn),e(Wn,uh),e(Dn,mh),e(je,fh),e(je,On),e(On,gh),e(On,da),e(da,_h),e(On,bh),e(je,vh),e(je,Ve),M(Hn,Ve,null),e(Ve,kh),e(Ve,Bt),e(Bt,Th),e(Bt,ca),e(ca,wh),e(Bt,yh),e(Bt,zr),e(zr,Fh),e(Bt,Mh),e(Ve,$h),M(wo,Ve,null),e(Ve,xh),e(Ve,Cr),e(Cr,Lh),e(Ve,Eh),M(Qn,Ve,null),u(s,Hi,_),u(s,Rt,_),e(Rt,yo),e(yo,qr),M(Bn,qr,null),e(Rt,zh),e(Rt,Pr),e(Pr,Ch),u(s,Qi,_),u(s,Xe,_),M(Rn,Xe,null),e(Xe,qh),e(Xe,Ut),e(Ut,Ph),e(Ut,jr),e(jr,jh),e(Ut,Xh),e(Ut,Xr),e(Xr,Ah),e(Ut,Sh),e(Xe,Nh),e(Xe,Un),e(Un,Ih),e(Un,pa),e(pa,Dh),e(Un,Wh),e(Xe,Oh),e(Xe,Vn),e(Vn,Hh),e(Vn,Yn),e(Yn,Qh),e(Vn,Bh),e(Xe,Rh),e(Xe,Jn),e(Jn,Uh),e(Jn,ha),e(ha,Vh),e(Jn,Yh),e(Xe,Jh),e(Xe,Ye),M(Kn,Ye,null),e(Ye,Kh),e(Ye,Vt),e(Vt,Gh),e(Vt,ua),e(ua,Zh),e(Vt,eu),e(Vt,Ar),e(Ar,tu),e(Vt,ou),e(Ye,nu),M(Fo,Ye,null),e(Ye,su),e(Ye,Sr),e(Sr,au),e(Ye,ru),M(Gn,Ye,null),u(s,Bi,_),u(s,Yt,_),e(Yt,Mo),e(Mo,Nr),M(Zn,Nr,null),e(Yt,iu),e(Yt,Ir),e(Ir,lu),u(s,Ri,_),u(s,Ae,_),M(es,Ae,null),e(Ae,du),e(Ae,Dr),e(Dr,cu),e(Ae,pu),e(Ae,ts),e(ts,hu),e(ts,ma),e(ma,uu),e(ts,mu),e(Ae,fu),e(Ae,os),e(os,gu),e(os,ns),e(ns,_u),e(os,bu),e(Ae,vu),M($o,Ae,null),e(Ae,ku),e(Ae,Je),M(ss,Je,null),e(Je,Tu),e(Je,Jt),e(Jt,wu),e(Jt,fa),e(fa,yu),e(Jt,Fu),e(Jt,Wr),e(Wr,Mu),e(Jt,$u),e(Je,xu),M(xo,Je,null),e(Je,Lu),e(Je,Or),e(Or,Eu),e(Je,zu),M(as,Je,null),u(s,Ui,_),u(s,Kt,_),e(Kt,Lo),e(Lo,Hr),M(rs,Hr,null),e(Kt,Cu),e(Kt,Qr),e(Qr,qu),u(s,Vi,_),u(s,Se,_),M(is,Se,null),e(Se,Pu),e(Se,Br),e(Br,ju),e(Se,Xu),e(Se,ls),e(ls,Au),e(ls,ga),e(ga,Su),e(ls,Nu),e(Se,Iu),e(Se,ds),e(ds,Du),e(ds,cs),e(cs,Wu),e(ds,Ou),e(Se,Hu),M(Eo,Se,null),e(Se,Qu),e(Se,Ke),M(ps,Ke,null),e
(Ke,Bu),e(Ke,Gt),e(Gt,Ru),e(Gt,_a),e(_a,Uu),e(Gt,Vu),e(Gt,Rr),e(Rr,Yu),e(Gt,Ju),e(Ke,Ku),M(zo,Ke,null),e(Ke,Gu),e(Ke,Ur),e(Ur,Zu),e(Ke,em),M(hs,Ke,null),u(s,Yi,_),u(s,Zt,_),e(Zt,Co),e(Co,Vr),M(us,Vr,null),e(Zt,tm),e(Zt,Yr),e(Yr,om),u(s,Ji,_),u(s,Ne,_),M(ms,Ne,null),e(Ne,nm),e(Ne,Jr),e(Jr,sm),e(Ne,am),e(Ne,fs),e(fs,rm),e(fs,ba),e(ba,im),e(fs,lm),e(Ne,dm),e(Ne,gs),e(gs,cm),e(gs,_s),e(_s,pm),e(gs,hm),e(Ne,um),M(qo,Ne,null),e(Ne,mm),e(Ne,Ge),M(bs,Ge,null),e(Ge,fm),e(Ge,eo),e(eo,gm),e(eo,va),e(va,_m),e(eo,bm),e(eo,Kr),e(Kr,vm),e(eo,km),e(Ge,Tm),M(Po,Ge,null),e(Ge,wm),e(Ge,Gr),e(Gr,ym),e(Ge,Fm),M(vs,Ge,null),u(s,Ki,_),u(s,to,_),e(to,jo),e(jo,Zr),M(ks,Zr,null),e(to,Mm),e(to,ei),e(ei,$m),u(s,Gi,_),u(s,Ie,_),M(Ts,Ie,null),e(Ie,xm),e(Ie,ti),e(ti,Lm),e(Ie,Em),e(Ie,ws),e(ws,zm),e(ws,ka),e(ka,Cm),e(ws,qm),e(Ie,Pm),e(Ie,ys),e(ys,jm),e(ys,Fs),e(Fs,Xm),e(ys,Am),e(Ie,Sm),M(Xo,Ie,null),e(Ie,Nm),e(Ie,Ze),M(Ms,Ze,null),e(Ze,Im),e(Ze,oo),e(oo,Dm),e(oo,Ta),e(Ta,Wm),e(oo,Om),e(oo,oi),e(oi,Hm),e(oo,Qm),e(Ze,Bm),M(Ao,Ze,null),e(Ze,Rm),e(Ze,ni),e(ni,Um),e(Ze,Vm),M($s,Ze,null),u(s,Zi,_),u(s,no,_),e(no,So),e(So,si),M(xs,si,null),e(no,Ym),e(no,ai),e(ai,Jm),u(s,el,_),u(s,De,_),M(Ls,De,null),e(De,Km),e(De,ri),e(ri,Gm),e(De,Zm),e(De,Es),e(Es,ef),e(Es,wa),e(wa,tf),e(Es,of),e(De,nf),e(De,zs),e(zs,sf),e(zs,Cs),e(Cs,af),e(zs,rf),e(De,lf),M(No,De,null),e(De,df),e(De,et),M(qs,et,null),e(et,cf),e(et,so),e(so,pf),e(so,ya),e(ya,hf),e(so,uf),e(so,ii),e(ii,mf),e(so,ff),e(et,gf),M(Io,et,null),e(et,_f),e(et,li),e(li,bf),e(et,vf),M(Ps,et,null),u(s,tl,_),u(s,ao,_),e(ao,Do),e(Do,di),M(js,di,null),e(ao,kf),e(ao,ci),e(ci,Tf),u(s,ol,_),u(s,We,_),M(Xs,We,null),e(We,wf),e(We,ro),e(ro,yf),e(ro,pi),e(pi,Ff),e(ro,Mf),e(ro,hi),e(hi,$f),e(ro,xf),e(We,Lf),e(We,As),e(As,Ef),e(As,Fa),e(Fa,zf),e(As,Cf),e(We,qf),e(We,Ss),e(Ss,Pf),e(Ss,Ns),e(Ns,jf),e(Ss,Xf),e(We,Af),M(Wo,We,null),e(We,Sf),e(We,tt),M(Is,tt,null),e(tt,Nf),e(tt,io),e(io,If),e(io,Ma),e(Ma,Df),e(io,Wf),e(io,ui),e(ui,Of),e(io,Hf),e(tt,Qf),M(Oo,tt,null),e(tt,Bf),e(tt,mi),e(mi,Rf),e(tt,Uf),M(Ds,tt,null),nl=!0},p(s,[_]){const Ws={};_&2&&(Ws.$$scope={dirty:_,ctx:s}),ho.$set(Ws);const fi={};_&2&&(fi.$$scope={dirty:_,ctx:s}),mo.$set(fi);const gi={};_&2&&(gi.$$scope={dirty:_,ctx:s}),go.$set(gi);const _i={};_&2&&(_i.$$scope={dirty:_,ctx:s}),bo.$set(_i);const Os={};_&2&&(Os.$$scope={dirty:_,ctx:s}),ko.$set(Os);const bi={};_&2&&(bi.$$scope={dirty:_,ctx:s}),wo.$set(bi);const vi={};_&2&&(vi.$$scope={dirty:_,ctx:s}),Fo.$set(vi);const ki={};_&2&&(ki.$$scope={dirty:_,ctx:s}),$o.$set(ki);const Hs={};_&2&&(Hs.$$scope={dirty:_,ctx:s}),xo.$set(Hs);const Ti={};_&2&&(Ti.$$scope={dirty:_,ctx:s}),Eo.$set(Ti);const wi={};_&2&&(wi.$$scope={dirty:_,ctx:s}),zo.$set(wi);const yi={};_&2&&(yi.$$scope={dirty:_,ctx:s}),qo.$set(yi);const Fi={};_&2&&(Fi.$$scope={dirty:_,ctx:s}),Po.$set(Fi);const lo={};_&2&&(lo.$$scope={dirty:_,ctx:s}),Xo.$set(lo);const Mi={};_&2&&(Mi.$$scope={dirty:_,ctx:s}),Ao.$set(Mi);const $i={};_&2&&($i.$$scope={dirty:_,ctx:s}),No.$set($i);const Qs={};_&2&&(Qs.$$scope={dirty:_,ctx:s}),Io.$set(Qs);const xi={};_&2&&(xi.$$scope={dirty:_,ctx:s}),Wo.$set(xi);const 
Li={};_&2&&(Li.$$scope={dirty:_,ctx:s}),Oo.$set(Li)},i(s){nl||($(b.$$.fragment,s),$(Z.$$.fragment,s),$(c.$$.fragment,s),$(ke.$$.fragment,s),$(Ro.$$.fragment,s),$(Uo.$$.fragment,s),$(Jo.$$.fragment,s),$(Ko.$$.fragment,s),$(tn.$$.fragment,s),$(ho.$$.fragment,s),$(on.$$.fragment,s),$(nn.$$.fragment,s),$(sn.$$.fragment,s),$(cn.$$.fragment,s),$(mo.$$.fragment,s),$(pn.$$.fragment,s),$(hn.$$.fragment,s),$(un.$$.fragment,s),$(bn.$$.fragment,s),$(go.$$.fragment,s),$(vn.$$.fragment,s),$(kn.$$.fragment,s),$(Tn.$$.fragment,s),$(wn.$$.fragment,s),$(xn.$$.fragment,s),$(bo.$$.fragment,s),$(Ln.$$.fragment,s),$(En.$$.fragment,s),$(zn.$$.fragment,s),$(Xn.$$.fragment,s),$(ko.$$.fragment,s),$(An.$$.fragment,s),$(Sn.$$.fragment,s),$(Nn.$$.fragment,s),$(Hn.$$.fragment,s),$(wo.$$.fragment,s),$(Qn.$$.fragment,s),$(Bn.$$.fragment,s),$(Rn.$$.fragment,s),$(Kn.$$.fragment,s),$(Fo.$$.fragment,s),$(Gn.$$.fragment,s),$(Zn.$$.fragment,s),$(es.$$.fragment,s),$($o.$$.fragment,s),$(ss.$$.fragment,s),$(xo.$$.fragment,s),$(as.$$.fragment,s),$(rs.$$.fragment,s),$(is.$$.fragment,s),$(Eo.$$.fragment,s),$(ps.$$.fragment,s),$(zo.$$.fragment,s),$(hs.$$.fragment,s),$(us.$$.fragment,s),$(ms.$$.fragment,s),$(qo.$$.fragment,s),$(bs.$$.fragment,s),$(Po.$$.fragment,s),$(vs.$$.fragment,s),$(ks.$$.fragment,s),$(Ts.$$.fragment,s),$(Xo.$$.fragment,s),$(Ms.$$.fragment,s),$(Ao.$$.fragment,s),$($s.$$.fragment,s),$(xs.$$.fragment,s),$(Ls.$$.fragment,s),$(No.$$.fragment,s),$(qs.$$.fragment,s),$(Io.$$.fragment,s),$(Ps.$$.fragment,s),$(js.$$.fragment,s),$(Xs.$$.fragment,s),$(Wo.$$.fragment,s),$(Is.$$.fragment,s),$(Oo.$$.fragment,s),$(Ds.$$.fragment,s),nl=!0)},o(s){x(b.$$.fragment,s),x(Z.$$.fragment,s),x(c.$$.fragment,s),x(ke.$$.fragment,s),x(Ro.$$.fragment,s),x(Uo.$$.fragment,s),x(Jo.$$.fragment,s),x(Ko.$$.fragment,s),x(tn.$$.fragment,s),x(ho.$$.fragment,s),x(on.$$.fragment,s),x(nn.$$.fragment,s),x(sn.$$.fragment,s),x(cn.$$.fragment,s),x(mo.$$.fragment,s),x(pn.$$.fragment,s),x(hn.$$.fragment,s),x(un.$$.fragment,s),x(bn.$$.fragment,s),x(go.$$.fragment,s),x(vn.$$.fragment,s),x(kn.$$.fragment,s),x(Tn.$$.fragment,s),x(wn.$$.fragment,s),x(xn.$$.fragment,s),x(bo.$$.fragment,s),x(Ln.$$.fragment,s),x(En.$$.fragment,s),x(zn.$$.fragment,s),x(Xn.$$.fragment,s),x(ko.$$.fragment,s),x(An.$$.fragment,s),x(Sn.$$.fragment,s),x(Nn.$$.fragment,s),x(Hn.$$.fragment,s),x(wo.$$.fragment,s),x(Qn.$$.fragment,s),x(Bn.$$.fragment,s),x(Rn.$$.fragment,s),x(Kn.$$.fragment,s),x(Fo.$$.fragment,s),x(Gn.$$.fragment,s),x(Zn.$$.fragment,s),x(es.$$.fragment,s),x($o.$$.fragment,s),x(ss.$$.fragment,s),x(xo.$$.fragment,s),x(as.$$.fragment,s),x(rs.$$.fragment,s),x(is.$$.fragment,s),x(Eo.$$.fragment,s),x(ps.$$.fragment,s),x(zo.$$.fragment,s),x(hs.$$.fragment,s),x(us.$$.fragment,s),x(ms.$$.fragment,s),x(qo.$$.fragment,s),x(bs.$$.fragment,s),x(Po.$$.fragment,s),x(vs.$$.fragment,s),x(ks.$$.fragment,s),x(Ts.$$.fragment,s),x(Xo.$$.fragment,s),x(Ms.$$.fragment,s),x(Ao.$$.fragment,s),x($s.$$.fragment,s),x(xs.$$.fragment,s),x(Ls.$$.fragment,s),x(No.$$.fragment,s),x(qs.$$.fragment,s),x(Io.$$.fragment,s),x(Ps.$$.fragment,s),x(js.$$.fragment,s),x(Xs.$$.fragment,s),x(Wo.$$.fragment,s),x(Is.$$.fragment,s),x(Oo.$$.fragment,s),x(Ds.$$.fragment,s),nl=!1},d(s){t(p),s&&t(k),s&&t(m),L(b),s&&t(Y),s&&t(E),L(Z),s&&t(se),s&&t(U),s&&t(q),s&&t(te),s&&t(ae),s&&t(oe),s&&t(re),s&&t(C),s&&t(j),s&&t(K),L(c),s&&t(Te),s&&t(P),L(ke),s&&t(Ei),s&&t(qt),L(Ro),s&&t(zi),s&&t(st),L(Uo),s&&t(Ci),s&&t(Pt),L(Jo),s&&t(qi),s&&t(He),L(Ko),L(tn),L(ho),L(on),s&&t(Pi),s&&t(Xt),L(nn),s&&t(ji),s&&t(ze),L(sn),L(cn),L(mo),L(pn),s&&t(Xi),s&&t
(St),L(hn),s&&t(Ai),s&&t(Ce),L(un),L(bn),L(go),L(vn),L(kn),s&&t(Si),s&&t(It),L(Tn),s&&t(Ni),s&&t(qe),L(wn),L(xn),L(bo),L(Ln),s&&t(Ii),s&&t(Wt),L(En),s&&t(Di),s&&t(Pe),L(zn),L(Xn),L(ko),L(An),s&&t(Wi),s&&t(Ht),L(Sn),s&&t(Oi),s&&t(je),L(Nn),L(Hn),L(wo),L(Qn),s&&t(Hi),s&&t(Rt),L(Bn),s&&t(Qi),s&&t(Xe),L(Rn),L(Kn),L(Fo),L(Gn),s&&t(Bi),s&&t(Yt),L(Zn),s&&t(Ri),s&&t(Ae),L(es),L($o),L(ss),L(xo),L(as),s&&t(Ui),s&&t(Kt),L(rs),s&&t(Vi),s&&t(Se),L(is),L(Eo),L(ps),L(zo),L(hs),s&&t(Yi),s&&t(Zt),L(us),s&&t(Ji),s&&t(Ne),L(ms),L(qo),L(bs),L(Po),L(vs),s&&t(Ki),s&&t(to),L(ks),s&&t(Gi),s&&t(Ie),L(Ts),L(Xo),L(Ms),L(Ao),L($s),s&&t(Zi),s&&t(no),L(xs),s&&t(el),s&&t(De),L(Ls),L(No),L(qs),L(Io),L(Ps),s&&t(tl),s&&t(ao),L(js),s&&t(ol),s&&t(We),L(Xs),L(Wo),L(Is),L(Oo),L(Ds)}}}const Zb={local:"flaubert",sections:[{local:"overview",title:"Overview"},{local:"transformers.FlaubertConfig",title:"FlaubertConfig"},{local:"transformers.FlaubertTokenizer",title:"FlaubertTokenizer"},{local:"transformers.FlaubertModel",title:"FlaubertModel"},{local:"transformers.FlaubertWithLMHeadModel",title:"FlaubertWithLMHeadModel"},{local:"transformers.FlaubertForSequenceClassification",title:"FlaubertForSequenceClassification"},{local:"transformers.FlaubertForMultipleChoice",title:"FlaubertForMultipleChoice"},{local:"transformers.FlaubertForTokenClassification",title:"FlaubertForTokenClassification"},{local:"transformers.FlaubertForQuestionAnsweringSimple",title:"FlaubertForQuestionAnsweringSimple"},{local:"transformers.FlaubertForQuestionAnswering",title:"FlaubertForQuestionAnswering"},{local:"transformers.TFFlaubertModel",title:"TFFlaubertModel"},{local:"transformers.TFFlaubertWithLMHeadModel",title:"TFFlaubertWithLMHeadModel"},{local:"transformers.TFFlaubertForSequenceClassification",title:"TFFlaubertForSequenceClassification"},{local:"transformers.TFFlaubertForMultipleChoice",title:"TFFlaubertForMultipleChoice"},{local:"transformers.TFFlaubertForTokenClassification",title:"TFFlaubertForTokenClassification"},{local:"transformers.TFFlaubertForQuestionAnsweringSimple",title:"TFFlaubertForQuestionAnsweringSimple"}],title:"FlauBERT"};function ev(O,p,k){let{fw:m}=p;return O.$$set=g=>{"fw"in g&&k(0,m=g.fw)},[m]}class iv extends Eb{constructor(p){super();zb(this,p,ev,Gb,Cb,{fw:0})}}export{iv as default,Zb as metadata};
9,979
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bigbird.mdx-bf5d1fbb.js
import{S as O2,i as D2,s as W2,e as n,k as l,w as _,t as a,L as U2,c as s,d as t,m as c,a as r,x as b,h as i,b as d,J as e,g,y as v,q as k,o as B,B as y}from"../../chunks/vendor-b1433968.js";import{T as V}from"../../chunks/Tip-c3840994.js";import{D as F}from"../../chunks/Docstring-ff504c58.js";import{C as D}from"../../chunks/CodeBlock-a320dbd7.js";import{I as M}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Q2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function H2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function J2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function V2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function R2(z){let 
p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function G2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function K2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function X2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function Y2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function Z2(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function ex(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function tx(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function ox(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function nx(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the 
"),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function sx(z){let p,T,m,w,x;return{c(){p=n("p"),T=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),w=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){p=s(u,"P",{});var f=r(p);T=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var $=r(m);w=i($,"Module"),$.forEach(t),x=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(u,f){g(u,p,f),e(p,T),e(p,m),e(m,w),e(p,x)},d(u){u&&t(p)}}}function rx(z){let p,T,m,w,x,u,f,$,zh,qc,et,Qt,ri,Go,Mh,ai,Eh,Pc,Ht,qh,Ko,Ph,Ch,Cc,ta,jh,jc,oa,ii,Ah,Ac,na,Lh,Lc,U,Xo,Ih,Yo,Nh,Sh,Oh,be,Dh,di,Wh,Uh,li,Qh,Hh,ci,Jh,Vh,pi,Rh,Gh,Kh,hi,Xh,Yh,fi,Zh,ef,Zo,tf,gi,of,nf,sf,sa,rf,mi,af,Ic,je,df,en,lf,cf,tn,pf,hf,Nc,tt,Jt,ui,on,ff,_i,gf,Sc,E,nn,mf,ot,uf,ra,_f,bf,sn,vf,kf,Bf,nt,yf,aa,wf,Tf,ia,xf,$f,Ff,bi,zf,Mf,rn,Ef,vi,ki,Bi,yi,qf,Pf,wi,Ti,an,Vt,Rt,xi,dn,Cf,$i,jf,Af,Fi,Lf,If,zi,Mi,ln,Gt,Kt,Ei,cn,Nf,qi,Sf,Of,Pi,Df,Wf,Ci,ji,pn,Xt,Yt,Ai,hn,Uf,Li,Qf,Hf,Ii,Jf,Oc,st,Zt,Ni,fn,Vf,Si,Rf,Dc,q,gn,Gf,mn,Kf,un,Xf,Yf,Zf,_n,eg,da,tg,og,ng,Ae,bn,sg,Oi,rg,ag,vn,la,ig,Di,dg,lg,ca,cg,Wi,pg,hg,eo,kn,fg,Bn,gg,Ui,mg,ug,_g,to,yn,bg,rt,vg,Qi,kg,Bg,Hi,yg,wg,Tg,Ji,Wc,at,oo,Vi,wn,xg,Ri,$g,Uc,Q,Tn,Fg,Te,zg,Gi,Mg,Eg,xn,qg,Pg,pa,Cg,jg,Ag,Le,$n,Lg,Ki,Ig,Ng,Fn,ha,Sg,Xi,Og,Dg,fa,Wg,Yi,Ug,Qg,we,zn,Hg,Zi,Jg,Vg,Mn,Rg,ed,Gg,Kg,no,En,Xg,qn,Yg,td,Zg,em,Qc,it,so,od,Pn,tm,nd,om,Hc,dt,Cn,nm,jn,sm,ga,rm,am,Jc,lt,ro,sd,An,im,rd,dm,Vc,H,Ln,lm,In,cm,Nn,pm,hm,fm,Sn,gm,On,mm,um,_m,W,bm,ad,vm,km,id,Bm,ym,dd,wm,Tm,ld,xm,$m,cd,Fm,zm,pd,Mm,Em,qm,R,Dn,Pm,ct,Cm,ma,jm,Am,hd,Lm,Im,Nm,ao,Sm,fd,Om,Dm,Wn,Rc,pt,io,gd,Un,Wm,md,Um,Gc,Qn,G,Hn,Qm,ht,Hm,ua,Jm,Vm,ud,Rm,Gm,Km,lo,Xm,_d,Ym,Zm,Jn,Kc,ft,co,bd,Vn,eu,vd,tu,Xc,xe,Rn,ou,gt,nu,kd,su,ru,Gn,au,iu,du,K,Kn,lu,mt,cu,_a,pu,hu,Bd,fu,gu,mu,po,uu,yd,_u,bu,Xn,Yc,ut,ho,wd,Yn,vu,Td,ku,Zc,$e,Zn,Bu,_t,yu,xd,wu,Tu,es,xu,$u,Fu,X,ts,zu,bt,Mu,ba,Eu,qu,$d,Pu,Cu,ju,fo,Au,Fd,Lu,Iu,os,ep,vt,go,zd,ns,Nu,Md,Su,tp,ve,ss,Ou,Ed,Du,Wu,rs,Uu,as,Qu,Hu,Ju,S,is,Vu,kt,Ru,va,Gu,Ku,qd,Xu,Yu,Zu,mo,e_,Pd,t_,o_,ds,n_,Cd,s_,r_,ls,op,Bt,uo,jd,cs,a_,Ad,i_,np,ke,ps,d_,Ld,l_,c_,hs,p_,fs,h_,f_,g_,Y,gs,m_,yt,u_,ka,__,b_,Id,v_,k_,B_,_o,y_,Nd,w_,T_,ms,sp,wt,bo,Sd,us,x_,Od,$_,rp,Be,_s,F_,Dd,z_,M_,bs,E_,vs,q_,P_,C_,Z,ks,j_,Tt,A_,Ba,L_,I_,Wd,N_,S_,O_,vo,D_,Ud,W_,U_,Bs,ap,xt,ko,Qd,ys,Q_,Hd,H_,ip,ye,ws,J_,$t,V_,Jd,R_,G_,Vd,K_,X_,Y_,Ts,Z_,xs,eb,tb,ob,ee,$s,nb,Ft,sb,ya,rb,ab,Rd,ib,db,lb,Bo,cb,Gd,pb,hb,Fs,dp,zt,yo,Kd,zs,fb,Xd,gb,lp,P,Ms,mb,Yd,ub,_b,Es,bb,wa,vb,kb,Bb,qs,yb,Ps,wb,Tb,xb,Zd,$b,Fb,Fe,el,Cs,zb,Mb,tl,js,Eb,qb,ol,As,Pb,Cb,nl,Ls,jb,Ab,te,Is,Lb,Mt,Ib,sl,Nb,Sb,rl,Ob,Db,Wb,wo,Ub,al,Qb,Hb,Ns,cp,Et,To,il,Ss,Jb,dl,Vb,pp,C,Os,Rb,qt,Gb,ll,Kb,Xb,cl,Yb,Zb,ev
,Ds,tv,Ta,ov,nv,sv,Ws,rv,Us,av,iv,dv,pl,lv,cv,ze,hl,Qs,pv,hv,fl,Hs,fv,gv,gl,Js,mv,uv,ml,Vs,_v,bv,oe,Rs,vv,Pt,kv,ul,Bv,yv,_l,wv,Tv,xv,xo,$v,bl,Fv,zv,Gs,hp,Ct,$o,vl,Ks,Mv,kl,Ev,fp,j,Xs,qv,Ys,Pv,Bl,Cv,jv,Av,Zs,Lv,xa,Iv,Nv,Sv,er,Ov,tr,Dv,Wv,Uv,yl,Qv,Hv,Me,wl,or,Jv,Vv,Tl,nr,Rv,Gv,xl,sr,Kv,Xv,$l,rr,Yv,Zv,ne,ar,ek,jt,tk,Fl,ok,nk,zl,sk,rk,ak,Fo,ik,Ml,dk,lk,ir,gp,At,zo,El,dr,ck,ql,pk,mp,A,lr,hk,Pl,fk,gk,cr,mk,$a,uk,_k,bk,pr,vk,hr,kk,Bk,yk,Cl,wk,Tk,Ee,jl,fr,xk,$k,Al,gr,Fk,zk,Ll,mr,Mk,Ek,Il,ur,qk,Pk,se,_r,Ck,Lt,jk,Nl,Ak,Lk,Sl,Ik,Nk,Sk,Mo,Ok,Ol,Dk,Wk,br,up,It,Eo,Dl,vr,Uk,Wl,Qk,_p,L,kr,Hk,Ul,Jk,Vk,Br,Rk,Fa,Gk,Kk,Xk,yr,Yk,wr,Zk,eB,tB,Ql,oB,nB,qe,Hl,Tr,sB,rB,Jl,xr,aB,iB,Vl,$r,dB,lB,Rl,Fr,cB,pB,re,zr,hB,Nt,fB,Gl,gB,mB,Kl,uB,_B,bB,qo,vB,Xl,kB,BB,Mr,bp,St,Po,Yl,Er,yB,Zl,wB,vp,I,qr,TB,ec,xB,$B,Pr,FB,za,zB,MB,EB,Cr,qB,jr,PB,CB,jB,tc,AB,LB,Pe,oc,Ar,IB,NB,nc,Lr,SB,OB,sc,Ir,DB,WB,rc,Nr,UB,QB,ae,Sr,HB,Ot,JB,ac,VB,RB,ic,GB,KB,XB,Co,YB,dc,ZB,e1,Or,kp,Dt,jo,lc,Dr,t1,cc,o1,Bp,N,Wr,n1,Wt,s1,pc,r1,a1,hc,i1,d1,l1,Ur,c1,Ma,p1,h1,f1,Qr,g1,Hr,m1,u1,_1,fc,b1,v1,Ce,gc,Jr,k1,B1,mc,Vr,y1,w1,uc,Rr,T1,x1,_c,Gr,$1,F1,ie,Kr,z1,Ut,M1,Ea,E1,q1,bc,P1,C1,j1,Ao,A1,vc,L1,I1,Xr,yp;return u=new M({}),Go=new M({}),on=new M({}),nn=new F({props:{name:"class transformers.BigBirdConfig",anchor:"transformers.BigBirdConfig",parameters:[{name:"vocab_size",val:" = 50358"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 4096"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"sep_token_id",val:" = 66"},{name:"attention_type",val:" = 'block_sparse'"},{name:"use_bias",val:" = True"},{name:"rescale_embeddings",val:" = False"},{name:"block_size",val:" = 64"},{name:"num_random_blocks",val:" = 3"},{name:"classifier_dropout",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/configuration_big_bird.py#L31",parametersDescription:[{anchor:"transformers.BigBirdConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50358) &#x2014; Vocabulary size of the BigBird model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdModel">BigBirdModel</a>.`,name:"vocab_size"},{anchor:"transformers.BigBirdConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.BigBirdConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.BigBirdConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.BigBirdConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.BigBirdConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.BigBirdConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.BigBirdConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.BigBirdConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 1024 or 2048 or 4096).`,name:"max_position_embeddings"},{anchor:"transformers.BigBirdConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdModel">BigBirdModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.BigBirdConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.BigBirdConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.BigBirdConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"},{anchor:"transformers.BigBirdConfig.attention_type",description:`<strong>attention_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;block_sparse&quot;</code>) &#x2014; Whether to use block sparse attention (with n complexity) as introduced in paper or original attention layer (with n^2 complexity). Possible values are <code>&quot;original_full&quot;</code> and <code>&quot;block_sparse&quot;</code>.`,name:"attention_type"},{anchor:"transformers.BigBirdConfig.use_bias",description:`<strong>use_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use bias in query, key, value.`,name:"use_bias"},{anchor:"transformers.BigBirdConfig.rescale_embeddings",description:`<strong>rescale_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to rescale embeddings with (hidden_size ** 0.5).`,name:"rescale_embeddings"},{anchor:"transformers.BigBirdConfig.block_size",description:`<strong>block_size</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Size of each block. Useful only when <code>attention_type == &quot;block_sparse&quot;</code>.`,name:"block_size"},{anchor:"transformers.BigBirdConfig.num_random_blocks",description:`<strong>num_random_blocks</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Each query is going to attend these many number of random blocks. 
Useful only when <code>attention_type == &quot;block_sparse&quot;</code>.`,name:"num_random_blocks"},{anchor:"transformers.BigBirdConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),rn=new D({props:{code:",",highlighted:""}}),dn=new M({}),cn=new M({}),hn=new M({}),fn=new M({}),gn=new F({props:{name:"class transformers.BigBirdTokenizer",anchor:"transformers.BigBirdTokenizer",parameters:[{name:"vocab_file",val:""},{name:"unk_token",val:" = '<unk>'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird.py#L47",parametersDescription:[{anchor:"transformers.BigBirdTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.BigBirdTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.BigBirdTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The begin of sequence token.`,name:"bos_token"},{anchor:"transformers.BigBirdTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BigBirdTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BigBirdTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BigBirdTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BigBirdTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.BigBirdTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),bn=new F({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BigBirdTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird.py#L195",parametersDescription:[{anchor:"transformers.BigBirdTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BigBirdTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),kn=new F({props:{name:"get_special_tokens_mask",anchor:"transformers.BigBirdTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird.py#L220",parametersDescription:[{anchor:"transformers.BigBirdTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BigBirdTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence 
pairs.`,name:"token_ids_1"},{anchor:"transformers.BigBirdTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),yn=new F({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BigBirdTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird.py#L247",parametersDescription:[{anchor:"transformers.BigBirdTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BigBirdTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),wn=new M({}),Tn=new F({props:{name:"class transformers.BigBirdTokenizerFast",anchor:"transformers.BigBirdTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"unk_token",val:" = '<unk>'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird_fast.py#L59",parametersDescription:[{anchor:"transformers.BigBirdTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.BigBirdTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.BigBirdTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token. .. 
note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.`,name:"eos_token"},{anchor:"transformers.BigBirdTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BigBirdTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BigBirdTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BigBirdTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BigBirdTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),$n=new F({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BigBirdTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird_fast.py#L145",parametersDescription:[{anchor:"transformers.BigBirdTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.BigBirdTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),zn=new F({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BigBirdTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird_fast.py#L201",parametersDescription:[{anchor:"transformers.BigBirdTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.BigBirdTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Mn=new D({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),En=new F({props:{name:"get_special_tokens_mask",anchor:"transformers.BigBirdTokenizerFast.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/tokenization_big_bird_fast.py#L170",parametersDescription:[{anchor:"transformers.BigBirdTokenizerFast.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.BigBirdTokenizerFast.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional 
second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BigBirdTokenizerFast.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Set to True if the token list is already formatted with special tokens for the model`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Pn=new M({}),Cn=new F({props:{name:"class transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput",anchor:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prediction_logits",val:": FloatTensor = None"},{name:"seq_relationship_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L1845",parametersDescription:[{anchor:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"prediction_logits"},{anchor:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput.seq_relationship_logits",description:`<strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"seq_relationship_logits"},{anchor:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),An=new M({}),Ln=new F({props:{name:"class transformers.BigBirdModel",anchor:"transformers.BigBirdModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L1915",parametersDescription:[{anchor:"transformers.BigBirdModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Dn=new F({props:{name:"forward",anchor:"transformers.BigBirdModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L1972",parametersDescription:[{anchor:"transformers.BigBirdModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BigBirdModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.BigBirdModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BigBirdModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ao=new V({props:{$$slots:{default:[Q2]},$$scope:{ctx:z}}}),Wn=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdModel import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdModel.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdModel <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdModel.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Un=new M({}),Hn=new F({props:{name:"forward",anchor:"transformers.BigBirdForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"next_sentence_label",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2271",parametersDescription:[{anchor:"transformers.BigBirdForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.BigBirdForPreTraining.forward.next_sentence_label",description:`<strong>next_sentence_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be added to masked_lm loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"next_sentence_label"},{anchor:"transformers.BigBirdForPreTraining.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput" >transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</p> </li> <li> <p><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput" >transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),lo=new V({props:{$$slots:{default:[H2]},$$scope:{ctx:z}}}),Jn=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForPreTraining import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForPreTraining.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),Vn=new M({}),Rn=new F({props:{name:"class transformers.BigBirdForCausalLM",anchor:"transformers.BigBirdForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2460",parametersDescription:[{anchor:"transformers.BigBirdForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kn=new F({props:{name:"forward",anchor:"transformers.BigBirdForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2482",parametersDescription:[{anchor:"transformers.BigBirdForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BigBirdForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.BigBirdForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BigBirdForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels n <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.BigBirdForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),po=new V({props:{$$slots:{default:[J2]},$$scope:{ctx:z}}}),Xn=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base") config.is_decoder = True model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = BigBirdConfig.from_pretrained(<span class="hljs-string">&quot;google/bigbird-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForCausalLM.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) 
<span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),Yn=new M({}),Zn=new F({props:{name:"class transformers.BigBirdForMaskedLM",anchor:"transformers.BigBirdForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2358",parametersDescription:[{anchor:"transformers.BigBirdForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ts=new F({props:{name:"forward",anchor:"transformers.BigBirdForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2380",parametersDescription:[{anchor:"transformers.BigBirdForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),fo=new V({props:{$$slots:{default:[V2]},$$scope:{ctx:z}}}),os=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForMaskedLM import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdForMaskedLM.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
BigBirdForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ns=new M({}),ss=new F({props:{name:"class transformers.BigBirdForSequenceClassification",anchor:"transformers.BigBirdForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2637",parametersDescription:[{anchor:"transformers.BigBirdForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),is=new F({props:{name:"forward",anchor:"transformers.BigBirdForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2648",parametersDescription:[{anchor:"transformers.BigBirdForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),mo=new V({props:{$$slots:{default:[R2]},$$scope:{ctx:z}}}),ds=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForSequenceClassification import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdForSequenceClassification.from_pretrained('google/bigbird-roberta-base') inputs = 
tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ls=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForSequenceClassification import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdForSequenceClassification.from_pretrained('google/bigbird-roberta-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),cs=new M({}),ps=new F({props:{name:"class 
transformers.BigBirdForMultipleChoice",anchor:"transformers.BigBirdForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2732",parametersDescription:[{anchor:"transformers.BigBirdForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gs=new F({props:{name:"forward",anchor:"transformers.BigBirdForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2743",parametersDescription:[{anchor:"transformers.BigBirdForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_o=new V({props:{$$slots:{default:[G2]},$$scope:{ctx:z}}}),ms=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForMultipleChoice import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdForMultipleChoice.from_pretrained('google/bigbird-roberta-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),us=new M({}),_s=new F({props:{name:"class transformers.BigBirdForTokenClassification",anchor:"transformers.BigBirdForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2825",parametersDescription:[{anchor:"transformers.BigBirdForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ks=new F({props:{name:"forward",anchor:"transformers.BigBirdForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2840",parametersDescription:[{anchor:"transformers.BigBirdForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),vo=new V({props:{$$slots:{default:[K2]},$$scope:{ctx:z}}}),Bs=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForTokenClassification import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = BigBirdForTokenClassification.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, 
return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ys=new M({}),ws=new F({props:{name:"class transformers.BigBirdForQuestionAnswering",anchor:"transformers.BigBirdForQuestionAnswering",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2934",parametersDescription:[{anchor:"transformers.BigBirdForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$s=new F({props:{name:"forward",anchor:"transformers.BigBirdForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"question_lengths",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_big_bird.py#L2948",parametersDescription:[{anchor:"transformers.BigBirdForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.BigBirdForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <code>transformers.models.big_bird.modeling_big_bird.BigBirdForQuestionAnsweringModelOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 1)</code>) \u2014 pooler output from BigBirdModel</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length,
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.big_bird.modeling_big_bird.BigBirdForQuestionAnsweringModelOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Bo=new V({props:{$$slots:{default:[X2]},$$scope:{ctx:z}}}),Fs=new D({props:{code:`from transformers import BigBirdTokenizer, BigBirdForQuestionAnswering import torch tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-base-trivia-itc') model = BigBirdForQuestionAnswering.from_pretrained('google/bigbird-base-trivia-itc') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, BigBirdForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-base-trivia-itc&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-base-trivia-itc&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),zs=new M({}),Ms=new F({props:{name:"class transformers.FlaxBigBirdModel",anchor:"transformers.FlaxBigBirdModel",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1556",parametersDescription:[{anchor:"transformers.FlaxBigBirdModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Is=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1441",parametersDescription:[{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function.
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),wo=new V({props:{$$slots:{default:[Y2]},$$scope:{ctx:z}}}),Ns=new D({props:{code:`from transformers import BigBirdTokenizer, FlaxBigBirdModel tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdModel.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdModel.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ss=new M({}),Os=new F({props:{name:"class transformers.FlaxBigBirdForPreTraining",anchor:"transformers.FlaxBigBirdForPreTraining",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1631",parametersDescription:[{anchor:"transformers.FlaxBigBirdForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdForPreTraining.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Rs=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1441",parametersDescription:[{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.big_bird.modeling_flax_big_bird.FlaxBigBirdForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>prediction_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>seq_relationship_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong>
(<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.big_bird.modeling_flax_big_bird.FlaxBigBirdForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),xo=new V({props:{$$slots:{default:[Z2]},$$scope:{ctx:z}}}),Gs=new D({props:{code:`from transformers import BigBirdTokenizer, FlaxBigBirdForPreTraining tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdForPreTraining.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),Ks=new M({}),Xs=new F({props:{name:"class transformers.FlaxBigBirdForMaskedLM",anchor:"transformers.FlaxBigBirdForMaskedLM",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1718",parametersDescription:[{anchor:"transformers.FlaxBigBirdForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdForMaskedLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. 
Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),ar=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1441",parametersDescription:[{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fo=new V({props:{$$slots:{default:[ex]},$$scope:{ctx:z}}}),ir=new D({props:{code:`from 
transformers import BigBirdTokenizer, FlaxBigBirdForMaskedLM tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdForMaskedLM.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("The capital of France is [MASK].", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),dr=new M({}),lr=new F({props:{name:"class transformers.FlaxBigBirdForSequenceClassification",anchor:"transformers.FlaxBigBirdForSequenceClassification",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1807",parametersDescription:[{anchor:"transformers.FlaxBigBirdForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdForSequenceClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),_r=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1441",parametersDescription:[{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mo=new V({props:{$$slots:{default:[tx]},$$scope:{ctx:z}}}),br=new D({props:{code:`from transformers import BigBirdTokenizer, FlaxBigBirdForSequenceClassification tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdForSequenceClassification.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdForSequenceClassification 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),vr=new M({}),kr=new F({props:{name:"class transformers.FlaxBigBirdForMultipleChoice",anchor:"transformers.FlaxBigBirdForMultipleChoice",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1884",parametersDescription:[{anchor:"transformers.FlaxBigBirdForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdForMultipleChoice.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),zr=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1441",parametersDescription:[{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qo=new V({props:{$$slots:{default:[ox]},$$scope:{ctx:z}}}),Mr=new D({props:{code:`from transformers import BigBirdTokenizer, FlaxBigBirdForMultipleChoice tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdForMultipleChoice.from_pretrained('google/bigbird-roberta-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='jax', padding=True) outputs = model(**{k: v[None, :] for k,v in encoding.items()}) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Er=new M({}),qr=new F({props:{name:"class transformers.FlaxBigBirdForTokenClassification",anchor:"transformers.FlaxBigBirdForTokenClassification",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1976",parametersDescription:[{anchor:"transformers.FlaxBigBirdForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdForTokenClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Sr=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L1441",parametersDescription:[{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Co=new V({props:{$$slots:{default:[nx]},$$scope:{ctx:z}}}),Or=new D({props:{code:`from transformers import BigBirdTokenizer, FlaxBigBirdForTokenClassification tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdForTokenClassification.from_pretrained('google/bigbird-roberta-base') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Dr=new M({}),Wr=new F({props:{name:"class transformers.FlaxBigBirdForQuestionAnswering",anchor:"transformers.FlaxBigBirdForQuestionAnswering",parameters:[{name:"config",val:": BigBirdConfig"},{name:"input_shape",val:": typing.Optional[tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L2075",parametersDescription:[{anchor:"transformers.FlaxBigBirdForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig">BigBirdConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBigBirdForQuestionAnswering.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Kr=new F({props:{name:"__call__",anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"question_lengths",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/big_bird/modeling_flax_big_bird.py#L2078",parametersDescription:[{anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdTokenizer">BigBirdTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxBigBirdForQuestionAnswering.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.big_bird.modeling_flax_big_bird.FlaxBigBirdForQuestionAnsweringModelOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdConfig" >BigBirdConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>pooled_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 pooled_output returned by FlaxBigBirdModel.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` 
<p><code>transformers.models.big_bird.modeling_flax_big_bird.FlaxBigBirdForQuestionAnsweringModelOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ao=new V({props:{$$slots:{default:[sx]},$$scope:{ctx:z}}}),Xr=new D({props:{code:`from transformers import BigBirdTokenizer, FlaxBigBirdForQuestionAnswering tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') model = FlaxBigBirdForQuestionAnswering.from_pretrained('google/bigbird-roberta-base') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='jax') outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BigBirdTokenizer, FlaxBigBirdForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BigBirdTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBigBirdForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-roberta-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){p=n("meta"),T=l(),m=n("h1"),w=n("a"),x=n("span"),_(u.$$.fragment),f=l(),$=n("span"),zh=a("BigBird"),qc=l(),et=n("h2"),Qt=n("a"),ri=n("span"),_(Go.$$.fragment),Mh=l(),ai=n("span"),Eh=a("Overview"),Pc=l(),Ht=n("p"),qh=a("The BigBird model was proposed in "),Ko=n("a"),Ph=a("Big Bird: Transformers for Longer Sequences"),Ch=a(` by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a sparse-attention based transformer which extends Transformer based models, such as BERT to much longer sequences. In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa.`),Cc=l(),ta=n("p"),jh=a("The abstract from the paper is the following:"),jc=l(),oa=n("p"),ii=n("em"),Ah=a(`Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. 
We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to 8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.`),Ac=l(),na=n("p"),Lh=a("Tips:"),Lc=l(),U=n("ul"),Xo=n("li"),Ih=a("For an in-detail explanation on how BigBird\u2019s attention works, see "),Yo=n("a"),Nh=a("this blog post"),Sh=a("."),Oh=l(),be=n("li"),Dh=a("BigBird comes with 2 implementations: "),di=n("strong"),Wh=a("original_full"),Uh=a(" & "),li=n("strong"),Qh=a("block_sparse"),Hh=a(`. For the sequence length < 1024, using `),ci=n("strong"),Jh=a("original_full"),Vh=a(" is advised as there is no benefit in using "),pi=n("strong"),Rh=a("block_sparse"),Gh=a(" attention."),Kh=l(),hi=n("li"),Xh=a("The code currently uses window size of 3 blocks and 2 global blocks."),Yh=l(),fi=n("li"),Zh=a("Sequence length must be divisible by block size."),ef=l(),Zo=n("li"),tf=a("Current implementation supports only "),gi=n("strong"),of=a("ITC"),nf=a("."),sf=l(),sa=n("li"),rf=a("Current implementation doesn\u2019t support "),mi=n("strong"),af=a("num_random_blocks = 0"),Ic=l(),je=n("p"),df=a("This model was contributed by "),en=n("a"),lf=a("vasudevgupta"),cf=a(`. The original code can be found `),tn=n("a"),pf=a("here"),hf=a("."),Nc=l(),tt=n("h2"),Jt=n("a"),ui=n("span"),_(on.$$.fragment),ff=l(),_i=n("span"),gf=a("BigBirdConfig"),Sc=l(),E=n("div"),_(nn.$$.fragment),mf=l(),ot=n("p"),uf=a("This is the configuration class to store the configuration of a "),ra=n("a"),_f=a("BigBirdModel"),bf=a(`. It is used to instantiate an BigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BigBird `),sn=n("a"),vf=a("google/bigbird-roberta-base"),kf=a(" architecture."),Bf=l(),nt=n("p"),yf=a("Configuration objects inherit from "),aa=n("a"),wf=a("PretrainedConfig"),Tf=a(` and can be used to control the model outputs. 
Read the documentation from `),ia=n("a"),xf=a("PretrainedConfig"),$f=a(" for more information."),Ff=l(),bi=n("p"),zf=a("Example:"),Mf=l(),_(rn.$$.fragment),Ef=l(),vi=n("blockquote"),ki=n("blockquote"),Bi=n("blockquote"),yi=n("p"),qf=a("from transformers import BigBirdModel, BigBirdConfig"),Pf=l(),wi=n("blockquote"),Ti=n("blockquote"),an=n("blockquote"),Vt=n("h1"),Rt=n("a"),xi=n("span"),_(dn.$$.fragment),Cf=l(),$i=n("span"),jf=a("Initializing a BigBird google/bigbird-roberta-base style configuration"),Af=l(),Fi=n("p"),Lf=a("configuration = BigBirdConfig()"),If=l(),zi=n("blockquote"),Mi=n("blockquote"),ln=n("blockquote"),Gt=n("h1"),Kt=n("a"),Ei=n("span"),_(cn.$$.fragment),Nf=l(),qi=n("span"),Sf=a("Initializing a model from the google/bigbird-roberta-base style configuration"),Of=l(),Pi=n("p"),Df=a("model = BigBirdModel(configuration)"),Wf=l(),Ci=n("blockquote"),ji=n("blockquote"),pn=n("blockquote"),Xt=n("h1"),Yt=n("a"),Ai=n("span"),_(hn.$$.fragment),Uf=l(),Li=n("span"),Qf=a("Accessing the model configuration"),Hf=l(),Ii=n("p"),Jf=a("configuration = model.config"),Oc=l(),st=n("h2"),Zt=n("a"),Ni=n("span"),_(fn.$$.fragment),Vf=l(),Si=n("span"),Rf=a("BigBirdTokenizer"),Dc=l(),q=n("div"),_(gn.$$.fragment),Gf=l(),mn=n("p"),Kf=a("Construct a BigBird tokenizer. Based on "),un=n("a"),Xf=a("SentencePiece"),Yf=a("."),Zf=l(),_n=n("p"),eg=a("This tokenizer inherits from "),da=n("a"),tg=a("PreTrainedTokenizer"),og=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ng=l(),Ae=n("div"),_(bn.$$.fragment),sg=l(),Oi=n("p"),rg=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Big Bird sequence has the following format:`),ag=l(),vn=n("ul"),la=n("li"),ig=a("single sequence: "),Di=n("code"),dg=a("[CLS] X [SEP]"),lg=l(),ca=n("li"),cg=a("pair of sequences: "),Wi=n("code"),pg=a("[CLS] A [SEP] B [SEP]"),hg=l(),eo=n("div"),_(kn.$$.fragment),fg=l(),Bn=n("p"),gg=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Ui=n("code"),mg=a("prepare_for_model"),ug=a(" method."),_g=l(),to=n("div"),_(yn.$$.fragment),bg=l(),rt=n("p"),vg=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `),Qi=n("code"),kg=a("token_ids_1"),Bg=a(" is "),Hi=n("code"),yg=a("None"),wg=a(", this method only returns the first portion of the mask (0s)."),Tg=l(),Ji=n("div"),Wc=l(),at=n("h2"),oo=n("a"),Vi=n("span"),_(wn.$$.fragment),xg=l(),Ri=n("span"),$g=a("BigBirdTokenizerFast"),Uc=l(),Q=n("div"),_(Tn.$$.fragment),Fg=l(),Te=n("p"),zg=a("Construct a \u201Cfast\u201D BigBird tokenizer (backed by HuggingFace\u2019s "),Gi=n("em"),Mg=a("tokenizers"),Eg=a(" library). Based on "),xn=n("a"),qg=a("Unigram"),Pg=a(`. This tokenizer inherits from `),pa=n("a"),Cg=a("PreTrainedTokenizerFast"),jg=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),Ag=l(),Le=n("div"),_($n.$$.fragment),Lg=l(),Ki=n("p"),Ig=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An BigBird sequence has the following format:`),Ng=l(),Fn=n("ul"),ha=n("li"),Sg=a("single sequence: "),Xi=n("code"),Og=a("[CLS] X [SEP]"),Dg=l(),fa=n("li"),Wg=a("pair of sequences: "),Yi=n("code"),Ug=a("[CLS] A [SEP] B [SEP]"),Qg=l(),we=n("div"),_(zn.$$.fragment),Hg=l(),Zi=n("p"),Jg=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format:`),Vg=l(),_(Mn.$$.fragment),Rg=l(),ed=n("p"),Gg=a("if token_ids_1 is None, only returns the first portion of the mask (0s)."),Kg=l(),no=n("div"),_(En.$$.fragment),Xg=l(),qn=n("p"),Yg=a(`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),td=n("code"),Zg=a("prepare_for_model"),em=a(" method."),Qc=l(),it=n("h2"),so=n("a"),od=n("span"),_(Pn.$$.fragment),tm=l(),nd=n("span"),om=a("BigBird specific outputs"),Hc=l(),dt=n("div"),_(Cn.$$.fragment),nm=l(),jn=n("p"),sm=a("Output type of "),ga=n("a"),rm=a("BigBirdForPreTraining"),am=a("."),Jc=l(),lt=n("h2"),ro=n("a"),sd=n("span"),_(An.$$.fragment),im=l(),rd=n("span"),dm=a("BigBirdModel"),Vc=l(),H=n("div"),_(Ln.$$.fragment),lm=l(),In=n("p"),cm=a(`The bare BigBird Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Nn=n("a"),pm=a("torch.nn.Module"),hm=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fm=l(),Sn=n("p"),gm=a(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),On=n("a"),mm=a(`Attention is all you need`),um=a(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),_m=l(),W=n("p"),bm=a("To behave as an decoder the model needs to be initialized with the "),ad=n("code"),vm=a("is_decoder"),km=a(` argument of the configuration set to `),id=n("code"),Bm=a("True"),ym=a(". To be used in a Seq2Seq model, the model needs to initialized with both "),dd=n("code"),wm=a("is_decoder"),Tm=a(` argument and `),ld=n("code"),xm=a("add_cross_attention"),$m=a(" set to "),cd=n("code"),Fm=a("True"),zm=a("; an "),pd=n("code"),Mm=a("encoder_hidden_states"),Em=a(` is then expected as an input to the forward pass.`),qm=l(),R=n("div"),_(Dn.$$.fragment),Pm=l(),ct=n("p"),Cm=a("The "),ma=n("a"),jm=a("BigBirdModel"),Am=a(" forward method, overrides the "),hd=n("code"),Lm=a("__call__"),Im=a(" special method."),Nm=l(),_(ao.$$.fragment),Sm=l(),fd=n("p"),Om=a("Example:"),Dm=l(),_(Wn.$$.fragment),Rc=l(),pt=n("h2"),io=n("a"),gd=n("span"),_(Un.$$.fragment),Wm=l(),md=n("span"),Um=a("BigBirdForPreTraining"),Gc=l(),Qn=n("div"),G=n("div"),_(Hn.$$.fragment),Qm=l(),ht=n("p"),Hm=a("The "),ua=n("a"),Jm=a("BigBirdForPreTraining"),Vm=a(" forward method, overrides the "),ud=n("code"),Rm=a("__call__"),Gm=a(" special method."),Km=l(),_(lo.$$.fragment),Xm=l(),_d=n("p"),Ym=a("Example:"),Zm=l(),_(Jn.$$.fragment),Kc=l(),ft=n("h2"),co=n("a"),bd=n("span"),_(Vn.$$.fragment),eu=l(),vd=n("span"),tu=a("BigBirdForCausalLM"),Xc=l(),xe=n("div"),_(Rn.$$.fragment),ou=l(),gt=n("p"),nu=a("BigBird Model with a "),kd=n("code"),su=a("language modeling"),ru=a(` head on top for CLM fine-tuning. This model is a PyTorch `),Gn=n("a"),au=a("torch.nn.Module"),iu=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),du=l(),K=n("div"),_(Kn.$$.fragment),lu=l(),mt=n("p"),cu=a("The "),_a=n("a"),pu=a("BigBirdForCausalLM"),hu=a(" forward method, overrides the "),Bd=n("code"),fu=a("__call__"),gu=a(" special method."),mu=l(),_(po.$$.fragment),uu=l(),yd=n("p"),_u=a("Example:"),bu=l(),_(Xn.$$.fragment),Yc=l(),ut=n("h2"),ho=n("a"),wd=n("span"),_(Yn.$$.fragment),vu=l(),Td=n("span"),ku=a("BigBirdForMaskedLM"),Zc=l(),$e=n("div"),_(Zn.$$.fragment),Bu=l(),_t=n("p"),yu=a("BigBird Model with a "),xd=n("code"),wu=a("language modeling"),Tu=a(` head on top. This model is a PyTorch `),es=n("a"),xu=a("torch.nn.Module"),$u=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fu=l(),X=n("div"),_(ts.$$.fragment),zu=l(),bt=n("p"),Mu=a("The "),ba=n("a"),Eu=a("BigBirdForMaskedLM"),qu=a(" forward method, overrides the "),$d=n("code"),Pu=a("__call__"),Cu=a(" special method."),ju=l(),_(fo.$$.fragment),Au=l(),Fd=n("p"),Lu=a("Example:"),Iu=l(),_(os.$$.fragment),ep=l(),vt=n("h2"),go=n("a"),zd=n("span"),_(ns.$$.fragment),Nu=l(),Md=n("span"),Su=a("BigBirdForSequenceClassification"),tp=l(),ve=n("div"),_(ss.$$.fragment),Ou=l(),Ed=n("p"),Du=a(`BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Wu=l(),rs=n("p"),Uu=a("This model is a PyTorch "),as=n("a"),Qu=a("torch.nn.Module"),Hu=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ju=l(),S=n("div"),_(is.$$.fragment),Vu=l(),kt=n("p"),Ru=a("The "),va=n("a"),Gu=a("BigBirdForSequenceClassification"),Ku=a(" forward method, overrides the "),qd=n("code"),Xu=a("__call__"),Yu=a(" special method."),Zu=l(),_(mo.$$.fragment),e_=l(),Pd=n("p"),t_=a("Example of single-label classification:"),o_=l(),_(ds.$$.fragment),n_=l(),Cd=n("p"),s_=a("Example of multi-label classification:"),r_=l(),_(ls.$$.fragment),op=l(),Bt=n("h2"),uo=n("a"),jd=n("span"),_(cs.$$.fragment),a_=l(),Ad=n("span"),i_=a("BigBirdForMultipleChoice"),np=l(),ke=n("div"),_(ps.$$.fragment),d_=l(),Ld=n("p"),l_=a(`BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),c_=l(),hs=n("p"),p_=a("This model is a PyTorch "),fs=n("a"),h_=a("torch.nn.Module"),f_=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),g_=l(),Y=n("div"),_(gs.$$.fragment),m_=l(),yt=n("p"),u_=a("The "),ka=n("a"),__=a("BigBirdForMultipleChoice"),b_=a(" forward method, overrides the "),Id=n("code"),v_=a("__call__"),k_=a(" special method."),B_=l(),_(_o.$$.fragment),y_=l(),Nd=n("p"),w_=a("Example:"),T_=l(),_(ms.$$.fragment),sp=l(),wt=n("h2"),bo=n("a"),Sd=n("span"),_(us.$$.fragment),x_=l(),Od=n("span"),$_=a("BigBirdForTokenClassification"),rp=l(),Be=n("div"),_(_s.$$.fragment),F_=l(),Dd=n("p"),z_=a(`BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),M_=l(),bs=n("p"),E_=a("This model is a PyTorch "),vs=n("a"),q_=a("torch.nn.Module"),P_=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),C_=l(),Z=n("div"),_(ks.$$.fragment),j_=l(),Tt=n("p"),A_=a("The "),Ba=n("a"),L_=a("BigBirdForTokenClassification"),I_=a(" forward method, overrides the "),Wd=n("code"),N_=a("__call__"),S_=a(" special method."),O_=l(),_(vo.$$.fragment),D_=l(),Ud=n("p"),W_=a("Example:"),U_=l(),_(Bs.$$.fragment),ap=l(),xt=n("h2"),ko=n("a"),Qd=n("span"),_(ys.$$.fragment),Q_=l(),Hd=n("span"),H_=a("BigBirdForQuestionAnswering"),ip=l(),ye=n("div"),_(ws.$$.fragment),J_=l(),$t=n("p"),V_=a(`BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Jd=n("code"),R_=a("span start logits"),G_=a(" and "),Vd=n("code"),K_=a("span end logits"),X_=a(")."),Y_=l(),Ts=n("p"),Z_=a("This model is a PyTorch "),xs=n("a"),eb=a("torch.nn.Module"),tb=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ob=l(),ee=n("div"),_($s.$$.fragment),nb=l(),Ft=n("p"),sb=a("The "),ya=n("a"),rb=a("BigBirdForQuestionAnswering"),ab=a(" forward method, overrides the "),Rd=n("code"),ib=a("__call__"),db=a(" special method."),lb=l(),_(Bo.$$.fragment),cb=l(),Gd=n("p"),pb=a("Example:"),hb=l(),_(Fs.$$.fragment),dp=l(),zt=n("h2"),yo=n("a"),Kd=n("span"),_(zs.$$.fragment),fb=l(),Xd=n("span"),gb=a("FlaxBigBirdModel"),lp=l(),P=n("div"),_(Ms.$$.fragment),mb=l(),Yd=n("p"),ub=a("The bare BigBird Model transformer outputting raw hidden-states without any specific head on top."),_b=l(),Es=n("p"),bb=a("This model inherits from "),wa=n("a"),vb=a("FlaxPreTrainedModel"),kb=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Bb=l(),qs=n("p"),yb=a("This model is also a Flax Linen "),Ps=n("a"),wb=a("flax.linen.Module"),Tb=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),xb=l(),Zd=n("p"),$b=a("Finally, this model supports inherent JAX features such as:"),Fb=l(),Fe=n("ul"),el=n("li"),Cs=n("a"),zb=a("Just-In-Time (JIT) compilation"),Mb=l(),tl=n("li"),js=n("a"),Eb=a("Automatic Differentiation"),qb=l(),ol=n("li"),As=n("a"),Pb=a("Vectorization"),Cb=l(),nl=n("li"),Ls=n("a"),jb=a("Parallelization"),Ab=l(),te=n("div"),_(Is.$$.fragment),Lb=l(),Mt=n("p"),Ib=a("The "),sl=n("code"),Nb=a("FlaxBigBirdPreTrainedModel"),Sb=a(" forward method, overrides the "),rl=n("code"),Ob=a("__call__"),Db=a(" special method."),Wb=l(),_(wo.$$.fragment),Ub=l(),al=n("p"),Qb=a("Example:"),Hb=l(),_(Ns.$$.fragment),cp=l(),Et=n("h2"),To=n("a"),il=n("span"),_(Ss.$$.fragment),Jb=l(),dl=n("span"),Vb=a("FlaxBigBirdForPreTraining"),pp=l(),C=n("div"),_(Os.$$.fragment),Rb=l(),qt=n("p"),Gb=a("BigBird Model with two heads on top as done during the pretraining: a "),ll=n("code"),Kb=a("masked language modeling"),Xb=a(" head and a "),cl=n("code"),Yb=a("next sentence prediction (classification)"),Zb=a(" head."),ev=l(),Ds=n("p"),tv=a("This model inherits from "),Ta=n("a"),ov=a("FlaxPreTrainedModel"),nv=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),sv=l(),Ws=n("p"),rv=a("This model is also a Flax Linen "),Us=n("a"),av=a("flax.linen.Module"),iv=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),dv=l(),pl=n("p"),lv=a("Finally, this model supports inherent JAX features such as:"),cv=l(),ze=n("ul"),hl=n("li"),Qs=n("a"),pv=a("Just-In-Time (JIT) compilation"),hv=l(),fl=n("li"),Hs=n("a"),fv=a("Automatic Differentiation"),gv=l(),gl=n("li"),Js=n("a"),mv=a("Vectorization"),uv=l(),ml=n("li"),Vs=n("a"),_v=a("Parallelization"),bv=l(),oe=n("div"),_(Rs.$$.fragment),vv=l(),Pt=n("p"),kv=a("The "),ul=n("code"),Bv=a("FlaxBigBirdPreTrainedModel"),yv=a(" forward method, overrides the "),_l=n("code"),wv=a("__call__"),Tv=a(" special method."),xv=l(),_(xo.$$.fragment),$v=l(),bl=n("p"),Fv=a("Example:"),zv=l(),_(Gs.$$.fragment),hp=l(),Ct=n("h2"),$o=n("a"),vl=n("span"),_(Ks.$$.fragment),Mv=l(),kl=n("span"),Ev=a("FlaxBigBirdForMaskedLM"),fp=l(),j=n("div"),_(Xs.$$.fragment),qv=l(),Ys=n("p"),Pv=a("BigBird Model with a "),Bl=n("code"),Cv=a("language modeling"),jv=a(" head on top."),Av=l(),Zs=n("p"),Lv=a("This model inherits from "),xa=n("a"),Iv=a("FlaxPreTrainedModel"),Nv=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Sv=l(),er=n("p"),Ov=a("This model is also a Flax Linen "),tr=n("a"),Dv=a("flax.linen.Module"),Wv=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Uv=l(),yl=n("p"),Qv=a("Finally, this model supports inherent JAX features such as:"),Hv=l(),Me=n("ul"),wl=n("li"),or=n("a"),Jv=a("Just-In-Time (JIT) compilation"),Vv=l(),Tl=n("li"),nr=n("a"),Rv=a("Automatic Differentiation"),Gv=l(),xl=n("li"),sr=n("a"),Kv=a("Vectorization"),Xv=l(),$l=n("li"),rr=n("a"),Yv=a("Parallelization"),Zv=l(),ne=n("div"),_(ar.$$.fragment),ek=l(),jt=n("p"),tk=a("The "),Fl=n("code"),ok=a("FlaxBigBirdPreTrainedModel"),nk=a(" forward method, overrides the "),zl=n("code"),sk=a("__call__"),rk=a(" special method."),ak=l(),_(Fo.$$.fragment),ik=l(),Ml=n("p"),dk=a("Example:"),lk=l(),_(ir.$$.fragment),gp=l(),At=n("h2"),zo=n("a"),El=n("span"),_(dr.$$.fragment),ck=l(),ql=n("span"),pk=a("FlaxBigBirdForSequenceClassification"),mp=l(),A=n("div"),_(lr.$$.fragment),hk=l(),Pl=n("p"),fk=a(`BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),gk=l(),cr=n("p"),mk=a("This model inherits from "),$a=n("a"),uk=a("FlaxPreTrainedModel"),_k=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),bk=l(),pr=n("p"),vk=a("This model is also a Flax Linen "),hr=n("a"),kk=a("flax.linen.Module"),Bk=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),yk=l(),Cl=n("p"),wk=a("Finally, this model supports inherent JAX features such as:"),Tk=l(),Ee=n("ul"),jl=n("li"),fr=n("a"),xk=a("Just-In-Time (JIT) compilation"),$k=l(),Al=n("li"),gr=n("a"),Fk=a("Automatic Differentiation"),zk=l(),Ll=n("li"),mr=n("a"),Mk=a("Vectorization"),Ek=l(),Il=n("li"),ur=n("a"),qk=a("Parallelization"),Pk=l(),se=n("div"),_(_r.$$.fragment),Ck=l(),Lt=n("p"),jk=a("The "),Nl=n("code"),Ak=a("FlaxBigBirdPreTrainedModel"),Lk=a(" forward method, overrides the "),Sl=n("code"),Ik=a("__call__"),Nk=a(" special method."),Sk=l(),_(Mo.$$.fragment),Ok=l(),Ol=n("p"),Dk=a("Example:"),Wk=l(),_(br.$$.fragment),up=l(),It=n("h2"),Eo=n("a"),Dl=n("span"),_(vr.$$.fragment),Uk=l(),Wl=n("span"),Qk=a("FlaxBigBirdForMultipleChoice"),_p=l(),L=n("div"),_(kr.$$.fragment),Hk=l(),Ul=n("p"),Jk=a(`BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Vk=l(),Br=n("p"),Rk=a("This model inherits from "),Fa=n("a"),Gk=a("FlaxPreTrainedModel"),Kk=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Xk=l(),yr=n("p"),Yk=a("This model is also a Flax Linen "),wr=n("a"),Zk=a("flax.linen.Module"),eB=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),tB=l(),Ql=n("p"),oB=a("Finally, this model supports inherent JAX features such as:"),nB=l(),qe=n("ul"),Hl=n("li"),Tr=n("a"),sB=a("Just-In-Time (JIT) compilation"),rB=l(),Jl=n("li"),xr=n("a"),aB=a("Automatic Differentiation"),iB=l(),Vl=n("li"),$r=n("a"),dB=a("Vectorization"),lB=l(),Rl=n("li"),Fr=n("a"),cB=a("Parallelization"),pB=l(),re=n("div"),_(zr.$$.fragment),hB=l(),Nt=n("p"),fB=a("The "),Gl=n("code"),gB=a("FlaxBigBirdPreTrainedModel"),mB=a(" forward method, overrides the "),Kl=n("code"),uB=a("__call__"),_B=a(" special method."),bB=l(),_(qo.$$.fragment),vB=l(),Xl=n("p"),kB=a("Example:"),BB=l(),_(Mr.$$.fragment),bp=l(),St=n("h2"),Po=n("a"),Yl=n("span"),_(Er.$$.fragment),yB=l(),Zl=n("span"),wB=a("FlaxBigBirdForTokenClassification"),vp=l(),I=n("div"),_(qr.$$.fragment),TB=l(),ec=n("p"),xB=a(`BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),$B=l(),Pr=n("p"),FB=a("This model inherits from "),za=n("a"),zB=a("FlaxPreTrainedModel"),MB=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),EB=l(),Cr=n("p"),qB=a("This model is also a Flax Linen "),jr=n("a"),PB=a("flax.linen.Module"),CB=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),jB=l(),tc=n("p"),AB=a("Finally, this model supports inherent JAX features such as:"),LB=l(),Pe=n("ul"),oc=n("li"),Ar=n("a"),IB=a("Just-In-Time (JIT) compilation"),NB=l(),nc=n("li"),Lr=n("a"),SB=a("Automatic Differentiation"),OB=l(),sc=n("li"),Ir=n("a"),DB=a("Vectorization"),WB=l(),rc=n("li"),Nr=n("a"),UB=a("Parallelization"),QB=l(),ae=n("div"),_(Sr.$$.fragment),HB=l(),Ot=n("p"),JB=a("The "),ac=n("code"),VB=a("FlaxBigBirdPreTrainedModel"),RB=a(" forward method, overrides the "),ic=n("code"),GB=a("__call__"),KB=a(" special method."),XB=l(),_(Co.$$.fragment),YB=l(),dc=n("p"),ZB=a("Example:"),e1=l(),_(Or.$$.fragment),kp=l(),Dt=n("h2"),jo=n("a"),lc=n("span"),_(Dr.$$.fragment),t1=l(),cc=n("span"),o1=a("FlaxBigBirdForQuestionAnswering"),Bp=l(),N=n("div"),_(Wr.$$.fragment),n1=l(),Wt=n("p"),s1=a(`BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),pc=n("code"),r1=a("span start logits"),a1=a(" and "),hc=n("code"),i1=a("span end logits"),d1=a(")."),l1=l(),Ur=n("p"),c1=a("This model inherits from "),Ma=n("a"),p1=a("FlaxPreTrainedModel"),h1=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),f1=l(),Qr=n("p"),g1=a("This model is also a Flax Linen "),Hr=n("a"),m1=a("flax.linen.Module"),u1=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),_1=l(),fc=n("p"),b1=a("Finally, this model supports inherent JAX features such as:"),v1=l(),Ce=n("ul"),gc=n("li"),Jr=n("a"),k1=a("Just-In-Time (JIT) compilation"),B1=l(),mc=n("li"),Vr=n("a"),y1=a("Automatic Differentiation"),w1=l(),uc=n("li"),Rr=n("a"),T1=a("Vectorization"),x1=l(),_c=n("li"),Gr=n("a"),$1=a("Parallelization"),F1=l(),ie=n("div"),_(Kr.$$.fragment),z1=l(),Ut=n("p"),M1=a("The "),Ea=n("a"),E1=a("FlaxBigBirdForQuestionAnswering"),q1=a(" forward method, overrides the "),bc=n("code"),P1=a("__call__"),C1=a(" special method."),j1=l(),_(Ao.$$.fragment),A1=l(),vc=n("p"),L1=a("Example:"),I1=l(),_(Xr.$$.fragment),this.h()},l(o){const h=U2('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(t),T=c(o),m=s(o,"H1",{class:!0});var Yr=r(m);w=s(Yr,"A",{id:!0,class:!0,href:!0});var kc=r(w);x=s(kc,"SPAN",{});var Bc=r(x);b(u.$$.fragment,Bc),Bc.forEach(t),kc.forEach(t),f=c(Yr),$=s(Yr,"SPAN",{});var yc=r($);zh=i(yc,"BigBird"),yc.forEach(t),Yr.forEach(t),qc=c(o),et=s(o,"H2",{class:!0});var Zr=r(et);Qt=s(Zr,"A",{id:!0,class:!0,href:!0});var wc=r(Qt);ri=s(wc,"SPAN",{});var Tc=r(ri);b(Go.$$.fragment,Tc),Tc.forEach(t),wc.forEach(t),Mh=c(Zr),ai=s(Zr,"SPAN",{});var xc=r(ai);Eh=i(xc,"Overview"),xc.forEach(t),Zr.forEach(t),Pc=c(o),Ht=s(o,"P",{});var ea=r(Ht);qh=i(ea,"The BigBird model was proposed in "),Ko=s(ea,"A",{href:!0,rel:!0});var $c=r(Ko);Ph=i($c,"Big Bird: Transformers for Longer Sequences"),$c.forEach(t),Ch=i(ea,` by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a sparse-attention based transformer which extends Transformer based models, such as BERT to much longer sequences. 
In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa.`),ea.forEach(t),Cc=c(o),ta=s(o,"P",{});var Fc=r(ta);jh=i(Fc,"The abstract from the paper is the following:"),Fc.forEach(t),jc=c(o),oa=s(o,"P",{});var zc=r(oa);ii=s(zc,"EM",{});var Mc=r(ii);Ah=i(Mc,`Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to 8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.`),Mc.forEach(t),zc.forEach(t),Ac=c(o),na=s(o,"P",{});var Ec=r(na);Lh=i(Ec,"Tips:"),Ec.forEach(t),Lc=c(o),U=s(o,"UL",{});var J=r(U);Xo=s(J,"LI",{});var wp=r(Xo);Ih=i(wp,"For an in-detail explanation on how BigBird\u2019s attention works, see "),Yo=s(wp,"A",{href:!0,rel:!0});var U1=r(Yo);Nh=i(U1,"this blog post"),U1.forEach(t),Sh=i(wp,"."),wp.forEach(t),Oh=c(J),be=s(J,"LI",{});var Ie=r(be);Dh=i(Ie,"BigBird comes with 2 implementations: "),di=s(Ie,"STRONG",{});var Q1=r(di);Wh=i(Q1,"original_full"),Q1.forEach(t),Uh=i(Ie," & "),li=s(Ie,"STRONG",{});var H1=r(li);Qh=i(H1,"block_sparse"),H1.forEach(t),Hh=i(Ie,`. For the sequence length < 1024, using `),ci=s(Ie,"STRONG",{});var J1=r(ci);Jh=i(J1,"original_full"),J1.forEach(t),Vh=i(Ie," is advised as there is no benefit in using "),pi=s(Ie,"STRONG",{});var V1=r(pi);Rh=i(V1,"block_sparse"),V1.forEach(t),Gh=i(Ie," attention."),Ie.forEach(t),Kh=c(J),hi=s(J,"LI",{});var R1=r(hi);Xh=i(R1,"The code currently uses window size of 3 blocks and 2 global blocks."),R1.forEach(t),Yh=c(J),fi=s(J,"LI",{});var G1=r(fi);Zh=i(G1,"Sequence length must be divisible by block size."),G1.forEach(t),ef=c(J),Zo=s(J,"LI",{});var Tp=r(Zo);tf=i(Tp,"Current implementation supports only "),gi=s(Tp,"STRONG",{});var K1=r(gi);of=i(K1,"ITC"),K1.forEach(t),nf=i(Tp,"."),Tp.forEach(t),sf=c(J),sa=s(J,"LI",{});var N1=r(sa);rf=i(N1,"Current implementation doesn\u2019t support "),mi=s(N1,"STRONG",{});var X1=r(mi);af=i(X1,"num_random_blocks = 0"),X1.forEach(t),N1.forEach(t),J.forEach(t),Ic=c(o),je=s(o,"P",{});var qa=r(je);df=i(qa,"This model was contributed by "),en=s(qa,"A",{href:!0,rel:!0});var Y1=r(en);lf=i(Y1,"vasudevgupta"),Y1.forEach(t),cf=i(qa,`. 
The original code can be found `),tn=s(qa,"A",{href:!0,rel:!0});var Z1=r(tn);pf=i(Z1,"here"),Z1.forEach(t),hf=i(qa,"."),qa.forEach(t),Nc=c(o),tt=s(o,"H2",{class:!0});var xp=r(tt);Jt=s(xp,"A",{id:!0,class:!0,href:!0});var ey=r(Jt);ui=s(ey,"SPAN",{});var ty=r(ui);b(on.$$.fragment,ty),ty.forEach(t),ey.forEach(t),ff=c(xp),_i=s(xp,"SPAN",{});var oy=r(_i);gf=i(oy,"BigBirdConfig"),oy.forEach(t),xp.forEach(t),Sc=c(o),E=s(o,"DIV",{class:!0});var O=r(E);b(nn.$$.fragment,O),mf=c(O),ot=s(O,"P",{});var Pa=r(ot);uf=i(Pa,"This is the configuration class to store the configuration of a "),ra=s(Pa,"A",{href:!0});var ny=r(ra);_f=i(ny,"BigBirdModel"),ny.forEach(t),bf=i(Pa,`. It is used to instantiate an BigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BigBird `),sn=s(Pa,"A",{href:!0,rel:!0});var sy=r(sn);vf=i(sy,"google/bigbird-roberta-base"),sy.forEach(t),kf=i(Pa," architecture."),Pa.forEach(t),Bf=c(O),nt=s(O,"P",{});var Ca=r(nt);yf=i(Ca,"Configuration objects inherit from "),aa=s(Ca,"A",{href:!0});var ry=r(aa);wf=i(ry,"PretrainedConfig"),ry.forEach(t),Tf=i(Ca,` and can be used to control the model outputs. Read the documentation from `),ia=s(Ca,"A",{href:!0});var ay=r(ia);xf=i(ay,"PretrainedConfig"),ay.forEach(t),$f=i(Ca," for more information."),Ca.forEach(t),Ff=c(O),bi=s(O,"P",{});var iy=r(bi);zf=i(iy,"Example:"),iy.forEach(t),Mf=c(O),b(rn.$$.fragment,O),Ef=c(O),vi=s(O,"BLOCKQUOTE",{});var dy=r(vi);ki=s(dy,"BLOCKQUOTE",{});var ly=r(ki);Bi=s(ly,"BLOCKQUOTE",{});var cy=r(Bi);yi=s(cy,"P",{});var py=r(yi);qf=i(py,"from transformers import BigBirdModel, BigBirdConfig"),py.forEach(t),cy.forEach(t),ly.forEach(t),dy.forEach(t),Pf=c(O),wi=s(O,"BLOCKQUOTE",{});var hy=r(wi);Ti=s(hy,"BLOCKQUOTE",{});var fy=r(Ti);an=s(fy,"BLOCKQUOTE",{});var $p=r(an);Vt=s($p,"H1",{class:!0});var Fp=r(Vt);Rt=s(Fp,"A",{id:!0,class:!0,href:!0});var gy=r(Rt);xi=s(gy,"SPAN",{});var my=r(xi);b(dn.$$.fragment,my),my.forEach(t),gy.forEach(t),Cf=c(Fp),$i=s(Fp,"SPAN",{});var uy=r($i);jf=i(uy,"Initializing a BigBird google/bigbird-roberta-base style configuration"),uy.forEach(t),Fp.forEach(t),Af=c($p),Fi=s($p,"P",{});var _y=r(Fi);Lf=i(_y,"configuration = BigBirdConfig()"),_y.forEach(t),$p.forEach(t),fy.forEach(t),hy.forEach(t),If=c(O),zi=s(O,"BLOCKQUOTE",{});var by=r(zi);Mi=s(by,"BLOCKQUOTE",{});var vy=r(Mi);ln=s(vy,"BLOCKQUOTE",{});var zp=r(ln);Gt=s(zp,"H1",{class:!0});var Mp=r(Gt);Kt=s(Mp,"A",{id:!0,class:!0,href:!0});var ky=r(Kt);Ei=s(ky,"SPAN",{});var By=r(Ei);b(cn.$$.fragment,By),By.forEach(t),ky.forEach(t),Nf=c(Mp),qi=s(Mp,"SPAN",{});var yy=r(qi);Sf=i(yy,"Initializing a model from the google/bigbird-roberta-base style configuration"),yy.forEach(t),Mp.forEach(t),Of=c(zp),Pi=s(zp,"P",{});var wy=r(Pi);Df=i(wy,"model = BigBirdModel(configuration)"),wy.forEach(t),zp.forEach(t),vy.forEach(t),by.forEach(t),Wf=c(O),Ci=s(O,"BLOCKQUOTE",{});var Ty=r(Ci);ji=s(Ty,"BLOCKQUOTE",{});var xy=r(ji);pn=s(xy,"BLOCKQUOTE",{});var Ep=r(pn);Xt=s(Ep,"H1",{class:!0});var qp=r(Xt);Yt=s(qp,"A",{id:!0,class:!0,href:!0});var $y=r(Yt);Ai=s($y,"SPAN",{});var Fy=r(Ai);b(hn.$$.fragment,Fy),Fy.forEach(t),$y.forEach(t),Uf=c(qp),Li=s(qp,"SPAN",{});var zy=r(Li);Qf=i(zy,"Accessing the model configuration"),zy.forEach(t),qp.forEach(t),Hf=c(Ep),Ii=s(Ep,"P",{});var My=r(Ii);Jf=i(My,"configuration = model.config"),My.forEach(t),Ep.forEach(t),xy.forEach(t),Ty.forEach(t),O.forEach(t),Oc=c(o),st=s(o,"H2",{class:!0});var 
Pp=r(st);Zt=s(Pp,"A",{id:!0,class:!0,href:!0});var Ey=r(Zt);Ni=s(Ey,"SPAN",{});var qy=r(Ni);b(fn.$$.fragment,qy),qy.forEach(t),Ey.forEach(t),Vf=c(Pp),Si=s(Pp,"SPAN",{});var Py=r(Si);Rf=i(Py,"BigBirdTokenizer"),Py.forEach(t),Pp.forEach(t),Dc=c(o),q=s(o,"DIV",{class:!0});var de=r(q);b(gn.$$.fragment,de),Gf=c(de),mn=s(de,"P",{});var Cp=r(mn);Kf=i(Cp,"Construct a BigBird tokenizer. Based on "),un=s(Cp,"A",{href:!0,rel:!0});var Cy=r(un);Xf=i(Cy,"SentencePiece"),Cy.forEach(t),Yf=i(Cp,"."),Cp.forEach(t),Zf=c(de),_n=s(de,"P",{});var jp=r(_n);eg=i(jp,"This tokenizer inherits from "),da=s(jp,"A",{href:!0});var jy=r(da);tg=i(jy,"PreTrainedTokenizer"),jy.forEach(t),og=i(jp,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),jp.forEach(t),ng=c(de),Ae=s(de,"DIV",{class:!0});var ja=r(Ae);b(bn.$$.fragment,ja),sg=c(ja),Oi=s(ja,"P",{});var Ay=r(Oi);rg=i(Ay,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Big Bird sequence has the following format:`),Ay.forEach(t),ag=c(ja),vn=s(ja,"UL",{});var Ap=r(vn);la=s(Ap,"LI",{});var S1=r(la);ig=i(S1,"single sequence: "),Di=s(S1,"CODE",{});var Ly=r(Di);dg=i(Ly,"[CLS] X [SEP]"),Ly.forEach(t),S1.forEach(t),lg=c(Ap),ca=s(Ap,"LI",{});var O1=r(ca);cg=i(O1,"pair of sequences: "),Wi=s(O1,"CODE",{});var Iy=r(Wi);pg=i(Iy,"[CLS] A [SEP] B [SEP]"),Iy.forEach(t),O1.forEach(t),Ap.forEach(t),ja.forEach(t),hg=c(de),eo=s(de,"DIV",{class:!0});var Lp=r(eo);b(kn.$$.fragment,Lp),fg=c(Lp),Bn=s(Lp,"P",{});var Ip=r(Bn);gg=i(Ip,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Ui=s(Ip,"CODE",{});var Ny=r(Ui);mg=i(Ny,"prepare_for_model"),Ny.forEach(t),ug=i(Ip," method."),Ip.forEach(t),Lp.forEach(t),_g=c(de),to=s(de,"DIV",{class:!0});var Np=r(to);b(yn.$$.fragment,Np),bg=c(Np),rt=s(Np,"P",{});var Aa=r(rt);vg=i(Aa,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `),Qi=s(Aa,"CODE",{});var Sy=r(Qi);kg=i(Sy,"token_ids_1"),Sy.forEach(t),Bg=i(Aa," is "),Hi=s(Aa,"CODE",{});var Oy=r(Hi);yg=i(Oy,"None"),Oy.forEach(t),wg=i(Aa,", this method only returns the first portion of the mask (0s)."),Aa.forEach(t),Np.forEach(t),Tg=c(de),Ji=s(de,"DIV",{class:!0}),r(Ji).forEach(t),de.forEach(t),Wc=c(o),at=s(o,"H2",{class:!0});var Sp=r(at);oo=s(Sp,"A",{id:!0,class:!0,href:!0});var Dy=r(oo);Vi=s(Dy,"SPAN",{});var Wy=r(Vi);b(wn.$$.fragment,Wy),Wy.forEach(t),Dy.forEach(t),xg=c(Sp),Ri=s(Sp,"SPAN",{});var Uy=r(Ri);$g=i(Uy,"BigBirdTokenizerFast"),Uy.forEach(t),Sp.forEach(t),Uc=c(o),Q=s(o,"DIV",{class:!0});var Ne=r(Q);b(Tn.$$.fragment,Ne),Fg=c(Ne),Te=s(Ne,"P",{});var Lo=r(Te);zg=i(Lo,"Construct a \u201Cfast\u201D BigBird tokenizer (backed by HuggingFace\u2019s "),Gi=s(Lo,"EM",{});var Qy=r(Gi);Mg=i(Qy,"tokenizers"),Qy.forEach(t),Eg=i(Lo," library). Based on "),xn=s(Lo,"A",{href:!0,rel:!0});var Hy=r(xn);qg=i(Hy,"Unigram"),Hy.forEach(t),Pg=i(Lo,`. This tokenizer inherits from `),pa=s(Lo,"A",{href:!0});var Jy=r(pa);Cg=i(Jy,"PreTrainedTokenizerFast"),Jy.forEach(t),jg=i(Lo,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods`),Lo.forEach(t),Ag=c(Ne),Le=s(Ne,"DIV",{class:!0});var La=r(Le);b($n.$$.fragment,La),Lg=c(La),Ki=s(La,"P",{});var Vy=r(Ki);Ig=i(Vy,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BigBird sequence has the following format:`),Vy.forEach(t),Ng=c(La),Fn=s(La,"UL",{});var Op=r(Fn);ha=s(Op,"LI",{});var D1=r(ha);Sg=i(D1,"single sequence: "),Xi=s(D1,"CODE",{});var Ry=r(Xi);Og=i(Ry,"[CLS] X [SEP]"),Ry.forEach(t),D1.forEach(t),Dg=c(Op),fa=s(Op,"LI",{});var W1=r(fa);Wg=i(W1,"pair of sequences: "),Yi=s(W1,"CODE",{});var Gy=r(Yi);Ug=i(Gy,"[CLS] A [SEP] B [SEP]"),Gy.forEach(t),W1.forEach(t),Op.forEach(t),La.forEach(t),Qg=c(Ne),we=s(Ne,"DIV",{class:!0});var Io=r(we);b(zn.$$.fragment,Io),Hg=c(Io),Zi=s(Io,"P",{});var Ky=r(Zi);Jg=i(Ky,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format:`),Ky.forEach(t),Vg=c(Io),b(Mn.$$.fragment,Io),Rg=c(Io),ed=s(Io,"P",{});var Xy=r(ed);Gg=i(Xy,"if token_ids_1 is None, only returns the first portion of the mask (0s)."),Xy.forEach(t),Io.forEach(t),Kg=c(Ne),no=s(Ne,"DIV",{class:!0});var Dp=r(no);b(En.$$.fragment,Dp),Xg=c(Dp),qn=s(Dp,"P",{});var Wp=r(qn);Yg=i(Wp,`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),td=s(Wp,"CODE",{});var Yy=r(td);Zg=i(Yy,"prepare_for_model"),Yy.forEach(t),em=i(Wp," method."),Wp.forEach(t),Dp.forEach(t),Ne.forEach(t),Qc=c(o),it=s(o,"H2",{class:!0});var Up=r(it);so=s(Up,"A",{id:!0,class:!0,href:!0});var Zy=r(so);od=s(Zy,"SPAN",{});var ew=r(od);b(Pn.$$.fragment,ew),ew.forEach(t),Zy.forEach(t),tm=c(Up),nd=s(Up,"SPAN",{});var tw=r(nd);om=i(tw,"BigBird specific outputs"),tw.forEach(t),Up.forEach(t),Hc=c(o),dt=s(o,"DIV",{class:!0});var Qp=r(dt);b(Cn.$$.fragment,Qp),nm=c(Qp),jn=s(Qp,"P",{});var Hp=r(jn);sm=i(Hp,"Output type of "),ga=s(Hp,"A",{href:!0});var ow=r(ga);rm=i(ow,"BigBirdForPreTraining"),ow.forEach(t),am=i(Hp,"."),Hp.forEach(t),Qp.forEach(t),Jc=c(o),lt=s(o,"H2",{class:!0});var Jp=r(lt);ro=s(Jp,"A",{id:!0,class:!0,href:!0});var nw=r(ro);sd=s(nw,"SPAN",{});var sw=r(sd);b(An.$$.fragment,sw),sw.forEach(t),nw.forEach(t),im=c(Jp),rd=s(Jp,"SPAN",{});var rw=r(rd);dm=i(rw,"BigBirdModel"),rw.forEach(t),Jp.forEach(t),Vc=c(o),H=s(o,"DIV",{class:!0});var Se=r(H);b(Ln.$$.fragment,Se),lm=c(Se),In=s(Se,"P",{});var Vp=r(In);cm=i(Vp,`The bare BigBird Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Nn=s(Vp,"A",{href:!0,rel:!0});var aw=r(Nn);pm=i(aw,"torch.nn.Module"),aw.forEach(t),hm=i(Vp,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vp.forEach(t),fm=c(Se),Sn=s(Se,"P",{});var Rp=r(Sn);gm=i(Rp,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),On=s(Rp,"A",{href:!0,rel:!0});var iw=r(On);mm=i(iw,`Attention is all you need`),iw.forEach(t),um=i(Rp,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),Rp.forEach(t),_m=c(Se),W=s(Se,"P",{});var le=r(W);bm=i(le,"To behave as an decoder the model needs to be initialized with the "),ad=s(le,"CODE",{});var dw=r(ad);vm=i(dw,"is_decoder"),dw.forEach(t),km=i(le,` argument of the configuration set to `),id=s(le,"CODE",{});var lw=r(id);Bm=i(lw,"True"),lw.forEach(t),ym=i(le,". To be used in a Seq2Seq model, the model needs to initialized with both "),dd=s(le,"CODE",{});var cw=r(dd);wm=i(cw,"is_decoder"),cw.forEach(t),Tm=i(le,` argument and `),ld=s(le,"CODE",{});var pw=r(ld);xm=i(pw,"add_cross_attention"),pw.forEach(t),$m=i(le," set to "),cd=s(le,"CODE",{});var hw=r(cd);Fm=i(hw,"True"),hw.forEach(t),zm=i(le,"; an "),pd=s(le,"CODE",{});var fw=r(pd);Mm=i(fw,"encoder_hidden_states"),fw.forEach(t),Em=i(le,` is then expected as an input to the forward pass.`),le.forEach(t),qm=c(Se),R=s(Se,"DIV",{class:!0});var Oe=r(R);b(Dn.$$.fragment,Oe),Pm=c(Oe),ct=s(Oe,"P",{});var Ia=r(ct);Cm=i(Ia,"The "),ma=s(Ia,"A",{href:!0});var gw=r(ma);jm=i(gw,"BigBirdModel"),gw.forEach(t),Am=i(Ia," forward method, overrides the "),hd=s(Ia,"CODE",{});var mw=r(hd);Lm=i(mw,"__call__"),mw.forEach(t),Im=i(Ia," special method."),Ia.forEach(t),Nm=c(Oe),b(ao.$$.fragment,Oe),Sm=c(Oe),fd=s(Oe,"P",{});var uw=r(fd);Om=i(uw,"Example:"),uw.forEach(t),Dm=c(Oe),b(Wn.$$.fragment,Oe),Oe.forEach(t),Se.forEach(t),Rc=c(o),pt=s(o,"H2",{class:!0});var Gp=r(pt);io=s(Gp,"A",{id:!0,class:!0,href:!0});var _w=r(io);gd=s(_w,"SPAN",{});var bw=r(gd);b(Un.$$.fragment,bw),bw.forEach(t),_w.forEach(t),Wm=c(Gp),md=s(Gp,"SPAN",{});var vw=r(md);Um=i(vw,"BigBirdForPreTraining"),vw.forEach(t),Gp.forEach(t),Gc=c(o),Qn=s(o,"DIV",{class:!0});var kw=r(Qn);G=s(kw,"DIV",{class:!0});var De=r(G);b(Hn.$$.fragment,De),Qm=c(De),ht=s(De,"P",{});var Na=r(ht);Hm=i(Na,"The "),ua=s(Na,"A",{href:!0});var Bw=r(ua);Jm=i(Bw,"BigBirdForPreTraining"),Bw.forEach(t),Vm=i(Na," forward method, overrides the "),ud=s(Na,"CODE",{});var yw=r(ud);Rm=i(yw,"__call__"),yw.forEach(t),Gm=i(Na," special method."),Na.forEach(t),Km=c(De),b(lo.$$.fragment,De),Xm=c(De),_d=s(De,"P",{});var ww=r(_d);Ym=i(ww,"Example:"),ww.forEach(t),Zm=c(De),b(Jn.$$.fragment,De),De.forEach(t),kw.forEach(t),Kc=c(o),ft=s(o,"H2",{class:!0});var Kp=r(ft);co=s(Kp,"A",{id:!0,class:!0,href:!0});var Tw=r(co);bd=s(Tw,"SPAN",{});var xw=r(bd);b(Vn.$$.fragment,xw),xw.forEach(t),Tw.forEach(t),eu=c(Kp),vd=s(Kp,"SPAN",{});var $w=r(vd);tu=i($w,"BigBirdForCausalLM"),$w.forEach(t),Kp.forEach(t),Xc=c(o),xe=s(o,"DIV",{class:!0});var Sa=r(xe);b(Rn.$$.fragment,Sa),ou=c(Sa),gt=s(Sa,"P",{});var Oa=r(gt);nu=i(Oa,"BigBird Model with a "),kd=s(Oa,"CODE",{});var Fw=r(kd);su=i(Fw,"language modeling"),Fw.forEach(t),ru=i(Oa,` head on top for CLM fine-tuning. This model is a PyTorch `),Gn=s(Oa,"A",{href:!0,rel:!0});var zw=r(Gn);au=i(zw,"torch.nn.Module"),zw.forEach(t),iu=i(Oa,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Oa.forEach(t),du=c(Sa),K=s(Sa,"DIV",{class:!0});var We=r(K);b(Kn.$$.fragment,We),lu=c(We),mt=s(We,"P",{});var Da=r(mt);cu=i(Da,"The "),_a=s(Da,"A",{href:!0});var Mw=r(_a);pu=i(Mw,"BigBirdForCausalLM"),Mw.forEach(t),hu=i(Da," forward method, overrides the "),Bd=s(Da,"CODE",{});var Ew=r(Bd);fu=i(Ew,"__call__"),Ew.forEach(t),gu=i(Da," special method."),Da.forEach(t),mu=c(We),b(po.$$.fragment,We),uu=c(We),yd=s(We,"P",{});var qw=r(yd);_u=i(qw,"Example:"),qw.forEach(t),bu=c(We),b(Xn.$$.fragment,We),We.forEach(t),Sa.forEach(t),Yc=c(o),ut=s(o,"H2",{class:!0});var Xp=r(ut);ho=s(Xp,"A",{id:!0,class:!0,href:!0});var Pw=r(ho);wd=s(Pw,"SPAN",{});var Cw=r(wd);b(Yn.$$.fragment,Cw),Cw.forEach(t),Pw.forEach(t),vu=c(Xp),Td=s(Xp,"SPAN",{});var jw=r(Td);ku=i(jw,"BigBirdForMaskedLM"),jw.forEach(t),Xp.forEach(t),Zc=c(o),$e=s(o,"DIV",{class:!0});var Wa=r($e);b(Zn.$$.fragment,Wa),Bu=c(Wa),_t=s(Wa,"P",{});var Ua=r(_t);yu=i(Ua,"BigBird Model with a "),xd=s(Ua,"CODE",{});var Aw=r(xd);wu=i(Aw,"language modeling"),Aw.forEach(t),Tu=i(Ua,` head on top. This model is a PyTorch `),es=s(Ua,"A",{href:!0,rel:!0});var Lw=r(es);xu=i(Lw,"torch.nn.Module"),Lw.forEach(t),$u=i(Ua,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ua.forEach(t),Fu=c(Wa),X=s(Wa,"DIV",{class:!0});var Ue=r(X);b(ts.$$.fragment,Ue),zu=c(Ue),bt=s(Ue,"P",{});var Qa=r(bt);Mu=i(Qa,"The "),ba=s(Qa,"A",{href:!0});var Iw=r(ba);Eu=i(Iw,"BigBirdForMaskedLM"),Iw.forEach(t),qu=i(Qa," forward method, overrides the "),$d=s(Qa,"CODE",{});var Nw=r($d);Pu=i(Nw,"__call__"),Nw.forEach(t),Cu=i(Qa," special method."),Qa.forEach(t),ju=c(Ue),b(fo.$$.fragment,Ue),Au=c(Ue),Fd=s(Ue,"P",{});var Sw=r(Fd);Lu=i(Sw,"Example:"),Sw.forEach(t),Iu=c(Ue),b(os.$$.fragment,Ue),Ue.forEach(t),Wa.forEach(t),ep=c(o),vt=s(o,"H2",{class:!0});var Yp=r(vt);go=s(Yp,"A",{id:!0,class:!0,href:!0});var Ow=r(go);zd=s(Ow,"SPAN",{});var Dw=r(zd);b(ns.$$.fragment,Dw),Dw.forEach(t),Ow.forEach(t),Nu=c(Yp),Md=s(Yp,"SPAN",{});var Ww=r(Md);Su=i(Ww,"BigBirdForSequenceClassification"),Ww.forEach(t),Yp.forEach(t),tp=c(o),ve=s(o,"DIV",{class:!0});var No=r(ve);b(ss.$$.fragment,No),Ou=c(No),Ed=s(No,"P",{});var Uw=r(Ed);Du=i(Uw,`BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Uw.forEach(t),Wu=c(No),rs=s(No,"P",{});var Zp=r(rs);Uu=i(Zp,"This model is a PyTorch "),as=s(Zp,"A",{href:!0,rel:!0});var Qw=r(as);Qu=i(Qw,"torch.nn.Module"),Qw.forEach(t),Hu=i(Zp,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zp.forEach(t),Ju=c(No),S=s(No,"DIV",{class:!0});var ce=r(S);b(is.$$.fragment,ce),Vu=c(ce),kt=s(ce,"P",{});var Ha=r(kt);Ru=i(Ha,"The "),va=s(Ha,"A",{href:!0});var Hw=r(va);Gu=i(Hw,"BigBirdForSequenceClassification"),Hw.forEach(t),Ku=i(Ha," forward method, overrides the "),qd=s(Ha,"CODE",{});var Jw=r(qd);Xu=i(Jw,"__call__"),Jw.forEach(t),Yu=i(Ha," special method."),Ha.forEach(t),Zu=c(ce),b(mo.$$.fragment,ce),e_=c(ce),Pd=s(ce,"P",{});var Vw=r(Pd);t_=i(Vw,"Example of single-label classification:"),Vw.forEach(t),o_=c(ce),b(ds.$$.fragment,ce),n_=c(ce),Cd=s(ce,"P",{});var Rw=r(Cd);s_=i(Rw,"Example of multi-label classification:"),Rw.forEach(t),r_=c(ce),b(ls.$$.fragment,ce),ce.forEach(t),No.forEach(t),op=c(o),Bt=s(o,"H2",{class:!0});var eh=r(Bt);uo=s(eh,"A",{id:!0,class:!0,href:!0});var Gw=r(uo);jd=s(Gw,"SPAN",{});var Kw=r(jd);b(cs.$$.fragment,Kw),Kw.forEach(t),Gw.forEach(t),a_=c(eh),Ad=s(eh,"SPAN",{});var Xw=r(Ad);i_=i(Xw,"BigBirdForMultipleChoice"),Xw.forEach(t),eh.forEach(t),np=c(o),ke=s(o,"DIV",{class:!0});var So=r(ke);b(ps.$$.fragment,So),d_=c(So),Ld=s(So,"P",{});var Yw=r(Ld);l_=i(Yw,`BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Yw.forEach(t),c_=c(So),hs=s(So,"P",{});var th=r(hs);p_=i(th,"This model is a PyTorch "),fs=s(th,"A",{href:!0,rel:!0});var Zw=r(fs);h_=i(Zw,"torch.nn.Module"),Zw.forEach(t),f_=i(th,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),th.forEach(t),g_=c(So),Y=s(So,"DIV",{class:!0});var Qe=r(Y);b(gs.$$.fragment,Qe),m_=c(Qe),yt=s(Qe,"P",{});var Ja=r(yt);u_=i(Ja,"The "),ka=s(Ja,"A",{href:!0});var eT=r(ka);__=i(eT,"BigBirdForMultipleChoice"),eT.forEach(t),b_=i(Ja," forward method, overrides the "),Id=s(Ja,"CODE",{});var tT=r(Id);v_=i(tT,"__call__"),tT.forEach(t),k_=i(Ja," special method."),Ja.forEach(t),B_=c(Qe),b(_o.$$.fragment,Qe),y_=c(Qe),Nd=s(Qe,"P",{});var oT=r(Nd);w_=i(oT,"Example:"),oT.forEach(t),T_=c(Qe),b(ms.$$.fragment,Qe),Qe.forEach(t),So.forEach(t),sp=c(o),wt=s(o,"H2",{class:!0});var oh=r(wt);bo=s(oh,"A",{id:!0,class:!0,href:!0});var nT=r(bo);Sd=s(nT,"SPAN",{});var sT=r(Sd);b(us.$$.fragment,sT),sT.forEach(t),nT.forEach(t),x_=c(oh),Od=s(oh,"SPAN",{});var rT=r(Od);$_=i(rT,"BigBirdForTokenClassification"),rT.forEach(t),oh.forEach(t),rp=c(o),Be=s(o,"DIV",{class:!0});var Oo=r(Be);b(_s.$$.fragment,Oo),F_=c(Oo),Dd=s(Oo,"P",{});var aT=r(Dd);z_=i(aT,`BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),aT.forEach(t),M_=c(Oo),bs=s(Oo,"P",{});var nh=r(bs);E_=i(nh,"This model is a PyTorch "),vs=s(nh,"A",{href:!0,rel:!0});var iT=r(vs);q_=i(iT,"torch.nn.Module"),iT.forEach(t),P_=i(nh,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nh.forEach(t),C_=c(Oo),Z=s(Oo,"DIV",{class:!0});var He=r(Z);b(ks.$$.fragment,He),j_=c(He),Tt=s(He,"P",{});var Va=r(Tt);A_=i(Va,"The "),Ba=s(Va,"A",{href:!0});var dT=r(Ba);L_=i(dT,"BigBirdForTokenClassification"),dT.forEach(t),I_=i(Va," forward method, overrides the "),Wd=s(Va,"CODE",{});var lT=r(Wd);N_=i(lT,"__call__"),lT.forEach(t),S_=i(Va," special method."),Va.forEach(t),O_=c(He),b(vo.$$.fragment,He),D_=c(He),Ud=s(He,"P",{});var cT=r(Ud);W_=i(cT,"Example:"),cT.forEach(t),U_=c(He),b(Bs.$$.fragment,He),He.forEach(t),Oo.forEach(t),ap=c(o),xt=s(o,"H2",{class:!0});var sh=r(xt);ko=s(sh,"A",{id:!0,class:!0,href:!0});var pT=r(ko);Qd=s(pT,"SPAN",{});var hT=r(Qd);b(ys.$$.fragment,hT),hT.forEach(t),pT.forEach(t),Q_=c(sh),Hd=s(sh,"SPAN",{});var fT=r(Hd);H_=i(fT,"BigBirdForQuestionAnswering"),fT.forEach(t),sh.forEach(t),ip=c(o),ye=s(o,"DIV",{class:!0});var Do=r(ye);b(ws.$$.fragment,Do),J_=c(Do),$t=s(Do,"P",{});var Ra=r($t);V_=i(Ra,`BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Jd=s(Ra,"CODE",{});var gT=r(Jd);R_=i(gT,"span start logits"),gT.forEach(t),G_=i(Ra," and "),Vd=s(Ra,"CODE",{});var mT=r(Vd);K_=i(mT,"span end logits"),mT.forEach(t),X_=i(Ra,")."),Ra.forEach(t),Y_=c(Do),Ts=s(Do,"P",{});var rh=r(Ts);Z_=i(rh,"This model is a PyTorch "),xs=s(rh,"A",{href:!0,rel:!0});var uT=r(xs);eb=i(uT,"torch.nn.Module"),uT.forEach(t),tb=i(rh,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),rh.forEach(t),ob=c(Do),ee=s(Do,"DIV",{class:!0});var Je=r(ee);b($s.$$.fragment,Je),nb=c(Je),Ft=s(Je,"P",{});var Ga=r(Ft);sb=i(Ga,"The "),ya=s(Ga,"A",{href:!0});var _T=r(ya);rb=i(_T,"BigBirdForQuestionAnswering"),_T.forEach(t),ab=i(Ga," forward method, overrides the "),Rd=s(Ga,"CODE",{});var bT=r(Rd);ib=i(bT,"__call__"),bT.forEach(t),db=i(Ga," special method."),Ga.forEach(t),lb=c(Je),b(Bo.$$.fragment,Je),cb=c(Je),Gd=s(Je,"P",{});var vT=r(Gd);pb=i(vT,"Example:"),vT.forEach(t),hb=c(Je),b(Fs.$$.fragment,Je),Je.forEach(t),Do.forEach(t),dp=c(o),zt=s(o,"H2",{class:!0});var ah=r(zt);yo=s(ah,"A",{id:!0,class:!0,href:!0});var kT=r(yo);Kd=s(kT,"SPAN",{});var BT=r(Kd);b(zs.$$.fragment,BT),BT.forEach(t),kT.forEach(t),fb=c(ah),Xd=s(ah,"SPAN",{});var yT=r(Xd);gb=i(yT,"FlaxBigBirdModel"),yT.forEach(t),ah.forEach(t),lp=c(o),P=s(o,"DIV",{class:!0});var pe=r(P);b(Ms.$$.fragment,pe),mb=c(pe),Yd=s(pe,"P",{});var wT=r(Yd);ub=i(wT,"The bare BigBird Model transformer outputting raw hidden-states without any specific head on top."),wT.forEach(t),_b=c(pe),Es=s(pe,"P",{});var ih=r(Es);bb=i(ih,"This model inherits from "),wa=s(ih,"A",{href:!0});var TT=r(wa);vb=i(TT,"FlaxPreTrainedModel"),TT.forEach(t),kb=i(ih,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),ih.forEach(t),Bb=c(pe),qs=s(pe,"P",{});var dh=r(qs);yb=i(dh,"This model is also a Flax Linen "),Ps=s(dh,"A",{href:!0,rel:!0});var xT=r(Ps);wb=i(xT,"flax.linen.Module"),xT.forEach(t),Tb=i(dh,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),dh.forEach(t),xb=c(pe),Zd=s(pe,"P",{});var $T=r(Zd);$b=i($T,"Finally, this model supports inherent JAX features such as:"),$T.forEach(t),Fb=c(pe),Fe=s(pe,"UL",{});var Wo=r(Fe);el=s(Wo,"LI",{});var FT=r(el);Cs=s(FT,"A",{href:!0,rel:!0});var zT=r(Cs);zb=i(zT,"Just-In-Time (JIT) compilation"),zT.forEach(t),FT.forEach(t),Mb=c(Wo),tl=s(Wo,"LI",{});var MT=r(tl);js=s(MT,"A",{href:!0,rel:!0});var ET=r(js);Eb=i(ET,"Automatic Differentiation"),ET.forEach(t),MT.forEach(t),qb=c(Wo),ol=s(Wo,"LI",{});var qT=r(ol);As=s(qT,"A",{href:!0,rel:!0});var PT=r(As);Pb=i(PT,"Vectorization"),PT.forEach(t),qT.forEach(t),Cb=c(Wo),nl=s(Wo,"LI",{});var CT=r(nl);Ls=s(CT,"A",{href:!0,rel:!0});var jT=r(Ls);jb=i(jT,"Parallelization"),jT.forEach(t),CT.forEach(t),Wo.forEach(t),Ab=c(pe),te=s(pe,"DIV",{class:!0});var Ve=r(te);b(Is.$$.fragment,Ve),Lb=c(Ve),Mt=s(Ve,"P",{});var Ka=r(Mt);Ib=i(Ka,"The "),sl=s(Ka,"CODE",{});var AT=r(sl);Nb=i(AT,"FlaxBigBirdPreTrainedModel"),AT.forEach(t),Sb=i(Ka," forward method, overrides the "),rl=s(Ka,"CODE",{});var LT=r(rl);Ob=i(LT,"__call__"),LT.forEach(t),Db=i(Ka," special method."),Ka.forEach(t),Wb=c(Ve),b(wo.$$.fragment,Ve),Ub=c(Ve),al=s(Ve,"P",{});var IT=r(al);Qb=i(IT,"Example:"),IT.forEach(t),Hb=c(Ve),b(Ns.$$.fragment,Ve),Ve.forEach(t),pe.forEach(t),cp=c(o),Et=s(o,"H2",{class:!0});var lh=r(Et);To=s(lh,"A",{id:!0,class:!0,href:!0});var NT=r(To);il=s(NT,"SPAN",{});var ST=r(il);b(Ss.$$.fragment,ST),ST.forEach(t),NT.forEach(t),Jb=c(lh),dl=s(lh,"SPAN",{});var OT=r(dl);Vb=i(OT,"FlaxBigBirdForPreTraining"),OT.forEach(t),lh.forEach(t),pp=c(o),C=s(o,"DIV",{class:!0});var he=r(C);b(Os.$$.fragment,he),Rb=c(he),qt=s(he,"P",{});var Xa=r(qt);Gb=i(Xa,"BigBird Model with two heads on top as done during the pretraining: a "),ll=s(Xa,"CODE",{});var DT=r(ll);Kb=i(DT,"masked language modeling"),DT.forEach(t),Xb=i(Xa," head and a "),cl=s(Xa,"CODE",{});var WT=r(cl);Yb=i(WT,"next sentence prediction (classification)"),WT.forEach(t),Zb=i(Xa," head."),Xa.forEach(t),ev=c(he),Ds=s(he,"P",{});var ch=r(Ds);tv=i(ch,"This model inherits from "),Ta=s(ch,"A",{href:!0});var UT=r(Ta);ov=i(UT,"FlaxPreTrainedModel"),UT.forEach(t),nv=i(ch,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),ch.forEach(t),sv=c(he),Ws=s(he,"P",{});var ph=r(Ws);rv=i(ph,"This model is also a Flax Linen "),Us=s(ph,"A",{href:!0,rel:!0});var QT=r(Us);av=i(QT,"flax.linen.Module"),QT.forEach(t),iv=i(ph,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ph.forEach(t),dv=c(he),pl=s(he,"P",{});var HT=r(pl);lv=i(HT,"Finally, this model supports inherent JAX features such as:"),HT.forEach(t),cv=c(he),ze=s(he,"UL",{});var Uo=r(ze);hl=s(Uo,"LI",{});var JT=r(hl);Qs=s(JT,"A",{href:!0,rel:!0});var VT=r(Qs);pv=i(VT,"Just-In-Time (JIT) compilation"),VT.forEach(t),JT.forEach(t),hv=c(Uo),fl=s(Uo,"LI",{});var RT=r(fl);Hs=s(RT,"A",{href:!0,rel:!0});var GT=r(Hs);fv=i(GT,"Automatic Differentiation"),GT.forEach(t),RT.forEach(t),gv=c(Uo),gl=s(Uo,"LI",{});var KT=r(gl);Js=s(KT,"A",{href:!0,rel:!0});var XT=r(Js);mv=i(XT,"Vectorization"),XT.forEach(t),KT.forEach(t),uv=c(Uo),ml=s(Uo,"LI",{});var YT=r(ml);Vs=s(YT,"A",{href:!0,rel:!0});var ZT=r(Vs);_v=i(ZT,"Parallelization"),ZT.forEach(t),YT.forEach(t),Uo.forEach(t),bv=c(he),oe=s(he,"DIV",{class:!0});var Re=r(oe);b(Rs.$$.fragment,Re),vv=c(Re),Pt=s(Re,"P",{});var Ya=r(Pt);kv=i(Ya,"The "),ul=s(Ya,"CODE",{});var e0=r(ul);Bv=i(e0,"FlaxBigBirdPreTrainedModel"),e0.forEach(t),yv=i(Ya," forward method, overrides the "),_l=s(Ya,"CODE",{});var t0=r(_l);wv=i(t0,"__call__"),t0.forEach(t),Tv=i(Ya," special method."),Ya.forEach(t),xv=c(Re),b(xo.$$.fragment,Re),$v=c(Re),bl=s(Re,"P",{});var o0=r(bl);Fv=i(o0,"Example:"),o0.forEach(t),zv=c(Re),b(Gs.$$.fragment,Re),Re.forEach(t),he.forEach(t),hp=c(o),Ct=s(o,"H2",{class:!0});var hh=r(Ct);$o=s(hh,"A",{id:!0,class:!0,href:!0});var n0=r($o);vl=s(n0,"SPAN",{});var s0=r(vl);b(Ks.$$.fragment,s0),s0.forEach(t),n0.forEach(t),Mv=c(hh),kl=s(hh,"SPAN",{});var r0=r(kl);Ev=i(r0,"FlaxBigBirdForMaskedLM"),r0.forEach(t),hh.forEach(t),fp=c(o),j=s(o,"DIV",{class:!0});var fe=r(j);b(Xs.$$.fragment,fe),qv=c(fe),Ys=s(fe,"P",{});var fh=r(Ys);Pv=i(fh,"BigBird Model with a "),Bl=s(fh,"CODE",{});var a0=r(Bl);Cv=i(a0,"language modeling"),a0.forEach(t),jv=i(fh," head on top."),fh.forEach(t),Av=c(fe),Zs=s(fe,"P",{});var gh=r(Zs);Lv=i(gh,"This model inherits from "),xa=s(gh,"A",{href:!0});var i0=r(xa);Iv=i(i0,"FlaxPreTrainedModel"),i0.forEach(t),Nv=i(gh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),gh.forEach(t),Sv=c(fe),er=s(fe,"P",{});var mh=r(er);Ov=i(mh,"This model is also a Flax Linen "),tr=s(mh,"A",{href:!0,rel:!0});var d0=r(tr);Dv=i(d0,"flax.linen.Module"),d0.forEach(t),Wv=i(mh,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),mh.forEach(t),Uv=c(fe),yl=s(fe,"P",{});var l0=r(yl);Qv=i(l0,"Finally, this model supports inherent JAX features such as:"),l0.forEach(t),Hv=c(fe),Me=s(fe,"UL",{});var Qo=r(Me);wl=s(Qo,"LI",{});var c0=r(wl);or=s(c0,"A",{href:!0,rel:!0});var p0=r(or);Jv=i(p0,"Just-In-Time (JIT) compilation"),p0.forEach(t),c0.forEach(t),Vv=c(Qo),Tl=s(Qo,"LI",{});var h0=r(Tl);nr=s(h0,"A",{href:!0,rel:!0});var f0=r(nr);Rv=i(f0,"Automatic Differentiation"),f0.forEach(t),h0.forEach(t),Gv=c(Qo),xl=s(Qo,"LI",{});var g0=r(xl);sr=s(g0,"A",{href:!0,rel:!0});var m0=r(sr);Kv=i(m0,"Vectorization"),m0.forEach(t),g0.forEach(t),Xv=c(Qo),$l=s(Qo,"LI",{});var u0=r($l);rr=s(u0,"A",{href:!0,rel:!0});var _0=r(rr);Yv=i(_0,"Parallelization"),_0.forEach(t),u0.forEach(t),Qo.forEach(t),Zv=c(fe),ne=s(fe,"DIV",{class:!0});var Ge=r(ne);b(ar.$$.fragment,Ge),ek=c(Ge),jt=s(Ge,"P",{});var Za=r(jt);tk=i(Za,"The "),Fl=s(Za,"CODE",{});var b0=r(Fl);ok=i(b0,"FlaxBigBirdPreTrainedModel"),b0.forEach(t),nk=i(Za," forward method, overrides the "),zl=s(Za,"CODE",{});var v0=r(zl);sk=i(v0,"__call__"),v0.forEach(t),rk=i(Za," special method."),Za.forEach(t),ak=c(Ge),b(Fo.$$.fragment,Ge),ik=c(Ge),Ml=s(Ge,"P",{});var k0=r(Ml);dk=i(k0,"Example:"),k0.forEach(t),lk=c(Ge),b(ir.$$.fragment,Ge),Ge.forEach(t),fe.forEach(t),gp=c(o),At=s(o,"H2",{class:!0});var uh=r(At);zo=s(uh,"A",{id:!0,class:!0,href:!0});var B0=r(zo);El=s(B0,"SPAN",{});var y0=r(El);b(dr.$$.fragment,y0),y0.forEach(t),B0.forEach(t),ck=c(uh),ql=s(uh,"SPAN",{});var w0=r(ql);pk=i(w0,"FlaxBigBirdForSequenceClassification"),w0.forEach(t),uh.forEach(t),mp=c(o),A=s(o,"DIV",{class:!0});var ge=r(A);b(lr.$$.fragment,ge),hk=c(ge),Pl=s(ge,"P",{});var T0=r(Pl);fk=i(T0,`BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),T0.forEach(t),gk=c(ge),cr=s(ge,"P",{});var _h=r(cr);mk=i(_h,"This model inherits from "),$a=s(_h,"A",{href:!0});var x0=r($a);uk=i(x0,"FlaxPreTrainedModel"),x0.forEach(t),_k=i(_h,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),_h.forEach(t),bk=c(ge),pr=s(ge,"P",{});var bh=r(pr);vk=i(bh,"This model is also a Flax Linen "),hr=s(bh,"A",{href:!0,rel:!0});var $0=r(hr);kk=i($0,"flax.linen.Module"),$0.forEach(t),Bk=i(bh,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),bh.forEach(t),yk=c(ge),Cl=s(ge,"P",{});var F0=r(Cl);wk=i(F0,"Finally, this model supports inherent JAX features such as:"),F0.forEach(t),Tk=c(ge),Ee=s(ge,"UL",{});var Ho=r(Ee);jl=s(Ho,"LI",{});var z0=r(jl);fr=s(z0,"A",{href:!0,rel:!0});var M0=r(fr);xk=i(M0,"Just-In-Time (JIT) compilation"),M0.forEach(t),z0.forEach(t),$k=c(Ho),Al=s(Ho,"LI",{});var E0=r(Al);gr=s(E0,"A",{href:!0,rel:!0});var q0=r(gr);Fk=i(q0,"Automatic Differentiation"),q0.forEach(t),E0.forEach(t),zk=c(Ho),Ll=s(Ho,"LI",{});var P0=r(Ll);mr=s(P0,"A",{href:!0,rel:!0});var C0=r(mr);Mk=i(C0,"Vectorization"),C0.forEach(t),P0.forEach(t),Ek=c(Ho),Il=s(Ho,"LI",{});var j0=r(Il);ur=s(j0,"A",{href:!0,rel:!0});var A0=r(ur);qk=i(A0,"Parallelization"),A0.forEach(t),j0.forEach(t),Ho.forEach(t),Pk=c(ge),se=s(ge,"DIV",{class:!0});var Ke=r(se);b(_r.$$.fragment,Ke),Ck=c(Ke),Lt=s(Ke,"P",{});var ei=r(Lt);jk=i(ei,"The "),Nl=s(ei,"CODE",{});var L0=r(Nl);Ak=i(L0,"FlaxBigBirdPreTrainedModel"),L0.forEach(t),Lk=i(ei," forward method, overrides the "),Sl=s(ei,"CODE",{});var I0=r(Sl);Ik=i(I0,"__call__"),I0.forEach(t),Nk=i(ei," special method."),ei.forEach(t),Sk=c(Ke),b(Mo.$$.fragment,Ke),Ok=c(Ke),Ol=s(Ke,"P",{});var N0=r(Ol);Dk=i(N0,"Example:"),N0.forEach(t),Wk=c(Ke),b(br.$$.fragment,Ke),Ke.forEach(t),ge.forEach(t),up=c(o),It=s(o,"H2",{class:!0});var vh=r(It);Eo=s(vh,"A",{id:!0,class:!0,href:!0});var S0=r(Eo);Dl=s(S0,"SPAN",{});var O0=r(Dl);b(vr.$$.fragment,O0),O0.forEach(t),S0.forEach(t),Uk=c(vh),Wl=s(vh,"SPAN",{});var D0=r(Wl);Qk=i(D0,"FlaxBigBirdForMultipleChoice"),D0.forEach(t),vh.forEach(t),_p=c(o),L=s(o,"DIV",{class:!0});var me=r(L);b(kr.$$.fragment,me),Hk=c(me),Ul=s(me,"P",{});var W0=r(Ul);Jk=i(W0,`BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),W0.forEach(t),Vk=c(me),Br=s(me,"P",{});var kh=r(Br);Rk=i(kh,"This model inherits from "),Fa=s(kh,"A",{href:!0});var U0=r(Fa);Gk=i(U0,"FlaxPreTrainedModel"),U0.forEach(t),Kk=i(kh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),kh.forEach(t),Xk=c(me),yr=s(me,"P",{});var Bh=r(yr);Yk=i(Bh,"This model is also a Flax Linen "),wr=s(Bh,"A",{href:!0,rel:!0});var Q0=r(wr);Zk=i(Q0,"flax.linen.Module"),Q0.forEach(t),eB=i(Bh,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Bh.forEach(t),tB=c(me),Ql=s(me,"P",{});var H0=r(Ql);oB=i(H0,"Finally, this model supports inherent JAX features such as:"),H0.forEach(t),nB=c(me),qe=s(me,"UL",{});var Jo=r(qe);Hl=s(Jo,"LI",{});var J0=r(Hl);Tr=s(J0,"A",{href:!0,rel:!0});var V0=r(Tr);sB=i(V0,"Just-In-Time (JIT) compilation"),V0.forEach(t),J0.forEach(t),rB=c(Jo),Jl=s(Jo,"LI",{});var R0=r(Jl);xr=s(R0,"A",{href:!0,rel:!0});var G0=r(xr);aB=i(G0,"Automatic Differentiation"),G0.forEach(t),R0.forEach(t),iB=c(Jo),Vl=s(Jo,"LI",{});var K0=r(Vl);$r=s(K0,"A",{href:!0,rel:!0});var X0=r($r);dB=i(X0,"Vectorization"),X0.forEach(t),K0.forEach(t),lB=c(Jo),Rl=s(Jo,"LI",{});var Y0=r(Rl);Fr=s(Y0,"A",{href:!0,rel:!0});var Z0=r(Fr);cB=i(Z0,"Parallelization"),Z0.forEach(t),Y0.forEach(t),Jo.forEach(t),pB=c(me),re=s(me,"DIV",{class:!0});var Xe=r(re);b(zr.$$.fragment,Xe),hB=c(Xe),Nt=s(Xe,"P",{});var ti=r(Nt);fB=i(ti,"The "),Gl=s(ti,"CODE",{});var e2=r(Gl);gB=i(e2,"FlaxBigBirdPreTrainedModel"),e2.forEach(t),mB=i(ti," forward method, overrides the "),Kl=s(ti,"CODE",{});var t2=r(Kl);uB=i(t2,"__call__"),t2.forEach(t),_B=i(ti," special method."),ti.forEach(t),bB=c(Xe),b(qo.$$.fragment,Xe),vB=c(Xe),Xl=s(Xe,"P",{});var o2=r(Xl);kB=i(o2,"Example:"),o2.forEach(t),BB=c(Xe),b(Mr.$$.fragment,Xe),Xe.forEach(t),me.forEach(t),bp=c(o),St=s(o,"H2",{class:!0});var yh=r(St);Po=s(yh,"A",{id:!0,class:!0,href:!0});var n2=r(Po);Yl=s(n2,"SPAN",{});var s2=r(Yl);b(Er.$$.fragment,s2),s2.forEach(t),n2.forEach(t),yB=c(yh),Zl=s(yh,"SPAN",{});var r2=r(Zl);wB=i(r2,"FlaxBigBirdForTokenClassification"),r2.forEach(t),yh.forEach(t),vp=c(o),I=s(o,"DIV",{class:!0});var ue=r(I);b(qr.$$.fragment,ue),TB=c(ue),ec=s(ue,"P",{});var a2=r(ec);xB=i(a2,`BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),a2.forEach(t),$B=c(ue),Pr=s(ue,"P",{});var wh=r(Pr);FB=i(wh,"This model inherits from "),za=s(wh,"A",{href:!0});var i2=r(za);zB=i(i2,"FlaxPreTrainedModel"),i2.forEach(t),MB=i(wh,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),wh.forEach(t),EB=c(ue),Cr=s(ue,"P",{});var Th=r(Cr);qB=i(Th,"This model is also a Flax Linen "),jr=s(Th,"A",{href:!0,rel:!0});var d2=r(jr);PB=i(d2,"flax.linen.Module"),d2.forEach(t),CB=i(Th,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Th.forEach(t),jB=c(ue),tc=s(ue,"P",{});var l2=r(tc);AB=i(l2,"Finally, this model supports inherent JAX features such as:"),l2.forEach(t),LB=c(ue),Pe=s(ue,"UL",{});var Vo=r(Pe);oc=s(Vo,"LI",{});var c2=r(oc);Ar=s(c2,"A",{href:!0,rel:!0});var p2=r(Ar);IB=i(p2,"Just-In-Time (JIT) compilation"),p2.forEach(t),c2.forEach(t),NB=c(Vo),nc=s(Vo,"LI",{});var h2=r(nc);Lr=s(h2,"A",{href:!0,rel:!0});var f2=r(Lr);SB=i(f2,"Automatic Differentiation"),f2.forEach(t),h2.forEach(t),OB=c(Vo),sc=s(Vo,"LI",{});var g2=r(sc);Ir=s(g2,"A",{href:!0,rel:!0});var m2=r(Ir);DB=i(m2,"Vectorization"),m2.forEach(t),g2.forEach(t),WB=c(Vo),rc=s(Vo,"LI",{});var u2=r(rc);Nr=s(u2,"A",{href:!0,rel:!0});var _2=r(Nr);UB=i(_2,"Parallelization"),_2.forEach(t),u2.forEach(t),Vo.forEach(t),QB=c(ue),ae=s(ue,"DIV",{class:!0});var Ye=r(ae);b(Sr.$$.fragment,Ye),HB=c(Ye),Ot=s(Ye,"P",{});var oi=r(Ot);JB=i(oi,"The "),ac=s(oi,"CODE",{});var b2=r(ac);VB=i(b2,"FlaxBigBirdPreTrainedModel"),b2.forEach(t),RB=i(oi," forward method, overrides the "),ic=s(oi,"CODE",{});var v2=r(ic);GB=i(v2,"__call__"),v2.forEach(t),KB=i(oi," special method."),oi.forEach(t),XB=c(Ye),b(Co.$$.fragment,Ye),YB=c(Ye),dc=s(Ye,"P",{});var k2=r(dc);ZB=i(k2,"Example:"),k2.forEach(t),e1=c(Ye),b(Or.$$.fragment,Ye),Ye.forEach(t),ue.forEach(t),kp=c(o),Dt=s(o,"H2",{class:!0});var xh=r(Dt);jo=s(xh,"A",{id:!0,class:!0,href:!0});var B2=r(jo);lc=s(B2,"SPAN",{});var y2=r(lc);b(Dr.$$.fragment,y2),y2.forEach(t),B2.forEach(t),t1=c(xh),cc=s(xh,"SPAN",{});var w2=r(cc);o1=i(w2,"FlaxBigBirdForQuestionAnswering"),w2.forEach(t),xh.forEach(t),Bp=c(o),N=s(o,"DIV",{class:!0});var _e=r(N);b(Wr.$$.fragment,_e),n1=c(_e),Wt=s(_e,"P",{});var ni=r(Wt);s1=i(ni,`BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),pc=s(ni,"CODE",{});var T2=r(pc);r1=i(T2,"span start logits"),T2.forEach(t),a1=i(ni," and "),hc=s(ni,"CODE",{});var x2=r(hc);i1=i(x2,"span end logits"),x2.forEach(t),d1=i(ni,")."),ni.forEach(t),l1=c(_e),Ur=s(_e,"P",{});var $h=r(Ur);c1=i($h,"This model inherits from "),Ma=s($h,"A",{href:!0});var $2=r(Ma);p1=i($2,"FlaxPreTrainedModel"),$2.forEach(t),h1=i($h,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),$h.forEach(t),f1=c(_e),Qr=s(_e,"P",{});var Fh=r(Qr);g1=i(Fh,"This model is also a Flax Linen "),Hr=s(Fh,"A",{href:!0,rel:!0});var F2=r(Hr);m1=i(F2,"flax.linen.Module"),F2.forEach(t),u1=i(Fh,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Fh.forEach(t),_1=c(_e),fc=s(_e,"P",{});var z2=r(fc);b1=i(z2,"Finally, this model supports inherent JAX features such as:"),z2.forEach(t),v1=c(_e),Ce=s(_e,"UL",{});var Ro=r(Ce);gc=s(Ro,"LI",{});var M2=r(gc);Jr=s(M2,"A",{href:!0,rel:!0});var E2=r(Jr);k1=i(E2,"Just-In-Time (JIT) compilation"),E2.forEach(t),M2.forEach(t),B1=c(Ro),mc=s(Ro,"LI",{});var q2=r(mc);Vr=s(q2,"A",{href:!0,rel:!0});var P2=r(Vr);y1=i(P2,"Automatic Differentiation"),P2.forEach(t),q2.forEach(t),w1=c(Ro),uc=s(Ro,"LI",{});var C2=r(uc);Rr=s(C2,"A",{href:!0,rel:!0});var j2=r(Rr);T1=i(j2,"Vectorization"),j2.forEach(t),C2.forEach(t),x1=c(Ro),_c=s(Ro,"LI",{});var A2=r(_c);Gr=s(A2,"A",{href:!0,rel:!0});var L2=r(Gr);$1=i(L2,"Parallelization"),L2.forEach(t),A2.forEach(t),Ro.forEach(t),F1=c(_e),ie=s(_e,"DIV",{class:!0});var Ze=r(ie);b(Kr.$$.fragment,Ze),z1=c(Ze),Ut=s(Ze,"P",{});var si=r(Ut);M1=i(si,"The "),Ea=s(si,"A",{href:!0});var I2=r(Ea);E1=i(I2,"FlaxBigBirdForQuestionAnswering"),I2.forEach(t),q1=i(si," forward method, overrides the "),bc=s(si,"CODE",{});var N2=r(bc);P1=i(N2,"__call__"),N2.forEach(t),C1=i(si," special method."),si.forEach(t),j1=c(Ze),b(Ao.$$.fragment,Ze),A1=c(Ze),vc=s(Ze,"P",{});var S2=r(vc);L1=i(S2,"Example:"),S2.forEach(t),I1=c(Ze),b(Xr.$$.fragment,Ze),Ze.forEach(t),_e.forEach(t),this.h()},h(){d(p,"name","hf:doc:metadata"),d(p,"content",JSON.stringify(ax)),d(w,"id","bigbird"),d(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(w,"href","#bigbird"),d(m,"class","relative group"),d(Qt,"id","overview"),d(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Qt,"href","#overview"),d(et,"class","relative group"),d(Ko,"href","https://arxiv.org/abs/2007.14062"),d(Ko,"rel","nofollow"),d(Yo,"href","https://huggingface.co/blog/big-bird"),d(Yo,"rel","nofollow"),d(en,"href","https://huggingface.co/vasudevgupta"),d(en,"rel","nofollow"),d(tn,"href","https://github.com/google-research/bigbird"),d(tn,"rel","nofollow"),d(Jt,"id","transformers.BigBirdConfig"),d(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Jt,"href","#transformers.BigBirdConfig"),d(tt,"class","relative group"),d(ra,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdModel"),d(sn,"href","https://huggingface.co/google/bigbird-roberta-base"),d(sn,"rel","nofollow"),d(aa,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(ia,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(Rt,"id","initializing-a-bigbird-google/bigbird-roberta-base-style-configuration"),d(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Rt,"href","#initializing-a-bigbird-google/bigbird-roberta-base-style-configuration"),d(Vt,"class","relative group"),d(Kt,"id","initializing-a-model-from-the-google/bigbird-roberta-base-style-configuration"),d(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Kt,"href","#initializing-a-model-from-the-google/bigbird-roberta-base-style-configuration"),d(Gt,"class","relative group"),d(Yt,"id","accessing-the-model-configuration"),d(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Yt,"href","#accessing-the-model-configuration"),d(Xt,"class","relative group"),d(E,"class","docstring"),d(Zt,"id","transformers.BigBirdTokenizer"),d(Zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Zt,"href","#transformers.BigBirdTokenizer"),d(st,"class","relative group"),d(un,"href","https://github.com/google/sentencepiece"),d(un,"rel","nofollow"),d(da,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(Ae,"class","docstring"),d(eo,"class","docstring"),d(to,"class","docstring"),d(Ji,"class","docstring"),d(q,"class","docstring"),d(oo,"id","transformers.BigBirdTokenizerFast"),d(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(oo,"href","#transformers.BigBirdTokenizerFast"),d(at,"class","relative group"),d(xn,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),d(xn,"rel","nofollow"),d(pa,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),d(Le,"class","docstring"),d(we,"class","docstring"),d(no,"class","docstring"),d(Q,"class","docstring"),d(so,"id","transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput"),d(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(so,"href","#transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput"),d(it,"class","relative group"),d(ga,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForPreTraining"),d(dt,"class","docstring"),d(ro,"id","transformers.BigBirdModel"),d(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ro,"href","#transformers.BigBirdModel"),d(lt,"class","relative group"),d(Nn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Nn,"rel","nofollow"),d(On,"href","https://arxiv.org/abs/1706.03762"),d(On,"rel","nofollow"),d(ma,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdModel"),d(R,"class","docstring"),d(H,"class","docstring"),d(io,"id","transformers.BigBirdForPreTraining"),d(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(io,"href","#transformers.BigBirdForPreTraining"),d(pt,"class","relative group"),d(ua,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForPreTraining"),d(G,"class","docstring"),d(Qn,"class","docstring"),d(co,"id","transformers.BigBirdForCausalLM"),d(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),d(co,"href","#transformers.BigBirdForCausalLM"),d(ft,"class","relative group"),d(Gn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Gn,"rel","nofollow"),d(_a,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForCausalLM"),d(K,"class","docstring"),d(xe,"class","docstring"),d(ho,"id","transformers.BigBirdForMaskedLM"),d(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ho,"href","#transformers.BigBirdForMaskedLM"),d(ut,"class","relative group"),d(es,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(es,"rel","nofollow"),d(ba,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForMaskedLM"),d(X,"class","docstring"),d($e,"class","docstring"),d(go,"id","transformers.BigBirdForSequenceClassification"),d(go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(go,"href","#transformers.BigBirdForSequenceClassification"),d(vt,"class","relative group"),d(as,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(as,"rel","nofollow"),d(va,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForSequenceClassification"),d(S,"class","docstring"),d(ve,"class","docstring"),d(uo,"id","transformers.BigBirdForMultipleChoice"),d(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(uo,"href","#transformers.BigBirdForMultipleChoice"),d(Bt,"class","relative group"),d(fs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(fs,"rel","nofollow"),d(ka,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForMultipleChoice"),d(Y,"class","docstring"),d(ke,"class","docstring"),d(bo,"id","transformers.BigBirdForTokenClassification"),d(bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(bo,"href","#transformers.BigBirdForTokenClassification"),d(wt,"class","relative group"),d(vs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(vs,"rel","nofollow"),d(Ba,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForTokenClassification"),d(Z,"class","docstring"),d(Be,"class","docstring"),d(ko,"id","transformers.BigBirdForQuestionAnswering"),d(ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ko,"href","#transformers.BigBirdForQuestionAnswering"),d(xt,"class","relative group"),d(xs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(xs,"rel","nofollow"),d(ya,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.BigBirdForQuestionAnswering"),d(ee,"class","docstring"),d(ye,"class","docstring"),d(yo,"id","transformers.FlaxBigBirdModel"),d(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(yo,"href","#transformers.FlaxBigBirdModel"),d(zt,"class","relative 
group"),d(wa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(Ps,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(Ps,"rel","nofollow"),d(Cs,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(Cs,"rel","nofollow"),d(js,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(js,"rel","nofollow"),d(As,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(As,"rel","nofollow"),d(Ls,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(Ls,"rel","nofollow"),d(te,"class","docstring"),d(P,"class","docstring"),d(To,"id","transformers.FlaxBigBirdForPreTraining"),d(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(To,"href","#transformers.FlaxBigBirdForPreTraining"),d(Et,"class","relative group"),d(Ta,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(Us,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(Us,"rel","nofollow"),d(Qs,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(Qs,"rel","nofollow"),d(Hs,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(Hs,"rel","nofollow"),d(Js,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(Js,"rel","nofollow"),d(Vs,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(Vs,"rel","nofollow"),d(oe,"class","docstring"),d(C,"class","docstring"),d($o,"id","transformers.FlaxBigBirdForMaskedLM"),d($o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d($o,"href","#transformers.FlaxBigBirdForMaskedLM"),d(Ct,"class","relative group"),d(xa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(tr,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(tr,"rel","nofollow"),d(or,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(or,"rel","nofollow"),d(nr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(nr,"rel","nofollow"),d(sr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(sr,"rel","nofollow"),d(rr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(rr,"rel","nofollow"),d(ne,"class","docstring"),d(j,"class","docstring"),d(zo,"id","transformers.FlaxBigBirdForSequenceClassification"),d(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(zo,"href","#transformers.FlaxBigBirdForSequenceClassification"),d(At,"class","relative 
group"),d($a,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(hr,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(hr,"rel","nofollow"),d(fr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(fr,"rel","nofollow"),d(gr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(gr,"rel","nofollow"),d(mr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(mr,"rel","nofollow"),d(ur,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(ur,"rel","nofollow"),d(se,"class","docstring"),d(A,"class","docstring"),d(Eo,"id","transformers.FlaxBigBirdForMultipleChoice"),d(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Eo,"href","#transformers.FlaxBigBirdForMultipleChoice"),d(It,"class","relative group"),d(Fa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(wr,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(wr,"rel","nofollow"),d(Tr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(Tr,"rel","nofollow"),d(xr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(xr,"rel","nofollow"),d($r,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d($r,"rel","nofollow"),d(Fr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(Fr,"rel","nofollow"),d(re,"class","docstring"),d(L,"class","docstring"),d(Po,"id","transformers.FlaxBigBirdForTokenClassification"),d(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Po,"href","#transformers.FlaxBigBirdForTokenClassification"),d(St,"class","relative group"),d(za,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(jr,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(jr,"rel","nofollow"),d(Ar,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(Ar,"rel","nofollow"),d(Lr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(Lr,"rel","nofollow"),d(Ir,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(Ir,"rel","nofollow"),d(Nr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(Nr,"rel","nofollow"),d(ae,"class","docstring"),d(I,"class","docstring"),d(jo,"id","transformers.FlaxBigBirdForQuestionAnswering"),d(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(jo,"href","#transformers.FlaxBigBirdForQuestionAnswering"),d(Dt,"class","relative 
group"),d(Ma,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(Hr,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),d(Hr,"rel","nofollow"),d(Jr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(Jr,"rel","nofollow"),d(Vr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(Vr,"rel","nofollow"),d(Rr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(Rr,"rel","nofollow"),d(Gr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(Gr,"rel","nofollow"),d(Ea,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird#transformers.FlaxBigBirdForQuestionAnswering"),d(ie,"class","docstring"),d(N,"class","docstring")},m(o,h){e(document.head,p),g(o,T,h),g(o,m,h),e(m,w),e(w,x),v(u,x,null),e(m,f),e(m,$),e($,zh),g(o,qc,h),g(o,et,h),e(et,Qt),e(Qt,ri),v(Go,ri,null),e(et,Mh),e(et,ai),e(ai,Eh),g(o,Pc,h),g(o,Ht,h),e(Ht,qh),e(Ht,Ko),e(Ko,Ph),e(Ht,Ch),g(o,Cc,h),g(o,ta,h),e(ta,jh),g(o,jc,h),g(o,oa,h),e(oa,ii),e(ii,Ah),g(o,Ac,h),g(o,na,h),e(na,Lh),g(o,Lc,h),g(o,U,h),e(U,Xo),e(Xo,Ih),e(Xo,Yo),e(Yo,Nh),e(Xo,Sh),e(U,Oh),e(U,be),e(be,Dh),e(be,di),e(di,Wh),e(be,Uh),e(be,li),e(li,Qh),e(be,Hh),e(be,ci),e(ci,Jh),e(be,Vh),e(be,pi),e(pi,Rh),e(be,Gh),e(U,Kh),e(U,hi),e(hi,Xh),e(U,Yh),e(U,fi),e(fi,Zh),e(U,ef),e(U,Zo),e(Zo,tf),e(Zo,gi),e(gi,of),e(Zo,nf),e(U,sf),e(U,sa),e(sa,rf),e(sa,mi),e(mi,af),g(o,Ic,h),g(o,je,h),e(je,df),e(je,en),e(en,lf),e(je,cf),e(je,tn),e(tn,pf),e(je,hf),g(o,Nc,h),g(o,tt,h),e(tt,Jt),e(Jt,ui),v(on,ui,null),e(tt,ff),e(tt,_i),e(_i,gf),g(o,Sc,h),g(o,E,h),v(nn,E,null),e(E,mf),e(E,ot),e(ot,uf),e(ot,ra),e(ra,_f),e(ot,bf),e(ot,sn),e(sn,vf),e(ot,kf),e(E,Bf),e(E,nt),e(nt,yf),e(nt,aa),e(aa,wf),e(nt,Tf),e(nt,ia),e(ia,xf),e(nt,$f),e(E,Ff),e(E,bi),e(bi,zf),e(E,Mf),v(rn,E,null),e(E,Ef),e(E,vi),e(vi,ki),e(ki,Bi),e(Bi,yi),e(yi,qf),e(E,Pf),e(E,wi),e(wi,Ti),e(Ti,an),e(an,Vt),e(Vt,Rt),e(Rt,xi),v(dn,xi,null),e(Vt,Cf),e(Vt,$i),e($i,jf),e(an,Af),e(an,Fi),e(Fi,Lf),e(E,If),e(E,zi),e(zi,Mi),e(Mi,ln),e(ln,Gt),e(Gt,Kt),e(Kt,Ei),v(cn,Ei,null),e(Gt,Nf),e(Gt,qi),e(qi,Sf),e(ln,Of),e(ln,Pi),e(Pi,Df),e(E,Wf),e(E,Ci),e(Ci,ji),e(ji,pn),e(pn,Xt),e(Xt,Yt),e(Yt,Ai),v(hn,Ai,null),e(Xt,Uf),e(Xt,Li),e(Li,Qf),e(pn,Hf),e(pn,Ii),e(Ii,Jf),g(o,Oc,h),g(o,st,h),e(st,Zt),e(Zt,Ni),v(fn,Ni,null),e(st,Vf),e(st,Si),e(Si,Rf),g(o,Dc,h),g(o,q,h),v(gn,q,null),e(q,Gf),e(q,mn),e(mn,Kf),e(mn,un),e(un,Xf),e(mn,Yf),e(q,Zf),e(q,_n),e(_n,eg),e(_n,da),e(da,tg),e(_n,og),e(q,ng),e(q,Ae),v(bn,Ae,null),e(Ae,sg),e(Ae,Oi),e(Oi,rg),e(Ae,ag),e(Ae,vn),e(vn,la),e(la,ig),e(la,Di),e(Di,dg),e(vn,lg),e(vn,ca),e(ca,cg),e(ca,Wi),e(Wi,pg),e(q,hg),e(q,eo),v(kn,eo,null),e(eo,fg),e(eo,Bn),e(Bn,gg),e(Bn,Ui),e(Ui,mg),e(Bn,ug),e(q,_g),e(q,to),v(yn,to,null),e(to,bg),e(to,rt),e(rt,vg),e(rt,Qi),e(Qi,kg),e(rt,Bg),e(rt,Hi),e(Hi,yg),e(rt,wg),e(q,Tg),e(q,Ji),g(o,Wc,h),g(o,at,h),e(at,oo),e(oo,Vi),v(wn,Vi,null),e(at,xg),e(at,Ri),e(Ri,$g),g(o,Uc,h),g(o,Q,h),v(Tn,Q,null),e(Q,Fg),e(Q,Te),e(Te,zg),e(Te,Gi),e(Gi,Mg),e(Te,Eg),e(Te,xn),e(xn,qg),e(Te,Pg),e(Te,pa),e(pa,Cg),e(Te,jg),e(Q,Ag),e(Q,Le),v($n,Le,null),e(Le,Lg),e(Le,Ki),e(Ki,Ig),e(Le,Ng),e(Le,Fn),e(Fn,ha),e(ha,Sg),e(ha,Xi),e(Xi,Og),e(Fn,Dg),e(Fn,fa),e(fa,Wg),e(fa,Yi),e(Yi,Ug),e(Q,Qg),e(Q,we),v(zn,we,null),e(we,Hg),e(we,Zi),e(Zi,Jg),e(we,Vg),v(Mn,we,null),e(we,Rg),e(we,ed),e(ed,Gg),e(Q,Kg),e(Q,no),v(En,no,null),e(no,Xg),e(no,qn),e(qn,Yg),e(qn,td),e(td,Zg),e(qn,em),g(o,Qc,h),g(o,it,h),e(it,so),e(so,od),v(Pn,od,null),e(it,tm),e(it,nd),e(nd,om),g(o,Hc,h),g(o,dt,h),v(Cn,dt,null),
e(dt,nm),e(dt,jn),e(jn,sm),e(jn,ga),e(ga,rm),e(jn,am),g(o,Jc,h),g(o,lt,h),e(lt,ro),e(ro,sd),v(An,sd,null),e(lt,im),e(lt,rd),e(rd,dm),g(o,Vc,h),g(o,H,h),v(Ln,H,null),e(H,lm),e(H,In),e(In,cm),e(In,Nn),e(Nn,pm),e(In,hm),e(H,fm),e(H,Sn),e(Sn,gm),e(Sn,On),e(On,mm),e(Sn,um),e(H,_m),e(H,W),e(W,bm),e(W,ad),e(ad,vm),e(W,km),e(W,id),e(id,Bm),e(W,ym),e(W,dd),e(dd,wm),e(W,Tm),e(W,ld),e(ld,xm),e(W,$m),e(W,cd),e(cd,Fm),e(W,zm),e(W,pd),e(pd,Mm),e(W,Em),e(H,qm),e(H,R),v(Dn,R,null),e(R,Pm),e(R,ct),e(ct,Cm),e(ct,ma),e(ma,jm),e(ct,Am),e(ct,hd),e(hd,Lm),e(ct,Im),e(R,Nm),v(ao,R,null),e(R,Sm),e(R,fd),e(fd,Om),e(R,Dm),v(Wn,R,null),g(o,Rc,h),g(o,pt,h),e(pt,io),e(io,gd),v(Un,gd,null),e(pt,Wm),e(pt,md),e(md,Um),g(o,Gc,h),g(o,Qn,h),e(Qn,G),v(Hn,G,null),e(G,Qm),e(G,ht),e(ht,Hm),e(ht,ua),e(ua,Jm),e(ht,Vm),e(ht,ud),e(ud,Rm),e(ht,Gm),e(G,Km),v(lo,G,null),e(G,Xm),e(G,_d),e(_d,Ym),e(G,Zm),v(Jn,G,null),g(o,Kc,h),g(o,ft,h),e(ft,co),e(co,bd),v(Vn,bd,null),e(ft,eu),e(ft,vd),e(vd,tu),g(o,Xc,h),g(o,xe,h),v(Rn,xe,null),e(xe,ou),e(xe,gt),e(gt,nu),e(gt,kd),e(kd,su),e(gt,ru),e(gt,Gn),e(Gn,au),e(gt,iu),e(xe,du),e(xe,K),v(Kn,K,null),e(K,lu),e(K,mt),e(mt,cu),e(mt,_a),e(_a,pu),e(mt,hu),e(mt,Bd),e(Bd,fu),e(mt,gu),e(K,mu),v(po,K,null),e(K,uu),e(K,yd),e(yd,_u),e(K,bu),v(Xn,K,null),g(o,Yc,h),g(o,ut,h),e(ut,ho),e(ho,wd),v(Yn,wd,null),e(ut,vu),e(ut,Td),e(Td,ku),g(o,Zc,h),g(o,$e,h),v(Zn,$e,null),e($e,Bu),e($e,_t),e(_t,yu),e(_t,xd),e(xd,wu),e(_t,Tu),e(_t,es),e(es,xu),e(_t,$u),e($e,Fu),e($e,X),v(ts,X,null),e(X,zu),e(X,bt),e(bt,Mu),e(bt,ba),e(ba,Eu),e(bt,qu),e(bt,$d),e($d,Pu),e(bt,Cu),e(X,ju),v(fo,X,null),e(X,Au),e(X,Fd),e(Fd,Lu),e(X,Iu),v(os,X,null),g(o,ep,h),g(o,vt,h),e(vt,go),e(go,zd),v(ns,zd,null),e(vt,Nu),e(vt,Md),e(Md,Su),g(o,tp,h),g(o,ve,h),v(ss,ve,null),e(ve,Ou),e(ve,Ed),e(Ed,Du),e(ve,Wu),e(ve,rs),e(rs,Uu),e(rs,as),e(as,Qu),e(rs,Hu),e(ve,Ju),e(ve,S),v(is,S,null),e(S,Vu),e(S,kt),e(kt,Ru),e(kt,va),e(va,Gu),e(kt,Ku),e(kt,qd),e(qd,Xu),e(kt,Yu),e(S,Zu),v(mo,S,null),e(S,e_),e(S,Pd),e(Pd,t_),e(S,o_),v(ds,S,null),e(S,n_),e(S,Cd),e(Cd,s_),e(S,r_),v(ls,S,null),g(o,op,h),g(o,Bt,h),e(Bt,uo),e(uo,jd),v(cs,jd,null),e(Bt,a_),e(Bt,Ad),e(Ad,i_),g(o,np,h),g(o,ke,h),v(ps,ke,null),e(ke,d_),e(ke,Ld),e(Ld,l_),e(ke,c_),e(ke,hs),e(hs,p_),e(hs,fs),e(fs,h_),e(hs,f_),e(ke,g_),e(ke,Y),v(gs,Y,null),e(Y,m_),e(Y,yt),e(yt,u_),e(yt,ka),e(ka,__),e(yt,b_),e(yt,Id),e(Id,v_),e(yt,k_),e(Y,B_),v(_o,Y,null),e(Y,y_),e(Y,Nd),e(Nd,w_),e(Y,T_),v(ms,Y,null),g(o,sp,h),g(o,wt,h),e(wt,bo),e(bo,Sd),v(us,Sd,null),e(wt,x_),e(wt,Od),e(Od,$_),g(o,rp,h),g(o,Be,h),v(_s,Be,null),e(Be,F_),e(Be,Dd),e(Dd,z_),e(Be,M_),e(Be,bs),e(bs,E_),e(bs,vs),e(vs,q_),e(bs,P_),e(Be,C_),e(Be,Z),v(ks,Z,null),e(Z,j_),e(Z,Tt),e(Tt,A_),e(Tt,Ba),e(Ba,L_),e(Tt,I_),e(Tt,Wd),e(Wd,N_),e(Tt,S_),e(Z,O_),v(vo,Z,null),e(Z,D_),e(Z,Ud),e(Ud,W_),e(Z,U_),v(Bs,Z,null),g(o,ap,h),g(o,xt,h),e(xt,ko),e(ko,Qd),v(ys,Qd,null),e(xt,Q_),e(xt,Hd),e(Hd,H_),g(o,ip,h),g(o,ye,h),v(ws,ye,null),e(ye,J_),e(ye,$t),e($t,V_),e($t,Jd),e(Jd,R_),e($t,G_),e($t,Vd),e(Vd,K_),e($t,X_),e(ye,Y_),e(ye,Ts),e(Ts,Z_),e(Ts,xs),e(xs,eb),e(Ts,tb),e(ye,ob),e(ye,ee),v($s,ee,null),e(ee,nb),e(ee,Ft),e(Ft,sb),e(Ft,ya),e(ya,rb),e(Ft,ab),e(Ft,Rd),e(Rd,ib),e(Ft,db),e(ee,lb),v(Bo,ee,null),e(ee,cb),e(ee,Gd),e(Gd,pb),e(ee,hb),v(Fs,ee,null),g(o,dp,h),g(o,zt,h),e(zt,yo),e(yo,Kd),v(zs,Kd,null),e(zt,fb),e(zt,Xd),e(Xd,gb),g(o,lp,h),g(o,P,h),v(Ms,P,null),e(P,mb),e(P,Yd),e(Yd,ub),e(P,_b),e(P,Es),e(Es,bb),e(Es,wa),e(wa,vb),e(Es,kb),e(P,Bb),e(P,qs),e(qs,yb),e(qs,Ps),e(Ps,wb),e(qs,Tb),e(P,xb),e(P,Zd),e(Zd,$b),e(P,Fb),e(P,Fe),e(Fe,el),e(el,Cs),e(Cs,zb),e(Fe,Mb),e(Fe,tl),e(tl,js),e(js,Eb)
,e(Fe,qb),e(Fe,ol),e(ol,As),e(As,Pb),e(Fe,Cb),e(Fe,nl),e(nl,Ls),e(Ls,jb),e(P,Ab),e(P,te),v(Is,te,null),e(te,Lb),e(te,Mt),e(Mt,Ib),e(Mt,sl),e(sl,Nb),e(Mt,Sb),e(Mt,rl),e(rl,Ob),e(Mt,Db),e(te,Wb),v(wo,te,null),e(te,Ub),e(te,al),e(al,Qb),e(te,Hb),v(Ns,te,null),g(o,cp,h),g(o,Et,h),e(Et,To),e(To,il),v(Ss,il,null),e(Et,Jb),e(Et,dl),e(dl,Vb),g(o,pp,h),g(o,C,h),v(Os,C,null),e(C,Rb),e(C,qt),e(qt,Gb),e(qt,ll),e(ll,Kb),e(qt,Xb),e(qt,cl),e(cl,Yb),e(qt,Zb),e(C,ev),e(C,Ds),e(Ds,tv),e(Ds,Ta),e(Ta,ov),e(Ds,nv),e(C,sv),e(C,Ws),e(Ws,rv),e(Ws,Us),e(Us,av),e(Ws,iv),e(C,dv),e(C,pl),e(pl,lv),e(C,cv),e(C,ze),e(ze,hl),e(hl,Qs),e(Qs,pv),e(ze,hv),e(ze,fl),e(fl,Hs),e(Hs,fv),e(ze,gv),e(ze,gl),e(gl,Js),e(Js,mv),e(ze,uv),e(ze,ml),e(ml,Vs),e(Vs,_v),e(C,bv),e(C,oe),v(Rs,oe,null),e(oe,vv),e(oe,Pt),e(Pt,kv),e(Pt,ul),e(ul,Bv),e(Pt,yv),e(Pt,_l),e(_l,wv),e(Pt,Tv),e(oe,xv),v(xo,oe,null),e(oe,$v),e(oe,bl),e(bl,Fv),e(oe,zv),v(Gs,oe,null),g(o,hp,h),g(o,Ct,h),e(Ct,$o),e($o,vl),v(Ks,vl,null),e(Ct,Mv),e(Ct,kl),e(kl,Ev),g(o,fp,h),g(o,j,h),v(Xs,j,null),e(j,qv),e(j,Ys),e(Ys,Pv),e(Ys,Bl),e(Bl,Cv),e(Ys,jv),e(j,Av),e(j,Zs),e(Zs,Lv),e(Zs,xa),e(xa,Iv),e(Zs,Nv),e(j,Sv),e(j,er),e(er,Ov),e(er,tr),e(tr,Dv),e(er,Wv),e(j,Uv),e(j,yl),e(yl,Qv),e(j,Hv),e(j,Me),e(Me,wl),e(wl,or),e(or,Jv),e(Me,Vv),e(Me,Tl),e(Tl,nr),e(nr,Rv),e(Me,Gv),e(Me,xl),e(xl,sr),e(sr,Kv),e(Me,Xv),e(Me,$l),e($l,rr),e(rr,Yv),e(j,Zv),e(j,ne),v(ar,ne,null),e(ne,ek),e(ne,jt),e(jt,tk),e(jt,Fl),e(Fl,ok),e(jt,nk),e(jt,zl),e(zl,sk),e(jt,rk),e(ne,ak),v(Fo,ne,null),e(ne,ik),e(ne,Ml),e(Ml,dk),e(ne,lk),v(ir,ne,null),g(o,gp,h),g(o,At,h),e(At,zo),e(zo,El),v(dr,El,null),e(At,ck),e(At,ql),e(ql,pk),g(o,mp,h),g(o,A,h),v(lr,A,null),e(A,hk),e(A,Pl),e(Pl,fk),e(A,gk),e(A,cr),e(cr,mk),e(cr,$a),e($a,uk),e(cr,_k),e(A,bk),e(A,pr),e(pr,vk),e(pr,hr),e(hr,kk),e(pr,Bk),e(A,yk),e(A,Cl),e(Cl,wk),e(A,Tk),e(A,Ee),e(Ee,jl),e(jl,fr),e(fr,xk),e(Ee,$k),e(Ee,Al),e(Al,gr),e(gr,Fk),e(Ee,zk),e(Ee,Ll),e(Ll,mr),e(mr,Mk),e(Ee,Ek),e(Ee,Il),e(Il,ur),e(ur,qk),e(A,Pk),e(A,se),v(_r,se,null),e(se,Ck),e(se,Lt),e(Lt,jk),e(Lt,Nl),e(Nl,Ak),e(Lt,Lk),e(Lt,Sl),e(Sl,Ik),e(Lt,Nk),e(se,Sk),v(Mo,se,null),e(se,Ok),e(se,Ol),e(Ol,Dk),e(se,Wk),v(br,se,null),g(o,up,h),g(o,It,h),e(It,Eo),e(Eo,Dl),v(vr,Dl,null),e(It,Uk),e(It,Wl),e(Wl,Qk),g(o,_p,h),g(o,L,h),v(kr,L,null),e(L,Hk),e(L,Ul),e(Ul,Jk),e(L,Vk),e(L,Br),e(Br,Rk),e(Br,Fa),e(Fa,Gk),e(Br,Kk),e(L,Xk),e(L,yr),e(yr,Yk),e(yr,wr),e(wr,Zk),e(yr,eB),e(L,tB),e(L,Ql),e(Ql,oB),e(L,nB),e(L,qe),e(qe,Hl),e(Hl,Tr),e(Tr,sB),e(qe,rB),e(qe,Jl),e(Jl,xr),e(xr,aB),e(qe,iB),e(qe,Vl),e(Vl,$r),e($r,dB),e(qe,lB),e(qe,Rl),e(Rl,Fr),e(Fr,cB),e(L,pB),e(L,re),v(zr,re,null),e(re,hB),e(re,Nt),e(Nt,fB),e(Nt,Gl),e(Gl,gB),e(Nt,mB),e(Nt,Kl),e(Kl,uB),e(Nt,_B),e(re,bB),v(qo,re,null),e(re,vB),e(re,Xl),e(Xl,kB),e(re,BB),v(Mr,re,null),g(o,bp,h),g(o,St,h),e(St,Po),e(Po,Yl),v(Er,Yl,null),e(St,yB),e(St,Zl),e(Zl,wB),g(o,vp,h),g(o,I,h),v(qr,I,null),e(I,TB),e(I,ec),e(ec,xB),e(I,$B),e(I,Pr),e(Pr,FB),e(Pr,za),e(za,zB),e(Pr,MB),e(I,EB),e(I,Cr),e(Cr,qB),e(Cr,jr),e(jr,PB),e(Cr,CB),e(I,jB),e(I,tc),e(tc,AB),e(I,LB),e(I,Pe),e(Pe,oc),e(oc,Ar),e(Ar,IB),e(Pe,NB),e(Pe,nc),e(nc,Lr),e(Lr,SB),e(Pe,OB),e(Pe,sc),e(sc,Ir),e(Ir,DB),e(Pe,WB),e(Pe,rc),e(rc,Nr),e(Nr,UB),e(I,QB),e(I,ae),v(Sr,ae,null),e(ae,HB),e(ae,Ot),e(Ot,JB),e(Ot,ac),e(ac,VB),e(Ot,RB),e(Ot,ic),e(ic,GB),e(Ot,KB),e(ae,XB),v(Co,ae,null),e(ae,YB),e(ae,dc),e(dc,ZB),e(ae,e1),v(Or,ae,null),g(o,kp,h),g(o,Dt,h),e(Dt,jo),e(jo,lc),v(Dr,lc,null),e(Dt,t1),e(Dt,cc),e(cc,o1),g(o,Bp,h),g(o,N,h),v(Wr,N,null),e(N,n1),e(N,Wt),e(Wt,s1),e(Wt,pc),e(pc,r1),e(Wt,a1),e(Wt,hc),e(hc,i1),e(Wt,d1),e(N,l1),e(N,Ur),e(Ur,c1),e(
Ur,Ma),e(Ma,p1),e(Ur,h1),e(N,f1),e(N,Qr),e(Qr,g1),e(Qr,Hr),e(Hr,m1),e(Qr,u1),e(N,_1),e(N,fc),e(fc,b1),e(N,v1),e(N,Ce),e(Ce,gc),e(gc,Jr),e(Jr,k1),e(Ce,B1),e(Ce,mc),e(mc,Vr),e(Vr,y1),e(Ce,w1),e(Ce,uc),e(uc,Rr),e(Rr,T1),e(Ce,x1),e(Ce,_c),e(_c,Gr),e(Gr,$1),e(N,F1),e(N,ie),v(Kr,ie,null),e(ie,z1),e(ie,Ut),e(Ut,M1),e(Ut,Ea),e(Ea,E1),e(Ut,q1),e(Ut,bc),e(bc,P1),e(Ut,C1),e(ie,j1),v(Ao,ie,null),e(ie,A1),e(ie,vc),e(vc,L1),e(ie,I1),v(Xr,ie,null),yp=!0},p(o,[h]){const Yr={};h&2&&(Yr.$$scope={dirty:h,ctx:o}),ao.$set(Yr);const kc={};h&2&&(kc.$$scope={dirty:h,ctx:o}),lo.$set(kc);const Bc={};h&2&&(Bc.$$scope={dirty:h,ctx:o}),po.$set(Bc);const yc={};h&2&&(yc.$$scope={dirty:h,ctx:o}),fo.$set(yc);const Zr={};h&2&&(Zr.$$scope={dirty:h,ctx:o}),mo.$set(Zr);const wc={};h&2&&(wc.$$scope={dirty:h,ctx:o}),_o.$set(wc);const Tc={};h&2&&(Tc.$$scope={dirty:h,ctx:o}),vo.$set(Tc);const xc={};h&2&&(xc.$$scope={dirty:h,ctx:o}),Bo.$set(xc);const ea={};h&2&&(ea.$$scope={dirty:h,ctx:o}),wo.$set(ea);const $c={};h&2&&($c.$$scope={dirty:h,ctx:o}),xo.$set($c);const Fc={};h&2&&(Fc.$$scope={dirty:h,ctx:o}),Fo.$set(Fc);const zc={};h&2&&(zc.$$scope={dirty:h,ctx:o}),Mo.$set(zc);const Mc={};h&2&&(Mc.$$scope={dirty:h,ctx:o}),qo.$set(Mc);const Ec={};h&2&&(Ec.$$scope={dirty:h,ctx:o}),Co.$set(Ec);const J={};h&2&&(J.$$scope={dirty:h,ctx:o}),Ao.$set(J)},i(o){yp||(k(u.$$.fragment,o),k(Go.$$.fragment,o),k(on.$$.fragment,o),k(nn.$$.fragment,o),k(rn.$$.fragment,o),k(dn.$$.fragment,o),k(cn.$$.fragment,o),k(hn.$$.fragment,o),k(fn.$$.fragment,o),k(gn.$$.fragment,o),k(bn.$$.fragment,o),k(kn.$$.fragment,o),k(yn.$$.fragment,o),k(wn.$$.fragment,o),k(Tn.$$.fragment,o),k($n.$$.fragment,o),k(zn.$$.fragment,o),k(Mn.$$.fragment,o),k(En.$$.fragment,o),k(Pn.$$.fragment,o),k(Cn.$$.fragment,o),k(An.$$.fragment,o),k(Ln.$$.fragment,o),k(Dn.$$.fragment,o),k(ao.$$.fragment,o),k(Wn.$$.fragment,o),k(Un.$$.fragment,o),k(Hn.$$.fragment,o),k(lo.$$.fragment,o),k(Jn.$$.fragment,o),k(Vn.$$.fragment,o),k(Rn.$$.fragment,o),k(Kn.$$.fragment,o),k(po.$$.fragment,o),k(Xn.$$.fragment,o),k(Yn.$$.fragment,o),k(Zn.$$.fragment,o),k(ts.$$.fragment,o),k(fo.$$.fragment,o),k(os.$$.fragment,o),k(ns.$$.fragment,o),k(ss.$$.fragment,o),k(is.$$.fragment,o),k(mo.$$.fragment,o),k(ds.$$.fragment,o),k(ls.$$.fragment,o),k(cs.$$.fragment,o),k(ps.$$.fragment,o),k(gs.$$.fragment,o),k(_o.$$.fragment,o),k(ms.$$.fragment,o),k(us.$$.fragment,o),k(_s.$$.fragment,o),k(ks.$$.fragment,o),k(vo.$$.fragment,o),k(Bs.$$.fragment,o),k(ys.$$.fragment,o),k(ws.$$.fragment,o),k($s.$$.fragment,o),k(Bo.$$.fragment,o),k(Fs.$$.fragment,o),k(zs.$$.fragment,o),k(Ms.$$.fragment,o),k(Is.$$.fragment,o),k(wo.$$.fragment,o),k(Ns.$$.fragment,o),k(Ss.$$.fragment,o),k(Os.$$.fragment,o),k(Rs.$$.fragment,o),k(xo.$$.fragment,o),k(Gs.$$.fragment,o),k(Ks.$$.fragment,o),k(Xs.$$.fragment,o),k(ar.$$.fragment,o),k(Fo.$$.fragment,o),k(ir.$$.fragment,o),k(dr.$$.fragment,o),k(lr.$$.fragment,o),k(_r.$$.fragment,o),k(Mo.$$.fragment,o),k(br.$$.fragment,o),k(vr.$$.fragment,o),k(kr.$$.fragment,o),k(zr.$$.fragment,o),k(qo.$$.fragment,o),k(Mr.$$.fragment,o),k(Er.$$.fragment,o),k(qr.$$.fragment,o),k(Sr.$$.fragment,o),k(Co.$$.fragment,o),k(Or.$$.fragment,o),k(Dr.$$.fragment,o),k(Wr.$$.fragment,o),k(Kr.$$.fragment,o),k(Ao.$$.fragment,o),k(Xr.$$.fragment,o),yp=!0)},o(o){B(u.$$.fragment,o),B(Go.$$.fragment,o),B(on.$$.fragment,o),B(nn.$$.fragment,o),B(rn.$$.fragment,o),B(dn.$$.fragment,o),B(cn.$$.fragment,o),B(hn.$$.fragment,o),B(fn.$$.fragment,o),B(gn.$$.fragment,o),B(bn.$$.fragment,o),B(kn.$$.fragment,o),B(yn.$$.fragment,o),B(wn.$$.fragment,o),B(Tn.$$.fragm
ent,o),B($n.$$.fragment,o),B(zn.$$.fragment,o),B(Mn.$$.fragment,o),B(En.$$.fragment,o),B(Pn.$$.fragment,o),B(Cn.$$.fragment,o),B(An.$$.fragment,o),B(Ln.$$.fragment,o),B(Dn.$$.fragment,o),B(ao.$$.fragment,o),B(Wn.$$.fragment,o),B(Un.$$.fragment,o),B(Hn.$$.fragment,o),B(lo.$$.fragment,o),B(Jn.$$.fragment,o),B(Vn.$$.fragment,o),B(Rn.$$.fragment,o),B(Kn.$$.fragment,o),B(po.$$.fragment,o),B(Xn.$$.fragment,o),B(Yn.$$.fragment,o),B(Zn.$$.fragment,o),B(ts.$$.fragment,o),B(fo.$$.fragment,o),B(os.$$.fragment,o),B(ns.$$.fragment,o),B(ss.$$.fragment,o),B(is.$$.fragment,o),B(mo.$$.fragment,o),B(ds.$$.fragment,o),B(ls.$$.fragment,o),B(cs.$$.fragment,o),B(ps.$$.fragment,o),B(gs.$$.fragment,o),B(_o.$$.fragment,o),B(ms.$$.fragment,o),B(us.$$.fragment,o),B(_s.$$.fragment,o),B(ks.$$.fragment,o),B(vo.$$.fragment,o),B(Bs.$$.fragment,o),B(ys.$$.fragment,o),B(ws.$$.fragment,o),B($s.$$.fragment,o),B(Bo.$$.fragment,o),B(Fs.$$.fragment,o),B(zs.$$.fragment,o),B(Ms.$$.fragment,o),B(Is.$$.fragment,o),B(wo.$$.fragment,o),B(Ns.$$.fragment,o),B(Ss.$$.fragment,o),B(Os.$$.fragment,o),B(Rs.$$.fragment,o),B(xo.$$.fragment,o),B(Gs.$$.fragment,o),B(Ks.$$.fragment,o),B(Xs.$$.fragment,o),B(ar.$$.fragment,o),B(Fo.$$.fragment,o),B(ir.$$.fragment,o),B(dr.$$.fragment,o),B(lr.$$.fragment,o),B(_r.$$.fragment,o),B(Mo.$$.fragment,o),B(br.$$.fragment,o),B(vr.$$.fragment,o),B(kr.$$.fragment,o),B(zr.$$.fragment,o),B(qo.$$.fragment,o),B(Mr.$$.fragment,o),B(Er.$$.fragment,o),B(qr.$$.fragment,o),B(Sr.$$.fragment,o),B(Co.$$.fragment,o),B(Or.$$.fragment,o),B(Dr.$$.fragment,o),B(Wr.$$.fragment,o),B(Kr.$$.fragment,o),B(Ao.$$.fragment,o),B(Xr.$$.fragment,o),yp=!1},d(o){t(p),o&&t(T),o&&t(m),y(u),o&&t(qc),o&&t(et),y(Go),o&&t(Pc),o&&t(Ht),o&&t(Cc),o&&t(ta),o&&t(jc),o&&t(oa),o&&t(Ac),o&&t(na),o&&t(Lc),o&&t(U),o&&t(Ic),o&&t(je),o&&t(Nc),o&&t(tt),y(on),o&&t(Sc),o&&t(E),y(nn),y(rn),y(dn),y(cn),y(hn),o&&t(Oc),o&&t(st),y(fn),o&&t(Dc),o&&t(q),y(gn),y(bn),y(kn),y(yn),o&&t(Wc),o&&t(at),y(wn),o&&t(Uc),o&&t(Q),y(Tn),y($n),y(zn),y(Mn),y(En),o&&t(Qc),o&&t(it),y(Pn),o&&t(Hc),o&&t(dt),y(Cn),o&&t(Jc),o&&t(lt),y(An),o&&t(Vc),o&&t(H),y(Ln),y(Dn),y(ao),y(Wn),o&&t(Rc),o&&t(pt),y(Un),o&&t(Gc),o&&t(Qn),y(Hn),y(lo),y(Jn),o&&t(Kc),o&&t(ft),y(Vn),o&&t(Xc),o&&t(xe),y(Rn),y(Kn),y(po),y(Xn),o&&t(Yc),o&&t(ut),y(Yn),o&&t(Zc),o&&t($e),y(Zn),y(ts),y(fo),y(os),o&&t(ep),o&&t(vt),y(ns),o&&t(tp),o&&t(ve),y(ss),y(is),y(mo),y(ds),y(ls),o&&t(op),o&&t(Bt),y(cs),o&&t(np),o&&t(ke),y(ps),y(gs),y(_o),y(ms),o&&t(sp),o&&t(wt),y(us),o&&t(rp),o&&t(Be),y(_s),y(ks),y(vo),y(Bs),o&&t(ap),o&&t(xt),y(ys),o&&t(ip),o&&t(ye),y(ws),y($s),y(Bo),y(Fs),o&&t(dp),o&&t(zt),y(zs),o&&t(lp),o&&t(P),y(Ms),y(Is),y(wo),y(Ns),o&&t(cp),o&&t(Et),y(Ss),o&&t(pp),o&&t(C),y(Os),y(Rs),y(xo),y(Gs),o&&t(hp),o&&t(Ct),y(Ks),o&&t(fp),o&&t(j),y(Xs),y(ar),y(Fo),y(ir),o&&t(gp),o&&t(At),y(dr),o&&t(mp),o&&t(A),y(lr),y(_r),y(Mo),y(br),o&&t(up),o&&t(It),y(vr),o&&t(_p),o&&t(L),y(kr),y(zr),y(qo),y(Mr),o&&t(bp),o&&t(St),y(Er),o&&t(vp),o&&t(I),y(qr),y(Sr),y(Co),y(Or),o&&t(kp),o&&t(Dt),y(Dr),o&&t(Bp),o&&t(N),y(Wr),y(Kr),y(Ao),y(Xr)}}}const ax={local:"bigbird",sections:[{local:"overview",title:"Overview"},{local:"transformers.BigBirdConfig",title:"BigBirdConfig"},{local:"transformers.BigBirdTokenizer",title:"BigBirdTokenizer"},{local:"transformers.BigBirdTokenizerFast",title:"BigBirdTokenizerFast"},{local:"transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput",title:"BigBird specific 
outputs"},{local:"transformers.BigBirdModel",title:"BigBirdModel"},{local:"transformers.BigBirdForPreTraining",title:"BigBirdForPreTraining"},{local:"transformers.BigBirdForCausalLM",title:"BigBirdForCausalLM"},{local:"transformers.BigBirdForMaskedLM",title:"BigBirdForMaskedLM"},{local:"transformers.BigBirdForSequenceClassification",title:"BigBirdForSequenceClassification"},{local:"transformers.BigBirdForMultipleChoice",title:"BigBirdForMultipleChoice"},{local:"transformers.BigBirdForTokenClassification",title:"BigBirdForTokenClassification"},{local:"transformers.BigBirdForQuestionAnswering",title:"BigBirdForQuestionAnswering"},{local:"transformers.FlaxBigBirdModel",title:"FlaxBigBirdModel"},{local:"transformers.FlaxBigBirdForPreTraining",title:"FlaxBigBirdForPreTraining"},{local:"transformers.FlaxBigBirdForMaskedLM",title:"FlaxBigBirdForMaskedLM"},{local:"transformers.FlaxBigBirdForSequenceClassification",title:"FlaxBigBirdForSequenceClassification"},{local:"transformers.FlaxBigBirdForMultipleChoice",title:"FlaxBigBirdForMultipleChoice"},{local:"transformers.FlaxBigBirdForTokenClassification",title:"FlaxBigBirdForTokenClassification"},{local:"transformers.FlaxBigBirdForQuestionAnswering",title:"FlaxBigBirdForQuestionAnswering"}],title:"BigBird"};function ix(z,p,T){let{fw:m}=p;return z.$$set=w=>{"fw"in w&&T(0,m=w.fw)},[m]}class gx extends O2{constructor(p){super();D2(this,p,ix,rx,W2,{fw:0})}}export{gx as default,ax as metadata};
9980
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bertgeneration.mdx-c2ee025a.js
import{S as la,i as ca,s as ha,e as s,k as l,w as v,t as n,L as pa,c as a,d as o,m as c,a as i,x as b,h as r,b as d,J as e,g as p,y as k,q as T,o as w,B as y}from"../../chunks/vendor-b1433968.js";import{T as da}from"../../chunks/Tip-c3840994.js";import{D as gt}from"../../chunks/Docstring-ff504c58.js";import{C as Vt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as _t}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ua(ie){let u,q,f,g,D;return{c(){u=s("p"),q=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),g=n("Module"),D=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){u=a(_,"P",{});var E=i(u);q=r(E,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(E,"CODE",{});var S=i(f);g=r(S,"Module"),S.forEach(o),D=r(E,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),E.forEach(o)},m(_,E){p(_,u,E),e(u,q),e(u,f),e(f,g),e(u,D)},d(_){_&&o(u)}}}function fa(ie){let u,q,f,g,D;return{c(){u=s("p"),q=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),g=n("Module"),D=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){u=a(_,"P",{});var E=i(u);q=r(E,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a(E,"CODE",{});var S=i(f);g=r(S,"Module"),S.forEach(o),D=r(E,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),E.forEach(o)},m(_,E){p(_,u,E),e(u,q),e(u,f),e(f,g),e(u,D)},d(_){_&&o(u)}}}function ma(ie){let u,q,f,g,D,_,E,S,Do,Jt,R,Y,vt,de,jo,bt,So,Kt,A,Ao,Ue,Mo,No,le,Lo,Fo,Qt,He,Oo,Xt,Ve,kt,Io,Yt,Je,Ro,Zt,Ke,ce,Wo,Qe,Uo,Ho,eo,he,to,Xe,pe,Vo,Ye,Jo,Ko,oo,ue,no,Ze,Qo,ro,Z,M,et,Xo,Yo,tt,Zo,en,Tt,tn,on,nn,wt,rn,so,N,sn,fe,an,dn,me,ln,cn,ao,W,ee,yt,ge,hn,Et,pn,io,$,_e,un,ve,fn,Bt,mn,gn,_n,U,vn,ot,bn,kn,nt,Tn,wn,yn,zt,En,Bn,be,lo,H,te,$t,ke,zn,Gt,$n,co,j,Te,Gn,we,qn,ye,xn,Pn,Cn,Ee,Dn,rt,jn,Sn,An,qt,ho,V,oe,xt,Be,Mn,Pt,Nn,po,m,ze,Ln,Ct,Fn,On,$e,In,st,Rn,Wn,Un,Ge,Hn,qe,Vn,Jn,Kn,xe,Qn,Pe,Xn,Yn,Zn,J,er,at,tr,or,Ce,nr,rr,sr,B,ar,Dt,ir,dr,jt,lr,cr,St,hr,pr,At,ur,fr,Mt,mr,gr,Nt,_r,vr,br,x,De,kr,K,Tr,it,wr,yr,Lt,Er,Br,zr,ne,$r,Ft,Gr,qr,je,uo,Q,re,Ot,Se,xr,It,Pr,fo,G,Ae,Cr,Me,Dr,Rt,jr,Sr,Ar,Ne,Mr,dt,Nr,Lr,Fr,Le,Or,Fe,Ir,Rr,Wr,P,Oe,Ur,X,Hr,lt,Vr,Jr,Wt,Kr,Qr,Xr,se,Yr,Ut,Zr,es,Ie,mo;return _=new _t({}),de=new _t({}),he=new Vt({props:{code:`# leverage checkpoints for Bert2Bert model... # use BERT's cls token as BOS token and sep token as EOS token encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102) # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token decoder = BertGenerationDecoder.from_pretrained("bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102) bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder) # create tokenizer... 
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased") input_ids = tokenizer('This is a long article to summarize', add_special_tokens=False, return_tensors="pt").input_ids labels = tokenizer('This is a short summary', return_tensors="pt").input_ids # train... loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss loss.backward(),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># leverage checkpoints for Bert2Bert model...</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use BERT&#x27;s cls token as BOS token and sep token as EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder = BertGenerationEncoder.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>, bos_token_id=<span class="hljs-number">101</span>, eos_token_id=<span class="hljs-number">102</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add cross attention layers and use BERT&#x27;s cls token as BOS token and sep token as EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = BertGenerationDecoder.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>, add_cross_attention=<span class="hljs-literal">True</span>, is_decoder=<span class="hljs-literal">True</span>, bos_token_id=<span class="hljs-number">101</span>, eos_token_id=<span class="hljs-number">102</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># create tokenizer...</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&#x27;This is a long article to summarize&#x27;</span>, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&#x27;This is a short summary&#x27;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># train...</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span>loss.backward()`}}),ue=new Vt({props:{code:`# instantiate sentence fusion model sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse") tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse") input_ids = tokenizer('This is the first sentence. 
This is the second sentence.', add_special_tokens=False, return_tensors="pt").input_ids outputs = sentence_fuser.generate(input_ids) print(tokenizer.decode(outputs[0])),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate sentence fusion model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sentence_fuser = EncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;google/roberta2roberta_L-24_discofuse&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;google/roberta2roberta_L-24_discofuse&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&#x27;This is the first sentence. This is the second sentence.&#x27;</span>, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = sentence_fuser.generate(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>]))`}}),ge=new _t({}),_e=new gt({props:{name:"class transformers.BertGenerationConfig",anchor:"transformers.BertGenerationConfig",parameters:[{name:"vocab_size",val:" = 50358"},{name:"hidden_size",val:" = 1024"},{name:"num_hidden_layers",val:" = 24"},{name:"num_attention_heads",val:" = 16"},{name:"intermediate_size",val:" = 4096"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 2"},{name:"eos_token_id",val:" = 1"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"use_cache",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_generation/configuration_bert_generation.py#L20",parametersDescription:[{anchor:"transformers.BertGenerationConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50358) &#x2014; Vocabulary size of the BERT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>BertGeneration</code>.`,name:"vocab_size"},{anchor:"transformers.BertGenerationConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.BertGenerationConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.BertGenerationConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.BertGenerationConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often called feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.BertGenerationConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.BertGenerationConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.BertGenerationConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.BertGenerationConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.BertGenerationConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.BertGenerationConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.BertGenerationConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. 
Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.BertGenerationConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"}]}}),be=new Vt({props:{code:`from transformers import BertGenerationConfig, BertGenerationEncoder # Initializing a BertGeneration config configuration = BertGenerationConfig() # Initializing a model from the config model = BertGenerationEncoder(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationConfig, BertGenerationEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BertGeneration config</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BertGenerationConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the config</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertGenerationEncoder(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),ke=new _t({}),Te=new gt({props:{name:"class transformers.BertGenerationTokenizer",anchor:"transformers.BertGenerationTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"sep_token",val:" = '<::::>'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_generation/tokenization_bert_generation.py#L41",parametersDescription:[{anchor:"transformers.BertGenerationTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.BertGenerationTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.BertGenerationTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) 
&#x2014; The begin of sequence token.`,name:"bos_token"},{anchor:"transformers.BertGenerationTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BertGenerationTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BertGenerationTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Be=new _t({}),ze=new gt({props:{name:"class transformers.BertGenerationEncoder",anchor:"transformers.BertGenerationEncoder",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_generation/modeling_bert_generation.py#L259",parametersDescription:[{anchor:"transformers.BertGenerationEncoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig">BertGenerationConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),De=new gt({props:{name:"forward",anchor:"transformers.BertGenerationEncoder.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_generation/modeling_bert_generation.py#L301",parametersDescription:[{anchor:"transformers.BertGenerationEncoder.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationTokenizer">BertGenerationTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertGenerationEncoder.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertGenerationEncoder.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertGenerationEncoder.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertGenerationEncoder.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertGenerationEncoder.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertGenerationEncoder.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertGenerationEncoder.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertGenerationEncoder.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BertGenerationEncoder.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>: <code>1</code> for tokens that are NOT MASKED, <code>0</code> for MASKED tokens.`,name:"encoder_attention_mask"},{anchor:"transformers.BertGenerationEncoder.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BertGenerationEncoder.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig" >BertGenerationConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention 
heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ne=new da({props:{$$slots:{default:[ua]},$$scope:{ctx:ie}}}),je=new Vt({props:{code:`from transformers import BertGenerationTokenizer, BertGenerationEncoder import torch tokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationTokenizer, BertGenerationEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertGenerationTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bert_for_seq_generation_L-24_bbc_encoder&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertGenerationEncoder.from_pretrained(<span class="hljs-string">&#x27;google/bert_for_seq_generation_L-24_bbc_encoder&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Se=new _t({}),Ae=new gt({props:{name:"class transformers.BertGenerationDecoder",anchor:"transformers.BertGenerationDecoder",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_generation/modeling_bert_generation.py#L449",parametersDescription:[{anchor:"transformers.BertGenerationDecoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig">BertGenerationConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Oe=new gt({props:{name:"forward",anchor:"transformers.BertGenerationDecoder.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert_generation/modeling_bert_generation.py#L468",parametersDescription:[{anchor:"transformers.BertGenerationDecoder.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationTokenizer">BertGenerationTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BertGenerationDecoder.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BertGenerationDecoder.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.BertGenerationDecoder.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BertGenerationDecoder.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.BertGenerationDecoder.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BertGenerationDecoder.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BertGenerationDecoder.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BertGenerationDecoder.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BertGenerationDecoder.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.BertGenerationDecoder.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.BertGenerationDecoder.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BertGenerationDecoder.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationConfig" >BertGenerationConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),se=new da({props:{$$slots:{default:[fa]},$$scope:{ctx:ie}}}),Ie=new Vt({props:{code:`from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig import torch tokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") config.is_decoder = True model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', config=config) inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertGenerationTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bert_for_seq_generation_L-24_bbc_encoder&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertGenerationConfig.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertGenerationDecoder.from_pretrained(<span class="hljs-string">&#x27;google/bert_for_seq_generation_L-24_bbc_encoder&#x27;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_token_type_ids=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = 
outputs.logits`}}),{c(){u=s("meta"),q=l(),f=s("h1"),g=s("a"),D=s("span"),v(_.$$.fragment),E=l(),S=s("span"),Do=n("BertGeneration"),Jt=l(),R=s("h2"),Y=s("a"),vt=s("span"),v(de.$$.fragment),jo=l(),bt=s("span"),So=n("Overview"),Kt=l(),A=s("p"),Ao=n(`The BertGeneration model is a BERT model that can be leveraged for sequence-to-sequence tasks using `),Ue=s("a"),Mo=n("EncoderDecoderModel"),No=n(" as proposed in "),le=s("a"),Lo=n(`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Fo=n(" by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),Qt=l(),He=s("p"),Oo=n("The abstract from the paper is the following:"),Xt=l(),Ve=s("p"),kt=s("em"),Io=n(`Unsupervised pretraining of large neural models has recently revolutionized Natural Language Processing. By warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT, GPT-2 and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both encoder and decoder, with these checkpoints. Our models result in new state-of-the-art results on Machine Translation, Text Summarization, Sentence Splitting, and Sentence Fusion.`),Yt=l(),Je=s("p"),Ro=n("Usage:"),Zt=l(),Ke=s("ul"),ce=s("li"),Wo=n("The model can be used in combination with the "),Qe=s("a"),Uo=n("EncoderDecoderModel"),Ho=n(` to leverage two pretrained BERT checkpoints for subsequent fine-tuning.`),eo=l(),v(he.$$.fragment),to=l(),Xe=s("ul"),pe=s("li"),Vo=n("Pretrained "),Ye=s("a"),Jo=n("EncoderDecoderModel"),Ko=n(" are also directly available in the model hub, e.g.,"),oo=l(),v(ue.$$.fragment),no=l(),Ze=s("p"),Qo=n("Tips:"),ro=l(),Z=s("ul"),M=s("li"),et=s("a"),Xo=n("BertGenerationEncoder"),Yo=n(" and "),tt=s("a"),Zo=n("BertGenerationDecoder"),en=n(` should be used in combination with `),Tt=s("code"),tn=n("EncoderDecoder"),on=n("."),nn=l(),wt=s("li"),rn=n(`For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input. Therefore, no EOS token should be added to the end of the input.`),so=l(),N=s("p"),sn=n("This model was contributed by "),fe=s("a"),an=n("patrickvonplaten"),dn=n(`. The original code can be found `),me=s("a"),ln=n("here"),cn=n("."),ao=l(),W=s("h2"),ee=s("a"),yt=s("span"),v(ge.$$.fragment),hn=l(),Et=s("span"),pn=n("BertGenerationConfig"),io=l(),$=s("div"),v(_e.$$.fragment),un=l(),ve=s("p"),fn=n(`This is the configuration class to store the configuration of a `),Bt=s("code"),mn=n("BertGenerationPreTrainedModel"),gn=n(`. It is used to instantiate a BertGeneration model according to the specified arguments, defining the model architecture.`),_n=l(),U=s("p"),vn=n("Configuration objects inherit from "),ot=s("a"),bn=n("PretrainedConfig"),kn=n(` and can be used to control the model outputs. Read the documentation from `),nt=s("a"),Tn=n("PretrainedConfig"),wn=n(" for more information."),yn=l(),zt=s("p"),En=n("Examples:"),Bn=l(),v(be.$$.fragment),lo=l(),H=s("h2"),te=s("a"),$t=s("span"),v(ke.$$.fragment),zn=l(),Gt=s("span"),$n=n("BertGenerationTokenizer"),co=l(),j=s("div"),v(Te.$$.fragment),Gn=l(),we=s("p"),qn=n("Construct a BertGeneration tokenizer. 
Based on "),ye=s("a"),xn=n("SentencePiece"),Pn=n("."),Cn=l(),Ee=s("p"),Dn=n("This tokenizer inherits from "),rt=s("a"),jn=n("PreTrainedTokenizer"),Sn=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),An=l(),qt=s("div"),ho=l(),V=s("h2"),oe=s("a"),xt=s("span"),v(Be.$$.fragment),Mn=l(),Pt=s("span"),Nn=n("BertGenerationEncoder"),po=l(),m=s("div"),v(ze.$$.fragment),Ln=l(),Ct=s("p"),Fn=n("The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top."),On=l(),$e=s("p"),In=n("This model inherits from "),st=s("a"),Rn=n("PreTrainedModel"),Wn=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Un=l(),Ge=s("p"),Hn=n("This model is also a PyTorch "),qe=s("a"),Vn=n("torch.nn.Module"),Jn=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kn=l(),xe=s("p"),Qn=n(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Pe=s("a"),Xn=n(`Attention is all you need`),Yn=n(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Zn=l(),J=s("p"),er=n(`This model should be used when leveraging Bert or Roberta checkpoints for the `),at=s("a"),tr=n("EncoderDecoderModel"),or=n(" class as described in "),Ce=s("a"),nr=n(`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),rr=n(" by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn."),sr=l(),B=s("p"),ar=n("To behave as an decoder the model needs to be initialized with the "),Dt=s("code"),ir=n("is_decoder"),dr=n(` argument of the configuration set to `),jt=s("code"),lr=n("True"),cr=n(". To be used in a Seq2Seq model, the model needs to initialized with both "),St=s("code"),hr=n("is_decoder"),pr=n(` argument and `),At=s("code"),ur=n("add_cross_attention"),fr=n(" set to "),Mt=s("code"),mr=n("True"),gr=n("; an "),Nt=s("code"),_r=n("encoder_hidden_states"),vr=n(` is then expected as an input to the forward pass.`),br=l(),x=s("div"),v(De.$$.fragment),kr=l(),K=s("p"),Tr=n("The "),it=s("a"),wr=n("BertGenerationEncoder"),yr=n(" forward method, overrides the "),Lt=s("code"),Er=n("__call__"),Br=n(" special method."),zr=l(),v(ne.$$.fragment),$r=l(),Ft=s("p"),Gr=n("Example:"),qr=l(),v(je.$$.fragment),uo=l(),Q=s("h2"),re=s("a"),Ot=s("span"),v(Se.$$.fragment),xr=l(),It=s("span"),Pr=n("BertGenerationDecoder"),fo=l(),G=s("div"),v(Ae.$$.fragment),Cr=l(),Me=s("p"),Dr=n("BertGeneration Model with a "),Rt=s("code"),jr=n("language modeling"),Sr=n(" head on top for CLM fine-tuning."),Ar=l(),Ne=s("p"),Mr=n("This model inherits from "),dt=s("a"),Nr=n("PreTrainedModel"),Lr=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fr=l(),Le=s("p"),Or=n("This model is also a PyTorch "),Fe=s("a"),Ir=n("torch.nn.Module"),Rr=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wr=l(),P=s("div"),v(Oe.$$.fragment),Ur=l(),X=s("p"),Hr=n("The "),lt=s("a"),Vr=n("BertGenerationDecoder"),Jr=n(" forward method, overrides the "),Wt=s("code"),Kr=n("__call__"),Qr=n(" special method."),Xr=l(),v(se.$$.fragment),Yr=l(),Ut=s("p"),Zr=n("Example:"),es=l(),v(Ie.$$.fragment),this.h()},l(t){const h=pa('[data-svelte="svelte-1phssyn"]',document.head);u=a(h,"META",{name:!0,content:!0}),h.forEach(o),q=c(t),f=a(t,"H1",{class:!0});var Re=i(f);g=a(Re,"A",{id:!0,class:!0,href:!0});var Ht=i(g);D=a(Ht,"SPAN",{});var ts=i(D);b(_.$$.fragment,ts),ts.forEach(o),Ht.forEach(o),E=c(Re),S=a(Re,"SPAN",{});var os=i(S);Do=r(os,"BertGeneration"),os.forEach(o),Re.forEach(o),Jt=c(t),R=a(t,"H2",{class:!0});var go=i(R);Y=a(go,"A",{id:!0,class:!0,href:!0});var ns=i(Y);vt=a(ns,"SPAN",{});var rs=i(vt);b(de.$$.fragment,rs),rs.forEach(o),ns.forEach(o),jo=c(go),bt=a(go,"SPAN",{});var ss=i(bt);So=r(ss,"Overview"),ss.forEach(o),go.forEach(o),Kt=c(t),A=a(t,"P",{});var ct=i(A);Ao=r(ct,`The BertGeneration model is a BERT model that can be leveraged for sequence-to-sequence tasks using `),Ue=a(ct,"A",{href:!0});var as=i(Ue);Mo=r(as,"EncoderDecoderModel"),as.forEach(o),No=r(ct," as proposed in "),le=a(ct,"A",{href:!0,rel:!0});var is=i(le);Lo=r(is,`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),is.forEach(o),Fo=r(ct," by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),ct.forEach(o),Qt=c(t),He=a(t,"P",{});var ds=i(He);Oo=r(ds,"The abstract from the paper is the following:"),ds.forEach(o),Xt=c(t),Ve=a(t,"P",{});var ls=i(Ve);kt=a(ls,"EM",{});var cs=i(kt);Io=r(cs,`Unsupervised pretraining of large neural models has recently revolutionized Natural Language Processing. By warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT, GPT-2 and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both encoder and decoder, with these checkpoints. 
Our models result in new state-of-the-art results on Machine Translation, Text Summarization, Sentence Splitting, and Sentence Fusion.`),cs.forEach(o),ls.forEach(o),Yt=c(t),Je=a(t,"P",{});var hs=i(Je);Ro=r(hs,"Usage:"),hs.forEach(o),Zt=c(t),Ke=a(t,"UL",{});var ps=i(Ke);ce=a(ps,"LI",{});var _o=i(ce);Wo=r(_o,"The model can be used in combination with the "),Qe=a(_o,"A",{href:!0});var us=i(Qe);Uo=r(us,"EncoderDecoderModel"),us.forEach(o),Ho=r(_o,` to leverage two pretrained BERT checkpoints for subsequent fine-tuning.`),_o.forEach(o),ps.forEach(o),eo=c(t),b(he.$$.fragment,t),to=c(t),Xe=a(t,"UL",{});var fs=i(Xe);pe=a(fs,"LI",{});var vo=i(pe);Vo=r(vo,"Pretrained "),Ye=a(vo,"A",{href:!0});var ms=i(Ye);Jo=r(ms,"EncoderDecoderModel"),ms.forEach(o),Ko=r(vo," are also directly available in the model hub, e.g.,"),vo.forEach(o),fs.forEach(o),oo=c(t),b(ue.$$.fragment,t),no=c(t),Ze=a(t,"P",{});var gs=i(Ze);Qo=r(gs,"Tips:"),gs.forEach(o),ro=c(t),Z=a(t,"UL",{});var bo=i(Z);M=a(bo,"LI",{});var We=i(M);et=a(We,"A",{href:!0});var _s=i(et);Xo=r(_s,"BertGenerationEncoder"),_s.forEach(o),Yo=r(We," and "),tt=a(We,"A",{href:!0});var vs=i(tt);Zo=r(vs,"BertGenerationDecoder"),vs.forEach(o),en=r(We,` should be used in combination with `),Tt=a(We,"CODE",{});var bs=i(Tt);tn=r(bs,"EncoderDecoder"),bs.forEach(o),on=r(We,"."),We.forEach(o),nn=c(bo),wt=a(bo,"LI",{});var ks=i(wt);rn=r(ks,`For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input. Therefore, no EOS token should be added to the end of the input.`),ks.forEach(o),bo.forEach(o),so=c(t),N=a(t,"P",{});var ht=i(N);sn=r(ht,"This model was contributed by "),fe=a(ht,"A",{href:!0,rel:!0});var Ts=i(fe);an=r(Ts,"patrickvonplaten"),Ts.forEach(o),dn=r(ht,`. The original code can be found `),me=a(ht,"A",{href:!0,rel:!0});var ws=i(me);ln=r(ws,"here"),ws.forEach(o),cn=r(ht,"."),ht.forEach(o),ao=c(t),W=a(t,"H2",{class:!0});var ko=i(W);ee=a(ko,"A",{id:!0,class:!0,href:!0});var ys=i(ee);yt=a(ys,"SPAN",{});var Es=i(yt);b(ge.$$.fragment,Es),Es.forEach(o),ys.forEach(o),hn=c(ko),Et=a(ko,"SPAN",{});var Bs=i(Et);pn=r(Bs,"BertGenerationConfig"),Bs.forEach(o),ko.forEach(o),io=c(t),$=a(t,"DIV",{class:!0});var L=i($);b(_e.$$.fragment,L),un=c(L),ve=a(L,"P",{});var To=i(ve);fn=r(To,`This is the configuration class to store the configuration of a `),Bt=a(To,"CODE",{});var zs=i(Bt);mn=r(zs,"BertGenerationPreTrainedModel"),zs.forEach(o),gn=r(To,`. It is used to instantiate a BertGeneration model according to the specified arguments, defining the model architecture.`),To.forEach(o),_n=c(L),U=a(L,"P",{});var pt=i(U);vn=r(pt,"Configuration objects inherit from "),ot=a(pt,"A",{href:!0});var $s=i(ot);bn=r($s,"PretrainedConfig"),$s.forEach(o),kn=r(pt,` and can be used to control the model outputs. Read the documentation from `),nt=a(pt,"A",{href:!0});var Gs=i(nt);Tn=r(Gs,"PretrainedConfig"),Gs.forEach(o),wn=r(pt," for more information."),pt.forEach(o),yn=c(L),zt=a(L,"P",{});var qs=i(zt);En=r(qs,"Examples:"),qs.forEach(o),Bn=c(L),b(be.$$.fragment,L),L.forEach(o),lo=c(t),H=a(t,"H2",{class:!0});var wo=i(H);te=a(wo,"A",{id:!0,class:!0,href:!0});var xs=i(te);$t=a(xs,"SPAN",{});var Ps=i($t);b(ke.$$.fragment,Ps),Ps.forEach(o),xs.forEach(o),zn=c(wo),Gt=a(wo,"SPAN",{});var Cs=i(Gt);$n=r(Cs,"BertGenerationTokenizer"),Cs.forEach(o),wo.forEach(o),co=c(t),j=a(t,"DIV",{class:!0});var ae=i(j);b(Te.$$.fragment,ae),Gn=c(ae),we=a(ae,"P",{});var yo=i(we);qn=r(yo,"Construct a BertGeneration tokenizer. 
Based on "),ye=a(yo,"A",{href:!0,rel:!0});var Ds=i(ye);xn=r(Ds,"SentencePiece"),Ds.forEach(o),Pn=r(yo,"."),yo.forEach(o),Cn=c(ae),Ee=a(ae,"P",{});var Eo=i(Ee);Dn=r(Eo,"This tokenizer inherits from "),rt=a(Eo,"A",{href:!0});var js=i(rt);jn=r(js,"PreTrainedTokenizer"),js.forEach(o),Sn=r(Eo,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Eo.forEach(o),An=c(ae),qt=a(ae,"DIV",{class:!0}),i(qt).forEach(o),ae.forEach(o),ho=c(t),V=a(t,"H2",{class:!0});var Bo=i(V);oe=a(Bo,"A",{id:!0,class:!0,href:!0});var Ss=i(oe);xt=a(Ss,"SPAN",{});var As=i(xt);b(Be.$$.fragment,As),As.forEach(o),Ss.forEach(o),Mn=c(Bo),Pt=a(Bo,"SPAN",{});var Ms=i(Pt);Nn=r(Ms,"BertGenerationEncoder"),Ms.forEach(o),Bo.forEach(o),po=c(t),m=a(t,"DIV",{class:!0});var z=i(m);b(ze.$$.fragment,z),Ln=c(z),Ct=a(z,"P",{});var Ns=i(Ct);Fn=r(Ns,"The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top."),Ns.forEach(o),On=c(z),$e=a(z,"P",{});var zo=i($e);In=r(zo,"This model inherits from "),st=a(zo,"A",{href:!0});var Ls=i(st);Rn=r(Ls,"PreTrainedModel"),Ls.forEach(o),Wn=r(zo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zo.forEach(o),Un=c(z),Ge=a(z,"P",{});var $o=i(Ge);Hn=r($o,"This model is also a PyTorch "),qe=a($o,"A",{href:!0,rel:!0});var Fs=i(qe);Vn=r(Fs,"torch.nn.Module"),Fs.forEach(o),Jn=r($o,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$o.forEach(o),Kn=c(z),xe=a(z,"P",{});var Go=i(xe);Qn=r(Go,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Pe=a(Go,"A",{href:!0,rel:!0});var Os=i(Pe);Xn=r(Os,`Attention is all you need`),Os.forEach(o),Yn=r(Go,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Go.forEach(o),Zn=c(z),J=a(z,"P",{});var ut=i(J);er=r(ut,`This model should be used when leveraging Bert or Roberta checkpoints for the `),at=a(ut,"A",{href:!0});var Is=i(at);tr=r(Is,"EncoderDecoderModel"),Is.forEach(o),or=r(ut," class as described in "),Ce=a(ut,"A",{href:!0,rel:!0});var Rs=i(Ce);nr=r(Rs,`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Rs.forEach(o),rr=r(ut," by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn."),ut.forEach(o),sr=c(z),B=a(z,"P",{});var C=i(B);ar=r(C,"To behave as an decoder the model needs to be initialized with the "),Dt=a(C,"CODE",{});var Ws=i(Dt);ir=r(Ws,"is_decoder"),Ws.forEach(o),dr=r(C,` argument of the configuration set to `),jt=a(C,"CODE",{});var Us=i(jt);lr=r(Us,"True"),Us.forEach(o),cr=r(C,". 
To be used in a Seq2Seq model, the model needs to initialized with both "),St=a(C,"CODE",{});var Hs=i(St);hr=r(Hs,"is_decoder"),Hs.forEach(o),pr=r(C,` argument and `),At=a(C,"CODE",{});var Vs=i(At);ur=r(Vs,"add_cross_attention"),Vs.forEach(o),fr=r(C," set to "),Mt=a(C,"CODE",{});var Js=i(Mt);mr=r(Js,"True"),Js.forEach(o),gr=r(C,"; an "),Nt=a(C,"CODE",{});var Ks=i(Nt);_r=r(Ks,"encoder_hidden_states"),Ks.forEach(o),vr=r(C,` is then expected as an input to the forward pass.`),C.forEach(o),br=c(z),x=a(z,"DIV",{class:!0});var F=i(x);b(De.$$.fragment,F),kr=c(F),K=a(F,"P",{});var ft=i(K);Tr=r(ft,"The "),it=a(ft,"A",{href:!0});var Qs=i(it);wr=r(Qs,"BertGenerationEncoder"),Qs.forEach(o),yr=r(ft," forward method, overrides the "),Lt=a(ft,"CODE",{});var Xs=i(Lt);Er=r(Xs,"__call__"),Xs.forEach(o),Br=r(ft," special method."),ft.forEach(o),zr=c(F),b(ne.$$.fragment,F),$r=c(F),Ft=a(F,"P",{});var Ys=i(Ft);Gr=r(Ys,"Example:"),Ys.forEach(o),qr=c(F),b(je.$$.fragment,F),F.forEach(o),z.forEach(o),uo=c(t),Q=a(t,"H2",{class:!0});var qo=i(Q);re=a(qo,"A",{id:!0,class:!0,href:!0});var Zs=i(re);Ot=a(Zs,"SPAN",{});var ea=i(Ot);b(Se.$$.fragment,ea),ea.forEach(o),Zs.forEach(o),xr=c(qo),It=a(qo,"SPAN",{});var ta=i(It);Pr=r(ta,"BertGenerationDecoder"),ta.forEach(o),qo.forEach(o),fo=c(t),G=a(t,"DIV",{class:!0});var O=i(G);b(Ae.$$.fragment,O),Cr=c(O),Me=a(O,"P",{});var xo=i(Me);Dr=r(xo,"BertGeneration Model with a "),Rt=a(xo,"CODE",{});var oa=i(Rt);jr=r(oa,"language modeling"),oa.forEach(o),Sr=r(xo," head on top for CLM fine-tuning."),xo.forEach(o),Ar=c(O),Ne=a(O,"P",{});var Po=i(Ne);Mr=r(Po,"This model inherits from "),dt=a(Po,"A",{href:!0});var na=i(dt);Nr=r(na,"PreTrainedModel"),na.forEach(o),Lr=r(Po,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Po.forEach(o),Fr=c(O),Le=a(O,"P",{});var Co=i(Le);Or=r(Co,"This model is also a PyTorch "),Fe=a(Co,"A",{href:!0,rel:!0});var ra=i(Fe);Ir=r(ra,"torch.nn.Module"),ra.forEach(o),Rr=r(Co,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Co.forEach(o),Wr=c(O),P=a(O,"DIV",{class:!0});var I=i(P);b(Oe.$$.fragment,I),Ur=c(I),X=a(I,"P",{});var mt=i(X);Hr=r(mt,"The "),lt=a(mt,"A",{href:!0});var sa=i(lt);Vr=r(sa,"BertGenerationDecoder"),sa.forEach(o),Jr=r(mt," forward method, overrides the "),Wt=a(mt,"CODE",{});var aa=i(Wt);Kr=r(aa,"__call__"),aa.forEach(o),Qr=r(mt," special method."),mt.forEach(o),Xr=c(I),b(se.$$.fragment,I),Yr=c(I),Ut=a(I,"P",{});var ia=i(Ut);Zr=r(ia,"Example:"),ia.forEach(o),es=c(I),b(Ie.$$.fragment,I),I.forEach(o),O.forEach(o),this.h()},h(){d(u,"name","hf:doc:metadata"),d(u,"content",JSON.stringify(ga)),d(g,"id","bertgeneration"),d(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(g,"href","#bertgeneration"),d(f,"class","relative group"),d(Y,"id","overview"),d(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Y,"href","#overview"),d(R,"class","relative group"),d(Ue,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),d(le,"href","https://arxiv.org/abs/1907.12461"),d(le,"rel","nofollow"),d(Qe,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),d(Ye,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),d(et,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationEncoder"),d(tt,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationDecoder"),d(fe,"href","https://huggingface.co/patrickvonplaten"),d(fe,"rel","nofollow"),d(me,"href","https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder"),d(me,"rel","nofollow"),d(ee,"id","transformers.BertGenerationConfig"),d(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ee,"href","#transformers.BertGenerationConfig"),d(W,"class","relative group"),d(ot,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(nt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d($,"class","docstring"),d(te,"id","transformers.BertGenerationTokenizer"),d(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(te,"href","#transformers.BertGenerationTokenizer"),d(H,"class","relative group"),d(ye,"href","https://github.com/google/sentencepiece"),d(ye,"rel","nofollow"),d(rt,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(qt,"class","docstring"),d(j,"class","docstring"),d(oe,"id","transformers.BertGenerationEncoder"),d(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(oe,"href","#transformers.BertGenerationEncoder"),d(V,"class","relative 
group"),d(st,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(qe,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(qe,"rel","nofollow"),d(Pe,"href","https://arxiv.org/abs/1706.03762"),d(Pe,"rel","nofollow"),d(at,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel"),d(Ce,"href","https://arxiv.org/abs/1907.12461"),d(Ce,"rel","nofollow"),d(it,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationEncoder"),d(x,"class","docstring"),d(m,"class","docstring"),d(re,"id","transformers.BertGenerationDecoder"),d(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(re,"href","#transformers.BertGenerationDecoder"),d(Q,"class","relative group"),d(dt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(Fe,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Fe,"rel","nofollow"),d(lt,"href","/docs/transformers/v4.15.0/en/model_doc/bertgeneration#transformers.BertGenerationDecoder"),d(P,"class","docstring"),d(G,"class","docstring")},m(t,h){e(document.head,u),p(t,q,h),p(t,f,h),e(f,g),e(g,D),k(_,D,null),e(f,E),e(f,S),e(S,Do),p(t,Jt,h),p(t,R,h),e(R,Y),e(Y,vt),k(de,vt,null),e(R,jo),e(R,bt),e(bt,So),p(t,Kt,h),p(t,A,h),e(A,Ao),e(A,Ue),e(Ue,Mo),e(A,No),e(A,le),e(le,Lo),e(A,Fo),p(t,Qt,h),p(t,He,h),e(He,Oo),p(t,Xt,h),p(t,Ve,h),e(Ve,kt),e(kt,Io),p(t,Yt,h),p(t,Je,h),e(Je,Ro),p(t,Zt,h),p(t,Ke,h),e(Ke,ce),e(ce,Wo),e(ce,Qe),e(Qe,Uo),e(ce,Ho),p(t,eo,h),k(he,t,h),p(t,to,h),p(t,Xe,h),e(Xe,pe),e(pe,Vo),e(pe,Ye),e(Ye,Jo),e(pe,Ko),p(t,oo,h),k(ue,t,h),p(t,no,h),p(t,Ze,h),e(Ze,Qo),p(t,ro,h),p(t,Z,h),e(Z,M),e(M,et),e(et,Xo),e(M,Yo),e(M,tt),e(tt,Zo),e(M,en),e(M,Tt),e(Tt,tn),e(M,on),e(Z,nn),e(Z,wt),e(wt,rn),p(t,so,h),p(t,N,h),e(N,sn),e(N,fe),e(fe,an),e(N,dn),e(N,me),e(me,ln),e(N,cn),p(t,ao,h),p(t,W,h),e(W,ee),e(ee,yt),k(ge,yt,null),e(W,hn),e(W,Et),e(Et,pn),p(t,io,h),p(t,$,h),k(_e,$,null),e($,un),e($,ve),e(ve,fn),e(ve,Bt),e(Bt,mn),e(ve,gn),e($,_n),e($,U),e(U,vn),e(U,ot),e(ot,bn),e(U,kn),e(U,nt),e(nt,Tn),e(U,wn),e($,yn),e($,zt),e(zt,En),e($,Bn),k(be,$,null),p(t,lo,h),p(t,H,h),e(H,te),e(te,$t),k(ke,$t,null),e(H,zn),e(H,Gt),e(Gt,$n),p(t,co,h),p(t,j,h),k(Te,j,null),e(j,Gn),e(j,we),e(we,qn),e(we,ye),e(ye,xn),e(we,Pn),e(j,Cn),e(j,Ee),e(Ee,Dn),e(Ee,rt),e(rt,jn),e(Ee,Sn),e(j,An),e(j,qt),p(t,ho,h),p(t,V,h),e(V,oe),e(oe,xt),k(Be,xt,null),e(V,Mn),e(V,Pt),e(Pt,Nn),p(t,po,h),p(t,m,h),k(ze,m,null),e(m,Ln),e(m,Ct),e(Ct,Fn),e(m,On),e(m,$e),e($e,In),e($e,st),e(st,Rn),e($e,Wn),e(m,Un),e(m,Ge),e(Ge,Hn),e(Ge,qe),e(qe,Vn),e(Ge,Jn),e(m,Kn),e(m,xe),e(xe,Qn),e(xe,Pe),e(Pe,Xn),e(xe,Yn),e(m,Zn),e(m,J),e(J,er),e(J,at),e(at,tr),e(J,or),e(J,Ce),e(Ce,nr),e(J,rr),e(m,sr),e(m,B),e(B,ar),e(B,Dt),e(Dt,ir),e(B,dr),e(B,jt),e(jt,lr),e(B,cr),e(B,St),e(St,hr),e(B,pr),e(B,At),e(At,ur),e(B,fr),e(B,Mt),e(Mt,mr),e(B,gr),e(B,Nt),e(Nt,_r),e(B,vr),e(m,br),e(m,x),k(De,x,null),e(x,kr),e(x,K),e(K,Tr),e(K,it),e(it,wr),e(K,yr),e(K,Lt),e(Lt,Er),e(K,Br),e(x,zr),k(ne,x,null),e(x,$r),e(x,Ft),e(Ft,Gr),e(x,qr),k(je,x,null),p(t,uo,h),p(t,Q,h),e(Q,re),e(re,Ot),k(Se,Ot,null),e(Q,xr),e(Q,It),e(It,Pr),p(t,fo,h),p(t,G,h),k(Ae,G,null),e(G,Cr),e(G,Me),e(Me,Dr),e(Me,Rt),e(Rt,jr),e(Me,Sr),e(G,Ar),e(G,Ne),e(Ne,Mr),e(Ne,dt),e(dt,Nr),e(Ne,Lr),e(G,Fr),e(G,Le),e(Le,Or),e(Le,Fe),e(Fe,Ir),e(Le,Rr),e(G,Wr),e(G,P),k(Oe,P,null),e(P,Ur),e(P,X),e(X,Hr),e(X,lt),e(lt,Vr),e(X,Jr),e(X,Wt),e(Wt,Kr),e(X,Q
r),e(P,Xr),k(se,P,null),e(P,Yr),e(P,Ut),e(Ut,Zr),e(P,es),k(Ie,P,null),mo=!0},p(t,[h]){const Re={};h&2&&(Re.$$scope={dirty:h,ctx:t}),ne.$set(Re);const Ht={};h&2&&(Ht.$$scope={dirty:h,ctx:t}),se.$set(Ht)},i(t){mo||(T(_.$$.fragment,t),T(de.$$.fragment,t),T(he.$$.fragment,t),T(ue.$$.fragment,t),T(ge.$$.fragment,t),T(_e.$$.fragment,t),T(be.$$.fragment,t),T(ke.$$.fragment,t),T(Te.$$.fragment,t),T(Be.$$.fragment,t),T(ze.$$.fragment,t),T(De.$$.fragment,t),T(ne.$$.fragment,t),T(je.$$.fragment,t),T(Se.$$.fragment,t),T(Ae.$$.fragment,t),T(Oe.$$.fragment,t),T(se.$$.fragment,t),T(Ie.$$.fragment,t),mo=!0)},o(t){w(_.$$.fragment,t),w(de.$$.fragment,t),w(he.$$.fragment,t),w(ue.$$.fragment,t),w(ge.$$.fragment,t),w(_e.$$.fragment,t),w(be.$$.fragment,t),w(ke.$$.fragment,t),w(Te.$$.fragment,t),w(Be.$$.fragment,t),w(ze.$$.fragment,t),w(De.$$.fragment,t),w(ne.$$.fragment,t),w(je.$$.fragment,t),w(Se.$$.fragment,t),w(Ae.$$.fragment,t),w(Oe.$$.fragment,t),w(se.$$.fragment,t),w(Ie.$$.fragment,t),mo=!1},d(t){o(u),t&&o(q),t&&o(f),y(_),t&&o(Jt),t&&o(R),y(de),t&&o(Kt),t&&o(A),t&&o(Qt),t&&o(He),t&&o(Xt),t&&o(Ve),t&&o(Yt),t&&o(Je),t&&o(Zt),t&&o(Ke),t&&o(eo),y(he,t),t&&o(to),t&&o(Xe),t&&o(oo),y(ue,t),t&&o(no),t&&o(Ze),t&&o(ro),t&&o(Z),t&&o(so),t&&o(N),t&&o(ao),t&&o(W),y(ge),t&&o(io),t&&o($),y(_e),y(be),t&&o(lo),t&&o(H),y(ke),t&&o(co),t&&o(j),y(Te),t&&o(ho),t&&o(V),y(Be),t&&o(po),t&&o(m),y(ze),y(De),y(ne),y(je),t&&o(uo),t&&o(Q),y(Se),t&&o(fo),t&&o(G),y(Ae),y(Oe),y(se),y(Ie)}}}const ga={local:"bertgeneration",sections:[{local:"overview",title:"Overview"},{local:"transformers.BertGenerationConfig",title:"BertGenerationConfig"},{local:"transformers.BertGenerationTokenizer",title:"BertGenerationTokenizer"},{local:"transformers.BertGenerationEncoder",title:"BertGenerationEncoder"},{local:"transformers.BertGenerationDecoder",title:"BertGenerationDecoder"}],title:"BertGeneration"};function _a(ie,u,q){let{fw:f}=u;return ie.$$set=g=>{"fw"in g&&q(0,f=g.fw)},[f]}class Ea extends la{constructor(u){super();ca(this,u,_a,ma,ha,{fw:0})}}export{Ea as default,ga as metadata};
9,981
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/marian.mdx-dd33c746.js
import{S as ef,i as tf,s as nf,e as o,k as d,w as v,t as r,L as of,c as s,d as t,m as l,a,x as k,h as i,b as c,J as e,g as h,y as T,q as M,o as y,B as b}from"../../chunks/vendor-b1433968.js";import{T as cn}from"../../chunks/Tip-c3840994.js";import{D as Z}from"../../chunks/Docstring-ff504c58.js";import{C as $e}from"../../chunks/CodeBlock-a320dbd7.js";import{I as he}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function sf(C){let u,z,f,g,w;return{c(){u=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=r("Module"),w=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var _=a(u);z=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var $=a(f);g=i($,"Module"),$.forEach(t),w=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(x,_){h(x,u,_),e(u,z),e(u,f),e(f,g),e(u,w)},d(x){x&&t(u)}}}function af(C){let u,z,f,g,w;return{c(){u=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=r("Module"),w=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var _=a(u);z=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var $=a(f);g=i($,"Module"),$.forEach(t),w=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(x,_){h(x,u,_),e(u,z),e(u,f),e(f,g),e(u,w)},d(x){x&&t(u)}}}function rf(C){let u,z,f,g,w,x,_,$,Ae,ue,E,ve,X,Se,Q,ee,Oe,je,V,A,Ee,I,P,F,te,ne,ke,De,G,q,Te,N,He,oe,Me,se,ae,re,Ue,S,We,O,D;return{c(){u=o("p"),z=r("TF 2.0 models accepts two formats as inputs:"),f=d(),g=o("ul"),w=o("li"),x=r("having all inputs as keyword arguments (like PyTorch models), or"),_=d(),$=o("li"),Ae=r("having all inputs as a list, tuple or dict in the first positional arguments."),ue=d(),E=o("p"),ve=r("This second option is useful when using "),X=o("code"),Se=r("tf.keras.Model.fit"),Q=r(` method which currently requires having all the tensors in the first argument of the model call function: `),ee=o("code"),Oe=r("model(inputs)"),je=r("."),V=d(),A=o("p"),Ee=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),I=d(),P=o("ul"),F=o("li"),te=r("a single Tensor with "),ne=o("code"),ke=r("input_ids"),De=r(" only and nothing else: "),G=o("code"),q=r("model(input_ids)"),Te=d(),N=o("li"),He=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),oe=o("code"),Me=r("model([input_ids, attention_mask])"),se=r(" or "),ae=o("code"),re=r("model([input_ids, attention_mask, token_type_ids])"),Ue=d(),S=o("li"),We=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),O=o("code"),D=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=s(m,"P",{});var j=a(u);z=i(j,"TF 2.0 models accepts two formats as 
inputs:"),j.forEach(t),f=l(m),g=s(m,"UL",{});var ye=a(g);w=s(ye,"LI",{});var ot=a(w);x=i(ot,"having all inputs as keyword arguments (like PyTorch models), or"),ot.forEach(t),_=l(ye),$=s(ye,"LI",{});var be=a($);Ae=i(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),ye.forEach(t),ue=l(m),E=s(m,"P",{});var H=a(E);ve=i(H,"This second option is useful when using "),X=s(H,"CODE",{});var st=a(X);Se=i(st,"tf.keras.Model.fit"),st.forEach(t),Q=i(H,` method which currently requires having all the tensors in the first argument of the model call function: `),ee=s(H,"CODE",{});var at=a(ee);Oe=i(at,"model(inputs)"),at.forEach(t),je=i(H,"."),H.forEach(t),V=l(m),A=s(m,"P",{});var Re=a(A);Ee=i(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(t),I=l(m),P=s(m,"UL",{});var L=a(P);F=s(L,"LI",{});var ie=a(F);te=i(ie,"a single Tensor with "),ne=s(ie,"CODE",{});var rt=a(ne);ke=i(rt,"input_ids"),rt.forEach(t),De=i(ie," only and nothing else: "),G=s(ie,"CODE",{});var Be=a(G);q=i(Be,"model(input_ids)"),Be.forEach(t),ie.forEach(t),Te=l(L),N=s(L,"LI",{});var U=a(N);He=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),oe=s(U,"CODE",{});var it=a(oe);Me=i(it,"model([input_ids, attention_mask])"),it.forEach(t),se=i(U," or "),ae=s(U,"CODE",{});var dt=a(ae);re=i(dt,"model([input_ids, attention_mask, token_type_ids])"),dt.forEach(t),U.forEach(t),Ue=l(L),S=s(L,"LI",{});var de=a(S);We=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),O=s(de,"CODE",{});var xe=a(O);D=i(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),de.forEach(t),L.forEach(t)},m(m,j){h(m,u,j),e(u,z),h(m,f,j),h(m,g,j),e(g,w),e(w,x),e(g,_),e(g,$),e($,Ae),h(m,ue,j),h(m,E,j),e(E,ve),e(E,X),e(X,Se),e(E,Q),e(E,ee),e(ee,Oe),e(E,je),h(m,V,j),h(m,A,j),e(A,Ee),h(m,I,j),h(m,P,j),e(P,F),e(F,te),e(F,ne),e(ne,ke),e(F,De),e(F,G),e(G,q),e(P,Te),e(P,N),e(N,He),e(N,oe),e(oe,Me),e(N,se),e(N,ae),e(ae,re),e(P,Ue),e(P,S),e(S,We),e(S,O),e(O,D)},d(m){m&&t(u),m&&t(f),m&&t(g),m&&t(ue),m&&t(E),m&&t(V),m&&t(A),m&&t(I),m&&t(P)}}}function df(C){let u,z,f,g,w;return{c(){u=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=r("Module"),w=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var _=a(u);z=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var $=a(f);g=i($,"Module"),$.forEach(t),w=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(x,_){h(x,u,_),e(u,z),e(u,f),e(f,g),e(u,w)},d(x){x&&t(u)}}}function lf(C){let u,z,f,g,w,x,_,$,Ae,ue,E,ve,X,Se,Q,ee,Oe,je,V,A,Ee,I,P,F,te,ne,ke,De,G,q,Te,N,He,oe,Me,se,ae,re,Ue,S,We,O,D;return{c(){u=o("p"),z=r("TF 2.0 models accepts two formats as inputs:"),f=d(),g=o("ul"),w=o("li"),x=r("having all inputs as keyword arguments (like PyTorch models), or"),_=d(),$=o("li"),Ae=r("having all inputs as a list, tuple or dict in the first positional arguments."),ue=d(),E=o("p"),ve=r("This second option is useful when using "),X=o("code"),Se=r("tf.keras.Model.fit"),Q=r(` 
method which currently requires having all the tensors in the first argument of the model call function: `),ee=o("code"),Oe=r("model(inputs)"),je=r("."),V=d(),A=o("p"),Ee=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),I=d(),P=o("ul"),F=o("li"),te=r("a single Tensor with "),ne=o("code"),ke=r("input_ids"),De=r(" only and nothing else: "),G=o("code"),q=r("model(input_ids)"),Te=d(),N=o("li"),He=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),oe=o("code"),Me=r("model([input_ids, attention_mask])"),se=r(" or "),ae=o("code"),re=r("model([input_ids, attention_mask, token_type_ids])"),Ue=d(),S=o("li"),We=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),O=o("code"),D=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=s(m,"P",{});var j=a(u);z=i(j,"TF 2.0 models accepts two formats as inputs:"),j.forEach(t),f=l(m),g=s(m,"UL",{});var ye=a(g);w=s(ye,"LI",{});var ot=a(w);x=i(ot,"having all inputs as keyword arguments (like PyTorch models), or"),ot.forEach(t),_=l(ye),$=s(ye,"LI",{});var be=a($);Ae=i(be,"having all inputs as a list, tuple or dict in the first positional arguments."),be.forEach(t),ye.forEach(t),ue=l(m),E=s(m,"P",{});var H=a(E);ve=i(H,"This second option is useful when using "),X=s(H,"CODE",{});var st=a(X);Se=i(st,"tf.keras.Model.fit"),st.forEach(t),Q=i(H,` method which currently requires having all the tensors in the first argument of the model call function: `),ee=s(H,"CODE",{});var at=a(ee);Oe=i(at,"model(inputs)"),at.forEach(t),je=i(H,"."),H.forEach(t),V=l(m),A=s(m,"P",{});var Re=a(A);Ee=i(Re,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Re.forEach(t),I=l(m),P=s(m,"UL",{});var L=a(P);F=s(L,"LI",{});var ie=a(F);te=i(ie,"a single Tensor with "),ne=s(ie,"CODE",{});var rt=a(ne);ke=i(rt,"input_ids"),rt.forEach(t),De=i(ie," only and nothing else: "),G=s(ie,"CODE",{});var Be=a(G);q=i(Be,"model(input_ids)"),Be.forEach(t),ie.forEach(t),Te=l(L),N=s(L,"LI",{});var U=a(N);He=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),oe=s(U,"CODE",{});var it=a(oe);Me=i(it,"model([input_ids, attention_mask])"),it.forEach(t),se=i(U," or "),ae=s(U,"CODE",{});var dt=a(ae);re=i(dt,"model([input_ids, attention_mask, token_type_ids])"),dt.forEach(t),U.forEach(t),Ue=l(L),S=s(L,"LI",{});var de=a(S);We=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),O=s(de,"CODE",{});var xe=a(O);D=i(xe,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),xe.forEach(t),de.forEach(t),L.forEach(t)},m(m,j){h(m,u,j),e(u,z),h(m,f,j),h(m,g,j),e(g,w),e(w,x),e(g,_),e(g,$),e($,Ae),h(m,ue,j),h(m,E,j),e(E,ve),e(E,X),e(X,Se),e(E,Q),e(E,ee),e(ee,Oe),e(E,je),h(m,V,j),h(m,A,j),e(A,Ee),h(m,I,j),h(m,P,j),e(P,F),e(F,te),e(F,ne),e(ne,ke),e(F,De),e(F,G),e(G,q),e(P,Te),e(P,N),e(N,He),e(N,oe),e(oe,Me),e(N,se),e(N,ae),e(ae,re),e(P,Ue),e(P,S),e(S,We),e(S,O),e(O,D)},d(m){m&&t(u),m&&t(f),m&&t(g),m&&t(ue),m&&t(E),m&&t(V),m&&t(A),m&&t(I),m&&t(P)}}}function cf(C){let u,z,f,g,w;return{c(){u=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=r("Module"),w=r(` instance afterwards instead of this since the former takes care of 
running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var _=a(u);z=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var $=a(f);g=i($,"Module"),$.forEach(t),w=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(x,_){h(x,u,_),e(u,z),e(u,f),e(f,g),e(u,w)},d(x){x&&t(u)}}}function pf(C){let u,z,f,g,w;return{c(){u=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=r("Module"),w=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var _=a(u);z=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var $=a(f);g=i($,"Module"),$.forEach(t),w=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(x,_){h(x,u,_),e(u,z),e(u,f),e(f,g),e(u,w)},d(x){x&&t(u)}}}function hf(C){let u,z,f,g,w;return{c(){u=o("p"),z=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=o("code"),g=r("Module"),w=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(x){u=s(x,"P",{});var _=a(u);z=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(_,"CODE",{});var $=a(f);g=i($,"Module"),$.forEach(t),w=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(x,_){h(x,u,_),e(u,z),e(u,f),e(f,g),e(u,w)},d(x){x&&t(u)}}}function uf(C){let 
u,z,f,g,w,x,_,$,Ae,ue,E,ve,X,Se,Q,ee,Oe,je,V,A,Ee,I,P,F,te,ne,ke,De,G,q,Te,N,He,oe,Me,se,ae,re,Ue,S,We,O,D,m,j,ye,ot,be,H,st,at,Re,L,ie,rt,Be,U,it,dt,de,xe,bi,Zo,xi,wi,zi,lt,pn,$i,Ps,ji,Ei,Pi,hn,qi,qs,Fi,Ni,Ci,ct,Ii,Fs,Li,Ai,Ns,Si,Oi,Di,Cs,un,Hi,Is,Ui,Wi,Ri,Ls,mn,Bi,fn,Vi,Gi,Qa,pt,Nt,As,gn,Ji,Ss,Yi,er,Pe,Xo,Ki,Os,Zi,Xi,_n,Qi,vn,ed,td,nd,ht,od,Ds,sd,ad,Hs,rd,id,dd,Us,ld,tr,ut,Ct,Ws,kn,cd,Rs,pd,nr,Je,Bs,hd,ud,Vs,Tn,md,fd,Gs,Mn,gd,or,mt,It,Js,yn,_d,Ys,vd,sr,qe,bn,kd,Ks,Td,Md,yd,xn,bd,Zs,xd,wd,zd,wn,$d,zn,jd,Ed,Pd,$n,qd,Xs,Fd,Nd,ar,Lt,Cd,jn,Id,Ld,rr,En,ir,Qo,Ad,dr,Pn,lr,ft,At,Qs,qn,Sd,ea,Od,cr,es,Dd,pr,Fn,hr,ts,Hd,ur,Nn,mr,gt,St,ta,Cn,Ud,na,Wd,fr,le,In,Rd,_t,Bd,ns,Vd,Gd,Ln,Jd,Yd,Kd,vt,Zd,os,Xd,Qd,ss,el,tl,nl,oa,ol,sl,An,gr,kt,Ot,sa,Sn,al,aa,rl,_r,W,On,il,Dn,dl,Hn,ll,cl,pl,Un,hl,as,ul,ml,fl,ra,gl,_l,Wn,vl,Dt,Rn,kl,ia,Tl,vr,Tt,Ht,da,Bn,Ml,la,yl,kr,we,Vn,bl,Gn,xl,rs,wl,zl,$l,Jn,jl,Yn,El,Pl,ql,me,Kn,Fl,Mt,Nl,is,Cl,Il,ca,Ll,Al,Sl,Ut,Ol,pa,Dl,Hl,Zn,Tr,yt,Wt,ha,Xn,Ul,ua,Wl,Mr,ze,Qn,Rl,eo,Bl,ds,Vl,Gl,Jl,to,Yl,no,Kl,Zl,Xl,J,oo,Ql,bt,ec,ls,tc,nc,ma,oc,sc,ac,Rt,rc,so,ic,ao,dc,lc,cc,fa,pc,hc,ro,yr,xt,Bt,ga,io,uc,_a,mc,br,lo,Ye,co,fc,va,gc,_c,po,xr,wt,Vt,ka,ho,vc,Ta,kc,wr,ce,uo,Tc,mo,Mc,cs,yc,bc,xc,fo,wc,go,zc,$c,jc,Gt,Ec,fe,_o,Pc,zt,qc,ps,Fc,Nc,Ma,Cc,Ic,Lc,Jt,Ac,ya,Sc,Oc,vo,zr,$t,Yt,ba,ko,Dc,xa,Hc,$r,pe,To,Uc,Mo,Wc,hs,Rc,Bc,Vc,yo,Gc,bo,Jc,Yc,Kc,Kt,Zc,Y,xo,Xc,jt,Qc,us,ep,tp,wa,np,op,sp,Zt,ap,wo,rp,zo,ip,dp,lp,za,cp,pp,$o,jr,Et,Xt,$a,jo,hp,ja,up,Er,R,Eo,mp,Po,fp,ms,gp,_p,vp,qo,kp,Fo,Tp,Mp,yp,Ea,bp,xp,Ve,Pa,No,wp,zp,qa,Co,$p,jp,Fa,Io,Ep,Pp,Na,Lo,qp,Fp,ge,Ao,Np,Pt,Cp,Ca,Ip,Lp,Ia,Ap,Sp,Op,Qt,Dp,La,Hp,Up,So,Pr,qt,en,Aa,Oo,Wp,Sa,Rp,qr,B,Do,Bp,Ho,Vp,fs,Gp,Jp,Yp,Uo,Kp,Wo,Zp,Xp,Qp,Oa,eh,th,Ge,Da,Ro,nh,oh,Ha,Bo,sh,ah,Ua,Vo,rh,ih,Wa,Go,dh,lh,_e,Jo,ch,Ft,ph,Ra,hh,uh,Ba,mh,fh,gh,tn,_h,Va,vh,kh,Yo,Fr;return x=new he({}),te=new he({}),gn=new he({}),kn=new he({}),yn=new he({}),En=new $e({props:{code:`from transformers import MarianMTModel, MarianTokenizer src_text = [ '>>fra<< this is a sentence in english that we want to translate to french', '>>por<< This should go to portuguese', '>>esp<< And this to Spanish' ] model_name = 'Helsinki-NLP/opus-mt-en-roa' tokenizer = MarianTokenizer.from_pretrained(model_name) print(tokenizer.supported_language_codes) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) [tokenizer.decode(t, skip_special_tokens=True) for t in translated],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianMTModel, MarianTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = [ <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;&gt;&gt;fra&lt;&lt; this is a sentence in english that we want to translate to french&#x27;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;&gt;&gt;por&lt;&lt; This should go to portuguese&#x27;</span>, <span class="hljs-meta">... 
</span> <span class="hljs-string">&#x27;&gt;&gt;esp&lt;&lt; And this to Spanish&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-roa&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.supported_language_codes) [<span class="hljs-string">&#x27;&gt;&gt;zlm_Latn&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;mfe&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;hat&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;pap&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ast&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;cat&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ind&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;glg&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;wln&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;spa&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;fra&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ron&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;por&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ita&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;oci&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;arg&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;min&lt;&lt;&#x27;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>translated = model.generate(**tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>[tokenizer.decode(t, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> t <span class="hljs-keyword">in</span> translated] [<span class="hljs-string">&quot;c&#x27;est une phrase en anglais que nous voulons traduire en fran\xE7ais&quot;</span>, <span class="hljs-string">&#x27;Isto deve ir para o portugu\xEAs.&#x27;</span>, <span class="hljs-string">&#x27;Y esto al espa\xF1ol&#x27;</span>]`}}),Pn=new $e({props:{code:`from huggingface_hub import list_models model_list = list_models() org = "Helsinki-NLP" model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] suffix = [x.split('/')[1] for x in model_ids] old_style_multi_models = [f'{org}/{s}' for s in suffix if s != s.lower()],`,highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> list_models model_list = list_models() org = <span class="hljs-string">&quot;Helsinki-NLP&quot;</span> model_ids = [x.modelId <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> model_list <span class="hljs-keyword">if</span> x.modelId.startswith(org)] suffix = [x.split(<span class="hljs-string">&#x27;/&#x27;</span>)[<span class="hljs-number">1</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> model_ids] old_style_multi_models = [<span class="hljs-string">f&#x27;<span class="hljs-subst">{org}</span>/<span class="hljs-subst">{s}</span>&#x27;</span> <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> suffix <span 
class="hljs-keyword">if</span> s != s.lower()]`}}),qn=new he({}),Fn=new $e({props:{code:`['Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU', 'Helsinki-NLP/opus-mt-ROMANCE-en', 'Helsinki-NLP/opus-mt-SCANDINAVIA-SCANDINAVIA', 'Helsinki-NLP/opus-mt-de-ZH', 'Helsinki-NLP/opus-mt-en-CELTIC', 'Helsinki-NLP/opus-mt-en-ROMANCE', 'Helsinki-NLP/opus-mt-es-NORWAY', 'Helsinki-NLP/opus-mt-fi-NORWAY', 'Helsinki-NLP/opus-mt-fi-ZH', 'Helsinki-NLP/opus-mt-fi_nb_no_nn_ru_sv_en-SAMI', 'Helsinki-NLP/opus-mt-sv-NORWAY', 'Helsinki-NLP/opus-mt-sv-ZH'] GROUP_MEMBERS = { 'ZH': ['cmn', 'cn', 'yue', 'ze_zh', 'zh_cn', 'zh_CN', 'zh_HK', 'zh_tw', 'zh_TW', 'zh_yue', 'zhs', 'zht', 'zh'], 'ROMANCE': ['fr', 'fr_BE', 'fr_CA', 'fr_FR', 'wa', 'frp', 'oc', 'ca', 'rm', 'lld', 'fur', 'lij', 'lmo', 'es', 'es_AR', 'es_CL', 'es_CO', 'es_CR', 'es_DO', 'es_EC', 'es_ES', 'es_GT', 'es_HN', 'es_MX', 'es_NI', 'es_PA', 'es_PE', 'es_PR', 'es_SV', 'es_UY', 'es_VE', 'pt', 'pt_br', 'pt_BR', 'pt_PT', 'gl', 'lad', 'an', 'mwl', 'it', 'it_IT', 'co', 'nap', 'scn', 'vec', 'sc', 'ro', 'la'], 'NORTH_EU': ['de', 'nl', 'fy', 'af', 'da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'], 'SCANDINAVIA': ['da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'], 'SAMI': ['se', 'sma', 'smj', 'smn', 'sms'], 'NORWAY': ['nb_NO', 'nb', 'nn_NO', 'nn', 'nog', 'no_nb', 'no'], 'CELTIC': ['ga', 'cy', 'br', 'gd', 'kw', 'gv'] },`,highlighted:`[<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-ROMANCE-en&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-SCANDINAVIA-SCANDINAVIA&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-de-ZH&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-CELTIC&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-ROMANCE&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-es-NORWAY&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-fi-NORWAY&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-fi-ZH&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-fi_nb_no_nn_ru_sv_en-SAMI&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-sv-NORWAY&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-sv-ZH&#x27;</span>] GROUP_MEMBERS = { <span class="hljs-string">&#x27;ZH&#x27;</span>: [<span class="hljs-string">&#x27;cmn&#x27;</span>, <span class="hljs-string">&#x27;cn&#x27;</span>, <span class="hljs-string">&#x27;yue&#x27;</span>, <span class="hljs-string">&#x27;ze_zh&#x27;</span>, <span class="hljs-string">&#x27;zh_cn&#x27;</span>, <span class="hljs-string">&#x27;zh_CN&#x27;</span>, <span class="hljs-string">&#x27;zh_HK&#x27;</span>, <span class="hljs-string">&#x27;zh_tw&#x27;</span>, <span class="hljs-string">&#x27;zh_TW&#x27;</span>, <span class="hljs-string">&#x27;zh_yue&#x27;</span>, <span class="hljs-string">&#x27;zhs&#x27;</span>, <span class="hljs-string">&#x27;zht&#x27;</span>, <span class="hljs-string">&#x27;zh&#x27;</span>], <span class="hljs-string">&#x27;ROMANCE&#x27;</span>: [<span class="hljs-string">&#x27;fr&#x27;</span>, <span class="hljs-string">&#x27;fr_BE&#x27;</span>, <span class="hljs-string">&#x27;fr_CA&#x27;</span>, <span class="hljs-string">&#x27;fr_FR&#x27;</span>, <span class="hljs-string">&#x27;wa&#x27;</span>, <span class="hljs-string">&#x27;frp&#x27;</span>, <span class="hljs-string">&#x27;oc&#x27;</span>, <span class="hljs-string">&#x27;ca&#x27;</span>, <span class="hljs-string">&#x27;rm&#x27;</span>, <span 
class="hljs-string">&#x27;lld&#x27;</span>, <span class="hljs-string">&#x27;fur&#x27;</span>, <span class="hljs-string">&#x27;lij&#x27;</span>, <span class="hljs-string">&#x27;lmo&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;es_AR&#x27;</span>, <span class="hljs-string">&#x27;es_CL&#x27;</span>, <span class="hljs-string">&#x27;es_CO&#x27;</span>, <span class="hljs-string">&#x27;es_CR&#x27;</span>, <span class="hljs-string">&#x27;es_DO&#x27;</span>, <span class="hljs-string">&#x27;es_EC&#x27;</span>, <span class="hljs-string">&#x27;es_ES&#x27;</span>, <span class="hljs-string">&#x27;es_GT&#x27;</span>, <span class="hljs-string">&#x27;es_HN&#x27;</span>, <span class="hljs-string">&#x27;es_MX&#x27;</span>, <span class="hljs-string">&#x27;es_NI&#x27;</span>, <span class="hljs-string">&#x27;es_PA&#x27;</span>, <span class="hljs-string">&#x27;es_PE&#x27;</span>, <span class="hljs-string">&#x27;es_PR&#x27;</span>, <span class="hljs-string">&#x27;es_SV&#x27;</span>, <span class="hljs-string">&#x27;es_UY&#x27;</span>, <span class="hljs-string">&#x27;es_VE&#x27;</span>, <span class="hljs-string">&#x27;pt&#x27;</span>, <span class="hljs-string">&#x27;pt_br&#x27;</span>, <span class="hljs-string">&#x27;pt_BR&#x27;</span>, <span class="hljs-string">&#x27;pt_PT&#x27;</span>, <span class="hljs-string">&#x27;gl&#x27;</span>, <span class="hljs-string">&#x27;lad&#x27;</span>, <span class="hljs-string">&#x27;an&#x27;</span>, <span class="hljs-string">&#x27;mwl&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&#x27;it_IT&#x27;</span>, <span class="hljs-string">&#x27;co&#x27;</span>, <span class="hljs-string">&#x27;nap&#x27;</span>, <span class="hljs-string">&#x27;scn&#x27;</span>, <span class="hljs-string">&#x27;vec&#x27;</span>, <span class="hljs-string">&#x27;sc&#x27;</span>, <span class="hljs-string">&#x27;ro&#x27;</span>, <span class="hljs-string">&#x27;la&#x27;</span>], <span class="hljs-string">&#x27;NORTH_EU&#x27;</span>: [<span class="hljs-string">&#x27;de&#x27;</span>, <span class="hljs-string">&#x27;nl&#x27;</span>, <span class="hljs-string">&#x27;fy&#x27;</span>, <span class="hljs-string">&#x27;af&#x27;</span>, <span class="hljs-string">&#x27;da&#x27;</span>, <span class="hljs-string">&#x27;fo&#x27;</span>, <span class="hljs-string">&#x27;is&#x27;</span>, <span class="hljs-string">&#x27;no&#x27;</span>, <span class="hljs-string">&#x27;nb&#x27;</span>, <span class="hljs-string">&#x27;nn&#x27;</span>, <span class="hljs-string">&#x27;sv&#x27;</span>], <span class="hljs-string">&#x27;SCANDINAVIA&#x27;</span>: [<span class="hljs-string">&#x27;da&#x27;</span>, <span class="hljs-string">&#x27;fo&#x27;</span>, <span class="hljs-string">&#x27;is&#x27;</span>, <span class="hljs-string">&#x27;no&#x27;</span>, <span class="hljs-string">&#x27;nb&#x27;</span>, <span class="hljs-string">&#x27;nn&#x27;</span>, <span class="hljs-string">&#x27;sv&#x27;</span>], <span class="hljs-string">&#x27;SAMI&#x27;</span>: [<span class="hljs-string">&#x27;se&#x27;</span>, <span class="hljs-string">&#x27;sma&#x27;</span>, <span class="hljs-string">&#x27;smj&#x27;</span>, <span class="hljs-string">&#x27;smn&#x27;</span>, <span class="hljs-string">&#x27;sms&#x27;</span>], <span class="hljs-string">&#x27;NORWAY&#x27;</span>: [<span class="hljs-string">&#x27;nb_NO&#x27;</span>, <span class="hljs-string">&#x27;nb&#x27;</span>, <span class="hljs-string">&#x27;nn_NO&#x27;</span>, <span class="hljs-string">&#x27;nn&#x27;</span>, <span 
class="hljs-string">&#x27;nog&#x27;</span>, <span class="hljs-string">&#x27;no_nb&#x27;</span>, <span class="hljs-string">&#x27;no&#x27;</span>], <span class="hljs-string">&#x27;CELTIC&#x27;</span>: [<span class="hljs-string">&#x27;ga&#x27;</span>, <span class="hljs-string">&#x27;cy&#x27;</span>, <span class="hljs-string">&#x27;br&#x27;</span>, <span class="hljs-string">&#x27;gd&#x27;</span>, <span class="hljs-string">&#x27;kw&#x27;</span>, <span class="hljs-string">&#x27;gv&#x27;</span>] }`}}),Nn=new $e({props:{code:`from transformers import MarianMTModel, MarianTokenizer src_text = [ '>>fr<< this is a sentence in english that we want to translate to french', '>>pt<< This should go to portuguese', '>>es<< And this to Spanish' ] model_name = 'Helsinki-NLP/opus-mt-en-ROMANCE' tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated],`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianMTModel, MarianTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = [ <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;&gt;&gt;fr&lt;&lt; this is a sentence in english that we want to translate to french&#x27;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;&gt;&gt;pt&lt;&lt; This should go to portuguese&#x27;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&#x27;&gt;&gt;es&lt;&lt; And this to Spanish&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-ROMANCE&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>translated = model.generate(**tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = [tokenizer.decode(t, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> t <span class="hljs-keyword">in</span> translated] [<span class="hljs-string">&quot;c&#x27;est une phrase en anglais que nous voulons traduire en fran\xE7ais&quot;</span>, <span class="hljs-string">&#x27;Isto deve ir para o portugu\xEAs.&#x27;</span>, <span class="hljs-string">&#x27;Y esto al espa\xF1ol&#x27;</span>]`}}),Cn=new he({}),In=new Z({props:{name:"class transformers.MarianConfig",anchor:"transformers.MarianConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 
0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 58100"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 58100"},{name:"eos_token_id",val:" = 0"},{name:"forced_eos_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/configuration_marian.py#L29",parametersDescription:[{anchor:"transformers.MarianConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianModel">MarianModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianModel">TFMarianModel</a>.`,name:"vocab_size"},{anchor:"transformers.MarianConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.MarianConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.MarianConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.MarianConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.MarianConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.MarianConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.MarianConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.MarianConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.MarianConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.MarianConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.MarianConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.MarianConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.MarianConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.MarianConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.MarianConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.MarianConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.MarianConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),An=new $e({props:{code:`from transformers import MarianModel, MarianConfig # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration configuration = MarianConfig() # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration model = MarianModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianModel, MarianConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MarianConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Sn=new he({}),On=new Z({props:{name:"class transformers.MarianTokenizer",anchor:"transformers.MarianTokenizer",parameters:[{name:"vocab",val:""},{name:"source_spm",val:""},{name:"target_spm",val:""},{name:"source_lang",val:" = None"},{name:"target_lang",val:" = None"},{name:"unk_token",val:" = '<unk>'"},{name:"eos_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"model_max_length",val:" = 512"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/tokenization_marian.py#L56",parametersDescription:[{anchor:"transformers.MarianTokenizer.source_spm",description:`<strong>source_spm</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary for the source language.`,name:"source_spm"},{anchor:"transformers.MarianTokenizer.target_spm",description:`<strong>target_spm</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary for the target language.`,name:"target_spm"},{anchor:"transformers.MarianTokenizer.source_lang",description:`<strong>source_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.`,name:"source_lang"},{anchor:"transformers.MarianTokenizer.target_lang",description:`<strong>target_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.`,name:"target_lang"},{anchor:"transformers.MarianTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MarianTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.MarianTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MarianTokenizer.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sentence length the model accepts.`,name:"model_max_length"},{anchor:"transformers.MarianTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;eop&gt;&quot;, &quot;&lt;eod&gt;&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.MarianTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),Wn=new $e({props:{code:`from transformers import MarianTokenizer tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de') src_texts = [ "I am a small frog.", "Tom asked his teacher for advice."] tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional inputs = tokenizer(src_texts, return_tensors="pt", padding=True) with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_texts, return_tensors="pt", padding=True) inputs["labels"] = labels["input_ids"] outputs = model(**inputs) should work,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>src_texts = [ <span class="hljs-string">&quot;I am a small frog.&quot;</span>, <span class="hljs-string">&quot;Tom asked his teacher for advice.&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_texts = [<span class="hljs-string">&quot;Ich bin ein kleiner Frosch.&quot;</span>, <span 
class="hljs-string">&quot;Tom bat seinen Lehrer um Rat.&quot;</span>] <span class="hljs-comment"># optional</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(src_texts, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(tgt_texts, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-comment"># keys [input_ids, attention_mask, labels].</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) should work`}}),Rn=new Z({props:{name:"as_target_tokenizer",anchor:"transformers.MarianTokenizer.as_target_tokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/tokenization_marian.py#L261"}}),Bn=new he({}),Vn=new Z({props:{name:"class transformers.MarianModel",anchor:"transformers.MarianModel",parameters:[{name:"config",val:": MarianConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_marian.py#L1085",parametersDescription:[{anchor:"transformers.MarianModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kn=new Z({props:{name:"forward",anchor:"transformers.MarianModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_marian.py#L1112",parametersDescription:[{anchor:"transformers.MarianModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MarianModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MarianModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.MarianModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MarianModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MarianModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MarianModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MarianModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MarianModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MarianModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MarianModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MarianModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MarianModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MarianModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MarianModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ut=new cn({props:{$$slots:{default:[sf]},$$scope:{ctx:C}}}),Zn=new $e({props:{code:`from transformers import MarianTokenizer, MarianModel tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de') model = MarianModel.from_pretrained('Helsinki-NLP/opus-mt-en-de') input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 decoder_input_ids = tokenizer("<pad> Studien haben gezeigt dass es hilfreich ist 
einen Hund zu besitzen", return_tensors="pt", add_special_tokens=False).input_ids # Batch size 1 outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, MarianModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianModel.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;&lt;pad&gt; Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen&quot;</span>, <span class="hljs-meta">... </span>return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Xn=new he({}),Qn=new Z({props:{name:"class transformers.MarianMTModel",anchor:"transformers.MarianMTModel",parameters:[{name:"config",val:": MarianConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_marian.py#L1209",parametersDescription:[{anchor:"transformers.MarianMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),oo=new Z({props:{name:"forward",anchor:"transformers.MarianMTModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_marian.py#L1259",parametersDescription:[{anchor:"transformers.MarianMTModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MarianMTModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MarianMTModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.MarianMTModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MarianMTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MarianMTModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MarianMTModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MarianMTModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MarianMTModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MarianMTModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MarianMTModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MarianMTModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MarianMTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MarianMTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MarianMTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MarianMTModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> 
<p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Rt=new cn({props:{$$slots:{default:[af]},$$scope:{ctx:C}}}),ro=new $e({props:{code:`from transformers import MarianTokenizer, MarianMTModel from typing import List src = 'fr' # source language trg = 'en' # target language sample_text = "o\xF9 est l'arr\xEAt de bus ?" 
model_name = f'Helsinki-NLP/opus-mt-{src}-{trg}' model = MarianMTModel.from_pretrained(model_name) tokenizer = MarianTokenizer.from_pretrained(model_name) batch = tokenizer([sample_text], return_tensors="pt") gen = model.generate(**batch) tokenizer.batch_decode(gen, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, MarianMTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">List</span> <span class="hljs-meta">&gt;&gt;&gt; </span>src = <span class="hljs-string">&#x27;fr&#x27;</span> <span class="hljs-comment"># source language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>trg = <span class="hljs-string">&#x27;en&#x27;</span> <span class="hljs-comment"># target language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sample_text = <span class="hljs-string">&quot;o\xF9 est l&#x27;arr\xEAt de bus ?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">f&#x27;Helsinki-NLP/opus-mt-<span class="hljs-subst">{src}</span>-<span class="hljs-subst">{trg}</span>&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer([sample_text], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>gen = model.generate(**batch) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(gen, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Where is the bus stop ?&quot;</span>`}}),io=new he({}),co=new Z({props:{name:"forward",anchor:"transformers.MarianForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_marian.py#L1430",parametersDescription:[{anchor:"transformers.MarianForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MarianForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MarianForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.MarianForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.MarianForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MarianForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MarianForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MarianForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.MarianForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.MarianForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MarianForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MarianForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),po=new $e({props:{code:`from transformers import MarianTokenizer, MarianForCausalLM tokenizer = MarianTokenizer.from_pretrained('facebook/bart-large') model = MarianForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, MarianForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianForCausalLM.from_pretrained(<span class="hljs-string">&#x27;facebook/bart-large&#x27;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ho=new he({}),uo=new Z({props:{name:"class transformers.TFMarianModel",anchor:"transformers.TFMarianModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_tf_marian.py#L1213",parametersDescription:[{anchor:"transformers.TFMarianModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gt=new cn({props:{$$slots:{default:[rf]},$$scope:{ctx:C}}}),_o=new Z({props:{name:"call",anchor:"transformers.TFMarianModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_tf_marian.py#L1225",parametersDescription:[{anchor:"transformers.TFMarianModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMarianModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMarianModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFMarianModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFMarianModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMarianModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFMarianModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFMarianModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFMarianModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFMarianModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFMarianModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMarianModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMarianModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMarianModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Jt=new cn({props:{$$slots:{default:[df]},$$scope:{ctx:C}}}),vo=new $e({props:{code:`from transformers import MarianTokenizer, TFMarianModel import tensorflow as tf tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de') model = TFMarianModel.from_pretrained('Helsinki-NLP/opus-mt-en-de') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, TFMarianModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMarianModel.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ko=new he({}),To=new Z({props:{name:"class transformers.TFMarianMTModel",anchor:"transformers.TFMarianMTModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_tf_marian.py#L1320",parametersDescription:[{anchor:"transformers.TFMarianMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new cn({props:{$$slots:{default:[lf]},$$scope:{ctx:C}}}),xo=new Z({props:{name:"call",anchor:"transformers.TFMarianMTModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_tf_marian.py#L1353",parametersDescription:[{anchor:"transformers.TFMarianMTModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMarianMTModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMarianMTModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFMarianMTModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFMarianMTModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMarianMTModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFMarianMTModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFMarianMTModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFMarianMTModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFMarianMTModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFMarianMTModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMarianMTModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMarianMTModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMarianMTModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMarianMTModel.call.labels",description:`<strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> 
<p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Zt=new cn({props:{$$slots:{default:[cf]},$$scope:{ctx:C}}}),$o=new $e({props:{code:`from transformers import MarianTokenizer, TFMarianMTModel from typing import List src = 'fr' # source language trg = 'en' # target language sample_text = "o\xF9 est l'arr\xEAt de bus ?" model_name = f'Helsinki-NLP/opus-mt-{src}-{trg}' model = TFMarianMTModel.from_pretrained(model_name) tokenizer = MarianTokenizer.from_pretrained(model_name) batch = tokenizer([sample_text], return_tensors="tf") gen = model.generate(**batch) tokenizer.batch_decode(gen, skip_special_tokens=True),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, TFMarianMTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">List</span> <span class="hljs-meta">&gt;&gt;&gt; </span>src = <span class="hljs-string">&#x27;fr&#x27;</span> <span class="hljs-comment"># source language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>trg = <span class="hljs-string">&#x27;en&#x27;</span> <span class="hljs-comment"># target language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sample_text = <span class="hljs-string">&quot;o\xF9 est l&#x27;arr\xEAt de bus ?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">f&#x27;Helsinki-NLP/opus-mt-<span class="hljs-subst">{src}</span>-<span class="hljs-subst">{trg}</span>&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer([sample_text], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>gen = model.generate(**batch) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(gen, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Where is the bus stop ?&quot;</span>`}}),jo=new he({}),Eo=new Z({props:{name:"class transformers.FlaxMarianModel",anchor:"transformers.FlaxMarianModel",parameters:[{name:"config",val:": 
MarianConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_flax_marian.py#L1209",parametersDescription:[{anchor:"transformers.FlaxMarianModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMarianModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Ao=new Z({props:{name:"__call__",anchor:"transformers.FlaxMarianPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_flax_marian.py#L1144",parametersDescription:[{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qt=new cn({props:{$$slots:{default:[pf]},$$scope:{ctx:C}}}),So=new $e({props:{code:`from transformers import MarianTokenizer, FlaxMarianModel tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de') model = FlaxMarianModel.from_pretrained('Helsinki-NLP/opus-mt-en-de') inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, FlaxMarianModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMarianModel.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; 
</span>last_hidden_states = outputs.last_hidden_state`}}),Oo=new he({}),Do=new Z({props:{name:"class transformers.FlaxMarianMTModel",anchor:"transformers.FlaxMarianMTModel",parameters:[{name:"config",val:": MarianConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_flax_marian.py#L1295",parametersDescription:[{anchor:"transformers.FlaxMarianMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMarianMTModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Jo=new Z({props:{name:"__call__",anchor:"transformers.FlaxMarianPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/marian/modeling_flax_marian.py#L1144",parametersDescription:[{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMarianPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding 
outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tn=new cn({props:{$$slots:{default:[hf]},$$scope:{ctx:C}}}),Yo=new $e({props:{code:`from transformers import MarianTokenizer, FlaxMarianMTModel model = FlaxMarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-en-de') tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de') text = "My friends are cool but they eat too many carbs." 
input_ids = tokenizer(text, max_length=64, return_tensors='jax').input_ids sequences = model.generate(input_ids, max_length=64, num_beams=2).sequences outputs = tokenizer.batch_decode(sequences, skip_special_tokens=True) # should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, FlaxMarianMTModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMarianMTModel.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-de&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(text, max_length=<span class="hljs-number">64</span>, return_tensors=<span class="hljs-string">&#x27;jax&#x27;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>sequences = model.generate(input_ids, max_length=<span class="hljs-number">64</span>, num_beams=<span class="hljs-number">2</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = tokenizer.batch_decode(sequences, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*</span>`}}),{c(){u=o("meta"),z=d(),f=o("h1"),g=o("a"),w=o("span"),v(x.$$.fragment),_=d(),$=o("span"),Ae=r("MarianMT"),ue=d(),E=o("p"),ve=o("strong"),X=r("Bugs:"),Se=r(" If you see something strange, file a "),Q=o("a"),ee=r("Github Issue"),Oe=r(` and assign @patrickvonplaten.`),je=d(),V=o("p"),A=r("Translations should be similar, but not identical to output in the test set linked to in each model card."),Ee=d(),I=o("h2"),P=o("a"),F=o("span"),v(te.$$.fragment),ne=d(),ke=o("span"),De=r("Implementation Notes"),G=d(),q=o("ul"),Te=o("li"),N=o("p"),He=r("Each model is about 298 MB on disk, there are more than 1,000 models."),oe=d(),Me=o("li"),se=o("p"),ae=r("The list of supported language pairs can be found "),re=o("a"),Ue=r("here"),S=r("."),We=d(),O=o("li"),D=o("p"),m=r("Models were originally trained by "),j=o("a"),ye=r("J\xF6rg Tiedemann"),ot=r(" using the "),be=o("a"),H=r("Marian"),st=r(" C++ library, which supports fast training and translation."),at=d(),Re=o("li"),L=o("p"),ie=r(`All models are transformer encoder-decoders with 6 layers in each component. 
Each model\u2019s performance is documented in a model card.`),rt=d(),Be=o("li"),U=o("p"),it=r("The 80 opus models that require BPE preprocessing are not supported."),dt=d(),de=o("li"),xe=o("p"),bi=r("The modeling code is the same as "),Zo=o("a"),xi=r("BartForConditionalGeneration"),wi=r(" with a few minor modifications:"),zi=d(),lt=o("ul"),pn=o("li"),$i=r("static (sinusoid) positional embeddings ("),Ps=o("code"),ji=r("MarianConfig.static_position_embeddings=True"),Ei=r(")"),Pi=d(),hn=o("li"),qi=r("no layernorm_embedding ("),qs=o("code"),Fi=r("MarianConfig.normalize_embedding=False"),Ni=r(")"),Ci=d(),ct=o("li"),Ii=r("the model starts generating with "),Fs=o("code"),Li=r("pad_token_id"),Ai=r(` (which has 0 as a token_embedding) as the prefix (Bart uses `),Ns=o("code"),Si=r("<s/>"),Oi=r("),"),Di=d(),Cs=o("li"),un=o("p"),Hi=r("Code to bulk convert models can be found in "),Is=o("code"),Ui=r("convert_marian_to_pytorch.py"),Wi=r("."),Ri=d(),Ls=o("li"),mn=o("p"),Bi=r("This model was contributed by "),fn=o("a"),Vi=r("sshleifer"),Gi=r("."),Qa=d(),pt=o("h2"),Nt=o("a"),As=o("span"),v(gn.$$.fragment),Ji=d(),Ss=o("span"),Yi=r("Naming"),er=d(),Pe=o("ul"),Xo=o("li"),Ki=r("All model names use the following format: "),Os=o("code"),Zi=r("Helsinki-NLP/opus-mt-{src}-{tgt}"),Xi=d(),_n=o("li"),Qi=r("The language codes used to name models are inconsistent. Two digit codes can usually be found "),vn=o("a"),ed=r("here"),td=r(`, three digit codes require googling \u201Clanguage code {code}\u201C.`),nd=d(),ht=o("li"),od=r("Codes formatted like "),Ds=o("code"),sd=r("es_AR"),ad=r(" are usually "),Hs=o("code"),rd=r("code_{region}"),id=r(". That one is Spanish from Argentina."),dd=d(),Us=o("li"),ld=r(`The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, the second group use a combination of ISO-639-5 codes and ISO-639-2 codes.`),tr=d(),ut=o("h2"),Ct=o("a"),Ws=o("span"),v(kn.$$.fragment),cd=d(),Rs=o("span"),pd=r("Examples"),nr=d(),Je=o("ul"),Bs=o("li"),hd=r(`Since Marian models are smaller than many other translation models available in the library, they can be useful for fine-tuning experiments and integration tests.`),ud=d(),Vs=o("li"),Tn=o("a"),md=r("Fine-tune on GPU"),fd=d(),Gs=o("li"),Mn=o("a"),gd=r("Fine-tune on GPU with pytorch-lightning"),or=d(),mt=o("h2"),It=o("a"),Js=o("span"),v(yn.$$.fragment),_d=d(),Ys=o("span"),vd=r("Multilingual Models"),sr=d(),qe=o("ul"),bn=o("li"),kd=r("All model names use the following format: "),Ks=o("code"),Td=r("Helsinki-NLP/opus-mt-{src}-{tgt}"),Md=r(":"),yd=d(),xn=o("li"),bd=r(`If a model can output multiple languages, and you should specify a language code by prepending the desired output language to the `),Zs=o("code"),xd=r("src_text"),wd=r("."),zd=d(),wn=o("li"),$d=r("You can see a models\u2019s supported language codes in its model card, under target constituents, like in "),zn=o("a"),jd=r("opus-mt-en-roa"),Ed=r("."),Pd=d(),$n=o("li"),qd=r("Note that if a model is only multilingual on the source side, like "),Xs=o("code"),Fd=r("Helsinki-NLP/opus-mt-roa-en"),Nd=r(`, no language codes are required.`),ar=d(),Lt=o("p"),Cd=r("New multi-lingual models from the "),jn=o("a"),Id=r("Tatoeba-Challenge repo"),Ld=r(` require 3 character language codes:`),rr=d(),v(En.$$.fragment),ir=d(),Qo=o("p"),Ad=r("Here is the code to see all available pretrained models on the hub:"),dr=d(),v(Pn.$$.fragment),lr=d(),ft=o("h2"),At=o("a"),Qs=o("span"),v(qn.$$.fragment),Sd=d(),ea=o("span"),Od=r("Old Style Multi-Lingual Models"),cr=d(),es=o("p"),Dd=r(`These 
are the old style multi-lingual models ported from the OPUS-MT-Train repo: and the members of each language group:`),pr=d(),v(Fn.$$.fragment),hr=d(),ts=o("p"),Hd=r("Example of translating english to many romance languages, using old-style 2 character language codes"),ur=d(),v(Nn.$$.fragment),mr=d(),gt=o("h2"),St=o("a"),ta=o("span"),v(Cn.$$.fragment),Ud=d(),na=o("span"),Wd=r("MarianConfig"),fr=d(),le=o("div"),v(In.$$.fragment),Rd=d(),_t=o("p"),Bd=r("This is the configuration class to store the configuration of a "),ns=o("a"),Vd=r("MarianModel"),Gd=r(`. It is used to instantiate an Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Marian `),Ln=o("a"),Jd=r("Helsinki-NLP/opus-mt-en-de"),Yd=r(" architecture."),Kd=d(),vt=o("p"),Zd=r("Configuration objects inherit from "),os=o("a"),Xd=r("PretrainedConfig"),Qd=r(` and can be used to control the model outputs. Read the documentation from `),ss=o("a"),el=r("PretrainedConfig"),tl=r(" for more information."),nl=d(),oa=o("p"),ol=r("Examples:"),sl=d(),v(An.$$.fragment),gr=d(),kt=o("h2"),Ot=o("a"),sa=o("span"),v(Sn.$$.fragment),al=d(),aa=o("span"),rl=r("MarianTokenizer"),_r=d(),W=o("div"),v(On.$$.fragment),il=d(),Dn=o("p"),dl=r("Construct a Marian tokenizer. Based on "),Hn=o("a"),ll=r("SentencePiece"),cl=r("."),pl=d(),Un=o("p"),hl=r("This tokenizer inherits from "),as=o("a"),ul=r("PreTrainedTokenizer"),ml=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),fl=d(),ra=o("p"),gl=r("Examples:"),_l=d(),v(Wn.$$.fragment),vl=d(),Dt=o("div"),v(Rn.$$.fragment),kl=d(),ia=o("p"),Tl=r(`Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),vr=d(),Tt=o("h2"),Ht=o("a"),da=o("span"),v(Bn.$$.fragment),Ml=d(),la=o("span"),yl=r("MarianModel"),kr=d(),we=o("div"),v(Vn.$$.fragment),bl=d(),Gn=o("p"),xl=r(`The bare Marian Model outputting raw hidden-states without any specific head on top. This model inherits from `),rs=o("a"),wl=r("PreTrainedModel"),zl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$l=d(),Jn=o("p"),jl=r("This model is also a PyTorch "),Yn=o("a"),El=r("torch.nn.Module"),Pl=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ql=d(),me=o("div"),v(Kn.$$.fragment),Fl=d(),Mt=o("p"),Nl=r("The "),is=o("a"),Cl=r("MarianModel"),Il=r(" forward method, overrides the "),ca=o("code"),Ll=r("__call__"),Al=r(" special method."),Sl=d(),v(Ut.$$.fragment),Ol=d(),pa=o("p"),Dl=r("Example:"),Hl=d(),v(Zn.$$.fragment),Tr=d(),yt=o("h2"),Wt=o("a"),ha=o("span"),v(Xn.$$.fragment),Ul=d(),ua=o("span"),Wl=r("MarianMTModel"),Mr=d(),ze=o("div"),v(Qn.$$.fragment),Rl=d(),eo=o("p"),Bl=r(`The Marian Model with a language modeling head. Can be used for summarization. This model inherits from `),ds=o("a"),Vl=r("PreTrainedModel"),Gl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jl=d(),to=o("p"),Yl=r("This model is also a PyTorch "),no=o("a"),Kl=r("torch.nn.Module"),Zl=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xl=d(),J=o("div"),v(oo.$$.fragment),Ql=d(),bt=o("p"),ec=r("The "),ls=o("a"),tc=r("MarianMTModel"),nc=r(" forward method, overrides the "),ma=o("code"),oc=r("__call__"),sc=r(" special method."),ac=d(),v(Rt.$$.fragment),rc=d(),so=o("p"),ic=r(`Pytorch version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available models are listed `),ao=o("a"),dc=r("here"),lc=r("."),cc=d(),fa=o("p"),pc=r("Examples:"),hc=d(),v(ro.$$.fragment),yr=d(),xt=o("h2"),Bt=o("a"),ga=o("span"),v(io.$$.fragment),uc=d(),_a=o("span"),mc=r("MarianForCausalLM"),br=d(),lo=o("div"),Ye=o("div"),v(co.$$.fragment),fc=d(),va=o("p"),gc=r("Example:"),_c=d(),v(po.$$.fragment),xr=d(),wt=o("h2"),Vt=o("a"),ka=o("span"),v(ho.$$.fragment),vc=d(),Ta=o("span"),kc=r("TFMarianModel"),wr=d(),ce=o("div"),v(uo.$$.fragment),Tc=d(),mo=o("p"),Mc=r(`The bare MARIAN Model outputting raw hidden-states without any specific head on top. This model inherits from `),cs=o("a"),yc=r("TFPreTrainedModel"),bc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xc=d(),fo=o("p"),wc=r("This model is also a "),go=o("a"),zc=r("tf.keras.Model"),$c=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jc=d(),v(Gt.$$.fragment),Ec=d(),fe=o("div"),v(_o.$$.fragment),Pc=d(),zt=o("p"),qc=r("The "),ps=o("a"),Fc=r("TFMarianModel"),Nc=r(" forward method, overrides the "),Ma=o("code"),Cc=r("__call__"),Ic=r(" special method."),Lc=d(),v(Jt.$$.fragment),Ac=d(),ya=o("p"),Sc=r("Example:"),Oc=d(),v(vo.$$.fragment),zr=d(),$t=o("h2"),Yt=o("a"),ba=o("span"),v(ko.$$.fragment),Dc=d(),xa=o("span"),Hc=r("TFMarianMTModel"),$r=d(),pe=o("div"),v(To.$$.fragment),Uc=d(),Mo=o("p"),Wc=r(`The MARIAN Model with a language modeling head. Can be used for summarization. This model inherits from `),hs=o("a"),Rc=r("TFPreTrainedModel"),Bc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Vc=d(),yo=o("p"),Gc=r("This model is also a "),bo=o("a"),Jc=r("tf.keras.Model"),Yc=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Kc=d(),v(Kt.$$.fragment),Zc=d(),Y=o("div"),v(xo.$$.fragment),Xc=d(),jt=o("p"),Qc=r("The "),us=o("a"),ep=r("TFMarianMTModel"),tp=r(" forward method, overrides the "),wa=o("code"),np=r("__call__"),op=r(" special method."),sp=d(),v(Zt.$$.fragment),ap=d(),wo=o("p"),rp=r(`TF version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available models are listed `),zo=o("a"),ip=r("here"),dp=r("."),lp=d(),za=o("p"),cp=r("Examples:"),pp=d(),v($o.$$.fragment),jr=d(),Et=o("h2"),Xt=o("a"),$a=o("span"),v(jo.$$.fragment),hp=d(),ja=o("span"),up=r("FlaxMarianModel"),Er=d(),R=o("div"),v(Eo.$$.fragment),mp=d(),Po=o("p"),fp=r(`The bare Marian Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),ms=o("a"),gp=r("FlaxPreTrainedModel"),_p=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vp=d(),qo=o("p"),kp=r("This model is also a Flax Linen "),Fo=o("a"),Tp=r("flax.nn.Module"),Mp=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),yp=d(),Ea=o("p"),bp=r("Finally, this model supports inherent JAX features such as:"),xp=d(),Ve=o("ul"),Pa=o("li"),No=o("a"),wp=r("Just-In-Time (JIT) compilation"),zp=d(),qa=o("li"),Co=o("a"),$p=r("Automatic Differentiation"),jp=d(),Fa=o("li"),Io=o("a"),Ep=r("Vectorization"),Pp=d(),Na=o("li"),Lo=o("a"),qp=r("Parallelization"),Fp=d(),ge=o("div"),v(Ao.$$.fragment),Np=d(),Pt=o("p"),Cp=r("The "),Ca=o("code"),Ip=r("FlaxMarianPreTrainedModel"),Lp=r(" forward method, overrides the "),Ia=o("code"),Ap=r("__call__"),Sp=r(" special method."),Op=d(),v(Qt.$$.fragment),Dp=d(),La=o("p"),Hp=r("Example:"),Up=d(),v(So.$$.fragment),Pr=d(),qt=o("h2"),en=o("a"),Aa=o("span"),v(Oo.$$.fragment),Wp=d(),Sa=o("span"),Rp=r("FlaxMarianMTModel"),qr=d(),B=o("div"),v(Do.$$.fragment),Bp=d(),Ho=o("p"),Vp=r(`The MARIAN Model with a language modeling head. Can be used for translation. This model inherits from `),fs=o("a"),Gp=r("FlaxPreTrainedModel"),Jp=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yp=d(),Uo=o("p"),Kp=r("This model is also a Flax Linen "),Wo=o("a"),Zp=r("flax.nn.Module"),Xp=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Qp=d(),Oa=o("p"),eh=r("Finally, this model supports inherent JAX features such as:"),th=d(),Ge=o("ul"),Da=o("li"),Ro=o("a"),nh=r("Just-In-Time (JIT) compilation"),oh=d(),Ha=o("li"),Bo=o("a"),sh=r("Automatic Differentiation"),ah=d(),Ua=o("li"),Vo=o("a"),rh=r("Vectorization"),ih=d(),Wa=o("li"),Go=o("a"),dh=r("Parallelization"),lh=d(),_e=o("div"),v(Jo.$$.fragment),ch=d(),Ft=o("p"),ph=r("The "),Ra=o("code"),hh=r("FlaxMarianPreTrainedModel"),uh=r(" forward method, overrides the "),Ba=o("code"),mh=r("__call__"),fh=r(" special method."),gh=d(),v(tn.$$.fragment),_h=d(),Va=o("p"),vh=r("Example:"),kh=d(),v(Yo.$$.fragment),this.h()},l(n){const p=of('[data-svelte="svelte-1phssyn"]',document.head);u=s(p,"META",{name:!0,content:!0}),p.forEach(t),z=l(n),f=s(n,"H1",{class:!0});var Ko=a(f);g=s(Ko,"A",{id:!0,class:!0,href:!0});var Ga=a(g);w=s(Ga,"SPAN",{});var Ja=a(w);k(x.$$.fragment,Ja),Ja.forEach(t),Ga.forEach(t),_=l(Ko),$=s(Ko,"SPAN",{});var Ya=a($);Ae=i(Ya,"MarianMT"),Ya.forEach(t),Ko.forEach(t),ue=l(n),E=s(n,"P",{});var nn=a(E);ve=s(nn,"STRONG",{});var Ka=a(ve);X=i(Ka,"Bugs:"),Ka.forEach(t),Se=i(nn," If you see something strange, file a "),Q=s(nn,"A",{href:!0,rel:!0});var Za=a(Q);ee=i(Za,"Github Issue"),Za.forEach(t),Oe=i(nn,` and assign @patrickvonplaten.`),nn.forEach(t),je=l(n),V=s(n,"P",{});var Xa=a(V);A=i(Xa,"Translations should be similar, but not identical to output in the test set linked to in each model card."),Xa.forEach(t),Ee=l(n),I=s(n,"H2",{class:!0});var Nr=a(I);P=s(Nr,"A",{id:!0,class:!0,href:!0});var Mh=a(P);F=s(Mh,"SPAN",{});var yh=a(F);k(te.$$.fragment,yh),yh.forEach(t),Mh.forEach(t),ne=l(Nr),ke=s(Nr,"SPAN",{});var bh=a(ke);De=i(bh,"Implementation Notes"),bh.forEach(t),Nr.forEach(t),G=l(n),q=s(n,"UL",{});var K=a(q);Te=s(K,"LI",{});var 
xh=a(Te);N=s(xh,"P",{});var wh=a(N);He=i(wh,"Each model is about 298 MB on disk, there are more than 1,000 models."),wh.forEach(t),xh.forEach(t),oe=l(K),Me=s(K,"LI",{});var zh=a(Me);se=s(zh,"P",{});var Cr=a(se);ae=i(Cr,"The list of supported language pairs can be found "),re=s(Cr,"A",{href:!0,rel:!0});var $h=a(re);Ue=i($h,"here"),$h.forEach(t),S=i(Cr,"."),Cr.forEach(t),zh.forEach(t),We=l(K),O=s(K,"LI",{});var jh=a(O);D=s(jh,"P",{});var gs=a(D);m=i(gs,"Models were originally trained by "),j=s(gs,"A",{href:!0,rel:!0});var Eh=a(j);ye=i(Eh,"J\xF6rg Tiedemann"),Eh.forEach(t),ot=i(gs," using the "),be=s(gs,"A",{href:!0,rel:!0});var Ph=a(be);H=i(Ph,"Marian"),Ph.forEach(t),st=i(gs," C++ library, which supports fast training and translation."),gs.forEach(t),jh.forEach(t),at=l(K),Re=s(K,"LI",{});var qh=a(Re);L=s(qh,"P",{});var Fh=a(L);ie=i(Fh,`All models are transformer encoder-decoders with 6 layers in each component. Each model\u2019s performance is documented in a model card.`),Fh.forEach(t),qh.forEach(t),rt=l(K),Be=s(K,"LI",{});var Nh=a(Be);U=s(Nh,"P",{});var Ch=a(U);it=i(Ch,"The 80 opus models that require BPE preprocessing are not supported."),Ch.forEach(t),Nh.forEach(t),dt=l(K),de=s(K,"LI",{});var Ir=a(de);xe=s(Ir,"P",{});var Lr=a(xe);bi=i(Lr,"The modeling code is the same as "),Zo=s(Lr,"A",{href:!0});var Ih=a(Zo);xi=i(Ih,"BartForConditionalGeneration"),Ih.forEach(t),wi=i(Lr," with a few minor modifications:"),Lr.forEach(t),zi=l(Ir),lt=s(Ir,"UL",{});var _s=a(lt);pn=s(_s,"LI",{});var Ar=a(pn);$i=i(Ar,"static (sinusoid) positional embeddings ("),Ps=s(Ar,"CODE",{});var Lh=a(Ps);ji=i(Lh,"MarianConfig.static_position_embeddings=True"),Lh.forEach(t),Ei=i(Ar,")"),Ar.forEach(t),Pi=l(_s),hn=s(_s,"LI",{});var Sr=a(hn);qi=i(Sr,"no layernorm_embedding ("),qs=s(Sr,"CODE",{});var Ah=a(qs);Fi=i(Ah,"MarianConfig.normalize_embedding=False"),Ah.forEach(t),Ni=i(Sr,")"),Sr.forEach(t),Ci=l(_s),ct=s(_s,"LI",{});var vs=a(ct);Ii=i(vs,"the model starts generating with "),Fs=s(vs,"CODE",{});var Sh=a(Fs);Li=i(Sh,"pad_token_id"),Sh.forEach(t),Ai=i(vs,` (which has 0 as a token_embedding) as the prefix (Bart uses `),Ns=s(vs,"CODE",{});var Oh=a(Ns);Si=i(Oh,"<s/>"),Oh.forEach(t),Oi=i(vs,"),"),vs.forEach(t),_s.forEach(t),Ir.forEach(t),Di=l(K),Cs=s(K,"LI",{});var Dh=a(Cs);un=s(Dh,"P",{});var Or=a(un);Hi=i(Or,"Code to bulk convert models can be found in "),Is=s(Or,"CODE",{});var Hh=a(Is);Ui=i(Hh,"convert_marian_to_pytorch.py"),Hh.forEach(t),Wi=i(Or,"."),Or.forEach(t),Dh.forEach(t),Ri=l(K),Ls=s(K,"LI",{});var Uh=a(Ls);mn=s(Uh,"P",{});var Dr=a(mn);Bi=i(Dr,"This model was contributed by "),fn=s(Dr,"A",{href:!0,rel:!0});var Wh=a(fn);Vi=i(Wh,"sshleifer"),Wh.forEach(t),Gi=i(Dr,"."),Dr.forEach(t),Uh.forEach(t),K.forEach(t),Qa=l(n),pt=s(n,"H2",{class:!0});var Hr=a(pt);Nt=s(Hr,"A",{id:!0,class:!0,href:!0});var Rh=a(Nt);As=s(Rh,"SPAN",{});var Bh=a(As);k(gn.$$.fragment,Bh),Bh.forEach(t),Rh.forEach(t),Ji=l(Hr),Ss=s(Hr,"SPAN",{});var Vh=a(Ss);Yi=i(Vh,"Naming"),Vh.forEach(t),Hr.forEach(t),er=l(n),Pe=s(n,"UL",{});var on=a(Pe);Xo=s(on,"LI",{});var Th=a(Xo);Ki=i(Th,"All model names use the following format: "),Os=s(Th,"CODE",{});var Gh=a(Os);Zi=i(Gh,"Helsinki-NLP/opus-mt-{src}-{tgt}"),Gh.forEach(t),Th.forEach(t),Xi=l(on),_n=s(on,"LI",{});var Ur=a(_n);Qi=i(Ur,"The language codes used to name models are inconsistent. 
Two digit codes can usually be found "),vn=s(Ur,"A",{href:!0,rel:!0});var Jh=a(vn);ed=i(Jh,"here"),Jh.forEach(t),td=i(Ur,`, three digit codes require googling \u201Clanguage code {code}\u201C.`),Ur.forEach(t),nd=l(on),ht=s(on,"LI",{});var ks=a(ht);od=i(ks,"Codes formatted like "),Ds=s(ks,"CODE",{});var Yh=a(Ds);sd=i(Yh,"es_AR"),Yh.forEach(t),ad=i(ks," are usually "),Hs=s(ks,"CODE",{});var Kh=a(Hs);rd=i(Kh,"code_{region}"),Kh.forEach(t),id=i(ks,". That one is Spanish from Argentina."),ks.forEach(t),dd=l(on),Us=s(on,"LI",{});var Zh=a(Us);ld=i(Zh,`The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, the second group use a combination of ISO-639-5 codes and ISO-639-2 codes.`),Zh.forEach(t),on.forEach(t),tr=l(n),ut=s(n,"H2",{class:!0});var Wr=a(ut);Ct=s(Wr,"A",{id:!0,class:!0,href:!0});var Xh=a(Ct);Ws=s(Xh,"SPAN",{});var Qh=a(Ws);k(kn.$$.fragment,Qh),Qh.forEach(t),Xh.forEach(t),cd=l(Wr),Rs=s(Wr,"SPAN",{});var eu=a(Rs);pd=i(eu,"Examples"),eu.forEach(t),Wr.forEach(t),nr=l(n),Je=s(n,"UL",{});var Ts=a(Je);Bs=s(Ts,"LI",{});var tu=a(Bs);hd=i(tu,`Since Marian models are smaller than many other translation models available in the library, they can be useful for fine-tuning experiments and integration tests.`),tu.forEach(t),ud=l(Ts),Vs=s(Ts,"LI",{});var nu=a(Vs);Tn=s(nu,"A",{href:!0,rel:!0});var ou=a(Tn);md=i(ou,"Fine-tune on GPU"),ou.forEach(t),nu.forEach(t),fd=l(Ts),Gs=s(Ts,"LI",{});var su=a(Gs);Mn=s(su,"A",{href:!0,rel:!0});var au=a(Mn);gd=i(au,"Fine-tune on GPU with pytorch-lightning"),au.forEach(t),su.forEach(t),Ts.forEach(t),or=l(n),mt=s(n,"H2",{class:!0});var Rr=a(mt);It=s(Rr,"A",{id:!0,class:!0,href:!0});var ru=a(It);Js=s(ru,"SPAN",{});var iu=a(Js);k(yn.$$.fragment,iu),iu.forEach(t),ru.forEach(t),_d=l(Rr),Ys=s(Rr,"SPAN",{});var du=a(Ys);vd=i(du,"Multilingual Models"),du.forEach(t),Rr.forEach(t),sr=l(n),qe=s(n,"UL",{});var sn=a(qe);bn=s(sn,"LI",{});var Br=a(bn);kd=i(Br,"All model names use the following format: "),Ks=s(Br,"CODE",{});var lu=a(Ks);Td=i(lu,"Helsinki-NLP/opus-mt-{src}-{tgt}"),lu.forEach(t),Md=i(Br,":"),Br.forEach(t),yd=l(sn),xn=s(sn,"LI",{});var Vr=a(xn);bd=i(Vr,`If a model can output multiple languages, and you should specify a language code by prepending the desired output language to the `),Zs=s(Vr,"CODE",{});var cu=a(Zs);xd=i(cu,"src_text"),cu.forEach(t),wd=i(Vr,"."),Vr.forEach(t),zd=l(sn),wn=s(sn,"LI",{});var Gr=a(wn);$d=i(Gr,"You can see a models\u2019s supported language codes in its model card, under target constituents, like in "),zn=s(Gr,"A",{href:!0,rel:!0});var pu=a(zn);jd=i(pu,"opus-mt-en-roa"),pu.forEach(t),Ed=i(Gr,"."),Gr.forEach(t),Pd=l(sn),$n=s(sn,"LI",{});var Jr=a($n);qd=i(Jr,"Note that if a model is only multilingual on the source side, like "),Xs=s(Jr,"CODE",{});var hu=a(Xs);Fd=i(hu,"Helsinki-NLP/opus-mt-roa-en"),hu.forEach(t),Nd=i(Jr,`, no language codes are required.`),Jr.forEach(t),sn.forEach(t),ar=l(n),Lt=s(n,"P",{});var Yr=a(Lt);Cd=i(Yr,"New multi-lingual models from the "),jn=s(Yr,"A",{href:!0,rel:!0});var uu=a(jn);Id=i(uu,"Tatoeba-Challenge repo"),uu.forEach(t),Ld=i(Yr,` require 3 character language codes:`),Yr.forEach(t),rr=l(n),k(En.$$.fragment,n),ir=l(n),Qo=s(n,"P",{});var mu=a(Qo);Ad=i(mu,"Here is the code to see all available pretrained models on the hub:"),mu.forEach(t),dr=l(n),k(Pn.$$.fragment,n),lr=l(n),ft=s(n,"H2",{class:!0});var Kr=a(ft);At=s(Kr,"A",{id:!0,class:!0,href:!0});var fu=a(At);Qs=s(fu,"SPAN",{});var 
gu=a(Qs);k(qn.$$.fragment,gu),gu.forEach(t),fu.forEach(t),Sd=l(Kr),ea=s(Kr,"SPAN",{});var _u=a(ea);Od=i(_u,"Old Style Multi-Lingual Models"),_u.forEach(t),Kr.forEach(t),cr=l(n),es=s(n,"P",{});var vu=a(es);Dd=i(vu,`These are the old style multi-lingual models ported from the OPUS-MT-Train repo: and the members of each language group:`),vu.forEach(t),pr=l(n),k(Fn.$$.fragment,n),hr=l(n),ts=s(n,"P",{});var ku=a(ts);Hd=i(ku,"Example of translating english to many romance languages, using old-style 2 character language codes"),ku.forEach(t),ur=l(n),k(Nn.$$.fragment,n),mr=l(n),gt=s(n,"H2",{class:!0});var Zr=a(gt);St=s(Zr,"A",{id:!0,class:!0,href:!0});var Tu=a(St);ta=s(Tu,"SPAN",{});var Mu=a(ta);k(Cn.$$.fragment,Mu),Mu.forEach(t),Tu.forEach(t),Ud=l(Zr),na=s(Zr,"SPAN",{});var yu=a(na);Wd=i(yu,"MarianConfig"),yu.forEach(t),Zr.forEach(t),fr=l(n),le=s(n,"DIV",{class:!0});var Ke=a(le);k(In.$$.fragment,Ke),Rd=l(Ke),_t=s(Ke,"P",{});var Ms=a(_t);Bd=i(Ms,"This is the configuration class to store the configuration of a "),ns=s(Ms,"A",{href:!0});var bu=a(ns);Vd=i(bu,"MarianModel"),bu.forEach(t),Gd=i(Ms,`. It is used to instantiate an Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Marian `),Ln=s(Ms,"A",{href:!0,rel:!0});var xu=a(Ln);Jd=i(xu,"Helsinki-NLP/opus-mt-en-de"),xu.forEach(t),Yd=i(Ms," architecture."),Ms.forEach(t),Kd=l(Ke),vt=s(Ke,"P",{});var ys=a(vt);Zd=i(ys,"Configuration objects inherit from "),os=s(ys,"A",{href:!0});var wu=a(os);Xd=i(wu,"PretrainedConfig"),wu.forEach(t),Qd=i(ys,` and can be used to control the model outputs. Read the documentation from `),ss=s(ys,"A",{href:!0});var zu=a(ss);el=i(zu,"PretrainedConfig"),zu.forEach(t),tl=i(ys," for more information."),ys.forEach(t),nl=l(Ke),oa=s(Ke,"P",{});var $u=a(oa);ol=i($u,"Examples:"),$u.forEach(t),sl=l(Ke),k(An.$$.fragment,Ke),Ke.forEach(t),gr=l(n),kt=s(n,"H2",{class:!0});var Xr=a(kt);Ot=s(Xr,"A",{id:!0,class:!0,href:!0});var ju=a(Ot);sa=s(ju,"SPAN",{});var Eu=a(sa);k(Sn.$$.fragment,Eu),Eu.forEach(t),ju.forEach(t),al=l(Xr),aa=s(Xr,"SPAN",{});var Pu=a(aa);rl=i(Pu,"MarianTokenizer"),Pu.forEach(t),Xr.forEach(t),_r=l(n),W=s(n,"DIV",{class:!0});var Fe=a(W);k(On.$$.fragment,Fe),il=l(Fe),Dn=s(Fe,"P",{});var Qr=a(Dn);dl=i(Qr,"Construct a Marian tokenizer. Based on "),Hn=s(Qr,"A",{href:!0,rel:!0});var qu=a(Hn);ll=i(qu,"SentencePiece"),qu.forEach(t),cl=i(Qr,"."),Qr.forEach(t),pl=l(Fe),Un=s(Fe,"P",{});var ei=a(Un);hl=i(ei,"This tokenizer inherits from "),as=s(ei,"A",{href:!0});var Fu=a(as);ul=i(Fu,"PreTrainedTokenizer"),Fu.forEach(t),ml=i(ei,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ei.forEach(t),fl=l(Fe),ra=s(Fe,"P",{});var Nu=a(ra);gl=i(Nu,"Examples:"),Nu.forEach(t),_l=l(Fe),k(Wn.$$.fragment,Fe),vl=l(Fe),Dt=s(Fe,"DIV",{class:!0});var ti=a(Dt);k(Rn.$$.fragment,ti),kl=l(ti),ia=s(ti,"P",{});var Cu=a(ia);Tl=i(Cu,`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Cu.forEach(t),ti.forEach(t),Fe.forEach(t),vr=l(n),Tt=s(n,"H2",{class:!0});var ni=a(Tt);Ht=s(ni,"A",{id:!0,class:!0,href:!0});var Iu=a(Ht);da=s(Iu,"SPAN",{});var Lu=a(da);k(Bn.$$.fragment,Lu),Lu.forEach(t),Iu.forEach(t),Ml=l(ni),la=s(ni,"SPAN",{});var Au=a(la);yl=i(Au,"MarianModel"),Au.forEach(t),ni.forEach(t),kr=l(n),we=s(n,"DIV",{class:!0});var an=a(we);k(Vn.$$.fragment,an),bl=l(an),Gn=s(an,"P",{});var oi=a(Gn);xl=i(oi,`The bare Marian Model outputting raw hidden-states without any specific head on top. This model inherits from `),rs=s(oi,"A",{href:!0});var Su=a(rs);wl=i(Su,"PreTrainedModel"),Su.forEach(t),zl=i(oi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oi.forEach(t),$l=l(an),Jn=s(an,"P",{});var si=a(Jn);jl=i(si,"This model is also a PyTorch "),Yn=s(si,"A",{href:!0,rel:!0});var Ou=a(Yn);El=i(Ou,"torch.nn.Module"),Ou.forEach(t),Pl=i(si,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),si.forEach(t),ql=l(an),me=s(an,"DIV",{class:!0});var Ze=a(me);k(Kn.$$.fragment,Ze),Fl=l(Ze),Mt=s(Ze,"P",{});var bs=a(Mt);Nl=i(bs,"The "),is=s(bs,"A",{href:!0});var Du=a(is);Cl=i(Du,"MarianModel"),Du.forEach(t),Il=i(bs," forward method, overrides the "),ca=s(bs,"CODE",{});var Hu=a(ca);Ll=i(Hu,"__call__"),Hu.forEach(t),Al=i(bs," special method."),bs.forEach(t),Sl=l(Ze),k(Ut.$$.fragment,Ze),Ol=l(Ze),pa=s(Ze,"P",{});var Uu=a(pa);Dl=i(Uu,"Example:"),Uu.forEach(t),Hl=l(Ze),k(Zn.$$.fragment,Ze),Ze.forEach(t),an.forEach(t),Tr=l(n),yt=s(n,"H2",{class:!0});var ai=a(yt);Wt=s(ai,"A",{id:!0,class:!0,href:!0});var Wu=a(Wt);ha=s(Wu,"SPAN",{});var Ru=a(ha);k(Xn.$$.fragment,Ru),Ru.forEach(t),Wu.forEach(t),Ul=l(ai),ua=s(ai,"SPAN",{});var Bu=a(ua);Wl=i(Bu,"MarianMTModel"),Bu.forEach(t),ai.forEach(t),Mr=l(n),ze=s(n,"DIV",{class:!0});var rn=a(ze);k(Qn.$$.fragment,rn),Rl=l(rn),eo=s(rn,"P",{});var ri=a(eo);Bl=i(ri,`The Marian Model with a language modeling head. Can be used for summarization. This model inherits from `),ds=s(ri,"A",{href:!0});var Vu=a(ds);Vl=i(Vu,"PreTrainedModel"),Vu.forEach(t),Gl=i(ri,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ri.forEach(t),Jl=l(rn),to=s(rn,"P",{});var ii=a(to);Yl=i(ii,"This model is also a PyTorch "),no=s(ii,"A",{href:!0,rel:!0});var Gu=a(no);Kl=i(Gu,"torch.nn.Module"),Gu.forEach(t),Zl=i(ii,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ii.forEach(t),Xl=l(rn),J=s(rn,"DIV",{class:!0});var Ne=a(J);k(oo.$$.fragment,Ne),Ql=l(Ne),bt=s(Ne,"P",{});var xs=a(bt);ec=i(xs,"The "),ls=s(xs,"A",{href:!0});var Ju=a(ls);tc=i(Ju,"MarianMTModel"),Ju.forEach(t),nc=i(xs," forward method, overrides the "),ma=s(xs,"CODE",{});var Yu=a(ma);oc=i(Yu,"__call__"),Yu.forEach(t),sc=i(xs," special method."),xs.forEach(t),ac=l(Ne),k(Rt.$$.fragment,Ne),rc=l(Ne),so=s(Ne,"P",{});var di=a(so);ic=i(di,`Pytorch version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. 
Available models are listed `),ao=s(di,"A",{href:!0,rel:!0});var Ku=a(ao);dc=i(Ku,"here"),Ku.forEach(t),lc=i(di,"."),di.forEach(t),cc=l(Ne),fa=s(Ne,"P",{});var Zu=a(fa);pc=i(Zu,"Examples:"),Zu.forEach(t),hc=l(Ne),k(ro.$$.fragment,Ne),Ne.forEach(t),rn.forEach(t),yr=l(n),xt=s(n,"H2",{class:!0});var li=a(xt);Bt=s(li,"A",{id:!0,class:!0,href:!0});var Xu=a(Bt);ga=s(Xu,"SPAN",{});var Qu=a(ga);k(io.$$.fragment,Qu),Qu.forEach(t),Xu.forEach(t),uc=l(li),_a=s(li,"SPAN",{});var em=a(_a);mc=i(em,"MarianForCausalLM"),em.forEach(t),li.forEach(t),br=l(n),lo=s(n,"DIV",{class:!0});var tm=a(lo);Ye=s(tm,"DIV",{class:!0});var ws=a(Ye);k(co.$$.fragment,ws),fc=l(ws),va=s(ws,"P",{});var nm=a(va);gc=i(nm,"Example:"),nm.forEach(t),_c=l(ws),k(po.$$.fragment,ws),ws.forEach(t),tm.forEach(t),xr=l(n),wt=s(n,"H2",{class:!0});var ci=a(wt);Vt=s(ci,"A",{id:!0,class:!0,href:!0});var om=a(Vt);ka=s(om,"SPAN",{});var sm=a(ka);k(ho.$$.fragment,sm),sm.forEach(t),om.forEach(t),vc=l(ci),Ta=s(ci,"SPAN",{});var am=a(Ta);kc=i(am,"TFMarianModel"),am.forEach(t),ci.forEach(t),wr=l(n),ce=s(n,"DIV",{class:!0});var Xe=a(ce);k(uo.$$.fragment,Xe),Tc=l(Xe),mo=s(Xe,"P",{});var pi=a(mo);Mc=i(pi,`The bare MARIAN Model outputting raw hidden-states without any specific head on top. This model inherits from `),cs=s(pi,"A",{href:!0});var rm=a(cs);yc=i(rm,"TFPreTrainedModel"),rm.forEach(t),bc=i(pi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pi.forEach(t),xc=l(Xe),fo=s(Xe,"P",{});var hi=a(fo);wc=i(hi,"This model is also a "),go=s(hi,"A",{href:!0,rel:!0});var im=a(go);zc=i(im,"tf.keras.Model"),im.forEach(t),$c=i(hi,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hi.forEach(t),jc=l(Xe),k(Gt.$$.fragment,Xe),Ec=l(Xe),fe=s(Xe,"DIV",{class:!0});var Qe=a(fe);k(_o.$$.fragment,Qe),Pc=l(Qe),zt=s(Qe,"P",{});var zs=a(zt);qc=i(zs,"The "),ps=s(zs,"A",{href:!0});var dm=a(ps);Fc=i(dm,"TFMarianModel"),dm.forEach(t),Nc=i(zs," forward method, overrides the "),Ma=s(zs,"CODE",{});var lm=a(Ma);Cc=i(lm,"__call__"),lm.forEach(t),Ic=i(zs," special method."),zs.forEach(t),Lc=l(Qe),k(Jt.$$.fragment,Qe),Ac=l(Qe),ya=s(Qe,"P",{});var cm=a(ya);Sc=i(cm,"Example:"),cm.forEach(t),Oc=l(Qe),k(vo.$$.fragment,Qe),Qe.forEach(t),Xe.forEach(t),zr=l(n),$t=s(n,"H2",{class:!0});var ui=a($t);Yt=s(ui,"A",{id:!0,class:!0,href:!0});var pm=a(Yt);ba=s(pm,"SPAN",{});var hm=a(ba);k(ko.$$.fragment,hm),hm.forEach(t),pm.forEach(t),Dc=l(ui),xa=s(ui,"SPAN",{});var um=a(xa);Hc=i(um,"TFMarianMTModel"),um.forEach(t),ui.forEach(t),$r=l(n),pe=s(n,"DIV",{class:!0});var et=a(pe);k(To.$$.fragment,et),Uc=l(et),Mo=s(et,"P",{});var mi=a(Mo);Wc=i(mi,`The MARIAN Model with a language modeling head. Can be used for summarization. This model inherits from `),hs=s(mi,"A",{href:!0});var mm=a(hs);Rc=i(mm,"TFPreTrainedModel"),mm.forEach(t),Bc=i(mi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mi.forEach(t),Vc=l(et),yo=s(et,"P",{});var fi=a(yo);Gc=i(fi,"This model is also a "),bo=s(fi,"A",{href:!0,rel:!0});var fm=a(bo);Jc=i(fm,"tf.keras.Model"),fm.forEach(t),Yc=i(fi,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fi.forEach(t),Kc=l(et),k(Kt.$$.fragment,et),Zc=l(et),Y=s(et,"DIV",{class:!0});var Ce=a(Y);k(xo.$$.fragment,Ce),Xc=l(Ce),jt=s(Ce,"P",{});var $s=a(jt);Qc=i($s,"The "),us=s($s,"A",{href:!0});var gm=a(us);ep=i(gm,"TFMarianMTModel"),gm.forEach(t),tp=i($s," forward method, overrides the "),wa=s($s,"CODE",{});var _m=a(wa);np=i(_m,"__call__"),_m.forEach(t),op=i($s," special method."),$s.forEach(t),sp=l(Ce),k(Zt.$$.fragment,Ce),ap=l(Ce),wo=s(Ce,"P",{});var gi=a(wo);rp=i(gi,`TF version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available models are listed `),zo=s(gi,"A",{href:!0,rel:!0});var vm=a(zo);ip=i(vm,"here"),vm.forEach(t),dp=i(gi,"."),gi.forEach(t),lp=l(Ce),za=s(Ce,"P",{});var km=a(za);cp=i(km,"Examples:"),km.forEach(t),pp=l(Ce),k($o.$$.fragment,Ce),Ce.forEach(t),et.forEach(t),jr=l(n),Et=s(n,"H2",{class:!0});var _i=a(Et);Xt=s(_i,"A",{id:!0,class:!0,href:!0});var Tm=a(Xt);$a=s(Tm,"SPAN",{});var Mm=a($a);k(jo.$$.fragment,Mm),Mm.forEach(t),Tm.forEach(t),hp=l(_i),ja=s(_i,"SPAN",{});var ym=a(ja);up=i(ym,"FlaxMarianModel"),ym.forEach(t),_i.forEach(t),Er=l(n),R=s(n,"DIV",{class:!0});var Ie=a(R);k(Eo.$$.fragment,Ie),mp=l(Ie),Po=s(Ie,"P",{});var vi=a(Po);fp=i(vi,`The bare Marian Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),ms=s(vi,"A",{href:!0});var bm=a(ms);gp=i(bm,"FlaxPreTrainedModel"),bm.forEach(t),_p=i(vi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vi.forEach(t),vp=l(Ie),qo=s(Ie,"P",{});var ki=a(qo);kp=i(ki,"This model is also a Flax Linen "),Fo=s(ki,"A",{href:!0,rel:!0});var xm=a(Fo);Tp=i(xm,"flax.nn.Module"),xm.forEach(t),Mp=i(ki,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ki.forEach(t),yp=l(Ie),Ea=s(Ie,"P",{});var wm=a(Ea);bp=i(wm,"Finally, this model supports inherent JAX features such as:"),wm.forEach(t),xp=l(Ie),Ve=s(Ie,"UL",{});var dn=a(Ve);Pa=s(dn,"LI",{});var zm=a(Pa);No=s(zm,"A",{href:!0,rel:!0});var $m=a(No);wp=i($m,"Just-In-Time (JIT) compilation"),$m.forEach(t),zm.forEach(t),zp=l(dn),qa=s(dn,"LI",{});var jm=a(qa);Co=s(jm,"A",{href:!0,rel:!0});var Em=a(Co);$p=i(Em,"Automatic Differentiation"),Em.forEach(t),jm.forEach(t),jp=l(dn),Fa=s(dn,"LI",{});var Pm=a(Fa);Io=s(Pm,"A",{href:!0,rel:!0});var qm=a(Io);Ep=i(qm,"Vectorization"),qm.forEach(t),Pm.forEach(t),Pp=l(dn),Na=s(dn,"LI",{});var Fm=a(Na);Lo=s(Fm,"A",{href:!0,rel:!0});var Nm=a(Lo);qp=i(Nm,"Parallelization"),Nm.forEach(t),Fm.forEach(t),dn.forEach(t),Fp=l(Ie),ge=s(Ie,"DIV",{class:!0});var tt=a(ge);k(Ao.$$.fragment,tt),Np=l(tt),Pt=s(tt,"P",{});var js=a(Pt);Cp=i(js,"The "),Ca=s(js,"CODE",{});var Cm=a(Ca);Ip=i(Cm,"FlaxMarianPreTrainedModel"),Cm.forEach(t),Lp=i(js," forward method, overrides the "),Ia=s(js,"CODE",{});var Im=a(Ia);Ap=i(Im,"__call__"),Im.forEach(t),Sp=i(js," special method."),js.forEach(t),Op=l(tt),k(Qt.$$.fragment,tt),Dp=l(tt),La=s(tt,"P",{});var Lm=a(La);Hp=i(Lm,"Example:"),Lm.forEach(t),Up=l(tt),k(So.$$.fragment,tt),tt.forEach(t),Ie.forEach(t),Pr=l(n),qt=s(n,"H2",{class:!0});var Ti=a(qt);en=s(Ti,"A",{id:!0,class:!0,href:!0});var Am=a(en);Aa=s(Am,"SPAN",{});var Sm=a(Aa);k(Oo.$$.fragment,Sm),Sm.forEach(t),Am.forEach(t),Wp=l(Ti),Sa=s(Ti,"SPAN",{});var Om=a(Sa);Rp=i(Om,"FlaxMarianMTModel"),Om.forEach(t),Ti.forEach(t),qr=l(n),B=s(n,"DIV",{class:!0});var Le=a(B);k(Do.$$.fragment,Le),Bp=l(Le),Ho=s(Le,"P",{});var Mi=a(Ho);Vp=i(Mi,`The MARIAN Model with a language modeling head. Can be used for translation. This model inherits from `),fs=s(Mi,"A",{href:!0});var Dm=a(fs);Gp=i(Dm,"FlaxPreTrainedModel"),Dm.forEach(t),Jp=i(Mi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mi.forEach(t),Yp=l(Le),Uo=s(Le,"P",{});var yi=a(Uo);Kp=i(yi,"This model is also a Flax Linen "),Wo=s(yi,"A",{href:!0,rel:!0});var Hm=a(Wo);Zp=i(Hm,"flax.nn.Module"),Hm.forEach(t),Xp=i(yi,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),yi.forEach(t),Qp=l(Le),Oa=s(Le,"P",{});var Um=a(Oa);eh=i(Um,"Finally, this model supports inherent JAX features such as:"),Um.forEach(t),th=l(Le),Ge=s(Le,"UL",{});var ln=a(Ge);Da=s(ln,"LI",{});var Wm=a(Da);Ro=s(Wm,"A",{href:!0,rel:!0});var Rm=a(Ro);nh=i(Rm,"Just-In-Time (JIT) compilation"),Rm.forEach(t),Wm.forEach(t),oh=l(ln),Ha=s(ln,"LI",{});var Bm=a(Ha);Bo=s(Bm,"A",{href:!0,rel:!0});var Vm=a(Bo);sh=i(Vm,"Automatic Differentiation"),Vm.forEach(t),Bm.forEach(t),ah=l(ln),Ua=s(ln,"LI",{});var Gm=a(Ua);Vo=s(Gm,"A",{href:!0,rel:!0});var Jm=a(Vo);rh=i(Jm,"Vectorization"),Jm.forEach(t),Gm.forEach(t),ih=l(ln),Wa=s(ln,"LI",{});var Ym=a(Wa);Go=s(Ym,"A",{href:!0,rel:!0});var Km=a(Go);dh=i(Km,"Parallelization"),Km.forEach(t),Ym.forEach(t),ln.forEach(t),lh=l(Le),_e=s(Le,"DIV",{class:!0});var nt=a(_e);k(Jo.$$.fragment,nt),ch=l(nt),Ft=s(nt,"P",{});var Es=a(Ft);ph=i(Es,"The "),Ra=s(Es,"CODE",{});var Zm=a(Ra);hh=i(Zm,"FlaxMarianPreTrainedModel"),Zm.forEach(t),uh=i(Es," forward method, overrides the "),Ba=s(Es,"CODE",{});var Xm=a(Ba);mh=i(Xm,"__call__"),Xm.forEach(t),fh=i(Es," special method."),Es.forEach(t),gh=l(nt),k(tn.$$.fragment,nt),_h=l(nt),Va=s(nt,"P",{});var Qm=a(Va);vh=i(Qm,"Example:"),Qm.forEach(t),kh=l(nt),k(Yo.$$.fragment,nt),nt.forEach(t),Le.forEach(t),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(mf)),c(g,"id","marianmt"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#marianmt"),c(f,"class","relative group"),c(Q,"href","https://github.com/huggingface/transformers/issues/new?assignees=sshleifer&labels=&template=bug-report.md&title"),c(Q,"rel","nofollow"),c(P,"id","implementation-notes"),c(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(P,"href","#implementation-notes"),c(I,"class","relative group"),c(re,"href","https://huggingface.co/Helsinki-NLP"),c(re,"rel","nofollow"),c(j,"href","https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann"),c(j,"rel","nofollow"),c(be,"href","https://marian-nmt.github.io/"),c(be,"rel","nofollow"),c(Zo,"href","/docs/transformers/v4.15.0/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(fn,"href","https://huggingface.co/sshleifer"),c(fn,"rel","nofollow"),c(Nt,"id","naming"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#naming"),c(pt,"class","relative group"),c(vn,"href","https://developers.google.com/admin-sdk/directory/v1/languages"),c(vn,"rel","nofollow"),c(Ct,"id","examples"),c(Ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ct,"href","#examples"),c(ut,"class","relative 
group"),c(Tn,"href","https://github.com/huggingface/transformers/blob/master/examples/research_projects/seq2seq-distillation/train_distil_marian_enro_teacher.sh"),c(Tn,"rel","nofollow"),c(Mn,"href","https://github.com/huggingface/transformers/blob/master/examples/research_projects/seq2seq-distillation/train_distil_marian_no_teacher.sh"),c(Mn,"rel","nofollow"),c(It,"id","multilingual-models"),c(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(It,"href","#multilingual-models"),c(mt,"class","relative group"),c(zn,"href","https://huggingface.co/Helsinki-NLP/opus-mt-en-roa"),c(zn,"rel","nofollow"),c(jn,"href","https://github.com/Helsinki-NLP/Tatoeba-Challenge"),c(jn,"rel","nofollow"),c(At,"id","old-style-multilingual-models"),c(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(At,"href","#old-style-multilingual-models"),c(ft,"class","relative group"),c(St,"id","transformers.MarianConfig"),c(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(St,"href","#transformers.MarianConfig"),c(gt,"class","relative group"),c(ns,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianModel"),c(Ln,"href","https://huggingface.co/Helsinki-NLP/opus-mt-en-de"),c(Ln,"rel","nofollow"),c(os,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(ss,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(le,"class","docstring"),c(Ot,"id","transformers.MarianTokenizer"),c(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ot,"href","#transformers.MarianTokenizer"),c(kt,"class","relative group"),c(Hn,"href","https://github.com/google/sentencepiece"),c(Hn,"rel","nofollow"),c(as,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Dt,"class","docstring"),c(W,"class","docstring"),c(Ht,"id","transformers.MarianModel"),c(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ht,"href","#transformers.MarianModel"),c(Tt,"class","relative group"),c(rs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Yn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Yn,"rel","nofollow"),c(is,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianModel"),c(me,"class","docstring"),c(we,"class","docstring"),c(Wt,"id","transformers.MarianMTModel"),c(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wt,"href","#transformers.MarianMTModel"),c(yt,"class","relative 
group"),c(ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(no,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(no,"rel","nofollow"),c(ls,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.MarianMTModel"),c(ao,"href","https://huggingface.co/models?search=Helsinki-NLP"),c(ao,"rel","nofollow"),c(J,"class","docstring"),c(ze,"class","docstring"),c(Bt,"id","transformers.MarianForCausalLM"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#transformers.MarianForCausalLM"),c(xt,"class","relative group"),c(Ye,"class","docstring"),c(lo,"class","docstring"),c(Vt,"id","transformers.TFMarianModel"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.TFMarianModel"),c(wt,"class","relative group"),c(cs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(go,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(go,"rel","nofollow"),c(ps,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianModel"),c(fe,"class","docstring"),c(ce,"class","docstring"),c(Yt,"id","transformers.TFMarianMTModel"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#transformers.TFMarianMTModel"),c($t,"class","relative group"),c(hs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(bo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(bo,"rel","nofollow"),c(us,"href","/docs/transformers/v4.15.0/en/model_doc/marian#transformers.TFMarianMTModel"),c(zo,"href","https://huggingface.co/models?search=Helsinki-NLP"),c(zo,"rel","nofollow"),c(Y,"class","docstring"),c(pe,"class","docstring"),c(Xt,"id","transformers.FlaxMarianModel"),c(Xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xt,"href","#transformers.FlaxMarianModel"),c(Et,"class","relative group"),c(ms,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Fo,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Fo,"rel","nofollow"),c(No,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(No,"rel","nofollow"),c(Co,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Co,"rel","nofollow"),c(Io,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Io,"rel","nofollow"),c(Lo,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Lo,"rel","nofollow"),c(ge,"class","docstring"),c(R,"class","docstring"),c(en,"id","transformers.FlaxMarianMTModel"),c(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(en,"href","#transformers.FlaxMarianMTModel"),c(qt,"class","relative 
group"),c(fs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Wo,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(Wo,"rel","nofollow"),c(Ro,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Ro,"rel","nofollow"),c(Bo,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Bo,"rel","nofollow"),c(Vo,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Vo,"rel","nofollow"),c(Go,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Go,"rel","nofollow"),c(_e,"class","docstring"),c(B,"class","docstring")},m(n,p){e(document.head,u),h(n,z,p),h(n,f,p),e(f,g),e(g,w),T(x,w,null),e(f,_),e(f,$),e($,Ae),h(n,ue,p),h(n,E,p),e(E,ve),e(ve,X),e(E,Se),e(E,Q),e(Q,ee),e(E,Oe),h(n,je,p),h(n,V,p),e(V,A),h(n,Ee,p),h(n,I,p),e(I,P),e(P,F),T(te,F,null),e(I,ne),e(I,ke),e(ke,De),h(n,G,p),h(n,q,p),e(q,Te),e(Te,N),e(N,He),e(q,oe),e(q,Me),e(Me,se),e(se,ae),e(se,re),e(re,Ue),e(se,S),e(q,We),e(q,O),e(O,D),e(D,m),e(D,j),e(j,ye),e(D,ot),e(D,be),e(be,H),e(D,st),e(q,at),e(q,Re),e(Re,L),e(L,ie),e(q,rt),e(q,Be),e(Be,U),e(U,it),e(q,dt),e(q,de),e(de,xe),e(xe,bi),e(xe,Zo),e(Zo,xi),e(xe,wi),e(de,zi),e(de,lt),e(lt,pn),e(pn,$i),e(pn,Ps),e(Ps,ji),e(pn,Ei),e(lt,Pi),e(lt,hn),e(hn,qi),e(hn,qs),e(qs,Fi),e(hn,Ni),e(lt,Ci),e(lt,ct),e(ct,Ii),e(ct,Fs),e(Fs,Li),e(ct,Ai),e(ct,Ns),e(Ns,Si),e(ct,Oi),e(q,Di),e(q,Cs),e(Cs,un),e(un,Hi),e(un,Is),e(Is,Ui),e(un,Wi),e(q,Ri),e(q,Ls),e(Ls,mn),e(mn,Bi),e(mn,fn),e(fn,Vi),e(mn,Gi),h(n,Qa,p),h(n,pt,p),e(pt,Nt),e(Nt,As),T(gn,As,null),e(pt,Ji),e(pt,Ss),e(Ss,Yi),h(n,er,p),h(n,Pe,p),e(Pe,Xo),e(Xo,Ki),e(Xo,Os),e(Os,Zi),e(Pe,Xi),e(Pe,_n),e(_n,Qi),e(_n,vn),e(vn,ed),e(_n,td),e(Pe,nd),e(Pe,ht),e(ht,od),e(ht,Ds),e(Ds,sd),e(ht,ad),e(ht,Hs),e(Hs,rd),e(ht,id),e(Pe,dd),e(Pe,Us),e(Us,ld),h(n,tr,p),h(n,ut,p),e(ut,Ct),e(Ct,Ws),T(kn,Ws,null),e(ut,cd),e(ut,Rs),e(Rs,pd),h(n,nr,p),h(n,Je,p),e(Je,Bs),e(Bs,hd),e(Je,ud),e(Je,Vs),e(Vs,Tn),e(Tn,md),e(Je,fd),e(Je,Gs),e(Gs,Mn),e(Mn,gd),h(n,or,p),h(n,mt,p),e(mt,It),e(It,Js),T(yn,Js,null),e(mt,_d),e(mt,Ys),e(Ys,vd),h(n,sr,p),h(n,qe,p),e(qe,bn),e(bn,kd),e(bn,Ks),e(Ks,Td),e(bn,Md),e(qe,yd),e(qe,xn),e(xn,bd),e(xn,Zs),e(Zs,xd),e(xn,wd),e(qe,zd),e(qe,wn),e(wn,$d),e(wn,zn),e(zn,jd),e(wn,Ed),e(qe,Pd),e(qe,$n),e($n,qd),e($n,Xs),e(Xs,Fd),e($n,Nd),h(n,ar,p),h(n,Lt,p),e(Lt,Cd),e(Lt,jn),e(jn,Id),e(Lt,Ld),h(n,rr,p),T(En,n,p),h(n,ir,p),h(n,Qo,p),e(Qo,Ad),h(n,dr,p),T(Pn,n,p),h(n,lr,p),h(n,ft,p),e(ft,At),e(At,Qs),T(qn,Qs,null),e(ft,Sd),e(ft,ea),e(ea,Od),h(n,cr,p),h(n,es,p),e(es,Dd),h(n,pr,p),T(Fn,n,p),h(n,hr,p),h(n,ts,p),e(ts,Hd),h(n,ur,p),T(Nn,n,p),h(n,mr,p),h(n,gt,p),e(gt,St),e(St,ta),T(Cn,ta,null),e(gt,Ud),e(gt,na),e(na,Wd),h(n,fr,p),h(n,le,p),T(In,le,null),e(le,Rd),e(le,_t),e(_t,Bd),e(_t,ns),e(ns,Vd),e(_t,Gd),e(_t,Ln),e(Ln,Jd),e(_t,Yd),e(le,Kd),e(le,vt),e(vt,Zd),e(vt,os),e(os,Xd),e(vt,Qd),e(vt,ss),e(ss,el),e(vt,tl),e(le,nl),e(le,oa),e(oa,ol),e(le,sl),T(An,le,null),h(n,gr,p),h(n,kt,p),e(kt,Ot),e(Ot,sa),T(Sn,sa,null),e(kt,al),e(kt,aa),e(aa,rl),h(n,_r,p),h(n,W,p),T(On,W,null),e(W,il),e(W,Dn),e(Dn,dl),e(Dn,Hn),e(Hn,ll),e(Dn,cl),e(W,pl),e(W,Un),e(Un,hl),e(Un,as),e(as,ul),e(Un,ml),e(W,fl),e(W,ra),e(ra,gl),e(W,_l),T(Wn,W,null),e(W,vl),e(W,Dt),T(Rn,Dt,null),e(Dt,kl),e(Dt,ia),e(ia,Tl),h(n,vr,p),h(n,Tt,p),e(Tt,Ht),e(Ht,da),T(Bn,da,null),e(Tt,Ml),e(Tt,la),e(la,yl),h(n,kr,p),h(n,we,p),T(Vn,we,null),e(we,bl),e(we,Gn),e(Gn,xl),e(Gn,rs),e(rs,wl),e(Gn,zl),e(we,$l),e(we,Jn),e(Jn,jl),e(Jn,Yn),e(Yn,El),e(Jn,Pl),e(we,ql),e(we,me),T(Kn
,me,null),e(me,Fl),e(me,Mt),e(Mt,Nl),e(Mt,is),e(is,Cl),e(Mt,Il),e(Mt,ca),e(ca,Ll),e(Mt,Al),e(me,Sl),T(Ut,me,null),e(me,Ol),e(me,pa),e(pa,Dl),e(me,Hl),T(Zn,me,null),h(n,Tr,p),h(n,yt,p),e(yt,Wt),e(Wt,ha),T(Xn,ha,null),e(yt,Ul),e(yt,ua),e(ua,Wl),h(n,Mr,p),h(n,ze,p),T(Qn,ze,null),e(ze,Rl),e(ze,eo),e(eo,Bl),e(eo,ds),e(ds,Vl),e(eo,Gl),e(ze,Jl),e(ze,to),e(to,Yl),e(to,no),e(no,Kl),e(to,Zl),e(ze,Xl),e(ze,J),T(oo,J,null),e(J,Ql),e(J,bt),e(bt,ec),e(bt,ls),e(ls,tc),e(bt,nc),e(bt,ma),e(ma,oc),e(bt,sc),e(J,ac),T(Rt,J,null),e(J,rc),e(J,so),e(so,ic),e(so,ao),e(ao,dc),e(so,lc),e(J,cc),e(J,fa),e(fa,pc),e(J,hc),T(ro,J,null),h(n,yr,p),h(n,xt,p),e(xt,Bt),e(Bt,ga),T(io,ga,null),e(xt,uc),e(xt,_a),e(_a,mc),h(n,br,p),h(n,lo,p),e(lo,Ye),T(co,Ye,null),e(Ye,fc),e(Ye,va),e(va,gc),e(Ye,_c),T(po,Ye,null),h(n,xr,p),h(n,wt,p),e(wt,Vt),e(Vt,ka),T(ho,ka,null),e(wt,vc),e(wt,Ta),e(Ta,kc),h(n,wr,p),h(n,ce,p),T(uo,ce,null),e(ce,Tc),e(ce,mo),e(mo,Mc),e(mo,cs),e(cs,yc),e(mo,bc),e(ce,xc),e(ce,fo),e(fo,wc),e(fo,go),e(go,zc),e(fo,$c),e(ce,jc),T(Gt,ce,null),e(ce,Ec),e(ce,fe),T(_o,fe,null),e(fe,Pc),e(fe,zt),e(zt,qc),e(zt,ps),e(ps,Fc),e(zt,Nc),e(zt,Ma),e(Ma,Cc),e(zt,Ic),e(fe,Lc),T(Jt,fe,null),e(fe,Ac),e(fe,ya),e(ya,Sc),e(fe,Oc),T(vo,fe,null),h(n,zr,p),h(n,$t,p),e($t,Yt),e(Yt,ba),T(ko,ba,null),e($t,Dc),e($t,xa),e(xa,Hc),h(n,$r,p),h(n,pe,p),T(To,pe,null),e(pe,Uc),e(pe,Mo),e(Mo,Wc),e(Mo,hs),e(hs,Rc),e(Mo,Bc),e(pe,Vc),e(pe,yo),e(yo,Gc),e(yo,bo),e(bo,Jc),e(yo,Yc),e(pe,Kc),T(Kt,pe,null),e(pe,Zc),e(pe,Y),T(xo,Y,null),e(Y,Xc),e(Y,jt),e(jt,Qc),e(jt,us),e(us,ep),e(jt,tp),e(jt,wa),e(wa,np),e(jt,op),e(Y,sp),T(Zt,Y,null),e(Y,ap),e(Y,wo),e(wo,rp),e(wo,zo),e(zo,ip),e(wo,dp),e(Y,lp),e(Y,za),e(za,cp),e(Y,pp),T($o,Y,null),h(n,jr,p),h(n,Et,p),e(Et,Xt),e(Xt,$a),T(jo,$a,null),e(Et,hp),e(Et,ja),e(ja,up),h(n,Er,p),h(n,R,p),T(Eo,R,null),e(R,mp),e(R,Po),e(Po,fp),e(Po,ms),e(ms,gp),e(Po,_p),e(R,vp),e(R,qo),e(qo,kp),e(qo,Fo),e(Fo,Tp),e(qo,Mp),e(R,yp),e(R,Ea),e(Ea,bp),e(R,xp),e(R,Ve),e(Ve,Pa),e(Pa,No),e(No,wp),e(Ve,zp),e(Ve,qa),e(qa,Co),e(Co,$p),e(Ve,jp),e(Ve,Fa),e(Fa,Io),e(Io,Ep),e(Ve,Pp),e(Ve,Na),e(Na,Lo),e(Lo,qp),e(R,Fp),e(R,ge),T(Ao,ge,null),e(ge,Np),e(ge,Pt),e(Pt,Cp),e(Pt,Ca),e(Ca,Ip),e(Pt,Lp),e(Pt,Ia),e(Ia,Ap),e(Pt,Sp),e(ge,Op),T(Qt,ge,null),e(ge,Dp),e(ge,La),e(La,Hp),e(ge,Up),T(So,ge,null),h(n,Pr,p),h(n,qt,p),e(qt,en),e(en,Aa),T(Oo,Aa,null),e(qt,Wp),e(qt,Sa),e(Sa,Rp),h(n,qr,p),h(n,B,p),T(Do,B,null),e(B,Bp),e(B,Ho),e(Ho,Vp),e(Ho,fs),e(fs,Gp),e(Ho,Jp),e(B,Yp),e(B,Uo),e(Uo,Kp),e(Uo,Wo),e(Wo,Zp),e(Uo,Xp),e(B,Qp),e(B,Oa),e(Oa,eh),e(B,th),e(B,Ge),e(Ge,Da),e(Da,Ro),e(Ro,nh),e(Ge,oh),e(Ge,Ha),e(Ha,Bo),e(Bo,sh),e(Ge,ah),e(Ge,Ua),e(Ua,Vo),e(Vo,rh),e(Ge,ih),e(Ge,Wa),e(Wa,Go),e(Go,dh),e(B,lh),e(B,_e),T(Jo,_e,null),e(_e,ch),e(_e,Ft),e(Ft,ph),e(Ft,Ra),e(Ra,hh),e(Ft,uh),e(Ft,Ba),e(Ba,mh),e(Ft,fh),e(_e,gh),T(tn,_e,null),e(_e,_h),e(_e,Va),e(Va,vh),e(_e,kh),T(Yo,_e,null),Fr=!0},p(n,[p]){const Ko={};p&2&&(Ko.$$scope={dirty:p,ctx:n}),Ut.$set(Ko);const Ga={};p&2&&(Ga.$$scope={dirty:p,ctx:n}),Rt.$set(Ga);const Ja={};p&2&&(Ja.$$scope={dirty:p,ctx:n}),Gt.$set(Ja);const Ya={};p&2&&(Ya.$$scope={dirty:p,ctx:n}),Jt.$set(Ya);const nn={};p&2&&(nn.$$scope={dirty:p,ctx:n}),Kt.$set(nn);const Ka={};p&2&&(Ka.$$scope={dirty:p,ctx:n}),Zt.$set(Ka);const Za={};p&2&&(Za.$$scope={dirty:p,ctx:n}),Qt.$set(Za);const 
Xa={};p&2&&(Xa.$$scope={dirty:p,ctx:n}),tn.$set(Xa)},i(n){Fr||(M(x.$$.fragment,n),M(te.$$.fragment,n),M(gn.$$.fragment,n),M(kn.$$.fragment,n),M(yn.$$.fragment,n),M(En.$$.fragment,n),M(Pn.$$.fragment,n),M(qn.$$.fragment,n),M(Fn.$$.fragment,n),M(Nn.$$.fragment,n),M(Cn.$$.fragment,n),M(In.$$.fragment,n),M(An.$$.fragment,n),M(Sn.$$.fragment,n),M(On.$$.fragment,n),M(Wn.$$.fragment,n),M(Rn.$$.fragment,n),M(Bn.$$.fragment,n),M(Vn.$$.fragment,n),M(Kn.$$.fragment,n),M(Ut.$$.fragment,n),M(Zn.$$.fragment,n),M(Xn.$$.fragment,n),M(Qn.$$.fragment,n),M(oo.$$.fragment,n),M(Rt.$$.fragment,n),M(ro.$$.fragment,n),M(io.$$.fragment,n),M(co.$$.fragment,n),M(po.$$.fragment,n),M(ho.$$.fragment,n),M(uo.$$.fragment,n),M(Gt.$$.fragment,n),M(_o.$$.fragment,n),M(Jt.$$.fragment,n),M(vo.$$.fragment,n),M(ko.$$.fragment,n),M(To.$$.fragment,n),M(Kt.$$.fragment,n),M(xo.$$.fragment,n),M(Zt.$$.fragment,n),M($o.$$.fragment,n),M(jo.$$.fragment,n),M(Eo.$$.fragment,n),M(Ao.$$.fragment,n),M(Qt.$$.fragment,n),M(So.$$.fragment,n),M(Oo.$$.fragment,n),M(Do.$$.fragment,n),M(Jo.$$.fragment,n),M(tn.$$.fragment,n),M(Yo.$$.fragment,n),Fr=!0)},o(n){y(x.$$.fragment,n),y(te.$$.fragment,n),y(gn.$$.fragment,n),y(kn.$$.fragment,n),y(yn.$$.fragment,n),y(En.$$.fragment,n),y(Pn.$$.fragment,n),y(qn.$$.fragment,n),y(Fn.$$.fragment,n),y(Nn.$$.fragment,n),y(Cn.$$.fragment,n),y(In.$$.fragment,n),y(An.$$.fragment,n),y(Sn.$$.fragment,n),y(On.$$.fragment,n),y(Wn.$$.fragment,n),y(Rn.$$.fragment,n),y(Bn.$$.fragment,n),y(Vn.$$.fragment,n),y(Kn.$$.fragment,n),y(Ut.$$.fragment,n),y(Zn.$$.fragment,n),y(Xn.$$.fragment,n),y(Qn.$$.fragment,n),y(oo.$$.fragment,n),y(Rt.$$.fragment,n),y(ro.$$.fragment,n),y(io.$$.fragment,n),y(co.$$.fragment,n),y(po.$$.fragment,n),y(ho.$$.fragment,n),y(uo.$$.fragment,n),y(Gt.$$.fragment,n),y(_o.$$.fragment,n),y(Jt.$$.fragment,n),y(vo.$$.fragment,n),y(ko.$$.fragment,n),y(To.$$.fragment,n),y(Kt.$$.fragment,n),y(xo.$$.fragment,n),y(Zt.$$.fragment,n),y($o.$$.fragment,n),y(jo.$$.fragment,n),y(Eo.$$.fragment,n),y(Ao.$$.fragment,n),y(Qt.$$.fragment,n),y(So.$$.fragment,n),y(Oo.$$.fragment,n),y(Do.$$.fragment,n),y(Jo.$$.fragment,n),y(tn.$$.fragment,n),y(Yo.$$.fragment,n),Fr=!1},d(n){t(u),n&&t(z),n&&t(f),b(x),n&&t(ue),n&&t(E),n&&t(je),n&&t(V),n&&t(Ee),n&&t(I),b(te),n&&t(G),n&&t(q),n&&t(Qa),n&&t(pt),b(gn),n&&t(er),n&&t(Pe),n&&t(tr),n&&t(ut),b(kn),n&&t(nr),n&&t(Je),n&&t(or),n&&t(mt),b(yn),n&&t(sr),n&&t(qe),n&&t(ar),n&&t(Lt),n&&t(rr),b(En,n),n&&t(ir),n&&t(Qo),n&&t(dr),b(Pn,n),n&&t(lr),n&&t(ft),b(qn),n&&t(cr),n&&t(es),n&&t(pr),b(Fn,n),n&&t(hr),n&&t(ts),n&&t(ur),b(Nn,n),n&&t(mr),n&&t(gt),b(Cn),n&&t(fr),n&&t(le),b(In),b(An),n&&t(gr),n&&t(kt),b(Sn),n&&t(_r),n&&t(W),b(On),b(Wn),b(Rn),n&&t(vr),n&&t(Tt),b(Bn),n&&t(kr),n&&t(we),b(Vn),b(Kn),b(Ut),b(Zn),n&&t(Tr),n&&t(yt),b(Xn),n&&t(Mr),n&&t(ze),b(Qn),b(oo),b(Rt),b(ro),n&&t(yr),n&&t(xt),b(io),n&&t(br),n&&t(lo),b(co),b(po),n&&t(xr),n&&t(wt),b(ho),n&&t(wr),n&&t(ce),b(uo),b(Gt),b(_o),b(Jt),b(vo),n&&t(zr),n&&t($t),b(ko),n&&t($r),n&&t(pe),b(To),b(Kt),b(xo),b(Zt),b($o),n&&t(jr),n&&t(Et),b(jo),n&&t(Er),n&&t(R),b(Eo),b(Ao),b(Qt),b(So),n&&t(Pr),n&&t(qt),b(Oo),n&&t(qr),n&&t(B),b(Do),b(Jo),b(tn),b(Yo)}}}const mf={local:"marianmt",sections:[{local:"implementation-notes",title:"Implementation Notes"},{local:"naming",title:"Naming"},{local:"examples",title:"Examples"},{local:"multilingual-models",title:"Multilingual Models"},{local:"old-style-multilingual-models",title:"Old Style Multi-Lingual 
Models"},{local:"transformers.MarianConfig",title:"MarianConfig"},{local:"transformers.MarianTokenizer",title:"MarianTokenizer"},{local:"transformers.MarianModel",title:"MarianModel"},{local:"transformers.MarianMTModel",title:"MarianMTModel"},{local:"transformers.MarianForCausalLM",title:"MarianForCausalLM"},{local:"transformers.TFMarianModel",title:"TFMarianModel"},{local:"transformers.TFMarianMTModel",title:"TFMarianMTModel"},{local:"transformers.FlaxMarianModel",title:"FlaxMarianModel"},{local:"transformers.FlaxMarianMTModel",title:"FlaxMarianMTModel"}],title:"MarianMT"};function ff(C,u,z){let{fw:f}=u;return C.$$set=g=>{"fw"in g&&z(0,f=g.fw)},[f]}class yf extends ef{constructor(u){super();tf(this,u,ff,uf,nf,{fw:0})}}export{yf as default,mf as metadata};
9,982
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/perceiver.mdx-7aec1840.js
import{S as Kb,i as Jb,s as Gb,e as r,k as l,w as u,t as a,L as Xb,c as s,d as o,m as d,a as n,x as f,h as i,b as c,M as Zb,J as e,g as m,y as g,q as v,o as _,B as P}from"../../chunks/vendor-b1433968.js";import{T as jt}from"../../chunks/Tip-c3840994.js";import{D as T}from"../../chunks/Docstring-ff504c58.js";import{C as Mt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as F}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Qb(j){let h,$;return{c(){h=r("p"),$=a(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(w){h=s(w,"P",{});var k=n(h);$=i(k,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),k.forEach(o)},m(w,k){m(w,h,k),e(h,$)},d(w){w&&o(h)}}}function Yb(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function e2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function t2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function o2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the 
recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function r2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function s2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function n2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function a2(j){let h,$,w,k,x;return{c(){h=r("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r("code"),k=a("Module"),x=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){h=s(y,"P",{});var b=n(h);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=s(b,"CODE",{});var E=n(w);k=i(E,"Module"),E.forEach(o),x=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(y,b){m(y,h,b),e(h,$),e(h,w),e(w,k),e(h,x)},d(y){y&&o(h)}}}function i2(j){let 
h,$,w,k,x,y,b,E,ep,Tc,Ie,It,Kn,To,tp,Jn,op,xc,qt,rp,xo,sp,np,Ec,zt,ap,Eo,ip,cp,Fc,As,lp,Cc,Ds,Gn,dp,jc,Ns,pp,Mc,Os,mp,Ic,D,hp,Ls,up,fp,Xn,gp,vp,Zn,_p,Pp,Qn,wp,bp,Yn,yp,kp,qc,Ss,$p,zc,M,Tp,ea,xp,Ep,ta,Fp,Cp,oa,jp,Mp,ra,Ip,qp,sa,zp,Ap,na,Dp,Np,Ac,At,V_,Dc,Fo,Op,Bs,Lp,Nc,ve,Sp,Co,Bp,Hp,jo,Wp,Vp,Oc,Hs,Rp,Lc,Dt,Mo,Up,Io,Kp,Jp,Gp,qo,Xp,zo,Zp,Qp,Sc,Ao,aa,Yp,em,Bc,Ws,_e,tm,ia,om,rm,ca,sm,nm,Do,am,Hc,qe,Nt,la,No,im,da,cm,Wc,ze,Oo,lm,pa,dm,Vc,Ae,Lo,pm,ma,mm,Rc,De,So,hm,ha,um,Uc,Ne,Bo,fm,ua,gm,Kc,Oe,Ot,fa,Ho,vm,ga,_m,Jc,N,Wo,Pm,Le,wm,Vs,bm,ym,Vo,km,$m,Tm,Se,xm,Rs,Em,Fm,Us,Cm,jm,Mm,va,Im,qm,Ro,Gc,Be,Lt,_a,Uo,zm,Pa,Am,Xc,Q,Ko,Dm,wa,Nm,Om,Jo,Lm,Ks,Sm,Bm,Hm,St,Go,Wm,ba,Vm,Zc,He,Bt,ya,Xo,Rm,ka,Um,Qc,Y,Zo,Km,$a,Jm,Gm,Qo,Xm,Js,Zm,Qm,Ym,Pe,Yo,eh,Ta,th,oh,Ht,Yc,We,Wt,xa,er,rh,Ea,sh,el,ae,tr,nh,or,ah,Fa,ih,ch,lh,rr,dh,Ca,ph,mh,tl,Ve,Vt,ja,sr,hh,Ma,uh,ol,ie,nr,fh,Ia,gh,vh,ee,_h,qa,Ph,wh,za,bh,yh,Aa,kh,$h,Da,Th,xh,rl,Re,Rt,Na,ar,Eh,Oa,Fh,sl,Ue,ir,Ch,La,jh,nl,Ke,Ut,Sa,cr,Mh,Ba,Ih,al,Je,lr,qh,Ha,zh,il,Ge,Kt,Wa,dr,Ah,Va,Dh,cl,ce,pr,Nh,Ra,Oh,Lh,Ua,Sh,ll,Xe,Jt,Ka,mr,Bh,Ja,Hh,dl,Ze,hr,Wh,Ga,Vh,pl,Qe,Gt,Xa,ur,Rh,Za,Uh,ml,le,fr,Kh,Qa,Jh,Gh,Ya,Xh,hl,Ye,Xt,ei,gr,Zh,ti,Qh,ul,et,vr,Yh,_r,eu,oi,tu,ou,fl,tt,Zt,ri,Pr,ru,si,su,gl,ot,wr,nu,ni,au,vl,rt,Qt,ai,br,iu,ii,cu,_l,st,yr,lu,kr,du,ci,pu,mu,Pl,nt,Yt,li,$r,hu,di,uu,wl,de,Tr,fu,xr,gu,pi,vu,_u,Pu,mi,wu,bl,at,eo,hi,Er,bu,ui,yu,yl,it,Fr,ku,fi,$u,kl,ct,to,gi,Cr,Tu,vi,xu,$l,lt,jr,Eu,_i,Fu,Tl,dt,oo,Pi,Mr,Cu,wi,ju,xl,pt,Ir,Mu,bi,Iu,El,mt,ro,yi,qr,qu,ki,zu,Fl,ht,zr,Au,$i,Du,Cl,ut,so,Ti,Ar,Nu,xi,Ou,jl,pe,Dr,Lu,Nr,Su,Or,Bu,Hu,Wu,S,Lr,Vu,ft,Ru,Gs,Uu,Ku,Ei,Ju,Gu,Xu,no,Zu,Fi,Qu,Yu,Sr,Ml,gt,ao,Ci,Br,ef,ji,tf,Il,me,Hr,of,Wr,rf,Vr,sf,nf,af,B,Rr,cf,vt,lf,Xs,df,pf,Mi,mf,hf,uf,io,ff,Ii,gf,vf,Ur,ql,_t,co,qi,Kr,_f,zi,Pf,zl,he,Jr,wf,Gr,bf,Xr,yf,kf,$f,H,Zr,Tf,Pt,xf,Zs,Ef,Ff,Ai,Cf,jf,Mf,lo,If,Di,qf,zf,Qr,Al,wt,po,Ni,Yr,Af,Oi,Df,Dl,q,es,Nf,Li,Of,Lf,Si,Sf,Bf,W,Qs,Hf,Wf,Ys,Vf,Rf,Bi,Uf,Kf,en,Jf,Gf,tn,Xf,Zf,Qf,ts,Yf,os,eg,tg,og,V,rs,rg,bt,sg,on,ng,ag,Hi,ig,cg,lg,mo,dg,Wi,pg,mg,ss,Nl,yt,ho,Vi,ns,hg,Ri,ug,Ol,z,as,fg,Ui,gg,vg,Ki,_g,Pg,R,rn,wg,bg,sn,yg,kg,Ji,$g,Tg,nn,xg,Eg,an,Fg,Cg,jg,is,Mg,cs,Ig,qg,zg,U,ls,Ag,kt,Dg,cn,Ng,Og,Gi,Lg,Sg,Bg,uo,Hg,Xi,Wg,Vg,ds,Ll,$t,fo,Zi,ps,Rg,Qi,Ug,Sl,A,ms,Kg,Yi,Jg,Gg,ec,Xg,Zg,K,ln,Qg,Yg,dn,ev,tv,tc,ov,rv,pn,sv,nv,mn,av,iv,cv,hs,lv,us,dv,pv,mv,J,fs,hv,Tt,uv,hn,fv,gv,oc,vv,_v,Pv,go,wv,rc,bv,yv,gs,Bl,xt,vo,sc,vs,kv,nc,$v,Hl,O,_s,Tv,L,xv,un,Ev,Fv,fn,Cv,jv,ac,Mv,Iv,gn,qv,zv,vn,Av,Dv,Nv,ic,Ov,Lv,Ps,Sv,ws,Bv,Hv,Wv,G,bs,Vv,Et,Rv,_n,Uv,Kv,cc,Jv,Gv,Xv,_o,Zv,lc,Qv,Yv,ys,Wl,Ft,Po,dc,ks,e_,pc,t_,Vl,C,$s,o_,mc,r_,s_,wo,Pn,n_,a_,wn,i_,c_,l_,we,bn,d_,p_,yn,m_,h_,kn,u_,f_,g_,bo,$n,v_,__,Tn,P_,w_,b_,Ts,y_,hc,k_,$_,T_,uc,x_,E_,xs,F_,Es,C_,j_,M_,X,Fs,I_,Ct,q_,xn,z_,A_,fc,D_,N_,O_,yo,L_,gc,S_,B_,Cs,Rl;return y=new F({}),To=new F({}),No=new F({}),Oo=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput",parameters:[{name:"logits",val:": FloatTensor = None"},{name:"last_hidden_state",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L66",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}]}}),Lo=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput",parameters:[{name:"logits",val:": FloatTensor = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L95",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_labels)</code>) &#x2014; Output of the basic decoder.`,name:"logits"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}]}}),So=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L112",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.`,name:"loss"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, num_latents, num_latents)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}]}}),Bo=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L141",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}]}}),Ho=new F({}),Wo=new T({props:{name:"class transformers.PerceiverConfig",anchor:"transformers.PerceiverConfig",parameters:[{name:"num_latents",val:" = 256"},{name:"d_latents",val:" = 1280"},{name:"d_model",val:" = 768"},{name:"num_blocks",val:" = 1"},{name:"num_self_attends_per_block",val:" = 26"},{name:"num_self_attention_heads",val:" = 8"},{name:"num_cross_attention_heads",val:" = 8"},{name:"qk_channels",val:" = None"},{name:"v_channels",val:" = None"},{name:"cross_attention_shape_for_attention",val:" = 'kv'"},{name:"self_attention_widening_factor",val:" = 1"},{name:"cross_attention_widening_factor",val:" = 1"},{name:"hidden_act",val:" = 'gelu'"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"position_embedding_init_scale",val:" = 0.02"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"is_encoder_decoder",val:" = False"},{name:"use_query_residual",val:" = True"},{name:"vocab_size",val:" = 262"},{name:"max_position_embeddings",val:" = 2048"},{name:"image_size",val:" = 56"},{name:"train_size",val:" = [368, 496]"},{name:"num_frames",val:" = 16"},{name:"audio_samples_per_frame",val:" = 1920"},{name:"samples_per_patch",val:" = 16"},{name:"output_shape",val:" = [1, 16, 224, 224]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/configuration_perceiver.py#L29",parametersDescription:[{anchor:"transformers.PerceiverConfig.num_latents",description:`<strong>num_latents</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The number of latents.`,name:"num_latents"},{anchor:"transformers.PerceiverConfig.d_latents",description:`<strong>d_latents</strong> (<code>int</code>, <em>optional</em>, defaults to 1280) &#x2014; Dimension of the latent embeddings.`,name:"d_latents"},{anchor:"transformers.PerceiverConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the inputs. 
Should only be provided in case [<em>PerceiverTextPreprocessor</em>] is used or no preprocessor is provided.`,name:"d_model"},{anchor:"transformers.PerceiverConfig.num_blocks",description:`<strong>num_blocks</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of blocks in the Transformer encoder.`,name:"num_blocks"},{anchor:"transformers.PerceiverConfig.num_self_attends_per_block",description:`<strong>num_self_attends_per_block</strong> (<code>int</code>, <em>optional</em>, defaults to 26) &#x2014; The number of self-attention layers per block.`,name:"num_self_attends_per_block"},{anchor:"transformers.PerceiverConfig.num_self_attention_heads",description:`<strong>num_self_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each self-attention layer in the Transformer encoder.`,name:"num_self_attention_heads"},{anchor:"transformers.PerceiverConfig.num_cross_attention_heads",description:`<strong>num_cross_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each cross-attention layer in the Transformer encoder.`,name:"num_cross_attention_heads"},{anchor:"transformers.PerceiverConfig.qk_channels",description:`<strong>qk_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimension to project the queries + keys before applying attention in the cross-attention and self-attention layers of the encoder. Will default to preserving the dimension of the queries if not specified.`,name:"qk_channels"},{anchor:"transformers.PerceiverConfig.v_channels",description:`<strong>v_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimension to project the values before applying attention in the cross-attention and self-attention layers of the encoder. Will default to preserving the dimension of the queries if not specified.`,name:"v_channels"},{anchor:"transformers.PerceiverConfig.cross_attention_shape_for_attention",description:`<strong>cross_attention_shape_for_attention</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&apos;kv&apos;</code>) &#x2014; Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.`,name:"cross_attention_shape_for_attention"},{anchor:"transformers.PerceiverConfig.self_attention_widening_factor",description:`<strong>self_attention_widening_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.`,name:"self_attention_widening_factor"},{anchor:"transformers.PerceiverConfig.cross_attention_widening_factor",description:`<strong>cross_attention_widening_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.`,name:"cross_attention_widening_factor"},{anchor:"transformers.PerceiverConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.PerceiverConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.PerceiverConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.PerceiverConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.PerceiverConfig.use_query_residual",description:`<strong>use_query_residual</strong> (<code>float</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to add a query residual in the cross-attention layer of the encoder.`,name:"use_query_residual"},{anchor:"transformers.PerceiverConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 262) &#x2014; Vocabulary size for the masked language modeling model.`,name:"vocab_size"},{anchor:"transformers.PerceiverConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sequence length that the masked language modeling model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.PerceiverConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 56) &#x2014; Size of the images after preprocessing, for <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a>.`,name:"image_size"},{anchor:"transformers.PerceiverConfig.train_size",description:`<strong>train_size</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [368, 496]) &#x2014; Training size of the images for the optical flow model.`,name:"train_size"},{anchor:"transformers.PerceiverConfig.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of video frames used for the multimodal autoencoding model.`,name:"num_frames"},{anchor:"transformers.PerceiverConfig.audio_samples_per_frame",description:`<strong>audio_samples_per_frame</strong> (<code>int</code>, <em>optional</em>, defaults to 1920) &#x2014; Number of audio samples per frame for the multimodal autoencoding model.`,name:"audio_samples_per_frame"},{anchor:"transformers.PerceiverConfig.samples_per_patch",description:`<strong>samples_per_patch</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.`,name:"samples_per_patch"},{anchor:"transformers.PerceiverConfig.output_shape",description:`<strong>output_shape</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[1, 16, 224, 224]</code>) &#x2014; Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal autoencoding model. 
This excludes the channel dimension.`,name:"output_shape"}]}}),Ro=new Mt({props:{code:`from transformers import PerceiverModel, PerceiverConfig # Initializing a Perceiver deepmind/language-perceiver style configuration configuration = PerceiverConfig() # Initializing a model from the deepmind/language-perceiver style configuration model = PerceiverModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverModel, PerceiverConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Perceiver deepmind/language-perceiver style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = PerceiverConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the deepmind/language-perceiver style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Uo=new F({}),Ko=new T({props:{name:"class transformers.PerceiverTokenizer",anchor:"transformers.PerceiverTokenizer",parameters:[{name:"pad_token",val:" = '[PAD]'"},{name:"bos_token",val:" = '[BOS]'"},{name:"eos_token",val:" = '[EOS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"model_max_length",val:" = 2048"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/tokenization_perceiver.py#L27",parametersDescription:[{anchor:"transformers.PerceiverTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.PerceiverTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[BOS]&quot;</code>) &#x2014; The BOS token (reserved in the vocab, but not actually used).`,name:"bos_token"},{anchor:"transformers.PerceiverTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[EOS]&quot;</code>) &#x2014; The end of sequence token (reserved in the vocab, but not actually used).</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.PerceiverTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The MASK token, useful for masked language modeling.`,name:"mask_token"},{anchor:"transformers.PerceiverTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The CLS token (reserved in the vocab, but not actually used).`,name:"cls_token"},{anchor:"transformers.PerceiverTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from two sequences.`,name:"sep_token"}]}}),Go=new T({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2334",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),Xo=new F({}),Zo=new T({props:{name:"class transformers.PerceiverFeatureExtractor",anchor:"transformers.PerceiverFeatureExtractor",parameters:[{name:"do_center_crop",val:" = True"},{name:"crop_size",val:" = 256"},{name:"do_resize",val:" = True"},{name:"size",val:" = 224"},{name:"resample",val:" = 3"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/feature_extraction_perceiver.py#L37",parametersDescription:[{anchor:"transformers.PerceiverFeatureExtractor.do_center_crop",description:`<strong>do_center_crop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to crop the input at the center. If the input size is smaller than <code>crop_size</code> along any edge, the image is padded with 0&#x2019;s and then center cropped.`,name:"do_center_crop"},{anchor:"transformers.PerceiverFeatureExtractor.crop_size",description:`<strong>crop_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Desired output size when applying center-cropping. 
Only has an effect if <code>do_center_crop</code> is set to <code>True</code>.`,name:"crop_size"},{anchor:"transformers.PerceiverFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.PerceiverFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.PerceiverFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.PerceiverFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with <code>image_mean</code> and <code>image_std</code>.`,name:"do_normalize"},{anchor:"transformers.PerceiverFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.`,name:"image_mean"},{anchor:"transformers.PerceiverFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.`,name:"image_std"}]}}),Yo=new T({props:{name:"__call__",anchor:"transformers.PerceiverFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/feature_extraction_perceiver.py#L122",parametersDescription:[{anchor:"transformers.PerceiverFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.PerceiverFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),Ht=new jt({props:{warning:"&lcub;true}",$$slots:{default:[Qb]},$$scope:{ctx:j}}}),er=new F({}),tr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2753",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.`,name:"config"}]}}),sr=new F({}),nr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor",parameters:[{name:"config",val:""},{name:"prep_type",val:" = 'conv'"},{name:"spatial_downsample",val:": int = 4"},{name:"temporal_downsample",val:": int = 1"},{name:"position_encoding_type",val:": str = 'fourier'"},{name:"in_channels",val:": int = 3"},{name:"out_channels",val:": int = 64"},{name:"conv_after_patching",val:": bool = False"},{name:"conv_after_patching_in_channels",val:": int = 54"},{name:"conv2d_use_batchnorm",val:": bool = True"},{name:"concat_or_add_pos",val:": str = 'concat'"},{name:"project_pos_dim",val:": int = -1"},{name:"**position_encoding_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2910",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.prep_type",description:`<strong>prep_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;conv&quot;</code>) &#x2014; Preprocessing type. 
Can be &#x201C;conv1x1&#x201D;, &#x201C;conv&#x201D;, &#x201C;patches&#x201D;, &#x201C;pixels&#x201D;.`,name:"prep_type"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.spatial_downsample",description:`<strong>spatial_downsample</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; Spatial downsampling factor.`,name:"spatial_downsample"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.temporal_downsample",description:`<strong>temporal_downsample</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Temporal downsampling factor (only relevant in case a time dimension is present).`,name:"temporal_downsample"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.position_encoding_type",description:`<strong>position_encoding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;fourier&quot;</code>) &#x2014; Position encoding type. Can be &#x201C;fourier&#x201D; or &#x201C;trainable&#x201D;.`,name:"position_encoding_type"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.in_channels",description:`<strong>in_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of channels in the input.`,name:"in_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.out_channels",description:`<strong>out_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Number of channels in the output.`,name:"out_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv_after_patching",description:`<strong>conv_after_patching</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply a convolutional layer after patching.`,name:"conv_after_patching"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv_after_patching_in_channels",description:`<strong>conv_after_patching_in_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 54) &#x2014; Number of channels in the input of the convolutional layer after patching.`,name:"conv_after_patching_in_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv2d_use_batchnorm",description:`<strong>conv2d_use_batchnorm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use batch normalization in the convolutional layer.`,name:"conv2d_use_batchnorm"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.concat_or_add_pos",description:`<strong>concat_or_add_pos</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;concat&quot;</code>) &#x2014; How to concatenate the position encoding to the input. Can be &#x201C;concat&#x201D; or &#x201C;add&#x201D;.`,name:"concat_or_add_pos"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.project_pos_dim",description:`<strong>project_pos_dim</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Dimension of the position encoding to project to. 
If -1, no projection is applied.`,name:"project_pos_dim"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.*position_encoding_kwargs",description:`*<strong>*position_encoding_kwargs</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; Keyword arguments for the position encoding.`,name:"*position_encoding_kwargs"}]}}),ar=new F({}),ir=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L3139",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.`,name:"config"}]}}),cr=new F({}),lr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor",parameters:[{name:"config",val:""},{name:"prep_type",val:": str = 'patches'"},{name:"samples_per_patch",val:": int = 96"},{name:"position_encoding_type",val:": str = 'fourier'"},{name:"concat_or_add_pos",val:": str = 'concat'"},{name:"out_channels",val:" = 64"},{name:"project_pos_dim",val:" = -1"},{name:"**position_encoding_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L3165",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.prep_type",description:`<strong>prep_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;patches&quot;</code>) &#x2014; Preprocessor type to use. Only &#x201C;patches&#x201D; is supported.`,name:"prep_type"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.samples_per_patch",description:`<strong>samples_per_patch</strong> (<code>int</code>, <em>optional</em>, defaults to 96) &#x2014; Number of samples per patch.`,name:"samples_per_patch"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.position_encoding_type",description:`<strong>position_encoding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;fourier&quot;</code>) &#x2014; Type of position encoding to use. Can be &#x201C;trainable&#x201D; or &#x201C;fourier&#x201D;.`,name:"position_encoding_type"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.concat_or_add_pos",description:`<strong>concat_or_add_pos</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;concat&quot;</code>) &#x2014; How to concatenate the position encoding to the input. 
Can be &#x201C;concat&#x201D; or &#x201C;add&#x201D;.`,name:"concat_or_add_pos"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.out_channels",description:`<strong>out_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Number of channels in the output.`,name:"out_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.project_pos_dim",description:`<strong>project_pos_dim</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Dimension of the position encoding to project to. If -1, no projection is applied.`,name:"project_pos_dim"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.*position_encoding_kwargs",description:`*<strong>*position_encoding_kwargs</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; Keyword arguments for the position encoding.`,name:"*position_encoding_kwargs"}]}}),dr=new F({}),pr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor",parameters:[{name:"modalities",val:": typing.Mapping[str, typing.Callable[..., typing.Tuple[torch.Tensor, typing.Optional[torch.Tensor], torch.Tensor]]]"},{name:"mask_probs",val:": typing.Union[typing.Mapping[str, float], NoneType] = None"},{name:"min_padding_size",val:": int = 2"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L3262",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor.modalities",description:`<strong>modalities</strong> (<code>Dict[str, PreprocessorType]</code>) &#x2014; Dict mapping modality name to preprocessor.`,name:"modalities"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor.mask_probs",description:`<strong>mask_probs</strong> (<code>Dict[str, float]</code>) &#x2014; Dict mapping modality name to masking probability of that modality.`,name:"mask_probs"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor.min_padding_size",description:`<strong>min_padding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The minimum padding size for all modalities. 
The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size.`,name:"min_padding_size"}]}}),mr=new F({}),hr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1997",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.`,name:"config"}]}}),ur=new F({}),fr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder",parameters:[{name:"config",val:""},{name:"output_num_channels",val:""},{name:"position_encoding_type",val:" = 'trainable'"},{name:"output_index_dims",val:" = None"},{name:"num_channels",val:" = 128"},{name:"subsampled_index_dims",val:" = None"},{name:"qk_channels",val:" = None"},{name:"v_channels",val:" = None"},{name:"num_heads",val:" = 1"},{name:"widening_factor",val:" = 1"},{name:"use_query_residual",val:" = False"},{name:"concat_preprocessed_input",val:" = False"},{name:"final_project",val:" = True"},{name:"position_encoding_only",val:" = False"},{name:"**position_encoding_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2021",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.output_num_channels",description:`<strong>output_num_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of channels in the output. Will only be used in case <em>final_project</em> is set to <em>True</em>.`,name:"output_num_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.position_encoding_type",description:`<strong>position_encoding_type</strong> (<code>str</code>, <em>optional</em>, defaults to &#x201C;trainable&#x201D;) &#x2014; The type of position encoding to use. Can be either &#x201C;trainable&#x201D;, &#x201C;fourier&#x201D;, or &#x201C;none&#x201D;.`,name:"position_encoding_type"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.output_index_dims",description:`<strong>output_index_dims</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of dimensions of the output queries. Ignored if &#x2018;position_encoding_type&#x2019; == &#x2018;none&#x2019;.`,name:"output_index_dims"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of channels of the decoder queries. 
Ignored if &#x2018;position_encoding_type&#x2019; == &#x2018;none&#x2019;.`,name:"num_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.qk_channels",description:`<strong>qk_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of channels of the queries and keys in the cross-attention layer.`,name:"qk_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.v_channels",description:`<strong>v_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The number of channels of the values in the cross-attention layer.`,name:"v_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.num_heads",description:`<strong>num_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of attention heads in the cross-attention layer.`,name:"num_heads"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.widening_factor",description:`<strong>widening_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The widening factor of the cross-attention layer.`,name:"widening_factor"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.use_query_residual",description:`<strong>use_query_residual</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a residual connection between the query and the output of the cross-attention layer.`,name:"use_query_residual"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.concat_preprocessed_input",description:`<strong>concat_preprocessed_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to concatenate the preprocessed input to the query.`,name:"concat_preprocessed_input"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.final_project",description:`<strong>final_project</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to project the output of the cross-attention layer to a target dimension.`,name:"final_project"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.position_encoding_only",description:`<strong>position_encoding_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to only use this class to define output queries.`,name:"position_encoding_only"}]}}),gr=new F({}),vr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder",parameters:[{name:"config",val:""},{name:"**decoder_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2201",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.`,name:"config"}]}}),Pr=new F({}),wr=new T({props:{name:"class 
transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder",parameters:[{name:"config",val:""},{name:"output_image_shape",val:""},{name:"output_num_channels",val:" = 2"},{name:"rescale_factor",val:" = 100.0"},{name:"**decoder_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2241"}}),br=new F({}),yr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder",parameters:[{name:"config",val:""},{name:"output_shape",val:""},{name:"position_encoding_type",val:""},{name:"**decoder_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2270",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.output_shape",description:`<strong>output_shape</strong> (<code>List[int]</code>) &#x2014; Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.`,name:"output_shape"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.position_encoding_type",description:`<strong>position_encoding_type</strong> (<code>str</code>) &#x2014; The type of position encoding to use. Can be either &#x201C;trainable&#x201D;, &#x201C;fourier&#x201D;, or &#x201C;none&#x201D;.`,name:"position_encoding_type"}]}}),$r=new F({}),Tr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder",parameters:[{name:"config",val:""},{name:"modalities",val:""},{name:"num_outputs",val:""},{name:"output_num_channels",val:""},{name:"min_padding_size",val:" = 2"},{name:"subsampled_index_dims",val:" = None"},{name:"**decoder_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2343",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.modalities",description:`<strong>modalities</strong> (<code>Dict[str, PerceiverAbstractDecoder]</code>) &#x2014; Dictionary mapping modality name to the decoder of that modality.`,name:"modalities"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.num_outputs",description:`<strong>num_outputs</strong> (<code>int</code>) &#x2014; The number of outputs of the decoder.`,name:"num_outputs"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.output_num_channels",description:`<strong>output_num_channels</strong> (<code>int</code>) &#x2014; The number of channels in the 
output.`,name:"output_num_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.min_padding_size",description:`<strong>min_padding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The minimum padding size for all modalities. The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size.`,name:"min_padding_size"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.subsampled_index_dims",description:`<strong>subsampled_index_dims</strong> (<code>Dict[str, PerceiverAbstractDecoder]</code>, <em>optional</em>) &#x2014; Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that modality.`,name:"subsampled_index_dims"}]}}),Er=new F({}),Fr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor",parameters:[{name:"in_channels",val:""},{name:"out_channels",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2889",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor.in_channels",description:`<strong>in_channels</strong> (<code>int</code>) &#x2014; Number of channels in the input.`,name:"in_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor.out_channels",description:`<strong>out_channels</strong> (<code>int</code>) &#x2014; Number of channels in the output.`,name:"out_channels"}]}}),Cr=new F({}),jr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor",parameters:[{name:"config",val:""},{name:"in_channels",val:""},{name:"postproc_type",val:": str = 'patches'"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2861",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.in_channels",description:`<strong>in_channels</strong> (<code>int</code>) &#x2014; Number of channels in the input.`,name:"in_channels"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.postproc_type",description:`<strong>postproc_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;patches&quot;</code>) &#x2014; Postprocessor type to use. 
Currently, only &#x201C;patches&#x201D; is supported.`,name:"postproc_type"}]}}),Mr=new F({}),Ir=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor",parameters:[{name:"config",val:""},{name:"in_channels",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2841",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor.config",description:`<strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.`,name:"config"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor.in_channels",description:`<strong>in_channels</strong> (<code>int</code>) &#x2014; Number of channels in the input.`,name:"in_channels"}]}}),qr=new F({}),zr=new T({props:{name:"class transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor",anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor",parameters:[{name:"modalities",val:": typing.Mapping[str, typing.Callable[..., typing.Any]]"},{name:"input_is_dict",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L2807",parametersDescription:[{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor.modalities",description:`<strong>modalities</strong> (<code>Dict[str, PostprocessorType]</code>) &#x2014; Dictionary mapping modality name to postprocessor class for that modality.`,name:"modalities"},{anchor:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor.input_is_dict",description:`<strong>input_is_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If False, input is a tensor which is sliced up during postprocessing by <em>modality_sizes</em>.`,name:"input_is_dict"}]}}),Ar=new F({}),Dr=new T({props:{name:"class transformers.PerceiverModel",anchor:"transformers.PerceiverModel",parameters:[{name:"config",val:""},{name:"decoder",val:" = None"},{name:"input_preprocessor",val:": typing.Callable[..., typing.Tuple[torch.Tensor, typing.Optional[torch.Tensor], torch.Tensor]] = None"},{name:"output_postprocessor",val:": typing.Callable[..., typing.Any] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L716",parametersDescription:[{anchor:"transformers.PerceiverModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.PerceiverModel.decoder",description:`<strong>decoder</strong> (<em>DecoderType</em>, <em>optional</em>) &#x2014; Optional decoder to use to decode the latent representation of the encoder. 
Examples include <em>transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder</em>.`,name:"decoder"},{anchor:"transformers.PerceiverModel.input_preprocessor",description:`<strong>input_preprocessor</strong> (<em>PreprocessorType</em>, <em>optional</em>) &#x2014; Optional input preprocessor to use. Examples include <em>transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor</em>.`,name:"input_preprocessor"},{anchor:"transformers.PerceiverModel.output_postprocessor",description:`<strong>output_postprocessor</strong> (<em>PostprocessorType</em>, <em>optional</em>) &#x2014; Optional output postprocessor to use. Examples include <em>transformers.models.perceiver.modeling_perceiver.PerceiverImagePostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor</em>.`,name:"output_postprocessor"},{anchor:"transformers.PerceiverModel.Note",description:"<strong>Note</strong> that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case. &#x2014;",name:"Note"}]}}),Lr=new T({props:{name:"forward",anchor:"transformers.PerceiverModel.forward",parameters:[{name:"inputs",val:""},{name:"attention_mask",val:" = None"},{name:"subsampled_output_points",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L752",parametersDescription:[{anchor:"transformers.PerceiverModel.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),no=new jt({props:{$$slots:{default:[Yb]},$$scope:{ctx:j}}}),Sr=new Mt({props:{code:`from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverFeatureExtractor, PerceiverModel from transformers.models.perceiver.modeling_perceiver import PerceiverTextPreprocessor, PerceiverImagePreprocessor, PerceiverClassificationDecoder import torch import requests from PIL import Image # EXAMPLE 1: using the Perceiver to classify texts # - we define a TextPreprocessor, which can be used to embed tokens # - we define a ClassificationDecoder, which can be used to decode the # final hidden states of the latents to classification logits # using trainable position embeddings config = PerceiverConfig() preprocessor = PerceiverTextPreprocessor(config) decoder = PerceiverClassificationDecoder(config, num_channels=config.d_latents, trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1), use_query_residual=True) model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder) # you can then do a forward pass as follows: tokenizer = PerceiverTokenizer() text = "hello world" inputs = tokenizer(text, return_tensors="pt").input_ids with torch.no_grad(): outputs = model(inputs=inputs) logits = outputs.logits # to train, one can train the model using standard cross-entropy: criterion = torch.nn.CrossEntropyLoss() labels = torch.tensor([1]) loss = criterion(logits, labels) # EXAMPLE 2: using the Perceiver to classify images # - we define an ImagePreprocessor, which can be used to embed images preprocessor=PerceiverImagePreprocessor( config, prep_type="conv1x1", spatial_downsample=1, out_channels=256, position_encoding_type="trainable", concat_or_add_pos="concat", project_pos_dim=256, trainable_position_encoding_kwargs=dict(num_channels=256, index_dims=config.image_size ** 2, ), ) model = PerceiverModel( config, input_preprocessor=preprocessor, decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1), use_query_residual=True, ), ) # you can then do a forward pass as follows: feature_extractor = PerceiverFeatureExtractor() url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) inputs = feature_extractor(image, return_tensors="pt").pixel_values with torch.no_grad(): outputs = model(inputs=inputs) logits = outputs.logits # to train, one can train the model using standard cross-entropy: criterion = torch.nn.CrossEntropyLoss() labels = torch.tensor([1]) loss = criterion(logits, labels),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverConfig, PerceiverTokenizer, PerceiverFeatureExtractor, PerceiverModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.perceiver.modeling_perceiver <span class="hljs-keyword">import</span> PerceiverTextPreprocessor, PerceiverImagePreprocessor, PerceiverClassificationDecoder 
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># EXAMPLE 1: using the Perceiver to classify texts</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># - we define a TextPreprocessor, which can be used to embed tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># - we define a ClassificationDecoder, which can be used to decode the</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># final hidden states of the latents to classification logits</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># using trainable position embeddings</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = PerceiverConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>preprocessor = PerceiverTextPreprocessor(config) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = PerceiverClassificationDecoder(config, <span class="hljs-meta">... </span> num_channels=config.d_latents, <span class="hljs-meta">... </span> trainable_position_encoding_kwargs=<span class="hljs-built_in">dict</span>(num_channels=config.d_latents, index_dims=<span class="hljs-number">1</span>), <span class="hljs-meta">... </span> use_query_residual=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can then do a forward pass as follows:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PerceiverTokenizer() <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">&gt;&gt;&gt; </span> outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># to train, one can train the model using standard cross-entropy:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>criterion = torch.nn.CrossEntropyLoss() <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = criterion(logits, labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># EXAMPLE 2: using the Perceiver to classify images</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># - we define an ImagePreprocessor, which can be used to embed images</span> <span class="hljs-meta">&gt;&gt;&gt; </span>preprocessor=PerceiverImagePreprocessor( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... </span> prep_type=<span class="hljs-string">&quot;conv1x1&quot;</span>, <span class="hljs-meta">... </span> spatial_downsample=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> out_channels=<span class="hljs-number">256</span>, <span class="hljs-meta">... 
</span> position_encoding_type=<span class="hljs-string">&quot;trainable&quot;</span>, <span class="hljs-meta">... </span> concat_or_add_pos=<span class="hljs-string">&quot;concat&quot;</span>, <span class="hljs-meta">... </span> project_pos_dim=<span class="hljs-number">256</span>, <span class="hljs-meta">... </span> trainable_position_encoding_kwargs=<span class="hljs-built_in">dict</span>(num_channels=<span class="hljs-number">256</span>, index_dims=config.image_size ** <span class="hljs-number">2</span>, <span class="hljs-meta">... </span> ), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverModel( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... </span> input_preprocessor=preprocessor, <span class="hljs-meta">... </span> decoder=PerceiverClassificationDecoder( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... </span> num_channels=config.d_latents, <span class="hljs-meta">... </span> trainable_position_encoding_kwargs=<span class="hljs-built_in">dict</span>(num_channels=config.d_latents, index_dims=<span class="hljs-number">1</span>), <span class="hljs-meta">... </span> use_query_residual=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> ), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can then do a forward pass as follows:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">&gt;&gt;&gt; </span> outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># to train, one can train the model using standard cross-entropy:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>criterion = torch.nn.CrossEntropyLoss() <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = criterion(logits, labels)`}}),Br=new F({}),Hr=new T({props:{name:"class transformers.PerceiverForMaskedLM",anchor:"transformers.PerceiverForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L941",parametersDescription:[{anchor:"transformers.PerceiverForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Rr=new T({props:{name:"forward",anchor:"transformers.PerceiverForMaskedLM.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"},{name:"input_ids",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L972",parametersDescription:[{anchor:"transformers.PerceiverForMaskedLM.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, num_latents, num_latents)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),io=new jt({props:{$$slots:{default:[e2]},$$scope:{ctx:j}}}),Ur=new Mt({props:{code:`from transformers import PerceiverTokenizer, PerceiverForMaskedLM import torch tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver') model = PerceiverForMaskedLM.from_pretrained('deepmind/language-perceiver') # training text = "This is an incomplete sentence where some words are missing." inputs = tokenizer(text, padding="max_length", return_tensors="pt") # mask " missing." 
inputs['input_ids'][0, 52:61] = tokenizer.mask_token_id labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits # inference text = "This is an incomplete sentence where some words are missing." encoding = tokenizer(text, padding="max_length", return_tensors="pt") # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space. encoding['input_ids'][0, 52:61] = tokenizer.mask_token_id # forward pass with torch.no_grad(): outputs = model(**encoding) logits = outputs.logits masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist() tokenizer.decode(masked_tokens_predictions),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverTokenizer, PerceiverForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PerceiverTokenizer.from_pretrained(<span class="hljs-string">&#x27;deepmind/language-perceiver&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;deepmind/language-perceiver&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;This is an incomplete sentence where some words are missing.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># mask &quot; missing.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&#x27;input_ids&#x27;</span>][<span class="hljs-number">0</span>, <span class="hljs-number">52</span>:<span class="hljs-number">61</span>] = tokenizer.mask_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(text, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;This is an incomplete sentence where some words are missing.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(text, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># mask bytes corresponding to &quot; missing.&quot;. 
Note that the model performs much better if the masked span starts with a space.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding[<span class="hljs-string">&#x27;input_ids&#x27;</span>][<span class="hljs-number">0</span>, <span class="hljs-number">52</span>:<span class="hljs-number">61</span>] = tokenizer.mask_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">&gt;&gt;&gt; </span> outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>masked_tokens_predictions = logits[<span class="hljs-number">0</span>, <span class="hljs-number">52</span>:<span class="hljs-number">61</span>].argmax(dim=-<span class="hljs-number">1</span>).tolist() <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(masked_tokens_predictions) <span class="hljs-string">&#x27; missing.&#x27;</span>`}}),Kr=new F({}),Jr=new T({props:{name:"class transformers.PerceiverForSequenceClassification",anchor:"transformers.PerceiverForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1067",parametersDescription:[{anchor:"transformers.PerceiverForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Zr=new T({props:{name:"forward",anchor:"transformers.PerceiverForSequenceClassification.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"},{name:"input_ids",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1088",parametersDescription:[{anchor:"transformers.PerceiverForSequenceClassification.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),lo=new jt({props:{$$slots:{default:[t2]},$$scope:{ctx:j}}}),Qr=new Mt({props:{code:`from transformers import PerceiverTokenizer, PerceiverForSequenceClassification tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver') model = PerceiverForSequenceClassification.from_pretrained('deepmind/language-perceiver') text = "hello world" inputs = tokenizer(text, return_tensors="pt").input_ids outputs = model(inputs=inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverTokenizer, PerceiverForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PerceiverTokenizer.from_pretrained(<span class="hljs-string">&#x27;deepmind/language-perceiver&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;deepmind/language-perceiver&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Yr=new F({}),es=new T({props:{name:"class transformers.PerceiverForImageClassificationLearned",anchor:"transformers.PerceiverForImageClassificationLearned",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1190",parametersDescription:[{anchor:"transformers.PerceiverForImageClassificationLearned.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rs=new T({props:{name:"forward",anchor:"transformers.PerceiverForImageClassificationLearned.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"},{name:"pixel_values",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1221",parametersDescription:[{anchor:"transformers.PerceiverForImageClassificationLearned.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver.
Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForImageClassificationLearned.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForImageClassificationLearned.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForImageClassificationLearned.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForImageClassificationLearned.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForImageClassificationLearned.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForImageClassificationLearned.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),mo=new jt({props:{$$slots:{default:[o2]},$$scope:{ctx:j}}}),ss=new Mt({props:{code:`from transformers import PerceiverFeatureExtractor, PerceiverForImageClassificationLearned from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = PerceiverFeatureExtractor.from_pretrained('deepmind/vision-perceiver-learned') model = PerceiverForImageClassificationLearned.from_pretrained('deepmind/vision-perceiver-learned') inputs = feature_extractor(images=image, return_tensors="pt").pixel_values outputs = model(inputs=inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverFeatureExtractor, PerceiverForImageClassificationLearned <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;deepmind/vision-perceiver-learned&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForImageClassificationLearned.from_pretrained(<span class="hljs-string">&#x27;deepmind/vision-perceiver-learned&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),ns=new F({}),as=new T({props:{name:"class 
transformers.PerceiverForImageClassificationFourier",anchor:"transformers.PerceiverForImageClassificationFourier",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1329",parametersDescription:[{anchor:"transformers.PerceiverForImageClassificationFourier.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ls=new T({props:{name:"forward",anchor:"transformers.PerceiverForImageClassificationFourier.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"},{name:"pixel_values",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1358",parametersDescription:[{anchor:"transformers.PerceiverForImageClassificationFourier.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForImageClassificationFourier.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForImageClassificationFourier.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForImageClassificationFourier.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForImageClassificationFourier.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForImageClassificationFourier.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForImageClassificationFourier.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),uo=new jt({props:{$$slots:{default:[r2]},$$scope:{ctx:j}}}),ds=new Mt({props:{code:`from transformers import PerceiverFeatureExtractor, PerceiverForImageClassificationFourier from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = PerceiverFeatureExtractor.from_pretrained('deepmind/vision-perceiver-fourier') model = PerceiverForImageClassificationFourier.from_pretrained('deepmind/vision-perceiver-fourier') inputs = feature_extractor(images=image, return_tensors="pt").pixel_values outputs = model(inputs=inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverFeatureExtractor, PerceiverForImageClassificationFourier <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;deepmind/vision-perceiver-fourier&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForImageClassificationFourier.from_pretrained(<span class="hljs-string">&#x27;deepmind/vision-perceiver-fourier&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),ps=new F({}),ms=new T({props:{name:"class 
transformers.PerceiverForImageClassificationConvProcessing",anchor:"transformers.PerceiverForImageClassificationConvProcessing",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1465",parametersDescription:[{anchor:"transformers.PerceiverForImageClassificationConvProcessing.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fs=new T({props:{name:"forward",anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"},{name:"pixel_values",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1495",parametersDescription:[{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForImageClassificationConvProcessing.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),go=new jt({props:{$$slots:{default:[s2]},$$scope:{ctx:j}}}),gs=new Mt({props:{code:`from transformers import PerceiverFeatureExtractor, PerceiverForImageClassificationConvProcessing from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = PerceiverFeatureExtractor.from_pretrained('deepmind/vision-perceiver-conv') model = PerceiverForImageClassificationConvProcessing.from_pretrained('deepmind/vision-perceiver-conv') inputs = feature_extractor(images=image, return_tensors="pt").pixel_values outputs = model(inputs=inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverFeatureExtractor, PerceiverForImageClassificationConvProcessing <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;deepmind/vision-perceiver-conv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForImageClassificationConvProcessing.from_pretrained(<span class="hljs-string">&#x27;deepmind/vision-perceiver-conv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),vs=new F({}),_s=new T({props:{name:"class 
transformers.PerceiverForOpticalFlow",anchor:"transformers.PerceiverForOpticalFlow",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1602",parametersDescription:[{anchor:"transformers.PerceiverForOpticalFlow.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bs=new T({props:{name:"forward",anchor:"transformers.PerceiverForOpticalFlow.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1649",parametersDescription:[{anchor:"transformers.PerceiverForOpticalFlow.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForOpticalFlow.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForOpticalFlow.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForOpticalFlow.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForOpticalFlow.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForOpticalFlow.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForOpticalFlow.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the optical flow loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_o=new jt({props:{$$slots:{default:[n2]},$$scope:{ctx:j}}}),ys=new Mt({props:{code:`from transformers import PerceiverForOpticalFlow import torch model = PerceiverForOpticalFlow.from_pretrained('deepmind/optical-flow-perceiver') # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel, # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels) # patches have shape (batch_size, num_frames, num_channels, height, width) # the authors train on resolutions of 368 x 496 patches = torch.randn(1, 2, 27, 368, 496) outputs = model(inputs=patches) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverForOpticalFlow <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForOpticalFlow.from_pretrained(<span class="hljs-string">&#x27;deepmind/optical-flow-perceiver&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># patches have shape (batch_size, num_frames, num_channels, height, width)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the authors train on resolutions of 368 x 496</span> <span class="hljs-meta">&gt;&gt;&gt; </span>patches = torch.randn(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">27</span>, <span class="hljs-number">368</span>, <span class="hljs-number">496</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=patches) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ks=new F({}),$s=new T({props:{name:"class transformers.PerceiverForMultimodalAutoencoding",anchor:"transformers.PerceiverForMultimodalAutoencoding",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1743",parametersDescription:[{anchor:"transformers.PerceiverForMultimodalAutoencoding.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fs=new T({props:{name:"forward",anchor:"transformers.PerceiverForMultimodalAutoencoding.forward",parameters:[{name:"inputs",val:" = None"},{name:"attention_mask",val:" = None"},{name:"subsampled_output_points",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"labels",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/perceiver/modeling_perceiver.py#L1859",parametersDescription:[{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.inputs",description:`<strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.`,name:"inputs"},{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PerceiverForMultimodalAutoencoding.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yo=new jt({props:{$$slots:{default:[a2]},$$scope:{ctx:j}}}),Cs=new Mt({props:{code:`from transformers import PerceiverForMultimodalAutoencoding import torch import numpy as np # create multimodal inputs images = torch.randn((1, 16, 3, 224, 224)) audio = torch.randn((1, 30720, 1)) inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700))) model = PerceiverForMultimodalAutoencoding.from_pretrained('deepmind/multimodal-perceiver') # in the Perceiver IO paper, videos are auto-encoded in chunks # each chunk subsamples different index dimensions of the image and audio modality decoder queries nchunks = 128 image_chunk_size = np.prod((16, 224, 224)) // nchunks audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks # process the first chunk chunk_idx = 0 subsampling = { "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)), "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)), "label": None, } outputs = model(inputs=inputs, subsampled_output_points=subsampling) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverForMultimodalAutoencoding <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># create multimodal inputs</span> <span class="hljs-meta">&gt;&gt;&gt; </span>images = torch.randn((<span class="hljs-number">1</span>, <span class="hljs-number">16</span>, <span class="hljs-number">3</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>audio = torch.randn((<span class="hljs-number">1</span>, <span class="hljs-number">30720</span>, <span class="hljs-number">1</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = <span class="hljs-built_in">dict</span>(image=images, audio=audio, label=torch.zeros((images.shape[<span class="hljs-number">0</span>], <span class="hljs-number">700</span>))) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForMultimodalAutoencoding.from_pretrained(<span class="hljs-string">&#x27;deepmind/multimodal-perceiver&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># in the Perceiver IO paper, videos are auto-encoded in chunks</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># each chunk subsamples different index dimensions of the image and audio modality decoder queries</span> <span class="hljs-meta">&gt;&gt;&gt; </span>nchunks = <span class="hljs-number">128</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image_chunk_size = np.prod((<span class="hljs-number">16</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>)) // nchunks <span 
class="hljs-meta">&gt;&gt;&gt; </span>audio_chunk_size = audio.shape[<span class="hljs-number">1</span>] // model.config.samples_per_patch // nchunks <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># process the first chunk</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chunk_idx = <span class="hljs-number">0</span> <span class="hljs-meta">&gt;&gt;&gt; </span>subsampling = { <span class="hljs-meta">... </span><span class="hljs-string">&quot;image&quot;</span>: torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + <span class="hljs-number">1</span>)), <span class="hljs-meta">... </span><span class="hljs-string">&quot;audio&quot;</span>: torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + <span class="hljs-number">1</span>)), <span class="hljs-meta">... </span><span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-literal">None</span>, <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs, subsampled_output_points=subsampling) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){h=r("meta"),$=l(),w=r("h1"),k=r("a"),x=r("span"),u(y.$$.fragment),b=l(),E=r("span"),ep=a("Perceiver"),Tc=l(),Ie=r("h2"),It=r("a"),Kn=r("span"),u(To.$$.fragment),tp=l(),Jn=r("span"),op=a("Overview"),xc=l(),qt=r("p"),rp=a("The Perceiver IO model was proposed in "),xo=r("a"),sp=a(`Perceiver IO: A General Architecture for Structured Inputs & Outputs`),np=a(` by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier H\xE9naff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, Jo\xE3o Carreira.`),Ec=l(),zt=r("p"),ap=a("Perceiver IO is a generalization of "),Eo=r("a"),ip=a("Perceiver"),cp=a(` to handle arbitrary outputs in addition to arbitrary inputs. The original Perceiver only produced a single classification label. In addition to classification labels, Perceiver IO can produce (for example) language, optical flow, and multimodal videos with audio. This is done using the same building blocks as the original Perceiver. The computational complexity of Perceiver IO is linear in the input and output size and the bulk of the processing occurs in the latent space, allowing us to process inputs and outputs that are much larger than can be handled by standard Transformers. This means, for example, Perceiver IO can do BERT-style masked language modeling directly using bytes instead of tokenized inputs.`),Fc=l(),As=r("p"),lp=a("The abstract from the paper is the following:"),Cc=l(),Ds=r("p"),Gn=r("em"),dp=a(`The recently-proposed Perceiver model obtains good results on several domains (images, audio, multimodal, point clouds) while scaling linearly in compute and memory with the input size. While the Perceiver supports many kinds of inputs, it can only produce very simple outputs such as class scores. Perceiver IO overcomes this limitation without sacrificing the original\u2019s appealing properties by learning to flexibly query the model\u2019s latent space to produce outputs of arbitrary size and semantics. Perceiver IO still decouples model depth from data size and still scales linearly with data size, but now with respect to both input and output sizes. 
The full Perceiver IO model achieves strong results on tasks with highly structured output spaces, such as natural language and visual understanding, StarCraft II, and multi-task and multi-modal domains. As highlights, Perceiver IO matches a Transformer-based BERT baseline on the GLUE language benchmark without the need for input tokenization and achieves state-of-the-art performance on Sintel optical flow estimation.`),jc=l(),Ns=r("p"),pp=a("Here\u2019s a TLDR explaining how Perceiver works:"),Mc=l(),Os=r("p"),mp=a(`The main problem with the self-attention mechanism of the Transformer is that the time and memory requirements scale quadratically with the sequence length. Hence, models like BERT and RoBERTa are limited to a max sequence length of 512 tokens. Perceiver aims to solve this issue by, instead of performing self-attention on the inputs, perform it on a set of latent variables, and only use the inputs for cross-attention. In this way, the time and memory requirements don\u2019t depend on the length of the inputs anymore, as one uses a fixed amount of latent variables, like 256 or 512. These are randomly initialized, after which they are trained end-to-end using backpropagation.`),Ic=l(),D=r("p"),hp=a("Internally, "),Ls=r("a"),up=a("PerceiverModel"),fp=a(" will create the latents, which is a tensor of shape "),Xn=r("code"),gp=a("(batch_size, num_latents, d_latents)"),vp=a(". One must provide "),Zn=r("code"),_p=a("inputs"),Pp=a(` (which could be text, images, audio, you name it!) to the model, which it will use to perform cross-attention with the latents. The output of the Perceiver encoder is a tensor of the same shape. One can then, similar to BERT, convert the last hidden states of the latents to classification logits by averaging along the sequence dimension, and placing a linear layer on top of that to project the `),Qn=r("code"),wp=a("d_latents"),bp=a(" to "),Yn=r("code"),yp=a("num_labels"),kp=a("."),qc=l(),Ss=r("p"),$p=a(`This was the idea of the original Perceiver paper. However, it could only output classification logits. In a follow-up work, PerceiverIO, they generalized it to let the model also produce outputs of arbitrary size. How, you might ask? The idea is actually relatively simple: one defines outputs of an arbitrary size, and then applies cross-attention with the last hidden states of the latents, using the outputs as queries, and the latents as keys and values.`),zc=l(),M=r("p"),Tp=a(`So let\u2019s say one wants to perform masked language modeling (BERT-style) with the Perceiver. As the Perceiver\u2019s input length will not have an impact on the computation time of the self-attention layers, one can provide raw bytes, providing `),ea=r("code"),xp=a("inputs"),Ep=a(` of length 2048 to the model. If one now masks out certain of these 2048 tokens, one can define the `),ta=r("code"),Fp=a("outputs"),Cp=a(" as being of shape: "),oa=r("code"),jp=a("(batch_size, 2048, 768)"),Mp=a(`. Next, one performs cross-attention with the final hidden states of the latents to update the `),ra=r("code"),Ip=a("outputs"),qp=a(" tensor. After cross-attention, one still has a tensor of shape "),sa=r("code"),zp=a("(batch_size, 2048, 768)"),Ap=a(`. One can then place a regular language modeling head on top, to project the last dimension to the vocabulary size of the model, i.e. creating logits of shape `),na=r("code"),Dp=a("(batch_size, 2048, 262)"),Np=a(` (as Perceiver uses a vocabulary size of 262 byte IDs).`),Ac=l(),At=r("img"),Dc=l(),Fo=r("small"),Op=a("Perceiver IO architecture. 
Taken from the "),Bs=r("a"),Lp=a("original paper"),Nc=l(),ve=r("p"),Sp=a("This model was contributed by "),Co=r("a"),Bp=a("nielsr"),Hp=a(`. The original code can be found `),jo=r("a"),Wp=a("here"),Vp=a("."),Oc=l(),Hs=r("p"),Rp=a("Tips:"),Lc=l(),Dt=r("ul"),Mo=r("li"),Up=a("The quickest way to get started with the Perceiver is by checking the "),Io=r("a"),Kp=a(`tutorial notebooks`),Jp=a("."),Gp=l(),qo=r("li"),Xp=a("Refer to the "),zo=r("a"),Zp=a("blog post"),Qp=a(` if you want to fully understand how the model works and is implemented in the library. Note that the models available in the library only showcase some examples of what you can do with the Perceiver. There are many more use cases, including question answering, named-entity recognition, object detection, audio classification, video classification, etc.`),Sc=l(),Ao=r("p"),aa=r("strong"),Yp=a("Note"),em=a(":"),Bc=l(),Ws=r("ul"),_e=r("li"),tm=a("Perceiver does "),ia=r("strong"),om=a("not"),rm=a(" work with "),ca=r("code"),sm=a("torch.nn.DataParallel"),nm=a(" due to a bug in PyTorch, see "),Do=r("a"),am=a("issue #36035"),Hc=l(),qe=r("h2"),Nt=r("a"),la=r("span"),u(No.$$.fragment),im=l(),da=r("span"),cm=a("Perceiver specific outputs"),Wc=l(),ze=r("div"),u(Oo.$$.fragment),lm=l(),pa=r("p"),dm=a("Base class for Perceiver base model\u2019s outputs, with potential hidden states, attentions and cross-attentions."),Vc=l(),Ae=r("div"),u(Lo.$$.fragment),pm=l(),ma=r("p"),mm=a("Base class for Perceiver decoder outputs, with potential cross-attentions."),Rc=l(),De=r("div"),u(So.$$.fragment),hm=l(),ha=r("p"),um=a("Base class for Perceiver\u2019s masked language model outputs."),Uc=l(),Ne=r("div"),u(Bo.$$.fragment),fm=l(),ua=r("p"),gm=a(`Base class for Perceiver\u2019s outputs of sequence/image classification models, optical flow and multimodal autoencoding.`),Kc=l(),Oe=r("h2"),Ot=r("a"),fa=r("span"),u(Ho.$$.fragment),vm=l(),ga=r("span"),_m=a("PerceiverConfig"),Jc=l(),N=r("div"),u(Wo.$$.fragment),Pm=l(),Le=r("p"),wm=a("This is the configuration class to store the configuration of a "),Vs=r("a"),bm=a("PerceiverModel"),ym=a(`. It is used to instantiate an Perceiver model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Perceiver `),Vo=r("a"),km=a("deepmind/language-perceiver"),$m=a(" architecture."),Tm=l(),Se=r("p"),xm=a("Configuration objects inherit from "),Rs=r("a"),Em=a("PretrainedConfig"),Fm=a(` and can be used to control the model outputs. Read the documentation from `),Us=r("a"),Cm=a("PretrainedConfig"),jm=a(" for more information."),Mm=l(),va=r("p"),Im=a("Example:"),qm=l(),u(Ro.$$.fragment),Gc=l(),Be=r("h2"),Lt=r("a"),_a=r("span"),u(Uo.$$.fragment),zm=l(),Pa=r("span"),Am=a("PerceiverTokenizer"),Xc=l(),Q=r("div"),u(Ko.$$.fragment),Dm=l(),wa=r("p"),Nm=a("Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding."),Om=l(),Jo=r("p"),Lm=a("This tokenizer inherits from "),Ks=r("a"),Sm=a("PreTrainedTokenizer"),Bm=a(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Hm=l(),St=r("div"),u(Go.$$.fragment),Wm=l(),ba=r("p"),Vm=a(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Zc=l(),He=r("h2"),Bt=r("a"),ya=r("span"),u(Xo.$$.fragment),Rm=l(),ka=r("span"),Um=a("PerceiverFeatureExtractor"),Qc=l(),Y=r("div"),u(Zo.$$.fragment),Km=l(),$a=r("p"),Jm=a("Constructs a Perceiver feature extractor."),Gm=l(),Qo=r("p"),Xm=a("This feature extractor inherits from "),Js=r("a"),Zm=a("ImageFeatureExtractionMixin"),Qm=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ym=l(),Pe=r("div"),u(Yo.$$.fragment),eh=l(),Ta=r("p"),th=a("Main method to prepare for the model one or several image(s)."),oh=l(),u(Ht.$$.fragment),Yc=l(),We=r("h2"),Wt=r("a"),xa=r("span"),u(er.$$.fragment),rh=l(),Ea=r("span"),sh=a("PerceiverTextPreprocessor"),el=l(),ae=r("div"),u(tr.$$.fragment),nh=l(),or=r("p"),ah=a("Text preprocessing for Perceiver Encoder. Can be used to embed "),Fa=r("code"),ih=a("inputs"),ch=a(" and add positional encodings."),lh=l(),rr=r("p"),dh=a("The dimensionality of the embeddings is determined by the "),Ca=r("code"),ph=a("d_model"),mh=a(" attribute of the configuration."),tl=l(),Ve=r("h2"),Vt=r("a"),ja=r("span"),u(sr.$$.fragment),hh=l(),Ma=r("span"),uh=a("PerceiverImagePreprocessor"),ol=l(),ie=r("div"),u(nr.$$.fragment),fh=l(),Ia=r("p"),gh=a("Image preprocessing for Perceiver Encoder."),vh=l(),ee=r("p"),_h=a("Note: the "),qa=r("em"),Ph=a("out_channels"),wh=a(" argument refers to the output channels of a convolutional layer, if "),za=r("em"),bh=a("prep_type"),yh=a(` is set to \u201Cconv1x1\u201D or \u201Cconv\u201D. If one adds absolute position embeddings, one must make sure the `),Aa=r("em"),kh=a("num_channels"),$h=a(` of the position encoding kwargs are set equal to the `),Da=r("em"),Th=a("out_channels"),xh=a("."),rl=l(),Re=r("h2"),Rt=r("a"),Na=r("span"),u(ar.$$.fragment),Eh=l(),Oa=r("span"),Fh=a("PerceiverOneHotPreprocessor"),sl=l(),Ue=r("div"),u(ir.$$.fragment),Ch=l(),La=r("p"),jh=a("One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input."),nl=l(),Ke=r("h2"),Ut=r("a"),Sa=r("span"),u(cr.$$.fragment),Mh=l(),Ba=r("span"),Ih=a("PerceiverAudioPreprocessor"),al=l(),Je=r("div"),u(lr.$$.fragment),qh=l(),Ha=r("p"),zh=a("Audio preprocessing for Perceiver Encoder."),il=l(),Ge=r("h2"),Kt=r("a"),Wa=r("span"),u(dr.$$.fragment),Ah=l(),Va=r("span"),Dh=a("PerceiverMultimodalPreprocessor"),cl=l(),ce=r("div"),u(pr.$$.fragment),Nh=l(),Ra=r("p"),Oh=a("Multimodal preprocessing for Perceiver Encoder."),Lh=l(),Ua=r("p"),Sh=a(`Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number of channels.`),ll=l(),Xe=r("h2"),Jt=r("a"),Ka=r("span"),u(mr.$$.fragment),Bh=l(),Ja=r("span"),Hh=a("PerceiverProjectionDecoder"),dl=l(),Ze=r("div"),u(hr.$$.fragment),Wh=l(),Ga=r("p"),Vh=a("Baseline projection decoder (no cross-attention)."),pl=l(),Qe=r("h2"),Gt=r("a"),Xa=r("span"),u(ur.$$.fragment),Rh=l(),Za=r("span"),Uh=a("PerceiverBasicDecoder"),ml=l(),le=r("div"),u(fr.$$.fragment),Kh=l(),Qa=r("p"),Jh=a(`Cross-attention-based decoder. 
This class can be used to decode the final hidden states of the latents using a cross-attention operation, in which the latents produce keys and values.`),Gh=l(),Ya=r("p"),Xh=a("The shape of the output of this class depends on how one defines the output queries (also called decoder queries)."),hl=l(),Ye=r("h2"),Xt=r("a"),ei=r("span"),u(gr.$$.fragment),Zh=l(),ti=r("span"),Qh=a("PerceiverClassificationDecoder"),ul=l(),et=r("div"),u(vr.$$.fragment),Yh=l(),_r=r("p"),eu=a("Cross-attention based classification decoder. Light-weight wrapper of "),oi=r("code"),tu=a("PerceiverBasicDecoder"),ou=a(` for logit output. Will turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).`),fl=l(),tt=r("h2"),Zt=r("a"),ri=r("span"),u(Pr.$$.fragment),ru=l(),si=r("span"),su=a("PerceiverOpticalFlowDecoder"),gl=l(),ot=r("div"),u(wr.$$.fragment),nu=l(),ni=r("p"),au=a("Cross-attention based optical flow decoder."),vl=l(),rt=r("h2"),Qt=r("a"),ai=r("span"),u(br.$$.fragment),iu=l(),ii=r("span"),cu=a("PerceiverBasicVideoAutoencodingDecoder"),_l=l(),st=r("div"),u(yr.$$.fragment),lu=l(),kr=r("p"),du=a("Cross-attention based video-autoencoding decoder. Light-weight wrapper of ["),ci=r("em"),pu=a("PerceiverBasicDecoder"),mu=a(`] with video reshaping logic.`),Pl=l(),nt=r("h2"),Yt=r("a"),li=r("span"),u($r.$$.fragment),hu=l(),di=r("span"),uu=a("PerceiverMultimodalDecoder"),wl=l(),de=r("div"),u(Tr.$$.fragment),fu=l(),xr=r("p"),gu=a("Multimodal decoding by composing uni-modal decoders. The "),pi=r("em"),vu=a("modalities"),_u=a(` argument of the constructor is a dictionary mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are concatenated along the time dimension.`),Pu=l(),mi=r("p"),wu=a("Next, there is a shared cross attention operation across all modalities."),bl=l(),at=r("h2"),eo=r("a"),hi=r("span"),u(Er.$$.fragment),bu=l(),ui=r("span"),yu=a("PerceiverProjectionPostprocessor"),yl=l(),it=r("div"),u(Fr.$$.fragment),ku=l(),fi=r("p"),$u=a(`Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower dimension.`),kl=l(),ct=r("h2"),to=r("a"),gi=r("span"),u(Cr.$$.fragment),Tu=l(),vi=r("span"),xu=a("PerceiverAudioPostprocessor"),$l=l(),lt=r("div"),u(jr.$$.fragment),Eu=l(),_i=r("p"),Fu=a("Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features."),Tl=l(),dt=r("h2"),oo=r("a"),Pi=r("span"),u(Mr.$$.fragment),Cu=l(),wi=r("span"),ju=a("PerceiverClassificationPostprocessor"),xl=l(),pt=r("div"),u(Ir.$$.fragment),Mu=l(),bi=r("p"),Iu=a("Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits."),El=l(),mt=r("h2"),ro=r("a"),yi=r("span"),u(qr.$$.fragment),qu=l(),ki=r("span"),zu=a("PerceiverMultimodalPostprocessor"),Fl=l(),ht=r("div"),u(zr.$$.fragment),Au=l(),$i=r("p"),Du=a(`Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single postprocessor.`),Cl=l(),ut=r("h2"),so=r("a"),Ti=r("span"),u(Ar.$$.fragment),Nu=l(),xi=r("span"),Ou=a("PerceiverModel"),jl=l(),pe=r("div"),u(Dr.$$.fragment),Lu=l(),Nr=r("p"),Su=a(`The Perceiver: a scalable, fully attentional architecture. This model is a PyTorch `),Or=r("a"),Bu=a("torch.nn.Module"),Hu=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wu=l(),S=r("div"),u(Lr.$$.fragment),Vu=l(),ft=r("p"),Ru=a("The "),Gs=r("a"),Uu=a("PerceiverModel"),Ku=a(" forward method, overrides the "),Ei=r("code"),Ju=a("__call__"),Gu=a(" special method."),Xu=l(),u(no.$$.fragment),Zu=l(),Fi=r("p"),Qu=a("Examples:"),Yu=l(),u(Sr.$$.fragment),Ml=l(),gt=r("h2"),ao=r("a"),Ci=r("span"),u(Br.$$.fragment),ef=l(),ji=r("span"),tf=a("PerceiverForMaskedLM"),Il=l(),me=r("div"),u(Hr.$$.fragment),of=l(),Wr=r("p"),rf=a(`Example use of Perceiver for masked language modeling. This model is a PyTorch `),Vr=r("a"),sf=a("torch.nn.Module"),nf=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),af=l(),B=r("div"),u(Rr.$$.fragment),cf=l(),vt=r("p"),lf=a("The "),Xs=r("a"),df=a("PerceiverForMaskedLM"),pf=a(" forward method, overrides the "),Mi=r("code"),mf=a("__call__"),hf=a(" special method."),uf=l(),u(io.$$.fragment),ff=l(),Ii=r("p"),gf=a("Examples:"),vf=l(),u(Ur.$$.fragment),ql=l(),_t=r("h2"),co=r("a"),qi=r("span"),u(Kr.$$.fragment),_f=l(),zi=r("span"),Pf=a("PerceiverForSequenceClassification"),zl=l(),he=r("div"),u(Jr.$$.fragment),wf=l(),Gr=r("p"),bf=a(`Example use of Perceiver for text classification. This model is a PyTorch `),Xr=r("a"),yf=a("torch.nn.Module"),kf=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$f=l(),H=r("div"),u(Zr.$$.fragment),Tf=l(),Pt=r("p"),xf=a("The "),Zs=r("a"),Ef=a("PerceiverForSequenceClassification"),Ff=a(" forward method, overrides the "),Ai=r("code"),Cf=a("__call__"),jf=a(" special method."),Mf=l(),u(lo.$$.fragment),If=l(),Di=r("p"),qf=a("Examples:"),zf=l(),u(Qr.$$.fragment),Al=l(),wt=r("h2"),po=r("a"),Ni=r("span"),u(Yr.$$.fragment),Af=l(),Oi=r("span"),Df=a("PerceiverForImageClassificationLearned"),Dl=l(),q=r("div"),u(es.$$.fragment),Nf=l(),Li=r("p"),Of=a("Example use of Perceiver for image classification, for tasks such as ImageNet."),Lf=l(),Si=r("p"),Sf=a(`This model uses learned position embeddings. In other words, this model is not given any privileged information about the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.`),Bf=l(),W=r("p"),Qs=r("a"),Hf=a("PerceiverForImageClassificationLearned"),Wf=a(` uses `),Ys=r("a"),Vf=a("PerceiverImagePreprocessor"),Rf=a(" (with "),Bi=r("code"),Uf=a('prep_type="conv1x1"'),Kf=a(`) to preprocess the input images, and `),en=r("a"),Jf=a("PerceiverClassificationDecoder"),Gf=a(` to decode the latent representation of `),tn=r("a"),Xf=a("PerceiverModel"),Zf=a(" into classification logits."),Qf=l(),ts=r("p"),Yf=a("This model is a PyTorch "),os=r("a"),eg=a("torch.nn.Module"),tg=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),og=l(),V=r("div"),u(rs.$$.fragment),rg=l(),bt=r("p"),sg=a("The "),on=r("a"),ng=a("PerceiverForImageClassificationLearned"),ag=a(" forward method, overrides the "),Hi=r("code"),ig=a("__call__"),cg=a(" special method."),lg=l(),u(mo.$$.fragment),dg=l(),Wi=r("p"),pg=a("Examples:"),mg=l(),u(ss.$$.fragment),Nl=l(),yt=r("h2"),ho=r("a"),Vi=r("span"),u(ns.$$.fragment),hg=l(),Ri=r("span"),ug=a("PerceiverForImageClassificationFourier"),Ol=l(),z=r("div"),u(as.$$.fragment),fg=l(),Ui=r("p"),gg=a("Example use of Perceiver for image classification, for tasks such as ImageNet."),vg=l(),Ki=r("p"),_g=a(`This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of 79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).`),Pg=l(),R=r("p"),rn=r("a"),wg=a("PerceiverForImageClassificationLearned"),bg=a(` uses `),sn=r("a"),yg=a("PerceiverImagePreprocessor"),kg=a(" (with "),Ji=r("code"),$g=a('prep_type="pixels"'),Tg=a(`) to preprocess the input images, and `),nn=r("a"),xg=a("PerceiverClassificationDecoder"),Eg=a(` to decode the latent representation of `),an=r("a"),Fg=a("PerceiverModel"),Cg=a(" into classification logits."),jg=l(),is=r("p"),Mg=a("This model is a PyTorch "),cs=r("a"),Ig=a("torch.nn.Module"),qg=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zg=l(),U=r("div"),u(ls.$$.fragment),Ag=l(),kt=r("p"),Dg=a("The "),cn=r("a"),Ng=a("PerceiverForImageClassificationFourier"),Og=a(" forward method, overrides the "),Gi=r("code"),Lg=a("__call__"),Sg=a(" special method."),Bg=l(),u(uo.$$.fragment),Hg=l(),Xi=r("p"),Wg=a("Examples:"),Vg=l(),u(ds.$$.fragment),Ll=l(),$t=r("h2"),fo=r("a"),Zi=r("span"),u(ps.$$.fragment),Rg=l(),Qi=r("span"),Ug=a("PerceiverForImageClassificationConvProcessing"),Sl=l(),A=r("div"),u(ms.$$.fragment),Kg=l(),Yi=r("p"),Jg=a("Example use of Perceiver for image classification, for tasks such as ImageNet."),Gg=l(),ec=r("p"),Xg=a(`This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy of 82.1 on ImageNet.`),Zg=l(),K=r("p"),ln=r("a"),Qg=a("PerceiverForImageClassificationLearned"),Yg=a(` uses `),dn=r("a"),ev=a("PerceiverImagePreprocessor"),tv=a(" (with "),tc=r("code"),ov=a('prep_type="conv"'),rv=a(`) to preprocess the input images, and `),pn=r("a"),sv=a("PerceiverClassificationDecoder"),nv=a(` to decode the latent representation of `),mn=r("a"),av=a("PerceiverModel"),iv=a(" into classification logits."),cv=l(),hs=r("p"),lv=a("This model is a PyTorch "),us=r("a"),dv=a("torch.nn.Module"),pv=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mv=l(),J=r("div"),u(fs.$$.fragment),hv=l(),Tt=r("p"),uv=a("The "),hn=r("a"),fv=a("PerceiverForImageClassificationConvProcessing"),gv=a(" forward method, overrides the "),oc=r("code"),vv=a("__call__"),_v=a(" special method."),Pv=l(),u(go.$$.fragment),wv=l(),rc=r("p"),bv=a("Examples:"),yv=l(),u(gs.$$.fragment),Bl=l(),xt=r("h2"),vo=r("a"),sc=r("span"),u(vs.$$.fragment),kv=l(),nc=r("span"),$v=a("PerceiverForOpticalFlow"),Hl=l(),O=r("div"),u(_s.$$.fragment),Tv=l(),L=r("p"),xv=a(`Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. 
`),un=r("a"),Ev=a("PerceiverForOpticalFlow"),Fv=a(` uses `),fn=r("a"),Cv=a("PerceiverImagePreprocessor"),jv=a(" (with "),ac=r("em"),Mv=a("prep_type=\u201Cpatches\u201D"),Iv=a(`) to preprocess the input images, and `),gn=r("a"),qv=a("PerceiverOpticalFlowDecoder"),zv=a(` to decode the latent representation of `),vn=r("a"),Av=a("PerceiverModel"),Dv=a("."),Nv=l(),ic=r("p"),Ov=a(`As input, one concatenates 2 subsequent frames along the channel dimension and extract a 3 x 3 patch around each pixel (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation using the same encoding used for the input.`),Lv=l(),Ps=r("p"),Sv=a("This model is a PyTorch "),ws=r("a"),Bv=a("torch.nn.Module"),Hv=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wv=l(),G=r("div"),u(bs.$$.fragment),Vv=l(),Et=r("p"),Rv=a("The "),_n=r("a"),Uv=a("PerceiverForOpticalFlow"),Kv=a(" forward method, overrides the "),cc=r("code"),Jv=a("__call__"),Gv=a(" special method."),Xv=l(),u(_o.$$.fragment),Zv=l(),lc=r("p"),Qv=a("Examples:"),Yv=l(),u(ys.$$.fragment),Wl=l(),Ft=r("h2"),Po=r("a"),dc=r("span"),u(ks.$$.fragment),e_=l(),pc=r("span"),t_=a("PerceiverForMultimodalAutoencoding"),Vl=l(),C=r("div"),u($s.$$.fragment),o_=l(),mc=r("p"),r_=a("Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700."),s_=l(),wo=r("p"),Pn=r("a"),n_=a("PerceiverForMultimodalAutoencoding"),a_=a(` uses `),wn=r("a"),i_=a("PerceiverMultimodalPreprocessor"),c_=a(` to preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies the Perceiver encoder.`),l_=l(),we=r("p"),bn=r("a"),d_=a("PerceiverMultimodalDecoder"),p_=a(` is used to decode the latent representation of `),yn=r("a"),m_=a("PerceiverModel"),h_=a(`. This decoder uses each modality-specific decoder to construct queries. The decoder queries are created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent representation. This is determined by the subsampled indices for each modality, which can be provided as additional input to the forward pass of `),kn=r("a"),u_=a("PerceiverForMultimodalAutoencoding"),f_=a("."),g_=l(),bo=r("p"),$n=r("a"),v_=a("PerceiverMultimodalDecoder"),__=a(` also pads the decoder queries of the different modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention is performed with the latent representation of `),Tn=r("a"),P_=a("PerceiverModel"),w_=a("."),b_=l(),Ts=r("p"),y_=a("Finally, "),hc=r("code"),k_=a("PerceiverMultiModalPostprocessor"),$_=a(` is used to turn this tensor into an actual video. It first splits up the output into the different modalities, and then applies the respective postprocessor for each modality.`),T_=l(),uc=r("p"),x_=a(`Note that, by masking the classification label during evaluation (i.e. 
simply providing a tensor of zeros for the \u201Clabel\u201D modality), this auto-encoding model becomes a Kinetics 700 video classifier.`),E_=l(),xs=r("p"),F_=a("This model is a PyTorch "),Es=r("a"),C_=a("torch.nn.Module"),j_=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),M_=l(),X=r("div"),u(Fs.$$.fragment),I_=l(),Ct=r("p"),q_=a("The "),xn=r("a"),z_=a("PerceiverForMultimodalAutoencoding"),A_=a(" forward method, overrides the "),fc=r("code"),D_=a("__call__"),N_=a(" special method."),O_=l(),u(yo.$$.fragment),L_=l(),gc=r("p"),S_=a("Examples:"),B_=l(),u(Cs.$$.fragment),this.h()},l(t){const p=Xb('[data-svelte="svelte-1phssyn"]',document.head);h=s(p,"META",{name:!0,content:!0}),p.forEach(o),$=d(t),w=s(t,"H1",{class:!0});var js=n(w);k=s(js,"A",{id:!0,class:!0,href:!0});var vc=n(k);x=s(vc,"SPAN",{});var _c=n(x);f(y.$$.fragment,_c),_c.forEach(o),vc.forEach(o),b=d(js),E=s(js,"SPAN",{});var Pc=n(E);ep=i(Pc,"Perceiver"),Pc.forEach(o),js.forEach(o),Tc=d(t),Ie=s(t,"H2",{class:!0});var Ms=n(Ie);It=s(Ms,"A",{id:!0,class:!0,href:!0});var wc=n(It);Kn=s(wc,"SPAN",{});var bc=n(Kn);f(To.$$.fragment,bc),bc.forEach(o),wc.forEach(o),tp=d(Ms),Jn=s(Ms,"SPAN",{});var yc=n(Jn);op=i(yc,"Overview"),yc.forEach(o),Ms.forEach(o),xc=d(t),qt=s(t,"P",{});var Is=n(qt);rp=i(Is,"The Perceiver IO model was proposed in "),xo=s(Is,"A",{href:!0,rel:!0});var R_=n(xo);sp=i(R_,`Perceiver IO: A General Architecture for Structured Inputs & Outputs`),R_.forEach(o),np=i(Is,` by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier H\xE9naff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, Jo\xE3o Carreira.`),Is.forEach(o),Ec=d(t),zt=s(t,"P",{});var Ul=n(zt);ap=i(Ul,"Perceiver IO is a generalization of "),Eo=s(Ul,"A",{href:!0,rel:!0});var U_=n(Eo);ip=i(U_,"Perceiver"),U_.forEach(o),cp=i(Ul,` to handle arbitrary outputs in addition to arbitrary inputs. The original Perceiver only produced a single classification label. In addition to classification labels, Perceiver IO can produce (for example) language, optical flow, and multimodal videos with audio. This is done using the same building blocks as the original Perceiver. The computational complexity of Perceiver IO is linear in the input and output size and the bulk of the processing occurs in the latent space, allowing us to process inputs and outputs that are much larger than can be handled by standard Transformers. This means, for example, Perceiver IO can do BERT-style masked language modeling directly using bytes instead of tokenized inputs.`),Ul.forEach(o),Fc=d(t),As=s(t,"P",{});var K_=n(As);lp=i(K_,"The abstract from the paper is the following:"),K_.forEach(o),Cc=d(t),Ds=s(t,"P",{});var J_=n(Ds);Gn=s(J_,"EM",{});var G_=n(Gn);dp=i(G_,`The recently-proposed Perceiver model obtains good results on several domains (images, audio, multimodal, point clouds) while scaling linearly in compute and memory with the input size. While the Perceiver supports many kinds of inputs, it can only produce very simple outputs such as class scores. Perceiver IO overcomes this limitation without sacrificing the original\u2019s appealing properties by learning to flexibly query the model\u2019s latent space to produce outputs of arbitrary size and semantics. 
Perceiver IO still decouples model depth from data size and still scales linearly with data size, but now with respect to both input and output sizes. The full Perceiver IO model achieves strong results on tasks with highly structured output spaces, such as natural language and visual understanding, StarCraft II, and multi-task and multi-modal domains. As highlights, Perceiver IO matches a Transformer-based BERT baseline on the GLUE language benchmark without the need for input tokenization and achieves state-of-the-art performance on Sintel optical flow estimation.`),G_.forEach(o),J_.forEach(o),jc=d(t),Ns=s(t,"P",{});var X_=n(Ns);pp=i(X_,"Here\u2019s a TLDR explaining how Perceiver works:"),X_.forEach(o),Mc=d(t),Os=s(t,"P",{});var Z_=n(Os);mp=i(Z_,`The main problem with the self-attention mechanism of the Transformer is that the time and memory requirements scale quadratically with the sequence length. Hence, models like BERT and RoBERTa are limited to a max sequence length of 512 tokens. Perceiver aims to solve this issue by, instead of performing self-attention on the inputs, perform it on a set of latent variables, and only use the inputs for cross-attention. In this way, the time and memory requirements don\u2019t depend on the length of the inputs anymore, as one uses a fixed amount of latent variables, like 256 or 512. These are randomly initialized, after which they are trained end-to-end using backpropagation.`),Z_.forEach(o),Ic=d(t),D=s(t,"P",{});var te=n(D);hp=i(te,"Internally, "),Ls=s(te,"A",{href:!0});var Q_=n(Ls);up=i(Q_,"PerceiverModel"),Q_.forEach(o),fp=i(te," will create the latents, which is a tensor of shape "),Xn=s(te,"CODE",{});var Y_=n(Xn);gp=i(Y_,"(batch_size, num_latents, d_latents)"),Y_.forEach(o),vp=i(te,". One must provide "),Zn=s(te,"CODE",{});var eP=n(Zn);_p=i(eP,"inputs"),eP.forEach(o),Pp=i(te,` (which could be text, images, audio, you name it!) to the model, which it will use to perform cross-attention with the latents. The output of the Perceiver encoder is a tensor of the same shape. One can then, similar to BERT, convert the last hidden states of the latents to classification logits by averaging along the sequence dimension, and placing a linear layer on top of that to project the `),Qn=s(te,"CODE",{});var tP=n(Qn);wp=i(tP,"d_latents"),tP.forEach(o),bp=i(te," to "),Yn=s(te,"CODE",{});var oP=n(Yn);yp=i(oP,"num_labels"),oP.forEach(o),kp=i(te,"."),te.forEach(o),qc=d(t),Ss=s(t,"P",{});var rP=n(Ss);$p=i(rP,`This was the idea of the original Perceiver paper. However, it could only output classification logits. In a follow-up work, PerceiverIO, they generalized it to let the model also produce outputs of arbitrary size. How, you might ask? The idea is actually relatively simple: one defines outputs of an arbitrary size, and then applies cross-attention with the last hidden states of the latents, using the outputs as queries, and the latents as keys and values.`),rP.forEach(o),zc=d(t),M=s(t,"P",{});var Z=n(M);Tp=i(Z,`So let\u2019s say one wants to perform masked language modeling (BERT-style) with the Perceiver. As the Perceiver\u2019s input length will not have an impact on the computation time of the self-attention layers, one can provide raw bytes, providing `),ea=s(Z,"CODE",{});var sP=n(ea);xp=i(sP,"inputs"),sP.forEach(o),Ep=i(Z,` of length 2048 to the model. 
If one now masks out certain of these 2048 tokens, one can define the `),ta=s(Z,"CODE",{});var nP=n(ta);Fp=i(nP,"outputs"),nP.forEach(o),Cp=i(Z," as being of shape: "),oa=s(Z,"CODE",{});var aP=n(oa);jp=i(aP,"(batch_size, 2048, 768)"),aP.forEach(o),Mp=i(Z,`. Next, one performs cross-attention with the final hidden states of the latents to update the `),ra=s(Z,"CODE",{});var iP=n(ra);Ip=i(iP,"outputs"),iP.forEach(o),qp=i(Z," tensor. After cross-attention, one still has a tensor of shape "),sa=s(Z,"CODE",{});var cP=n(sa);zp=i(cP,"(batch_size, 2048, 768)"),cP.forEach(o),Ap=i(Z,`. One can then place a regular language modeling head on top, to project the last dimension to the vocabulary size of the model, i.e. creating logits of shape `),na=s(Z,"CODE",{});var lP=n(na);Dp=i(lP,"(batch_size, 2048, 262)"),lP.forEach(o),Np=i(Z,` (as Perceiver uses a vocabulary size of 262 byte IDs).`),Z.forEach(o),Ac=d(t),At=s(t,"IMG",{src:!0,alt:!0,width:!0}),Dc=d(t),Fo=s(t,"SMALL",{});var H_=n(Fo);Op=i(H_,"Perceiver IO architecture. Taken from the "),Bs=s(H_,"A",{href:!0});var dP=n(Bs);Lp=i(dP,"original paper"),dP.forEach(o),H_.forEach(o),Nc=d(t),ve=s(t,"P",{});var En=n(ve);Sp=i(En,"This model was contributed by "),Co=s(En,"A",{href:!0,rel:!0});var pP=n(Co);Bp=i(pP,"nielsr"),pP.forEach(o),Hp=i(En,`. The original code can be found `),jo=s(En,"A",{href:!0,rel:!0});var mP=n(jo);Wp=i(mP,"here"),mP.forEach(o),Vp=i(En,"."),En.forEach(o),Oc=d(t),Hs=s(t,"P",{});var hP=n(Hs);Rp=i(hP,"Tips:"),hP.forEach(o),Lc=d(t),Dt=s(t,"UL",{});var Kl=n(Dt);Mo=s(Kl,"LI",{});var Jl=n(Mo);Up=i(Jl,"The quickest way to get started with the Perceiver is by checking the "),Io=s(Jl,"A",{href:!0,rel:!0});var uP=n(Io);Kp=i(uP,`tutorial notebooks`),uP.forEach(o),Jp=i(Jl,"."),Jl.forEach(o),Gp=d(Kl),qo=s(Kl,"LI",{});var Gl=n(qo);Xp=i(Gl,"Refer to the "),zo=s(Gl,"A",{href:!0,rel:!0});var fP=n(zo);Zp=i(fP,"blog post"),fP.forEach(o),Qp=i(Gl,` if you want to fully understand how the model works and is implemented in the library. Note that the models available in the library only showcase some examples of what you can do with the Perceiver. 
There are many more use cases, including question answering, named-entity recognition, object detection, audio classification, video classification, etc.`),Gl.forEach(o),Kl.forEach(o),Sc=d(t),Ao=s(t,"P",{});var W_=n(Ao);aa=s(W_,"STRONG",{});var gP=n(aa);Yp=i(gP,"Note"),gP.forEach(o),em=i(W_,":"),W_.forEach(o),Bc=d(t),Ws=s(t,"UL",{});var vP=n(Ws);_e=s(vP,"LI",{});var qs=n(_e);tm=i(qs,"Perceiver does "),ia=s(qs,"STRONG",{});var _P=n(ia);om=i(_P,"not"),_P.forEach(o),rm=i(qs," work with "),ca=s(qs,"CODE",{});var PP=n(ca);sm=i(PP,"torch.nn.DataParallel"),PP.forEach(o),nm=i(qs," due to a bug in PyTorch, see "),Do=s(qs,"A",{href:!0,rel:!0});var wP=n(Do);am=i(wP,"issue #36035"),wP.forEach(o),qs.forEach(o),vP.forEach(o),Hc=d(t),qe=s(t,"H2",{class:!0});var Xl=n(qe);Nt=s(Xl,"A",{id:!0,class:!0,href:!0});var bP=n(Nt);la=s(bP,"SPAN",{});var yP=n(la);f(No.$$.fragment,yP),yP.forEach(o),bP.forEach(o),im=d(Xl),da=s(Xl,"SPAN",{});var kP=n(da);cm=i(kP,"Perceiver specific outputs"),kP.forEach(o),Xl.forEach(o),Wc=d(t),ze=s(t,"DIV",{class:!0});var Zl=n(ze);f(Oo.$$.fragment,Zl),lm=d(Zl),pa=s(Zl,"P",{});var $P=n(pa);dm=i($P,"Base class for Perceiver base model\u2019s outputs, with potential hidden states, attentions and cross-attentions."),$P.forEach(o),Zl.forEach(o),Vc=d(t),Ae=s(t,"DIV",{class:!0});var Ql=n(Ae);f(Lo.$$.fragment,Ql),pm=d(Ql),ma=s(Ql,"P",{});var TP=n(ma);mm=i(TP,"Base class for Perceiver decoder outputs, with potential cross-attentions."),TP.forEach(o),Ql.forEach(o),Rc=d(t),De=s(t,"DIV",{class:!0});var Yl=n(De);f(So.$$.fragment,Yl),hm=d(Yl),ha=s(Yl,"P",{});var xP=n(ha);um=i(xP,"Base class for Perceiver\u2019s masked language model outputs."),xP.forEach(o),Yl.forEach(o),Uc=d(t),Ne=s(t,"DIV",{class:!0});var ed=n(Ne);f(Bo.$$.fragment,ed),fm=d(ed),ua=s(ed,"P",{});var EP=n(ua);gm=i(EP,`Base class for Perceiver\u2019s outputs of sequence/image classification models, optical flow and multimodal autoencoding.`),EP.forEach(o),ed.forEach(o),Kc=d(t),Oe=s(t,"H2",{class:!0});var td=n(Oe);Ot=s(td,"A",{id:!0,class:!0,href:!0});var FP=n(Ot);fa=s(FP,"SPAN",{});var CP=n(fa);f(Ho.$$.fragment,CP),CP.forEach(o),FP.forEach(o),vm=d(td),ga=s(td,"SPAN",{});var jP=n(ga);_m=i(jP,"PerceiverConfig"),jP.forEach(o),td.forEach(o),Jc=d(t),N=s(t,"DIV",{class:!0});var be=n(N);f(Wo.$$.fragment,be),Pm=d(be),Le=s(be,"P",{});var Fn=n(Le);wm=i(Fn,"This is the configuration class to store the configuration of a "),Vs=s(Fn,"A",{href:!0});var MP=n(Vs);bm=i(MP,"PerceiverModel"),MP.forEach(o),ym=i(Fn,`. It is used to instantiate an Perceiver model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Perceiver `),Vo=s(Fn,"A",{href:!0,rel:!0});var IP=n(Vo);km=i(IP,"deepmind/language-perceiver"),IP.forEach(o),$m=i(Fn," architecture."),Fn.forEach(o),Tm=d(be),Se=s(be,"P",{});var Cn=n(Se);xm=i(Cn,"Configuration objects inherit from "),Rs=s(Cn,"A",{href:!0});var qP=n(Rs);Em=i(qP,"PretrainedConfig"),qP.forEach(o),Fm=i(Cn,` and can be used to control the model outputs. 
Read the documentation from `),Us=s(Cn,"A",{href:!0});var zP=n(Us);Cm=i(zP,"PretrainedConfig"),zP.forEach(o),jm=i(Cn," for more information."),Cn.forEach(o),Mm=d(be),va=s(be,"P",{});var AP=n(va);Im=i(AP,"Example:"),AP.forEach(o),qm=d(be),f(Ro.$$.fragment,be),be.forEach(o),Gc=d(t),Be=s(t,"H2",{class:!0});var od=n(Be);Lt=s(od,"A",{id:!0,class:!0,href:!0});var DP=n(Lt);_a=s(DP,"SPAN",{});var NP=n(_a);f(Uo.$$.fragment,NP),NP.forEach(o),DP.forEach(o),zm=d(od),Pa=s(od,"SPAN",{});var OP=n(Pa);Am=i(OP,"PerceiverTokenizer"),OP.forEach(o),od.forEach(o),Xc=d(t),Q=s(t,"DIV",{class:!0});var ko=n(Q);f(Ko.$$.fragment,ko),Dm=d(ko),wa=s(ko,"P",{});var LP=n(wa);Nm=i(LP,"Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding."),LP.forEach(o),Om=d(ko),Jo=s(ko,"P",{});var rd=n(Jo);Lm=i(rd,"This tokenizer inherits from "),Ks=s(rd,"A",{href:!0});var SP=n(Ks);Sm=i(SP,"PreTrainedTokenizer"),SP.forEach(o),Bm=i(rd,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),rd.forEach(o),Hm=d(ko),St=s(ko,"DIV",{class:!0});var sd=n(St);f(Go.$$.fragment,sd),Wm=d(sd),ba=s(sd,"P",{});var BP=n(ba);Vm=i(BP,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),BP.forEach(o),sd.forEach(o),ko.forEach(o),Zc=d(t),He=s(t,"H2",{class:!0});var nd=n(He);Bt=s(nd,"A",{id:!0,class:!0,href:!0});var HP=n(Bt);ya=s(HP,"SPAN",{});var WP=n(ya);f(Xo.$$.fragment,WP),WP.forEach(o),HP.forEach(o),Rm=d(nd),ka=s(nd,"SPAN",{});var VP=n(ka);Um=i(VP,"PerceiverFeatureExtractor"),VP.forEach(o),nd.forEach(o),Qc=d(t),Y=s(t,"DIV",{class:!0});var $o=n(Y);f(Zo.$$.fragment,$o),Km=d($o),$a=s($o,"P",{});var RP=n($a);Jm=i(RP,"Constructs a Perceiver feature extractor."),RP.forEach(o),Gm=d($o),Qo=s($o,"P",{});var ad=n(Qo);Xm=i(ad,"This feature extractor inherits from "),Js=s(ad,"A",{href:!0});var UP=n(Js);Zm=i(UP,"ImageFeatureExtractionMixin"),UP.forEach(o),Qm=i(ad,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ad.forEach(o),Ym=d($o),Pe=s($o,"DIV",{class:!0});var jn=n(Pe);f(Yo.$$.fragment,jn),eh=d(jn),Ta=s(jn,"P",{});var KP=n(Ta);th=i(KP,"Main method to prepare for the model one or several image(s)."),KP.forEach(o),oh=d(jn),f(Ht.$$.fragment,jn),jn.forEach(o),$o.forEach(o),Yc=d(t),We=s(t,"H2",{class:!0});var id=n(We);Wt=s(id,"A",{id:!0,class:!0,href:!0});var JP=n(Wt);xa=s(JP,"SPAN",{});var GP=n(xa);f(er.$$.fragment,GP),GP.forEach(o),JP.forEach(o),rh=d(id),Ea=s(id,"SPAN",{});var XP=n(Ea);sh=i(XP,"PerceiverTextPreprocessor"),XP.forEach(o),id.forEach(o),el=d(t),ae=s(t,"DIV",{class:!0});var Mn=n(ae);f(tr.$$.fragment,Mn),nh=d(Mn),or=s(Mn,"P",{});var cd=n(or);ah=i(cd,"Text preprocessing for Perceiver Encoder. 
Can be used to embed "),Fa=s(cd,"CODE",{});var ZP=n(Fa);ih=i(ZP,"inputs"),ZP.forEach(o),ch=i(cd," and add positional encodings."),cd.forEach(o),lh=d(Mn),rr=s(Mn,"P",{});var ld=n(rr);dh=i(ld,"The dimensionality of the embeddings is determined by the "),Ca=s(ld,"CODE",{});var QP=n(Ca);ph=i(QP,"d_model"),QP.forEach(o),mh=i(ld," attribute of the configuration."),ld.forEach(o),Mn.forEach(o),tl=d(t),Ve=s(t,"H2",{class:!0});var dd=n(Ve);Vt=s(dd,"A",{id:!0,class:!0,href:!0});var YP=n(Vt);ja=s(YP,"SPAN",{});var e1=n(ja);f(sr.$$.fragment,e1),e1.forEach(o),YP.forEach(o),hh=d(dd),Ma=s(dd,"SPAN",{});var t1=n(Ma);uh=i(t1,"PerceiverImagePreprocessor"),t1.forEach(o),dd.forEach(o),ol=d(t),ie=s(t,"DIV",{class:!0});var In=n(ie);f(nr.$$.fragment,In),fh=d(In),Ia=s(In,"P",{});var o1=n(Ia);gh=i(o1,"Image preprocessing for Perceiver Encoder."),o1.forEach(o),vh=d(In),ee=s(In,"P",{});var ye=n(ee);_h=i(ye,"Note: the "),qa=s(ye,"EM",{});var r1=n(qa);Ph=i(r1,"out_channels"),r1.forEach(o),wh=i(ye," argument refers to the output channels of a convolutional layer, if "),za=s(ye,"EM",{});var s1=n(za);bh=i(s1,"prep_type"),s1.forEach(o),yh=i(ye,` is set to \u201Cconv1x1\u201D or \u201Cconv\u201D. If one adds absolute position embeddings, one must make sure the `),Aa=s(ye,"EM",{});var n1=n(Aa);kh=i(n1,"num_channels"),n1.forEach(o),$h=i(ye,` of the position encoding kwargs are set equal to the `),Da=s(ye,"EM",{});var a1=n(Da);Th=i(a1,"out_channels"),a1.forEach(o),xh=i(ye,"."),ye.forEach(o),In.forEach(o),rl=d(t),Re=s(t,"H2",{class:!0});var pd=n(Re);Rt=s(pd,"A",{id:!0,class:!0,href:!0});var i1=n(Rt);Na=s(i1,"SPAN",{});var c1=n(Na);f(ar.$$.fragment,c1),c1.forEach(o),i1.forEach(o),Eh=d(pd),Oa=s(pd,"SPAN",{});var l1=n(Oa);Fh=i(l1,"PerceiverOneHotPreprocessor"),l1.forEach(o),pd.forEach(o),sl=d(t),Ue=s(t,"DIV",{class:!0});var md=n(Ue);f(ir.$$.fragment,md),Ch=d(md),La=s(md,"P",{});var d1=n(La);jh=i(d1,"One-hot preprocessor for Perceiver Encoder. 
Can be used to add a dummy index dimension to the input."),d1.forEach(o),md.forEach(o),nl=d(t),Ke=s(t,"H2",{class:!0});var hd=n(Ke);Ut=s(hd,"A",{id:!0,class:!0,href:!0});var p1=n(Ut);Sa=s(p1,"SPAN",{});var m1=n(Sa);f(cr.$$.fragment,m1),m1.forEach(o),p1.forEach(o),Mh=d(hd),Ba=s(hd,"SPAN",{});var h1=n(Ba);Ih=i(h1,"PerceiverAudioPreprocessor"),h1.forEach(o),hd.forEach(o),al=d(t),Je=s(t,"DIV",{class:!0});var ud=n(Je);f(lr.$$.fragment,ud),qh=d(ud),Ha=s(ud,"P",{});var u1=n(Ha);zh=i(u1,"Audio preprocessing for Perceiver Encoder."),u1.forEach(o),ud.forEach(o),il=d(t),Ge=s(t,"H2",{class:!0});var fd=n(Ge);Kt=s(fd,"A",{id:!0,class:!0,href:!0});var f1=n(Kt);Wa=s(f1,"SPAN",{});var g1=n(Wa);f(dr.$$.fragment,g1),g1.forEach(o),f1.forEach(o),Ah=d(fd),Va=s(fd,"SPAN",{});var v1=n(Va);Dh=i(v1,"PerceiverMultimodalPreprocessor"),v1.forEach(o),fd.forEach(o),cl=d(t),ce=s(t,"DIV",{class:!0});var qn=n(ce);f(pr.$$.fragment,qn),Nh=d(qn),Ra=s(qn,"P",{});var _1=n(Ra);Oh=i(_1,"Multimodal preprocessing for Perceiver Encoder."),_1.forEach(o),Lh=d(qn),Ua=s(qn,"P",{});var P1=n(Ua);Sh=i(P1,`Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number of channels.`),P1.forEach(o),qn.forEach(o),ll=d(t),Xe=s(t,"H2",{class:!0});var gd=n(Xe);Jt=s(gd,"A",{id:!0,class:!0,href:!0});var w1=n(Jt);Ka=s(w1,"SPAN",{});var b1=n(Ka);f(mr.$$.fragment,b1),b1.forEach(o),w1.forEach(o),Bh=d(gd),Ja=s(gd,"SPAN",{});var y1=n(Ja);Hh=i(y1,"PerceiverProjectionDecoder"),y1.forEach(o),gd.forEach(o),dl=d(t),Ze=s(t,"DIV",{class:!0});var vd=n(Ze);f(hr.$$.fragment,vd),Wh=d(vd),Ga=s(vd,"P",{});var k1=n(Ga);Vh=i(k1,"Baseline projection decoder (no cross-attention)."),k1.forEach(o),vd.forEach(o),pl=d(t),Qe=s(t,"H2",{class:!0});var _d=n(Qe);Gt=s(_d,"A",{id:!0,class:!0,href:!0});var $1=n(Gt);Xa=s($1,"SPAN",{});var T1=n(Xa);f(ur.$$.fragment,T1),T1.forEach(o),$1.forEach(o),Rh=d(_d),Za=s(_d,"SPAN",{});var x1=n(Za);Uh=i(x1,"PerceiverBasicDecoder"),x1.forEach(o),_d.forEach(o),ml=d(t),le=s(t,"DIV",{class:!0});var zn=n(le);f(fr.$$.fragment,zn),Kh=d(zn),Qa=s(zn,"P",{});var E1=n(Qa);Jh=i(E1,`Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a cross-attention operation, in which the latents produce keys and values.`),E1.forEach(o),Gh=d(zn),Ya=s(zn,"P",{});var F1=n(Ya);Xh=i(F1,"The shape of the output of this class depends on how one defines the output queries (also called decoder queries)."),F1.forEach(o),zn.forEach(o),hl=d(t),Ye=s(t,"H2",{class:!0});var Pd=n(Ye);Xt=s(Pd,"A",{id:!0,class:!0,href:!0});var C1=n(Xt);ei=s(C1,"SPAN",{});var j1=n(ei);f(gr.$$.fragment,j1),j1.forEach(o),C1.forEach(o),Zh=d(Pd),ti=s(Pd,"SPAN",{});var M1=n(ti);Qh=i(M1,"PerceiverClassificationDecoder"),M1.forEach(o),Pd.forEach(o),ul=d(t),et=s(t,"DIV",{class:!0});var wd=n(et);f(vr.$$.fragment,wd),Yh=d(wd),_r=s(wd,"P",{});var bd=n(_r);eu=i(bd,"Cross-attention based classification decoder. Light-weight wrapper of "),oi=s(bd,"CODE",{});var I1=n(oi);tu=i(I1,"PerceiverBasicDecoder"),I1.forEach(o),ou=i(bd,` for logit output. Will turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of shape (batch_size, num_labels). 
The queries are of shape (batch_size, 1, num_labels).`),bd.forEach(o),wd.forEach(o),fl=d(t),tt=s(t,"H2",{class:!0});var yd=n(tt);Zt=s(yd,"A",{id:!0,class:!0,href:!0});var q1=n(Zt);ri=s(q1,"SPAN",{});var z1=n(ri);f(Pr.$$.fragment,z1),z1.forEach(o),q1.forEach(o),ru=d(yd),si=s(yd,"SPAN",{});var A1=n(si);su=i(A1,"PerceiverOpticalFlowDecoder"),A1.forEach(o),yd.forEach(o),gl=d(t),ot=s(t,"DIV",{class:!0});var kd=n(ot);f(wr.$$.fragment,kd),nu=d(kd),ni=s(kd,"P",{});var D1=n(ni);au=i(D1,"Cross-attention based optical flow decoder."),D1.forEach(o),kd.forEach(o),vl=d(t),rt=s(t,"H2",{class:!0});var $d=n(rt);Qt=s($d,"A",{id:!0,class:!0,href:!0});var N1=n(Qt);ai=s(N1,"SPAN",{});var O1=n(ai);f(br.$$.fragment,O1),O1.forEach(o),N1.forEach(o),iu=d($d),ii=s($d,"SPAN",{});var L1=n(ii);cu=i(L1,"PerceiverBasicVideoAutoencodingDecoder"),L1.forEach(o),$d.forEach(o),_l=d(t),st=s(t,"DIV",{class:!0});var Td=n(st);f(yr.$$.fragment,Td),lu=d(Td),kr=s(Td,"P",{});var xd=n(kr);du=i(xd,"Cross-attention based video-autoencoding decoder. Light-weight wrapper of ["),ci=s(xd,"EM",{});var S1=n(ci);pu=i(S1,"PerceiverBasicDecoder"),S1.forEach(o),mu=i(xd,`] with video reshaping logic.`),xd.forEach(o),Td.forEach(o),Pl=d(t),nt=s(t,"H2",{class:!0});var Ed=n(nt);Yt=s(Ed,"A",{id:!0,class:!0,href:!0});var B1=n(Yt);li=s(B1,"SPAN",{});var H1=n(li);f($r.$$.fragment,H1),H1.forEach(o),B1.forEach(o),hu=d(Ed),di=s(Ed,"SPAN",{});var W1=n(di);uu=i(W1,"PerceiverMultimodalDecoder"),W1.forEach(o),Ed.forEach(o),wl=d(t),de=s(t,"DIV",{class:!0});var An=n(de);f(Tr.$$.fragment,An),fu=d(An),xr=s(An,"P",{});var Fd=n(xr);gu=i(Fd,"Multimodal decoding by composing uni-modal decoders. The "),pi=s(Fd,"EM",{});var V1=n(pi);vu=i(V1,"modalities"),V1.forEach(o),_u=i(Fd,` argument of the constructor is a dictionary mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are concatenated along the time dimension.`),Fd.forEach(o),Pu=d(An),mi=s(An,"P",{});var R1=n(mi);wu=i(R1,"Next, there is a shared cross attention operation across all modalities."),R1.forEach(o),An.forEach(o),bl=d(t),at=s(t,"H2",{class:!0});var Cd=n(at);eo=s(Cd,"A",{id:!0,class:!0,href:!0});var U1=n(eo);hi=s(U1,"SPAN",{});var K1=n(hi);f(Er.$$.fragment,K1),K1.forEach(o),U1.forEach(o),bu=d(Cd),ui=s(Cd,"SPAN",{});var J1=n(ui);yu=i(J1,"PerceiverProjectionPostprocessor"),J1.forEach(o),Cd.forEach(o),yl=d(t),it=s(t,"DIV",{class:!0});var jd=n(it);f(Fr.$$.fragment,jd),ku=d(jd),fi=s(jd,"P",{});var G1=n(fi);$u=i(G1,`Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower dimension.`),G1.forEach(o),jd.forEach(o),kl=d(t),ct=s(t,"H2",{class:!0});var Md=n(ct);to=s(Md,"A",{id:!0,class:!0,href:!0});var X1=n(to);gi=s(X1,"SPAN",{});var Z1=n(gi);f(Cr.$$.fragment,Z1),Z1.forEach(o),X1.forEach(o),Tu=d(Md),vi=s(Md,"SPAN",{});var Q1=n(vi);xu=i(Q1,"PerceiverAudioPostprocessor"),Q1.forEach(o),Md.forEach(o),$l=d(t),lt=s(t,"DIV",{class:!0});var Id=n(lt);f(jr.$$.fragment,Id),Eu=d(Id),_i=s(Id,"P",{});var Y1=n(_i);Fu=i(Y1,"Audio postprocessing for Perceiver. 
Can be used to convert the decoder output to audio features."),Y1.forEach(o),Id.forEach(o),Tl=d(t),dt=s(t,"H2",{class:!0});var qd=n(dt);oo=s(qd,"A",{id:!0,class:!0,href:!0});var ew=n(oo);Pi=s(ew,"SPAN",{});var tw=n(Pi);f(Mr.$$.fragment,tw),tw.forEach(o),ew.forEach(o),Cu=d(qd),wi=s(qd,"SPAN",{});var ow=n(wi);ju=i(ow,"PerceiverClassificationPostprocessor"),ow.forEach(o),qd.forEach(o),xl=d(t),pt=s(t,"DIV",{class:!0});var zd=n(pt);f(Ir.$$.fragment,zd),Mu=d(zd),bi=s(zd,"P",{});var rw=n(bi);Iu=i(rw,"Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits."),rw.forEach(o),zd.forEach(o),El=d(t),mt=s(t,"H2",{class:!0});var Ad=n(mt);ro=s(Ad,"A",{id:!0,class:!0,href:!0});var sw=n(ro);yi=s(sw,"SPAN",{});var nw=n(yi);f(qr.$$.fragment,nw),nw.forEach(o),sw.forEach(o),qu=d(Ad),ki=s(Ad,"SPAN",{});var aw=n(ki);zu=i(aw,"PerceiverMultimodalPostprocessor"),aw.forEach(o),Ad.forEach(o),Fl=d(t),ht=s(t,"DIV",{class:!0});var Dd=n(ht);f(zr.$$.fragment,Dd),Au=d(Dd),$i=s(Dd,"P",{});var iw=n($i);Du=i(iw,`Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single postprocessor.`),iw.forEach(o),Dd.forEach(o),Cl=d(t),ut=s(t,"H2",{class:!0});var Nd=n(ut);so=s(Nd,"A",{id:!0,class:!0,href:!0});var cw=n(so);Ti=s(cw,"SPAN",{});var lw=n(Ti);f(Ar.$$.fragment,lw),lw.forEach(o),cw.forEach(o),Nu=d(Nd),xi=s(Nd,"SPAN",{});var dw=n(xi);Ou=i(dw,"PerceiverModel"),dw.forEach(o),Nd.forEach(o),jl=d(t),pe=s(t,"DIV",{class:!0});var Dn=n(pe);f(Dr.$$.fragment,Dn),Lu=d(Dn),Nr=s(Dn,"P",{});var Od=n(Nr);Su=i(Od,`The Perceiver: a scalable, fully attentional architecture. This model is a PyTorch `),Or=s(Od,"A",{href:!0,rel:!0});var pw=n(Or);Bu=i(pw,"torch.nn.Module"),pw.forEach(o),Hu=i(Od,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Od.forEach(o),Wu=d(Dn),S=s(Dn,"DIV",{class:!0});var ke=n(S);f(Lr.$$.fragment,ke),Vu=d(ke),ft=s(ke,"P",{});var Nn=n(ft);Ru=i(Nn,"The "),Gs=s(Nn,"A",{href:!0});var mw=n(Gs);Uu=i(mw,"PerceiverModel"),mw.forEach(o),Ku=i(Nn," forward method, overrides the "),Ei=s(Nn,"CODE",{});var hw=n(Ei);Ju=i(hw,"__call__"),hw.forEach(o),Gu=i(Nn," special method."),Nn.forEach(o),Xu=d(ke),f(no.$$.fragment,ke),Zu=d(ke),Fi=s(ke,"P",{});var uw=n(Fi);Qu=i(uw,"Examples:"),uw.forEach(o),Yu=d(ke),f(Sr.$$.fragment,ke),ke.forEach(o),Dn.forEach(o),Ml=d(t),gt=s(t,"H2",{class:!0});var Ld=n(gt);ao=s(Ld,"A",{id:!0,class:!0,href:!0});var fw=n(ao);Ci=s(fw,"SPAN",{});var gw=n(Ci);f(Br.$$.fragment,gw),gw.forEach(o),fw.forEach(o),ef=d(Ld),ji=s(Ld,"SPAN",{});var vw=n(ji);tf=i(vw,"PerceiverForMaskedLM"),vw.forEach(o),Ld.forEach(o),Il=d(t),me=s(t,"DIV",{class:!0});var On=n(me);f(Hr.$$.fragment,On),of=d(On),Wr=s(On,"P",{});var Sd=n(Wr);rf=i(Sd,`Example use of Perceiver for masked language modeling. This model is a PyTorch `),Vr=s(Sd,"A",{href:!0,rel:!0});var _w=n(Vr);sf=i(_w,"torch.nn.Module"),_w.forEach(o),nf=i(Sd,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sd.forEach(o),af=d(On),B=s(On,"DIV",{class:!0});var $e=n(B);f(Rr.$$.fragment,$e),cf=d($e),vt=s($e,"P",{});var Ln=n(vt);lf=i(Ln,"The "),Xs=s(Ln,"A",{href:!0});var Pw=n(Xs);df=i(Pw,"PerceiverForMaskedLM"),Pw.forEach(o),pf=i(Ln," forward method, overrides the "),Mi=s(Ln,"CODE",{});var ww=n(Mi);mf=i(ww,"__call__"),ww.forEach(o),hf=i(Ln," special method."),Ln.forEach(o),uf=d($e),f(io.$$.fragment,$e),ff=d($e),Ii=s($e,"P",{});var bw=n(Ii);gf=i(bw,"Examples:"),bw.forEach(o),vf=d($e),f(Ur.$$.fragment,$e),$e.forEach(o),On.forEach(o),ql=d(t),_t=s(t,"H2",{class:!0});var Bd=n(_t);co=s(Bd,"A",{id:!0,class:!0,href:!0});var yw=n(co);qi=s(yw,"SPAN",{});var kw=n(qi);f(Kr.$$.fragment,kw),kw.forEach(o),yw.forEach(o),_f=d(Bd),zi=s(Bd,"SPAN",{});var $w=n(zi);Pf=i($w,"PerceiverForSequenceClassification"),$w.forEach(o),Bd.forEach(o),zl=d(t),he=s(t,"DIV",{class:!0});var Sn=n(he);f(Jr.$$.fragment,Sn),wf=d(Sn),Gr=s(Sn,"P",{});var Hd=n(Gr);bf=i(Hd,`Example use of Perceiver for text classification. This model is a PyTorch `),Xr=s(Hd,"A",{href:!0,rel:!0});var Tw=n(Xr);yf=i(Tw,"torch.nn.Module"),Tw.forEach(o),kf=i(Hd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hd.forEach(o),$f=d(Sn),H=s(Sn,"DIV",{class:!0});var Te=n(H);f(Zr.$$.fragment,Te),Tf=d(Te),Pt=s(Te,"P",{});var Bn=n(Pt);xf=i(Bn,"The "),Zs=s(Bn,"A",{href:!0});var xw=n(Zs);Ef=i(xw,"PerceiverForSequenceClassification"),xw.forEach(o),Ff=i(Bn," forward method, overrides the "),Ai=s(Bn,"CODE",{});var Ew=n(Ai);Cf=i(Ew,"__call__"),Ew.forEach(o),jf=i(Bn," special method."),Bn.forEach(o),Mf=d(Te),f(lo.$$.fragment,Te),If=d(Te),Di=s(Te,"P",{});var Fw=n(Di);qf=i(Fw,"Examples:"),Fw.forEach(o),zf=d(Te),f(Qr.$$.fragment,Te),Te.forEach(o),Sn.forEach(o),Al=d(t),wt=s(t,"H2",{class:!0});var Wd=n(wt);po=s(Wd,"A",{id:!0,class:!0,href:!0});var Cw=n(po);Ni=s(Cw,"SPAN",{});var jw=n(Ni);f(Yr.$$.fragment,jw),jw.forEach(o),Cw.forEach(o),Af=d(Wd),Oi=s(Wd,"SPAN",{});var Mw=n(Oi);Df=i(Mw,"PerceiverForImageClassificationLearned"),Mw.forEach(o),Wd.forEach(o),Dl=d(t),q=s(t,"DIV",{class:!0});var oe=n(q);f(es.$$.fragment,oe),Nf=d(oe),Li=s(oe,"P",{});var Iw=n(Li);Of=i(Iw,"Example use of Perceiver for image classification, for tasks such as ImageNet."),Iw.forEach(o),Lf=d(oe),Si=s(oe,"P",{});var qw=n(Si);Sf=i(qw,`This model uses learned position embeddings. In other words, this model is not given any privileged information about the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.`),qw.forEach(o),Bf=d(oe),W=s(oe,"P",{});var ue=n(W);Qs=s(ue,"A",{href:!0});var zw=n(Qs);Hf=i(zw,"PerceiverForImageClassificationLearned"),zw.forEach(o),Wf=i(ue,` uses `),Ys=s(ue,"A",{href:!0});var Aw=n(Ys);Vf=i(Aw,"PerceiverImagePreprocessor"),Aw.forEach(o),Rf=i(ue," (with "),Bi=s(ue,"CODE",{});var Dw=n(Bi);Uf=i(Dw,'prep_type="conv1x1"'),Dw.forEach(o),Kf=i(ue,`) to preprocess the input images, and `),en=s(ue,"A",{href:!0});var Nw=n(en);Jf=i(Nw,"PerceiverClassificationDecoder"),Nw.forEach(o),Gf=i(ue,` to decode the latent representation of `),tn=s(ue,"A",{href:!0});var Ow=n(tn);Xf=i(Ow,"PerceiverModel"),Ow.forEach(o),Zf=i(ue," into classification logits."),ue.forEach(o),Qf=d(oe),ts=s(oe,"P",{});var Vd=n(ts);Yf=i(Vd,"This model is a PyTorch "),os=s(Vd,"A",{href:!0,rel:!0});var Lw=n(os);eg=i(Lw,"torch.nn.Module"),Lw.forEach(o),tg=i(Vd,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vd.forEach(o),og=d(oe),V=s(oe,"DIV",{class:!0});var xe=n(V);f(rs.$$.fragment,xe),rg=d(xe),bt=s(xe,"P",{});var Hn=n(bt);sg=i(Hn,"The "),on=s(Hn,"A",{href:!0});var Sw=n(on);ng=i(Sw,"PerceiverForImageClassificationLearned"),Sw.forEach(o),ag=i(Hn," forward method, overrides the "),Hi=s(Hn,"CODE",{});var Bw=n(Hi);ig=i(Bw,"__call__"),Bw.forEach(o),cg=i(Hn," special method."),Hn.forEach(o),lg=d(xe),f(mo.$$.fragment,xe),dg=d(xe),Wi=s(xe,"P",{});var Hw=n(Wi);pg=i(Hw,"Examples:"),Hw.forEach(o),mg=d(xe),f(ss.$$.fragment,xe),xe.forEach(o),oe.forEach(o),Nl=d(t),yt=s(t,"H2",{class:!0});var Rd=n(yt);ho=s(Rd,"A",{id:!0,class:!0,href:!0});var Ww=n(ho);Vi=s(Ww,"SPAN",{});var Vw=n(Vi);f(ns.$$.fragment,Vw),Vw.forEach(o),Ww.forEach(o),hg=d(Rd),Ri=s(Rd,"SPAN",{});var Rw=n(Ri);ug=i(Rw,"PerceiverForImageClassificationFourier"),Rw.forEach(o),Rd.forEach(o),Ol=d(t),z=s(t,"DIV",{class:!0});var re=n(z);f(as.$$.fragment,re),fg=d(re),Ui=s(re,"P",{});var Uw=n(Ui);gg=i(Uw,"Example use of Perceiver for image classification, for tasks such as ImageNet."),Uw.forEach(o),vg=d(re),Ki=s(re,"P",{});var Kw=n(Ki);_g=i(Kw,`This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of 79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).`),Kw.forEach(o),Pg=d(re),R=s(re,"P",{});var fe=n(R);rn=s(fe,"A",{href:!0});var Jw=n(rn);wg=i(Jw,"PerceiverForImageClassificationLearned"),Jw.forEach(o),bg=i(fe,` uses `),sn=s(fe,"A",{href:!0});var Gw=n(sn);yg=i(Gw,"PerceiverImagePreprocessor"),Gw.forEach(o),kg=i(fe," (with "),Ji=s(fe,"CODE",{});var Xw=n(Ji);$g=i(Xw,'prep_type="pixels"'),Xw.forEach(o),Tg=i(fe,`) to preprocess the input images, and `),nn=s(fe,"A",{href:!0});var Zw=n(nn);xg=i(Zw,"PerceiverClassificationDecoder"),Zw.forEach(o),Eg=i(fe,` to decode the latent representation of `),an=s(fe,"A",{href:!0});var Qw=n(an);Fg=i(Qw,"PerceiverModel"),Qw.forEach(o),Cg=i(fe," into classification logits."),fe.forEach(o),jg=d(re),is=s(re,"P",{});var Ud=n(is);Mg=i(Ud,"This model is a PyTorch "),cs=s(Ud,"A",{href:!0,rel:!0});var Yw=n(cs);Ig=i(Yw,"torch.nn.Module"),Yw.forEach(o),qg=i(Ud,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ud.forEach(o),zg=d(re),U=s(re,"DIV",{class:!0});var Ee=n(U);f(ls.$$.fragment,Ee),Ag=d(Ee),kt=s(Ee,"P",{});var Wn=n(kt);Dg=i(Wn,"The "),cn=s(Wn,"A",{href:!0});var eb=n(cn);Ng=i(eb,"PerceiverForImageClassificationFourier"),eb.forEach(o),Og=i(Wn," forward method, overrides the "),Gi=s(Wn,"CODE",{});var tb=n(Gi);Lg=i(tb,"__call__"),tb.forEach(o),Sg=i(Wn," special method."),Wn.forEach(o),Bg=d(Ee),f(uo.$$.fragment,Ee),Hg=d(Ee),Xi=s(Ee,"P",{});var ob=n(Xi);Wg=i(ob,"Examples:"),ob.forEach(o),Vg=d(Ee),f(ds.$$.fragment,Ee),Ee.forEach(o),re.forEach(o),Ll=d(t),$t=s(t,"H2",{class:!0});var Kd=n($t);fo=s(Kd,"A",{id:!0,class:!0,href:!0});var rb=n(fo);Zi=s(rb,"SPAN",{});var sb=n(Zi);f(ps.$$.fragment,sb),sb.forEach(o),rb.forEach(o),Rg=d(Kd),Qi=s(Kd,"SPAN",{});var nb=n(Qi);Ug=i(nb,"PerceiverForImageClassificationConvProcessing"),nb.forEach(o),Kd.forEach(o),Sl=d(t),A=s(t,"DIV",{class:!0});var se=n(A);f(ms.$$.fragment,se),Kg=d(se),Yi=s(se,"P",{});var ab=n(Yi);Jg=i(ab,"Example use of Perceiver for image classification, for tasks such as ImageNet."),ab.forEach(o),Gg=d(se),ec=s(se,"P",{});var ib=n(ec);Xg=i(ib,`This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy of 82.1 on ImageNet.`),ib.forEach(o),Zg=d(se),K=s(se,"P",{});var ge=n(K);ln=s(ge,"A",{href:!0});var cb=n(ln);Qg=i(cb,"PerceiverForImageClassificationLearned"),cb.forEach(o),Yg=i(ge,` uses `),dn=s(ge,"A",{href:!0});var lb=n(dn);ev=i(lb,"PerceiverImagePreprocessor"),lb.forEach(o),tv=i(ge," (with "),tc=s(ge,"CODE",{});var db=n(tc);ov=i(db,'prep_type="conv"'),db.forEach(o),rv=i(ge,`) to preprocess the input images, and `),pn=s(ge,"A",{href:!0});var pb=n(pn);sv=i(pb,"PerceiverClassificationDecoder"),pb.forEach(o),nv=i(ge,` to decode the latent representation of `),mn=s(ge,"A",{href:!0});var mb=n(mn);av=i(mb,"PerceiverModel"),mb.forEach(o),iv=i(ge," into classification logits."),ge.forEach(o),cv=d(se),hs=s(se,"P",{});var Jd=n(hs);lv=i(Jd,"This model is a PyTorch "),us=s(Jd,"A",{href:!0,rel:!0});var hb=n(us);dv=i(hb,"torch.nn.Module"),hb.forEach(o),pv=i(Jd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Jd.forEach(o),mv=d(se),J=s(se,"DIV",{class:!0});var Fe=n(J);f(fs.$$.fragment,Fe),hv=d(Fe),Tt=s(Fe,"P",{});var Vn=n(Tt);uv=i(Vn,"The "),hn=s(Vn,"A",{href:!0});var ub=n(hn);fv=i(ub,"PerceiverForImageClassificationConvProcessing"),ub.forEach(o),gv=i(Vn," forward method, overrides the "),oc=s(Vn,"CODE",{});var fb=n(oc);vv=i(fb,"__call__"),fb.forEach(o),_v=i(Vn," special method."),Vn.forEach(o),Pv=d(Fe),f(go.$$.fragment,Fe),wv=d(Fe),rc=s(Fe,"P",{});var gb=n(rc);bv=i(gb,"Examples:"),gb.forEach(o),yv=d(Fe),f(gs.$$.fragment,Fe),Fe.forEach(o),se.forEach(o),Bl=d(t),xt=s(t,"H2",{class:!0});var Gd=n(xt);vo=s(Gd,"A",{id:!0,class:!0,href:!0});var vb=n(vo);sc=s(vb,"SPAN",{});var _b=n(sc);f(vs.$$.fragment,_b),_b.forEach(o),vb.forEach(o),kv=d(Gd),nc=s(Gd,"SPAN",{});var Pb=n(nc);$v=i(Pb,"PerceiverForOpticalFlow"),Pb.forEach(o),Gd.forEach(o),Hl=d(t),O=s(t,"DIV",{class:!0});var Ce=n(O);f(_s.$$.fragment,Ce),Tv=d(Ce),L=s(Ce,"P",{});var ne=n(L);xv=i(ne,`Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. 
`),un=s(ne,"A",{href:!0});var wb=n(un);Ev=i(wb,"PerceiverForOpticalFlow"),wb.forEach(o),Fv=i(ne,` uses `),fn=s(ne,"A",{href:!0});var bb=n(fn);Cv=i(bb,"PerceiverImagePreprocessor"),bb.forEach(o),jv=i(ne," (with "),ac=s(ne,"EM",{});var yb=n(ac);Mv=i(yb,"prep_type=\u201Cpatches\u201D"),yb.forEach(o),Iv=i(ne,`) to preprocess the input images, and `),gn=s(ne,"A",{href:!0});var kb=n(gn);qv=i(kb,"PerceiverOpticalFlowDecoder"),kb.forEach(o),zv=i(ne,` to decode the latent representation of `),vn=s(ne,"A",{href:!0});var $b=n(vn);Av=i($b,"PerceiverModel"),$b.forEach(o),Dv=i(ne,"."),ne.forEach(o),Nv=d(Ce),ic=s(Ce,"P",{});var Tb=n(ic);Ov=i(Tb,`As input, one concatenates 2 subsequent frames along the channel dimension and extract a 3 x 3 patch around each pixel (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation using the same encoding used for the input.`),Tb.forEach(o),Lv=d(Ce),Ps=s(Ce,"P",{});var Xd=n(Ps);Sv=i(Xd,"This model is a PyTorch "),ws=s(Xd,"A",{href:!0,rel:!0});var xb=n(ws);Bv=i(xb,"torch.nn.Module"),xb.forEach(o),Hv=i(Xd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xd.forEach(o),Wv=d(Ce),G=s(Ce,"DIV",{class:!0});var je=n(G);f(bs.$$.fragment,je),Vv=d(je),Et=s(je,"P",{});var Rn=n(Et);Rv=i(Rn,"The "),_n=s(Rn,"A",{href:!0});var Eb=n(_n);Uv=i(Eb,"PerceiverForOpticalFlow"),Eb.forEach(o),Kv=i(Rn," forward method, overrides the "),cc=s(Rn,"CODE",{});var Fb=n(cc);Jv=i(Fb,"__call__"),Fb.forEach(o),Gv=i(Rn," special method."),Rn.forEach(o),Xv=d(je),f(_o.$$.fragment,je),Zv=d(je),lc=s(je,"P",{});var Cb=n(lc);Qv=i(Cb,"Examples:"),Cb.forEach(o),Yv=d(je),f(ys.$$.fragment,je),je.forEach(o),Ce.forEach(o),Wl=d(t),Ft=s(t,"H2",{class:!0});var Zd=n(Ft);Po=s(Zd,"A",{id:!0,class:!0,href:!0});var jb=n(Po);dc=s(jb,"SPAN",{});var Mb=n(dc);f(ks.$$.fragment,Mb),Mb.forEach(o),jb.forEach(o),e_=d(Zd),pc=s(Zd,"SPAN",{});var Ib=n(pc);t_=i(Ib,"PerceiverForMultimodalAutoencoding"),Ib.forEach(o),Zd.forEach(o),Vl=d(t),C=s(t,"DIV",{class:!0});var I=n(C);f($s.$$.fragment,I),o_=d(I),mc=s(I,"P",{});var qb=n(mc);r_=i(qb,"Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700."),qb.forEach(o),s_=d(I),wo=s(I,"P",{});var kc=n(wo);Pn=s(kc,"A",{href:!0});var zb=n(Pn);n_=i(zb,"PerceiverForMultimodalAutoencoding"),zb.forEach(o),a_=i(kc,` uses `),wn=s(kc,"A",{href:!0});var Ab=n(wn);i_=i(Ab,"PerceiverMultimodalPreprocessor"),Ab.forEach(o),c_=i(kc,` to preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies the Perceiver encoder.`),kc.forEach(o),l_=d(I),we=s(I,"P",{});var zs=n(we);bn=s(zs,"A",{href:!0});var Db=n(bn);d_=i(Db,"PerceiverMultimodalDecoder"),Db.forEach(o),p_=i(zs,` is used to decode the latent representation of `),yn=s(zs,"A",{href:!0});var Nb=n(yn);m_=i(Nb,"PerceiverModel"),Nb.forEach(o),h_=i(zs,`. This decoder uses each modality-specific decoder to construct queries. The decoder queries are created based on the inputs after preprocessing. 
However, autoencoding an entire video in a single forward pass is computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent representation. This is determined by the subsampled indices for each modality, which can be provided as additional input to the forward pass of `),kn=s(zs,"A",{href:!0});var Ob=n(kn);u_=i(Ob,"PerceiverForMultimodalAutoencoding"),Ob.forEach(o),f_=i(zs,"."),zs.forEach(o),g_=d(I),bo=s(I,"P",{});var $c=n(bo);$n=s($c,"A",{href:!0});var Lb=n($n);v_=i(Lb,"PerceiverMultimodalDecoder"),Lb.forEach(o),__=i($c,` also pads the decoder queries of the different modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention is performed with the latent representation of `),Tn=s($c,"A",{href:!0});var Sb=n(Tn);P_=i(Sb,"PerceiverModel"),Sb.forEach(o),w_=i($c,"."),$c.forEach(o),b_=d(I),Ts=s(I,"P",{});var Qd=n(Ts);y_=i(Qd,"Finally, "),hc=s(Qd,"CODE",{});var Bb=n(hc);k_=i(Bb,"PerceiverMultiModalPostprocessor"),Bb.forEach(o),$_=i(Qd,` is used to turn this tensor into an actual video. It first splits up the output into the different modalities, and then applies the respective postprocessor for each modality.`),Qd.forEach(o),T_=d(I),uc=s(I,"P",{});var Hb=n(uc);x_=i(Hb,`Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the \u201Clabel\u201D modality), this auto-encoding model becomes a Kinetics 700 video classifier.`),Hb.forEach(o),E_=d(I),xs=s(I,"P",{});var Yd=n(xs);F_=i(Yd,"This model is a PyTorch "),Es=s(Yd,"A",{href:!0,rel:!0});var Wb=n(Es);C_=i(Wb,"torch.nn.Module"),Wb.forEach(o),j_=i(Yd,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yd.forEach(o),M_=d(I),X=s(I,"DIV",{class:!0});var Me=n(X);f(Fs.$$.fragment,Me),I_=d(Me),Ct=s(Me,"P",{});var Un=n(Ct);q_=i(Un,"The "),xn=s(Un,"A",{href:!0});var Vb=n(xn);z_=i(Vb,"PerceiverForMultimodalAutoencoding"),Vb.forEach(o),A_=i(Un," forward method, overrides the "),fc=s(Un,"CODE",{});var Rb=n(fc);D_=i(Rb,"__call__"),Rb.forEach(o),N_=i(Un," special method."),Un.forEach(o),O_=d(Me),f(yo.$$.fragment,Me),L_=d(Me),gc=s(Me,"P",{});var Ub=n(gc);S_=i(Ub,"Examples:"),Ub.forEach(o),B_=d(Me),f(Cs.$$.fragment,Me),Me.forEach(o),I.forEach(o),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(c2)),c(k,"id","perceiver"),c(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(k,"href","#perceiver"),c(w,"class","relative group"),c(It,"id","overview"),c(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(It,"href","#overview"),c(Ie,"class","relative 
group"),c(xo,"href","https://arxiv.org/abs/2107.14795"),c(xo,"rel","nofollow"),c(Eo,"href","https://arxiv.org/abs/2103.03206"),c(Eo,"rel","nofollow"),c(Ls,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),Zb(At.src,V_="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/perceiver_architecture.jpg")||c(At,"src",V_),c(At,"alt","drawing"),c(At,"width","600"),c(Bs,"href","https://arxiv.org/abs/2105.15203"),c(Co,"href","https://huggingface.co/nielsr"),c(Co,"rel","nofollow"),c(jo,"href","https://github.com/deepmind/deepmind-research/tree/master/perceiver"),c(jo,"rel","nofollow"),c(Io,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver"),c(Io,"rel","nofollow"),c(zo,"href","https://huggingface.co/blog/perceiver"),c(zo,"rel","nofollow"),c(Do,"href","https://github.com/pytorch/pytorch/issues/36035"),c(Do,"rel","nofollow"),c(Nt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput"),c(qe,"class","relative group"),c(ze,"class","docstring"),c(Ae,"class","docstring"),c(De,"class","docstring"),c(Ne,"class","docstring"),c(Ot,"id","transformers.PerceiverConfig"),c(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ot,"href","#transformers.PerceiverConfig"),c(Oe,"class","relative group"),c(Vs,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(Vo,"href","https://huggingface.co/deepmind/language-perceiver"),c(Vo,"rel","nofollow"),c(Rs,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Us,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(N,"class","docstring"),c(Lt,"id","transformers.PerceiverTokenizer"),c(Lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lt,"href","#transformers.PerceiverTokenizer"),c(Be,"class","relative group"),c(Ks,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(St,"class","docstring"),c(Q,"class","docstring"),c(Bt,"id","transformers.PerceiverFeatureExtractor"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#transformers.PerceiverFeatureExtractor"),c(He,"class","relative group"),c(Js,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.ImageFeatureExtractionMixin"),c(Pe,"class","docstring"),c(Y,"class","docstring"),c(Wt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor"),c(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor"),c(We,"class","relative 
group"),c(ae,"class","docstring"),c(Vt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"),c(Ve,"class","relative group"),c(ie,"class","docstring"),c(Rt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor"),c(Re,"class","relative group"),c(Ue,"class","docstring"),c(Ut,"id","transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor"),c(Ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ut,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor"),c(Ke,"class","relative group"),c(Je,"class","docstring"),c(Kt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor"),c(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor"),c(Ge,"class","relative group"),c(ce,"class","docstring"),c(Jt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder"),c(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder"),c(Xe,"class","relative group"),c(Ze,"class","docstring"),c(Gt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder"),c(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder"),c(Qe,"class","relative group"),c(le,"class","docstring"),c(Xt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"),c(Xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"),c(Ye,"class","relative group"),c(et,"class","docstring"),c(Zt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder"),c(Zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Zt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder"),c(tt,"class","relative group"),c(ot,"class","docstring"),c(Qt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder"),c(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder"),c(rt,"class","relative group"),c(st,"class","docstring"),c(Yt,"id","transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"),c(nt,"class","relative group"),c(de,"class","docstring"),c(eo,"id","transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor"),c(at,"class","relative group"),c(it,"class","docstring"),c(to,"id","transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor"),c(to,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(to,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor"),c(ct,"class","relative group"),c(lt,"class","docstring"),c(oo,"id","transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor"),c(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(oo,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor"),c(dt,"class","relative group"),c(pt,"class","docstring"),c(ro,"id","transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor"),c(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ro,"href","#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor"),c(mt,"class","relative group"),c(ht,"class","docstring"),c(so,"id","transformers.PerceiverModel"),c(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(so,"href","#transformers.PerceiverModel"),c(ut,"class","relative group"),c(Or,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Or,"rel","nofollow"),c(Gs,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(S,"class","docstring"),c(pe,"class","docstring"),c(ao,"id","transformers.PerceiverForMaskedLM"),c(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ao,"href","#transformers.PerceiverForMaskedLM"),c(gt,"class","relative 
group"),c(Vr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Vr,"rel","nofollow"),c(Xs,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForMaskedLM"),c(B,"class","docstring"),c(me,"class","docstring"),c(co,"id","transformers.PerceiverForSequenceClassification"),c(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(co,"href","#transformers.PerceiverForSequenceClassification"),c(_t,"class","relative group"),c(Xr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Xr,"rel","nofollow"),c(Zs,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForSequenceClassification"),c(H,"class","docstring"),c(he,"class","docstring"),c(po,"id","transformers.PerceiverForImageClassificationLearned"),c(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(po,"href","#transformers.PerceiverForImageClassificationLearned"),c(wt,"class","relative group"),c(Qs,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned"),c(Ys,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"),c(en,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"),c(tn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(os,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(os,"rel","nofollow"),c(on,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned"),c(V,"class","docstring"),c(q,"class","docstring"),c(ho,"id","transformers.PerceiverForImageClassificationFourier"),c(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ho,"href","#transformers.PerceiverForImageClassificationFourier"),c(yt,"class","relative group"),c(rn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned"),c(sn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"),c(nn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"),c(an,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(cs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(cs,"rel","nofollow"),c(cn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationFourier"),c(U,"class","docstring"),c(z,"class","docstring"),c(fo,"id","transformers.PerceiverForImageClassificationConvProcessing"),c(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fo,"href","#transformers.PerceiverForImageClassificationConvProcessing"),c($t,"class","relative 
group"),c(ln,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned"),c(dn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"),c(pn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"),c(mn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(us,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(us,"rel","nofollow"),c(hn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForImageClassificationConvProcessing"),c(J,"class","docstring"),c(A,"class","docstring"),c(vo,"id","transformers.PerceiverForOpticalFlow"),c(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vo,"href","#transformers.PerceiverForOpticalFlow"),c(xt,"class","relative group"),c(un,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForOpticalFlow"),c(fn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"),c(gn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder"),c(vn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(ws,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ws,"rel","nofollow"),c(_n,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForOpticalFlow"),c(G,"class","docstring"),c(O,"class","docstring"),c(Po,"id","transformers.PerceiverForMultimodalAutoencoding"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.PerceiverForMultimodalAutoencoding"),c(Ft,"class","relative 
group"),c(Pn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForMultimodalAutoencoding"),c(wn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor"),c(bn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"),c(yn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(kn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForMultimodalAutoencoding"),c($n,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"),c(Tn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverModel"),c(Es,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Es,"rel","nofollow"),c(xn,"href","/docs/transformers/v4.15.0/en/model_doc/perceiver#transformers.PerceiverForMultimodalAutoencoding"),c(X,"class","docstring"),c(C,"class","docstring")},m(t,p){e(document.head,h),m(t,$,p),m(t,w,p),e(w,k),e(k,x),g(y,x,null),e(w,b),e(w,E),e(E,ep),m(t,Tc,p),m(t,Ie,p),e(Ie,It),e(It,Kn),g(To,Kn,null),e(Ie,tp),e(Ie,Jn),e(Jn,op),m(t,xc,p),m(t,qt,p),e(qt,rp),e(qt,xo),e(xo,sp),e(qt,np),m(t,Ec,p),m(t,zt,p),e(zt,ap),e(zt,Eo),e(Eo,ip),e(zt,cp),m(t,Fc,p),m(t,As,p),e(As,lp),m(t,Cc,p),m(t,Ds,p),e(Ds,Gn),e(Gn,dp),m(t,jc,p),m(t,Ns,p),e(Ns,pp),m(t,Mc,p),m(t,Os,p),e(Os,mp),m(t,Ic,p),m(t,D,p),e(D,hp),e(D,Ls),e(Ls,up),e(D,fp),e(D,Xn),e(Xn,gp),e(D,vp),e(D,Zn),e(Zn,_p),e(D,Pp),e(D,Qn),e(Qn,wp),e(D,bp),e(D,Yn),e(Yn,yp),e(D,kp),m(t,qc,p),m(t,Ss,p),e(Ss,$p),m(t,zc,p),m(t,M,p),e(M,Tp),e(M,ea),e(ea,xp),e(M,Ep),e(M,ta),e(ta,Fp),e(M,Cp),e(M,oa),e(oa,jp),e(M,Mp),e(M,ra),e(ra,Ip),e(M,qp),e(M,sa),e(sa,zp),e(M,Ap),e(M,na),e(na,Dp),e(M,Np),m(t,Ac,p),m(t,At,p),m(t,Dc,p),m(t,Fo,p),e(Fo,Op),e(Fo,Bs),e(Bs,Lp),m(t,Nc,p),m(t,ve,p),e(ve,Sp),e(ve,Co),e(Co,Bp),e(ve,Hp),e(ve,jo),e(jo,Wp),e(ve,Vp),m(t,Oc,p),m(t,Hs,p),e(Hs,Rp),m(t,Lc,p),m(t,Dt,p),e(Dt,Mo),e(Mo,Up),e(Mo,Io),e(Io,Kp),e(Mo,Jp),e(Dt,Gp),e(Dt,qo),e(qo,Xp),e(qo,zo),e(zo,Zp),e(qo,Qp),m(t,Sc,p),m(t,Ao,p),e(Ao,aa),e(aa,Yp),e(Ao,em),m(t,Bc,p),m(t,Ws,p),e(Ws,_e),e(_e,tm),e(_e,ia),e(ia,om),e(_e,rm),e(_e,ca),e(ca,sm),e(_e,nm),e(_e,Do),e(Do,am),m(t,Hc,p),m(t,qe,p),e(qe,Nt),e(Nt,la),g(No,la,null),e(qe,im),e(qe,da),e(da,cm),m(t,Wc,p),m(t,ze,p),g(Oo,ze,null),e(ze,lm),e(ze,pa),e(pa,dm),m(t,Vc,p),m(t,Ae,p),g(Lo,Ae,null),e(Ae,pm),e(Ae,ma),e(ma,mm),m(t,Rc,p),m(t,De,p),g(So,De,null),e(De,hm),e(De,ha),e(ha,um),m(t,Uc,p),m(t,Ne,p),g(Bo,Ne,null),e(Ne,fm),e(Ne,ua),e(ua,gm),m(t,Kc,p),m(t,Oe,p),e(Oe,Ot),e(Ot,fa),g(Ho,fa,null),e(Oe,vm),e(Oe,ga),e(ga,_m),m(t,Jc,p),m(t,N,p),g(Wo,N,null),e(N,Pm),e(N,Le),e(Le,wm),e(Le,Vs),e(Vs,bm),e(Le,ym),e(Le,Vo),e(Vo,km),e(Le,$m),e(N,Tm),e(N,Se),e(Se,xm),e(Se,Rs),e(Rs,Em),e(Se,Fm),e(Se,Us),e(Us,Cm),e(Se,jm),e(N,Mm),e(N,va),e(va,Im),e(N,qm),g(Ro,N,null),m(t,Gc,p),m(t,Be,p),e(Be,Lt),e(Lt,_a),g(Uo,_a,null),e(Be,zm),e(Be,Pa),e(Pa,Am),m(t,Xc,p),m(t,Q,p),g(Ko,Q,null),e(Q,Dm),e(Q,wa),e(wa,Nm),e(Q,Om),e(Q,Jo),e(Jo,Lm),e(Jo,Ks),e(Ks,Sm),e(Jo,Bm),e(Q,Hm),e(Q,St),g(Go,St,null),e(St,Wm),e(St,ba),e(ba,Vm),m(t,Zc,p),m(t,He,p),e(He,Bt),e(Bt,ya),g(Xo,ya,null),e(He,Rm),e(He,ka),e(ka,Um),m(t,Qc,p),m(t,Y,p),g(Zo,Y,null),e(Y,Km),e(Y,$a),e($a,Jm),e(Y,Gm),e(Y,Qo),e(Qo,Xm),e(Qo,Js),e(Js,Zm),e(Qo,Qm),e(Y,Ym),e(Y,Pe),g(Yo,Pe,null),e(Pe,eh),e(Pe,Ta),e(Ta,th),e(Pe,oh),g(Ht,Pe,null),m(t,Yc,p),m(t,We,p),e(We,Wt),e(Wt,xa),g(er,xa,null),e(We,rh),e(We,Ea),e(Ea,sh
),m(t,el,p),m(t,ae,p),g(tr,ae,null),e(ae,nh),e(ae,or),e(or,ah),e(or,Fa),e(Fa,ih),e(or,ch),e(ae,lh),e(ae,rr),e(rr,dh),e(rr,Ca),e(Ca,ph),e(rr,mh),m(t,tl,p),m(t,Ve,p),e(Ve,Vt),e(Vt,ja),g(sr,ja,null),e(Ve,hh),e(Ve,Ma),e(Ma,uh),m(t,ol,p),m(t,ie,p),g(nr,ie,null),e(ie,fh),e(ie,Ia),e(Ia,gh),e(ie,vh),e(ie,ee),e(ee,_h),e(ee,qa),e(qa,Ph),e(ee,wh),e(ee,za),e(za,bh),e(ee,yh),e(ee,Aa),e(Aa,kh),e(ee,$h),e(ee,Da),e(Da,Th),e(ee,xh),m(t,rl,p),m(t,Re,p),e(Re,Rt),e(Rt,Na),g(ar,Na,null),e(Re,Eh),e(Re,Oa),e(Oa,Fh),m(t,sl,p),m(t,Ue,p),g(ir,Ue,null),e(Ue,Ch),e(Ue,La),e(La,jh),m(t,nl,p),m(t,Ke,p),e(Ke,Ut),e(Ut,Sa),g(cr,Sa,null),e(Ke,Mh),e(Ke,Ba),e(Ba,Ih),m(t,al,p),m(t,Je,p),g(lr,Je,null),e(Je,qh),e(Je,Ha),e(Ha,zh),m(t,il,p),m(t,Ge,p),e(Ge,Kt),e(Kt,Wa),g(dr,Wa,null),e(Ge,Ah),e(Ge,Va),e(Va,Dh),m(t,cl,p),m(t,ce,p),g(pr,ce,null),e(ce,Nh),e(ce,Ra),e(Ra,Oh),e(ce,Lh),e(ce,Ua),e(Ua,Sh),m(t,ll,p),m(t,Xe,p),e(Xe,Jt),e(Jt,Ka),g(mr,Ka,null),e(Xe,Bh),e(Xe,Ja),e(Ja,Hh),m(t,dl,p),m(t,Ze,p),g(hr,Ze,null),e(Ze,Wh),e(Ze,Ga),e(Ga,Vh),m(t,pl,p),m(t,Qe,p),e(Qe,Gt),e(Gt,Xa),g(ur,Xa,null),e(Qe,Rh),e(Qe,Za),e(Za,Uh),m(t,ml,p),m(t,le,p),g(fr,le,null),e(le,Kh),e(le,Qa),e(Qa,Jh),e(le,Gh),e(le,Ya),e(Ya,Xh),m(t,hl,p),m(t,Ye,p),e(Ye,Xt),e(Xt,ei),g(gr,ei,null),e(Ye,Zh),e(Ye,ti),e(ti,Qh),m(t,ul,p),m(t,et,p),g(vr,et,null),e(et,Yh),e(et,_r),e(_r,eu),e(_r,oi),e(oi,tu),e(_r,ou),m(t,fl,p),m(t,tt,p),e(tt,Zt),e(Zt,ri),g(Pr,ri,null),e(tt,ru),e(tt,si),e(si,su),m(t,gl,p),m(t,ot,p),g(wr,ot,null),e(ot,nu),e(ot,ni),e(ni,au),m(t,vl,p),m(t,rt,p),e(rt,Qt),e(Qt,ai),g(br,ai,null),e(rt,iu),e(rt,ii),e(ii,cu),m(t,_l,p),m(t,st,p),g(yr,st,null),e(st,lu),e(st,kr),e(kr,du),e(kr,ci),e(ci,pu),e(kr,mu),m(t,Pl,p),m(t,nt,p),e(nt,Yt),e(Yt,li),g($r,li,null),e(nt,hu),e(nt,di),e(di,uu),m(t,wl,p),m(t,de,p),g(Tr,de,null),e(de,fu),e(de,xr),e(xr,gu),e(xr,pi),e(pi,vu),e(xr,_u),e(de,Pu),e(de,mi),e(mi,wu),m(t,bl,p),m(t,at,p),e(at,eo),e(eo,hi),g(Er,hi,null),e(at,bu),e(at,ui),e(ui,yu),m(t,yl,p),m(t,it,p),g(Fr,it,null),e(it,ku),e(it,fi),e(fi,$u),m(t,kl,p),m(t,ct,p),e(ct,to),e(to,gi),g(Cr,gi,null),e(ct,Tu),e(ct,vi),e(vi,xu),m(t,$l,p),m(t,lt,p),g(jr,lt,null),e(lt,Eu),e(lt,_i),e(_i,Fu),m(t,Tl,p),m(t,dt,p),e(dt,oo),e(oo,Pi),g(Mr,Pi,null),e(dt,Cu),e(dt,wi),e(wi,ju),m(t,xl,p),m(t,pt,p),g(Ir,pt,null),e(pt,Mu),e(pt,bi),e(bi,Iu),m(t,El,p),m(t,mt,p),e(mt,ro),e(ro,yi),g(qr,yi,null),e(mt,qu),e(mt,ki),e(ki,zu),m(t,Fl,p),m(t,ht,p),g(zr,ht,null),e(ht,Au),e(ht,$i),e($i,Du),m(t,Cl,p),m(t,ut,p),e(ut,so),e(so,Ti),g(Ar,Ti,null),e(ut,Nu),e(ut,xi),e(xi,Ou),m(t,jl,p),m(t,pe,p),g(Dr,pe,null),e(pe,Lu),e(pe,Nr),e(Nr,Su),e(Nr,Or),e(Or,Bu),e(Nr,Hu),e(pe,Wu),e(pe,S),g(Lr,S,null),e(S,Vu),e(S,ft),e(ft,Ru),e(ft,Gs),e(Gs,Uu),e(ft,Ku),e(ft,Ei),e(Ei,Ju),e(ft,Gu),e(S,Xu),g(no,S,null),e(S,Zu),e(S,Fi),e(Fi,Qu),e(S,Yu),g(Sr,S,null),m(t,Ml,p),m(t,gt,p),e(gt,ao),e(ao,Ci),g(Br,Ci,null),e(gt,ef),e(gt,ji),e(ji,tf),m(t,Il,p),m(t,me,p),g(Hr,me,null),e(me,of),e(me,Wr),e(Wr,rf),e(Wr,Vr),e(Vr,sf),e(Wr,nf),e(me,af),e(me,B),g(Rr,B,null),e(B,cf),e(B,vt),e(vt,lf),e(vt,Xs),e(Xs,df),e(vt,pf),e(vt,Mi),e(Mi,mf),e(vt,hf),e(B,uf),g(io,B,null),e(B,ff),e(B,Ii),e(Ii,gf),e(B,vf),g(Ur,B,null),m(t,ql,p),m(t,_t,p),e(_t,co),e(co,qi),g(Kr,qi,null),e(_t,_f),e(_t,zi),e(zi,Pf),m(t,zl,p),m(t,he,p),g(Jr,he,null),e(he,wf),e(he,Gr),e(Gr,bf),e(Gr,Xr),e(Xr,yf),e(Gr,kf),e(he,$f),e(he,H),g(Zr,H,null),e(H,Tf),e(H,Pt),e(Pt,xf),e(Pt,Zs),e(Zs,Ef),e(Pt,Ff),e(Pt,Ai),e(Ai,Cf),e(Pt,jf),e(H,Mf),g(lo,H,null),e(H,If),e(H,Di),e(Di,qf),e(H,zf),g(Qr,H,null),m(t,Al,p),m(t,wt,p),e(wt,po),e(po,Ni),g(Yr,Ni,null),e(wt,Af),e(wt,Oi),e(Oi,Df),m(t,Dl,p),m(t,q,p),g(es,q,null),e(q,Nf),e(q,Li
),e(Li,Of),e(q,Lf),e(q,Si),e(Si,Sf),e(q,Bf),e(q,W),e(W,Qs),e(Qs,Hf),e(W,Wf),e(W,Ys),e(Ys,Vf),e(W,Rf),e(W,Bi),e(Bi,Uf),e(W,Kf),e(W,en),e(en,Jf),e(W,Gf),e(W,tn),e(tn,Xf),e(W,Zf),e(q,Qf),e(q,ts),e(ts,Yf),e(ts,os),e(os,eg),e(ts,tg),e(q,og),e(q,V),g(rs,V,null),e(V,rg),e(V,bt),e(bt,sg),e(bt,on),e(on,ng),e(bt,ag),e(bt,Hi),e(Hi,ig),e(bt,cg),e(V,lg),g(mo,V,null),e(V,dg),e(V,Wi),e(Wi,pg),e(V,mg),g(ss,V,null),m(t,Nl,p),m(t,yt,p),e(yt,ho),e(ho,Vi),g(ns,Vi,null),e(yt,hg),e(yt,Ri),e(Ri,ug),m(t,Ol,p),m(t,z,p),g(as,z,null),e(z,fg),e(z,Ui),e(Ui,gg),e(z,vg),e(z,Ki),e(Ki,_g),e(z,Pg),e(z,R),e(R,rn),e(rn,wg),e(R,bg),e(R,sn),e(sn,yg),e(R,kg),e(R,Ji),e(Ji,$g),e(R,Tg),e(R,nn),e(nn,xg),e(R,Eg),e(R,an),e(an,Fg),e(R,Cg),e(z,jg),e(z,is),e(is,Mg),e(is,cs),e(cs,Ig),e(is,qg),e(z,zg),e(z,U),g(ls,U,null),e(U,Ag),e(U,kt),e(kt,Dg),e(kt,cn),e(cn,Ng),e(kt,Og),e(kt,Gi),e(Gi,Lg),e(kt,Sg),e(U,Bg),g(uo,U,null),e(U,Hg),e(U,Xi),e(Xi,Wg),e(U,Vg),g(ds,U,null),m(t,Ll,p),m(t,$t,p),e($t,fo),e(fo,Zi),g(ps,Zi,null),e($t,Rg),e($t,Qi),e(Qi,Ug),m(t,Sl,p),m(t,A,p),g(ms,A,null),e(A,Kg),e(A,Yi),e(Yi,Jg),e(A,Gg),e(A,ec),e(ec,Xg),e(A,Zg),e(A,K),e(K,ln),e(ln,Qg),e(K,Yg),e(K,dn),e(dn,ev),e(K,tv),e(K,tc),e(tc,ov),e(K,rv),e(K,pn),e(pn,sv),e(K,nv),e(K,mn),e(mn,av),e(K,iv),e(A,cv),e(A,hs),e(hs,lv),e(hs,us),e(us,dv),e(hs,pv),e(A,mv),e(A,J),g(fs,J,null),e(J,hv),e(J,Tt),e(Tt,uv),e(Tt,hn),e(hn,fv),e(Tt,gv),e(Tt,oc),e(oc,vv),e(Tt,_v),e(J,Pv),g(go,J,null),e(J,wv),e(J,rc),e(rc,bv),e(J,yv),g(gs,J,null),m(t,Bl,p),m(t,xt,p),e(xt,vo),e(vo,sc),g(vs,sc,null),e(xt,kv),e(xt,nc),e(nc,$v),m(t,Hl,p),m(t,O,p),g(_s,O,null),e(O,Tv),e(O,L),e(L,xv),e(L,un),e(un,Ev),e(L,Fv),e(L,fn),e(fn,Cv),e(L,jv),e(L,ac),e(ac,Mv),e(L,Iv),e(L,gn),e(gn,qv),e(L,zv),e(L,vn),e(vn,Av),e(L,Dv),e(O,Nv),e(O,ic),e(ic,Ov),e(O,Lv),e(O,Ps),e(Ps,Sv),e(Ps,ws),e(ws,Bv),e(Ps,Hv),e(O,Wv),e(O,G),g(bs,G,null),e(G,Vv),e(G,Et),e(Et,Rv),e(Et,_n),e(_n,Uv),e(Et,Kv),e(Et,cc),e(cc,Jv),e(Et,Gv),e(G,Xv),g(_o,G,null),e(G,Zv),e(G,lc),e(lc,Qv),e(G,Yv),g(ys,G,null),m(t,Wl,p),m(t,Ft,p),e(Ft,Po),e(Po,dc),g(ks,dc,null),e(Ft,e_),e(Ft,pc),e(pc,t_),m(t,Vl,p),m(t,C,p),g($s,C,null),e(C,o_),e(C,mc),e(mc,r_),e(C,s_),e(C,wo),e(wo,Pn),e(Pn,n_),e(wo,a_),e(wo,wn),e(wn,i_),e(wo,c_),e(C,l_),e(C,we),e(we,bn),e(bn,d_),e(we,p_),e(we,yn),e(yn,m_),e(we,h_),e(we,kn),e(kn,u_),e(we,f_),e(C,g_),e(C,bo),e(bo,$n),e($n,v_),e(bo,__),e(bo,Tn),e(Tn,P_),e(bo,w_),e(C,b_),e(C,Ts),e(Ts,y_),e(Ts,hc),e(hc,k_),e(Ts,$_),e(C,T_),e(C,uc),e(uc,x_),e(C,E_),e(C,xs),e(xs,F_),e(xs,Es),e(Es,C_),e(xs,j_),e(C,M_),e(C,X),g(Fs,X,null),e(X,I_),e(X,Ct),e(Ct,q_),e(Ct,xn),e(xn,z_),e(Ct,A_),e(Ct,fc),e(fc,D_),e(Ct,N_),e(X,O_),g(yo,X,null),e(X,L_),e(X,gc),e(gc,S_),e(X,B_),g(Cs,X,null),Rl=!0},p(t,[p]){const js={};p&2&&(js.$$scope={dirty:p,ctx:t}),Ht.$set(js);const vc={};p&2&&(vc.$$scope={dirty:p,ctx:t}),no.$set(vc);const _c={};p&2&&(_c.$$scope={dirty:p,ctx:t}),io.$set(_c);const Pc={};p&2&&(Pc.$$scope={dirty:p,ctx:t}),lo.$set(Pc);const Ms={};p&2&&(Ms.$$scope={dirty:p,ctx:t}),mo.$set(Ms);const wc={};p&2&&(wc.$$scope={dirty:p,ctx:t}),uo.$set(wc);const bc={};p&2&&(bc.$$scope={dirty:p,ctx:t}),go.$set(bc);const yc={};p&2&&(yc.$$scope={dirty:p,ctx:t}),_o.$set(yc);const 
Is={};p&2&&(Is.$$scope={dirty:p,ctx:t}),yo.$set(Is)},i(t){Rl||(v(y.$$.fragment,t),v(To.$$.fragment,t),v(No.$$.fragment,t),v(Oo.$$.fragment,t),v(Lo.$$.fragment,t),v(So.$$.fragment,t),v(Bo.$$.fragment,t),v(Ho.$$.fragment,t),v(Wo.$$.fragment,t),v(Ro.$$.fragment,t),v(Uo.$$.fragment,t),v(Ko.$$.fragment,t),v(Go.$$.fragment,t),v(Xo.$$.fragment,t),v(Zo.$$.fragment,t),v(Yo.$$.fragment,t),v(Ht.$$.fragment,t),v(er.$$.fragment,t),v(tr.$$.fragment,t),v(sr.$$.fragment,t),v(nr.$$.fragment,t),v(ar.$$.fragment,t),v(ir.$$.fragment,t),v(cr.$$.fragment,t),v(lr.$$.fragment,t),v(dr.$$.fragment,t),v(pr.$$.fragment,t),v(mr.$$.fragment,t),v(hr.$$.fragment,t),v(ur.$$.fragment,t),v(fr.$$.fragment,t),v(gr.$$.fragment,t),v(vr.$$.fragment,t),v(Pr.$$.fragment,t),v(wr.$$.fragment,t),v(br.$$.fragment,t),v(yr.$$.fragment,t),v($r.$$.fragment,t),v(Tr.$$.fragment,t),v(Er.$$.fragment,t),v(Fr.$$.fragment,t),v(Cr.$$.fragment,t),v(jr.$$.fragment,t),v(Mr.$$.fragment,t),v(Ir.$$.fragment,t),v(qr.$$.fragment,t),v(zr.$$.fragment,t),v(Ar.$$.fragment,t),v(Dr.$$.fragment,t),v(Lr.$$.fragment,t),v(no.$$.fragment,t),v(Sr.$$.fragment,t),v(Br.$$.fragment,t),v(Hr.$$.fragment,t),v(Rr.$$.fragment,t),v(io.$$.fragment,t),v(Ur.$$.fragment,t),v(Kr.$$.fragment,t),v(Jr.$$.fragment,t),v(Zr.$$.fragment,t),v(lo.$$.fragment,t),v(Qr.$$.fragment,t),v(Yr.$$.fragment,t),v(es.$$.fragment,t),v(rs.$$.fragment,t),v(mo.$$.fragment,t),v(ss.$$.fragment,t),v(ns.$$.fragment,t),v(as.$$.fragment,t),v(ls.$$.fragment,t),v(uo.$$.fragment,t),v(ds.$$.fragment,t),v(ps.$$.fragment,t),v(ms.$$.fragment,t),v(fs.$$.fragment,t),v(go.$$.fragment,t),v(gs.$$.fragment,t),v(vs.$$.fragment,t),v(_s.$$.fragment,t),v(bs.$$.fragment,t),v(_o.$$.fragment,t),v(ys.$$.fragment,t),v(ks.$$.fragment,t),v($s.$$.fragment,t),v(Fs.$$.fragment,t),v(yo.$$.fragment,t),v(Cs.$$.fragment,t),Rl=!0)},o(t){_(y.$$.fragment,t),_(To.$$.fragment,t),_(No.$$.fragment,t),_(Oo.$$.fragment,t),_(Lo.$$.fragment,t),_(So.$$.fragment,t),_(Bo.$$.fragment,t),_(Ho.$$.fragment,t),_(Wo.$$.fragment,t),_(Ro.$$.fragment,t),_(Uo.$$.fragment,t),_(Ko.$$.fragment,t),_(Go.$$.fragment,t),_(Xo.$$.fragment,t),_(Zo.$$.fragment,t),_(Yo.$$.fragment,t),_(Ht.$$.fragment,t),_(er.$$.fragment,t),_(tr.$$.fragment,t),_(sr.$$.fragment,t),_(nr.$$.fragment,t),_(ar.$$.fragment,t),_(ir.$$.fragment,t),_(cr.$$.fragment,t),_(lr.$$.fragment,t),_(dr.$$.fragment,t),_(pr.$$.fragment,t),_(mr.$$.fragment,t),_(hr.$$.fragment,t),_(ur.$$.fragment,t),_(fr.$$.fragment,t),_(gr.$$.fragment,t),_(vr.$$.fragment,t),_(Pr.$$.fragment,t),_(wr.$$.fragment,t),_(br.$$.fragment,t),_(yr.$$.fragment,t),_($r.$$.fragment,t),_(Tr.$$.fragment,t),_(Er.$$.fragment,t),_(Fr.$$.fragment,t),_(Cr.$$.fragment,t),_(jr.$$.fragment,t),_(Mr.$$.fragment,t),_(Ir.$$.fragment,t),_(qr.$$.fragment,t),_(zr.$$.fragment,t),_(Ar.$$.fragment,t),_(Dr.$$.fragment,t),_(Lr.$$.fragment,t),_(no.$$.fragment,t),_(Sr.$$.fragment,t),_(Br.$$.fragment,t),_(Hr.$$.fragment,t),_(Rr.$$.fragment,t),_(io.$$.fragment,t),_(Ur.$$.fragment,t),_(Kr.$$.fragment,t),_(Jr.$$.fragment,t),_(Zr.$$.fragment,t),_(lo.$$.fragment,t),_(Qr.$$.fragment,t),_(Yr.$$.fragment,t),_(es.$$.fragment,t),_(rs.$$.fragment,t),_(mo.$$.fragment,t),_(ss.$$.fragment,t),_(ns.$$.fragment,t),_(as.$$.fragment,t),_(ls.$$.fragment,t),_(uo.$$.fragment,t),_(ds.$$.fragment,t),_(ps.$$.fragment,t),_(ms.$$.fragment,t),_(fs.$$.fragment,t),_(go.$$.fragment,t),_(gs.$$.fragment,t),_(vs.$$.fragment,t),_(_s.$$.fragment,t),_(bs.$$.fragment,t),_(_o.$$.fragment,t),_(ys.$$.fragment,t),_(ks.$$.fragment,t),_($s.$$.fragment,t),_(Fs.$$.fragment,t),_(yo.$$.fragment,t),_(Cs.$$.fragment,t),R
l=!1},d(t){o(h),t&&o($),t&&o(w),P(y),t&&o(Tc),t&&o(Ie),P(To),t&&o(xc),t&&o(qt),t&&o(Ec),t&&o(zt),t&&o(Fc),t&&o(As),t&&o(Cc),t&&o(Ds),t&&o(jc),t&&o(Ns),t&&o(Mc),t&&o(Os),t&&o(Ic),t&&o(D),t&&o(qc),t&&o(Ss),t&&o(zc),t&&o(M),t&&o(Ac),t&&o(At),t&&o(Dc),t&&o(Fo),t&&o(Nc),t&&o(ve),t&&o(Oc),t&&o(Hs),t&&o(Lc),t&&o(Dt),t&&o(Sc),t&&o(Ao),t&&o(Bc),t&&o(Ws),t&&o(Hc),t&&o(qe),P(No),t&&o(Wc),t&&o(ze),P(Oo),t&&o(Vc),t&&o(Ae),P(Lo),t&&o(Rc),t&&o(De),P(So),t&&o(Uc),t&&o(Ne),P(Bo),t&&o(Kc),t&&o(Oe),P(Ho),t&&o(Jc),t&&o(N),P(Wo),P(Ro),t&&o(Gc),t&&o(Be),P(Uo),t&&o(Xc),t&&o(Q),P(Ko),P(Go),t&&o(Zc),t&&o(He),P(Xo),t&&o(Qc),t&&o(Y),P(Zo),P(Yo),P(Ht),t&&o(Yc),t&&o(We),P(er),t&&o(el),t&&o(ae),P(tr),t&&o(tl),t&&o(Ve),P(sr),t&&o(ol),t&&o(ie),P(nr),t&&o(rl),t&&o(Re),P(ar),t&&o(sl),t&&o(Ue),P(ir),t&&o(nl),t&&o(Ke),P(cr),t&&o(al),t&&o(Je),P(lr),t&&o(il),t&&o(Ge),P(dr),t&&o(cl),t&&o(ce),P(pr),t&&o(ll),t&&o(Xe),P(mr),t&&o(dl),t&&o(Ze),P(hr),t&&o(pl),t&&o(Qe),P(ur),t&&o(ml),t&&o(le),P(fr),t&&o(hl),t&&o(Ye),P(gr),t&&o(ul),t&&o(et),P(vr),t&&o(fl),t&&o(tt),P(Pr),t&&o(gl),t&&o(ot),P(wr),t&&o(vl),t&&o(rt),P(br),t&&o(_l),t&&o(st),P(yr),t&&o(Pl),t&&o(nt),P($r),t&&o(wl),t&&o(de),P(Tr),t&&o(bl),t&&o(at),P(Er),t&&o(yl),t&&o(it),P(Fr),t&&o(kl),t&&o(ct),P(Cr),t&&o($l),t&&o(lt),P(jr),t&&o(Tl),t&&o(dt),P(Mr),t&&o(xl),t&&o(pt),P(Ir),t&&o(El),t&&o(mt),P(qr),t&&o(Fl),t&&o(ht),P(zr),t&&o(Cl),t&&o(ut),P(Ar),t&&o(jl),t&&o(pe),P(Dr),P(Lr),P(no),P(Sr),t&&o(Ml),t&&o(gt),P(Br),t&&o(Il),t&&o(me),P(Hr),P(Rr),P(io),P(Ur),t&&o(ql),t&&o(_t),P(Kr),t&&o(zl),t&&o(he),P(Jr),P(Zr),P(lo),P(Qr),t&&o(Al),t&&o(wt),P(Yr),t&&o(Dl),t&&o(q),P(es),P(rs),P(mo),P(ss),t&&o(Nl),t&&o(yt),P(ns),t&&o(Ol),t&&o(z),P(as),P(ls),P(uo),P(ds),t&&o(Ll),t&&o($t),P(ps),t&&o(Sl),t&&o(A),P(ms),P(fs),P(go),P(gs),t&&o(Bl),t&&o(xt),P(vs),t&&o(Hl),t&&o(O),P(_s),P(bs),P(_o),P(ys),t&&o(Wl),t&&o(Ft),P(ks),t&&o(Vl),t&&o(C),P($s),P(Fs),P(yo),P(Cs)}}}const c2={local:"perceiver",sections:[{local:"overview",title:"Overview"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput",title:"Perceiver specific 
outputs"},{local:"transformers.PerceiverConfig",title:"PerceiverConfig"},{local:"transformers.PerceiverTokenizer",title:"PerceiverTokenizer"},{local:"transformers.PerceiverFeatureExtractor",title:"PerceiverFeatureExtractor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor",title:"PerceiverTextPreprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor",title:"PerceiverImagePreprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor",title:"PerceiverOneHotPreprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor",title:"PerceiverAudioPreprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor",title:"PerceiverMultimodalPreprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder",title:"PerceiverProjectionDecoder"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder",title:"PerceiverBasicDecoder"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder",title:"PerceiverClassificationDecoder"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder",title:"PerceiverOpticalFlowDecoder"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder",title:"PerceiverBasicVideoAutoencodingDecoder"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder",title:"PerceiverMultimodalDecoder"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor",title:"PerceiverProjectionPostprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor",title:"PerceiverAudioPostprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor",title:"PerceiverClassificationPostprocessor"},{local:"transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor",title:"PerceiverMultimodalPostprocessor"},{local:"transformers.PerceiverModel",title:"PerceiverModel"},{local:"transformers.PerceiverForMaskedLM",title:"PerceiverForMaskedLM"},{local:"transformers.PerceiverForSequenceClassification",title:"PerceiverForSequenceClassification"},{local:"transformers.PerceiverForImageClassificationLearned",title:"PerceiverForImageClassificationLearned"},{local:"transformers.PerceiverForImageClassificationFourier",title:"PerceiverForImageClassificationFourier"},{local:"transformers.PerceiverForImageClassificationConvProcessing",title:"PerceiverForImageClassificationConvProcessing"},{local:"transformers.PerceiverForOpticalFlow",title:"PerceiverForOpticalFlow"},{local:"transformers.PerceiverForMultimodalAutoencoding",title:"PerceiverForMultimodalAutoencoding"}],title:"Perceiver"};function l2(j,h,$){let{fw:w}=h;return j.$$set=k=>{"fw"in k&&$(0,w=k.fw)},[w]}class g2 extends Kb{constructor(h){super();Jb(this,h,l2,i2,Gb,{fw:0})}}export{g2 as default,c2 as metadata};
9983
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/ctrl.mdx-d139d84d.js
import{S as xc,i as Pc,s as Sc,e as a,k as l,w,t as n,L as jc,c as r,d as t,m as d,a as i,x as L,h as s,b as c,J as e,g as u,y as $,q as R,o as M,B as E}from"../../chunks/vendor-b1433968.js";import{T as wt}from"../../chunks/Tip-c3840994.js";import{D as Ie}from"../../chunks/Docstring-ff504c58.js";import{C as Vt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as it}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ic(B){let p,C,m,g,v;return{c(){p=a("p"),C=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=r(T,"P",{});var _=i(p);C=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var y=i(m);g=s(y,"Module"),y.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){u(T,p,_),e(p,C),e(p,m),e(m,g),e(p,v)},d(T){T&&t(p)}}}function Nc(B){let p,C,m,g,v;return{c(){p=a("p"),C=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=r(T,"P",{});var _=i(p);C=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var y=i(m);g=s(y,"Module"),y.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){u(T,p,_),e(p,C),e(p,m),e(m,g),e(p,v)},d(T){T&&t(p)}}}function Dc(B){let p,C,m,g,v;return{c(){p=a("p"),C=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=r(T,"P",{});var _=i(p);C=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var y=i(m);g=s(y,"Module"),y.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){u(T,p,_),e(p,C),e(p,m),e(m,g),e(p,v)},d(T){T&&t(p)}}}function Ac(B){let p,C,m,g,v,T,_,y,me,Q,b,X,A,Y,ge,H,_e,ce,P,S,Z,oe,F,z,Te,U,he,ne,V,pe,se,q,ve,O,ae,ke,W,j,ee,I,te,G,Ce;return{c(){p=a("p"),C=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),y=a("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=l(),b=a("p"),X=n("This second option is useful when using "),A=a("code"),Y=n("tf.keras.Model.fit"),ge=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),_e=n("model(inputs)"),ce=n("."),P=l(),S=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the 
first positional argument :`),oe=l(),F=a("ul"),z=a("li"),Te=n("a single Tensor with "),U=a("code"),he=n("input_ids"),ne=n(" only and nothing else: "),V=a("code"),pe=n("model(inputs_ids)"),se=l(),q=a("li"),ve=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),ae=n("model([input_ids, attention_mask])"),ke=n(" or "),W=a("code"),j=n("model([input_ids, attention_mask, token_type_ids])"),ee=l(),I=a("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=a("code"),Ce=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var k=i(p);C=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(h),g=r(h,"UL",{});var re=i(g);v=r(re,"LI",{});var Ae=i(v);T=s(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),_=d(re),y=r(re,"LI",{});var ue=i(y);me=s(ue,"having all inputs as a list, tuple or dict in the first positional arguments."),ue.forEach(t),re.forEach(t),Q=d(h),b=r(h,"P",{});var N=i(b);X=s(N,"This second option is useful when using "),A=r(N,"CODE",{});var Me=i(A);Y=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(N,"CODE",{});var He=i(H);_e=s(He,"model(inputs)"),He.forEach(t),ce=s(N,"."),N.forEach(t),P=d(h),S=r(h,"P",{});var Oe=i(S);Z=s(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),oe=d(h),F=r(h,"UL",{});var D=i(F);z=r(D,"LI",{});var x=i(z);Te=s(x,"a single Tensor with "),U=r(x,"CODE",{});var We=i(U);he=s(We,"input_ids"),We.forEach(t),ne=s(x," only and nothing else: "),V=r(x,"CODE",{});var Ee=i(V);pe=s(Ee,"model(inputs_ids)"),Ee.forEach(t),x.forEach(t),se=d(D),q=r(D,"LI",{});var K=i(q);ve=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(K,"CODE",{});var Be=i(O);ae=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),ke=s(K," or "),W=r(K,"CODE",{});var Fe=i(W);j=s(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),K.forEach(t),ee=d(D),I=r(D,"LI",{});var be=i(I);te=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(be,"CODE",{});var Ue=i(G);Ce=s(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),be.forEach(t),D.forEach(t)},m(h,k){u(h,p,k),e(p,C),u(h,m,k),u(h,g,k),e(g,v),e(v,T),e(g,_),e(g,y),e(y,me),u(h,Q,k),u(h,b,k),e(b,X),e(b,A),e(A,Y),e(b,ge),e(b,H),e(H,_e),e(b,ce),u(h,P,k),u(h,S,k),e(S,Z),u(h,oe,k),u(h,F,k),e(F,z),e(z,Te),e(z,U),e(U,he),e(z,ne),e(z,V),e(V,pe),e(F,se),e(F,q),e(q,ve),e(q,O),e(O,ae),e(q,ke),e(q,W),e(W,j),e(F,ee),e(F,I),e(I,te),e(I,G),e(G,Ce)},d(h){h&&t(p),h&&t(m),h&&t(g),h&&t(Q),h&&t(b),h&&t(P),h&&t(S),h&&t(oe),h&&t(F)}}}function Hc(B){let p,C,m,g,v;return{c(){p=a("p"),C=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=r(T,"P",{});var _=i(p);C=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var y=i(m);g=s(y,"Module"),y.forEach(t),v=s(_,` instance afterwards 
instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){u(T,p,_),e(p,C),e(p,m),e(m,g),e(p,v)},d(T){T&&t(p)}}}function Oc(B){let p,C,m,g,v,T,_,y,me,Q,b,X,A,Y,ge,H,_e,ce,P,S,Z,oe,F,z,Te,U,he,ne,V,pe,se,q,ve,O,ae,ke,W,j,ee,I,te,G,Ce;return{c(){p=a("p"),C=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),y=a("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=l(),b=a("p"),X=n("This second option is useful when using "),A=a("code"),Y=n("tf.keras.Model.fit"),ge=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),_e=n("model(inputs)"),ce=n("."),P=l(),S=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),oe=l(),F=a("ul"),z=a("li"),Te=n("a single Tensor with "),U=a("code"),he=n("input_ids"),ne=n(" only and nothing else: "),V=a("code"),pe=n("model(inputs_ids)"),se=l(),q=a("li"),ve=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),ae=n("model([input_ids, attention_mask])"),ke=n(" or "),W=a("code"),j=n("model([input_ids, attention_mask, token_type_ids])"),ee=l(),I=a("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=a("code"),Ce=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var k=i(p);C=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(h),g=r(h,"UL",{});var re=i(g);v=r(re,"LI",{});var Ae=i(v);T=s(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),_=d(re),y=r(re,"LI",{});var ue=i(y);me=s(ue,"having all inputs as a list, tuple or dict in the first positional arguments."),ue.forEach(t),re.forEach(t),Q=d(h),b=r(h,"P",{});var N=i(b);X=s(N,"This second option is useful when using "),A=r(N,"CODE",{});var Me=i(A);Y=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(N,"CODE",{});var He=i(H);_e=s(He,"model(inputs)"),He.forEach(t),ce=s(N,"."),N.forEach(t),P=d(h),S=r(h,"P",{});var Oe=i(S);Z=s(Oe,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),oe=d(h),F=r(h,"UL",{});var D=i(F);z=r(D,"LI",{});var x=i(z);Te=s(x,"a single Tensor with "),U=r(x,"CODE",{});var We=i(U);he=s(We,"input_ids"),We.forEach(t),ne=s(x," only and nothing else: "),V=r(x,"CODE",{});var Ee=i(V);pe=s(Ee,"model(inputs_ids)"),Ee.forEach(t),x.forEach(t),se=d(D),q=r(D,"LI",{});var K=i(q);ve=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(K,"CODE",{});var Be=i(O);ae=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),ke=s(K," or "),W=r(K,"CODE",{});var Fe=i(W);j=s(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),K.forEach(t),ee=d(D),I=r(D,"LI",{});var be=i(I);te=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(be,"CODE",{});var Ue=i(G);Ce=s(Ue,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Ue.forEach(t),be.forEach(t),D.forEach(t)},m(h,k){u(h,p,k),e(p,C),u(h,m,k),u(h,g,k),e(g,v),e(v,T),e(g,_),e(g,y),e(y,me),u(h,Q,k),u(h,b,k),e(b,X),e(b,A),e(A,Y),e(b,ge),e(b,H),e(H,_e),e(b,ce),u(h,P,k),u(h,S,k),e(S,Z),u(h,oe,k),u(h,F,k),e(F,z),e(z,Te),e(z,U),e(U,he),e(z,ne),e(z,V),e(V,pe),e(F,se),e(F,q),e(q,ve),e(q,O),e(O,ae),e(q,ke),e(q,W),e(W,j),e(F,ee),e(F,I),e(I,te),e(I,G),e(G,Ce)},d(h){h&&t(p),h&&t(m),h&&t(g),h&&t(Q),h&&t(b),h&&t(P),h&&t(S),h&&t(oe),h&&t(F)}}}function Wc(B){let p,C,m,g,v;return{c(){p=a("p"),C=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=r(T,"P",{});var _=i(p);C=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var y=i(m);g=s(y,"Module"),y.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){u(T,p,_),e(p,C),e(p,m),e(m,g),e(p,v)},d(T){T&&t(p)}}}function Bc(B){let p,C,m,g,v,T,_,y,me,Q,b,X,A,Y,ge,H,_e,ce,P,S,Z,oe,F,z,Te,U,he,ne,V,pe,se,q,ve,O,ae,ke,W,j,ee,I,te,G,Ce;return{c(){p=a("p"),C=n("TF 2.0 models accepts two formats as inputs:"),m=l(),g=a("ul"),v=a("li"),T=n("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),y=a("li"),me=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=l(),b=a("p"),X=n("This second option is useful when using "),A=a("code"),Y=n("tf.keras.Model.fit"),ge=n(` method which currently requires having all the tensors in the first argument of the model call function: `),H=a("code"),_e=n("model(inputs)"),ce=n("."),P=l(),S=a("p"),Z=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),oe=l(),F=a("ul"),z=a("li"),Te=n("a single Tensor with "),U=a("code"),he=n("input_ids"),ne=n(" only and nothing else: "),V=a("code"),pe=n("model(inputs_ids)"),se=l(),q=a("li"),ve=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=a("code"),ae=n("model([input_ids, attention_mask])"),ke=n(" or "),W=a("code"),j=n("model([input_ids, attention_mask, token_type_ids])"),ee=l(),I=a("li"),te=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=a("code"),Ce=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(h){p=r(h,"P",{});var k=i(p);C=s(k,"TF 2.0 models accepts two formats as inputs:"),k.forEach(t),m=d(h),g=r(h,"UL",{});var re=i(g);v=r(re,"LI",{});var Ae=i(v);T=s(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),_=d(re),y=r(re,"LI",{});var ue=i(y);me=s(ue,"having all inputs as a list, tuple or dict in the first positional arguments."),ue.forEach(t),re.forEach(t),Q=d(h),b=r(h,"P",{});var N=i(b);X=s(N,"This second option is useful when using "),A=r(N,"CODE",{});var Me=i(A);Y=s(Me,"tf.keras.Model.fit"),Me.forEach(t),ge=s(N,` method which currently requires having all the tensors in the first argument of the model call function: `),H=r(N,"CODE",{});var He=i(H);_e=s(He,"model(inputs)"),He.forEach(t),ce=s(N,"."),N.forEach(t),P=d(h),S=r(h,"P",{});var Oe=i(S);Z=s(Oe,`If you choose this second option, there are 
three possibilities you can use to gather all the input Tensors in the first positional argument :`),Oe.forEach(t),oe=d(h),F=r(h,"UL",{});var D=i(F);z=r(D,"LI",{});var x=i(z);Te=s(x,"a single Tensor with "),U=r(x,"CODE",{});var We=i(U);he=s(We,"input_ids"),We.forEach(t),ne=s(x," only and nothing else: "),V=r(x,"CODE",{});var Ee=i(V);pe=s(Ee,"model(inputs_ids)"),Ee.forEach(t),x.forEach(t),se=d(D),q=r(D,"LI",{});var K=i(q);ve=s(K,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=r(K,"CODE",{});var Be=i(O);ae=s(Be,"model([input_ids, attention_mask])"),Be.forEach(t),ke=s(K," or "),W=r(K,"CODE",{});var Fe=i(W);j=s(Fe,"model([input_ids, attention_mask, token_type_ids])"),Fe.forEach(t),K.forEach(t),ee=d(D),I=r(D,"LI",{});var be=i(I);te=s(be,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),G=r(be,"CODE",{});var Ue=i(G);Ce=s(Ue,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Ue.forEach(t),be.forEach(t),D.forEach(t)},m(h,k){u(h,p,k),e(p,C),u(h,m,k),u(h,g,k),e(g,v),e(v,T),e(g,_),e(g,y),e(y,me),u(h,Q,k),u(h,b,k),e(b,X),e(b,A),e(A,Y),e(b,ge),e(b,H),e(H,_e),e(b,ce),u(h,P,k),u(h,S,k),e(S,Z),u(h,oe,k),u(h,F,k),e(F,z),e(z,Te),e(z,U),e(U,he),e(z,ne),e(z,V),e(V,pe),e(F,se),e(F,q),e(q,ve),e(q,O),e(O,ae),e(q,ke),e(q,W),e(W,j),e(F,ee),e(F,I),e(I,te),e(I,G),e(G,Ce)},d(h){h&&t(p),h&&t(m),h&&t(g),h&&t(Q),h&&t(b),h&&t(P),h&&t(S),h&&t(oe),h&&t(F)}}}function Uc(B){let p,C,m,g,v;return{c(){p=a("p"),C=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),g=n("Module"),v=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){p=r(T,"P",{});var _=i(p);C=s(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var y=i(m);g=s(y,"Module"),y.forEach(t),v=s(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(T,_){u(T,p,_),e(p,C),e(p,m),e(m,g),e(p,v)},d(T){T&&t(p)}}}function Vc(B){let 
p,C,m,g,v,T,_,y,me,Q,b,X,A,Y,ge,H,_e,ce,P,S,Z,oe,F,z,Te,U,he,ne,V,pe,se,q,ve,O,ae,ke,W,j,ee,I,te,G,Ce,h,k,re,Ae,ue,N,Me,He,Oe,D,x,We,Ee,K,Be,Fe,be,Ue,Ko,la,da,Cs,Xe,ca,Gt,ha,pa,Kt,ua,fa,bs,lt,Lt,wn,Jt,ma,Ln,ga,ys,ye,Xt,_a,Je,Ta,Jo,va,ka,Xo,Ca,ba,Qt,ya,wa,La,dt,$a,Qo,Ra,Ma,Yo,Ea,Fa,za,$n,qa,xa,Yt,ws,ct,$t,Rn,Zt,Pa,Mn,Sa,Ls,Ne,eo,ja,En,Ia,Na,to,Da,Zo,Aa,Ha,Oa,Fn,$s,ht,Rt,zn,oo,Wa,qn,Ba,Rs,we,no,Ua,xn,Va,Ga,so,Ka,en,Ja,Xa,Qa,ao,Ya,ro,Za,er,tr,ze,io,or,pt,nr,tn,sr,ar,Pn,rr,ir,lr,Mt,dr,Sn,cr,hr,lo,Ms,ut,Et,jn,co,pr,In,ur,Es,Le,ho,fr,Nn,mr,gr,po,_r,on,Tr,vr,kr,uo,Cr,fo,br,yr,wr,qe,mo,Lr,ft,$r,nn,Rr,Mr,Dn,Er,Fr,zr,Ft,qr,An,xr,Pr,go,Fs,mt,zt,Hn,_o,Sr,On,jr,zs,$e,To,Ir,Re,Nr,sn,Dr,Ar,Wn,Hr,Or,Bn,Wr,Br,Un,Ur,Vr,Vn,Gr,Kr,Jr,vo,Xr,an,Qr,Yr,Zr,ko,ei,Co,ti,oi,ni,ie,bo,si,gt,ai,rn,ri,ii,Gn,li,di,ci,qt,hi,Kn,pi,ui,yo,fi,Jn,mi,gi,wo,qs,_t,xt,Xn,Lo,_i,Qn,Ti,xs,le,$o,vi,Yn,ki,Ci,Ro,bi,ln,yi,wi,Li,Mo,$i,Eo,Ri,Mi,Ei,Pt,Fi,xe,Fo,zi,Tt,qi,dn,xi,Pi,Zn,Si,ji,Ii,St,Ni,es,Di,Ai,zo,Ps,vt,jt,ts,qo,Hi,os,Oi,Ss,de,xo,Wi,ns,Bi,Ui,Po,Vi,cn,Gi,Ki,Ji,So,Xi,jo,Qi,Yi,Zi,It,el,Pe,Io,tl,kt,ol,hn,nl,sl,ss,al,rl,il,Nt,ll,as,dl,cl,No,js,Ct,Dt,rs,Do,hl,is,pl,Is,J,Ao,ul,ls,fl,ml,pn,un,gl,_l,Tl,De,vl,ds,kl,Cl,cs,bl,yl,hs,wl,Ll,ps,$l,Rl,Ml,Ho,El,fn,Fl,zl,ql,Oo,xl,Wo,Pl,Sl,jl,At,Il,Se,Bo,Nl,bt,Dl,mn,Al,Hl,us,Ol,Wl,Bl,Ht,Ul,fs,Vl,Gl,Uo,Ns;return T=new it({}),Y=new it({}),Jt=new it({}),Xt=new Ie({props:{name:"class transformers.CTRLConfig",anchor:"transformers.CTRLConfig",parameters:[{name:"vocab_size",val:" = 246534"},{name:"n_positions",val:" = 256"},{name:"n_embd",val:" = 1280"},{name:"dff",val:" = 8192"},{name:"n_layer",val:" = 48"},{name:"n_head",val:" = 16"},{name:"resid_pdrop",val:" = 0.1"},{name:"embd_pdrop",val:" = 0.1"},{name:"attn_pdrop",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 1e-06"},{name:"initializer_range",val:" = 0.02"},{name:"summary_type",val:" = 'cls_index'"},{name:"summary_use_proj",val:" = True"},{name:"summary_activation",val:" = None"},{name:"summary_proj_to_labels",val:" = True"},{name:"summary_first_dropout",val:" = 0.1"},{name:"use_cache",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/configuration_ctrl.py#L26",parametersDescription:[{anchor:"transformers.CTRLConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 246534) &#x2014; Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLModel">CTRLModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLModel">TFCTRLModel</a>.`,name:"vocab_size"},{anchor:"transformers.CTRLConfig.n_positions",description:`<strong>n_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"n_positions"},{anchor:"transformers.CTRLConfig.n_embd",description:`<strong>n_embd</strong> (<code>int</code>, <em>optional</em>, defaults to 1280) &#x2014; Dimensionality of the embeddings and hidden states.`,name:"n_embd"},{anchor:"transformers.CTRLConfig.dff",description:`<strong>dff</strong> (<code>int</code>, <em>optional</em>, defaults to 8192) &#x2014; Dimensionality of the inner dimension of the feed forward networks (FFN).`,name:"dff"},{anchor:"transformers.CTRLConfig.n_layer",description:`<strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 48) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"n_layer"},{anchor:"transformers.CTRLConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.CTRLConfig.resid_pdrop",description:`<strong>resid_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"resid_pdrop"},{anchor:"transformers.CTRLConfig.embd_pdrop",description:`<strong>embd_pdrop</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the embeddings.`,name:"embd_pdrop"},{anchor:"transformers.CTRLConfig.attn_pdrop",description:`<strong>attn_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention.`,name:"attn_pdrop"},{anchor:"transformers.CTRLConfig.layer_norm_epsilon",description:`<strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The epsilon to use in the layer normalization layers`,name:"layer_norm_epsilon"},{anchor:"transformers.CTRLConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.CTRLConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}]}}),Yt=new Vt({props:{code:`from transformers import CTRLModel, CTRLConfig # Initializing a CTRL configuration configuration = CTRLConfig() # Initializing a model from the configuration model = CTRLModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLModel, CTRLConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CTRL configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = CTRLConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = CTRLModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Zt=new it({}),eo=new 
Ie({props:{name:"class transformers.CTRLTokenizer",anchor:"transformers.CTRLTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"unk_token",val:" = '<unk>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/tokenization_ctrl.py#L119",parametersDescription:[{anchor:"transformers.CTRLTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.CTRLTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.CTRLTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"}]}}),oo=new it({}),no=new Ie({props:{name:"class transformers.CTRLModel",anchor:"transformers.CTRLModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_ctrl.py#L322",parametersDescription:[{anchor:"transformers.CTRLModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),io=new Ie({props:{name:"forward",anchor:"transformers.CTRLModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_ctrl.py#L355",parametersDescription:[{anchor:"transformers.CTRLModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer">CTRLTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CTRLModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.FloatTensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.CTRLModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CTRLModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CTRLModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CTRLModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CTRLModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CTRLModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.CTRLModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CTRLModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CTRLModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPast" >transformers.modeling_outputs.BaseModelOutputWithPast</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig" >CTRLConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the 
embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPast" >transformers.modeling_outputs.BaseModelOutputWithPast</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mt=new wt({props:{$$slots:{default:[Ic]},$$scope:{ctx:B}}}),lo=new Vt({props:{code:`from transformers import CTRLTokenizer, CTRLModel import torch tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = CTRLModel.from_pretrained('ctrl') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, CTRLModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CTRLModel.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),co=new it({}),ho=new Ie({props:{name:"class transformers.CTRLLMHeadModel",anchor:"transformers.CTRLLMHeadModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_ctrl.py#L495",parametersDescription:[{anchor:"transformers.CTRLLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mo=new Ie({props:{name:"forward",anchor:"transformers.CTRLLMHeadModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_ctrl.py#L517",parametersDescription:[{anchor:"transformers.CTRLLMHeadModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer">CTRLTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CTRLLMHeadModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.FloatTensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.CTRLLMHeadModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CTRLLMHeadModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CTRLLMHeadModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CTRLLMHeadModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CTRLLMHeadModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CTRLLMHeadModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.CTRLLMHeadModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CTRLLMHeadModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CTRLLMHeadModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.CTRLLMHeadModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithPast" >transformers.modeling_outputs.CausalLMOutputWithPast</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig" >CTRLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithPast" >transformers.modeling_outputs.CausalLMOutputWithPast</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ft=new wt({props:{$$slots:{default:[Nc]},$$scope:{ctx:B}}}),go=new Vt({props:{code:`import torch from transformers import CTRLTokenizer, CTRLLMHeadModel tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = CTRLLMHeadModel.from_pretrained('ctrl') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, CTRLLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CTRLLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),_o=new it({}),To=new Ie({props:{name:"class transformers.CTRLForSequenceClassification",anchor:"transformers.CTRLForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_ctrl.py#L611",parametersDescription:[{anchor:"transformers.CTRLForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bo=new Ie({props:{name:"forward",anchor:"transformers.CTRLForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"past_key_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_ctrl.py#L621",parametersDescription:[{anchor:"transformers.CTRLForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer">CTRLTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CTRLForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[torch.FloatTensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past_key_values"},{anchor:"transformers.CTRLForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CTRLForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.CTRLForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CTRLForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.CTRLForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.CTRLForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.CTRLForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CTRLForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CTRLForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.CTRLForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig" >CTRLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qt=new wt({props:{$$slots:{default:[Dc]},$$scope:{ctx:B}}}),yo=new Vt({props:{code:`from transformers import CTRLTokenizer, CTRLForSequenceClassification import torch tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = CTRLForSequenceClassification.from_pretrained('ctrl') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, CTRLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CTRLForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),wo=new Vt({props:{code:`from transformers import CTRLTokenizer, CTRLForSequenceClassification import torch tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = CTRLForSequenceClassification.from_pretrained('ctrl', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, CTRLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = CTRLForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Lo=new it({}),$o=new Ie({props:{name:"class transformers.TFCTRLModel",anchor:"transformers.TFCTRLModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_tf_ctrl.py#L539",parametersDescription:[{anchor:"transformers.TFCTRLModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Pt=new wt({props:{$$slots:{default:[Ac]},$$scope:{ctx:B}}}),Fo=new Ie({props:{name:"call",anchor:"transformers.TFCTRLModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_tf_ctrl.py#L544",parametersDescription:[{anchor:"transformers.TFCTRLModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states).</p> <p>Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer">CTRLTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFCTRLModel.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFCTRLModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFCTRLModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFCTRLModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFCTRLModel.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFCTRLModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFCTRLModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past</code> key value states are returned and can be used to speed up decoding (see <code>past</code>).`,name:"use_cache"},{anchor:"transformers.TFCTRLModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFCTRLModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCTRLModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCTRLModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPast</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig" >CTRLConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPast</a> or <code>tuple(tf.Tensor)</code></p> `}}),St=new wt({props:{$$slots:{default:[Hc]},$$scope:{ctx:B}}}),zo=new Vt({props:{code:`from transformers import CTRLTokenizer, TFCTRLModel import tensorflow as tf tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = TFCTRLModel.from_pretrained('ctrl') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, TFCTRLModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCTRLModel.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),qo=new it({}),xo=new Ie({props:{name:"class transformers.TFCTRLLMHeadModel",anchor:"transformers.TFCTRLLMHeadModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_tf_ctrl.py#L650",parametersDescription:[{anchor:"transformers.TFCTRLLMHeadModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),It=new wt({props:{$$slots:{default:[Oc]},$$scope:{ctx:B}}}),Io=new Ie({props:{name:"call",anchor:"transformers.TFCTRLLMHeadModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_tf_ctrl.py#L671",parametersDescription:[{anchor:"transformers.TFCTRLLMHeadModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states).</p> <p>Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer">CTRLTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFCTRLLMHeadModel.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFCTRLLMHeadModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFCTRLLMHeadModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFCTRLLMHeadModel.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFCTRLLMHeadModel.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFCTRLLMHeadModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFCTRLLMHeadModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past</code> key value states are returned and can be used to speed up decoding (see <code>past</code>).`,name:"use_cache"},{anchor:"transformers.TFCTRLLMHeadModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFCTRLLMHeadModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCTRLLMHeadModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCTRLLMHeadModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFCTRLLMHeadModel.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. 
Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" >transformers.modeling_tf_outputs.TFCausalLMOutputWithPast</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig" >CTRLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" >transformers.modeling_tf_outputs.TFCausalLMOutputWithPast</a> or <code>tuple(tf.Tensor)</code></p> `}}),Nt=new wt({props:{$$slots:{default:[Wc]},$$scope:{ctx:B}}}),No=new Vt({props:{code:`from transformers import CTRLTokenizer, TFCTRLLMHeadModel import tensorflow as tf tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = TFCTRLLMHeadModel.from_pretrained('ctrl') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, TFCTRLLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span 
class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCTRLLMHeadModel.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Do=new it({}),Ao=new Ie({props:{name:"class transformers.TFCTRLForSequenceClassification",anchor:"transformers.TFCTRLForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_tf_ctrl.py#L778",parametersDescription:[{anchor:"transformers.TFCTRLForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig">CTRLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),At=new wt({props:{$$slots:{default:[Bc]},$$scope:{ctx:B}}}),Bo=new Ie({props:{name:"call",anchor:"transformers.TFCTRLForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"past",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/ctrl/modeling_tf_ctrl.py#L793",parametersDescription:[{anchor:"transformers.TFCTRLForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, input_ids_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past</code> is <code>None</code> else <code>past[0].shape[-2]</code> (<code>sequence_length</code> of input past key value states).</p> <p>Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past</code> is used, only input IDs that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLTokenizer">CTRLTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFCTRLForSequenceClassification.call.past",description:`<strong>past</strong> (<code>List[tf.Tensor]</code> of length <code>config.n_layers</code>) &#x2014; Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past</code> output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed.`,name:"past"},{anchor:"transformers.TFCTRLForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFCTRLForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFCTRLForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFCTRLForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFCTRLForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> or <code>Numpy array</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFCTRLForSequenceClassification.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past</code> key value states are returned and can be used to speed up decoding (see <code>past</code>).`,name:"use_cache"},{anchor:"transformers.TFCTRLForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFCTRLForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCTRLForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCTRLForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFCTRLForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. 
Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLConfig" >CTRLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ht=new wt({props:{$$slots:{default:[Uc]},$$scope:{ctx:B}}}),Uo=new Vt({props:{code:`from transformers import CTRLTokenizer, TFCTRLForSequenceClassification import tensorflow as tf tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = TFCTRLForSequenceClassification.from_pretrained('ctrl') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CTRLTokenizer, TFCTRLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CTRLTokenizer.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCTRLForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;ctrl&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){p=a("meta"),C=l(),m=a("h1"),g=a("a"),v=a("span"),w(T.$$.fragment),_=l(),y=a("span"),me=n("CTRL"),Q=l(),b=a("h2"),X=a("a"),A=a("span"),w(Y.$$.fragment),ge=l(),H=a("span"),_e=n("Overview"),ce=l(),P=a("p"),S=n("CTRL model was proposed in "),Z=a("a"),oe=n("CTRL: A Conditional Transformer Language Model for Controllable Generation"),F=n(" by Nitish Shirish Keskar"),z=a("em"),Te=n(", Bryan McCann"),U=n(`, Lav R. Varshney, Caiming Xiong and Richard Socher. It\u2019s a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).`),he=l(),ne=a("p"),V=n("The abstract from the paper is the following:"),pe=l(),se=a("p"),q=a("em"),ve=n(`Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution.`),O=l(),ae=a("p"),ke=n("Tips:"),W=l(),j=a("ul"),ee=a("li"),I=n(`CTRL makes use of control codes to generate text: it requires generations to be started by certain words, sentences or links to generate coherent text. Refer to the `),te=a("a"),G=n("original implementation"),Ce=n(` for more information.`),h=l(),k=a("li"),re=n(`CTRL is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),Ae=l(),ue=a("li"),N=n(`CTRL was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows CTRL to generate syntactically coherent text as it can be observed in the `),Me=a("em"),He=n("run_generation.py"),Oe=n(" example script."),D=l(),x=a("li"),We=n("The PyTorch models can take the "),Ee=a("em"),K=n("past"),Be=n(` as input, which is the previously computed key/value attention pairs. Using this `),Fe=a("em"),be=n("past"),Ue=n(` value prevents the model from re-computing pre-computed values in the context of text generation. See `),Ko=a("a"),la=n("reusing the past in generative models"),da=n(` for more information on the usage of this argument.`),Cs=l(),Xe=a("p"),ca=n("This model was contributed by "),Gt=a("a"),ha=n("keskarnitishr"),pa=n(`. 
The original code can be found `),Kt=a("a"),ua=n("here"),fa=n("."),bs=l(),lt=a("h2"),Lt=a("a"),wn=a("span"),w(Jt.$$.fragment),ma=l(),Ln=a("span"),ga=n("CTRLConfig"),ys=l(),ye=a("div"),w(Xt.$$.fragment),_a=l(),Je=a("p"),Ta=n("This is the configuration class to store the configuration of a "),Jo=a("a"),va=n("CTRLModel"),ka=n(` or a `),Xo=a("a"),Ca=n("TFCTRLModel"),ba=n(`. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),Qt=a("a"),ya=n("ctrl"),wa=n(" architecture from SalesForce."),La=l(),dt=a("p"),$a=n("Configuration objects inherit from "),Qo=a("a"),Ra=n("PretrainedConfig"),Ma=n(` and can be used to control the model outputs. Read the documentation from `),Yo=a("a"),Ea=n("PretrainedConfig"),Fa=n(" for more information."),za=l(),$n=a("p"),qa=n("Examples:"),xa=l(),w(Yt.$$.fragment),ws=l(),ct=a("h2"),$t=a("a"),Rn=a("span"),w(Zt.$$.fragment),Pa=l(),Mn=a("span"),Sa=n("CTRLTokenizer"),Ls=l(),Ne=a("div"),w(eo.$$.fragment),ja=l(),En=a("p"),Ia=n("Construct a CTRL tokenizer. Based on Byte-Pair-Encoding."),Na=l(),to=a("p"),Da=n("This tokenizer inherits from "),Zo=a("a"),Aa=n("PreTrainedTokenizer"),Ha=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Oa=l(),Fn=a("div"),$s=l(),ht=a("h2"),Rt=a("a"),zn=a("span"),w(oo.$$.fragment),Wa=l(),qn=a("span"),Ba=n("CTRLModel"),Rs=l(),we=a("div"),w(no.$$.fragment),Ua=l(),xn=a("p"),Va=n("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top."),Ga=l(),so=a("p"),Ka=n("This model inherits from "),en=a("a"),Ja=n("PreTrainedModel"),Xa=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qa=l(),ao=a("p"),Ya=n("This model is also a PyTorch "),ro=a("a"),Za=n("torch.nn.Module"),er=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),tr=l(),ze=a("div"),w(io.$$.fragment),or=l(),pt=a("p"),nr=n("The "),tn=a("a"),sr=n("CTRLModel"),ar=n(" forward method, overrides the "),Pn=a("code"),rr=n("__call__"),ir=n(" special method."),lr=l(),w(Mt.$$.fragment),dr=l(),Sn=a("p"),cr=n("Example:"),hr=l(),w(lo.$$.fragment),Ms=l(),ut=a("h2"),Et=a("a"),jn=a("span"),w(co.$$.fragment),pr=l(),In=a("span"),ur=n("CTRLLMHeadModel"),Es=l(),Le=a("div"),w(ho.$$.fragment),fr=l(),Nn=a("p"),mr=n(`The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),gr=l(),po=a("p"),_r=n("This model inherits from "),on=a("a"),Tr=n("PreTrainedModel"),vr=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kr=l(),uo=a("p"),Cr=n("This model is also a PyTorch "),fo=a("a"),br=n("torch.nn.Module"),yr=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wr=l(),qe=a("div"),w(mo.$$.fragment),Lr=l(),ft=a("p"),$r=n("The "),nn=a("a"),Rr=n("CTRLLMHeadModel"),Mr=n(" forward method, overrides the "),Dn=a("code"),Er=n("__call__"),Fr=n(" special method."),zr=l(),w(Ft.$$.fragment),qr=l(),An=a("p"),xr=n("Example:"),Pr=l(),w(go.$$.fragment),Fs=l(),mt=a("h2"),zt=a("a"),Hn=a("span"),w(_o.$$.fragment),Sr=l(),On=a("span"),jr=n("CTRLForSequenceClassification"),zs=l(),$e=a("div"),w(To.$$.fragment),Ir=l(),Re=a("p"),Nr=n(`The CTRL Model transformer with a sequence classification head on top (linear layer). `),sn=a("a"),Dr=n("CTRLForSequenceClassification"),Ar=n(` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `),Wn=a("code"),Hr=n("pad_token_id"),Or=n(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Bn=a("code"),Wr=n("pad_token_id"),Br=n(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Un=a("code"),Ur=n("inputs_embeds"),Vr=n(` are passed instead of `),Vn=a("code"),Gr=n("input_ids"),Kr=n(", it does the same (take the last value in each row of the batch)."),Jr=l(),vo=a("p"),Xr=n("This model inherits from "),an=a("a"),Qr=n("PreTrainedModel"),Yr=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zr=l(),ko=a("p"),ei=n("This model is also a PyTorch "),Co=a("a"),ti=n("torch.nn.Module"),oi=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ni=l(),ie=a("div"),w(bo.$$.fragment),si=l(),gt=a("p"),ai=n("The "),rn=a("a"),ri=n("CTRLForSequenceClassification"),ii=n(" forward method, overrides the "),Gn=a("code"),li=n("__call__"),di=n(" special method."),ci=l(),w(qt.$$.fragment),hi=l(),Kn=a("p"),pi=n("Example of single-label classification:"),ui=l(),w(yo.$$.fragment),fi=l(),Jn=a("p"),mi=n("Example of multi-label classification:"),gi=l(),w(wo.$$.fragment),qs=l(),_t=a("h2"),xt=a("a"),Xn=a("span"),w(Lo.$$.fragment),_i=l(),Qn=a("span"),Ti=n("TFCTRLModel"),xs=l(),le=a("div"),w($o.$$.fragment),vi=l(),Yn=a("p"),ki=n("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top."),Ci=l(),Ro=a("p"),bi=n("This model inherits from "),ln=a("a"),yi=n("TFPreTrainedModel"),wi=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Li=l(),Mo=a("p"),$i=n("This model is also a "),Eo=a("a"),Ri=n("tf.keras.Model"),Mi=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ei=l(),w(Pt.$$.fragment),Fi=l(),xe=a("div"),w(Fo.$$.fragment),zi=l(),Tt=a("p"),qi=n("The "),dn=a("a"),xi=n("TFCTRLModel"),Pi=n(" forward method, overrides the "),Zn=a("code"),Si=n("__call__"),ji=n(" special method."),Ii=l(),w(St.$$.fragment),Ni=l(),es=a("p"),Di=n("Example:"),Ai=l(),w(zo.$$.fragment),Ps=l(),vt=a("h2"),jt=a("a"),ts=a("span"),w(qo.$$.fragment),Hi=l(),os=a("span"),Oi=n("TFCTRLLMHeadModel"),Ss=l(),de=a("div"),w(xo.$$.fragment),Wi=l(),ns=a("p"),Bi=n(`The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Ui=l(),Po=a("p"),Vi=n("This model inherits from "),cn=a("a"),Gi=n("TFPreTrainedModel"),Ki=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ji=l(),So=a("p"),Xi=n("This model is also a "),jo=a("a"),Qi=n("tf.keras.Model"),Yi=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Zi=l(),w(It.$$.fragment),el=l(),Pe=a("div"),w(Io.$$.fragment),tl=l(),kt=a("p"),ol=n("The "),hn=a("a"),nl=n("TFCTRLLMHeadModel"),sl=n(" forward method, overrides the "),ss=a("code"),al=n("__call__"),rl=n(" special method."),il=l(),w(Nt.$$.fragment),ll=l(),as=a("p"),dl=n("Example:"),cl=l(),w(No.$$.fragment),js=l(),Ct=a("h2"),Dt=a("a"),rs=a("span"),w(Do.$$.fragment),hl=l(),is=a("span"),pl=n("TFCTRLForSequenceClassification"),Is=l(),J=a("div"),w(Ao.$$.fragment),ul=l(),ls=a("p"),fl=n("The CTRL Model transformer with a sequence classification head on top (linear layer)."),ml=l(),pn=a("p"),un=a("a"),gl=n("TFCTRLForSequenceClassification"),_l=n(` uses the last token in order to do the classification, as other causal models (e.g. GPT-1, GPT-2) do.`),Tl=l(),De=a("p"),vl=n(`Since it does classification on the last token, it requires to know the position of the last token. If a `),ds=a("code"),kl=n("pad_token_id"),Cl=n(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),cs=a("code"),bl=n("pad_token_id"),yl=n(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),hs=a("code"),wl=n("inputs_embeds"),Ll=n(" are passed instead of "),ps=a("code"),$l=n("input_ids"),Rl=n(`, it does the same (take the last value in each row of the batch).`),Ml=l(),Ho=a("p"),El=n("This model inherits from "),fn=a("a"),Fl=n("TFPreTrainedModel"),zl=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ql=l(),Oo=a("p"),xl=n("This model is also a "),Wo=a("a"),Pl=n("tf.keras.Model"),Sl=n(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jl=l(),w(At.$$.fragment),Il=l(),Se=a("div"),w(Bo.$$.fragment),Nl=l(),bt=a("p"),Dl=n("The "),mn=a("a"),Al=n("TFCTRLForSequenceClassification"),Hl=n(" forward method, overrides the "),us=a("code"),Ol=n("__call__"),Wl=n(" special method."),Bl=l(),w(Ht.$$.fragment),Ul=l(),fs=a("p"),Vl=n("Example:"),Gl=l(),w(Uo.$$.fragment),this.h()},l(o){const f=jc('[data-svelte="svelte-1phssyn"]',document.head);p=r(f,"META",{name:!0,content:!0}),f.forEach(t),C=d(o),m=r(o,"H1",{class:!0});var Vo=i(m);g=r(Vo,"A",{id:!0,class:!0,href:!0});var ms=i(g);v=r(ms,"SPAN",{});var gs=i(v);L(T.$$.fragment,gs),gs.forEach(t),ms.forEach(t),_=d(Vo),y=r(Vo,"SPAN",{});var _s=i(y);me=s(_s,"CTRL"),_s.forEach(t),Vo.forEach(t),Q=d(o),b=r(o,"H2",{class:!0});var Go=i(b);X=r(Go,"A",{id:!0,class:!0,href:!0});var Ts=i(X);A=r(Ts,"SPAN",{});var vs=i(A);L(Y.$$.fragment,vs),vs.forEach(t),Ts.forEach(t),ge=d(Go),H=r(Go,"SPAN",{});var ks=i(H);_e=s(ks,"Overview"),ks.forEach(t),Go.forEach(t),ce=d(o),P=r(o,"P",{});var yt=i(P);S=s(yt,"CTRL model was proposed in "),Z=r(yt,"A",{href:!0,rel:!0});var Jl=i(Z);oe=s(Jl,"CTRL: A Conditional Transformer Language Model for Controllable Generation"),Jl.forEach(t),F=s(yt," by Nitish Shirish Keskar"),z=r(yt,"EM",{});var Xl=i(z);Te=s(Xl,", Bryan McCann"),Xl.forEach(t),U=s(yt,`, Lav R. Varshney, Caiming Xiong and Richard Socher. It\u2019s a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).`),yt.forEach(t),he=d(o),ne=r(o,"P",{});var Ql=i(ne);V=s(Ql,"The abstract from the paper is the following:"),Ql.forEach(t),pe=d(o),se=r(o,"P",{});var Yl=i(se);q=r(Yl,"EM",{});var Zl=i(q);ve=s(Zl,`Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution.`),Zl.forEach(t),Yl.forEach(t),O=d(o),ae=r(o,"P",{});var ed=i(ae);ke=s(ed,"Tips:"),ed.forEach(t),W=d(o),j=r(o,"UL",{});var Ot=i(j);ee=r(Ot,"LI",{});var Ds=i(ee);I=s(Ds,`CTRL makes use of control codes to generate text: it requires generations to be started by certain words, sentences or links to generate coherent text. Refer to the `),te=r(Ds,"A",{href:!0,rel:!0});var td=i(te);G=s(td,"original implementation"),td.forEach(t),Ce=s(Ds,` for more information.`),Ds.forEach(t),h=d(Ot),k=r(Ot,"LI",{});var od=i(k);re=s(od,`CTRL is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),od.forEach(t),Ae=d(Ot),ue=r(Ot,"LI",{});var As=i(ue);N=s(As,`CTRL was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. 
Leveraging this feature allows CTRL to generate syntactically coherent text as it can be observed in the `),Me=r(As,"EM",{});var nd=i(Me);He=s(nd,"run_generation.py"),nd.forEach(t),Oe=s(As," example script."),As.forEach(t),D=d(Ot),x=r(Ot,"LI",{});var Wt=i(x);We=s(Wt,"The PyTorch models can take the "),Ee=r(Wt,"EM",{});var sd=i(Ee);K=s(sd,"past"),sd.forEach(t),Be=s(Wt,` as input, which is the previously computed key/value attention pairs. Using this `),Fe=r(Wt,"EM",{});var ad=i(Fe);be=s(ad,"past"),ad.forEach(t),Ue=s(Wt,` value prevents the model from re-computing pre-computed values in the context of text generation. See `),Ko=r(Wt,"A",{href:!0});var rd=i(Ko);la=s(rd,"reusing the past in generative models"),rd.forEach(t),da=s(Wt,` for more information on the usage of this argument.`),Wt.forEach(t),Ot.forEach(t),Cs=d(o),Xe=r(o,"P",{});var gn=i(Xe);ca=s(gn,"This model was contributed by "),Gt=r(gn,"A",{href:!0,rel:!0});var id=i(Gt);ha=s(id,"keskarnitishr"),id.forEach(t),pa=s(gn,`. The original code can be found `),Kt=r(gn,"A",{href:!0,rel:!0});var ld=i(Kt);ua=s(ld,"here"),ld.forEach(t),fa=s(gn,"."),gn.forEach(t),bs=d(o),lt=r(o,"H2",{class:!0});var Hs=i(lt);Lt=r(Hs,"A",{id:!0,class:!0,href:!0});var dd=i(Lt);wn=r(dd,"SPAN",{});var cd=i(wn);L(Jt.$$.fragment,cd),cd.forEach(t),dd.forEach(t),ma=d(Hs),Ln=r(Hs,"SPAN",{});var hd=i(Ln);ga=s(hd,"CTRLConfig"),hd.forEach(t),Hs.forEach(t),ys=d(o),ye=r(o,"DIV",{class:!0});var Qe=i(ye);L(Xt.$$.fragment,Qe),_a=d(Qe),Je=r(Qe,"P",{});var Bt=i(Je);Ta=s(Bt,"This is the configuration class to store the configuration of a "),Jo=r(Bt,"A",{href:!0});var pd=i(Jo);va=s(pd,"CTRLModel"),pd.forEach(t),ka=s(Bt,` or a `),Xo=r(Bt,"A",{href:!0});var ud=i(Xo);Ca=s(ud,"TFCTRLModel"),ud.forEach(t),ba=s(Bt,`. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `),Qt=r(Bt,"A",{href:!0,rel:!0});var fd=i(Qt);ya=s(fd,"ctrl"),fd.forEach(t),wa=s(Bt," architecture from SalesForce."),Bt.forEach(t),La=d(Qe),dt=r(Qe,"P",{});var _n=i(dt);$a=s(_n,"Configuration objects inherit from "),Qo=r(_n,"A",{href:!0});var md=i(Qo);Ra=s(md,"PretrainedConfig"),md.forEach(t),Ma=s(_n,` and can be used to control the model outputs. Read the documentation from `),Yo=r(_n,"A",{href:!0});var gd=i(Yo);Ea=s(gd,"PretrainedConfig"),gd.forEach(t),Fa=s(_n," for more information."),_n.forEach(t),za=d(Qe),$n=r(Qe,"P",{});var _d=i($n);qa=s(_d,"Examples:"),_d.forEach(t),xa=d(Qe),L(Yt.$$.fragment,Qe),Qe.forEach(t),ws=d(o),ct=r(o,"H2",{class:!0});var Os=i(ct);$t=r(Os,"A",{id:!0,class:!0,href:!0});var Td=i($t);Rn=r(Td,"SPAN",{});var vd=i(Rn);L(Zt.$$.fragment,vd),vd.forEach(t),Td.forEach(t),Pa=d(Os),Mn=r(Os,"SPAN",{});var kd=i(Mn);Sa=s(kd,"CTRLTokenizer"),kd.forEach(t),Os.forEach(t),Ls=d(o),Ne=r(o,"DIV",{class:!0});var Ut=i(Ne);L(eo.$$.fragment,Ut),ja=d(Ut),En=r(Ut,"P",{});var Cd=i(En);Ia=s(Cd,"Construct a CTRL tokenizer. Based on Byte-Pair-Encoding."),Cd.forEach(t),Na=d(Ut),to=r(Ut,"P",{});var Ws=i(to);Da=s(Ws,"This tokenizer inherits from "),Zo=r(Ws,"A",{href:!0});var bd=i(Zo);Aa=s(bd,"PreTrainedTokenizer"),bd.forEach(t),Ha=s(Ws,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ws.forEach(t),Oa=d(Ut),Fn=r(Ut,"DIV",{class:!0}),i(Fn).forEach(t),Ut.forEach(t),$s=d(o),ht=r(o,"H2",{class:!0});var Bs=i(ht);Rt=r(Bs,"A",{id:!0,class:!0,href:!0});var yd=i(Rt);zn=r(yd,"SPAN",{});var wd=i(zn);L(oo.$$.fragment,wd),wd.forEach(t),yd.forEach(t),Wa=d(Bs),qn=r(Bs,"SPAN",{});var Ld=i(qn);Ba=s(Ld,"CTRLModel"),Ld.forEach(t),Bs.forEach(t),Rs=d(o),we=r(o,"DIV",{class:!0});var Ye=i(we);L(no.$$.fragment,Ye),Ua=d(Ye),xn=r(Ye,"P",{});var $d=i(xn);Va=s($d,"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top."),$d.forEach(t),Ga=d(Ye),so=r(Ye,"P",{});var Us=i(so);Ka=s(Us,"This model inherits from "),en=r(Us,"A",{href:!0});var Rd=i(en);Ja=s(Rd,"PreTrainedModel"),Rd.forEach(t),Xa=s(Us,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Us.forEach(t),Qa=d(Ye),ao=r(Ye,"P",{});var Vs=i(ao);Ya=s(Vs,"This model is also a PyTorch "),ro=r(Vs,"A",{href:!0,rel:!0});var Md=i(ro);Za=s(Md,"torch.nn.Module"),Md.forEach(t),er=s(Vs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vs.forEach(t),tr=d(Ye),ze=r(Ye,"DIV",{class:!0});var Ze=i(ze);L(io.$$.fragment,Ze),or=d(Ze),pt=r(Ze,"P",{});var Tn=i(pt);nr=s(Tn,"The "),tn=r(Tn,"A",{href:!0});var Ed=i(tn);sr=s(Ed,"CTRLModel"),Ed.forEach(t),ar=s(Tn," forward method, overrides the "),Pn=r(Tn,"CODE",{});var Fd=i(Pn);rr=s(Fd,"__call__"),Fd.forEach(t),ir=s(Tn," special method."),Tn.forEach(t),lr=d(Ze),L(Mt.$$.fragment,Ze),dr=d(Ze),Sn=r(Ze,"P",{});var zd=i(Sn);cr=s(zd,"Example:"),zd.forEach(t),hr=d(Ze),L(lo.$$.fragment,Ze),Ze.forEach(t),Ye.forEach(t),Ms=d(o),ut=r(o,"H2",{class:!0});var Gs=i(ut);Et=r(Gs,"A",{id:!0,class:!0,href:!0});var qd=i(Et);jn=r(qd,"SPAN",{});var xd=i(jn);L(co.$$.fragment,xd),xd.forEach(t),qd.forEach(t),pr=d(Gs),In=r(Gs,"SPAN",{});var Pd=i(In);ur=s(Pd,"CTRLLMHeadModel"),Pd.forEach(t),Gs.forEach(t),Es=d(o),Le=r(o,"DIV",{class:!0});var et=i(Le);L(ho.$$.fragment,et),fr=d(et),Nn=r(et,"P",{});var Sd=i(Nn);mr=s(Sd,`The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),Sd.forEach(t),gr=d(et),po=r(et,"P",{});var Ks=i(po);_r=s(Ks,"This model inherits from "),on=r(Ks,"A",{href:!0});var jd=i(on);Tr=s(jd,"PreTrainedModel"),jd.forEach(t),vr=s(Ks,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ks.forEach(t),kr=d(et),uo=r(et,"P",{});var Js=i(uo);Cr=s(Js,"This model is also a PyTorch "),fo=r(Js,"A",{href:!0,rel:!0});var Id=i(fo);br=s(Id,"torch.nn.Module"),Id.forEach(t),yr=s(Js,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Js.forEach(t),wr=d(et),qe=r(et,"DIV",{class:!0});var tt=i(qe);L(mo.$$.fragment,tt),Lr=d(tt),ft=r(tt,"P",{});var vn=i(ft);$r=s(vn,"The "),nn=r(vn,"A",{href:!0});var Nd=i(nn);Rr=s(Nd,"CTRLLMHeadModel"),Nd.forEach(t),Mr=s(vn," forward method, overrides the "),Dn=r(vn,"CODE",{});var Dd=i(Dn);Er=s(Dd,"__call__"),Dd.forEach(t),Fr=s(vn," special method."),vn.forEach(t),zr=d(tt),L(Ft.$$.fragment,tt),qr=d(tt),An=r(tt,"P",{});var Ad=i(An);xr=s(Ad,"Example:"),Ad.forEach(t),Pr=d(tt),L(go.$$.fragment,tt),tt.forEach(t),et.forEach(t),Fs=d(o),mt=r(o,"H2",{class:!0});var Xs=i(mt);zt=r(Xs,"A",{id:!0,class:!0,href:!0});var Hd=i(zt);Hn=r(Hd,"SPAN",{});var Od=i(Hn);L(_o.$$.fragment,Od),Od.forEach(t),Hd.forEach(t),Sr=d(Xs),On=r(Xs,"SPAN",{});var Wd=i(On);jr=s(Wd,"CTRLForSequenceClassification"),Wd.forEach(t),Xs.forEach(t),zs=d(o),$e=r(o,"DIV",{class:!0});var ot=i($e);L(To.$$.fragment,ot),Ir=d(ot),Re=r(ot,"P",{});var Ve=i(Re);Nr=s(Ve,`The CTRL Model transformer with a sequence classification head on top (linear layer). `),sn=r(Ve,"A",{href:!0});var Bd=i(sn);Dr=s(Bd,"CTRLForSequenceClassification"),Bd.forEach(t),Ar=s(Ve,` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `),Wn=r(Ve,"CODE",{});var Ud=i(Wn);Hr=s(Ud,"pad_token_id"),Ud.forEach(t),Or=s(Ve,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Bn=r(Ve,"CODE",{});var Vd=i(Bn);Wr=s(Vd,"pad_token_id"),Vd.forEach(t),Br=s(Ve,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),Un=r(Ve,"CODE",{});var Gd=i(Un);Ur=s(Gd,"inputs_embeds"),Gd.forEach(t),Vr=s(Ve,` are passed instead of `),Vn=r(Ve,"CODE",{});var Kd=i(Vn);Gr=s(Kd,"input_ids"),Kd.forEach(t),Kr=s(Ve,", it does the same (take the last value in each row of the batch)."),Ve.forEach(t),Jr=d(ot),vo=r(ot,"P",{});var Qs=i(vo);Xr=s(Qs,"This model inherits from "),an=r(Qs,"A",{href:!0});var Jd=i(an);Qr=s(Jd,"PreTrainedModel"),Jd.forEach(t),Yr=s(Qs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qs.forEach(t),Zr=d(ot),ko=r(ot,"P",{});var Ys=i(ko);ei=s(Ys,"This model is also a PyTorch "),Co=r(Ys,"A",{href:!0,rel:!0});var Xd=i(Co);ti=s(Xd,"torch.nn.Module"),Xd.forEach(t),oi=s(Ys,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ys.forEach(t),ni=d(ot),ie=r(ot,"DIV",{class:!0});var je=i(ie);L(bo.$$.fragment,je),si=d(je),gt=r(je,"P",{});var kn=i(gt);ai=s(kn,"The "),rn=r(kn,"A",{href:!0});var Qd=i(rn);ri=s(Qd,"CTRLForSequenceClassification"),Qd.forEach(t),ii=s(kn," forward method, overrides the "),Gn=r(kn,"CODE",{});var Yd=i(Gn);li=s(Yd,"__call__"),Yd.forEach(t),di=s(kn," special method."),kn.forEach(t),ci=d(je),L(qt.$$.fragment,je),hi=d(je),Kn=r(je,"P",{});var Zd=i(Kn);pi=s(Zd,"Example of single-label classification:"),Zd.forEach(t),ui=d(je),L(yo.$$.fragment,je),fi=d(je),Jn=r(je,"P",{});var ec=i(Jn);mi=s(ec,"Example of multi-label classification:"),ec.forEach(t),gi=d(je),L(wo.$$.fragment,je),je.forEach(t),ot.forEach(t),qs=d(o),_t=r(o,"H2",{class:!0});var Zs=i(_t);xt=r(Zs,"A",{id:!0,class:!0,href:!0});var tc=i(xt);Xn=r(tc,"SPAN",{});var oc=i(Xn);L(Lo.$$.fragment,oc),oc.forEach(t),tc.forEach(t),_i=d(Zs),Qn=r(Zs,"SPAN",{});var nc=i(Qn);Ti=s(nc,"TFCTRLModel"),nc.forEach(t),Zs.forEach(t),xs=d(o),le=r(o,"DIV",{class:!0});var Ge=i(le);L($o.$$.fragment,Ge),vi=d(Ge),Yn=r(Ge,"P",{});var sc=i(Yn);ki=s(sc,"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top."),sc.forEach(t),Ci=d(Ge),Ro=r(Ge,"P",{});var ea=i(Ro);bi=s(ea,"This model inherits from "),ln=r(ea,"A",{href:!0});var ac=i(ln);yi=s(ac,"TFPreTrainedModel"),ac.forEach(t),wi=s(ea,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ea.forEach(t),Li=d(Ge),Mo=r(Ge,"P",{});var ta=i(Mo);$i=s(ta,"This model is also a "),Eo=r(ta,"A",{href:!0,rel:!0});var rc=i(Eo);Ri=s(rc,"tf.keras.Model"),rc.forEach(t),Mi=s(ta,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ta.forEach(t),Ei=d(Ge),L(Pt.$$.fragment,Ge),Fi=d(Ge),xe=r(Ge,"DIV",{class:!0});var nt=i(xe);L(Fo.$$.fragment,nt),zi=d(nt),Tt=r(nt,"P",{});var Cn=i(Tt);qi=s(Cn,"The "),dn=r(Cn,"A",{href:!0});var ic=i(dn);xi=s(ic,"TFCTRLModel"),ic.forEach(t),Pi=s(Cn," forward method, overrides the "),Zn=r(Cn,"CODE",{});var lc=i(Zn);Si=s(lc,"__call__"),lc.forEach(t),ji=s(Cn," special method."),Cn.forEach(t),Ii=d(nt),L(St.$$.fragment,nt),Ni=d(nt),es=r(nt,"P",{});var dc=i(es);Di=s(dc,"Example:"),dc.forEach(t),Ai=d(nt),L(zo.$$.fragment,nt),nt.forEach(t),Ge.forEach(t),Ps=d(o),vt=r(o,"H2",{class:!0});var oa=i(vt);jt=r(oa,"A",{id:!0,class:!0,href:!0});var cc=i(jt);ts=r(cc,"SPAN",{});var hc=i(ts);L(qo.$$.fragment,hc),hc.forEach(t),cc.forEach(t),Hi=d(oa),os=r(oa,"SPAN",{});var pc=i(os);Oi=s(pc,"TFCTRLLMHeadModel"),pc.forEach(t),oa.forEach(t),Ss=d(o),de=r(o,"DIV",{class:!0});var Ke=i(de);L(xo.$$.fragment,Ke),Wi=d(Ke),ns=r(Ke,"P",{});var uc=i(ns);Bi=s(uc,`The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).`),uc.forEach(t),Ui=d(Ke),Po=r(Ke,"P",{});var na=i(Po);Vi=s(na,"This model inherits from "),cn=r(na,"A",{href:!0});var fc=i(cn);Gi=s(fc,"TFPreTrainedModel"),fc.forEach(t),Ki=s(na,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),na.forEach(t),Ji=d(Ke),So=r(Ke,"P",{});var sa=i(So);Xi=s(sa,"This model is also a "),jo=r(sa,"A",{href:!0,rel:!0});var mc=i(jo);Qi=s(mc,"tf.keras.Model"),mc.forEach(t),Yi=s(sa,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sa.forEach(t),Zi=d(Ke),L(It.$$.fragment,Ke),el=d(Ke),Pe=r(Ke,"DIV",{class:!0});var st=i(Pe);L(Io.$$.fragment,st),tl=d(st),kt=r(st,"P",{});var bn=i(kt);ol=s(bn,"The "),hn=r(bn,"A",{href:!0});var gc=i(hn);nl=s(gc,"TFCTRLLMHeadModel"),gc.forEach(t),sl=s(bn," forward method, overrides the "),ss=r(bn,"CODE",{});var _c=i(ss);al=s(_c,"__call__"),_c.forEach(t),rl=s(bn," special method."),bn.forEach(t),il=d(st),L(Nt.$$.fragment,st),ll=d(st),as=r(st,"P",{});var Tc=i(as);dl=s(Tc,"Example:"),Tc.forEach(t),cl=d(st),L(No.$$.fragment,st),st.forEach(t),Ke.forEach(t),js=d(o),Ct=r(o,"H2",{class:!0});var aa=i(Ct);Dt=r(aa,"A",{id:!0,class:!0,href:!0});var vc=i(Dt);rs=r(vc,"SPAN",{});var kc=i(rs);L(Do.$$.fragment,kc),kc.forEach(t),vc.forEach(t),hl=d(aa),is=r(aa,"SPAN",{});var Cc=i(is);pl=s(Cc,"TFCTRLForSequenceClassification"),Cc.forEach(t),aa.forEach(t),Is=d(o),J=r(o,"DIV",{class:!0});var fe=i(J);L(Ao.$$.fragment,fe),ul=d(fe),ls=r(fe,"P",{});var bc=i(ls);fl=s(bc,"The CTRL Model transformer with a sequence classification head on top (linear layer)."),bc.forEach(t),ml=d(fe),pn=r(fe,"P",{});var Kl=i(pn);un=r(Kl,"A",{href:!0});var yc=i(un);gl=s(yc,"TFCTRLForSequenceClassification"),yc.forEach(t),_l=s(Kl,` uses the last token in order to do the classification, as other causal models (e.g. GPT-1, GPT-2) do.`),Kl.forEach(t),Tl=d(fe),De=r(fe,"P",{});var at=i(De);vl=s(at,`Since it does classification on the last token, it requires to know the position of the last token. If a `),ds=r(at,"CODE",{});var wc=i(ds);kl=s(wc,"pad_token_id"),wc.forEach(t),Cl=s(at,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),cs=r(at,"CODE",{});var Lc=i(cs);bl=s(Lc,"pad_token_id"),Lc.forEach(t),yl=s(at,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),hs=r(at,"CODE",{});var $c=i(hs);wl=s($c,"inputs_embeds"),$c.forEach(t),Ll=s(at," are passed instead of "),ps=r(at,"CODE",{});var Rc=i(ps);$l=s(Rc,"input_ids"),Rc.forEach(t),Rl=s(at,`, it does the same (take the last value in each row of the batch).`),at.forEach(t),Ml=d(fe),Ho=r(fe,"P",{});var ra=i(Ho);El=s(ra,"This model inherits from "),fn=r(ra,"A",{href:!0});var Mc=i(fn);Fl=s(Mc,"TFPreTrainedModel"),Mc.forEach(t),zl=s(ra,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ra.forEach(t),ql=d(fe),Oo=r(fe,"P",{});var ia=i(Oo);xl=s(ia,"This model is also a "),Wo=r(ia,"A",{href:!0,rel:!0});var Ec=i(Wo);Pl=s(Ec,"tf.keras.Model"),Ec.forEach(t),Sl=s(ia,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ia.forEach(t),jl=d(fe),L(At.$$.fragment,fe),Il=d(fe),Se=r(fe,"DIV",{class:!0});var rt=i(Se);L(Bo.$$.fragment,rt),Nl=d(rt),bt=r(rt,"P",{});var yn=i(bt);Dl=s(yn,"The "),mn=r(yn,"A",{href:!0});var Fc=i(mn);Al=s(Fc,"TFCTRLForSequenceClassification"),Fc.forEach(t),Hl=s(yn," forward method, overrides the "),us=r(yn,"CODE",{});var zc=i(us);Ol=s(zc,"__call__"),zc.forEach(t),Wl=s(yn," special method."),yn.forEach(t),Bl=d(rt),L(Ht.$$.fragment,rt),Ul=d(rt),fs=r(rt,"P",{});var qc=i(fs);Vl=s(qc,"Example:"),qc.forEach(t),Gl=d(rt),L(Uo.$$.fragment,rt),rt.forEach(t),fe.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(Gc)),c(g,"id","ctrl"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#ctrl"),c(m,"class","relative group"),c(X,"id","overview"),c(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(X,"href","#overview"),c(b,"class","relative group"),c(Z,"href","https://arxiv.org/abs/1909.05858"),c(Z,"rel","nofollow"),c(te,"href","https://github.com/salesforce/ctrl"),c(te,"rel","nofollow"),c(Ko,"href","../quickstart#using-the-past"),c(Gt,"href","https://huggingface.co/keskarnitishr"),c(Gt,"rel","nofollow"),c(Kt,"href","https://github.com/salesforce/ctrl"),c(Kt,"rel","nofollow"),c(Lt,"id","transformers.CTRLConfig"),c(Lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lt,"href","#transformers.CTRLConfig"),c(lt,"class","relative group"),c(Jo,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLModel"),c(Xo,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLModel"),c(Qt,"href","https://huggingface.co/ctrl"),c(Qt,"rel","nofollow"),c(Qo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Yo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(ye,"class","docstring"),c($t,"id","transformers.CTRLTokenizer"),c($t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c($t,"href","#transformers.CTRLTokenizer"),c(ct,"class","relative group"),c(Zo,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Fn,"class","docstring"),c(Ne,"class","docstring"),c(Rt,"id","transformers.CTRLModel"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.CTRLModel"),c(ht,"class","relative group"),c(en,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ro,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ro,"rel","nofollow"),c(tn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLModel"),c(ze,"class","docstring"),c(we,"class","docstring"),c(Et,"id","transformers.CTRLLMHeadModel"),c(Et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Et,"href","#transformers.CTRLLMHeadModel"),c(ut,"class","relative group"),c(on,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(fo,"rel","nofollow"),c(nn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLLMHeadModel"),c(qe,"class","docstring"),c(Le,"class","docstring"),c(zt,"id","transformers.CTRLForSequenceClassification"),c(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zt,"href","#transformers.CTRLForSequenceClassification"),c(mt,"class","relative group"),c(sn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLForSequenceClassification"),c(an,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Co,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Co,"rel","nofollow"),c(rn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.CTRLForSequenceClassification"),c(ie,"class","docstring"),c($e,"class","docstring"),c(xt,"id","transformers.TFCTRLModel"),c(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xt,"href","#transformers.TFCTRLModel"),c(_t,"class","relative group"),c(ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Eo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Eo,"rel","nofollow"),c(dn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLModel"),c(xe,"class","docstring"),c(le,"class","docstring"),c(jt,"id","transformers.TFCTRLLMHeadModel"),c(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jt,"href","#transformers.TFCTRLLMHeadModel"),c(vt,"class","relative group"),c(cn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(jo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(jo,"rel","nofollow"),c(hn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLLMHeadModel"),c(Pe,"class","docstring"),c(de,"class","docstring"),c(Dt,"id","transformers.TFCTRLForSequenceClassification"),c(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dt,"href","#transformers.TFCTRLForSequenceClassification"),c(Ct,"class","relative 
group"),c(un,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLForSequenceClassification"),c(fn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Wo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Wo,"rel","nofollow"),c(mn,"href","/docs/transformers/v4.15.0/en/model_doc/ctrl#transformers.TFCTRLForSequenceClassification"),c(Se,"class","docstring"),c(J,"class","docstring")},m(o,f){e(document.head,p),u(o,C,f),u(o,m,f),e(m,g),e(g,v),$(T,v,null),e(m,_),e(m,y),e(y,me),u(o,Q,f),u(o,b,f),e(b,X),e(X,A),$(Y,A,null),e(b,ge),e(b,H),e(H,_e),u(o,ce,f),u(o,P,f),e(P,S),e(P,Z),e(Z,oe),e(P,F),e(P,z),e(z,Te),e(P,U),u(o,he,f),u(o,ne,f),e(ne,V),u(o,pe,f),u(o,se,f),e(se,q),e(q,ve),u(o,O,f),u(o,ae,f),e(ae,ke),u(o,W,f),u(o,j,f),e(j,ee),e(ee,I),e(ee,te),e(te,G),e(ee,Ce),e(j,h),e(j,k),e(k,re),e(j,Ae),e(j,ue),e(ue,N),e(ue,Me),e(Me,He),e(ue,Oe),e(j,D),e(j,x),e(x,We),e(x,Ee),e(Ee,K),e(x,Be),e(x,Fe),e(Fe,be),e(x,Ue),e(x,Ko),e(Ko,la),e(x,da),u(o,Cs,f),u(o,Xe,f),e(Xe,ca),e(Xe,Gt),e(Gt,ha),e(Xe,pa),e(Xe,Kt),e(Kt,ua),e(Xe,fa),u(o,bs,f),u(o,lt,f),e(lt,Lt),e(Lt,wn),$(Jt,wn,null),e(lt,ma),e(lt,Ln),e(Ln,ga),u(o,ys,f),u(o,ye,f),$(Xt,ye,null),e(ye,_a),e(ye,Je),e(Je,Ta),e(Je,Jo),e(Jo,va),e(Je,ka),e(Je,Xo),e(Xo,Ca),e(Je,ba),e(Je,Qt),e(Qt,ya),e(Je,wa),e(ye,La),e(ye,dt),e(dt,$a),e(dt,Qo),e(Qo,Ra),e(dt,Ma),e(dt,Yo),e(Yo,Ea),e(dt,Fa),e(ye,za),e(ye,$n),e($n,qa),e(ye,xa),$(Yt,ye,null),u(o,ws,f),u(o,ct,f),e(ct,$t),e($t,Rn),$(Zt,Rn,null),e(ct,Pa),e(ct,Mn),e(Mn,Sa),u(o,Ls,f),u(o,Ne,f),$(eo,Ne,null),e(Ne,ja),e(Ne,En),e(En,Ia),e(Ne,Na),e(Ne,to),e(to,Da),e(to,Zo),e(Zo,Aa),e(to,Ha),e(Ne,Oa),e(Ne,Fn),u(o,$s,f),u(o,ht,f),e(ht,Rt),e(Rt,zn),$(oo,zn,null),e(ht,Wa),e(ht,qn),e(qn,Ba),u(o,Rs,f),u(o,we,f),$(no,we,null),e(we,Ua),e(we,xn),e(xn,Va),e(we,Ga),e(we,so),e(so,Ka),e(so,en),e(en,Ja),e(so,Xa),e(we,Qa),e(we,ao),e(ao,Ya),e(ao,ro),e(ro,Za),e(ao,er),e(we,tr),e(we,ze),$(io,ze,null),e(ze,or),e(ze,pt),e(pt,nr),e(pt,tn),e(tn,sr),e(pt,ar),e(pt,Pn),e(Pn,rr),e(pt,ir),e(ze,lr),$(Mt,ze,null),e(ze,dr),e(ze,Sn),e(Sn,cr),e(ze,hr),$(lo,ze,null),u(o,Ms,f),u(o,ut,f),e(ut,Et),e(Et,jn),$(co,jn,null),e(ut,pr),e(ut,In),e(In,ur),u(o,Es,f),u(o,Le,f),$(ho,Le,null),e(Le,fr),e(Le,Nn),e(Nn,mr),e(Le,gr),e(Le,po),e(po,_r),e(po,on),e(on,Tr),e(po,vr),e(Le,kr),e(Le,uo),e(uo,Cr),e(uo,fo),e(fo,br),e(uo,yr),e(Le,wr),e(Le,qe),$(mo,qe,null),e(qe,Lr),e(qe,ft),e(ft,$r),e(ft,nn),e(nn,Rr),e(ft,Mr),e(ft,Dn),e(Dn,Er),e(ft,Fr),e(qe,zr),$(Ft,qe,null),e(qe,qr),e(qe,An),e(An,xr),e(qe,Pr),$(go,qe,null),u(o,Fs,f),u(o,mt,f),e(mt,zt),e(zt,Hn),$(_o,Hn,null),e(mt,Sr),e(mt,On),e(On,jr),u(o,zs,f),u(o,$e,f),$(To,$e,null),e($e,Ir),e($e,Re),e(Re,Nr),e(Re,sn),e(sn,Dr),e(Re,Ar),e(Re,Wn),e(Wn,Hr),e(Re,Or),e(Re,Bn),e(Bn,Wr),e(Re,Br),e(Re,Un),e(Un,Ur),e(Re,Vr),e(Re,Vn),e(Vn,Gr),e(Re,Kr),e($e,Jr),e($e,vo),e(vo,Xr),e(vo,an),e(an,Qr),e(vo,Yr),e($e,Zr),e($e,ko),e(ko,ei),e(ko,Co),e(Co,ti),e(ko,oi),e($e,ni),e($e,ie),$(bo,ie,null),e(ie,si),e(ie,gt),e(gt,ai),e(gt,rn),e(rn,ri),e(gt,ii),e(gt,Gn),e(Gn,li),e(gt,di),e(ie,ci),$(qt,ie,null),e(ie,hi),e(ie,Kn),e(Kn,pi),e(ie,ui),$(yo,ie,null),e(ie,fi),e(ie,Jn),e(Jn,mi),e(ie,gi),$(wo,ie,null),u(o,qs,f),u(o,_t,f),e(_t,xt),e(xt,Xn),$(Lo,Xn,null),e(_t,_i),e(_t,Qn),e(Qn,Ti),u(o,xs,f),u(o,le,f),$($o,le,null),e(le,vi),e(le,Yn),e(Yn,ki),e(le,Ci),e(le,Ro),e(Ro,bi),e(Ro,ln),e(ln,yi),e(Ro,wi),e(le,Li),e(le,Mo),e(Mo,$i),e(Mo,Eo),e(Eo,Ri),e(Mo,Mi),e(le,Ei),$(Pt,le,null),e(le,Fi),e(le,xe),$(Fo,xe,null),e(xe,zi),e(xe,Tt),e(Tt,qi),e(Tt,dn),e(dn,xi),e(Tt,Pi),e(Tt,Zn),e(Zn,Si),e(Tt,ji),e(xe,Ii),$(St,xe,null),e(x
e,Ni),e(xe,es),e(es,Di),e(xe,Ai),$(zo,xe,null),u(o,Ps,f),u(o,vt,f),e(vt,jt),e(jt,ts),$(qo,ts,null),e(vt,Hi),e(vt,os),e(os,Oi),u(o,Ss,f),u(o,de,f),$(xo,de,null),e(de,Wi),e(de,ns),e(ns,Bi),e(de,Ui),e(de,Po),e(Po,Vi),e(Po,cn),e(cn,Gi),e(Po,Ki),e(de,Ji),e(de,So),e(So,Xi),e(So,jo),e(jo,Qi),e(So,Yi),e(de,Zi),$(It,de,null),e(de,el),e(de,Pe),$(Io,Pe,null),e(Pe,tl),e(Pe,kt),e(kt,ol),e(kt,hn),e(hn,nl),e(kt,sl),e(kt,ss),e(ss,al),e(kt,rl),e(Pe,il),$(Nt,Pe,null),e(Pe,ll),e(Pe,as),e(as,dl),e(Pe,cl),$(No,Pe,null),u(o,js,f),u(o,Ct,f),e(Ct,Dt),e(Dt,rs),$(Do,rs,null),e(Ct,hl),e(Ct,is),e(is,pl),u(o,Is,f),u(o,J,f),$(Ao,J,null),e(J,ul),e(J,ls),e(ls,fl),e(J,ml),e(J,pn),e(pn,un),e(un,gl),e(pn,_l),e(J,Tl),e(J,De),e(De,vl),e(De,ds),e(ds,kl),e(De,Cl),e(De,cs),e(cs,bl),e(De,yl),e(De,hs),e(hs,wl),e(De,Ll),e(De,ps),e(ps,$l),e(De,Rl),e(J,Ml),e(J,Ho),e(Ho,El),e(Ho,fn),e(fn,Fl),e(Ho,zl),e(J,ql),e(J,Oo),e(Oo,xl),e(Oo,Wo),e(Wo,Pl),e(Oo,Sl),e(J,jl),$(At,J,null),e(J,Il),e(J,Se),$(Bo,Se,null),e(Se,Nl),e(Se,bt),e(bt,Dl),e(bt,mn),e(mn,Al),e(bt,Hl),e(bt,us),e(us,Ol),e(bt,Wl),e(Se,Bl),$(Ht,Se,null),e(Se,Ul),e(Se,fs),e(fs,Vl),e(Se,Gl),$(Uo,Se,null),Ns=!0},p(o,[f]){const Vo={};f&2&&(Vo.$$scope={dirty:f,ctx:o}),Mt.$set(Vo);const ms={};f&2&&(ms.$$scope={dirty:f,ctx:o}),Ft.$set(ms);const gs={};f&2&&(gs.$$scope={dirty:f,ctx:o}),qt.$set(gs);const _s={};f&2&&(_s.$$scope={dirty:f,ctx:o}),Pt.$set(_s);const Go={};f&2&&(Go.$$scope={dirty:f,ctx:o}),St.$set(Go);const Ts={};f&2&&(Ts.$$scope={dirty:f,ctx:o}),It.$set(Ts);const vs={};f&2&&(vs.$$scope={dirty:f,ctx:o}),Nt.$set(vs);const ks={};f&2&&(ks.$$scope={dirty:f,ctx:o}),At.$set(ks);const yt={};f&2&&(yt.$$scope={dirty:f,ctx:o}),Ht.$set(yt)},i(o){Ns||(R(T.$$.fragment,o),R(Y.$$.fragment,o),R(Jt.$$.fragment,o),R(Xt.$$.fragment,o),R(Yt.$$.fragment,o),R(Zt.$$.fragment,o),R(eo.$$.fragment,o),R(oo.$$.fragment,o),R(no.$$.fragment,o),R(io.$$.fragment,o),R(Mt.$$.fragment,o),R(lo.$$.fragment,o),R(co.$$.fragment,o),R(ho.$$.fragment,o),R(mo.$$.fragment,o),R(Ft.$$.fragment,o),R(go.$$.fragment,o),R(_o.$$.fragment,o),R(To.$$.fragment,o),R(bo.$$.fragment,o),R(qt.$$.fragment,o),R(yo.$$.fragment,o),R(wo.$$.fragment,o),R(Lo.$$.fragment,o),R($o.$$.fragment,o),R(Pt.$$.fragment,o),R(Fo.$$.fragment,o),R(St.$$.fragment,o),R(zo.$$.fragment,o),R(qo.$$.fragment,o),R(xo.$$.fragment,o),R(It.$$.fragment,o),R(Io.$$.fragment,o),R(Nt.$$.fragment,o),R(No.$$.fragment,o),R(Do.$$.fragment,o),R(Ao.$$.fragment,o),R(At.$$.fragment,o),R(Bo.$$.fragment,o),R(Ht.$$.fragment,o),R(Uo.$$.fragment,o),Ns=!0)},o(o){M(T.$$.fragment,o),M(Y.$$.fragment,o),M(Jt.$$.fragment,o),M(Xt.$$.fragment,o),M(Yt.$$.fragment,o),M(Zt.$$.fragment,o),M(eo.$$.fragment,o),M(oo.$$.fragment,o),M(no.$$.fragment,o),M(io.$$.fragment,o),M(Mt.$$.fragment,o),M(lo.$$.fragment,o),M(co.$$.fragment,o),M(ho.$$.fragment,o),M(mo.$$.fragment,o),M(Ft.$$.fragment,o),M(go.$$.fragment,o),M(_o.$$.fragment,o),M(To.$$.fragment,o),M(bo.$$.fragment,o),M(qt.$$.fragment,o),M(yo.$$.fragment,o),M(wo.$$.fragment,o),M(Lo.$$.fragment,o),M($o.$$.fragment,o),M(Pt.$$.fragment,o),M(Fo.$$.fragment,o),M(St.$$.fragment,o),M(zo.$$.fragment,o),M(qo.$$.fragment,o),M(xo.$$.fragment,o),M(It.$$.fragment,o),M(Io.$$.fragment,o),M(Nt.$$.fragment,o),M(No.$$.fragment,o),M(Do.$$.fragment,o),M(Ao.$$.fragment,o),M(At.$$.fragment,o),M(Bo.$$.fragment,o),M(Ht.$$.fragment,o),M(Uo.$$.fragment,o),Ns=!1},d(o){t(p),o&&t(C),o&&t(m),E(T),o&&t(Q),o&&t(b),E(Y),o&&t(ce),o&&t(P),o&&t(he),o&&t(ne),o&&t(pe),o&&t(se),o&&t(O),o&&t(ae),o&&t(W),o&&t(j),o&&t(Cs),o&&t(Xe),o&&t(bs),o&&t(lt),E(Jt),o&&t(ys),o&&t(ye),E(Xt),E(Yt),o&&t(ws),o&&t(c
t),E(Zt),o&&t(Ls),o&&t(Ne),E(eo),o&&t($s),o&&t(ht),E(oo),o&&t(Rs),o&&t(we),E(no),E(io),E(Mt),E(lo),o&&t(Ms),o&&t(ut),E(co),o&&t(Es),o&&t(Le),E(ho),E(mo),E(Ft),E(go),o&&t(Fs),o&&t(mt),E(_o),o&&t(zs),o&&t($e),E(To),E(bo),E(qt),E(yo),E(wo),o&&t(qs),o&&t(_t),E(Lo),o&&t(xs),o&&t(le),E($o),E(Pt),E(Fo),E(St),E(zo),o&&t(Ps),o&&t(vt),E(qo),o&&t(Ss),o&&t(de),E(xo),E(It),E(Io),E(Nt),E(No),o&&t(js),o&&t(Ct),E(Do),o&&t(Is),o&&t(J),E(Ao),E(At),E(Bo),E(Ht),E(Uo)}}}const Gc={local:"ctrl",sections:[{local:"overview",title:"Overview"},{local:"transformers.CTRLConfig",title:"CTRLConfig"},{local:"transformers.CTRLTokenizer",title:"CTRLTokenizer"},{local:"transformers.CTRLModel",title:"CTRLModel"},{local:"transformers.CTRLLMHeadModel",title:"CTRLLMHeadModel"},{local:"transformers.CTRLForSequenceClassification",title:"CTRLForSequenceClassification"},{local:"transformers.TFCTRLModel",title:"TFCTRLModel"},{local:"transformers.TFCTRLLMHeadModel",title:"TFCTRLLMHeadModel"},{local:"transformers.TFCTRLForSequenceClassification",title:"TFCTRLForSequenceClassification"}],title:"CTRL"};function Kc(B,p,C){let{fw:m}=p;return B.$$set=g=>{"fw"in g&&C(0,m=g.fw)},[m]}class th extends xc{constructor(p){super();Pc(this,p,Kc,Vc,Sc,{fw:0})}}export{th as default,Gc as metadata};
9,984
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/vit.mdx-003c5ca2.js
import{S as lp,i as dp,s as cp,e as a,k as d,w,t as r,L as hp,c as n,d as t,m as c,a as s,x as b,h as i,b as l,J as e,g as f,y,q as x,o as $,B as F}from"../../chunks/vendor-b1433968.js";import{T as Fe}from"../../chunks/Tip-c3840994.js";import{D as U}from"../../chunks/Docstring-ff504c58.js";import{C as No}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Ee}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function pp(I){let h,v,m,u,T;return{c(){h=a("p"),v=r(`This is a recently introduced model so the API hasn\u2019t been tested extensively. There may be some bugs or slight breaking changes to fix it in the future. If you see something strange, file a `),m=a("a"),u=r("Github Issue"),T=r("."),this.h()},l(_){h=n(_,"P",{});var g=s(h);v=i(g,`This is a recently introduced model so the API hasn\u2019t been tested extensively. There may be some bugs or slight breaking changes to fix it in the future. If you see something strange, file a `),m=n(g,"A",{href:!0,rel:!0});var V=s(m);u=i(V,"Github Issue"),V.forEach(t),T=i(g,"."),g.forEach(t),this.h()},h(){l(m,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),l(m,"rel","nofollow")},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function fp(I){let h,v;return{c(){h=a("p"),v=r(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(m){h=n(m,"P",{});var u=s(h);v=i(u,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),u.forEach(t)},m(m,u){f(m,h,u),e(h,v)},d(m){m&&t(h)}}}function mp(I){let h,v,m,u,T;return{c(){h=a("p"),v=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=n(_,"P",{});var g=s(h);v=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(g,"CODE",{});var V=s(m);u=i(V,"Module"),V.forEach(t),T=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function up(I){let h,v,m,u,T;return{c(){h=a("p"),v=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=n(_,"P",{});var g=s(h);v=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(g,"CODE",{});var V=s(m);u=i(V,"Module"),V.forEach(t),T=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function gp(I){let h,v,m,u,T,_,g,V,de,R,k,re,P,W,te,C,ce,oe;return{c(){h=a("p"),v=r("TF 2.0 models accepts two formats as inputs:"),m=d(),u=a("ul"),T=a("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),g=d(),V=a("li"),de=r("having all inputs as a list, tuple or dict in the first positional 
arguments."),R=d(),k=a("p"),re=r("This second option is useful when using "),P=a("code"),W=r("tf.keras.Model.fit"),te=r(` method which currently requires having all the tensors in the first argument of the model call function: `),C=a("code"),ce=r("model(inputs)"),oe=r(".")},l(E){h=n(E,"P",{});var j=s(h);v=i(j,"TF 2.0 models accepts two formats as inputs:"),j.forEach(t),m=c(E),u=n(E,"UL",{});var N=s(u);T=n(N,"LI",{});var Ve=s(T);_=i(Ve,"having all inputs as keyword arguments (like PyTorch models), or"),Ve.forEach(t),g=c(N),V=n(N,"LI",{});var ae=s(V);de=i(ae,"having all inputs as a list, tuple or dict in the first positional arguments."),ae.forEach(t),N.forEach(t),R=c(E),k=n(E,"P",{});var D=s(k);re=i(D,"This second option is useful when using "),P=n(D,"CODE",{});var ke=s(P);W=i(ke,"tf.keras.Model.fit"),ke.forEach(t),te=i(D,` method which currently requires having all the tensors in the first argument of the model call function: `),C=n(D,"CODE",{});var me=s(C);ce=i(me,"model(inputs)"),me.forEach(t),oe=i(D,"."),D.forEach(t)},m(E,j){f(E,h,j),e(h,v),f(E,m,j),f(E,u,j),e(u,T),e(T,_),e(u,g),e(u,V),e(V,de),f(E,R,j),f(E,k,j),e(k,re),e(k,P),e(P,W),e(k,te),e(k,C),e(C,ce),e(k,oe)},d(E){E&&t(h),E&&t(m),E&&t(u),E&&t(R),E&&t(k)}}}function _p(I){let h,v,m,u,T;return{c(){h=a("p"),v=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=n(_,"P",{});var g=s(h);v=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(g,"CODE",{});var V=s(m);u=i(V,"Module"),V.forEach(t),T=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function vp(I){let h,v,m,u,T,_,g,V,de,R,k,re,P,W,te,C,ce,oe;return{c(){h=a("p"),v=r("TF 2.0 models accepts two formats as inputs:"),m=d(),u=a("ul"),T=a("li"),_=r("having all inputs as keyword arguments (like PyTorch models), or"),g=d(),V=a("li"),de=r("having all inputs as a list, tuple or dict in the first positional arguments."),R=d(),k=a("p"),re=r("This second option is useful when using "),P=a("code"),W=r("tf.keras.Model.fit"),te=r(` method which currently requires having all the tensors in the first argument of the model call function: `),C=a("code"),ce=r("model(inputs)"),oe=r(".")},l(E){h=n(E,"P",{});var j=s(h);v=i(j,"TF 2.0 models accepts two formats as inputs:"),j.forEach(t),m=c(E),u=n(E,"UL",{});var N=s(u);T=n(N,"LI",{});var Ve=s(T);_=i(Ve,"having all inputs as keyword arguments (like PyTorch models), or"),Ve.forEach(t),g=c(N),V=n(N,"LI",{});var ae=s(V);de=i(ae,"having all inputs as a list, tuple or dict in the first positional arguments."),ae.forEach(t),N.forEach(t),R=c(E),k=n(E,"P",{});var D=s(k);re=i(D,"This second option is useful when using "),P=n(D,"CODE",{});var ke=s(P);W=i(ke,"tf.keras.Model.fit"),ke.forEach(t),te=i(D,` method which currently requires having all the tensors in the first argument of the model call function: `),C=n(D,"CODE",{});var 
me=s(C);ce=i(me,"model(inputs)"),me.forEach(t),oe=i(D,"."),D.forEach(t)},m(E,j){f(E,h,j),e(h,v),f(E,m,j),f(E,u,j),e(u,T),e(T,_),e(u,g),e(u,V),e(V,de),f(E,R,j),f(E,k,j),e(k,re),e(k,P),e(P,W),e(k,te),e(k,C),e(C,ce),e(k,oe)},d(E){E&&t(h),E&&t(m),E&&t(u),E&&t(R),E&&t(k)}}}function Tp(I){let h,v,m,u,T;return{c(){h=a("p"),v=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=n(_,"P",{});var g=s(h);v=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(g,"CODE",{});var V=s(m);u=i(V,"Module"),V.forEach(t),T=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function wp(I){let h,v,m,u,T;return{c(){h=a("p"),v=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=n(_,"P",{});var g=s(h);v=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(g,"CODE",{});var V=s(m);u=i(V,"Module"),V.forEach(t),T=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function bp(I){let h,v,m,u,T;return{c(){h=a("p"),v=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),u=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){h=n(_,"P",{});var g=s(h);v=i(g,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n(g,"CODE",{});var V=s(m);u=i(V,"Module"),V.forEach(t),T=i(g,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),g.forEach(t)},m(_,g){f(_,h,g),e(h,v),e(h,m),e(m,u),e(h,T)},d(_){_&&t(h)}}}function yp(I){let 
h,v,m,u,T,_,g,V,de,R,k,re,P,W,te,C,ce,oe,E,j,N,Ve,ae,D,ke,me,Lo,Vs,Cn,qo,Ta,ks,An,Do,Is,zn,L,gt,js,_t,Ps,Ms,Cs,wa,As,zs,vt,Ns,Oo,Ls,qs,Ds,Ie,Os,ba,Ss,Ws,Tt,Bs,Us,Rs,je,Hs,wt,Js,Ks,bt,Gs,Xs,Zs,Pe,Qs,yt,Ys,er,xt,tr,or,ar,ya,nr,Nn,So,sr,Ln,ue,xa,M,rr,Wo,ir,lr,Bo,dr,cr,Uo,hr,pr,$a,fr,mr,Fa,ur,gr,Ea,_r,vr,Va,Tr,wr,Ro,br,yr,xr,ka,$t,$r,Ho,Fr,Er,Vr,Ia,Ft,kr,Et,Ir,jr,qn,ge,Pr,Vt,Mr,Cr,kt,Ar,zr,Dn,Ke,Nr,It,Lr,qr,On,Me,Ge,ja,jt,Dr,Pa,Or,Sn,B,Pt,Sr,Ce,Wr,Jo,Br,Ur,Mt,Rr,Hr,Jr,Ae,Kr,Ko,Gr,Xr,Go,Zr,Qr,Yr,Ma,ei,ti,Ct,Wn,ze,Xe,Ca,At,oi,Aa,ai,Bn,ne,zt,ni,za,si,ri,Nt,ii,Na,li,di,ci,_e,Lt,hi,La,pi,fi,Ze,Un,Ne,Qe,qa,qt,mi,Da,ui,Rn,he,Dt,gi,Ot,_i,St,vi,Ti,wi,H,Wt,bi,Le,yi,Xo,xi,$i,Oa,Fi,Ei,Vi,Ye,ki,Sa,Ii,ji,Bt,Hn,qe,et,Wa,Ut,Pi,Ba,Mi,Jn,se,Rt,Ci,Ua,Ai,zi,Ht,Ni,Jt,Li,qi,Di,J,Kt,Oi,De,Si,Zo,Wi,Bi,Ra,Ui,Ri,Hi,tt,Ji,Ha,Ki,Gi,Gt,Kn,Oe,ot,Ja,Xt,Xi,Ka,Zi,Gn,O,Zt,Qi,Ga,Yi,el,Qt,tl,Qo,ol,al,nl,Yt,sl,eo,rl,il,ll,at,dl,K,to,cl,Se,hl,Yo,pl,fl,Xa,ml,ul,gl,nt,_l,Za,vl,Tl,oo,Xn,We,st,Qa,ao,wl,Ya,bl,Zn,S,no,yl,en,xl,$l,so,Fl,ea,El,Vl,kl,ro,Il,io,jl,Pl,Ml,rt,Cl,G,lo,Al,Be,zl,ta,Nl,Ll,tn,ql,Dl,Ol,it,Sl,on,Wl,Bl,co,Qn,Ue,lt,an,ho,Ul,nn,Rl,Yn,A,po,Hl,sn,Jl,Kl,fo,Gl,oa,Xl,Zl,Ql,mo,Yl,uo,ed,td,od,rn,ad,nd,pe,ln,go,sd,rd,dn,_o,id,ld,cn,vo,dd,cd,hn,To,hd,pd,X,wo,fd,Re,md,pn,ud,gd,fn,_d,vd,Td,dt,wd,mn,bd,yd,bo,es,He,ct,un,yo,xd,gn,$d,ts,z,xo,Fd,_n,Ed,Vd,$o,kd,aa,Id,jd,Pd,Fo,Md,Eo,Cd,Ad,zd,vn,Nd,Ld,fe,Tn,Vo,qd,Dd,wn,ko,Od,Sd,bn,Io,Wd,Bd,yn,jo,Ud,Rd,Z,Po,Hd,Je,Jd,xn,Kd,Gd,$n,Xd,Zd,Qd,ht,Yd,Fn,ec,tc,Mo,os;return _=new Ee({}),k=new Fe({props:{$$slots:{default:[pp]},$$scope:{ctx:I}}}),C=new Ee({}),jt=new Ee({}),Pt=new U({props:{name:"class transformers.ViTConfig",anchor:"transformers.ViTConfig",parameters:[{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"is_encoder_decoder",val:" = False"},{name:"image_size",val:" = 224"},{name:"patch_size",val:" = 16"},{name:"num_channels",val:" = 3"},{name:"qkv_bias",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/configuration_vit.py#L29",parametersDescription:[{anchor:"transformers.ViTConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.ViTConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.ViTConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.ViTConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.ViTConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or 
<code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.ViTConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.ViTConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.ViTConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.ViTConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.ViTConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>224</code>) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.ViTConfig.patch_size",description:`<strong>patch_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>16</code>) &#x2014; The size (resolution) of each patch.`,name:"patch_size"},{anchor:"transformers.ViTConfig.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to <code>3</code>) &#x2014; The number of input channels.`,name:"num_channels"},{anchor:"transformers.ViTConfig.qkv_bias",description:`<strong>qkv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to add a bias to the queries, keys and values.`,name:"qkv_bias"}]}}),Ct=new No({props:{code:`from transformers import ViTModel, ViTConfig # Initializing a ViT vit-base-patch16-224 style configuration configuration = ViTConfig() # Initializing a model from the vit-base-patch16-224 style configuration model = ViTModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTModel, ViTConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViT vit-base-patch16-224 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the vit-base-patch16-224 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ViTModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),At=new Ee({}),zt=new U({props:{name:"class 
transformers.ViTFeatureExtractor",anchor:"transformers.ViTFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 224"},{name:"resample",val:" = 2"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/feature_extraction_vit.py#L37",parametersDescription:[{anchor:"transformers.ViTFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.ViTFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.ViTFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.ViTFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with mean and standard deviation.`,name:"do_normalize"},{anchor:"transformers.ViTFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.5, 0.5, 0.5]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.`,name:"image_mean"},{anchor:"transformers.ViTFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.5, 0.5, 0.5]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.`,name:"image_std"}]}}),Lt=new U({props:{name:"__call__",anchor:"transformers.ViTFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/feature_extraction_vit.py#L83",parametersDescription:[{anchor:"transformers.ViTFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.ViTFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),Ze=new Fe({props:{warning:"&lcub;true}",$$slots:{default:[fp]},$$scope:{ctx:I}}}),qt=new Ee({}),Dt=new U({props:{name:"class transformers.ViTModel",anchor:"transformers.ViTModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_vit.py#L480",parametersDescription:[{anchor:"transformers.ViTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wt=new U({props:{name:"forward",anchor:"transformers.ViTModel.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"interpolate_pos_encoding",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_vit.py#L505",parametersDescription:[{anchor:"transformers.ViTModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. 
See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.ViTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ViTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ViTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ViTModel.forward.interpolate_pos_encoding",description:`<strong>interpolate_pos_encoding</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to interpolate the pre-trained position encodings.`,name:"interpolate_pos_encoding"},{anchor:"transformers.ViTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig" >ViTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ye=new Fe({props:{$$slots:{default:[mp]},$$scope:{ctx:I}}}),Bt=new No({props:{code:`from transformers import ViTFeatureExtractor, ViTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor, ViTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ViTModel.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ut=new Ee({}),Rt=new U({props:{name:"class 
transformers.ViTForImageClassification",anchor:"transformers.ViTForImageClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_vit.py#L599",parametersDescription:[{anchor:"transformers.ViTForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new U({props:{name:"forward",anchor:"transformers.ViTForImageClassification.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"head_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"interpolate_pos_encoding",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_vit.py#L612",parametersDescription:[{anchor:"transformers.ViTForImageClassification.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.ViTForImageClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ViTForImageClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ViTForImageClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ViTForImageClassification.forward.interpolate_pos_encoding",description:`<strong>interpolate_pos_encoding</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to interpolate the pre-trained position encodings.`,name:"interpolate_pos_encoding"},{anchor:"transformers.ViTForImageClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.ViTForImageClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig" >ViTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tt=new Fe({props:{$$slots:{default:[up]},$$scope:{ctx:I}}}),Gt=new No({props:{code:`from 
transformers import ViTFeatureExtractor, ViTForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224') model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor, ViTForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ViTForImageClassification.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),Xt=new Ee({}),Zt=new U({props:{name:"class transformers.TFViTModel",anchor:"transformers.TFViTModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_tf_vit.py#L655",parametersDescription:[{anchor:"transformers.TFViTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),at=new Fe({props:{$$slots:{default:[gp]},$$scope:{ctx:I}}}),to=new U({props:{name:"call",anchor:"transformers.TFViTModel.call",parameters:[{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"interpolate_pos_encoding",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_tf_vit.py#L661",parametersDescription:[{anchor:"transformers.TFViTModel.call.pixel_values",description:'<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.',name:"pixel_values"},{anchor:"transformers.TFViTModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFViTModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFViTModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFViTModel.call.interpolate_pos_encoding",description:`<strong>interpolate_pos_encoding</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to interpolate the pre-trained position encodings.`,name:"interpolate_pos_encoding"},{anchor:"transformers.TFViTModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFViTModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig" >ViTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),nt=new Fe({props:{$$slots:{default:[_p]},$$scope:{ctx:I}}}),oo=new No({props:{code:`from transformers import ViTFeatureExtractor, TFViTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') model = TFViTModel.from_pretrained('google/vit-base-patch16-224-in21k') inputs = feature_extractor(images=image, return_tensors="tf") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor, TFViTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFViTModel.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ao=new 
Ee({}),no=new U({props:{name:"class transformers.TFViTForImageClassification",anchor:"transformers.TFViTForImageClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_tf_vit.py#L761",parametersDescription:[{anchor:"transformers.TFViTForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rt=new Fe({props:{$$slots:{default:[vp]},$$scope:{ctx:I}}}),lo=new U({props:{name:"call",anchor:"transformers.TFViTForImageClassification.call",parameters:[{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"interpolate_pos_encoding",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_tf_vit.py#L775",parametersDescription:[{anchor:"transformers.TFViTForImageClassification.call.pixel_values",description:'<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.',name:"pixel_values"},{anchor:"transformers.TFViTForImageClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFViTForImageClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFViTForImageClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFViTForImageClassification.call.interpolate_pos_encoding",description:`<strong>interpolate_pos_encoding</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to interpolate the pre-trained position encodings.`,name:"interpolate_pos_encoding"},{anchor:"transformers.TFViTForImageClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFViTForImageClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFViTForImageClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig" >ViTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),it=new Fe({props:{$$slots:{default:[Tp]},$$scope:{ctx:I}}}),co=new No({props:{code:`from transformers import ViTFeatureExtractor, TFViTForImageClassification import tensorflow as tf from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224') model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224') inputs = feature_extractor(images=image, return_tensors="tf") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = tf.math.argmax(logits, axis=-1)[0] print("Predicted class:", model.config.id2label[int(predicted_class_idx)]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor, TFViTForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFViTForImageClassification.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = tf.math.argmax(logits, axis=-<span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[<span class="hljs-built_in">int</span>(predicted_class_idx)])`}}),ho=new Ee({}),po=new U({props:{name:"class transformers.FlaxViTModel",anchor:"transformers.FlaxViTModel",parameters:[{name:"config",val:": ViTConfig"},{name:"input_shape",val:" = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_flax_vit.py#L511",parametersDescription:[{anchor:"transformers.FlaxViTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxViTModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),wo=new U({props:{name:"__call__",anchor:"transformers.FlaxViTPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_flax_vit.py#L426",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.vit.configuration_vit.ViTConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),dt=new Fe({props:{$$slots:{default:[wp]},$$scope:{ctx:I}}}),bo=new No({props:{code:`from transformers import ViTFeatureExtractor, FlaxViTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') model = FlaxViTModel.from_pretrained('google/vit-base-patch16-224-in21k') inputs = feature_extractor(images=image, return_tensors="np") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor, FlaxViTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxViTModel.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224-in21k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),yo=new Ee({}),xo=new U({props:{name:"class transformers.FlaxViTForImageClassification",anchor:"transformers.FlaxViTForImageClassification",parameters:[{name:"config",val:": 
ViTConfig"},{name:"input_shape",val:" = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_flax_vit.py#L592",parametersDescription:[{anchor:"transformers.FlaxViTForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTConfig">ViTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxViTForImageClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Po=new U({props:{name:"__call__",anchor:"transformers.FlaxViTPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/modeling_flax_vit.py#L426",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.vit.configuration_vit.ViTConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ht=new Fe({props:{$$slots:{default:[bp]},$$scope:{ctx:I}}}),Mo=new No({props:{code:`from transformers import ViTFeatureExtractor, FlaxViTForImageClassification from PIL import Image import jax import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224') model = FlaxViTForImageClassification.from_pretrained('google/vit-base-patch16-224') inputs = feature_extractor(images=image, return_tensors="np") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = jax.numpy.argmax(logits, axis=-1) print("Predicted class:", model.config.id2label[predicted_class_idx.item()]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor, FlaxViTForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxViTForImageClassification.from_pretrained(<span class="hljs-string">&#x27;google/vit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = jax.numpy.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, 
model.config.id2label[predicted_class_idx.item()])`}}),{c(){h=a("meta"),v=d(),m=a("h1"),u=a("a"),T=a("span"),w(_.$$.fragment),g=d(),V=a("span"),de=r("Vision Transformer (ViT)"),R=d(),w(k.$$.fragment),re=d(),P=a("h2"),W=a("a"),te=a("span"),w(C.$$.fragment),ce=d(),oe=a("span"),E=r("Overview"),j=d(),N=a("p"),Ve=r("The Vision Transformer (ViT) model was proposed in "),ae=a("a"),D=r(`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`),ke=r(` by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. It\u2019s the first paper that successfully trains a Transformer encoder on ImageNet, attaining very good results compared to familiar convolutional architectures.`),me=d(),Lo=a("p"),Vs=r("The abstract from the paper is the following:"),Cn=d(),qo=a("p"),Ta=a("em"),ks=r(`While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.`),An=d(),Do=a("p"),Is=r("Tips:"),zn=d(),L=a("ul"),gt=a("li"),js=r("Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found "),_t=a("a"),Ps=r("here"),Ms=r("."),Cs=d(),wa=a("li"),As=r(`To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.`),zs=d(),vt=a("li"),Ns=r(`As the Vision Transformer expects each image to be of the same size (resolution), one can use `),Oo=a("a"),Ls=r("ViTFeatureExtractor"),qs=r(" to resize (or rescale) and normalize images for the model."),Ds=d(),Ie=a("li"),Os=r(`Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `),ba=a("code"),Ss=r("google/vit-base-patch16-224"),Ws=r(` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the `),Tt=a("a"),Bs=r("hub"),Us=r("."),Rs=d(),je=a("li"),Hs=r("The available checkpoints are either (1) pre-trained on "),wt=a("a"),Js=r("ImageNet-21k"),Ks=r(` (a collection of 14 million images and 21k classes) only, or (2) also fine-tuned on `),bt=a("a"),Gs=r("ImageNet"),Xs=r(` (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes).`),Zs=d(),Pe=a("li"),Qs=r(`The Vision Transformer was pre-trained using a resolution of 224x224. 
During fine-tuning, it is often beneficial to use a higher resolution than pre-training `),yt=a("a"),Ys=r("(Touvron et al., 2019)"),er=r(", "),xt=a("a"),tr=r(`(Kolesnikov et al., 2020)`),or=r(`. In order to fine-tune at higher resolution, the authors perform 2D interpolation of the pre-trained position embeddings, according to their location in the original image.`),ar=d(),ya=a("li"),nr=r(`The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed an experiment with a self-supervised pre-training objective, namely masked patched prediction (inspired by masked language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant improvement of 2% to training from scratch, but still 4% behind supervised pre-training.`),Nn=d(),So=a("p"),sr=r("Following the original Vision Transformer, some follow-up works have been made:"),Ln=d(),ue=a("ul"),xa=a("li"),M=a("p"),rr=r(`DeiT (Data-efficient Image Transformers) by Facebook AI. DeiT models are distilled vision transformers. Refer to `),Wo=a("a"),ir=r("DeiT\u2019s documentation page"),lr=r(`. The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into `),Bo=a("a"),dr=r("ViTModel"),cr=r(" or "),Uo=a("a"),hr=r("ViTForImageClassification"),pr=r(`. There are 4 variants available (in 3 different sizes): `),$a=a("em"),fr=r("facebook/deit-tiny-patch16-224"),mr=r(", "),Fa=a("em"),ur=r("facebook/deit-small-patch16-224"),gr=r(`, `),Ea=a("em"),_r=r("facebook/deit-base-patch16-224"),vr=r(" and "),Va=a("em"),Tr=r("facebook/deit-base-patch16-384"),wr=r(`. Note that one should use `),Ro=a("a"),br=r("DeiTFeatureExtractor"),yr=r(" in order to prepare images for the model."),xr=d(),ka=a("li"),$t=a("p"),$r=r(`BEiT (BERT pre-training of Image Transformers) by Microsoft Research. BEiT models outperform supervised pre-trained vision transformers using a self-supervised method inspired by BERT (masked image modeling) and based on a VQ-VAE. Refer to `),Ho=a("a"),Fr=r("BEiT\u2019s documentation page"),Er=r("."),Vr=d(),Ia=a("li"),Ft=a("p"),kr=r(`DINO (a method for self-supervised training of Vision Transformers) by Facebook AI. Vision Transformers trained using the DINO method show very interesting properties not seen with convolutional models. They are capable of segmenting objects, without having ever been trained to do so. DINO checkpoints can be found on the `),Et=a("a"),Ir=r("hub"),jr=r("."),qn=d(),ge=a("p"),Pr=r("This model was contributed by "),Vt=a("a"),Mr=r("nielsr"),Cr=r(`. The original code (written in JAX) can be found `),kt=a("a"),Ar=r("here"),zr=r("."),Dn=d(),Ke=a("p"),Nr=r("Note that we converted the weights from Ross Wightman\u2019s "),It=a("a"),Lr=r("timm library"),qr=r(`, who already converted the weights from JAX to PyTorch. Credits go to him!`),On=d(),Me=a("h2"),Ge=a("a"),ja=a("span"),w(jt.$$.fragment),Dr=d(),Pa=a("span"),Or=r("ViTConfig"),Sn=d(),B=a("div"),w(Pt.$$.fragment),Sr=d(),Ce=a("p"),Wr=r("This is the configuration class to store the configuration of a "),Jo=a("a"),Br=r("ViTModel"),Ur=r(`. It is used to instantiate an ViT model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the ViT `),Mt=a("a"),Rr=r("google/vit-base-patch16-224"),Hr=r(" architecture."),Jr=d(),Ae=a("p"),Kr=r("Configuration objects inherit from "),Ko=a("a"),Gr=r("PretrainedConfig"),Xr=r(` and can be used to control the model outputs. Read the documentation from `),Go=a("a"),Zr=r("PretrainedConfig"),Qr=r(" for more information."),Yr=d(),Ma=a("p"),ei=r("Example:"),ti=d(),w(Ct.$$.fragment),Wn=d(),ze=a("h2"),Xe=a("a"),Ca=a("span"),w(At.$$.fragment),oi=d(),Aa=a("span"),ai=r("ViTFeatureExtractor"),Bn=d(),ne=a("div"),w(zt.$$.fragment),ni=d(),za=a("p"),si=r("Constructs a ViT feature extractor."),ri=d(),Nt=a("p"),ii=r("This feature extractor inherits from "),Na=a("code"),li=r("FeatureExtractionMixin"),di=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ci=d(),_e=a("div"),w(Lt.$$.fragment),hi=d(),La=a("p"),pi=r("Main method to prepare for the model one or several image(s)."),fi=d(),w(Ze.$$.fragment),Un=d(),Ne=a("h2"),Qe=a("a"),qa=a("span"),w(qt.$$.fragment),mi=d(),Da=a("span"),ui=r("ViTModel"),Rn=d(),he=a("div"),w(Dt.$$.fragment),gi=d(),Ot=a("p"),_i=r(`The bare ViT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),St=a("a"),vi=r("torch.nn.Module"),Ti=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wi=d(),H=a("div"),w(Wt.$$.fragment),bi=d(),Le=a("p"),yi=r("The "),Xo=a("a"),xi=r("ViTModel"),$i=r(" forward method, overrides the "),Oa=a("code"),Fi=r("__call__"),Ei=r(" special method."),Vi=d(),w(Ye.$$.fragment),ki=d(),Sa=a("p"),Ii=r("Examples:"),ji=d(),w(Bt.$$.fragment),Hn=d(),qe=a("h2"),et=a("a"),Wa=a("span"),w(Ut.$$.fragment),Pi=d(),Ba=a("span"),Mi=r("ViTForImageClassification"),Jn=d(),se=a("div"),w(Rt.$$.fragment),Ci=d(),Ua=a("p"),Ai=r(`ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),zi=d(),Ht=a("p"),Ni=r("This model is a PyTorch "),Jt=a("a"),Li=r("torch.nn.Module"),qi=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Di=d(),J=a("div"),w(Kt.$$.fragment),Oi=d(),De=a("p"),Si=r("The "),Zo=a("a"),Wi=r("ViTForImageClassification"),Bi=r(" forward method, overrides the "),Ra=a("code"),Ui=r("__call__"),Ri=r(" special method."),Hi=d(),w(tt.$$.fragment),Ji=d(),Ha=a("p"),Ki=r("Examples:"),Gi=d(),w(Gt.$$.fragment),Kn=d(),Oe=a("h2"),ot=a("a"),Ja=a("span"),w(Xt.$$.fragment),Xi=d(),Ka=a("span"),Zi=r("TFViTModel"),Gn=d(),O=a("div"),w(Zt.$$.fragment),Qi=d(),Ga=a("p"),Yi=r("The bare ViT Model transformer outputting raw hidden-states without any specific head on top."),el=d(),Qt=a("p"),tl=r("This model inherits from "),Qo=a("a"),ol=r("TFPreTrainedModel"),al=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nl=d(),Yt=a("p"),sl=r("This model is also a "),eo=a("a"),rl=r("tf.keras.Model"),il=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ll=d(),w(at.$$.fragment),dl=d(),K=a("div"),w(to.$$.fragment),cl=d(),Se=a("p"),hl=r("The "),Yo=a("a"),pl=r("TFViTModel"),fl=r(" forward method, overrides the "),Xa=a("code"),ml=r("__call__"),ul=r(" special method."),gl=d(),w(nt.$$.fragment),_l=d(),Za=a("p"),vl=r("Examples:"),Tl=d(),w(oo.$$.fragment),Xn=d(),We=a("h2"),st=a("a"),Qa=a("span"),w(ao.$$.fragment),wl=d(),Ya=a("span"),bl=r("TFViTForImageClassification"),Zn=d(),S=a("div"),w(no.$$.fragment),yl=d(),en=a("p"),xl=r(`ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),$l=d(),so=a("p"),Fl=r("This model inherits from "),ea=a("a"),El=r("TFPreTrainedModel"),Vl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),kl=d(),ro=a("p"),Il=r("This model is also a "),io=a("a"),jl=r("tf.keras.Model"),Pl=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ml=d(),w(rt.$$.fragment),Cl=d(),G=a("div"),w(lo.$$.fragment),Al=d(),Be=a("p"),zl=r("The "),ta=a("a"),Nl=r("TFViTForImageClassification"),Ll=r(" forward method, overrides the "),tn=a("code"),ql=r("__call__"),Dl=r(" special method."),Ol=d(),w(it.$$.fragment),Sl=d(),on=a("p"),Wl=r("Examples:"),Bl=d(),w(co.$$.fragment),Qn=d(),Ue=a("h2"),lt=a("a"),an=a("span"),w(ho.$$.fragment),Ul=d(),nn=a("span"),Rl=r("FlaxVitModel"),Yn=d(),A=a("div"),w(po.$$.fragment),Hl=d(),sn=a("p"),Jl=r("The bare ViT Model transformer outputting raw hidden-states without any specific head on top."),Kl=d(),fo=a("p"),Gl=r("This model inherits from "),oa=a("a"),Xl=r("FlaxPreTrainedModel"),Zl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Ql=d(),mo=a("p"),Yl=r("This model is also a Flax Linen "),uo=a("a"),ed=r("flax.linen.Module"),td=r(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),od=d(),rn=a("p"),ad=r("Finally, this model supports inherent JAX features such as:"),nd=d(),pe=a("ul"),ln=a("li"),go=a("a"),sd=r("Just-In-Time (JIT) compilation"),rd=d(),dn=a("li"),_o=a("a"),id=r("Automatic Differentiation"),ld=d(),cn=a("li"),vo=a("a"),dd=r("Vectorization"),cd=d(),hn=a("li"),To=a("a"),hd=r("Parallelization"),pd=d(),X=a("div"),w(wo.$$.fragment),fd=d(),Re=a("p"),md=r("The "),pn=a("code"),ud=r("FlaxViTPreTrainedModel"),gd=r(" forward method, overrides the "),fn=a("code"),_d=r("__call__"),vd=r(" special method."),Td=d(),w(dt.$$.fragment),wd=d(),mn=a("p"),bd=r("Examples:"),yd=d(),w(bo.$$.fragment),es=d(),He=a("h2"),ct=a("a"),un=a("span"),w(yo.$$.fragment),xd=d(),gn=a("span"),$d=r("FlaxViTForImageClassification"),ts=d(),z=a("div"),w(xo.$$.fragment),Fd=d(),_n=a("p"),Ed=r(`ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),Vd=d(),$o=a("p"),kd=r("This model inherits from "),aa=a("a"),Id=r("FlaxPreTrainedModel"),jd=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Pd=d(),Fo=a("p"),Md=r("This model is also a Flax Linen "),Eo=a("a"),Cd=r("flax.linen.Module"),Ad=r(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),zd=d(),vn=a("p"),Nd=r("Finally, this model supports inherent JAX features such as:"),Ld=d(),fe=a("ul"),Tn=a("li"),Vo=a("a"),qd=r("Just-In-Time (JIT) compilation"),Dd=d(),wn=a("li"),ko=a("a"),Od=r("Automatic Differentiation"),Sd=d(),bn=a("li"),Io=a("a"),Wd=r("Vectorization"),Bd=d(),yn=a("li"),jo=a("a"),Ud=r("Parallelization"),Rd=d(),Z=a("div"),w(Po.$$.fragment),Hd=d(),Je=a("p"),Jd=r("The "),xn=a("code"),Kd=r("FlaxViTPreTrainedModel"),Gd=r(" forward method, overrides the "),$n=a("code"),Xd=r("__call__"),Zd=r(" special method."),Qd=d(),w(ht.$$.fragment),Yd=d(),Fn=a("p"),ec=r("Example:"),tc=d(),w(Mo.$$.fragment),this.h()},l(o){const p=hp('[data-svelte="svelte-1phssyn"]',document.head);h=n(p,"META",{name:!0,content:!0}),p.forEach(t),v=c(o),m=n(o,"H1",{class:!0});var Co=s(m);u=n(Co,"A",{id:!0,class:!0,href:!0});var En=s(u);T=n(En,"SPAN",{});var Vn=s(T);b(_.$$.fragment,Vn),Vn.forEach(t),En.forEach(t),g=c(Co),V=n(Co,"SPAN",{});var kn=s(V);de=i(kn,"Vision Transformer (ViT)"),kn.forEach(t),Co.forEach(t),R=c(o),b(k.$$.fragment,o),re=c(o),P=n(o,"H2",{class:!0});var Ao=s(P);W=n(Ao,"A",{id:!0,class:!0,href:!0});var In=s(W);te=n(In,"SPAN",{});var jn=s(te);b(C.$$.fragment,jn),jn.forEach(t),In.forEach(t),ce=c(Ao),oe=n(Ao,"SPAN",{});var Pn=s(oe);E=i(Pn,"Overview"),Pn.forEach(t),Ao.forEach(t),j=c(o),N=n(o,"P",{});var zo=s(N);Ve=i(zo,"The Vision Transformer (ViT) model was proposed in "),ae=n(zo,"A",{href:!0,rel:!0});var Mn=s(ae);D=i(Mn,`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`),Mn.forEach(t),ke=i(zo,` by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. It\u2019s the first paper that successfully trains a Transformer encoder on ImageNet, attaining very good results compared to familiar convolutional architectures.`),zo.forEach(t),me=c(o),Lo=n(o,"P",{});var oc=s(Lo);Vs=i(oc,"The abstract from the paper is the following:"),oc.forEach(t),Cn=c(o),qo=n(o,"P",{});var ac=s(qo);Ta=n(ac,"EM",{});var nc=s(Ta);ks=i(nc,`While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. 
When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.`),nc.forEach(t),ac.forEach(t),An=c(o),Do=n(o,"P",{});var sc=s(Do);Is=i(sc,"Tips:"),sc.forEach(t),zn=c(o),L=n(o,"UL",{});var Q=s(L);gt=n(Q,"LI",{});var as=s(gt);js=i(as,"Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found "),_t=n(as,"A",{href:!0,rel:!0});var rc=s(_t);Ps=i(rc,"here"),rc.forEach(t),Ms=i(as,"."),as.forEach(t),Cs=c(Q),wa=n(Q,"LI",{});var ic=s(wa);As=i(ic,`To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.`),ic.forEach(t),zs=c(Q),vt=n(Q,"LI",{});var ns=s(vt);Ns=i(ns,`As the Vision Transformer expects each image to be of the same size (resolution), one can use `),Oo=n(ns,"A",{href:!0});var lc=s(Oo);Ls=i(lc,"ViTFeatureExtractor"),lc.forEach(t),qs=i(ns," to resize (or rescale) and normalize images for the model."),ns.forEach(t),Ds=c(Q),Ie=n(Q,"LI",{});var na=s(Ie);Os=i(na,`Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `),ba=n(na,"CODE",{});var dc=s(ba);Ss=i(dc,"google/vit-base-patch16-224"),dc.forEach(t),Ws=i(na,` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the `),Tt=n(na,"A",{href:!0,rel:!0});var cc=s(Tt);Bs=i(cc,"hub"),cc.forEach(t),Us=i(na,"."),na.forEach(t),Rs=c(Q),je=n(Q,"LI",{});var sa=s(je);Hs=i(sa,"The available checkpoints are either (1) pre-trained on "),wt=n(sa,"A",{href:!0,rel:!0});var hc=s(wt);Js=i(hc,"ImageNet-21k"),hc.forEach(t),Ks=i(sa,` (a collection of 14 million images and 21k classes) only, or (2) also fine-tuned on `),bt=n(sa,"A",{href:!0,rel:!0});var pc=s(bt);Gs=i(pc,"ImageNet"),pc.forEach(t),Xs=i(sa,` (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes).`),sa.forEach(t),Zs=c(Q),Pe=n(Q,"LI",{});var ra=s(Pe);Qs=i(ra,`The Vision Transformer was pre-trained using a resolution of 224x224. During fine-tuning, it is often beneficial to use a higher resolution than pre-training `),yt=n(ra,"A",{href:!0,rel:!0});var fc=s(yt);Ys=i(fc,"(Touvron et al., 2019)"),fc.forEach(t),er=i(ra,", "),xt=n(ra,"A",{href:!0,rel:!0});var mc=s(xt);tr=i(mc,`(Kolesnikov et al., 2020)`),mc.forEach(t),or=i(ra,`. In order to fine-tune at higher resolution, the authors perform 2D interpolation of the pre-trained position embeddings, according to their location in the original image.`),ra.forEach(t),ar=c(Q),ya=n(Q,"LI",{});var uc=s(ya);nr=i(uc,`The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed an experiment with a self-supervised pre-training objective, namely masked patched prediction (inspired by masked language modeling). 
With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant improvement of 2% to training from scratch, but still 4% behind supervised pre-training.`),uc.forEach(t),Q.forEach(t),Nn=c(o),So=n(o,"P",{});var gc=s(So);sr=i(gc,"Following the original Vision Transformer, some follow-up works have been made:"),gc.forEach(t),Ln=c(o),ue=n(o,"UL",{});var ia=s(ue);xa=n(ia,"LI",{});var _c=s(xa);M=n(_c,"P",{});var q=s(M);rr=i(q,`DeiT (Data-efficient Image Transformers) by Facebook AI. DeiT models are distilled vision transformers. Refer to `),Wo=n(q,"A",{href:!0});var vc=s(Wo);ir=i(vc,"DeiT\u2019s documentation page"),vc.forEach(t),lr=i(q,`. The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into `),Bo=n(q,"A",{href:!0});var Tc=s(Bo);dr=i(Tc,"ViTModel"),Tc.forEach(t),cr=i(q," or "),Uo=n(q,"A",{href:!0});var wc=s(Uo);hr=i(wc,"ViTForImageClassification"),wc.forEach(t),pr=i(q,`. There are 4 variants available (in 3 different sizes): `),$a=n(q,"EM",{});var bc=s($a);fr=i(bc,"facebook/deit-tiny-patch16-224"),bc.forEach(t),mr=i(q,", "),Fa=n(q,"EM",{});var yc=s(Fa);ur=i(yc,"facebook/deit-small-patch16-224"),yc.forEach(t),gr=i(q,`, `),Ea=n(q,"EM",{});var xc=s(Ea);_r=i(xc,"facebook/deit-base-patch16-224"),xc.forEach(t),vr=i(q," and "),Va=n(q,"EM",{});var $c=s(Va);Tr=i($c,"facebook/deit-base-patch16-384"),$c.forEach(t),wr=i(q,`. Note that one should use `),Ro=n(q,"A",{href:!0});var Fc=s(Ro);br=i(Fc,"DeiTFeatureExtractor"),Fc.forEach(t),yr=i(q," in order to prepare images for the model."),q.forEach(t),_c.forEach(t),xr=c(ia),ka=n(ia,"LI",{});var Ec=s(ka);$t=n(Ec,"P",{});var ss=s($t);$r=i(ss,`BEiT (BERT pre-training of Image Transformers) by Microsoft Research. BEiT models outperform supervised pre-trained vision transformers using a self-supervised method inspired by BERT (masked image modeling) and based on a VQ-VAE. Refer to `),Ho=n(ss,"A",{href:!0});var Vc=s(Ho);Fr=i(Vc,"BEiT\u2019s documentation page"),Vc.forEach(t),Er=i(ss,"."),ss.forEach(t),Ec.forEach(t),Vr=c(ia),Ia=n(ia,"LI",{});var kc=s(Ia);Ft=n(kc,"P",{});var rs=s(Ft);kr=i(rs,`DINO (a method for self-supervised training of Vision Transformers) by Facebook AI. Vision Transformers trained using the DINO method show very interesting properties not seen with convolutional models. They are capable of segmenting objects, without having ever been trained to do so. DINO checkpoints can be found on the `),Et=n(rs,"A",{href:!0,rel:!0});var Ic=s(Et);Ir=i(Ic,"hub"),Ic.forEach(t),jr=i(rs,"."),rs.forEach(t),kc.forEach(t),ia.forEach(t),qn=c(o),ge=n(o,"P",{});var la=s(ge);Pr=i(la,"This model was contributed by "),Vt=n(la,"A",{href:!0,rel:!0});var jc=s(Vt);Mr=i(jc,"nielsr"),jc.forEach(t),Cr=i(la,`. The original code (written in JAX) can be found `),kt=n(la,"A",{href:!0,rel:!0});var Pc=s(kt);Ar=i(Pc,"here"),Pc.forEach(t),zr=i(la,"."),la.forEach(t),Dn=c(o),Ke=n(o,"P",{});var is=s(Ke);Nr=i(is,"Note that we converted the weights from Ross Wightman\u2019s "),It=n(is,"A",{href:!0,rel:!0});var Mc=s(It);Lr=i(Mc,"timm library"),Mc.forEach(t),qr=i(is,`, who already converted the weights from JAX to PyTorch. 
Credits go to him!`),is.forEach(t),On=c(o),Me=n(o,"H2",{class:!0});var ls=s(Me);Ge=n(ls,"A",{id:!0,class:!0,href:!0});var Cc=s(Ge);ja=n(Cc,"SPAN",{});var Ac=s(ja);b(jt.$$.fragment,Ac),Ac.forEach(t),Cc.forEach(t),Dr=c(ls),Pa=n(ls,"SPAN",{});var zc=s(Pa);Or=i(zc,"ViTConfig"),zc.forEach(t),ls.forEach(t),Sn=c(o),B=n(o,"DIV",{class:!0});var ve=s(B);b(Pt.$$.fragment,ve),Sr=c(ve),Ce=n(ve,"P",{});var da=s(Ce);Wr=i(da,"This is the configuration class to store the configuration of a "),Jo=n(da,"A",{href:!0});var Nc=s(Jo);Br=i(Nc,"ViTModel"),Nc.forEach(t),Ur=i(da,`. It is used to instantiate an ViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViT `),Mt=n(da,"A",{href:!0,rel:!0});var Lc=s(Mt);Rr=i(Lc,"google/vit-base-patch16-224"),Lc.forEach(t),Hr=i(da," architecture."),da.forEach(t),Jr=c(ve),Ae=n(ve,"P",{});var ca=s(Ae);Kr=i(ca,"Configuration objects inherit from "),Ko=n(ca,"A",{href:!0});var qc=s(Ko);Gr=i(qc,"PretrainedConfig"),qc.forEach(t),Xr=i(ca,` and can be used to control the model outputs. Read the documentation from `),Go=n(ca,"A",{href:!0});var Dc=s(Go);Zr=i(Dc,"PretrainedConfig"),Dc.forEach(t),Qr=i(ca," for more information."),ca.forEach(t),Yr=c(ve),Ma=n(ve,"P",{});var Oc=s(Ma);ei=i(Oc,"Example:"),Oc.forEach(t),ti=c(ve),b(Ct.$$.fragment,ve),ve.forEach(t),Wn=c(o),ze=n(o,"H2",{class:!0});var ds=s(ze);Xe=n(ds,"A",{id:!0,class:!0,href:!0});var Sc=s(Xe);Ca=n(Sc,"SPAN",{});var Wc=s(Ca);b(At.$$.fragment,Wc),Wc.forEach(t),Sc.forEach(t),oi=c(ds),Aa=n(ds,"SPAN",{});var Bc=s(Aa);ai=i(Bc,"ViTFeatureExtractor"),Bc.forEach(t),ds.forEach(t),Bn=c(o),ne=n(o,"DIV",{class:!0});var pt=s(ne);b(zt.$$.fragment,pt),ni=c(pt),za=n(pt,"P",{});var Uc=s(za);si=i(Uc,"Constructs a ViT feature extractor."),Uc.forEach(t),ri=c(pt),Nt=n(pt,"P",{});var cs=s(Nt);ii=i(cs,"This feature extractor inherits from "),Na=n(cs,"CODE",{});var Rc=s(Na);li=i(Rc,"FeatureExtractionMixin"),Rc.forEach(t),di=i(cs,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),cs.forEach(t),ci=c(pt),_e=n(pt,"DIV",{class:!0});var ha=s(_e);b(Lt.$$.fragment,ha),hi=c(ha),La=n(ha,"P",{});var Hc=s(La);pi=i(Hc,"Main method to prepare for the model one or several image(s)."),Hc.forEach(t),fi=c(ha),b(Ze.$$.fragment,ha),ha.forEach(t),pt.forEach(t),Un=c(o),Ne=n(o,"H2",{class:!0});var hs=s(Ne);Qe=n(hs,"A",{id:!0,class:!0,href:!0});var Jc=s(Qe);qa=n(Jc,"SPAN",{});var Kc=s(qa);b(qt.$$.fragment,Kc),Kc.forEach(t),Jc.forEach(t),mi=c(hs),Da=n(hs,"SPAN",{});var Gc=s(Da);ui=i(Gc,"ViTModel"),Gc.forEach(t),hs.forEach(t),Rn=c(o),he=n(o,"DIV",{class:!0});var pa=s(he);b(Dt.$$.fragment,pa),gi=c(pa),Ot=n(pa,"P",{});var ps=s(Ot);_i=i(ps,`The bare ViT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),St=n(ps,"A",{href:!0,rel:!0});var Xc=s(St);vi=i(Xc,"torch.nn.Module"),Xc.forEach(t),Ti=i(ps,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ps.forEach(t),wi=c(pa),H=n(pa,"DIV",{class:!0});var Te=s(H);b(Wt.$$.fragment,Te),bi=c(Te),Le=n(Te,"P",{});var fa=s(Le);yi=i(fa,"The "),Xo=n(fa,"A",{href:!0});var Zc=s(Xo);xi=i(Zc,"ViTModel"),Zc.forEach(t),$i=i(fa," forward method, overrides the "),Oa=n(fa,"CODE",{});var Qc=s(Oa);Fi=i(Qc,"__call__"),Qc.forEach(t),Ei=i(fa," special method."),fa.forEach(t),Vi=c(Te),b(Ye.$$.fragment,Te),ki=c(Te),Sa=n(Te,"P",{});var Yc=s(Sa);Ii=i(Yc,"Examples:"),Yc.forEach(t),ji=c(Te),b(Bt.$$.fragment,Te),Te.forEach(t),pa.forEach(t),Hn=c(o),qe=n(o,"H2",{class:!0});var fs=s(qe);et=n(fs,"A",{id:!0,class:!0,href:!0});var eh=s(et);Wa=n(eh,"SPAN",{});var th=s(Wa);b(Ut.$$.fragment,th),th.forEach(t),eh.forEach(t),Pi=c(fs),Ba=n(fs,"SPAN",{});var oh=s(Ba);Mi=i(oh,"ViTForImageClassification"),oh.forEach(t),fs.forEach(t),Jn=c(o),se=n(o,"DIV",{class:!0});var ft=s(se);b(Rt.$$.fragment,ft),Ci=c(ft),Ua=n(ft,"P",{});var ah=s(Ua);Ai=i(ah,`ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),ah.forEach(t),zi=c(ft),Ht=n(ft,"P",{});var ms=s(Ht);Ni=i(ms,"This model is a PyTorch "),Jt=n(ms,"A",{href:!0,rel:!0});var nh=s(Jt);Li=i(nh,"torch.nn.Module"),nh.forEach(t),qi=i(ms,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ms.forEach(t),Di=c(ft),J=n(ft,"DIV",{class:!0});var we=s(J);b(Kt.$$.fragment,we),Oi=c(we),De=n(we,"P",{});var ma=s(De);Si=i(ma,"The "),Zo=n(ma,"A",{href:!0});var sh=s(Zo);Wi=i(sh,"ViTForImageClassification"),sh.forEach(t),Bi=i(ma," forward method, overrides the "),Ra=n(ma,"CODE",{});var rh=s(Ra);Ui=i(rh,"__call__"),rh.forEach(t),Ri=i(ma," special method."),ma.forEach(t),Hi=c(we),b(tt.$$.fragment,we),Ji=c(we),Ha=n(we,"P",{});var ih=s(Ha);Ki=i(ih,"Examples:"),ih.forEach(t),Gi=c(we),b(Gt.$$.fragment,we),we.forEach(t),ft.forEach(t),Kn=c(o),Oe=n(o,"H2",{class:!0});var us=s(Oe);ot=n(us,"A",{id:!0,class:!0,href:!0});var lh=s(ot);Ja=n(lh,"SPAN",{});var dh=s(Ja);b(Xt.$$.fragment,dh),dh.forEach(t),lh.forEach(t),Xi=c(us),Ka=n(us,"SPAN",{});var ch=s(Ka);Zi=i(ch,"TFViTModel"),ch.forEach(t),us.forEach(t),Gn=c(o),O=n(o,"DIV",{class:!0});var ie=s(O);b(Zt.$$.fragment,ie),Qi=c(ie),Ga=n(ie,"P",{});var hh=s(Ga);Yi=i(hh,"The bare ViT Model transformer outputting raw hidden-states without any specific head on top."),hh.forEach(t),el=c(ie),Qt=n(ie,"P",{});var gs=s(Qt);tl=i(gs,"This model inherits from "),Qo=n(gs,"A",{href:!0});var ph=s(Qo);ol=i(ph,"TFPreTrainedModel"),ph.forEach(t),al=i(gs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gs.forEach(t),nl=c(ie),Yt=n(ie,"P",{});var _s=s(Yt);sl=i(_s,"This model is also a "),eo=n(_s,"A",{href:!0,rel:!0});var fh=s(eo);rl=i(fh,"tf.keras.Model"),fh.forEach(t),il=i(_s,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_s.forEach(t),ll=c(ie),b(at.$$.fragment,ie),dl=c(ie),K=n(ie,"DIV",{class:!0});var be=s(K);b(to.$$.fragment,be),cl=c(be),Se=n(be,"P",{});var ua=s(Se);hl=i(ua,"The "),Yo=n(ua,"A",{href:!0});var mh=s(Yo);pl=i(mh,"TFViTModel"),mh.forEach(t),fl=i(ua," forward method, overrides the "),Xa=n(ua,"CODE",{});var uh=s(Xa);ml=i(uh,"__call__"),uh.forEach(t),ul=i(ua," special method."),ua.forEach(t),gl=c(be),b(nt.$$.fragment,be),_l=c(be),Za=n(be,"P",{});var gh=s(Za);vl=i(gh,"Examples:"),gh.forEach(t),Tl=c(be),b(oo.$$.fragment,be),be.forEach(t),ie.forEach(t),Xn=c(o),We=n(o,"H2",{class:!0});var vs=s(We);st=n(vs,"A",{id:!0,class:!0,href:!0});var _h=s(st);Qa=n(_h,"SPAN",{});var vh=s(Qa);b(ao.$$.fragment,vh),vh.forEach(t),_h.forEach(t),wl=c(vs),Ya=n(vs,"SPAN",{});var Th=s(Ya);bl=i(Th,"TFViTForImageClassification"),Th.forEach(t),vs.forEach(t),Zn=c(o),S=n(o,"DIV",{class:!0});var le=s(S);b(no.$$.fragment,le),yl=c(le),en=n(le,"P",{});var wh=s(en);xl=i(wh,`ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),wh.forEach(t),$l=c(le),so=n(le,"P",{});var Ts=s(so);Fl=i(Ts,"This model inherits from "),ea=n(Ts,"A",{href:!0});var bh=s(ea);El=i(bh,"TFPreTrainedModel"),bh.forEach(t),Vl=i(Ts,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ts.forEach(t),kl=c(le),ro=n(le,"P",{});var ws=s(ro);Il=i(ws,"This model is also a "),io=n(ws,"A",{href:!0,rel:!0});var yh=s(io);jl=i(yh,"tf.keras.Model"),yh.forEach(t),Pl=i(ws,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ws.forEach(t),Ml=c(le),b(rt.$$.fragment,le),Cl=c(le),G=n(le,"DIV",{class:!0});var ye=s(G);b(lo.$$.fragment,ye),Al=c(ye),Be=n(ye,"P",{});var ga=s(Be);zl=i(ga,"The "),ta=n(ga,"A",{href:!0});var xh=s(ta);Nl=i(xh,"TFViTForImageClassification"),xh.forEach(t),Ll=i(ga," forward method, overrides the "),tn=n(ga,"CODE",{});var $h=s(tn);ql=i($h,"__call__"),$h.forEach(t),Dl=i(ga," special method."),ga.forEach(t),Ol=c(ye),b(it.$$.fragment,ye),Sl=c(ye),on=n(ye,"P",{});var Fh=s(on);Wl=i(Fh,"Examples:"),Fh.forEach(t),Bl=c(ye),b(co.$$.fragment,ye),ye.forEach(t),le.forEach(t),Qn=c(o),Ue=n(o,"H2",{class:!0});var bs=s(Ue);lt=n(bs,"A",{id:!0,class:!0,href:!0});var Eh=s(lt);an=n(Eh,"SPAN",{});var Vh=s(an);b(ho.$$.fragment,Vh),Vh.forEach(t),Eh.forEach(t),Ul=c(bs),nn=n(bs,"SPAN",{});var kh=s(nn);Rl=i(kh,"FlaxVitModel"),kh.forEach(t),bs.forEach(t),Yn=c(o),A=n(o,"DIV",{class:!0});var Y=s(A);b(po.$$.fragment,Y),Hl=c(Y),sn=n(Y,"P",{});var Ih=s(sn);Jl=i(Ih,"The bare ViT Model transformer outputting raw hidden-states without any specific head on top."),Ih.forEach(t),Kl=c(Y),fo=n(Y,"P",{});var ys=s(fo);Gl=i(ys,"This model inherits from "),oa=n(ys,"A",{href:!0});var jh=s(oa);Xl=i(jh,"FlaxPreTrainedModel"),jh.forEach(t),Zl=i(ys,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),ys.forEach(t),Ql=c(Y),mo=n(Y,"P",{});var xs=s(mo);Yl=i(xs,"This model is also a Flax Linen "),uo=n(xs,"A",{href:!0,rel:!0});var Ph=s(uo);ed=i(Ph,"flax.linen.Module"),Ph.forEach(t),td=i(xs,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),xs.forEach(t),od=c(Y),rn=n(Y,"P",{});var Mh=s(rn);ad=i(Mh,"Finally, this model supports inherent JAX features such as:"),Mh.forEach(t),nd=c(Y),pe=n(Y,"UL",{});var mt=s(pe);ln=n(mt,"LI",{});var Ch=s(ln);go=n(Ch,"A",{href:!0,rel:!0});var Ah=s(go);sd=i(Ah,"Just-In-Time (JIT) compilation"),Ah.forEach(t),Ch.forEach(t),rd=c(mt),dn=n(mt,"LI",{});var zh=s(dn);_o=n(zh,"A",{href:!0,rel:!0});var Nh=s(_o);id=i(Nh,"Automatic Differentiation"),Nh.forEach(t),zh.forEach(t),ld=c(mt),cn=n(mt,"LI",{});var Lh=s(cn);vo=n(Lh,"A",{href:!0,rel:!0});var qh=s(vo);dd=i(qh,"Vectorization"),qh.forEach(t),Lh.forEach(t),cd=c(mt),hn=n(mt,"LI",{});var Dh=s(hn);To=n(Dh,"A",{href:!0,rel:!0});var Oh=s(To);hd=i(Oh,"Parallelization"),Oh.forEach(t),Dh.forEach(t),mt.forEach(t),pd=c(Y),X=n(Y,"DIV",{class:!0});var xe=s(X);b(wo.$$.fragment,xe),fd=c(xe),Re=n(xe,"P",{});var _a=s(Re);md=i(_a,"The "),pn=n(_a,"CODE",{});var Sh=s(pn);ud=i(Sh,"FlaxViTPreTrainedModel"),Sh.forEach(t),gd=i(_a," forward method, overrides the "),fn=n(_a,"CODE",{});var Wh=s(fn);_d=i(Wh,"__call__"),Wh.forEach(t),vd=i(_a," special method."),_a.forEach(t),Td=c(xe),b(dt.$$.fragment,xe),wd=c(xe),mn=n(xe,"P",{});var Bh=s(mn);bd=i(Bh,"Examples:"),Bh.forEach(t),yd=c(xe),b(bo.$$.fragment,xe),xe.forEach(t),Y.forEach(t),es=c(o),He=n(o,"H2",{class:!0});var $s=s(He);ct=n($s,"A",{id:!0,class:!0,href:!0});var Uh=s(ct);un=n(Uh,"SPAN",{});var Rh=s(un);b(yo.$$.fragment,Rh),Rh.forEach(t),Uh.forEach(t),xd=c($s),gn=n($s,"SPAN",{});var Hh=s(gn);$d=i(Hh,"FlaxViTForImageClassification"),Hh.forEach(t),$s.forEach(t),ts=c(o),z=n(o,"DIV",{class:!0});var ee=s(z);b(xo.$$.fragment,ee),Fd=c(ee),_n=n(ee,"P",{});var Jh=s(_n);Ed=i(Jh,`ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.`),Jh.forEach(t),Vd=c(ee),$o=n(ee,"P",{});var Fs=s($o);kd=i(Fs,"This model inherits from "),aa=n(Fs,"A",{href:!0});var Kh=s(aa);Id=i(Kh,"FlaxPreTrainedModel"),Kh.forEach(t),jd=i(Fs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Fs.forEach(t),Pd=c(ee),Fo=n(ee,"P",{});var Es=s(Fo);Md=i(Es,"This model is also a Flax Linen "),Eo=n(Es,"A",{href:!0,rel:!0});var Gh=s(Eo);Cd=i(Gh,"flax.linen.Module"),Gh.forEach(t),Ad=i(Es,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Es.forEach(t),zd=c(ee),vn=n(ee,"P",{});var Xh=s(vn);Nd=i(Xh,"Finally, this model supports inherent JAX features such as:"),Xh.forEach(t),Ld=c(ee),fe=n(ee,"UL",{});var ut=s(fe);Tn=n(ut,"LI",{});var Zh=s(Tn);Vo=n(Zh,"A",{href:!0,rel:!0});var Qh=s(Vo);qd=i(Qh,"Just-In-Time (JIT) compilation"),Qh.forEach(t),Zh.forEach(t),Dd=c(ut),wn=n(ut,"LI",{});var Yh=s(wn);ko=n(Yh,"A",{href:!0,rel:!0});var ep=s(ko);Od=i(ep,"Automatic Differentiation"),ep.forEach(t),Yh.forEach(t),Sd=c(ut),bn=n(ut,"LI",{});var tp=s(bn);Io=n(tp,"A",{href:!0,rel:!0});var op=s(Io);Wd=i(op,"Vectorization"),op.forEach(t),tp.forEach(t),Bd=c(ut),yn=n(ut,"LI",{});var ap=s(yn);jo=n(ap,"A",{href:!0,rel:!0});var np=s(jo);Ud=i(np,"Parallelization"),np.forEach(t),ap.forEach(t),ut.forEach(t),Rd=c(ee),Z=n(ee,"DIV",{class:!0});var $e=s(Z);b(Po.$$.fragment,$e),Hd=c($e),Je=n($e,"P",{});var va=s(Je);Jd=i(va,"The "),xn=n(va,"CODE",{});var sp=s(xn);Kd=i(sp,"FlaxViTPreTrainedModel"),sp.forEach(t),Gd=i(va," forward method, overrides the "),$n=n(va,"CODE",{});var rp=s($n);Xd=i(rp,"__call__"),rp.forEach(t),Zd=i(va," special method."),va.forEach(t),Qd=c($e),b(ht.$$.fragment,$e),Yd=c($e),Fn=n($e,"P",{});var ip=s(Fn);ec=i(ip,"Example:"),ip.forEach(t),tc=c($e),b(Mo.$$.fragment,$e),$e.forEach(t),ee.forEach(t),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(xp)),l(u,"id","vision-transformer-vit"),l(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(u,"href","#vision-transformer-vit"),l(m,"class","relative group"),l(W,"id","overview"),l(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(W,"href","#overview"),l(P,"class","relative group"),l(ae,"href","https://arxiv.org/abs/2010.11929"),l(ae,"rel","nofollow"),l(_t,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer"),l(_t,"rel","nofollow"),l(Oo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor"),l(Tt,"href","https://huggingface.co/models?search=vit"),l(Tt,"rel","nofollow"),l(wt,"href","http://www.image-net.org/"),l(wt,"rel","nofollow"),l(bt,"href","http://www.image-net.org/challenges/LSVRC/2012/"),l(bt,"rel","nofollow"),l(yt,"href","https://arxiv.org/abs/1906.06423"),l(yt,"rel","nofollow"),l(xt,"href","https://arxiv.org/abs/1912.11370"),l(xt,"rel","nofollow"),l(Wo,"href","/docs/transformers/v4.15.0/en/deit"),l(Bo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTModel"),l(Uo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTForImageClassification"),l(Ro,"href","/docs/transformers/v4.15.0/en/model_doc/deit#transformers.DeiTFeatureExtractor"),l(Ho,"href","/docs/transformers/v4.15.0/en/beit"),l(Et,"href","https://huggingface.co/models?other=dino"),l(Et,"rel","nofollow"),l(Vt,"href","https://huggingface.co/nielsr"),l(Vt,"rel","nofollow"),l(kt,"href","https://github.com/google-research/vision_transformer"),l(kt,"rel","nofollow"),l(It,"href","https://github.com/rwightman/pytorch-image-models"),l(It,"rel","nofollow"),l(Ge,"id","transformers.ViTConfig"),l(Ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(Ge,"href","#transformers.ViTConfig"),l(Me,"class","relative group"),l(Jo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTModel"),l(Mt,"href","https://huggingface.co/google/vit-base-patch16-224"),l(Mt,"rel","nofollow"),l(Ko,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Go,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(B,"class","docstring"),l(Xe,"id","transformers.ViTFeatureExtractor"),l(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Xe,"href","#transformers.ViTFeatureExtractor"),l(ze,"class","relative group"),l(_e,"class","docstring"),l(ne,"class","docstring"),l(Qe,"id","transformers.ViTModel"),l(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Qe,"href","#transformers.ViTModel"),l(Ne,"class","relative group"),l(St,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(St,"rel","nofollow"),l(Xo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTModel"),l(H,"class","docstring"),l(he,"class","docstring"),l(et,"id","transformers.ViTForImageClassification"),l(et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(et,"href","#transformers.ViTForImageClassification"),l(qe,"class","relative group"),l(Jt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Jt,"rel","nofollow"),l(Zo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTForImageClassification"),l(J,"class","docstring"),l(se,"class","docstring"),l(ot,"id","transformers.TFViTModel"),l(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ot,"href","#transformers.TFViTModel"),l(Oe,"class","relative group"),l(Qo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),l(eo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(eo,"rel","nofollow"),l(Yo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.TFViTModel"),l(K,"class","docstring"),l(O,"class","docstring"),l(st,"id","transformers.TFViTForImageClassification"),l(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(st,"href","#transformers.TFViTForImageClassification"),l(We,"class","relative group"),l(ea,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),l(io,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(io,"rel","nofollow"),l(ta,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.TFViTForImageClassification"),l(G,"class","docstring"),l(S,"class","docstring"),l(lt,"id","transformers.FlaxViTModel"),l(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(lt,"href","#transformers.FlaxViTModel"),l(Ue,"class","relative 
group"),l(oa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(uo,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(uo,"rel","nofollow"),l(go,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(go,"rel","nofollow"),l(_o,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(_o,"rel","nofollow"),l(vo,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(vo,"rel","nofollow"),l(To,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(To,"rel","nofollow"),l(X,"class","docstring"),l(A,"class","docstring"),l(ct,"id","transformers.FlaxViTForImageClassification"),l(ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ct,"href","#transformers.FlaxViTForImageClassification"),l(He,"class","relative group"),l(aa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(Eo,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(Eo,"rel","nofollow"),l(Vo,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(Vo,"rel","nofollow"),l(ko,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(ko,"rel","nofollow"),l(Io,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Io,"rel","nofollow"),l(jo,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(jo,"rel","nofollow"),l(Z,"class","docstring"),l(z,"class","docstring")},m(o,p){e(document.head,h),f(o,v,p),f(o,m,p),e(m,u),e(u,T),y(_,T,null),e(m,g),e(m,V),e(V,de),f(o,R,p),y(k,o,p),f(o,re,p),f(o,P,p),e(P,W),e(W,te),y(C,te,null),e(P,ce),e(P,oe),e(oe,E),f(o,j,p),f(o,N,p),e(N,Ve),e(N,ae),e(ae,D),e(N,ke),f(o,me,p),f(o,Lo,p),e(Lo,Vs),f(o,Cn,p),f(o,qo,p),e(qo,Ta),e(Ta,ks),f(o,An,p),f(o,Do,p),e(Do,Is),f(o,zn,p),f(o,L,p),e(L,gt),e(gt,js),e(gt,_t),e(_t,Ps),e(gt,Ms),e(L,Cs),e(L,wa),e(wa,As),e(L,zs),e(L,vt),e(vt,Ns),e(vt,Oo),e(Oo,Ls),e(vt,qs),e(L,Ds),e(L,Ie),e(Ie,Os),e(Ie,ba),e(ba,Ss),e(Ie,Ws),e(Ie,Tt),e(Tt,Bs),e(Ie,Us),e(L,Rs),e(L,je),e(je,Hs),e(je,wt),e(wt,Js),e(je,Ks),e(je,bt),e(bt,Gs),e(je,Xs),e(L,Zs),e(L,Pe),e(Pe,Qs),e(Pe,yt),e(yt,Ys),e(Pe,er),e(Pe,xt),e(xt,tr),e(Pe,or),e(L,ar),e(L,ya),e(ya,nr),f(o,Nn,p),f(o,So,p),e(So,sr),f(o,Ln,p),f(o,ue,p),e(ue,xa),e(xa,M),e(M,rr),e(M,Wo),e(Wo,ir),e(M,lr),e(M,Bo),e(Bo,dr),e(M,cr),e(M,Uo),e(Uo,hr),e(M,pr),e(M,$a),e($a,fr),e(M,mr),e(M,Fa),e(Fa,ur),e(M,gr),e(M,Ea),e(Ea,_r),e(M,vr),e(M,Va),e(Va,Tr),e(M,wr),e(M,Ro),e(Ro,br),e(M,yr),e(ue,xr),e(ue,ka),e(ka,$t),e($t,$r),e($t,Ho),e(Ho,Fr),e($t,Er),e(ue,Vr),e(ue,Ia),e(Ia,Ft),e(Ft,kr),e(Ft,Et),e(Et,Ir),e(Ft,jr),f(o,qn,p),f(o,ge,p),e(ge,Pr),e(ge,Vt),e(Vt,Mr),e(ge,Cr),e(ge,kt),e(kt,Ar),e(ge,zr),f(o,Dn,p),f(o,Ke,p),e(Ke,Nr),e(Ke,It),e(It,Lr),e(Ke,qr),f(o,On,p),f(o,Me,p),e(Me,Ge),e(Ge,ja),y(jt,ja,null),e(Me,Dr),e(Me,Pa),e(Pa,Or),f(o,Sn,p),f(o,B,p),y(Pt,B,null),e(B,Sr),e(B,Ce),e(Ce,Wr),e(Ce,Jo),e(Jo,Br),e(Ce,Ur),e(Ce,Mt),e(Mt,Rr),e(Ce,Hr),e(B,Jr),e(B,Ae),e(Ae,Kr),e(Ae,Ko),e(Ko,Gr),e(Ae,Xr),e(Ae,Go),e(Go,Zr),e(Ae,Qr),e(B,Yr),e(B,Ma),e(Ma,ei),e(B,ti),y(Ct,B,null),f(o,Wn,p),f(o,ze,p),e(ze,Xe),e(Xe,Ca),y(At,Ca,null),e(ze,oi),e(ze,Aa),e(Aa,ai),f(o,Bn,p),f(o,ne,p),y(zt,ne,null),e(ne,ni),e(ne,za),e(za,si),e(ne,ri),e(ne,Nt),e(Nt,ii),e(Nt,Na),e(Na,li),e(Nt,di),e(ne,ci),e(ne,_e),y(Lt,_e,null),e(_e,hi),e(_e,La),e(La,pi),e(_e,fi),y(Ze,_e,null),f(o,Un,
p),f(o,Ne,p),e(Ne,Qe),e(Qe,qa),y(qt,qa,null),e(Ne,mi),e(Ne,Da),e(Da,ui),f(o,Rn,p),f(o,he,p),y(Dt,he,null),e(he,gi),e(he,Ot),e(Ot,_i),e(Ot,St),e(St,vi),e(Ot,Ti),e(he,wi),e(he,H),y(Wt,H,null),e(H,bi),e(H,Le),e(Le,yi),e(Le,Xo),e(Xo,xi),e(Le,$i),e(Le,Oa),e(Oa,Fi),e(Le,Ei),e(H,Vi),y(Ye,H,null),e(H,ki),e(H,Sa),e(Sa,Ii),e(H,ji),y(Bt,H,null),f(o,Hn,p),f(o,qe,p),e(qe,et),e(et,Wa),y(Ut,Wa,null),e(qe,Pi),e(qe,Ba),e(Ba,Mi),f(o,Jn,p),f(o,se,p),y(Rt,se,null),e(se,Ci),e(se,Ua),e(Ua,Ai),e(se,zi),e(se,Ht),e(Ht,Ni),e(Ht,Jt),e(Jt,Li),e(Ht,qi),e(se,Di),e(se,J),y(Kt,J,null),e(J,Oi),e(J,De),e(De,Si),e(De,Zo),e(Zo,Wi),e(De,Bi),e(De,Ra),e(Ra,Ui),e(De,Ri),e(J,Hi),y(tt,J,null),e(J,Ji),e(J,Ha),e(Ha,Ki),e(J,Gi),y(Gt,J,null),f(o,Kn,p),f(o,Oe,p),e(Oe,ot),e(ot,Ja),y(Xt,Ja,null),e(Oe,Xi),e(Oe,Ka),e(Ka,Zi),f(o,Gn,p),f(o,O,p),y(Zt,O,null),e(O,Qi),e(O,Ga),e(Ga,Yi),e(O,el),e(O,Qt),e(Qt,tl),e(Qt,Qo),e(Qo,ol),e(Qt,al),e(O,nl),e(O,Yt),e(Yt,sl),e(Yt,eo),e(eo,rl),e(Yt,il),e(O,ll),y(at,O,null),e(O,dl),e(O,K),y(to,K,null),e(K,cl),e(K,Se),e(Se,hl),e(Se,Yo),e(Yo,pl),e(Se,fl),e(Se,Xa),e(Xa,ml),e(Se,ul),e(K,gl),y(nt,K,null),e(K,_l),e(K,Za),e(Za,vl),e(K,Tl),y(oo,K,null),f(o,Xn,p),f(o,We,p),e(We,st),e(st,Qa),y(ao,Qa,null),e(We,wl),e(We,Ya),e(Ya,bl),f(o,Zn,p),f(o,S,p),y(no,S,null),e(S,yl),e(S,en),e(en,xl),e(S,$l),e(S,so),e(so,Fl),e(so,ea),e(ea,El),e(so,Vl),e(S,kl),e(S,ro),e(ro,Il),e(ro,io),e(io,jl),e(ro,Pl),e(S,Ml),y(rt,S,null),e(S,Cl),e(S,G),y(lo,G,null),e(G,Al),e(G,Be),e(Be,zl),e(Be,ta),e(ta,Nl),e(Be,Ll),e(Be,tn),e(tn,ql),e(Be,Dl),e(G,Ol),y(it,G,null),e(G,Sl),e(G,on),e(on,Wl),e(G,Bl),y(co,G,null),f(o,Qn,p),f(o,Ue,p),e(Ue,lt),e(lt,an),y(ho,an,null),e(Ue,Ul),e(Ue,nn),e(nn,Rl),f(o,Yn,p),f(o,A,p),y(po,A,null),e(A,Hl),e(A,sn),e(sn,Jl),e(A,Kl),e(A,fo),e(fo,Gl),e(fo,oa),e(oa,Xl),e(fo,Zl),e(A,Ql),e(A,mo),e(mo,Yl),e(mo,uo),e(uo,ed),e(mo,td),e(A,od),e(A,rn),e(rn,ad),e(A,nd),e(A,pe),e(pe,ln),e(ln,go),e(go,sd),e(pe,rd),e(pe,dn),e(dn,_o),e(_o,id),e(pe,ld),e(pe,cn),e(cn,vo),e(vo,dd),e(pe,cd),e(pe,hn),e(hn,To),e(To,hd),e(A,pd),e(A,X),y(wo,X,null),e(X,fd),e(X,Re),e(Re,md),e(Re,pn),e(pn,ud),e(Re,gd),e(Re,fn),e(fn,_d),e(Re,vd),e(X,Td),y(dt,X,null),e(X,wd),e(X,mn),e(mn,bd),e(X,yd),y(bo,X,null),f(o,es,p),f(o,He,p),e(He,ct),e(ct,un),y(yo,un,null),e(He,xd),e(He,gn),e(gn,$d),f(o,ts,p),f(o,z,p),y(xo,z,null),e(z,Fd),e(z,_n),e(_n,Ed),e(z,Vd),e(z,$o),e($o,kd),e($o,aa),e(aa,Id),e($o,jd),e(z,Pd),e(z,Fo),e(Fo,Md),e(Fo,Eo),e(Eo,Cd),e(Fo,Ad),e(z,zd),e(z,vn),e(vn,Nd),e(z,Ld),e(z,fe),e(fe,Tn),e(Tn,Vo),e(Vo,qd),e(fe,Dd),e(fe,wn),e(wn,ko),e(ko,Od),e(fe,Sd),e(fe,bn),e(bn,Io),e(Io,Wd),e(fe,Bd),e(fe,yn),e(yn,jo),e(jo,Ud),e(z,Rd),e(z,Z),y(Po,Z,null),e(Z,Hd),e(Z,Je),e(Je,Jd),e(Je,xn),e(xn,Kd),e(Je,Gd),e(Je,$n),e($n,Xd),e(Je,Zd),e(Z,Qd),y(ht,Z,null),e(Z,Yd),e(Z,Fn),e(Fn,ec),e(Z,tc),y(Mo,Z,null),os=!0},p(o,[p]){const Co={};p&2&&(Co.$$scope={dirty:p,ctx:o}),k.$set(Co);const En={};p&2&&(En.$$scope={dirty:p,ctx:o}),Ze.$set(En);const Vn={};p&2&&(Vn.$$scope={dirty:p,ctx:o}),Ye.$set(Vn);const kn={};p&2&&(kn.$$scope={dirty:p,ctx:o}),tt.$set(kn);const Ao={};p&2&&(Ao.$$scope={dirty:p,ctx:o}),at.$set(Ao);const In={};p&2&&(In.$$scope={dirty:p,ctx:o}),nt.$set(In);const jn={};p&2&&(jn.$$scope={dirty:p,ctx:o}),rt.$set(jn);const Pn={};p&2&&(Pn.$$scope={dirty:p,ctx:o}),it.$set(Pn);const zo={};p&2&&(zo.$$scope={dirty:p,ctx:o}),dt.$set(zo);const 
Mn={};p&2&&(Mn.$$scope={dirty:p,ctx:o}),ht.$set(Mn)},i(o){os||(x(_.$$.fragment,o),x(k.$$.fragment,o),x(C.$$.fragment,o),x(jt.$$.fragment,o),x(Pt.$$.fragment,o),x(Ct.$$.fragment,o),x(At.$$.fragment,o),x(zt.$$.fragment,o),x(Lt.$$.fragment,o),x(Ze.$$.fragment,o),x(qt.$$.fragment,o),x(Dt.$$.fragment,o),x(Wt.$$.fragment,o),x(Ye.$$.fragment,o),x(Bt.$$.fragment,o),x(Ut.$$.fragment,o),x(Rt.$$.fragment,o),x(Kt.$$.fragment,o),x(tt.$$.fragment,o),x(Gt.$$.fragment,o),x(Xt.$$.fragment,o),x(Zt.$$.fragment,o),x(at.$$.fragment,o),x(to.$$.fragment,o),x(nt.$$.fragment,o),x(oo.$$.fragment,o),x(ao.$$.fragment,o),x(no.$$.fragment,o),x(rt.$$.fragment,o),x(lo.$$.fragment,o),x(it.$$.fragment,o),x(co.$$.fragment,o),x(ho.$$.fragment,o),x(po.$$.fragment,o),x(wo.$$.fragment,o),x(dt.$$.fragment,o),x(bo.$$.fragment,o),x(yo.$$.fragment,o),x(xo.$$.fragment,o),x(Po.$$.fragment,o),x(ht.$$.fragment,o),x(Mo.$$.fragment,o),os=!0)},o(o){$(_.$$.fragment,o),$(k.$$.fragment,o),$(C.$$.fragment,o),$(jt.$$.fragment,o),$(Pt.$$.fragment,o),$(Ct.$$.fragment,o),$(At.$$.fragment,o),$(zt.$$.fragment,o),$(Lt.$$.fragment,o),$(Ze.$$.fragment,o),$(qt.$$.fragment,o),$(Dt.$$.fragment,o),$(Wt.$$.fragment,o),$(Ye.$$.fragment,o),$(Bt.$$.fragment,o),$(Ut.$$.fragment,o),$(Rt.$$.fragment,o),$(Kt.$$.fragment,o),$(tt.$$.fragment,o),$(Gt.$$.fragment,o),$(Xt.$$.fragment,o),$(Zt.$$.fragment,o),$(at.$$.fragment,o),$(to.$$.fragment,o),$(nt.$$.fragment,o),$(oo.$$.fragment,o),$(ao.$$.fragment,o),$(no.$$.fragment,o),$(rt.$$.fragment,o),$(lo.$$.fragment,o),$(it.$$.fragment,o),$(co.$$.fragment,o),$(ho.$$.fragment,o),$(po.$$.fragment,o),$(wo.$$.fragment,o),$(dt.$$.fragment,o),$(bo.$$.fragment,o),$(yo.$$.fragment,o),$(xo.$$.fragment,o),$(Po.$$.fragment,o),$(ht.$$.fragment,o),$(Mo.$$.fragment,o),os=!1},d(o){t(h),o&&t(v),o&&t(m),F(_),o&&t(R),F(k,o),o&&t(re),o&&t(P),F(C),o&&t(j),o&&t(N),o&&t(me),o&&t(Lo),o&&t(Cn),o&&t(qo),o&&t(An),o&&t(Do),o&&t(zn),o&&t(L),o&&t(Nn),o&&t(So),o&&t(Ln),o&&t(ue),o&&t(qn),o&&t(ge),o&&t(Dn),o&&t(Ke),o&&t(On),o&&t(Me),F(jt),o&&t(Sn),o&&t(B),F(Pt),F(Ct),o&&t(Wn),o&&t(ze),F(At),o&&t(Bn),o&&t(ne),F(zt),F(Lt),F(Ze),o&&t(Un),o&&t(Ne),F(qt),o&&t(Rn),o&&t(he),F(Dt),F(Wt),F(Ye),F(Bt),o&&t(Hn),o&&t(qe),F(Ut),o&&t(Jn),o&&t(se),F(Rt),F(Kt),F(tt),F(Gt),o&&t(Kn),o&&t(Oe),F(Xt),o&&t(Gn),o&&t(O),F(Zt),F(at),F(to),F(nt),F(oo),o&&t(Xn),o&&t(We),F(ao),o&&t(Zn),o&&t(S),F(no),F(rt),F(lo),F(it),F(co),o&&t(Qn),o&&t(Ue),F(ho),o&&t(Yn),o&&t(A),F(po),F(wo),F(dt),F(bo),o&&t(es),o&&t(He),F(yo),o&&t(ts),o&&t(z),F(xo),F(Po),F(ht),F(Mo)}}}const xp={local:"vision-transformer-vit",sections:[{local:"overview",title:"Overview"},{local:"transformers.ViTConfig",title:"ViTConfig"},{local:"transformers.ViTFeatureExtractor",title:"ViTFeatureExtractor"},{local:"transformers.ViTModel",title:"ViTModel"},{local:"transformers.ViTForImageClassification",title:"ViTForImageClassification"},{local:"transformers.TFViTModel",title:"TFViTModel"},{local:"transformers.TFViTForImageClassification",title:"TFViTForImageClassification"},{local:"transformers.FlaxViTModel",title:"FlaxVitModel"},{local:"transformers.FlaxViTForImageClassification",title:"FlaxViTForImageClassification"}],title:"Vision Transformer (ViT)"};function $p(I,h,v){let{fw:m}=h;return I.$$set=u=>{"fw"in u&&v(0,m=u.fw)},[m]}class Pp extends lp{constructor(h){super();dp(this,h,$p,yp,cp,{fw:0})}}export{Pp as default,xp as metadata};
9,985
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/funnel.mdx-379be58f.js
import{S as Ty,i as Fy,s as ky,e as s,k as l,w as k,t,L as wy,c as r,d as n,m as d,a,x as w,h as o,b as c,J as e,g as h,y as b,q as y,o as $,B as E}from"../../chunks/vendor-b1433968.js";import{T as ze}from"../../chunks/Tip-c3840994.js";import{D as ee}from"../../chunks/Docstring-ff504c58.js";import{C as Ne}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Pe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function by(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function yy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function $y(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Ey(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function My(W){let 
p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function zy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Py(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function qy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Cy(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using 
"),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function xy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former 
takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function jy(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, 
token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Ly(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Ay(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method 
which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Dy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Iy(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one 
or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Oy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Sy(W){let 
p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Ny(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function By(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three 
possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Wy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Qy(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to 
the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Ry(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Hy(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge;return{c(){p=s("p"),M=t("TF 2.0 models accepts two formats as inputs:"),m=l(),g=s("ul"),T=s("li"),v=t("having all inputs as keyword arguments (like PyTorch models), or"),_=l(),z=s("li"),ce=t("having all inputs as a 
list, tuple or dict in the first positional arguments."),G=l(),P=s("p"),X=t("This second option is useful when using "),I=s("code"),ne=t("tf.keras.Model.fit"),ue=t(` method which currently requires having all the tensors in the first argument of the model call function: `),O=s("code"),pe=t("model(inputs)"),ie=t("."),U=l(),L=s("p"),te=t(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),q=s("ul"),x=s("li"),oe=t("a single Tensor with "),Q=s("code"),le=t("input_ids"),se=t(" only and nothing else: "),S=s("code"),he=t("model(inputs_ids)"),de=l(),C=s("li"),fe=t(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s("code"),J=t("model([input_ids, attention_mask])"),ae=t(" or "),R=s("code"),me=t("model([input_ids, attention_mask, token_type_ids])"),N=l(),A=s("li"),re=t(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=s("code"),ge=t('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=r(u,"P",{});var F=a(p);M=o(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(n),m=d(u),g=r(u,"UL",{});var K=a(g);T=r(K,"LI",{});var ve=a(T);v=o(ve,"having all inputs as keyword arguments (like PyTorch models), or"),ve.forEach(n),_=d(K),z=r(K,"LI",{});var we=a(z);ce=o(we,"having all inputs as a list, tuple or dict in the first positional arguments."),we.forEach(n),K.forEach(n),G=d(u),P=r(u,"P",{});var D=a(P);X=o(D,"This second option is useful when using "),I=r(D,"CODE",{});var Te=a(I);ne=o(Te,"tf.keras.Model.fit"),Te.forEach(n),ue=o(D,` method which currently requires having all the tensors in the first argument of the model call function: `),O=r(D,"CODE",{});var be=a(O);pe=o(be,"model(inputs)"),be.forEach(n),ie=o(D,"."),D.forEach(n),U=d(u),L=r(u,"P",{});var ye=a(L);te=o(ye,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ye.forEach(n),Z=d(u),q=r(u,"UL",{});var j=a(q);x=r(j,"LI",{});var V=a(x);oe=o(V,"a single Tensor with "),Q=r(V,"CODE",{});var $e=a(Q);le=o($e,"input_ids"),$e.forEach(n),se=o(V," only and nothing else: "),S=r(V,"CODE",{});var Fe=a(S);he=o(Fe,"model(inputs_ids)"),Fe.forEach(n),V.forEach(n),de=d(j),C=r(j,"LI",{});var Y=a(C);fe=o(Y,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r(Y,"CODE",{});var Ee=a(B);J=o(Ee,"model([input_ids, attention_mask])"),Ee.forEach(n),ae=o(Y," or "),R=r(Y,"CODE",{});var ke=a(R);me=o(ke,"model([input_ids, attention_mask, token_type_ids])"),ke.forEach(n),Y.forEach(n),N=d(j),A=r(j,"LI",{});var _e=a(A);re=o(_e,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),H=r(_e,"CODE",{});var Me=a(H);ge=o(Me,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Me.forEach(n),_e.forEach(n),j.forEach(n)},m(u,F){h(u,p,F),e(p,M),h(u,m,F),h(u,g,F),e(g,T),e(T,v),e(g,_),e(g,z),e(z,ce),h(u,G,F),h(u,P,F),e(P,X),e(P,I),e(I,ne),e(P,ue),e(P,O),e(O,pe),e(P,ie),h(u,U,F),h(u,L,F),e(L,te),h(u,Z,F),h(u,q,F),e(q,x),e(x,oe),e(x,Q),e(Q,le),e(x,se),e(x,S),e(S,he),e(q,de),e(q,C),e(C,fe),e(C,B),e(B,J),e(C,ae),e(C,R),e(R,me),e(q,N),e(q,A),e(A,re),e(A,H),e(H,ge)},d(u){u&&n(p),u&&n(m),u&&n(g),u&&n(G),u&&n(P),u&&n(U),u&&n(L),u&&n(Z),u&&n(q)}}}function Vy(W){let p,M,m,g,T;return{c(){p=s("p"),M=t("Although the recipe for forward pass needs to be defined within this 
function, one should call the "),m=s("code"),g=t("Module"),T=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){p=r(v,"P",{});var _=a(p);M=o(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(_,"CODE",{});var z=a(m);g=o(z,"Module"),z.forEach(n),T=o(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(n)},m(v,_){h(v,p,_),e(p,M),e(p,m),e(m,g),e(p,T)},d(v){v&&n(p)}}}function Yy(W){let p,M,m,g,T,v,_,z,ce,G,P,X,I,ne,ue,O,pe,ie,U,L,te,Z,q,x,oe,Q,le,se,S,he,de,C,fe,B,J,ae,R,me,N,A,re,H,ge,u,F,K,ve,we,D,Te,be,ye,j,V,$e,Fe,Y,Ee,ke,_e,Me,Va,Dp,Ip,yc,jn,Op,Io,Sp,Np,Oo,Bp,Wp,$c,Kn,Nt,ll,So,Qp,dl,Rp,Ec,Cn,No,Hp,xn,Vp,Ya,Yp,Up,Ua,Gp,Zp,Bo,Kp,Xp,Jp,Xn,eh,Ga,nh,th,Za,oh,sh,Mc,Jn,Bt,cl,Wo,rh,ul,ah,zc,qe,Qo,ih,pl,lh,dh,Wt,Ka,ch,uh,Xa,ph,hh,fh,Ro,mh,Ja,gh,_h,vh,Ln,Ho,Th,hl,Fh,kh,Vo,ei,wh,fl,bh,yh,ni,$h,ml,Eh,Mh,Qt,Yo,zh,Uo,Ph,gl,qh,Ch,xh,wn,Go,jh,_l,Lh,Ah,Zo,Dh,et,Ih,vl,Oh,Sh,Tl,Nh,Bh,Wh,Fl,Pc,nt,Rt,kl,Ko,Qh,wl,Rh,qc,Ze,Xo,Hh,Jo,Vh,bl,Yh,Uh,Gh,Ht,ti,Zh,Kh,oi,Xh,Jh,ef,es,nf,si,tf,of,sf,bn,ns,rf,yl,af,lf,ts,df,tt,cf,$l,uf,pf,El,hf,ff,Cc,ot,Vt,Ml,os,mf,zl,gf,xc,st,ss,_f,rs,vf,ri,Tf,Ff,jc,rt,as,kf,is,wf,ai,bf,yf,Lc,at,Yt,Pl,ls,$f,ql,Ef,Ac,We,ds,Mf,Cl,zf,Pf,cs,qf,us,Cf,xf,jf,ps,Lf,ii,Af,Df,If,hs,Of,fs,Sf,Nf,Bf,Ke,ms,Wf,it,Qf,li,Rf,Hf,xl,Vf,Yf,Uf,Ut,Gf,jl,Zf,Kf,gs,Dc,lt,Gt,Ll,_s,Xf,Al,Jf,Ic,Qe,vs,em,Dl,nm,tm,Ts,om,Fs,sm,rm,am,ks,im,di,lm,dm,cm,ws,um,bs,pm,hm,fm,Xe,ys,mm,dt,gm,ci,_m,vm,Il,Tm,Fm,km,Zt,wm,Ol,bm,ym,$s,Oc,ct,Kt,Sl,Es,$m,Nl,Em,Sc,Ms,Je,zs,Mm,ut,zm,ui,Pm,qm,Bl,Cm,xm,jm,Xt,Lm,Wl,Am,Dm,Ps,Nc,pt,Jt,Ql,qs,Im,Rl,Om,Bc,Re,Cs,Sm,xs,Nm,Hl,Bm,Wm,Qm,js,Rm,Ls,Hm,Vm,Ym,As,Um,pi,Gm,Zm,Km,Ds,Xm,Is,Jm,eg,ng,en,Os,tg,ht,og,hi,sg,rg,Vl,ag,ig,lg,eo,dg,Yl,cg,ug,Ss,Wc,ft,no,Ul,Ns,pg,Gl,hg,Qc,He,Bs,fg,Zl,mg,gg,Ws,_g,Qs,vg,Tg,Fg,Rs,kg,fi,wg,bg,yg,Hs,$g,Vs,Eg,Mg,zg,Be,Ys,Pg,mt,qg,mi,Cg,xg,Kl,jg,Lg,Ag,to,Dg,Xl,Ig,Og,Us,Sg,Jl,Ng,Bg,Gs,Rc,gt,oo,ed,Zs,Wg,nd,Qg,Hc,Ve,Ks,Rg,td,Hg,Vg,Xs,Yg,Js,Ug,Gg,Zg,er,Kg,gi,Xg,Jg,e_,nr,n_,tr,t_,o_,s_,nn,or,r_,_t,a_,_i,i_,l_,od,d_,c_,u_,so,p_,sd,h_,f_,sr,Vc,vt,ro,rd,rr,m_,ad,g_,Yc,Ye,ar,__,id,v_,T_,ir,F_,lr,k_,w_,b_,dr,y_,vi,$_,E_,M_,cr,z_,ur,P_,q_,C_,tn,pr,x_,Tt,j_,Ti,L_,A_,ld,D_,I_,O_,ao,S_,dd,N_,B_,hr,Uc,Ft,io,cd,fr,W_,ud,Q_,Gc,Ue,mr,R_,kt,H_,pd,V_,Y_,hd,U_,G_,Z_,gr,K_,_r,X_,J_,ev,vr,nv,Fi,tv,ov,sv,Tr,rv,Fr,av,iv,lv,on,kr,dv,wt,cv,ki,uv,pv,fd,hv,fv,mv,lo,gv,md,_v,vv,wr,Zc,bt,co,gd,br,Tv,_d,Fv,Kc,xe,yr,kv,vd,wv,bv,$r,yv,Er,$v,Ev,Mv,Mr,zv,wi,Pv,qv,Cv,zr,xv,Pr,jv,Lv,Av,uo,Dv,sn,qr,Iv,yt,Ov,bi,Sv,Nv,Td,Bv,Wv,Qv,po,Rv,Fd,Hv,Vv,Cr,Xc,$t,ho,kd,xr,Yv,wd,Uv,Jc,je,jr,Gv,bd,Zv,Kv,Lr,Xv,Ar,Jv,eT,nT,Dr,tT,yi,oT,sT,rT,Ir,aT,Or,iT,lT,dT,fo,cT,rn,Sr,uT,Et,pT,$i,hT,fT,yd,mT,gT,_T,mo,vT,$d,TT,FT,Nr,eu,Mt,go,Ed,Br,kT,Md,wT,nu,Le,Wr,bT,zd,yT,$T,Qr,ET,Rr,MT,zT,PT,Hr,qT,Ei,CT,xT,jT,Vr,LT,Yr,AT,DT,IT,_o,OT,an,Ur,ST,zt,NT,Mi,BT,WT,Pd,QT,RT,HT,vo,VT,qd,YT,UT,Gr,tu,Pt,To,Cd,Zr,GT,xd,ZT,ou,Ae,Kr,KT,Xr,XT,jd,JT,eF,nF,Jr,tF,ea,oF,sF,rF,na,aF,zi,iF,lF,dF,ta,cF,oa,uF,pF,hF,Fo,fF,ln,sa,mF,qt,gF,Pi,_F,vF,Ld,TF,FF,kF,ko,wF,Ad,bF,yF,ra,su,Ct,wo,Dd,aa,$F,Id,EF,ru,De,ia,MF,Od,zF,PF,la,qF,da,CF,xF,jF,ca,LF,qi,AF,DF,IF,ua,OF,pa,SF,NF,BF,bo,WF,dn,ha,QF,xt,RF,Ci,HF,VF,Sd,YF,UF,GF,yo,ZF,Nd,KF,XF,fa,au,jt,$o,Bd,ma,JF,Wd,ek,iu,Ie,ga,nk,Qd,tk,ok,_a,sk,va,rk,ak,ik,Ta,lk,xi,dk,ck,uk,Fa,pk,ka,hk,fk,mk,Eo,gk,cn,wa,_k,Lt,vk,ji,Tk,Fk,Rd,kk,wk,bk,Mo,yk,Hd,$k,Ek,ba,lu,At,zo
,Vd,ya,Mk,Yd,zk,du,Oe,$a,Pk,Ud,qk,Ck,Ea,xk,Ma,jk,Lk,Ak,za,Dk,Li,Ik,Ok,Sk,Pa,Nk,qa,Bk,Wk,Qk,Po,Rk,un,Ca,Hk,Dt,Vk,Ai,Yk,Uk,Gd,Gk,Zk,Kk,qo,Xk,Zd,Jk,e1,xa,cu,It,Co,Kd,ja,n1,Xd,t1,uu,Se,La,o1,Ot,s1,Jd,r1,a1,ec,i1,l1,d1,Aa,c1,Da,u1,p1,h1,Ia,f1,Di,m1,g1,_1,Oa,v1,Sa,T1,F1,k1,xo,w1,pn,Na,b1,St,y1,Ii,$1,E1,nc,M1,z1,P1,jo,q1,tc,C1,x1,Ba,pu;return v=new Pe({}),ne=new Pe({}),So=new Pe({}),No=new ee({props:{name:"class transformers.FunnelConfig",anchor:"transformers.FunnelConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"block_sizes",val:" = [4, 4, 4]"},{name:"block_repeats",val:" = None"},{name:"num_decoder_layers",val:" = 2"},{name:"d_model",val:" = 768"},{name:"n_head",val:" = 12"},{name:"d_head",val:" = 64"},{name:"d_inner",val:" = 3072"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.0"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 3"},{name:"initializer_range",val:" = 0.1"},{name:"initializer_std",val:" = None"},{name:"layer_norm_eps",val:" = 1e-09"},{name:"pooling_type",val:" = 'mean'"},{name:"attention_type",val:" = 'relative_shift'"},{name:"separate_cls",val:" = True"},{name:"truncate_seq",val:" = True"},{name:"pool_q_only",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/configuration_funnel.py#L37",parametersDescription:[{anchor:"transformers.FunnelConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the Funnel transformer. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel">FunnelModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelModel">TFFunnelModel</a>.`,name:"vocab_size"},{anchor:"transformers.FunnelConfig.block_sizes",description:`<strong>block_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[4, 4, 4]</code>) &#x2014; The sizes of the blocks used in the model.`,name:"block_sizes"},{anchor:"transformers.FunnelConfig.block_repeats",description:`<strong>block_repeats</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; If passed along, each layer of each block is repeated the number of times indicated.`,name:"block_repeats"},{anchor:"transformers.FunnelConfig.num_decoder_layers",description:`<strong>num_decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The number of layers in the decoder (when not using the base model).`,name:"num_decoder_layers"},{anchor:"transformers.FunnelConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the model&#x2019;s hidden states.`,name:"d_model"},{anchor:"transformers.FunnelConfig.n_head",description:`<strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"n_head"},{anchor:"transformers.FunnelConfig.d_head",description:`<strong>d_head</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Dimensionality of the model&#x2019;s heads.`,name:"d_head"},{anchor:"transformers.FunnelConfig.d_inner",description:`<strong>d_inner</strong> (<code>int</code>, <em>optional</em>, 
defaults to 3072) &#x2014; Inner dimension in the feed-forward blocks.`,name:"d_inner"},{anchor:"transformers.FunnelConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>callable</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.FunnelConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.FunnelConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.FunnelConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability used between the two layers of the feed-forward blocks.`,name:"activation_dropout"},{anchor:"transformers.FunnelConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.FunnelConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel">FunnelModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelModel">TFFunnelModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.FunnelConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The standard deviation of the <em>uniform initializer</em> for initializing all weight matrices in attention layers.`,name:"initializer_range"},{anchor:"transformers.FunnelConfig.initializer_std",description:`<strong>initializer_std</strong> (<code>float</code>, <em>optional</em>) &#x2014; The standard deviation of the <em>normal initializer</em> for initializing the embedding matrix and the weight of linear layers. Will default to 1 for the embedding matrix and the value given by Xavier initialization for linear layers.`,name:"initializer_std"},{anchor:"transformers.FunnelConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-9) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.FunnelConfig.pooling_type",description:`<strong>pooling_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;mean&quot;</code>) &#x2014; Possible values are <code>&quot;mean&quot;</code> or <code>&quot;max&quot;</code>. 
The way pooling is performed at the beginning of each block.`,name:"pooling_type"},{anchor:"transformers.FunnelConfig.attention_type",description:`<strong>attention_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;relative_shift&quot;</code>) &#x2014; Possible values are <code>&quot;relative_shift&quot;</code> or <code>&quot;factorized&quot;</code>. The former is faster on CPU/GPU while the latter is faster on TPU.`,name:"attention_type"},{anchor:"transformers.FunnelConfig.separate_cls",description:`<strong>separate_cls</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to separate the cls token when applying pooling.`,name:"separate_cls"},{anchor:"transformers.FunnelConfig.truncate_seq",description:`<strong>truncate_seq</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When using <code>separate_cls</code>, whether or not to truncate the last token when pooling, to avoid getting a sequence length that is not a multiple of 2.`,name:"truncate_seq"},{anchor:"transformers.FunnelConfig.pool_q_only",description:`<strong>pool_q_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to apply the pooling only to the query or to query, key and values for the attention layers.`,name:"pool_q_only"}]}}),Wo=new Pe({}),Qo=new ee({props:{name:"class transformers.FunnelTokenizer",anchor:"transformers.FunnelTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '<sep>'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '<cls>'"},{name:"mask_token",val:" = '<mask>'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/tokenization_funnel.py#L58"}}),Ho=new ee({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L247",parametersDescription:[{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Yo=new ee({props:{name:"get_special_tokens_mask",anchor:"transformers.BertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert/tokenization_bert.py#L272",parametersDescription:[{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Go=new ee({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FunnelTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/tokenization_funnel.py#L109",parametersDescription:[{anchor:"transformers.FunnelTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FunnelTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Zo=new Ne({props:{code:`2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`2<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Ko=new Pe({}),Xo=new ee({props:{name:"class transformers.FunnelTokenizerFast",anchor:"transformers.FunnelTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '<sep>'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '<cls>'"},{name:"mask_token",val:" = '<mask>'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"clean_text",val:" = True"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"wordpieces_prefix",val:" = '##'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/tokenization_funnel_fast.py#L71"}}),ns=new 
ee({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FunnelTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/tokenization_funnel_fast.py#L125",parametersDescription:[{anchor:"transformers.FunnelTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FunnelTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),ts=new Ne({props:{code:`2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |,`,highlighted:`2<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),os=new Pe({}),ss=new ee({props:{name:"class transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput",anchor:"transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L801",parametersDescription:[{anchor:"transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss of the ELECTRA-style objective.`,name:"loss"},{anchor:"transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Prediction scores of the head (scores for each token before SoftMax).`,name:"logits"},{anchor:"transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput.attentions",description:`<strong>attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),as=new ee({props:{name:"class transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput",anchor:"transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput",parameters:[{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1005",parametersDescription:[{anchor:"transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Prediction scores of the head (scores for each token before SoftMax).`,name:"logits"},{anchor:"transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),ls=new Pe({}),ds=new ee({props:{name:"class transformers.FunnelBaseModel",anchor:"transformers.FunnelBaseModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L894",parametersDescription:[{anchor:"transformers.FunnelBaseModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ms=new ee({props:{name:"forward",anchor:"transformers.FunnelBaseModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L910",parametersDescription:[{anchor:"transformers.FunnelBaseModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelBaseModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelBaseModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelBaseModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelBaseModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelBaseModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelBaseModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ut=new ze({props:{$$slots:{default:[by]},$$scope:{ctx:W}}}),gs=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelBaseModel import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = FunnelBaseModel.from_pretrained('funnel-transformer/small-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelBaseModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span 
class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelBaseModel.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),_s=new Pe({}),vs=new ee({props:{name:"class transformers.FunnelModel",anchor:"transformers.FunnelModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L971",parametersDescription:[{anchor:"transformers.FunnelModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ys=new ee({props:{name:"forward",anchor:"transformers.FunnelModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L988",parametersDescription:[{anchor:"transformers.FunnelModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Zt=new ze({props:{$$slots:{default:[yy]},$$scope:{ctx:W}}}),$s=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelModel import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = FunnelModel.from_pretrained('funnel-transformer/small') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelModel.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Es=new Pe({}),zs=new ee({props:{name:"forward",anchor:"transformers.FunnelForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1088",parametersDescription:[{anchor:"transformers.FunnelForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FunnelForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the ELECTRA-style loss. 
Input should be a sequence of tokens (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates the token is an original token,</li> <li>1 indicates the token was replaced.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput" >transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss of the ELECTRA-style objective.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Prediction scores of the head (scores for each token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput" >transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xt=new ze({props:{$$slots:{default:[$y]},$$scope:{ctx:W}}}),Ps=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForPreTraining import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = FunnelForPreTraining.from_pretrained('funnel-transformer/small') inputs = tokenizer("Hello, my dog is cute", return_tensors= "pt") logits = model(**inputs).logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = 
tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors= <span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits`}}),qs=new Pe({}),Cs=new ee({props:{name:"class transformers.FunnelForMaskedLM",anchor:"transformers.FunnelForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1162",parametersDescription:[{anchor:"transformers.FunnelForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Os=new ee({props:{name:"forward",anchor:"transformers.FunnelForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1178",parametersDescription:[{anchor:"transformers.FunnelForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FunnelForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),eo=new ze({props:{$$slots:{default:[Ey]},$$scope:{ctx:W}}}),Ss=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForMaskedLM import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = FunnelForMaskedLM.from_pretrained('funnel-transformer/small') inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForMaskedLM.from_pretrained(<span 
class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ns=new Pe({}),Bs=new ee({props:{name:"class transformers.FunnelForSequenceClassification",anchor:"transformers.FunnelForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1241",parametersDescription:[{anchor:"transformers.FunnelForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ys=new ee({props:{name:"forward",anchor:"transformers.FunnelForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1252",parametersDescription:[{anchor:"transformers.FunnelForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FunnelForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),to=new ze({props:{$$slots:{default:[My]},$$scope:{ctx:W}}}),Us=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForSequenceClassification import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = FunnelForSequenceClassification.from_pretrained('funnel-transformer/small-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Gs=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForSequenceClassification import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = FunnelForSequenceClassification.from_pretrained('funnel-transformer/small-base', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Zs=new Pe({}),Ks=new ee({props:{name:"class transformers.FunnelForMultipleChoice",anchor:"transformers.FunnelForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1333",parametersDescription:[{anchor:"transformers.FunnelForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),or=new ee({props:{name:"forward",anchor:"transformers.FunnelForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1342",parametersDescription:[{anchor:"transformers.FunnelForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FunnelForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),so=new ze({props:{$$slots:{default:[zy]},$$scope:{ctx:W}}}),sr=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForMultipleChoice import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = FunnelForMultipleChoice.from_pretrained('funnel-transformer/small-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True) outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k,v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),rr=new Pe({}),ar=new ee({props:{name:"class transformers.FunnelForTokenClassification",anchor:"transformers.FunnelForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1416",parametersDescription:[{anchor:"transformers.FunnelForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),pr=new ee({props:{name:"forward",anchor:"transformers.FunnelForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1428",parametersDescription:[{anchor:"transformers.FunnelForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FunnelForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ao=new ze({props:{$$slots:{default:[Py]},$$scope:{ctx:W}}}),hr=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForTokenClassification import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = 
FunnelForTokenClassification.from_pretrained('funnel-transformer/small') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),fr=new Pe({}),mr=new ee({props:{name:"class transformers.FunnelForQuestionAnswering",anchor:"transformers.FunnelForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1499",parametersDescription:[{anchor:"transformers.FunnelForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig">FunnelConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kr=new ee({props:{name:"forward",anchor:"transformers.FunnelForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_funnel.py#L1510",parametersDescription:[{anchor:"transformers.FunnelForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FunnelForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FunnelForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FunnelForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FunnelForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FunnelForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FunnelForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FunnelForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.FunnelForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),lo=new ze({props:{$$slots:{default:[qy]},$$scope:{ctx:W}}}),wr=new Ne({props:{code:`from transformers import FunnelTokenizer, FunnelForQuestionAnswering import torch tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = FunnelForQuestionAnswering.from_pretrained('funnel-transformer/small') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, 
start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, FunnelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FunnelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),br=new Pe({}),yr=new ee({props:{name:"class transformers.TFFunnelBaseModel",anchor:"transformers.TFFunnelBaseModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1122",parametersDescription:[{anchor:"transformers.TFFunnelBaseModel.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),uo=new ze({props:{$$slots:{default:[Cy]},$$scope:{ctx:W}}}),qr=new ee({props:{name:"call",anchor:"transformers.TFFunnelBaseModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1127",parametersDescription:[{anchor:"transformers.TFFunnelBaseModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelBaseModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelBaseModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelBaseModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelBaseModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelBaseModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelBaseModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelBaseModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),po=new ze({props:{$$slots:{default:[xy]},$$scope:{ctx:W}}}),Cr=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelBaseModel import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = TFFunnelBaseModel.from_pretrained('funnel-transformer/small-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelBaseModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelBaseModel.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),xr=new Pe({}),jr=new ee({props:{name:"class transformers.TFFunnelModel",anchor:"transformers.TFFunnelModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1183",parametersDescription:[{anchor:"transformers.TFFunnelModel.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fo=new ze({props:{$$slots:{default:[jy]},$$scope:{ctx:W}}}),Sr=new ee({props:{name:"call",anchor:"transformers.TFFunnelModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1188",parametersDescription:[{anchor:"transformers.TFFunnelModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),mo=new ze({props:{$$slots:{default:[Ly]},$$scope:{ctx:W}}}),Nr=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelModel import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = TFFunnelModel.from_pretrained('funnel-transformer/small') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelModel.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog 
is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Br=new Pe({}),Wr=new ee({props:{name:"class transformers.TFFunnelForPreTraining",anchor:"transformers.TFFunnelForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1246",parametersDescription:[{anchor:"transformers.TFFunnelForPreTraining.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),_o=new ze({props:{$$slots:{default:[Ay]},$$scope:{ctx:W}}}),Ur=new ee({props:{name:"call",anchor:"transformers.TFFunnelForPreTraining.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1253",parametersDescription:[{anchor:"transformers.TFFunnelForPreTraining.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelForPreTraining.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelForPreTraining.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelForPreTraining.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput" >transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Prediction scores of the head (scores for each token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput" >transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),vo=new ze({props:{$$slots:{default:[Dy]},$$scope:{ctx:W}}}),Gr=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelForPreTraining import torch tokenizer = TFFunnelTokenizer.from_pretrained('funnel-transformer/small') model = TFFunnelForPreTraining.from_pretrained('funnel-transformer/small') inputs = tokenizer("Hello, my dog is cute", return_tensors= "tf") logits = model(inputs).logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = TFFunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelForPreTraining.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors= <span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(inputs).logits`}}),Zr=new Pe({}),Kr=new ee({props:{name:"class transformers.TFFunnelForMaskedLM",anchor:"transformers.TFFunnelForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1325",parametersDescription:[{anchor:"transformers.TFFunnelForMaskedLM.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fo=new ze({props:{$$slots:{default:[Iy]},$$scope:{ctx:W}}}),sa=new ee({props:{name:"call",anchor:"transformers.TFFunnelForMaskedLM.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1339",parametersDescription:[{anchor:"transformers.TFFunnelForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFFunnelForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ko=new ze({props:{$$slots:{default:[Oy]},$$scope:{ctx:W}}}),ra=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelForMaskedLM import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = TFFunnelForMaskedLM.from_pretrained('funnel-transformer/small') inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>model = TFFunnelForMaskedLM.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),aa=new Pe({}),ia=new ee({props:{name:"class transformers.TFFunnelForSequenceClassification",anchor:"transformers.TFFunnelForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1419",parametersDescription:[{anchor:"transformers.TFFunnelForSequenceClassification.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bo=new ze({props:{$$slots:{default:[Sy]},$$scope:{ctx:W}}}),ha=new ee({props:{name:"call",anchor:"transformers.TFFunnelForSequenceClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1427",parametersDescription:[{anchor:"transformers.TFFunnelForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFFunnelForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),yo=new ze({props:{$$slots:{default:[Ny]},$$scope:{ctx:W}}}),fa=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelForSequenceClassification import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = TFFunnelForSequenceClassification.from_pretrained('funnel-transformer/small-base') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelForSequenceClassification.from_pretrained(<span 
class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ma=new Pe({}),ga=new ee({props:{name:"class transformers.TFFunnelForMultipleChoice",anchor:"transformers.TFFunnelForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1508",parametersDescription:[{anchor:"transformers.TFFunnelForMultipleChoice.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Eo=new ze({props:{$$slots:{default:[By]},$$scope:{ctx:W}}}),wa=new ee({props:{name:"call",anchor:"transformers.TFFunnelForMultipleChoice.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1525",parametersDescription:[{anchor:"transformers.TFFunnelForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFFunnelForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Mo=new ze({props:{$$slots:{default:[Wy]},$$scope:{ctx:W}}}),ba=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelForMultipleChoice import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small-base') model = TFFunnelForMultipleChoice.from_pretrained('funnel-transformer/small-base') prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='tf', padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelForMultipleChoice.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ya=new Pe({}),$a=new ee({props:{name:"class transformers.TFFunnelForTokenClassification",anchor:"transformers.TFFunnelForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1643",parametersDescription:[{anchor:"transformers.TFFunnelForTokenClassification.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Po=new ze({props:{$$slots:{default:[Qy]},$$scope:{ctx:W}}}),Ca=new ee({props:{name:"call",anchor:"transformers.TFFunnelForTokenClassification.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1654",parametersDescription:[{anchor:"transformers.TFFunnelForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFFunnelForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),qo=new ze({props:{$$slots:{default:[Ry]},$$scope:{ctx:W}}}),xa=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelForTokenClassification import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = TFFunnelForTokenClassification.from_pretrained('funnel-transformer/small') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelForTokenClassification.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids))) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ja=new Pe({}),La=new ee({props:{name:"class transformers.TFFunnelForQuestionAnswering",anchor:"transformers.TFFunnelForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1735",parametersDescription:[{anchor:"transformers.TFFunnelForQuestionAnswering.config",description:`<strong>config</strong> (<code>XxxConfig</code>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),xo=new ze({props:{$$slots:{default:[Hy]},$$scope:{ctx:W}}}),Na=new ee({props:{name:"call",anchor:"transformers.TFFunnelForQuestionAnswering.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/funnel/modeling_tf_funnel.py#L1745",parametersDescription:[{anchor:"transformers.TFFunnelForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer">FunnelTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFFunnelForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelConfig" >FunnelConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),jo=new ze({props:{$$slots:{default:[Vy]},$$scope:{ctx:W}}}),Ba=new Ne({props:{code:`from transformers import FunnelTokenizer, TFFunnelForQuestionAnswering import tensorflow as tf tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') model = TFFunnelForQuestionAnswering.from_pretrained('funnel-transformer/small') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors='tf') outputs = model(input_dict) start_logits = outputs.start_logits end_logits = 
outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FunnelTokenizer, TFFunnelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FunnelTokenizer.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFFunnelForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;funnel-transformer/small&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;tf&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&#x27; &#x27;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]+<span class="hljs-number">1</span>])`}}),{c(){p=s("meta"),M=l(),m=s("h1"),g=s("a"),T=s("span"),k(v.$$.fragment),_=l(),z=s("span"),ce=t("Funnel Transformer"),G=l(),P=s("h2"),X=s("a"),I=s("span"),k(ne.$$.fragment),ue=l(),O=s("span"),pe=t("Overview"),ie=l(),U=s("p"),L=t("The Funnel Transformer model was proposed in the paper "),te=s("a"),Z=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),q=t(`. It is a bidirectional transformer model, like BERT, but with a pooling operation after each block of layers, a bit like in traditional convolutional neural networks (CNN) in computer vision.`),x=l(),oe=s("p"),Q=t("The abstract from the paper is the following:"),le=l(),se=s("p"),S=s("em"),he=t(`With the success of language pretraining, it is highly desirable to develop more efficient architectures of good scalability that can exploit the abundant unlabeled data at a lower cost. To improve the efficiency, we examine the much-overlooked redundancy in maintaining a full-length token-level presentation, especially for tasks that only require a single-vector presentation of the sequence. With this intuition, we propose Funnel-Transformer which gradually compresses the sequence of hidden states to a shorter one and hence reduces the computation cost. More importantly, by re-investing the saved FLOPs from length reduction in constructing a deeper or wider model, we further improve the model capacity. 
In addition, to perform token-level predictions as required by common pretraining objectives, Funnel-Transformer is able to recover a deep representation for each token from the reduced hidden sequence via a decoder. Empirically, with comparable or fewer FLOPs, Funnel-Transformer outperforms the standard Transformer on a wide variety of sequence-level prediction tasks, including text classification, language understanding, and reading comprehension.`),de=l(),C=s("p"),fe=t("Tips:"),B=l(),J=s("ul"),ae=s("li"),R=t(`Since Funnel Transformer uses pooling, the sequence length of the hidden states changes after each block of layers. The base model therefore has a final sequence length that is a quarter of the original one. This model can be used directly for tasks that just require a sentence summary (like sequence classification or multiple choice). For other tasks, the full model is used; this full model has a decoder that upsamples the final hidden states to the same sequence length as the input.`),me=l(),N=s("li"),A=t(`The Funnel Transformer checkpoints are all available with a full version and a base version. The first ones should be used for `),re=s("a"),H=t("FunnelModel"),ge=t(", "),u=s("a"),F=t("FunnelForPreTraining"),K=t(`, `),ve=s("a"),we=t("FunnelForMaskedLM"),D=t(", "),Te=s("a"),be=t("FunnelForTokenClassification"),ye=t(` and class:`),j=s("em"),V=t("~transformers.FunnelForQuestionAnswering"),$e=t(`. The second ones should be used for `),Fe=s("a"),Y=t("FunnelBaseModel"),Ee=t(", "),ke=s("a"),_e=t("FunnelForSequenceClassification"),Me=t(` and `),Va=s("a"),Dp=t("FunnelForMultipleChoice"),Ip=t("."),yc=l(),jn=s("p"),Op=t("This model was contributed by "),Io=s("a"),Sp=t("sgugger"),Np=t(". The original code can be found "),Oo=s("a"),Bp=t("here"),Wp=t("."),$c=l(),Kn=s("h2"),Nt=s("a"),ll=s("span"),k(So.$$.fragment),Qp=l(),dl=s("span"),Rp=t("FunnelConfig"),Ec=l(),Cn=s("div"),k(No.$$.fragment),Hp=l(),xn=s("p"),Vp=t("This is the configuration class to store the configuration of a "),Ya=s("a"),Yp=t("FunnelModel"),Up=t(` or a `),Ua=s("a"),Gp=t("TFBertModel"),Zp=t(`. It is used to instantiate a Funnel Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel Transformer `),Bo=s("a"),Kp=t("funnel-transformer/small"),Xp=t(" architecture."),Jp=l(),Xn=s("p"),eh=t("Configuration objects inherit from "),Ga=s("a"),nh=t("PretrainedConfig"),th=t(` and can be used to control the model outputs. Read the documentation from `),Za=s("a"),oh=t("PretrainedConfig"),sh=t(" for more information."),Mc=l(),Jn=s("h2"),Bt=s("a"),cl=s("span"),k(Wo.$$.fragment),rh=l(),ul=s("span"),ah=t("FunnelTokenizer"),zc=l(),qe=s("div"),k(Qo.$$.fragment),ih=l(),pl=s("p"),lh=t("Construct a Funnel Transformer tokenizer."),dh=l(),Wt=s("p"),Ka=s("a"),ch=t("FunnelTokenizer"),uh=t(" is identical to "),Xa=s("a"),ph=t("BertTokenizer"),hh=t(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),fh=l(),Ro=s("p"),mh=t("Refer to superclass "),Ja=s("a"),gh=t("BertTokenizer"),_h=t(` for usage examples and documentation concerning parameters.`),vh=l(),Ln=s("div"),k(Ho.$$.fragment),Th=l(),hl=s("p"),Fh=t(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A BERT sequence has the following format:`),kh=l(),Vo=s("ul"),ei=s("li"),wh=t("single sequence: "),fl=s("code"),bh=t("[CLS] X [SEP]"),yh=l(),ni=s("li"),$h=t("pair of sequences: "),ml=s("code"),Eh=t("[CLS] A [SEP] B [SEP]"),Mh=l(),Qt=s("div"),k(Yo.$$.fragment),zh=l(),Uo=s("p"),Ph=t(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),gl=s("code"),qh=t("prepare_for_model"),Ch=t(" method."),xh=l(),wn=s("div"),k(Go.$$.fragment),jh=l(),_l=s("p"),Lh=t(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel Transformer sequence pair mask has the following format:`),Ah=l(),k(Zo.$$.fragment),Dh=l(),et=s("p"),Ih=t("If "),vl=s("code"),Oh=t("token_ids_1"),Sh=t(" is "),Tl=s("code"),Nh=t("None"),Bh=t(", this method only returns the first portion of the mask (0s)."),Wh=l(),Fl=s("div"),Pc=l(),nt=s("h2"),Rt=s("a"),kl=s("span"),k(Ko.$$.fragment),Qh=l(),wl=s("span"),Rh=t("FunnelTokenizerFast"),qc=l(),Ze=s("div"),k(Xo.$$.fragment),Hh=l(),Jo=s("p"),Vh=t("Construct a \u201Cfast\u201D Funnel Transformer tokenizer (backed by HuggingFace\u2019s "),bl=s("em"),Yh=t("tokenizers"),Uh=t(" library)."),Gh=l(),Ht=s("p"),ti=s("a"),Zh=t("FunnelTokenizerFast"),Kh=t(" is identical to "),oi=s("a"),Xh=t("BertTokenizerFast"),Jh=t(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),ef=l(),es=s("p"),nf=t("Refer to superclass "),si=s("a"),tf=t("BertTokenizerFast"),of=t(` for usage examples and documentation concerning parameters.`),sf=l(),bn=s("div"),k(ns.$$.fragment),rf=l(),yl=s("p"),af=t(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel Transformer sequence pair mask has the following format:`),lf=l(),k(ts.$$.fragment),df=l(),tt=s("p"),cf=t("If "),$l=s("code"),uf=t("token_ids_1"),pf=t(" is "),El=s("code"),hf=t("None"),ff=t(", this method only returns the first portion of the mask (0s)."),Cc=l(),ot=s("h2"),Vt=s("a"),Ml=s("span"),k(os.$$.fragment),mf=l(),zl=s("span"),gf=t("Funnel specific outputs"),xc=l(),st=s("div"),k(ss.$$.fragment),_f=l(),rs=s("p"),vf=t("Output type of "),ri=s("a"),Tf=t("FunnelForPreTraining"),Ff=t("."),jc=l(),rt=s("div"),k(as.$$.fragment),kf=l(),is=s("p"),wf=t("Output type of "),ai=s("a"),bf=t("FunnelForPreTraining"),yf=t("."),Lc=l(),at=s("h2"),Yt=s("a"),Pl=s("span"),k(ls.$$.fragment),$f=l(),ql=s("span"),Ef=t("FunnelBaseModel"),Ac=l(),We=s("div"),k(ds.$$.fragment),Mf=l(),Cl=s("p"),zf=t(`The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top.`),Pf=l(),cs=s("p"),qf=t("The Funnel Transformer model was proposed in "),us=s("a"),Cf=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),xf=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),jf=l(),ps=s("p"),Lf=t("This model inherits from "),ii=s("a"),Af=t("PreTrainedModel"),Df=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),If=l(),hs=s("p"),Of=t("This model is also a PyTorch "),fs=s("a"),Sf=t("torch.nn.Module"),Nf=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bf=l(),Ke=s("div"),k(ms.$$.fragment),Wf=l(),it=s("p"),Qf=t("The "),li=s("a"),Rf=t("FunnelBaseModel"),Hf=t(" forward method, overrides the "),xl=s("code"),Vf=t("__call__"),Yf=t(" special method."),Uf=l(),k(Ut.$$.fragment),Gf=l(),jl=s("p"),Zf=t("Example:"),Kf=l(),k(gs.$$.fragment),Dc=l(),lt=s("h2"),Gt=s("a"),Ll=s("span"),k(_s.$$.fragment),Xf=l(),Al=s("span"),Jf=t("FunnelModel"),Ic=l(),Qe=s("div"),k(vs.$$.fragment),em=l(),Dl=s("p"),nm=t("The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top."),tm=l(),Ts=s("p"),om=t("The Funnel Transformer model was proposed in "),Fs=s("a"),sm=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),rm=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),am=l(),ks=s("p"),im=t("This model inherits from "),di=s("a"),lm=t("PreTrainedModel"),dm=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cm=l(),ws=s("p"),um=t("This model is also a PyTorch "),bs=s("a"),pm=t("torch.nn.Module"),hm=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fm=l(),Xe=s("div"),k(ys.$$.fragment),mm=l(),dt=s("p"),gm=t("The "),ci=s("a"),_m=t("FunnelModel"),vm=t(" forward method, overrides the "),Il=s("code"),Tm=t("__call__"),Fm=t(" special method."),km=l(),k(Zt.$$.fragment),wm=l(),Ol=s("p"),bm=t("Example:"),ym=l(),k($s.$$.fragment),Oc=l(),ct=s("h2"),Kt=s("a"),Sl=s("span"),k(Es.$$.fragment),$m=l(),Nl=s("span"),Em=t("FunnelModelForPreTraining"),Sc=l(),Ms=s("div"),Je=s("div"),k(zs.$$.fragment),Mm=l(),ut=s("p"),zm=t("The "),ui=s("a"),Pm=t("FunnelForPreTraining"),qm=t(" forward method, overrides the "),Bl=s("code"),Cm=t("__call__"),xm=t(" special method."),jm=l(),k(Xt.$$.fragment),Lm=l(),Wl=s("p"),Am=t("Examples:"),Dm=l(),k(Ps.$$.fragment),Nc=l(),pt=s("h2"),Jt=s("a"),Ql=s("span"),k(qs.$$.fragment),Im=l(),Rl=s("span"),Om=t("FunnelForMaskedLM"),Bc=l(),Re=s("div"),k(Cs.$$.fragment),Sm=l(),xs=s("p"),Nm=t("Funnel Transformer Model with a "),Hl=s("code"),Bm=t("language modeling"),Wm=t(" head on top."),Qm=l(),js=s("p"),Rm=t("The Funnel Transformer model was proposed in "),Ls=s("a"),Hm=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Vm=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Ym=l(),As=s("p"),Um=t("This model inherits from "),pi=s("a"),Gm=t("PreTrainedModel"),Zm=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Km=l(),Ds=s("p"),Xm=t("This model is also a PyTorch "),Is=s("a"),Jm=t("torch.nn.Module"),eg=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ng=l(),en=s("div"),k(Os.$$.fragment),tg=l(),ht=s("p"),og=t("The "),hi=s("a"),sg=t("FunnelForMaskedLM"),rg=t(" forward method, overrides the "),Vl=s("code"),ag=t("__call__"),ig=t(" special method."),lg=l(),k(eo.$$.fragment),dg=l(),Yl=s("p"),cg=t("Example:"),ug=l(),k(Ss.$$.fragment),Wc=l(),ft=s("h2"),no=s("a"),Ul=s("span"),k(Ns.$$.fragment),pg=l(),Gl=s("span"),hg=t("FunnelForSequenceClassification"),Qc=l(),He=s("div"),k(Bs.$$.fragment),fg=l(),Zl=s("p"),mg=t(`Funnel Transformer Model with a sequence classification/regression head on top (two linear layer on top of the first timestep of the last hidden state) e.g. for GLUE tasks.`),gg=l(),Ws=s("p"),_g=t("The Funnel Transformer model was proposed in "),Qs=s("a"),vg=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Tg=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Fg=l(),Rs=s("p"),kg=t("This model inherits from "),fi=s("a"),wg=t("PreTrainedModel"),bg=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yg=l(),Hs=s("p"),$g=t("This model is also a PyTorch "),Vs=s("a"),Eg=t("torch.nn.Module"),Mg=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zg=l(),Be=s("div"),k(Ys.$$.fragment),Pg=l(),mt=s("p"),qg=t("The "),mi=s("a"),Cg=t("FunnelForSequenceClassification"),xg=t(" forward method, overrides the "),Kl=s("code"),jg=t("__call__"),Lg=t(" special method."),Ag=l(),k(to.$$.fragment),Dg=l(),Xl=s("p"),Ig=t("Example of single-label classification:"),Og=l(),k(Us.$$.fragment),Sg=l(),Jl=s("p"),Ng=t("Example of multi-label classification:"),Bg=l(),k(Gs.$$.fragment),Rc=l(),gt=s("h2"),oo=s("a"),ed=s("span"),k(Zs.$$.fragment),Wg=l(),nd=s("span"),Qg=t("FunnelForMultipleChoice"),Hc=l(),Ve=s("div"),k(Ks.$$.fragment),Rg=l(),td=s("p"),Hg=t(`Funnel Transformer Model with a multiple choice classification head on top (two linear layer on top of the first timestep of the last hidden state, and a softmax) e.g. for RocStories/SWAG tasks.`),Vg=l(),Xs=s("p"),Yg=t("The Funnel Transformer model was proposed in "),Js=s("a"),Ug=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Gg=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Zg=l(),er=s("p"),Kg=t("This model inherits from "),gi=s("a"),Xg=t("PreTrainedModel"),Jg=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),e_=l(),nr=s("p"),n_=t("This model is also a PyTorch "),tr=s("a"),t_=t("torch.nn.Module"),o_=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),s_=l(),nn=s("div"),k(or.$$.fragment),r_=l(),_t=s("p"),a_=t("The "),_i=s("a"),i_=t("FunnelForMultipleChoice"),l_=t(" forward method, overrides the "),od=s("code"),d_=t("__call__"),c_=t(" special method."),u_=l(),k(so.$$.fragment),p_=l(),sd=s("p"),h_=t("Example:"),f_=l(),k(sr.$$.fragment),Vc=l(),vt=s("h2"),ro=s("a"),rd=s("span"),k(rr.$$.fragment),m_=l(),ad=s("span"),g_=t("FunnelForTokenClassification"),Yc=l(),Ye=s("div"),k(ar.$$.fragment),__=l(),id=s("p"),v_=t(`Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),T_=l(),ir=s("p"),F_=t("The Funnel Transformer model was proposed in "),lr=s("a"),k_=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),w_=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),b_=l(),dr=s("p"),y_=t("This model inherits from "),vi=s("a"),$_=t("PreTrainedModel"),E_=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),M_=l(),cr=s("p"),z_=t("This model is also a PyTorch "),ur=s("a"),P_=t("torch.nn.Module"),q_=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),C_=l(),tn=s("div"),k(pr.$$.fragment),x_=l(),Tt=s("p"),j_=t("The "),Ti=s("a"),L_=t("FunnelForTokenClassification"),A_=t(" forward method, overrides the "),ld=s("code"),D_=t("__call__"),I_=t(" special method."),O_=l(),k(ao.$$.fragment),S_=l(),dd=s("p"),N_=t("Example:"),B_=l(),k(hr.$$.fragment),Uc=l(),Ft=s("h2"),io=s("a"),cd=s("span"),k(fr.$$.fragment),W_=l(),ud=s("span"),Q_=t("FunnelForQuestionAnswering"),Gc=l(),Ue=s("div"),k(mr.$$.fragment),R_=l(),kt=s("p"),H_=t(`Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),pd=s("code"),V_=t("span start logits"),Y_=t(" and "),hd=s("code"),U_=t("span end logits"),G_=t(")."),Z_=l(),gr=s("p"),K_=t("The Funnel Transformer model was proposed in "),_r=s("a"),X_=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),J_=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),ev=l(),vr=s("p"),nv=t("This model inherits from "),Fi=s("a"),tv=t("PreTrainedModel"),ov=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sv=l(),Tr=s("p"),rv=t("This model is also a PyTorch "),Fr=s("a"),av=t("torch.nn.Module"),iv=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),lv=l(),on=s("div"),k(kr.$$.fragment),dv=l(),wt=s("p"),cv=t("The "),ki=s("a"),uv=t("FunnelForQuestionAnswering"),pv=t(" forward method, overrides the "),fd=s("code"),hv=t("__call__"),fv=t(" special method."),mv=l(),k(lo.$$.fragment),gv=l(),md=s("p"),_v=t("Example:"),vv=l(),k(wr.$$.fragment),Zc=l(),bt=s("h2"),co=s("a"),gd=s("span"),k(br.$$.fragment),Tv=l(),_d=s("span"),Fv=t("TFFunnelBaseModel"),Kc=l(),xe=s("div"),k(yr.$$.fragment),kv=l(),vd=s("p"),wv=t(`The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top.`),bv=l(),$r=s("p"),yv=t("The Funnel Transformer model was proposed in "),Er=s("a"),$v=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Ev=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Mv=l(),Mr=s("p"),zv=t("This model inherits from "),wi=s("a"),Pv=t("TFPreTrainedModel"),qv=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cv=l(),zr=s("p"),xv=t("This model is also a "),Pr=s("a"),jv=t("tf.keras.Model"),Lv=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Av=l(),k(uo.$$.fragment),Dv=l(),sn=s("div"),k(qr.$$.fragment),Iv=l(),yt=s("p"),Ov=t("The "),bi=s("a"),Sv=t("TFFunnelBaseModel"),Nv=t(" forward method, overrides the "),Td=s("code"),Bv=t("__call__"),Wv=t(" special method."),Qv=l(),k(po.$$.fragment),Rv=l(),Fd=s("p"),Hv=t("Example:"),Vv=l(),k(Cr.$$.fragment),Xc=l(),$t=s("h2"),ho=s("a"),kd=s("span"),k(xr.$$.fragment),Yv=l(),wd=s("span"),Uv=t("TFFunnelModel"),Jc=l(),je=s("div"),k(jr.$$.fragment),Gv=l(),bd=s("p"),Zv=t("The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top."),Kv=l(),Lr=s("p"),Xv=t("The Funnel Transformer model was proposed in "),Ar=s("a"),Jv=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),eT=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),nT=l(),Dr=s("p"),tT=t("This model inherits from "),yi=s("a"),oT=t("TFPreTrainedModel"),sT=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rT=l(),Ir=s("p"),aT=t("This model is also a "),Or=s("a"),iT=t("tf.keras.Model"),lT=t(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),dT=l(),k(fo.$$.fragment),cT=l(),rn=s("div"),k(Sr.$$.fragment),uT=l(),Et=s("p"),pT=t("The "),$i=s("a"),hT=t("TFFunnelModel"),fT=t(" forward method, overrides the "),yd=s("code"),mT=t("__call__"),gT=t(" special method."),_T=l(),k(mo.$$.fragment),vT=l(),$d=s("p"),TT=t("Example:"),FT=l(),k(Nr.$$.fragment),eu=l(),Mt=s("h2"),go=s("a"),Ed=s("span"),k(Br.$$.fragment),kT=l(),Md=s("span"),wT=t("TFFunnelModelForPreTraining"),nu=l(),Le=s("div"),k(Wr.$$.fragment),bT=l(),zd=s("p"),yT=t("Funnel model with a binary classification head on top as used during pretraining for identifying generated tokens."),$T=l(),Qr=s("p"),ET=t("The Funnel Transformer model was proposed in "),Rr=s("a"),MT=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),zT=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),PT=l(),Hr=s("p"),qT=t("This model inherits from "),Ei=s("a"),CT=t("TFPreTrainedModel"),xT=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jT=l(),Vr=s("p"),LT=t("This model is also a "),Yr=s("a"),AT=t("tf.keras.Model"),DT=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),IT=l(),k(_o.$$.fragment),OT=l(),an=s("div"),k(Ur.$$.fragment),ST=l(),zt=s("p"),NT=t("The "),Mi=s("a"),BT=t("TFFunnelForPreTraining"),WT=t(" forward method, overrides the "),Pd=s("code"),QT=t("__call__"),RT=t(" special method."),HT=l(),k(vo.$$.fragment),VT=l(),qd=s("p"),YT=t("Examples:"),UT=l(),k(Gr.$$.fragment),tu=l(),Pt=s("h2"),To=s("a"),Cd=s("span"),k(Zr.$$.fragment),GT=l(),xd=s("span"),ZT=t("TFFunnelForMaskedLM"),ou=l(),Ae=s("div"),k(Kr.$$.fragment),KT=l(),Xr=s("p"),XT=t("Funnel Model with a "),jd=s("code"),JT=t("language modeling"),eF=t(" head on top."),nF=l(),Jr=s("p"),tF=t("The Funnel Transformer model was proposed in "),ea=s("a"),oF=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),sF=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),rF=l(),na=s("p"),aF=t("This model inherits from "),zi=s("a"),iF=t("TFPreTrainedModel"),lF=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dF=l(),ta=s("p"),cF=t("This model is also a "),oa=s("a"),uF=t("tf.keras.Model"),pF=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),hF=l(),k(Fo.$$.fragment),fF=l(),ln=s("div"),k(sa.$$.fragment),mF=l(),qt=s("p"),gF=t("The "),Pi=s("a"),_F=t("TFFunnelForMaskedLM"),vF=t(" forward method, overrides the "),Ld=s("code"),TF=t("__call__"),FF=t(" special method."),kF=l(),k(ko.$$.fragment),wF=l(),Ad=s("p"),bF=t("Example:"),yF=l(),k(ra.$$.fragment),su=l(),Ct=s("h2"),wo=s("a"),Dd=s("span"),k(aa.$$.fragment),$F=l(),Id=s("span"),EF=t("TFFunnelForSequenceClassification"),ru=l(),De=s("div"),k(ia.$$.fragment),MF=l(),Od=s("p"),zF=t(`Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.`),PF=l(),la=s("p"),qF=t("The Funnel Transformer model was proposed in "),da=s("a"),CF=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),xF=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),jF=l(),ca=s("p"),LF=t("This model inherits from "),qi=s("a"),AF=t("TFPreTrainedModel"),DF=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),IF=l(),ua=s("p"),OF=t("This model is also a "),pa=s("a"),SF=t("tf.keras.Model"),NF=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),BF=l(),k(bo.$$.fragment),WF=l(),dn=s("div"),k(ha.$$.fragment),QF=l(),xt=s("p"),RF=t("The "),Ci=s("a"),HF=t("TFFunnelForSequenceClassification"),VF=t(" forward method, overrides the "),Sd=s("code"),YF=t("__call__"),UF=t(" special method."),GF=l(),k(yo.$$.fragment),ZF=l(),Nd=s("p"),KF=t("Example:"),XF=l(),k(fa.$$.fragment),au=l(),jt=s("h2"),$o=s("a"),Bd=s("span"),k(ma.$$.fragment),JF=l(),Wd=s("span"),ek=t("TFFunnelForMultipleChoice"),iu=l(),Ie=s("div"),k(ga.$$.fragment),nk=l(),Qd=s("p"),tk=t(`Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ok=l(),_a=s("p"),sk=t("The Funnel Transformer model was proposed in "),va=s("a"),rk=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),ak=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),ik=l(),Ta=s("p"),lk=t("This model inherits from "),xi=s("a"),dk=t("TFPreTrainedModel"),ck=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),uk=l(),Fa=s("p"),pk=t("This model is also a "),ka=s("a"),hk=t("tf.keras.Model"),fk=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),mk=l(),k(Eo.$$.fragment),gk=l(),cn=s("div"),k(wa.$$.fragment),_k=l(),Lt=s("p"),vk=t("The "),ji=s("a"),Tk=t("TFFunnelForMultipleChoice"),Fk=t(" forward method, overrides the "),Rd=s("code"),kk=t("__call__"),wk=t(" special method."),bk=l(),k(Mo.$$.fragment),yk=l(),Hd=s("p"),$k=t("Example:"),Ek=l(),k(ba.$$.fragment),lu=l(),At=s("h2"),zo=s("a"),Vd=s("span"),k(ya.$$.fragment),Mk=l(),Yd=s("span"),zk=t("TFFunnelForTokenClassification"),du=l(),Oe=s("div"),k($a.$$.fragment),Pk=l(),Ud=s("p"),qk=t(`Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ck=l(),Ea=s("p"),xk=t("The Funnel Transformer model was proposed in "),Ma=s("a"),jk=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Lk=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Ak=l(),za=s("p"),Dk=t("This model inherits from "),Li=s("a"),Ik=t("TFPreTrainedModel"),Ok=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sk=l(),Pa=s("p"),Nk=t("This model is also a "),qa=s("a"),Bk=t("tf.keras.Model"),Wk=t(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Qk=l(),k(Po.$$.fragment),Rk=l(),un=s("div"),k(Ca.$$.fragment),Hk=l(),Dt=s("p"),Vk=t("The "),Ai=s("a"),Yk=t("TFFunnelForTokenClassification"),Uk=t(" forward method, overrides the "),Gd=s("code"),Gk=t("__call__"),Zk=t(" special method."),Kk=l(),k(qo.$$.fragment),Xk=l(),Zd=s("p"),Jk=t("Example:"),e1=l(),k(xa.$$.fragment),cu=l(),It=s("h2"),Co=s("a"),Kd=s("span"),k(ja.$$.fragment),n1=l(),Xd=s("span"),t1=t("TFFunnelForQuestionAnswering"),uu=l(),Se=s("div"),k(La.$$.fragment),o1=l(),Ot=s("p"),s1=t(`Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Jd=s("code"),r1=t("span start logits"),a1=t(" and "),ec=s("code"),i1=t("span end logits"),l1=t(")."),d1=l(),Aa=s("p"),c1=t("The Funnel Transformer model was proposed in "),Da=s("a"),u1=t(`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),p1=t(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),h1=l(),Ia=s("p"),f1=t("This model inherits from "),Di=s("a"),m1=t("TFPreTrainedModel"),g1=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_1=l(),Oa=s("p"),v1=t("This model is also a "),Sa=s("a"),T1=t("tf.keras.Model"),F1=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),k1=l(),k(xo.$$.fragment),w1=l(),pn=s("div"),k(Na.$$.fragment),b1=l(),St=s("p"),y1=t("The "),Ii=s("a"),$1=t("TFFunnelForQuestionAnswering"),E1=t(" forward method, overrides the "),nc=s("code"),M1=t("__call__"),z1=t(" special method."),P1=l(),k(jo.$$.fragment),q1=l(),tc=s("p"),C1=t("Example:"),x1=l(),k(Ba.$$.fragment),this.h()},l(i){const f=wy('[data-svelte="svelte-1phssyn"]',document.head);p=r(f,"META",{name:!0,content:!0}),f.forEach(n),M=d(i),m=r(i,"H1",{class:!0});var Wa=a(m);g=r(Wa,"A",{id:!0,class:!0,href:!0});var oc=a(g);T=r(oc,"SPAN",{});var sc=a(T);w(v.$$.fragment,sc),sc.forEach(n),oc.forEach(n),_=d(Wa),z=r(Wa,"SPAN",{});var rc=a(z);ce=o(rc,"Funnel Transformer"),rc.forEach(n),Wa.forEach(n),G=d(i),P=r(i,"H2",{class:!0});var Qa=a(P);X=r(Qa,"A",{id:!0,class:!0,href:!0});var ac=a(X);I=r(ac,"SPAN",{});var ic=a(I);w(ne.$$.fragment,ic),ic.forEach(n),ac.forEach(n),ue=d(Qa),O=r(Qa,"SPAN",{});var lc=a(O);pe=o(lc,"Overview"),lc.forEach(n),Qa.forEach(n),ie=d(i),U=r(i,"P",{});var Ra=a(U);L=o(Ra,"The Funnel Transformer model was proposed in the paper "),te=r(Ra,"A",{href:!0,rel:!0});var dc=a(te);Z=o(dc,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),dc.forEach(n),q=o(Ra,`. It is a bidirectional transformer model, like BERT, but with a pooling operation after each block of layers, a bit like in traditional convolutional neural networks (CNN) in computer vision.`),Ra.forEach(n),x=d(i),oe=r(i,"P",{});var cc=a(oe);Q=o(cc,"The abstract from the paper is the following:"),cc.forEach(n),le=d(i),se=r(i,"P",{});var uc=a(se);S=r(uc,"EM",{});var pc=a(S);he=o(pc,`With the success of language pretraining, it is highly desirable to develop more efficient architectures of good scalability that can exploit the abundant unlabeled data at a lower cost. 
To improve the efficiency, we examine the much-overlooked redundancy in maintaining a full-length token-level presentation, especially for tasks that only require a single-vector presentation of the sequence. With this intuition, we propose Funnel-Transformer which gradually compresses the sequence of hidden states to a shorter one and hence reduces the computation cost. More importantly, by re-investing the saved FLOPs from length reduction in constructing a deeper or wider model, we further improve the model capacity. In addition, to perform token-level predictions as required by common pretraining objectives, Funnel-Transformer is able to recover a deep representation for each token from the reduced hidden sequence via a decoder. Empirically, with comparable or fewer FLOPs, Funnel-Transformer outperforms the standard Transformer on a wide variety of sequence-level prediction tasks, including text classification, language understanding, and reading comprehension.`),pc.forEach(n),uc.forEach(n),de=d(i),C=r(i,"P",{});var hc=a(C);fe=o(hc,"Tips:"),hc.forEach(n),B=d(i),J=r(i,"UL",{});var Ha=a(J);ae=r(Ha,"LI",{});var fc=a(ae);R=o(fc,`Since Funnel Transformer uses pooling, the sequence length of the hidden states changes after each block of layers. The base model therefore has a final sequence length that is a quarter of the original one. This model can be used directly for tasks that just require a sentence summary (like sequence classification or multiple choice). For other tasks, the full model is used; this full model has a decoder that upsamples the final hidden states to the same sequence length as the input.`),fc.forEach(n),me=d(Ha),N=r(Ha,"LI",{});var Ce=a(N);A=o(Ce,`The Funnel Transformer checkpoints are all available with a full version and a base version. The first ones should be used for `),re=r(Ce,"A",{href:!0});var mc=a(re);H=o(mc,"FunnelModel"),mc.forEach(n),ge=o(Ce,", "),u=r(Ce,"A",{href:!0});var gc=a(u);F=o(gc,"FunnelForPreTraining"),gc.forEach(n),K=o(Ce,`, `),ve=r(Ce,"A",{href:!0});var _c=a(ve);we=o(_c,"FunnelForMaskedLM"),_c.forEach(n),D=o(Ce,", "),Te=r(Ce,"A",{href:!0});var vc=a(Te);be=o(vc,"FunnelForTokenClassification"),vc.forEach(n),ye=o(Ce,` and class:`),j=r(Ce,"EM",{});var Tc=a(j);V=o(Tc,"~transformers.FunnelForQuestionAnswering"),Tc.forEach(n),$e=o(Ce,`. The second ones should be used for `),Fe=r(Ce,"A",{href:!0});var Fc=a(Fe);Y=o(Fc,"FunnelBaseModel"),Fc.forEach(n),Ee=o(Ce,", "),ke=r(Ce,"A",{href:!0});var kc=a(ke);_e=o(kc,"FunnelForSequenceClassification"),kc.forEach(n),Me=o(Ce,` and `),Va=r(Ce,"A",{href:!0});var A1=a(Va);Dp=o(A1,"FunnelForMultipleChoice"),A1.forEach(n),Ip=o(Ce,"."),Ce.forEach(n),Ha.forEach(n),yc=d(i),jn=r(i,"P",{});var Oi=a(jn);Op=o(Oi,"This model was contributed by "),Io=r(Oi,"A",{href:!0,rel:!0});var D1=a(Io);Sp=o(D1,"sgugger"),D1.forEach(n),Np=o(Oi,". 
The original code can be found "),Oo=r(Oi,"A",{href:!0,rel:!0});var I1=a(Oo);Bp=o(I1,"here"),I1.forEach(n),Wp=o(Oi,"."),Oi.forEach(n),$c=d(i),Kn=r(i,"H2",{class:!0});var hu=a(Kn);Nt=r(hu,"A",{id:!0,class:!0,href:!0});var O1=a(Nt);ll=r(O1,"SPAN",{});var S1=a(ll);w(So.$$.fragment,S1),S1.forEach(n),O1.forEach(n),Qp=d(hu),dl=r(hu,"SPAN",{});var N1=a(dl);Rp=o(N1,"FunnelConfig"),N1.forEach(n),hu.forEach(n),Ec=d(i),Cn=r(i,"DIV",{class:!0});var Si=a(Cn);w(No.$$.fragment,Si),Hp=d(Si),xn=r(Si,"P",{});var Lo=a(xn);Vp=o(Lo,"This is the configuration class to store the configuration of a "),Ya=r(Lo,"A",{href:!0});var B1=a(Ya);Yp=o(B1,"FunnelModel"),B1.forEach(n),Up=o(Lo,` or a `),Ua=r(Lo,"A",{href:!0});var W1=a(Ua);Gp=o(W1,"TFBertModel"),W1.forEach(n),Zp=o(Lo,`. It is used to instantiate a Funnel Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel Transformer `),Bo=r(Lo,"A",{href:!0,rel:!0});var Q1=a(Bo);Kp=o(Q1,"funnel-transformer/small"),Q1.forEach(n),Xp=o(Lo," architecture."),Lo.forEach(n),Jp=d(Si),Xn=r(Si,"P",{});var Ni=a(Xn);eh=o(Ni,"Configuration objects inherit from "),Ga=r(Ni,"A",{href:!0});var R1=a(Ga);nh=o(R1,"PretrainedConfig"),R1.forEach(n),th=o(Ni,` and can be used to control the model outputs. Read the documentation from `),Za=r(Ni,"A",{href:!0});var H1=a(Za);oh=o(H1,"PretrainedConfig"),H1.forEach(n),sh=o(Ni," for more information."),Ni.forEach(n),Si.forEach(n),Mc=d(i),Jn=r(i,"H2",{class:!0});var fu=a(Jn);Bt=r(fu,"A",{id:!0,class:!0,href:!0});var V1=a(Bt);cl=r(V1,"SPAN",{});var Y1=a(cl);w(Wo.$$.fragment,Y1),Y1.forEach(n),V1.forEach(n),rh=d(fu),ul=r(fu,"SPAN",{});var U1=a(ul);ah=o(U1,"FunnelTokenizer"),U1.forEach(n),fu.forEach(n),zc=d(i),qe=r(i,"DIV",{class:!0});var Ge=a(qe);w(Qo.$$.fragment,Ge),ih=d(Ge),pl=r(Ge,"P",{});var G1=a(pl);lh=o(G1,"Construct a Funnel Transformer tokenizer."),G1.forEach(n),dh=d(Ge),Wt=r(Ge,"P",{});var wc=a(Wt);Ka=r(wc,"A",{href:!0});var Z1=a(Ka);ch=o(Z1,"FunnelTokenizer"),Z1.forEach(n),uh=o(wc," is identical to "),Xa=r(wc,"A",{href:!0});var K1=a(Xa);ph=o(K1,"BertTokenizer"),K1.forEach(n),hh=o(wc,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),wc.forEach(n),fh=d(Ge),Ro=r(Ge,"P",{});var mu=a(Ro);mh=o(mu,"Refer to superclass "),Ja=r(mu,"A",{href:!0});var X1=a(Ja);gh=o(X1,"BertTokenizer"),X1.forEach(n),_h=o(mu,` for usage examples and documentation concerning parameters.`),mu.forEach(n),vh=d(Ge),Ln=r(Ge,"DIV",{class:!0});var Bi=a(Ln);w(Ho.$$.fragment,Bi),Th=d(Bi),hl=r(Bi,"P",{});var J1=a(hl);Fh=o(J1,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),J1.forEach(n),kh=d(Bi),Vo=r(Bi,"UL",{});var gu=a(Vo);ei=r(gu,"LI",{});var j1=a(ei);wh=o(j1,"single sequence: "),fl=r(j1,"CODE",{});var ew=a(fl);bh=o(ew,"[CLS] X [SEP]"),ew.forEach(n),j1.forEach(n),yh=d(gu),ni=r(gu,"LI",{});var L1=a(ni);$h=o(L1,"pair of sequences: "),ml=r(L1,"CODE",{});var nw=a(ml);Eh=o(nw,"[CLS] A [SEP] B [SEP]"),nw.forEach(n),L1.forEach(n),gu.forEach(n),Bi.forEach(n),Mh=d(Ge),Qt=r(Ge,"DIV",{class:!0});var _u=a(Qt);w(Yo.$$.fragment,_u),zh=d(_u),Uo=r(_u,"P",{});var vu=a(Uo);Ph=o(vu,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),gl=r(vu,"CODE",{});var tw=a(gl);qh=o(tw,"prepare_for_model"),tw.forEach(n),Ch=o(vu," method."),vu.forEach(n),_u.forEach(n),xh=d(Ge),wn=r(Ge,"DIV",{class:!0});var Ao=a(wn);w(Go.$$.fragment,Ao),jh=d(Ao),_l=r(Ao,"P",{});var ow=a(_l);Lh=o(ow,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel Transformer sequence pair mask has the following format:`),ow.forEach(n),Ah=d(Ao),w(Zo.$$.fragment,Ao),Dh=d(Ao),et=r(Ao,"P",{});var Wi=a(et);Ih=o(Wi,"If "),vl=r(Wi,"CODE",{});var sw=a(vl);Oh=o(sw,"token_ids_1"),sw.forEach(n),Sh=o(Wi," is "),Tl=r(Wi,"CODE",{});var rw=a(Tl);Nh=o(rw,"None"),rw.forEach(n),Bh=o(Wi,", this method only returns the first portion of the mask (0s)."),Wi.forEach(n),Ao.forEach(n),Wh=d(Ge),Fl=r(Ge,"DIV",{class:!0}),a(Fl).forEach(n),Ge.forEach(n),Pc=d(i),nt=r(i,"H2",{class:!0});var Tu=a(nt);Rt=r(Tu,"A",{id:!0,class:!0,href:!0});var aw=a(Rt);kl=r(aw,"SPAN",{});var iw=a(kl);w(Ko.$$.fragment,iw),iw.forEach(n),aw.forEach(n),Qh=d(Tu),wl=r(Tu,"SPAN",{});var lw=a(wl);Rh=o(lw,"FunnelTokenizerFast"),lw.forEach(n),Tu.forEach(n),qc=d(i),Ze=r(i,"DIV",{class:!0});var An=a(Ze);w(Xo.$$.fragment,An),Hh=d(An),Jo=r(An,"P",{});var Fu=a(Jo);Vh=o(Fu,"Construct a \u201Cfast\u201D Funnel Transformer tokenizer (backed by HuggingFace\u2019s "),bl=r(Fu,"EM",{});var dw=a(bl);Yh=o(dw,"tokenizers"),dw.forEach(n),Uh=o(Fu," library)."),Fu.forEach(n),Gh=d(An),Ht=r(An,"P",{});var bc=a(Ht);ti=r(bc,"A",{href:!0});var cw=a(ti);Zh=o(cw,"FunnelTokenizerFast"),cw.forEach(n),Kh=o(bc," is identical to "),oi=r(bc,"A",{href:!0});var uw=a(oi);Xh=o(uw,"BertTokenizerFast"),uw.forEach(n),Jh=o(bc,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),bc.forEach(n),ef=d(An),es=r(An,"P",{});var ku=a(es);nf=o(ku,"Refer to superclass "),si=r(ku,"A",{href:!0});var pw=a(si);tf=o(pw,"BertTokenizerFast"),pw.forEach(n),of=o(ku,` for usage examples and documentation concerning parameters.`),ku.forEach(n),sf=d(An),bn=r(An,"DIV",{class:!0});var Do=a(bn);w(ns.$$.fragment,Do),rf=d(Do),yl=r(Do,"P",{});var hw=a(yl);af=o(hw,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A Funnel Transformer sequence pair mask has the following format:`),hw.forEach(n),lf=d(Do),w(ts.$$.fragment,Do),df=d(Do),tt=r(Do,"P",{});var Qi=a(tt);cf=o(Qi,"If "),$l=r(Qi,"CODE",{});var fw=a($l);uf=o(fw,"token_ids_1"),fw.forEach(n),pf=o(Qi," is "),El=r(Qi,"CODE",{});var mw=a(El);hf=o(mw,"None"),mw.forEach(n),ff=o(Qi,", this method only returns the first portion of the mask (0s)."),Qi.forEach(n),Do.forEach(n),An.forEach(n),Cc=d(i),ot=r(i,"H2",{class:!0});var wu=a(ot);Vt=r(wu,"A",{id:!0,class:!0,href:!0});var gw=a(Vt);Ml=r(gw,"SPAN",{});var _w=a(Ml);w(os.$$.fragment,_w),_w.forEach(n),gw.forEach(n),mf=d(wu),zl=r(wu,"SPAN",{});var vw=a(zl);gf=o(vw,"Funnel specific outputs"),vw.forEach(n),wu.forEach(n),xc=d(i),st=r(i,"DIV",{class:!0});var bu=a(st);w(ss.$$.fragment,bu),_f=d(bu),rs=r(bu,"P",{});var yu=a(rs);vf=o(yu,"Output type of "),ri=r(yu,"A",{href:!0});var Tw=a(ri);Tf=o(Tw,"FunnelForPreTraining"),Tw.forEach(n),Ff=o(yu,"."),yu.forEach(n),bu.forEach(n),jc=d(i),rt=r(i,"DIV",{class:!0});var $u=a(rt);w(as.$$.fragment,$u),kf=d($u),is=r($u,"P",{});var Eu=a(is);wf=o(Eu,"Output type of "),ai=r(Eu,"A",{href:!0});var Fw=a(ai);bf=o(Fw,"FunnelForPreTraining"),Fw.forEach(n),yf=o(Eu,"."),Eu.forEach(n),$u.forEach(n),Lc=d(i),at=r(i,"H2",{class:!0});var Mu=a(at);Yt=r(Mu,"A",{id:!0,class:!0,href:!0});var kw=a(Yt);Pl=r(kw,"SPAN",{});var ww=a(Pl);w(ls.$$.fragment,ww),ww.forEach(n),kw.forEach(n),$f=d(Mu),ql=r(Mu,"SPAN",{});var bw=a(ql);Ef=o(bw,"FunnelBaseModel"),bw.forEach(n),Mu.forEach(n),Ac=d(i),We=r(i,"DIV",{class:!0});var yn=a(We);w(ds.$$.fragment,yn),Mf=d(yn),Cl=r(yn,"P",{});var yw=a(Cl);zf=o(yw,`The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top.`),yw.forEach(n),Pf=d(yn),cs=r(yn,"P",{});var zu=a(cs);qf=o(zu,"The Funnel Transformer model was proposed in "),us=r(zu,"A",{href:!0,rel:!0});var $w=a(us);Cf=o($w,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),$w.forEach(n),xf=o(zu," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),zu.forEach(n),jf=d(yn),ps=r(yn,"P",{});var Pu=a(ps);Lf=o(Pu,"This model inherits from "),ii=r(Pu,"A",{href:!0});var Ew=a(ii);Af=o(Ew,"PreTrainedModel"),Ew.forEach(n),Df=o(Pu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pu.forEach(n),If=d(yn),hs=r(yn,"P",{});var qu=a(hs);Of=o(qu,"This model is also a PyTorch "),fs=r(qu,"A",{href:!0,rel:!0});var Mw=a(fs);Sf=o(Mw,"torch.nn.Module"),Mw.forEach(n),Nf=o(qu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qu.forEach(n),Bf=d(yn),Ke=r(yn,"DIV",{class:!0});var Dn=a(Ke);w(ms.$$.fragment,Dn),Wf=d(Dn),it=r(Dn,"P",{});var Ri=a(it);Qf=o(Ri,"The "),li=r(Ri,"A",{href:!0});var zw=a(li);Rf=o(zw,"FunnelBaseModel"),zw.forEach(n),Hf=o(Ri," forward method, overrides the "),xl=r(Ri,"CODE",{});var Pw=a(xl);Vf=o(Pw,"__call__"),Pw.forEach(n),Yf=o(Ri," special method."),Ri.forEach(n),Uf=d(Dn),w(Ut.$$.fragment,Dn),Gf=d(Dn),jl=r(Dn,"P",{});var qw=a(jl);Zf=o(qw,"Example:"),qw.forEach(n),Kf=d(Dn),w(gs.$$.fragment,Dn),Dn.forEach(n),yn.forEach(n),Dc=d(i),lt=r(i,"H2",{class:!0});var Cu=a(lt);Gt=r(Cu,"A",{id:!0,class:!0,href:!0});var Cw=a(Gt);Ll=r(Cw,"SPAN",{});var xw=a(Ll);w(_s.$$.fragment,xw),xw.forEach(n),Cw.forEach(n),Xf=d(Cu),Al=r(Cu,"SPAN",{});var jw=a(Al);Jf=o(jw,"FunnelModel"),jw.forEach(n),Cu.forEach(n),Ic=d(i),Qe=r(i,"DIV",{class:!0});var $n=a(Qe);w(vs.$$.fragment,$n),em=d($n),Dl=r($n,"P",{});var Lw=a(Dl);nm=o(Lw,"The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top."),Lw.forEach(n),tm=d($n),Ts=r($n,"P",{});var xu=a(Ts);om=o(xu,"The Funnel Transformer model was proposed in "),Fs=r(xu,"A",{href:!0,rel:!0});var Aw=a(Fs);sm=o(Aw,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Aw.forEach(n),rm=o(xu," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),xu.forEach(n),am=d($n),ks=r($n,"P",{});var ju=a(ks);im=o(ju,"This model inherits from "),di=r(ju,"A",{href:!0});var Dw=a(di);lm=o(Dw,"PreTrainedModel"),Dw.forEach(n),dm=o(ju,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ju.forEach(n),cm=d($n),ws=r($n,"P",{});var Lu=a(ws);um=o(Lu,"This model is also a PyTorch "),bs=r(Lu,"A",{href:!0,rel:!0});var Iw=a(bs);pm=o(Iw,"torch.nn.Module"),Iw.forEach(n),hm=o(Lu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lu.forEach(n),fm=d($n),Xe=r($n,"DIV",{class:!0});var In=a(Xe);w(ys.$$.fragment,In),mm=d(In),dt=r(In,"P",{});var Hi=a(dt);gm=o(Hi,"The "),ci=r(Hi,"A",{href:!0});var Ow=a(ci);_m=o(Ow,"FunnelModel"),Ow.forEach(n),vm=o(Hi," forward method, overrides the "),Il=r(Hi,"CODE",{});var Sw=a(Il);Tm=o(Sw,"__call__"),Sw.forEach(n),Fm=o(Hi," special method."),Hi.forEach(n),km=d(In),w(Zt.$$.fragment,In),wm=d(In),Ol=r(In,"P",{});var Nw=a(Ol);bm=o(Nw,"Example:"),Nw.forEach(n),ym=d(In),w($s.$$.fragment,In),In.forEach(n),$n.forEach(n),Oc=d(i),ct=r(i,"H2",{class:!0});var Au=a(ct);Kt=r(Au,"A",{id:!0,class:!0,href:!0});var Bw=a(Kt);Sl=r(Bw,"SPAN",{});var Ww=a(Sl);w(Es.$$.fragment,Ww),Ww.forEach(n),Bw.forEach(n),$m=d(Au),Nl=r(Au,"SPAN",{});var Qw=a(Nl);Em=o(Qw,"FunnelModelForPreTraining"),Qw.forEach(n),Au.forEach(n),Sc=d(i),Ms=r(i,"DIV",{class:!0});var Rw=a(Ms);Je=r(Rw,"DIV",{class:!0});var On=a(Je);w(zs.$$.fragment,On),Mm=d(On),ut=r(On,"P",{});var Vi=a(ut);zm=o(Vi,"The "),ui=r(Vi,"A",{href:!0});var Hw=a(ui);Pm=o(Hw,"FunnelForPreTraining"),Hw.forEach(n),qm=o(Vi," forward method, overrides the "),Bl=r(Vi,"CODE",{});var Vw=a(Bl);Cm=o(Vw,"__call__"),Vw.forEach(n),xm=o(Vi," special method."),Vi.forEach(n),jm=d(On),w(Xt.$$.fragment,On),Lm=d(On),Wl=r(On,"P",{});var Yw=a(Wl);Am=o(Yw,"Examples:"),Yw.forEach(n),Dm=d(On),w(Ps.$$.fragment,On),On.forEach(n),Rw.forEach(n),Nc=d(i),pt=r(i,"H2",{class:!0});var Du=a(pt);Jt=r(Du,"A",{id:!0,class:!0,href:!0});var Uw=a(Jt);Ql=r(Uw,"SPAN",{});var Gw=a(Ql);w(qs.$$.fragment,Gw),Gw.forEach(n),Uw.forEach(n),Im=d(Du),Rl=r(Du,"SPAN",{});var Zw=a(Rl);Om=o(Zw,"FunnelForMaskedLM"),Zw.forEach(n),Du.forEach(n),Bc=d(i),Re=r(i,"DIV",{class:!0});var En=a(Re);w(Cs.$$.fragment,En),Sm=d(En),xs=r(En,"P",{});var Iu=a(xs);Nm=o(Iu,"Funnel Transformer Model with a "),Hl=r(Iu,"CODE",{});var Kw=a(Hl);Bm=o(Kw,"language modeling"),Kw.forEach(n),Wm=o(Iu," head on top."),Iu.forEach(n),Qm=d(En),js=r(En,"P",{});var Ou=a(js);Rm=o(Ou,"The Funnel Transformer model was proposed in "),Ls=r(Ou,"A",{href:!0,rel:!0});var Xw=a(Ls);Hm=o(Xw,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Xw.forEach(n),Vm=o(Ou," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Ou.forEach(n),Ym=d(En),As=r(En,"P",{});var Su=a(As);Um=o(Su,"This model inherits from "),pi=r(Su,"A",{href:!0});var Jw=a(pi);Gm=o(Jw,"PreTrainedModel"),Jw.forEach(n),Zm=o(Su,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Su.forEach(n),Km=d(En),Ds=r(En,"P",{});var Nu=a(Ds);Xm=o(Nu,"This model is also a PyTorch "),Is=r(Nu,"A",{href:!0,rel:!0});var eb=a(Is);Jm=o(eb,"torch.nn.Module"),eb.forEach(n),eg=o(Nu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nu.forEach(n),ng=d(En),en=r(En,"DIV",{class:!0});var Sn=a(en);w(Os.$$.fragment,Sn),tg=d(Sn),ht=r(Sn,"P",{});var Yi=a(ht);og=o(Yi,"The "),hi=r(Yi,"A",{href:!0});var nb=a(hi);sg=o(nb,"FunnelForMaskedLM"),nb.forEach(n),rg=o(Yi," forward method, overrides the "),Vl=r(Yi,"CODE",{});var tb=a(Vl);ag=o(tb,"__call__"),tb.forEach(n),ig=o(Yi," special method."),Yi.forEach(n),lg=d(Sn),w(eo.$$.fragment,Sn),dg=d(Sn),Yl=r(Sn,"P",{});var ob=a(Yl);cg=o(ob,"Example:"),ob.forEach(n),ug=d(Sn),w(Ss.$$.fragment,Sn),Sn.forEach(n),En.forEach(n),Wc=d(i),ft=r(i,"H2",{class:!0});var Bu=a(ft);no=r(Bu,"A",{id:!0,class:!0,href:!0});var sb=a(no);Ul=r(sb,"SPAN",{});var rb=a(Ul);w(Ns.$$.fragment,rb),rb.forEach(n),sb.forEach(n),pg=d(Bu),Gl=r(Bu,"SPAN",{});var ab=a(Gl);hg=o(ab,"FunnelForSequenceClassification"),ab.forEach(n),Bu.forEach(n),Qc=d(i),He=r(i,"DIV",{class:!0});var Mn=a(He);w(Bs.$$.fragment,Mn),fg=d(Mn),Zl=r(Mn,"P",{});var ib=a(Zl);mg=o(ib,`Funnel Transformer Model with a sequence classification/regression head on top (two linear layer on top of the first timestep of the last hidden state) e.g. for GLUE tasks.`),ib.forEach(n),gg=d(Mn),Ws=r(Mn,"P",{});var Wu=a(Ws);_g=o(Wu,"The Funnel Transformer model was proposed in "),Qs=r(Wu,"A",{href:!0,rel:!0});var lb=a(Qs);vg=o(lb,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),lb.forEach(n),Tg=o(Wu," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Wu.forEach(n),Fg=d(Mn),Rs=r(Mn,"P",{});var Qu=a(Rs);kg=o(Qu,"This model inherits from "),fi=r(Qu,"A",{href:!0});var db=a(fi);wg=o(db,"PreTrainedModel"),db.forEach(n),bg=o(Qu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qu.forEach(n),yg=d(Mn),Hs=r(Mn,"P",{});var Ru=a(Hs);$g=o(Ru,"This model is also a PyTorch "),Vs=r(Ru,"A",{href:!0,rel:!0});var cb=a(Vs);Eg=o(cb,"torch.nn.Module"),cb.forEach(n),Mg=o(Ru,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ru.forEach(n),zg=d(Mn),Be=r(Mn,"DIV",{class:!0});var hn=a(Be);w(Ys.$$.fragment,hn),Pg=d(hn),mt=r(hn,"P",{});var Ui=a(mt);qg=o(Ui,"The "),mi=r(Ui,"A",{href:!0});var ub=a(mi);Cg=o(ub,"FunnelForSequenceClassification"),ub.forEach(n),xg=o(Ui," forward method, overrides the "),Kl=r(Ui,"CODE",{});var pb=a(Kl);jg=o(pb,"__call__"),pb.forEach(n),Lg=o(Ui," special method."),Ui.forEach(n),Ag=d(hn),w(to.$$.fragment,hn),Dg=d(hn),Xl=r(hn,"P",{});var hb=a(Xl);Ig=o(hb,"Example of single-label classification:"),hb.forEach(n),Og=d(hn),w(Us.$$.fragment,hn),Sg=d(hn),Jl=r(hn,"P",{});var fb=a(Jl);Ng=o(fb,"Example of multi-label classification:"),fb.forEach(n),Bg=d(hn),w(Gs.$$.fragment,hn),hn.forEach(n),Mn.forEach(n),Rc=d(i),gt=r(i,"H2",{class:!0});var Hu=a(gt);oo=r(Hu,"A",{id:!0,class:!0,href:!0});var mb=a(oo);ed=r(mb,"SPAN",{});var gb=a(ed);w(Zs.$$.fragment,gb),gb.forEach(n),mb.forEach(n),Wg=d(Hu),nd=r(Hu,"SPAN",{});var _b=a(nd);Qg=o(_b,"FunnelForMultipleChoice"),_b.forEach(n),Hu.forEach(n),Hc=d(i),Ve=r(i,"DIV",{class:!0});var zn=a(Ve);w(Ks.$$.fragment,zn),Rg=d(zn),td=r(zn,"P",{});var vb=a(td);Hg=o(vb,`Funnel Transformer Model with a multiple choice classification head on top (two linear layer on top of the first timestep of the last hidden state, and a softmax) e.g. 
for RocStories/SWAG tasks.`),vb.forEach(n),Vg=d(zn),Xs=r(zn,"P",{});var Vu=a(Xs);Yg=o(Vu,"The Funnel Transformer model was proposed in "),Js=r(Vu,"A",{href:!0,rel:!0});var Tb=a(Js);Ug=o(Tb,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Tb.forEach(n),Gg=o(Vu," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Vu.forEach(n),Zg=d(zn),er=r(zn,"P",{});var Yu=a(er);Kg=o(Yu,"This model inherits from "),gi=r(Yu,"A",{href:!0});var Fb=a(gi);Xg=o(Fb,"PreTrainedModel"),Fb.forEach(n),Jg=o(Yu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yu.forEach(n),e_=d(zn),nr=r(zn,"P",{});var Uu=a(nr);n_=o(Uu,"This model is also a PyTorch "),tr=r(Uu,"A",{href:!0,rel:!0});var kb=a(tr);t_=o(kb,"torch.nn.Module"),kb.forEach(n),o_=o(Uu,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Uu.forEach(n),s_=d(zn),nn=r(zn,"DIV",{class:!0});var Nn=a(nn);w(or.$$.fragment,Nn),r_=d(Nn),_t=r(Nn,"P",{});var Gi=a(_t);a_=o(Gi,"The "),_i=r(Gi,"A",{href:!0});var wb=a(_i);i_=o(wb,"FunnelForMultipleChoice"),wb.forEach(n),l_=o(Gi," forward method, overrides the "),od=r(Gi,"CODE",{});var bb=a(od);d_=o(bb,"__call__"),bb.forEach(n),c_=o(Gi," special method."),Gi.forEach(n),u_=d(Nn),w(so.$$.fragment,Nn),p_=d(Nn),sd=r(Nn,"P",{});var yb=a(sd);h_=o(yb,"Example:"),yb.forEach(n),f_=d(Nn),w(sr.$$.fragment,Nn),Nn.forEach(n),zn.forEach(n),Vc=d(i),vt=r(i,"H2",{class:!0});var Gu=a(vt);ro=r(Gu,"A",{id:!0,class:!0,href:!0});var $b=a(ro);rd=r($b,"SPAN",{});var Eb=a(rd);w(rr.$$.fragment,Eb),Eb.forEach(n),$b.forEach(n),m_=d(Gu),ad=r(Gu,"SPAN",{});var Mb=a(ad);g_=o(Mb,"FunnelForTokenClassification"),Mb.forEach(n),Gu.forEach(n),Yc=d(i),Ye=r(i,"DIV",{class:!0});var Pn=a(Ye);w(ar.$$.fragment,Pn),__=d(Pn),id=r(Pn,"P",{});var zb=a(id);v_=o(zb,`Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),zb.forEach(n),T_=d(Pn),ir=r(Pn,"P",{});var Zu=a(ir);F_=o(Zu,"The Funnel Transformer model was proposed in "),lr=r(Zu,"A",{href:!0,rel:!0});var Pb=a(lr);k_=o(Pb,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Pb.forEach(n),w_=o(Zu," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Zu.forEach(n),b_=d(Pn),dr=r(Pn,"P",{});var Ku=a(dr);y_=o(Ku,"This model inherits from "),vi=r(Ku,"A",{href:!0});var qb=a(vi);$_=o(qb,"PreTrainedModel"),qb.forEach(n),E_=o(Ku,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ku.forEach(n),M_=d(Pn),cr=r(Pn,"P",{});var Xu=a(cr);z_=o(Xu,"This model is also a PyTorch "),ur=r(Xu,"A",{href:!0,rel:!0});var Cb=a(ur);P_=o(Cb,"torch.nn.Module"),Cb.forEach(n),q_=o(Xu,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xu.forEach(n),C_=d(Pn),tn=r(Pn,"DIV",{class:!0});var Bn=a(tn);w(pr.$$.fragment,Bn),x_=d(Bn),Tt=r(Bn,"P",{});var Zi=a(Tt);j_=o(Zi,"The "),Ti=r(Zi,"A",{href:!0});var xb=a(Ti);L_=o(xb,"FunnelForTokenClassification"),xb.forEach(n),A_=o(Zi," forward method, overrides the "),ld=r(Zi,"CODE",{});var jb=a(ld);D_=o(jb,"__call__"),jb.forEach(n),I_=o(Zi," special method."),Zi.forEach(n),O_=d(Bn),w(ao.$$.fragment,Bn),S_=d(Bn),dd=r(Bn,"P",{});var Lb=a(dd);N_=o(Lb,"Example:"),Lb.forEach(n),B_=d(Bn),w(hr.$$.fragment,Bn),Bn.forEach(n),Pn.forEach(n),Uc=d(i),Ft=r(i,"H2",{class:!0});var Ju=a(Ft);io=r(Ju,"A",{id:!0,class:!0,href:!0});var Ab=a(io);cd=r(Ab,"SPAN",{});var Db=a(cd);w(fr.$$.fragment,Db),Db.forEach(n),Ab.forEach(n),W_=d(Ju),ud=r(Ju,"SPAN",{});var Ib=a(ud);Q_=o(Ib,"FunnelForQuestionAnswering"),Ib.forEach(n),Ju.forEach(n),Gc=d(i),Ue=r(i,"DIV",{class:!0});var qn=a(Ue);w(mr.$$.fragment,qn),R_=d(qn),kt=r(qn,"P",{});var Ki=a(kt);H_=o(Ki,`Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),pd=r(Ki,"CODE",{});var Ob=a(pd);V_=o(Ob,"span start logits"),Ob.forEach(n),Y_=o(Ki," and "),hd=r(Ki,"CODE",{});var Sb=a(hd);U_=o(Sb,"span end logits"),Sb.forEach(n),G_=o(Ki,")."),Ki.forEach(n),Z_=d(qn),gr=r(qn,"P",{});var ep=a(gr);K_=o(ep,"The Funnel Transformer model was proposed in "),_r=r(ep,"A",{href:!0,rel:!0});var Nb=a(_r);X_=o(Nb,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Nb.forEach(n),J_=o(ep," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),ep.forEach(n),ev=d(qn),vr=r(qn,"P",{});var np=a(vr);nv=o(np,"This model inherits from "),Fi=r(np,"A",{href:!0});var Bb=a(Fi);tv=o(Bb,"PreTrainedModel"),Bb.forEach(n),ov=o(np,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),np.forEach(n),sv=d(qn),Tr=r(qn,"P",{});var tp=a(Tr);rv=o(tp,"This model is also a PyTorch "),Fr=r(tp,"A",{href:!0,rel:!0});var Wb=a(Fr);av=o(Wb,"torch.nn.Module"),Wb.forEach(n),iv=o(tp,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),tp.forEach(n),lv=d(qn),on=r(qn,"DIV",{class:!0});var Wn=a(on);w(kr.$$.fragment,Wn),dv=d(Wn),wt=r(Wn,"P",{});var Xi=a(wt);cv=o(Xi,"The "),ki=r(Xi,"A",{href:!0});var Qb=a(ki);uv=o(Qb,"FunnelForQuestionAnswering"),Qb.forEach(n),pv=o(Xi," forward method, overrides the "),fd=r(Xi,"CODE",{});var Rb=a(fd);hv=o(Rb,"__call__"),Rb.forEach(n),fv=o(Xi," special method."),Xi.forEach(n),mv=d(Wn),w(lo.$$.fragment,Wn),gv=d(Wn),md=r(Wn,"P",{});var Hb=a(md);_v=o(Hb,"Example:"),Hb.forEach(n),vv=d(Wn),w(wr.$$.fragment,Wn),Wn.forEach(n),qn.forEach(n),Zc=d(i),bt=r(i,"H2",{class:!0});var op=a(bt);co=r(op,"A",{id:!0,class:!0,href:!0});var Vb=a(co);gd=r(Vb,"SPAN",{});var Yb=a(gd);w(br.$$.fragment,Yb),Yb.forEach(n),Vb.forEach(n),Tv=d(op),_d=r(op,"SPAN",{});var Ub=a(_d);Fv=o(Ub,"TFFunnelBaseModel"),Ub.forEach(n),op.forEach(n),Kc=d(i),xe=r(i,"DIV",{class:!0});var fn=a(xe);w(yr.$$.fragment,fn),kv=d(fn),vd=r(fn,"P",{});var Gb=a(vd);wv=o(Gb,`The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top.`),Gb.forEach(n),bv=d(fn),$r=r(fn,"P",{});var sp=a($r);yv=o(sp,"The Funnel Transformer model was proposed in "),Er=r(sp,"A",{href:!0,rel:!0});var Zb=a(Er);$v=o(Zb,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),Zb.forEach(n),Ev=o(sp," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),sp.forEach(n),Mv=d(fn),Mr=r(fn,"P",{});var rp=a(Mr);zv=o(rp,"This model inherits from "),wi=r(rp,"A",{href:!0});var Kb=a(wi);Pv=o(Kb,"TFPreTrainedModel"),Kb.forEach(n),qv=o(rp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rp.forEach(n),Cv=d(fn),zr=r(fn,"P",{});var ap=a(zr);xv=o(ap,"This model is also a "),Pr=r(ap,"A",{href:!0,rel:!0});var Xb=a(Pr);jv=o(Xb,"tf.keras.Model"),Xb.forEach(n),Lv=o(ap,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ap.forEach(n),Av=d(fn),w(uo.$$.fragment,fn),Dv=d(fn),sn=r(fn,"DIV",{class:!0});var Qn=a(sn);w(qr.$$.fragment,Qn),Iv=d(Qn),yt=r(Qn,"P",{});var Ji=a(yt);Ov=o(Ji,"The "),bi=r(Ji,"A",{href:!0});var Jb=a(bi);Sv=o(Jb,"TFFunnelBaseModel"),Jb.forEach(n),Nv=o(Ji," forward method, overrides the "),Td=r(Ji,"CODE",{});var e0=a(Td);Bv=o(e0,"__call__"),e0.forEach(n),Wv=o(Ji," special method."),Ji.forEach(n),Qv=d(Qn),w(po.$$.fragment,Qn),Rv=d(Qn),Fd=r(Qn,"P",{});var n0=a(Fd);Hv=o(n0,"Example:"),n0.forEach(n),Vv=d(Qn),w(Cr.$$.fragment,Qn),Qn.forEach(n),fn.forEach(n),Xc=d(i),$t=r(i,"H2",{class:!0});var ip=a($t);ho=r(ip,"A",{id:!0,class:!0,href:!0});var t0=a(ho);kd=r(t0,"SPAN",{});var o0=a(kd);w(xr.$$.fragment,o0),o0.forEach(n),t0.forEach(n),Yv=d(ip),wd=r(ip,"SPAN",{});var s0=a(wd);Uv=o(s0,"TFFunnelModel"),s0.forEach(n),ip.forEach(n),Jc=d(i),je=r(i,"DIV",{class:!0});var mn=a(je);w(jr.$$.fragment,mn),Gv=d(mn),bd=r(mn,"P",{});var r0=a(bd);Zv=o(r0,"The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top."),r0.forEach(n),Kv=d(mn),Lr=r(mn,"P",{});var lp=a(Lr);Xv=o(lp,"The Funnel Transformer model was proposed in "),Ar=r(lp,"A",{href:!0,rel:!0});var a0=a(Ar);Jv=o(a0,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),a0.forEach(n),eT=o(lp," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),lp.forEach(n),nT=d(mn),Dr=r(mn,"P",{});var dp=a(Dr);tT=o(dp,"This model inherits from "),yi=r(dp,"A",{href:!0});var i0=a(yi);oT=o(i0,"TFPreTrainedModel"),i0.forEach(n),sT=o(dp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dp.forEach(n),rT=d(mn),Ir=r(mn,"P",{});var cp=a(Ir);aT=o(cp,"This model is also a "),Or=r(cp,"A",{href:!0,rel:!0});var l0=a(Or);iT=o(l0,"tf.keras.Model"),l0.forEach(n),lT=o(cp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cp.forEach(n),dT=d(mn),w(fo.$$.fragment,mn),cT=d(mn),rn=r(mn,"DIV",{class:!0});var Rn=a(rn);w(Sr.$$.fragment,Rn),uT=d(Rn),Et=r(Rn,"P",{});var el=a(Et);pT=o(el,"The "),$i=r(el,"A",{href:!0});var d0=a($i);hT=o(d0,"TFFunnelModel"),d0.forEach(n),fT=o(el," forward method, overrides the "),yd=r(el,"CODE",{});var c0=a(yd);mT=o(c0,"__call__"),c0.forEach(n),gT=o(el," special method."),el.forEach(n),_T=d(Rn),w(mo.$$.fragment,Rn),vT=d(Rn),$d=r(Rn,"P",{});var u0=a($d);TT=o(u0,"Example:"),u0.forEach(n),FT=d(Rn),w(Nr.$$.fragment,Rn),Rn.forEach(n),mn.forEach(n),eu=d(i),Mt=r(i,"H2",{class:!0});var up=a(Mt);go=r(up,"A",{id:!0,class:!0,href:!0});var p0=a(go);Ed=r(p0,"SPAN",{});var h0=a(Ed);w(Br.$$.fragment,h0),h0.forEach(n),p0.forEach(n),kT=d(up),Md=r(up,"SPAN",{});var f0=a(Md);wT=o(f0,"TFFunnelModelForPreTraining"),f0.forEach(n),up.forEach(n),nu=d(i),Le=r(i,"DIV",{class:!0});var gn=a(Le);w(Wr.$$.fragment,gn),bT=d(gn),zd=r(gn,"P",{});var m0=a(zd);yT=o(m0,"Funnel model with a binary classification head on top as used during pretraining for identifying generated tokens."),m0.forEach(n),$T=d(gn),Qr=r(gn,"P",{});var pp=a(Qr);ET=o(pp,"The Funnel Transformer model was proposed in "),Rr=r(pp,"A",{href:!0,rel:!0});var g0=a(Rr);MT=o(g0,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),g0.forEach(n),zT=o(pp," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),pp.forEach(n),PT=d(gn),Hr=r(gn,"P",{});var hp=a(Hr);qT=o(hp,"This model inherits from "),Ei=r(hp,"A",{href:!0});var _0=a(Ei);CT=o(_0,"TFPreTrainedModel"),_0.forEach(n),xT=o(hp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hp.forEach(n),jT=d(gn),Vr=r(gn,"P",{});var fp=a(Vr);LT=o(fp,"This model is also a "),Yr=r(fp,"A",{href:!0,rel:!0});var v0=a(Yr);AT=o(v0,"tf.keras.Model"),v0.forEach(n),DT=o(fp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fp.forEach(n),IT=d(gn),w(_o.$$.fragment,gn),OT=d(gn),an=r(gn,"DIV",{class:!0});var Hn=a(an);w(Ur.$$.fragment,Hn),ST=d(Hn),zt=r(Hn,"P",{});var nl=a(zt);NT=o(nl,"The "),Mi=r(nl,"A",{href:!0});var T0=a(Mi);BT=o(T0,"TFFunnelForPreTraining"),T0.forEach(n),WT=o(nl," forward method, overrides the "),Pd=r(nl,"CODE",{});var F0=a(Pd);QT=o(F0,"__call__"),F0.forEach(n),RT=o(nl," special method."),nl.forEach(n),HT=d(Hn),w(vo.$$.fragment,Hn),VT=d(Hn),qd=r(Hn,"P",{});var k0=a(qd);YT=o(k0,"Examples:"),k0.forEach(n),UT=d(Hn),w(Gr.$$.fragment,Hn),Hn.forEach(n),gn.forEach(n),tu=d(i),Pt=r(i,"H2",{class:!0});var mp=a(Pt);To=r(mp,"A",{id:!0,class:!0,href:!0});var w0=a(To);Cd=r(w0,"SPAN",{});var b0=a(Cd);w(Zr.$$.fragment,b0),b0.forEach(n),w0.forEach(n),GT=d(mp),xd=r(mp,"SPAN",{});var y0=a(xd);ZT=o(y0,"TFFunnelForMaskedLM"),y0.forEach(n),mp.forEach(n),ou=d(i),Ae=r(i,"DIV",{class:!0});var _n=a(Ae);w(Kr.$$.fragment,_n),KT=d(_n),Xr=r(_n,"P",{});var gp=a(Xr);XT=o(gp,"Funnel Model with a "),jd=r(gp,"CODE",{});var $0=a(jd);JT=o($0,"language modeling"),$0.forEach(n),eF=o(gp," head on top."),gp.forEach(n),nF=d(_n),Jr=r(_n,"P",{});var _p=a(Jr);tF=o(_p,"The Funnel Transformer model was proposed in "),ea=r(_p,"A",{href:!0,rel:!0});var E0=a(ea);oF=o(E0,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),E0.forEach(n),sF=o(_p," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),_p.forEach(n),rF=d(_n),na=r(_n,"P",{});var vp=a(na);aF=o(vp,"This model inherits from "),zi=r(vp,"A",{href:!0});var M0=a(zi);iF=o(M0,"TFPreTrainedModel"),M0.forEach(n),lF=o(vp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vp.forEach(n),dF=d(_n),ta=r(_n,"P",{});var Tp=a(ta);cF=o(Tp,"This model is also a "),oa=r(Tp,"A",{href:!0,rel:!0});var z0=a(oa);uF=o(z0,"tf.keras.Model"),z0.forEach(n),pF=o(Tp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Tp.forEach(n),hF=d(_n),w(Fo.$$.fragment,_n),fF=d(_n),ln=r(_n,"DIV",{class:!0});var Vn=a(ln);w(sa.$$.fragment,Vn),mF=d(Vn),qt=r(Vn,"P",{});var tl=a(qt);gF=o(tl,"The "),Pi=r(tl,"A",{href:!0});var P0=a(Pi);_F=o(P0,"TFFunnelForMaskedLM"),P0.forEach(n),vF=o(tl," forward method, overrides the "),Ld=r(tl,"CODE",{});var q0=a(Ld);TF=o(q0,"__call__"),q0.forEach(n),FF=o(tl," special method."),tl.forEach(n),kF=d(Vn),w(ko.$$.fragment,Vn),wF=d(Vn),Ad=r(Vn,"P",{});var C0=a(Ad);bF=o(C0,"Example:"),C0.forEach(n),yF=d(Vn),w(ra.$$.fragment,Vn),Vn.forEach(n),_n.forEach(n),su=d(i),Ct=r(i,"H2",{class:!0});var Fp=a(Ct);wo=r(Fp,"A",{id:!0,class:!0,href:!0});var x0=a(wo);Dd=r(x0,"SPAN",{});var j0=a(Dd);w(aa.$$.fragment,j0),j0.forEach(n),x0.forEach(n),$F=d(Fp),Id=r(Fp,"SPAN",{});var L0=a(Id);EF=o(L0,"TFFunnelForSequenceClassification"),L0.forEach(n),Fp.forEach(n),ru=d(i),De=r(i,"DIV",{class:!0});var vn=a(De);w(ia.$$.fragment,vn),MF=d(vn),Od=r(vn,"P",{});var A0=a(Od);zF=o(A0,`Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.`),A0.forEach(n),PF=d(vn),la=r(vn,"P",{});var kp=a(la);qF=o(kp,"The Funnel Transformer model was proposed in "),da=r(kp,"A",{href:!0,rel:!0});var D0=a(da);CF=o(D0,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),D0.forEach(n),xF=o(kp," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),kp.forEach(n),jF=d(vn),ca=r(vn,"P",{});var wp=a(ca);LF=o(wp,"This model inherits from "),qi=r(wp,"A",{href:!0});var I0=a(qi);AF=o(I0,"TFPreTrainedModel"),I0.forEach(n),DF=o(wp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wp.forEach(n),IF=d(vn),ua=r(vn,"P",{});var bp=a(ua);OF=o(bp,"This model is also a "),pa=r(bp,"A",{href:!0,rel:!0});var O0=a(pa);SF=o(O0,"tf.keras.Model"),O0.forEach(n),NF=o(bp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bp.forEach(n),BF=d(vn),w(bo.$$.fragment,vn),WF=d(vn),dn=r(vn,"DIV",{class:!0});var Yn=a(dn);w(ha.$$.fragment,Yn),QF=d(Yn),xt=r(Yn,"P",{});var ol=a(xt);RF=o(ol,"The "),Ci=r(ol,"A",{href:!0});var S0=a(Ci);HF=o(S0,"TFFunnelForSequenceClassification"),S0.forEach(n),VF=o(ol," forward method, overrides the "),Sd=r(ol,"CODE",{});var N0=a(Sd);YF=o(N0,"__call__"),N0.forEach(n),UF=o(ol," special method."),ol.forEach(n),GF=d(Yn),w(yo.$$.fragment,Yn),ZF=d(Yn),Nd=r(Yn,"P",{});var B0=a(Nd);KF=o(B0,"Example:"),B0.forEach(n),XF=d(Yn),w(fa.$$.fragment,Yn),Yn.forEach(n),vn.forEach(n),au=d(i),jt=r(i,"H2",{class:!0});var yp=a(jt);$o=r(yp,"A",{id:!0,class:!0,href:!0});var W0=a($o);Bd=r(W0,"SPAN",{});var Q0=a(Bd);w(ma.$$.fragment,Q0),Q0.forEach(n),W0.forEach(n),JF=d(yp),Wd=r(yp,"SPAN",{});var R0=a(Wd);ek=o(R0,"TFFunnelForMultipleChoice"),R0.forEach(n),yp.forEach(n),iu=d(i),Ie=r(i,"DIV",{class:!0});var Tn=a(Ie);w(ga.$$.fragment,Tn),nk=d(Tn),Qd=r(Tn,"P",{});var H0=a(Qd);tk=o(H0,`Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),H0.forEach(n),ok=d(Tn),_a=r(Tn,"P",{});var $p=a(_a);sk=o($p,"The Funnel Transformer model was proposed in "),va=r($p,"A",{href:!0,rel:!0});var V0=a(va);rk=o(V0,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),V0.forEach(n),ak=o($p," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),$p.forEach(n),ik=d(Tn),Ta=r(Tn,"P",{});var Ep=a(Ta);lk=o(Ep,"This model inherits from "),xi=r(Ep,"A",{href:!0});var Y0=a(xi);dk=o(Y0,"TFPreTrainedModel"),Y0.forEach(n),ck=o(Ep,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ep.forEach(n),uk=d(Tn),Fa=r(Tn,"P",{});var Mp=a(Fa);pk=o(Mp,"This model is also a "),ka=r(Mp,"A",{href:!0,rel:!0});var U0=a(ka);hk=o(U0,"tf.keras.Model"),U0.forEach(n),fk=o(Mp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Mp.forEach(n),mk=d(Tn),w(Eo.$$.fragment,Tn),gk=d(Tn),cn=r(Tn,"DIV",{class:!0});var Un=a(cn);w(wa.$$.fragment,Un),_k=d(Un),Lt=r(Un,"P",{});var sl=a(Lt);vk=o(sl,"The "),ji=r(sl,"A",{href:!0});var G0=a(ji);Tk=o(G0,"TFFunnelForMultipleChoice"),G0.forEach(n),Fk=o(sl," forward method, overrides the "),Rd=r(sl,"CODE",{});var Z0=a(Rd);kk=o(Z0,"__call__"),Z0.forEach(n),wk=o(sl," special method."),sl.forEach(n),bk=d(Un),w(Mo.$$.fragment,Un),yk=d(Un),Hd=r(Un,"P",{});var K0=a(Hd);$k=o(K0,"Example:"),K0.forEach(n),Ek=d(Un),w(ba.$$.fragment,Un),Un.forEach(n),Tn.forEach(n),lu=d(i),At=r(i,"H2",{class:!0});var zp=a(At);zo=r(zp,"A",{id:!0,class:!0,href:!0});var X0=a(zo);Vd=r(X0,"SPAN",{});var J0=a(Vd);w(ya.$$.fragment,J0),J0.forEach(n),X0.forEach(n),Mk=d(zp),Yd=r(zp,"SPAN",{});var ey=a(Yd);zk=o(ey,"TFFunnelForTokenClassification"),ey.forEach(n),zp.forEach(n),du=d(i),Oe=r(i,"DIV",{class:!0});var Fn=a(Oe);w($a.$$.fragment,Fn),Pk=d(Fn),Ud=r(Fn,"P",{});var ny=a(Ud);qk=o(ny,`Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ny.forEach(n),Ck=d(Fn),Ea=r(Fn,"P",{});var Pp=a(Ea);xk=o(Pp,"The Funnel Transformer model was proposed in "),Ma=r(Pp,"A",{href:!0,rel:!0});var ty=a(Ma);jk=o(ty,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),ty.forEach(n),Lk=o(Pp," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),Pp.forEach(n),Ak=d(Fn),za=r(Fn,"P",{});var qp=a(za);Dk=o(qp,"This model inherits from "),Li=r(qp,"A",{href:!0});var oy=a(Li);Ik=o(oy,"TFPreTrainedModel"),oy.forEach(n),Ok=o(qp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qp.forEach(n),Sk=d(Fn),Pa=r(Fn,"P",{});var Cp=a(Pa);Nk=o(Cp,"This model is also a "),qa=r(Cp,"A",{href:!0,rel:!0});var sy=a(qa);Bk=o(sy,"tf.keras.Model"),sy.forEach(n),Wk=o(Cp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cp.forEach(n),Qk=d(Fn),w(Po.$$.fragment,Fn),Rk=d(Fn),un=r(Fn,"DIV",{class:!0});var Gn=a(un);w(Ca.$$.fragment,Gn),Hk=d(Gn),Dt=r(Gn,"P",{});var rl=a(Dt);Vk=o(rl,"The "),Ai=r(rl,"A",{href:!0});var ry=a(Ai);Yk=o(ry,"TFFunnelForTokenClassification"),ry.forEach(n),Uk=o(rl," forward method, overrides the "),Gd=r(rl,"CODE",{});var ay=a(Gd);Gk=o(ay,"__call__"),ay.forEach(n),Zk=o(rl," special method."),rl.forEach(n),Kk=d(Gn),w(qo.$$.fragment,Gn),Xk=d(Gn),Zd=r(Gn,"P",{});var iy=a(Zd);Jk=o(iy,"Example:"),iy.forEach(n),e1=d(Gn),w(xa.$$.fragment,Gn),Gn.forEach(n),Fn.forEach(n),cu=d(i),It=r(i,"H2",{class:!0});var xp=a(It);Co=r(xp,"A",{id:!0,class:!0,href:!0});var ly=a(Co);Kd=r(ly,"SPAN",{});var dy=a(Kd);w(ja.$$.fragment,dy),dy.forEach(n),ly.forEach(n),n1=d(xp),Xd=r(xp,"SPAN",{});var cy=a(Xd);t1=o(cy,"TFFunnelForQuestionAnswering"),cy.forEach(n),xp.forEach(n),uu=d(i),Se=r(i,"DIV",{class:!0});var kn=a(Se);w(La.$$.fragment,kn),o1=d(kn),Ot=r(kn,"P",{});var al=a(Ot);s1=o(al,`Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Jd=r(al,"CODE",{});var uy=a(Jd);r1=o(uy,"span start logits"),uy.forEach(n),a1=o(al," and "),ec=r(al,"CODE",{});var py=a(ec);i1=o(py,"span end logits"),py.forEach(n),l1=o(al,")."),al.forEach(n),d1=d(kn),Aa=r(kn,"P",{});var jp=a(Aa);c1=o(jp,"The Funnel Transformer model was proposed in "),Da=r(jp,"A",{href:!0,rel:!0});var hy=a(Da);u1=o(hy,`Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing`),hy.forEach(n),p1=o(jp," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),jp.forEach(n),h1=d(kn),Ia=r(kn,"P",{});var Lp=a(Ia);f1=o(Lp,"This model inherits from "),Di=r(Lp,"A",{href:!0});var fy=a(Di);m1=o(fy,"TFPreTrainedModel"),fy.forEach(n),g1=o(Lp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Lp.forEach(n),_1=d(kn),Oa=r(kn,"P",{});var Ap=a(Oa);v1=o(Ap,"This model is also a "),Sa=r(Ap,"A",{href:!0,rel:!0});var my=a(Sa);T1=o(my,"tf.keras.Model"),my.forEach(n),F1=o(Ap,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ap.forEach(n),k1=d(kn),w(xo.$$.fragment,kn),w1=d(kn),pn=r(kn,"DIV",{class:!0});var Zn=a(pn);w(Na.$$.fragment,Zn),b1=d(Zn),St=r(Zn,"P",{});var il=a(St);y1=o(il,"The "),Ii=r(il,"A",{href:!0});var gy=a(Ii);$1=o(gy,"TFFunnelForQuestionAnswering"),gy.forEach(n),E1=o(il," forward method, overrides the "),nc=r(il,"CODE",{});var _y=a(nc);M1=o(_y,"__call__"),_y.forEach(n),z1=o(il," special method."),il.forEach(n),P1=d(Zn),w(jo.$$.fragment,Zn),q1=d(Zn),tc=r(Zn,"P",{});var vy=a(tc);C1=o(vy,"Example:"),vy.forEach(n),x1=d(Zn),w(Ba.$$.fragment,Zn),Zn.forEach(n),kn.forEach(n),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(Uy)),c(g,"id","funnel-transformer"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#funnel-transformer"),c(m,"class","relative group"),c(X,"id","overview"),c(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(X,"href","#overview"),c(P,"class","relative group"),c(te,"href","https://arxiv.org/abs/2006.03236"),c(te,"rel","nofollow"),c(re,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel"),c(u,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForPreTraining"),c(ve,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMaskedLM"),c(Te,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForTokenClassification"),c(Fe,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelBaseModel"),c(ke,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForSequenceClassification"),c(Va,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMultipleChoice"),c(Io,"href","https://huggingface.co/sgugger"),c(Io,"rel","nofollow"),c(Oo,"href","https://github.com/laiguokun/Funnel-Transformer"),c(Oo,"rel","nofollow"),c(Nt,"id","transformers.FunnelConfig"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#transformers.FunnelConfig"),c(Kn,"class","relative group"),c(Ya,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel"),c(Ua,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.TFBertModel"),c(Bo,"href","https://huggingface.co/funnel-transformer/small"),c(Bo,"rel","nofollow"),c(Ga,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Za,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),c(Cn,"class","docstring"),c(Bt,"id","transformers.FunnelTokenizer"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#transformers.FunnelTokenizer"),c(Jn,"class","relative 
group"),c(Ka,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizer"),c(Xa,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(Ja,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),c(Ln,"class","docstring"),c(Qt,"class","docstring"),c(wn,"class","docstring"),c(Fl,"class","docstring"),c(qe,"class","docstring"),c(Rt,"id","transformers.FunnelTokenizerFast"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.FunnelTokenizerFast"),c(nt,"class","relative group"),c(ti,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelTokenizerFast"),c(oi,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(si,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),c(bn,"class","docstring"),c(Ze,"class","docstring"),c(Vt,"id","transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vt,"href","#transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput"),c(ot,"class","relative group"),c(ri,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForPreTraining"),c(st,"class","docstring"),c(ai,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForPreTraining"),c(rt,"class","docstring"),c(Yt,"id","transformers.FunnelBaseModel"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#transformers.FunnelBaseModel"),c(at,"class","relative group"),c(us,"href","https://arxiv.org/abs/2006.03236"),c(us,"rel","nofollow"),c(ii,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(fs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(fs,"rel","nofollow"),c(li,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelBaseModel"),c(Ke,"class","docstring"),c(We,"class","docstring"),c(Gt,"id","transformers.FunnelModel"),c(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gt,"href","#transformers.FunnelModel"),c(lt,"class","relative group"),c(Fs,"href","https://arxiv.org/abs/2006.03236"),c(Fs,"rel","nofollow"),c(di,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(bs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(bs,"rel","nofollow"),c(ci,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelModel"),c(Xe,"class","docstring"),c(Qe,"class","docstring"),c(Kt,"id","transformers.FunnelForPreTraining"),c(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kt,"href","#transformers.FunnelForPreTraining"),c(ct,"class","relative 
group"),c(ui,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForPreTraining"),c(Je,"class","docstring"),c(Ms,"class","docstring"),c(Jt,"id","transformers.FunnelForMaskedLM"),c(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#transformers.FunnelForMaskedLM"),c(pt,"class","relative group"),c(Ls,"href","https://arxiv.org/abs/2006.03236"),c(Ls,"rel","nofollow"),c(pi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Is,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Is,"rel","nofollow"),c(hi,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMaskedLM"),c(en,"class","docstring"),c(Re,"class","docstring"),c(no,"id","transformers.FunnelForSequenceClassification"),c(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(no,"href","#transformers.FunnelForSequenceClassification"),c(ft,"class","relative group"),c(Qs,"href","https://arxiv.org/abs/2006.03236"),c(Qs,"rel","nofollow"),c(fi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Vs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Vs,"rel","nofollow"),c(mi,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForSequenceClassification"),c(Be,"class","docstring"),c(He,"class","docstring"),c(oo,"id","transformers.FunnelForMultipleChoice"),c(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(oo,"href","#transformers.FunnelForMultipleChoice"),c(gt,"class","relative group"),c(Js,"href","https://arxiv.org/abs/2006.03236"),c(Js,"rel","nofollow"),c(gi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(tr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(tr,"rel","nofollow"),c(_i,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForMultipleChoice"),c(nn,"class","docstring"),c(Ve,"class","docstring"),c(ro,"id","transformers.FunnelForTokenClassification"),c(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ro,"href","#transformers.FunnelForTokenClassification"),c(vt,"class","relative group"),c(lr,"href","https://arxiv.org/abs/2006.03236"),c(lr,"rel","nofollow"),c(vi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ur,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ur,"rel","nofollow"),c(Ti,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForTokenClassification"),c(tn,"class","docstring"),c(Ye,"class","docstring"),c(io,"id","transformers.FunnelForQuestionAnswering"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.FunnelForQuestionAnswering"),c(Ft,"class","relative 
group"),c(_r,"href","https://arxiv.org/abs/2006.03236"),c(_r,"rel","nofollow"),c(Fi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Fr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Fr,"rel","nofollow"),c(ki,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.FunnelForQuestionAnswering"),c(on,"class","docstring"),c(Ue,"class","docstring"),c(co,"id","transformers.TFFunnelBaseModel"),c(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(co,"href","#transformers.TFFunnelBaseModel"),c(bt,"class","relative group"),c(Er,"href","https://arxiv.org/abs/2006.03236"),c(Er,"rel","nofollow"),c(wi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Pr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Pr,"rel","nofollow"),c(bi,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelBaseModel"),c(sn,"class","docstring"),c(xe,"class","docstring"),c(ho,"id","transformers.TFFunnelModel"),c(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ho,"href","#transformers.TFFunnelModel"),c($t,"class","relative group"),c(Ar,"href","https://arxiv.org/abs/2006.03236"),c(Ar,"rel","nofollow"),c(yi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Or,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Or,"rel","nofollow"),c($i,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelModel"),c(rn,"class","docstring"),c(je,"class","docstring"),c(go,"id","transformers.TFFunnelForPreTraining"),c(go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(go,"href","#transformers.TFFunnelForPreTraining"),c(Mt,"class","relative group"),c(Rr,"href","https://arxiv.org/abs/2006.03236"),c(Rr,"rel","nofollow"),c(Ei,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Yr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Yr,"rel","nofollow"),c(Mi,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForPreTraining"),c(an,"class","docstring"),c(Le,"class","docstring"),c(To,"id","transformers.TFFunnelForMaskedLM"),c(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(To,"href","#transformers.TFFunnelForMaskedLM"),c(Pt,"class","relative group"),c(ea,"href","https://arxiv.org/abs/2006.03236"),c(ea,"rel","nofollow"),c(zi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(oa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(oa,"rel","nofollow"),c(Pi,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForMaskedLM"),c(ln,"class","docstring"),c(Ae,"class","docstring"),c(wo,"id","transformers.TFFunnelForSequenceClassification"),c(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(wo,"href","#transformers.TFFunnelForSequenceClassification"),c(Ct,"class","relative group"),c(da,"href","https://arxiv.org/abs/2006.03236"),c(da,"rel","nofollow"),c(qi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(pa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(pa,"rel","nofollow"),c(Ci,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForSequenceClassification"),c(dn,"class","docstring"),c(De,"class","docstring"),c($o,"id","transformers.TFFunnelForMultipleChoice"),c($o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c($o,"href","#transformers.TFFunnelForMultipleChoice"),c(jt,"class","relative group"),c(va,"href","https://arxiv.org/abs/2006.03236"),c(va,"rel","nofollow"),c(xi,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(ka,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ka,"rel","nofollow"),c(ji,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForMultipleChoice"),c(cn,"class","docstring"),c(Ie,"class","docstring"),c(zo,"id","transformers.TFFunnelForTokenClassification"),c(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zo,"href","#transformers.TFFunnelForTokenClassification"),c(At,"class","relative group"),c(Ma,"href","https://arxiv.org/abs/2006.03236"),c(Ma,"rel","nofollow"),c(Li,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(qa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(qa,"rel","nofollow"),c(Ai,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForTokenClassification"),c(un,"class","docstring"),c(Oe,"class","docstring"),c(Co,"id","transformers.TFFunnelForQuestionAnswering"),c(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Co,"href","#transformers.TFFunnelForQuestionAnswering"),c(It,"class","relative 
group"),c(Da,"href","https://arxiv.org/abs/2006.03236"),c(Da,"rel","nofollow"),c(Di,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Sa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Sa,"rel","nofollow"),c(Ii,"href","/docs/transformers/v4.15.0/en/model_doc/funnel#transformers.TFFunnelForQuestionAnswering"),c(pn,"class","docstring"),c(Se,"class","docstring")},m(i,f){e(document.head,p),h(i,M,f),h(i,m,f),e(m,g),e(g,T),b(v,T,null),e(m,_),e(m,z),e(z,ce),h(i,G,f),h(i,P,f),e(P,X),e(X,I),b(ne,I,null),e(P,ue),e(P,O),e(O,pe),h(i,ie,f),h(i,U,f),e(U,L),e(U,te),e(te,Z),e(U,q),h(i,x,f),h(i,oe,f),e(oe,Q),h(i,le,f),h(i,se,f),e(se,S),e(S,he),h(i,de,f),h(i,C,f),e(C,fe),h(i,B,f),h(i,J,f),e(J,ae),e(ae,R),e(J,me),e(J,N),e(N,A),e(N,re),e(re,H),e(N,ge),e(N,u),e(u,F),e(N,K),e(N,ve),e(ve,we),e(N,D),e(N,Te),e(Te,be),e(N,ye),e(N,j),e(j,V),e(N,$e),e(N,Fe),e(Fe,Y),e(N,Ee),e(N,ke),e(ke,_e),e(N,Me),e(N,Va),e(Va,Dp),e(N,Ip),h(i,yc,f),h(i,jn,f),e(jn,Op),e(jn,Io),e(Io,Sp),e(jn,Np),e(jn,Oo),e(Oo,Bp),e(jn,Wp),h(i,$c,f),h(i,Kn,f),e(Kn,Nt),e(Nt,ll),b(So,ll,null),e(Kn,Qp),e(Kn,dl),e(dl,Rp),h(i,Ec,f),h(i,Cn,f),b(No,Cn,null),e(Cn,Hp),e(Cn,xn),e(xn,Vp),e(xn,Ya),e(Ya,Yp),e(xn,Up),e(xn,Ua),e(Ua,Gp),e(xn,Zp),e(xn,Bo),e(Bo,Kp),e(xn,Xp),e(Cn,Jp),e(Cn,Xn),e(Xn,eh),e(Xn,Ga),e(Ga,nh),e(Xn,th),e(Xn,Za),e(Za,oh),e(Xn,sh),h(i,Mc,f),h(i,Jn,f),e(Jn,Bt),e(Bt,cl),b(Wo,cl,null),e(Jn,rh),e(Jn,ul),e(ul,ah),h(i,zc,f),h(i,qe,f),b(Qo,qe,null),e(qe,ih),e(qe,pl),e(pl,lh),e(qe,dh),e(qe,Wt),e(Wt,Ka),e(Ka,ch),e(Wt,uh),e(Wt,Xa),e(Xa,ph),e(Wt,hh),e(qe,fh),e(qe,Ro),e(Ro,mh),e(Ro,Ja),e(Ja,gh),e(Ro,_h),e(qe,vh),e(qe,Ln),b(Ho,Ln,null),e(Ln,Th),e(Ln,hl),e(hl,Fh),e(Ln,kh),e(Ln,Vo),e(Vo,ei),e(ei,wh),e(ei,fl),e(fl,bh),e(Vo,yh),e(Vo,ni),e(ni,$h),e(ni,ml),e(ml,Eh),e(qe,Mh),e(qe,Qt),b(Yo,Qt,null),e(Qt,zh),e(Qt,Uo),e(Uo,Ph),e(Uo,gl),e(gl,qh),e(Uo,Ch),e(qe,xh),e(qe,wn),b(Go,wn,null),e(wn,jh),e(wn,_l),e(_l,Lh),e(wn,Ah),b(Zo,wn,null),e(wn,Dh),e(wn,et),e(et,Ih),e(et,vl),e(vl,Oh),e(et,Sh),e(et,Tl),e(Tl,Nh),e(et,Bh),e(qe,Wh),e(qe,Fl),h(i,Pc,f),h(i,nt,f),e(nt,Rt),e(Rt,kl),b(Ko,kl,null),e(nt,Qh),e(nt,wl),e(wl,Rh),h(i,qc,f),h(i,Ze,f),b(Xo,Ze,null),e(Ze,Hh),e(Ze,Jo),e(Jo,Vh),e(Jo,bl),e(bl,Yh),e(Jo,Uh),e(Ze,Gh),e(Ze,Ht),e(Ht,ti),e(ti,Zh),e(Ht,Kh),e(Ht,oi),e(oi,Xh),e(Ht,Jh),e(Ze,ef),e(Ze,es),e(es,nf),e(es,si),e(si,tf),e(es,of),e(Ze,sf),e(Ze,bn),b(ns,bn,null),e(bn,rf),e(bn,yl),e(yl,af),e(bn,lf),b(ts,bn,null),e(bn,df),e(bn,tt),e(tt,cf),e(tt,$l),e($l,uf),e(tt,pf),e(tt,El),e(El,hf),e(tt,ff),h(i,Cc,f),h(i,ot,f),e(ot,Vt),e(Vt,Ml),b(os,Ml,null),e(ot,mf),e(ot,zl),e(zl,gf),h(i,xc,f),h(i,st,f),b(ss,st,null),e(st,_f),e(st,rs),e(rs,vf),e(rs,ri),e(ri,Tf),e(rs,Ff),h(i,jc,f),h(i,rt,f),b(as,rt,null),e(rt,kf),e(rt,is),e(is,wf),e(is,ai),e(ai,bf),e(is,yf),h(i,Lc,f),h(i,at,f),e(at,Yt),e(Yt,Pl),b(ls,Pl,null),e(at,$f),e(at,ql),e(ql,Ef),h(i,Ac,f),h(i,We,f),b(ds,We,null),e(We,Mf),e(We,Cl),e(Cl,zf),e(We,Pf),e(We,cs),e(cs,qf),e(cs,us),e(us,Cf),e(cs,xf),e(We,jf),e(We,ps),e(ps,Lf),e(ps,ii),e(ii,Af),e(ps,Df),e(We,If),e(We,hs),e(hs,Of),e(hs,fs),e(fs,Sf),e(hs,Nf),e(We,Bf),e(We,Ke),b(ms,Ke,null),e(Ke,Wf),e(Ke,it),e(it,Qf),e(it,li),e(li,Rf),e(it,Hf),e(it,xl),e(xl,Vf),e(it,Yf),e(Ke,Uf),b(Ut,Ke,null),e(Ke,Gf),e(Ke,jl),e(jl,Zf),e(Ke,Kf),b(gs,Ke,null),h(i,Dc,f),h(i,lt,f),e(lt,Gt),e(Gt,Ll),b(_s,Ll,null),e(lt,Xf),e(lt,Al),e(Al,Jf),h(i,Ic,f),h(i,Qe,f),b(vs,Qe,null),e(Qe,em),e(Qe,Dl),e(Dl,nm),e(Qe,tm),e(Qe,Ts),e(Ts,om),e(Ts,Fs),e(Fs,sm),e(Ts,rm),e(Qe,am),e(Qe,ks),e(ks,im),e(ks,di),e(di,lm),e(ks,dm),e(Qe,cm),e(Qe,ws),e(ws,um),e(ws,bs),e(bs,pm),e(ws,hm),e(Qe,f
m),e(Qe,Xe),b(ys,Xe,null),e(Xe,mm),e(Xe,dt),e(dt,gm),e(dt,ci),e(ci,_m),e(dt,vm),e(dt,Il),e(Il,Tm),e(dt,Fm),e(Xe,km),b(Zt,Xe,null),e(Xe,wm),e(Xe,Ol),e(Ol,bm),e(Xe,ym),b($s,Xe,null),h(i,Oc,f),h(i,ct,f),e(ct,Kt),e(Kt,Sl),b(Es,Sl,null),e(ct,$m),e(ct,Nl),e(Nl,Em),h(i,Sc,f),h(i,Ms,f),e(Ms,Je),b(zs,Je,null),e(Je,Mm),e(Je,ut),e(ut,zm),e(ut,ui),e(ui,Pm),e(ut,qm),e(ut,Bl),e(Bl,Cm),e(ut,xm),e(Je,jm),b(Xt,Je,null),e(Je,Lm),e(Je,Wl),e(Wl,Am),e(Je,Dm),b(Ps,Je,null),h(i,Nc,f),h(i,pt,f),e(pt,Jt),e(Jt,Ql),b(qs,Ql,null),e(pt,Im),e(pt,Rl),e(Rl,Om),h(i,Bc,f),h(i,Re,f),b(Cs,Re,null),e(Re,Sm),e(Re,xs),e(xs,Nm),e(xs,Hl),e(Hl,Bm),e(xs,Wm),e(Re,Qm),e(Re,js),e(js,Rm),e(js,Ls),e(Ls,Hm),e(js,Vm),e(Re,Ym),e(Re,As),e(As,Um),e(As,pi),e(pi,Gm),e(As,Zm),e(Re,Km),e(Re,Ds),e(Ds,Xm),e(Ds,Is),e(Is,Jm),e(Ds,eg),e(Re,ng),e(Re,en),b(Os,en,null),e(en,tg),e(en,ht),e(ht,og),e(ht,hi),e(hi,sg),e(ht,rg),e(ht,Vl),e(Vl,ag),e(ht,ig),e(en,lg),b(eo,en,null),e(en,dg),e(en,Yl),e(Yl,cg),e(en,ug),b(Ss,en,null),h(i,Wc,f),h(i,ft,f),e(ft,no),e(no,Ul),b(Ns,Ul,null),e(ft,pg),e(ft,Gl),e(Gl,hg),h(i,Qc,f),h(i,He,f),b(Bs,He,null),e(He,fg),e(He,Zl),e(Zl,mg),e(He,gg),e(He,Ws),e(Ws,_g),e(Ws,Qs),e(Qs,vg),e(Ws,Tg),e(He,Fg),e(He,Rs),e(Rs,kg),e(Rs,fi),e(fi,wg),e(Rs,bg),e(He,yg),e(He,Hs),e(Hs,$g),e(Hs,Vs),e(Vs,Eg),e(Hs,Mg),e(He,zg),e(He,Be),b(Ys,Be,null),e(Be,Pg),e(Be,mt),e(mt,qg),e(mt,mi),e(mi,Cg),e(mt,xg),e(mt,Kl),e(Kl,jg),e(mt,Lg),e(Be,Ag),b(to,Be,null),e(Be,Dg),e(Be,Xl),e(Xl,Ig),e(Be,Og),b(Us,Be,null),e(Be,Sg),e(Be,Jl),e(Jl,Ng),e(Be,Bg),b(Gs,Be,null),h(i,Rc,f),h(i,gt,f),e(gt,oo),e(oo,ed),b(Zs,ed,null),e(gt,Wg),e(gt,nd),e(nd,Qg),h(i,Hc,f),h(i,Ve,f),b(Ks,Ve,null),e(Ve,Rg),e(Ve,td),e(td,Hg),e(Ve,Vg),e(Ve,Xs),e(Xs,Yg),e(Xs,Js),e(Js,Ug),e(Xs,Gg),e(Ve,Zg),e(Ve,er),e(er,Kg),e(er,gi),e(gi,Xg),e(er,Jg),e(Ve,e_),e(Ve,nr),e(nr,n_),e(nr,tr),e(tr,t_),e(nr,o_),e(Ve,s_),e(Ve,nn),b(or,nn,null),e(nn,r_),e(nn,_t),e(_t,a_),e(_t,_i),e(_i,i_),e(_t,l_),e(_t,od),e(od,d_),e(_t,c_),e(nn,u_),b(so,nn,null),e(nn,p_),e(nn,sd),e(sd,h_),e(nn,f_),b(sr,nn,null),h(i,Vc,f),h(i,vt,f),e(vt,ro),e(ro,rd),b(rr,rd,null),e(vt,m_),e(vt,ad),e(ad,g_),h(i,Yc,f),h(i,Ye,f),b(ar,Ye,null),e(Ye,__),e(Ye,id),e(id,v_),e(Ye,T_),e(Ye,ir),e(ir,F_),e(ir,lr),e(lr,k_),e(ir,w_),e(Ye,b_),e(Ye,dr),e(dr,y_),e(dr,vi),e(vi,$_),e(dr,E_),e(Ye,M_),e(Ye,cr),e(cr,z_),e(cr,ur),e(ur,P_),e(cr,q_),e(Ye,C_),e(Ye,tn),b(pr,tn,null),e(tn,x_),e(tn,Tt),e(Tt,j_),e(Tt,Ti),e(Ti,L_),e(Tt,A_),e(Tt,ld),e(ld,D_),e(Tt,I_),e(tn,O_),b(ao,tn,null),e(tn,S_),e(tn,dd),e(dd,N_),e(tn,B_),b(hr,tn,null),h(i,Uc,f),h(i,Ft,f),e(Ft,io),e(io,cd),b(fr,cd,null),e(Ft,W_),e(Ft,ud),e(ud,Q_),h(i,Gc,f),h(i,Ue,f),b(mr,Ue,null),e(Ue,R_),e(Ue,kt),e(kt,H_),e(kt,pd),e(pd,V_),e(kt,Y_),e(kt,hd),e(hd,U_),e(kt,G_),e(Ue,Z_),e(Ue,gr),e(gr,K_),e(gr,_r),e(_r,X_),e(gr,J_),e(Ue,ev),e(Ue,vr),e(vr,nv),e(vr,Fi),e(Fi,tv),e(vr,ov),e(Ue,sv),e(Ue,Tr),e(Tr,rv),e(Tr,Fr),e(Fr,av),e(Tr,iv),e(Ue,lv),e(Ue,on),b(kr,on,null),e(on,dv),e(on,wt),e(wt,cv),e(wt,ki),e(ki,uv),e(wt,pv),e(wt,fd),e(fd,hv),e(wt,fv),e(on,mv),b(lo,on,null),e(on,gv),e(on,md),e(md,_v),e(on,vv),b(wr,on,null),h(i,Zc,f),h(i,bt,f),e(bt,co),e(co,gd),b(br,gd,null),e(bt,Tv),e(bt,_d),e(_d,Fv),h(i,Kc,f),h(i,xe,f),b(yr,xe,null),e(xe,kv),e(xe,vd),e(vd,wv),e(xe,bv),e(xe,$r),e($r,yv),e($r,Er),e(Er,$v),e($r,Ev),e(xe,Mv),e(xe,Mr),e(Mr,zv),e(Mr,wi),e(wi,Pv),e(Mr,qv),e(xe,Cv),e(xe,zr),e(zr,xv),e(zr,Pr),e(Pr,jv),e(zr,Lv),e(xe,Av),b(uo,xe,null),e(xe,Dv),e(xe,sn),b(qr,sn,null),e(sn,Iv),e(sn,yt),e(yt,Ov),e(yt,bi),e(bi,Sv),e(yt,Nv),e(yt,Td),e(Td,Bv),e(yt,Wv),e(sn,Qv),b(po,sn,null),e(sn,Rv),e(sn,Fd),e(Fd,Hv),e(sn,Vv),b(Cr,sn,null),h(i,Xc,f),h(i,$t,
f),e($t,ho),e(ho,kd),b(xr,kd,null),e($t,Yv),e($t,wd),e(wd,Uv),h(i,Jc,f),h(i,je,f),b(jr,je,null),e(je,Gv),e(je,bd),e(bd,Zv),e(je,Kv),e(je,Lr),e(Lr,Xv),e(Lr,Ar),e(Ar,Jv),e(Lr,eT),e(je,nT),e(je,Dr),e(Dr,tT),e(Dr,yi),e(yi,oT),e(Dr,sT),e(je,rT),e(je,Ir),e(Ir,aT),e(Ir,Or),e(Or,iT),e(Ir,lT),e(je,dT),b(fo,je,null),e(je,cT),e(je,rn),b(Sr,rn,null),e(rn,uT),e(rn,Et),e(Et,pT),e(Et,$i),e($i,hT),e(Et,fT),e(Et,yd),e(yd,mT),e(Et,gT),e(rn,_T),b(mo,rn,null),e(rn,vT),e(rn,$d),e($d,TT),e(rn,FT),b(Nr,rn,null),h(i,eu,f),h(i,Mt,f),e(Mt,go),e(go,Ed),b(Br,Ed,null),e(Mt,kT),e(Mt,Md),e(Md,wT),h(i,nu,f),h(i,Le,f),b(Wr,Le,null),e(Le,bT),e(Le,zd),e(zd,yT),e(Le,$T),e(Le,Qr),e(Qr,ET),e(Qr,Rr),e(Rr,MT),e(Qr,zT),e(Le,PT),e(Le,Hr),e(Hr,qT),e(Hr,Ei),e(Ei,CT),e(Hr,xT),e(Le,jT),e(Le,Vr),e(Vr,LT),e(Vr,Yr),e(Yr,AT),e(Vr,DT),e(Le,IT),b(_o,Le,null),e(Le,OT),e(Le,an),b(Ur,an,null),e(an,ST),e(an,zt),e(zt,NT),e(zt,Mi),e(Mi,BT),e(zt,WT),e(zt,Pd),e(Pd,QT),e(zt,RT),e(an,HT),b(vo,an,null),e(an,VT),e(an,qd),e(qd,YT),e(an,UT),b(Gr,an,null),h(i,tu,f),h(i,Pt,f),e(Pt,To),e(To,Cd),b(Zr,Cd,null),e(Pt,GT),e(Pt,xd),e(xd,ZT),h(i,ou,f),h(i,Ae,f),b(Kr,Ae,null),e(Ae,KT),e(Ae,Xr),e(Xr,XT),e(Xr,jd),e(jd,JT),e(Xr,eF),e(Ae,nF),e(Ae,Jr),e(Jr,tF),e(Jr,ea),e(ea,oF),e(Jr,sF),e(Ae,rF),e(Ae,na),e(na,aF),e(na,zi),e(zi,iF),e(na,lF),e(Ae,dF),e(Ae,ta),e(ta,cF),e(ta,oa),e(oa,uF),e(ta,pF),e(Ae,hF),b(Fo,Ae,null),e(Ae,fF),e(Ae,ln),b(sa,ln,null),e(ln,mF),e(ln,qt),e(qt,gF),e(qt,Pi),e(Pi,_F),e(qt,vF),e(qt,Ld),e(Ld,TF),e(qt,FF),e(ln,kF),b(ko,ln,null),e(ln,wF),e(ln,Ad),e(Ad,bF),e(ln,yF),b(ra,ln,null),h(i,su,f),h(i,Ct,f),e(Ct,wo),e(wo,Dd),b(aa,Dd,null),e(Ct,$F),e(Ct,Id),e(Id,EF),h(i,ru,f),h(i,De,f),b(ia,De,null),e(De,MF),e(De,Od),e(Od,zF),e(De,PF),e(De,la),e(la,qF),e(la,da),e(da,CF),e(la,xF),e(De,jF),e(De,ca),e(ca,LF),e(ca,qi),e(qi,AF),e(ca,DF),e(De,IF),e(De,ua),e(ua,OF),e(ua,pa),e(pa,SF),e(ua,NF),e(De,BF),b(bo,De,null),e(De,WF),e(De,dn),b(ha,dn,null),e(dn,QF),e(dn,xt),e(xt,RF),e(xt,Ci),e(Ci,HF),e(xt,VF),e(xt,Sd),e(Sd,YF),e(xt,UF),e(dn,GF),b(yo,dn,null),e(dn,ZF),e(dn,Nd),e(Nd,KF),e(dn,XF),b(fa,dn,null),h(i,au,f),h(i,jt,f),e(jt,$o),e($o,Bd),b(ma,Bd,null),e(jt,JF),e(jt,Wd),e(Wd,ek),h(i,iu,f),h(i,Ie,f),b(ga,Ie,null),e(Ie,nk),e(Ie,Qd),e(Qd,tk),e(Ie,ok),e(Ie,_a),e(_a,sk),e(_a,va),e(va,rk),e(_a,ak),e(Ie,ik),e(Ie,Ta),e(Ta,lk),e(Ta,xi),e(xi,dk),e(Ta,ck),e(Ie,uk),e(Ie,Fa),e(Fa,pk),e(Fa,ka),e(ka,hk),e(Fa,fk),e(Ie,mk),b(Eo,Ie,null),e(Ie,gk),e(Ie,cn),b(wa,cn,null),e(cn,_k),e(cn,Lt),e(Lt,vk),e(Lt,ji),e(ji,Tk),e(Lt,Fk),e(Lt,Rd),e(Rd,kk),e(Lt,wk),e(cn,bk),b(Mo,cn,null),e(cn,yk),e(cn,Hd),e(Hd,$k),e(cn,Ek),b(ba,cn,null),h(i,lu,f),h(i,At,f),e(At,zo),e(zo,Vd),b(ya,Vd,null),e(At,Mk),e(At,Yd),e(Yd,zk),h(i,du,f),h(i,Oe,f),b($a,Oe,null),e(Oe,Pk),e(Oe,Ud),e(Ud,qk),e(Oe,Ck),e(Oe,Ea),e(Ea,xk),e(Ea,Ma),e(Ma,jk),e(Ea,Lk),e(Oe,Ak),e(Oe,za),e(za,Dk),e(za,Li),e(Li,Ik),e(za,Ok),e(Oe,Sk),e(Oe,Pa),e(Pa,Nk),e(Pa,qa),e(qa,Bk),e(Pa,Wk),e(Oe,Qk),b(Po,Oe,null),e(Oe,Rk),e(Oe,un),b(Ca,un,null),e(un,Hk),e(un,Dt),e(Dt,Vk),e(Dt,Ai),e(Ai,Yk),e(Dt,Uk),e(Dt,Gd),e(Gd,Gk),e(Dt,Zk),e(un,Kk),b(qo,un,null),e(un,Xk),e(un,Zd),e(Zd,Jk),e(un,e1),b(xa,un,null),h(i,cu,f),h(i,It,f),e(It,Co),e(Co,Kd),b(ja,Kd,null),e(It,n1),e(It,Xd),e(Xd,t1),h(i,uu,f),h(i,Se,f),b(La,Se,null),e(Se,o1),e(Se,Ot),e(Ot,s1),e(Ot,Jd),e(Jd,r1),e(Ot,a1),e(Ot,ec),e(ec,i1),e(Ot,l1),e(Se,d1),e(Se,Aa),e(Aa,c1),e(Aa,Da),e(Da,u1),e(Aa,p1),e(Se,h1),e(Se,Ia),e(Ia,f1),e(Ia,Di),e(Di,m1),e(Ia,g1),e(Se,_1),e(Se,Oa),e(Oa,v1),e(Oa,Sa),e(Sa,T1),e(Oa,F1),e(Se,k1),b(xo,Se,null),e(Se,w1),e(Se,pn),b(Na,pn,null),e(pn,b1),e(pn,St),e(St,y1),e(St,Ii),e(Ii,$1),e(St,E1),e(St,nc),e(nc,
M1),e(St,z1),e(pn,P1),b(jo,pn,null),e(pn,q1),e(pn,tc),e(tc,C1),e(pn,x1),b(Ba,pn,null),pu=!0},p(i,[f]){const Wa={};f&2&&(Wa.$$scope={dirty:f,ctx:i}),Ut.$set(Wa);const oc={};f&2&&(oc.$$scope={dirty:f,ctx:i}),Zt.$set(oc);const sc={};f&2&&(sc.$$scope={dirty:f,ctx:i}),Xt.$set(sc);const rc={};f&2&&(rc.$$scope={dirty:f,ctx:i}),eo.$set(rc);const Qa={};f&2&&(Qa.$$scope={dirty:f,ctx:i}),to.$set(Qa);const ac={};f&2&&(ac.$$scope={dirty:f,ctx:i}),so.$set(ac);const ic={};f&2&&(ic.$$scope={dirty:f,ctx:i}),ao.$set(ic);const lc={};f&2&&(lc.$$scope={dirty:f,ctx:i}),lo.$set(lc);const Ra={};f&2&&(Ra.$$scope={dirty:f,ctx:i}),uo.$set(Ra);const dc={};f&2&&(dc.$$scope={dirty:f,ctx:i}),po.$set(dc);const cc={};f&2&&(cc.$$scope={dirty:f,ctx:i}),fo.$set(cc);const uc={};f&2&&(uc.$$scope={dirty:f,ctx:i}),mo.$set(uc);const pc={};f&2&&(pc.$$scope={dirty:f,ctx:i}),_o.$set(pc);const hc={};f&2&&(hc.$$scope={dirty:f,ctx:i}),vo.$set(hc);const Ha={};f&2&&(Ha.$$scope={dirty:f,ctx:i}),Fo.$set(Ha);const fc={};f&2&&(fc.$$scope={dirty:f,ctx:i}),ko.$set(fc);const Ce={};f&2&&(Ce.$$scope={dirty:f,ctx:i}),bo.$set(Ce);const mc={};f&2&&(mc.$$scope={dirty:f,ctx:i}),yo.$set(mc);const gc={};f&2&&(gc.$$scope={dirty:f,ctx:i}),Eo.$set(gc);const _c={};f&2&&(_c.$$scope={dirty:f,ctx:i}),Mo.$set(_c);const vc={};f&2&&(vc.$$scope={dirty:f,ctx:i}),Po.$set(vc);const Tc={};f&2&&(Tc.$$scope={dirty:f,ctx:i}),qo.$set(Tc);const Fc={};f&2&&(Fc.$$scope={dirty:f,ctx:i}),xo.$set(Fc);const kc={};f&2&&(kc.$$scope={dirty:f,ctx:i}),jo.$set(kc)},i(i){pu||(y(v.$$.fragment,i),y(ne.$$.fragment,i),y(So.$$.fragment,i),y(No.$$.fragment,i),y(Wo.$$.fragment,i),y(Qo.$$.fragment,i),y(Ho.$$.fragment,i),y(Yo.$$.fragment,i),y(Go.$$.fragment,i),y(Zo.$$.fragment,i),y(Ko.$$.fragment,i),y(Xo.$$.fragment,i),y(ns.$$.fragment,i),y(ts.$$.fragment,i),y(os.$$.fragment,i),y(ss.$$.fragment,i),y(as.$$.fragment,i),y(ls.$$.fragment,i),y(ds.$$.fragment,i),y(ms.$$.fragment,i),y(Ut.$$.fragment,i),y(gs.$$.fragment,i),y(_s.$$.fragment,i),y(vs.$$.fragment,i),y(ys.$$.fragment,i),y(Zt.$$.fragment,i),y($s.$$.fragment,i),y(Es.$$.fragment,i),y(zs.$$.fragment,i),y(Xt.$$.fragment,i),y(Ps.$$.fragment,i),y(qs.$$.fragment,i),y(Cs.$$.fragment,i),y(Os.$$.fragment,i),y(eo.$$.fragment,i),y(Ss.$$.fragment,i),y(Ns.$$.fragment,i),y(Bs.$$.fragment,i),y(Ys.$$.fragment,i),y(to.$$.fragment,i),y(Us.$$.fragment,i),y(Gs.$$.fragment,i),y(Zs.$$.fragment,i),y(Ks.$$.fragment,i),y(or.$$.fragment,i),y(so.$$.fragment,i),y(sr.$$.fragment,i),y(rr.$$.fragment,i),y(ar.$$.fragment,i),y(pr.$$.fragment,i),y(ao.$$.fragment,i),y(hr.$$.fragment,i),y(fr.$$.fragment,i),y(mr.$$.fragment,i),y(kr.$$.fragment,i),y(lo.$$.fragment,i),y(wr.$$.fragment,i),y(br.$$.fragment,i),y(yr.$$.fragment,i),y(uo.$$.fragment,i),y(qr.$$.fragment,i),y(po.$$.fragment,i),y(Cr.$$.fragment,i),y(xr.$$.fragment,i),y(jr.$$.fragment,i),y(fo.$$.fragment,i),y(Sr.$$.fragment,i),y(mo.$$.fragment,i),y(Nr.$$.fragment,i),y(Br.$$.fragment,i),y(Wr.$$.fragment,i),y(_o.$$.fragment,i),y(Ur.$$.fragment,i),y(vo.$$.fragment,i),y(Gr.$$.fragment,i),y(Zr.$$.fragment,i),y(Kr.$$.fragment,i),y(Fo.$$.fragment,i),y(sa.$$.fragment,i),y(ko.$$.fragment,i),y(ra.$$.fragment,i),y(aa.$$.fragment,i),y(ia.$$.fragment,i),y(bo.$$.fragment,i),y(ha.$$.fragment,i),y(yo.$$.fragment,i),y(fa.$$.fragment,i),y(ma.$$.fragment,i),y(ga.$$.fragment,i),y(Eo.$$.fragment,i),y(wa.$$.fragment,i),y(Mo.$$.fragment,i),y(ba.$$.fragment,i),y(ya.$$.fragment,i),y($a.$$.fragment,i),y(Po.$$.fragment,i),y(Ca.$$.fragment,i),y(qo.$$.fragment,i),y(xa.$$.fragment,i),y(ja.$$.fragment,i),y(La.$$.fragment,i),y(xo.$$.fragment,i),y(Na.$$.fra
gment,i),y(jo.$$.fragment,i),y(Ba.$$.fragment,i),pu=!0)},o(i){$(v.$$.fragment,i),$(ne.$$.fragment,i),$(So.$$.fragment,i),$(No.$$.fragment,i),$(Wo.$$.fragment,i),$(Qo.$$.fragment,i),$(Ho.$$.fragment,i),$(Yo.$$.fragment,i),$(Go.$$.fragment,i),$(Zo.$$.fragment,i),$(Ko.$$.fragment,i),$(Xo.$$.fragment,i),$(ns.$$.fragment,i),$(ts.$$.fragment,i),$(os.$$.fragment,i),$(ss.$$.fragment,i),$(as.$$.fragment,i),$(ls.$$.fragment,i),$(ds.$$.fragment,i),$(ms.$$.fragment,i),$(Ut.$$.fragment,i),$(gs.$$.fragment,i),$(_s.$$.fragment,i),$(vs.$$.fragment,i),$(ys.$$.fragment,i),$(Zt.$$.fragment,i),$($s.$$.fragment,i),$(Es.$$.fragment,i),$(zs.$$.fragment,i),$(Xt.$$.fragment,i),$(Ps.$$.fragment,i),$(qs.$$.fragment,i),$(Cs.$$.fragment,i),$(Os.$$.fragment,i),$(eo.$$.fragment,i),$(Ss.$$.fragment,i),$(Ns.$$.fragment,i),$(Bs.$$.fragment,i),$(Ys.$$.fragment,i),$(to.$$.fragment,i),$(Us.$$.fragment,i),$(Gs.$$.fragment,i),$(Zs.$$.fragment,i),$(Ks.$$.fragment,i),$(or.$$.fragment,i),$(so.$$.fragment,i),$(sr.$$.fragment,i),$(rr.$$.fragment,i),$(ar.$$.fragment,i),$(pr.$$.fragment,i),$(ao.$$.fragment,i),$(hr.$$.fragment,i),$(fr.$$.fragment,i),$(mr.$$.fragment,i),$(kr.$$.fragment,i),$(lo.$$.fragment,i),$(wr.$$.fragment,i),$(br.$$.fragment,i),$(yr.$$.fragment,i),$(uo.$$.fragment,i),$(qr.$$.fragment,i),$(po.$$.fragment,i),$(Cr.$$.fragment,i),$(xr.$$.fragment,i),$(jr.$$.fragment,i),$(fo.$$.fragment,i),$(Sr.$$.fragment,i),$(mo.$$.fragment,i),$(Nr.$$.fragment,i),$(Br.$$.fragment,i),$(Wr.$$.fragment,i),$(_o.$$.fragment,i),$(Ur.$$.fragment,i),$(vo.$$.fragment,i),$(Gr.$$.fragment,i),$(Zr.$$.fragment,i),$(Kr.$$.fragment,i),$(Fo.$$.fragment,i),$(sa.$$.fragment,i),$(ko.$$.fragment,i),$(ra.$$.fragment,i),$(aa.$$.fragment,i),$(ia.$$.fragment,i),$(bo.$$.fragment,i),$(ha.$$.fragment,i),$(yo.$$.fragment,i),$(fa.$$.fragment,i),$(ma.$$.fragment,i),$(ga.$$.fragment,i),$(Eo.$$.fragment,i),$(wa.$$.fragment,i),$(Mo.$$.fragment,i),$(ba.$$.fragment,i),$(ya.$$.fragment,i),$($a.$$.fragment,i),$(Po.$$.fragment,i),$(Ca.$$.fragment,i),$(qo.$$.fragment,i),$(xa.$$.fragment,i),$(ja.$$.fragment,i),$(La.$$.fragment,i),$(xo.$$.fragment,i),$(Na.$$.fragment,i),$(jo.$$.fragment,i),$(Ba.$$.fragment,i),pu=!1},d(i){n(p),i&&n(M),i&&n(m),E(v),i&&n(G),i&&n(P),E(ne),i&&n(ie),i&&n(U),i&&n(x),i&&n(oe),i&&n(le),i&&n(se),i&&n(de),i&&n(C),i&&n(B),i&&n(J),i&&n(yc),i&&n(jn),i&&n($c),i&&n(Kn),E(So),i&&n(Ec),i&&n(Cn),E(No),i&&n(Mc),i&&n(Jn),E(Wo),i&&n(zc),i&&n(qe),E(Qo),E(Ho),E(Yo),E(Go),E(Zo),i&&n(Pc),i&&n(nt),E(Ko),i&&n(qc),i&&n(Ze),E(Xo),E(ns),E(ts),i&&n(Cc),i&&n(ot),E(os),i&&n(xc),i&&n(st),E(ss),i&&n(jc),i&&n(rt),E(as),i&&n(Lc),i&&n(at),E(ls),i&&n(Ac),i&&n(We),E(ds),E(ms),E(Ut),E(gs),i&&n(Dc),i&&n(lt),E(_s),i&&n(Ic),i&&n(Qe),E(vs),E(ys),E(Zt),E($s),i&&n(Oc),i&&n(ct),E(Es),i&&n(Sc),i&&n(Ms),E(zs),E(Xt),E(Ps),i&&n(Nc),i&&n(pt),E(qs),i&&n(Bc),i&&n(Re),E(Cs),E(Os),E(eo),E(Ss),i&&n(Wc),i&&n(ft),E(Ns),i&&n(Qc),i&&n(He),E(Bs),E(Ys),E(to),E(Us),E(Gs),i&&n(Rc),i&&n(gt),E(Zs),i&&n(Hc),i&&n(Ve),E(Ks),E(or),E(so),E(sr),i&&n(Vc),i&&n(vt),E(rr),i&&n(Yc),i&&n(Ye),E(ar),E(pr),E(ao),E(hr),i&&n(Uc),i&&n(Ft),E(fr),i&&n(Gc),i&&n(Ue),E(mr),E(kr),E(lo),E(wr),i&&n(Zc),i&&n(bt),E(br),i&&n(Kc),i&&n(xe),E(yr),E(uo),E(qr),E(po),E(Cr),i&&n(Xc),i&&n($t),E(xr),i&&n(Jc),i&&n(je),E(jr),E(fo),E(Sr),E(mo),E(Nr),i&&n(eu),i&&n(Mt),E(Br),i&&n(nu),i&&n(Le),E(Wr),E(_o),E(Ur),E(vo),E(Gr),i&&n(tu),i&&n(Pt),E(Zr),i&&n(ou),i&&n(Ae),E(Kr),E(Fo),E(sa),E(ko),E(ra),i&&n(su),i&&n(Ct),E(aa),i&&n(ru),i&&n(De),E(ia),E(bo),E(ha),E(yo),E(fa),i&&n(au),i&&n(jt),E(ma),i&&n(iu),i&&n(Ie),E(ga),E(Eo),E(wa),E(Mo),E(ba),i&&n(lu),i&&n(At),E
(ya),i&&n(du),i&&n(Oe),E($a),E(Po),E(Ca),E(qo),E(xa),i&&n(cu),i&&n(It),E(ja),i&&n(uu),i&&n(Se),E(La),E(xo),E(Na),E(jo),E(Ba)}}}const Uy={local:"funnel-transformer",sections:[{local:"overview",title:"Overview"},{local:"transformers.FunnelConfig",title:"FunnelConfig"},{local:"transformers.FunnelTokenizer",title:"FunnelTokenizer"},{local:"transformers.FunnelTokenizerFast",title:"FunnelTokenizerFast"},{local:"transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput",title:"Funnel specific outputs"},{local:"transformers.FunnelBaseModel",title:"FunnelBaseModel"},{local:"transformers.FunnelModel",title:"FunnelModel"},{local:"transformers.FunnelForPreTraining",title:"FunnelModelForPreTraining"},{local:"transformers.FunnelForMaskedLM",title:"FunnelForMaskedLM"},{local:"transformers.FunnelForSequenceClassification",title:"FunnelForSequenceClassification"},{local:"transformers.FunnelForMultipleChoice",title:"FunnelForMultipleChoice"},{local:"transformers.FunnelForTokenClassification",title:"FunnelForTokenClassification"},{local:"transformers.FunnelForQuestionAnswering",title:"FunnelForQuestionAnswering"},{local:"transformers.TFFunnelBaseModel",title:"TFFunnelBaseModel"},{local:"transformers.TFFunnelModel",title:"TFFunnelModel"},{local:"transformers.TFFunnelForPreTraining",title:"TFFunnelModelForPreTraining"},{local:"transformers.TFFunnelForMaskedLM",title:"TFFunnelForMaskedLM"},{local:"transformers.TFFunnelForSequenceClassification",title:"TFFunnelForSequenceClassification"},{local:"transformers.TFFunnelForMultipleChoice",title:"TFFunnelForMultipleChoice"},{local:"transformers.TFFunnelForTokenClassification",title:"TFFunnelForTokenClassification"},{local:"transformers.TFFunnelForQuestionAnswering",title:"TFFunnelForQuestionAnswering"}],title:"Funnel Transformer"};function Gy(W,p,M){let{fw:m}=p;return W.$$set=g=>{"fw"in g&&M(0,m=g.fw)},[m]}class t2 extends Ty{constructor(p){super();Fy(this,p,Gy,Yy,ky,{fw:0})}}export{t2 as default,Uy as metadata};
9,986
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/wavlm.mdx-310754f2.js
import{S as Il,i as Xl,s as Ul,e as a,k as d,w as _,t as s,L as Bl,c as n,d as o,m as c,a as i,x as v,h as r,b as l,J as e,g as h,y as b,q as w,o as y,B as M}from"../../chunks/vendor-b1433968.js";import{T as Go}from"../../chunks/Tip-c3840994.js";import{D as X}from"../../chunks/Docstring-ff504c58.js";import{C as Vt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ge}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Yl(V){let p,L,u,W,k;return{c(){p=a("p"),L=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),W=s("Module"),k=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=n(g,"P",{});var f=i(p);L=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n(f,"CODE",{});var T=i(u);W=r(T,"Module"),T.forEach(o),k=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){h(g,p,f),e(p,L),e(p,u),e(u,W),e(p,k)},d(g){g&&o(p)}}}function Hl(V){let p,L,u,W,k;return{c(){p=a("p"),L=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),W=s("Module"),k=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=n(g,"P",{});var f=i(p);L=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n(f,"CODE",{});var T=i(u);W=r(T,"Module"),T.forEach(o),k=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){h(g,p,f),e(p,L),e(p,u),e(u,W),e(p,k)},d(g){g&&o(p)}}}function Kl(V){let p,L,u,W,k;return{c(){p=a("p"),L=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),W=s("Module"),k=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=n(g,"P",{});var f=i(p);L=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n(f,"CODE",{});var T=i(u);W=r(T,"Module"),T.forEach(o),k=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){h(g,p,f),e(p,L),e(p,u),e(u,W),e(p,k)},d(g){g&&o(p)}}}function Rl(V){let p,L,u,W,k;return{c(){p=a("p"),L=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),W=s("Module"),k=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=n(g,"P",{});var f=i(p);L=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n(f,"CODE",{});var T=i(u);W=r(T,"Module"),T.forEach(o),k=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){h(g,p,f),e(p,L),e(p,u),e(u,W),e(p,k)},d(g){g&&o(p)}}}function Zl(V){let 
p,L,u,W,k;return{c(){p=a("p"),L=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),W=s("Module"),k=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=n(g,"P",{});var f=i(p);L=r(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n(f,"CODE",{});var T=i(u);W=r(T,"Module"),T.forEach(o),k=r(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(g,f){h(g,p,f),e(p,L),e(p,u),e(u,W),e(p,k)},d(g){g&&o(p)}}}function Ql(V){let p,L,u,W,k,g,f,T,Za,ea,G,_e,fo,qe,Qa,go,Ja,ta,ve,Ga,Ee,en,tn,oa,Ot,on,aa,Nt,_o,an,na,It,nn,sa,U,Pe,sn,Xt,rn,ln,dn,Se,cn,Ut,mn,pn,hn,vo,un,ra,be,fn,ze,gn,_n,ia,B,vn,Ae,bn,wn,De,yn,Mn,la,ee,we,bo,Ve,Wn,wo,Ln,da,$,Oe,kn,te,Tn,Bt,$n,Cn,Ne,Fn,xn,jn,oe,qn,Yt,En,Pn,Ht,Sn,zn,An,yo,Dn,Vn,Ie,On,Mo,Nn,In,Xe,ca,ae,ye,Wo,Ue,Xn,Lo,Un,ma,ne,Be,Bn,Ye,Yn,ko,Hn,Kn,pa,se,Me,To,He,Rn,$o,Zn,ha,j,Ke,Qn,Re,Jn,Ze,Gn,es,ts,Qe,os,Kt,as,ns,ss,Je,rs,Ge,is,ls,ds,E,et,cs,re,ms,Rt,ps,hs,Co,us,fs,gs,We,_s,Fo,vs,bs,tt,ua,ie,Le,xo,ot,ws,jo,ys,fa,q,at,Ms,le,Ws,qo,Ls,ks,nt,Ts,$s,Cs,st,Fs,Zt,xs,js,qs,rt,Es,it,Ps,Ss,zs,P,lt,As,de,Ds,Qt,Vs,Os,Eo,Ns,Is,Xs,ke,Us,Po,Bs,Ys,dt,ga,ce,Te,So,ct,Hs,zo,Ks,_a,C,mt,Rs,Ao,Zs,Qs,pt,Js,ht,Gs,er,tr,ut,or,Jt,ar,nr,sr,ft,rr,gt,ir,lr,dr,S,_t,cr,me,mr,Gt,pr,hr,Do,ur,fr,gr,$e,_r,Vo,vr,br,vt,va,pe,Ce,Oo,bt,wr,No,yr,ba,F,wt,Mr,Io,Wr,Lr,yt,kr,Mt,Tr,$r,Cr,Wt,Fr,eo,xr,jr,qr,Lt,Er,kt,Pr,Sr,zr,z,Tt,Ar,he,Dr,to,Vr,Or,Xo,Nr,Ir,Xr,Fe,Ur,Uo,Br,Yr,$t,wa,ue,xe,Bo,Ct,Hr,Yo,Kr,ya,x,Ft,Rr,Ho,Zr,Qr,xt,Jr,jt,Gr,ei,ti,qt,oi,oo,ai,ni,si,Et,ri,Pt,ii,li,di,A,St,ci,fe,mi,ao,pi,hi,Ko,ui,fi,gi,je,_i,Ro,vi,bi,zt,Ma;return g=new ge({}),qe=new ge({}),Ve=new ge({}),Oe=new X({props:{name:"class transformers.WavLMConfig",anchor:"transformers.WavLMConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"feat_quantizer_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"num_buckets",val:" = 320"},{name:"max_bucket_distance",val:" = 800"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"num_codevectors_per_group",val:" = 320"},{name:"num_codevector_groups",val:" = 2"},{name:"contrastive_logits_temperature",val:" = 0.1"},{name:"num_negatives",val:" = 100"},{name:"codevector_dim",val:" = 
256"},{name:"proj_codevector_dim",val:" = 256"},{name:"diversity_loss_weight",val:" = 0.1"},{name:"ctc_loss_reduction",val:" = 'mean'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"tdnn_dim",val:" = (512, 512, 512, 512, 1500)"},{name:"tdnn_kernel",val:" = (5, 3, 3, 1, 1)"},{name:"tdnn_dilation",val:" = (1, 2, 3, 1, 1)"},{name:"xvector_output_dim",val:" = 512"},{name:"num_ctc_classes",val:" = 80"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"add_adapter",val:" = False"},{name:"adapter_kernel_size",val:" = 3"},{name:"adapter_stride",val:" = 2"},{name:"num_adapter_layers",val:" = 3"},{name:"output_hidden_size",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/configuration_wavlm.py#L29",parametersDescription:[{anchor:"transformers.WavLMConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMModel">WavLMModel</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMModel">WavLMModel</a>.`,name:"vocab_size"},{anchor:"transformers.WavLMConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.WavLMConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.WavLMConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.WavLMConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.WavLMConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.WavLMConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.WavLMConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.WavLMConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForCTC">WavLMForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.WavLMConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.WavLMConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.WavLMConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature extractor. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.WavLMConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature extractor.`,name:"feat_proj_dropout"},{anchor:"transformers.WavLMConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.WavLMConfig.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (obj &#x2014;<em>float</em>, <em>optional</em>, defaults to 0.0): The dropout probabilitiy for quantized feature extractor states.`,name:"feat_quantizer_dropout"},{anchor:"transformers.WavLMConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature extractor. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.WavLMConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature extractor. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.WavLMConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature extractor. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.WavLMConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.WavLMConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.WavLMConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.WavLMConfig.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.WavLMConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature extractor. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.WavLMConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Propability of each feature vector along the time axis to be chosen as the start of the vector span to be masked. Approximately <code>mask_time_prob * sequence_length // mask_time_length</code> feature vectors will be masked along the time axis. 
This is only relevant if <code>apply_spec_augment is True</code>.`,name:"mask_time_prob"},{anchor:"transformers.WavLMConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.WavLMConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.WavLMConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Propability of each feature vector along the feature axis to be chosen as the start of the vector span to be masked. Approximately <code>mask_time_prob * hidden_size // mask_time_length</code> feature vectors will be masked along the time axis. This is only relevant if <code>apply_spec_augment is True</code>.`,name:"mask_feature_prob"},{anchor:"transformers.WavLMConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.WavLMConfig.num_codevectors_per_group",description:`<strong>num_codevectors_per_group</strong> (<code>int</code>, <em>optional</em>, defaults to 320) &#x2014; Number of entries in each quantization codebook (group).`,name:"num_codevectors_per_group"},{anchor:"transformers.WavLMConfig.num_codevector_groups",description:`<strong>num_codevector_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of codevector groups for product codevector quantization.`,name:"num_codevector_groups"},{anchor:"transformers.WavLMConfig.contrastive_logits_temperature",description:`<strong>contrastive_logits_temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The temperature <em>kappa</em> in the contrastive loss.`,name:"contrastive_logits_temperature"},{anchor:"transformers.WavLMConfig.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for the output of the feature extractor that&#x2019;s used by the quantizer.`,name:"feat_quantizer_dropout"},{anchor:"transformers.WavLMConfig.num_negatives",description:`<strong>num_negatives</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Number of negative samples for the contrastive loss.`,name:"num_negatives"},{anchor:"transformers.WavLMConfig.codevector_dim",description:`<strong>codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the quantized feature vectors.`,name:"codevector_dim"},{anchor:"transformers.WavLMConfig.proj_codevector_dim",description:`<strong>proj_codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the final projection of both the quantized and the transformer features.`,name:"proj_codevector_dim"},{anchor:"transformers.WavLMConfig.diversity_loss_weight",description:`<strong>diversity_loss_weight</strong> 
(<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The weight of the codebook diversity loss component.`,name:"diversity_loss_weight"},{anchor:"transformers.WavLMConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;mean&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForCTC">WavLMForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.WavLMConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForCTC">WavLMForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.WavLMConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForSequenceClassification">WavLMForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.WavLMConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"},{anchor:"transformers.WavLMConfig.tdnn_dim",description:`<strong>tdnn_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 1500)</code>) &#x2014; A tuple of integers defining the number of output channels of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dim</em> defines the number of <em>TDNN</em> layers.`,name:"tdnn_dim"},{anchor:"transformers.WavLMConfig.tdnn_kernel",description:`<strong>tdnn_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 3, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_kernel</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_kernel"},{anchor:"transformers.WavLMConfig.tdnn_dilation",description:`<strong>tdnn_dilation</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(1, 2, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the dilation factor of each 1D convolutional layer in <em>TDNN</em> module of the <em>XVector</em> model. 
The length of <em>tdnn_dilation</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_dilation"},{anchor:"transformers.WavLMConfig.xvector_output_dim",description:`<strong>xvector_output_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the <em>XVector</em> embedding vectors.`,name:"xvector_output_dim"},{anchor:"transformers.WavLMConfig.add_adapter",description:`<strong>add_adapter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for warm-starting Wav2Vec2 for SpeechEncoderDecoder models.`,name:"add_adapter"},{anchor:"transformers.WavLMConfig.adapter_kernel_size",description:`<strong>adapter_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Kernel size of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"adapter_kernel_size"},{anchor:"transformers.WavLMConfig.adapter_stride",description:`<strong>adapter_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Stride of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"adapter_stride"},{anchor:"transformers.WavLMConfig.num_adapter_layers",description:`<strong>num_adapter_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of convolutional layers that should be used in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"num_adapter_layers"},{anchor:"transformers.WavLMConfig.output_hidden_size",description:`<strong>output_hidden_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimensionality of the encoder output layer. If not defined, this defaults to <em>hidden-size</em>. 
Only relevant if <code>add_adapter is True</code>.`,name:"output_hidden_size"}]}}),Ie=new Vt({props:{code:",",highlighted:""}}),Xe=new Vt({props:{code:`from transformers import WavLMModel, WavLMConfig # Initializing a WavLM facebook/wavlm-base-960h style configuration configuration = WavLMConfig() # Initializing a model from the facebook/wavlm-base-960h style configuration model = WavLMModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> WavLMModel, WavLMConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a WavLM facebook/wavlm-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = WavLMConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/wavlm-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = WavLMModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ue=new ge({}),Be=new X({props:{name:"class transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput",anchor:"transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"extract_features",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L66",parametersDescription:[{anchor:"transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.`,name:"extract_features"},{anchor:"transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of 
<code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),He=new ge({}),Ke=new X({props:{name:"class transformers.WavLMModel",anchor:"transformers.WavLMModel",parameters:[{name:"config",val:": WavLMConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1144",parametersDescription:[{anchor:"transformers.WavLMModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),et=new X({props:{name:"forward",anchor:"transformers.WavLMModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1209",parametersDescription:[{anchor:"transformers.WavLMModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>WavLMProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>WavLMProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.WavLMModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. 
For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.WavLMModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.WavLMModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.WavLMModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput" >transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig" >WavLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput" >transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),We=new 
Go({props:{$$slots:{default:[Yl]},$$scope:{ctx:V}}}),tt=new Vt({props:{code:`from transformers import Wav2Vec2Processor, WavLMModel from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('microsoft/wavlm-base') model = WavLMModel.from_pretrained('microsoft/wavlm-base') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, WavLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = WavLMModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ot=new ge({}),at=new X({props:{name:"class transformers.WavLMForCTC",anchor:"transformers.WavLMForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1275",parametersDescription:[{anchor:"transformers.WavLMForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lt=new X({props:{name:"forward",anchor:"transformers.WavLMForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1301",parametersDescription:[{anchor:"transformers.WavLMForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>WavLMProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>WavLMProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.WavLMForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.WavLMForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.WavLMForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.WavLMForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.WavLMForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig" >WavLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ke=new Go({props:{$$slots:{default:[Hl]},$$scope:{ctx:V}}}),dt=new Vt({props:{code:`from transformers import Wav2Vec2Processor, WavLMForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = 
dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained('microsoft/wavlm-base') model = WavLMForCTC.from_pretrained('microsoft/wavlm-base') # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) # compute loss with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, WavLMForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = WavLMForCTC.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... 
</span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),ct=new ge({}),mt=new X({props:{name:"class transformers.WavLMForSequenceClassification",anchor:"transformers.WavLMForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1387",parametersDescription:[{anchor:"transformers.WavLMForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),_t=new X({props:{name:"forward",anchor:"transformers.WavLMForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1416",parametersDescription:[{anchor:"transformers.WavLMForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>WavLMProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>WavLMProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.WavLMForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. 
For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.WavLMForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.WavLMForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.WavLMForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.WavLMForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig" >WavLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$e=new Go({props:{$$slots:{default:[Kl]},$$scope:{ctx:V}}}),vt=new Vt({props:{code:`from transformers import Wav2Vec2FeatureExtractor, WavLMForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/wavlm-base-plus') model = WavLMForSequenceClassification.from_pretrained('microsoft/wavlm-base-plus') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1) predicted_label = model.config.id2label[predicted_class_ids] # compute loss - target_label is e.g. 
"down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, WavLMForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = WavLMForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base-plus&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits &gt;&gt;&gt; predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. &quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss`}}),bt=new ge({}),wt=new X({props:{name:"class transformers.WavLMForAudioFrameClassification",anchor:"transformers.WavLMForAudioFrameClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1492",parametersDescription:[{anchor:"transformers.WavLMForAudioFrameClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Tt=new X({props:{name:"forward",anchor:"transformers.WavLMForAudioFrameClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1519",parametersDescription:[{anchor:"transformers.WavLMForAudioFrameClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>WavLMProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>WavLMProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.WavLMForAudioFrameClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.WavLMForAudioFrameClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.WavLMForAudioFrameClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.WavLMForAudioFrameClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.WavLMForAudioFrameClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig" >WavLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fe=new Go({props:{$$slots:{default:[Rl]},$$scope:{ctx:V}}}),$t=new Vt({props:{code:`from transformers import Wav2Vec2FeatureExtractor, WavLMForAudioFrameClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = 
Wav2Vec2FeatureExtractor.from_pretrained('microsoft/wavlm-base-plus-sd') model = WavLMForAudioFrameClassification.from_pretrained('microsoft/wavlm-base-plus-sd') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits probabilities = torch.sigmoid(logits[0]) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (probabilities > 0.5).long(),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, WavLMForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base-plus-sd&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = WavLMForAudioFrameClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base-plus-sd&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probabilities = torch.sigmoid(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># labels is a one-hot array of shape (num_frames, num_speakers)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = (probabilities &gt; <span class="hljs-number">0.5</span>).long()`}}),Ct=new ge({}),Ft=new X({props:{name:"class transformers.WavLMForXVector",anchor:"transformers.WavLMForXVector",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1632",parametersDescription:[{anchor:"transformers.WavLMForXVector.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig">WavLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),St=new X({props:{name:"forward",anchor:"transformers.WavLMForXVector.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/wavlm/modeling_wavlm.py#L1682",parametersDescription:[{anchor:"transformers.WavLMForXVector.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <code>WavLMProcessor</code> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <code>WavLMProcessor.__call__</code> for details.`,name:"input_values"},{anchor:"transformers.WavLMForXVector.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.WavLMForXVector.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.WavLMForXVector.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.WavLMForXVector.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.WavLMForXVector.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.wavlm.modeling_wavlm.XVectorOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMConfig" >WavLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Classification hidden states before AMSoftmax.</p> </li> <li> <p><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Utterance embeddings used for vector similarity-based retrieval.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.wavlm.modeling_wavlm.XVectorOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),je=new Go({props:{$$slots:{default:[Zl]},$$scope:{ctx:V}}}),zt=new Vt({props:{code:`from transformers import Wav2Vec2FeatureExtractor, WavLMForXVector from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/wavlm-base-plus-sv') model = 
WavLMForXVector.from_pretrained('microsoft/wavlm-base-plus-sv') # audio file is decoded on the fly inputs = feature_extractor(dataset[:2]["audio"]["array"], return_tensors="pt") embeddings = model(**inputs).embeddings embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() # the resulting embeddings can be used for cosine similarity-based retrieval cosine_sim = torch.nn.CosineSimilarity(dim=-1) similarity = cosine_sim(embeddings[0], embeddings[1]) threshold = 0.7 # the optimal threshold is dataset-dependent if similarity < threshold: print("Speakers are not the same!"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, WavLMForXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base-plus-sv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = WavLMForXVector.from_pretrained(<span class="hljs-string">&#x27;microsoft/wavlm-base-plus-sv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[:<span class="hljs-number">2</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(**inputs).embeddings <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = torch.nn.functional.normalize(embeddings, dim=-<span class="hljs-number">1</span>).cpu() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the resulting embeddings can be used for cosine similarity-based retrieval</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.nn.CosineSimilarity(dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>similarity = cosine_sim(embeddings[<span class="hljs-number">0</span>], embeddings[<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>threshold = <span class="hljs-number">0.7</span> <span class="hljs-comment"># the optimal threshold is dataset-dependent</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">if</span> similarity &lt; threshold: <span class="hljs-meta">... 
</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Speakers are not the same!&quot;</span>)`}}),{c(){p=a("meta"),L=d(),u=a("h1"),W=a("a"),k=a("span"),_(g.$$.fragment),f=d(),T=a("span"),Za=s("WavLM"),ea=d(),G=a("h2"),_e=a("a"),fo=a("span"),_(qe.$$.fragment),Qa=d(),go=a("span"),Ja=s("Overview"),ta=d(),ve=a("p"),Ga=s("The WavLM model was proposed in "),Ee=a("a"),en=s("WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing"),tn=s(` by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.`),oa=d(),Ot=a("p"),on=s("The abstract from the paper is the following:"),aa=d(),Nt=a("p"),_o=a("em"),an=s(`Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisely and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.`),na=d(),It=a("p"),nn=s("Tips:"),sa=d(),U=a("ul"),Pe=a("li"),sn=s(`WavLM is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use `),Xt=a("a"),rn=s("Wav2Vec2Processor"),ln=s(" for the feature extraction."),dn=d(),Se=a("li"),cn=s(`WavLM model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Ut=a("a"),mn=s("Wav2Vec2CTCTokenizer"),pn=s("."),hn=d(),vo=a("li"),un=s("WavLM performs especially well on speaker verification, speaker identification, and speaker diarization tasks."),ra=d(),be=a("p"),fn=s("Relevant checkpoints can be found under "),ze=a("a"),gn=s("https://huggingface.co/models?other=wavlm"),_n=s("."),ia=d(),B=a("p"),vn=s("This model was contributed by "),Ae=a("a"),bn=s("patrickvonplaten"),wn=s(`. The Authors\u2019 code can be found `),De=a("a"),yn=s("here"),Mn=s("."),la=d(),ee=a("h2"),we=a("a"),bo=a("span"),_(Ve.$$.fragment),Wn=d(),wo=a("span"),Ln=s("WavLMConfig"),da=d(),$=a("div"),_(Oe.$$.fragment),kn=d(),te=a("p"),Tn=s("This is the configuration class to store the configuration of a "),Bt=a("a"),$n=s("WavLMModel"),Cn=s(`. It is used to instantiate an WavLM model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the WavLM `),Ne=a("a"),Fn=s("facebook/wavlm-base-960h"),xn=s(" architecture."),jn=d(),oe=a("p"),qn=s("Configuration objects inherit from "),Yt=a("a"),En=s("PretrainedConfig"),Pn=s(` and can be used to control the model outputs. Read the documentation from `),Ht=a("a"),Sn=s("PretrainedConfig"),zn=s(" for more information."),An=d(),yo=a("p"),Dn=s("Example:"),Vn=d(),_(Ie.$$.fragment),On=d(),Mo=a("p"),Nn=s("Example:"),In=d(),_(Xe.$$.fragment),ca=d(),ae=a("h2"),ye=a("a"),Wo=a("span"),_(Ue.$$.fragment),Xn=d(),Lo=a("span"),Un=s("WavLM specific outputs"),ma=d(),ne=a("div"),_(Be.$$.fragment),Bn=d(),Ye=a("p"),Yn=s("Output type of "),ko=a("code"),Hn=s("WavLMBaseModelOutput"),Kn=s(", with potential hidden states and attentions."),pa=d(),se=a("h2"),Me=a("a"),To=a("span"),_(He.$$.fragment),Rn=d(),$o=a("span"),Zn=s("WavLMModel"),ha=d(),j=a("div"),_(Ke.$$.fragment),Qn=d(),Re=a("p"),Jn=s(`The bare WavLM Model transformer outputting raw hidden-states without any specific head on top. WavLM was proposed in `),Ze=a("a"),Gn=s("WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),es=s(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),ts=d(),Qe=a("p"),os=s("This model inherits from "),Kt=a("a"),as=s("PreTrainedModel"),ns=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),ss=d(),Je=a("p"),rs=s("This model is a PyTorch "),Ge=a("a"),is=s("torch.nn.Module"),ls=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ds=d(),E=a("div"),_(et.$$.fragment),cs=d(),re=a("p"),ms=s("The "),Rt=a("a"),ps=s("WavLMModel"),hs=s(" forward method, overrides the "),Co=a("code"),us=s("__call__"),fs=s(" special method."),gs=d(),_(We.$$.fragment),_s=d(),Fo=a("p"),vs=s("Example:"),bs=d(),_(tt.$$.fragment),ua=d(),ie=a("h2"),Le=a("a"),xo=a("span"),_(ot.$$.fragment),ws=d(),jo=a("span"),ys=s("WavLMForCTC"),fa=d(),q=a("div"),_(at.$$.fragment),Ms=d(),le=a("p"),Ws=s("WavLM Model with a "),qo=a("code"),Ls=s("language modeling"),ks=s(` head on top for Connectionist Temporal Classification (CTC). WavLM was proposed in `),nt=a("a"),Ts=s("WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),$s=s(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Cs=d(),st=a("p"),Fs=s("This model inherits from "),Zt=a("a"),xs=s("PreTrainedModel"),js=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),qs=d(),rt=a("p"),Es=s("This model is a PyTorch "),it=a("a"),Ps=s("torch.nn.Module"),Ss=s(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zs=d(),P=a("div"),_(lt.$$.fragment),As=d(),de=a("p"),Ds=s("The "),Qt=a("a"),Vs=s("WavLMForCTC"),Os=s(" forward method, overrides the "),Eo=a("code"),Ns=s("__call__"),Is=s(" special method."),Xs=d(),_(ke.$$.fragment),Us=d(),Po=a("p"),Bs=s("Example:"),Ys=d(),_(dt.$$.fragment),ga=d(),ce=a("h2"),Te=a("a"),So=a("span"),_(ct.$$.fragment),Hs=d(),zo=a("span"),Ks=s("WavLMForSequenceClassification"),_a=d(),C=a("div"),_(mt.$$.fragment),Rs=d(),Ao=a("p"),Zs=s(`WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),Qs=d(),pt=a("p"),Js=s("WavLM was proposed in "),ht=a("a"),Gs=s("WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),er=s(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),tr=d(),ut=a("p"),or=s("This model inherits from "),Jt=a("a"),ar=s("PreTrainedModel"),nr=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),sr=d(),ft=a("p"),rr=s("This model is a PyTorch "),gt=a("a"),ir=s("torch.nn.Module"),lr=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dr=d(),S=a("div"),_(_t.$$.fragment),cr=d(),me=a("p"),mr=s("The "),Gt=a("a"),pr=s("WavLMForSequenceClassification"),hr=s(" forward method, overrides the "),Do=a("code"),ur=s("__call__"),fr=s(" special method."),gr=d(),_($e.$$.fragment),_r=d(),Vo=a("p"),vr=s("Example:"),br=d(),_(vt.$$.fragment),va=d(),pe=a("h2"),Ce=a("a"),Oo=a("span"),_(bt.$$.fragment),wr=d(),No=a("span"),yr=s("WavLMForAudioFrameClassification"),ba=d(),F=a("div"),_(wt.$$.fragment),Mr=d(),Io=a("p"),Wr=s("WavLM Model with a frame classification head on top for tasks like Speaker Diarization."),Lr=d(),yt=a("p"),kr=s("WavLM was proposed in "),Mt=a("a"),Tr=s("WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),$r=s(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Cr=d(),Wt=a("p"),Fr=s("This model inherits from "),eo=a("a"),xr=s("PreTrainedModel"),jr=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),qr=d(),Lt=a("p"),Er=s("This model is a PyTorch "),kt=a("a"),Pr=s("torch.nn.Module"),Sr=s(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zr=d(),z=a("div"),_(Tt.$$.fragment),Ar=d(),he=a("p"),Dr=s("The "),to=a("a"),Vr=s("WavLMForAudioFrameClassification"),Or=s(" forward method, overrides the "),Xo=a("code"),Nr=s("__call__"),Ir=s(" special method."),Xr=d(),_(Fe.$$.fragment),Ur=d(),Uo=a("p"),Br=s("Example:"),Yr=d(),_($t.$$.fragment),wa=d(),ue=a("h2"),xe=a("a"),Bo=a("span"),_(Ct.$$.fragment),Hr=d(),Yo=a("span"),Kr=s("WavLMForXVector"),ya=d(),x=a("div"),_(Ft.$$.fragment),Rr=d(),Ho=a("p"),Zr=s("WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification."),Qr=d(),xt=a("p"),Jr=s("WavLM was proposed in "),jt=a("a"),Gr=s("WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),ei=s(` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),ti=d(),qt=a("p"),oi=s("This model inherits from "),oo=a("a"),ai=s("PreTrainedModel"),ni=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),si=d(),Et=a("p"),ri=s("This model is a PyTorch "),Pt=a("a"),ii=s("torch.nn.Module"),li=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),di=d(),A=a("div"),_(St.$$.fragment),ci=d(),fe=a("p"),mi=s("The "),ao=a("a"),pi=s("WavLMForXVector"),hi=s(" forward method, overrides the "),Ko=a("code"),ui=s("__call__"),fi=s(" special method."),gi=d(),_(je.$$.fragment),_i=d(),Ro=a("p"),vi=s("Example:"),bi=d(),_(zt.$$.fragment),this.h()},l(t){const m=Bl('[data-svelte="svelte-1phssyn"]',document.head);p=n(m,"META",{name:!0,content:!0}),m.forEach(o),L=c(t),u=n(t,"H1",{class:!0});var At=i(u);W=n(At,"A",{id:!0,class:!0,href:!0});var Zo=i(W);k=n(Zo,"SPAN",{});var Qo=i(k);v(g.$$.fragment,Qo),Qo.forEach(o),Zo.forEach(o),f=c(At),T=n(At,"SPAN",{});var Jo=i(T);Za=r(Jo,"WavLM"),Jo.forEach(o),At.forEach(o),ea=c(t),G=n(t,"H2",{class:!0});var Dt=i(G);_e=n(Dt,"A",{id:!0,class:!0,href:!0});var wi=i(_e);fo=n(wi,"SPAN",{});var yi=i(fo);v(qe.$$.fragment,yi),yi.forEach(o),wi.forEach(o),Qa=c(Dt),go=n(Dt,"SPAN",{});var Mi=i(go);Ja=r(Mi,"Overview"),Mi.forEach(o),Dt.forEach(o),ta=c(t),ve=n(t,"P",{});var Wa=i(ve);Ga=r(Wa,"The WavLM model was proposed in "),Ee=n(Wa,"A",{href:!0,rel:!0});var Wi=i(Ee);en=r(Wi,"WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing"),Wi.forEach(o),tn=r(Wa,` by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.`),Wa.forEach(o),oa=c(t),Ot=n(t,"P",{});var Li=i(Ot);on=r(Li,"The abstract from the paper is the following:"),Li.forEach(o),aa=c(t),Nt=n(t,"P",{});var ki=i(Nt);_o=n(ki,"EM",{});var Ti=i(_o);an=r(Ti,`Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. 
WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisely and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.`),Ti.forEach(o),ki.forEach(o),na=c(t),It=n(t,"P",{});var $i=i(It);nn=r($i,"Tips:"),$i.forEach(o),sa=c(t),U=n(t,"UL",{});var no=i(U);Pe=n(no,"LI",{});var La=i(Pe);sn=r(La,`WavLM is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use `),Xt=n(La,"A",{href:!0});var Ci=i(Xt);rn=r(Ci,"Wav2Vec2Processor"),Ci.forEach(o),ln=r(La," for the feature extraction."),La.forEach(o),dn=c(no),Se=n(no,"LI",{});var ka=i(Se);cn=r(ka,`WavLM model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),Ut=n(ka,"A",{href:!0});var Fi=i(Ut);mn=r(Fi,"Wav2Vec2CTCTokenizer"),Fi.forEach(o),pn=r(ka,"."),ka.forEach(o),hn=c(no),vo=n(no,"LI",{});var xi=i(vo);un=r(xi,"WavLM performs especially well on speaker verification, speaker identification, and speaker diarization tasks."),xi.forEach(o),no.forEach(o),ra=c(t),be=n(t,"P",{});var Ta=i(be);fn=r(Ta,"Relevant checkpoints can be found under "),ze=n(Ta,"A",{href:!0,rel:!0});var ji=i(ze);gn=r(ji,"https://huggingface.co/models?other=wavlm"),ji.forEach(o),_n=r(Ta,"."),Ta.forEach(o),ia=c(t),B=n(t,"P",{});var so=i(B);vn=r(so,"This model was contributed by "),Ae=n(so,"A",{href:!0,rel:!0});var qi=i(Ae);bn=r(qi,"patrickvonplaten"),qi.forEach(o),wn=r(so,`. The Authors\u2019 code can be found `),De=n(so,"A",{href:!0,rel:!0});var Ei=i(De);yn=r(Ei,"here"),Ei.forEach(o),Mn=r(so,"."),so.forEach(o),la=c(t),ee=n(t,"H2",{class:!0});var $a=i(ee);we=n($a,"A",{id:!0,class:!0,href:!0});var Pi=i(we);bo=n(Pi,"SPAN",{});var Si=i(bo);v(Ve.$$.fragment,Si),Si.forEach(o),Pi.forEach(o),Wn=c($a),wo=n($a,"SPAN",{});var zi=i(wo);Ln=r(zi,"WavLMConfig"),zi.forEach(o),$a.forEach(o),da=c(t),$=n(t,"DIV",{class:!0});var D=i($);v(Oe.$$.fragment,D),kn=c(D),te=n(D,"P",{});var ro=i(te);Tn=r(ro,"This is the configuration class to store the configuration of a "),Bt=n(ro,"A",{href:!0});var Ai=i(Bt);$n=r(Ai,"WavLMModel"),Ai.forEach(o),Cn=r(ro,`. It is used to instantiate an WavLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the WavLM `),Ne=n(ro,"A",{href:!0,rel:!0});var Di=i(Ne);Fn=r(Di,"facebook/wavlm-base-960h"),Di.forEach(o),xn=r(ro," architecture."),ro.forEach(o),jn=c(D),oe=n(D,"P",{});var io=i(oe);qn=r(io,"Configuration objects inherit from "),Yt=n(io,"A",{href:!0});var Vi=i(Yt);En=r(Vi,"PretrainedConfig"),Vi.forEach(o),Pn=r(io,` and can be used to control the model outputs. 
Read the documentation from `),Ht=n(io,"A",{href:!0});var Oi=i(Ht);Sn=r(Oi,"PretrainedConfig"),Oi.forEach(o),zn=r(io," for more information."),io.forEach(o),An=c(D),yo=n(D,"P",{});var Ni=i(yo);Dn=r(Ni,"Example:"),Ni.forEach(o),Vn=c(D),v(Ie.$$.fragment,D),On=c(D),Mo=n(D,"P",{});var Ii=i(Mo);Nn=r(Ii,"Example:"),Ii.forEach(o),In=c(D),v(Xe.$$.fragment,D),D.forEach(o),ca=c(t),ae=n(t,"H2",{class:!0});var Ca=i(ae);ye=n(Ca,"A",{id:!0,class:!0,href:!0});var Xi=i(ye);Wo=n(Xi,"SPAN",{});var Ui=i(Wo);v(Ue.$$.fragment,Ui),Ui.forEach(o),Xi.forEach(o),Xn=c(Ca),Lo=n(Ca,"SPAN",{});var Bi=i(Lo);Un=r(Bi,"WavLM specific outputs"),Bi.forEach(o),Ca.forEach(o),ma=c(t),ne=n(t,"DIV",{class:!0});var Fa=i(ne);v(Be.$$.fragment,Fa),Bn=c(Fa),Ye=n(Fa,"P",{});var xa=i(Ye);Yn=r(xa,"Output type of "),ko=n(xa,"CODE",{});var Yi=i(ko);Hn=r(Yi,"WavLMBaseModelOutput"),Yi.forEach(o),Kn=r(xa,", with potential hidden states and attentions."),xa.forEach(o),Fa.forEach(o),pa=c(t),se=n(t,"H2",{class:!0});var ja=i(se);Me=n(ja,"A",{id:!0,class:!0,href:!0});var Hi=i(Me);To=n(Hi,"SPAN",{});var Ki=i(To);v(He.$$.fragment,Ki),Ki.forEach(o),Hi.forEach(o),Rn=c(ja),$o=n(ja,"SPAN",{});var Ri=i($o);Zn=r(Ri,"WavLMModel"),Ri.forEach(o),ja.forEach(o),ha=c(t),j=n(t,"DIV",{class:!0});var Y=i(j);v(Ke.$$.fragment,Y),Qn=c(Y),Re=n(Y,"P",{});var qa=i(Re);Jn=r(qa,`The bare WavLM Model transformer outputting raw hidden-states without any specific head on top. WavLM was proposed in `),Ze=n(qa,"A",{href:!0,rel:!0});var Zi=i(Ze);Gn=r(Zi,"WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Zi.forEach(o),es=r(qa,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),qa.forEach(o),ts=c(Y),Qe=n(Y,"P",{});var Ea=i(Qe);os=r(Ea,"This model inherits from "),Kt=n(Ea,"A",{href:!0});var Qi=i(Kt);as=r(Qi,"PreTrainedModel"),Qi.forEach(o),ns=r(Ea,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ea.forEach(o),ss=c(Y),Je=n(Y,"P",{});var Pa=i(Je);rs=r(Pa,"This model is a PyTorch "),Ge=n(Pa,"A",{href:!0,rel:!0});var Ji=i(Ge);is=r(Ji,"torch.nn.Module"),Ji.forEach(o),ls=r(Pa,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pa.forEach(o),ds=c(Y),E=n(Y,"DIV",{class:!0});var H=i(E);v(et.$$.fragment,H),cs=c(H),re=n(H,"P",{});var lo=i(re);ms=r(lo,"The "),Rt=n(lo,"A",{href:!0});var Gi=i(Rt);ps=r(Gi,"WavLMModel"),Gi.forEach(o),hs=r(lo," forward method, overrides the "),Co=n(lo,"CODE",{});var el=i(Co);us=r(el,"__call__"),el.forEach(o),fs=r(lo," special method."),lo.forEach(o),gs=c(H),v(We.$$.fragment,H),_s=c(H),Fo=n(H,"P",{});var tl=i(Fo);vs=r(tl,"Example:"),tl.forEach(o),bs=c(H),v(tt.$$.fragment,H),H.forEach(o),Y.forEach(o),ua=c(t),ie=n(t,"H2",{class:!0});var Sa=i(ie);Le=n(Sa,"A",{id:!0,class:!0,href:!0});var ol=i(Le);xo=n(ol,"SPAN",{});var al=i(xo);v(ot.$$.fragment,al),al.forEach(o),ol.forEach(o),ws=c(Sa),jo=n(Sa,"SPAN",{});var nl=i(jo);ys=r(nl,"WavLMForCTC"),nl.forEach(o),Sa.forEach(o),fa=c(t),q=n(t,"DIV",{class:!0});var K=i(q);v(at.$$.fragment,K),Ms=c(K),le=n(K,"P",{});var co=i(le);Ws=r(co,"WavLM Model with a "),qo=n(co,"CODE",{});var sl=i(qo);Ls=r(sl,"language modeling"),sl.forEach(o),ks=r(co,` head on top for Connectionist Temporal Classification (CTC). 
WavLM was proposed in `),nt=n(co,"A",{href:!0,rel:!0});var rl=i(nt);Ts=r(rl,"WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),rl.forEach(o),$s=r(co,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),co.forEach(o),Cs=c(K),st=n(K,"P",{});var za=i(st);Fs=r(za,"This model inherits from "),Zt=n(za,"A",{href:!0});var il=i(Zt);xs=r(il,"PreTrainedModel"),il.forEach(o),js=r(za,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),za.forEach(o),qs=c(K),rt=n(K,"P",{});var Aa=i(rt);Es=r(Aa,"This model is a PyTorch "),it=n(Aa,"A",{href:!0,rel:!0});var ll=i(it);Ps=r(ll,"torch.nn.Module"),ll.forEach(o),Ss=r(Aa,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Aa.forEach(o),zs=c(K),P=n(K,"DIV",{class:!0});var R=i(P);v(lt.$$.fragment,R),As=c(R),de=n(R,"P",{});var mo=i(de);Ds=r(mo,"The "),Qt=n(mo,"A",{href:!0});var dl=i(Qt);Vs=r(dl,"WavLMForCTC"),dl.forEach(o),Os=r(mo," forward method, overrides the "),Eo=n(mo,"CODE",{});var cl=i(Eo);Ns=r(cl,"__call__"),cl.forEach(o),Is=r(mo," special method."),mo.forEach(o),Xs=c(R),v(ke.$$.fragment,R),Us=c(R),Po=n(R,"P",{});var ml=i(Po);Bs=r(ml,"Example:"),ml.forEach(o),Ys=c(R),v(dt.$$.fragment,R),R.forEach(o),K.forEach(o),ga=c(t),ce=n(t,"H2",{class:!0});var Da=i(ce);Te=n(Da,"A",{id:!0,class:!0,href:!0});var pl=i(Te);So=n(pl,"SPAN",{});var hl=i(So);v(ct.$$.fragment,hl),hl.forEach(o),pl.forEach(o),Hs=c(Da),zo=n(Da,"SPAN",{});var ul=i(zo);Ks=r(ul,"WavLMForSequenceClassification"),ul.forEach(o),Da.forEach(o),_a=c(t),C=n(t,"DIV",{class:!0});var O=i(C);v(mt.$$.fragment,O),Rs=c(O),Ao=n(O,"P",{});var fl=i(Ao);Zs=r(fl,`WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),fl.forEach(o),Qs=c(O),pt=n(O,"P",{});var Va=i(pt);Js=r(Va,"WavLM was proposed in "),ht=n(Va,"A",{href:!0,rel:!0});var gl=i(ht);Gs=r(gl,"WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),gl.forEach(o),er=r(Va,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Va.forEach(o),tr=c(O),ut=n(O,"P",{});var Oa=i(ut);or=r(Oa,"This model inherits from "),Jt=n(Oa,"A",{href:!0});var _l=i(Jt);ar=r(_l,"PreTrainedModel"),_l.forEach(o),nr=r(Oa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Oa.forEach(o),sr=c(O),ft=n(O,"P",{});var Na=i(ft);rr=r(Na,"This model is a PyTorch "),gt=n(Na,"A",{href:!0,rel:!0});var vl=i(gt);ir=r(vl,"torch.nn.Module"),vl.forEach(o),lr=r(Na,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Na.forEach(o),dr=c(O),S=n(O,"DIV",{class:!0});var Z=i(S);v(_t.$$.fragment,Z),cr=c(Z),me=n(Z,"P",{});var po=i(me);mr=r(po,"The "),Gt=n(po,"A",{href:!0});var bl=i(Gt);pr=r(bl,"WavLMForSequenceClassification"),bl.forEach(o),hr=r(po," forward method, overrides the "),Do=n(po,"CODE",{});var wl=i(Do);ur=r(wl,"__call__"),wl.forEach(o),fr=r(po," special method."),po.forEach(o),gr=c(Z),v($e.$$.fragment,Z),_r=c(Z),Vo=n(Z,"P",{});var yl=i(Vo);vr=r(yl,"Example:"),yl.forEach(o),br=c(Z),v(vt.$$.fragment,Z),Z.forEach(o),O.forEach(o),va=c(t),pe=n(t,"H2",{class:!0});var Ia=i(pe);Ce=n(Ia,"A",{id:!0,class:!0,href:!0});var Ml=i(Ce);Oo=n(Ml,"SPAN",{});var Wl=i(Oo);v(bt.$$.fragment,Wl),Wl.forEach(o),Ml.forEach(o),wr=c(Ia),No=n(Ia,"SPAN",{});var Ll=i(No);yr=r(Ll,"WavLMForAudioFrameClassification"),Ll.forEach(o),Ia.forEach(o),ba=c(t),F=n(t,"DIV",{class:!0});var N=i(F);v(wt.$$.fragment,N),Mr=c(N),Io=n(N,"P",{});var kl=i(Io);Wr=r(kl,"WavLM Model with a frame classification head on top for tasks like Speaker Diarization."),kl.forEach(o),Lr=c(N),yt=n(N,"P",{});var Xa=i(yt);kr=r(Xa,"WavLM was proposed in "),Mt=n(Xa,"A",{href:!0,rel:!0});var Tl=i(Mt);Tr=r(Tl,"WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Tl.forEach(o),$r=r(Xa,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Xa.forEach(o),Cr=c(N),Wt=n(N,"P",{});var Ua=i(Wt);Fr=r(Ua,"This model inherits from "),eo=n(Ua,"A",{href:!0});var $l=i(eo);xr=r($l,"PreTrainedModel"),$l.forEach(o),jr=r(Ua,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ua.forEach(o),qr=c(N),Lt=n(N,"P",{});var Ba=i(Lt);Er=r(Ba,"This model is a PyTorch "),kt=n(Ba,"A",{href:!0,rel:!0});var Cl=i(kt);Pr=r(Cl,"torch.nn.Module"),Cl.forEach(o),Sr=r(Ba,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ba.forEach(o),zr=c(N),z=n(N,"DIV",{class:!0});var Q=i(z);v(Tt.$$.fragment,Q),Ar=c(Q),he=n(Q,"P",{});var ho=i(he);Dr=r(ho,"The "),to=n(ho,"A",{href:!0});var Fl=i(to);Vr=r(Fl,"WavLMForAudioFrameClassification"),Fl.forEach(o),Or=r(ho," forward method, overrides the "),Xo=n(ho,"CODE",{});var xl=i(Xo);Nr=r(xl,"__call__"),xl.forEach(o),Ir=r(ho," special method."),ho.forEach(o),Xr=c(Q),v(Fe.$$.fragment,Q),Ur=c(Q),Uo=n(Q,"P",{});var jl=i(Uo);Br=r(jl,"Example:"),jl.forEach(o),Yr=c(Q),v($t.$$.fragment,Q),Q.forEach(o),N.forEach(o),wa=c(t),ue=n(t,"H2",{class:!0});var Ya=i(ue);xe=n(Ya,"A",{id:!0,class:!0,href:!0});var ql=i(xe);Bo=n(ql,"SPAN",{});var El=i(Bo);v(Ct.$$.fragment,El),El.forEach(o),ql.forEach(o),Hr=c(Ya),Yo=n(Ya,"SPAN",{});var Pl=i(Yo);Kr=r(Pl,"WavLMForXVector"),Pl.forEach(o),Ya.forEach(o),ya=c(t),x=n(t,"DIV",{class:!0});var I=i(x);v(Ft.$$.fragment,I),Rr=c(I),Ho=n(I,"P",{});var Sl=i(Ho);Zr=r(Sl,"WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification."),Sl.forEach(o),Qr=c(I),xt=n(I,"P",{});var Ha=i(xt);Jr=r(Ha,"WavLM was proposed in "),jt=n(Ha,"A",{href:!0,rel:!0});var zl=i(jt);Gr=r(zl,"WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data"),zl.forEach(o),ei=r(Ha,` by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.`),Ha.forEach(o),ti=c(I),qt=n(I,"P",{});var Ka=i(qt);oi=r(Ka,"This model inherits from "),oo=n(Ka,"A",{href:!0});var Al=i(oo);ai=r(Al,"PreTrainedModel"),Al.forEach(o),ni=r(Ka,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ka.forEach(o),si=c(I),Et=n(I,"P",{});var Ra=i(Et);ri=r(Ra,"This model is a PyTorch "),Pt=n(Ra,"A",{href:!0,rel:!0});var Dl=i(Pt);ii=r(Dl,"torch.nn.Module"),Dl.forEach(o),li=r(Ra,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ra.forEach(o),di=c(I),A=n(I,"DIV",{class:!0});var J=i(A);v(St.$$.fragment,J),ci=c(J),fe=n(J,"P",{});var uo=i(fe);mi=r(uo,"The "),ao=n(uo,"A",{href:!0});var Vl=i(ao);pi=r(Vl,"WavLMForXVector"),Vl.forEach(o),hi=r(uo," forward method, overrides the "),Ko=n(uo,"CODE",{});var Ol=i(Ko);ui=r(Ol,"__call__"),Ol.forEach(o),fi=r(uo," special method."),uo.forEach(o),gi=c(J),v(je.$$.fragment,J),_i=c(J),Ro=n(J,"P",{});var Nl=i(Ro);vi=r(Nl,"Example:"),Nl.forEach(o),bi=c(J),v(zt.$$.fragment,J),J.forEach(o),I.forEach(o),this.h()},h(){l(p,"name","hf:doc:metadata"),l(p,"content",JSON.stringify(Jl)),l(W,"id","wavlm"),l(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(W,"href","#wavlm"),l(u,"class","relative group"),l(_e,"id","overview"),l(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_e,"href","#overview"),l(G,"class","relative group"),l(Ee,"href","https://arxiv.org/abs/2110.13900"),l(Ee,"rel","nofollow"),l(Xt,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),l(Ut,"href","/docs/transformers/v4.15.0/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),l(ze,"href","https://huggingface.co/models?other=wavlm"),l(ze,"rel","nofollow"),l(Ae,"href","https://huggingface.co/patrickvonplaten"),l(Ae,"rel","nofollow"),l(De,"href","https://github.com/microsoft/unilm/tree/master/wavlm"),l(De,"rel","nofollow"),l(we,"id","transformers.WavLMConfig"),l(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(we,"href","#transformers.WavLMConfig"),l(ee,"class","relative group"),l(Bt,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMModel"),l(Ne,"href","https://huggingface.co/facebook/wavlm-base-960h"),l(Ne,"rel","nofollow"),l(Yt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Ht,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l($,"class","docstring"),l(ye,"id","transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput"),l(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ye,"href","#transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput"),l(ae,"class","relative group"),l(ne,"class","docstring"),l(Me,"id","transformers.WavLMModel"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.WavLMModel"),l(se,"class","relative 
group"),l(Ze,"href","https://arxiv.org/abs/2101.07597"),l(Ze,"rel","nofollow"),l(Kt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Ge,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ge,"rel","nofollow"),l(Rt,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMModel"),l(E,"class","docstring"),l(j,"class","docstring"),l(Le,"id","transformers.WavLMForCTC"),l(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Le,"href","#transformers.WavLMForCTC"),l(ie,"class","relative group"),l(nt,"href","https://arxiv.org/abs/2101.07597"),l(nt,"rel","nofollow"),l(Zt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(it,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(it,"rel","nofollow"),l(Qt,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForCTC"),l(P,"class","docstring"),l(q,"class","docstring"),l(Te,"id","transformers.WavLMForSequenceClassification"),l(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Te,"href","#transformers.WavLMForSequenceClassification"),l(ce,"class","relative group"),l(ht,"href","https://arxiv.org/abs/2101.07597"),l(ht,"rel","nofollow"),l(Jt,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(gt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(gt,"rel","nofollow"),l(Gt,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForSequenceClassification"),l(S,"class","docstring"),l(C,"class","docstring"),l(Ce,"id","transformers.WavLMForAudioFrameClassification"),l(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ce,"href","#transformers.WavLMForAudioFrameClassification"),l(pe,"class","relative group"),l(Mt,"href","https://arxiv.org/abs/2101.07597"),l(Mt,"rel","nofollow"),l(eo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(kt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(kt,"rel","nofollow"),l(to,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForAudioFrameClassification"),l(z,"class","docstring"),l(F,"class","docstring"),l(xe,"id","transformers.WavLMForXVector"),l(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(xe,"href","#transformers.WavLMForXVector"),l(ue,"class","relative 
group"),l(jt,"href","https://arxiv.org/abs/2101.07597"),l(jt,"rel","nofollow"),l(oo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(Pt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Pt,"rel","nofollow"),l(ao,"href","/docs/transformers/v4.15.0/en/model_doc/wavlm#transformers.WavLMForXVector"),l(A,"class","docstring"),l(x,"class","docstring")},m(t,m){e(document.head,p),h(t,L,m),h(t,u,m),e(u,W),e(W,k),b(g,k,null),e(u,f),e(u,T),e(T,Za),h(t,ea,m),h(t,G,m),e(G,_e),e(_e,fo),b(qe,fo,null),e(G,Qa),e(G,go),e(go,Ja),h(t,ta,m),h(t,ve,m),e(ve,Ga),e(ve,Ee),e(Ee,en),e(ve,tn),h(t,oa,m),h(t,Ot,m),e(Ot,on),h(t,aa,m),h(t,Nt,m),e(Nt,_o),e(_o,an),h(t,na,m),h(t,It,m),e(It,nn),h(t,sa,m),h(t,U,m),e(U,Pe),e(Pe,sn),e(Pe,Xt),e(Xt,rn),e(Pe,ln),e(U,dn),e(U,Se),e(Se,cn),e(Se,Ut),e(Ut,mn),e(Se,pn),e(U,hn),e(U,vo),e(vo,un),h(t,ra,m),h(t,be,m),e(be,fn),e(be,ze),e(ze,gn),e(be,_n),h(t,ia,m),h(t,B,m),e(B,vn),e(B,Ae),e(Ae,bn),e(B,wn),e(B,De),e(De,yn),e(B,Mn),h(t,la,m),h(t,ee,m),e(ee,we),e(we,bo),b(Ve,bo,null),e(ee,Wn),e(ee,wo),e(wo,Ln),h(t,da,m),h(t,$,m),b(Oe,$,null),e($,kn),e($,te),e(te,Tn),e(te,Bt),e(Bt,$n),e(te,Cn),e(te,Ne),e(Ne,Fn),e(te,xn),e($,jn),e($,oe),e(oe,qn),e(oe,Yt),e(Yt,En),e(oe,Pn),e(oe,Ht),e(Ht,Sn),e(oe,zn),e($,An),e($,yo),e(yo,Dn),e($,Vn),b(Ie,$,null),e($,On),e($,Mo),e(Mo,Nn),e($,In),b(Xe,$,null),h(t,ca,m),h(t,ae,m),e(ae,ye),e(ye,Wo),b(Ue,Wo,null),e(ae,Xn),e(ae,Lo),e(Lo,Un),h(t,ma,m),h(t,ne,m),b(Be,ne,null),e(ne,Bn),e(ne,Ye),e(Ye,Yn),e(Ye,ko),e(ko,Hn),e(Ye,Kn),h(t,pa,m),h(t,se,m),e(se,Me),e(Me,To),b(He,To,null),e(se,Rn),e(se,$o),e($o,Zn),h(t,ha,m),h(t,j,m),b(Ke,j,null),e(j,Qn),e(j,Re),e(Re,Jn),e(Re,Ze),e(Ze,Gn),e(Re,es),e(j,ts),e(j,Qe),e(Qe,os),e(Qe,Kt),e(Kt,as),e(Qe,ns),e(j,ss),e(j,Je),e(Je,rs),e(Je,Ge),e(Ge,is),e(Je,ls),e(j,ds),e(j,E),b(et,E,null),e(E,cs),e(E,re),e(re,ms),e(re,Rt),e(Rt,ps),e(re,hs),e(re,Co),e(Co,us),e(re,fs),e(E,gs),b(We,E,null),e(E,_s),e(E,Fo),e(Fo,vs),e(E,bs),b(tt,E,null),h(t,ua,m),h(t,ie,m),e(ie,Le),e(Le,xo),b(ot,xo,null),e(ie,ws),e(ie,jo),e(jo,ys),h(t,fa,m),h(t,q,m),b(at,q,null),e(q,Ms),e(q,le),e(le,Ws),e(le,qo),e(qo,Ls),e(le,ks),e(le,nt),e(nt,Ts),e(le,$s),e(q,Cs),e(q,st),e(st,Fs),e(st,Zt),e(Zt,xs),e(st,js),e(q,qs),e(q,rt),e(rt,Es),e(rt,it),e(it,Ps),e(rt,Ss),e(q,zs),e(q,P),b(lt,P,null),e(P,As),e(P,de),e(de,Ds),e(de,Qt),e(Qt,Vs),e(de,Os),e(de,Eo),e(Eo,Ns),e(de,Is),e(P,Xs),b(ke,P,null),e(P,Us),e(P,Po),e(Po,Bs),e(P,Ys),b(dt,P,null),h(t,ga,m),h(t,ce,m),e(ce,Te),e(Te,So),b(ct,So,null),e(ce,Hs),e(ce,zo),e(zo,Ks),h(t,_a,m),h(t,C,m),b(mt,C,null),e(C,Rs),e(C,Ao),e(Ao,Zs),e(C,Qs),e(C,pt),e(pt,Js),e(pt,ht),e(ht,Gs),e(pt,er),e(C,tr),e(C,ut),e(ut,or),e(ut,Jt),e(Jt,ar),e(ut,nr),e(C,sr),e(C,ft),e(ft,rr),e(ft,gt),e(gt,ir),e(ft,lr),e(C,dr),e(C,S),b(_t,S,null),e(S,cr),e(S,me),e(me,mr),e(me,Gt),e(Gt,pr),e(me,hr),e(me,Do),e(Do,ur),e(me,fr),e(S,gr),b($e,S,null),e(S,_r),e(S,Vo),e(Vo,vr),e(S,br),b(vt,S,null),h(t,va,m),h(t,pe,m),e(pe,Ce),e(Ce,Oo),b(bt,Oo,null),e(pe,wr),e(pe,No),e(No,yr),h(t,ba,m),h(t,F,m),b(wt,F,null),e(F,Mr),e(F,Io),e(Io,Wr),e(F,Lr),e(F,yt),e(yt,kr),e(yt,Mt),e(Mt,Tr),e(yt,$r),e(F,Cr),e(F,Wt),e(Wt,Fr),e(Wt,eo),e(eo,xr),e(Wt,jr),e(F,qr),e(F,Lt),e(Lt,Er),e(Lt,kt),e(kt,Pr),e(Lt,Sr),e(F,zr),e(F,z),b(Tt,z,null),e(z,Ar),e(z,he),e(he,Dr),e(he,to),e(to,Vr),e(he,Or),e(he,Xo),e(Xo,Nr),e(he,Ir),e(z,Xr),b(Fe,z,null),e(z,Ur),e(z,Uo),e(Uo,Br),e(z,Yr),b($t,z,null),h(t,wa,m),h(t,ue,m),e(ue,xe),e(xe,Bo),b(Ct,Bo,null),e(ue,Hr),e(ue,Yo),e(Yo,Kr),h(t,ya,m),h(t,x,m),b(Ft,x,null),e(x,Rr),e(x,Ho),e(Ho,Zr),e(x,Qr),e(x,xt),e(xt,Jr),e(xt,jt),e(jt,Gr),e(xt,ei)
,e(x,ti),e(x,qt),e(qt,oi),e(qt,oo),e(oo,ai),e(qt,ni),e(x,si),e(x,Et),e(Et,ri),e(Et,Pt),e(Pt,ii),e(Et,li),e(x,di),e(x,A),b(St,A,null),e(A,ci),e(A,fe),e(fe,mi),e(fe,ao),e(ao,pi),e(fe,hi),e(fe,Ko),e(Ko,ui),e(fe,fi),e(A,gi),b(je,A,null),e(A,_i),e(A,Ro),e(Ro,vi),e(A,bi),b(zt,A,null),Ma=!0},p(t,[m]){const At={};m&2&&(At.$$scope={dirty:m,ctx:t}),We.$set(At);const Zo={};m&2&&(Zo.$$scope={dirty:m,ctx:t}),ke.$set(Zo);const Qo={};m&2&&(Qo.$$scope={dirty:m,ctx:t}),$e.$set(Qo);const Jo={};m&2&&(Jo.$$scope={dirty:m,ctx:t}),Fe.$set(Jo);const Dt={};m&2&&(Dt.$$scope={dirty:m,ctx:t}),je.$set(Dt)},i(t){Ma||(w(g.$$.fragment,t),w(qe.$$.fragment,t),w(Ve.$$.fragment,t),w(Oe.$$.fragment,t),w(Ie.$$.fragment,t),w(Xe.$$.fragment,t),w(Ue.$$.fragment,t),w(Be.$$.fragment,t),w(He.$$.fragment,t),w(Ke.$$.fragment,t),w(et.$$.fragment,t),w(We.$$.fragment,t),w(tt.$$.fragment,t),w(ot.$$.fragment,t),w(at.$$.fragment,t),w(lt.$$.fragment,t),w(ke.$$.fragment,t),w(dt.$$.fragment,t),w(ct.$$.fragment,t),w(mt.$$.fragment,t),w(_t.$$.fragment,t),w($e.$$.fragment,t),w(vt.$$.fragment,t),w(bt.$$.fragment,t),w(wt.$$.fragment,t),w(Tt.$$.fragment,t),w(Fe.$$.fragment,t),w($t.$$.fragment,t),w(Ct.$$.fragment,t),w(Ft.$$.fragment,t),w(St.$$.fragment,t),w(je.$$.fragment,t),w(zt.$$.fragment,t),Ma=!0)},o(t){y(g.$$.fragment,t),y(qe.$$.fragment,t),y(Ve.$$.fragment,t),y(Oe.$$.fragment,t),y(Ie.$$.fragment,t),y(Xe.$$.fragment,t),y(Ue.$$.fragment,t),y(Be.$$.fragment,t),y(He.$$.fragment,t),y(Ke.$$.fragment,t),y(et.$$.fragment,t),y(We.$$.fragment,t),y(tt.$$.fragment,t),y(ot.$$.fragment,t),y(at.$$.fragment,t),y(lt.$$.fragment,t),y(ke.$$.fragment,t),y(dt.$$.fragment,t),y(ct.$$.fragment,t),y(mt.$$.fragment,t),y(_t.$$.fragment,t),y($e.$$.fragment,t),y(vt.$$.fragment,t),y(bt.$$.fragment,t),y(wt.$$.fragment,t),y(Tt.$$.fragment,t),y(Fe.$$.fragment,t),y($t.$$.fragment,t),y(Ct.$$.fragment,t),y(Ft.$$.fragment,t),y(St.$$.fragment,t),y(je.$$.fragment,t),y(zt.$$.fragment,t),Ma=!1},d(t){o(p),t&&o(L),t&&o(u),M(g),t&&o(ea),t&&o(G),M(qe),t&&o(ta),t&&o(ve),t&&o(oa),t&&o(Ot),t&&o(aa),t&&o(Nt),t&&o(na),t&&o(It),t&&o(sa),t&&o(U),t&&o(ra),t&&o(be),t&&o(ia),t&&o(B),t&&o(la),t&&o(ee),M(Ve),t&&o(da),t&&o($),M(Oe),M(Ie),M(Xe),t&&o(ca),t&&o(ae),M(Ue),t&&o(ma),t&&o(ne),M(Be),t&&o(pa),t&&o(se),M(He),t&&o(ha),t&&o(j),M(Ke),M(et),M(We),M(tt),t&&o(ua),t&&o(ie),M(ot),t&&o(fa),t&&o(q),M(at),M(lt),M(ke),M(dt),t&&o(ga),t&&o(ce),M(ct),t&&o(_a),t&&o(C),M(mt),M(_t),M($e),M(vt),t&&o(va),t&&o(pe),M(bt),t&&o(ba),t&&o(F),M(wt),M(Tt),M(Fe),M($t),t&&o(wa),t&&o(ue),M(Ct),t&&o(ya),t&&o(x),M(Ft),M(St),M(je),M(zt)}}}const Jl={local:"wavlm",sections:[{local:"overview",title:"Overview"},{local:"transformers.WavLMConfig",title:"WavLMConfig"},{local:"transformers.models.wavlm.modeling_wavlm.WavLMBaseModelOutput",title:"WavLM specific outputs"},{local:"transformers.WavLMModel",title:"WavLMModel"},{local:"transformers.WavLMForCTC",title:"WavLMForCTC"},{local:"transformers.WavLMForSequenceClassification",title:"WavLMForSequenceClassification"},{local:"transformers.WavLMForAudioFrameClassification",title:"WavLMForAudioFrameClassification"},{local:"transformers.WavLMForXVector",title:"WavLMForXVector"}],title:"WavLM"};function Gl(V,p,L){let{fw:u}=p;return V.$$set=W=>{"fw"in W&&L(0,u=W.fw)},[u]}class rd extends Il{constructor(p){super();Xl(this,p,Gl,Ql,Ul,{fw:0})}}export{rd as default,Jl as metadata};
9,987
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/lxmert.mdx-43e871b3.js
import{S as hc,i as uc,s as pc,e as n,k as d,w as T,t as a,L as fc,c as r,d as t,m as c,a as s,x,h as i,b as l,J as e,g as m,y as b,q as w,o as y,B as k}from"../../chunks/vendor-b1433968.js";import{T as Vo}from"../../chunks/Tip-c3840994.js";import{D as I}from"../../chunks/Docstring-ff504c58.js";import{C as Al}from"../../chunks/CodeBlock-a320dbd7.js";import{I as De}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function gc(D){let u,$,f,g,L;return{c(){u=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),L=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){u=r(v,"P",{});var _=s(u);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var z=s(f);g=i(z,"Module"),z.forEach(t),L=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,u,_),e(u,$),e(u,f),e(f,g),e(u,L)},d(v){v&&t(u)}}}function _c(D){let u,$,f,g,L;return{c(){u=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),L=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){u=r(v,"P",{});var _=s(u);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var z=s(f);g=i(z,"Module"),z.forEach(t),L=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,u,_),e(u,$),e(u,f),e(f,g),e(u,L)},d(v){v&&t(u)}}}function vc(D){let u,$,f,g,L;return{c(){u=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),L=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){u=r(v,"P",{});var _=s(u);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var z=s(f);g=i(z,"Module"),z.forEach(t),L=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,u,_),e(u,$),e(u,f),e(f,g),e(u,L)},d(v){v&&t(u)}}}function Tc(D){let u,$,f,g,L,v,_,z,$e,re,E,Y,R,ee,ze,S,Ee,Te,C,Q,te,me,M,q,se,oe,xe,ae,V,Me,be,P,Pe,Z,N,he,ne,qe,ue,W,Ae,B,Oe;return{c(){u=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),f=d(),g=n("ul"),L=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=d(),z=n("li"),$e=a("having all inputs as a list, tuple or dict in the first positional arguments."),re=d(),E=n("p"),Y=a("This second option is useful when using "),R=n("code"),ee=a("tf.keras.Model.fit"),ze=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),Ee=a("model(inputs)"),Te=a("."),C=d(),Q=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in 
the first positional argument :`),me=d(),M=n("ul"),q=n("li"),se=a("a single Tensor with "),oe=n("code"),xe=a("input_ids"),ae=a(" only and nothing else: "),V=n("code"),Me=a("model(inputs_ids)"),be=d(),P=n("li"),Pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Z=n("code"),N=a("model([input_ids, attention_mask])"),he=a(" or "),ne=n("code"),qe=a("model([input_ids, attention_mask, token_type_ids])"),ue=d(),W=n("li"),Ae=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=n("code"),Oe=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){u=r(p,"P",{});var F=s(u);$=i(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=c(p),g=r(p,"UL",{});var pe=s(g);L=r(pe,"LI",{});var fe=s(L);v=i(fe,"having all inputs as keyword arguments (like PyTorch models), or"),fe.forEach(t),_=c(pe),z=r(pe,"LI",{});var Xe=s(z);$e=i(Xe,"having all inputs as a list, tuple or dict in the first positional arguments."),Xe.forEach(t),pe.forEach(t),re=c(p),E=r(p,"P",{});var X=s(E);Y=i(X,"This second option is useful when using "),R=r(X,"CODE",{});var ge=s(R);ee=i(ge,"tf.keras.Model.fit"),ge.forEach(t),ze=i(X,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(X,"CODE",{});var He=s(S);Ee=i(He,"model(inputs)"),He.forEach(t),Te=i(X,"."),X.forEach(t),C=c(p),Q=r(p,"P",{});var Ge=s(Q);te=i(Ge,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Ge.forEach(t),me=c(p),M=r(p,"UL",{});var j=s(M);q=r(j,"LI",{});var A=s(q);se=i(A,"a single Tensor with "),oe=r(A,"CODE",{});var ie=s(oe);xe=i(ie,"input_ids"),ie.forEach(t),ae=i(A," only and nothing else: "),V=r(A,"CODE",{});var Ce=s(V);Me=i(Ce,"model(inputs_ids)"),Ce.forEach(t),A.forEach(t),be=c(j),P=r(j,"LI",{});var O=s(P);Pe=i(O,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Z=r(O,"CODE",{});var Ue=s(Z);N=i(Ue,"model([input_ids, attention_mask])"),Ue.forEach(t),he=i(O," or "),ne=r(O,"CODE",{});var Ne=s(ne);qe=i(Ne,"model([input_ids, attention_mask, token_type_ids])"),Ne.forEach(t),O.forEach(t),ue=c(j),W=r(j,"LI",{});var je=s(W);Ae=i(je,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(je,"CODE",{});var Re=s(B);Oe=i(Re,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Re.forEach(t),je.forEach(t),j.forEach(t)},m(p,F){m(p,u,F),e(u,$),m(p,f,F),m(p,g,F),e(g,L),e(L,v),e(g,_),e(g,z),e(z,$e),m(p,re,F),m(p,E,F),e(E,Y),e(E,R),e(R,ee),e(E,ze),e(E,S),e(S,Ee),e(E,Te),m(p,C,F),m(p,Q,F),e(Q,te),m(p,me,F),m(p,M,F),e(M,q),e(q,se),e(q,oe),e(oe,xe),e(q,ae),e(q,V),e(V,Me),e(M,be),e(M,P),e(P,Pe),e(P,Z),e(Z,N),e(P,he),e(P,ne),e(ne,qe),e(M,ue),e(M,W),e(W,Ae),e(W,B),e(B,Oe)},d(p){p&&t(u),p&&t(f),p&&t(g),p&&t(re),p&&t(E),p&&t(C),p&&t(Q),p&&t(me),p&&t(M)}}}function xc(D){let u,$,f,g,L;return{c(){u=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),L=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){u=r(v,"P",{});var _=s(u);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var z=s(f);g=i(z,"Module"),z.forEach(t),L=i(_,` 
instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,u,_),e(u,$),e(u,f),e(f,g),e(u,L)},d(v){v&&t(u)}}}function bc(D){let u,$,f,g,L,v,_,z,$e,re,E,Y,R,ee,ze,S,Ee,Te,C,Q,te,me,M,q,se,oe,xe,ae,V,Me,be,P,Pe,Z,N,he,ne,qe,ue,W,Ae,B,Oe;return{c(){u=n("p"),$=a("TF 2.0 models accepts two formats as inputs:"),f=d(),g=n("ul"),L=n("li"),v=a("having all inputs as keyword arguments (like PyTorch models), or"),_=d(),z=n("li"),$e=a("having all inputs as a list, tuple or dict in the first positional arguments."),re=d(),E=n("p"),Y=a("This second option is useful when using "),R=n("code"),ee=a("tf.keras.Model.fit"),ze=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),Ee=a("model(inputs)"),Te=a("."),C=d(),Q=n("p"),te=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),me=d(),M=n("ul"),q=n("li"),se=a("a single Tensor with "),oe=n("code"),xe=a("input_ids"),ae=a(" only and nothing else: "),V=n("code"),Me=a("model(inputs_ids)"),be=d(),P=n("li"),Pe=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Z=n("code"),N=a("model([input_ids, attention_mask])"),he=a(" or "),ne=n("code"),qe=a("model([input_ids, attention_mask, token_type_ids])"),ue=d(),W=n("li"),Ae=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=n("code"),Oe=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(p){u=r(p,"P",{});var F=s(u);$=i(F,"TF 2.0 models accepts two formats as inputs:"),F.forEach(t),f=c(p),g=r(p,"UL",{});var pe=s(g);L=r(pe,"LI",{});var fe=s(L);v=i(fe,"having all inputs as keyword arguments (like PyTorch models), or"),fe.forEach(t),_=c(pe),z=r(pe,"LI",{});var Xe=s(z);$e=i(Xe,"having all inputs as a list, tuple or dict in the first positional arguments."),Xe.forEach(t),pe.forEach(t),re=c(p),E=r(p,"P",{});var X=s(E);Y=i(X,"This second option is useful when using "),R=r(X,"CODE",{});var ge=s(R);ee=i(ge,"tf.keras.Model.fit"),ge.forEach(t),ze=i(X,` method which currently requires having all the tensors in the first argument of the model call function: `),S=r(X,"CODE",{});var He=s(S);Ee=i(He,"model(inputs)"),He.forEach(t),Te=i(X,"."),X.forEach(t),C=c(p),Q=r(p,"P",{});var Ge=s(Q);te=i(Ge,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Ge.forEach(t),me=c(p),M=r(p,"UL",{});var j=s(M);q=r(j,"LI",{});var A=s(q);se=i(A,"a single Tensor with "),oe=r(A,"CODE",{});var ie=s(oe);xe=i(ie,"input_ids"),ie.forEach(t),ae=i(A," only and nothing else: "),V=r(A,"CODE",{});var Ce=s(V);Me=i(Ce,"model(inputs_ids)"),Ce.forEach(t),A.forEach(t),be=c(j),P=r(j,"LI",{});var O=s(P);Pe=i(O,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),Z=r(O,"CODE",{});var Ue=s(Z);N=i(Ue,"model([input_ids, attention_mask])"),Ue.forEach(t),he=i(O," or "),ne=r(O,"CODE",{});var Ne=s(ne);qe=i(Ne,"model([input_ids, attention_mask, token_type_ids])"),Ne.forEach(t),O.forEach(t),ue=c(j),W=r(j,"LI",{});var je=s(W);Ae=i(je,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),B=r(je,"CODE",{});var Re=s(B);Oe=i(Re,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),Re.forEach(t),je.forEach(t),j.forEach(t)},m(p,F){m(p,u,F),e(u,$),m(p,f,F),m(p,g,F),e(g,L),e(L,v),e(g,_),e(g,z),e(z,$e),m(p,re,F),m(p,E,F),e(E,Y),e(E,R),e(R,ee),e(E,ze),e(E,S),e(S,Ee),e(E,Te),m(p,C,F),m(p,Q,F),e(Q,te),m(p,me,F),m(p,M,F),e(M,q),e(q,se),e(q,oe),e(oe,xe),e(q,ae),e(q,V),e(V,Me),e(M,be),e(M,P),e(P,Pe),e(P,Z),e(Z,N),e(P,he),e(P,ne),e(ne,qe),e(M,ue),e(M,W),e(W,Ae),e(W,B),e(B,Oe)},d(p){p&&t(u),p&&t(f),p&&t(g),p&&t(re),p&&t(E),p&&t(C),p&&t(Q),p&&t(me),p&&t(M)}}}function wc(D){let u,$,f,g,L;return{c(){u=n("p"),$=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),g=a("Module"),L=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){u=r(v,"P",{});var _=s(u);$=i(_,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(_,"CODE",{});var z=s(f);g=i(z,"Module"),z.forEach(t),L=i(_,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),_.forEach(t)},m(v,_){m(v,u,_),e(u,$),e(u,f),e(f,g),e(u,L)},d(v){v&&t(u)}}}function yc(D){let u,$,f,g,L,v,_,z,$e,re,E,Y,R,ee,ze,S,Ee,Te,C,Q,te,me,M,q,se,oe,xe,ae,V,Me,be,P,Pe,Z,N,he,ne,qe,ue,W,Ae,B,Oe,p,F,pe,fe,Xe,X,ge,He,Ge,j,A,ie,Ce,O,Ue,Ne,je,Re,Ie,Ct,hs,Ke,us,Wo,ps,fs,Bo,gs,_s,vs,Je,Ts,Xo,xs,bs,Ho,ws,ys,cr,Ye,gt,kn,Nt,ks,Ln,Ls,mr,_e,jt,Fs,Fn,$s,zs,_t,Go,Es,Ms,Uo,Ps,qs,As,It,Os,Ko,Cs,Ns,hr,Ze,vt,$n,Dt,js,zn,Is,ur,ve,Rt,Ds,St,Rs,En,Ss,Qs,Vs,Tt,Jo,Ws,Bs,Yo,Xs,Hs,Gs,Qt,Us,Zo,Ks,Js,pr,et,xt,Mn,Vt,Ys,Pn,Zs,fr,tt,Wt,ea,qn,ta,gr,ot,Bt,oa,Xt,na,en,ra,sa,_r,nt,Ht,aa,Gt,ia,tn,la,da,vr,rt,Ut,ca,An,ma,Tr,st,Kt,ha,Jt,ua,on,pa,fa,xr,at,bt,On,Yt,ga,Cn,_a,br,H,Zt,va,Nn,Ta,xa,eo,ba,to,wa,ya,ka,oo,La,nn,Fa,$a,za,no,Ea,ro,Ma,Pa,qa,le,so,Aa,it,Oa,rn,Ca,Na,jn,ja,Ia,Da,wt,Ra,In,Sa,Qa,ao,wr,lt,yt,Dn,io,Va,Rn,Wa,yr,G,lo,Ba,Sn,Xa,Ha,co,Ga,mo,Ua,Ka,Ja,ho,Ya,sn,Za,ei,ti,uo,oi,po,ni,ri,si,Se,fo,ai,dt,ii,an,li,di,Qn,ci,mi,hi,kt,kr,ct,Lt,Vn,go,ui,Wn,pi,Lr,U,_o,fi,Bn,gi,_i,vo,vi,To,Ti,xi,bi,xo,wi,ln,yi,ki,Li,bo,Fi,wo,$i,zi,Ei,de,yo,Mi,mt,Pi,dn,qi,Ai,Xn,Oi,Ci,Ni,Ft,ji,Hn,Ii,Di,ko,Fr,ht,$t,Gn,Lo,Ri,Un,Si,$r,K,Fo,Qi,Kn,Vi,Wi,$o,Bi,zo,Xi,Hi,Gi,Eo,Ui,Mo,Ki,Ji,Yi,zt,Zi,ce,Po,el,ut,tl,cn,ol,nl,Jn,rl,sl,al,Et,il,Yn,ll,dl,qo,zr,pt,Mt,Zn,Ao,cl,er,ml,Er,J,Oo,hl,Co,ul,tr,pl,fl,gl,No,_l,jo,vl,Tl,xl,Io,bl,Do,wl,yl,kl,Pt,Ll,Qe,Ro,Fl,ft,$l,mn,zl,El,or,Ml,Pl,ql,qt,Mr;return v=new De({}),ee=new De({}),O=new De({}),Ct=new I({props:{name:"class transformers.LxmertConfig",anchor:"transformers.LxmertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_attention_heads",val:" = 12"},{name:"num_qa_labels",val:" = 9500"},{name:"num_object_labels",val:" = 1600"},{name:"num_attr_labels",val:" = 400"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"l_layers",val:" = 9"},{name:"x_layers",val:" = 5"},{name:"r_layers",val:" = 5"},{name:"visual_feat_dim",val:" = 2048"},{name:"visual_pos_dim",val:" = 4"},{name:"visual_loss_normalizer",val:" = 6.67"},{name:"task_matched",val:" = True"},{name:"task_mask_lm",val:" = True"},{name:"task_obj_predict",val:" = 
True"},{name:"task_qa",val:" = True"},{name:"visual_obj_loss",val:" = True"},{name:"visual_attr_loss",val:" = True"},{name:"visual_feat_loss",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/configuration_lxmert.py#L29",parametersDescription:[{anchor:"transformers.LxmertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertModel">LxmertModel</a> or <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertModel">TFLxmertModel</a>.`,name:"vocab_size"},{anchor:"transformers.LxmertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.LxmertConfig.r_layers",description:`<strong>r_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of hidden layers in the Transformer visual encoder.`,name:"r_layers"},{anchor:"transformers.LxmertConfig.l_layers",description:`<strong>l_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 9) &#x2014; Number of hidden layers in the Transformer language encoder.`,name:"l_layers"},{anchor:"transformers.LxmertConfig.x_layers",description:`<strong>x_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of hidden layers in the Transformer cross modality encoder.`,name:"x_layers"},{anchor:"transformers.LxmertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.LxmertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.LxmertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.LxmertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.LxmertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.LxmertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.LxmertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <em>token_type_ids</em> passed into <a href="/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertModel">BertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.LxmertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.LxmertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.LxmertConfig.visual_feat_dim",description:`<strong>visual_feat_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; This represents the last dimension of the pooled-object features used as input for the model, representing the size of each object feature itself.`,name:"visual_feat_dim"},{anchor:"transformers.LxmertConfig.visual_pos_dim",description:`<strong>visual_pos_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; This represents the number of spacial features that are mixed into the visual features. The default is set to 4 because most commonly this will represent the location of a bounding box. i.e., (x, y, width, height)`,name:"visual_pos_dim"},{anchor:"transformers.LxmertConfig.visual_loss_normalizer",description:`<strong>visual_loss_normalizer</strong> (<code>float</code>, <em>optional</em>, defaults to 1/15) &#x2014; This represents the scaling factor in which each visual loss is multiplied by if during pretraining, one decided to train with multiple vision-based loss objectives.`,name:"visual_loss_normalizer"},{anchor:"transformers.LxmertConfig.num_qa_labels",description:`<strong>num_qa_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 9500) &#x2014; This represents the total number of different question answering (QA) labels there are. 
If using more than one dataset with QA, the user will need to account for the total number of labels that all of the datasets have in total.`,name:"num_qa_labels"},{anchor:"transformers.LxmertConfig.num_object_labels",description:`<strong>num_object_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 1600) &#x2014; This represents the total number of semantically unique objects that lxmert will be able to classify a pooled-object feature as belonging too.`,name:"num_object_labels"},{anchor:"transformers.LxmertConfig.num_attr_labels",description:`<strong>num_attr_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 400) &#x2014; This represents the total number of semantically unique attributes that lxmert will be able to classify a pooled-object feature as possessing.`,name:"num_attr_labels"},{anchor:"transformers.LxmertConfig.task_matched",description:`<strong>task_matched</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; This task is used for sentence-image matching. If the sentence correctly describes the image the label will be 1. If the sentence does not correctly describe the image, the label will be 0.`,name:"task_matched"},{anchor:"transformers.LxmertConfig.task_mask_lm",description:`<strong>task_mask_lm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss objective.`,name:"task_mask_lm"},{anchor:"transformers.LxmertConfig.task_obj_predict",description:`<strong>task_obj_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.`,name:"task_obj_predict"},{anchor:"transformers.LxmertConfig.task_qa",description:`<strong>task_qa</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to add the question-answering loss to the objective`,name:"task_qa"},{anchor:"transformers.LxmertConfig.visual_obj_loss",description:`<strong>visual_obj_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to calculate the object-prediction loss objective`,name:"visual_obj_loss"},{anchor:"transformers.LxmertConfig.visual_attr_loss",description:`<strong>visual_attr_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to calculate the attribute-prediction loss objective`,name:"visual_attr_loss"},{anchor:"transformers.LxmertConfig.visual_feat_loss",description:`<strong>visual_feat_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to calculate the feature-regression loss objective`,name:"visual_feat_loss"},{anchor:"transformers.LxmertConfig.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return the attentions from the vision, language, and cross-modality layers should be returned.`,name:"output_attentions"},{anchor:"transformers.LxmertConfig.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return the hidden states from the vision, language, and cross-modality layers should be 
returned.`,name:"output_hidden_states"}]}}),Nt=new De({}),jt=new I({props:{name:"class transformers.LxmertTokenizer",anchor:"transformers.LxmertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/tokenization_lxmert.py#L36"}}),Dt=new De({}),Rt=new I({props:{name:"class transformers.LxmertTokenizerFast",anchor:"transformers.LxmertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/tokenization_lxmert_fast.py#L40"}}),Vt=new De({}),Wt=new I({props:{name:"class transformers.models.lxmert.modeling_lxmert.LxmertModelOutput",anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput",parameters:[{name:"language_output",val:": typing.Optional[torch.FloatTensor] = None"},{name:"vision_output",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_output",val:": typing.Optional[torch.FloatTensor] = None"},{name:"language_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"vision_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"language_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"vision_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L61",parametersDescription:[{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_output",description:`<strong>language_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the language encoder.`,name:"language_output"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_output",description:`<strong>vision_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the visual encoder.`,name:"vision_output"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.pooled_output",description:`<strong>pooled_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. 
The Linear`,name:"pooled_output"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_hidden_states",description:`<strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"language_hidden_states"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_hidden_states",description:`<strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"vision_hidden_states"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_attentions",description:`<strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"language_attentions"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_attentions",description:`<strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"vision_attentions"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.cross_encoder_attentions",description:`<strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"cross_encoder_attentions"}]}}),Bt=new I({props:{name:"class transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput",anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput",parameters:[{name:"loss",val:": [<class 'torch.FloatTensor'>] = None"},{name:"prediction_logits",val:": typing.Optional[torch.FloatTensor] = None"},{name:"cross_relationship_score",val:": typing.Optional[torch.FloatTensor] = None"},{name:"question_answering_score",val:": typing.Optional[torch.FloatTensor] = None"},{name:"language_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"vision_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"language_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"vision_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L141",parametersDescription:[{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). 
question_answering_score &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>): Prediction scores of question answering objective (classification).`,name:"prediction_logits"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.language_hidden_states",description:`<strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"language_hidden_states"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.vision_hidden_states",description:`<strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"vision_hidden_states"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.language_attentions",description:`<strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"language_attentions"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.vision_attentions",description:`<strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"vision_attentions"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.cross_encoder_attentions",description:`<strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"cross_encoder_attentions"}]}}),Ht=new I({props:{name:"class transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput",anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"question_answering_score",val:": typing.Optional[torch.FloatTensor] = None"},{name:"language_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"vision_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"language_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"vision_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L104",parametersDescription:[{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.k. question_answering_score &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>, <em>optional</em>): Prediction scores of question answering objective (classification).`,name:"loss"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.language_hidden_states",description:`<strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"language_hidden_states"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.vision_hidden_states",description:`<strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"vision_hidden_states"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.language_attentions",description:`<strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"language_attentions"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.vision_attentions",description:`<strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"vision_attentions"},{anchor:"transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.cross_encoder_attentions",description:`<strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"cross_encoder_attentions"}]}}),Ut=new I({props:{name:"class transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput",anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput",parameters:[{name:"language_output",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"vision_output",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"pooled_output",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"language_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"vision_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"language_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"vision_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_tf_lxmert.py#L50",parametersDescription:[{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_output",description:`<strong>language_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the language encoder.`,name:"language_output"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_output",description:`<strong>vision_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the visual encoder.`,name:"vision_output"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.pooled_output",description:`<strong>pooled_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence 
(classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear`,name:"pooled_output"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_hidden_states",description:`<strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"language_hidden_states"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_hidden_states",description:`<strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"vision_hidden_states"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_attentions",description:`<strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"language_attentions"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_attentions",description:`<strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"vision_attentions"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.cross_encoder_attentions",description:`<strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"cross_encoder_attentions"}]}}),Kt=new I({props:{name:"class transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput",anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"prediction_logits",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"cross_relationship_score",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"question_answering_score",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"language_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"vision_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"language_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"vision_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_tf_lxmert.py#L93",parametersDescription:[{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>tf.Tensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.prediction_logits",description:`<strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). 
question_answering_score &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, n_qa_answers)</code>): Prediction scores of question answering objective (classification).`,name:"prediction_logits"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.language_hidden_states",description:`<strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"language_hidden_states"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.vision_hidden_states",description:`<strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"vision_hidden_states"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.language_attentions",description:`<strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"language_attentions"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.vision_attentions",description:`<strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"vision_attentions"},{anchor:"transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.cross_encoder_attentions",description:`<strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"cross_encoder_attentions"}]}}),Yt=new De({}),Zt=new I({props:{name:"class transformers.LxmertModel",anchor:"transformers.LxmertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L877",parametersDescription:[{anchor:"transformers.LxmertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),so=new I({props:{name:"forward",anchor:"transformers.LxmertModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"visual_feats",val:" = None"},{name:"visual_pos",val:" = None"},{name:"attention_mask",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L892",parametersDescription:[{anchor:"transformers.LxmertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model)</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_pos_dim)</code>): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to</p> <ol> <li></li> </ol> <p>These are currently not provided by the transformers library.`,name:"input_ids"},{anchor:"transformers.LxmertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LxmertModel.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.LxmertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LxmertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LxmertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LxmertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LxmertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" >transformers.models.lxmert.modeling_lxmert.LxmertModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>language_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the language encoder.</li> <li><strong>vision_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the visual encoder.</li> <li><strong>pooled_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear</li> <li><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" >transformers.models.lxmert.modeling_lxmert.LxmertModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),wt=new Vo({props:{$$slots:{default:[gc]},$$scope:{ctx:D}}}),ao=new Al({props:{code:`from transformers import LxmertTokenizer, LxmertModel import torch tokenizer = LxmertTokenizer.from_pretrained('unc-nlp/lxmert-base-uncased') model = LxmertModel.from_pretrained('unc-nlp/lxmert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LxmertTokenizer, LxmertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LxmertTokenizer.from_pretrained(<span class="hljs-string">&#x27;unc-nlp/lxmert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LxmertModel.from_pretrained(<span class="hljs-string">&#x27;unc-nlp/lxmert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),io=new De({}),lo=new I({props:{name:"class transformers.LxmertForPreTraining",anchor:"transformers.LxmertForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L1016",parametersDescription:[{anchor:"transformers.LxmertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fo=new I({props:{name:"forward",anchor:"transformers.LxmertForPreTraining.forward",parameters:[{name:"input_ids",val:" = None"},{name:"visual_feats",val:" = None"},{name:"visual_pos",val:" = None"},{name:"attention_mask",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"obj_labels",val:" = None"},{name:"matched_label",val:" = None"},{name:"ans",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L1145",parametersDescription:[{anchor:"transformers.LxmertForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model)</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_pos_dim)</code>): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to</p> <ol> <li></li> </ol> <p>These are currently not provided by the transformers library.`,name:"input_ids"},{anchor:"transformers.LxmertForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LxmertForPreTraining.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.LxmertForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LxmertForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LxmertForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LxmertForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LxmertForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LxmertForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code> obj_labels &#x2014; (<code>Dict[Str: Tuple[Torch.FloatTensor, Torch.FloatTensor]]</code>, <em>optional</em>): each key is named after each one of the visual losses and each element of the tuple is of the shape <code>(batch_size, num_features)</code> and <code>(batch_size, num_features, visual_feature_dim)</code> for each the label id and the label score respectively`,name:"labels"},{anchor:"transformers.LxmertForPreTraining.forward.matched_label",description:`<strong>matched_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates that the sentence does not match the image,</li> <li>1 indicates that the sentence does match the image.</li> </ul>`,name:"matched_label"},{anchor:"transformers.LxmertForPreTraining.forward.ans",description:`<strong>ans</strong> (<code>Torch.Tensor</code> of shape <code>(batch_size)</code>, <em>optional</em>) &#x2014; a one hot representation hof the correct answer <em>optional</em>`,name:"ans"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>cross_relationship_score:</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax).</li> <li><strong>question_answering_score:</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>) \u2014 Prediction scores of question answering objective (classification).</li> <li><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),kt=new Vo({props:{$$slots:{default:[_c]},$$scope:{ctx:D}}}),go=new De({}),_o=new I({props:{name:"class transformers.LxmertForQuestionAnswering",anchor:"transformers.LxmertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L1282",parametersDescription:[{anchor:"transformers.LxmertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yo=new I({props:{name:"forward",anchor:"transformers.LxmertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"visual_feats",val:" = None"},{name:"visual_pos",val:" = None"},{name:"attention_mask",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_lxmert.py#L1376",parametersDescription:[{anchor:"transformers.LxmertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model)</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_pos_dim)</code>): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to</p> <ol> <li></li> </ol> <p>These are currently not provided by the transformers library.`,name:"input_ids"},{anchor:"transformers.LxmertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LxmertForQuestionAnswering.forward.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.LxmertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LxmertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LxmertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LxmertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LxmertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.</p> <p>labels &#x2014; (<code>Torch.Tensor</code> of shape <code>(batch_size)</code>, <em>optional</em>): A one-hot representation of the correct answer`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.k.</li> <li><strong>question_answering_score:</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>, <em>optional</em>) \u2014 Prediction scores of question answering objective (classification).</li> <li><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ft=new Vo({props:{$$slots:{default:[vc]},$$scope:{ctx:D}}}),ko=new Al({props:{code:`from transformers import LxmertTokenizer, LxmertForQuestionAnswering import torch tokenizer = LxmertTokenizer.from_pretrained('unc-nlp/lxmert-base-uncased') model = LxmertForQuestionAnswering.from_pretrained('unc-nlp/lxmert-base-uncased') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LxmertTokenizer, LxmertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LxmertTokenizer.from_pretrained(<span class="hljs-string">&#x27;unc-nlp/lxmert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LxmertForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;unc-nlp/lxmert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Lo=new De({}),Fo=new I({props:{name:"class transformers.TFLxmertModel",anchor:"transformers.TFLxmertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_tf_lxmert.py#L938",parametersDescription:[{anchor:"transformers.TFLxmertModel.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zt=new Vo({props:{$$slots:{default:[Tc]},$$scope:{ctx:D}}}),Po=new I({props:{name:"call",anchor:"transformers.TFLxmertModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"visual_feats",val:" = None"},{name:"visual_pos",val:" = None"},{name:"attention_mask",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_tf_lxmert.py#L943",parametersDescription:[{anchor:"transformers.TFLxmertModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model)</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to</p> <ol> <li></li> </ol> <p>These are currently not provided by the transformers library.`,name:"input_ids"},{anchor:"transformers.TFLxmertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLxmertModel.call.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; MMask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.TFLxmertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLxmertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLxmertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLxmertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLxmertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLxmertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>language_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the language encoder.</li> <li><strong>vision_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the visual encoder.</li> <li><strong>pooled_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear</li> <li><strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Et=new Vo({props:{$$slots:{default:[xc]},$$scope:{ctx:D}}}),qo=new Al({props:{code:`from transformers import LxmertTokenizer, TFLxmertModel import tensorflow as tf tokenizer = LxmertTokenizer.from_pretrained('unc-nlp/lxmert-base-uncased') model = TFLxmertModel.from_pretrained('unc-nlp/lxmert-base-uncased') inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LxmertTokenizer, TFLxmertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LxmertTokenizer.from_pretrained(<span class="hljs-string">&#x27;unc-nlp/lxmert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLxmertModel.from_pretrained(<span class="hljs-string">&#x27;unc-nlp/lxmert-base-uncased&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ao=new De({}),Oo=new I({props:{name:"class transformers.TFLxmertForPreTraining",anchor:"transformers.TFLxmertForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_tf_lxmert.py#L1195",parametersDescription:[{anchor:"transformers.TFLxmertForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Pt=new Vo({props:{$$slots:{default:[bc]},$$scope:{ctx:D}}}),Ro=new I({props:{name:"call",anchor:"transformers.TFLxmertForPreTraining.call",parameters:[{name:"input_ids",val:" = None"},{name:"visual_feats",val:" = None"},{name:"visual_pos",val:" = None"},{name:"attention_mask",val:" = None"},{name:"visual_attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"masked_lm_labels",val:" = None"},{name:"obj_labels",val:" = None"},{name:"matched_label",val:" = None"},{name:"ans",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/lxmert/modeling_tf_lxmert.py#L1295",parametersDescription:[{anchor:"transformers.TFLxmertForPreTraining.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model)</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to</p> <ol> <li></li> </ol> <p>These are currently not provided by the transformers library.`,name:"input_ids"},{anchor:"transformers.TFLxmertForPreTraining.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFLxmertForPreTraining.call.visual_attention_mask",description:`<strong>visual_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; MMask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"visual_attention_mask"},{anchor:"transformers.TFLxmertForPreTraining.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFLxmertForPreTraining.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFLxmertForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFLxmertForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFLxmertForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFLxmertForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFLxmertForPreTraining.call.masked_lm_labels",description:`<strong>masked_lm_labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code> obj_labels &#x2014; (<code>Dict[Str: Tuple[tf.Tensor, tf.Tensor]]</code>, <em>optional</em>, defaults to :obj: <em>None</em>): each key is named after each one of the visual losses and each element of the tuple is of the shape <code>(batch_size, num_features)</code> and <code>(batch_size, num_features, visual_feature_dim)</code> for each the label id and the label score respectively`,name:"masked_lm_labels"},{anchor:"transformers.TFLxmertForPreTraining.call.matched_label",description:`<strong>matched_label</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates that the sentence does not match the image,</li> <li>1 indicates that the sentence does match the image.</li> </ul>`,name:"matched_label"},{anchor:"transformers.TFLxmertForPreTraining.call.ans",description:`<strong>ans</strong> (<code>Torch.Tensor</code> of shape <code>(batch_size)</code>, <em>optional</em>, defaults to :obj: <em>None</em>) &#x2014; a one hot representation hof the correct answer <em>optional</em>`,name:"ans"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>tf.Tensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>cross_relationship_score:</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax).</li> <li><strong>question_answering_score:</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, n_qa_answers)</code>) \u2014 Prediction scores of question answering objective (classification).</li> <li><strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, 
returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),qt=new Vo({props:{$$slots:{default:[wc]},$$scope:{ctx:D}}}),{c(){u=n("meta"),$=d(),f=n("h1"),g=n("a"),L=n("span"),T(v.$$.fragment),_=d(),z=n("span"),$e=a("LXMERT"),re=d(),E=n("h2"),Y=n("a"),R=n("span"),T(ee.$$.fragment),ze=d(),S=n("span"),Ee=a("Overview"),Te=d(),C=n("p"),Q=a("The LXMERT model was proposed in "),te=n("a"),me=a("LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),M=a(` by Hao Tan & Mohit Bansal. It is a series of bidirectional transformer encoders (one for the vision modality, one for the language modality, and then one to fuse both modalities) pretrained using a combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. The pretraining consists of multiple multi-modal datasets: MSCOCO, Visual-Genome + Visual-Genome Question Answering, VQA 2.0, and GQA.`),q=d(),se=n("p"),oe=a("The abstract from the paper is the following:"),xe=d(),ae=n("p"),V=n("em"),Me=a(`Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly, the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. 
Next, to endow our model with the capability of connecting vision and language semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification), cross-modality matching, and image question answering. These tasks help in learning both intra-modality and cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our pretrained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR, and improve the previous best result by 22% absolute (54% to 76%). Lastly, we demonstrate detailed ablation studies to prove that both our novel model components and pretraining strategies significantly contribute to our strong results; and also present several attention visualizations for the different encoders`),be=d(),P=n("p"),Pe=a("Tips:"),Z=d(),N=n("ul"),he=n("li"),ne=a(`Bounding boxes are not necessary to be used in the visual feature embeddings, any kind of visual-spacial features will work.`),qe=d(),ue=n("li"),W=a(`Both the language hidden states and the visual hidden states that LXMERT outputs are passed through the cross-modality layer, so they contain information from both modalities. To access a modality that only attends to itself, select the vision/language hidden states from the first input in the tuple.`),Ae=d(),B=n("li"),Oe=a(`The bidirectional cross-modality encoder attention only returns attention values when the language modality is used as the input and the vision modality is used as the context vector. Further, while the cross-modality encoder contains self-attention for each respective modality and cross-attention, only the cross attention is returned and both self attention outputs are disregarded.`),p=d(),F=n("p"),pe=a("This model was contributed by "),fe=n("a"),Xe=a("eltoto1219"),X=a(". The original code can be found "),ge=n("a"),He=a("here"),Ge=a("."),j=d(),A=n("h2"),ie=n("a"),Ce=n("span"),T(O.$$.fragment),Ue=d(),Ne=n("span"),je=a("LxmertConfig"),Re=d(),Ie=n("div"),T(Ct.$$.fragment),hs=d(),Ke=n("p"),us=a("This is the configuration class to store the configuration of a "),Wo=n("a"),ps=a("LxmertModel"),fs=a(` or a `),Bo=n("a"),gs=a("TFLxmertModel"),_s=a(`. It is used to instantiate a LXMERT model according to the specified arguments, defining the model architecture.`),vs=d(),Je=n("p"),Ts=a("Configuration objects inherit from "),Xo=n("a"),xs=a("PretrainedConfig"),bs=a(` and can be used to control the model outputs. 
Read the documentation from `),Ho=n("a"),ws=a("PretrainedConfig"),ys=a(" for more information."),cr=d(),Ye=n("h2"),gt=n("a"),kn=n("span"),T(Nt.$$.fragment),ks=d(),Ln=n("span"),Ls=a("LxmertTokenizer"),mr=d(),_e=n("div"),T(jt.$$.fragment),Fs=d(),Fn=n("p"),$s=a("Construct an LXMERT tokenizer."),zs=d(),_t=n("p"),Go=n("a"),Es=a("LxmertTokenizer"),Ms=a(" is identical to "),Uo=n("a"),Ps=a("BertTokenizer"),qs=a(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),As=d(),It=n("p"),Os=a("Refer to superclass "),Ko=n("a"),Cs=a("BertTokenizer"),Ns=a(` for usage examples and documentation concerning parameters.`),hr=d(),Ze=n("h2"),vt=n("a"),$n=n("span"),T(Dt.$$.fragment),js=d(),zn=n("span"),Is=a("LxmertTokenizerFast"),ur=d(),ve=n("div"),T(Rt.$$.fragment),Ds=d(),St=n("p"),Rs=a("Construct a \u201Cfast\u201D LXMERT tokenizer (backed by HuggingFace\u2019s "),En=n("em"),Ss=a("tokenizers"),Qs=a(" library)."),Vs=d(),Tt=n("p"),Jo=n("a"),Ws=a("LxmertTokenizerFast"),Bs=a(" is identical to "),Yo=n("a"),Xs=a("BertTokenizerFast"),Hs=a(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Gs=d(),Qt=n("p"),Us=a("Refer to superclass "),Zo=n("a"),Ks=a("BertTokenizerFast"),Js=a(` for usage examples and documentation concerning parameters.`),pr=d(),et=n("h2"),xt=n("a"),Mn=n("span"),T(Vt.$$.fragment),Ys=d(),Pn=n("span"),Zs=a("Lxmert specific outputs"),fr=d(),tt=n("div"),T(Wt.$$.fragment),ea=d(),qn=n("p"),ta=a(`Lxmert\u2019s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the \u201Crelation-ship\u201D encoder\u201D)`),gr=d(),ot=n("div"),T(Bt.$$.fragment),oa=d(),Xt=n("p"),na=a("Output type of "),en=n("a"),ra=a("LxmertForPreTraining"),sa=a("."),_r=d(),nt=n("div"),T(Ht.$$.fragment),aa=d(),Gt=n("p"),ia=a("Output type of "),tn=n("a"),la=a("LxmertForQuestionAnswering"),da=a("."),vr=d(),rt=n("div"),T(Ut.$$.fragment),ca=d(),An=n("p"),ma=a(`Lxmert\u2019s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the \u201Crelation-ship\u201D encoder\u201D)`),Tr=d(),st=n("div"),T(Kt.$$.fragment),ha=d(),Jt=n("p"),ua=a("Output type of "),on=n("a"),pa=a("LxmertForPreTraining"),fa=a("."),xr=d(),at=n("h2"),bt=n("a"),On=n("span"),T(Yt.$$.fragment),ga=d(),Cn=n("span"),_a=a("LxmertModel"),br=d(),H=n("div"),T(Zt.$$.fragment),va=d(),Nn=n("p"),Ta=a("The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top."),xa=d(),eo=n("p"),ba=a("The LXMERT model was proposed in "),to=n("a"),wa=a("LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),ya=a(` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),ka=d(),oo=n("p"),La=a("This model inherits from "),nn=n("a"),Fa=a("PreTrainedModel"),$a=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),za=d(),no=n("p"),Ea=a("This model is also a PyTorch "),ro=n("a"),Ma=a("torch.nn.Module"),Pa=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qa=d(),le=n("div"),T(so.$$.fragment),Aa=d(),it=n("p"),Oa=a("The "),rn=n("a"),Ca=a("LxmertModel"),Na=a(" forward method, overrides the "),jn=n("code"),ja=a("__call__"),Ia=a(" special method."),Da=d(),T(wt.$$.fragment),Ra=d(),In=n("p"),Sa=a("Example:"),Qa=d(),T(ao.$$.fragment),wr=d(),lt=n("h2"),yt=n("a"),Dn=n("span"),T(io.$$.fragment),Va=d(),Rn=n("span"),Wa=a("LxmertForPreTraining"),yr=d(),G=n("div"),T(lo.$$.fragment),Ba=d(),Sn=n("p"),Xa=a("Lxmert Model with a specified pretraining head on top."),Ha=d(),co=n("p"),Ga=a("The LXMERT model was proposed in "),mo=n("a"),Ua=a("LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Ka=a(` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),Ja=d(),ho=n("p"),Ya=a("This model inherits from "),sn=n("a"),Za=a("PreTrainedModel"),ei=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ti=d(),uo=n("p"),oi=a("This model is also a PyTorch "),po=n("a"),ni=a("torch.nn.Module"),ri=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),si=d(),Se=n("div"),T(fo.$$.fragment),ai=d(),dt=n("p"),ii=a("The "),an=n("a"),li=a("LxmertForPreTraining"),di=a(" forward method, overrides the "),Qn=n("code"),ci=a("__call__"),mi=a(" special method."),hi=d(),T(kt.$$.fragment),kr=d(),ct=n("h2"),Lt=n("a"),Vn=n("span"),T(go.$$.fragment),ui=d(),Wn=n("span"),pi=a("LxmertForQuestionAnswering"),Lr=d(),U=n("div"),T(_o.$$.fragment),fi=d(),Bn=n("p"),gi=a("Lxmert Model with a visual-answering head on top for downstream QA tasks"),_i=d(),vo=n("p"),vi=a("The LXMERT model was proposed in "),To=n("a"),Ti=a("LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),xi=a(` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),bi=d(),xo=n("p"),wi=a("This model inherits from "),ln=n("a"),yi=a("PreTrainedModel"),ki=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Li=d(),bo=n("p"),Fi=a("This model is also a PyTorch "),wo=n("a"),$i=a("torch.nn.Module"),zi=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ei=d(),de=n("div"),T(yo.$$.fragment),Mi=d(),mt=n("p"),Pi=a("The "),dn=n("a"),qi=a("LxmertForQuestionAnswering"),Ai=a(" forward method, overrides the "),Xn=n("code"),Oi=a("__call__"),Ci=a(" special method."),Ni=d(),T(Ft.$$.fragment),ji=d(),Hn=n("p"),Ii=a("Example:"),Di=d(),T(ko.$$.fragment),Fr=d(),ht=n("h2"),$t=n("a"),Gn=n("span"),T(Lo.$$.fragment),Ri=d(),Un=n("span"),Si=a("TFLxmertModel"),$r=d(),K=n("div"),T(Fo.$$.fragment),Qi=d(),Kn=n("p"),Vi=a("The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top."),Wi=d(),$o=n("p"),Bi=a("The LXMERT model was proposed in "),zo=n("a"),Xi=a("LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Hi=a(` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),Gi=d(),Eo=n("p"),Ui=a("This model is also a "),Mo=n("a"),Ki=a("tf.keras.Model"),Ji=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Yi=d(),T(zt.$$.fragment),Zi=d(),ce=n("div"),T(Po.$$.fragment),el=d(),ut=n("p"),tl=a("The "),cn=n("a"),ol=a("TFLxmertModel"),nl=a(" forward method, overrides the "),Jn=n("code"),rl=a("__call__"),sl=a(" special method."),al=d(),T(Et.$$.fragment),il=d(),Yn=n("p"),ll=a("Example:"),dl=d(),T(qo.$$.fragment),zr=d(),pt=n("h2"),Mt=n("a"),Zn=n("span"),T(Ao.$$.fragment),cl=d(),er=n("span"),ml=a("TFLxmertForPreTraining"),Er=d(),J=n("div"),T(Oo.$$.fragment),hl=d(),Co=n("p"),ul=a("Lxmert Model with a "),tr=n("code"),pl=a("language modeling"),fl=a(" head on top."),gl=d(),No=n("p"),_l=a("The LXMERT model was proposed in "),jo=n("a"),vl=a("LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Tl=a(` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),xl=d(),Io=n("p"),bl=a("This model is also a "),Do=n("a"),wl=a("tf.keras.Model"),yl=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),kl=d(),T(Pt.$$.fragment),Ll=d(),Qe=n("div"),T(Ro.$$.fragment),Fl=d(),ft=n("p"),$l=a("The "),mn=n("a"),zl=a("TFLxmertForPreTraining"),El=a(" forward method, overrides the "),or=n("code"),Ml=a("__call__"),Pl=a(" special method."),ql=d(),T(qt.$$.fragment),this.h()},l(o){const h=fc('[data-svelte="svelte-1phssyn"]',document.head);u=r(h,"META",{name:!0,content:!0}),h.forEach(t),$=c(o),f=r(o,"H1",{class:!0});var So=s(f);g=r(So,"A",{id:!0,class:!0,href:!0});var nr=s(g);L=r(nr,"SPAN",{});var rr=s(L);x(v.$$.fragment,rr),rr.forEach(t),nr.forEach(t),_=c(So),z=r(So,"SPAN",{});var sr=s(z);$e=i(sr,"LXMERT"),sr.forEach(t),So.forEach(t),re=c(o),E=r(o,"H2",{class:!0});var Qo=s(E);Y=r(Qo,"A",{id:!0,class:!0,href:!0});var ar=s(Y);R=r(ar,"SPAN",{});var ir=s(R);x(ee.$$.fragment,ir),ir.forEach(t),ar.forEach(t),ze=c(Qo),S=r(Qo,"SPAN",{});var Ol=s(S);Ee=i(Ol,"Overview"),Ol.forEach(t),Qo.forEach(t),Te=c(o),C=r(o,"P",{});var Pr=s(C);Q=i(Pr,"The LXMERT model was proposed in "),te=r(Pr,"A",{href:!0,rel:!0});var Cl=s(te);me=i(Cl,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Cl.forEach(t),M=i(Pr,` by Hao Tan & Mohit Bansal. It is a series of bidirectional transformer encoders (one for the vision modality, one for the language modality, and then one to fuse both modalities) pretrained using a combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. The pretraining consists of multiple multi-modal datasets: MSCOCO, Visual-Genome + Visual-Genome Question Answering, VQA 2.0, and GQA.`),Pr.forEach(t),q=c(o),se=r(o,"P",{});var Nl=s(se);oe=i(Nl,"The abstract from the paper is the following:"),Nl.forEach(t),xe=c(o),ae=r(o,"P",{});var jl=s(ae);V=r(jl,"EM",{});var Il=s(V);Me=i(Il,`Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly, the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification), cross-modality matching, and image question answering. These tasks help in learning both intra-modality and cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our pretrained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR, and improve the previous best result by 22% absolute (54% to 76%). 
Lastly, we demonstrate detailed ablation studies to prove that both our novel model components and pretraining strategies significantly contribute to our strong results; and also present several attention visualizations for the different encoders`),Il.forEach(t),jl.forEach(t),be=c(o),P=r(o,"P",{});var Dl=s(P);Pe=i(Dl,"Tips:"),Dl.forEach(t),Z=c(o),N=r(o,"UL",{});var hn=s(N);he=r(hn,"LI",{});var Rl=s(he);ne=i(Rl,`Bounding boxes are not necessary to be used in the visual feature embeddings, any kind of visual-spacial features will work.`),Rl.forEach(t),qe=c(hn),ue=r(hn,"LI",{});var Sl=s(ue);W=i(Sl,`Both the language hidden states and the visual hidden states that LXMERT outputs are passed through the cross-modality layer, so they contain information from both modalities. To access a modality that only attends to itself, select the vision/language hidden states from the first input in the tuple.`),Sl.forEach(t),Ae=c(hn),B=r(hn,"LI",{});var Ql=s(B);Oe=i(Ql,`The bidirectional cross-modality encoder attention only returns attention values when the language modality is used as the input and the vision modality is used as the context vector. Further, while the cross-modality encoder contains self-attention for each respective modality and cross-attention, only the cross attention is returned and both self attention outputs are disregarded.`),Ql.forEach(t),hn.forEach(t),p=c(o),F=r(o,"P",{});var un=s(F);pe=i(un,"This model was contributed by "),fe=r(un,"A",{href:!0,rel:!0});var Vl=s(fe);Xe=i(Vl,"eltoto1219"),Vl.forEach(t),X=i(un,". The original code can be found "),ge=r(un,"A",{href:!0,rel:!0});var Wl=s(ge);He=i(Wl,"here"),Wl.forEach(t),Ge=i(un,"."),un.forEach(t),j=c(o),A=r(o,"H2",{class:!0});var qr=s(A);ie=r(qr,"A",{id:!0,class:!0,href:!0});var Bl=s(ie);Ce=r(Bl,"SPAN",{});var Xl=s(Ce);x(O.$$.fragment,Xl),Xl.forEach(t),Bl.forEach(t),Ue=c(qr),Ne=r(qr,"SPAN",{});var Hl=s(Ne);je=i(Hl,"LxmertConfig"),Hl.forEach(t),qr.forEach(t),Re=c(o),Ie=r(o,"DIV",{class:!0});var pn=s(Ie);x(Ct.$$.fragment,pn),hs=c(pn),Ke=r(pn,"P",{});var fn=s(Ke);us=i(fn,"This is the configuration class to store the configuration of a "),Wo=r(fn,"A",{href:!0});var Gl=s(Wo);ps=i(Gl,"LxmertModel"),Gl.forEach(t),fs=i(fn,` or a `),Bo=r(fn,"A",{href:!0});var Ul=s(Bo);gs=i(Ul,"TFLxmertModel"),Ul.forEach(t),_s=i(fn,`. It is used to instantiate a LXMERT model according to the specified arguments, defining the model architecture.`),fn.forEach(t),vs=c(pn),Je=r(pn,"P",{});var gn=s(Je);Ts=i(gn,"Configuration objects inherit from "),Xo=r(gn,"A",{href:!0});var Kl=s(Xo);xs=i(Kl,"PretrainedConfig"),Kl.forEach(t),bs=i(gn,` and can be used to control the model outputs. 
Read the documentation from `),Ho=r(gn,"A",{href:!0});var Jl=s(Ho);ws=i(Jl,"PretrainedConfig"),Jl.forEach(t),ys=i(gn," for more information."),gn.forEach(t),pn.forEach(t),cr=c(o),Ye=r(o,"H2",{class:!0});var Ar=s(Ye);gt=r(Ar,"A",{id:!0,class:!0,href:!0});var Yl=s(gt);kn=r(Yl,"SPAN",{});var Zl=s(kn);x(Nt.$$.fragment,Zl),Zl.forEach(t),Yl.forEach(t),ks=c(Ar),Ln=r(Ar,"SPAN",{});var ed=s(Ln);Ls=i(ed,"LxmertTokenizer"),ed.forEach(t),Ar.forEach(t),mr=c(o),_e=r(o,"DIV",{class:!0});var At=s(_e);x(jt.$$.fragment,At),Fs=c(At),Fn=r(At,"P",{});var td=s(Fn);$s=i(td,"Construct an LXMERT tokenizer."),td.forEach(t),zs=c(At),_t=r(At,"P",{});var lr=s(_t);Go=r(lr,"A",{href:!0});var od=s(Go);Es=i(od,"LxmertTokenizer"),od.forEach(t),Ms=i(lr," is identical to "),Uo=r(lr,"A",{href:!0});var nd=s(Uo);Ps=i(nd,"BertTokenizer"),nd.forEach(t),qs=i(lr,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),lr.forEach(t),As=c(At),It=r(At,"P",{});var Or=s(It);Os=i(Or,"Refer to superclass "),Ko=r(Or,"A",{href:!0});var rd=s(Ko);Cs=i(rd,"BertTokenizer"),rd.forEach(t),Ns=i(Or,` for usage examples and documentation concerning parameters.`),Or.forEach(t),At.forEach(t),hr=c(o),Ze=r(o,"H2",{class:!0});var Cr=s(Ze);vt=r(Cr,"A",{id:!0,class:!0,href:!0});var sd=s(vt);$n=r(sd,"SPAN",{});var ad=s($n);x(Dt.$$.fragment,ad),ad.forEach(t),sd.forEach(t),js=c(Cr),zn=r(Cr,"SPAN",{});var id=s(zn);Is=i(id,"LxmertTokenizerFast"),id.forEach(t),Cr.forEach(t),ur=c(o),ve=r(o,"DIV",{class:!0});var Ot=s(ve);x(Rt.$$.fragment,Ot),Ds=c(Ot),St=r(Ot,"P",{});var Nr=s(St);Rs=i(Nr,"Construct a \u201Cfast\u201D LXMERT tokenizer (backed by HuggingFace\u2019s "),En=r(Nr,"EM",{});var ld=s(En);Ss=i(ld,"tokenizers"),ld.forEach(t),Qs=i(Nr," library)."),Nr.forEach(t),Vs=c(Ot),Tt=r(Ot,"P",{});var dr=s(Tt);Jo=r(dr,"A",{href:!0});var dd=s(Jo);Ws=i(dd,"LxmertTokenizerFast"),dd.forEach(t),Bs=i(dr," is identical to "),Yo=r(dr,"A",{href:!0});var cd=s(Yo);Xs=i(cd,"BertTokenizerFast"),cd.forEach(t),Hs=i(dr,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),dr.forEach(t),Gs=c(Ot),Qt=r(Ot,"P",{});var jr=s(Qt);Us=i(jr,"Refer to superclass "),Zo=r(jr,"A",{href:!0});var md=s(Zo);Ks=i(md,"BertTokenizerFast"),md.forEach(t),Js=i(jr,` for usage examples and documentation concerning parameters.`),jr.forEach(t),Ot.forEach(t),pr=c(o),et=r(o,"H2",{class:!0});var Ir=s(et);xt=r(Ir,"A",{id:!0,class:!0,href:!0});var hd=s(xt);Mn=r(hd,"SPAN",{});var ud=s(Mn);x(Vt.$$.fragment,ud),ud.forEach(t),hd.forEach(t),Ys=c(Ir),Pn=r(Ir,"SPAN",{});var pd=s(Pn);Zs=i(pd,"Lxmert specific outputs"),pd.forEach(t),Ir.forEach(t),fr=c(o),tt=r(o,"DIV",{class:!0});var Dr=s(tt);x(Wt.$$.fragment,Dr),ea=c(Dr),qn=r(Dr,"P",{});var fd=s(qn);ta=i(fd,`Lxmert\u2019s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. 
(note: the visual encoder in Lxmert is referred to as the \u201Crelation-ship\u201D encoder\u201D)`),fd.forEach(t),Dr.forEach(t),gr=c(o),ot=r(o,"DIV",{class:!0});var Rr=s(ot);x(Bt.$$.fragment,Rr),oa=c(Rr),Xt=r(Rr,"P",{});var Sr=s(Xt);na=i(Sr,"Output type of "),en=r(Sr,"A",{href:!0});var gd=s(en);ra=i(gd,"LxmertForPreTraining"),gd.forEach(t),sa=i(Sr,"."),Sr.forEach(t),Rr.forEach(t),_r=c(o),nt=r(o,"DIV",{class:!0});var Qr=s(nt);x(Ht.$$.fragment,Qr),aa=c(Qr),Gt=r(Qr,"P",{});var Vr=s(Gt);ia=i(Vr,"Output type of "),tn=r(Vr,"A",{href:!0});var _d=s(tn);la=i(_d,"LxmertForQuestionAnswering"),_d.forEach(t),da=i(Vr,"."),Vr.forEach(t),Qr.forEach(t),vr=c(o),rt=r(o,"DIV",{class:!0});var Wr=s(rt);x(Ut.$$.fragment,Wr),ca=c(Wr),An=r(Wr,"P",{});var vd=s(An);ma=i(vd,`Lxmert\u2019s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the \u201Crelation-ship\u201D encoder\u201D)`),vd.forEach(t),Wr.forEach(t),Tr=c(o),st=r(o,"DIV",{class:!0});var Br=s(st);x(Kt.$$.fragment,Br),ha=c(Br),Jt=r(Br,"P",{});var Xr=s(Jt);ua=i(Xr,"Output type of "),on=r(Xr,"A",{href:!0});var Td=s(on);pa=i(Td,"LxmertForPreTraining"),Td.forEach(t),fa=i(Xr,"."),Xr.forEach(t),Br.forEach(t),xr=c(o),at=r(o,"H2",{class:!0});var Hr=s(at);bt=r(Hr,"A",{id:!0,class:!0,href:!0});var xd=s(bt);On=r(xd,"SPAN",{});var bd=s(On);x(Yt.$$.fragment,bd),bd.forEach(t),xd.forEach(t),ga=c(Hr),Cn=r(Hr,"SPAN",{});var wd=s(Cn);_a=i(wd,"LxmertModel"),wd.forEach(t),Hr.forEach(t),br=c(o),H=r(o,"DIV",{class:!0});var we=s(H);x(Zt.$$.fragment,we),va=c(we),Nn=r(we,"P",{});var yd=s(Nn);Ta=i(yd,"The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top."),yd.forEach(t),xa=c(we),eo=r(we,"P",{});var Gr=s(eo);ba=i(Gr,"The LXMERT model was proposed in "),to=r(Gr,"A",{href:!0,rel:!0});var kd=s(to);wa=i(kd,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),kd.forEach(t),ya=i(Gr,` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),Gr.forEach(t),ka=c(we),oo=r(we,"P",{});var Ur=s(oo);La=i(Ur,"This model inherits from "),nn=r(Ur,"A",{href:!0});var Ld=s(nn);Fa=i(Ld,"PreTrainedModel"),Ld.forEach(t),$a=i(Ur,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ur.forEach(t),za=c(we),no=r(we,"P",{});var Kr=s(no);Ea=i(Kr,"This model is also a PyTorch "),ro=r(Kr,"A",{href:!0,rel:!0});var Fd=s(ro);Ma=i(Fd,"torch.nn.Module"),Fd.forEach(t),Pa=i(Kr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kr.forEach(t),qa=c(we),le=r(we,"DIV",{class:!0});var Ve=s(le);x(so.$$.fragment,Ve),Aa=c(Ve),it=r(Ve,"P",{});var _n=s(it);Oa=i(_n,"The "),rn=r(_n,"A",{href:!0});var $d=s(rn);Ca=i($d,"LxmertModel"),$d.forEach(t),Na=i(_n," forward method, overrides the "),jn=r(_n,"CODE",{});var zd=s(jn);ja=i(zd,"__call__"),zd.forEach(t),Ia=i(_n," special method."),_n.forEach(t),Da=c(Ve),x(wt.$$.fragment,Ve),Ra=c(Ve),In=r(Ve,"P",{});var Ed=s(In);Sa=i(Ed,"Example:"),Ed.forEach(t),Qa=c(Ve),x(ao.$$.fragment,Ve),Ve.forEach(t),we.forEach(t),wr=c(o),lt=r(o,"H2",{class:!0});var Jr=s(lt);yt=r(Jr,"A",{id:!0,class:!0,href:!0});var Md=s(yt);Dn=r(Md,"SPAN",{});var Pd=s(Dn);x(io.$$.fragment,Pd),Pd.forEach(t),Md.forEach(t),Va=c(Jr),Rn=r(Jr,"SPAN",{});var qd=s(Rn);Wa=i(qd,"LxmertForPreTraining"),qd.forEach(t),Jr.forEach(t),yr=c(o),G=r(o,"DIV",{class:!0});var ye=s(G);x(lo.$$.fragment,ye),Ba=c(ye),Sn=r(ye,"P",{});var Ad=s(Sn);Xa=i(Ad,"Lxmert Model with a specified pretraining head on top."),Ad.forEach(t),Ha=c(ye),co=r(ye,"P",{});var Yr=s(co);Ga=i(Yr,"The LXMERT model was proposed in "),mo=r(Yr,"A",{href:!0,rel:!0});var Od=s(mo);Ua=i(Od,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Od.forEach(t),Ka=i(Yr,` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),Yr.forEach(t),Ja=c(ye),ho=r(ye,"P",{});var Zr=s(ho);Ya=i(Zr,"This model inherits from "),sn=r(Zr,"A",{href:!0});var Cd=s(sn);Za=i(Cd,"PreTrainedModel"),Cd.forEach(t),ei=i(Zr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zr.forEach(t),ti=c(ye),uo=r(ye,"P",{});var es=s(uo);oi=i(es,"This model is also a PyTorch "),po=r(es,"A",{href:!0,rel:!0});var Nd=s(po);ni=i(Nd,"torch.nn.Module"),Nd.forEach(t),ri=i(es,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),es.forEach(t),si=c(ye),Se=r(ye,"DIV",{class:!0});var vn=s(Se);x(fo.$$.fragment,vn),ai=c(vn),dt=r(vn,"P",{});var Tn=s(dt);ii=i(Tn,"The "),an=r(Tn,"A",{href:!0});var jd=s(an);li=i(jd,"LxmertForPreTraining"),jd.forEach(t),di=i(Tn," forward method, overrides the "),Qn=r(Tn,"CODE",{});var Id=s(Qn);ci=i(Id,"__call__"),Id.forEach(t),mi=i(Tn," special method."),Tn.forEach(t),hi=c(vn),x(kt.$$.fragment,vn),vn.forEach(t),ye.forEach(t),kr=c(o),ct=r(o,"H2",{class:!0});var ts=s(ct);Lt=r(ts,"A",{id:!0,class:!0,href:!0});var Dd=s(Lt);Vn=r(Dd,"SPAN",{});var Rd=s(Vn);x(go.$$.fragment,Rd),Rd.forEach(t),Dd.forEach(t),ui=c(ts),Wn=r(ts,"SPAN",{});var Sd=s(Wn);pi=i(Sd,"LxmertForQuestionAnswering"),Sd.forEach(t),ts.forEach(t),Lr=c(o),U=r(o,"DIV",{class:!0});var ke=s(U);x(_o.$$.fragment,ke),fi=c(ke),Bn=r(ke,"P",{});var Qd=s(Bn);gi=i(Qd,"Lxmert Model with a visual-answering head on top for downstream QA tasks"),Qd.forEach(t),_i=c(ke),vo=r(ke,"P",{});var os=s(vo);vi=i(os,"The LXMERT model was proposed in "),To=r(os,"A",{href:!0,rel:!0});var Vd=s(To);Ti=i(Vd,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Vd.forEach(t),xi=i(os,` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),os.forEach(t),bi=c(ke),xo=r(ke,"P",{});var ns=s(xo);wi=i(ns,"This model inherits from "),ln=r(ns,"A",{href:!0});var Wd=s(ln);yi=i(Wd,"PreTrainedModel"),Wd.forEach(t),ki=i(ns,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ns.forEach(t),Li=c(ke),bo=r(ke,"P",{});var rs=s(bo);Fi=i(rs,"This model is also a PyTorch "),wo=r(rs,"A",{href:!0,rel:!0});var Bd=s(wo);$i=i(Bd,"torch.nn.Module"),Bd.forEach(t),zi=i(rs,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),rs.forEach(t),Ei=c(ke),de=r(ke,"DIV",{class:!0});var We=s(de);x(yo.$$.fragment,We),Mi=c(We),mt=r(We,"P",{});var xn=s(mt);Pi=i(xn,"The "),dn=r(xn,"A",{href:!0});var Xd=s(dn);qi=i(Xd,"LxmertForQuestionAnswering"),Xd.forEach(t),Ai=i(xn," forward method, overrides the "),Xn=r(xn,"CODE",{});var Hd=s(Xn);Oi=i(Hd,"__call__"),Hd.forEach(t),Ci=i(xn," special method."),xn.forEach(t),Ni=c(We),x(Ft.$$.fragment,We),ji=c(We),Hn=r(We,"P",{});var Gd=s(Hn);Ii=i(Gd,"Example:"),Gd.forEach(t),Di=c(We),x(ko.$$.fragment,We),We.forEach(t),ke.forEach(t),Fr=c(o),ht=r(o,"H2",{class:!0});var ss=s(ht);$t=r(ss,"A",{id:!0,class:!0,href:!0});var Ud=s($t);Gn=r(Ud,"SPAN",{});var Kd=s(Gn);x(Lo.$$.fragment,Kd),Kd.forEach(t),Ud.forEach(t),Ri=c(ss),Un=r(ss,"SPAN",{});var Jd=s(Un);Si=i(Jd,"TFLxmertModel"),Jd.forEach(t),ss.forEach(t),$r=c(o),K=r(o,"DIV",{class:!0});var Le=s(K);x(Fo.$$.fragment,Le),Qi=c(Le),Kn=r(Le,"P",{});var Yd=s(Kn);Vi=i(Yd,"The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top."),Yd.forEach(t),Wi=c(Le),$o=r(Le,"P",{});var as=s($o);Bi=i(as,"The LXMERT model was proposed in "),zo=r(as,"A",{href:!0,rel:!0});var Zd=s(zo);Xi=i(Zd,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),Zd.forEach(t),Hi=i(as,` by Hao Tan and Mohit Bansal. It\u2019s a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),as.forEach(t),Gi=c(Le),Eo=r(Le,"P",{});var is=s(Eo);Ui=i(is,"This model is also a "),Mo=r(is,"A",{href:!0,rel:!0});var ec=s(Mo);Ki=i(ec,"tf.keras.Model"),ec.forEach(t),Ji=i(is,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),is.forEach(t),Yi=c(Le),x(zt.$$.fragment,Le),Zi=c(Le),ce=r(Le,"DIV",{class:!0});var Be=s(ce);x(Po.$$.fragment,Be),el=c(Be),ut=r(Be,"P",{});var bn=s(ut);tl=i(bn,"The "),cn=r(bn,"A",{href:!0});var tc=s(cn);ol=i(tc,"TFLxmertModel"),tc.forEach(t),nl=i(bn," forward method, overrides the "),Jn=r(bn,"CODE",{});var oc=s(Jn);rl=i(oc,"__call__"),oc.forEach(t),sl=i(bn," special method."),bn.forEach(t),al=c(Be),x(Et.$$.fragment,Be),il=c(Be),Yn=r(Be,"P",{});var nc=s(Yn);ll=i(nc,"Example:"),nc.forEach(t),dl=c(Be),x(qo.$$.fragment,Be),Be.forEach(t),Le.forEach(t),zr=c(o),pt=r(o,"H2",{class:!0});var ls=s(pt);Mt=r(ls,"A",{id:!0,class:!0,href:!0});var rc=s(Mt);Zn=r(rc,"SPAN",{});var sc=s(Zn);x(Ao.$$.fragment,sc),sc.forEach(t),rc.forEach(t),cl=c(ls),er=r(ls,"SPAN",{});var ac=s(er);ml=i(ac,"TFLxmertForPreTraining"),ac.forEach(t),ls.forEach(t),Er=c(o),J=r(o,"DIV",{class:!0});var Fe=s(J);x(Oo.$$.fragment,Fe),hl=c(Fe),Co=r(Fe,"P",{});var ds=s(Co);ul=i(ds,"Lxmert Model with a "),tr=r(ds,"CODE",{});var ic=s(tr);pl=i(ic,"language modeling"),ic.forEach(t),fl=i(ds," head on top."),ds.forEach(t),gl=c(Fe),No=r(Fe,"P",{});var cs=s(No);_l=i(cs,"The LXMERT model was proposed in "),jo=r(cs,"A",{href:!0,rel:!0});var lc=s(jo);vl=i(lc,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers"),lc.forEach(t),Tl=i(cs,` by Hao Tan and Mohit Bansal. 
It\u2019s a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.`),cs.forEach(t),xl=c(Fe),Io=r(Fe,"P",{});var ms=s(Io);bl=i(ms,"This model is also a "),Do=r(ms,"A",{href:!0,rel:!0});var dc=s(Do);wl=i(dc,"tf.keras.Model"),dc.forEach(t),yl=i(ms,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ms.forEach(t),kl=c(Fe),x(Pt.$$.fragment,Fe),Ll=c(Fe),Qe=r(Fe,"DIV",{class:!0});var wn=s(Qe);x(Ro.$$.fragment,wn),Fl=c(wn),ft=r(wn,"P",{});var yn=s(ft);$l=i(yn,"The "),mn=r(yn,"A",{href:!0});var cc=s(mn);zl=i(cc,"TFLxmertForPreTraining"),cc.forEach(t),El=i(yn," forward method, overrides the "),or=r(yn,"CODE",{});var mc=s(or);Ml=i(mc,"__call__"),mc.forEach(t),Pl=i(yn," special method."),yn.forEach(t),ql=c(wn),x(qt.$$.fragment,wn),wn.forEach(t),Fe.forEach(t),this.h()},h(){l(u,"name","hf:doc:metadata"),l(u,"content",JSON.stringify(kc)),l(g,"id","lxmert"),l(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(g,"href","#lxmert"),l(f,"class","relative group"),l(Y,"id","overview"),l(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Y,"href","#overview"),l(E,"class","relative group"),l(te,"href","https://arxiv.org/abs/1908.07490"),l(te,"rel","nofollow"),l(fe,"href","https://huggingface.co/eltoto1219"),l(fe,"rel","nofollow"),l(ge,"href","https://github.com/airsplay/lxmert"),l(ge,"rel","nofollow"),l(ie,"id","transformers.LxmertConfig"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#transformers.LxmertConfig"),l(A,"class","relative group"),l(Wo,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertModel"),l(Bo,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertModel"),l(Xo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Ho,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(Ie,"class","docstring"),l(gt,"id","transformers.LxmertTokenizer"),l(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(gt,"href","#transformers.LxmertTokenizer"),l(Ye,"class","relative group"),l(Go,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizer"),l(Uo,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),l(Ko,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizer"),l(_e,"class","docstring"),l(vt,"id","transformers.LxmertTokenizerFast"),l(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(vt,"href","#transformers.LxmertTokenizerFast"),l(Ze,"class","relative 
group"),l(Jo,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertTokenizerFast"),l(Yo,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),l(Zo,"href","/docs/transformers/v4.15.0/en/model_doc/bert#transformers.BertTokenizerFast"),l(ve,"class","docstring"),l(xt,"id","transformers.models.lxmert.modeling_lxmert.LxmertModelOutput"),l(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(xt,"href","#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput"),l(et,"class","relative group"),l(tt,"class","docstring"),l(en,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForPreTraining"),l(ot,"class","docstring"),l(tn,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForQuestionAnswering"),l(nt,"class","docstring"),l(rt,"class","docstring"),l(on,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForPreTraining"),l(st,"class","docstring"),l(bt,"id","transformers.LxmertModel"),l(bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(bt,"href","#transformers.LxmertModel"),l(at,"class","relative group"),l(to,"href","https://arxiv.org/abs/1908.07490"),l(to,"rel","nofollow"),l(nn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(ro,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(ro,"rel","nofollow"),l(rn,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertModel"),l(le,"class","docstring"),l(H,"class","docstring"),l(yt,"id","transformers.LxmertForPreTraining"),l(yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(yt,"href","#transformers.LxmertForPreTraining"),l(lt,"class","relative group"),l(mo,"href","https://arxiv.org/abs/1908.07490"),l(mo,"rel","nofollow"),l(sn,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(po,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(po,"rel","nofollow"),l(an,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForPreTraining"),l(Se,"class","docstring"),l(G,"class","docstring"),l(Lt,"id","transformers.LxmertForQuestionAnswering"),l(Lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Lt,"href","#transformers.LxmertForQuestionAnswering"),l(ct,"class","relative group"),l(To,"href","https://arxiv.org/abs/1908.07490"),l(To,"rel","nofollow"),l(ln,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),l(wo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(wo,"rel","nofollow"),l(dn,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.LxmertForQuestionAnswering"),l(de,"class","docstring"),l(U,"class","docstring"),l($t,"id","transformers.TFLxmertModel"),l($t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l($t,"href","#transformers.TFLxmertModel"),l(ht,"class","relative 
group"),l(zo,"href","https://arxiv.org/abs/1908.07490"),l(zo,"rel","nofollow"),l(Mo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(Mo,"rel","nofollow"),l(cn,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertModel"),l(ce,"class","docstring"),l(K,"class","docstring"),l(Mt,"id","transformers.TFLxmertForPreTraining"),l(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Mt,"href","#transformers.TFLxmertForPreTraining"),l(pt,"class","relative group"),l(jo,"href","https://arxiv.org/abs/1908.07490"),l(jo,"rel","nofollow"),l(Do,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),l(Do,"rel","nofollow"),l(mn,"href","/docs/transformers/v4.15.0/en/model_doc/lxmert#transformers.TFLxmertForPreTraining"),l(Qe,"class","docstring"),l(J,"class","docstring")},m(o,h){e(document.head,u),m(o,$,h),m(o,f,h),e(f,g),e(g,L),b(v,L,null),e(f,_),e(f,z),e(z,$e),m(o,re,h),m(o,E,h),e(E,Y),e(Y,R),b(ee,R,null),e(E,ze),e(E,S),e(S,Ee),m(o,Te,h),m(o,C,h),e(C,Q),e(C,te),e(te,me),e(C,M),m(o,q,h),m(o,se,h),e(se,oe),m(o,xe,h),m(o,ae,h),e(ae,V),e(V,Me),m(o,be,h),m(o,P,h),e(P,Pe),m(o,Z,h),m(o,N,h),e(N,he),e(he,ne),e(N,qe),e(N,ue),e(ue,W),e(N,Ae),e(N,B),e(B,Oe),m(o,p,h),m(o,F,h),e(F,pe),e(F,fe),e(fe,Xe),e(F,X),e(F,ge),e(ge,He),e(F,Ge),m(o,j,h),m(o,A,h),e(A,ie),e(ie,Ce),b(O,Ce,null),e(A,Ue),e(A,Ne),e(Ne,je),m(o,Re,h),m(o,Ie,h),b(Ct,Ie,null),e(Ie,hs),e(Ie,Ke),e(Ke,us),e(Ke,Wo),e(Wo,ps),e(Ke,fs),e(Ke,Bo),e(Bo,gs),e(Ke,_s),e(Ie,vs),e(Ie,Je),e(Je,Ts),e(Je,Xo),e(Xo,xs),e(Je,bs),e(Je,Ho),e(Ho,ws),e(Je,ys),m(o,cr,h),m(o,Ye,h),e(Ye,gt),e(gt,kn),b(Nt,kn,null),e(Ye,ks),e(Ye,Ln),e(Ln,Ls),m(o,mr,h),m(o,_e,h),b(jt,_e,null),e(_e,Fs),e(_e,Fn),e(Fn,$s),e(_e,zs),e(_e,_t),e(_t,Go),e(Go,Es),e(_t,Ms),e(_t,Uo),e(Uo,Ps),e(_t,qs),e(_e,As),e(_e,It),e(It,Os),e(It,Ko),e(Ko,Cs),e(It,Ns),m(o,hr,h),m(o,Ze,h),e(Ze,vt),e(vt,$n),b(Dt,$n,null),e(Ze,js),e(Ze,zn),e(zn,Is),m(o,ur,h),m(o,ve,h),b(Rt,ve,null),e(ve,Ds),e(ve,St),e(St,Rs),e(St,En),e(En,Ss),e(St,Qs),e(ve,Vs),e(ve,Tt),e(Tt,Jo),e(Jo,Ws),e(Tt,Bs),e(Tt,Yo),e(Yo,Xs),e(Tt,Hs),e(ve,Gs),e(ve,Qt),e(Qt,Us),e(Qt,Zo),e(Zo,Ks),e(Qt,Js),m(o,pr,h),m(o,et,h),e(et,xt),e(xt,Mn),b(Vt,Mn,null),e(et,Ys),e(et,Pn),e(Pn,Zs),m(o,fr,h),m(o,tt,h),b(Wt,tt,null),e(tt,ea),e(tt,qn),e(qn,ta),m(o,gr,h),m(o,ot,h),b(Bt,ot,null),e(ot,oa),e(ot,Xt),e(Xt,na),e(Xt,en),e(en,ra),e(Xt,sa),m(o,_r,h),m(o,nt,h),b(Ht,nt,null),e(nt,aa),e(nt,Gt),e(Gt,ia),e(Gt,tn),e(tn,la),e(Gt,da),m(o,vr,h),m(o,rt,h),b(Ut,rt,null),e(rt,ca),e(rt,An),e(An,ma),m(o,Tr,h),m(o,st,h),b(Kt,st,null),e(st,ha),e(st,Jt),e(Jt,ua),e(Jt,on),e(on,pa),e(Jt,fa),m(o,xr,h),m(o,at,h),e(at,bt),e(bt,On),b(Yt,On,null),e(at,ga),e(at,Cn),e(Cn,_a),m(o,br,h),m(o,H,h),b(Zt,H,null),e(H,va),e(H,Nn),e(Nn,Ta),e(H,xa),e(H,eo),e(eo,ba),e(eo,to),e(to,wa),e(eo,ya),e(H,ka),e(H,oo),e(oo,La),e(oo,nn),e(nn,Fa),e(oo,$a),e(H,za),e(H,no),e(no,Ea),e(no,ro),e(ro,Ma),e(no,Pa),e(H,qa),e(H,le),b(so,le,null),e(le,Aa),e(le,it),e(it,Oa),e(it,rn),e(rn,Ca),e(it,Na),e(it,jn),e(jn,ja),e(it,Ia),e(le,Da),b(wt,le,null),e(le,Ra),e(le,In),e(In,Sa),e(le,Qa),b(ao,le,null),m(o,wr,h),m(o,lt,h),e(lt,yt),e(yt,Dn),b(io,Dn,null),e(lt,Va),e(lt,Rn),e(Rn,Wa),m(o,yr,h),m(o,G,h),b(lo,G,null),e(G,Ba),e(G,Sn),e(Sn,Xa),e(G,Ha),e(G,co),e(co,Ga),e(co,mo),e(mo,Ua),e(co,Ka),e(G,Ja),e(G,ho),e(ho,Ya),e(ho,sn),e(sn,Za),e(ho,ei),e(G,ti),e(G,uo),e(uo,oi),e(uo,po),e(po,ni),e(uo,ri),e(G,si),e(G,Se),b(fo,Se,null),e(Se,ai),e(Se,dt),e(dt,ii),e(dt,an),e(an,li),e(dt,di),e(dt,Qn
),e(Qn,ci),e(dt,mi),e(Se,hi),b(kt,Se,null),m(o,kr,h),m(o,ct,h),e(ct,Lt),e(Lt,Vn),b(go,Vn,null),e(ct,ui),e(ct,Wn),e(Wn,pi),m(o,Lr,h),m(o,U,h),b(_o,U,null),e(U,fi),e(U,Bn),e(Bn,gi),e(U,_i),e(U,vo),e(vo,vi),e(vo,To),e(To,Ti),e(vo,xi),e(U,bi),e(U,xo),e(xo,wi),e(xo,ln),e(ln,yi),e(xo,ki),e(U,Li),e(U,bo),e(bo,Fi),e(bo,wo),e(wo,$i),e(bo,zi),e(U,Ei),e(U,de),b(yo,de,null),e(de,Mi),e(de,mt),e(mt,Pi),e(mt,dn),e(dn,qi),e(mt,Ai),e(mt,Xn),e(Xn,Oi),e(mt,Ci),e(de,Ni),b(Ft,de,null),e(de,ji),e(de,Hn),e(Hn,Ii),e(de,Di),b(ko,de,null),m(o,Fr,h),m(o,ht,h),e(ht,$t),e($t,Gn),b(Lo,Gn,null),e(ht,Ri),e(ht,Un),e(Un,Si),m(o,$r,h),m(o,K,h),b(Fo,K,null),e(K,Qi),e(K,Kn),e(Kn,Vi),e(K,Wi),e(K,$o),e($o,Bi),e($o,zo),e(zo,Xi),e($o,Hi),e(K,Gi),e(K,Eo),e(Eo,Ui),e(Eo,Mo),e(Mo,Ki),e(Eo,Ji),e(K,Yi),b(zt,K,null),e(K,Zi),e(K,ce),b(Po,ce,null),e(ce,el),e(ce,ut),e(ut,tl),e(ut,cn),e(cn,ol),e(ut,nl),e(ut,Jn),e(Jn,rl),e(ut,sl),e(ce,al),b(Et,ce,null),e(ce,il),e(ce,Yn),e(Yn,ll),e(ce,dl),b(qo,ce,null),m(o,zr,h),m(o,pt,h),e(pt,Mt),e(Mt,Zn),b(Ao,Zn,null),e(pt,cl),e(pt,er),e(er,ml),m(o,Er,h),m(o,J,h),b(Oo,J,null),e(J,hl),e(J,Co),e(Co,ul),e(Co,tr),e(tr,pl),e(Co,fl),e(J,gl),e(J,No),e(No,_l),e(No,jo),e(jo,vl),e(No,Tl),e(J,xl),e(J,Io),e(Io,bl),e(Io,Do),e(Do,wl),e(Io,yl),e(J,kl),b(Pt,J,null),e(J,Ll),e(J,Qe),b(Ro,Qe,null),e(Qe,Fl),e(Qe,ft),e(ft,$l),e(ft,mn),e(mn,zl),e(ft,El),e(ft,or),e(or,Ml),e(ft,Pl),e(Qe,ql),b(qt,Qe,null),Mr=!0},p(o,[h]){const So={};h&2&&(So.$$scope={dirty:h,ctx:o}),wt.$set(So);const nr={};h&2&&(nr.$$scope={dirty:h,ctx:o}),kt.$set(nr);const rr={};h&2&&(rr.$$scope={dirty:h,ctx:o}),Ft.$set(rr);const sr={};h&2&&(sr.$$scope={dirty:h,ctx:o}),zt.$set(sr);const Qo={};h&2&&(Qo.$$scope={dirty:h,ctx:o}),Et.$set(Qo);const ar={};h&2&&(ar.$$scope={dirty:h,ctx:o}),Pt.$set(ar);const ir={};h&2&&(ir.$$scope={dirty:h,ctx:o}),qt.$set(ir)},i(o){Mr||(w(v.$$.fragment,o),w(ee.$$.fragment,o),w(O.$$.fragment,o),w(Ct.$$.fragment,o),w(Nt.$$.fragment,o),w(jt.$$.fragment,o),w(Dt.$$.fragment,o),w(Rt.$$.fragment,o),w(Vt.$$.fragment,o),w(Wt.$$.fragment,o),w(Bt.$$.fragment,o),w(Ht.$$.fragment,o),w(Ut.$$.fragment,o),w(Kt.$$.fragment,o),w(Yt.$$.fragment,o),w(Zt.$$.fragment,o),w(so.$$.fragment,o),w(wt.$$.fragment,o),w(ao.$$.fragment,o),w(io.$$.fragment,o),w(lo.$$.fragment,o),w(fo.$$.fragment,o),w(kt.$$.fragment,o),w(go.$$.fragment,o),w(_o.$$.fragment,o),w(yo.$$.fragment,o),w(Ft.$$.fragment,o),w(ko.$$.fragment,o),w(Lo.$$.fragment,o),w(Fo.$$.fragment,o),w(zt.$$.fragment,o),w(Po.$$.fragment,o),w(Et.$$.fragment,o),w(qo.$$.fragment,o),w(Ao.$$.fragment,o),w(Oo.$$.fragment,o),w(Pt.$$.fragment,o),w(Ro.$$.fragment,o),w(qt.$$.fragment,o),Mr=!0)},o(o){y(v.$$.fragment,o),y(ee.$$.fragment,o),y(O.$$.fragment,o),y(Ct.$$.fragment,o),y(Nt.$$.fragment,o),y(jt.$$.fragment,o),y(Dt.$$.fragment,o),y(Rt.$$.fragment,o),y(Vt.$$.fragment,o),y(Wt.$$.fragment,o),y(Bt.$$.fragment,o),y(Ht.$$.fragment,o),y(Ut.$$.fragment,o),y(Kt.$$.fragment,o),y(Yt.$$.fragment,o),y(Zt.$$.fragment,o),y(so.$$.fragment,o),y(wt.$$.fragment,o),y(ao.$$.fragment,o),y(io.$$.fragment,o),y(lo.$$.fragment,o),y(fo.$$.fragment,o),y(kt.$$.fragment,o),y(go.$$.fragment,o),y(_o.$$.fragment,o),y(yo.$$.fragment,o),y(Ft.$$.fragment,o),y(ko.$$.fragment,o),y(Lo.$$.fragment,o),y(Fo.$$.fragment,o),y(zt.$$.fragment,o),y(Po.$$.fragment,o),y(Et.$$.fragment,o),y(qo.$$.fragment,o),y(Ao.$$.fragment,o),y(Oo.$$.fragment,o),y(Pt.$$.fragment,o),y(Ro.$$.fragment,o),y(qt.$$.fragment,o),Mr=!1},d(o){t(u),o&&t($),o&&t(f),k(v),o&&t(re),o&&t(E),k(ee),o&&t(Te),o&&t(C),o&&t(q),o&&t(se),o&&t(xe),o&&t(ae),o&&t(be),o&&t(P),o&&t(Z),o&&t(N),o&&t(p),o&&t(F),o&&t(
j),o&&t(A),k(O),o&&t(Re),o&&t(Ie),k(Ct),o&&t(cr),o&&t(Ye),k(Nt),o&&t(mr),o&&t(_e),k(jt),o&&t(hr),o&&t(Ze),k(Dt),o&&t(ur),o&&t(ve),k(Rt),o&&t(pr),o&&t(et),k(Vt),o&&t(fr),o&&t(tt),k(Wt),o&&t(gr),o&&t(ot),k(Bt),o&&t(_r),o&&t(nt),k(Ht),o&&t(vr),o&&t(rt),k(Ut),o&&t(Tr),o&&t(st),k(Kt),o&&t(xr),o&&t(at),k(Yt),o&&t(br),o&&t(H),k(Zt),k(so),k(wt),k(ao),o&&t(wr),o&&t(lt),k(io),o&&t(yr),o&&t(G),k(lo),k(fo),k(kt),o&&t(kr),o&&t(ct),k(go),o&&t(Lr),o&&t(U),k(_o),k(yo),k(Ft),k(ko),o&&t(Fr),o&&t(ht),k(Lo),o&&t($r),o&&t(K),k(Fo),k(zt),k(Po),k(Et),k(qo),o&&t(zr),o&&t(pt),k(Ao),o&&t(Er),o&&t(J),k(Oo),k(Pt),k(Ro),k(qt)}}}const kc={local:"lxmert",sections:[{local:"overview",title:"Overview"},{local:"transformers.LxmertConfig",title:"LxmertConfig"},{local:"transformers.LxmertTokenizer",title:"LxmertTokenizer"},{local:"transformers.LxmertTokenizerFast",title:"LxmertTokenizerFast"},{local:"transformers.models.lxmert.modeling_lxmert.LxmertModelOutput",title:"Lxmert specific outputs"},{local:"transformers.LxmertModel",title:"LxmertModel"},{local:"transformers.LxmertForPreTraining",title:"LxmertForPreTraining"},{local:"transformers.LxmertForQuestionAnswering",title:"LxmertForQuestionAnswering"},{local:"transformers.TFLxmertModel",title:"TFLxmertModel"},{local:"transformers.TFLxmertForPreTraining",title:"TFLxmertForPreTraining"}],title:"LXMERT"};function Lc(D,u,$){let{fw:f}=u;return D.$$set=g=>{"fw"in g&&$(0,f=g.fw)},[f]}class qc extends hc{constructor(u){super();uc(this,u,Lc,yc,pc,{fw:0})}}export{qc as default,kc as metadata};
9,988
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bertweet.mdx-f6547238.js
import{S as zs,i as qs,s as Bs,e as n,k as a,w as g,t as i,L as Ls,c as o,d as s,m as l,a as r,x as _,h as c,b as d,J as e,g as h,y as k,K as xs,q as w,o as v,B as b}from"../../chunks/vendor-b1433968.js";import{D as N}from"../../chunks/Docstring-ff504c58.js";import{C as As}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Kt}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Rs(Ze){let E,se,u,T,pe,C,Ke,me,Ge,Be,z,B,fe,O,et,he,tt,Le,L,st,F,nt,ot,xe,ne,rt,Ae,oe,ue,at,Re,re,it,De,M,Pe,$,lt,S,ct,dt,V,pt,mt,je,q,x,ge,W,ft,_e,ht,Ie,p,U,ut,ke,gt,_t,H,kt,ae,wt,vt,bt,A,X,Tt,we,Et,$t,y,J,yt,ve,zt,qt,Q,ie,Bt,be,Lt,xt,le,At,Te,Rt,Dt,R,Y,Pt,Ee,jt,It,D,Z,Nt,$e,Ct,Ot,P,K,Ft,G,Mt,ye,St,Vt,Wt,j,ee,Ut,ze,Ht,Xt,I,te,Jt,qe,Qt,Ne;return C=new Kt({}),O=new Kt({}),M=new As({props:{code:`import torch from transformers import AutoModel, AutoTokenizer bertweet = AutoModel.from_pretrained("vinai/bertweet-base") # For transformers v4.x+: tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False) # For transformers v3.x: # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base") # INPUT TWEET IS ALREADY NORMALIZED! line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:" input_ids = torch.tensor([tokenizer.encode(line)]) with torch.no_grad(): features = bertweet(input_ids) # Models outputs are now tuples # With TensorFlow 2.0+: # from transformers import TFAutoModel # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base"),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>bertweet = AutoModel.from_pretrained(<span class="hljs-string">&quot;vinai/bertweet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># For transformers v4.x+: </span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;vinai/bertweet-base&quot;</span>, use_fast=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># For transformers v3.x: </span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># tokenizer = AutoTokenizer.from_pretrained(&quot;vinai/bertweet-base&quot;)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># INPUT TWEET IS ALREADY NORMALIZED!</span> <span class="hljs-meta">&gt;&gt;&gt; </span>line = <span class="hljs-string">&quot;SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([tokenizer.encode(line)]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> features = bertweet(input_ids) <span class="hljs-comment"># Models outputs are now tuples</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># With TensorFlow 2.0+:</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># from transformers import TFAutoModel</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># bertweet = TFAutoModel.from_pretrained(&quot;vinai/bertweet-base&quot;)</span>`}}),W=new Kt({}),U=new N({props:{name:"class transformers.BertweetTokenizer",anchor:"transformers.BertweetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"normalization",val:" = False"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L68",parametersDescription:[{anchor:"transformers.BertweetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.BertweetTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.BertweetTokenizer.normalization",description:`<strong>normalization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to apply a normalization preprocess.`,name:"normalization"},{anchor:"transformers.BertweetTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.BertweetTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.BertweetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.BertweetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.BertweetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.BertweetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.BertweetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"}]}}),X=new N({props:{name:"add_from_file",anchor:"transformers.BertweetTokenizer.add_from_file",parameters:[{name:"f",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L414"}}),J=new N({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertweetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L183",parametersDescription:[{anchor:"transformers.BertweetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertweetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Y=new N({props:{name:"convert_tokens_to_string",anchor:"transformers.BertweetTokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L384"}}),Z=new N({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BertweetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L237",parametersDescription:[{anchor:"transformers.BertweetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertweetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),K=new N({props:{name:"get_special_tokens_mask",anchor:"transformers.BertweetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L209",parametersDescription:[{anchor:"transformers.BertweetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertweetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BertweetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ee=new N({props:{name:"normalizeToken",anchor:"transformers.BertweetTokenizer.normalizeToken",parameters:[{name:"token",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L357"}}),te=new N({props:{name:"normalizeTweet",anchor:"transformers.BertweetTokenizer.normalizeTweet",parameters:[{name:"tweet",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bertweet/tokenization_bertweet.py#L323"}}),{c(){E=n("meta"),se=a(),u=n("h1"),T=n("a"),pe=n("span"),g(C.$$.fragment),Ke=a(),me=n("span"),Ge=i("BERTweet"),Be=a(),z=n("h2"),B=n("a"),fe=n("span"),g(O.$$.fragment),et=a(),he=n("span"),tt=i("Overview"),Le=a(),L=n("p"),st=i("The BERTweet model was proposed in "),F=n("a"),nt=i("BERTweet: A pre-trained language model for English Tweets"),ot=i(" by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen."),xe=a(),ne=n("p"),rt=i("The abstract from the paper is the following:"),Ae=a(),oe=n("p"),ue=n("em"),at=i(`We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et al., 2019). 
Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al., 2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks: Part-of-speech tagging, Named-entity recognition and text classification.`),Re=a(),re=n("p"),it=i("Example of use:"),De=a(),g(M.$$.fragment),Pe=a(),$=n("p"),lt=i("This model was contributed by "),S=n("a"),ct=i("dqnguyen"),dt=i(". The original code can be found "),V=n("a"),pt=i("here"),mt=i("."),je=a(),q=n("h2"),x=n("a"),ge=n("span"),g(W.$$.fragment),ft=a(),_e=n("span"),ht=i("BertweetTokenizer"),Ie=a(),p=n("div"),g(U.$$.fragment),ut=a(),ke=n("p"),gt=i("Constructs a BERTweet tokenizer, using Byte-Pair-Encoding."),_t=a(),H=n("p"),kt=i("This tokenizer inherits from "),ae=n("a"),wt=i("PreTrainedTokenizer"),vt=i(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),bt=a(),A=n("div"),g(X.$$.fragment),Tt=a(),we=n("p"),Et=i("Loads a pre-existing dictionary from a text file and adds its symbols to this instance."),$t=a(),y=n("div"),g(J.$$.fragment),yt=a(),ve=n("p"),zt=i(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERTweet sequence has the following format:`),qt=a(),Q=n("ul"),ie=n("li"),Bt=i("single sequence: "),be=n("code"),Lt=i("<s> X </s>"),xt=a(),le=n("li"),At=i("pair of sequences: "),Te=n("code"),Rt=i("<s> A </s></s> B </s>"),Dt=a(),R=n("div"),g(Y.$$.fragment),Pt=a(),Ee=n("p"),jt=i("Converts a sequence of tokens (string) in a single string."),It=a(),D=n("div"),g(Z.$$.fragment),Nt=a(),$e=n("p"),Ct=i(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does not make use of token type ids, therefore a list of zeros is returned.`),Ot=a(),P=n("div"),g(K.$$.fragment),Ft=a(),G=n("p"),Mt=i(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ye=n("code"),St=i("prepare_for_model"),Vt=i(" method."),Wt=a(),j=n("div"),g(ee.$$.fragment),Ut=a(),ze=n("p"),Ht=i("Normalize tokens in a Tweet"),Xt=a(),I=n("div"),g(te.$$.fragment),Jt=a(),qe=n("p"),Qt=i("Normalize a raw Tweet"),this.h()},l(t){const m=Ls('[data-svelte="svelte-1phssyn"]',document.head);E=o(m,"META",{name:!0,content:!0}),m.forEach(s),se=l(t),u=o(t,"H1",{class:!0});var Ce=r(u);T=o(Ce,"A",{id:!0,class:!0,href:!0});var Gt=r(T);pe=o(Gt,"SPAN",{});var es=r(pe);_(C.$$.fragment,es),es.forEach(s),Gt.forEach(s),Ke=l(Ce),me=o(Ce,"SPAN",{});var ts=r(me);Ge=c(ts,"BERTweet"),ts.forEach(s),Ce.forEach(s),Be=l(t),z=o(t,"H2",{class:!0});var Oe=r(z);B=o(Oe,"A",{id:!0,class:!0,href:!0});var ss=r(B);fe=o(ss,"SPAN",{});var ns=r(fe);_(O.$$.fragment,ns),ns.forEach(s),ss.forEach(s),et=l(Oe),he=o(Oe,"SPAN",{});var os=r(he);tt=c(os,"Overview"),os.forEach(s),Oe.forEach(s),Le=l(t),L=o(t,"P",{});var Fe=r(L);st=c(Fe,"The BERTweet model was proposed in "),F=o(Fe,"A",{href:!0,rel:!0});var rs=r(F);nt=c(rs,"BERTweet: A pre-trained language model for English Tweets"),rs.forEach(s),ot=c(Fe," by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen."),Fe.forEach(s),xe=l(t),ne=o(t,"P",{});var as=r(ne);rt=c(as,"The abstract from the paper is the following:"),as.forEach(s),Ae=l(t),oe=o(t,"P",{});var is=r(oe);ue=o(is,"EM",{});var ls=r(ue);at=c(ls,`We present BERTweet, the first public large-scale pre-trained language model for English Tweets. 
Our BERTweet, having the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al., 2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks: Part-of-speech tagging, Named-entity recognition and text classification.`),ls.forEach(s),is.forEach(s),Re=l(t),re=o(t,"P",{});var cs=r(re);it=c(cs,"Example of use:"),cs.forEach(s),De=l(t),_(M.$$.fragment,t),Pe=l(t),$=o(t,"P",{});var ce=r($);lt=c(ce,"This model was contributed by "),S=o(ce,"A",{href:!0,rel:!0});var ds=r(S);ct=c(ds,"dqnguyen"),ds.forEach(s),dt=c(ce,". The original code can be found "),V=o(ce,"A",{href:!0,rel:!0});var ps=r(V);pt=c(ps,"here"),ps.forEach(s),mt=c(ce,"."),ce.forEach(s),je=l(t),q=o(t,"H2",{class:!0});var Me=r(q);x=o(Me,"A",{id:!0,class:!0,href:!0});var ms=r(x);ge=o(ms,"SPAN",{});var fs=r(ge);_(W.$$.fragment,fs),fs.forEach(s),ms.forEach(s),ft=l(Me),_e=o(Me,"SPAN",{});var hs=r(_e);ht=c(hs,"BertweetTokenizer"),hs.forEach(s),Me.forEach(s),Ie=l(t),p=o(t,"DIV",{class:!0});var f=r(p);_(U.$$.fragment,f),ut=l(f),ke=o(f,"P",{});var us=r(ke);gt=c(us,"Constructs a BERTweet tokenizer, using Byte-Pair-Encoding."),us.forEach(s),_t=l(f),H=o(f,"P",{});var Se=r(H);kt=c(Se,"This tokenizer inherits from "),ae=o(Se,"A",{href:!0});var gs=r(ae);wt=c(gs,"PreTrainedTokenizer"),gs.forEach(s),vt=c(Se,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Se.forEach(s),bt=l(f),A=o(f,"DIV",{class:!0});var Ve=r(A);_(X.$$.fragment,Ve),Tt=l(Ve),we=o(Ve,"P",{});var _s=r(we);Et=c(_s,"Loads a pre-existing dictionary from a text file and adds its symbols to this instance."),_s.forEach(s),Ve.forEach(s),$t=l(f),y=o(f,"DIV",{class:!0});var de=r(y);_(J.$$.fragment,de),yt=l(de),ve=o(de,"P",{});var ks=r(ve);zt=c(ks,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERTweet sequence has the following format:`),ks.forEach(s),qt=l(de),Q=o(de,"UL",{});var We=r(Q);ie=o(We,"LI",{});var Yt=r(ie);Bt=c(Yt,"single sequence: "),be=o(Yt,"CODE",{});var ws=r(be);Lt=c(ws,"<s> X </s>"),ws.forEach(s),Yt.forEach(s),xt=l(We),le=o(We,"LI",{});var Zt=r(le);At=c(Zt,"pair of sequences: "),Te=o(Zt,"CODE",{});var vs=r(Te);Rt=c(vs,"<s> A </s></s> B </s>"),vs.forEach(s),Zt.forEach(s),We.forEach(s),de.forEach(s),Dt=l(f),R=o(f,"DIV",{class:!0});var Ue=r(R);_(Y.$$.fragment,Ue),Pt=l(Ue),Ee=o(Ue,"P",{});var bs=r(Ee);jt=c(bs,"Converts a sequence of tokens (string) in a single string."),bs.forEach(s),Ue.forEach(s),It=l(f),D=o(f,"DIV",{class:!0});var He=r(D);_(Z.$$.fragment,He),Nt=l(He),$e=o(He,"P",{});var Ts=r($e);Ct=c(Ts,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does not make use of token type ids, therefore a list of zeros is returned.`),Ts.forEach(s),He.forEach(s),Ot=l(f),P=o(f,"DIV",{class:!0});var Xe=r(P);_(K.$$.fragment,Xe),Ft=l(Xe),G=o(Xe,"P",{});var Je=r(G);Mt=c(Je,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),ye=o(Je,"CODE",{});var Es=r(ye);St=c(Es,"prepare_for_model"),Es.forEach(s),Vt=c(Je," method."),Je.forEach(s),Xe.forEach(s),Wt=l(f),j=o(f,"DIV",{class:!0});var Qe=r(j);_(ee.$$.fragment,Qe),Ut=l(Qe),ze=o(Qe,"P",{});var $s=r(ze);Ht=c($s,"Normalize tokens in a Tweet"),$s.forEach(s),Qe.forEach(s),Xt=l(f),I=o(f,"DIV",{class:!0});var Ye=r(I);_(te.$$.fragment,Ye),Jt=l(Ye),qe=o(Ye,"P",{});var ys=r(qe);Qt=c(ys,"Normalize a raw Tweet"),ys.forEach(s),Ye.forEach(s),f.forEach(s),this.h()},h(){d(E,"name","hf:doc:metadata"),d(E,"content",JSON.stringify(Ds)),d(T,"id","bertweet"),d(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(T,"href","#bertweet"),d(u,"class","relative group"),d(B,"id","overview"),d(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(B,"href","#overview"),d(z,"class","relative group"),d(F,"href","https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf"),d(F,"rel","nofollow"),d(S,"href","https://huggingface.co/dqnguyen"),d(S,"rel","nofollow"),d(V,"href","https://github.com/VinAIResearch/BERTweet"),d(V,"rel","nofollow"),d(x,"id","transformers.BertweetTokenizer"),d(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(x,"href","#transformers.BertweetTokenizer"),d(q,"class","relative group"),d(ae,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(A,"class","docstring"),d(y,"class","docstring"),d(R,"class","docstring"),d(D,"class","docstring"),d(P,"class","docstring"),d(j,"class","docstring"),d(I,"class","docstring"),d(p,"class","docstring")},m(t,m){e(document.head,E),h(t,se,m),h(t,u,m),e(u,T),e(T,pe),k(C,pe,null),e(u,Ke),e(u,me),e(me,Ge),h(t,Be,m),h(t,z,m),e(z,B),e(B,fe),k(O,fe,null),e(z,et),e(z,he),e(he,tt),h(t,Le,m),h(t,L,m),e(L,st),e(L,F),e(F,nt),e(L,ot),h(t,xe,m),h(t,ne,m),e(ne,rt),h(t,Ae,m),h(t,oe,m),e(oe,ue),e(ue,at),h(t,Re,m),h(t,re,m),e(re,it),h(t,De,m),k(M,t,m),h(t,Pe,m),h(t,$,m),e($,lt),e($,S),e(S,ct),e($,dt),e($,V),e(V,pt),e($,mt),h(t,je,m),h(t,q,m),e(q,x),e(x,ge),k(W,ge,null),e(q,ft),e(q,_e),e(_e,ht),h(t,Ie,m),h(t,p,m),k(U,p,null),e(p,ut),e(p,ke),e(ke,gt),e(p,_t),e(p,H),e(H,kt),e(H,ae),e(ae,wt),e(H,vt),e(p,bt),e(p,A),k(X,A,null),e(A,Tt),e(A,we),e(we,Et),e(p,$t),e(p,y),k(J,y,null),e(y,yt),e(y,ve),e(ve,zt),e(y,qt),e(y,Q),e(Q,ie),e(ie,Bt),e(ie,be),e(be,Lt),e(Q,xt),e(Q,le),e(le,At),e(le,Te),e(Te,Rt),e(p,Dt),e(p,R),k(Y,R,null),e(R,Pt),e(R,Ee),e(Ee,jt),e(p,It),e(p,D),k(Z,D,null),e(D,Nt),e(D,$e),e($e,Ct),e(p,Ot),e(p,P),k(K,P,null),e(P,Ft),e(P,G),e(G,Mt),e(G,ye),e(ye,St),e(G,Vt),e(p,Wt),e(p,j),k(ee,j,null),e(j,Ut),e(j,ze),e(ze,Ht),e(p,Xt),e(p,I),k(te,I,null),e(I,Jt),e(I,qe),e(qe,Qt),Ne=!0},p:xs,i(t){Ne||(w(C.$$.fragment,t),w(O.$$.fragment,t),w(M.$$.fragment,t),w(W.$$.fragment,t),w(U.$$.fragment,t),w(X.$$.fragment,t),w(J.$$.fragment,t),w(Y.$$.fragment,t),w(Z.$$.fragment,t),w(K.$$.fragment,t),w(ee.$$.fragment,t),w(te.$$.fragment,t),Ne=!0)},o(t){v(C.$$.fragment,t),v(O.$$.fragment,t),v(M.$$.fragment,t),v(W.$$.fragment,t),v(U.$$.fragment,t),v(X.$$.fragment,t),v(J.$$.fragment,t),v(Y.$$.fragment,t),v(Z.$$.fragment,t),v(K.$$.fragment,t),v(ee.$$.fragment,t),v(te.$$.fragment,t),Ne=!1},d(t){s(E),t&&s(se),t&
&s(u),b(C),t&&s(Be),t&&s(z),b(O),t&&s(Le),t&&s(L),t&&s(xe),t&&s(ne),t&&s(Ae),t&&s(oe),t&&s(Re),t&&s(re),t&&s(De),b(M,t),t&&s(Pe),t&&s($),t&&s(je),t&&s(q),b(W),t&&s(Ie),t&&s(p),b(U),b(X),b(J),b(Y),b(Z),b(K),b(ee),b(te)}}}const Ds={local:"bertweet",sections:[{local:"overview",title:"Overview"},{local:"transformers.BertweetTokenizer",title:"BertweetTokenizer"}],title:"BERTweet"};function Ps(Ze,E,se){let{fw:u}=E;return Ze.$$set=T=>{"fw"in T&&se(0,u=T.fw)},[u]}class Fs extends zs{constructor(E){super();qs(this,E,Ps,Rs,Bs,{fw:0})}}export{Fs as default,Ds as metadata};
9,989
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/bigbird_pegasus.mdx-ffda3939.js
import{S as Hc,i as Rc,s as Kc,e as n,k as c,w as f,t as r,L as Vc,c as s,d as o,m as l,a,x as m,h as i,b as d,J as e,g as p,y as _,q as b,o as v,B as k}from"../../chunks/vendor-b1433968.js";import{T as ws}from"../../chunks/Tip-c3840994.js";import{D as J}from"../../chunks/Docstring-ff504c58.js";import{C as nt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as D}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Jc(U){let h,P,g,y,q;return{c(){h=n("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),y=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){h=s(B,"P",{});var w=a(h);P=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(w,"CODE",{});var F=a(g);y=i(F,"Module"),F.forEach(o),q=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(B,w){p(B,h,w),e(h,P),e(h,g),e(g,y),e(h,q)},d(B){B&&o(h)}}}function Zc(U){let h,P,g,y,q;return{c(){h=n("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),y=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){h=s(B,"P",{});var w=a(h);P=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(w,"CODE",{});var F=a(g);y=i(F,"Module"),F.forEach(o),q=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(B,w){p(B,h,w),e(h,P),e(h,g),e(g,y),e(h,q)},d(B){B&&o(h)}}}function Yc(U){let h,P,g,y,q;return{c(){h=n("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),y=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){h=s(B,"P",{});var w=a(h);P=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(w,"CODE",{});var F=a(g);y=i(F,"Module"),F.forEach(o),q=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(B,w){p(B,h,w),e(h,P),e(h,g),e(g,y),e(h,q)},d(B){B&&o(h)}}}function Xc(U){let h,P,g,y,q;return{c(){h=n("p"),P=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),y=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){h=s(B,"P",{});var w=a(h);P=i(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(w,"CODE",{});var F=a(g);y=i(F,"Module"),F.forEach(o),q=i(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(o)},m(B,w){p(B,h,w),e(h,P),e(h,g),e(g,y),e(h,q)},d(B){B&&o(h)}}}function el(U){let 
h,P,g,y,q,B,w,F,ys,zn,Z,ue,st,Se,Ts,at,Ps,$n,he,qs,Ae,zs,$s,xn,Ao,xs,Cn,Lo,rt,Cs,Fn,No,Fs,En,$,Le,Es,Ne,Ms,Ss,As,j,Ls,it,Ns,Os,dt,js,Is,ct,Qs,Ds,lt,Us,Gs,Ws,ut,Hs,Rs,ht,Ks,Vs,Oe,Js,pt,Zs,Ys,Xs,je,ea,gt,oa,ta,na,Ie,sa,Qe,aa,ra,Mn,pe,ia,De,da,ca,Sn,Y,ge,ft,Ue,la,mt,ua,An,T,Ge,ha,X,pa,Oo,ga,fa,We,ma,_a,ba,ee,va,jo,ka,Ba,Io,wa,ya,Ta,_t,Pa,qa,He,za,bt,vt,kt,Bt,$a,xa,wt,yt,Re,fe,me,Tt,Ke,Ca,Pt,Fa,Ea,qt,Ma,Sa,zt,$t,Ve,_e,be,xt,Je,Aa,Ct,La,Na,Ft,Oa,ja,Et,Mt,Ze,ve,ke,St,Ye,Ia,At,Qa,Da,Lt,Ua,Ln,oe,Be,Nt,Xe,Ga,Ot,Wa,Nn,I,eo,Ha,oo,Ra,Qo,Ka,Va,Ja,to,Za,no,Ya,Xa,er,A,so,or,te,tr,Do,nr,sr,jt,ar,rr,ir,we,dr,It,cr,lr,ao,On,ne,ye,Qt,ro,ur,Dt,hr,jn,Q,io,pr,co,gr,Uo,fr,mr,_r,lo,br,uo,vr,kr,Br,z,ho,wr,se,yr,Go,Tr,Pr,Ut,qr,zr,$r,Te,xr,Gt,Cr,Fr,Wt,Ht,Rt,Kt,Er,Mr,Vt,Jt,Zt,Yt,Sr,Ar,Xt,en,on,tn,Lr,Nr,nn,sn,po,Pe,qe,an,go,Or,rn,jr,Ir,dn,Qr,In,ae,ze,cn,fo,Dr,ln,Ur,Qn,M,mo,Gr,un,Wr,Hr,_o,Rr,Wo,Kr,Vr,Jr,bo,Zr,vo,Yr,Xr,ei,x,ko,oi,re,ti,Ho,ni,si,hn,ai,ri,ii,$e,di,pn,ci,li,Bo,ui,gn,hi,pi,wo,Dn,ie,xe,fn,yo,gi,mn,fi,Un,S,To,mi,de,_i,_n,bi,vi,bn,ki,Bi,wi,Po,yi,Ro,Ti,Pi,qi,qo,zi,zo,$i,xi,Ci,L,$o,Fi,ce,Ei,Ko,Mi,Si,vn,Ai,Li,Ni,Ce,Oi,kn,ji,Ii,xo,Gn,le,Fe,Bn,Co,Qi,wn,Di,Wn,Fo,G,Eo,Ui,yn,Gi,Wi,Mo,Hn;return B=new D({}),Se=new D({}),Ue=new D({}),Ge=new J({props:{name:"class transformers.BigBirdPegasusConfig",anchor:"transformers.BigBirdPegasusConfig",parameters:[{name:"vocab_size",val:" = 96103"},{name:"max_position_embeddings",val:" = 4096"},{name:"encoder_layers",val:" = 16"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 16"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu_new'"},{name:"d_model",val:" = 1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = True"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 2"},{name:"eos_token_id",val:" = 1"},{name:"attention_type",val:" = 'block_sparse'"},{name:"block_size",val:" = 64"},{name:"num_random_blocks",val:" = 3"},{name:"use_bias",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py#L31",parametersDescription:[{anchor:"transformers.BigBirdPegasusConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 96103) &#x2014; Vocabulary size of the BigBirdPegasus model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusModel">BigBirdPegasusModel</a>.`,name:"vocab_size"},{anchor:"transformers.BigBirdPegasusConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimension of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.BigBirdPegasusConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.BigBirdPegasusConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.BigBirdPegasusConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.BigBirdPegasusConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.BigBirdPegasusConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.BigBirdPegasusConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.BigBirdPegasusConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.BigBirdPegasusConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.BigBirdPegasusConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.BigBirdPegasusConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.BigBirdPegasusConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.BigBirdPegasusConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 1024 or 2048 or 4096).`,name:"max_position_embeddings"},{anchor:"transformers.BigBirdPegasusConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.BigBirdPegasusConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.BigBirdPegasusConfig.attention_type",description:`<strong>attention_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;block_sparse&quot;</code>) &#x2014; Whether to use block sparse attention (with n complexity) as introduced in paper or original attention layer (with n^2 complexity) in encoder. 
Possible values are <code>&quot;original_full&quot;</code> and <code>&quot;block_sparse&quot;</code>.`,name:"attention_type"},{anchor:"transformers.BigBirdPegasusConfig.use_bias",description:`<strong>use_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bias in query, key, value.`,name:"use_bias"},{anchor:"transformers.BigBirdPegasusConfig.block_size",description:`<strong>block_size</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Size of each block. Useful only when <code>attention_type == &quot;block_sparse&quot;</code>.`,name:"block_size"},{anchor:"transformers.BigBirdPegasusConfig.num_random_blocks",description:`<strong>num_random_blocks</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Each query is going to attend these many number of random blocks. Useful only when <code>attention_type == &quot;block_sparse&quot;</code>.`,name:"num_random_blocks"},{anchor:"transformers.BigBirdPegasusConfig.scale_embeddings",description:`<strong>scale_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to rescale embeddings with (hidden_size ** 0.5).`,name:"scale_embeddings"}]}}),He=new nt({props:{code:",",highlighted:""}}),Ke=new D({}),Je=new D({}),Ye=new D({}),Xe=new D({}),eo=new J({props:{name:"class transformers.BigBirdPegasusModel",anchor:"transformers.BigBirdPegasusModel",parameters:[{name:"config",val:": BigBirdPegasusConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2316",parametersDescription:[{anchor:"transformers.BigBirdPegasusModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),so=new J({props:{name:"forward",anchor:"transformers.BigBirdPegasusModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2343",parametersDescription:[{anchor:"transformers.BigBirdPegasusModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. 
See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdPegasusModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdPegasusModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for translation and summarization training. By default, the model will create this tensor by shifting the <code>input_ids</code> to the right, following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BigBirdPegasusModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bigbird_pegasus._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BigBirdPegasusModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BigBirdPegasusModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BigBirdPegasusModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BigBirdPegasusModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BigBirdPegasusModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BigBirdPegasusModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdPegasusModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdPegasusModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig" >BigBirdPegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of 
the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),we=new ws({props:{$$slots:{default:[Jc]},$$scope:{ctx:U}}}),ao=new nt({props:{code:`from transformers import PegasusTokenizer, BigBirdPegasusModel import torch tokenizer = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv') model = BigBirdPegasusModel.from_pretrained('google/bigbird-pegasus-large-arxiv') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, BigBirdPegasusModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdPegasusModel.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ro=new D({}),io=new J({props:{name:"class transformers.BigBirdPegasusForConditionalGeneration",anchor:"transformers.BigBirdPegasusForConditionalGeneration",parameters:[{name:"config",val:": 
BigBirdPegasusConfig"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2444",parametersDescription:[{anchor:"transformers.BigBirdPegasusForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ho=new J({props:{name:"forward",anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2483",parametersDescription:[{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for translation and summarization training. 
By default, the model will create this tensor by shifting the <code>input_ids</code> to the right, following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bigbird_pegasus._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdPegasusForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig" >BigBirdPegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Te=new ws({props:{$$slots:{default:[Zc]},$$scope:{ctx:U}}}),go=new D({}),fo=new D({}),mo=new J({props:{name:"class transformers.BigBirdPegasusForSequenceClassification",anchor:"transformers.BigBirdPegasusForSequenceClassification",parameters:[{name:"config",val:": BigBirdPegasusConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2610",parametersDescription:[{anchor:"transformers.BigBirdPegasusForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ko=new J({props:{name:"forward",anchor:"transformers.BigBirdPegasusForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2623",parametersDescription:[{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for translation and summarization training. By default, the model will create this tensor by shifting the <code>input_ids</code> to the right, following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bigbird_pegasus._prepare_decoder_inputs</code> and modify to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdPegasusForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig" >BigBirdPegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$e=new ws({props:{$$slots:{default:[Yc]},$$scope:{ctx:U}}}),Bo=new nt({props:{code:`from transformers import PegasusTokenizer, BigBirdPegasusForSequenceClassification import torch tokenizer = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv') model = BigBirdPegasusForSequenceClassification.from_pretrained('google/bigbird-pegasus-large-arxiv') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, BigBirdPegasusForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdPegasusForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),wo=new nt({props:{code:`from transformers import PegasusTokenizer, BigBirdPegasusForSequenceClassification import torch tokenizer = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv') model = 
BigBirdPegasusForSequenceClassification.from_pretrained('google/bigbird-pegasus-large-arxiv', problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, BigBirdPegasusForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdPegasusForSequenceClassification.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),yo=new D({}),To=new J({props:{name:"class transformers.BigBirdPegasusForQuestionAnswering",anchor:"transformers.BigBirdPegasusForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2735",parametersDescription:[{anchor:"transformers.BigBirdPegasusForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig">BigBirdPegasusConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$o=new J({props:{name:"forward",anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2747",parametersDescription:[{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for translation and summarization training. By default, the model will create this tensor by shifting the <code>input_ids</code> to the right, following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_bigbird_pegasus._prepare_decoder_inputs</code> and modify to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all \`<code>decoder_input_ids\`\`\` of shape </code>(batch_size, sequence_length)<code>. inputs_embeds (</code>torch.FloatTensor<code>of shape</code>(batch_size, sequence_length, hidden_size)<code>, *optional*): Optionally, instead of passing </code>input_ids<code>you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert</code>input_ids\` indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.BigBirdPegasusForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig" >BigBirdPegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute 
the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ce=new ws({props:{$$slots:{default:[Xc]},$$scope:{ctx:U}}}),xo=new nt({props:{code:`from transformers import PegasusTokenizer, BigBirdPegasusForQuestionAnswering import torch tokenizer = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv') model = BigBirdPegasusForQuestionAnswering.from_pretrained('google/bigbird-pegasus-large-arxiv') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors='pt') start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss start_scores = outputs.start_logits end_scores = outputs.end_logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, BigBirdPegasusForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdPegasusForQuestionAnswering.from_pretrained(<span class="hljs-string">&#x27;google/bigbird-pegasus-large-arxiv&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&#x27;pt&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span 
class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),Co=new D({}),Eo=new J({props:{name:"forward",anchor:"transformers.BigBirdPegasusForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py#L2896",parametersDescription:[{anchor:"transformers.BigBirdPegasusForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/pegasus#transformers.PegasusTokenizer">PegasusTokenizer</a>. See <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BigBirdPegasusForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusConfig" >BigBirdPegasusConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mo=new nt({props:{code:`from transformers import PegasusTokenizer, BigBirdPegasusForCausalLM tokenizer = PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv") model = BigBirdPegasusForCausalLM.from_pretrained("google/bigbird-pegasus-large-arxiv", add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PegasusTokenizer, BigBirdPegasusForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PegasusTokenizer.from_pretrained(<span class="hljs-string">&quot;google/bigbird-pegasus-large-arxiv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BigBirdPegasusForCausalLM.from_pretrained(<span class="hljs-string">&quot;google/bigbird-pegasus-large-arxiv&quot;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){h=n("meta"),P=c(),g=n("h1"),y=n("a"),q=n("span"),f(B.$$.fragment),w=c(),F=n("span"),ys=r("BigBirdPegasus"),zn=c(),Z=n("h2"),ue=n("a"),st=n("span"),f(Se.$$.fragment),Ts=c(),at=n("span"),Ps=r("Overview"),$n=c(),he=n("p"),qs=r("The BigBird model was proposed in "),Ae=n("a"),zs=r("Big Bird: Transformers for Longer Sequences"),$s=r(` by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a sparse-attention based transformer which extends Transformer based models, such as BERT to much longer sequences. In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa.`),xn=c(),Ao=n("p"),xs=r("The abstract from the paper is the following:"),Cn=c(),Lo=n("p"),rt=n("em"),Cs=r(`Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to 8x of what was previously possible using similar hardware. 
As a consequence of the capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.`),Fn=c(),No=n("p"),Fs=r("Tips:"),En=c(),$=n("ul"),Le=n("li"),Es=r("For an in-detail explanation on how BigBird\u2019s attention works, see "),Ne=n("a"),Ms=r("this blog post"),Ss=r("."),As=c(),j=n("li"),Ls=r("BigBird comes with 2 implementations: "),it=n("strong"),Ns=r("original_full"),Os=r(" & "),dt=n("strong"),js=r("block_sparse"),Is=r(`. For the sequence length < 1024, using `),ct=n("strong"),Qs=r("original_full"),Ds=r(" is advised as there is no benefit in using "),lt=n("strong"),Us=r("block_sparse"),Gs=r(" attention."),Ws=c(),ut=n("li"),Hs=r("The code currently uses window size of 3 blocks and 2 global blocks."),Rs=c(),ht=n("li"),Ks=r("Sequence length must be divisible by block size."),Vs=c(),Oe=n("li"),Js=r("Current implementation supports only "),pt=n("strong"),Zs=r("ITC"),Ys=r("."),Xs=c(),je=n("li"),ea=r("Current implementation doesn\u2019t support "),gt=n("strong"),oa=r("num_random_blocks = 0"),ta=r("."),na=c(),Ie=n("li"),sa=r("BigBirdPegasus uses the "),Qe=n("a"),aa=r("PegasusTokenizer"),ra=r("."),Mn=c(),pe=n("p"),ia=r("The original code can be found "),De=n("a"),da=r("here"),ca=r("."),Sn=c(),Y=n("h2"),ge=n("a"),ft=n("span"),f(Ue.$$.fragment),la=c(),mt=n("span"),ua=r("BigBirdPegasusConfig"),An=c(),T=n("div"),f(Ge.$$.fragment),ha=c(),X=n("p"),pa=r("This is the configuration class to store the configuration of a "),Oo=n("a"),ga=r("BigBirdPegasusModel"),fa=r(`. It is used to instantiate an BigBirdPegasus model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BigBirdPegasus `),We=n("a"),ma=r("google/bigbird-pegasus-large-arxiv"),_a=r(" architecture."),ba=c(),ee=n("p"),va=r("Configuration objects inherit from "),jo=n("a"),ka=r("PretrainedConfig"),Ba=r(` and can be used to control the model outputs. Read the documentation from `),Io=n("a"),wa=r("PretrainedConfig"),ya=r(" for more information."),Ta=c(),_t=n("p"),Pa=r("Example:"),qa=c(),f(He.$$.fragment),za=c(),bt=n("blockquote"),vt=n("blockquote"),kt=n("blockquote"),Bt=n("p"),$a=r("from transformers import BigBirdPegasusModel, BigBirdPegasusConfig"),xa=c(),wt=n("blockquote"),yt=n("blockquote"),Re=n("blockquote"),fe=n("h1"),me=n("a"),Tt=n("span"),f(Ke.$$.fragment),Ca=c(),Pt=n("span"),Fa=r("Initializing a BigBirdPegasus bigbird-pegasus-base style configuration"),Ea=c(),qt=n("p"),Ma=r("configuration = BigBirdPegasusConfig()"),Sa=c(),zt=n("blockquote"),$t=n("blockquote"),Ve=n("blockquote"),_e=n("h1"),be=n("a"),xt=n("span"),f(Je.$$.fragment),Aa=c(),Ct=n("span"),La=r("Initializing a model from the bigbird-pegasus-base style configuration"),Na=c(),Ft=n("p"),Oa=r("model = BigBirdPegasusModel(configuration)"),ja=c(),Et=n("blockquote"),Mt=n("blockquote"),Ze=n("blockquote"),ve=n("h1"),ke=n("a"),St=n("span"),f(Ye.$$.fragment),Ia=c(),At=n("span"),Qa=r("Accessing the model configuration"),Da=c(),Lt=n("p"),Ua=r("configuration = model.config"),Ln=c(),oe=n("h2"),Be=n("a"),Nt=n("span"),f(Xe.$$.fragment),Ga=c(),Ot=n("span"),Wa=r("BigBirdPegasusModel"),Nn=c(),I=n("div"),f(eo.$$.fragment),Ha=c(),oo=n("p"),Ra=r(`The bare BigBirdPegasus Model outputting raw hidden-states without any specific head on top. This model inherits from `),Qo=n("a"),Ka=r("PreTrainedModel"),Va=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),Ja=c(),to=n("p"),Za=r("This model is also a PyTorch "),no=n("a"),Ya=r("torch.nn.Module"),Xa=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),er=c(),A=n("div"),f(so.$$.fragment),or=c(),te=n("p"),tr=r("The "),Do=n("a"),nr=r("BigBirdPegasusModel"),sr=r(" forward method, overrides the "),jt=n("code"),ar=r("__call__"),rr=r(" special method."),ir=c(),f(we.$$.fragment),dr=c(),It=n("p"),cr=r("Example:"),lr=c(),f(ao.$$.fragment),On=c(),ne=n("h2"),ye=n("a"),Qt=n("span"),f(ro.$$.fragment),ur=c(),Dt=n("span"),hr=r("BigBirdPegasusForConditionalGeneration"),jn=c(),Q=n("div"),f(io.$$.fragment),pr=c(),co=n("p"),gr=r(`The BigBirdPegasus Model with a language modeling head. Can be used for summarization. This model inherits from `),Uo=n("a"),fr=r("PreTrainedModel"),mr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),_r=c(),lo=n("p"),br=r("This model is also a PyTorch "),uo=n("a"),vr=r("torch.nn.Module"),kr=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Br=c(),z=n("div"),f(ho.$$.fragment),wr=c(),se=n("p"),yr=r("The "),Go=n("a"),Tr=r("BigBirdPegasusForConditionalGeneration"),Pr=r(" forward method, overrides the "),Ut=n("code"),qr=r("__call__"),zr=r(" special method."),$r=c(),f(Te.$$.fragment),xr=c(),Gt=n("p"),Cr=r("Summarization example::"),Fr=c(),Wt=n("blockquote"),Ht=n("blockquote"),Rt=n("blockquote"),Kt=n("p"),Er=r("from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration, BigBirdPegasusConfig"),Mr=c(),Vt=n("blockquote"),Jt=n("blockquote"),Zt=n("blockquote"),Yt=n("p"),Sr=r(`model = BigBirdPegasusForConditionalGeneration.from_pretrained(\u2018google/bigbird-pegasus-large-arxiv\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/bigbird-pegasus-large-arxiv\u2019)`),Ar=c(),Xt=n("blockquote"),en=n("blockquote"),on=n("blockquote"),tn=n("p"),Lr=r(`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors=\u2018pt\u2019, truncation=True)`),Nr=c(),nn=n("blockquote"),sn=n("blockquote"),po=n("blockquote"),Pe=n("h1"),qe=n("a"),an=n("span"),f(go.$$.fragment),Or=c(),rn=n("span"),jr=r("Generate Summary"),Ir=c(),dn=n("p"),Qr=r(`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),In=c(),ae=n("h2"),ze=n("a"),cn=n("span"),f(fo.$$.fragment),Dr=c(),ln=n("span"),Ur=r("BigBirdPegasusForSequenceClassification"),Qn=c(),M=n("div"),f(mo.$$.fragment),Gr=c(),un=n("p"),Wr=r(`BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Hr=c(),_o=n("p"),Rr=r("This model inherits from "),Wo=n("a"),Kr=r("PreTrainedModel"),Vr=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),Jr=c(),bo=n("p"),Zr=r("This model is also a PyTorch "),vo=n("a"),Yr=r("torch.nn.Module"),Xr=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ei=c(),x=n("div"),f(ko.$$.fragment),oi=c(),re=n("p"),ti=r("The "),Ho=n("a"),ni=r("BigBirdPegasusForSequenceClassification"),si=r(" forward method, overrides the "),hn=n("code"),ai=r("__call__"),ri=r(" special method."),ii=c(),f($e.$$.fragment),di=c(),pn=n("p"),ci=r("Example of single-label classification:"),li=c(),f(Bo.$$.fragment),ui=c(),gn=n("p"),hi=r("Example of multi-label classification:"),pi=c(),f(wo.$$.fragment),Dn=c(),ie=n("h2"),xe=n("a"),fn=n("span"),f(yo.$$.fragment),gi=c(),mn=n("span"),fi=r("BigBirdPegasusForQuestionAnswering"),Un=c(),S=n("div"),f(To.$$.fragment),mi=c(),de=n("p"),_i=r(`BigBirdPegasus Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),_n=n("code"),bi=r("span start logits"),vi=r(" and "),bn=n("code"),ki=r("span end logits"),Bi=r(")."),wi=c(),Po=n("p"),yi=r("This model inherits from "),Ro=n("a"),Ti=r("PreTrainedModel"),Pi=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),qi=c(),qo=n("p"),zi=r("This model is also a PyTorch "),zo=n("a"),$i=r("torch.nn.Module"),xi=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ci=c(),L=n("div"),f($o.$$.fragment),Fi=c(),ce=n("p"),Ei=r("The "),Ko=n("a"),Mi=r("BigBirdPegasusForQuestionAnswering"),Si=r(" forward method, overrides the "),vn=n("code"),Ai=r("__call__"),Li=r(" special method."),Ni=c(),f(Ce.$$.fragment),Oi=c(),kn=n("p"),ji=r("Example:"),Ii=c(),f(xo.$$.fragment),Gn=c(),le=n("h2"),Fe=n("a"),Bn=n("span"),f(Co.$$.fragment),Qi=c(),wn=n("span"),Di=r("BigBirdPegasusForCausalLM"),Wn=c(),Fo=n("div"),G=n("div"),f(Eo.$$.fragment),Ui=c(),yn=n("p"),Gi=r("Example:"),Wi=c(),f(Mo.$$.fragment),this.h()},l(t){const u=Vc('[data-svelte="svelte-1phssyn"]',document.head);h=s(u,"META",{name:!0,content:!0}),u.forEach(o),P=l(t),g=s(t,"H1",{class:!0});var So=a(g);y=s(So,"A",{id:!0,class:!0,href:!0});var Tn=a(y);q=s(Tn,"SPAN",{});var Pn=a(q);m(B.$$.fragment,Pn),Pn.forEach(o),Tn.forEach(o),w=l(So),F=s(So,"SPAN",{});var qn=a(F);ys=i(qn,"BigBirdPegasus"),qn.forEach(o),So.forEach(o),zn=l(t),Z=s(t,"H2",{class:!0});var Rn=a(Z);ue=s(Rn,"A",{id:!0,class:!0,href:!0});var Hi=a(ue);st=s(Hi,"SPAN",{});var Ri=a(st);m(Se.$$.fragment,Ri),Ri.forEach(o),Hi.forEach(o),Ts=l(Rn),at=s(Rn,"SPAN",{});var Ki=a(at);Ps=i(Ki,"Overview"),Ki.forEach(o),Rn.forEach(o),$n=l(t),he=s(t,"P",{});var Kn=a(he);qs=i(Kn,"The BigBird model was proposed in "),Ae=s(Kn,"A",{href:!0,rel:!0});var Vi=a(Ae);zs=i(Vi,"Big Bird: Transformers for Longer Sequences"),Vi.forEach(o),$s=i(Kn,` by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a sparse-attention based transformer which extends Transformer based models, such as BERT to much longer sequences. 
In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa.`),Kn.forEach(o),xn=l(t),Ao=s(t,"P",{});var Ji=a(Ao);xs=i(Ji,"The abstract from the paper is the following:"),Ji.forEach(o),Cn=l(t),Lo=s(t,"P",{});var Zi=a(Lo);rt=s(Zi,"EM",{});var Yi=a(rt);Cs=i(Yi,`Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to 8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.`),Yi.forEach(o),Zi.forEach(o),Fn=l(t),No=s(t,"P",{});var Xi=a(No);Fs=i(Xi,"Tips:"),Xi.forEach(o),En=l(t),$=s(t,"UL",{});var N=a($);Le=s(N,"LI",{});var Vn=a(Le);Es=i(Vn,"For an in-detail explanation on how BigBird\u2019s attention works, see "),Ne=s(Vn,"A",{href:!0,rel:!0});var ed=a(Ne);Ms=i(ed,"this blog post"),ed.forEach(o),Ss=i(Vn,"."),Vn.forEach(o),As=l(N),j=s(N,"LI",{});var W=a(j);Ls=i(W,"BigBird comes with 2 implementations: "),it=s(W,"STRONG",{});var od=a(it);Ns=i(od,"original_full"),od.forEach(o),Os=i(W," & "),dt=s(W,"STRONG",{});var td=a(dt);js=i(td,"block_sparse"),td.forEach(o),Is=i(W,`. 
For the sequence length < 1024, using `),ct=s(W,"STRONG",{});var nd=a(ct);Qs=i(nd,"original_full"),nd.forEach(o),Ds=i(W," is advised as there is no benefit in using "),lt=s(W,"STRONG",{});var sd=a(lt);Us=i(sd,"block_sparse"),sd.forEach(o),Gs=i(W," attention."),W.forEach(o),Ws=l(N),ut=s(N,"LI",{});var ad=a(ut);Hs=i(ad,"The code currently uses window size of 3 blocks and 2 global blocks."),ad.forEach(o),Rs=l(N),ht=s(N,"LI",{});var rd=a(ht);Ks=i(rd,"Sequence length must be divisible by block size."),rd.forEach(o),Vs=l(N),Oe=s(N,"LI",{});var Jn=a(Oe);Js=i(Jn,"Current implementation supports only "),pt=s(Jn,"STRONG",{});var id=a(pt);Zs=i(id,"ITC"),id.forEach(o),Ys=i(Jn,"."),Jn.forEach(o),Xs=l(N),je=s(N,"LI",{});var Zn=a(je);ea=i(Zn,"Current implementation doesn\u2019t support "),gt=s(Zn,"STRONG",{});var dd=a(gt);oa=i(dd,"num_random_blocks = 0"),dd.forEach(o),ta=i(Zn,"."),Zn.forEach(o),na=l(N),Ie=s(N,"LI",{});var Yn=a(Ie);sa=i(Yn,"BigBirdPegasus uses the "),Qe=s(Yn,"A",{href:!0,rel:!0});var cd=a(Qe);aa=i(cd,"PegasusTokenizer"),cd.forEach(o),ra=i(Yn,"."),Yn.forEach(o),N.forEach(o),Mn=l(t),pe=s(t,"P",{});var Xn=a(pe);ia=i(Xn,"The original code can be found "),De=s(Xn,"A",{href:!0,rel:!0});var ld=a(De);da=i(ld,"here"),ld.forEach(o),ca=i(Xn,"."),Xn.forEach(o),Sn=l(t),Y=s(t,"H2",{class:!0});var es=a(Y);ge=s(es,"A",{id:!0,class:!0,href:!0});var ud=a(ge);ft=s(ud,"SPAN",{});var hd=a(ft);m(Ue.$$.fragment,hd),hd.forEach(o),ud.forEach(o),la=l(es),mt=s(es,"SPAN",{});var pd=a(mt);ua=i(pd,"BigBirdPegasusConfig"),pd.forEach(o),es.forEach(o),An=l(t),T=s(t,"DIV",{class:!0});var C=a(T);m(Ge.$$.fragment,C),ha=l(C),X=s(C,"P",{});var Vo=a(X);pa=i(Vo,"This is the configuration class to store the configuration of a "),Oo=s(Vo,"A",{href:!0});var gd=a(Oo);ga=i(gd,"BigBirdPegasusModel"),gd.forEach(o),fa=i(Vo,`. It is used to instantiate an BigBirdPegasus model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BigBirdPegasus `),We=s(Vo,"A",{href:!0,rel:!0});var fd=a(We);ma=i(fd,"google/bigbird-pegasus-large-arxiv"),fd.forEach(o),_a=i(Vo," architecture."),Vo.forEach(o),ba=l(C),ee=s(C,"P",{});var Jo=a(ee);va=i(Jo,"Configuration objects inherit from "),jo=s(Jo,"A",{href:!0});var md=a(jo);ka=i(md,"PretrainedConfig"),md.forEach(o),Ba=i(Jo,` and can be used to control the model outputs. 
Read the documentation from `),Io=s(Jo,"A",{href:!0});var _d=a(Io);wa=i(_d,"PretrainedConfig"),_d.forEach(o),ya=i(Jo," for more information."),Jo.forEach(o),Ta=l(C),_t=s(C,"P",{});var bd=a(_t);Pa=i(bd,"Example:"),bd.forEach(o),qa=l(C),m(He.$$.fragment,C),za=l(C),bt=s(C,"BLOCKQUOTE",{});var vd=a(bt);vt=s(vd,"BLOCKQUOTE",{});var kd=a(vt);kt=s(kd,"BLOCKQUOTE",{});var Bd=a(kt);Bt=s(Bd,"P",{});var wd=a(Bt);$a=i(wd,"from transformers import BigBirdPegasusModel, BigBirdPegasusConfig"),wd.forEach(o),Bd.forEach(o),kd.forEach(o),vd.forEach(o),xa=l(C),wt=s(C,"BLOCKQUOTE",{});var yd=a(wt);yt=s(yd,"BLOCKQUOTE",{});var Td=a(yt);Re=s(Td,"BLOCKQUOTE",{});var os=a(Re);fe=s(os,"H1",{class:!0});var ts=a(fe);me=s(ts,"A",{id:!0,class:!0,href:!0});var Pd=a(me);Tt=s(Pd,"SPAN",{});var qd=a(Tt);m(Ke.$$.fragment,qd),qd.forEach(o),Pd.forEach(o),Ca=l(ts),Pt=s(ts,"SPAN",{});var zd=a(Pt);Fa=i(zd,"Initializing a BigBirdPegasus bigbird-pegasus-base style configuration"),zd.forEach(o),ts.forEach(o),Ea=l(os),qt=s(os,"P",{});var $d=a(qt);Ma=i($d,"configuration = BigBirdPegasusConfig()"),$d.forEach(o),os.forEach(o),Td.forEach(o),yd.forEach(o),Sa=l(C),zt=s(C,"BLOCKQUOTE",{});var xd=a(zt);$t=s(xd,"BLOCKQUOTE",{});var Cd=a($t);Ve=s(Cd,"BLOCKQUOTE",{});var ns=a(Ve);_e=s(ns,"H1",{class:!0});var ss=a(_e);be=s(ss,"A",{id:!0,class:!0,href:!0});var Fd=a(be);xt=s(Fd,"SPAN",{});var Ed=a(xt);m(Je.$$.fragment,Ed),Ed.forEach(o),Fd.forEach(o),Aa=l(ss),Ct=s(ss,"SPAN",{});var Md=a(Ct);La=i(Md,"Initializing a model from the bigbird-pegasus-base style configuration"),Md.forEach(o),ss.forEach(o),Na=l(ns),Ft=s(ns,"P",{});var Sd=a(Ft);Oa=i(Sd,"model = BigBirdPegasusModel(configuration)"),Sd.forEach(o),ns.forEach(o),Cd.forEach(o),xd.forEach(o),ja=l(C),Et=s(C,"BLOCKQUOTE",{});var Ad=a(Et);Mt=s(Ad,"BLOCKQUOTE",{});var Ld=a(Mt);Ze=s(Ld,"BLOCKQUOTE",{});var as=a(Ze);ve=s(as,"H1",{class:!0});var rs=a(ve);ke=s(rs,"A",{id:!0,class:!0,href:!0});var Nd=a(ke);St=s(Nd,"SPAN",{});var Od=a(St);m(Ye.$$.fragment,Od),Od.forEach(o),Nd.forEach(o),Ia=l(rs),At=s(rs,"SPAN",{});var jd=a(At);Qa=i(jd,"Accessing the model configuration"),jd.forEach(o),rs.forEach(o),Da=l(as),Lt=s(as,"P",{});var Id=a(Lt);Ua=i(Id,"configuration = model.config"),Id.forEach(o),as.forEach(o),Ld.forEach(o),Ad.forEach(o),C.forEach(o),Ln=l(t),oe=s(t,"H2",{class:!0});var is=a(oe);Be=s(is,"A",{id:!0,class:!0,href:!0});var Qd=a(Be);Nt=s(Qd,"SPAN",{});var Dd=a(Nt);m(Xe.$$.fragment,Dd),Dd.forEach(o),Qd.forEach(o),Ga=l(is),Ot=s(is,"SPAN",{});var Ud=a(Ot);Wa=i(Ud,"BigBirdPegasusModel"),Ud.forEach(o),is.forEach(o),Nn=l(t),I=s(t,"DIV",{class:!0});var Ee=a(I);m(eo.$$.fragment,Ee),Ha=l(Ee),oo=s(Ee,"P",{});var ds=a(oo);Ra=i(ds,`The bare BigBirdPegasus Model outputting raw hidden-states without any specific head on top. This model inherits from `),Qo=s(ds,"A",{href:!0});var Gd=a(Qo);Ka=i(Gd,"PreTrainedModel"),Gd.forEach(o),Va=i(ds,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),ds.forEach(o),Ja=l(Ee),to=s(Ee,"P",{});var cs=a(to);Za=i(cs,"This model is also a PyTorch "),no=s(cs,"A",{href:!0,rel:!0});var Wd=a(no);Ya=i(Wd,"torch.nn.Module"),Wd.forEach(o),Xa=i(cs,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cs.forEach(o),er=l(Ee),A=s(Ee,"DIV",{class:!0});var H=a(A);m(so.$$.fragment,H),or=l(H),te=s(H,"P",{});var Zo=a(te);tr=i(Zo,"The "),Do=s(Zo,"A",{href:!0});var Hd=a(Do);nr=i(Hd,"BigBirdPegasusModel"),Hd.forEach(o),sr=i(Zo," forward method, overrides the "),jt=s(Zo,"CODE",{});var Rd=a(jt);ar=i(Rd,"__call__"),Rd.forEach(o),rr=i(Zo," special method."),Zo.forEach(o),ir=l(H),m(we.$$.fragment,H),dr=l(H),It=s(H,"P",{});var Kd=a(It);cr=i(Kd,"Example:"),Kd.forEach(o),lr=l(H),m(ao.$$.fragment,H),H.forEach(o),Ee.forEach(o),On=l(t),ne=s(t,"H2",{class:!0});var ls=a(ne);ye=s(ls,"A",{id:!0,class:!0,href:!0});var Vd=a(ye);Qt=s(Vd,"SPAN",{});var Jd=a(Qt);m(ro.$$.fragment,Jd),Jd.forEach(o),Vd.forEach(o),ur=l(ls),Dt=s(ls,"SPAN",{});var Zd=a(Dt);hr=i(Zd,"BigBirdPegasusForConditionalGeneration"),Zd.forEach(o),ls.forEach(o),jn=l(t),Q=s(t,"DIV",{class:!0});var Me=a(Q);m(io.$$.fragment,Me),pr=l(Me),co=s(Me,"P",{});var us=a(co);gr=i(us,`The BigBirdPegasus Model with a language modeling head. Can be used for summarization. This model inherits from `),Uo=s(us,"A",{href:!0});var Yd=a(Uo);fr=i(Yd,"PreTrainedModel"),Yd.forEach(o),mr=i(us,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),us.forEach(o),_r=l(Me),lo=s(Me,"P",{});var hs=a(lo);br=i(hs,"This model is also a PyTorch "),uo=s(hs,"A",{href:!0,rel:!0});var Xd=a(uo);vr=i(Xd,"torch.nn.Module"),Xd.forEach(o),kr=i(hs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hs.forEach(o),Br=l(Me),z=s(Me,"DIV",{class:!0});var E=a(z);m(ho.$$.fragment,E),wr=l(E),se=s(E,"P",{});var Yo=a(se);yr=i(Yo,"The "),Go=s(Yo,"A",{href:!0});var ec=a(Go);Tr=i(ec,"BigBirdPegasusForConditionalGeneration"),ec.forEach(o),Pr=i(Yo," forward method, overrides the "),Ut=s(Yo,"CODE",{});var oc=a(Ut);qr=i(oc,"__call__"),oc.forEach(o),zr=i(Yo," special method."),Yo.forEach(o),$r=l(E),m(Te.$$.fragment,E),xr=l(E),Gt=s(E,"P",{});var tc=a(Gt);Cr=i(tc,"Summarization example::"),tc.forEach(o),Fr=l(E),Wt=s(E,"BLOCKQUOTE",{});var nc=a(Wt);Ht=s(nc,"BLOCKQUOTE",{});var sc=a(Ht);Rt=s(sc,"BLOCKQUOTE",{});var ac=a(Rt);Kt=s(ac,"P",{});var rc=a(Kt);Er=i(rc,"from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration, BigBirdPegasusConfig"),rc.forEach(o),ac.forEach(o),sc.forEach(o),nc.forEach(o),Mr=l(E),Vt=s(E,"BLOCKQUOTE",{});var ic=a(Vt);Jt=s(ic,"BLOCKQUOTE",{});var dc=a(Jt);Zt=s(dc,"BLOCKQUOTE",{});var cc=a(Zt);Yt=s(cc,"P",{});var lc=a(Yt);Sr=i(lc,`model = BigBirdPegasusForConditionalGeneration.from_pretrained(\u2018google/bigbird-pegasus-large-arxiv\u2019) tokenizer = PegasusTokenizer.from_pretrained(\u2018google/bigbird-pegasus-large-arxiv\u2019)`),lc.forEach(o),cc.forEach(o),dc.forEach(o),ic.forEach(o),Ar=l(E),Xt=s(E,"BLOCKQUOTE",{});var uc=a(Xt);en=s(uc,"BLOCKQUOTE",{});var hc=a(en);on=s(hc,"BLOCKQUOTE",{});var pc=a(on);tn=s(pc,"P",{});var gc=a(tn);Lr=i(gc,`ARTICLE_TO_SUMMARIZE = \u201CMy friends are cool but they eat too many carbs.\u201D inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors=\u2018pt\u2019, truncation=True)`),gc.forEach(o),pc.forEach(o),hc.forEach(o),uc.forEach(o),Nr=l(E),nn=s(E,"BLOCKQUOTE",{});var fc=a(nn);sn=s(fc,"BLOCKQUOTE",{});var mc=a(sn);po=s(mc,"BLOCKQUOTE",{});var ps=a(po);Pe=s(ps,"H1",{class:!0});var 
gs=a(Pe);qe=s(gs,"A",{id:!0,class:!0,href:!0});var _c=a(qe);an=s(_c,"SPAN",{});var bc=a(an);m(go.$$.fragment,bc),bc.forEach(o),_c.forEach(o),Or=l(gs),rn=s(gs,"SPAN",{});var vc=a(rn);jr=i(vc,"Generate Summary"),vc.forEach(o),gs.forEach(o),Ir=l(ps),dn=s(ps,"P",{});var kc=a(dn);Qr=i(kc,`summary_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5, early_stopping=True) print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])`),kc.forEach(o),ps.forEach(o),mc.forEach(o),fc.forEach(o),E.forEach(o),Me.forEach(o),In=l(t),ae=s(t,"H2",{class:!0});var fs=a(ae);ze=s(fs,"A",{id:!0,class:!0,href:!0});var Bc=a(ze);cn=s(Bc,"SPAN",{});var wc=a(cn);m(fo.$$.fragment,wc),wc.forEach(o),Bc.forEach(o),Dr=l(fs),ln=s(fs,"SPAN",{});var yc=a(ln);Ur=i(yc,"BigBirdPegasusForSequenceClassification"),yc.forEach(o),fs.forEach(o),Qn=l(t),M=s(t,"DIV",{class:!0});var R=a(M);m(mo.$$.fragment,R),Gr=l(R),un=s(R,"P",{});var Tc=a(un);Wr=i(Tc,`BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Tc.forEach(o),Hr=l(R),_o=s(R,"P",{});var ms=a(_o);Rr=i(ms,"This model inherits from "),Wo=s(ms,"A",{href:!0});var Pc=a(Wo);Kr=i(Pc,"PreTrainedModel"),Pc.forEach(o),Vr=i(ms,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),ms.forEach(o),Jr=l(R),bo=s(R,"P",{});var _s=a(bo);Zr=i(_s,"This model is also a PyTorch "),vo=s(_s,"A",{href:!0,rel:!0});var qc=a(vo);Yr=i(qc,"torch.nn.Module"),qc.forEach(o),Xr=i(_s,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_s.forEach(o),ei=l(R),x=s(R,"DIV",{class:!0});var O=a(x);m(ko.$$.fragment,O),oi=l(O),re=s(O,"P",{});var Xo=a(re);ti=i(Xo,"The "),Ho=s(Xo,"A",{href:!0});var zc=a(Ho);ni=i(zc,"BigBirdPegasusForSequenceClassification"),zc.forEach(o),si=i(Xo," forward method, overrides the "),hn=s(Xo,"CODE",{});var $c=a(hn);ai=i($c,"__call__"),$c.forEach(o),ri=i(Xo," special method."),Xo.forEach(o),ii=l(O),m($e.$$.fragment,O),di=l(O),pn=s(O,"P",{});var xc=a(pn);ci=i(xc,"Example of single-label classification:"),xc.forEach(o),li=l(O),m(Bo.$$.fragment,O),ui=l(O),gn=s(O,"P",{});var Cc=a(gn);hi=i(Cc,"Example of multi-label classification:"),Cc.forEach(o),pi=l(O),m(wo.$$.fragment,O),O.forEach(o),R.forEach(o),Dn=l(t),ie=s(t,"H2",{class:!0});var bs=a(ie);xe=s(bs,"A",{id:!0,class:!0,href:!0});var Fc=a(xe);fn=s(Fc,"SPAN",{});var Ec=a(fn);m(yo.$$.fragment,Ec),Ec.forEach(o),Fc.forEach(o),gi=l(bs),mn=s(bs,"SPAN",{});var Mc=a(mn);fi=i(Mc,"BigBirdPegasusForQuestionAnswering"),Mc.forEach(o),bs.forEach(o),Un=l(t),S=s(t,"DIV",{class:!0});var K=a(S);m(To.$$.fragment,K),mi=l(K),de=s(K,"P",{});var et=a(de);_i=i(et,`BigBirdPegasus Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `),_n=s(et,"CODE",{});var Sc=a(_n);bi=i(Sc,"span start logits"),Sc.forEach(o),vi=i(et," and "),bn=s(et,"CODE",{});var Ac=a(bn);ki=i(Ac,"span end logits"),Ac.forEach(o),Bi=i(et,")."),et.forEach(o),wi=l(K),Po=s(K,"P",{});var vs=a(Po);yi=i(vs,"This model inherits from "),Ro=s(vs,"A",{href:!0});var Lc=a(Ro);Ti=i(Lc,"PreTrainedModel"),Lc.forEach(o),Pi=i(vs,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)`),vs.forEach(o),qi=l(K),qo=s(K,"P",{});var ks=a(qo);zi=i(ks,"This model is also a PyTorch "),zo=s(ks,"A",{href:!0,rel:!0});var Nc=a(zo);$i=i(Nc,"torch.nn.Module"),Nc.forEach(o),xi=i(ks,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ks.forEach(o),Ci=l(K),L=s(K,"DIV",{class:!0});var V=a(L);m($o.$$.fragment,V),Fi=l(V),ce=s(V,"P",{});var ot=a(ce);Ei=i(ot,"The "),Ko=s(ot,"A",{href:!0});var Oc=a(Ko);Mi=i(Oc,"BigBirdPegasusForQuestionAnswering"),Oc.forEach(o),Si=i(ot," forward method, overrides the "),vn=s(ot,"CODE",{});var jc=a(vn);Ai=i(jc,"__call__"),jc.forEach(o),Li=i(ot," special method."),ot.forEach(o),Ni=l(V),m(Ce.$$.fragment,V),Oi=l(V),kn=s(V,"P",{});var Ic=a(kn);ji=i(Ic,"Example:"),Ic.forEach(o),Ii=l(V),m(xo.$$.fragment,V),V.forEach(o),K.forEach(o),Gn=l(t),le=s(t,"H2",{class:!0});var Bs=a(le);Fe=s(Bs,"A",{id:!0,class:!0,href:!0});var Qc=a(Fe);Bn=s(Qc,"SPAN",{});var Dc=a(Bn);m(Co.$$.fragment,Dc),Dc.forEach(o),Qc.forEach(o),Qi=l(Bs),wn=s(Bs,"SPAN",{});var Uc=a(wn);Di=i(Uc,"BigBirdPegasusForCausalLM"),Uc.forEach(o),Bs.forEach(o),Wn=l(t),Fo=s(t,"DIV",{class:!0});var Gc=a(Fo);G=s(Gc,"DIV",{class:!0});var tt=a(G);m(Eo.$$.fragment,tt),Ui=l(tt),yn=s(tt,"P",{});var Wc=a(yn);Gi=i(Wc,"Example:"),Wc.forEach(o),Wi=l(tt),m(Mo.$$.fragment,tt),tt.forEach(o),Gc.forEach(o),this.h()},h(){d(h,"name","hf:doc:metadata"),d(h,"content",JSON.stringify(ol)),d(y,"id","bigbirdpegasus"),d(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(y,"href","#bigbirdpegasus"),d(g,"class","relative group"),d(ue,"id","overview"),d(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ue,"href","#overview"),d(Z,"class","relative group"),d(Ae,"href","https://arxiv.org/abs/2007.14062"),d(Ae,"rel","nofollow"),d(Ne,"href","https://huggingface.co/blog/big-bird"),d(Ne,"rel","nofollow"),d(Qe,"href","https://github.com/huggingface/transformers/blob/master/src/transformers/models/pegasus/tokenization_pegasus.py"),d(Qe,"rel","nofollow"),d(De,"href","https://github.com/google-research/bigbird"),d(De,"rel","nofollow"),d(ge,"id","transformers.BigBirdPegasusConfig"),d(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ge,"href","#transformers.BigBirdPegasusConfig"),d(Y,"class","relative group"),d(Oo,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusModel"),d(We,"href","https://huggingface.co/google/bigbird-pegasus-large-arxiv"),d(We,"rel","nofollow"),d(jo,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(Io,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),d(me,"id","initializing-a-bigbirdpegasus-bigbird-pegasus-base-style-configuration"),d(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(me,"href","#initializing-a-bigbirdpegasus-bigbird-pegasus-base-style-configuration"),d(fe,"class","relative group"),d(be,"id","initializing-a-model-from-the-bigbird-pegasus-base-style-configuration"),d(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(be,"href","#initializing-a-model-from-the-bigbird-pegasus-base-style-configuration"),d(_e,"class","relative group"),d(ke,"id","accessing-the-model-configuration"),d(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ke,"href","#accessing-the-model-configuration"),d(ve,"class","relative group"),d(T,"class","docstring"),d(Be,"id","transformers.BigBirdPegasusModel"),d(Be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Be,"href","#transformers.BigBirdPegasusModel"),d(oe,"class","relative group"),d(Qo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(no,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(no,"rel","nofollow"),d(Do,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusModel"),d(A,"class","docstring"),d(I,"class","docstring"),d(ye,"id","transformers.BigBirdPegasusForConditionalGeneration"),d(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ye,"href","#transformers.BigBirdPegasusForConditionalGeneration"),d(ne,"class","relative group"),d(Uo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(uo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(uo,"rel","nofollow"),d(Go,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForConditionalGeneration"),d(qe,"id","generate-summary"),d(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(qe,"href","#generate-summary"),d(Pe,"class","relative group"),d(z,"class","docstring"),d(Q,"class","docstring"),d(ze,"id","transformers.BigBirdPegasusForSequenceClassification"),d(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ze,"href","#transformers.BigBirdPegasusForSequenceClassification"),d(ae,"class","relative group"),d(Wo,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(vo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(vo,"rel","nofollow"),d(Ho,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForSequenceClassification"),d(x,"class","docstring"),d(M,"class","docstring"),d(xe,"id","transformers.BigBirdPegasusForQuestionAnswering"),d(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(xe,"href","#transformers.BigBirdPegasusForQuestionAnswering"),d(ie,"class","relative 
group"),d(Ro,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),d(zo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(zo,"rel","nofollow"),d(Ko,"href","/docs/transformers/v4.15.0/en/model_doc/bigbird_pegasus#transformers.BigBirdPegasusForQuestionAnswering"),d(L,"class","docstring"),d(S,"class","docstring"),d(Fe,"id","transformers.BigBirdPegasusForCausalLM"),d(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Fe,"href","#transformers.BigBirdPegasusForCausalLM"),d(le,"class","relative group"),d(G,"class","docstring"),d(Fo,"class","docstring")},m(t,u){e(document.head,h),p(t,P,u),p(t,g,u),e(g,y),e(y,q),_(B,q,null),e(g,w),e(g,F),e(F,ys),p(t,zn,u),p(t,Z,u),e(Z,ue),e(ue,st),_(Se,st,null),e(Z,Ts),e(Z,at),e(at,Ps),p(t,$n,u),p(t,he,u),e(he,qs),e(he,Ae),e(Ae,zs),e(he,$s),p(t,xn,u),p(t,Ao,u),e(Ao,xs),p(t,Cn,u),p(t,Lo,u),e(Lo,rt),e(rt,Cs),p(t,Fn,u),p(t,No,u),e(No,Fs),p(t,En,u),p(t,$,u),e($,Le),e(Le,Es),e(Le,Ne),e(Ne,Ms),e(Le,Ss),e($,As),e($,j),e(j,Ls),e(j,it),e(it,Ns),e(j,Os),e(j,dt),e(dt,js),e(j,Is),e(j,ct),e(ct,Qs),e(j,Ds),e(j,lt),e(lt,Us),e(j,Gs),e($,Ws),e($,ut),e(ut,Hs),e($,Rs),e($,ht),e(ht,Ks),e($,Vs),e($,Oe),e(Oe,Js),e(Oe,pt),e(pt,Zs),e(Oe,Ys),e($,Xs),e($,je),e(je,ea),e(je,gt),e(gt,oa),e(je,ta),e($,na),e($,Ie),e(Ie,sa),e(Ie,Qe),e(Qe,aa),e(Ie,ra),p(t,Mn,u),p(t,pe,u),e(pe,ia),e(pe,De),e(De,da),e(pe,ca),p(t,Sn,u),p(t,Y,u),e(Y,ge),e(ge,ft),_(Ue,ft,null),e(Y,la),e(Y,mt),e(mt,ua),p(t,An,u),p(t,T,u),_(Ge,T,null),e(T,ha),e(T,X),e(X,pa),e(X,Oo),e(Oo,ga),e(X,fa),e(X,We),e(We,ma),e(X,_a),e(T,ba),e(T,ee),e(ee,va),e(ee,jo),e(jo,ka),e(ee,Ba),e(ee,Io),e(Io,wa),e(ee,ya),e(T,Ta),e(T,_t),e(_t,Pa),e(T,qa),_(He,T,null),e(T,za),e(T,bt),e(bt,vt),e(vt,kt),e(kt,Bt),e(Bt,$a),e(T,xa),e(T,wt),e(wt,yt),e(yt,Re),e(Re,fe),e(fe,me),e(me,Tt),_(Ke,Tt,null),e(fe,Ca),e(fe,Pt),e(Pt,Fa),e(Re,Ea),e(Re,qt),e(qt,Ma),e(T,Sa),e(T,zt),e(zt,$t),e($t,Ve),e(Ve,_e),e(_e,be),e(be,xt),_(Je,xt,null),e(_e,Aa),e(_e,Ct),e(Ct,La),e(Ve,Na),e(Ve,Ft),e(Ft,Oa),e(T,ja),e(T,Et),e(Et,Mt),e(Mt,Ze),e(Ze,ve),e(ve,ke),e(ke,St),_(Ye,St,null),e(ve,Ia),e(ve,At),e(At,Qa),e(Ze,Da),e(Ze,Lt),e(Lt,Ua),p(t,Ln,u),p(t,oe,u),e(oe,Be),e(Be,Nt),_(Xe,Nt,null),e(oe,Ga),e(oe,Ot),e(Ot,Wa),p(t,Nn,u),p(t,I,u),_(eo,I,null),e(I,Ha),e(I,oo),e(oo,Ra),e(oo,Qo),e(Qo,Ka),e(oo,Va),e(I,Ja),e(I,to),e(to,Za),e(to,no),e(no,Ya),e(to,Xa),e(I,er),e(I,A),_(so,A,null),e(A,or),e(A,te),e(te,tr),e(te,Do),e(Do,nr),e(te,sr),e(te,jt),e(jt,ar),e(te,rr),e(A,ir),_(we,A,null),e(A,dr),e(A,It),e(It,cr),e(A,lr),_(ao,A,null),p(t,On,u),p(t,ne,u),e(ne,ye),e(ye,Qt),_(ro,Qt,null),e(ne,ur),e(ne,Dt),e(Dt,hr),p(t,jn,u),p(t,Q,u),_(io,Q,null),e(Q,pr),e(Q,co),e(co,gr),e(co,Uo),e(Uo,fr),e(co,mr),e(Q,_r),e(Q,lo),e(lo,br),e(lo,uo),e(uo,vr),e(lo,kr),e(Q,Br),e(Q,z),_(ho,z,null),e(z,wr),e(z,se),e(se,yr),e(se,Go),e(Go,Tr),e(se,Pr),e(se,Ut),e(Ut,qr),e(se,zr),e(z,$r),_(Te,z,null),e(z,xr),e(z,Gt),e(Gt,Cr),e(z,Fr),e(z,Wt),e(Wt,Ht),e(Ht,Rt),e(Rt,Kt),e(Kt,Er),e(z,Mr),e(z,Vt),e(Vt,Jt),e(Jt,Zt),e(Zt,Yt),e(Yt,Sr),e(z,Ar),e(z,Xt),e(Xt,en),e(en,on),e(on,tn),e(tn,Lr),e(z,Nr),e(z,nn),e(nn,sn),e(sn,po),e(po,Pe),e(Pe,qe),e(qe,an),_(go,an,null),e(Pe,Or),e(Pe,rn),e(rn,jr),e(po,Ir),e(po,dn),e(dn,Qr),p(t,In,u),p(t,ae,u),e(ae,ze),e(ze,cn),_(fo,cn,null),e(ae,Dr),e(ae,ln),e(ln,Ur),p(t,Qn,u),p(t,M,u),_(mo,M,null),e(M,Gr),e(M,un),e(un,Wr),e(M,Hr),e(M,_o),e(_o,Rr),e(_o,Wo),e(Wo,Kr),e(_o,Vr),e(M,Jr),e(M,bo),e(bo,Zr),e(bo,vo),e(vo,Yr),e(bo,Xr),e(M,ei),e(M,x),_(ko,x,null),
e(x,oi),e(x,re),e(re,ti),e(re,Ho),e(Ho,ni),e(re,si),e(re,hn),e(hn,ai),e(re,ri),e(x,ii),_($e,x,null),e(x,di),e(x,pn),e(pn,ci),e(x,li),_(Bo,x,null),e(x,ui),e(x,gn),e(gn,hi),e(x,pi),_(wo,x,null),p(t,Dn,u),p(t,ie,u),e(ie,xe),e(xe,fn),_(yo,fn,null),e(ie,gi),e(ie,mn),e(mn,fi),p(t,Un,u),p(t,S,u),_(To,S,null),e(S,mi),e(S,de),e(de,_i),e(de,_n),e(_n,bi),e(de,vi),e(de,bn),e(bn,ki),e(de,Bi),e(S,wi),e(S,Po),e(Po,yi),e(Po,Ro),e(Ro,Ti),e(Po,Pi),e(S,qi),e(S,qo),e(qo,zi),e(qo,zo),e(zo,$i),e(qo,xi),e(S,Ci),e(S,L),_($o,L,null),e(L,Fi),e(L,ce),e(ce,Ei),e(ce,Ko),e(Ko,Mi),e(ce,Si),e(ce,vn),e(vn,Ai),e(ce,Li),e(L,Ni),_(Ce,L,null),e(L,Oi),e(L,kn),e(kn,ji),e(L,Ii),_(xo,L,null),p(t,Gn,u),p(t,le,u),e(le,Fe),e(Fe,Bn),_(Co,Bn,null),e(le,Qi),e(le,wn),e(wn,Di),p(t,Wn,u),p(t,Fo,u),e(Fo,G),_(Eo,G,null),e(G,Ui),e(G,yn),e(yn,Gi),e(G,Wi),_(Mo,G,null),Hn=!0},p(t,[u]){const So={};u&2&&(So.$$scope={dirty:u,ctx:t}),we.$set(So);const Tn={};u&2&&(Tn.$$scope={dirty:u,ctx:t}),Te.$set(Tn);const Pn={};u&2&&(Pn.$$scope={dirty:u,ctx:t}),$e.$set(Pn);const qn={};u&2&&(qn.$$scope={dirty:u,ctx:t}),Ce.$set(qn)},i(t){Hn||(b(B.$$.fragment,t),b(Se.$$.fragment,t),b(Ue.$$.fragment,t),b(Ge.$$.fragment,t),b(He.$$.fragment,t),b(Ke.$$.fragment,t),b(Je.$$.fragment,t),b(Ye.$$.fragment,t),b(Xe.$$.fragment,t),b(eo.$$.fragment,t),b(so.$$.fragment,t),b(we.$$.fragment,t),b(ao.$$.fragment,t),b(ro.$$.fragment,t),b(io.$$.fragment,t),b(ho.$$.fragment,t),b(Te.$$.fragment,t),b(go.$$.fragment,t),b(fo.$$.fragment,t),b(mo.$$.fragment,t),b(ko.$$.fragment,t),b($e.$$.fragment,t),b(Bo.$$.fragment,t),b(wo.$$.fragment,t),b(yo.$$.fragment,t),b(To.$$.fragment,t),b($o.$$.fragment,t),b(Ce.$$.fragment,t),b(xo.$$.fragment,t),b(Co.$$.fragment,t),b(Eo.$$.fragment,t),b(Mo.$$.fragment,t),Hn=!0)},o(t){v(B.$$.fragment,t),v(Se.$$.fragment,t),v(Ue.$$.fragment,t),v(Ge.$$.fragment,t),v(He.$$.fragment,t),v(Ke.$$.fragment,t),v(Je.$$.fragment,t),v(Ye.$$.fragment,t),v(Xe.$$.fragment,t),v(eo.$$.fragment,t),v(so.$$.fragment,t),v(we.$$.fragment,t),v(ao.$$.fragment,t),v(ro.$$.fragment,t),v(io.$$.fragment,t),v(ho.$$.fragment,t),v(Te.$$.fragment,t),v(go.$$.fragment,t),v(fo.$$.fragment,t),v(mo.$$.fragment,t),v(ko.$$.fragment,t),v($e.$$.fragment,t),v(Bo.$$.fragment,t),v(wo.$$.fragment,t),v(yo.$$.fragment,t),v(To.$$.fragment,t),v($o.$$.fragment,t),v(Ce.$$.fragment,t),v(xo.$$.fragment,t),v(Co.$$.fragment,t),v(Eo.$$.fragment,t),v(Mo.$$.fragment,t),Hn=!1},d(t){o(h),t&&o(P),t&&o(g),k(B),t&&o(zn),t&&o(Z),k(Se),t&&o($n),t&&o(he),t&&o(xn),t&&o(Ao),t&&o(Cn),t&&o(Lo),t&&o(Fn),t&&o(No),t&&o(En),t&&o($),t&&o(Mn),t&&o(pe),t&&o(Sn),t&&o(Y),k(Ue),t&&o(An),t&&o(T),k(Ge),k(He),k(Ke),k(Je),k(Ye),t&&o(Ln),t&&o(oe),k(Xe),t&&o(Nn),t&&o(I),k(eo),k(so),k(we),k(ao),t&&o(On),t&&o(ne),k(ro),t&&o(jn),t&&o(Q),k(io),k(ho),k(Te),k(go),t&&o(In),t&&o(ae),k(fo),t&&o(Qn),t&&o(M),k(mo),k(ko),k($e),k(Bo),k(wo),t&&o(Dn),t&&o(ie),k(yo),t&&o(Un),t&&o(S),k(To),k($o),k(Ce),k(xo),t&&o(Gn),t&&o(le),k(Co),t&&o(Wn),t&&o(Fo),k(Eo),k(Mo)}}}const 
ol={local:"bigbirdpegasus",sections:[{local:"overview",title:"Overview"},{local:"transformers.BigBirdPegasusConfig",title:"BigBirdPegasusConfig"},{local:"transformers.BigBirdPegasusModel",title:"BigBirdPegasusModel"},{local:"transformers.BigBirdPegasusForConditionalGeneration",title:"BigBirdPegasusForConditionalGeneration"},{local:"transformers.BigBirdPegasusForSequenceClassification",title:"BigBirdPegasusForSequenceClassification"},{local:"transformers.BigBirdPegasusForQuestionAnswering",title:"BigBirdPegasusForQuestionAnswering"},{local:"transformers.BigBirdPegasusForCausalLM",title:"BigBirdPegasusForCausalLM"}],title:"BigBirdPegasus"};function tl(U,h,P){let{fw:g}=h;return U.$$set=y=>{"fw"in y&&P(0,g=y.fw)},[g]}class cl extends Hc{constructor(h){super();Rc(this,h,tl,el,Kc,{fw:0})}}export{cl as default,ol as metadata};
9,990
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/beit.mdx-62dec511.js
import{S as Fp,i as Ep,s as kp,e as a,k as d,w as _,t as r,L as Ip,c as s,d as t,m as c,a as n,x as v,h as i,b as l,J as e,g as p,y as b,q as x,o as w,B as y}from"../../chunks/vendor-b1433968.js";import{T as mt}from"../../chunks/Tip-c3840994.js";import{D as P}from"../../chunks/Docstring-ff504c58.js";import{C as pt}from"../../chunks/CodeBlock-a320dbd7.js";import{I as ee}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Mp(E){let h,$;return{c(){h=a("p"),$=r(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(f){h=s(f,"P",{});var T=n(h);$=i(T,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),T.forEach(t)},m(f,T){p(f,h,T),e(h,$)},d(f){f&&t(h)}}}function jp(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function Pp(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function Cp(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function zp(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for 
forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function Ap(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function Lp(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function Np(E){let h,$,f,T,B;return{c(){h=a("p"),$=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),B=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=s(g,"P",{});var u=n(h);$=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(u,"CODE",{});var F=n(f);T=i(F,"Module"),F.forEach(t),B=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){p(g,h,u),e(h,$),e(h,f),e(f,T),e(h,B)},d(g){g&&t(h)}}}function Sp(E){let 
h,$,f,T,B,g,u,F,Qn,Ks,fe,Se,Ia,ft,er,Ma,tr,Ys,K,or,ut,ar,sr,gt,nr,rr,_t,ir,lr,Zs,Jo,dr,Qs,Xo,ja,cr,en,Go,hr,tn,C,k,mr,Ko,pr,fr,Yo,ur,gr,vt,_r,vr,Zo,br,xr,Qo,wr,yr,ea,Tr,$r,ta,Br,Fr,Er,bt,kr,xt,Ir,Mr,jr,wt,Pr,oa,Cr,zr,Ar,ue,Lr,Pa,Nr,Sr,yt,qr,Or,Dr,ge,Wr,Tt,Vr,Ur,$t,Rr,Hr,Jr,R,Xr,Ca,Gr,Kr,za,Yr,Zr,aa,Qr,ei,Aa,ti,oi,on,Y,ai,Bt,si,ni,Ft,ri,ii,Et,li,di,an,_e,qe,La,kt,ci,Na,hi,sn,ve,It,mi,Mt,pi,sa,fi,ui,nn,be,jt,gi,Pt,_i,na,vi,bi,rn,xe,Oe,Sa,Ct,xi,qa,wi,ln,H,zt,yi,we,Ti,ra,$i,Bi,At,Fi,Ei,ki,Oa,Ii,Mi,Lt,dn,ye,De,Da,Nt,ji,Wa,Pi,cn,J,St,Ci,Va,zi,Ai,qt,Li,ia,Ni,Si,qi,re,Ot,Oi,Ua,Di,Wi,We,hn,Te,Ve,Ra,Dt,Vi,Ha,Ui,mn,te,Wt,Ri,Vt,Hi,Ut,Ji,Xi,Gi,L,Rt,Ki,$e,Yi,la,Zi,Qi,Ja,el,tl,ol,Ue,al,Xa,sl,nl,Ht,pn,Be,Re,Ga,Jt,rl,Ka,il,fn,oe,Xt,ll,Gt,dl,Kt,cl,hl,ml,N,Yt,pl,Fe,fl,da,ul,gl,Ya,_l,vl,bl,He,xl,Za,wl,yl,Zt,un,Ee,Je,Qa,Qt,Tl,es,$l,gn,X,eo,Bl,ts,Fl,El,to,kl,oo,Il,Ml,jl,S,ao,Pl,ke,Cl,ca,zl,Al,os,Ll,Nl,Sl,Xe,ql,as,Ol,Dl,so,_n,Ie,Ge,ss,no,Wl,ns,Vl,vn,G,ro,Ul,rs,Rl,Hl,io,Jl,lo,Xl,Gl,Kl,q,co,Yl,Me,Zl,ha,Ql,ed,is,td,od,ad,Ke,sd,ls,nd,rd,ho,bn,je,Ye,ds,mo,id,cs,ld,xn,I,po,dd,hs,cd,hd,fo,md,ma,pd,fd,ud,uo,gd,go,_d,vd,bd,ms,xd,wd,ae,ps,_o,yd,Td,fs,vo,$d,Bd,us,bo,Fd,Ed,gs,xo,kd,Id,O,wo,Md,Pe,jd,_s,Pd,Cd,vs,zd,Ad,Ld,Ze,Nd,bs,Sd,qd,yo,wn,Ce,Qe,xs,To,Od,ws,Dd,yn,M,$o,Wd,ys,Vd,Ud,Bo,Rd,pa,Hd,Jd,Xd,Fo,Gd,Eo,Kd,Yd,Zd,Ts,Qd,ec,se,$s,ko,tc,oc,Bs,Io,ac,sc,Fs,Mo,nc,rc,Es,jo,ic,lc,z,Po,dc,ze,cc,ks,hc,mc,Is,pc,fc,uc,et,gc,Ae,_c,Ms,vc,bc,js,xc,wc,yc,Ps,Tc,$c,Co,Tn,Le,tt,Cs,zo,Bc,zs,Fc,$n,j,Ao,Ec,As,kc,Ic,Lo,Mc,fa,jc,Pc,Cc,No,zc,So,Ac,Lc,Nc,Ls,Sc,qc,ne,Ns,qo,Oc,Dc,Ss,Oo,Wc,Vc,qs,Do,Uc,Rc,Os,Wo,Hc,Jc,D,Vo,Xc,Ne,Gc,Ds,Kc,Yc,Ws,Zc,Qc,eh,ot,th,Vs,oh,ah,Uo,Bn;return g=new ee({}),ft=new ee({}),kt=new ee({}),It=new P({props:{name:"class transformers.models.beit.modeling_beit.BeitModelOutputWithPooling",anchor:"transformers.models.beit.modeling_beit.BeitModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"pooler_output",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L47",parametersDescription:[{anchor:"transformers.models.beit.modeling_beit.BeitModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.beit.modeling_beit.BeitModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Average of the last layer hidden states of the patch tokens (excluding the <em>[CLS]</em> token) if <em>config.use_mean_pooling</em> is set to True. 
If set to False, then the final hidden state of the <em>[CLS]</em> token will be returned.`,name:"pooler_output"},{anchor:"transformers.models.beit.modeling_beit.BeitModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.beit.modeling_beit.BeitModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),jt=new P({props:{name:"class transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling",anchor:"transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"pooler_output",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L45",parametersDescription:[{anchor:"transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Average of the last layer hidden states of the patch tokens (excluding the <em>[CLS]</em> token) if <em>config.use_mean_pooling</em> is set to True. If set to False, then the final hidden state of the <em>[CLS]</em> token will be returned.`,name:"pooler_output"},{anchor:"transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ct=new ee({}),zt=new P({props:{name:"class transformers.BeitConfig",anchor:"transformers.BeitConfig",parameters:[{name:"vocab_size",val:" = 8192"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"is_encoder_decoder",val:" = False"},{name:"image_size",val:" = 224"},{name:"patch_size",val:" = 16"},{name:"num_channels",val:" = 3"},{name:"use_mask_token",val:" = False"},{name:"use_absolute_position_embeddings",val:" = False"},{name:"use_relative_position_bias",val:" = False"},{name:"use_shared_relative_position_bias",val:" = False"},{name:"layer_scale_init_value",val:" = 0.1"},{name:"drop_path_rate",val:" = 0.1"},{name:"use_mean_pooling",val:" = True"},{name:"out_indices",val:" = [3, 5, 7, 11]"},{name:"pool_scales",val:" = [1, 2, 3, 6]"},{name:"use_auxiliary_head",val:" = True"},{name:"auxiliary_loss_weight",val:" = 0.4"},{name:"auxiliary_channels",val:" = 256"},{name:"auxiliary_num_convs",val:" = 1"},{name:"auxiliary_concat_input",val:" = False"},{name:"semantic_loss_ignore_index",val:" = 255"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/configuration_beit.py#L29",parametersDescription:[{anchor:"transformers.BeitConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8092) &#x2014; Vocabulary size of the BEiT model. 
Defines the number of different image tokens that can be used during pre-training.`,name:"vocab_size"},{anchor:"transformers.BeitConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.BeitConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.BeitConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.BeitConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.BeitConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.BeitConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.BeitConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.BeitConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.BeitConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.BeitConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>224</code>) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.BeitConfig.patch_size",description:`<strong>patch_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>16</code>) &#x2014; The size (resolution) of each patch.`,name:"patch_size"},{anchor:"transformers.BeitConfig.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to <code>3</code>) &#x2014; The number of input channels.`,name:"num_channels"},{anchor:"transformers.BeitConfig.use_mask_token",description:`<strong>use_mask_token</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use 
a mask token for masked image modeling.`,name:"use_mask_token"},{anchor:"transformers.BeitConfig.use_absolute_position_embeddings",description:`<strong>use_absolute_position_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use BERT-style absolute position embeddings.`,name:"use_absolute_position_embeddings"},{anchor:"transformers.BeitConfig.use_relative_position_bias",description:`<strong>use_relative_position_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use T5-style relative position embeddings in the self-attention layers.`,name:"use_relative_position_bias"},{anchor:"transformers.BeitConfig.use_shared_relative_position_bias",description:`<strong>use_shared_relative_position_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use the same relative position embeddings across all self-attention layers of the Transformer.`,name:"use_shared_relative_position_bias"},{anchor:"transformers.BeitConfig.layer_scale_init_value",description:`<strong>layer_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.`,name:"layer_scale_init_value"},{anchor:"transformers.BeitConfig.drop_path_rate",description:`<strong>drop_path_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Stochastic depth rate per sample (when applied in the main path of residual layers).`,name:"drop_path_rate"},{anchor:"transformers.BeitConfig.use_mean_pooling",description:`<strong>use_mean_pooling</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the CLS token, before applying the classification head.`,name:"use_mean_pooling"},{anchor:"transformers.BeitConfig.out_indices",description:`<strong>out_indices</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[3, 5, 7, 11]</code>) &#x2014; Indices of the feature maps to use for semantic segmentation.`,name:"out_indices"},{anchor:"transformers.BeitConfig.pool_scales",description:`<strong>pool_scales</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>[1, 2, 3, 6]</code>) &#x2014; Pooling scales used in Pooling Pyramid Module applied on the last feature map.`,name:"pool_scales"},{anchor:"transformers.BeitConfig.use_auxiliary_head",description:`<strong>use_auxiliary_head</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use an auxiliary head during training.`,name:"use_auxiliary_head"},{anchor:"transformers.BeitConfig.auxiliary_loss_weight",description:`<strong>auxiliary_loss_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 0.4) &#x2014; Weight of the cross-entropy loss of the auxiliary head.`,name:"auxiliary_loss_weight"},{anchor:"transformers.BeitConfig.auxiliary_channels",description:`<strong>auxiliary_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Number of channels to use in the auxiliary head.`,name:"auxiliary_channels"},{anchor:"transformers.BeitConfig.auxiliary_num_convs",description:`<strong>auxiliary_num_convs</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of convolutional layers to use in the auxiliary 
head.`,name:"auxiliary_num_convs"},{anchor:"transformers.BeitConfig.auxiliary_concat_input",description:`<strong>auxiliary_concat_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to concatenate the output of the auxiliary head with the input before the classification layer.`,name:"auxiliary_concat_input"},{anchor:"transformers.BeitConfig.semantic_loss_ignore_index",description:`<strong>semantic_loss_ignore_index</strong> (<code>int</code>, <em>optional</em>, defaults to 255) &#x2014; The index that is ignored by the loss function of the semantic segmentation model.`,name:"semantic_loss_ignore_index"}]}}),Lt=new pt({props:{code:`from transformers import BeitModel, BeitConfig # Initializing a BEiT beit-base-patch16-224-in22k style configuration configuration = BeitConfig() # Initializing a model from the beit-base-patch16-224-in22k style configuration model = BeitModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitModel, BeitConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BEiT beit-base-patch16-224-in22k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BeitConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the beit-base-patch16-224-in22k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BeitModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Nt=new ee({}),St=new P({props:{name:"class transformers.BeitFeatureExtractor",anchor:"transformers.BeitFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 256"},{name:"resample",val:" = 3"},{name:"do_center_crop",val:" = True"},{name:"crop_size",val:" = 224"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"reduce_labels",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/feature_extraction_beit.py#L37",parametersDescription:[{anchor:"transformers.BeitFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.BeitFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 256) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.BeitFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.BeitFeatureExtractor.do_center_crop",description:`<strong>do_center_crop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to crop the input at the center. If the input size is smaller than <code>crop_size</code> along any edge, the image is padded with 0&#x2019;s and then center cropped.`,name:"do_center_crop"},{anchor:"transformers.BeitFeatureExtractor.crop_size",description:`<strong>crop_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; Desired output size when applying center-cropping. Only has an effect if <code>do_center_crop</code> is set to <code>True</code>.`,name:"crop_size"},{anchor:"transformers.BeitFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with <code>image_mean</code> and <code>image_std</code>.`,name:"do_normalize"},{anchor:"transformers.BeitFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.5, 0.5, 0.5]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.`,name:"image_mean"},{anchor:"transformers.BeitFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.5, 0.5, 0.5]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.`,name:"image_std"},{anchor:"transformers.BeitFeatureExtractor.reduce_labels",description:`<strong>reduce_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.`,name:"reduce_labels"}]}}),Ot=new P({props:{name:"__call__",anchor:"transformers.BeitFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"segmentation_maps",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/feature_extraction_beit.py#L100",parametersDescription:[{anchor:"transformers.BeitFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.BeitFeatureExtractor.__call__.segmentation_maps",description:`<strong>segmentation_maps</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014; Optionally, the corresponding semantic segmentation maps with the pixel-wise annotations.`,name:"segmentation_maps"},{anchor:"transformers.BeitFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> <li><strong>labels</strong> \u2014 Optional labels to be fed to a model (when <code>segmentation_maps</code> are provided)</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),We=new mt({props:{warning:"&lcub;true}",$$slots:{default:[Mp]},$$scope:{ctx:E}}}),Dt=new ee({}),Wt=new P({props:{name:"class transformers.BeitModel",anchor:"transformers.BeitModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L588",parametersDescription:[{anchor:"transformers.BeitModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Rt=new P({props:{name:"forward",anchor:"transformers.BeitModel.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"bool_masked_pos",val:" = None"},{name:"head_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L615",parametersDescription:[{anchor:"transformers.BeitModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor">BeitFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor.__call__">BeitFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.BeitModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BeitModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BeitModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BeitModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.models.beit.modeling_beit.BeitModelOutputWithPooling" >transformers.models.beit.modeling_beit.BeitModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig" >BeitConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Average of the last layer hidden states of the patch tokens (excluding the <em>[CLS]</em> token) if <em>config.use_mean_pooling</em> is set to True. If set to False, then the final hidden state of the <em>[CLS]</em> token will be returned.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.models.beit.modeling_beit.BeitModelOutputWithPooling" >transformers.models.beit.modeling_beit.BeitModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ue=new mt({props:{$$slots:{default:[jp]},$$scope:{ctx:E}}}),Ht=new pt({props:{code:`from transformers import BeitFeatureExtractor, BeitModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224-pt22k-ft22k') model = BeitModel.from_pretrained('microsoft/beit-base-patch16-224-pt22k-ft22k') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, BeitModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k-ft22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BeitModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k-ft22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Jt=new ee({}),Xt=new P({props:{name:"class transformers.BeitForMaskedImageModeling",anchor:"transformers.BeitForMaskedImageModeling",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L709",parametersDescription:[{anchor:"transformers.BeitForMaskedImageModeling.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Yt=new P({props:{name:"forward",anchor:"transformers.BeitForMaskedImageModeling.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"bool_masked_pos",val:" = None"},{name:"head_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L723",parametersDescription:[{anchor:"transformers.BeitForMaskedImageModeling.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor">BeitFeatureExtractor</a>. 
See <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor.__call__">BeitFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.BeitForMaskedImageModeling.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BeitForMaskedImageModeling.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BeitForMaskedImageModeling.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BeitForMaskedImageModeling.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BeitForMaskedImageModeling.forward.bool_masked_pos",description:`<strong>bool_masked_pos</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, num_patches)</code>) &#x2014; Boolean masked positions. Indicates which patches are masked (1) and which aren&#x2019;t (0).`,name:"bool_masked_pos"},{anchor:"transformers.BeitForMaskedImageModeling.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig" >BeitConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),He=new mt({props:{$$slots:{default:[Pp]},$$scope:{ctx:E}}}),Zt=new pt({props:{code:`from transformers import BeitFeatureExtractor, BeitForMaskedImageModeling from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224-pt22k') model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, BeitForMaskedImageModeling <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span 
class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BeitForMaskedImageModeling.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Qt=new ee({}),eo=new P({props:{name:"class transformers.BeitForImageClassification",anchor:"transformers.BeitForImageClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L801",parametersDescription:[{anchor:"transformers.BeitForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ao=new P({props:{name:"forward",anchor:"transformers.BeitForImageClassification.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"head_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L814",parametersDescription:[{anchor:"transformers.BeitForImageClassification.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor">BeitFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor.__call__">BeitFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.BeitForImageClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BeitForImageClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BeitForImageClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BeitForImageClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BeitForImageClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig" >BeitConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Xe=new mt({props:{$$slots:{default:[Cp]},$$scope:{ctx:E}}}),so=new pt({props:{code:`from transformers import BeitFeatureExtractor, BeitForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224') model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, BeitForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BeitForImageClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),no=new ee({}),ro=new P({props:{name:"class transformers.BeitForSemanticSegmentation",anchor:"transformers.BeitForSemanticSegmentation",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L1104",parametersDescription:[{anchor:"transformers.BeitForSemanticSegmentation.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with 
all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),co=new P({props:{name:"forward",anchor:"transformers.BeitForSemanticSegmentation.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"head_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_beit.py#L1148",parametersDescription:[{anchor:"transformers.BeitForSemanticSegmentation.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor">BeitFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor.__call__">BeitFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.BeitForSemanticSegmentation.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BeitForSemanticSegmentation.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BeitForSemanticSegmentation.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BeitForSemanticSegmentation.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BeitForSemanticSegmentation.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Ground truth semantic segmentation maps for computing the loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels &gt; 1</code>, a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig" >BeitConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ke=new mt({props:{$$slots:{default:[zp]},$$scope:{ctx:E}}}),ho=new pt({props:{code:`from transformers import BeitFeatureExtractor, BeitForSemanticSegmentation from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-finetuned-ade-640-640') model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) # logits are of shape (batch_size, num_labels, height/4, width/4) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, BeitForSemanticSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span 
class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-finetuned-ade-640-640&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BeitForSemanticSegmentation.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-finetuned-ade-640-640&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># logits are of shape (batch_size, num_labels, height/4, width/4)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),mo=new ee({}),po=new P({props:{name:"class transformers.FlaxBeitModel",anchor:"transformers.FlaxBeitModel",parameters:[{name:"config",val:": BeitConfig"},{name:"input_shape",val:" = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L724",parametersDescription:[{anchor:"transformers.FlaxBeitModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBeitModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),wo=new P({props:{name:"__call__",anchor:"transformers.FlaxBeitPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"bool_masked_pos",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L610",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling" >transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.beit.configuration_beit.BeitConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Average of the last layer hidden states of the patch tokens (excluding the <em>[CLS]</em> token) if <em>config.use_mean_pooling</em> is set to True. If set to False, then the final hidden state of the <em>[CLS]</em> token will be returned.</li> <li><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling" >transformers.models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ze=new mt({props:{$$slots:{default:[Ap]},$$scope:{ctx:E}}}),yo=new pt({props:{code:`from transformers import BeitFeatureExtractor, FlaxBeitModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224-pt22k-ft22k') model = FlaxBeitModel.from_pretrained('microsoft/beit-base-patch16-224-pt22k-ft22k') inputs = feature_extractor(images=image, return_tensors="np") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, FlaxBeitModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k-ft22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBeitModel.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k-ft22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),To=new ee({}),$o=new P({props:{name:"class transformers.FlaxBeitForMaskedImageModeling",anchor:"transformers.FlaxBeitForMaskedImageModeling",parameters:[{name:"config",val:": BeitConfig"},{name:"input_shape",val:" = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L808",parametersDescription:[{anchor:"transformers.FlaxBeitForMaskedImageModeling.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBeitForMaskedImageModeling.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Po=new P({props:{name:"__call__",anchor:"transformers.FlaxBeitPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"bool_masked_pos",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L610",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.beit.configuration_beit.BeitConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),et=new mt({props:{$$slots:{default:[Lp]},$$scope:{ctx:E}}}),Co=new pt({props:{code:`from transformers import BeitFeatureExtractor, BeitForMaskedImageModeling from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224-pt22k') model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k') inputs = feature_extractor(images=image, return_tensors="np") outputs = model(**inputs) logits = outputs.logits,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, BeitForMaskedImageModeling <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BeitForMaskedImageModeling.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224-pt22k&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),zo=new ee({}),Ao=new P({props:{name:"class transformers.FlaxBeitForImageClassification",anchor:"transformers.FlaxBeitForImageClassification",parameters:[{name:"config",val:": BeitConfig"},{name:"input_shape",val:" = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L895",parametersDescription:[{anchor:"transformers.FlaxBeitForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig">BeitConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxBeitForImageClassification.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. 
Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Vo=new P({props:{name:"__call__",anchor:"transformers.FlaxBeitPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"bool_masked_pos",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/beit/modeling_flax_beit.py#L610",returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.beit.configuration_beit.BeitConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ot=new mt({props:{$$slots:{default:[Np]},$$scope:{ctx:E}}}),Uo=new pt({props:{code:`from transformers import BeitFeatureExtractor, FlaxBeitForImageClassification from PIL import Image import requests url = 
'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224') model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224') inputs = feature_extractor(images=image, return_tensors="np") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitFeatureExtractor, FlaxBeitForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = BeitFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBeitForImageClassification.from_pretrained(<span class="hljs-string">&#x27;microsoft/beit-base-patch16-224&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),{c(){h=a("meta"),$=d(),f=a("h1"),T=a("a"),B=a("span"),_(g.$$.fragment),u=d(),F=a("span"),Qn=r("BEiT"),Ks=d(),fe=a("h2"),Se=a("a"),Ia=a("span"),_(ft.$$.fragment),er=d(),Ma=a("span"),tr=r("Overview"),Ys=d(),K=a("p"),or=r("The BEiT model was proposed in "),ut=a("a"),ar=r("BEiT: BERT Pre-Training of Image Transformers"),sr=r(` by Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class of an image (as done in the `),gt=a("a"),nr=r("original ViT paper"),rr=r(`), BEiT models are pre-trained to predict visual tokens from the codebook of OpenAI\u2019s `),_t=a("a"),ir=r("DALL-E model"),lr=r(` given masked patches.`),Zs=d(),Jo=a("p"),dr=r("The abstract from the paper is the following:"),Qs=d(),Xo=a("p"),ja=a("em"),cr=r(`We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. 
Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first \u201Ctokenize\u201D the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).`),en=d(),Go=a("p"),hr=r("Tips:"),tn=d(),C=a("ul"),k=a("li"),mr=r(`BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They outperform both the `),Ko=a("a"),pr=r("original model (ViT)"),fr=r(" as well as "),Yo=a("a"),ur=r("Data-efficient Image Transformers (DeiT)"),gr=r(` when fine-tuned on ImageNet-1K and CIFAR-100. You can check out demo notebooks regarding inference as well as fine-tuning on custom data `),vt=a("a"),_r=r("here"),vr=r(` (you can just replace `),Zo=a("a"),br=r("ViTFeatureExtractor"),xr=r(" by "),Qo=a("a"),wr=r("BeitFeatureExtractor"),yr=r(` and `),ea=a("a"),Tr=r("ViTForImageClassification"),$r=r(" by "),ta=a("a"),Br=r("BeitForImageClassification"),Fr=r(")."),Er=d(),bt=a("li"),kr=r(`There\u2019s also a demo notebook available which showcases how to combine DALL-E\u2019s image tokenizer with BEiT for performing masked image modeling. You can find it `),xt=a("a"),Ir=r("here"),Mr=r("."),jr=d(),wt=a("li"),Pr=r(`As the BEiT models expect each image to be of the same size (resolution), one can use `),oa=a("a"),Cr=r("BeitFeatureExtractor"),zr=r(" to resize (or rescale) and normalize images for the model."),Ar=d(),ue=a("li"),Lr=r(`Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `),Pa=a("code"),Nr=r("microsoft/beit-base-patch16-224"),Sr=r(` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the `),yt=a("a"),qr=r("hub"),Or=r("."),Dr=d(),ge=a("li"),Wr=r("The available checkpoints are either (1) pre-trained on "),Tt=a("a"),Vr=r("ImageNet-22k"),Ur=r(` (a collection of 14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on `),$t=a("a"),Rr=r("ImageNet-1k"),Hr=r(` (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes).`),Jr=d(),R=a("li"),Xr=r(`BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the relative position bias among the several self-attention layers. During fine-tuning, each layer\u2019s relative position bias is initialized with the shared relative position bias obtained after pre-training. 
Note that, if one wants to pre-train a model from scratch, one needs to either set the `),Ca=a("code"),Gr=r("use_relative_position_bias"),Kr=r(` or the `),za=a("code"),Yr=r("use_relative_position_bias"),Zr=r(" attribute of "),aa=a("a"),Qr=r("BeitConfig"),ei=r(" to "),Aa=a("code"),ti=r("True"),oi=r(` in order to add position embeddings.`),on=d(),Y=a("p"),ai=r("This model was contributed by "),Bt=a("a"),si=r("nielsr"),ni=r(`. The JAX/FLAX version of this model was contributed by `),Ft=a("a"),ri=r("kamalkraj"),ii=r(". The original code can be found "),Et=a("a"),li=r("here"),di=r("."),an=d(),_e=a("h2"),qe=a("a"),La=a("span"),_(kt.$$.fragment),ci=d(),Na=a("span"),hi=r("BEiT specific outputs"),sn=d(),ve=a("div"),_(It.$$.fragment),mi=d(),Mt=a("p"),pi=r("Class for outputs of "),sa=a("a"),fi=r("BeitModel"),ui=r("."),nn=d(),be=a("div"),_(jt.$$.fragment),gi=d(),Pt=a("p"),_i=r("Class for outputs of "),na=a("a"),vi=r("FlaxBeitModel"),bi=r("."),rn=d(),xe=a("h2"),Oe=a("a"),Sa=a("span"),_(Ct.$$.fragment),xi=d(),qa=a("span"),wi=r("BeitConfig"),ln=d(),H=a("div"),_(zt.$$.fragment),yi=d(),we=a("p"),Ti=r("This is the configuration class to store the configuration of a "),ra=a("a"),$i=r("BeitModel"),Bi=r(`. It is used to instantiate an BEiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BEiT `),At=a("a"),Fi=r("microsoft/beit-base-patch16-224-in22k"),Ei=r(` architecture.`),ki=d(),Oa=a("p"),Ii=r("Example:"),Mi=d(),_(Lt.$$.fragment),dn=d(),ye=a("h2"),De=a("a"),Da=a("span"),_(Nt.$$.fragment),ji=d(),Wa=a("span"),Pi=r("BeitFeatureExtractor"),cn=d(),J=a("div"),_(St.$$.fragment),Ci=d(),Va=a("p"),zi=r("Constructs a BEiT feature extractor."),Ai=d(),qt=a("p"),Li=r("This feature extractor inherits from "),ia=a("a"),Ni=r("FeatureExtractionMixin"),Si=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),qi=d(),re=a("div"),_(Ot.$$.fragment),Oi=d(),Ua=a("p"),Di=r("Main method to prepare for the model one or several image(s)."),Wi=d(),_(We.$$.fragment),hn=d(),Te=a("h2"),Ve=a("a"),Ra=a("span"),_(Dt.$$.fragment),Vi=d(),Ha=a("span"),Ui=r("BeitModel"),mn=d(),te=a("div"),_(Wt.$$.fragment),Ri=d(),Vt=a("p"),Hi=r(`The bare Beit Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Ut=a("a"),Ji=r("torch.nn.Module"),Xi=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gi=d(),L=a("div"),_(Rt.$$.fragment),Ki=d(),$e=a("p"),Yi=r("The "),la=a("a"),Zi=r("BeitModel"),Qi=r(" forward method, overrides the "),Ja=a("code"),el=r("__call__"),tl=r(" special method."),ol=d(),_(Ue.$$.fragment),al=d(),Xa=a("p"),sl=r("Examples:"),nl=d(),_(Ht.$$.fragment),pn=d(),Be=a("h2"),Re=a("a"),Ga=a("span"),_(Jt.$$.fragment),rl=d(),Ka=a("span"),il=r("BeitForMaskedImageModeling"),fn=d(),oe=a("div"),_(Xt.$$.fragment),ll=d(),Gt=a("p"),dl=r(`Beit Model transformer with a \u2018language\u2019 modeling head on top (to predict visual tokens). This model is a PyTorch `),Kt=a("a"),cl=r("torch.nn.Module"),hl=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ml=d(),N=a("div"),_(Yt.$$.fragment),pl=d(),Fe=a("p"),fl=r("The "),da=a("a"),ul=r("BeitForMaskedImageModeling"),gl=r(" forward method, overrides the "),Ya=a("code"),_l=r("__call__"),vl=r(" special method."),bl=d(),_(He.$$.fragment),xl=d(),Za=a("p"),wl=r("Examples:"),yl=d(),_(Zt.$$.fragment),un=d(),Ee=a("h2"),Je=a("a"),Qa=a("span"),_(Qt.$$.fragment),Tl=d(),es=a("span"),$l=r("BeitForImageClassification"),gn=d(),X=a("div"),_(eo.$$.fragment),Bl=d(),ts=a("p"),Fl=r(`Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final hidden states of the patch tokens) e.g. for ImageNet.`),El=d(),to=a("p"),kl=r("This model is a PyTorch "),oo=a("a"),Il=r("torch.nn.Module"),Ml=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jl=d(),S=a("div"),_(ao.$$.fragment),Pl=d(),ke=a("p"),Cl=r("The "),ca=a("a"),zl=r("BeitForImageClassification"),Al=r(" forward method, overrides the "),os=a("code"),Ll=r("__call__"),Nl=r(" special method."),Sl=d(),_(Xe.$$.fragment),ql=d(),as=a("p"),Ol=r("Examples:"),Dl=d(),_(so.$$.fragment),_n=d(),Ie=a("h2"),Ge=a("a"),ss=a("span"),_(no.$$.fragment),Wl=d(),ns=a("span"),Vl=r("BeitForSemanticSegmentation"),vn=d(),G=a("div"),_(ro.$$.fragment),Ul=d(),rs=a("p"),Rl=r("Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes."),Hl=d(),io=a("p"),Jl=r("This model is a PyTorch "),lo=a("a"),Xl=r("torch.nn.Module"),Gl=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kl=d(),q=a("div"),_(co.$$.fragment),Yl=d(),Me=a("p"),Zl=r("The "),ha=a("a"),Ql=r("BeitForSemanticSegmentation"),ed=r(" forward method, overrides the "),is=a("code"),td=r("__call__"),od=r(" special method."),ad=d(),_(Ke.$$.fragment),sd=d(),ls=a("p"),nd=r("Examples:"),rd=d(),_(ho.$$.fragment),bn=d(),je=a("h2"),Ye=a("a"),ds=a("span"),_(mo.$$.fragment),id=d(),cs=a("span"),ld=r("FlaxBeitModel"),xn=d(),I=a("div"),_(po.$$.fragment),dd=d(),hs=a("p"),cd=r("The bare Beit Model transformer outputting raw hidden-states without any specific head on top."),hd=d(),fo=a("p"),md=r("This model inherits from "),ma=a("a"),pd=r("FlaxPreTrainedModel"),fd=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),ud=d(),uo=a("p"),gd=r("This model is also a Flax Linen "),go=a("a"),_d=r("flax.linen.Module"),vd=r(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),bd=d(),ms=a("p"),xd=r("Finally, this model supports inherent JAX features such as:"),wd=d(),ae=a("ul"),ps=a("li"),_o=a("a"),yd=r("Just-In-Time (JIT) compilation"),Td=d(),fs=a("li"),vo=a("a"),$d=r("Automatic Differentiation"),Bd=d(),us=a("li"),bo=a("a"),Fd=r("Vectorization"),Ed=d(),gs=a("li"),xo=a("a"),kd=r("Parallelization"),Id=d(),O=a("div"),_(wo.$$.fragment),Md=d(),Pe=a("p"),jd=r("The "),_s=a("code"),Pd=r("FlaxBeitPreTrainedModel"),Cd=r(" forward method, overrides the "),vs=a("code"),zd=r("__call__"),Ad=r(" special method."),Ld=d(),_(Ze.$$.fragment),Nd=d(),bs=a("p"),Sd=r("Examples:"),qd=d(),_(yo.$$.fragment),wn=d(),Ce=a("h2"),Qe=a("a"),xs=a("span"),_(To.$$.fragment),Od=d(),ws=a("span"),Dd=r("FlaxBeitForMaskedImageModeling"),yn=d(),M=a("div"),_($o.$$.fragment),Wd=d(),ys=a("p"),Vd=r("Beit Model transformer with a \u2018language\u2019 modeling head on top (to predict visual tokens)."),Ud=d(),Bo=a("p"),Rd=r("This model inherits from "),pa=a("a"),Hd=r("FlaxPreTrainedModel"),Jd=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Xd=d(),Fo=a("p"),Gd=r("This model is also a Flax Linen "),Eo=a("a"),Kd=r("flax.linen.Module"),Yd=r(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Zd=d(),Ts=a("p"),Qd=r("Finally, this model supports inherent JAX features such as:"),ec=d(),se=a("ul"),$s=a("li"),ko=a("a"),tc=r("Just-In-Time (JIT) compilation"),oc=d(),Bs=a("li"),Io=a("a"),ac=r("Automatic Differentiation"),sc=d(),Fs=a("li"),Mo=a("a"),nc=r("Vectorization"),rc=d(),Es=a("li"),jo=a("a"),ic=r("Parallelization"),lc=d(),z=a("div"),_(Po.$$.fragment),dc=d(),ze=a("p"),cc=r("The "),ks=a("code"),hc=r("FlaxBeitPreTrainedModel"),mc=r(" forward method, overrides the "),Is=a("code"),pc=r("__call__"),fc=r(" special method."),uc=d(),_(et.$$.fragment),gc=d(),Ae=a("p"),_c=r("bool_masked_pos ("),Ms=a("code"),vc=r("numpy.ndarray"),bc=r(" of shape "),js=a("code"),xc=r("(batch_size, num_patches)"),wc=r(`): Boolean masked positions. Indicates which patches are masked (1) and which aren\u2019t (0).`),yc=d(),Ps=a("p"),Tc=r("Examples:"),$c=d(),_(Co.$$.fragment),Tn=d(),Le=a("h2"),tt=a("a"),Cs=a("span"),_(zo.$$.fragment),Bc=d(),zs=a("span"),Fc=r("FlaxBeitForImageClassification"),$n=d(),j=a("div"),_(Ao.$$.fragment),Ec=d(),As=a("p"),kc=r(`Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final hidden states of the patch tokens) e.g. for ImageNet.`),Ic=d(),Lo=a("p"),Mc=r("This model inherits from "),fa=a("a"),jc=r("FlaxPreTrainedModel"),Pc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Cc=d(),No=a("p"),zc=r("This model is also a Flax Linen "),So=a("a"),Ac=r("flax.linen.Module"),Lc=r(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Nc=d(),Ls=a("p"),Sc=r("Finally, this model supports inherent JAX features such as:"),qc=d(),ne=a("ul"),Ns=a("li"),qo=a("a"),Oc=r("Just-In-Time (JIT) compilation"),Dc=d(),Ss=a("li"),Oo=a("a"),Wc=r("Automatic Differentiation"),Vc=d(),qs=a("li"),Do=a("a"),Uc=r("Vectorization"),Rc=d(),Os=a("li"),Wo=a("a"),Hc=r("Parallelization"),Jc=d(),D=a("div"),_(Vo.$$.fragment),Xc=d(),Ne=a("p"),Gc=r("The "),Ds=a("code"),Kc=r("FlaxBeitPreTrainedModel"),Yc=r(" forward method, overrides the "),Ws=a("code"),Zc=r("__call__"),Qc=r(" special method."),eh=d(),_(ot.$$.fragment),th=d(),Vs=a("p"),oh=r("Example:"),ah=d(),_(Uo.$$.fragment),this.h()},l(o){const m=Ip('[data-svelte="svelte-1phssyn"]',document.head);h=s(m,"META",{name:!0,content:!0}),m.forEach(t),$=c(o),f=s(o,"H1",{class:!0});var Ro=n(f);T=s(Ro,"A",{id:!0,class:!0,href:!0});var Us=n(T);B=s(Us,"SPAN",{});var Rs=n(B);v(g.$$.fragment,Rs),Rs.forEach(t),Us.forEach(t),u=c(Ro),F=s(Ro,"SPAN",{});var Hs=n(F);Qn=i(Hs,"BEiT"),Hs.forEach(t),Ro.forEach(t),Ks=c(o),fe=s(o,"H2",{class:!0});var Ho=n(fe);Se=s(Ho,"A",{id:!0,class:!0,href:!0});var Js=n(Se);Ia=s(Js,"SPAN",{});var Xs=n(Ia);v(ft.$$.fragment,Xs),Xs.forEach(t),Js.forEach(t),er=c(Ho),Ma=s(Ho,"SPAN",{});var Gs=n(Ma);tr=i(Gs,"Overview"),Gs.forEach(t),Ho.forEach(t),Ys=c(o),K=s(o,"P",{});var at=n(K);or=i(at,"The BEiT model was proposed in "),ut=s(at,"A",{href:!0,rel:!0});var sh=n(ut);ar=i(sh,"BEiT: BERT Pre-Training of Image Transformers"),sh.forEach(t),sr=i(at,` by Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class of an image (as done in the `),gt=s(at,"A",{href:!0,rel:!0});var nh=n(gt);nr=i(nh,"original ViT paper"),nh.forEach(t),rr=i(at,`), BEiT models are pre-trained to predict visual tokens from the codebook of OpenAI\u2019s `),_t=s(at,"A",{href:!0,rel:!0});var rh=n(_t);ir=i(rh,"DALL-E model"),rh.forEach(t),lr=i(at,` given masked patches.`),at.forEach(t),Zs=c(o),Jo=s(o,"P",{});var ih=n(Jo);dr=i(ih,"The abstract from the paper is the following:"),ih.forEach(t),Qs=c(o),Xo=s(o,"P",{});var lh=n(Xo);ja=s(lh,"EM",{});var dh=n(ja);cr=i(dh,`We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first \u201Ctokenize\u201D the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. 
Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).`),dh.forEach(t),lh.forEach(t),en=c(o),Go=s(o,"P",{});var ch=n(Go);hr=i(ch,"Tips:"),ch.forEach(t),tn=c(o),C=s(o,"UL",{});var Z=n(C);k=s(Z,"LI",{});var A=n(k);mr=i(A,`BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They outperform both the `),Ko=s(A,"A",{href:!0});var hh=n(Ko);pr=i(hh,"original model (ViT)"),hh.forEach(t),fr=i(A," as well as "),Yo=s(A,"A",{href:!0});var mh=n(Yo);ur=i(mh,"Data-efficient Image Transformers (DeiT)"),mh.forEach(t),gr=i(A,` when fine-tuned on ImageNet-1K and CIFAR-100. You can check out demo notebooks regarding inference as well as fine-tuning on custom data `),vt=s(A,"A",{href:!0,rel:!0});var ph=n(vt);_r=i(ph,"here"),ph.forEach(t),vr=i(A,` (you can just replace `),Zo=s(A,"A",{href:!0});var fh=n(Zo);br=i(fh,"ViTFeatureExtractor"),fh.forEach(t),xr=i(A," by "),Qo=s(A,"A",{href:!0});var uh=n(Qo);wr=i(uh,"BeitFeatureExtractor"),uh.forEach(t),yr=i(A,` and `),ea=s(A,"A",{href:!0});var gh=n(ea);Tr=i(gh,"ViTForImageClassification"),gh.forEach(t),$r=i(A," by "),ta=s(A,"A",{href:!0});var _h=n(ta);Br=i(_h,"BeitForImageClassification"),_h.forEach(t),Fr=i(A,")."),A.forEach(t),Er=c(Z),bt=s(Z,"LI",{});var Fn=n(bt);kr=i(Fn,`There\u2019s also a demo notebook available which showcases how to combine DALL-E\u2019s image tokenizer with BEiT for performing masked image modeling. You can find it `),xt=s(Fn,"A",{href:!0,rel:!0});var vh=n(xt);Ir=i(vh,"here"),vh.forEach(t),Mr=i(Fn,"."),Fn.forEach(t),jr=c(Z),wt=s(Z,"LI",{});var En=n(wt);Pr=i(En,`As the BEiT models expect each image to be of the same size (resolution), one can use `),oa=s(En,"A",{href:!0});var bh=n(oa);Cr=i(bh,"BeitFeatureExtractor"),bh.forEach(t),zr=i(En," to resize (or rescale) and normalize images for the model."),En.forEach(t),Ar=c(Z),ue=s(Z,"LI",{});var ua=n(ue);Lr=i(ua,`Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `),Pa=s(ua,"CODE",{});var xh=n(Pa);Nr=i(xh,"microsoft/beit-base-patch16-224"),xh.forEach(t),Sr=i(ua,` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the `),yt=s(ua,"A",{href:!0,rel:!0});var wh=n(yt);qr=i(wh,"hub"),wh.forEach(t),Or=i(ua,"."),ua.forEach(t),Dr=c(Z),ge=s(Z,"LI",{});var ga=n(ge);Wr=i(ga,"The available checkpoints are either (1) pre-trained on "),Tt=s(ga,"A",{href:!0,rel:!0});var yh=n(Tt);Vr=i(yh,"ImageNet-22k"),yh.forEach(t),Ur=i(ga,` (a collection of 14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on `),$t=s(ga,"A",{href:!0,rel:!0});var Th=n($t);Rr=i(Th,"ImageNet-1k"),Th.forEach(t),Hr=i(ga,` (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes).`),ga.forEach(t),Jr=c(Z),R=s(Z,"LI",{});var ie=n(R);Xr=i(ie,`BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the relative position bias among the several self-attention layers. During fine-tuning, each layer\u2019s relative position bias is initialized with the shared relative position bias obtained after pre-training. 
Note that, if one wants to pre-train a model from scratch, one needs to either set the `),Ca=s(ie,"CODE",{});var $h=n(Ca);Gr=i($h,"use_relative_position_bias"),$h.forEach(t),Kr=i(ie,` or the `),za=s(ie,"CODE",{});var Bh=n(za);Yr=i(Bh,"use_relative_position_bias"),Bh.forEach(t),Zr=i(ie," attribute of "),aa=s(ie,"A",{href:!0});var Fh=n(aa);Qr=i(Fh,"BeitConfig"),Fh.forEach(t),ei=i(ie," to "),Aa=s(ie,"CODE",{});var Eh=n(Aa);ti=i(Eh,"True"),Eh.forEach(t),oi=i(ie,` in order to add position embeddings.`),ie.forEach(t),Z.forEach(t),on=c(o),Y=s(o,"P",{});var st=n(Y);ai=i(st,"This model was contributed by "),Bt=s(st,"A",{href:!0,rel:!0});var kh=n(Bt);si=i(kh,"nielsr"),kh.forEach(t),ni=i(st,`. The JAX/FLAX version of this model was contributed by `),Ft=s(st,"A",{href:!0,rel:!0});var Ih=n(Ft);ri=i(Ih,"kamalkraj"),Ih.forEach(t),ii=i(st,". The original code can be found "),Et=s(st,"A",{href:!0,rel:!0});var Mh=n(Et);li=i(Mh,"here"),Mh.forEach(t),di=i(st,"."),st.forEach(t),an=c(o),_e=s(o,"H2",{class:!0});var kn=n(_e);qe=s(kn,"A",{id:!0,class:!0,href:!0});var jh=n(qe);La=s(jh,"SPAN",{});var Ph=n(La);v(kt.$$.fragment,Ph),Ph.forEach(t),jh.forEach(t),ci=c(kn),Na=s(kn,"SPAN",{});var Ch=n(Na);hi=i(Ch,"BEiT specific outputs"),Ch.forEach(t),kn.forEach(t),sn=c(o),ve=s(o,"DIV",{class:!0});var In=n(ve);v(It.$$.fragment,In),mi=c(In),Mt=s(In,"P",{});var Mn=n(Mt);pi=i(Mn,"Class for outputs of "),sa=s(Mn,"A",{href:!0});var zh=n(sa);fi=i(zh,"BeitModel"),zh.forEach(t),ui=i(Mn,"."),Mn.forEach(t),In.forEach(t),nn=c(o),be=s(o,"DIV",{class:!0});var jn=n(be);v(jt.$$.fragment,jn),gi=c(jn),Pt=s(jn,"P",{});var Pn=n(Pt);_i=i(Pn,"Class for outputs of "),na=s(Pn,"A",{href:!0});var Ah=n(na);vi=i(Ah,"FlaxBeitModel"),Ah.forEach(t),bi=i(Pn,"."),Pn.forEach(t),jn.forEach(t),rn=c(o),xe=s(o,"H2",{class:!0});var Cn=n(xe);Oe=s(Cn,"A",{id:!0,class:!0,href:!0});var Lh=n(Oe);Sa=s(Lh,"SPAN",{});var Nh=n(Sa);v(Ct.$$.fragment,Nh),Nh.forEach(t),Lh.forEach(t),xi=c(Cn),qa=s(Cn,"SPAN",{});var Sh=n(qa);wi=i(Sh,"BeitConfig"),Sh.forEach(t),Cn.forEach(t),ln=c(o),H=s(o,"DIV",{class:!0});var nt=n(H);v(zt.$$.fragment,nt),yi=c(nt),we=s(nt,"P",{});var _a=n(we);Ti=i(_a,"This is the configuration class to store the configuration of a "),ra=s(_a,"A",{href:!0});var qh=n(ra);$i=i(qh,"BeitModel"),qh.forEach(t),Bi=i(_a,`. It is used to instantiate an BEiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BEiT `),At=s(_a,"A",{href:!0,rel:!0});var Oh=n(At);Fi=i(Oh,"microsoft/beit-base-patch16-224-in22k"),Oh.forEach(t),Ei=i(_a,` architecture.`),_a.forEach(t),ki=c(nt),Oa=s(nt,"P",{});var Dh=n(Oa);Ii=i(Dh,"Example:"),Dh.forEach(t),Mi=c(nt),v(Lt.$$.fragment,nt),nt.forEach(t),dn=c(o),ye=s(o,"H2",{class:!0});var zn=n(ye);De=s(zn,"A",{id:!0,class:!0,href:!0});var Wh=n(De);Da=s(Wh,"SPAN",{});var Vh=n(Da);v(Nt.$$.fragment,Vh),Vh.forEach(t),Wh.forEach(t),ji=c(zn),Wa=s(zn,"SPAN",{});var Uh=n(Wa);Pi=i(Uh,"BeitFeatureExtractor"),Uh.forEach(t),zn.forEach(t),cn=c(o),J=s(o,"DIV",{class:!0});var rt=n(J);v(St.$$.fragment,rt),Ci=c(rt),Va=s(rt,"P",{});var Rh=n(Va);zi=i(Rh,"Constructs a BEiT feature extractor."),Rh.forEach(t),Ai=c(rt),qt=s(rt,"P",{});var An=n(qt);Li=i(An,"This feature extractor inherits from "),ia=s(An,"A",{href:!0});var Hh=n(ia);Ni=i(Hh,"FeatureExtractionMixin"),Hh.forEach(t),Si=i(An,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),An.forEach(t),qi=c(rt),re=s(rt,"DIV",{class:!0});var va=n(re);v(Ot.$$.fragment,va),Oi=c(va),Ua=s(va,"P",{});var Jh=n(Ua);Di=i(Jh,"Main method to prepare for the model one or several image(s)."),Jh.forEach(t),Wi=c(va),v(We.$$.fragment,va),va.forEach(t),rt.forEach(t),hn=c(o),Te=s(o,"H2",{class:!0});var Ln=n(Te);Ve=s(Ln,"A",{id:!0,class:!0,href:!0});var Xh=n(Ve);Ra=s(Xh,"SPAN",{});var Gh=n(Ra);v(Dt.$$.fragment,Gh),Gh.forEach(t),Xh.forEach(t),Vi=c(Ln),Ha=s(Ln,"SPAN",{});var Kh=n(Ha);Ui=i(Kh,"BeitModel"),Kh.forEach(t),Ln.forEach(t),mn=c(o),te=s(o,"DIV",{class:!0});var ba=n(te);v(Wt.$$.fragment,ba),Ri=c(ba),Vt=s(ba,"P",{});var Nn=n(Vt);Hi=i(Nn,`The bare Beit Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Ut=s(Nn,"A",{href:!0,rel:!0});var Yh=n(Ut);Ji=i(Yh,"torch.nn.Module"),Yh.forEach(t),Xi=i(Nn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nn.forEach(t),Gi=c(ba),L=s(ba,"DIV",{class:!0});var le=n(L);v(Rt.$$.fragment,le),Ki=c(le),$e=s(le,"P",{});var xa=n($e);Yi=i(xa,"The "),la=s(xa,"A",{href:!0});var Zh=n(la);Zi=i(Zh,"BeitModel"),Zh.forEach(t),Qi=i(xa," forward method, overrides the "),Ja=s(xa,"CODE",{});var Qh=n(Ja);el=i(Qh,"__call__"),Qh.forEach(t),tl=i(xa," special method."),xa.forEach(t),ol=c(le),v(Ue.$$.fragment,le),al=c(le),Xa=s(le,"P",{});var em=n(Xa);sl=i(em,"Examples:"),em.forEach(t),nl=c(le),v(Ht.$$.fragment,le),le.forEach(t),ba.forEach(t),pn=c(o),Be=s(o,"H2",{class:!0});var Sn=n(Be);Re=s(Sn,"A",{id:!0,class:!0,href:!0});var tm=n(Re);Ga=s(tm,"SPAN",{});var om=n(Ga);v(Jt.$$.fragment,om),om.forEach(t),tm.forEach(t),rl=c(Sn),Ka=s(Sn,"SPAN",{});var am=n(Ka);il=i(am,"BeitForMaskedImageModeling"),am.forEach(t),Sn.forEach(t),fn=c(o),oe=s(o,"DIV",{class:!0});var wa=n(oe);v(Xt.$$.fragment,wa),ll=c(wa),Gt=s(wa,"P",{});var qn=n(Gt);dl=i(qn,`Beit Model transformer with a \u2018language\u2019 modeling head on top (to predict visual tokens). This model is a PyTorch `),Kt=s(qn,"A",{href:!0,rel:!0});var sm=n(Kt);cl=i(sm,"torch.nn.Module"),sm.forEach(t),hl=i(qn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qn.forEach(t),ml=c(wa),N=s(wa,"DIV",{class:!0});var de=n(N);v(Yt.$$.fragment,de),pl=c(de),Fe=s(de,"P",{});var ya=n(Fe);fl=i(ya,"The "),da=s(ya,"A",{href:!0});var nm=n(da);ul=i(nm,"BeitForMaskedImageModeling"),nm.forEach(t),gl=i(ya," forward method, overrides the "),Ya=s(ya,"CODE",{});var rm=n(Ya);_l=i(rm,"__call__"),rm.forEach(t),vl=i(ya," special method."),ya.forEach(t),bl=c(de),v(He.$$.fragment,de),xl=c(de),Za=s(de,"P",{});var im=n(Za);wl=i(im,"Examples:"),im.forEach(t),yl=c(de),v(Zt.$$.fragment,de),de.forEach(t),wa.forEach(t),un=c(o),Ee=s(o,"H2",{class:!0});var On=n(Ee);Je=s(On,"A",{id:!0,class:!0,href:!0});var lm=n(Je);Qa=s(lm,"SPAN",{});var dm=n(Qa);v(Qt.$$.fragment,dm),dm.forEach(t),lm.forEach(t),Tl=c(On),es=s(On,"SPAN",{});var cm=n(es);$l=i(cm,"BeitForImageClassification"),cm.forEach(t),On.forEach(t),gn=c(o),X=s(o,"DIV",{class:!0});var it=n(X);v(eo.$$.fragment,it),Bl=c(it),ts=s(it,"P",{});var hm=n(ts);Fl=i(hm,`Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final hidden states of the patch tokens) e.g. 
for ImageNet.`),hm.forEach(t),El=c(it),to=s(it,"P",{});var Dn=n(to);kl=i(Dn,"This model is a PyTorch "),oo=s(Dn,"A",{href:!0,rel:!0});var mm=n(oo);Il=i(mm,"torch.nn.Module"),mm.forEach(t),Ml=i(Dn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Dn.forEach(t),jl=c(it),S=s(it,"DIV",{class:!0});var ce=n(S);v(ao.$$.fragment,ce),Pl=c(ce),ke=s(ce,"P",{});var Ta=n(ke);Cl=i(Ta,"The "),ca=s(Ta,"A",{href:!0});var pm=n(ca);zl=i(pm,"BeitForImageClassification"),pm.forEach(t),Al=i(Ta," forward method, overrides the "),os=s(Ta,"CODE",{});var fm=n(os);Ll=i(fm,"__call__"),fm.forEach(t),Nl=i(Ta," special method."),Ta.forEach(t),Sl=c(ce),v(Xe.$$.fragment,ce),ql=c(ce),as=s(ce,"P",{});var um=n(as);Ol=i(um,"Examples:"),um.forEach(t),Dl=c(ce),v(so.$$.fragment,ce),ce.forEach(t),it.forEach(t),_n=c(o),Ie=s(o,"H2",{class:!0});var Wn=n(Ie);Ge=s(Wn,"A",{id:!0,class:!0,href:!0});var gm=n(Ge);ss=s(gm,"SPAN",{});var _m=n(ss);v(no.$$.fragment,_m),_m.forEach(t),gm.forEach(t),Wl=c(Wn),ns=s(Wn,"SPAN",{});var vm=n(ns);Vl=i(vm,"BeitForSemanticSegmentation"),vm.forEach(t),Wn.forEach(t),vn=c(o),G=s(o,"DIV",{class:!0});var lt=n(G);v(ro.$$.fragment,lt),Ul=c(lt),rs=s(lt,"P",{});var bm=n(rs);Rl=i(bm,"Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes."),bm.forEach(t),Hl=c(lt),io=s(lt,"P",{});var Vn=n(io);Jl=i(Vn,"This model is a PyTorch "),lo=s(Vn,"A",{href:!0,rel:!0});var xm=n(lo);Xl=i(xm,"torch.nn.Module"),xm.forEach(t),Gl=i(Vn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vn.forEach(t),Kl=c(lt),q=s(lt,"DIV",{class:!0});var he=n(q);v(co.$$.fragment,he),Yl=c(he),Me=s(he,"P",{});var $a=n(Me);Zl=i($a,"The "),ha=s($a,"A",{href:!0});var wm=n(ha);Ql=i(wm,"BeitForSemanticSegmentation"),wm.forEach(t),ed=i($a," forward method, overrides the "),is=s($a,"CODE",{});var ym=n(is);td=i(ym,"__call__"),ym.forEach(t),od=i($a," special method."),$a.forEach(t),ad=c(he),v(Ke.$$.fragment,he),sd=c(he),ls=s(he,"P",{});var Tm=n(ls);nd=i(Tm,"Examples:"),Tm.forEach(t),rd=c(he),v(ho.$$.fragment,he),he.forEach(t),lt.forEach(t),bn=c(o),je=s(o,"H2",{class:!0});var Un=n(je);Ye=s(Un,"A",{id:!0,class:!0,href:!0});var $m=n(Ye);ds=s($m,"SPAN",{});var Bm=n(ds);v(mo.$$.fragment,Bm),Bm.forEach(t),$m.forEach(t),id=c(Un),cs=s(Un,"SPAN",{});var Fm=n(cs);ld=i(Fm,"FlaxBeitModel"),Fm.forEach(t),Un.forEach(t),xn=c(o),I=s(o,"DIV",{class:!0});var W=n(I);v(po.$$.fragment,W),dd=c(W),hs=s(W,"P",{});var Em=n(hs);cd=i(Em,"The bare Beit Model transformer outputting raw hidden-states without any specific head on top."),Em.forEach(t),hd=c(W),fo=s(W,"P",{});var Rn=n(fo);md=i(Rn,"This model inherits from "),ma=s(Rn,"A",{href:!0});var km=n(ma);pd=i(km,"FlaxPreTrainedModel"),km.forEach(t),fd=i(Rn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Rn.forEach(t),ud=c(W),uo=s(W,"P",{});var Hn=n(uo);gd=i(Hn,"This model is also a Flax Linen "),go=s(Hn,"A",{href:!0,rel:!0});var Im=n(go);_d=i(Im,"flax.linen.Module"),Im.forEach(t),vd=i(Hn,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Hn.forEach(t),bd=c(W),ms=s(W,"P",{});var Mm=n(ms);xd=i(Mm,"Finally, this model supports inherent JAX features such as:"),Mm.forEach(t),wd=c(W),ae=s(W,"UL",{});var dt=n(ae);ps=s(dt,"LI",{});var jm=n(ps);_o=s(jm,"A",{href:!0,rel:!0});var Pm=n(_o);yd=i(Pm,"Just-In-Time (JIT) compilation"),Pm.forEach(t),jm.forEach(t),Td=c(dt),fs=s(dt,"LI",{});var Cm=n(fs);vo=s(Cm,"A",{href:!0,rel:!0});var zm=n(vo);$d=i(zm,"Automatic Differentiation"),zm.forEach(t),Cm.forEach(t),Bd=c(dt),us=s(dt,"LI",{});var Am=n(us);bo=s(Am,"A",{href:!0,rel:!0});var Lm=n(bo);Fd=i(Lm,"Vectorization"),Lm.forEach(t),Am.forEach(t),Ed=c(dt),gs=s(dt,"LI",{});var Nm=n(gs);xo=s(Nm,"A",{href:!0,rel:!0});var Sm=n(xo);kd=i(Sm,"Parallelization"),Sm.forEach(t),Nm.forEach(t),dt.forEach(t),Id=c(W),O=s(W,"DIV",{class:!0});var me=n(O);v(wo.$$.fragment,me),Md=c(me),Pe=s(me,"P",{});var Ba=n(Pe);jd=i(Ba,"The "),_s=s(Ba,"CODE",{});var qm=n(_s);Pd=i(qm,"FlaxBeitPreTrainedModel"),qm.forEach(t),Cd=i(Ba," forward method, overrides the "),vs=s(Ba,"CODE",{});var Om=n(vs);zd=i(Om,"__call__"),Om.forEach(t),Ad=i(Ba," special method."),Ba.forEach(t),Ld=c(me),v(Ze.$$.fragment,me),Nd=c(me),bs=s(me,"P",{});var Dm=n(bs);Sd=i(Dm,"Examples:"),Dm.forEach(t),qd=c(me),v(yo.$$.fragment,me),me.forEach(t),W.forEach(t),wn=c(o),Ce=s(o,"H2",{class:!0});var Jn=n(Ce);Qe=s(Jn,"A",{id:!0,class:!0,href:!0});var Wm=n(Qe);xs=s(Wm,"SPAN",{});var Vm=n(xs);v(To.$$.fragment,Vm),Vm.forEach(t),Wm.forEach(t),Od=c(Jn),ws=s(Jn,"SPAN",{});var Um=n(ws);Dd=i(Um,"FlaxBeitForMaskedImageModeling"),Um.forEach(t),Jn.forEach(t),yn=c(o),M=s(o,"DIV",{class:!0});var V=n(M);v($o.$$.fragment,V),Wd=c(V),ys=s(V,"P",{});var Rm=n(ys);Vd=i(Rm,"Beit Model transformer with a \u2018language\u2019 modeling head on top (to predict visual tokens)."),Rm.forEach(t),Ud=c(V),Bo=s(V,"P",{});var Xn=n(Bo);Rd=i(Xn,"This model inherits from "),pa=s(Xn,"A",{href:!0});var Hm=n(pa);Hd=i(Hm,"FlaxPreTrainedModel"),Hm.forEach(t),Jd=i(Xn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Xn.forEach(t),Xd=c(V),Fo=s(V,"P",{});var Gn=n(Fo);Gd=i(Gn,"This model is also a Flax Linen "),Eo=s(Gn,"A",{href:!0,rel:!0});var Jm=n(Eo);Kd=i(Jm,"flax.linen.Module"),Jm.forEach(t),Yd=i(Gn,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Gn.forEach(t),Zd=c(V),Ts=s(V,"P",{});var Xm=n(Ts);Qd=i(Xm,"Finally, this model supports inherent JAX features such as:"),Xm.forEach(t),ec=c(V),se=s(V,"UL",{});var ct=n(se);$s=s(ct,"LI",{});var Gm=n($s);ko=s(Gm,"A",{href:!0,rel:!0});var Km=n(ko);tc=i(Km,"Just-In-Time (JIT) compilation"),Km.forEach(t),Gm.forEach(t),oc=c(ct),Bs=s(ct,"LI",{});var Ym=n(Bs);Io=s(Ym,"A",{href:!0,rel:!0});var Zm=n(Io);ac=i(Zm,"Automatic Differentiation"),Zm.forEach(t),Ym.forEach(t),sc=c(ct),Fs=s(ct,"LI",{});var Qm=n(Fs);Mo=s(Qm,"A",{href:!0,rel:!0});var ep=n(Mo);nc=i(ep,"Vectorization"),ep.forEach(t),Qm.forEach(t),rc=c(ct),Es=s(ct,"LI",{});var tp=n(Es);jo=s(tp,"A",{href:!0,rel:!0});var op=n(jo);ic=i(op,"Parallelization"),op.forEach(t),tp.forEach(t),ct.forEach(t),lc=c(V),z=s(V,"DIV",{class:!0});var Q=n(z);v(Po.$$.fragment,Q),dc=c(Q),ze=s(Q,"P",{});var Fa=n(ze);cc=i(Fa,"The "),ks=s(Fa,"CODE",{});var ap=n(ks);hc=i(ap,"FlaxBeitPreTrainedModel"),ap.forEach(t),mc=i(Fa," forward method, overrides the "),Is=s(Fa,"CODE",{});var sp=n(Is);pc=i(sp,"__call__"),sp.forEach(t),fc=i(Fa," special method."),Fa.forEach(t),uc=c(Q),v(et.$$.fragment,Q),gc=c(Q),Ae=s(Q,"P",{});var Ea=n(Ae);_c=i(Ea,"bool_masked_pos ("),Ms=s(Ea,"CODE",{});var np=n(Ms);vc=i(np,"numpy.ndarray"),np.forEach(t),bc=i(Ea," of shape "),js=s(Ea,"CODE",{});var rp=n(js);xc=i(rp,"(batch_size, num_patches)"),rp.forEach(t),wc=i(Ea,`): Boolean masked positions. Indicates which patches are masked (1) and which aren\u2019t (0).`),Ea.forEach(t),yc=c(Q),Ps=s(Q,"P",{});var ip=n(Ps);Tc=i(ip,"Examples:"),ip.forEach(t),$c=c(Q),v(Co.$$.fragment,Q),Q.forEach(t),V.forEach(t),Tn=c(o),Le=s(o,"H2",{class:!0});var Kn=n(Le);tt=s(Kn,"A",{id:!0,class:!0,href:!0});var lp=n(tt);Cs=s(lp,"SPAN",{});var dp=n(Cs);v(zo.$$.fragment,dp),dp.forEach(t),lp.forEach(t),Bc=c(Kn),zs=s(Kn,"SPAN",{});var cp=n(zs);Fc=i(cp,"FlaxBeitForImageClassification"),cp.forEach(t),Kn.forEach(t),$n=c(o),j=s(o,"DIV",{class:!0});var U=n(j);v(Ao.$$.fragment,U),Ec=c(U),As=s(U,"P",{});var hp=n(As);kc=i(hp,`Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final hidden states of the patch tokens) e.g. for ImageNet.`),hp.forEach(t),Ic=c(U),Lo=s(U,"P",{});var Yn=n(Lo);Mc=i(Yn,"This model inherits from "),fa=s(Yn,"A",{href:!0});var mp=n(fa);jc=i(mp,"FlaxPreTrainedModel"),mp.forEach(t),Pc=i(Yn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Yn.forEach(t),Cc=c(U),No=s(U,"P",{});var Zn=n(No);zc=i(Zn,"This model is also a Flax Linen "),So=s(Zn,"A",{href:!0,rel:!0});var pp=n(So);Ac=i(pp,"flax.linen.Module"),pp.forEach(t),Lc=i(Zn,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Zn.forEach(t),Nc=c(U),Ls=s(U,"P",{});var fp=n(Ls);Sc=i(fp,"Finally, this model supports inherent JAX features such as:"),fp.forEach(t),qc=c(U),ne=s(U,"UL",{});var ht=n(ne);Ns=s(ht,"LI",{});var up=n(Ns);qo=s(up,"A",{href:!0,rel:!0});var gp=n(qo);Oc=i(gp,"Just-In-Time (JIT) compilation"),gp.forEach(t),up.forEach(t),Dc=c(ht),Ss=s(ht,"LI",{});var _p=n(Ss);Oo=s(_p,"A",{href:!0,rel:!0});var vp=n(Oo);Wc=i(vp,"Automatic Differentiation"),vp.forEach(t),_p.forEach(t),Vc=c(ht),qs=s(ht,"LI",{});var bp=n(qs);Do=s(bp,"A",{href:!0,rel:!0});var xp=n(Do);Uc=i(xp,"Vectorization"),xp.forEach(t),bp.forEach(t),Rc=c(ht),Os=s(ht,"LI",{});var wp=n(Os);Wo=s(wp,"A",{href:!0,rel:!0});var yp=n(Wo);Hc=i(yp,"Parallelization"),yp.forEach(t),wp.forEach(t),ht.forEach(t),Jc=c(U),D=s(U,"DIV",{class:!0});var pe=n(D);v(Vo.$$.fragment,pe),Xc=c(pe),Ne=s(pe,"P",{});var ka=n(Ne);Gc=i(ka,"The "),Ds=s(ka,"CODE",{});var Tp=n(Ds);Kc=i(Tp,"FlaxBeitPreTrainedModel"),Tp.forEach(t),Yc=i(ka," forward method, overrides the "),Ws=s(ka,"CODE",{});var $p=n(Ws);Zc=i($p,"__call__"),$p.forEach(t),Qc=i(ka," special method."),ka.forEach(t),eh=c(pe),v(ot.$$.fragment,pe),th=c(pe),Vs=s(pe,"P",{});var Bp=n(Vs);oh=i(Bp,"Example:"),Bp.forEach(t),ah=c(pe),v(Uo.$$.fragment,pe),pe.forEach(t),U.forEach(t),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(qp)),l(T,"id","beit"),l(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(T,"href","#beit"),l(f,"class","relative group"),l(Se,"id","overview"),l(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Se,"href","#overview"),l(fe,"class","relative 
group"),l(ut,"href","https://arxiv.org/abs/2106.08254"),l(ut,"rel","nofollow"),l(gt,"href","https://arxiv.org/abs/2010.11929"),l(gt,"rel","nofollow"),l(_t,"href","https://arxiv.org/abs/2102.12092"),l(_t,"rel","nofollow"),l(Ko,"href","vit"),l(Yo,"href","deit"),l(vt,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer"),l(vt,"rel","nofollow"),l(Zo,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTFeatureExtractor"),l(Qo,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor"),l(ea,"href","/docs/transformers/v4.15.0/en/model_doc/vit#transformers.ViTForImageClassification"),l(ta,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitForImageClassification"),l(xt,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT"),l(xt,"rel","nofollow"),l(oa,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitFeatureExtractor"),l(yt,"href","https://huggingface.co/models?search=microsoft/beit"),l(yt,"rel","nofollow"),l(Tt,"href","http://www.image-net.org/"),l(Tt,"rel","nofollow"),l($t,"href","http://www.image-net.org/challenges/LSVRC/2012/"),l($t,"rel","nofollow"),l(aa,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitConfig"),l(Bt,"href","https://huggingface.co/nielsr"),l(Bt,"rel","nofollow"),l(Ft,"href","https://huggingface.co/kamalkraj"),l(Ft,"rel","nofollow"),l(Et,"href","https://github.com/microsoft/unilm/tree/master/beit"),l(Et,"rel","nofollow"),l(qe,"id","transformers.models.beit.modeling_beit.BeitModelOutputWithPooling"),l(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(qe,"href","#transformers.models.beit.modeling_beit.BeitModelOutputWithPooling"),l(_e,"class","relative group"),l(sa,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitModel"),l(ve,"class","docstring"),l(na,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.FlaxBeitModel"),l(be,"class","docstring"),l(Oe,"id","transformers.BeitConfig"),l(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Oe,"href","#transformers.BeitConfig"),l(xe,"class","relative group"),l(ra,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitModel"),l(At,"href","https://huggingface.co/microsoft/beit-base-patch16-224-in22k"),l(At,"rel","nofollow"),l(H,"class","docstring"),l(De,"id","transformers.BeitFeatureExtractor"),l(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(De,"href","#transformers.BeitFeatureExtractor"),l(ye,"class","relative group"),l(ia,"href","/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.feature_extraction_utils.FeatureExtractionMixin"),l(re,"class","docstring"),l(J,"class","docstring"),l(Ve,"id","transformers.BeitModel"),l(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ve,"href","#transformers.BeitModel"),l(Te,"class","relative 
group"),l(Ut,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ut,"rel","nofollow"),l(la,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitModel"),l(L,"class","docstring"),l(te,"class","docstring"),l(Re,"id","transformers.BeitForMaskedImageModeling"),l(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Re,"href","#transformers.BeitForMaskedImageModeling"),l(Be,"class","relative group"),l(Kt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Kt,"rel","nofollow"),l(da,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitForMaskedImageModeling"),l(N,"class","docstring"),l(oe,"class","docstring"),l(Je,"id","transformers.BeitForImageClassification"),l(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Je,"href","#transformers.BeitForImageClassification"),l(Ee,"class","relative group"),l(oo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(oo,"rel","nofollow"),l(ca,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitForImageClassification"),l(S,"class","docstring"),l(X,"class","docstring"),l(Ge,"id","transformers.BeitForSemanticSegmentation"),l(Ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ge,"href","#transformers.BeitForSemanticSegmentation"),l(Ie,"class","relative group"),l(lo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(lo,"rel","nofollow"),l(ha,"href","/docs/transformers/v4.15.0/en/model_doc/beit#transformers.BeitForSemanticSegmentation"),l(q,"class","docstring"),l(G,"class","docstring"),l(Ye,"id","transformers.FlaxBeitModel"),l(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ye,"href","#transformers.FlaxBeitModel"),l(je,"class","relative group"),l(ma,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(go,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(go,"rel","nofollow"),l(_o,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(_o,"rel","nofollow"),l(vo,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(vo,"rel","nofollow"),l(bo,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(bo,"rel","nofollow"),l(xo,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(xo,"rel","nofollow"),l(O,"class","docstring"),l(I,"class","docstring"),l(Qe,"id","transformers.FlaxBeitForMaskedImageModeling"),l(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Qe,"href","#transformers.FlaxBeitForMaskedImageModeling"),l(Ce,"class","relative 
group"),l(pa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(Eo,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(Eo,"rel","nofollow"),l(ko,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(ko,"rel","nofollow"),l(Io,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Io,"rel","nofollow"),l(Mo,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Mo,"rel","nofollow"),l(jo,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(jo,"rel","nofollow"),l(z,"class","docstring"),l(M,"class","docstring"),l(tt,"id","transformers.FlaxBeitForImageClassification"),l(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(tt,"href","#transformers.FlaxBeitForImageClassification"),l(Le,"class","relative group"),l(fa,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.FlaxPreTrainedModel"),l(So,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(So,"rel","nofollow"),l(qo,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(qo,"rel","nofollow"),l(Oo,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Oo,"rel","nofollow"),l(Do,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Do,"rel","nofollow"),l(Wo,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(Wo,"rel","nofollow"),l(D,"class","docstring"),l(j,"class","docstring")},m(o,m){e(document.head,h),p(o,$,m),p(o,f,m),e(f,T),e(T,B),b(g,B,null),e(f,u),e(f,F),e(F,Qn),p(o,Ks,m),p(o,fe,m),e(fe,Se),e(Se,Ia),b(ft,Ia,null),e(fe,er),e(fe,Ma),e(Ma,tr),p(o,Ys,m),p(o,K,m),e(K,or),e(K,ut),e(ut,ar),e(K,sr),e(K,gt),e(gt,nr),e(K,rr),e(K,_t),e(_t,ir),e(K,lr),p(o,Zs,m),p(o,Jo,m),e(Jo,dr),p(o,Qs,m),p(o,Xo,m),e(Xo,ja),e(ja,cr),p(o,en,m),p(o,Go,m),e(Go,hr),p(o,tn,m),p(o,C,m),e(C,k),e(k,mr),e(k,Ko),e(Ko,pr),e(k,fr),e(k,Yo),e(Yo,ur),e(k,gr),e(k,vt),e(vt,_r),e(k,vr),e(k,Zo),e(Zo,br),e(k,xr),e(k,Qo),e(Qo,wr),e(k,yr),e(k,ea),e(ea,Tr),e(k,$r),e(k,ta),e(ta,Br),e(k,Fr),e(C,Er),e(C,bt),e(bt,kr),e(bt,xt),e(xt,Ir),e(bt,Mr),e(C,jr),e(C,wt),e(wt,Pr),e(wt,oa),e(oa,Cr),e(wt,zr),e(C,Ar),e(C,ue),e(ue,Lr),e(ue,Pa),e(Pa,Nr),e(ue,Sr),e(ue,yt),e(yt,qr),e(ue,Or),e(C,Dr),e(C,ge),e(ge,Wr),e(ge,Tt),e(Tt,Vr),e(ge,Ur),e(ge,$t),e($t,Rr),e(ge,Hr),e(C,Jr),e(C,R),e(R,Xr),e(R,Ca),e(Ca,Gr),e(R,Kr),e(R,za),e(za,Yr),e(R,Zr),e(R,aa),e(aa,Qr),e(R,ei),e(R,Aa),e(Aa,ti),e(R,oi),p(o,on,m),p(o,Y,m),e(Y,ai),e(Y,Bt),e(Bt,si),e(Y,ni),e(Y,Ft),e(Ft,ri),e(Y,ii),e(Y,Et),e(Et,li),e(Y,di),p(o,an,m),p(o,_e,m),e(_e,qe),e(qe,La),b(kt,La,null),e(_e,ci),e(_e,Na),e(Na,hi),p(o,sn,m),p(o,ve,m),b(It,ve,null),e(ve,mi),e(ve,Mt),e(Mt,pi),e(Mt,sa),e(sa,fi),e(Mt,ui),p(o,nn,m),p(o,be,m),b(jt,be,null),e(be,gi),e(be,Pt),e(Pt,_i),e(Pt,na),e(na,vi),e(Pt,bi),p(o,rn,m),p(o,xe,m),e(xe,Oe),e(Oe,Sa),b(Ct,Sa,null),e(xe,xi),e(xe,qa),e(qa,wi),p(o,ln,m),p(o,H,m),b(zt,H,null),e(H,yi),e(H,we),e(we,Ti),e(we,ra),e(ra,$i),e(we,Bi),e(we,At),e(At,Fi),e(we,Ei),e(H,ki),e(H,Oa),e(Oa,Ii),e(H,Mi),b(Lt,H,null),p(o,dn,m),p(o,ye,m),e(ye,De),e(De,Da),b(Nt,Da,null),e(ye,ji),e(ye,Wa),e(Wa,Pi),p(o,cn,m),p(o,J,m),b(St,J,null),e(J,Ci),e(J,Va),e(Va,zi),e(J,Ai),e(J,qt),e(qt,Li),e(qt,ia),e(ia,Ni),e(qt,Si),e(J,qi),e(J,re),b(Ot,re,null),e(re,Oi),e(re,Ua),e(Ua,Di),e(re,Wi),b(We,re,null),p(o,hn,m),p(o,Te,m),e(Te,Ve),e(Ve,Ra),b(
Dt,Ra,null),e(Te,Vi),e(Te,Ha),e(Ha,Ui),p(o,mn,m),p(o,te,m),b(Wt,te,null),e(te,Ri),e(te,Vt),e(Vt,Hi),e(Vt,Ut),e(Ut,Ji),e(Vt,Xi),e(te,Gi),e(te,L),b(Rt,L,null),e(L,Ki),e(L,$e),e($e,Yi),e($e,la),e(la,Zi),e($e,Qi),e($e,Ja),e(Ja,el),e($e,tl),e(L,ol),b(Ue,L,null),e(L,al),e(L,Xa),e(Xa,sl),e(L,nl),b(Ht,L,null),p(o,pn,m),p(o,Be,m),e(Be,Re),e(Re,Ga),b(Jt,Ga,null),e(Be,rl),e(Be,Ka),e(Ka,il),p(o,fn,m),p(o,oe,m),b(Xt,oe,null),e(oe,ll),e(oe,Gt),e(Gt,dl),e(Gt,Kt),e(Kt,cl),e(Gt,hl),e(oe,ml),e(oe,N),b(Yt,N,null),e(N,pl),e(N,Fe),e(Fe,fl),e(Fe,da),e(da,ul),e(Fe,gl),e(Fe,Ya),e(Ya,_l),e(Fe,vl),e(N,bl),b(He,N,null),e(N,xl),e(N,Za),e(Za,wl),e(N,yl),b(Zt,N,null),p(o,un,m),p(o,Ee,m),e(Ee,Je),e(Je,Qa),b(Qt,Qa,null),e(Ee,Tl),e(Ee,es),e(es,$l),p(o,gn,m),p(o,X,m),b(eo,X,null),e(X,Bl),e(X,ts),e(ts,Fl),e(X,El),e(X,to),e(to,kl),e(to,oo),e(oo,Il),e(to,Ml),e(X,jl),e(X,S),b(ao,S,null),e(S,Pl),e(S,ke),e(ke,Cl),e(ke,ca),e(ca,zl),e(ke,Al),e(ke,os),e(os,Ll),e(ke,Nl),e(S,Sl),b(Xe,S,null),e(S,ql),e(S,as),e(as,Ol),e(S,Dl),b(so,S,null),p(o,_n,m),p(o,Ie,m),e(Ie,Ge),e(Ge,ss),b(no,ss,null),e(Ie,Wl),e(Ie,ns),e(ns,Vl),p(o,vn,m),p(o,G,m),b(ro,G,null),e(G,Ul),e(G,rs),e(rs,Rl),e(G,Hl),e(G,io),e(io,Jl),e(io,lo),e(lo,Xl),e(io,Gl),e(G,Kl),e(G,q),b(co,q,null),e(q,Yl),e(q,Me),e(Me,Zl),e(Me,ha),e(ha,Ql),e(Me,ed),e(Me,is),e(is,td),e(Me,od),e(q,ad),b(Ke,q,null),e(q,sd),e(q,ls),e(ls,nd),e(q,rd),b(ho,q,null),p(o,bn,m),p(o,je,m),e(je,Ye),e(Ye,ds),b(mo,ds,null),e(je,id),e(je,cs),e(cs,ld),p(o,xn,m),p(o,I,m),b(po,I,null),e(I,dd),e(I,hs),e(hs,cd),e(I,hd),e(I,fo),e(fo,md),e(fo,ma),e(ma,pd),e(fo,fd),e(I,ud),e(I,uo),e(uo,gd),e(uo,go),e(go,_d),e(uo,vd),e(I,bd),e(I,ms),e(ms,xd),e(I,wd),e(I,ae),e(ae,ps),e(ps,_o),e(_o,yd),e(ae,Td),e(ae,fs),e(fs,vo),e(vo,$d),e(ae,Bd),e(ae,us),e(us,bo),e(bo,Fd),e(ae,Ed),e(ae,gs),e(gs,xo),e(xo,kd),e(I,Id),e(I,O),b(wo,O,null),e(O,Md),e(O,Pe),e(Pe,jd),e(Pe,_s),e(_s,Pd),e(Pe,Cd),e(Pe,vs),e(vs,zd),e(Pe,Ad),e(O,Ld),b(Ze,O,null),e(O,Nd),e(O,bs),e(bs,Sd),e(O,qd),b(yo,O,null),p(o,wn,m),p(o,Ce,m),e(Ce,Qe),e(Qe,xs),b(To,xs,null),e(Ce,Od),e(Ce,ws),e(ws,Dd),p(o,yn,m),p(o,M,m),b($o,M,null),e(M,Wd),e(M,ys),e(ys,Vd),e(M,Ud),e(M,Bo),e(Bo,Rd),e(Bo,pa),e(pa,Hd),e(Bo,Jd),e(M,Xd),e(M,Fo),e(Fo,Gd),e(Fo,Eo),e(Eo,Kd),e(Fo,Yd),e(M,Zd),e(M,Ts),e(Ts,Qd),e(M,ec),e(M,se),e(se,$s),e($s,ko),e(ko,tc),e(se,oc),e(se,Bs),e(Bs,Io),e(Io,ac),e(se,sc),e(se,Fs),e(Fs,Mo),e(Mo,nc),e(se,rc),e(se,Es),e(Es,jo),e(jo,ic),e(M,lc),e(M,z),b(Po,z,null),e(z,dc),e(z,ze),e(ze,cc),e(ze,ks),e(ks,hc),e(ze,mc),e(ze,Is),e(Is,pc),e(ze,fc),e(z,uc),b(et,z,null),e(z,gc),e(z,Ae),e(Ae,_c),e(Ae,Ms),e(Ms,vc),e(Ae,bc),e(Ae,js),e(js,xc),e(Ae,wc),e(z,yc),e(z,Ps),e(Ps,Tc),e(z,$c),b(Co,z,null),p(o,Tn,m),p(o,Le,m),e(Le,tt),e(tt,Cs),b(zo,Cs,null),e(Le,Bc),e(Le,zs),e(zs,Fc),p(o,$n,m),p(o,j,m),b(Ao,j,null),e(j,Ec),e(j,As),e(As,kc),e(j,Ic),e(j,Lo),e(Lo,Mc),e(Lo,fa),e(fa,jc),e(Lo,Pc),e(j,Cc),e(j,No),e(No,zc),e(No,So),e(So,Ac),e(No,Lc),e(j,Nc),e(j,Ls),e(Ls,Sc),e(j,qc),e(j,ne),e(ne,Ns),e(Ns,qo),e(qo,Oc),e(ne,Dc),e(ne,Ss),e(Ss,Oo),e(Oo,Wc),e(ne,Vc),e(ne,qs),e(qs,Do),e(Do,Uc),e(ne,Rc),e(ne,Os),e(Os,Wo),e(Wo,Hc),e(j,Jc),e(j,D),b(Vo,D,null),e(D,Xc),e(D,Ne),e(Ne,Gc),e(Ne,Ds),e(Ds,Kc),e(Ne,Yc),e(Ne,Ws),e(Ws,Zc),e(Ne,Qc),e(D,eh),b(ot,D,null),e(D,th),e(D,Vs),e(Vs,oh),e(D,ah),b(Uo,D,null),Bn=!0},p(o,[m]){const Ro={};m&2&&(Ro.$$scope={dirty:m,ctx:o}),We.$set(Ro);const Us={};m&2&&(Us.$$scope={dirty:m,ctx:o}),Ue.$set(Us);const Rs={};m&2&&(Rs.$$scope={dirty:m,ctx:o}),He.$set(Rs);const Hs={};m&2&&(Hs.$$scope={dirty:m,ctx:o}),Xe.$set(Hs);const Ho={};m&2&&(Ho.$$scope={dirty:m,ctx:o}),Ke.$set(Ho);const 
Js={};m&2&&(Js.$$scope={dirty:m,ctx:o}),Ze.$set(Js);const Xs={};m&2&&(Xs.$$scope={dirty:m,ctx:o}),et.$set(Xs);const Gs={};m&2&&(Gs.$$scope={dirty:m,ctx:o}),ot.$set(Gs)},i(o){Bn||(x(g.$$.fragment,o),x(ft.$$.fragment,o),x(kt.$$.fragment,o),x(It.$$.fragment,o),x(jt.$$.fragment,o),x(Ct.$$.fragment,o),x(zt.$$.fragment,o),x(Lt.$$.fragment,o),x(Nt.$$.fragment,o),x(St.$$.fragment,o),x(Ot.$$.fragment,o),x(We.$$.fragment,o),x(Dt.$$.fragment,o),x(Wt.$$.fragment,o),x(Rt.$$.fragment,o),x(Ue.$$.fragment,o),x(Ht.$$.fragment,o),x(Jt.$$.fragment,o),x(Xt.$$.fragment,o),x(Yt.$$.fragment,o),x(He.$$.fragment,o),x(Zt.$$.fragment,o),x(Qt.$$.fragment,o),x(eo.$$.fragment,o),x(ao.$$.fragment,o),x(Xe.$$.fragment,o),x(so.$$.fragment,o),x(no.$$.fragment,o),x(ro.$$.fragment,o),x(co.$$.fragment,o),x(Ke.$$.fragment,o),x(ho.$$.fragment,o),x(mo.$$.fragment,o),x(po.$$.fragment,o),x(wo.$$.fragment,o),x(Ze.$$.fragment,o),x(yo.$$.fragment,o),x(To.$$.fragment,o),x($o.$$.fragment,o),x(Po.$$.fragment,o),x(et.$$.fragment,o),x(Co.$$.fragment,o),x(zo.$$.fragment,o),x(Ao.$$.fragment,o),x(Vo.$$.fragment,o),x(ot.$$.fragment,o),x(Uo.$$.fragment,o),Bn=!0)},o(o){w(g.$$.fragment,o),w(ft.$$.fragment,o),w(kt.$$.fragment,o),w(It.$$.fragment,o),w(jt.$$.fragment,o),w(Ct.$$.fragment,o),w(zt.$$.fragment,o),w(Lt.$$.fragment,o),w(Nt.$$.fragment,o),w(St.$$.fragment,o),w(Ot.$$.fragment,o),w(We.$$.fragment,o),w(Dt.$$.fragment,o),w(Wt.$$.fragment,o),w(Rt.$$.fragment,o),w(Ue.$$.fragment,o),w(Ht.$$.fragment,o),w(Jt.$$.fragment,o),w(Xt.$$.fragment,o),w(Yt.$$.fragment,o),w(He.$$.fragment,o),w(Zt.$$.fragment,o),w(Qt.$$.fragment,o),w(eo.$$.fragment,o),w(ao.$$.fragment,o),w(Xe.$$.fragment,o),w(so.$$.fragment,o),w(no.$$.fragment,o),w(ro.$$.fragment,o),w(co.$$.fragment,o),w(Ke.$$.fragment,o),w(ho.$$.fragment,o),w(mo.$$.fragment,o),w(po.$$.fragment,o),w(wo.$$.fragment,o),w(Ze.$$.fragment,o),w(yo.$$.fragment,o),w(To.$$.fragment,o),w($o.$$.fragment,o),w(Po.$$.fragment,o),w(et.$$.fragment,o),w(Co.$$.fragment,o),w(zo.$$.fragment,o),w(Ao.$$.fragment,o),w(Vo.$$.fragment,o),w(ot.$$.fragment,o),w(Uo.$$.fragment,o),Bn=!1},d(o){t(h),o&&t($),o&&t(f),y(g),o&&t(Ks),o&&t(fe),y(ft),o&&t(Ys),o&&t(K),o&&t(Zs),o&&t(Jo),o&&t(Qs),o&&t(Xo),o&&t(en),o&&t(Go),o&&t(tn),o&&t(C),o&&t(on),o&&t(Y),o&&t(an),o&&t(_e),y(kt),o&&t(sn),o&&t(ve),y(It),o&&t(nn),o&&t(be),y(jt),o&&t(rn),o&&t(xe),y(Ct),o&&t(ln),o&&t(H),y(zt),y(Lt),o&&t(dn),o&&t(ye),y(Nt),o&&t(cn),o&&t(J),y(St),y(Ot),y(We),o&&t(hn),o&&t(Te),y(Dt),o&&t(mn),o&&t(te),y(Wt),y(Rt),y(Ue),y(Ht),o&&t(pn),o&&t(Be),y(Jt),o&&t(fn),o&&t(oe),y(Xt),y(Yt),y(He),y(Zt),o&&t(un),o&&t(Ee),y(Qt),o&&t(gn),o&&t(X),y(eo),y(ao),y(Xe),y(so),o&&t(_n),o&&t(Ie),y(no),o&&t(vn),o&&t(G),y(ro),y(co),y(Ke),y(ho),o&&t(bn),o&&t(je),y(mo),o&&t(xn),o&&t(I),y(po),y(wo),y(Ze),y(yo),o&&t(wn),o&&t(Ce),y(To),o&&t(yn),o&&t(M),y($o),y(Po),y(et),y(Co),o&&t(Tn),o&&t(Le),y(zo),o&&t($n),o&&t(j),y(Ao),y(Vo),y(ot),y(Uo)}}}const qp={local:"beit",sections:[{local:"overview",title:"Overview"},{local:"transformers.models.beit.modeling_beit.BeitModelOutputWithPooling",title:"BEiT specific 
outputs"},{local:"transformers.BeitConfig",title:"BeitConfig"},{local:"transformers.BeitFeatureExtractor",title:"BeitFeatureExtractor"},{local:"transformers.BeitModel",title:"BeitModel"},{local:"transformers.BeitForMaskedImageModeling",title:"BeitForMaskedImageModeling"},{local:"transformers.BeitForImageClassification",title:"BeitForImageClassification"},{local:"transformers.BeitForSemanticSegmentation",title:"BeitForSemanticSegmentation"},{local:"transformers.FlaxBeitModel",title:"FlaxBeitModel"},{local:"transformers.FlaxBeitForMaskedImageModeling",title:"FlaxBeitForMaskedImageModeling"},{local:"transformers.FlaxBeitForImageClassification",title:"FlaxBeitForImageClassification"}],title:"BEiT"};function Op(E,h,$){let{fw:f}=h;return E.$$set=T=>{"fw"in T&&$(0,f=T.fw)},[f]}class Jp extends Fp{constructor(h){super();Ep(this,h,Op,Sp,kp,{fw:0})}}export{Jp as default,qp as metadata};
9,991
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/segformer.mdx-1506d4ef.js
import{S as Fi,i as Ii,s as Pi,e as n,k as d,w as _,t as r,L as Ci,c as s,d as o,m as c,a as i,x as v,h as a,b as l,M as Mi,J as e,g as f,y as b,q as S,o as w,B as y}from"../../chunks/vendor-b1433968.js";import{T as cr}from"../../chunks/Tip-c3840994.js";import{D as de}from"../../chunks/Docstring-ff504c58.js";import{C as mr}from"../../chunks/CodeBlock-a320dbd7.js";import{I as xe}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function ki(D){let h,x;return{c(){h=n("p"),x=r(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(g){h=s(g,"P",{});var p=i(h);x=a(p,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),p.forEach(o)},m(g,p){f(g,h,p),e(h,x)},d(g){g&&o(h)}}}function zi(D){let h,x,g,p,I;return{c(){h=n("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),p=r("Module"),I=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){h=s(u,"P",{});var $=i(h);x=a($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s($,"CODE",{});var C=i(g);p=a(C,"Module"),C.forEach(o),I=a($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(u,$){f(u,h,$),e(h,x),e(h,g),e(g,p),e(h,I)},d(u){u&&o(h)}}}function ji(D){let h,x,g,p,I;return{c(){h=n("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),p=r("Module"),I=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){h=s(u,"P",{});var $=i(h);x=a($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s($,"CODE",{});var C=i(g);p=a(C,"Module"),C.forEach(o),I=a($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(u,$){f(u,h,$),e(h,x),e(h,g),e(g,p),e(h,I)},d(u){u&&o(h)}}}function Ai(D){let h,x,g,p,I;return{c(){h=n("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=n("code"),p=r("Module"),I=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(u){h=s(u,"P",{});var $=i(h);x=a($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s($,"CODE",{});var C=i(g);p=a(C,"Module"),C.forEach(o),I=a($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(o)},m(u,$){f(u,h,$),e(h,x),e(h,g),e(g,p),e(h,I)},d(u){u&&o(h)}}}function Li(D){let 
h,x,g,p,I,u,$,C,fr,Fo,X,ce,Lt,Ee,hr,Nt,gr,Io,me,pr,Fe,ur,_r,Po,mt,vr,Co,ft,qt,br,Mo,fe,Sr,Ie,wr,yr,ko,Pe,ss,zo,W,$r,Ce,Tr,xr,Me,Er,Fr,jo,ht,Ir,Ao,M,L,Pr,gt,Cr,Mr,pt,kr,zr,ut,jr,Ar,ke,Lr,Nr,qr,ze,Dr,je,Or,Hr,Wr,Z,Br,_t,Rr,Ur,Ae,Vr,Gr,Kr,T,Jr,vt,Xr,Zr,Dt,Yr,Qr,Ot,ea,ta,Ht,oa,ra,Wt,aa,na,Bt,sa,ia,bt,la,da,Rt,ca,ma,Ut,fa,ha,ga,Vt,pa,Lo,F,ua,Gt,_a,va,Kt,ba,Sa,Jt,wa,ya,Xt,$a,Ta,Zt,xa,Ea,Yt,Fa,Ia,No,Y,he,Qt,Le,Pa,eo,Ca,qo,P,Ne,Ma,Q,ka,St,za,ja,qe,Aa,La,Na,ee,qa,wt,Da,Oa,yt,Ha,Wa,Ba,to,Ra,Ua,De,Do,te,ge,oo,Oe,Va,ro,Ga,Oo,N,He,Ka,ao,Ja,Xa,We,Za,no,Ya,Qa,en,B,Be,tn,so,on,rn,pe,Ho,oe,ue,io,Re,an,lo,nn,Wo,O,Ue,sn,Ve,ln,Ge,dn,cn,mn,k,Ke,fn,re,hn,$t,gn,pn,co,un,_n,vn,_e,bn,mo,Sn,wn,Je,Bo,ae,ve,fo,Xe,yn,ho,$n,Ro,Ze,go,Uo,ne,be,po,Ye,Tn,uo,xn,Vo,q,Qe,En,_o,Fn,In,et,Pn,tt,Cn,Mn,kn,z,ot,zn,se,jn,Tt,An,Ln,vo,Nn,qn,Dn,Se,On,bo,Hn,Wn,rt,Go,ie,we,So,at,Bn,wo,Rn,Ko,H,nt,Un,st,Vn,it,Gn,Kn,Jn,j,lt,Xn,le,Zn,xt,Yn,Qn,yo,es,ts,os,ye,rs,$o,as,ns,dt,Jo;return u=new xe({}),Ee=new xe({}),Le=new xe({}),Ne=new de({props:{name:"class transformers.SegformerConfig",anchor:"transformers.SegformerConfig",parameters:[{name:"image_size",val:" = 224"},{name:"num_channels",val:" = 3"},{name:"num_encoder_blocks",val:" = 4"},{name:"depths",val:" = [2, 2, 2, 2]"},{name:"sr_ratios",val:" = [8, 4, 2, 1]"},{name:"hidden_sizes",val:" = [32, 64, 160, 256]"},{name:"downsampling_rates",val:" = [1, 4, 8, 16]"},{name:"patch_sizes",val:" = [7, 3, 3, 3]"},{name:"strides",val:" = [4, 2, 2, 2]"},{name:"num_attention_heads",val:" = [1, 2, 5, 8]"},{name:"mlp_ratios",val:" = [4, 4, 4, 4]"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"classifier_dropout_prob",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"drop_path_rate",val:" = 0.1"},{name:"layer_norm_eps",val:" = 1e-06"},{name:"decoder_hidden_size",val:" = 256"},{name:"is_encoder_decoder",val:" = False"},{name:"reshape_last_stage",val:" = True"},{name:"semantic_loss_ignore_index",val:" = 255"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/configuration_segformer.py#L29",parametersDescription:[{anchor:"transformers.SegformerConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.SegformerConfig.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The number of input channels.`,name:"num_channels"},{anchor:"transformers.SegformerConfig.num_encoder_blocks",description:`<strong>num_encoder_blocks</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of encoder blocks (i.e. 
stages in the Mix Transformer encoder).`,name:"num_encoder_blocks"},{anchor:"transformers.SegformerConfig.depths",description:`<strong>depths</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [2, 2, 2, 2]) &#x2014; The number of layers in each encoder block.`,name:"depths"},{anchor:"transformers.SegformerConfig.sr_ratios",description:`<strong>sr_ratios</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [8, 4, 2, 1]) &#x2014; Sequence reduction ratios in each encoder block.`,name:"sr_ratios"},{anchor:"transformers.SegformerConfig.hidden_sizes",description:`<strong>hidden_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [32, 64, 160, 256]) &#x2014; Dimension of each of the encoder blocks.`,name:"hidden_sizes"},{anchor:"transformers.SegformerConfig.downsampling_rates",description:`<strong>downsampling_rates</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [1, 4, 8, 16]) &#x2014; Downsample rate of the image resolution compared to the original image size before each encoder block.`,name:"downsampling_rates"},{anchor:"transformers.SegformerConfig.patch_sizes",description:`<strong>patch_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [7, 3, 3, 3]) &#x2014; Patch size before each encoder block.`,name:"patch_sizes"},{anchor:"transformers.SegformerConfig.strides",description:`<strong>strides</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [4, 2, 2, 2]) &#x2014; Stride before each encoder block.`,name:"strides"},{anchor:"transformers.SegformerConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [1, 2, 4, 8]) &#x2014; Number of attention heads for each attention layer in each block of the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SegformerConfig.mlp_ratios",description:`<strong>mlp_ratios</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [4, 4, 4, 4]) &#x2014; Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks.`,name:"mlp_ratios"},{anchor:"transformers.SegformerConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SegformerConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.SegformerConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.SegformerConfig.classifier_dropout_prob",description:`<strong>classifier_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability before the classification head.`,name:"classifier_dropout_prob"},{anchor:"transformers.SegformerConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SegformerConfig.drop_path_rate",description:`<strong>drop_path_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.`,name:"drop_path_rate"},{anchor:"transformers.SegformerConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.SegformerConfig.decoder_hidden_size",description:`<strong>decoder_hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The dimension of the all-MLP decode head.`,name:"decoder_hidden_size"},{anchor:"transformers.SegformerConfig.reshape_last_stage",description:`<strong>reshape_last_stage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to reshape the features of the last stage back to <code>(batch_size, num_channels, height, width)</code>. 
Only required for the semantic segmentation model.`,name:"reshape_last_stage"},{anchor:"transformers.SegformerConfig.semantic_loss_ignore_index",description:`<strong>semantic_loss_ignore_index</strong> (<code>int</code>, <em>optional</em>, defaults to 255) &#x2014; The index that is ignored by the loss function of the semantic segmentation model.`,name:"semantic_loss_ignore_index"}]}}),De=new mr({props:{code:`from transformers import SegformerModel, SegformerConfig # Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration configuration = SegformerConfig() # Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration model = SegformerModel(configuration) # Accessing the model configuration configuration = model.config,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerModel, SegformerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SegformerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Oe=new xe({}),He=new de({props:{name:"class transformers.SegformerFeatureExtractor",anchor:"transformers.SegformerFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 512"},{name:"resample",val:" = 2"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"reduce_labels",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/feature_extraction_segformer.py#L37",parametersDescription:[{anchor:"transformers.SegformerFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input based on a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.SegformerFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 512) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.SegformerFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.SegformerFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with mean and standard deviation.`,name:"do_normalize"},{anchor:"transformers.SegformerFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.`,name:"image_mean"},{anchor:"transformers.SegformerFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std.`,name:"image_std"},{anchor:"transformers.SegformerFeatureExtractor.reduce_labels",description:`<strong>reduce_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.`,name:"reduce_labels"}]}}),Be=new de({props:{name:"__call__",anchor:"transformers.SegformerFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"segmentation_maps",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/feature_extraction_segformer.py#L90",parametersDescription:[{anchor:"transformers.SegformerFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is the number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.SegformerFeatureExtractor.__call__.segmentation_maps",description:`<strong>segmentation_maps</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014; Optionally, the corresponding semantic segmentation maps with the pixel-wise annotations.`,name:"segmentation_maps"},{anchor:"transformers.SegformerFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> <li><strong>labels</strong> \u2014 Optional labels to be fed to a model (when <code>segmentation_maps</code> are provided)</li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),pe=new cr({props:{warning:"&lcub;true}",$$slots:{default:[ki]},$$scope:{ctx:D}}}),Re=new xe({}),Ue=new de({props:{name:"class transformers.SegformerModel",anchor:"transformers.SegformerModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/modeling_segformer.py#L463",parametersDescription:[{anchor:"transformers.SegformerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ke=new de({props:{name:"forward",anchor:"transformers.SegformerModel.forward",parameters:[{name:"pixel_values",val:""},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/modeling_segformer.py#L482",parametersDescription:[{anchor:"transformers.SegformerModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. 
Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor">SegformerFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor.__call__">SegformerFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.SegformerModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SegformerModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SegformerModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig" >SegformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_e=new cr({props:{$$slots:{default:[zi]},$$scope:{ctx:D}}}),Je=new mr({props:{code:`from transformers import SegformerFeatureExtractor, SegformerModel from PIL import Image 
import requests feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") model = SegformerModel("nvidia/segformer-b0-finetuned-ade-512-512") url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) sequence_output = outputs.last_hidden_state,`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerFeatureExtractor, SegformerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = SegformerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/segformer-b0-finetuned-ade-512-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerModel(<span class="hljs-string">&quot;nvidia/segformer-b0-finetuned-ade-512-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_output = outputs.last_hidden_state`}}),Xe=new xe({}),Ye=new xe({}),Qe=new de({props:{name:"class transformers.SegformerForImageClassification",anchor:"transformers.SegformerForImageClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/modeling_segformer.py#L537",parametersDescription:[{anchor:"transformers.SegformerForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ot=new de({props:{name:"forward",anchor:"transformers.SegformerForImageClassification.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/modeling_segformer.py#L550",parametersDescription:[{anchor:"transformers.SegformerForImageClassification.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. 
Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor">SegformerFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor.__call__">SegformerFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.SegformerForImageClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SegformerForImageClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SegformerForImageClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SegformerForImageClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig" >SegformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Se=new cr({props:{$$slots:{default:[ji]},$$scope:{ctx:D}}}),rt=new mr({props:{code:`from transformers import SegformerFeatureExtractor, SegformerForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = SegformerFeatureExtractor.from_pretrained('nvidia/mit-b0') model = SegformerForImageClassification.from_pretrained('nvidia/mit-b0') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerFeatureExtractor, SegformerForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = SegformerFeatureExtractor.from_pretrained(<span class="hljs-string">&#x27;nvidia/mit-b0&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerForImageClassification.from_pretrained(<span class="hljs-string">&#x27;nvidia/mit-b0&#x27;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),at=new xe({}),nt=new de({props:{name:"class 
transformers.SegformerForSemanticSegmentation",anchor:"transformers.SegformerForSemanticSegmentation",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/modeling_segformer.py#L697",parametersDescription:[{anchor:"transformers.SegformerForSemanticSegmentation.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lt=new de({props:{name:"forward",anchor:"transformers.SegformerForSemanticSegmentation.forward",parameters:[{name:"pixel_values",val:""},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/segformer/modeling_segformer.py#L706",parametersDescription:[{anchor:"transformers.SegformerForSemanticSegmentation.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor">SegformerFeatureExtractor</a>. See <a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor.__call__">SegformerFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Ground truth semantic segmentation maps for computing the loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels &gt; 1</code>, a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerConfig" >SegformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ye=new cr({props:{$$slots:{default:[Ai]},$$scope:{ctx:D}}}),dt=new mr({props:{code:`from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation from PIL import Image import requests feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # shape (batch_size, num_labels, height/4, width/4),`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerFeatureExtractor, SegformerForSemanticSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = 
SegformerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/segformer-b0-finetuned-ade-512-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerForSemanticSegmentation.from_pretrained(<span class="hljs-string">&quot;nvidia/segformer-b0-finetuned-ade-512-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&#x27;http://images.cocodataset.org/val2017/000000039769.jpg&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-comment"># shape (batch_size, num_labels, height/4, width/4)</span>`}}),{c(){h=n("meta"),x=d(),g=n("h1"),p=n("a"),I=n("span"),_(u.$$.fragment),$=d(),C=n("span"),fr=r("SegFormer"),Fo=d(),X=n("h2"),ce=n("a"),Lt=n("span"),_(Ee.$$.fragment),hr=d(),Nt=n("span"),gr=r("Overview"),Io=d(),me=n("p"),pr=r("The SegFormer model was proposed in "),Fe=n("a"),ur=r("SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),_r=r(` by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. The model consists of a hierarchical Transformer encoder and a lightweight all-MLP decode head to achieve great results on image segmentation benchmarks such as ADE20K and Cityscapes.`),Po=d(),mt=n("p"),vr=r("The abstract from the paper is the following:"),Co=d(),ft=n("p"),qt=n("em"),br=r(`We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perception (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. The proposed MLP decoder aggregates information from different layers, and thus combining both local attention and global attention to render powerful representations. We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C.`),Mo=d(),fe=n("p"),Sr=r("The figure below illustrates the architecture of SegFormer. Taken from the "),Ie=n("a"),wr=r("original paper"),yr=r("."),ko=d(),Pe=n("img"),zo=d(),W=n("p"),$r=r("This model was contributed by "),Ce=n("a"),Tr=r("nielsr"),xr=r(". The original code can be found "),Me=n("a"),Er=r("here"),Fr=r("."),jo=d(),ht=n("p"),Ir=r("Tips:"),Ao=d(),M=n("ul"),L=n("li"),Pr=r(`SegFormer consists of a hierarchical Transformer encoder, and a lightweight all-MLP decode head. 
`),gt=n("a"),Cr=r("SegformerModel"),Mr=r(` is the hierarchical Transformer encoder (which in the paper is also referred to as Mix Transformer or MiT). `),pt=n("a"),kr=r("SegformerForSemanticSegmentation"),zr=r(` adds the all-MLP decode head on top to perform semantic segmentation of images. In addition, there\u2019s `),ut=n("a"),jr=r("SegformerForImageClassification"),Ar=r(` which can be used to - you guessed it - classify images. The authors of SegFormer first pre-trained the Transformer encoder on ImageNet-1k to classify images. Next, they throw away the classification head, and replace it by the all-MLP decode head. Next, they fine-tune the model altogether on ADE20K, Cityscapes and COCO-stuff, which are important benchmarks for semantic segmentation. All checkpoints can be found on the `),ke=n("a"),Lr=r("hub"),Nr=r("."),qr=d(),ze=n("li"),Dr=r("The quickest way to get started with SegFormer is by checking the "),je=n("a"),Or=r("example notebooks"),Hr=r(` (which showcase both inference and fine-tuning on custom data).`),Wr=d(),Z=n("li"),Br=r("One can use "),_t=n("a"),Rr=r("SegformerFeatureExtractor"),Ur=r(` to prepare images and corresponding segmentation maps for the model. Note that this feature extractor is fairly basic and does not include all data augmentations used in the original paper. The original preprocessing pipelines (for the ADE20k dataset for instance) can be found `),Ae=n("a"),Vr=r("here"),Gr=r(`. The most important preprocessing step is that images and segmentation maps are randomly cropped and padded to the same size, such as 512x512 or 640x640, after which they are normalized.`),Kr=d(),T=n("li"),Jr=r("One additional thing to keep in mind is that one can initialize "),vt=n("a"),Xr=r("SegformerFeatureExtractor"),Zr=r(` with `),Dt=n("code"),Yr=r("reduce_labels"),Qr=r(" set to "),Ot=n("em"),ea=r("True"),ta=r(" or "),Ht=n("em"),oa=r("False"),ra=r(`. In some datasets (like ADE20k), the 0 index is used in the annotated segmentation maps for background. However, ADE20k doesn\u2019t include the \u201Cbackground\u201D class in its 150 labels. Therefore, `),Wt=n("code"),aa=r("reduce_labels"),na=r(` is used to reduce all labels by 1, and to make sure no loss is computed for the background class (i.e. it replaces 0 in the annotated maps by 255, which is the `),Bt=n("em"),sa=r("ignore_index"),ia=r(` of the loss function used by `),bt=n("a"),la=r("SegformerForSemanticSegmentation"),da=r(`). However, other datasets use the 0 index as background class and include this class as part of all labels. 
In that case, `),Rt=n("code"),ca=r("reduce_labels"),ma=r(` should be set to `),Ut=n("em"),fa=r("False"),ha=r(", as loss should also be computed for the background class."),ga=d(),Vt=n("li"),pa=r("As most models, SegFormer comes in different sizes, the details of which can be found in the table below."),Lo=d(),F=n("p"),ua=r("| "),Gt=n("strong"),_a=r("Model variant"),va=r(" | "),Kt=n("strong"),ba=r("Depths"),Sa=r(" | "),Jt=n("strong"),wa=r("Hidden sizes"),ya=r(" | "),Xt=n("strong"),$a=r("Decoder hidden size"),Ta=r(" | "),Zt=n("strong"),xa=r("Params (M)"),Ea=r(" | "),Yt=n("strong"),Fa=r("ImageNet-1k Top 1"),Ia=r(` | | MiT-b0 | [2, 2, 2, 2] | [32, 64, 160, 256] | 256 | 3.7 | 70.5 | | MiT-b1 | [2, 2, 2, 2] | [64, 128, 320, 512] | 256 | 14.0 | 78.7 | | MiT-b2 | [3, 4, 6, 3] | [64, 128, 320, 512] | 768 | 25.4 | 81.6 | | MiT-b3 | [3, 4, 18, 3] | [64, 128, 320, 512] | 768 | 45.2 | 83.1 | | MiT-b4 | [3, 8, 27, 3] | [64, 128, 320, 512] | 768 | 62.6 | 83.6 | | MiT-b5 | [3, 6, 40, 3] | [64, 128, 320, 512] | 768 | 82.0 | 83.8 |`),No=d(),Y=n("h2"),he=n("a"),Qt=n("span"),_(Le.$$.fragment),Pa=d(),eo=n("span"),Ca=r("SegformerConfig"),qo=d(),P=n("div"),_(Ne.$$.fragment),Ma=d(),Q=n("p"),ka=r("This is the configuration class to store the configuration of a "),St=n("a"),za=r("SegformerModel"),ja=r(`. It is used to instantiate an SegFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SegFormer `),qe=n("a"),Aa=r("nvidia/segformer-b0-finetuned-ade-512-512"),La=r(` architecture.`),Na=d(),ee=n("p"),qa=r("Configuration objects inherit from "),wt=n("a"),Da=r("PretrainedConfig"),Oa=r(` and can be used to control the model outputs. Read the documentation from `),yt=n("a"),Ha=r("PretrainedConfig"),Wa=r(" for more information."),Ba=d(),to=n("p"),Ra=r("Example:"),Ua=d(),_(De.$$.fragment),Do=d(),te=n("h2"),ge=n("a"),oo=n("span"),_(Oe.$$.fragment),Va=d(),ro=n("span"),Ga=r("SegformerFeatureExtractor"),Oo=d(),N=n("div"),_(He.$$.fragment),Ka=d(),ao=n("p"),Ja=r("Constructs a SegFormer feature extractor."),Xa=d(),We=n("p"),Za=r("This feature extractor inherits from "),no=n("code"),Ya=r("FeatureExtractionMixin"),Qa=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),en=d(),B=n("div"),_(Be.$$.fragment),tn=d(),so=n("p"),on=r("Main method to prepare for the model one or several image(s) and optional corresponding segmentation maps."),rn=d(),_(pe.$$.fragment),Ho=d(),oe=n("h2"),ue=n("a"),io=n("span"),_(Re.$$.fragment),an=d(),lo=n("span"),nn=r("SegformerModel"),Wo=d(),O=n("div"),_(Ue.$$.fragment),sn=d(),Ve=n("p"),ln=r(`The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Ge=n("a"),dn=r("torch.nn.Module"),cn=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mn=d(),k=n("div"),_(Ke.$$.fragment),fn=d(),re=n("p"),hn=r("The "),$t=n("a"),gn=r("SegformerModel"),pn=r(" forward method, overrides the "),co=n("code"),un=r("__call__"),_n=r(" special method."),vn=d(),_(_e.$$.fragment),bn=d(),mo=n("p"),Sn=r("Examples:"),wn=d(),_(Je.$$.fragment),Bo=d(),ae=n("h2"),ve=n("a"),fo=n("span"),_(Xe.$$.fragment),yn=d(),ho=n("span"),$n=r("SegformerDecodeHead"),Ro=d(),Ze=n("div"),go=n("div"),Uo=d(),ne=n("h2"),be=n("a"),po=n("span"),_(Ye.$$.fragment),Tn=d(),uo=n("span"),xn=r("SegformerForImageClassification"),Vo=d(),q=n("div"),_(Qe.$$.fragment),En=d(),_o=n("p"),Fn=r(`SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet.`),In=d(),et=n("p"),Pn=r("This model is a PyTorch "),tt=n("a"),Cn=r("torch.nn.Module"),Mn=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kn=d(),z=n("div"),_(ot.$$.fragment),zn=d(),se=n("p"),jn=r("The "),Tt=n("a"),An=r("SegformerForImageClassification"),Ln=r(" forward method, overrides the "),vo=n("code"),Nn=r("__call__"),qn=r(" special method."),Dn=d(),_(Se.$$.fragment),On=d(),bo=n("p"),Hn=r("Examples:"),Wn=d(),_(rt.$$.fragment),Go=d(),ie=n("h2"),we=n("a"),So=n("span"),_(at.$$.fragment),Bn=d(),wo=n("span"),Rn=r("SegformerForSemanticSegmentation"),Ko=d(),H=n("div"),_(nt.$$.fragment),Un=d(),st=n("p"),Vn=r(`SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. This model is a PyTorch `),it=n("a"),Gn=r("torch.nn.Module"),Kn=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Jn=d(),j=n("div"),_(lt.$$.fragment),Xn=d(),le=n("p"),Zn=r("The "),xt=n("a"),Yn=r("SegformerForSemanticSegmentation"),Qn=r(" forward method, overrides the "),yo=n("code"),es=r("__call__"),ts=r(" special method."),os=d(),_(ye.$$.fragment),rs=d(),$o=n("p"),as=r("Examples:"),ns=d(),_(dt.$$.fragment),this.h()},l(t){const m=Ci('[data-svelte="svelte-1phssyn"]',document.head);h=s(m,"META",{name:!0,content:!0}),m.forEach(o),x=c(t),g=s(t,"H1",{class:!0});var ct=i(g);p=s(ct,"A",{id:!0,class:!0,href:!0});var To=i(p);I=s(To,"SPAN",{});var xo=i(I);v(u.$$.fragment,xo),xo.forEach(o),To.forEach(o),$=c(ct),C=s(ct,"SPAN",{});var Eo=i(C);fr=a(Eo,"SegFormer"),Eo.forEach(o),ct.forEach(o),Fo=c(t),X=s(t,"H2",{class:!0});var Xo=i(X);ce=s(Xo,"A",{id:!0,class:!0,href:!0});var is=i(ce);Lt=s(is,"SPAN",{});var ls=i(Lt);v(Ee.$$.fragment,ls),ls.forEach(o),is.forEach(o),hr=c(Xo),Nt=s(Xo,"SPAN",{});var ds=i(Nt);gr=a(ds,"Overview"),ds.forEach(o),Xo.forEach(o),Io=c(t),me=s(t,"P",{});var Zo=i(me);pr=a(Zo,"The SegFormer model was proposed in "),Fe=s(Zo,"A",{href:!0,rel:!0});var cs=i(Fe);ur=a(cs,"SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),cs.forEach(o),_r=a(Zo,` by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. 
The model consists of a hierarchical Transformer encoder and a lightweight all-MLP decode head to achieve great results on image segmentation benchmarks such as ADE20K and Cityscapes.`),Zo.forEach(o),Po=c(t),mt=s(t,"P",{});var ms=i(mt);vr=a(ms,"The abstract from the paper is the following:"),ms.forEach(o),Co=c(t),ft=s(t,"P",{});var fs=i(ft);qt=s(fs,"EM",{});var hs=i(qt);br=a(hs,`We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perception (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. The proposed MLP decoder aggregates information from different layers, and thus combining both local attention and global attention to render powerful representations. We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C.`),hs.forEach(o),fs.forEach(o),Mo=c(t),fe=s(t,"P",{});var Yo=i(fe);Sr=a(Yo,"The figure below illustrates the architecture of SegFormer. Taken from the "),Ie=s(Yo,"A",{href:!0,rel:!0});var gs=i(Ie);wr=a(gs,"original paper"),gs.forEach(o),yr=a(Yo,"."),Yo.forEach(o),ko=c(t),Pe=s(t,"IMG",{width:!0,src:!0}),zo=c(t),W=s(t,"P",{});var Et=i(W);$r=a(Et,"This model was contributed by "),Ce=s(Et,"A",{href:!0,rel:!0});var ps=i(Ce);Tr=a(ps,"nielsr"),ps.forEach(o),xr=a(Et,". The original code can be found "),Me=s(Et,"A",{href:!0,rel:!0});var us=i(Me);Er=a(us,"here"),us.forEach(o),Fr=a(Et,"."),Et.forEach(o),jo=c(t),ht=s(t,"P",{});var _s=i(ht);Ir=a(_s,"Tips:"),_s.forEach(o),Ao=c(t),M=s(t,"UL",{});var R=i(M);L=s(R,"LI",{});var U=i(L);Pr=a(U,`SegFormer consists of a hierarchical Transformer encoder, and a lightweight all-MLP decode head. `),gt=s(U,"A",{href:!0});var vs=i(gt);Cr=a(vs,"SegformerModel"),vs.forEach(o),Mr=a(U,` is the hierarchical Transformer encoder (which in the paper is also referred to as Mix Transformer or MiT). `),pt=s(U,"A",{href:!0});var bs=i(pt);kr=a(bs,"SegformerForSemanticSegmentation"),bs.forEach(o),zr=a(U,` adds the all-MLP decode head on top to perform semantic segmentation of images. In addition, there\u2019s `),ut=s(U,"A",{href:!0});var Ss=i(ut);jr=a(Ss,"SegformerForImageClassification"),Ss.forEach(o),Ar=a(U,` which can be used to - you guessed it - classify images. The authors of SegFormer first pre-trained the Transformer encoder on ImageNet-1k to classify images. Next, they throw away the classification head, and replace it by the all-MLP decode head. Next, they fine-tune the model altogether on ADE20K, Cityscapes and COCO-stuff, which are important benchmarks for semantic segmentation. 
All checkpoints can be found on the `),ke=s(U,"A",{href:!0,rel:!0});var ws=i(ke);Lr=a(ws,"hub"),ws.forEach(o),Nr=a(U,"."),U.forEach(o),qr=c(R),ze=s(R,"LI",{});var Qo=i(ze);Dr=a(Qo,"The quickest way to get started with SegFormer is by checking the "),je=s(Qo,"A",{href:!0,rel:!0});var ys=i(je);Or=a(ys,"example notebooks"),ys.forEach(o),Hr=a(Qo,` (which showcase both inference and fine-tuning on custom data).`),Qo.forEach(o),Wr=c(R),Z=s(R,"LI",{});var Ft=i(Z);Br=a(Ft,"One can use "),_t=s(Ft,"A",{href:!0});var $s=i(_t);Rr=a($s,"SegformerFeatureExtractor"),$s.forEach(o),Ur=a(Ft,` to prepare images and corresponding segmentation maps for the model. Note that this feature extractor is fairly basic and does not include all data augmentations used in the original paper. The original preprocessing pipelines (for the ADE20k dataset for instance) can be found `),Ae=s(Ft,"A",{href:!0,rel:!0});var Ts=i(Ae);Vr=a(Ts,"here"),Ts.forEach(o),Gr=a(Ft,`. The most important preprocessing step is that images and segmentation maps are randomly cropped and padded to the same size, such as 512x512 or 640x640, after which they are normalized.`),Ft.forEach(o),Kr=c(R),T=s(R,"LI",{});var E=i(T);Jr=a(E,"One additional thing to keep in mind is that one can initialize "),vt=s(E,"A",{href:!0});var xs=i(vt);Xr=a(xs,"SegformerFeatureExtractor"),xs.forEach(o),Zr=a(E,` with `),Dt=s(E,"CODE",{});var Es=i(Dt);Yr=a(Es,"reduce_labels"),Es.forEach(o),Qr=a(E," set to "),Ot=s(E,"EM",{});var Fs=i(Ot);ea=a(Fs,"True"),Fs.forEach(o),ta=a(E," or "),Ht=s(E,"EM",{});var Is=i(Ht);oa=a(Is,"False"),Is.forEach(o),ra=a(E,`. In some datasets (like ADE20k), the 0 index is used in the annotated segmentation maps for background. However, ADE20k doesn\u2019t include the \u201Cbackground\u201D class in its 150 labels. Therefore, `),Wt=s(E,"CODE",{});var Ps=i(Wt);aa=a(Ps,"reduce_labels"),Ps.forEach(o),na=a(E,` is used to reduce all labels by 1, and to make sure no loss is computed for the background class (i.e. it replaces 0 in the annotated maps by 255, which is the `),Bt=s(E,"EM",{});var Cs=i(Bt);sa=a(Cs,"ignore_index"),Cs.forEach(o),ia=a(E,` of the loss function used by `),bt=s(E,"A",{href:!0});var Ms=i(bt);la=a(Ms,"SegformerForSemanticSegmentation"),Ms.forEach(o),da=a(E,`). However, other datasets use the 0 index as background class and include this class as part of all labels. 
In that case, `),Rt=s(E,"CODE",{});var ks=i(Rt);ca=a(ks,"reduce_labels"),ks.forEach(o),ma=a(E,` should be set to `),Ut=s(E,"EM",{});var zs=i(Ut);fa=a(zs,"False"),zs.forEach(o),ha=a(E,", as loss should also be computed for the background class."),E.forEach(o),ga=c(R),Vt=s(R,"LI",{});var js=i(Vt);pa=a(js,"As most models, SegFormer comes in different sizes, the details of which can be found in the table below."),js.forEach(o),R.forEach(o),Lo=c(t),F=s(t,"P",{});var A=i(F);ua=a(A,"| "),Gt=s(A,"STRONG",{});var As=i(Gt);_a=a(As,"Model variant"),As.forEach(o),va=a(A," | "),Kt=s(A,"STRONG",{});var Ls=i(Kt);ba=a(Ls,"Depths"),Ls.forEach(o),Sa=a(A," | "),Jt=s(A,"STRONG",{});var Ns=i(Jt);wa=a(Ns,"Hidden sizes"),Ns.forEach(o),ya=a(A," | "),Xt=s(A,"STRONG",{});var qs=i(Xt);$a=a(qs,"Decoder hidden size"),qs.forEach(o),Ta=a(A," | "),Zt=s(A,"STRONG",{});var Ds=i(Zt);xa=a(Ds,"Params (M)"),Ds.forEach(o),Ea=a(A," | "),Yt=s(A,"STRONG",{});var Os=i(Yt);Fa=a(Os,"ImageNet-1k Top 1"),Os.forEach(o),Ia=a(A,` | | MiT-b0 | [2, 2, 2, 2] | [32, 64, 160, 256] | 256 | 3.7 | 70.5 | | MiT-b1 | [2, 2, 2, 2] | [64, 128, 320, 512] | 256 | 14.0 | 78.7 | | MiT-b2 | [3, 4, 6, 3] | [64, 128, 320, 512] | 768 | 25.4 | 81.6 | | MiT-b3 | [3, 4, 18, 3] | [64, 128, 320, 512] | 768 | 45.2 | 83.1 | | MiT-b4 | [3, 8, 27, 3] | [64, 128, 320, 512] | 768 | 62.6 | 83.6 | | MiT-b5 | [3, 6, 40, 3] | [64, 128, 320, 512] | 768 | 82.0 | 83.8 |`),A.forEach(o),No=c(t),Y=s(t,"H2",{class:!0});var er=i(Y);he=s(er,"A",{id:!0,class:!0,href:!0});var Hs=i(he);Qt=s(Hs,"SPAN",{});var Ws=i(Qt);v(Le.$$.fragment,Ws),Ws.forEach(o),Hs.forEach(o),Pa=c(er),eo=s(er,"SPAN",{});var Bs=i(eo);Ca=a(Bs,"SegformerConfig"),Bs.forEach(o),er.forEach(o),qo=c(t),P=s(t,"DIV",{class:!0});var V=i(P);v(Ne.$$.fragment,V),Ma=c(V),Q=s(V,"P",{});var It=i(Q);ka=a(It,"This is the configuration class to store the configuration of a "),St=s(It,"A",{href:!0});var Rs=i(St);za=a(Rs,"SegformerModel"),Rs.forEach(o),ja=a(It,`. It is used to instantiate an SegFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SegFormer `),qe=s(It,"A",{href:!0,rel:!0});var Us=i(qe);Aa=a(Us,"nvidia/segformer-b0-finetuned-ade-512-512"),Us.forEach(o),La=a(It,` architecture.`),It.forEach(o),Na=c(V),ee=s(V,"P",{});var Pt=i(ee);qa=a(Pt,"Configuration objects inherit from "),wt=s(Pt,"A",{href:!0});var Vs=i(wt);Da=a(Vs,"PretrainedConfig"),Vs.forEach(o),Oa=a(Pt,` and can be used to control the model outputs. Read the documentation from `),yt=s(Pt,"A",{href:!0});var Gs=i(yt);Ha=a(Gs,"PretrainedConfig"),Gs.forEach(o),Wa=a(Pt," for more information."),Pt.forEach(o),Ba=c(V),to=s(V,"P",{});var Ks=i(to);Ra=a(Ks,"Example:"),Ks.forEach(o),Ua=c(V),v(De.$$.fragment,V),V.forEach(o),Do=c(t),te=s(t,"H2",{class:!0});var tr=i(te);ge=s(tr,"A",{id:!0,class:!0,href:!0});var Js=i(ge);oo=s(Js,"SPAN",{});var Xs=i(oo);v(Oe.$$.fragment,Xs),Xs.forEach(o),Js.forEach(o),Va=c(tr),ro=s(tr,"SPAN",{});var Zs=i(ro);Ga=a(Zs,"SegformerFeatureExtractor"),Zs.forEach(o),tr.forEach(o),Oo=c(t),N=s(t,"DIV",{class:!0});var $e=i(N);v(He.$$.fragment,$e),Ka=c($e),ao=s($e,"P",{});var Ys=i(ao);Ja=a(Ys,"Constructs a SegFormer feature extractor."),Ys.forEach(o),Xa=c($e),We=s($e,"P",{});var or=i(We);Za=a(or,"This feature extractor inherits from "),no=s(or,"CODE",{});var Qs=i(no);Ya=a(Qs,"FeatureExtractionMixin"),Qs.forEach(o),Qa=a(or,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),or.forEach(o),en=c($e),B=s($e,"DIV",{class:!0});var Ct=i(B);v(Be.$$.fragment,Ct),tn=c(Ct),so=s(Ct,"P",{});var ei=i(so);on=a(ei,"Main method to prepare for the model one or several image(s) and optional corresponding segmentation maps."),ei.forEach(o),rn=c(Ct),v(pe.$$.fragment,Ct),Ct.forEach(o),$e.forEach(o),Ho=c(t),oe=s(t,"H2",{class:!0});var rr=i(oe);ue=s(rr,"A",{id:!0,class:!0,href:!0});var ti=i(ue);io=s(ti,"SPAN",{});var oi=i(io);v(Re.$$.fragment,oi),oi.forEach(o),ti.forEach(o),an=c(rr),lo=s(rr,"SPAN",{});var ri=i(lo);nn=a(ri,"SegformerModel"),ri.forEach(o),rr.forEach(o),Wo=c(t),O=s(t,"DIV",{class:!0});var Mt=i(O);v(Ue.$$.fragment,Mt),sn=c(Mt),Ve=s(Mt,"P",{});var ar=i(Ve);ln=a(ar,`The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Ge=s(ar,"A",{href:!0,rel:!0});var ai=i(Ge);dn=a(ai,"torch.nn.Module"),ai.forEach(o),cn=a(ar,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ar.forEach(o),mn=c(Mt),k=s(Mt,"DIV",{class:!0});var G=i(k);v(Ke.$$.fragment,G),fn=c(G),re=s(G,"P",{});var kt=i(re);hn=a(kt,"The "),$t=s(kt,"A",{href:!0});var ni=i($t);gn=a(ni,"SegformerModel"),ni.forEach(o),pn=a(kt," forward method, overrides the "),co=s(kt,"CODE",{});var si=i(co);un=a(si,"__call__"),si.forEach(o),_n=a(kt," special method."),kt.forEach(o),vn=c(G),v(_e.$$.fragment,G),bn=c(G),mo=s(G,"P",{});var ii=i(mo);Sn=a(ii,"Examples:"),ii.forEach(o),wn=c(G),v(Je.$$.fragment,G),G.forEach(o),Mt.forEach(o),Bo=c(t),ae=s(t,"H2",{class:!0});var nr=i(ae);ve=s(nr,"A",{id:!0,class:!0,href:!0});var li=i(ve);fo=s(li,"SPAN",{});var di=i(fo);v(Xe.$$.fragment,di),di.forEach(o),li.forEach(o),yn=c(nr),ho=s(nr,"SPAN",{});var ci=i(ho);$n=a(ci,"SegformerDecodeHead"),ci.forEach(o),nr.forEach(o),Ro=c(t),Ze=s(t,"DIV",{class:!0});var mi=i(Ze);go=s(mi,"DIV",{class:!0}),i(go).forEach(o),mi.forEach(o),Uo=c(t),ne=s(t,"H2",{class:!0});var sr=i(ne);be=s(sr,"A",{id:!0,class:!0,href:!0});var fi=i(be);po=s(fi,"SPAN",{});var hi=i(po);v(Ye.$$.fragment,hi),hi.forEach(o),fi.forEach(o),Tn=c(sr),uo=s(sr,"SPAN",{});var gi=i(uo);xn=a(gi,"SegformerForImageClassification"),gi.forEach(o),sr.forEach(o),Vo=c(t),q=s(t,"DIV",{class:!0});var Te=i(q);v(Qe.$$.fragment,Te),En=c(Te),_o=s(Te,"P",{});var pi=i(_o);Fn=a(pi,`SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet.`),pi.forEach(o),In=c(Te),et=s(Te,"P",{});var ir=i(et);Pn=a(ir,"This model is a PyTorch "),tt=s(ir,"A",{href:!0,rel:!0});var ui=i(tt);Cn=a(ui,"torch.nn.Module"),ui.forEach(o),Mn=a(ir,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ir.forEach(o),kn=c(Te),z=s(Te,"DIV",{class:!0});var K=i(z);v(ot.$$.fragment,K),zn=c(K),se=s(K,"P",{});var zt=i(se);jn=a(zt,"The "),Tt=s(zt,"A",{href:!0});var _i=i(Tt);An=a(_i,"SegformerForImageClassification"),_i.forEach(o),Ln=a(zt," forward method, overrides the "),vo=s(zt,"CODE",{});var vi=i(vo);Nn=a(vi,"__call__"),vi.forEach(o),qn=a(zt," special method."),zt.forEach(o),Dn=c(K),v(Se.$$.fragment,K),On=c(K),bo=s(K,"P",{});var bi=i(bo);Hn=a(bi,"Examples:"),bi.forEach(o),Wn=c(K),v(rt.$$.fragment,K),K.forEach(o),Te.forEach(o),Go=c(t),ie=s(t,"H2",{class:!0});var lr=i(ie);we=s(lr,"A",{id:!0,class:!0,href:!0});var Si=i(we);So=s(Si,"SPAN",{});var wi=i(So);v(at.$$.fragment,wi),wi.forEach(o),Si.forEach(o),Bn=c(lr),wo=s(lr,"SPAN",{});var yi=i(wo);Rn=a(yi,"SegformerForSemanticSegmentation"),yi.forEach(o),lr.forEach(o),Ko=c(t),H=s(t,"DIV",{class:!0});var jt=i(H);v(nt.$$.fragment,jt),Un=c(jt),st=s(jt,"P",{});var dr=i(st);Vn=a(dr,`SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. This model is a PyTorch `),it=s(dr,"A",{href:!0,rel:!0});var $i=i(it);Gn=a($i,"torch.nn.Module"),$i.forEach(o),Kn=a(dr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dr.forEach(o),Jn=c(jt),j=s(jt,"DIV",{class:!0});var J=i(j);v(lt.$$.fragment,J),Xn=c(J),le=s(J,"P",{});var At=i(le);Zn=a(At,"The "),xt=s(At,"A",{href:!0});var Ti=i(xt);Yn=a(Ti,"SegformerForSemanticSegmentation"),Ti.forEach(o),Qn=a(At," forward method, overrides the "),yo=s(At,"CODE",{});var xi=i(yo);es=a(xi,"__call__"),xi.forEach(o),ts=a(At," special method."),At.forEach(o),os=c(J),v(ye.$$.fragment,J),rs=c(J),$o=s(J,"P",{});var Ei=i($o);as=a(Ei,"Examples:"),Ei.forEach(o),ns=c(J),v(dt.$$.fragment,J),J.forEach(o),jt.forEach(o),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(Ni)),l(p,"id","segformer"),l(p,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(p,"href","#segformer"),l(g,"class","relative group"),l(ce,"id","overview"),l(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ce,"href","#overview"),l(X,"class","relative 
group"),l(Fe,"href","https://arxiv.org/abs/2105.15203"),l(Fe,"rel","nofollow"),l(Ie,"href","https://arxiv.org/abs/2105.15203"),l(Ie,"rel","nofollow"),l(Pe,"width","600"),Mi(Pe.src,ss="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/segformer_architecture.png")||l(Pe,"src",ss),l(Ce,"href","https://huggingface.co/nielsr"),l(Ce,"rel","nofollow"),l(Me,"href","https://github.com/NVlabs/SegFormer"),l(Me,"rel","nofollow"),l(gt,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerModel"),l(pt,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForSemanticSegmentation"),l(ut,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForImageClassification"),l(ke,"href","https://huggingface.co/models?other=segformer"),l(ke,"rel","nofollow"),l(je,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/SegFormer"),l(je,"rel","nofollow"),l(_t,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor"),l(Ae,"href","https://github.com/NVlabs/SegFormer/blob/master/local_configs/_base_/datasets/ade20k_repeat.py"),l(Ae,"rel","nofollow"),l(vt,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerFeatureExtractor"),l(bt,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForSemanticSegmentation"),l(he,"id","transformers.SegformerConfig"),l(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(he,"href","#transformers.SegformerConfig"),l(Y,"class","relative group"),l(St,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerModel"),l(qe,"href","https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512"),l(qe,"rel","nofollow"),l(wt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(yt,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),l(P,"class","docstring"),l(ge,"id","transformers.SegformerFeatureExtractor"),l(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ge,"href","#transformers.SegformerFeatureExtractor"),l(te,"class","relative group"),l(B,"class","docstring"),l(N,"class","docstring"),l(ue,"id","transformers.SegformerModel"),l(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ue,"href","#transformers.SegformerModel"),l(oe,"class","relative group"),l(Ge,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ge,"rel","nofollow"),l($t,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerModel"),l(k,"class","docstring"),l(O,"class","docstring"),l(ve,"id","transformers.SegformerDecodeHead"),l(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ve,"href","#transformers.SegformerDecodeHead"),l(ae,"class","relative group"),l(go,"class","docstring"),l(Ze,"class","docstring"),l(be,"id","transformers.SegformerForImageClassification"),l(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(be,"href","#transformers.SegformerForImageClassification"),l(ne,"class","relative group"),l(tt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(tt,"rel","nofollow"),l(Tt,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForImageClassification"),l(z,"class","docstring"),l(q,"class","docstring"),l(we,"id","transformers.SegformerForSemanticSegmentation"),l(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(we,"href","#transformers.SegformerForSemanticSegmentation"),l(ie,"class","relative group"),l(it,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(it,"rel","nofollow"),l(xt,"href","/docs/transformers/v4.15.0/en/model_doc/segformer#transformers.SegformerForSemanticSegmentation"),l(j,"class","docstring"),l(H,"class","docstring")},m(t,m){e(document.head,h),f(t,x,m),f(t,g,m),e(g,p),e(p,I),b(u,I,null),e(g,$),e(g,C),e(C,fr),f(t,Fo,m),f(t,X,m),e(X,ce),e(ce,Lt),b(Ee,Lt,null),e(X,hr),e(X,Nt),e(Nt,gr),f(t,Io,m),f(t,me,m),e(me,pr),e(me,Fe),e(Fe,ur),e(me,_r),f(t,Po,m),f(t,mt,m),e(mt,vr),f(t,Co,m),f(t,ft,m),e(ft,qt),e(qt,br),f(t,Mo,m),f(t,fe,m),e(fe,Sr),e(fe,Ie),e(Ie,wr),e(fe,yr),f(t,ko,m),f(t,Pe,m),f(t,zo,m),f(t,W,m),e(W,$r),e(W,Ce),e(Ce,Tr),e(W,xr),e(W,Me),e(Me,Er),e(W,Fr),f(t,jo,m),f(t,ht,m),e(ht,Ir),f(t,Ao,m),f(t,M,m),e(M,L),e(L,Pr),e(L,gt),e(gt,Cr),e(L,Mr),e(L,pt),e(pt,kr),e(L,zr),e(L,ut),e(ut,jr),e(L,Ar),e(L,ke),e(ke,Lr),e(L,Nr),e(M,qr),e(M,ze),e(ze,Dr),e(ze,je),e(je,Or),e(ze,Hr),e(M,Wr),e(M,Z),e(Z,Br),e(Z,_t),e(_t,Rr),e(Z,Ur),e(Z,Ae),e(Ae,Vr),e(Z,Gr),e(M,Kr),e(M,T),e(T,Jr),e(T,vt),e(vt,Xr),e(T,Zr),e(T,Dt),e(Dt,Yr),e(T,Qr),e(T,Ot),e(Ot,ea),e(T,ta),e(T,Ht),e(Ht,oa),e(T,ra),e(T,Wt),e(Wt,aa),e(T,na),e(T,Bt),e(Bt,sa),e(T,ia),e(T,bt),e(bt,la),e(T,da),e(T,Rt),e(Rt,ca),e(T,ma),e(T,Ut),e(Ut,fa),e(T,ha),e(M,ga),e(M,Vt),e(Vt,pa),f(t,Lo,m),f(t,F,m),e(F,ua),e(F,Gt),e(Gt,_a),e(F,va),e(F,Kt),e(Kt,ba),e(F,Sa),e(F,Jt),e(Jt,wa),e(F,ya),e(F,Xt),e(Xt,$a),e(F,Ta),e(F,Zt),e(Zt,xa),e(F,Ea),e(F,Yt),e(Yt,Fa),e(F,Ia),f(t,No,m),f(t,Y,m),e(Y,he),e(he,Qt),b(Le,Qt,null),e(Y,Pa),e(Y,eo),e(eo,Ca),f(t,qo,m),f(t,P,m),b(Ne,P,null),e(P,Ma),e(P,Q),e(Q,ka),e(Q,St),e(St,za),e(Q,ja),e(Q,qe),e(qe,Aa),e(Q,La),e(P,Na),e(P,ee),e(ee,qa),e(ee,wt),e(wt,Da),e(ee,Oa),e(ee,yt),e(yt,Ha),e(ee,Wa),e(P,Ba),e(P,to),e(to,Ra),e(P,Ua),b(De,P,null),f(t,Do,m),f(t,te,m),e(te,ge),e(ge,oo),b(Oe,oo,null),e(te,Va),e(te,ro),e(ro,Ga),f(t,Oo,m),f(t,N,m),b(He,N,null),e(N,Ka),e(N,ao),e(ao,Ja),e(N,Xa),e(N,We),e(We,Za),e(We,no),e(no,Ya),e(We,Qa),e(N,en),e(N,B),b(Be,B,null),e(B,tn),e(B,so),e(so,on),e(B,rn),b(pe,B,null),f(t,Ho,m),f(t,oe,m),e(oe,ue),e(ue,io),b(Re,io,null),e(oe,an),e(oe,lo),e(lo,nn),f(t,Wo,m),f(t,O,m),b(Ue,O,null),e(O,sn),e(O,Ve),e(Ve,ln),e(Ve,Ge),e(Ge,dn),e(Ve,cn),e(O,mn),e(O,k),b(Ke,k,null),e(k,fn),e(k,re),e(re,hn),e(re,$t),e($t,gn),e(re,pn),e(re,co),e(co,un),e(re,_n),e(k,vn),b(_e,k,null),e(k,bn),e(k,mo),e(mo,Sn),e(k,wn),b(Je,k,null),f(t,Bo,m),f(t,ae,m),e(ae,ve),e(ve,fo),b(Xe,fo,null),e(ae,yn),e(ae,ho),e(ho,$n),f(t,Ro,m),f(t,Ze,m),e(Ze,go),f(t,Uo,m),f(t,ne,m),e(ne,be),e(be,po),b(Ye,po,null),e(ne,Tn),e(ne,uo),e(uo,xn),f(t,Vo,m),f(t,q,m),b(Qe,q,null),e(q,En),e(q,_o),e(_o,Fn),e(q,In),e(q,et),e(et,Pn),e(et,tt),e(tt,Cn),e(et,Mn),e(q,kn),e(q,z),b(ot,z,null),e(z,zn),e(z,se),e(se,jn),e(se,Tt),e(Tt,An),e(se,Ln),e(se,vo),e(vo,Nn),e(se,qn),e(z,Dn),b(Se,z,null),e(z,On),e(z,bo),e(bo,Hn),e(z,Wn)
,b(rt,z,null),f(t,Go,m),f(t,ie,m),e(ie,we),e(we,So),b(at,So,null),e(ie,Bn),e(ie,wo),e(wo,Rn),f(t,Ko,m),f(t,H,m),b(nt,H,null),e(H,Un),e(H,st),e(st,Vn),e(st,it),e(it,Gn),e(st,Kn),e(H,Jn),e(H,j),b(lt,j,null),e(j,Xn),e(j,le),e(le,Zn),e(le,xt),e(xt,Yn),e(le,Qn),e(le,yo),e(yo,es),e(le,ts),e(j,os),b(ye,j,null),e(j,rs),e(j,$o),e($o,as),e(j,ns),b(dt,j,null),Jo=!0},p(t,[m]){const ct={};m&2&&(ct.$$scope={dirty:m,ctx:t}),pe.$set(ct);const To={};m&2&&(To.$$scope={dirty:m,ctx:t}),_e.$set(To);const xo={};m&2&&(xo.$$scope={dirty:m,ctx:t}),Se.$set(xo);const Eo={};m&2&&(Eo.$$scope={dirty:m,ctx:t}),ye.$set(Eo)},i(t){Jo||(S(u.$$.fragment,t),S(Ee.$$.fragment,t),S(Le.$$.fragment,t),S(Ne.$$.fragment,t),S(De.$$.fragment,t),S(Oe.$$.fragment,t),S(He.$$.fragment,t),S(Be.$$.fragment,t),S(pe.$$.fragment,t),S(Re.$$.fragment,t),S(Ue.$$.fragment,t),S(Ke.$$.fragment,t),S(_e.$$.fragment,t),S(Je.$$.fragment,t),S(Xe.$$.fragment,t),S(Ye.$$.fragment,t),S(Qe.$$.fragment,t),S(ot.$$.fragment,t),S(Se.$$.fragment,t),S(rt.$$.fragment,t),S(at.$$.fragment,t),S(nt.$$.fragment,t),S(lt.$$.fragment,t),S(ye.$$.fragment,t),S(dt.$$.fragment,t),Jo=!0)},o(t){w(u.$$.fragment,t),w(Ee.$$.fragment,t),w(Le.$$.fragment,t),w(Ne.$$.fragment,t),w(De.$$.fragment,t),w(Oe.$$.fragment,t),w(He.$$.fragment,t),w(Be.$$.fragment,t),w(pe.$$.fragment,t),w(Re.$$.fragment,t),w(Ue.$$.fragment,t),w(Ke.$$.fragment,t),w(_e.$$.fragment,t),w(Je.$$.fragment,t),w(Xe.$$.fragment,t),w(Ye.$$.fragment,t),w(Qe.$$.fragment,t),w(ot.$$.fragment,t),w(Se.$$.fragment,t),w(rt.$$.fragment,t),w(at.$$.fragment,t),w(nt.$$.fragment,t),w(lt.$$.fragment,t),w(ye.$$.fragment,t),w(dt.$$.fragment,t),Jo=!1},d(t){o(h),t&&o(x),t&&o(g),y(u),t&&o(Fo),t&&o(X),y(Ee),t&&o(Io),t&&o(me),t&&o(Po),t&&o(mt),t&&o(Co),t&&o(ft),t&&o(Mo),t&&o(fe),t&&o(ko),t&&o(Pe),t&&o(zo),t&&o(W),t&&o(jo),t&&o(ht),t&&o(Ao),t&&o(M),t&&o(Lo),t&&o(F),t&&o(No),t&&o(Y),y(Le),t&&o(qo),t&&o(P),y(Ne),y(De),t&&o(Do),t&&o(te),y(Oe),t&&o(Oo),t&&o(N),y(He),y(Be),y(pe),t&&o(Ho),t&&o(oe),y(Re),t&&o(Wo),t&&o(O),y(Ue),y(Ke),y(_e),y(Je),t&&o(Bo),t&&o(ae),y(Xe),t&&o(Ro),t&&o(Ze),t&&o(Uo),t&&o(ne),y(Ye),t&&o(Vo),t&&o(q),y(Qe),y(ot),y(Se),y(rt),t&&o(Go),t&&o(ie),y(at),t&&o(Ko),t&&o(H),y(nt),y(lt),y(ye),y(dt)}}}const Ni={local:"segformer",sections:[{local:"overview",title:"Overview"},{local:"transformers.SegformerConfig",title:"SegformerConfig"},{local:"transformers.SegformerFeatureExtractor",title:"SegformerFeatureExtractor"},{local:"transformers.SegformerModel",title:"SegformerModel"},{local:"transformers.SegformerDecodeHead",title:"SegformerDecodeHead"},{local:"transformers.SegformerForImageClassification",title:"SegformerForImageClassification"},{local:"transformers.SegformerForSemanticSegmentation",title:"SegformerForSemanticSegmentation"}],title:"SegFormer"};function qi(D,h,x){let{fw:g}=h;return D.$$set=p=>{"fw"in p&&x(0,g=p.fw)},[g]}class Ui extends Fi{constructor(h){super();Ii(this,h,qi,Li,Pi,{fw:0})}}export{Ui as default,Ni as metadata};
9,992
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/model_doc/camembert.mdx-49c41aa9.js
import{S as Nu,i as Uu,s as Hu,e as r,k as d,w as $,t as n,L as Qu,c as s,d as t,m,a,x as F,h as i,b as c,J as e,g as h,y as P,q as M,o as q,B as L}from"../../chunks/vendor-b1433968.js";import{T as zs}from"../../chunks/Tip-c3840994.js";import{D as Fe}from"../../chunks/Docstring-ff504c58.js";import{I as Pe}from"../../chunks/IconCopyLink-7029626d.js";function Vu(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue;return{c(){b=r("p"),Z=n("TF 2.0 models accepts two formats as inputs:"),A=d(),_=r("ul"),z=r("li"),G=n("having all inputs as keyword arguments (like PyTorch models), or"),le=d(),D=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=d(),u=r("p"),K=n("This second option is useful when using "),x=r("code"),X=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=r("code"),ce=n("model(inputs)"),se=n("."),H=d(),E=r("p"),j=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),V=d(),g=r("ul"),k=r("li"),ee=n("a single Tensor with "),O=r("code"),ae=n("input_ids"),te=n(" only and nothing else: "),S=r("code"),he=n("model(inputs_ids)"),ne=d(),v=r("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r("code"),oe=n("model([input_ids, attention_mask])"),J=n(" or "),N=r("code"),re=n("model([input_ids, attention_mask, token_type_ids])"),pe=d(),C=r("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=r("code"),ue=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(l){b=s(l,"P",{});var p=a(b);Z=i(p,"TF 2.0 models accepts two formats as inputs:"),p.forEach(t),A=m(l),_=s(l,"UL",{});var W=a(_);z=s(W,"LI",{});var ke=a(z);G=i(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),le=m(W),D=s(W,"LI",{});var Ee=a(D);de=i(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),W.forEach(t),Q=m(l),u=s(l,"P",{});var y=a(u);K=i(y,"This second option is useful when using "),x=s(y,"CODE",{});var Te=a(x);X=i(Te,"tf.keras.Model.fit"),Te.forEach(t),me=i(y,` method which currently requires having all the tensors in the first argument of the model call function: `),I=s(y,"CODE",{});var ge=a(I);ce=i(ge,"model(inputs)"),ge.forEach(t),se=i(y,"."),y.forEach(t),H=m(l),E=s(l,"P",{});var ve=a(E);j=i(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),V=m(l),g=s(l,"UL",{});var T=a(g);k=s(T,"LI",{});var R=a(k);ee=i(R,"a single Tensor with "),O=s(R,"CODE",{});var Ce=a(O);ae=i(Ce,"input_ids"),Ce.forEach(t),te=i(R," only and nothing else: "),S=s(R,"CODE",{});var be=a(S);he=i(be,"model(inputs_ids)"),be.forEach(t),R.forEach(t),ne=m(T),v=s(T,"LI",{});var U=a(v);fe=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(U,"CODE",{});var we=a(B);oe=i(we,"model([input_ids, attention_mask])"),we.forEach(t),J=i(U," or "),N=s(U,"CODE",{});var _e=a(N);re=i(_e,"model([input_ids, attention_mask, token_type_ids])"),_e.forEach(t),U.forEach(t),pe=m(T),C=s(T,"LI",{});var Y=a(C);ie=i(Y,`a dictionary with one or several input Tensors associated to the input names given in the docstring: 
`),w=s(Y,"CODE",{});var ye=a(w);ue=i(ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ye.forEach(t),Y.forEach(t),T.forEach(t)},m(l,p){h(l,b,p),e(b,Z),h(l,A,p),h(l,_,p),e(_,z),e(z,G),e(_,le),e(_,D),e(D,de),h(l,Q,p),h(l,u,p),e(u,K),e(u,x),e(x,X),e(u,me),e(u,I),e(I,ce),e(u,se),h(l,H,p),h(l,E,p),e(E,j),h(l,V,p),h(l,g,p),e(g,k),e(k,ee),e(k,O),e(O,ae),e(k,te),e(k,S),e(S,he),e(g,ne),e(g,v),e(v,fe),e(v,B),e(B,oe),e(v,J),e(v,N),e(N,re),e(g,pe),e(g,C),e(C,ie),e(C,w),e(w,ue)},d(l){l&&t(b),l&&t(A),l&&t(_),l&&t(Q),l&&t(u),l&&t(H),l&&t(E),l&&t(V),l&&t(g)}}}function Wu(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue;return{c(){b=r("p"),Z=n("TF 2.0 models accepts two formats as inputs:"),A=d(),_=r("ul"),z=r("li"),G=n("having all inputs as keyword arguments (like PyTorch models), or"),le=d(),D=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=d(),u=r("p"),K=n("This second option is useful when using "),x=r("code"),X=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=r("code"),ce=n("model(inputs)"),se=n("."),H=d(),E=r("p"),j=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),V=d(),g=r("ul"),k=r("li"),ee=n("a single Tensor with "),O=r("code"),ae=n("input_ids"),te=n(" only and nothing else: "),S=r("code"),he=n("model(inputs_ids)"),ne=d(),v=r("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r("code"),oe=n("model([input_ids, attention_mask])"),J=n(" or "),N=r("code"),re=n("model([input_ids, attention_mask, token_type_ids])"),pe=d(),C=r("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=r("code"),ue=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(l){b=s(l,"P",{});var p=a(b);Z=i(p,"TF 2.0 models accepts two formats as inputs:"),p.forEach(t),A=m(l),_=s(l,"UL",{});var W=a(_);z=s(W,"LI",{});var ke=a(z);G=i(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),le=m(W),D=s(W,"LI",{});var Ee=a(D);de=i(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),W.forEach(t),Q=m(l),u=s(l,"P",{});var y=a(u);K=i(y,"This second option is useful when using "),x=s(y,"CODE",{});var Te=a(x);X=i(Te,"tf.keras.Model.fit"),Te.forEach(t),me=i(y,` method which currently requires having all the tensors in the first argument of the model call function: `),I=s(y,"CODE",{});var ge=a(I);ce=i(ge,"model(inputs)"),ge.forEach(t),se=i(y,"."),y.forEach(t),H=m(l),E=s(l,"P",{});var ve=a(E);j=i(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),V=m(l),g=s(l,"UL",{});var T=a(g);k=s(T,"LI",{});var R=a(k);ee=i(R,"a single Tensor with "),O=s(R,"CODE",{});var Ce=a(O);ae=i(Ce,"input_ids"),Ce.forEach(t),te=i(R," only and nothing else: "),S=s(R,"CODE",{});var be=a(S);he=i(be,"model(inputs_ids)"),be.forEach(t),R.forEach(t),ne=m(T),v=s(T,"LI",{});var U=a(v);fe=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(U,"CODE",{});var we=a(B);oe=i(we,"model([input_ids, attention_mask])"),we.forEach(t),J=i(U," or "),N=s(U,"CODE",{});var 
_e=a(N);re=i(_e,"model([input_ids, attention_mask, token_type_ids])"),_e.forEach(t),U.forEach(t),pe=m(T),C=s(T,"LI",{});var Y=a(C);ie=i(Y,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=s(Y,"CODE",{});var ye=a(w);ue=i(ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ye.forEach(t),Y.forEach(t),T.forEach(t)},m(l,p){h(l,b,p),e(b,Z),h(l,A,p),h(l,_,p),e(_,z),e(z,G),e(_,le),e(_,D),e(D,de),h(l,Q,p),h(l,u,p),e(u,K),e(u,x),e(x,X),e(u,me),e(u,I),e(I,ce),e(u,se),h(l,H,p),h(l,E,p),e(E,j),h(l,V,p),h(l,g,p),e(g,k),e(k,ee),e(k,O),e(O,ae),e(k,te),e(k,S),e(S,he),e(g,ne),e(g,v),e(v,fe),e(v,B),e(B,oe),e(v,J),e(v,N),e(N,re),e(g,pe),e(g,C),e(C,ie),e(C,w),e(w,ue)},d(l){l&&t(b),l&&t(A),l&&t(_),l&&t(Q),l&&t(u),l&&t(H),l&&t(E),l&&t(V),l&&t(g)}}}function Ku(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue;return{c(){b=r("p"),Z=n("TF 2.0 models accepts two formats as inputs:"),A=d(),_=r("ul"),z=r("li"),G=n("having all inputs as keyword arguments (like PyTorch models), or"),le=d(),D=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=d(),u=r("p"),K=n("This second option is useful when using "),x=r("code"),X=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=r("code"),ce=n("model(inputs)"),se=n("."),H=d(),E=r("p"),j=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),V=d(),g=r("ul"),k=r("li"),ee=n("a single Tensor with "),O=r("code"),ae=n("input_ids"),te=n(" only and nothing else: "),S=r("code"),he=n("model(inputs_ids)"),ne=d(),v=r("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r("code"),oe=n("model([input_ids, attention_mask])"),J=n(" or "),N=r("code"),re=n("model([input_ids, attention_mask, token_type_ids])"),pe=d(),C=r("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=r("code"),ue=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(l){b=s(l,"P",{});var p=a(b);Z=i(p,"TF 2.0 models accepts two formats as inputs:"),p.forEach(t),A=m(l),_=s(l,"UL",{});var W=a(_);z=s(W,"LI",{});var ke=a(z);G=i(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),le=m(W),D=s(W,"LI",{});var Ee=a(D);de=i(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),W.forEach(t),Q=m(l),u=s(l,"P",{});var y=a(u);K=i(y,"This second option is useful when using "),x=s(y,"CODE",{});var Te=a(x);X=i(Te,"tf.keras.Model.fit"),Te.forEach(t),me=i(y,` method which currently requires having all the tensors in the first argument of the model call function: `),I=s(y,"CODE",{});var ge=a(I);ce=i(ge,"model(inputs)"),ge.forEach(t),se=i(y,"."),y.forEach(t),H=m(l),E=s(l,"P",{});var ve=a(E);j=i(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),V=m(l),g=s(l,"UL",{});var T=a(g);k=s(T,"LI",{});var R=a(k);ee=i(R,"a single Tensor with "),O=s(R,"CODE",{});var Ce=a(O);ae=i(Ce,"input_ids"),Ce.forEach(t),te=i(R," only and nothing else: "),S=s(R,"CODE",{});var be=a(S);he=i(be,"model(inputs_ids)"),be.forEach(t),R.forEach(t),ne=m(T),v=s(T,"LI",{});var U=a(v);fe=i(U,`a list of 
varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(U,"CODE",{});var we=a(B);oe=i(we,"model([input_ids, attention_mask])"),we.forEach(t),J=i(U," or "),N=s(U,"CODE",{});var _e=a(N);re=i(_e,"model([input_ids, attention_mask, token_type_ids])"),_e.forEach(t),U.forEach(t),pe=m(T),C=s(T,"LI",{});var Y=a(C);ie=i(Y,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=s(Y,"CODE",{});var ye=a(w);ue=i(ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ye.forEach(t),Y.forEach(t),T.forEach(t)},m(l,p){h(l,b,p),e(b,Z),h(l,A,p),h(l,_,p),e(_,z),e(z,G),e(_,le),e(_,D),e(D,de),h(l,Q,p),h(l,u,p),e(u,K),e(u,x),e(x,X),e(u,me),e(u,I),e(I,ce),e(u,se),h(l,H,p),h(l,E,p),e(E,j),h(l,V,p),h(l,g,p),e(g,k),e(k,ee),e(k,O),e(O,ae),e(k,te),e(k,S),e(S,he),e(g,ne),e(g,v),e(v,fe),e(v,B),e(B,oe),e(v,J),e(v,N),e(N,re),e(g,pe),e(g,C),e(C,ie),e(C,w),e(w,ue)},d(l){l&&t(b),l&&t(A),l&&t(_),l&&t(Q),l&&t(u),l&&t(H),l&&t(E),l&&t(V),l&&t(g)}}}function Gu(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue;return{c(){b=r("p"),Z=n("TF 2.0 models accepts two formats as inputs:"),A=d(),_=r("ul"),z=r("li"),G=n("having all inputs as keyword arguments (like PyTorch models), or"),le=d(),D=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=d(),u=r("p"),K=n("This second option is useful when using "),x=r("code"),X=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=r("code"),ce=n("model(inputs)"),se=n("."),H=d(),E=r("p"),j=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),V=d(),g=r("ul"),k=r("li"),ee=n("a single Tensor with "),O=r("code"),ae=n("input_ids"),te=n(" only and nothing else: "),S=r("code"),he=n("model(inputs_ids)"),ne=d(),v=r("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r("code"),oe=n("model([input_ids, attention_mask])"),J=n(" or "),N=r("code"),re=n("model([input_ids, attention_mask, token_type_ids])"),pe=d(),C=r("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=r("code"),ue=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(l){b=s(l,"P",{});var p=a(b);Z=i(p,"TF 2.0 models accepts two formats as inputs:"),p.forEach(t),A=m(l),_=s(l,"UL",{});var W=a(_);z=s(W,"LI",{});var ke=a(z);G=i(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),le=m(W),D=s(W,"LI",{});var Ee=a(D);de=i(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),W.forEach(t),Q=m(l),u=s(l,"P",{});var y=a(u);K=i(y,"This second option is useful when using "),x=s(y,"CODE",{});var Te=a(x);X=i(Te,"tf.keras.Model.fit"),Te.forEach(t),me=i(y,` method which currently requires having all the tensors in the first argument of the model call function: `),I=s(y,"CODE",{});var ge=a(I);ce=i(ge,"model(inputs)"),ge.forEach(t),se=i(y,"."),y.forEach(t),H=m(l),E=s(l,"P",{});var ve=a(E);j=i(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),V=m(l),g=s(l,"UL",{});var T=a(g);k=s(T,"LI",{});var R=a(k);ee=i(R,"a single Tensor with "),O=s(R,"CODE",{});var 
Ce=a(O);ae=i(Ce,"input_ids"),Ce.forEach(t),te=i(R," only and nothing else: "),S=s(R,"CODE",{});var be=a(S);he=i(be,"model(inputs_ids)"),be.forEach(t),R.forEach(t),ne=m(T),v=s(T,"LI",{});var U=a(v);fe=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(U,"CODE",{});var we=a(B);oe=i(we,"model([input_ids, attention_mask])"),we.forEach(t),J=i(U," or "),N=s(U,"CODE",{});var _e=a(N);re=i(_e,"model([input_ids, attention_mask, token_type_ids])"),_e.forEach(t),U.forEach(t),pe=m(T),C=s(T,"LI",{});var Y=a(C);ie=i(Y,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=s(Y,"CODE",{});var ye=a(w);ue=i(ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ye.forEach(t),Y.forEach(t),T.forEach(t)},m(l,p){h(l,b,p),e(b,Z),h(l,A,p),h(l,_,p),e(_,z),e(z,G),e(_,le),e(_,D),e(D,de),h(l,Q,p),h(l,u,p),e(u,K),e(u,x),e(x,X),e(u,me),e(u,I),e(I,ce),e(u,se),h(l,H,p),h(l,E,p),e(E,j),h(l,V,p),h(l,g,p),e(g,k),e(k,ee),e(k,O),e(O,ae),e(k,te),e(k,S),e(S,he),e(g,ne),e(g,v),e(v,fe),e(v,B),e(B,oe),e(v,J),e(v,N),e(N,re),e(g,pe),e(g,C),e(C,ie),e(C,w),e(w,ue)},d(l){l&&t(b),l&&t(A),l&&t(_),l&&t(Q),l&&t(u),l&&t(H),l&&t(E),l&&t(V),l&&t(g)}}}function Xu(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue;return{c(){b=r("p"),Z=n("TF 2.0 models accepts two formats as inputs:"),A=d(),_=r("ul"),z=r("li"),G=n("having all inputs as keyword arguments (like PyTorch models), or"),le=d(),D=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=d(),u=r("p"),K=n("This second option is useful when using "),x=r("code"),X=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=r("code"),ce=n("model(inputs)"),se=n("."),H=d(),E=r("p"),j=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),V=d(),g=r("ul"),k=r("li"),ee=n("a single Tensor with "),O=r("code"),ae=n("input_ids"),te=n(" only and nothing else: "),S=r("code"),he=n("model(inputs_ids)"),ne=d(),v=r("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r("code"),oe=n("model([input_ids, attention_mask])"),J=n(" or "),N=r("code"),re=n("model([input_ids, attention_mask, token_type_ids])"),pe=d(),C=r("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=r("code"),ue=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(l){b=s(l,"P",{});var p=a(b);Z=i(p,"TF 2.0 models accepts two formats as inputs:"),p.forEach(t),A=m(l),_=s(l,"UL",{});var W=a(_);z=s(W,"LI",{});var ke=a(z);G=i(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),le=m(W),D=s(W,"LI",{});var Ee=a(D);de=i(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),W.forEach(t),Q=m(l),u=s(l,"P",{});var y=a(u);K=i(y,"This second option is useful when using "),x=s(y,"CODE",{});var Te=a(x);X=i(Te,"tf.keras.Model.fit"),Te.forEach(t),me=i(y,` method which currently requires having all the tensors in the first argument of the model call function: `),I=s(y,"CODE",{});var ge=a(I);ce=i(ge,"model(inputs)"),ge.forEach(t),se=i(y,"."),y.forEach(t),H=m(l),E=s(l,"P",{});var ve=a(E);j=i(ve,`If you choose this second option, there are three 
possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),V=m(l),g=s(l,"UL",{});var T=a(g);k=s(T,"LI",{});var R=a(k);ee=i(R,"a single Tensor with "),O=s(R,"CODE",{});var Ce=a(O);ae=i(Ce,"input_ids"),Ce.forEach(t),te=i(R," only and nothing else: "),S=s(R,"CODE",{});var be=a(S);he=i(be,"model(inputs_ids)"),be.forEach(t),R.forEach(t),ne=m(T),v=s(T,"LI",{});var U=a(v);fe=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(U,"CODE",{});var we=a(B);oe=i(we,"model([input_ids, attention_mask])"),we.forEach(t),J=i(U," or "),N=s(U,"CODE",{});var _e=a(N);re=i(_e,"model([input_ids, attention_mask, token_type_ids])"),_e.forEach(t),U.forEach(t),pe=m(T),C=s(T,"LI",{});var Y=a(C);ie=i(Y,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=s(Y,"CODE",{});var ye=a(w);ue=i(ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ye.forEach(t),Y.forEach(t),T.forEach(t)},m(l,p){h(l,b,p),e(b,Z),h(l,A,p),h(l,_,p),e(_,z),e(z,G),e(_,le),e(_,D),e(D,de),h(l,Q,p),h(l,u,p),e(u,K),e(u,x),e(x,X),e(u,me),e(u,I),e(I,ce),e(u,se),h(l,H,p),h(l,E,p),e(E,j),h(l,V,p),h(l,g,p),e(g,k),e(k,ee),e(k,O),e(O,ae),e(k,te),e(k,S),e(S,he),e(g,ne),e(g,v),e(v,fe),e(v,B),e(B,oe),e(v,J),e(v,N),e(N,re),e(g,pe),e(g,C),e(C,ie),e(C,w),e(w,ue)},d(l){l&&t(b),l&&t(A),l&&t(_),l&&t(Q),l&&t(u),l&&t(H),l&&t(E),l&&t(V),l&&t(g)}}}function ju(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue;return{c(){b=r("p"),Z=n("TF 2.0 models accepts two formats as inputs:"),A=d(),_=r("ul"),z=r("li"),G=n("having all inputs as keyword arguments (like PyTorch models), or"),le=d(),D=r("li"),de=n("having all inputs as a list, tuple or dict in the first positional arguments."),Q=d(),u=r("p"),K=n("This second option is useful when using "),x=r("code"),X=n("tf.keras.Model.fit"),me=n(` method which currently requires having all the tensors in the first argument of the model call function: `),I=r("code"),ce=n("model(inputs)"),se=n("."),H=d(),E=r("p"),j=n(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),V=d(),g=r("ul"),k=r("li"),ee=n("a single Tensor with "),O=r("code"),ae=n("input_ids"),te=n(" only and nothing else: "),S=r("code"),he=n("model(inputs_ids)"),ne=d(),v=r("li"),fe=n(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=r("code"),oe=n("model([input_ids, attention_mask])"),J=n(" or "),N=r("code"),re=n("model([input_ids, attention_mask, token_type_ids])"),pe=d(),C=r("li"),ie=n(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=r("code"),ue=n('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(l){b=s(l,"P",{});var p=a(b);Z=i(p,"TF 2.0 models accepts two formats as inputs:"),p.forEach(t),A=m(l),_=s(l,"UL",{});var W=a(_);z=s(W,"LI",{});var ke=a(z);G=i(ke,"having all inputs as keyword arguments (like PyTorch models), or"),ke.forEach(t),le=m(W),D=s(W,"LI",{});var Ee=a(D);de=i(Ee,"having all inputs as a list, tuple or dict in the first positional arguments."),Ee.forEach(t),W.forEach(t),Q=m(l),u=s(l,"P",{});var y=a(u);K=i(y,"This second option is useful when using "),x=s(y,"CODE",{});var Te=a(x);X=i(Te,"tf.keras.Model.fit"),Te.forEach(t),me=i(y,` method which currently requires having all the tensors in the first argument of 
the model call function: `),I=s(y,"CODE",{});var ge=a(I);ce=i(ge,"model(inputs)"),ge.forEach(t),se=i(y,"."),y.forEach(t),H=m(l),E=s(l,"P",{});var ve=a(E);j=i(ve,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ve.forEach(t),V=m(l),g=s(l,"UL",{});var T=a(g);k=s(T,"LI",{});var R=a(k);ee=i(R,"a single Tensor with "),O=s(R,"CODE",{});var Ce=a(O);ae=i(Ce,"input_ids"),Ce.forEach(t),te=i(R," only and nothing else: "),S=s(R,"CODE",{});var be=a(S);he=i(be,"model(inputs_ids)"),be.forEach(t),R.forEach(t),ne=m(T),v=s(T,"LI",{});var U=a(v);fe=i(U,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),B=s(U,"CODE",{});var we=a(B);oe=i(we,"model([input_ids, attention_mask])"),we.forEach(t),J=i(U," or "),N=s(U,"CODE",{});var _e=a(N);re=i(_e,"model([input_ids, attention_mask, token_type_ids])"),_e.forEach(t),U.forEach(t),pe=m(T),C=s(T,"LI",{});var Y=a(C);ie=i(Y,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),w=s(Y,"CODE",{});var ye=a(w);ue=i(ye,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ye.forEach(t),Y.forEach(t),T.forEach(t)},m(l,p){h(l,b,p),e(b,Z),h(l,A,p),h(l,_,p),e(_,z),e(z,G),e(_,le),e(_,D),e(D,de),h(l,Q,p),h(l,u,p),e(u,K),e(u,x),e(x,X),e(u,me),e(u,I),e(I,ce),e(u,se),h(l,H,p),h(l,E,p),e(E,j),h(l,V,p),h(l,g,p),e(g,k),e(k,ee),e(k,O),e(O,ae),e(k,te),e(k,S),e(S,he),e(g,ne),e(g,v),e(v,fe),e(v,B),e(B,oe),e(v,J),e(v,N),e(N,re),e(g,pe),e(g,C),e(C,ie),e(C,w),e(w,ue)},d(l){l&&t(b),l&&t(A),l&&t(_),l&&t(Q),l&&t(u),l&&t(H),l&&t(E),l&&t(V),l&&t(g)}}}function Ju(xe){let b,Z,A,_,z,G,le,D,de,Q,u,K,x,X,me,I,ce,se,H,E,j,V,g,k,ee,O,ae,te,S,he,ne,v,fe,B,oe,J,N,re,pe,C,ie,w,ue,l,p,W,ke,Ee,y,Te,ge,ve,T,R,Ce,be,U,we,_e,Y,ye,Zt,Si,Wr,Bi,Oi,Qa,mt,Ft,Ds,eo,Ni,xs,Ui,Va,$e,to,Hi,Ye,Qi,Kr,Vi,Wi,Gr,Ki,Gi,oo,Xi,ji,Ji,ro,Yi,Xr,Zi,el,tl,ct,ol,Is,rl,sl,Ss,al,nl,il,Ze,so,ll,Bs,dl,ml,ao,jr,cl,Os,hl,fl,Jr,pl,Ns,ul,gl,Pt,no,_l,io,vl,Us,kl,bl,Tl,Mt,lo,wl,Hs,El,Cl,Qs,Wa,ht,qt,Vs,mo,yl,Ws,$l,Ka,Ie,co,Fl,Ve,Pl,Ks,Ml,ql,Yr,Ll,Rl,Zr,Al,zl,ho,Dl,xl,Il,fo,Sl,es,Bl,Ol,Nl,et,po,Ul,Gs,Hl,Ql,uo,ts,Vl,Xs,Wl,Kl,os,Gl,js,Xl,jl,Lt,go,Jl,Js,Yl,Ga,ft,Rt,Ys,_o,Zl,Zs,ed,Xa,Se,vo,td,ea,od,rd,ko,sd,rs,ad,nd,id,bo,ld,To,dd,md,cd,wo,hd,ss,fd,pd,ja,pt,At,ta,Eo,ud,oa,gd,Ja,Be,Co,_d,yo,vd,ra,kd,bd,Td,$o,wd,as,Ed,Cd,yd,Fo,$d,Po,Fd,Pd,Md,Mo,qd,ns,Ld,Rd,Ya,ut,zt,sa,qo,Ad,aa,zd,Za,Oe,Lo,Dd,Ro,xd,na,Id,Sd,Bd,Ao,Od,is,Nd,Ud,Hd,zo,Qd,Do,Vd,Wd,Kd,xo,Gd,ls,Xd,jd,en,gt,Dt,ia,Io,Jd,la,Yd,tn,Ne,So,Zd,da,em,tm,Bo,om,ds,rm,sm,am,Oo,nm,No,im,lm,dm,Uo,mm,ms,cm,hm,on,_t,xt,ma,Ho,fm,ca,pm,rn,Ue,Qo,um,ha,gm,_m,Vo,vm,cs,km,bm,Tm,Wo,wm,Ko,Em,Cm,ym,Go,$m,hs,Fm,Pm,sn,vt,It,fa,Xo,Mm,pa,qm,an,He,jo,Lm,ua,Rm,Am,Jo,zm,fs,Dm,xm,Im,Yo,Sm,Zo,Bm,Om,Nm,er,Um,ps,Hm,Qm,nn,kt,St,ga,tr,Vm,_a,Wm,ln,Qe,or,Km,Bt,Gm,va,Xm,jm,ka,Jm,Ym,rr,Zm,us,ec,tc,oc,sr,rc,ar,sc,ac,nc,nr,ic,gs,lc,dc,dn,bt,Ot,ba,ir,mc,Ta,cc,mn,Me,lr,hc,wa,fc,pc,dr,uc,_s,gc,_c,vc,mr,kc,cr,bc,Tc,wc,Nt,Ec,hr,Cc,vs,yc,$c,cn,Tt,Ut,Ea,fr,Fc,Ca,Pc,hn,qe,pr,Mc,ur,qc,ya,Lc,Rc,Ac,gr,zc,ks,Dc,xc,Ic,_r,Sc,vr,Bc,Oc,Nc,Ht,Uc,kr,Hc,bs,Qc,Vc,fn,wt,Qt,$a,br,Wc,Fa,Kc,pn,Le,Tr,Gc,Pa,Xc,jc,wr,Jc,Ts,Yc,Zc,eh,Er,th,Cr,oh,rh,sh,Vt,ah,yr,nh,ws,ih,lh,un,Et,Wt,Ma,$r,dh,qa,mh,gn,Re,Fr,ch,La,hh,fh,Pr,ph,Es,uh,gh,_h,Mr,vh,qr,kh,bh,Th,Kt,wh,Lr,Eh,Cs,Ch,yh,_n,Ct,Gt,Ra,Rr,$h,Aa,Fh,vn,Ae,Ar,Ph,za,Mh,qh,zr,Lh,ys,Rh,Ah,zh,Dr,Dh,xr,xh,Ih,Sh,Xt,Bh,Ir,Oh,$s,Nh,Uh,kn,yt,jt,Da,Sr,Hh,xa,Qh,bn,ze,Br,Vh,$t,Wh,Ia,Kh,Gh,Sa,Xh,jh,Jh,Or,Yh,Fs,Zh,ef,tf,Nr,of,Ur,rf,s
f,af,Jt,nf,Hr,lf,Ps,df,mf,Tn;return G=new Pe({}),X=new Pe({}),R=new Pe({}),Y=new Fe({props:{name:"class transformers.CamembertConfig",anchor:"transformers.CamembertConfig",parameters:[{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/configuration_camembert.py#L35"}}),eo=new Pe({}),to=new Fe({props:{name:"class transformers.CamembertTokenizer",anchor:"transformers.CamembertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"additional_special_tokens",val:" = ['<s>NOTUSED', '</s>NOTUSED']"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert.py#L45",parametersDescription:[{anchor:"transformers.CamembertTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.CamembertTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.CamembertTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.CamembertTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.CamembertTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.CamembertTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.CamembertTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.CamembertTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.CamembertTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.CamembertTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}]}}),so=new Fe({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.CamembertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert.py#L159",parametersDescription:[{anchor:"transformers.CamembertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.CamembertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),no=new Fe({props:{name:"get_special_tokens_mask",anchor:"transformers.CamembertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert.py#L185",parametersDescription:[{anchor:"transformers.CamembertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CamembertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.CamembertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),lo=new Fe({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.CamembertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert.py#L212",parametersDescription:[{anchor:"transformers.CamembertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CamembertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),mo=new Pe({}),co=new Fe({props:{name:"class transformers.CamembertTokenizerFast",anchor:"transformers.CamembertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"additional_special_tokens",val:" = ['<s>NOTUSED', '</s>NOTUSED']"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert_fast.py#L54",parametersDescription:[{anchor:"transformers.CamembertTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.CamembertTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.CamembertTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.CamembertTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.CamembertTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.CamembertTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.CamembertTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.CamembertTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.CamembertTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),po=new Fe({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.CamembertTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert_fast.py#L145",parametersDescription:[{anchor:"transformers.CamembertTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.CamembertTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),go=new Fe({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.CamembertTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/tokenization_camembert_fast.py#L171",parametersDescription:[{anchor:"transformers.CamembertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> 
(<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CamembertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),_o=new Pe({}),vo=new Fe({props:{name:"class transformers.CamembertModel",anchor:"transformers.CamembertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L65",parametersDescription:[{anchor:"transformers.CamembertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Eo=new Pe({}),Co=new Fe({props:{name:"class transformers.CamembertForCausalLM",anchor:"transformers.CamembertForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L154",parametersDescription:[{anchor:"transformers.CamembertForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qo=new Pe({}),Lo=new Fe({props:{name:"class transformers.CamembertForMaskedLM",anchor:"transformers.CamembertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L78",parametersDescription:[{anchor:"transformers.CamembertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Io=new Pe({}),So=new Fe({props:{name:"class transformers.CamembertForSequenceClassification",anchor:"transformers.CamembertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L94",parametersDescription:[{anchor:"transformers.CamembertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ho=new Pe({}),Qo=new Fe({props:{name:"class transformers.CamembertForMultipleChoice",anchor:"transformers.CamembertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L110",parametersDescription:[{anchor:"transformers.CamembertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xo=new Pe({}),jo=new Fe({props:{name:"class transformers.CamembertForTokenClassification",anchor:"transformers.CamembertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L126",parametersDescription:[{anchor:"transformers.CamembertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),tr=new Pe({}),or=new Fe({props:{name:"class transformers.CamembertForQuestionAnswering",anchor:"transformers.CamembertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_camembert.py#L142",parametersDescription:[{anchor:"transformers.CamembertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ir=new Pe({}),lr=new Fe({props:{name:"class transformers.TFCamembertModel",anchor:"transformers.TFCamembertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_tf_camembert.py#L81",parametersDescription:[{anchor:"transformers.TFCamembertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Nt=new zs({props:{$$slots:{default:[Vu]},$$scope:{ctx:xe}}}),fr=new Pe({}),pr=new Fe({props:{name:"class transformers.TFCamembertForMaskedLM",anchor:"transformers.TFCamembertForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_tf_camembert.py#L94",parametersDescription:[{anchor:"transformers.TFCamembertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ht=new zs({props:{$$slots:{default:[Wu]},$$scope:{ctx:xe}}}),br=new Pe({}),Tr=new Fe({props:{name:"class transformers.TFCamembertForSequenceClassification",anchor:"transformers.TFCamembertForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_tf_camembert.py#L110",parametersDescription:[{anchor:"transformers.TFCamembertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vt=new zs({props:{$$slots:{default:[Ku]},$$scope:{ctx:xe}}}),$r=new Pe({}),Fr=new Fe({props:{name:"class transformers.TFCamembertForMultipleChoice",anchor:"transformers.TFCamembertForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_tf_camembert.py#L142",parametersDescription:[{anchor:"transformers.TFCamembertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kt=new zs({props:{$$slots:{default:[Gu]},$$scope:{ctx:xe}}}),Rr=new Pe({}),Ar=new Fe({props:{name:"class transformers.TFCamembertForTokenClassification",anchor:"transformers.TFCamembertForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_tf_camembert.py#L126",parametersDescription:[{anchor:"transformers.TFCamembertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xt=new zs({props:{$$slots:{default:[Xu]},$$scope:{ctx:xe}}}),Sr=new Pe({}),Br=new Fe({props:{name:"class transformers.TFCamembertForQuestionAnswering",anchor:"transformers.TFCamembertForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/camembert/modeling_tf_camembert.py#L158",parametersDescription:[{anchor:"transformers.TFCamembertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/v4.15.0/en/model_doc/camembert#transformers.CamembertConfig">CamembertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jt=new zs({props:{$$slots:{default:[ju]},$$scope:{ctx:xe}}}),{c(){b=r("meta"),Z=d(),A=r("h1"),_=r("a"),z=r("span"),$(G.$$.fragment),le=d(),D=r("span"),de=n("CamemBERT"),Q=d(),u=r("h2"),K=r("a"),x=r("span"),$(X.$$.fragment),me=d(),I=r("span"),ce=n("Overview"),se=d(),H=r("p"),E=n("The CamemBERT model was proposed in "),j=r("a"),V=n("CamemBERT: a Tasty French Language Model"),g=n(` by Louis Martin, Benjamin Muller, Pedro Javier Ortiz Su\xE1rez, Yoann Dupont, Laurent Romary, \xC9ric Villemonte de la Clergerie, Djam\xE9 Seddah, and Beno\xEEt Sagot. It is based on Facebook\u2019s RoBERTa model released in 2019. It is a model trained on 138GB of French text.`),k=d(),ee=r("p"),O=n("The abstract from the paper is the following:"),ae=d(),te=r("p"),S=r("em"),he=n(`Pretrained language models are now ubiquitous in Natural Language Processing. Despite their success, most available models have either been trained on English data or on the concatenation of data in multiple languages. This makes practical use of such models \u2014in all languages except English\u2014 very limited. Aiming to address this issue for French, we release CamemBERT, a French version of the Bi-directional Encoders for Transformers (BERT). We measure the performance of CamemBERT compared to multilingual models in multiple downstream tasks, namely part-of-speech tagging, dependency parsing, named-entity recognition, and natural language inference. CamemBERT improves the state of the art for most of the tasks considered. We release the pretrained model for CamemBERT hoping to foster research and downstream applications for French NLP.`),ne=d(),v=r("p"),fe=n("Tips:"),B=d(),oe=r("ul"),J=r("li"),N=n("This implementation is the same as RoBERTa. Refer to the "),re=r("a"),pe=n("documentation of RoBERTa"),C=n(` for usage examples as well as the information relative to the inputs and outputs.`),ie=d(),w=r("p"),ue=n("This model was contributed by "),l=r("a"),p=n("camembert"),W=n(". The original code can be found "),ke=r("a"),Ee=n("here"),y=n("."),Te=d(),ge=r("h2"),ve=r("a"),T=r("span"),$(R.$$.fragment),Ce=d(),be=r("span"),U=n("CamembertConfig"),we=d(),_e=r("div"),$(Y.$$.fragment),ye=d(),Zt=r("p"),Si=n("This class overrides "),Wr=r("a"),Bi=n("RobertaConfig"),Oi=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Qa=d(),mt=r("h2"),Ft=r("a"),Ds=r("span"),$(eo.$$.fragment),Ni=d(),xs=r("span"),Ui=n("CamembertTokenizer"),Va=d(),$e=r("div"),$(to.$$.fragment),Hi=d(),Ye=r("p"),Qi=n("Adapted from "),Kr=r("a"),Vi=n("RobertaTokenizer"),Wi=n(" and "),Gr=r("a"),Ki=n("XLNetTokenizer"),Gi=n(`. Construct a CamemBERT tokenizer. Based on `),oo=r("a"),Xi=n("SentencePiece"),ji=n("."),Ji=d(),ro=r("p"),Yi=n("This tokenizer inherits from "),Xr=r("a"),Zi=n("PreTrainedTokenizer"),el=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),tl=d(),ct=r("p"),ol=n(`Attributes: sp_model (`),Is=r("code"),rl=n("SentencePieceProcessor"),sl=n(`): The `),Ss=r("em"),al=n("SentencePiece"),nl=n(" processor that is used for every conversion (string, tokens and IDs)."),il=d(),Ze=r("div"),$(so.$$.fragment),ll=d(),Bs=r("p"),dl=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An CamemBERT sequence has the following format:`),ml=d(),ao=r("ul"),jr=r("li"),cl=n("single sequence: "),Os=r("code"),hl=n("<s> X </s>"),fl=d(),Jr=r("li"),pl=n("pair of sequences: "),Ns=r("code"),ul=n("<s> A </s></s> B </s>"),gl=d(),Pt=r("div"),$(no.$$.fragment),_l=d(),io=r("p"),vl=n(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Us=r("code"),kl=n("prepare_for_model"),bl=n(" method."),Tl=d(),Mt=r("div"),$(lo.$$.fragment),wl=d(),Hs=r("p"),El=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.`),Cl=d(),Qs=r("div"),Wa=d(),ht=r("h2"),qt=r("a"),Vs=r("span"),$(mo.$$.fragment),yl=d(),Ws=r("span"),$l=n("CamembertTokenizerFast"),Ka=d(),Ie=r("div"),$(co.$$.fragment),Fl=d(),Ve=r("p"),Pl=n("Construct a \u201Cfast\u201D CamemBERT tokenizer (backed by HuggingFace\u2019s "),Ks=r("em"),Ml=n("tokenizers"),ql=n(` library). Adapted from `),Yr=r("a"),Ll=n("RobertaTokenizer"),Rl=n(" and "),Zr=r("a"),Al=n("XLNetTokenizer"),zl=n(". Based on "),ho=r("a"),Dl=n("BPE"),xl=n("."),Il=d(),fo=r("p"),Sl=n("This tokenizer inherits from "),es=r("a"),Bl=n("PreTrainedTokenizerFast"),Ol=n(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Nl=d(),et=r("div"),$(po.$$.fragment),Ul=d(),Gs=r("p"),Hl=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An CamemBERT sequence has the following format:`),Ql=d(),uo=r("ul"),ts=r("li"),Vl=n("single sequence: "),Xs=r("code"),Wl=n("<s> X </s>"),Kl=d(),os=r("li"),Gl=n("pair of sequences: "),js=r("code"),Xl=n("<s> A </s></s> B </s>"),jl=d(),Lt=r("div"),$(go.$$.fragment),Jl=d(),Js=r("p"),Yl=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.`),Ga=d(),ft=r("h2"),Rt=r("a"),Ys=r("span"),$(_o.$$.fragment),Zl=d(),Zs=r("span"),ed=n("CamembertModel"),Xa=d(),Se=r("div"),$(vo.$$.fragment),td=d(),ea=r("p"),od=n("The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top."),rd=d(),ko=r("p"),sd=n("This model inherits from "),rs=r("a"),ad=n("PreTrainedModel"),nd=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),id=d(),bo=r("p"),ld=n("This model is also a PyTorch "),To=r("a"),dd=n("torch.nn.Module"),md=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cd=d(),wo=r("p"),hd=n("This class overrides "),ss=r("a"),fd=n("RobertaModel"),pd=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),ja=d(),pt=r("h2"),At=r("a"),ta=r("span"),$(Eo.$$.fragment),ud=d(),oa=r("span"),gd=n("CamembertForCausalLM"),Ja=d(),Be=r("div"),$(Co.$$.fragment),_d=d(),yo=r("p"),vd=n("CamemBERT Model with a "),ra=r("code"),kd=n("language modeling"),bd=n(" head on top for CLM fine-tuning."),Td=d(),$o=r("p"),wd=n("This model inherits from "),as=r("a"),Ed=n("PreTrainedModel"),Cd=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yd=d(),Fo=r("p"),$d=n("This model is also a PyTorch "),Po=r("a"),Fd=n("torch.nn.Module"),Pd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Md=d(),Mo=r("p"),qd=n("This class overrides "),ns=r("a"),Ld=n("RobertaForCausalLM"),Rd=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),Ya=d(),ut=r("h2"),zt=r("a"),sa=r("span"),$(qo.$$.fragment),Ad=d(),aa=r("span"),zd=n("CamembertForMaskedLM"),Za=d(),Oe=r("div"),$(Lo.$$.fragment),Dd=d(),Ro=r("p"),xd=n("CamemBERT Model with a "),na=r("code"),Id=n("language modeling"),Sd=n(" head on top."),Bd=d(),Ao=r("p"),Od=n("This model inherits from "),is=r("a"),Nd=n("PreTrainedModel"),Ud=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hd=d(),zo=r("p"),Qd=n("This model is also a PyTorch "),Do=r("a"),Vd=n("torch.nn.Module"),Wd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kd=d(),xo=r("p"),Gd=n("This class overrides "),ls=r("a"),Xd=n("RobertaForMaskedLM"),jd=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),en=d(),gt=r("h2"),Dt=r("a"),ia=r("span"),$(Io.$$.fragment),Jd=d(),la=r("span"),Yd=n("CamembertForSequenceClassification"),tn=d(),Ne=r("div"),$(So.$$.fragment),Zd=d(),da=r("p"),em=n(`CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),tm=d(),Bo=r("p"),om=n("This model inherits from "),ds=r("a"),rm=n("PreTrainedModel"),sm=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),am=d(),Oo=r("p"),nm=n("This model is also a PyTorch "),No=r("a"),im=n("torch.nn.Module"),lm=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dm=d(),Uo=r("p"),mm=n("This class overrides "),ms=r("a"),cm=n("RobertaForSequenceClassification"),hm=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),on=d(),_t=r("h2"),xt=r("a"),ma=r("span"),$(Ho.$$.fragment),fm=d(),ca=r("span"),pm=n("CamembertForMultipleChoice"),rn=d(),Ue=r("div"),$(Qo.$$.fragment),um=d(),ha=r("p"),gm=n(`CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),_m=d(),Vo=r("p"),vm=n("This model inherits from "),cs=r("a"),km=n("PreTrainedModel"),bm=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tm=d(),Wo=r("p"),wm=n("This model is also a PyTorch "),Ko=r("a"),Em=n("torch.nn.Module"),Cm=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ym=d(),Go=r("p"),$m=n("This class overrides "),hs=r("a"),Fm=n("RobertaForMultipleChoice"),Pm=n(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),sn=d(),vt=r("h2"),It=r("a"),fa=r("span"),$(Xo.$$.fragment),Mm=d(),pa=r("span"),qm=n("CamembertForTokenClassification"),an=d(),He=r("div"),$(jo.$$.fragment),Lm=d(),ua=r("p"),Rm=n(`CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Am=d(),Jo=r("p"),zm=n("This model inherits from "),fs=r("a"),Dm=n("PreTrainedModel"),xm=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Im=d(),Yo=r("p"),Sm=n("This model is also a PyTorch "),Zo=r("a"),Bm=n("torch.nn.Module"),Om=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nm=d(),er=r("p"),Um=n("This class overrides "),ps=r("a"),Hm=n("RobertaForTokenClassification"),Qm=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),nn=d(),kt=r("h2"),St=r("a"),ga=r("span"),$(tr.$$.fragment),Vm=d(),_a=r("span"),Wm=n("CamembertForQuestionAnswering"),ln=d(),Qe=r("div"),$(or.$$.fragment),Km=d(),Bt=r("p"),Gm=n(`CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),va=r("code"),Xm=n("span start logits"),jm=n(" and "),ka=r("code"),Jm=n("span end logits"),Ym=d(),rr=r("p"),Zm=n("This model inherits from "),us=r("a"),ec=n("PreTrainedModel"),tc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oc=d(),sr=r("p"),rc=n("This model is also a PyTorch "),ar=r("a"),sc=n("torch.nn.Module"),ac=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nc=d(),nr=r("p"),ic=n("This class overrides "),gs=r("a"),lc=n("RobertaForQuestionAnswering"),dc=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),dn=d(),bt=r("h2"),Ot=r("a"),ba=r("span"),$(ir.$$.fragment),mc=d(),Ta=r("span"),cc=n("TFCamembertModel"),mn=d(),Me=r("div"),$(lr.$$.fragment),hc=d(),wa=r("p"),fc=n("The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top."),pc=d(),dr=r("p"),uc=n("This model inherits from "),_s=r("a"),gc=n("TFPreTrainedModel"),_c=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vc=d(),mr=r("p"),kc=n("This model is also a "),cr=r("a"),bc=n("tf.keras.Model"),Tc=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),wc=d(),$(Nt.$$.fragment),Ec=d(),hr=r("p"),Cc=n("This class overrides "),vs=r("a"),yc=n("TFRobertaModel"),$c=n(`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),cn=d(),Tt=r("h2"),Ut=r("a"),Ea=r("span"),$(fr.$$.fragment),Fc=d(),Ca=r("span"),Pc=n("TFCamembertForMaskedLM"),hn=d(),qe=r("div"),$(pr.$$.fragment),Mc=d(),ur=r("p"),qc=n("CamemBERT Model with a "),ya=r("code"),Lc=n("language modeling"),Rc=n(" head on top."),Ac=d(),gr=r("p"),zc=n("This model inherits from "),ks=r("a"),Dc=n("TFPreTrainedModel"),xc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ic=d(),_r=r("p"),Sc=n("This model is also a "),vr=r("a"),Bc=n("tf.keras.Model"),Oc=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Nc=d(),$(Ht.$$.fragment),Uc=d(),kr=r("p"),Hc=n("This class overrides "),bs=r("a"),Qc=n("TFRobertaForMaskedLM"),Vc=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),fn=d(),wt=r("h2"),Qt=r("a"),$a=r("span"),$(br.$$.fragment),Wc=d(),Fa=r("span"),Kc=n("TFCamembertForSequenceClassification"),pn=d(),Le=r("div"),$(Tr.$$.fragment),Gc=d(),Pa=r("p"),Xc=n(`CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),jc=d(),wr=r("p"),Jc=n("This model inherits from "),Ts=r("a"),Yc=n("TFPreTrainedModel"),Zc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),eh=d(),Er=r("p"),th=n("This model is also a "),Cr=r("a"),oh=n("tf.keras.Model"),rh=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sh=d(),$(Vt.$$.fragment),ah=d(),yr=r("p"),nh=n("This class overrides "),ws=r("a"),ih=n("TFRobertaForSequenceClassification"),lh=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),un=d(),Et=r("h2"),Wt=r("a"),Ma=r("span"),$($r.$$.fragment),dh=d(),qa=r("span"),mh=n("TFCamembertForMultipleChoice"),gn=d(),Re=r("div"),$(Fr.$$.fragment),ch=d(),La=r("p"),hh=n(`CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),fh=d(),Pr=r("p"),ph=n("This model inherits from "),Es=r("a"),uh=n("TFPreTrainedModel"),gh=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_h=d(),Mr=r("p"),vh=n("This model is also a "),qr=r("a"),kh=n("tf.keras.Model"),bh=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Th=d(),$(Kt.$$.fragment),wh=d(),Lr=r("p"),Eh=n("This class overrides "),Cs=r("a"),Ch=n("TFRobertaForMultipleChoice"),yh=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),_n=d(),Ct=r("h2"),Gt=r("a"),Ra=r("span"),$(Rr.$$.fragment),$h=d(),Aa=r("span"),Fh=n("TFCamembertForTokenClassification"),vn=d(),Ae=r("div"),$(Ar.$$.fragment),Ph=d(),za=r("p"),Mh=n(`CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),qh=d(),zr=r("p"),Lh=n("This model inherits from "),ys=r("a"),Rh=n("TFPreTrainedModel"),Ah=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zh=d(),Dr=r("p"),Dh=n("This model is also a "),xr=r("a"),xh=n("tf.keras.Model"),Ih=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sh=d(),$(Xt.$$.fragment),Bh=d(),Ir=r("p"),Oh=n("This class overrides "),$s=r("a"),Nh=n("TFRobertaForTokenClassification"),Uh=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),kn=d(),yt=r("h2"),jt=r("a"),Da=r("span"),$(Sr.$$.fragment),Hh=d(),xa=r("span"),Qh=n("TFCamembertForQuestionAnswering"),bn=d(),ze=r("div"),$(Br.$$.fragment),Vh=d(),$t=r("p"),Wh=n(`CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ia=r("code"),Kh=n("span start logits"),Gh=n(" and "),Sa=r("code"),Xh=n("span end logits"),jh=n(")."),Jh=d(),Or=r("p"),Yh=n("This model inherits from "),Fs=r("a"),Zh=n("TFPreTrainedModel"),ef=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),tf=d(),Nr=r("p"),of=n("This model is also a "),Ur=r("a"),rf=n("tf.keras.Model"),sf=n(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),af=d(),$(Jt.$$.fragment),nf=d(),Hr=r("p"),lf=n("This class overrides "),Ps=r("a"),df=n("TFRobertaForQuestionAnswering"),mf=n(`. Please check the superclass for the appropriate documentation alongside usage examples.`),this.h()},l(o){const f=Qu('[data-svelte="svelte-1phssyn"]',document.head);b=s(f,"META",{name:!0,content:!0}),f.forEach(t),Z=m(o),A=s(o,"H1",{class:!0});var Qr=a(A);_=s(Qr,"A",{id:!0,class:!0,href:!0});var Ba=a(_);z=s(Ba,"SPAN",{});var Oa=a(z);F(G.$$.fragment,Oa),Oa.forEach(t),Ba.forEach(t),le=m(Qr),D=s(Qr,"SPAN",{});var Na=a(D);de=i(Na,"CamemBERT"),Na.forEach(t),Qr.forEach(t),Q=m(o),u=s(o,"H2",{class:!0});var Vr=a(u);K=s(Vr,"A",{id:!0,class:!0,href:!0});var Ua=a(K);x=s(Ua,"SPAN",{});var uf=a(x);F(X.$$.fragment,uf),uf.forEach(t),Ua.forEach(t),me=m(Vr),I=s(Vr,"SPAN",{});var gf=a(I);ce=i(gf,"Overview"),gf.forEach(t),Vr.forEach(t),se=m(o),H=s(o,"P",{});var wn=a(H);E=i(wn,"The CamemBERT model was proposed in "),j=s(wn,"A",{href:!0,rel:!0});var _f=a(j);V=i(_f,"CamemBERT: a Tasty French Language Model"),_f.forEach(t),g=i(wn,` by Louis Martin, Benjamin Muller, Pedro Javier Ortiz Su\xE1rez, Yoann Dupont, Laurent Romary, \xC9ric Villemonte de la Clergerie, Djam\xE9 Seddah, and Beno\xEEt Sagot. It is based on Facebook\u2019s RoBERTa model released in 2019. It is a model trained on 138GB of French text.`),wn.forEach(t),k=m(o),ee=s(o,"P",{});var vf=a(ee);O=i(vf,"The abstract from the paper is the following:"),vf.forEach(t),ae=m(o),te=s(o,"P",{});var kf=a(te);S=s(kf,"EM",{});var bf=a(S);he=i(bf,`Pretrained language models are now ubiquitous in Natural Language Processing. Despite their success, most available models have either been trained on English data or on the concatenation of data in multiple languages. 
This makes practical use of such models \u2014in all languages except English\u2014 very limited. Aiming to address this issue for French, we release CamemBERT, a French version of the Bi-directional Encoders for Transformers (BERT). We measure the performance of CamemBERT compared to multilingual models in multiple downstream tasks, namely part-of-speech tagging, dependency parsing, named-entity recognition, and natural language inference. CamemBERT improves the state of the art for most of the tasks considered. We release the pretrained model for CamemBERT hoping to foster research and downstream applications for French NLP.`),bf.forEach(t),kf.forEach(t),ne=m(o),v=s(o,"P",{});var Tf=a(v);fe=i(Tf,"Tips:"),Tf.forEach(t),B=m(o),oe=s(o,"UL",{});var wf=a(oe);J=s(wf,"LI",{});var En=a(J);N=i(En,"This implementation is the same as RoBERTa. Refer to the "),re=s(En,"A",{href:!0});var Ef=a(re);pe=i(Ef,"documentation of RoBERTa"),Ef.forEach(t),C=i(En,` for usage examples as well as the information relative to the inputs and outputs.`),En.forEach(t),wf.forEach(t),ie=m(o),w=s(o,"P",{});var Ms=a(w);ue=i(Ms,"This model was contributed by "),l=s(Ms,"A",{href:!0,rel:!0});var Cf=a(l);p=i(Cf,"camembert"),Cf.forEach(t),W=i(Ms,". The original code can be found "),ke=s(Ms,"A",{href:!0,rel:!0});var yf=a(ke);Ee=i(yf,"here"),yf.forEach(t),y=i(Ms,"."),Ms.forEach(t),Te=m(o),ge=s(o,"H2",{class:!0});var Cn=a(ge);ve=s(Cn,"A",{id:!0,class:!0,href:!0});var $f=a(ve);T=s($f,"SPAN",{});var Ff=a(T);F(R.$$.fragment,Ff),Ff.forEach(t),$f.forEach(t),Ce=m(Cn),be=s(Cn,"SPAN",{});var Pf=a(be);U=i(Pf,"CamembertConfig"),Pf.forEach(t),Cn.forEach(t),we=m(o),_e=s(o,"DIV",{class:!0});var yn=a(_e);F(Y.$$.fragment,yn),ye=m(yn),Zt=s(yn,"P",{});var $n=a(Zt);Si=i($n,"This class overrides "),Wr=s($n,"A",{href:!0});var Mf=a(Wr);Bi=i(Mf,"RobertaConfig"),Mf.forEach(t),Oi=i($n,`. Please check the superclass for the appropriate documentation alongside usage examples.`),$n.forEach(t),yn.forEach(t),Qa=m(o),mt=s(o,"H2",{class:!0});var Fn=a(mt);Ft=s(Fn,"A",{id:!0,class:!0,href:!0});var qf=a(Ft);Ds=s(qf,"SPAN",{});var Lf=a(Ds);F(eo.$$.fragment,Lf),Lf.forEach(t),qf.forEach(t),Ni=m(Fn),xs=s(Fn,"SPAN",{});var Rf=a(xs);Ui=i(Rf,"CamembertTokenizer"),Rf.forEach(t),Fn.forEach(t),Va=m(o),$e=s(o,"DIV",{class:!0});var De=a($e);F(to.$$.fragment,De),Hi=m(De),Ye=s(De,"P",{});var Yt=a(Ye);Qi=i(Yt,"Adapted from "),Kr=s(Yt,"A",{href:!0});var Af=a(Kr);Vi=i(Af,"RobertaTokenizer"),Af.forEach(t),Wi=i(Yt," and "),Gr=s(Yt,"A",{href:!0});var zf=a(Gr);Ki=i(zf,"XLNetTokenizer"),zf.forEach(t),Gi=i(Yt,`. Construct a CamemBERT tokenizer. Based on `),oo=s(Yt,"A",{href:!0,rel:!0});var Df=a(oo);Xi=i(Df,"SentencePiece"),Df.forEach(t),ji=i(Yt,"."),Yt.forEach(t),Ji=m(De),ro=s(De,"P",{});var Pn=a(ro);Yi=i(Pn,"This tokenizer inherits from "),Xr=s(Pn,"A",{href:!0});var xf=a(Xr);Zi=i(xf,"PreTrainedTokenizer"),xf.forEach(t),el=i(Pn,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Pn.forEach(t),tl=m(De),ct=s(De,"P",{});var qs=a(ct);ol=i(qs,`Attributes: sp_model (`),Is=s(qs,"CODE",{});var If=a(Is);rl=i(If,"SentencePieceProcessor"),If.forEach(t),sl=i(qs,`): The `),Ss=s(qs,"EM",{});var Sf=a(Ss);al=i(Sf,"SentencePiece"),Sf.forEach(t),nl=i(qs," processor that is used for every conversion (string, tokens and IDs)."),qs.forEach(t),il=m(De),Ze=s(De,"DIV",{class:!0});var Ls=a(Ze);F(so.$$.fragment,Ls),ll=m(Ls),Bs=s(Ls,"P",{});var Bf=a(Bs);dl=i(Bf,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An CamemBERT sequence has the following format:`),Bf.forEach(t),ml=m(Ls),ao=s(Ls,"UL",{});var Mn=a(ao);jr=s(Mn,"LI",{});var cf=a(jr);cl=i(cf,"single sequence: "),Os=s(cf,"CODE",{});var Of=a(Os);hl=i(Of,"<s> X </s>"),Of.forEach(t),cf.forEach(t),fl=m(Mn),Jr=s(Mn,"LI",{});var hf=a(Jr);pl=i(hf,"pair of sequences: "),Ns=s(hf,"CODE",{});var Nf=a(Ns);ul=i(Nf,"<s> A </s></s> B </s>"),Nf.forEach(t),hf.forEach(t),Mn.forEach(t),Ls.forEach(t),gl=m(De),Pt=s(De,"DIV",{class:!0});var qn=a(Pt);F(no.$$.fragment,qn),_l=m(qn),io=s(qn,"P",{});var Ln=a(io);vl=i(Ln,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Us=s(Ln,"CODE",{});var Uf=a(Us);kl=i(Uf,"prepare_for_model"),Uf.forEach(t),bl=i(Ln," method."),Ln.forEach(t),qn.forEach(t),Tl=m(De),Mt=s(De,"DIV",{class:!0});var Rn=a(Mt);F(lo.$$.fragment,Rn),wl=m(Rn),Hs=s(Rn,"P",{});var Hf=a(Hs);El=i(Hf,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.`),Hf.forEach(t),Rn.forEach(t),Cl=m(De),Qs=s(De,"DIV",{class:!0}),a(Qs).forEach(t),De.forEach(t),Wa=m(o),ht=s(o,"H2",{class:!0});var An=a(ht);qt=s(An,"A",{id:!0,class:!0,href:!0});var Qf=a(qt);Vs=s(Qf,"SPAN",{});var Vf=a(Vs);F(mo.$$.fragment,Vf),Vf.forEach(t),Qf.forEach(t),yl=m(An),Ws=s(An,"SPAN",{});var Wf=a(Ws);$l=i(Wf,"CamembertTokenizerFast"),Wf.forEach(t),An.forEach(t),Ka=m(o),Ie=s(o,"DIV",{class:!0});var tt=a(Ie);F(co.$$.fragment,tt),Fl=m(tt),Ve=s(tt,"P",{});var ot=a(Ve);Pl=i(ot,"Construct a \u201Cfast\u201D CamemBERT tokenizer (backed by HuggingFace\u2019s "),Ks=s(ot,"EM",{});var Kf=a(Ks);Ml=i(Kf,"tokenizers"),Kf.forEach(t),ql=i(ot,` library). Adapted from `),Yr=s(ot,"A",{href:!0});var Gf=a(Yr);Ll=i(Gf,"RobertaTokenizer"),Gf.forEach(t),Rl=i(ot," and "),Zr=s(ot,"A",{href:!0});var Xf=a(Zr);Al=i(Xf,"XLNetTokenizer"),Xf.forEach(t),zl=i(ot,". Based on "),ho=s(ot,"A",{href:!0,rel:!0});var jf=a(ho);Dl=i(jf,"BPE"),jf.forEach(t),xl=i(ot,"."),ot.forEach(t),Il=m(tt),fo=s(tt,"P",{});var zn=a(fo);Sl=i(zn,"This tokenizer inherits from "),es=s(zn,"A",{href:!0});var Jf=a(es);Bl=i(Jf,"PreTrainedTokenizerFast"),Jf.forEach(t),Ol=i(zn,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),zn.forEach(t),Nl=m(tt),et=s(tt,"DIV",{class:!0});var Rs=a(et);F(po.$$.fragment,Rs),Ul=m(Rs),Gs=s(Rs,"P",{});var Yf=a(Gs);Hl=i(Yf,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
An CamemBERT sequence has the following format:`),Yf.forEach(t),Ql=m(Rs),uo=s(Rs,"UL",{});var Dn=a(uo);ts=s(Dn,"LI",{});var ff=a(ts);Vl=i(ff,"single sequence: "),Xs=s(ff,"CODE",{});var Zf=a(Xs);Wl=i(Zf,"<s> X </s>"),Zf.forEach(t),ff.forEach(t),Kl=m(Dn),os=s(Dn,"LI",{});var pf=a(os);Gl=i(pf,"pair of sequences: "),js=s(pf,"CODE",{});var ep=a(js);Xl=i(ep,"<s> A </s></s> B </s>"),ep.forEach(t),pf.forEach(t),Dn.forEach(t),Rs.forEach(t),jl=m(tt),Lt=s(tt,"DIV",{class:!0});var xn=a(Lt);F(go.$$.fragment,xn),Jl=m(xn),Js=s(xn,"P",{});var tp=a(Js);Yl=i(tp,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.`),tp.forEach(t),xn.forEach(t),tt.forEach(t),Ga=m(o),ft=s(o,"H2",{class:!0});var In=a(ft);Rt=s(In,"A",{id:!0,class:!0,href:!0});var op=a(Rt);Ys=s(op,"SPAN",{});var rp=a(Ys);F(_o.$$.fragment,rp),rp.forEach(t),op.forEach(t),Zl=m(In),Zs=s(In,"SPAN",{});var sp=a(Zs);ed=i(sp,"CamembertModel"),sp.forEach(t),In.forEach(t),Xa=m(o),Se=s(o,"DIV",{class:!0});var rt=a(Se);F(vo.$$.fragment,rt),td=m(rt),ea=s(rt,"P",{});var ap=a(ea);od=i(ap,"The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top."),ap.forEach(t),rd=m(rt),ko=s(rt,"P",{});var Sn=a(ko);sd=i(Sn,"This model inherits from "),rs=s(Sn,"A",{href:!0});var np=a(rs);ad=i(np,"PreTrainedModel"),np.forEach(t),nd=i(Sn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sn.forEach(t),id=m(rt),bo=s(rt,"P",{});var Bn=a(bo);ld=i(Bn,"This model is also a PyTorch "),To=s(Bn,"A",{href:!0,rel:!0});var ip=a(To);dd=i(ip,"torch.nn.Module"),ip.forEach(t),md=i(Bn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bn.forEach(t),cd=m(rt),wo=s(rt,"P",{});var On=a(wo);hd=i(On,"This class overrides "),ss=s(On,"A",{href:!0});var lp=a(ss);fd=i(lp,"RobertaModel"),lp.forEach(t),pd=i(On,`. Please check the superclass for the appropriate documentation alongside usage examples.`),On.forEach(t),rt.forEach(t),ja=m(o),pt=s(o,"H2",{class:!0});var Nn=a(pt);At=s(Nn,"A",{id:!0,class:!0,href:!0});var dp=a(At);ta=s(dp,"SPAN",{});var mp=a(ta);F(Eo.$$.fragment,mp),mp.forEach(t),dp.forEach(t),ud=m(Nn),oa=s(Nn,"SPAN",{});var cp=a(oa);gd=i(cp,"CamembertForCausalLM"),cp.forEach(t),Nn.forEach(t),Ja=m(o),Be=s(o,"DIV",{class:!0});var st=a(Be);F(Co.$$.fragment,st),_d=m(st),yo=s(st,"P",{});var Un=a(yo);vd=i(Un,"CamemBERT Model with a "),ra=s(Un,"CODE",{});var hp=a(ra);kd=i(hp,"language modeling"),hp.forEach(t),bd=i(Un," head on top for CLM fine-tuning."),Un.forEach(t),Td=m(st),$o=s(st,"P",{});var Hn=a($o);wd=i(Hn,"This model inherits from "),as=s(Hn,"A",{href:!0});var fp=a(as);Ed=i(fp,"PreTrainedModel"),fp.forEach(t),Cd=i(Hn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hn.forEach(t),yd=m(st),Fo=s(st,"P",{});var Qn=a(Fo);$d=i(Qn,"This model is also a PyTorch "),Po=s(Qn,"A",{href:!0,rel:!0});var pp=a(Po);Fd=i(pp,"torch.nn.Module"),pp.forEach(t),Pd=i(Qn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qn.forEach(t),Md=m(st),Mo=s(st,"P",{});var Vn=a(Mo);qd=i(Vn,"This class overrides "),ns=s(Vn,"A",{href:!0});var up=a(ns);Ld=i(up,"RobertaForCausalLM"),up.forEach(t),Rd=i(Vn,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Vn.forEach(t),st.forEach(t),Ya=m(o),ut=s(o,"H2",{class:!0});var Wn=a(ut);zt=s(Wn,"A",{id:!0,class:!0,href:!0});var gp=a(zt);sa=s(gp,"SPAN",{});var _p=a(sa);F(qo.$$.fragment,_p),_p.forEach(t),gp.forEach(t),Ad=m(Wn),aa=s(Wn,"SPAN",{});var vp=a(aa);zd=i(vp,"CamembertForMaskedLM"),vp.forEach(t),Wn.forEach(t),Za=m(o),Oe=s(o,"DIV",{class:!0});var at=a(Oe);F(Lo.$$.fragment,at),Dd=m(at),Ro=s(at,"P",{});var Kn=a(Ro);xd=i(Kn,"CamemBERT Model with a "),na=s(Kn,"CODE",{});var kp=a(na);Id=i(kp,"language modeling"),kp.forEach(t),Sd=i(Kn," head on top."),Kn.forEach(t),Bd=m(at),Ao=s(at,"P",{});var Gn=a(Ao);Od=i(Gn,"This model inherits from "),is=s(Gn,"A",{href:!0});var bp=a(is);Nd=i(bp,"PreTrainedModel"),bp.forEach(t),Ud=i(Gn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gn.forEach(t),Hd=m(at),zo=s(at,"P",{});var Xn=a(zo);Qd=i(Xn,"This model is also a PyTorch "),Do=s(Xn,"A",{href:!0,rel:!0});var Tp=a(Do);Vd=i(Tp,"torch.nn.Module"),Tp.forEach(t),Wd=i(Xn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xn.forEach(t),Kd=m(at),xo=s(at,"P",{});var jn=a(xo);Gd=i(jn,"This class overrides "),ls=s(jn,"A",{href:!0});var wp=a(ls);Xd=i(wp,"RobertaForMaskedLM"),wp.forEach(t),jd=i(jn,`. Please check the superclass for the appropriate documentation alongside usage examples.`),jn.forEach(t),at.forEach(t),en=m(o),gt=s(o,"H2",{class:!0});var Jn=a(gt);Dt=s(Jn,"A",{id:!0,class:!0,href:!0});var Ep=a(Dt);ia=s(Ep,"SPAN",{});var Cp=a(ia);F(Io.$$.fragment,Cp),Cp.forEach(t),Ep.forEach(t),Jd=m(Jn),la=s(Jn,"SPAN",{});var yp=a(la);Yd=i(yp,"CamembertForSequenceClassification"),yp.forEach(t),Jn.forEach(t),tn=m(o),Ne=s(o,"DIV",{class:!0});var nt=a(Ne);F(So.$$.fragment,nt),Zd=m(nt),da=s(nt,"P",{});var $p=a(da);em=i($p,`CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),$p.forEach(t),tm=m(nt),Bo=s(nt,"P",{});var Yn=a(Bo);om=i(Yn,"This model inherits from "),ds=s(Yn,"A",{href:!0});var Fp=a(ds);rm=i(Fp,"PreTrainedModel"),Fp.forEach(t),sm=i(Yn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yn.forEach(t),am=m(nt),Oo=s(nt,"P",{});var Zn=a(Oo);nm=i(Zn,"This model is also a PyTorch "),No=s(Zn,"A",{href:!0,rel:!0});var Pp=a(No);im=i(Pp,"torch.nn.Module"),Pp.forEach(t),lm=i(Zn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zn.forEach(t),dm=m(nt),Uo=s(nt,"P",{});var ei=a(Uo);mm=i(ei,"This class overrides "),ms=s(ei,"A",{href:!0});var Mp=a(ms);cm=i(Mp,"RobertaForSequenceClassification"),Mp.forEach(t),hm=i(ei,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),ei.forEach(t),nt.forEach(t),on=m(o),_t=s(o,"H2",{class:!0});var ti=a(_t);xt=s(ti,"A",{id:!0,class:!0,href:!0});var qp=a(xt);ma=s(qp,"SPAN",{});var Lp=a(ma);F(Ho.$$.fragment,Lp),Lp.forEach(t),qp.forEach(t),fm=m(ti),ca=s(ti,"SPAN",{});var Rp=a(ca);pm=i(Rp,"CamembertForMultipleChoice"),Rp.forEach(t),ti.forEach(t),rn=m(o),Ue=s(o,"DIV",{class:!0});var it=a(Ue);F(Qo.$$.fragment,it),um=m(it),ha=s(it,"P",{});var Ap=a(ha);gm=i(Ap,`CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Ap.forEach(t),_m=m(it),Vo=s(it,"P",{});var oi=a(Vo);vm=i(oi,"This model inherits from "),cs=s(oi,"A",{href:!0});var zp=a(cs);km=i(zp,"PreTrainedModel"),zp.forEach(t),bm=i(oi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oi.forEach(t),Tm=m(it),Wo=s(it,"P",{});var ri=a(Wo);wm=i(ri,"This model is also a PyTorch "),Ko=s(ri,"A",{href:!0,rel:!0});var Dp=a(Ko);Em=i(Dp,"torch.nn.Module"),Dp.forEach(t),Cm=i(ri,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ri.forEach(t),ym=m(it),Go=s(it,"P",{});var si=a(Go);$m=i(si,"This class overrides "),hs=s(si,"A",{href:!0});var xp=a(hs);Fm=i(xp,"RobertaForMultipleChoice"),xp.forEach(t),Pm=i(si,`. Please check the superclass for the appropriate documentation alongside usage examples.`),si.forEach(t),it.forEach(t),sn=m(o),vt=s(o,"H2",{class:!0});var ai=a(vt);It=s(ai,"A",{id:!0,class:!0,href:!0});var Ip=a(It);fa=s(Ip,"SPAN",{});var Sp=a(fa);F(Xo.$$.fragment,Sp),Sp.forEach(t),Ip.forEach(t),Mm=m(ai),pa=s(ai,"SPAN",{});var Bp=a(pa);qm=i(Bp,"CamembertForTokenClassification"),Bp.forEach(t),ai.forEach(t),an=m(o),He=s(o,"DIV",{class:!0});var lt=a(He);F(jo.$$.fragment,lt),Lm=m(lt),ua=s(lt,"P",{});var Op=a(ua);Rm=i(Op,`CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Op.forEach(t),Am=m(lt),Jo=s(lt,"P",{});var ni=a(Jo);zm=i(ni,"This model inherits from "),fs=s(ni,"A",{href:!0});var Np=a(fs);Dm=i(Np,"PreTrainedModel"),Np.forEach(t),xm=i(ni,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ni.forEach(t),Im=m(lt),Yo=s(lt,"P",{});var ii=a(Yo);Sm=i(ii,"This model is also a PyTorch "),Zo=s(ii,"A",{href:!0,rel:!0});var Up=a(Zo);Bm=i(Up,"torch.nn.Module"),Up.forEach(t),Om=i(ii,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ii.forEach(t),Nm=m(lt),er=s(lt,"P",{});var li=a(er);Um=i(li,"This class overrides "),ps=s(li,"A",{href:!0});var Hp=a(ps);Hm=i(Hp,"RobertaForTokenClassification"),Hp.forEach(t),Qm=i(li,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),li.forEach(t),lt.forEach(t),nn=m(o),kt=s(o,"H2",{class:!0});var di=a(kt);St=s(di,"A",{id:!0,class:!0,href:!0});var Qp=a(St);ga=s(Qp,"SPAN",{});var Vp=a(ga);F(tr.$$.fragment,Vp),Vp.forEach(t),Qp.forEach(t),Vm=m(di),_a=s(di,"SPAN",{});var Wp=a(_a);Wm=i(Wp,"CamembertForQuestionAnswering"),Wp.forEach(t),di.forEach(t),ln=m(o),Qe=s(o,"DIV",{class:!0});var dt=a(Qe);F(or.$$.fragment,dt),Km=m(dt),Bt=s(dt,"P",{});var Ha=a(Bt);Gm=i(Ha,`CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),va=s(Ha,"CODE",{});var Kp=a(va);Xm=i(Kp,"span start logits"),Kp.forEach(t),jm=i(Ha," and "),ka=s(Ha,"CODE",{});var Gp=a(ka);Jm=i(Gp,"span end logits"),Gp.forEach(t),Ha.forEach(t),Ym=m(dt),rr=s(dt,"P",{});var mi=a(rr);Zm=i(mi,"This model inherits from "),us=s(mi,"A",{href:!0});var Xp=a(us);ec=i(Xp,"PreTrainedModel"),Xp.forEach(t),tc=i(mi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mi.forEach(t),oc=m(dt),sr=s(dt,"P",{});var ci=a(sr);rc=i(ci,"This model is also a PyTorch "),ar=s(ci,"A",{href:!0,rel:!0});var jp=a(ar);sc=i(jp,"torch.nn.Module"),jp.forEach(t),ac=i(ci,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ci.forEach(t),nc=m(dt),nr=s(dt,"P",{});var hi=a(nr);ic=i(hi,"This class overrides "),gs=s(hi,"A",{href:!0});var Jp=a(gs);lc=i(Jp,"RobertaForQuestionAnswering"),Jp.forEach(t),dc=i(hi,`. Please check the superclass for the appropriate documentation alongside usage examples.`),hi.forEach(t),dt.forEach(t),dn=m(o),bt=s(o,"H2",{class:!0});var fi=a(bt);Ot=s(fi,"A",{id:!0,class:!0,href:!0});var Yp=a(Ot);ba=s(Yp,"SPAN",{});var Zp=a(ba);F(ir.$$.fragment,Zp),Zp.forEach(t),Yp.forEach(t),mc=m(fi),Ta=s(fi,"SPAN",{});var eu=a(Ta);cc=i(eu,"TFCamembertModel"),eu.forEach(t),fi.forEach(t),mn=m(o),Me=s(o,"DIV",{class:!0});var We=a(Me);F(lr.$$.fragment,We),hc=m(We),wa=s(We,"P",{});var tu=a(wa);fc=i(tu,"The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top."),tu.forEach(t),pc=m(We),dr=s(We,"P",{});var pi=a(dr);uc=i(pi,"This model inherits from "),_s=s(pi,"A",{href:!0});var ou=a(_s);gc=i(ou,"TFPreTrainedModel"),ou.forEach(t),_c=i(pi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pi.forEach(t),vc=m(We),mr=s(We,"P",{});var ui=a(mr);kc=i(ui,"This model is also a "),cr=s(ui,"A",{href:!0,rel:!0});var ru=a(cr);bc=i(ru,"tf.keras.Model"),ru.forEach(t),Tc=i(ui,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ui.forEach(t),wc=m(We),F(Nt.$$.fragment,We),Ec=m(We),hr=s(We,"P",{});var gi=a(hr);Cc=i(gi,"This class overrides "),vs=s(gi,"A",{href:!0});var su=a(vs);yc=i(su,"TFRobertaModel"),su.forEach(t),$c=i(gi,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),gi.forEach(t),We.forEach(t),cn=m(o),Tt=s(o,"H2",{class:!0});var _i=a(Tt);Ut=s(_i,"A",{id:!0,class:!0,href:!0});var au=a(Ut);Ea=s(au,"SPAN",{});var nu=a(Ea);F(fr.$$.fragment,nu),nu.forEach(t),au.forEach(t),Fc=m(_i),Ca=s(_i,"SPAN",{});var iu=a(Ca);Pc=i(iu,"TFCamembertForMaskedLM"),iu.forEach(t),_i.forEach(t),hn=m(o),qe=s(o,"DIV",{class:!0});var Ke=a(qe);F(pr.$$.fragment,Ke),Mc=m(Ke),ur=s(Ke,"P",{});var vi=a(ur);qc=i(vi,"CamemBERT Model with a "),ya=s(vi,"CODE",{});var lu=a(ya);Lc=i(lu,"language modeling"),lu.forEach(t),Rc=i(vi," head on top."),vi.forEach(t),Ac=m(Ke),gr=s(Ke,"P",{});var ki=a(gr);zc=i(ki,"This model inherits from "),ks=s(ki,"A",{href:!0});var du=a(ks);Dc=i(du,"TFPreTrainedModel"),du.forEach(t),xc=i(ki,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ki.forEach(t),Ic=m(Ke),_r=s(Ke,"P",{});var bi=a(_r);Sc=i(bi,"This model is also a "),vr=s(bi,"A",{href:!0,rel:!0});var mu=a(vr);Bc=i(mu,"tf.keras.Model"),mu.forEach(t),Oc=i(bi,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bi.forEach(t),Nc=m(Ke),F(Ht.$$.fragment,Ke),Uc=m(Ke),kr=s(Ke,"P",{});var Ti=a(kr);Hc=i(Ti,"This class overrides "),bs=s(Ti,"A",{href:!0});var cu=a(bs);Qc=i(cu,"TFRobertaForMaskedLM"),cu.forEach(t),Vc=i(Ti,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Ti.forEach(t),Ke.forEach(t),fn=m(o),wt=s(o,"H2",{class:!0});var wi=a(wt);Qt=s(wi,"A",{id:!0,class:!0,href:!0});var hu=a(Qt);$a=s(hu,"SPAN",{});var fu=a($a);F(br.$$.fragment,fu),fu.forEach(t),hu.forEach(t),Wc=m(wi),Fa=s(wi,"SPAN",{});var pu=a(Fa);Kc=i(pu,"TFCamembertForSequenceClassification"),pu.forEach(t),wi.forEach(t),pn=m(o),Le=s(o,"DIV",{class:!0});var Ge=a(Le);F(Tr.$$.fragment,Ge),Gc=m(Ge),Pa=s(Ge,"P",{});var uu=a(Pa);Xc=i(uu,`CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),uu.forEach(t),jc=m(Ge),wr=s(Ge,"P",{});var Ei=a(wr);Jc=i(Ei,"This model inherits from "),Ts=s(Ei,"A",{href:!0});var gu=a(Ts);Yc=i(gu,"TFPreTrainedModel"),gu.forEach(t),Zc=i(Ei,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ei.forEach(t),eh=m(Ge),Er=s(Ge,"P",{});var Ci=a(Er);th=i(Ci,"This model is also a "),Cr=s(Ci,"A",{href:!0,rel:!0});var _u=a(Cr);oh=i(_u,"tf.keras.Model"),_u.forEach(t),rh=i(Ci,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ci.forEach(t),sh=m(Ge),F(Vt.$$.fragment,Ge),ah=m(Ge),yr=s(Ge,"P",{});var yi=a(yr);nh=i(yi,"This class overrides "),ws=s(yi,"A",{href:!0});var vu=a(ws);ih=i(vu,"TFRobertaForSequenceClassification"),vu.forEach(t),lh=i(yi,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),yi.forEach(t),Ge.forEach(t),un=m(o),Et=s(o,"H2",{class:!0});var $i=a(Et);Wt=s($i,"A",{id:!0,class:!0,href:!0});var ku=a(Wt);Ma=s(ku,"SPAN",{});var bu=a(Ma);F($r.$$.fragment,bu),bu.forEach(t),ku.forEach(t),dh=m($i),qa=s($i,"SPAN",{});var Tu=a(qa);mh=i(Tu,"TFCamembertForMultipleChoice"),Tu.forEach(t),$i.forEach(t),gn=m(o),Re=s(o,"DIV",{class:!0});var Xe=a(Re);F(Fr.$$.fragment,Xe),ch=m(Xe),La=s(Xe,"P",{});var wu=a(La);hh=i(wu,`CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),wu.forEach(t),fh=m(Xe),Pr=s(Xe,"P",{});var Fi=a(Pr);ph=i(Fi,"This model inherits from "),Es=s(Fi,"A",{href:!0});var Eu=a(Es);uh=i(Eu,"TFPreTrainedModel"),Eu.forEach(t),gh=i(Fi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fi.forEach(t),_h=m(Xe),Mr=s(Xe,"P",{});var Pi=a(Mr);vh=i(Pi,"This model is also a "),qr=s(Pi,"A",{href:!0,rel:!0});var Cu=a(qr);kh=i(Cu,"tf.keras.Model"),Cu.forEach(t),bh=i(Pi,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Pi.forEach(t),Th=m(Xe),F(Kt.$$.fragment,Xe),wh=m(Xe),Lr=s(Xe,"P",{});var Mi=a(Lr);Eh=i(Mi,"This class overrides "),Cs=s(Mi,"A",{href:!0});var yu=a(Cs);Ch=i(yu,"TFRobertaForMultipleChoice"),yu.forEach(t),yh=i(Mi,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Mi.forEach(t),Xe.forEach(t),_n=m(o),Ct=s(o,"H2",{class:!0});var qi=a(Ct);Gt=s(qi,"A",{id:!0,class:!0,href:!0});var $u=a(Gt);Ra=s($u,"SPAN",{});var Fu=a(Ra);F(Rr.$$.fragment,Fu),Fu.forEach(t),$u.forEach(t),$h=m(qi),Aa=s(qi,"SPAN",{});var Pu=a(Aa);Fh=i(Pu,"TFCamembertForTokenClassification"),Pu.forEach(t),qi.forEach(t),vn=m(o),Ae=s(o,"DIV",{class:!0});var je=a(Ae);F(Ar.$$.fragment,je),Ph=m(je),za=s(je,"P",{});var Mu=a(za);Mh=i(Mu,`CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Mu.forEach(t),qh=m(je),zr=s(je,"P",{});var Li=a(zr);Lh=i(Li,"This model inherits from "),ys=s(Li,"A",{href:!0});var qu=a(ys);Rh=i(qu,"TFPreTrainedModel"),qu.forEach(t),Ah=i(Li,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Li.forEach(t),zh=m(je),Dr=s(je,"P",{});var Ri=a(Dr);Dh=i(Ri,"This model is also a "),xr=s(Ri,"A",{href:!0,rel:!0});var Lu=a(xr);xh=i(Lu,"tf.keras.Model"),Lu.forEach(t),Ih=i(Ri,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ri.forEach(t),Sh=m(je),F(Xt.$$.fragment,je),Bh=m(je),Ir=s(je,"P",{});var Ai=a(Ir);Oh=i(Ai,"This class overrides "),$s=s(Ai,"A",{href:!0});var Ru=a($s);Nh=i(Ru,"TFRobertaForTokenClassification"),Ru.forEach(t),Uh=i(Ai,`. 
Please check the superclass for the appropriate documentation alongside usage examples.`),Ai.forEach(t),je.forEach(t),kn=m(o),yt=s(o,"H2",{class:!0});var zi=a(yt);jt=s(zi,"A",{id:!0,class:!0,href:!0});var Au=a(jt);Da=s(Au,"SPAN",{});var zu=a(Da);F(Sr.$$.fragment,zu),zu.forEach(t),Au.forEach(t),Hh=m(zi),xa=s(zi,"SPAN",{});var Du=a(xa);Qh=i(Du,"TFCamembertForQuestionAnswering"),Du.forEach(t),zi.forEach(t),bn=m(o),ze=s(o,"DIV",{class:!0});var Je=a(ze);F(Br.$$.fragment,Je),Vh=m(Je),$t=s(Je,"P",{});var As=a($t);Wh=i(As,`CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ia=s(As,"CODE",{});var xu=a(Ia);Kh=i(xu,"span start logits"),xu.forEach(t),Gh=i(As," and "),Sa=s(As,"CODE",{});var Iu=a(Sa);Xh=i(Iu,"span end logits"),Iu.forEach(t),jh=i(As,")."),As.forEach(t),Jh=m(Je),Or=s(Je,"P",{});var Di=a(Or);Yh=i(Di,"This model inherits from "),Fs=s(Di,"A",{href:!0});var Su=a(Fs);Zh=i(Su,"TFPreTrainedModel"),Su.forEach(t),ef=i(Di,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Di.forEach(t),tf=m(Je),Nr=s(Je,"P",{});var xi=a(Nr);of=i(xi,"This model is also a "),Ur=s(xi,"A",{href:!0,rel:!0});var Bu=a(Ur);rf=i(Bu,"tf.keras.Model"),Bu.forEach(t),sf=i(xi,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),xi.forEach(t),af=m(Je),F(Jt.$$.fragment,Je),nf=m(Je),Hr=s(Je,"P",{});var Ii=a(Hr);lf=i(Ii,"This class overrides "),Ps=s(Ii,"A",{href:!0});var Ou=a(Ps);df=i(Ou,"TFRobertaForQuestionAnswering"),Ou.forEach(t),mf=i(Ii,`. Please check the superclass for the appropriate documentation alongside usage examples.`),Ii.forEach(t),Je.forEach(t),this.h()},h(){c(b,"name","hf:doc:metadata"),c(b,"content",JSON.stringify(Yu)),c(_,"id","camembert"),c(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_,"href","#camembert"),c(A,"class","relative group"),c(K,"id","overview"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#overview"),c(u,"class","relative group"),c(j,"href","https://arxiv.org/abs/1911.03894"),c(j,"rel","nofollow"),c(re,"href","roberta"),c(l,"href","https://huggingface.co/camembert"),c(l,"rel","nofollow"),c(ke,"href","https://camembert-model.fr/"),c(ke,"rel","nofollow"),c(ve,"id","transformers.CamembertConfig"),c(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ve,"href","#transformers.CamembertConfig"),c(ge,"class","relative group"),c(Wr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaConfig"),c(_e,"class","docstring"),c(Ft,"id","transformers.CamembertTokenizer"),c(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ft,"href","#transformers.CamembertTokenizer"),c(mt,"class","relative 
group"),c(Kr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Gr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),c(oo,"href","https://github.com/google/sentencepiece"),c(oo,"rel","nofollow"),c(Xr,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Ze,"class","docstring"),c(Pt,"class","docstring"),c(Mt,"class","docstring"),c(Qs,"class","docstring"),c($e,"class","docstring"),c(qt,"id","transformers.CamembertTokenizerFast"),c(qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qt,"href","#transformers.CamembertTokenizerFast"),c(ht,"class","relative group"),c(Yr,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Zr,"href","/docs/transformers/v4.15.0/en/model_doc/xlnet#transformers.XLNetTokenizer"),c(ho,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models"),c(ho,"rel","nofollow"),c(es,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(et,"class","docstring"),c(Lt,"class","docstring"),c(Ie,"class","docstring"),c(Rt,"id","transformers.CamembertModel"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.CamembertModel"),c(ft,"class","relative group"),c(rs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(To,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(To,"rel","nofollow"),c(ss,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaModel"),c(Se,"class","docstring"),c(At,"id","transformers.CamembertForCausalLM"),c(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(At,"href","#transformers.CamembertForCausalLM"),c(pt,"class","relative group"),c(as,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Po,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Po,"rel","nofollow"),c(ns,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForCausalLM"),c(Be,"class","docstring"),c(zt,"id","transformers.CamembertForMaskedLM"),c(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zt,"href","#transformers.CamembertForMaskedLM"),c(ut,"class","relative group"),c(is,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Do,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Do,"rel","nofollow"),c(ls,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMaskedLM"),c(Oe,"class","docstring"),c(Dt,"id","transformers.CamembertForSequenceClassification"),c(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dt,"href","#transformers.CamembertForSequenceClassification"),c(gt,"class","relative 
group"),c(ds,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(No,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(No,"rel","nofollow"),c(ms,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForSequenceClassification"),c(Ne,"class","docstring"),c(xt,"id","transformers.CamembertForMultipleChoice"),c(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xt,"href","#transformers.CamembertForMultipleChoice"),c(_t,"class","relative group"),c(cs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Ko,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ko,"rel","nofollow"),c(hs,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForMultipleChoice"),c(Ue,"class","docstring"),c(It,"id","transformers.CamembertForTokenClassification"),c(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(It,"href","#transformers.CamembertForTokenClassification"),c(vt,"class","relative group"),c(fs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(Zo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Zo,"rel","nofollow"),c(ps,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForTokenClassification"),c(He,"class","docstring"),c(St,"id","transformers.CamembertForQuestionAnswering"),c(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(St,"href","#transformers.CamembertForQuestionAnswering"),c(kt,"class","relative group"),c(us,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel"),c(ar,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ar,"rel","nofollow"),c(gs,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.RobertaForQuestionAnswering"),c(Qe,"class","docstring"),c(Ot,"id","transformers.TFCamembertModel"),c(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ot,"href","#transformers.TFCamembertModel"),c(bt,"class","relative group"),c(_s,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(cr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(cr,"rel","nofollow"),c(vs,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaModel"),c(Me,"class","docstring"),c(Ut,"id","transformers.TFCamembertForMaskedLM"),c(Ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ut,"href","#transformers.TFCamembertForMaskedLM"),c(Tt,"class","relative 
group"),c(ks,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(vr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(vr,"rel","nofollow"),c(bs,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMaskedLM"),c(qe,"class","docstring"),c(Qt,"id","transformers.TFCamembertForSequenceClassification"),c(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qt,"href","#transformers.TFCamembertForSequenceClassification"),c(wt,"class","relative group"),c(Ts,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Cr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Cr,"rel","nofollow"),c(ws,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification"),c(Le,"class","docstring"),c(Wt,"id","transformers.TFCamembertForMultipleChoice"),c(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wt,"href","#transformers.TFCamembertForMultipleChoice"),c(Et,"class","relative group"),c(Es,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(qr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(qr,"rel","nofollow"),c(Cs,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice"),c(Re,"class","docstring"),c(Gt,"id","transformers.TFCamembertForTokenClassification"),c(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gt,"href","#transformers.TFCamembertForTokenClassification"),c(Ct,"class","relative group"),c(ys,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(xr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(xr,"rel","nofollow"),c($s,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForTokenClassification"),c(Ae,"class","docstring"),c(jt,"id","transformers.TFCamembertForQuestionAnswering"),c(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jt,"href","#transformers.TFCamembertForQuestionAnswering"),c(yt,"class","relative 
group"),c(Fs,"href","/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel"),c(Ur,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ur,"rel","nofollow"),c(Ps,"href","/docs/transformers/v4.15.0/en/model_doc/roberta#transformers.TFRobertaForQuestionAnswering"),c(ze,"class","docstring")},m(o,f){e(document.head,b),h(o,Z,f),h(o,A,f),e(A,_),e(_,z),P(G,z,null),e(A,le),e(A,D),e(D,de),h(o,Q,f),h(o,u,f),e(u,K),e(K,x),P(X,x,null),e(u,me),e(u,I),e(I,ce),h(o,se,f),h(o,H,f),e(H,E),e(H,j),e(j,V),e(H,g),h(o,k,f),h(o,ee,f),e(ee,O),h(o,ae,f),h(o,te,f),e(te,S),e(S,he),h(o,ne,f),h(o,v,f),e(v,fe),h(o,B,f),h(o,oe,f),e(oe,J),e(J,N),e(J,re),e(re,pe),e(J,C),h(o,ie,f),h(o,w,f),e(w,ue),e(w,l),e(l,p),e(w,W),e(w,ke),e(ke,Ee),e(w,y),h(o,Te,f),h(o,ge,f),e(ge,ve),e(ve,T),P(R,T,null),e(ge,Ce),e(ge,be),e(be,U),h(o,we,f),h(o,_e,f),P(Y,_e,null),e(_e,ye),e(_e,Zt),e(Zt,Si),e(Zt,Wr),e(Wr,Bi),e(Zt,Oi),h(o,Qa,f),h(o,mt,f),e(mt,Ft),e(Ft,Ds),P(eo,Ds,null),e(mt,Ni),e(mt,xs),e(xs,Ui),h(o,Va,f),h(o,$e,f),P(to,$e,null),e($e,Hi),e($e,Ye),e(Ye,Qi),e(Ye,Kr),e(Kr,Vi),e(Ye,Wi),e(Ye,Gr),e(Gr,Ki),e(Ye,Gi),e(Ye,oo),e(oo,Xi),e(Ye,ji),e($e,Ji),e($e,ro),e(ro,Yi),e(ro,Xr),e(Xr,Zi),e(ro,el),e($e,tl),e($e,ct),e(ct,ol),e(ct,Is),e(Is,rl),e(ct,sl),e(ct,Ss),e(Ss,al),e(ct,nl),e($e,il),e($e,Ze),P(so,Ze,null),e(Ze,ll),e(Ze,Bs),e(Bs,dl),e(Ze,ml),e(Ze,ao),e(ao,jr),e(jr,cl),e(jr,Os),e(Os,hl),e(ao,fl),e(ao,Jr),e(Jr,pl),e(Jr,Ns),e(Ns,ul),e($e,gl),e($e,Pt),P(no,Pt,null),e(Pt,_l),e(Pt,io),e(io,vl),e(io,Us),e(Us,kl),e(io,bl),e($e,Tl),e($e,Mt),P(lo,Mt,null),e(Mt,wl),e(Mt,Hs),e(Hs,El),e($e,Cl),e($e,Qs),h(o,Wa,f),h(o,ht,f),e(ht,qt),e(qt,Vs),P(mo,Vs,null),e(ht,yl),e(ht,Ws),e(Ws,$l),h(o,Ka,f),h(o,Ie,f),P(co,Ie,null),e(Ie,Fl),e(Ie,Ve),e(Ve,Pl),e(Ve,Ks),e(Ks,Ml),e(Ve,ql),e(Ve,Yr),e(Yr,Ll),e(Ve,Rl),e(Ve,Zr),e(Zr,Al),e(Ve,zl),e(Ve,ho),e(ho,Dl),e(Ve,xl),e(Ie,Il),e(Ie,fo),e(fo,Sl),e(fo,es),e(es,Bl),e(fo,Ol),e(Ie,Nl),e(Ie,et),P(po,et,null),e(et,Ul),e(et,Gs),e(Gs,Hl),e(et,Ql),e(et,uo),e(uo,ts),e(ts,Vl),e(ts,Xs),e(Xs,Wl),e(uo,Kl),e(uo,os),e(os,Gl),e(os,js),e(js,Xl),e(Ie,jl),e(Ie,Lt),P(go,Lt,null),e(Lt,Jl),e(Lt,Js),e(Js,Yl),h(o,Ga,f),h(o,ft,f),e(ft,Rt),e(Rt,Ys),P(_o,Ys,null),e(ft,Zl),e(ft,Zs),e(Zs,ed),h(o,Xa,f),h(o,Se,f),P(vo,Se,null),e(Se,td),e(Se,ea),e(ea,od),e(Se,rd),e(Se,ko),e(ko,sd),e(ko,rs),e(rs,ad),e(ko,nd),e(Se,id),e(Se,bo),e(bo,ld),e(bo,To),e(To,dd),e(bo,md),e(Se,cd),e(Se,wo),e(wo,hd),e(wo,ss),e(ss,fd),e(wo,pd),h(o,ja,f),h(o,pt,f),e(pt,At),e(At,ta),P(Eo,ta,null),e(pt,ud),e(pt,oa),e(oa,gd),h(o,Ja,f),h(o,Be,f),P(Co,Be,null),e(Be,_d),e(Be,yo),e(yo,vd),e(yo,ra),e(ra,kd),e(yo,bd),e(Be,Td),e(Be,$o),e($o,wd),e($o,as),e(as,Ed),e($o,Cd),e(Be,yd),e(Be,Fo),e(Fo,$d),e(Fo,Po),e(Po,Fd),e(Fo,Pd),e(Be,Md),e(Be,Mo),e(Mo,qd),e(Mo,ns),e(ns,Ld),e(Mo,Rd),h(o,Ya,f),h(o,ut,f),e(ut,zt),e(zt,sa),P(qo,sa,null),e(ut,Ad),e(ut,aa),e(aa,zd),h(o,Za,f),h(o,Oe,f),P(Lo,Oe,null),e(Oe,Dd),e(Oe,Ro),e(Ro,xd),e(Ro,na),e(na,Id),e(Ro,Sd),e(Oe,Bd),e(Oe,Ao),e(Ao,Od),e(Ao,is),e(is,Nd),e(Ao,Ud),e(Oe,Hd),e(Oe,zo),e(zo,Qd),e(zo,Do),e(Do,Vd),e(zo,Wd),e(Oe,Kd),e(Oe,xo),e(xo,Gd),e(xo,ls),e(ls,Xd),e(xo,jd),h(o,en,f),h(o,gt,f),e(gt,Dt),e(Dt,ia),P(Io,ia,null),e(gt,Jd),e(gt,la),e(la,Yd),h(o,tn,f),h(o,Ne,f),P(So,Ne,null),e(Ne,Zd),e(Ne,da),e(da,em),e(Ne,tm),e(Ne,Bo),e(Bo,om),e(Bo,ds),e(ds,rm),e(Bo,sm),e(Ne,am),e(Ne,Oo),e(Oo,nm),e(Oo,No),e(No,im),e(Oo,lm),e(Ne,dm),e(Ne,Uo),e(Uo,mm),e(Uo,ms),e(ms,cm),e(Uo,hm),h(o,on,f),h(o,_t,f),e(_t,xt),e(xt,ma),P(Ho,ma,null),e(_t,fm),e(_t,ca),e(ca,pm),h(o,rn,f),h(o,Ue,f),P(Qo,Ue,null),e(Ue,um),e(Ue,ha),e(ha,gm),e(Ue,_m),e(Ue,Vo),e(Vo,vm),e(Vo,cs),
e(cs,km),e(Vo,bm),e(Ue,Tm),e(Ue,Wo),e(Wo,wm),e(Wo,Ko),e(Ko,Em),e(Wo,Cm),e(Ue,ym),e(Ue,Go),e(Go,$m),e(Go,hs),e(hs,Fm),e(Go,Pm),h(o,sn,f),h(o,vt,f),e(vt,It),e(It,fa),P(Xo,fa,null),e(vt,Mm),e(vt,pa),e(pa,qm),h(o,an,f),h(o,He,f),P(jo,He,null),e(He,Lm),e(He,ua),e(ua,Rm),e(He,Am),e(He,Jo),e(Jo,zm),e(Jo,fs),e(fs,Dm),e(Jo,xm),e(He,Im),e(He,Yo),e(Yo,Sm),e(Yo,Zo),e(Zo,Bm),e(Yo,Om),e(He,Nm),e(He,er),e(er,Um),e(er,ps),e(ps,Hm),e(er,Qm),h(o,nn,f),h(o,kt,f),e(kt,St),e(St,ga),P(tr,ga,null),e(kt,Vm),e(kt,_a),e(_a,Wm),h(o,ln,f),h(o,Qe,f),P(or,Qe,null),e(Qe,Km),e(Qe,Bt),e(Bt,Gm),e(Bt,va),e(va,Xm),e(Bt,jm),e(Bt,ka),e(ka,Jm),e(Qe,Ym),e(Qe,rr),e(rr,Zm),e(rr,us),e(us,ec),e(rr,tc),e(Qe,oc),e(Qe,sr),e(sr,rc),e(sr,ar),e(ar,sc),e(sr,ac),e(Qe,nc),e(Qe,nr),e(nr,ic),e(nr,gs),e(gs,lc),e(nr,dc),h(o,dn,f),h(o,bt,f),e(bt,Ot),e(Ot,ba),P(ir,ba,null),e(bt,mc),e(bt,Ta),e(Ta,cc),h(o,mn,f),h(o,Me,f),P(lr,Me,null),e(Me,hc),e(Me,wa),e(wa,fc),e(Me,pc),e(Me,dr),e(dr,uc),e(dr,_s),e(_s,gc),e(dr,_c),e(Me,vc),e(Me,mr),e(mr,kc),e(mr,cr),e(cr,bc),e(mr,Tc),e(Me,wc),P(Nt,Me,null),e(Me,Ec),e(Me,hr),e(hr,Cc),e(hr,vs),e(vs,yc),e(hr,$c),h(o,cn,f),h(o,Tt,f),e(Tt,Ut),e(Ut,Ea),P(fr,Ea,null),e(Tt,Fc),e(Tt,Ca),e(Ca,Pc),h(o,hn,f),h(o,qe,f),P(pr,qe,null),e(qe,Mc),e(qe,ur),e(ur,qc),e(ur,ya),e(ya,Lc),e(ur,Rc),e(qe,Ac),e(qe,gr),e(gr,zc),e(gr,ks),e(ks,Dc),e(gr,xc),e(qe,Ic),e(qe,_r),e(_r,Sc),e(_r,vr),e(vr,Bc),e(_r,Oc),e(qe,Nc),P(Ht,qe,null),e(qe,Uc),e(qe,kr),e(kr,Hc),e(kr,bs),e(bs,Qc),e(kr,Vc),h(o,fn,f),h(o,wt,f),e(wt,Qt),e(Qt,$a),P(br,$a,null),e(wt,Wc),e(wt,Fa),e(Fa,Kc),h(o,pn,f),h(o,Le,f),P(Tr,Le,null),e(Le,Gc),e(Le,Pa),e(Pa,Xc),e(Le,jc),e(Le,wr),e(wr,Jc),e(wr,Ts),e(Ts,Yc),e(wr,Zc),e(Le,eh),e(Le,Er),e(Er,th),e(Er,Cr),e(Cr,oh),e(Er,rh),e(Le,sh),P(Vt,Le,null),e(Le,ah),e(Le,yr),e(yr,nh),e(yr,ws),e(ws,ih),e(yr,lh),h(o,un,f),h(o,Et,f),e(Et,Wt),e(Wt,Ma),P($r,Ma,null),e(Et,dh),e(Et,qa),e(qa,mh),h(o,gn,f),h(o,Re,f),P(Fr,Re,null),e(Re,ch),e(Re,La),e(La,hh),e(Re,fh),e(Re,Pr),e(Pr,ph),e(Pr,Es),e(Es,uh),e(Pr,gh),e(Re,_h),e(Re,Mr),e(Mr,vh),e(Mr,qr),e(qr,kh),e(Mr,bh),e(Re,Th),P(Kt,Re,null),e(Re,wh),e(Re,Lr),e(Lr,Eh),e(Lr,Cs),e(Cs,Ch),e(Lr,yh),h(o,_n,f),h(o,Ct,f),e(Ct,Gt),e(Gt,Ra),P(Rr,Ra,null),e(Ct,$h),e(Ct,Aa),e(Aa,Fh),h(o,vn,f),h(o,Ae,f),P(Ar,Ae,null),e(Ae,Ph),e(Ae,za),e(za,Mh),e(Ae,qh),e(Ae,zr),e(zr,Lh),e(zr,ys),e(ys,Rh),e(zr,Ah),e(Ae,zh),e(Ae,Dr),e(Dr,Dh),e(Dr,xr),e(xr,xh),e(Dr,Ih),e(Ae,Sh),P(Xt,Ae,null),e(Ae,Bh),e(Ae,Ir),e(Ir,Oh),e(Ir,$s),e($s,Nh),e(Ir,Uh),h(o,kn,f),h(o,yt,f),e(yt,jt),e(jt,Da),P(Sr,Da,null),e(yt,Hh),e(yt,xa),e(xa,Qh),h(o,bn,f),h(o,ze,f),P(Br,ze,null),e(ze,Vh),e(ze,$t),e($t,Wh),e($t,Ia),e(Ia,Kh),e($t,Gh),e($t,Sa),e(Sa,Xh),e($t,jh),e(ze,Jh),e(ze,Or),e(Or,Yh),e(Or,Fs),e(Fs,Zh),e(Or,ef),e(ze,tf),e(ze,Nr),e(Nr,of),e(Nr,Ur),e(Ur,rf),e(Nr,sf),e(ze,af),P(Jt,ze,null),e(ze,nf),e(ze,Hr),e(Hr,lf),e(Hr,Ps),e(Ps,df),e(Hr,mf),Tn=!0},p(o,[f]){const Qr={};f&2&&(Qr.$$scope={dirty:f,ctx:o}),Nt.$set(Qr);const Ba={};f&2&&(Ba.$$scope={dirty:f,ctx:o}),Ht.$set(Ba);const Oa={};f&2&&(Oa.$$scope={dirty:f,ctx:o}),Vt.$set(Oa);const Na={};f&2&&(Na.$$scope={dirty:f,ctx:o}),Kt.$set(Na);const Vr={};f&2&&(Vr.$$scope={dirty:f,ctx:o}),Xt.$set(Vr);const 
Ua={};f&2&&(Ua.$$scope={dirty:f,ctx:o}),Jt.$set(Ua)},i(o){Tn||(M(G.$$.fragment,o),M(X.$$.fragment,o),M(R.$$.fragment,o),M(Y.$$.fragment,o),M(eo.$$.fragment,o),M(to.$$.fragment,o),M(so.$$.fragment,o),M(no.$$.fragment,o),M(lo.$$.fragment,o),M(mo.$$.fragment,o),M(co.$$.fragment,o),M(po.$$.fragment,o),M(go.$$.fragment,o),M(_o.$$.fragment,o),M(vo.$$.fragment,o),M(Eo.$$.fragment,o),M(Co.$$.fragment,o),M(qo.$$.fragment,o),M(Lo.$$.fragment,o),M(Io.$$.fragment,o),M(So.$$.fragment,o),M(Ho.$$.fragment,o),M(Qo.$$.fragment,o),M(Xo.$$.fragment,o),M(jo.$$.fragment,o),M(tr.$$.fragment,o),M(or.$$.fragment,o),M(ir.$$.fragment,o),M(lr.$$.fragment,o),M(Nt.$$.fragment,o),M(fr.$$.fragment,o),M(pr.$$.fragment,o),M(Ht.$$.fragment,o),M(br.$$.fragment,o),M(Tr.$$.fragment,o),M(Vt.$$.fragment,o),M($r.$$.fragment,o),M(Fr.$$.fragment,o),M(Kt.$$.fragment,o),M(Rr.$$.fragment,o),M(Ar.$$.fragment,o),M(Xt.$$.fragment,o),M(Sr.$$.fragment,o),M(Br.$$.fragment,o),M(Jt.$$.fragment,o),Tn=!0)},o(o){q(G.$$.fragment,o),q(X.$$.fragment,o),q(R.$$.fragment,o),q(Y.$$.fragment,o),q(eo.$$.fragment,o),q(to.$$.fragment,o),q(so.$$.fragment,o),q(no.$$.fragment,o),q(lo.$$.fragment,o),q(mo.$$.fragment,o),q(co.$$.fragment,o),q(po.$$.fragment,o),q(go.$$.fragment,o),q(_o.$$.fragment,o),q(vo.$$.fragment,o),q(Eo.$$.fragment,o),q(Co.$$.fragment,o),q(qo.$$.fragment,o),q(Lo.$$.fragment,o),q(Io.$$.fragment,o),q(So.$$.fragment,o),q(Ho.$$.fragment,o),q(Qo.$$.fragment,o),q(Xo.$$.fragment,o),q(jo.$$.fragment,o),q(tr.$$.fragment,o),q(or.$$.fragment,o),q(ir.$$.fragment,o),q(lr.$$.fragment,o),q(Nt.$$.fragment,o),q(fr.$$.fragment,o),q(pr.$$.fragment,o),q(Ht.$$.fragment,o),q(br.$$.fragment,o),q(Tr.$$.fragment,o),q(Vt.$$.fragment,o),q($r.$$.fragment,o),q(Fr.$$.fragment,o),q(Kt.$$.fragment,o),q(Rr.$$.fragment,o),q(Ar.$$.fragment,o),q(Xt.$$.fragment,o),q(Sr.$$.fragment,o),q(Br.$$.fragment,o),q(Jt.$$.fragment,o),Tn=!1},d(o){t(b),o&&t(Z),o&&t(A),L(G),o&&t(Q),o&&t(u),L(X),o&&t(se),o&&t(H),o&&t(k),o&&t(ee),o&&t(ae),o&&t(te),o&&t(ne),o&&t(v),o&&t(B),o&&t(oe),o&&t(ie),o&&t(w),o&&t(Te),o&&t(ge),L(R),o&&t(we),o&&t(_e),L(Y),o&&t(Qa),o&&t(mt),L(eo),o&&t(Va),o&&t($e),L(to),L(so),L(no),L(lo),o&&t(Wa),o&&t(ht),L(mo),o&&t(Ka),o&&t(Ie),L(co),L(po),L(go),o&&t(Ga),o&&t(ft),L(_o),o&&t(Xa),o&&t(Se),L(vo),o&&t(ja),o&&t(pt),L(Eo),o&&t(Ja),o&&t(Be),L(Co),o&&t(Ya),o&&t(ut),L(qo),o&&t(Za),o&&t(Oe),L(Lo),o&&t(en),o&&t(gt),L(Io),o&&t(tn),o&&t(Ne),L(So),o&&t(on),o&&t(_t),L(Ho),o&&t(rn),o&&t(Ue),L(Qo),o&&t(sn),o&&t(vt),L(Xo),o&&t(an),o&&t(He),L(jo),o&&t(nn),o&&t(kt),L(tr),o&&t(ln),o&&t(Qe),L(or),o&&t(dn),o&&t(bt),L(ir),o&&t(mn),o&&t(Me),L(lr),L(Nt),o&&t(cn),o&&t(Tt),L(fr),o&&t(hn),o&&t(qe),L(pr),L(Ht),o&&t(fn),o&&t(wt),L(br),o&&t(pn),o&&t(Le),L(Tr),L(Vt),o&&t(un),o&&t(Et),L($r),o&&t(gn),o&&t(Re),L(Fr),L(Kt),o&&t(_n),o&&t(Ct),L(Rr),o&&t(vn),o&&t(Ae),L(Ar),L(Xt),o&&t(kn),o&&t(yt),L(Sr),o&&t(bn),o&&t(ze),L(Br),L(Jt)}}}const 
Yu={local:"camembert",sections:[{local:"overview",title:"Overview"},{local:"transformers.CamembertConfig",title:"CamembertConfig"},{local:"transformers.CamembertTokenizer",title:"CamembertTokenizer"},{local:"transformers.CamembertTokenizerFast",title:"CamembertTokenizerFast"},{local:"transformers.CamembertModel",title:"CamembertModel"},{local:"transformers.CamembertForCausalLM",title:"CamembertForCausalLM"},{local:"transformers.CamembertForMaskedLM",title:"CamembertForMaskedLM"},{local:"transformers.CamembertForSequenceClassification",title:"CamembertForSequenceClassification"},{local:"transformers.CamembertForMultipleChoice",title:"CamembertForMultipleChoice"},{local:"transformers.CamembertForTokenClassification",title:"CamembertForTokenClassification"},{local:"transformers.CamembertForQuestionAnswering",title:"CamembertForQuestionAnswering"},{local:"transformers.TFCamembertModel",title:"TFCamembertModel"},{local:"transformers.TFCamembertForMaskedLM",title:"TFCamembertForMaskedLM"},{local:"transformers.TFCamembertForSequenceClassification",title:"TFCamembertForSequenceClassification"},{local:"transformers.TFCamembertForMultipleChoice",title:"TFCamembertForMultipleChoice"},{local:"transformers.TFCamembertForTokenClassification",title:"TFCamembertForTokenClassification"},{local:"transformers.TFCamembertForQuestionAnswering",title:"TFCamembertForQuestionAnswering"}],title:"CamemBERT"};function Zu(xe,b,Z){let{fw:A}=b;return xe.$$set=_=>{"fw"in _&&Z(0,A=_.fw)},[A]}class sg extends Nu{constructor(b){super();Uu(this,b,Zu,Ju,Hu,{fw:0})}}export{sg as default,Yu as metadata};
9,993
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/main_classes/callback.mdx-dc097cb5.js
import{S as xd,i as Fd,s as jd,e as s,k as c,w as v,t as r,L as Nd,c as o,d as t,m as f,a as l,x as u,h as n,b as i,J as e,g as m,y as _,q as b,o as E,B as k}from"../../chunks/vendor-b1433968.js";import{T as Wd}from"../../chunks/Tip-c3840994.js";import{D as $}from"../../chunks/Docstring-ff504c58.js";import{C as bh}from"../../chunks/CodeBlock-a320dbd7.js";import{I as $n}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function zd(nr){let w,Y,A,S,Z,M,Qe,ee;return{c(){w=s("p"),Y=r(`In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `),A=s("code"),S=r("gradient_accumulation_steps=n"),Z=r(`, then one update step requires going through `),M=s("em"),Qe=r("n"),ee=r(" batches.")},l(te){w=o(te,"P",{});var x=l(w);Y=n(x,`In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `),A=o(x,"CODE",{});var q=l(A);S=n(q,"gradient_accumulation_steps=n"),q.forEach(t),Z=n(x,`, then one update step requires going through `),M=o(x,"EM",{});var sa=l(M);Qe=n(sa,"n"),sa.forEach(t),ee=n(x," batches."),x.forEach(t)},m(te,x){m(te,w,x),e(w,Y),e(w,A),e(A,S),e(w,Z),e(w,M),e(M,Qe),e(w,ee)},d(te){te&&t(w)}}}function Bd(nr){let w,Y,A,S,Z,M,Qe,ee,te,x,q,sa,oa,Fs,js,Cn,j,Ns,la,Ws,zs,ia,Bs,Vs,ca,qs,Rs,wn,Ee,Hs,fa,Us,Gs,yn,D,ha,ma,Js,Ys,Xs,X,da,Ks,Qs,pa,Zs,eo,ga,to,ao,ro,va,ua,no,so,oo,ke,_a,lo,io,Ze,co,fo,ho,Te,ba,mo,po,et,go,vo,uo,$e,Ea,_o,bo,tt,Eo,ko,To,Ce,ka,$o,Co,at,wo,yo,An,L,Ao,Ta,Do,Lo,$a,So,Po,Ca,Oo,Io,wa,Mo,xo,ya,Fo,jo,Dn,ae,we,sr,rt,No,or,Wo,Ln,ye,zo,Aa,Bo,Vo,Sn,R,nt,qo,re,Ro,Da,Ho,Uo,st,Go,Jo,Yo,N,ot,Xo,lr,Ko,Qo,C,Zo,ir,el,tl,cr,al,rl,fr,nl,sl,hr,ol,ll,mr,il,cl,dr,fl,hl,pr,ml,dl,gr,pl,gl,vr,vl,ul,_l,lt,bl,it,El,kl,Pn,ne,ct,Tl,ft,$l,La,Cl,wl,On,se,ht,yl,mt,Al,Sa,Dl,Ll,In,oe,dt,Sl,pt,Pl,Pa,Ol,Il,Mn,H,gt,Ml,vt,xl,Oa,Fl,jl,Nl,U,Wl,Ia,zl,Bl,ur,Vl,ql,Ma,Rl,Hl,xn,le,ut,Ul,ie,Gl,xa,Jl,Yl,_t,Xl,Kl,Fn,G,bt,Ql,ce,Zl,Fa,ei,ti,Et,ai,ri,ni,W,kt,si,Tt,oi,_r,li,ii,ci,$t,fi,Ct,hi,mi,di,d,pi,br,gi,vi,Er,ui,_i,kr,bi,Ei,Tr,ki,Ti,$r,$i,Ci,Cr,wi,yi,wr,Ai,Di,yr,Li,Si,Ar,Pi,Oi,Dr,Ii,Mi,Lr,xi,Fi,Sr,ji,Ni,Pr,Wi,zi,Or,Bi,Vi,Ir,qi,Ri,Mr,Hi,Ui,xr,Gi,Ji,Fr,Yi,Xi,jr,Ki,Qi,jn,J,wt,Zi,fe,ec,ja,tc,ac,yt,rc,nc,sc,z,At,oc,Nr,lc,ic,he,cc,Wr,fc,hc,zr,mc,dc,pc,F,gc,Br,vc,uc,Vr,_c,bc,Na,Ec,kc,qr,Tc,$c,Nn,me,Dt,Cc,de,wc,Wa,yc,Ac,Lt,Dc,Lc,Wn,pe,Ae,Rr,St,Sc,Hr,Pc,zn,p,Pt,Oc,Ur,Ic,Mc,Ot,xc,Gr,Fc,jc,Nc,P,Wc,Jr,zc,Bc,Yr,Vc,qc,Xr,Rc,Hc,Kr,Uc,Gc,Qr,Jc,Yc,Xc,Zr,Kc,Qc,It,Zc,De,Mt,ef,en,tf,af,Le,xt,rf,tn,nf,sf,Se,Ft,of,an,lf,cf,Pe,jt,ff,Nt,hf,za,mf,df,pf,Oe,Wt,gf,rn,vf,uf,Ie,zt,_f,nn,bf,Ef,Me,Bt,kf,sn,Tf,$f,xe,Vt,Cf,on,wf,yf,Fe,qt,Af,ln,Df,Lf,je,Rt,Sf,cn,Pf,Of,Ne,Ht,If,fn,Mf,xf,We,Ut,Ff,hn,jf,Bn,ze,Nf,Ba,Wf,zf,Vn,Gt,qn,Be,Bf,mn,Vf,qf,Rn,Jt,Hn,ge,Ve,dn,Yt,Rf,pn,Hf,Un,O,Xt,Uf,ve,Gf,Va,Jf,Yf,qa,Xf,Kf,Qf,qe,Zf,Re,Kt,eh,Qt,th,gn,ah,rh,nh,He,Zt,sh,ea,oh,vn,lh,ih,Gn,ue,Ue,un,ta,ch,_n,fh,Jn,_e,aa,hh,be,mh,Ra,dh,ph,Ha,gh,vh,Yn;return M=new $n({}),rt=new $n({}),nt=new $({props:{name:"class transformers.integrations.CometCallback",anchor:"transformers.integrations.CometCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L584"}}),ot=new 
$({props:{name:"setup",anchor:"transformers.integrations.CometCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L595"}}),ct=new $({props:{name:"class transformers.DefaultFlowCallback",anchor:"transformers.DefaultFlowCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L408"}}),ht=new $({props:{name:"class transformers.PrinterCallback",anchor:"transformers.PrinterCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L497"}}),dt=new $({props:{name:"class transformers.ProgressCallback",anchor:"transformers.ProgressCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L455"}}),gt=new $({props:{name:"class transformers.EarlyStoppingCallback",anchor:"transformers.EarlyStoppingCallback",parameters:[{name:"early_stopping_patience",val:": int = 1"},{name:"early_stopping_threshold",val:": typing.Optional[float] = 0.0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L508",parametersDescription:[{anchor:"transformers.EarlyStoppingCallback.early_stopping_patience",description:`<strong>early_stopping_patience</strong> (<code>int</code>) &#x2014; Use with <code>metric_for_best_model</code> to stop training when the specified metric worsens for <code>early_stopping_patience</code> evaluation calls.`,name:"early_stopping_patience"},{anchor:"transformers.EarlyStoppingCallback.early_stopping_threshold(float,",description:`<strong>early_stopping_threshold(<code>float</code>,</strong> <em>optional</em>) &#x2014; Use with TrainingArguments <code>metric_for_best_model</code> and <code>early_stopping_patience</code> to denote how much the specified metric must improve to satisfy early stopping conditions. \``,name:"early_stopping_threshold(float,"}]}}),ut=new $({props:{name:"class transformers.integrations.TensorBoardCallback",anchor:"transformers.integrations.TensorBoardCallback",parameters:[{name:"tb_writer",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L373",parametersDescription:[{anchor:"transformers.integrations.TensorBoardCallback.tb_writer",description:`<strong>tb_writer</strong> (<code>SummaryWriter</code>, <em>optional</em>) &#x2014; The writer to use. 
Will instantiate one if not set.`,name:"tb_writer"}]}}),bt=new $({props:{name:"class transformers.integrations.WandbCallback",anchor:"transformers.integrations.WandbCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L461"}}),kt=new $({props:{name:"setup",anchor:"transformers.integrations.WandbCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L478"}}),wt=new $({props:{name:"class transformers.integrations.MLflowCallback",anchor:"transformers.integrations.MLflowCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L682"}}),At=new $({props:{name:"setup",anchor:"transformers.integrations.MLflowCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L699"}}),Dt=new $({props:{name:"class transformers.integrations.AzureMLCallback",anchor:"transformers.integrations.AzureMLCallback",parameters:[{name:"azureml_run",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/integrations.py#L659"}}),St=new $n({}),Pt=new $({props:{name:"class transformers.TrainerCallback",anchor:"transformers.TrainerCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L160",parametersDescription:[{anchor:"transformers.TrainerCallback.args",description:`<strong>args</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>) &#x2014; The training arguments used to instantiate the <a href="/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer">Trainer</a>.`,name:"args"},{anchor:"transformers.TrainerCallback.state",description:`<strong>state</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerState">TrainerState</a>) &#x2014; The current state of the <a href="/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer">Trainer</a>.`,name:"state"},{anchor:"transformers.TrainerCallback.control",description:`<strong>control</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>) &#x2014; The object that is returned to the <a href="/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer">Trainer</a> and can be used to make some decisions.`,name:"control"},{anchor:"transformers.TrainerCallback.model",description:`<strong>model</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>) &#x2014; The model being trained.`,name:"model"},{anchor:"transformers.TrainerCallback.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.TrainerCallback.optimizer",description:`<strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer used for the training 
steps.`,name:"optimizer"},{anchor:"transformers.TrainerCallback.lr_scheduler",description:`<strong>lr_scheduler</strong> (<code>torch.optim.lr_scheduler.LambdaLR</code>) &#x2014; The scheduler used for setting the learning rate.`,name:"lr_scheduler"},{anchor:"transformers.TrainerCallback.train_dataloader",description:`<strong>train_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.`,name:"train_dataloader"},{anchor:"transformers.TrainerCallback.eval_dataloader",description:`<strong>eval_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.`,name:"eval_dataloader"},{anchor:"transformers.TrainerCallback.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics computed by the last evaluation phase.</p> <p>Those are only accessible in the event <code>on_evaluate</code>.`,name:"metrics"},{anchor:"transformers.TrainerCallback.logs",description:`<strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.</p> <p>Those are only accessible in the event <code>on_log</code>.`,name:"logs"}]}}),It=new bh({props:{code:`class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs),`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">PrinterCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_log</span>(<span class="hljs-params">self, args, state, control, logs=<span class="hljs-literal">None</span>, **kwargs</span>): _ = logs.pop(<span class="hljs-string">&quot;total_flos&quot;</span>, <span class="hljs-literal">None</span>) <span class="hljs-keyword">if</span> state.is_local_process_zero: <span class="hljs-built_in">print</span>(logs)`}}),Mt=new $({props:{name:"on_epoch_begin",anchor:"transformers.TrainerCallback.on_epoch_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L229"}}),xt=new $({props:{name:"on_epoch_end",anchor:"transformers.TrainerCallback.on_epoch_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L235"}}),Ft=new $({props:{name:"on_evaluate",anchor:"transformers.TrainerCallback.on_evaluate",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L261"}}),jt=new $({props:{name:"on_init_end",anchor:"transformers.TrainerCallback.on_init_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L211"}}),Wt=new $({props:{name:"on_log",anchor:"transformers.TrainerCallback.on_log",parameters:[{name:"args",val:": 
TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L273"}}),zt=new $({props:{name:"on_prediction_step",anchor:"transformers.TrainerCallback.on_prediction_step",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L279"}}),Bt=new $({props:{name:"on_save",anchor:"transformers.TrainerCallback.on_save",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L267"}}),Vt=new $({props:{name:"on_step_begin",anchor:"transformers.TrainerCallback.on_step_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L241"}}),qt=new $({props:{name:"on_step_end",anchor:"transformers.TrainerCallback.on_step_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L254"}}),Rt=new $({props:{name:"on_substep_end",anchor:"transformers.TrainerCallback.on_substep_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L248"}}),Ht=new $({props:{name:"on_train_begin",anchor:"transformers.TrainerCallback.on_train_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L217"}}),Ut=new $({props:{name:"on_train_end",anchor:"transformers.TrainerCallback.on_train_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L223"}}),Gt=new bh({props:{code:`class MyCallback(TrainerCallback): "A callback that prints a message at the beginning of training" def on_train_begin(self, args, state, control, **kwargs): print("Starting training") trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback] # We can either pass the callback class this way or an instance of it (MyCallback()) ),`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-string">&quot;A callback that prints a message at the beginning of training&quot;</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_train_begin</span>(<span class="hljs-params">self, args, state, control, 
**kwargs</span>): <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Starting training&quot;</span>) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback] <span class="hljs-comment"># We can either pass the callback class this way or an instance of it (MyCallback())</span> )`}}),Jt=new bh({props:{code:`trainer = Trainer(...) trainer.add_callback(MyCallback) # Alternatively, we can pass an instance of the callback class trainer.add_callback(MyCallback()),`,highlighted:`trainer = Trainer(...) trainer.add_callback(MyCallback) <span class="hljs-comment"># Alternatively, we can pass an instance of the callback class</span> trainer.add_callback(MyCallback())`}}),Yt=new $n({}),Xt=new $({props:{name:"class transformers.TrainerState",anchor:"transformers.TrainerState",parameters:[{name:"epoch",val:": typing.Optional[float] = None"},{name:"global_step",val:": int = 0"},{name:"max_steps",val:": int = 0"},{name:"num_train_epochs",val:": int = 0"},{name:"total_flos",val:": float = 0"},{name:"log_history",val:": typing.List[typing.Dict[str, float]] = None"},{name:"best_metric",val:": typing.Optional[float] = None"},{name:"best_model_checkpoint",val:": typing.Optional[str] = None"},{name:"is_local_process_zero",val:": bool = True"},{name:"is_world_process_zero",val:": bool = True"},{name:"is_hyper_param_search",val:": bool = False"},{name:"trial_name",val:": str = None"},{name:"trial_params",val:": typing.Dict[str, typing.Union[str, float, int, bool]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L36",parametersDescription:[{anchor:"transformers.TrainerState.epoch",description:`<strong>epoch</strong> (<code>float</code>, <em>optional</em>) &#x2014; Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed).`,name:"epoch"},{anchor:"transformers.TrainerState.global_step",description:`<strong>global_step</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; During training, represents the number of update steps completed.`,name:"global_step"},{anchor:"transformers.TrainerState.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The number of update steps to do during the current training.`,name:"max_steps"},{anchor:"transformers.TrainerState.total_flos",description:`<strong>total_flos</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow).`,name:"total_flos"},{anchor:"transformers.TrainerState.log_history",description:`<strong>log_history</strong> (<code>List[Dict[str, float]]</code>, <em>optional</em>) &#x2014; The list of logs done since the beginning of training.`,name:"log_history"},{anchor:"transformers.TrainerState.best_metric",description:`<strong>best_metric</strong> (<code>float</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the best metric encountered so far.`,name:"best_metric"},{anchor:"transformers.TrainerState.best_model_checkpoint",description:`<strong>best_model_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the name of the checkpoint for the best model encountered so 
far.`,name:"best_model_checkpoint"},{anchor:"transformers.TrainerState.is_local_process_zero",description:`<strong>is_local_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.`,name:"is_local_process_zero"},{anchor:"transformers.TrainerState.is_world_process_zero",description:`<strong>is_world_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one process).`,name:"is_world_process_zero"},{anchor:"transformers.TrainerState.is_hyper_param_search",description:`<strong>is_hyper_param_search</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard.`,name:"is_hyper_param_search"}]}}),qe=new Wd({props:{$$slots:{default:[zd]},$$scope:{ctx:nr}}}),Kt=new $({props:{name:"load_from_json",anchor:"transformers.TrainerState.load_from_json",parameters:[{name:"json_path",val:": str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L102"}}),Zt=new $({props:{name:"save_to_json",anchor:"transformers.TrainerState.save_to_json",parameters:[{name:"json_path",val:": str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L96"}}),ta=new $n({}),aa=new $({props:{name:"class transformers.TrainerControl",anchor:"transformers.TrainerControl",parameters:[{name:"should_training_stop",val:": bool = False"},{name:"should_epoch_stop",val:": bool = False"},{name:"should_save",val:": bool = False"},{name:"should_evaluate",val:": bool = False"},{name:"should_log",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_callback.py#L111",parametersDescription:[{anchor:"transformers.TrainerControl.should_training_stop",description:`<strong>should_training_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the training should be interrupted.</p> <p>If <code>True</code>, this variable will not be set back to <code>False</code>. 
The training will just stop.`,name:"should_training_stop"},{anchor:"transformers.TrainerControl.should_epoch_stop",description:`<strong>should_epoch_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the current epoch should be interrupted.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next epoch.`,name:"should_epoch_stop"},{anchor:"transformers.TrainerControl.should_save",description:`<strong>should_save</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be saved at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_save"},{anchor:"transformers.TrainerControl.should_evaluate",description:`<strong>should_evaluate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be evaluated at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_evaluate"},{anchor:"transformers.TrainerControl.should_log",description:`<strong>should_log</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the logs should be reported at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_log"}]}}),{c(){w=s("meta"),Y=c(),A=s("h1"),S=s("a"),Z=s("span"),v(M.$$.fragment),Qe=c(),ee=s("span"),te=r("Callbacks"),x=c(),q=s("p"),sa=r(`Callbacks are objects that can customize the behavior of the training loop in the PyTorch `),oa=s("a"),Fs=r("Trainer"),js=r(` (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping).`),Cn=c(),j=s("p"),Ns=r("Callbacks are \u201Cread only\u201D pieces of code, apart from the "),la=s("a"),Ws=r("TrainerControl"),zs=r(` object they return, they cannot change anything in the training loop. 
For customizations that require changes in the training loop, you should subclass `),ia=s("a"),Bs=r("Trainer"),Vs=r(" and override the methods you need (see "),ca=s("a"),qs=r("trainer"),Rs=r(" for examples)."),wn=c(),Ee=s("p"),Hs=r("By default a "),fa=s("a"),Us=r("Trainer"),Gs=r(" will use the following callbacks:"),yn=c(),D=s("ul"),ha=s("li"),ma=s("a"),Js=r("DefaultFlowCallback"),Ys=r(" which handles the default behavior for logging, saving and evaluation."),Xs=c(),X=s("li"),da=s("a"),Ks=r("PrinterCallback"),Qs=r(" or "),pa=s("a"),Zs=r("ProgressCallback"),eo=r(` to display progress and print the logs (the first one is used if you deactivate tqdm through the `),ga=s("a"),to=r("TrainingArguments"),ao=r(`, otherwise it\u2019s the second one).`),ro=c(),va=s("li"),ua=s("a"),no=r("TensorBoardCallback"),so=r(` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX).`),oo=c(),ke=s("li"),_a=s("a"),lo=r("WandbCallback"),io=r(" if "),Ze=s("a"),co=r("wandb"),fo=r(" is installed."),ho=c(),Te=s("li"),ba=s("a"),mo=r("CometCallback"),po=r(" if "),et=s("a"),go=r("comet_ml"),vo=r(" is installed."),uo=c(),$e=s("li"),Ea=s("a"),_o=r("MLflowCallback"),bo=r(" if "),tt=s("a"),Eo=r("mlflow"),ko=r(" is installed."),To=c(),Ce=s("li"),ka=s("a"),$o=r("AzureMLCallback"),Co=r(" if "),at=s("a"),wo=r("azureml-sdk"),yo=r(` is installed.`),An=c(),L=s("p"),Ao=r("The main class that implements callbacks is "),Ta=s("a"),Do=r("TrainerCallback"),Lo=r(`. It gets the `),$a=s("a"),So=r("TrainingArguments"),Po=r(" used to instantiate the "),Ca=s("a"),Oo=r("Trainer"),Io=r(`, can access that Trainer\u2019s internal state via `),wa=s("a"),Mo=r("TrainerState"),xo=r(`, and can take some actions on the training loop via `),ya=s("a"),Fo=r("TrainerControl"),jo=r("."),Dn=c(),ae=s("h2"),we=s("a"),sr=s("span"),v(rt.$$.fragment),No=c(),or=s("span"),Wo=r("Available Callbacks"),Ln=c(),ye=s("p"),zo=r("Here is the list of the available "),Aa=s("a"),Bo=r("TrainerCallback"),Vo=r(" in the library:"),Sn=c(),R=s("div"),v(nt.$$.fragment),qo=c(),re=s("p"),Ro=r("A "),Da=s("a"),Ho=r("TrainerCallback"),Uo=r(" that sends the logs to "),st=s("a"),Go=r("Comet ML"),Jo=r("."),Yo=c(),N=s("div"),v(ot.$$.fragment),Xo=c(),lr=s("p"),Ko=r("Setup the optional Comet.ml integration."),Qo=c(),C=s("p"),Zo=r(`Environment: COMET_MODE (`),ir=s("code"),el=r("str"),tl=r(", "),cr=s("em"),al=r("optional"),rl=r(`): Whether to create an online, offline experiment or disable Comet logging. Can be \u201COFFLINE\u201D, \u201CONLINE\u201D, or \u201CDISABLED\u201D. Defaults to \u201CONLINE\u201D. COMET_PROJECT_NAME (`),fr=s("code"),nl=r("str"),sl=r(", "),hr=s("em"),ol=r("optional"),ll=r(`): Comet project name for experiments COMET_OFFLINE_DIRECTORY (`),mr=s("code"),il=r("str"),cl=r(", "),dr=s("em"),fl=r("optional"),hl=r(`): Folder to use for saving offline experiments when `),pr=s("code"),ml=r("COMET_MODE"),dl=r(` is \u201COFFLINE\u201D COMET_LOG_ASSETS (`),gr=s("code"),pl=r("str"),gl=r(", "),vr=s("em"),vl=r("optional"),ul=r(`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be \u201CTRUE\u201D, or \u201CFALSE\u201D. 
Defaults to \u201CTRUE\u201D.`),_l=c(),lt=s("p"),bl=r("For a number of configurable items in the environment, see "),it=s("a"),El=r("here"),kl=r("."),Pn=c(),ne=s("div"),v(ct.$$.fragment),Tl=c(),ft=s("p"),$l=r("A "),La=s("a"),Cl=r("TrainerCallback"),wl=r(` that handles the default flow of the training loop for logs, evaluation and checkpoints.`),On=c(),se=s("div"),v(ht.$$.fragment),yl=c(),mt=s("p"),Al=r("A bare "),Sa=s("a"),Dl=r("TrainerCallback"),Ll=r(" that just prints the logs."),In=c(),oe=s("div"),v(dt.$$.fragment),Sl=c(),pt=s("p"),Pl=r("A "),Pa=s("a"),Ol=r("TrainerCallback"),Il=r(" that displays the progress of training or evaluation."),Mn=c(),H=s("div"),v(gt.$$.fragment),Ml=c(),vt=s("p"),xl=r("A "),Oa=s("a"),Fl=r("TrainerCallback"),jl=r(" that handles early stopping."),Nl=c(),U=s("p"),Wl=r("This callback depends on "),Ia=s("a"),zl=r("TrainingArguments"),Bl=r(" argument "),ur=s("em"),Vl=r("load_best_model_at_end"),ql=r(` functionality to set best_metric in `),Ma=s("a"),Rl=r("TrainerState"),Hl=r("."),xn=c(),le=s("div"),v(ut.$$.fragment),Ul=c(),ie=s("p"),Gl=r("A "),xa=s("a"),Jl=r("TrainerCallback"),Yl=r(" that sends the logs to "),_t=s("a"),Xl=r("TensorBoard"),Kl=r("."),Fn=c(),G=s("div"),v(bt.$$.fragment),Ql=c(),ce=s("p"),Zl=r("A "),Fa=s("a"),ei=r("TrainerCallback"),ti=r(" that sends the logs to "),Et=s("a"),ai=r("Weight and Biases"),ri=r("."),ni=c(),W=s("div"),v(kt.$$.fragment),si=c(),Tt=s("p"),oi=r("Setup the optional Weights & Biases ("),_r=s("em"),li=r("wandb"),ii=r(") integration."),ci=c(),$t=s("p"),fi=r("One can subclass and override this method to customize the setup if needed. Find more information "),Ct=s("a"),hi=r("here"),mi=r(". You can also override the following environment variables:"),di=c(),d=s("p"),pi=r(`Environment: WANDB_LOG_MODEL (`),br=s("code"),gi=r("bool"),vi=r(", "),Er=s("em"),ui=r("optional"),_i=r(", defaults to "),kr=s("code"),bi=r("False"),Ei=r(`): Whether or not to log model as artifact at the end of training. Use along with `),Tr=s("em"),ki=r("TrainingArguments.load_best_model_at_end"),Ti=r(` to upload best model. WANDB_WATCH (`),$r=s("code"),$i=r("str"),Ci=r(", "),Cr=s("em"),wi=r("optional"),yi=r(" defaults to "),wr=s("code"),Ai=r('"gradients"'),Di=r(`): Can be `),yr=s("code"),Li=r('"gradients"'),Si=r(", "),Ar=s("code"),Pi=r('"all"'),Oi=r(" or "),Dr=s("code"),Ii=r('"false"'),Mi=r(". Set to "),Lr=s("code"),xi=r('"false"'),Fi=r(` to disable gradient logging or `),Sr=s("code"),ji=r('"all"'),Ni=r(` to log gradients and parameters. WANDB_PROJECT (`),Pr=s("code"),Wi=r("str"),zi=r(", "),Or=s("em"),Bi=r("optional"),Vi=r(", defaults to "),Ir=s("code"),qi=r('"huggingface"'),Ri=r(`): Set this to a custom string to store results in a different project. WANDB_DISABLED (`),Mr=s("code"),Hi=r("bool"),Ui=r(", "),xr=s("em"),Gi=r("optional"),Ji=r(", defaults to "),Fr=s("code"),Yi=r("False"),Xi=r(`): Whether or not to disable wandb entirely. Set `),jr=s("em"),Ki=r("WANDB_DISABLED=true"),Qi=r(" to disable."),jn=c(),J=s("div"),v(wt.$$.fragment),Zi=c(),fe=s("p"),ec=r("A "),ja=s("a"),tc=r("TrainerCallback"),ac=r(" that sends the logs to "),yt=s("a"),rc=r("MLflow"),nc=r("."),sc=c(),z=s("div"),v(At.$$.fragment),oc=c(),Nr=s("p"),lc=r("Setup the optional MLflow integration."),ic=c(),he=s("p"),cc=r(`Environment: HF_MLFLOW_LOG_ARTIFACTS (`),Wr=s("code"),fc=r("str"),hc=r(", "),zr=s("em"),mc=r("optional"),dc=r(`): Whether to use MLflow .log_artifact() facility to log artifacts.`),pc=c(),F=s("p"),gc=r("This only makes sense if logging to a remote server, e.g. s3 or GCS. 
If set to "),Br=s("em"),vc=r("True"),uc=r(" or "),Vr=s("em"),_c=r("1"),bc=r(`, will copy whatever is in `),Na=s("a"),Ec=r("TrainingArguments"),kc=r("\u2019s "),qr=s("code"),Tc=r("output_dir"),$c=r(` to the local or remote artifact storage. Using it without a remote storage will just copy the files to your artifact location.`),Nn=c(),me=s("div"),v(Dt.$$.fragment),Cc=c(),de=s("p"),wc=r("A "),Wa=s("a"),yc=r("TrainerCallback"),Ac=r(" that sends the logs to "),Lt=s("a"),Dc=r("AzureML"),Lc=r("."),Wn=c(),pe=s("h2"),Ae=s("a"),Rr=s("span"),v(St.$$.fragment),Sc=c(),Hr=s("span"),Pc=r("TrainerCallback"),zn=c(),p=s("div"),v(Pt.$$.fragment),Oc=c(),Ur=s("p"),Ic=r(`A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available:`),Mc=c(),Ot=s("p"),xc=r("The "),Gr=s("code"),Fc=r("control"),jc=r(` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.`),Nc=c(),P=s("p"),Wc=r("The argument "),Jr=s("code"),zc=r("args"),Bc=r(", "),Yr=s("code"),Vc=r("state"),qc=r(" and "),Xr=s("code"),Rc=r("control"),Hc=r(` are positionals for all events, all the others are grouped in `),Kr=s("code"),Uc=r("kwargs"),Gc=r(`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple `),Qr=s("code"),Jc=r("PrinterCallback"),Yc=r("."),Xc=c(),Zr=s("p"),Kc=r("Example:"),Qc=c(),v(It.$$.fragment),Zc=c(),De=s("div"),v(Mt.$$.fragment),ef=c(),en=s("p"),tf=r("Event called at the beginning of an epoch."),af=c(),Le=s("div"),v(xt.$$.fragment),rf=c(),tn=s("p"),nf=r("Event called at the end of an epoch."),sf=c(),Se=s("div"),v(Ft.$$.fragment),of=c(),an=s("p"),lf=r("Event called after an evaluation phase."),cf=c(),Pe=s("div"),v(jt.$$.fragment),ff=c(),Nt=s("p"),hf=r("Event called at the end of the initialization of the "),za=s("a"),mf=r("Trainer"),df=r("."),pf=c(),Oe=s("div"),v(Wt.$$.fragment),gf=c(),rn=s("p"),vf=r("Event called after logging the last logs."),uf=c(),Ie=s("div"),v(zt.$$.fragment),_f=c(),nn=s("p"),bf=r("Event called after a prediction step."),Ef=c(),Me=s("div"),v(Bt.$$.fragment),kf=c(),sn=s("p"),Tf=r("Event called after a checkpoint save."),$f=c(),xe=s("div"),v(Vt.$$.fragment),Cf=c(),on=s("p"),wf=r(`Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs.`),yf=c(),Fe=s("div"),v(qt.$$.fragment),Af=c(),ln=s("p"),Df=r(`Event called at the end of a training step. 
If using gradient accumulation, one training step might take several inputs.`),Lf=c(),je=s("div"),v(Rt.$$.fragment),Sf=c(),cn=s("p"),Pf=r("Event called at the end of an substep during gradient accumulation."),Of=c(),Ne=s("div"),v(Ht.$$.fragment),If=c(),fn=s("p"),Mf=r("Event called at the beginning of training."),xf=c(),We=s("div"),v(Ut.$$.fragment),Ff=c(),hn=s("p"),jf=r("Event called at the end of training."),Bn=c(),ze=s("p"),Nf=r("Here is an example of how to register a custom callback with the PyTorch "),Ba=s("a"),Wf=r("Trainer"),zf=r(":"),Vn=c(),v(Gt.$$.fragment),qn=c(),Be=s("p"),Bf=r("Another way to register a callback is to call "),mn=s("code"),Vf=r("trainer.add_callback()"),qf=r(" as follows:"),Rn=c(),v(Jt.$$.fragment),Hn=c(),ge=s("h2"),Ve=s("a"),dn=s("span"),v(Yt.$$.fragment),Rf=c(),pn=s("span"),Hf=r("TrainerState"),Un=c(),O=s("div"),v(Xt.$$.fragment),Uf=c(),ve=s("p"),Gf=r("A class containing the "),Va=s("a"),Jf=r("Trainer"),Yf=r(` inner state that will be saved along the model and optimizer when checkpointing and passed to the `),qa=s("a"),Xf=r("TrainerCallback"),Kf=r("."),Qf=c(),v(qe.$$.fragment),Zf=c(),Re=s("div"),v(Kt.$$.fragment),eh=c(),Qt=s("p"),th=r("Create an instance from the content of "),gn=s("code"),ah=r("json_path"),rh=r("."),nh=c(),He=s("div"),v(Zt.$$.fragment),sh=c(),ea=s("p"),oh=r("Save the content of this instance in JSON format inside "),vn=s("code"),lh=r("json_path"),ih=r("."),Gn=c(),ue=s("h2"),Ue=s("a"),un=s("span"),v(ta.$$.fragment),ch=c(),_n=s("span"),fh=r("TrainerControl"),Jn=c(),_e=s("div"),v(aa.$$.fragment),hh=c(),be=s("p"),mh=r("A class that handles the "),Ra=s("a"),dh=r("Trainer"),ph=r(` control flow. This class is used by the `),Ha=s("a"),gh=r("TrainerCallback"),vh=r(" to activate some switches in the training loop."),this.h()},l(a){const h=Nd('[data-svelte="svelte-1phssyn"]',document.head);w=o(h,"META",{name:!0,content:!0}),h.forEach(t),Y=f(a),A=o(a,"H1",{class:!0});var ra=l(A);S=o(ra,"A",{id:!0,class:!0,href:!0});var Eh=l(S);Z=o(Eh,"SPAN",{});var kh=l(Z);u(M.$$.fragment,kh),kh.forEach(t),Eh.forEach(t),Qe=f(ra),ee=o(ra,"SPAN",{});var Th=l(ee);te=n(Th,"Callbacks"),Th.forEach(t),ra.forEach(t),x=f(a),q=o(a,"P",{});var Xn=l(q);sa=n(Xn,`Callbacks are objects that can customize the behavior of the training loop in the PyTorch `),oa=o(Xn,"A",{href:!0});var $h=l(oa);Fs=n($h,"Trainer"),$h.forEach(t),js=n(Xn,` (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping).`),Xn.forEach(t),Cn=f(a),j=o(a,"P",{});var Ge=l(j);Ns=n(Ge,"Callbacks are \u201Cread only\u201D pieces of code, apart from the "),la=o(Ge,"A",{href:!0});var Ch=l(la);Ws=n(Ch,"TrainerControl"),Ch.forEach(t),zs=n(Ge,` object they return, they cannot change anything in the training loop. 
For customizations that require changes in the training loop, you should subclass `),ia=o(Ge,"A",{href:!0});var wh=l(ia);Bs=n(wh,"Trainer"),wh.forEach(t),Vs=n(Ge," and override the methods you need (see "),ca=o(Ge,"A",{href:!0});var yh=l(ca);qs=n(yh,"trainer"),yh.forEach(t),Rs=n(Ge," for examples)."),Ge.forEach(t),wn=f(a),Ee=o(a,"P",{});var Kn=l(Ee);Hs=n(Kn,"By default a "),fa=o(Kn,"A",{href:!0});var Ah=l(fa);Us=n(Ah,"Trainer"),Ah.forEach(t),Gs=n(Kn," will use the following callbacks:"),Kn.forEach(t),yn=f(a),D=o(a,"UL",{});var I=l(D);ha=o(I,"LI",{});var uh=l(ha);ma=o(uh,"A",{href:!0});var Dh=l(ma);Js=n(Dh,"DefaultFlowCallback"),Dh.forEach(t),Ys=n(uh," which handles the default behavior for logging, saving and evaluation."),uh.forEach(t),Xs=f(I),X=o(I,"LI",{});var na=l(X);da=o(na,"A",{href:!0});var Lh=l(da);Ks=n(Lh,"PrinterCallback"),Lh.forEach(t),Qs=n(na," or "),pa=o(na,"A",{href:!0});var Sh=l(pa);Zs=n(Sh,"ProgressCallback"),Sh.forEach(t),eo=n(na,` to display progress and print the logs (the first one is used if you deactivate tqdm through the `),ga=o(na,"A",{href:!0});var Ph=l(ga);to=n(Ph,"TrainingArguments"),Ph.forEach(t),ao=n(na,`, otherwise it\u2019s the second one).`),na.forEach(t),ro=f(I),va=o(I,"LI",{});var _h=l(va);ua=o(_h,"A",{href:!0});var Oh=l(ua);no=n(Oh,"TensorBoardCallback"),Oh.forEach(t),so=n(_h,` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX).`),_h.forEach(t),oo=f(I),ke=o(I,"LI",{});var bn=l(ke);_a=o(bn,"A",{href:!0});var Ih=l(_a);lo=n(Ih,"WandbCallback"),Ih.forEach(t),io=n(bn," if "),Ze=o(bn,"A",{href:!0,rel:!0});var Mh=l(Ze);co=n(Mh,"wandb"),Mh.forEach(t),fo=n(bn," is installed."),bn.forEach(t),ho=f(I),Te=o(I,"LI",{});var En=l(Te);ba=o(En,"A",{href:!0});var xh=l(ba);mo=n(xh,"CometCallback"),xh.forEach(t),po=n(En," if "),et=o(En,"A",{href:!0,rel:!0});var Fh=l(et);go=n(Fh,"comet_ml"),Fh.forEach(t),vo=n(En," is installed."),En.forEach(t),uo=f(I),$e=o(I,"LI",{});var kn=l($e);Ea=o(kn,"A",{href:!0});var jh=l(Ea);_o=n(jh,"MLflowCallback"),jh.forEach(t),bo=n(kn," if "),tt=o(kn,"A",{href:!0,rel:!0});var Nh=l(tt);Eo=n(Nh,"mlflow"),Nh.forEach(t),ko=n(kn," is installed."),kn.forEach(t),To=f(I),Ce=o(I,"LI",{});var Tn=l(Ce);ka=o(Tn,"A",{href:!0});var Wh=l(ka);$o=n(Wh,"AzureMLCallback"),Wh.forEach(t),Co=n(Tn," if "),at=o(Tn,"A",{href:!0,rel:!0});var zh=l(at);wo=n(zh,"azureml-sdk"),zh.forEach(t),yo=n(Tn,` is installed.`),Tn.forEach(t),I.forEach(t),An=f(a),L=o(a,"P",{});var B=l(L);Ao=n(B,"The main class that implements callbacks is "),Ta=o(B,"A",{href:!0});var Bh=l(Ta);Do=n(Bh,"TrainerCallback"),Bh.forEach(t),Lo=n(B,`. 
It gets the `),$a=o(B,"A",{href:!0});var Vh=l($a);So=n(Vh,"TrainingArguments"),Vh.forEach(t),Po=n(B," used to instantiate the "),Ca=o(B,"A",{href:!0});var qh=l(Ca);Oo=n(qh,"Trainer"),qh.forEach(t),Io=n(B,`, can access that Trainer\u2019s internal state via `),wa=o(B,"A",{href:!0});var Rh=l(wa);Mo=n(Rh,"TrainerState"),Rh.forEach(t),xo=n(B,`, and can take some actions on the training loop via `),ya=o(B,"A",{href:!0});var Hh=l(ya);Fo=n(Hh,"TrainerControl"),Hh.forEach(t),jo=n(B,"."),B.forEach(t),Dn=f(a),ae=o(a,"H2",{class:!0});var Qn=l(ae);we=o(Qn,"A",{id:!0,class:!0,href:!0});var Uh=l(we);sr=o(Uh,"SPAN",{});var Gh=l(sr);u(rt.$$.fragment,Gh),Gh.forEach(t),Uh.forEach(t),No=f(Qn),or=o(Qn,"SPAN",{});var Jh=l(or);Wo=n(Jh,"Available Callbacks"),Jh.forEach(t),Qn.forEach(t),Ln=f(a),ye=o(a,"P",{});var Zn=l(ye);zo=n(Zn,"Here is the list of the available "),Aa=o(Zn,"A",{href:!0});var Yh=l(Aa);Bo=n(Yh,"TrainerCallback"),Yh.forEach(t),Vo=n(Zn," in the library:"),Zn.forEach(t),Sn=f(a),R=o(a,"DIV",{class:!0});var Ua=l(R);u(nt.$$.fragment,Ua),qo=f(Ua),re=o(Ua,"P",{});var Ga=l(re);Ro=n(Ga,"A "),Da=o(Ga,"A",{href:!0});var Xh=l(Da);Ho=n(Xh,"TrainerCallback"),Xh.forEach(t),Uo=n(Ga," that sends the logs to "),st=o(Ga,"A",{href:!0,rel:!0});var Kh=l(st);Go=n(Kh,"Comet ML"),Kh.forEach(t),Jo=n(Ga,"."),Ga.forEach(t),Yo=f(Ua),N=o(Ua,"DIV",{class:!0});var Je=l(N);u(ot.$$.fragment,Je),Xo=f(Je),lr=o(Je,"P",{});var Qh=l(lr);Ko=n(Qh,"Setup the optional Comet.ml integration."),Qh.forEach(t),Qo=f(Je),C=o(Je,"P",{});var y=l(C);Zo=n(y,`Environment: COMET_MODE (`),ir=o(y,"CODE",{});var Zh=l(ir);el=n(Zh,"str"),Zh.forEach(t),tl=n(y,", "),cr=o(y,"EM",{});var em=l(cr);al=n(em,"optional"),em.forEach(t),rl=n(y,`): Whether to create an online, offline experiment or disable Comet logging. Can be \u201COFFLINE\u201D, \u201CONLINE\u201D, or \u201CDISABLED\u201D. Defaults to \u201CONLINE\u201D. COMET_PROJECT_NAME (`),fr=o(y,"CODE",{});var tm=l(fr);nl=n(tm,"str"),tm.forEach(t),sl=n(y,", "),hr=o(y,"EM",{});var am=l(hr);ol=n(am,"optional"),am.forEach(t),ll=n(y,`): Comet project name for experiments COMET_OFFLINE_DIRECTORY (`),mr=o(y,"CODE",{});var rm=l(mr);il=n(rm,"str"),rm.forEach(t),cl=n(y,", "),dr=o(y,"EM",{});var nm=l(dr);fl=n(nm,"optional"),nm.forEach(t),hl=n(y,`): Folder to use for saving offline experiments when `),pr=o(y,"CODE",{});var sm=l(pr);ml=n(sm,"COMET_MODE"),sm.forEach(t),dl=n(y,` is \u201COFFLINE\u201D COMET_LOG_ASSETS (`),gr=o(y,"CODE",{});var om=l(gr);pl=n(om,"str"),om.forEach(t),gl=n(y,", "),vr=o(y,"EM",{});var lm=l(vr);vl=n(lm,"optional"),lm.forEach(t),ul=n(y,`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be \u201CTRUE\u201D, or \u201CFALSE\u201D. 
Defaults to \u201CTRUE\u201D.`),y.forEach(t),_l=f(Je),lt=o(Je,"P",{});var es=l(lt);bl=n(es,"For a number of configurable items in the environment, see "),it=o(es,"A",{href:!0,rel:!0});var im=l(it);El=n(im,"here"),im.forEach(t),kl=n(es,"."),es.forEach(t),Je.forEach(t),Ua.forEach(t),Pn=f(a),ne=o(a,"DIV",{class:!0});var ts=l(ne);u(ct.$$.fragment,ts),Tl=f(ts),ft=o(ts,"P",{});var as=l(ft);$l=n(as,"A "),La=o(as,"A",{href:!0});var cm=l(La);Cl=n(cm,"TrainerCallback"),cm.forEach(t),wl=n(as,` that handles the default flow of the training loop for logs, evaluation and checkpoints.`),as.forEach(t),ts.forEach(t),On=f(a),se=o(a,"DIV",{class:!0});var rs=l(se);u(ht.$$.fragment,rs),yl=f(rs),mt=o(rs,"P",{});var ns=l(mt);Al=n(ns,"A bare "),Sa=o(ns,"A",{href:!0});var fm=l(Sa);Dl=n(fm,"TrainerCallback"),fm.forEach(t),Ll=n(ns," that just prints the logs."),ns.forEach(t),rs.forEach(t),In=f(a),oe=o(a,"DIV",{class:!0});var ss=l(oe);u(dt.$$.fragment,ss),Sl=f(ss),pt=o(ss,"P",{});var os=l(pt);Pl=n(os,"A "),Pa=o(os,"A",{href:!0});var hm=l(Pa);Ol=n(hm,"TrainerCallback"),hm.forEach(t),Il=n(os," that displays the progress of training or evaluation."),os.forEach(t),ss.forEach(t),Mn=f(a),H=o(a,"DIV",{class:!0});var Ja=l(H);u(gt.$$.fragment,Ja),Ml=f(Ja),vt=o(Ja,"P",{});var ls=l(vt);xl=n(ls,"A "),Oa=o(ls,"A",{href:!0});var mm=l(Oa);Fl=n(mm,"TrainerCallback"),mm.forEach(t),jl=n(ls," that handles early stopping."),ls.forEach(t),Nl=f(Ja),U=o(Ja,"P",{});var Ye=l(U);Wl=n(Ye,"This callback depends on "),Ia=o(Ye,"A",{href:!0});var dm=l(Ia);zl=n(dm,"TrainingArguments"),dm.forEach(t),Bl=n(Ye," argument "),ur=o(Ye,"EM",{});var pm=l(ur);Vl=n(pm,"load_best_model_at_end"),pm.forEach(t),ql=n(Ye,` functionality to set best_metric in `),Ma=o(Ye,"A",{href:!0});var gm=l(Ma);Rl=n(gm,"TrainerState"),gm.forEach(t),Hl=n(Ye,"."),Ye.forEach(t),Ja.forEach(t),xn=f(a),le=o(a,"DIV",{class:!0});var is=l(le);u(ut.$$.fragment,is),Ul=f(is),ie=o(is,"P",{});var Ya=l(ie);Gl=n(Ya,"A "),xa=o(Ya,"A",{href:!0});var vm=l(xa);Jl=n(vm,"TrainerCallback"),vm.forEach(t),Yl=n(Ya," that sends the logs to "),_t=o(Ya,"A",{href:!0,rel:!0});var um=l(_t);Xl=n(um,"TensorBoard"),um.forEach(t),Kl=n(Ya,"."),Ya.forEach(t),is.forEach(t),Fn=f(a),G=o(a,"DIV",{class:!0});var Xa=l(G);u(bt.$$.fragment,Xa),Ql=f(Xa),ce=o(Xa,"P",{});var Ka=l(ce);Zl=n(Ka,"A "),Fa=o(Ka,"A",{href:!0});var _m=l(Fa);ei=n(_m,"TrainerCallback"),_m.forEach(t),ti=n(Ka," that sends the logs to "),Et=o(Ka,"A",{href:!0,rel:!0});var bm=l(Et);ai=n(bm,"Weight and Biases"),bm.forEach(t),ri=n(Ka,"."),Ka.forEach(t),ni=f(Xa),W=o(Xa,"DIV",{class:!0});var Xe=l(W);u(kt.$$.fragment,Xe),si=f(Xe),Tt=o(Xe,"P",{});var cs=l(Tt);oi=n(cs,"Setup the optional Weights & Biases ("),_r=o(cs,"EM",{});var Em=l(_r);li=n(Em,"wandb"),Em.forEach(t),ii=n(cs,") integration."),cs.forEach(t),ci=f(Xe),$t=o(Xe,"P",{});var fs=l($t);fi=n(fs,"One can subclass and override this method to customize the setup if needed. Find more information "),Ct=o(fs,"A",{href:!0,rel:!0});var km=l(Ct);hi=n(km,"here"),km.forEach(t),mi=n(fs,". You can also override the following environment variables:"),fs.forEach(t),di=f(Xe),d=o(Xe,"P",{});var g=l(d);pi=n(g,`Environment: WANDB_LOG_MODEL (`),br=o(g,"CODE",{});var Tm=l(br);gi=n(Tm,"bool"),Tm.forEach(t),vi=n(g,", "),Er=o(g,"EM",{});var $m=l(Er);ui=n($m,"optional"),$m.forEach(t),_i=n(g,", defaults to "),kr=o(g,"CODE",{});var Cm=l(kr);bi=n(Cm,"False"),Cm.forEach(t),Ei=n(g,`): Whether or not to log model as artifact at the end of training. 
Use along with `),Tr=o(g,"EM",{});var wm=l(Tr);ki=n(wm,"TrainingArguments.load_best_model_at_end"),wm.forEach(t),Ti=n(g,` to upload best model. WANDB_WATCH (`),$r=o(g,"CODE",{});var ym=l($r);$i=n(ym,"str"),ym.forEach(t),Ci=n(g,", "),Cr=o(g,"EM",{});var Am=l(Cr);wi=n(Am,"optional"),Am.forEach(t),yi=n(g," defaults to "),wr=o(g,"CODE",{});var Dm=l(wr);Ai=n(Dm,'"gradients"'),Dm.forEach(t),Di=n(g,`): Can be `),yr=o(g,"CODE",{});var Lm=l(yr);Li=n(Lm,'"gradients"'),Lm.forEach(t),Si=n(g,", "),Ar=o(g,"CODE",{});var Sm=l(Ar);Pi=n(Sm,'"all"'),Sm.forEach(t),Oi=n(g," or "),Dr=o(g,"CODE",{});var Pm=l(Dr);Ii=n(Pm,'"false"'),Pm.forEach(t),Mi=n(g,". Set to "),Lr=o(g,"CODE",{});var Om=l(Lr);xi=n(Om,'"false"'),Om.forEach(t),Fi=n(g,` to disable gradient logging or `),Sr=o(g,"CODE",{});var Im=l(Sr);ji=n(Im,'"all"'),Im.forEach(t),Ni=n(g,` to log gradients and parameters. WANDB_PROJECT (`),Pr=o(g,"CODE",{});var Mm=l(Pr);Wi=n(Mm,"str"),Mm.forEach(t),zi=n(g,", "),Or=o(g,"EM",{});var xm=l(Or);Bi=n(xm,"optional"),xm.forEach(t),Vi=n(g,", defaults to "),Ir=o(g,"CODE",{});var Fm=l(Ir);qi=n(Fm,'"huggingface"'),Fm.forEach(t),Ri=n(g,`): Set this to a custom string to store results in a different project. WANDB_DISABLED (`),Mr=o(g,"CODE",{});var jm=l(Mr);Hi=n(jm,"bool"),jm.forEach(t),Ui=n(g,", "),xr=o(g,"EM",{});var Nm=l(xr);Gi=n(Nm,"optional"),Nm.forEach(t),Ji=n(g,", defaults to "),Fr=o(g,"CODE",{});var Wm=l(Fr);Yi=n(Wm,"False"),Wm.forEach(t),Xi=n(g,`): Whether or not to disable wandb entirely. Set `),jr=o(g,"EM",{});var zm=l(jr);Ki=n(zm,"WANDB_DISABLED=true"),zm.forEach(t),Qi=n(g," to disable."),g.forEach(t),Xe.forEach(t),Xa.forEach(t),jn=f(a),J=o(a,"DIV",{class:!0});var Qa=l(J);u(wt.$$.fragment,Qa),Zi=f(Qa),fe=o(Qa,"P",{});var Za=l(fe);ec=n(Za,"A "),ja=o(Za,"A",{href:!0});var Bm=l(ja);tc=n(Bm,"TrainerCallback"),Bm.forEach(t),ac=n(Za," that sends the logs to "),yt=o(Za,"A",{href:!0,rel:!0});var Vm=l(yt);rc=n(Vm,"MLflow"),Vm.forEach(t),nc=n(Za,"."),Za.forEach(t),sc=f(Qa),z=o(Qa,"DIV",{class:!0});var Ke=l(z);u(At.$$.fragment,Ke),oc=f(Ke),Nr=o(Ke,"P",{});var qm=l(Nr);lc=n(qm,"Setup the optional MLflow integration."),qm.forEach(t),ic=f(Ke),he=o(Ke,"P",{});var er=l(he);cc=n(er,`Environment: HF_MLFLOW_LOG_ARTIFACTS (`),Wr=o(er,"CODE",{});var Rm=l(Wr);fc=n(Rm,"str"),Rm.forEach(t),hc=n(er,", "),zr=o(er,"EM",{});var Hm=l(zr);mc=n(Hm,"optional"),Hm.forEach(t),dc=n(er,`): Whether to use MLflow .log_artifact() facility to log artifacts.`),er.forEach(t),pc=f(Ke),F=o(Ke,"P",{});var K=l(F);gc=n(K,"This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to "),Br=o(K,"EM",{});var Um=l(Br);vc=n(Um,"True"),Um.forEach(t),uc=n(K," or "),Vr=o(K,"EM",{});var Gm=l(Vr);_c=n(Gm,"1"),Gm.forEach(t),bc=n(K,`, will copy whatever is in `),Na=o(K,"A",{href:!0});var Jm=l(Na);Ec=n(Jm,"TrainingArguments"),Jm.forEach(t),kc=n(K,"\u2019s "),qr=o(K,"CODE",{});var Ym=l(qr);Tc=n(Ym,"output_dir"),Ym.forEach(t),$c=n(K,` to the local or remote artifact storage. 
Using it without a remote storage will just copy the files to your artifact location.`),K.forEach(t),Ke.forEach(t),Qa.forEach(t),Nn=f(a),me=o(a,"DIV",{class:!0});var hs=l(me);u(Dt.$$.fragment,hs),Cc=f(hs),de=o(hs,"P",{});var tr=l(de);wc=n(tr,"A "),Wa=o(tr,"A",{href:!0});var Xm=l(Wa);yc=n(Xm,"TrainerCallback"),Xm.forEach(t),Ac=n(tr," that sends the logs to "),Lt=o(tr,"A",{href:!0,rel:!0});var Km=l(Lt);Dc=n(Km,"AzureML"),Km.forEach(t),Lc=n(tr,"."),tr.forEach(t),hs.forEach(t),Wn=f(a),pe=o(a,"H2",{class:!0});var ms=l(pe);Ae=o(ms,"A",{id:!0,class:!0,href:!0});var Qm=l(Ae);Rr=o(Qm,"SPAN",{});var Zm=l(Rr);u(St.$$.fragment,Zm),Zm.forEach(t),Qm.forEach(t),Sc=f(ms),Hr=o(ms,"SPAN",{});var ed=l(Hr);Pc=n(ed,"TrainerCallback"),ed.forEach(t),ms.forEach(t),zn=f(a),p=o(a,"DIV",{class:!0});var T=l(p);u(Pt.$$.fragment,T),Oc=f(T),Ur=o(T,"P",{});var td=l(Ur);Ic=n(td,`A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available:`),td.forEach(t),Mc=f(T),Ot=o(T,"P",{});var ds=l(Ot);xc=n(ds,"The "),Gr=o(ds,"CODE",{});var ad=l(Gr);Fc=n(ad,"control"),ad.forEach(t),jc=n(ds,` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.`),ds.forEach(t),Nc=f(T),P=o(T,"P",{});var V=l(P);Wc=n(V,"The argument "),Jr=o(V,"CODE",{});var rd=l(Jr);zc=n(rd,"args"),rd.forEach(t),Bc=n(V,", "),Yr=o(V,"CODE",{});var nd=l(Yr);Vc=n(nd,"state"),nd.forEach(t),qc=n(V," and "),Xr=o(V,"CODE",{});var sd=l(Xr);Rc=n(sd,"control"),sd.forEach(t),Hc=n(V,` are positionals for all events, all the others are grouped in `),Kr=o(V,"CODE",{});var od=l(Kr);Uc=n(od,"kwargs"),od.forEach(t),Gc=n(V,`. You can unpack the ones you need in the signature of the event using them. 
As an example, see the code of the simple `),Qr=o(V,"CODE",{});var ld=l(Qr);Jc=n(ld,"PrinterCallback"),ld.forEach(t),Yc=n(V,"."),V.forEach(t),Xc=f(T),Zr=o(T,"P",{});var id=l(Zr);Kc=n(id,"Example:"),id.forEach(t),Qc=f(T),u(It.$$.fragment,T),Zc=f(T),De=o(T,"DIV",{class:!0});var ps=l(De);u(Mt.$$.fragment,ps),ef=f(ps),en=o(ps,"P",{});var cd=l(en);tf=n(cd,"Event called at the beginning of an epoch."),cd.forEach(t),ps.forEach(t),af=f(T),Le=o(T,"DIV",{class:!0});var gs=l(Le);u(xt.$$.fragment,gs),rf=f(gs),tn=o(gs,"P",{});var fd=l(tn);nf=n(fd,"Event called at the end of an epoch."),fd.forEach(t),gs.forEach(t),sf=f(T),Se=o(T,"DIV",{class:!0});var vs=l(Se);u(Ft.$$.fragment,vs),of=f(vs),an=o(vs,"P",{});var hd=l(an);lf=n(hd,"Event called after an evaluation phase."),hd.forEach(t),vs.forEach(t),cf=f(T),Pe=o(T,"DIV",{class:!0});var us=l(Pe);u(jt.$$.fragment,us),ff=f(us),Nt=o(us,"P",{});var _s=l(Nt);hf=n(_s,"Event called at the end of the initialization of the "),za=o(_s,"A",{href:!0});var md=l(za);mf=n(md,"Trainer"),md.forEach(t),df=n(_s,"."),_s.forEach(t),us.forEach(t),pf=f(T),Oe=o(T,"DIV",{class:!0});var bs=l(Oe);u(Wt.$$.fragment,bs),gf=f(bs),rn=o(bs,"P",{});var dd=l(rn);vf=n(dd,"Event called after logging the last logs."),dd.forEach(t),bs.forEach(t),uf=f(T),Ie=o(T,"DIV",{class:!0});var Es=l(Ie);u(zt.$$.fragment,Es),_f=f(Es),nn=o(Es,"P",{});var pd=l(nn);bf=n(pd,"Event called after a prediction step."),pd.forEach(t),Es.forEach(t),Ef=f(T),Me=o(T,"DIV",{class:!0});var ks=l(Me);u(Bt.$$.fragment,ks),kf=f(ks),sn=o(ks,"P",{});var gd=l(sn);Tf=n(gd,"Event called after a checkpoint save."),gd.forEach(t),ks.forEach(t),$f=f(T),xe=o(T,"DIV",{class:!0});var Ts=l(xe);u(Vt.$$.fragment,Ts),Cf=f(Ts),on=o(Ts,"P",{});var vd=l(on);wf=n(vd,`Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs.`),vd.forEach(t),Ts.forEach(t),yf=f(T),Fe=o(T,"DIV",{class:!0});var $s=l(Fe);u(qt.$$.fragment,$s),Af=f($s),ln=o($s,"P",{});var ud=l(ln);Df=n(ud,`Event called at the end of a training step. 
If using gradient accumulation, one training step might take several inputs.`),ud.forEach(t),$s.forEach(t),Lf=f(T),je=o(T,"DIV",{class:!0});var Cs=l(je);u(Rt.$$.fragment,Cs),Sf=f(Cs),cn=o(Cs,"P",{});var _d=l(cn);Pf=n(_d,"Event called at the end of an substep during gradient accumulation."),_d.forEach(t),Cs.forEach(t),Of=f(T),Ne=o(T,"DIV",{class:!0});var ws=l(Ne);u(Ht.$$.fragment,ws),If=f(ws),fn=o(ws,"P",{});var bd=l(fn);Mf=n(bd,"Event called at the beginning of training."),bd.forEach(t),ws.forEach(t),xf=f(T),We=o(T,"DIV",{class:!0});var ys=l(We);u(Ut.$$.fragment,ys),Ff=f(ys),hn=o(ys,"P",{});var Ed=l(hn);jf=n(Ed,"Event called at the end of training."),Ed.forEach(t),ys.forEach(t),T.forEach(t),Bn=f(a),ze=o(a,"P",{});var As=l(ze);Nf=n(As,"Here is an example of how to register a custom callback with the PyTorch "),Ba=o(As,"A",{href:!0});var kd=l(Ba);Wf=n(kd,"Trainer"),kd.forEach(t),zf=n(As,":"),As.forEach(t),Vn=f(a),u(Gt.$$.fragment,a),qn=f(a),Be=o(a,"P",{});var Ds=l(Be);Bf=n(Ds,"Another way to register a callback is to call "),mn=o(Ds,"CODE",{});var Td=l(mn);Vf=n(Td,"trainer.add_callback()"),Td.forEach(t),qf=n(Ds," as follows:"),Ds.forEach(t),Rn=f(a),u(Jt.$$.fragment,a),Hn=f(a),ge=o(a,"H2",{class:!0});var Ls=l(ge);Ve=o(Ls,"A",{id:!0,class:!0,href:!0});var $d=l(Ve);dn=o($d,"SPAN",{});var Cd=l(dn);u(Yt.$$.fragment,Cd),Cd.forEach(t),$d.forEach(t),Rf=f(Ls),pn=o(Ls,"SPAN",{});var wd=l(pn);Hf=n(wd,"TrainerState"),wd.forEach(t),Ls.forEach(t),Un=f(a),O=o(a,"DIV",{class:!0});var Q=l(O);u(Xt.$$.fragment,Q),Uf=f(Q),ve=o(Q,"P",{});var ar=l(ve);Gf=n(ar,"A class containing the "),Va=o(ar,"A",{href:!0});var yd=l(Va);Jf=n(yd,"Trainer"),yd.forEach(t),Yf=n(ar,` inner state that will be saved along the model and optimizer when checkpointing and passed to the `),qa=o(ar,"A",{href:!0});var Ad=l(qa);Xf=n(Ad,"TrainerCallback"),Ad.forEach(t),Kf=n(ar,"."),ar.forEach(t),Qf=f(Q),u(qe.$$.fragment,Q),Zf=f(Q),Re=o(Q,"DIV",{class:!0});var Ss=l(Re);u(Kt.$$.fragment,Ss),eh=f(Ss),Qt=o(Ss,"P",{});var Ps=l(Qt);th=n(Ps,"Create an instance from the content of "),gn=o(Ps,"CODE",{});var Dd=l(gn);ah=n(Dd,"json_path"),Dd.forEach(t),rh=n(Ps,"."),Ps.forEach(t),Ss.forEach(t),nh=f(Q),He=o(Q,"DIV",{class:!0});var Os=l(He);u(Zt.$$.fragment,Os),sh=f(Os),ea=o(Os,"P",{});var Is=l(ea);oh=n(Is,"Save the content of this instance in JSON format inside "),vn=o(Is,"CODE",{});var Ld=l(vn);lh=n(Ld,"json_path"),Ld.forEach(t),ih=n(Is,"."),Is.forEach(t),Os.forEach(t),Q.forEach(t),Gn=f(a),ue=o(a,"H2",{class:!0});var Ms=l(ue);Ue=o(Ms,"A",{id:!0,class:!0,href:!0});var Sd=l(Ue);un=o(Sd,"SPAN",{});var Pd=l(un);u(ta.$$.fragment,Pd),Pd.forEach(t),Sd.forEach(t),ch=f(Ms),_n=o(Ms,"SPAN",{});var Od=l(_n);fh=n(Od,"TrainerControl"),Od.forEach(t),Ms.forEach(t),Jn=f(a),_e=o(a,"DIV",{class:!0});var xs=l(_e);u(aa.$$.fragment,xs),hh=f(xs),be=o(xs,"P",{});var rr=l(be);mh=n(rr,"A class that handles the "),Ra=o(rr,"A",{href:!0});var Id=l(Ra);dh=n(Id,"Trainer"),Id.forEach(t),ph=n(rr,` control flow. 
This class is used by the `),Ha=o(rr,"A",{href:!0});var Md=l(Ha);gh=n(Md,"TrainerCallback"),Md.forEach(t),vh=n(rr," to activate some switches in the training loop."),rr.forEach(t),xs.forEach(t),this.h()},h(){i(w,"name","hf:doc:metadata"),i(w,"content",JSON.stringify(Vd)),i(S,"id","callbacks"),i(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(S,"href","#callbacks"),i(A,"class","relative group"),i(oa,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(la,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerControl"),i(ia,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(ca,"href","/docs/transformers/v4.15.0/en/trainer"),i(fa,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(ma,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.DefaultFlowCallback"),i(da,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.PrinterCallback"),i(pa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.ProgressCallback"),i(ga,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.TrainingArguments"),i(ua,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.integrations.TensorBoardCallback"),i(_a,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.integrations.WandbCallback"),i(Ze,"href","https://www.wandb.com/"),i(Ze,"rel","nofollow"),i(ba,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.integrations.CometCallback"),i(et,"href","https://www.comet.ml/site/"),i(et,"rel","nofollow"),i(Ea,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.integrations.MLflowCallback"),i(tt,"href","https://www.mlflow.org/"),i(tt,"rel","nofollow"),i(ka,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.integrations.AzureMLCallback"),i(at,"href","https://pypi.org/project/azureml-sdk/"),i(at,"rel","nofollow"),i(Ta,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i($a,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.TrainingArguments"),i(Ca,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(wa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerState"),i(ya,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerControl"),i(we,"id","transformers.integrations.CometCallback"),i(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(we,"href","#transformers.integrations.CometCallback"),i(ae,"class","relative 
group"),i(Aa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(Da,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(st,"href","https://www.comet.ml/site/"),i(st,"rel","nofollow"),i(it,"href","https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables"),i(it,"rel","nofollow"),i(N,"class","docstring"),i(R,"class","docstring"),i(La,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(ne,"class","docstring"),i(Sa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(se,"class","docstring"),i(Pa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(oe,"class","docstring"),i(Oa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(Ia,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.TrainingArguments"),i(Ma,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerState"),i(H,"class","docstring"),i(xa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(_t,"href","https://www.tensorflow.org/tensorboard"),i(_t,"rel","nofollow"),i(le,"class","docstring"),i(Fa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(Et,"href","https://www.wandb.com/"),i(Et,"rel","nofollow"),i(Ct,"href","https://docs.wandb.ai/integrations/huggingface"),i(Ct,"rel","nofollow"),i(W,"class","docstring"),i(G,"class","docstring"),i(ja,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(yt,"href","https://www.mlflow.org/"),i(yt,"rel","nofollow"),i(Na,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.TrainingArguments"),i(z,"class","docstring"),i(J,"class","docstring"),i(Wa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(Lt,"href","https://pypi.org/project/azureml-sdk/"),i(Lt,"rel","nofollow"),i(me,"class","docstring"),i(Ae,"id","transformers.TrainerCallback"),i(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ae,"href","#transformers.TrainerCallback"),i(pe,"class","relative group"),i(De,"class","docstring"),i(Le,"class","docstring"),i(Se,"class","docstring"),i(za,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(Pe,"class","docstring"),i(Oe,"class","docstring"),i(Ie,"class","docstring"),i(Me,"class","docstring"),i(xe,"class","docstring"),i(Fe,"class","docstring"),i(je,"class","docstring"),i(Ne,"class","docstring"),i(We,"class","docstring"),i(p,"class","docstring"),i(Ba,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(Ve,"id","transformers.TrainerState"),i(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ve,"href","#transformers.TrainerState"),i(ge,"class","relative group"),i(Va,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(qa,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(Re,"class","docstring"),i(He,"class","docstring"),i(O,"class","docstring"),i(Ue,"id","transformers.TrainerControl"),i(Ue,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ue,"href","#transformers.TrainerControl"),i(ue,"class","relative group"),i(Ra,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),i(Ha,"href","/docs/transformers/v4.15.0/en/main_classes/callback#transformers.TrainerCallback"),i(_e,"class","docstring")},m(a,h){e(document.head,w),m(a,Y,h),m(a,A,h),e(A,S),e(S,Z),_(M,Z,null),e(A,Qe),e(A,ee),e(ee,te),m(a,x,h),m(a,q,h),e(q,sa),e(q,oa),e(oa,Fs),e(q,js),m(a,Cn,h),m(a,j,h),e(j,Ns),e(j,la),e(la,Ws),e(j,zs),e(j,ia),e(ia,Bs),e(j,Vs),e(j,ca),e(ca,qs),e(j,Rs),m(a,wn,h),m(a,Ee,h),e(Ee,Hs),e(Ee,fa),e(fa,Us),e(Ee,Gs),m(a,yn,h),m(a,D,h),e(D,ha),e(ha,ma),e(ma,Js),e(ha,Ys),e(D,Xs),e(D,X),e(X,da),e(da,Ks),e(X,Qs),e(X,pa),e(pa,Zs),e(X,eo),e(X,ga),e(ga,to),e(X,ao),e(D,ro),e(D,va),e(va,ua),e(ua,no),e(va,so),e(D,oo),e(D,ke),e(ke,_a),e(_a,lo),e(ke,io),e(ke,Ze),e(Ze,co),e(ke,fo),e(D,ho),e(D,Te),e(Te,ba),e(ba,mo),e(Te,po),e(Te,et),e(et,go),e(Te,vo),e(D,uo),e(D,$e),e($e,Ea),e(Ea,_o),e($e,bo),e($e,tt),e(tt,Eo),e($e,ko),e(D,To),e(D,Ce),e(Ce,ka),e(ka,$o),e(Ce,Co),e(Ce,at),e(at,wo),e(Ce,yo),m(a,An,h),m(a,L,h),e(L,Ao),e(L,Ta),e(Ta,Do),e(L,Lo),e(L,$a),e($a,So),e(L,Po),e(L,Ca),e(Ca,Oo),e(L,Io),e(L,wa),e(wa,Mo),e(L,xo),e(L,ya),e(ya,Fo),e(L,jo),m(a,Dn,h),m(a,ae,h),e(ae,we),e(we,sr),_(rt,sr,null),e(ae,No),e(ae,or),e(or,Wo),m(a,Ln,h),m(a,ye,h),e(ye,zo),e(ye,Aa),e(Aa,Bo),e(ye,Vo),m(a,Sn,h),m(a,R,h),_(nt,R,null),e(R,qo),e(R,re),e(re,Ro),e(re,Da),e(Da,Ho),e(re,Uo),e(re,st),e(st,Go),e(re,Jo),e(R,Yo),e(R,N),_(ot,N,null),e(N,Xo),e(N,lr),e(lr,Ko),e(N,Qo),e(N,C),e(C,Zo),e(C,ir),e(ir,el),e(C,tl),e(C,cr),e(cr,al),e(C,rl),e(C,fr),e(fr,nl),e(C,sl),e(C,hr),e(hr,ol),e(C,ll),e(C,mr),e(mr,il),e(C,cl),e(C,dr),e(dr,fl),e(C,hl),e(C,pr),e(pr,ml),e(C,dl),e(C,gr),e(gr,pl),e(C,gl),e(C,vr),e(vr,vl),e(C,ul),e(N,_l),e(N,lt),e(lt,bl),e(lt,it),e(it,El),e(lt,kl),m(a,Pn,h),m(a,ne,h),_(ct,ne,null),e(ne,Tl),e(ne,ft),e(ft,$l),e(ft,La),e(La,Cl),e(ft,wl),m(a,On,h),m(a,se,h),_(ht,se,null),e(se,yl),e(se,mt),e(mt,Al),e(mt,Sa),e(Sa,Dl),e(mt,Ll),m(a,In,h),m(a,oe,h),_(dt,oe,null),e(oe,Sl),e(oe,pt),e(pt,Pl),e(pt,Pa),e(Pa,Ol),e(pt,Il),m(a,Mn,h),m(a,H,h),_(gt,H,null),e(H,Ml),e(H,vt),e(vt,xl),e(vt,Oa),e(Oa,Fl),e(vt,jl),e(H,Nl),e(H,U),e(U,Wl),e(U,Ia),e(Ia,zl),e(U,Bl),e(U,ur),e(ur,Vl),e(U,ql),e(U,Ma),e(Ma,Rl),e(U,Hl),m(a,xn,h),m(a,le,h),_(ut,le,null),e(le,Ul),e(le,ie),e(ie,Gl),e(ie,xa),e(xa,Jl),e(ie,Yl),e(ie,_t),e(_t,Xl),e(ie,Kl),m(a,Fn,h),m(a,G,h),_(bt,G,null),e(G,Ql),e(G,ce),e(ce,Zl),e(ce,Fa),e(Fa,ei),e(ce,ti),e(ce,Et),e(Et,ai),e(ce,ri),e(G,ni),e(G,W),_(kt,W,null),e(W,si),e(W,Tt),e(Tt,oi),e(Tt,_r),e(_r,li),e(Tt,ii),e(W,ci),e(W,$t),e($t,fi),e($t,Ct),e(Ct,hi),e($t,mi),e(W,di),e(W,d),e(d,pi),e(d,br),e(br,gi),e(d,vi),e(d,Er),e(Er,ui),e(d,_i),e(d,kr),e(kr,bi),e(d,Ei),e(d,Tr),e(Tr,ki),e(d,Ti),e(d,$r),e($r,$i),e(d,Ci),e(d,Cr),e(Cr,wi),e(d,yi),e(d,wr),e(wr,Ai),e(d,Di),e(d,yr),e(yr,Li),e(d,Si),e(d,Ar),e(Ar,Pi),e(d,Oi),e(d,Dr),e(Dr,Ii),e(d,Mi),e(d,Lr),e(Lr,xi),e(d,Fi),e(d,Sr),e(Sr,ji),e(d,Ni),e(d,Pr),e(Pr,Wi),e(d,zi),e(d,Or),e(Or,Bi),e(d,Vi),e(d,Ir),e(Ir,qi),e(d,Ri),e(d,Mr),e(Mr,Hi),e(d,Ui),e(d,xr),e(xr,Gi),e(d,Ji),e(d,Fr),e(Fr,Yi),e(d,Xi),e(d,jr),e(jr,Ki),e(d,Qi),m(a,jn,h),m(a,J,h),_(wt,J,null),e(J,Zi),e(J,fe),e(fe,ec),e(fe,ja),e(ja,tc),e(fe,ac),e(fe,yt),e(yt,rc),e(fe,nc),e(J,sc),e(J,z),_(At,z,null),e(z,oc),e(z,Nr),e(Nr,lc),e(z,ic),e(z,he),e(he,cc),e(he,Wr),e(Wr,fc),e(he,hc),e(he,zr),e(zr,mc),e(he,dc),e(z,pc),e(z,F),e(F,gc),e(F,Br),e(Br,vc),e(F,uc),e(F,Vr),e(Vr,_c),e(F,b
c),e(F,Na),e(Na,Ec),e(F,kc),e(F,qr),e(qr,Tc),e(F,$c),m(a,Nn,h),m(a,me,h),_(Dt,me,null),e(me,Cc),e(me,de),e(de,wc),e(de,Wa),e(Wa,yc),e(de,Ac),e(de,Lt),e(Lt,Dc),e(de,Lc),m(a,Wn,h),m(a,pe,h),e(pe,Ae),e(Ae,Rr),_(St,Rr,null),e(pe,Sc),e(pe,Hr),e(Hr,Pc),m(a,zn,h),m(a,p,h),_(Pt,p,null),e(p,Oc),e(p,Ur),e(Ur,Ic),e(p,Mc),e(p,Ot),e(Ot,xc),e(Ot,Gr),e(Gr,Fc),e(Ot,jc),e(p,Nc),e(p,P),e(P,Wc),e(P,Jr),e(Jr,zc),e(P,Bc),e(P,Yr),e(Yr,Vc),e(P,qc),e(P,Xr),e(Xr,Rc),e(P,Hc),e(P,Kr),e(Kr,Uc),e(P,Gc),e(P,Qr),e(Qr,Jc),e(P,Yc),e(p,Xc),e(p,Zr),e(Zr,Kc),e(p,Qc),_(It,p,null),e(p,Zc),e(p,De),_(Mt,De,null),e(De,ef),e(De,en),e(en,tf),e(p,af),e(p,Le),_(xt,Le,null),e(Le,rf),e(Le,tn),e(tn,nf),e(p,sf),e(p,Se),_(Ft,Se,null),e(Se,of),e(Se,an),e(an,lf),e(p,cf),e(p,Pe),_(jt,Pe,null),e(Pe,ff),e(Pe,Nt),e(Nt,hf),e(Nt,za),e(za,mf),e(Nt,df),e(p,pf),e(p,Oe),_(Wt,Oe,null),e(Oe,gf),e(Oe,rn),e(rn,vf),e(p,uf),e(p,Ie),_(zt,Ie,null),e(Ie,_f),e(Ie,nn),e(nn,bf),e(p,Ef),e(p,Me),_(Bt,Me,null),e(Me,kf),e(Me,sn),e(sn,Tf),e(p,$f),e(p,xe),_(Vt,xe,null),e(xe,Cf),e(xe,on),e(on,wf),e(p,yf),e(p,Fe),_(qt,Fe,null),e(Fe,Af),e(Fe,ln),e(ln,Df),e(p,Lf),e(p,je),_(Rt,je,null),e(je,Sf),e(je,cn),e(cn,Pf),e(p,Of),e(p,Ne),_(Ht,Ne,null),e(Ne,If),e(Ne,fn),e(fn,Mf),e(p,xf),e(p,We),_(Ut,We,null),e(We,Ff),e(We,hn),e(hn,jf),m(a,Bn,h),m(a,ze,h),e(ze,Nf),e(ze,Ba),e(Ba,Wf),e(ze,zf),m(a,Vn,h),_(Gt,a,h),m(a,qn,h),m(a,Be,h),e(Be,Bf),e(Be,mn),e(mn,Vf),e(Be,qf),m(a,Rn,h),_(Jt,a,h),m(a,Hn,h),m(a,ge,h),e(ge,Ve),e(Ve,dn),_(Yt,dn,null),e(ge,Rf),e(ge,pn),e(pn,Hf),m(a,Un,h),m(a,O,h),_(Xt,O,null),e(O,Uf),e(O,ve),e(ve,Gf),e(ve,Va),e(Va,Jf),e(ve,Yf),e(ve,qa),e(qa,Xf),e(ve,Kf),e(O,Qf),_(qe,O,null),e(O,Zf),e(O,Re),_(Kt,Re,null),e(Re,eh),e(Re,Qt),e(Qt,th),e(Qt,gn),e(gn,ah),e(Qt,rh),e(O,nh),e(O,He),_(Zt,He,null),e(He,sh),e(He,ea),e(ea,oh),e(ea,vn),e(vn,lh),e(ea,ih),m(a,Gn,h),m(a,ue,h),e(ue,Ue),e(Ue,un),_(ta,un,null),e(ue,ch),e(ue,_n),e(_n,fh),m(a,Jn,h),m(a,_e,h),_(aa,_e,null),e(_e,hh),e(_e,be),e(be,mh),e(be,Ra),e(Ra,dh),e(be,ph),e(be,Ha),e(Ha,gh),e(be,vh),Yn=!0},p(a,[h]){const 
ra={};h&2&&(ra.$$scope={dirty:h,ctx:a}),qe.$set(ra)},i(a){Yn||(b(M.$$.fragment,a),b(rt.$$.fragment,a),b(nt.$$.fragment,a),b(ot.$$.fragment,a),b(ct.$$.fragment,a),b(ht.$$.fragment,a),b(dt.$$.fragment,a),b(gt.$$.fragment,a),b(ut.$$.fragment,a),b(bt.$$.fragment,a),b(kt.$$.fragment,a),b(wt.$$.fragment,a),b(At.$$.fragment,a),b(Dt.$$.fragment,a),b(St.$$.fragment,a),b(Pt.$$.fragment,a),b(It.$$.fragment,a),b(Mt.$$.fragment,a),b(xt.$$.fragment,a),b(Ft.$$.fragment,a),b(jt.$$.fragment,a),b(Wt.$$.fragment,a),b(zt.$$.fragment,a),b(Bt.$$.fragment,a),b(Vt.$$.fragment,a),b(qt.$$.fragment,a),b(Rt.$$.fragment,a),b(Ht.$$.fragment,a),b(Ut.$$.fragment,a),b(Gt.$$.fragment,a),b(Jt.$$.fragment,a),b(Yt.$$.fragment,a),b(Xt.$$.fragment,a),b(qe.$$.fragment,a),b(Kt.$$.fragment,a),b(Zt.$$.fragment,a),b(ta.$$.fragment,a),b(aa.$$.fragment,a),Yn=!0)},o(a){E(M.$$.fragment,a),E(rt.$$.fragment,a),E(nt.$$.fragment,a),E(ot.$$.fragment,a),E(ct.$$.fragment,a),E(ht.$$.fragment,a),E(dt.$$.fragment,a),E(gt.$$.fragment,a),E(ut.$$.fragment,a),E(bt.$$.fragment,a),E(kt.$$.fragment,a),E(wt.$$.fragment,a),E(At.$$.fragment,a),E(Dt.$$.fragment,a),E(St.$$.fragment,a),E(Pt.$$.fragment,a),E(It.$$.fragment,a),E(Mt.$$.fragment,a),E(xt.$$.fragment,a),E(Ft.$$.fragment,a),E(jt.$$.fragment,a),E(Wt.$$.fragment,a),E(zt.$$.fragment,a),E(Bt.$$.fragment,a),E(Vt.$$.fragment,a),E(qt.$$.fragment,a),E(Rt.$$.fragment,a),E(Ht.$$.fragment,a),E(Ut.$$.fragment,a),E(Gt.$$.fragment,a),E(Jt.$$.fragment,a),E(Yt.$$.fragment,a),E(Xt.$$.fragment,a),E(qe.$$.fragment,a),E(Kt.$$.fragment,a),E(Zt.$$.fragment,a),E(ta.$$.fragment,a),E(aa.$$.fragment,a),Yn=!1},d(a){t(w),a&&t(Y),a&&t(A),k(M),a&&t(x),a&&t(q),a&&t(Cn),a&&t(j),a&&t(wn),a&&t(Ee),a&&t(yn),a&&t(D),a&&t(An),a&&t(L),a&&t(Dn),a&&t(ae),k(rt),a&&t(Ln),a&&t(ye),a&&t(Sn),a&&t(R),k(nt),k(ot),a&&t(Pn),a&&t(ne),k(ct),a&&t(On),a&&t(se),k(ht),a&&t(In),a&&t(oe),k(dt),a&&t(Mn),a&&t(H),k(gt),a&&t(xn),a&&t(le),k(ut),a&&t(Fn),a&&t(G),k(bt),k(kt),a&&t(jn),a&&t(J),k(wt),k(At),a&&t(Nn),a&&t(me),k(Dt),a&&t(Wn),a&&t(pe),k(St),a&&t(zn),a&&t(p),k(Pt),k(It),k(Mt),k(xt),k(Ft),k(jt),k(Wt),k(zt),k(Bt),k(Vt),k(qt),k(Rt),k(Ht),k(Ut),a&&t(Bn),a&&t(ze),a&&t(Vn),k(Gt,a),a&&t(qn),a&&t(Be),a&&t(Rn),k(Jt,a),a&&t(Hn),a&&t(ge),k(Yt),a&&t(Un),a&&t(O),k(Xt),k(qe),k(Kt),k(Zt),a&&t(Gn),a&&t(ue),k(ta),a&&t(Jn),a&&t(_e),k(aa)}}}const Vd={local:"callbacks",sections:[{local:"transformers.integrations.CometCallback",title:"Available Callbacks"},{local:"transformers.TrainerCallback",title:"TrainerCallback"},{local:"transformers.TrainerState",title:"TrainerState"},{local:"transformers.TrainerControl",title:"TrainerControl"}],title:"Callbacks"};function qd(nr,w,Y){let{fw:A}=w;return nr.$$set=S=>{"fw"in S&&Y(0,A=S.fw)},[A]}class Xd extends xd{constructor(w){super();Fd(this,w,qd,Bd,jd,{fw:0})}}export{Xd as default,Vd as metadata};
9,994
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/main_classes/configuration.mdx-ad31653a.js
import{S as bs,i as vs,s as ys,e as r,k as d,w as g,t as o,L as ws,c as a,d as t,m as c,a as s,x as p,h as n,b as h,J as e,g as A,y as _,q as b,o as v,B as y}from"../../chunks/vendor-b1433968.js";import{T as gs}from"../../chunks/Tip-c3840994.js";import{D}from"../../chunks/Docstring-ff504c58.js";import{C as ps}from"../../chunks/CodeBlock-a320dbd7.js";import{I as _s}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function $s(be){let m,C,u,w,j;return{c(){m=r("p"),C=o(`A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does `),u=r("strong"),w=o("not"),j=o(" load the model weights. It only affects the model\u2019s configuration.")},l($){m=a($,"P",{});var P=s(m);C=n(P,`A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does `),u=a(P,"STRONG",{});var S=s(u);w=n(S,"not"),S.forEach(t),j=n(P," load the model weights. It only affects the model\u2019s configuration."),P.forEach(t)},m($,P){A($,m,P),e(m,C),e(m,u),e(u,w),e(m,j)},d($){$&&t(m)}}}function ks(be){let m,C,u,w,j;return{c(){m=r("p"),C=o("Passing "),u=r("code"),w=o("use_auth_token=True"),j=o(" is required when you want to use a private model.")},l($){m=a($,"P",{});var P=s(m);C=n(P,"Passing "),u=a(P,"CODE",{});var S=s(u);w=n(S,"use_auth_token=True"),S.forEach(t),j=n(P," is required when you want to use a private model."),P.forEach(t)},m($,P){A($,m,P),e(m,C),e(m,u),e(u,w),e(m,j)},d($){$&&t(m)}}}function Ps(be){let m,C,u,w,j,$,P,S,$o,Yt,U,ko,Be,Po,Co,Zt,E,Eo,ot,xo,To,nt,Do,jo,rt,zo,Oo,at,Ao,So,eo,V,R,st,ve,Fo,it,Lo,to,l,ye,Io,dt,No,qo,G,Mo,ct,Wo,Ho,F,M,lt,Vo,Bo,ft,Jo,Uo,Je,Ro,Go,Xo,x,ht,Ko,Qo,mt,Yo,Zo,Ue,en,tn,Re,on,nn,Ge,rn,an,sn,X,ut,dn,cn,gt,ln,fn,hn,K,pt,mn,un,_t,gn,pn,_n,bt,bn,vn,L,Q,vt,yn,wn,yt,$n,kn,Pn,Y,wt,Cn,En,$t,xn,Tn,Dn,Z,kt,jn,zn,Pt,On,An,Sn,ee,Ct,Fn,Ln,Et,In,Nn,qn,z,we,Mn,$e,Wn,xt,Hn,Vn,Bn,Tt,Jn,Un,ke,Rn,te,Pe,Gn,I,Xn,Dt,Kn,Qn,jt,Yn,Zn,zt,er,tr,or,oe,Ce,nr,Ee,rr,Xe,ar,sr,ir,ne,xe,dr,Te,cr,Ke,lr,fr,hr,T,De,mr,je,ur,Qe,gr,pr,_r,re,br,Ot,vr,yr,ze,wr,ae,Oe,$r,N,kr,At,Pr,Cr,Ye,Er,xr,St,Tr,Dr,jr,se,Ae,zr,B,Or,Ft,Ar,Sr,Ze,Fr,Lr,Ir,ie,Se,Nr,Lt,qr,Mr,de,Fe,Wr,It,Hr,Vr,ce,Le,Br,Nt,Jr,Ur,le,Ie,Rr,qt,Gr,Xr,fe,Ne,Kr,qe,Qr,Mt,Yr,Zr,ea,O,Me,ta,We,oa,Wt,na,ra,aa,J,sa,Ht,ia,da,Vt,ca,la,fa,Bt,ha,oo;return $=new _s({}),ve=new _s({}),ye=new D({props:{name:"class transformers.PretrainedConfig",anchor:"transformers.PretrainedConfig",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L48",parametersDescription:[{anchor:"transformers.PretrainedConfig.name_or_path",description:`<strong>name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Store the string that was passed to <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a> or <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">TFPreTrainedModel.from_pretrained()</a> as <code>pretrained_model_name_or_path</code> if the configuration was created with such a method.`,name:"name_or_path"},{anchor:"transformers.PretrainedConfig.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return all 
hidden-states.`,name:"output_hidden_states"},{anchor:"transformers.PretrainedConfig.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should returns all attentions.`,name:"output_attentions"},{anchor:"transformers.PretrainedConfig.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PretrainedConfig.is_encoder_decoder",description:`<strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as an encoder/decoder or not.`,name:"is_encoder_decoder"},{anchor:"transformers.PretrainedConfig.is_decoder",description:`<strong>is_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as decoder or not (in which case it&#x2019;s used as an encoder).`,name:"is_decoder"},{anchor:"transformers.PretrainedConfig.cross_attention_hidden_size**",description:`<strong>cross_attention_hidden_size**</strong> (<code>bool</code>, <em>optional</em>) &#x2014; The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder setting and the cross-attention hidden dimension differs from <code>self.config.hidden_size</code>.`,name:"cross_attention_hidden_size**"},{anchor:"transformers.PretrainedConfig.add_cross_attention",description:`<strong>add_cross_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the <a href="/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> class, which consists of all models in <code>AUTO_MODELS_FOR_CAUSAL_LM</code>.`,name:"add_cross_attention"},{anchor:"transformers.PretrainedConfig.tie_encoder_decoder",description:`<strong>tie_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names.`,name:"tie_encoder_decoder"},{anchor:"transformers.PretrainedConfig.prune_heads",description:`<strong>prune_heads</strong> (<code>Dict[int, List[int]]</code>, <em>optional</em>, defaults to <code>{}</code>) &#x2014; Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of heads to prune in said layer.</p> <p>For instance <code>{1: [0, 2], 2: [2, 3]}</code> will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.`,name:"prune_heads"},{anchor:"transformers.PretrainedConfig.chunk_size_feed_forward",description:`<strong>chunk_size_feed_forward</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; The chunk size of all feed forward layers in the residual attention blocks. A chunk size of <code>0</code> means that the feed forward layer is not chunked. 
A chunk size of n means that the feed forward layer processes <code>n</code> &lt; sequence_length embeddings at a time. For more information on feed forward chunking, see <a href="../glossary.html#feed-forward-chunking">How does Feed Forward Chunking work?</a>.`,name:"chunk_size_feed_forward"}],parameterGroups:[{title:"Parameters for sequence generation",parametersDescription:` <ul> <li><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) \u2014 Maximum length that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) \u2014 Minimum length that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) \u2014 Flag that will be used by default in the <code>generate</code> method of the model. Whether or not to use sampling ; use greedy decoding otherwise.</li> <li><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) \u2014 Flag that will be used by default in the <code>generate</code> method of the model. Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.</li> <li><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) \u2014 Number of beams for beam search that will be used by default in the <code>generate</code> method of the model. 1 means no beam search.</li> <li><strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) \u2014 Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams that will be used by default in the <code>generate</code> method of the model. 1 means no group beam search.</li> <li><strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) \u2014 Value to control diversity for group beam search. that will be used by default in the <code>generate</code> method of the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.</li> <li><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1) \u2014 The value used to module the next token probabilities that will be used by default in the <code>generate</code> method of the model. Must be strictly positive.</li> <li><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) \u2014 Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1) \u2014 Value that will be used by default in the <code>generate</code> method of the model for <code>top_p</code>. If set to float < 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.</li> <li><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) \u2014 Parameter for repetition penalty that will be used by default in the <code>generate</code> method of the model. 
1.0 means no penalty.</li> <li><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) \u2014 Exponential penalty to the length that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) \u2014 Value that will be used by default in the \u2014 <code>generate</code> method of the model for <code>no_repeat_ngram_size</code>. If set to int > 0, all ngrams of that size can only occur once.</li> <li><strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) \u2014 Value that will be used by \u2014 default in the <code>generate</code> method of the model for <code>encoder_no_repeat_ngram_size</code>. If set to int > 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.</li> <li><strong>bad_words_ids</strong> (<code>List[int]</code>, <em>optional</em>) \u2014 List of token ids that are not allowed to be generated that will be used by default in the <code>generate</code> method of the model. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.</li> <li><strong>num_return_sequences</strong> (<code>int</code>, <em>optional</em>, defaults to 1) \u2014 Number of independently computed returned sequences for each element in the batch that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) \u2014 Whether the model should return the logits when used for generation.</li> <li><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) \u2014 Whether the model should return a <a href="/docs/transformers/v4.15.0/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> instead of a <code>torch.LongTensor</code>.</li> <li><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.</li> <li><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 The id of the token to force as the last generated token when <code>max_length</code> is reached.</li> <li><strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) \u2014 Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. Note that using <code>remove_invalid_values</code> can slow down generation.</li> </ul> `},{title:"Parameters for fine-tuning tasks",parametersDescription:` <ul> <li><strong>architectures</strong> (<code>List[str]</code>, <em>optional</em>) \u2014 Model architectures that can be used with the model pretrained weights.</li> <li><strong>finetuning_task</strong> (<code>str</code>, <em>optional</em>) \u2014 Name of the task used to fine-tune the model. 
This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.</li> <li><strong>id2label</strong> (<code>Dict[int, str]</code>, <em>optional</em>) \u2014 A map from index (for instance prediction index, or target index) to label.</li> <li><strong>label2id</strong> (<code>Dict[str, int]</code>, <em>optional</em>) \u2014 A map from label to index for the model.</li> <li><strong>num_labels</strong> (<code>int</code>, <em>optional</em>) \u2014 Number of labels to use in the last layer added to the model, typically for a classification task.</li> <li><strong>task_specific_params</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) \u2014 Additional keyword arguments to store for the current task.</li> <li><strong>problem_type</strong> (<code>str</code>, <em>optional</em>) \u2014 Problem type for <code>XxxForSequenceClassification</code> models. Can be one of <code>"regression"</code>, <code>"single_label_classification"</code> or <code>"multi_label_classification"</code>.</li> </ul> `},{title:"Parameters linked to the tokenizer",parametersDescription:` <ul> <li><strong>tokenizer_class</strong> (<code>str</code>, <em>optional</em>) \u2014 The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the model by default).</li> <li><strong>prefix</strong> (<code>str</code>, <em>optional</em>) \u2014 A specific prompt that should be added at the beginning of each text before calling the model.</li> <li><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 The id of the <em>beginning-of-stream</em> token.</li> <li><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 The id of the <em>padding</em> token.</li> <li><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 The id of the <em>end-of-stream</em> token.</li> <li><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.</li> <li><strong>sep_token_id</strong> (<code>int</code>, <em>optional</em>) \u2014 The id of the <em>separation</em> token.</li> </ul> `},{title:"PyTorch specific parameters",parametersDescription:` <ul> <li> <p><strong>torchscript</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) \u2014 Whether or not the model should be used with Torchscript.</p> </li> <li> <p><strong>tie_word_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) \u2014 Whether the model\u2019s input and output word embeddings should be tied. Note that this is only relevant if the model has a output word embedding layer.</p> </li> <li> <p><strong>torch_dtype</strong> (<code>str</code>, <em>optional</em>) \u2014 The <code>dtype</code> of the weights. This attribute can be used to initialize the model to a non-default <code>dtype</code> (which is normally <code>float32</code>) and thus allow for optimal storage allocation. For example, if the saved model is <code>float16</code>, ideally we want to load it back using the minimal amount of memory needed to load <code>float16</code> weights. Since the config object is stored in plain text, this attribute contains just the floating type string without the <code>torch.</code> prefix. 
For example, for <code>torch.float16</code> \`<code>torch_dtype</code> is the <code>"float16"</code> string.</p> <p>This attribute is currently not being used during model loading time, but this may change in the future versions. But we can already start preparing for the future by saving the dtype with save_pretrained.</p> </li> </ul> `},{title:"TensorFlow specific parameters",parametersDescription:` <ul> <li><strong>use_bfloat16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) \u2014 Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).</li> </ul> `}]}}),G=new gs({props:{$$slots:{default:[$s]},$$scope:{ctx:be}}}),we=new D({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/file_utils.py#L2482",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your config in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add config&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your config (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your config in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),ke=new ps({props:{code:`from transformers import AutoConfig config = AutoConfig.from_pretrained("bert-base-cased") # Push the config to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. config.push_to_hub("my-finetuned-bert") # Push the config to your namespace with the name "my-finetuned-bert" with no local clone. config.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the config to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. config.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. 
config.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert"),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the config to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the config to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the config to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),Pe=new D({props:{name:"dict_torch_dtype_to_str",anchor:"transformers.PretrainedConfig.dict_torch_dtype_to_str",parameters:[{name:"d",val:": typing.Dict[str, typing.Any]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L807"}}),Ce=new D({props:{name:"from_dict",anchor:"transformers.PretrainedConfig.from_dict",parameters:[{name:"config_dict",val:": typing.Dict[str, typing.Any]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L610",parametersDescription:[{anchor:"transformers.PretrainedConfig.from_dict.config_dict",description:`<strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the <a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig.get_config_dict">get_config_dict()</a> method.`,name:"config_dict"},{anchor:"transformers.PretrainedConfig.from_dict.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>) &#x2014; Additional parameters from which to initialize the configuration object.`,name:"kwargs"}],returnDescription:` <p>The configuration object instantiated from those parameters.</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> `}}),xe=new D({props:{name:"from_json_file",anchor:"transformers.PretrainedConfig.from_json_file",parameters:[{name:"json_file",val:": typing.Union[str, os.PathLike]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L649",parametersDescription:[{anchor:"transformers.PretrainedConfig.from_json_file.json_file",description:`<strong>json_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file containing the parameters.`,name:"json_file"}],returnDescription:` <p>The configuration object instantiated from that JSON file.</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> `}}),De=new D({props:{name:"from_pretrained",anchor:"transformers.PretrainedConfig.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L433",parametersDescription:[{anchor:"transformers.PretrainedConfig.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained model configuration hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a configuration file saved using the <a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved configuration JSON <em>file</em>, e.g., <code>./my_model_directory/configuration.json</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.PretrainedConfig.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.PretrainedConfig.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the configuration files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.PretrainedConfig.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.PretrainedConfig.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.PretrainedConfig.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.PretrainedConfig.from_pretrained.revision(str,",description:`<strong>revision(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.PretrainedConfig.from_pretrained.return_unused_kwargs",description:`<strong>return_unused_kwargs</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>False</code>, then this function returns just the final configuration object.</p> <p>If <code>True</code>, then this functions returns a <code>Tuple(config, unused_kwargs)</code> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of <code>kwargs</code> which has not been used to update <code>config</code> and is otherwise ignored.`,name:"return_unused_kwargs"},{anchor:"transformers.PretrainedConfig.from_pretrained.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are <em>not</em> configuration attributes is controlled by the <code>return_unused_kwargs</code> keyword parameter.`,name:"kwargs"}],returnDescription:` <p>The configuration object instantiated from this pretrained model.</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> `}}),re=new gs({props:{$$slots:{default:[ks]},$$scope:{ctx:be}}}),ze=new ps({props:{code:`# We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from huggingface.co and cache. config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')* config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False) assert config.output_attentions == True config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True) assert config.output_attentions == True assert unused_kwargs == {'foo': False},`,highlighted:`<span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PretrainedConfig* so let&#x27;s show the examples on a</span> <span class="hljs-comment"># derived class: BertConfig</span> config = BertConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>) <span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> config = BertConfig.from_pretrained(<span class="hljs-string">&#x27;./test/saved_model/&#x27;</span>) <span class="hljs-comment"># E.g. 
config (or model) was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*</span> config = BertConfig.from_pretrained(<span class="hljs-string">&#x27;./test/saved_model/my_configuration.json&#x27;</span>) config = BertConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> config, unused_kwargs = BertConfig.from_pretrained(<span class="hljs-string">&#x27;bert-base-uncased&#x27;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span>) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-keyword">assert</span> unused_kwargs == {<span class="hljs-string">&#x27;foo&#x27;</span>: <span class="hljs-literal">False</span>}`}}),Oe=new D({props:{name:"get_config_dict",anchor:"transformers.PretrainedConfig.get_config_dict",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L512",parametersDescription:[{anchor:"transformers.PretrainedConfig.get_config_dict.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.`,name:"pretrained_model_name_or_path"}],returnDescription:` <p>The dictionary(ies) that will be used to instantiate the configuration object.</p> `,returnType:` <p><code>Tuple[Dict, Dict]</code></p> `}}),Ae=new D({props:{name:"save_pretrained",anchor:"transformers.PretrainedConfig.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L391",parametersDescription:[{anchor:"transformers.PretrainedConfig.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the configuration JSON file will be saved (will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.PretrainedConfig.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/v4.15.0/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),Se=new D({props:{name:"to_dict",anchor:"transformers.PretrainedConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L709",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance.</p> `,returnType:` <p><code>Dict[str, Any]</code></p> `}}),Fe=new D({props:{name:"to_diff_dict",anchor:"transformers.PretrainedConfig.to_diff_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L677",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, Any]</code></p> `}}),Le=new D({props:{name:"to_json_file",anchor:"transformers.PretrainedConfig.to_json_file",parameters:[{name:"json_file_path",val:": typing.Union[str, os.PathLike]"},{name:"use_diff",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L745",parametersDescription:[{anchor:"transformers.PretrainedConfig.to_json_file.json_file_path",description:`<strong>json_file_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file in which this configuration instance&#x2019;s parameters will be saved.`,name:"json_file_path"},{anchor:"transformers.PretrainedConfig.to_json_file.use_diff",description:`<strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON file.`,name:"use_diff"}]}}),Ie=new D({props:{name:"to_json_string",anchor:"transformers.PretrainedConfig.to_json_string",parameters:[{name:"use_diff",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L727",parametersDescription:[{anchor:"transformers.PretrainedConfig.to_json_string.use_diff",description:`<strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON string.`,name:"use_diff"}],returnDescription:` <p>String containing all the attributes that make up this configuration instance in JSON format.</p> `,returnType:` <p><code>str</code></p> `}}),Ne=new D({props:{name:"update",anchor:"transformers.PretrainedConfig.update",parameters:[{name:"config_dict",val:": typing.Dict[str, typing.Any]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L759",parametersDescription:[{anchor:"transformers.PretrainedConfig.update.config_dict",description:"<strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary of attributes that should be updated for this class.",name:"config_dict"}]}}),Me=new D({props:{name:"update_from_string",anchor:"transformers.PretrainedConfig.update_from_string",parameters:[{name:"update_str",val:": 
str"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/configuration_utils.py#L769",parametersDescription:[{anchor:"transformers.PretrainedConfig.update_from_string.update_str",description:"<strong>update_str</strong> (<code>str</code>) &#x2014; String with attributes that should be updated for this class.",name:"update_str"}]}}),{c(){m=r("meta"),C=d(),u=r("h1"),w=r("a"),j=r("span"),g($.$$.fragment),P=d(),S=r("span"),$o=o("Configuration"),Yt=d(),U=r("p"),ko=o("The base class "),Be=r("a"),Po=o("PretrainedConfig"),Co=o(` implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),Zt=d(),E=r("p"),Eo=o(`Each derived config class implements model specific attributes. Common attributes present in all config classes are: `),ot=r("code"),xo=o("hidden_size"),To=o(", "),nt=r("code"),Do=o("num_attention_heads"),jo=o(", and "),rt=r("code"),zo=o("num_hidden_layers"),Oo=o(`. Text models further implement: `),at=r("code"),Ao=o("vocab_size"),So=o("."),eo=d(),V=r("h2"),R=r("a"),st=r("span"),g(ve.$$.fragment),Fo=d(),it=r("span"),Lo=o("PretrainedConfig"),to=d(),l=r("div"),g(ye.$$.fragment),Io=d(),dt=r("p"),No=o(`Base class for all configuration classes. Handles a few parameters common to all models\u2019 configurations as well as methods for loading/downloading/saving configurations.`),qo=d(),g(G.$$.fragment),Mo=d(),ct=r("p"),Wo=o("Class attributes (overridden by derived classes):"),Ho=d(),F=r("ul"),M=r("li"),lt=r("strong"),Vo=o("model_type"),Bo=o(" ("),ft=r("code"),Jo=o("str"),Uo=o(`) \u2014 An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in `),Je=r("a"),Ro=o("AutoConfig"),Go=o("."),Xo=d(),x=r("li"),ht=r("strong"),Ko=o("is_composition"),Qo=o(" ("),mt=r("code"),Yo=o("bool"),Zo=o(`) \u2014 Whether the config class is composed of multiple sub-configs. 
In this case the config has to be initialized from two or more configs of type `),Ue=r("a"),en=o("PretrainedConfig"),tn=o(` like: `),Re=r("a"),on=o("EncoderDecoderConfig"),nn=o(" or "),Ge=r("a"),rn=o("~RagConfig"),an=o("."),sn=d(),X=r("li"),ut=r("strong"),dn=o("keys_to_ignore_at_inference"),cn=o(" ("),gt=r("code"),ln=o("List[str]"),fn=o(`) \u2014 A list of keys to ignore by default when looking at dictionary outputs of the model during inference.`),hn=d(),K=r("li"),pt=r("strong"),mn=o("attribute_map"),un=o(" ("),_t=r("code"),gn=o("Dict[str, str]"),pn=o(`) \u2014 A dict that maps model specific attribute names to the standardized naming of attributes.`),_n=d(),bt=r("p"),bn=o("Common attributes (present in all subclasses):"),vn=d(),L=r("ul"),Q=r("li"),vt=r("strong"),yn=o("vocab_size"),wn=o(" ("),yt=r("code"),$n=o("int"),kn=o(`) \u2014 The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don\u2019t have a text modality like ViT).`),Pn=d(),Y=r("li"),wt=r("strong"),Cn=o("hidden_size"),En=o(" ("),$t=r("code"),xn=o("int"),Tn=o(") \u2014 The hidden size of the model."),Dn=d(),Z=r("li"),kt=r("strong"),jn=o("num_attention_heads"),zn=o(" ("),Pt=r("code"),On=o("int"),An=o(`) \u2014 The number of attention heads used in the multi-head attention layers of the model.`),Sn=d(),ee=r("li"),Ct=r("strong"),Fn=o("num_hidden_layers"),Ln=o(" ("),Et=r("code"),In=o("int"),Nn=o(") \u2014 The number of blocks in the model."),qn=d(),z=r("div"),g(we.$$.fragment),Mn=d(),$e=r("p"),Wn=o(`Upload the configuration file to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),xt=r("code"),Hn=o("repo_path_or_name"),Vn=o("."),Bn=d(),Tt=r("p"),Jn=o("Examples:"),Un=d(),g(ke.$$.fragment),Rn=d(),te=r("div"),g(Pe.$$.fragment),Gn=d(),I=r("p"),Xn=o("Checks whether the passed dictionary has a "),Dt=r("em"),Kn=o("torch_dtype"),Qn=o(` key and if it\u2019s not None, converts torch.dtype to a string of just the type. 
For example, `),jt=r("code"),Yn=o("torch.float32"),Zn=o(" get converted into "),zt=r("em"),er=o("\u201Cfloat32\u201D"),tr=o(` string, which can then be stored in the json format.`),or=d(),oe=r("div"),g(Ce.$$.fragment),nr=d(),Ee=r("p"),rr=o("Instantiates a "),Xe=r("a"),ar=o("PretrainedConfig"),sr=o(" from a Python dictionary of parameters."),ir=d(),ne=r("div"),g(xe.$$.fragment),dr=d(),Te=r("p"),cr=o("Instantiates a "),Ke=r("a"),lr=o("PretrainedConfig"),fr=o(" from the path to a JSON file of parameters."),hr=d(),T=r("div"),g(De.$$.fragment),mr=d(),je=r("p"),ur=o("Instantiate a "),Qe=r("a"),gr=o("PretrainedConfig"),pr=o(` (or a derived class) from a pretrained model configuration.`),_r=d(),g(re.$$.fragment),br=d(),Ot=r("p"),vr=o("Examples:"),yr=d(),g(ze.$$.fragment),wr=d(),ae=r("div"),g(Oe.$$.fragment),$r=d(),N=r("p"),kr=o("From a "),At=r("code"),Pr=o("pretrained_model_name_or_path"),Cr=o(`, resolve to a dictionary of parameters, to be used for instantiating a `),Ye=r("a"),Er=o("PretrainedConfig"),xr=o(" using "),St=r("code"),Tr=o("from_dict"),Dr=o("."),jr=d(),se=r("div"),g(Ae.$$.fragment),zr=d(),B=r("p"),Or=o("Save a configuration object to the directory "),Ft=r("code"),Ar=o("save_directory"),Sr=o(`, so that it can be re-loaded using the `),Ze=r("a"),Fr=o("from_pretrained()"),Lr=o(" class method."),Ir=d(),ie=r("div"),g(Se.$$.fragment),Nr=d(),Lt=r("p"),qr=o("Serializes this instance to a Python dictionary."),Mr=d(),de=r("div"),g(Fe.$$.fragment),Wr=d(),It=r("p"),Hr=o(`Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.`),Vr=d(),ce=r("div"),g(Le.$$.fragment),Br=d(),Nt=r("p"),Jr=o("Save this instance to a JSON file."),Ur=d(),le=r("div"),g(Ie.$$.fragment),Rr=d(),qt=r("p"),Gr=o("Serializes this instance to a JSON string."),Xr=d(),fe=r("div"),g(Ne.$$.fragment),Kr=d(),qe=r("p"),Qr=o("Updates attributes of this class with attributes from "),Mt=r("code"),Yr=o("config_dict"),Zr=o("."),ea=d(),O=r("div"),g(Me.$$.fragment),ta=d(),We=r("p"),oa=o("Updates attributes of this class with attributes from "),Wt=r("code"),na=o("update_str"),ra=o("."),aa=d(),J=r("p"),sa=o("The expected format is ints, floats and strings as is, and for booleans use "),Ht=r("code"),ia=o("true"),da=o(" or "),Vt=r("code"),ca=o("false"),la=o(`. For example: \u201Cn_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\u201D`),fa=d(),Bt=r("p"),ha=o("The keys to change have to already exist in the config object."),this.h()},l(i){const k=ws('[data-svelte="svelte-1phssyn"]',document.head);m=a(k,"META",{name:!0,content:!0}),k.forEach(t),C=c(i),u=a(i,"H1",{class:!0});var He=s(u);w=a(He,"A",{id:!0,class:!0,href:!0});var Jt=s(w);j=a(Jt,"SPAN",{});var ma=s(j);p($.$$.fragment,ma),ma.forEach(t),Jt.forEach(t),P=c(He),S=a(He,"SPAN",{});var ua=s(S);$o=n(ua,"Configuration"),ua.forEach(t),He.forEach(t),Yt=c(i),U=a(i,"P",{});var no=s(U);ko=n(no,"The base class "),Be=a(no,"A",{href:!0});var ga=s(Be);Po=n(ga,"PretrainedConfig"),ga.forEach(t),Co=n(no,` implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),no.forEach(t),Zt=c(i),E=a(i,"P",{});var W=s(E);Eo=n(W,`Each derived config class implements model specific attributes. 
Common attributes present in all config classes are: `),ot=a(W,"CODE",{});var pa=s(ot);xo=n(pa,"hidden_size"),pa.forEach(t),To=n(W,", "),nt=a(W,"CODE",{});var _a=s(nt);Do=n(_a,"num_attention_heads"),_a.forEach(t),jo=n(W,", and "),rt=a(W,"CODE",{});var ba=s(rt);zo=n(ba,"num_hidden_layers"),ba.forEach(t),Oo=n(W,`. Text models further implement: `),at=a(W,"CODE",{});var va=s(at);Ao=n(va,"vocab_size"),va.forEach(t),So=n(W,"."),W.forEach(t),eo=c(i),V=a(i,"H2",{class:!0});var ro=s(V);R=a(ro,"A",{id:!0,class:!0,href:!0});var ya=s(R);st=a(ya,"SPAN",{});var wa=s(st);p(ve.$$.fragment,wa),wa.forEach(t),ya.forEach(t),Fo=c(ro),it=a(ro,"SPAN",{});var $a=s(it);Lo=n($a,"PretrainedConfig"),$a.forEach(t),ro.forEach(t),to=c(i),l=a(i,"DIV",{class:!0});var f=s(l);p(ye.$$.fragment,f),Io=c(f),dt=a(f,"P",{});var ka=s(dt);No=n(ka,`Base class for all configuration classes. Handles a few parameters common to all models\u2019 configurations as well as methods for loading/downloading/saving configurations.`),ka.forEach(t),qo=c(f),p(G.$$.fragment,f),Mo=c(f),ct=a(f,"P",{});var Pa=s(ct);Wo=n(Pa,"Class attributes (overridden by derived classes):"),Pa.forEach(t),Ho=c(f),F=a(f,"UL",{});var he=s(F);M=a(he,"LI",{});var Ve=s(M);lt=a(Ve,"STRONG",{});var Ca=s(lt);Vo=n(Ca,"model_type"),Ca.forEach(t),Bo=n(Ve," ("),ft=a(Ve,"CODE",{});var Ea=s(ft);Jo=n(Ea,"str"),Ea.forEach(t),Uo=n(Ve,`) \u2014 An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in `),Je=a(Ve,"A",{href:!0});var xa=s(Je);Ro=n(xa,"AutoConfig"),xa.forEach(t),Go=n(Ve,"."),Ve.forEach(t),Xo=c(he),x=a(he,"LI",{});var q=s(x);ht=a(q,"STRONG",{});var Ta=s(ht);Ko=n(Ta,"is_composition"),Ta.forEach(t),Qo=n(q," ("),mt=a(q,"CODE",{});var Da=s(mt);Yo=n(Da,"bool"),Da.forEach(t),Zo=n(q,`) \u2014 Whether the config class is composed of multiple sub-configs. 
In this case the config has to be initialized from two or more configs of type `),Ue=a(q,"A",{href:!0});var ja=s(Ue);en=n(ja,"PretrainedConfig"),ja.forEach(t),tn=n(q,` like: `),Re=a(q,"A",{href:!0});var za=s(Re);on=n(za,"EncoderDecoderConfig"),za.forEach(t),nn=n(q," or "),Ge=a(q,"A",{href:!0});var Oa=s(Ge);rn=n(Oa,"~RagConfig"),Oa.forEach(t),an=n(q,"."),q.forEach(t),sn=c(he),X=a(he,"LI",{});var Ut=s(X);ut=a(Ut,"STRONG",{});var Aa=s(ut);dn=n(Aa,"keys_to_ignore_at_inference"),Aa.forEach(t),cn=n(Ut," ("),gt=a(Ut,"CODE",{});var Sa=s(gt);ln=n(Sa,"List[str]"),Sa.forEach(t),fn=n(Ut,`) \u2014 A list of keys to ignore by default when looking at dictionary outputs of the model during inference.`),Ut.forEach(t),hn=c(he),K=a(he,"LI",{});var Rt=s(K);pt=a(Rt,"STRONG",{});var Fa=s(pt);mn=n(Fa,"attribute_map"),Fa.forEach(t),un=n(Rt," ("),_t=a(Rt,"CODE",{});var La=s(_t);gn=n(La,"Dict[str, str]"),La.forEach(t),pn=n(Rt,`) \u2014 A dict that maps model specific attribute names to the standardized naming of attributes.`),Rt.forEach(t),he.forEach(t),_n=c(f),bt=a(f,"P",{});var Ia=s(bt);bn=n(Ia,"Common attributes (present in all subclasses):"),Ia.forEach(t),vn=c(f),L=a(f,"UL",{});var me=s(L);Q=a(me,"LI",{});var Gt=s(Q);vt=a(Gt,"STRONG",{});var Na=s(vt);yn=n(Na,"vocab_size"),Na.forEach(t),wn=n(Gt," ("),yt=a(Gt,"CODE",{});var qa=s(yt);$n=n(qa,"int"),qa.forEach(t),kn=n(Gt,`) \u2014 The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don\u2019t have a text modality like ViT).`),Gt.forEach(t),Pn=c(me),Y=a(me,"LI",{});var Xt=s(Y);wt=a(Xt,"STRONG",{});var Ma=s(wt);Cn=n(Ma,"hidden_size"),Ma.forEach(t),En=n(Xt," ("),$t=a(Xt,"CODE",{});var Wa=s($t);xn=n(Wa,"int"),Wa.forEach(t),Tn=n(Xt,") \u2014 The hidden size of the model."),Xt.forEach(t),Dn=c(me),Z=a(me,"LI",{});var Kt=s(Z);kt=a(Kt,"STRONG",{});var Ha=s(kt);jn=n(Ha,"num_attention_heads"),Ha.forEach(t),zn=n(Kt," ("),Pt=a(Kt,"CODE",{});var Va=s(Pt);On=n(Va,"int"),Va.forEach(t),An=n(Kt,`) \u2014 The number of attention heads used in the multi-head attention layers of the model.`),Kt.forEach(t),Sn=c(me),ee=a(me,"LI",{});var Qt=s(ee);Ct=a(Qt,"STRONG",{});var Ba=s(Ct);Fn=n(Ba,"num_hidden_layers"),Ba.forEach(t),Ln=n(Qt," ("),Et=a(Qt,"CODE",{});var Ja=s(Et);In=n(Ja,"int"),Ja.forEach(t),Nn=n(Qt,") \u2014 The number of blocks in the model."),Qt.forEach(t),me.forEach(t),qn=c(f),z=a(f,"DIV",{class:!0});var ue=s(z);p(we.$$.fragment,ue),Mn=c(ue),$e=a(ue,"P",{});var ao=s($e);Wn=n(ao,`Upload the configuration file to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),xt=a(ao,"CODE",{});var Ua=s(xt);Hn=n(Ua,"repo_path_or_name"),Ua.forEach(t),Vn=n(ao,"."),ao.forEach(t),Bn=c(ue),Tt=a(ue,"P",{});var Ra=s(Tt);Jn=n(Ra,"Examples:"),Ra.forEach(t),Un=c(ue),p(ke.$$.fragment,ue),ue.forEach(t),Rn=c(f),te=a(f,"DIV",{class:!0});var so=s(te);p(Pe.$$.fragment,so),Gn=c(so),I=a(so,"P",{});var ge=s(I);Xn=n(ge,"Checks whether the passed dictionary has a "),Dt=a(ge,"EM",{});var Ga=s(Dt);Kn=n(Ga,"torch_dtype"),Ga.forEach(t),Qn=n(ge,` key and if it\u2019s not None, converts torch.dtype to a string of just the type. 
For example, `),jt=a(ge,"CODE",{});var Xa=s(jt);Yn=n(Xa,"torch.float32"),Xa.forEach(t),Zn=n(ge," get converted into "),zt=a(ge,"EM",{});var Ka=s(zt);er=n(Ka,"\u201Cfloat32\u201D"),Ka.forEach(t),tr=n(ge,` string, which can then be stored in the json format.`),ge.forEach(t),so.forEach(t),or=c(f),oe=a(f,"DIV",{class:!0});var io=s(oe);p(Ce.$$.fragment,io),nr=c(io),Ee=a(io,"P",{});var co=s(Ee);rr=n(co,"Instantiates a "),Xe=a(co,"A",{href:!0});var Qa=s(Xe);ar=n(Qa,"PretrainedConfig"),Qa.forEach(t),sr=n(co," from a Python dictionary of parameters."),co.forEach(t),io.forEach(t),ir=c(f),ne=a(f,"DIV",{class:!0});var lo=s(ne);p(xe.$$.fragment,lo),dr=c(lo),Te=a(lo,"P",{});var fo=s(Te);cr=n(fo,"Instantiates a "),Ke=a(fo,"A",{href:!0});var Ya=s(Ke);lr=n(Ya,"PretrainedConfig"),Ya.forEach(t),fr=n(fo," from the path to a JSON file of parameters."),fo.forEach(t),lo.forEach(t),hr=c(f),T=a(f,"DIV",{class:!0});var H=s(T);p(De.$$.fragment,H),mr=c(H),je=a(H,"P",{});var ho=s(je);ur=n(ho,"Instantiate a "),Qe=a(ho,"A",{href:!0});var Za=s(Qe);gr=n(Za,"PretrainedConfig"),Za.forEach(t),pr=n(ho,` (or a derived class) from a pretrained model configuration.`),ho.forEach(t),_r=c(H),p(re.$$.fragment,H),br=c(H),Ot=a(H,"P",{});var es=s(Ot);vr=n(es,"Examples:"),es.forEach(t),yr=c(H),p(ze.$$.fragment,H),H.forEach(t),wr=c(f),ae=a(f,"DIV",{class:!0});var mo=s(ae);p(Oe.$$.fragment,mo),$r=c(mo),N=a(mo,"P",{});var pe=s(N);kr=n(pe,"From a "),At=a(pe,"CODE",{});var ts=s(At);Pr=n(ts,"pretrained_model_name_or_path"),ts.forEach(t),Cr=n(pe,`, resolve to a dictionary of parameters, to be used for instantiating a `),Ye=a(pe,"A",{href:!0});var os=s(Ye);Er=n(os,"PretrainedConfig"),os.forEach(t),xr=n(pe," using "),St=a(pe,"CODE",{});var ns=s(St);Tr=n(ns,"from_dict"),ns.forEach(t),Dr=n(pe,"."),pe.forEach(t),mo.forEach(t),jr=c(f),se=a(f,"DIV",{class:!0});var uo=s(se);p(Ae.$$.fragment,uo),zr=c(uo),B=a(uo,"P",{});var et=s(B);Or=n(et,"Save a configuration object to the directory "),Ft=a(et,"CODE",{});var rs=s(Ft);Ar=n(rs,"save_directory"),rs.forEach(t),Sr=n(et,`, so that it can be re-loaded using the `),Ze=a(et,"A",{href:!0});var as=s(Ze);Fr=n(as,"from_pretrained()"),as.forEach(t),Lr=n(et," class method."),et.forEach(t),uo.forEach(t),Ir=c(f),ie=a(f,"DIV",{class:!0});var go=s(ie);p(Se.$$.fragment,go),Nr=c(go),Lt=a(go,"P",{});var ss=s(Lt);qr=n(ss,"Serializes this instance to a Python dictionary."),ss.forEach(t),go.forEach(t),Mr=c(f),de=a(f,"DIV",{class:!0});var po=s(de);p(Fe.$$.fragment,po),Wr=c(po),It=a(po,"P",{});var is=s(It);Hr=n(is,`Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.`),is.forEach(t),po.forEach(t),Vr=c(f),ce=a(f,"DIV",{class:!0});var _o=s(ce);p(Le.$$.fragment,_o),Br=c(_o),Nt=a(_o,"P",{});var ds=s(Nt);Jr=n(ds,"Save this instance to a JSON file."),ds.forEach(t),_o.forEach(t),Ur=c(f),le=a(f,"DIV",{class:!0});var bo=s(le);p(Ie.$$.fragment,bo),Rr=c(bo),qt=a(bo,"P",{});var cs=s(qt);Gr=n(cs,"Serializes this instance to a JSON string."),cs.forEach(t),bo.forEach(t),Xr=c(f),fe=a(f,"DIV",{class:!0});var vo=s(fe);p(Ne.$$.fragment,vo),Kr=c(vo),qe=a(vo,"P",{});var yo=s(qe);Qr=n(yo,"Updates attributes of this class with attributes from "),Mt=a(yo,"CODE",{});var ls=s(Mt);Yr=n(ls,"config_dict"),ls.forEach(t),Zr=n(yo,"."),yo.forEach(t),vo.forEach(t),ea=c(f),O=a(f,"DIV",{class:!0});var _e=s(O);p(Me.$$.fragment,_e),ta=c(_e),We=a(_e,"P",{});var wo=s(We);oa=n(wo,"Updates attributes of this class with attributes from "),Wt=a(wo,"CODE",{});var 
fs=s(Wt);na=n(fs,"update_str"),fs.forEach(t),ra=n(wo,"."),wo.forEach(t),aa=c(_e),J=a(_e,"P",{});var tt=s(J);sa=n(tt,"The expected format is ints, floats and strings as is, and for booleans use "),Ht=a(tt,"CODE",{});var hs=s(Ht);ia=n(hs,"true"),hs.forEach(t),da=n(tt," or "),Vt=a(tt,"CODE",{});var ms=s(Vt);ca=n(ms,"false"),ms.forEach(t),la=n(tt,`. For example: \u201Cn_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\u201D`),tt.forEach(t),fa=c(_e),Bt=a(_e,"P",{});var us=s(Bt);ha=n(us,"The keys to change have to already exist in the config object."),us.forEach(t),_e.forEach(t),f.forEach(t),this.h()},h(){h(m,"name","hf:doc:metadata"),h(m,"content",JSON.stringify(Cs)),h(w,"id","configuration"),h(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(w,"href","#configuration"),h(u,"class","relative group"),h(Be,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(R,"id","transformers.PretrainedConfig"),h(R,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(R,"href","#transformers.PretrainedConfig"),h(V,"class","relative group"),h(Je,"href","/docs/transformers/v4.15.0/en/model_doc/auto#transformers.AutoConfig"),h(Ue,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(Re,"href","/docs/transformers/v4.15.0/en/model_doc/encoderdecoder#transformers.EncoderDecoderConfig"),h(Ge,"href","/docs/transformers/v4.15.0/en/model_doc/rag#transformers.RagConfig"),h(z,"class","docstring"),h(te,"class","docstring"),h(Xe,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(oe,"class","docstring"),h(Ke,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(ne,"class","docstring"),h(Qe,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(T,"class","docstring"),h(Ye,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig"),h(ae,"class","docstring"),h(Ze,"href","/docs/transformers/v4.15.0/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained"),h(se,"class","docstring"),h(ie,"class","docstring"),h(de,"class","docstring"),h(ce,"class","docstring"),h(le,"class","docstring"),h(fe,"class","docstring"),h(O,"class","docstring"),h(l,"class","docstring")},m(i,k){e(document.head,m),A(i,C,k),A(i,u,k),e(u,w),e(w,j),_($,j,null),e(u,P),e(u,S),e(S,$o),A(i,Yt,k),A(i,U,k),e(U,ko),e(U,Be),e(Be,Po),e(U,Co),A(i,Zt,k),A(i,E,k),e(E,Eo),e(E,ot),e(ot,xo),e(E,To),e(E,nt),e(nt,Do),e(E,jo),e(E,rt),e(rt,zo),e(E,Oo),e(E,at),e(at,Ao),e(E,So),A(i,eo,k),A(i,V,k),e(V,R),e(R,st),_(ve,st,null),e(V,Fo),e(V,it),e(it,Lo),A(i,to,k),A(i,l,k),_(ye,l,null),e(l,Io),e(l,dt),e(dt,No),e(l,qo),_(G,l,null),e(l,Mo),e(l,ct),e(ct,Wo),e(l,Ho),e(l,F),e(F,M),e(M,lt),e(lt,Vo),e(M,Bo),e(M,ft),e(ft,Jo),e(M,Uo),e(M,Je),e(Je,Ro),e(M,Go),e(F,Xo),e(F,x),e(x,ht),e(ht,Ko),e(x,Qo),e(x,mt),e(mt,Yo),e(x,Zo),e(x,Ue),e(Ue,en),e(x,tn),e(x,Re),e(Re,on),e(x,nn),e(x,Ge),e(Ge,rn),e(x,an),e(F,sn),e(F,X),e(X,ut),e(ut,dn),e(X,cn),e(X,gt),e(gt,ln),e(X,fn),e(F,hn),e(F,K),e(K,pt),e(pt,mn),e(K,un),e(K,_t),e(_t,gn),e(K,pn),e(l,_n),e(l,bt),e(bt,bn),e(l,vn),e(l,L),e(L,Q),e(Q,vt),e(vt,yn),e(Q,wn),e(Q,yt),e(yt,$n),e(Q,kn),e(L,Pn),e(L,Y),e(Y,wt),e(wt,Cn
),e(Y,En),e(Y,$t),e($t,xn),e(Y,Tn),e(L,Dn),e(L,Z),e(Z,kt),e(kt,jn),e(Z,zn),e(Z,Pt),e(Pt,On),e(Z,An),e(L,Sn),e(L,ee),e(ee,Ct),e(Ct,Fn),e(ee,Ln),e(ee,Et),e(Et,In),e(ee,Nn),e(l,qn),e(l,z),_(we,z,null),e(z,Mn),e(z,$e),e($e,Wn),e($e,xt),e(xt,Hn),e($e,Vn),e(z,Bn),e(z,Tt),e(Tt,Jn),e(z,Un),_(ke,z,null),e(l,Rn),e(l,te),_(Pe,te,null),e(te,Gn),e(te,I),e(I,Xn),e(I,Dt),e(Dt,Kn),e(I,Qn),e(I,jt),e(jt,Yn),e(I,Zn),e(I,zt),e(zt,er),e(I,tr),e(l,or),e(l,oe),_(Ce,oe,null),e(oe,nr),e(oe,Ee),e(Ee,rr),e(Ee,Xe),e(Xe,ar),e(Ee,sr),e(l,ir),e(l,ne),_(xe,ne,null),e(ne,dr),e(ne,Te),e(Te,cr),e(Te,Ke),e(Ke,lr),e(Te,fr),e(l,hr),e(l,T),_(De,T,null),e(T,mr),e(T,je),e(je,ur),e(je,Qe),e(Qe,gr),e(je,pr),e(T,_r),_(re,T,null),e(T,br),e(T,Ot),e(Ot,vr),e(T,yr),_(ze,T,null),e(l,wr),e(l,ae),_(Oe,ae,null),e(ae,$r),e(ae,N),e(N,kr),e(N,At),e(At,Pr),e(N,Cr),e(N,Ye),e(Ye,Er),e(N,xr),e(N,St),e(St,Tr),e(N,Dr),e(l,jr),e(l,se),_(Ae,se,null),e(se,zr),e(se,B),e(B,Or),e(B,Ft),e(Ft,Ar),e(B,Sr),e(B,Ze),e(Ze,Fr),e(B,Lr),e(l,Ir),e(l,ie),_(Se,ie,null),e(ie,Nr),e(ie,Lt),e(Lt,qr),e(l,Mr),e(l,de),_(Fe,de,null),e(de,Wr),e(de,It),e(It,Hr),e(l,Vr),e(l,ce),_(Le,ce,null),e(ce,Br),e(ce,Nt),e(Nt,Jr),e(l,Ur),e(l,le),_(Ie,le,null),e(le,Rr),e(le,qt),e(qt,Gr),e(l,Xr),e(l,fe),_(Ne,fe,null),e(fe,Kr),e(fe,qe),e(qe,Qr),e(qe,Mt),e(Mt,Yr),e(qe,Zr),e(l,ea),e(l,O),_(Me,O,null),e(O,ta),e(O,We),e(We,oa),e(We,Wt),e(Wt,na),e(We,ra),e(O,aa),e(O,J),e(J,sa),e(J,Ht),e(Ht,ia),e(J,da),e(J,Vt),e(Vt,ca),e(J,la),e(O,fa),e(O,Bt),e(Bt,ha),oo=!0},p(i,[k]){const He={};k&2&&(He.$$scope={dirty:k,ctx:i}),G.$set(He);const Jt={};k&2&&(Jt.$$scope={dirty:k,ctx:i}),re.$set(Jt)},i(i){oo||(b($.$$.fragment,i),b(ve.$$.fragment,i),b(ye.$$.fragment,i),b(G.$$.fragment,i),b(we.$$.fragment,i),b(ke.$$.fragment,i),b(Pe.$$.fragment,i),b(Ce.$$.fragment,i),b(xe.$$.fragment,i),b(De.$$.fragment,i),b(re.$$.fragment,i),b(ze.$$.fragment,i),b(Oe.$$.fragment,i),b(Ae.$$.fragment,i),b(Se.$$.fragment,i),b(Fe.$$.fragment,i),b(Le.$$.fragment,i),b(Ie.$$.fragment,i),b(Ne.$$.fragment,i),b(Me.$$.fragment,i),oo=!0)},o(i){v($.$$.fragment,i),v(ve.$$.fragment,i),v(ye.$$.fragment,i),v(G.$$.fragment,i),v(we.$$.fragment,i),v(ke.$$.fragment,i),v(Pe.$$.fragment,i),v(Ce.$$.fragment,i),v(xe.$$.fragment,i),v(De.$$.fragment,i),v(re.$$.fragment,i),v(ze.$$.fragment,i),v(Oe.$$.fragment,i),v(Ae.$$.fragment,i),v(Se.$$.fragment,i),v(Fe.$$.fragment,i),v(Le.$$.fragment,i),v(Ie.$$.fragment,i),v(Ne.$$.fragment,i),v(Me.$$.fragment,i),oo=!1},d(i){t(m),i&&t(C),i&&t(u),y($),i&&t(Yt),i&&t(U),i&&t(Zt),i&&t(E),i&&t(eo),i&&t(V),y(ve),i&&t(to),i&&t(l),y(ye),y(G),y(we),y(ke),y(Pe),y(Ce),y(xe),y(De),y(re),y(ze),y(Oe),y(Ae),y(Se),y(Fe),y(Le),y(Ie),y(Ne),y(Me)}}}const Cs={local:"configuration",sections:[{local:"transformers.PretrainedConfig",title:"PretrainedConfig"}],title:"Configuration"};function Es(be,m,C){let{fw:u}=m;return be.$$set=w=>{"fw"in w&&C(0,u=w.fw)},[u]}class As extends bs{constructor(m){super();vs(this,m,Es,Ps,ys,{fw:0})}}export{As as default,Cs as metadata};
9,995
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/main_classes/logging.mdx-81a8bb50.js
import{S as Us,i as js,s as qs,e as o,k as g,w as u,t as n,L as zs,c as s,d as r,m as f,a as l,x as p,h as a,b as m,J as t,g as c,y as _,q as E,o as b,B as $}from"../../chunks/vendor-b1433968.js";import{T as Js}from"../../chunks/Tip-c3840994.js";import{D as W}from"../../chunks/Docstring-ff504c58.js";import{C as Ko}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Qo}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Ks(dt){let y,Y,R,d,I,P,X,Z,Ie,H,U,Oe,G,S,ee,te,Ne,L,ue,C,De,pe,T,_e,v,j,re,oe,Te,se,q,Se,V,Le;return{c(){y=o("p"),Y=n("\u{1F917} Transformers has following logging levels:"),R=g(),d=o("ul"),I=o("li"),P=n("50: "),X=o("code"),Z=n("transformers.logging.CRITICAL"),Ie=n(" or "),H=o("code"),U=n("transformers.logging.FATAL"),Oe=g(),G=o("li"),S=n("40: "),ee=o("code"),te=n("transformers.logging.ERROR"),Ne=g(),L=o("li"),ue=n("30: "),C=o("code"),De=n("transformers.logging.WARNING"),pe=n(" or "),T=o("code"),_e=n("transformers.logging.WARN"),v=g(),j=o("li"),re=n("20: "),oe=o("code"),Te=n("transformers.logging.INFO"),se=g(),q=o("li"),Se=n("10: "),V=o("code"),Le=n("transformers.logging.DEBUG")},l(w){y=s(w,"P",{});var F=l(y);Y=a(F,"\u{1F917} Transformers has following logging levels:"),F.forEach(r),R=f(w),d=s(w,"UL",{});var O=l(d);I=s(O,"LI",{});var le=l(I);P=a(le,"50: "),X=s(le,"CODE",{});var Ee=l(X);Z=a(Ee,"transformers.logging.CRITICAL"),Ee.forEach(r),Ie=a(le," or "),H=s(le,"CODE",{});var tt=l(H);U=a(tt,"transformers.logging.FATAL"),tt.forEach(r),le.forEach(r),Oe=f(O),G=s(O,"LI",{});var Ce=l(G);S=a(Ce,"40: "),ee=s(Ce,"CODE",{});var be=l(ee);te=a(be,"transformers.logging.ERROR"),be.forEach(r),Ce.forEach(r),Ne=f(O),L=s(O,"LI",{});var ne=l(L);ue=a(ne,"30: "),C=s(ne,"CODE",{});var rt=l(C);De=a(rt,"transformers.logging.WARNING"),rt.forEach(r),pe=a(ne," or "),T=s(ne,"CODE",{});var Fe=l(T);_e=a(Fe,"transformers.logging.WARN"),Fe.forEach(r),ne.forEach(r),v=f(O),j=s(O,"LI",{});var x=l(j);re=a(x,"20: "),oe=s(x,"CODE",{});var Pe=l(oe);Te=a(Pe,"transformers.logging.INFO"),Pe.forEach(r),x.forEach(r),se=f(O),q=s(O,"LI",{});var h=l(q);Se=a(h,"10: "),V=s(h,"CODE",{});var ot=l(V);Le=a(ot,"transformers.logging.DEBUG"),ot.forEach(r),h.forEach(r),O.forEach(r)},m(w,F){c(w,y,F),t(y,Y),c(w,R,F),c(w,d,F),t(d,I),t(I,P),t(I,X),t(X,Z),t(I,Ie),t(I,H),t(H,U),t(d,Oe),t(d,G),t(G,S),t(G,ee),t(ee,te),t(d,Ne),t(d,L),t(L,ue),t(L,C),t(C,De),t(L,pe),t(L,T),t(T,_e),t(d,v),t(d,j),t(j,re),t(j,oe),t(oe,Te),t(d,se),t(d,q),t(q,Se),t(q,V),t(V,Le)},d(w){w&&r(y),w&&r(R),w&&r(d)}}}function Qs(dt){let y,Y,R,d,I,P,X,Z,Ie,H,U,Oe,G,S,ee,te,Ne,L,ue,C,De,pe,T,_e,v,j,re,oe,Te,se,q,Se,V,Le,w,F,O,le,Ee,tt,Ce,be,ne,rt,Fe,x,Pe,h,ot,vt,wr,Ar,ht,Ir,Or,ut,Nr,Dr,pt,Tr,Sr,qt,Ge,zt,z,Lr,st,Cr,Fr,lt,Pr,Gr,Jt,N,$e,_t,Vr,xr,Et,Mr,kr,Br,nt,bt,Wr,Yr,Hr,ye,$t,Ur,jr,yt,qr,zr,Jr,at,Rt,Kr,Qr,Xr,it,wt,Zr,eo,Kt,ae,Re,At,Ve,to,It,ro,Qt,ie,xe,oo,Me,so,Ot,lo,no,Xt,ge,ke,ao,Be,io,Nt,go,fo,Zt,fe,We,co,Ye,mo,Dt,vo,ho,er,ce,He,uo,Ue,po,Tt,_o,Eo,tr,me,we,St,je,bo,Lt,$o,rr,M,qe,yo,Ct,Ro,wo,Ae,or,de,ze,Ao,Ft,Io,sr,k,Je,Oo,Pt,No,Do,Gt,To,lr,ve,Ke,So,Vt,Lo,nr,he,Qe,Co,xt,Fo,ar,A,Xe,Po,Mt,Go,Vo,kt,xo,Mo,Bt,ko,Bo,Wt,Wo,ir,B,Ze,Yo,Yt,Ho,Uo,Ht,jo,gr;return P=new Qo({}),T=new Ko({props:{code:`import transformers transformers.logging.set_verbosity_info(),`,highlighted:`<span class="hljs-keyword">import</span> transformers transformers.logging.set_verbosity_info()`}}),x=new Ko({props:{code:"TRANSFORMERS_VERBOSITY=error ./myprogram.py,",highlighted:"TRANSFORMERS_VERBOSITY=error ./myprogram.py"}}),Ge=new 
Ko({props:{code:"TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py,",highlighted:"TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py"}}),Ve=new Qo({}),xe=new W({props:{name:"transformers.utils.logging.set_verbosity_error",anchor:"transformers.utils.logging.set_verbosity_error",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L180"}}),ke=new W({props:{name:"transformers.utils.logging.set_verbosity_warning",anchor:"transformers.utils.logging.set_verbosity_warning",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L170"}}),We=new W({props:{name:"transformers.utils.logging.set_verbosity_info",anchor:"transformers.utils.logging.set_verbosity_info",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L165"}}),He=new W({props:{name:"transformers.utils.logging.set_verbosity_debug",anchor:"transformers.utils.logging.set_verbosity_debug",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L175"}}),je=new Qo({}),qe=new W({props:{name:"transformers.utils.logging.get_verbosity",anchor:"transformers.utils.logging.get_verbosity",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L123",returnDescription:` <p>The logging level.</p> `,returnType:` <p><code>int</code></p> `}}),Ae=new Js({props:{$$slots:{default:[Ks]},$$scope:{ctx:dt}}}),ze=new W({props:{name:"transformers.utils.logging.set_verbosity",anchor:"transformers.utils.logging.set_verbosity",parameters:[{name:"verbosity",val:": int"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L146",parametersDescription:[{anchor:"transformers.utils.logging.set_verbosity.verbosity",description:`<strong>verbosity</strong> (<code>int</code>) &#x2014; Logging level, e.g., one of:</p> <ul> <li><code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code></li> <li><code>transformers.logging.ERROR</code></li> <li><code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code></li> <li><code>transformers.logging.INFO</code></li> <li><code>transformers.logging.DEBUG</code></li> </ul>`,name:"verbosity"}]}}),Je=new W({props:{name:"transformers.utils.logging.get_logger",anchor:"transformers.utils.logging.get_logger",parameters:[{name:"name",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L109"}}),Ke=new W({props:{name:"transformers.utils.logging.enable_default_handler",anchor:"transformers.utils.logging.enable_default_handler",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L194"}}),Qe=new W({props:{name:"transformers.utils.logging.disable_default_handler",anchor:"transformers.utils.logging.disable_default_handler",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L185"}}),Xe=new W({props:{name:"transformers.utils.logging.enable_explicit_format",anchor:"transformers.utils.logging.enable_explicit_format",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L240"}}),Ze=new 
W({props:{name:"transformers.utils.logging.reset_format",anchor:"transformers.utils.logging.reset_format",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/utils/logging.py#L257"}}),{c(){y=o("meta"),Y=g(),R=o("h1"),d=o("a"),I=o("span"),u(P.$$.fragment),X=g(),Z=o("span"),Ie=n("Logging"),H=g(),U=o("p"),Oe=n("\u{1F917} Transformers has a centralized logging system, so that you can setup the verbosity of the library easily."),G=g(),S=o("p"),ee=n("Currently the default verbosity of the library is "),te=o("code"),Ne=n("WARNING"),L=n("."),ue=g(),C=o("p"),De=n(`To change the level of verbosity, just use one of the direct setters. For instance, here is how to change the verbosity to the INFO level.`),pe=g(),u(T.$$.fragment),_e=g(),v=o("p"),j=n("You can also use the environment variable "),re=o("code"),oe=n("TRANSFORMERS_VERBOSITY"),Te=n(` to override the default verbosity. You can set it to one of the following: `),se=o("code"),q=n("debug"),Se=n(", "),V=o("code"),Le=n("info"),w=n(", "),F=o("code"),O=n("warning"),le=n(", "),Ee=o("code"),tt=n("error"),Ce=n(", "),be=o("code"),ne=n("critical"),rt=n(". For example:"),Fe=g(),u(x.$$.fragment),Pe=g(),h=o("p"),ot=n("Additionally, some "),vt=o("code"),wr=n("warnings"),Ar=n(` can be disabled by setting the environment variable `),ht=o("code"),Ir=n("TRANSFORMERS_NO_ADVISORY_WARNINGS"),Or=n(" to a true value, like "),ut=o("em"),Nr=n("1"),Dr=n(`. This will disable any warning that is logged using `),pt=o("code"),Tr=n("logger.warning_advice"),Sr=n(". For example:"),qt=g(),u(Ge.$$.fragment),zt=g(),z=o("p"),Lr=n(`All the methods of this logging module are documented below, the main ones are `),st=o("a"),Cr=n("transformers.logging.get_verbosity()"),Fr=n(` to get the current level of verbosity in the logger and `),lt=o("a"),Pr=n("transformers.logging.set_verbosity()"),Gr=n(` to set the verbosity to the level of your choice. In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parenthesis) are:`),Jt=g(),N=o("ul"),$e=o("li"),_t=o("code"),Vr=n("transformers.logging.CRITICAL"),xr=n(" or "),Et=o("code"),Mr=n("transformers.logging.FATAL"),kr=n(` (int value, 50): only report the most critical errors.`),Br=g(),nt=o("li"),bt=o("code"),Wr=n("transformers.logging.ERROR"),Yr=n(" (int value, 40): only report errors."),Hr=g(),ye=o("li"),$t=o("code"),Ur=n("transformers.logging.WARNING"),jr=n(" or "),yt=o("code"),qr=n("transformers.logging.WARN"),zr=n(` (int value, 30): only reports error and warnings. 
This the default level used by the library.`),Jr=g(),at=o("li"),Rt=o("code"),Kr=n("transformers.logging.INFO"),Qr=n(" (int value, 20): reports error, warnings and basic information."),Xr=g(),it=o("li"),wt=o("code"),Zr=n("transformers.logging.DEBUG"),eo=n(" (int value, 10): report all information."),Kt=g(),ae=o("h2"),Re=o("a"),At=o("span"),u(Ve.$$.fragment),to=g(),It=o("span"),ro=n("Base setters"),Qt=g(),ie=o("div"),u(xe.$$.fragment),oo=g(),Me=o("p"),so=n("Set the verbosity to the "),Ot=o("code"),lo=n("ERROR"),no=n(" level."),Xt=g(),ge=o("div"),u(ke.$$.fragment),ao=g(),Be=o("p"),io=n("Set the verbosity to the "),Nt=o("code"),go=n("WARNING"),fo=n(" level."),Zt=g(),fe=o("div"),u(We.$$.fragment),co=g(),Ye=o("p"),mo=n("Set the verbosity to the "),Dt=o("code"),vo=n("INFO"),ho=n(" level."),er=g(),ce=o("div"),u(He.$$.fragment),uo=g(),Ue=o("p"),po=n("Set the verbosity to the "),Tt=o("code"),_o=n("DEBUG"),Eo=n(" level."),tr=g(),me=o("h2"),we=o("a"),St=o("span"),u(je.$$.fragment),bo=g(),Lt=o("span"),$o=n("Other functions"),rr=g(),M=o("div"),u(qe.$$.fragment),yo=g(),Ct=o("p"),Ro=n("Return the current level for the \u{1F917} Transformers\u2019s root logger as an int."),wo=g(),u(Ae.$$.fragment),or=g(),de=o("div"),u(ze.$$.fragment),Ao=g(),Ft=o("p"),Io=n("Set the verbosity level for the \u{1F917} Transformers\u2019s root logger."),sr=g(),k=o("div"),u(Je.$$.fragment),Oo=g(),Pt=o("p"),No=n("Return a logger with the specified name."),Do=g(),Gt=o("p"),To=n("This function is not supposed to be directly accessed unless you are writing a custom transformers module."),lr=g(),ve=o("div"),u(Ke.$$.fragment),So=g(),Vt=o("p"),Lo=n("Enable the default handler of the HuggingFace Transformers\u2019s root logger."),nr=g(),he=o("div"),u(Qe.$$.fragment),Co=g(),xt=o("p"),Fo=n("Disable the default handler of the HuggingFace Transformers\u2019s root logger."),ar=g(),A=o("div"),u(Xe.$$.fragment),Po=g(),Mt=o("p"),Go=n("Enable explicit formatting for every HuggingFace Transformers\u2019s logger. The explicit formatter is as follows:"),Vo=g(),kt=o("p"),xo=n("::"),Mo=g(),Bt=o("p"),ko=n("[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE"),Bo=g(),Wt=o("p"),Wo=n("All handlers currently bound to the root logger are affected by this method."),ir=g(),B=o("div"),u(Ze.$$.fragment),Yo=g(),Yt=o("p"),Ho=n("Resets the formatting for HuggingFace Transformers\u2019s loggers."),Uo=g(),Ht=o("p"),jo=n("All handlers currently bound to the root logger are affected by this method."),this.h()},l(e){const i=zs('[data-svelte="svelte-1phssyn"]',document.head);y=s(i,"META",{name:!0,content:!0}),i.forEach(r),Y=f(e),R=s(e,"H1",{class:!0});var et=l(R);d=s(et,"A",{id:!0,class:!0,href:!0});var Xo=l(d);I=s(Xo,"SPAN",{});var Zo=l(I);p(P.$$.fragment,Zo),Zo.forEach(r),Xo.forEach(r),X=f(et),Z=s(et,"SPAN",{});var es=l(Z);Ie=a(es,"Logging"),es.forEach(r),et.forEach(r),H=f(e),U=s(e,"P",{});var ts=l(U);Oe=a(ts,"\u{1F917} Transformers has a centralized logging system, so that you can setup the verbosity of the library easily."),ts.forEach(r),G=f(e),S=s(e,"P",{});var fr=l(S);ee=a(fr,"Currently the default verbosity of the library is "),te=s(fr,"CODE",{});var rs=l(te);Ne=a(rs,"WARNING"),rs.forEach(r),L=a(fr,"."),fr.forEach(r),ue=f(e),C=s(e,"P",{});var os=l(C);De=a(os,`To change the level of verbosity, just use one of the direct setters. 
For instance, here is how to change the verbosity to the INFO level.`),os.forEach(r),pe=f(e),p(T.$$.fragment,e),_e=f(e),v=s(e,"P",{});var D=l(v);j=a(D,"You can also use the environment variable "),re=s(D,"CODE",{});var ss=l(re);oe=a(ss,"TRANSFORMERS_VERBOSITY"),ss.forEach(r),Te=a(D,` to override the default verbosity. You can set it to one of the following: `),se=s(D,"CODE",{});var ls=l(se);q=a(ls,"debug"),ls.forEach(r),Se=a(D,", "),V=s(D,"CODE",{});var ns=l(V);Le=a(ns,"info"),ns.forEach(r),w=a(D,", "),F=s(D,"CODE",{});var as=l(F);O=a(as,"warning"),as.forEach(r),le=a(D,", "),Ee=s(D,"CODE",{});var is=l(Ee);tt=a(is,"error"),is.forEach(r),Ce=a(D,", "),be=s(D,"CODE",{});var gs=l(be);ne=a(gs,"critical"),gs.forEach(r),rt=a(D,". For example:"),D.forEach(r),Fe=f(e),p(x.$$.fragment,e),Pe=f(e),h=s(e,"P",{});var J=l(h);ot=a(J,"Additionally, some "),vt=s(J,"CODE",{});var fs=l(vt);wr=a(fs,"warnings"),fs.forEach(r),Ar=a(J,` can be disabled by setting the environment variable `),ht=s(J,"CODE",{});var cs=l(ht);Ir=a(cs,"TRANSFORMERS_NO_ADVISORY_WARNINGS"),cs.forEach(r),Or=a(J," to a true value, like "),ut=s(J,"EM",{});var ms=l(ut);Nr=a(ms,"1"),ms.forEach(r),Dr=a(J,`. This will disable any warning that is logged using `),pt=s(J,"CODE",{});var ds=l(pt);Tr=a(ds,"logger.warning_advice"),ds.forEach(r),Sr=a(J,". For example:"),J.forEach(r),qt=f(e),p(Ge.$$.fragment,e),zt=f(e),z=s(e,"P",{});var gt=l(z);Lr=a(gt,`All the methods of this logging module are documented below, the main ones are `),st=s(gt,"A",{href:!0});var vs=l(st);Cr=a(vs,"transformers.logging.get_verbosity()"),vs.forEach(r),Fr=a(gt,` to get the current level of verbosity in the logger and `),lt=s(gt,"A",{href:!0});var hs=l(lt);Pr=a(hs,"transformers.logging.set_verbosity()"),hs.forEach(r),Gr=a(gt,` to set the verbosity to the level of your choice. In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parenthesis) are:`),gt.forEach(r),Jt=f(e),N=s(e,"UL",{});var K=l(N);$e=s(K,"LI",{});var Ut=l($e);_t=s(Ut,"CODE",{});var us=l(_t);Vr=a(us,"transformers.logging.CRITICAL"),us.forEach(r),xr=a(Ut," or "),Et=s(Ut,"CODE",{});var ps=l(Et);Mr=a(ps,"transformers.logging.FATAL"),ps.forEach(r),kr=a(Ut,` (int value, 50): only report the most critical errors.`),Ut.forEach(r),Br=f(K),nt=s(K,"LI",{});var qo=l(nt);bt=s(qo,"CODE",{});var _s=l(bt);Wr=a(_s,"transformers.logging.ERROR"),_s.forEach(r),Yr=a(qo," (int value, 40): only report errors."),qo.forEach(r),Hr=f(K),ye=s(K,"LI",{});var jt=l(ye);$t=s(jt,"CODE",{});var Es=l($t);Ur=a(Es,"transformers.logging.WARNING"),Es.forEach(r),jr=a(jt," or "),yt=s(jt,"CODE",{});var bs=l(yt);qr=a(bs,"transformers.logging.WARN"),bs.forEach(r),zr=a(jt,` (int value, 30): only reports error and warnings. 
This the default level used by the library.`),jt.forEach(r),Jr=f(K),at=s(K,"LI",{});var zo=l(at);Rt=s(zo,"CODE",{});var $s=l(Rt);Kr=a($s,"transformers.logging.INFO"),$s.forEach(r),Qr=a(zo," (int value, 20): reports error, warnings and basic information."),zo.forEach(r),Xr=f(K),it=s(K,"LI",{});var Jo=l(it);wt=s(Jo,"CODE",{});var ys=l(wt);Zr=a(ys,"transformers.logging.DEBUG"),ys.forEach(r),eo=a(Jo," (int value, 10): report all information."),Jo.forEach(r),K.forEach(r),Kt=f(e),ae=s(e,"H2",{class:!0});var cr=l(ae);Re=s(cr,"A",{id:!0,class:!0,href:!0});var Rs=l(Re);At=s(Rs,"SPAN",{});var ws=l(At);p(Ve.$$.fragment,ws),ws.forEach(r),Rs.forEach(r),to=f(cr),It=s(cr,"SPAN",{});var As=l(It);ro=a(As,"Base setters"),As.forEach(r),cr.forEach(r),Qt=f(e),ie=s(e,"DIV",{class:!0});var mr=l(ie);p(xe.$$.fragment,mr),oo=f(mr),Me=s(mr,"P",{});var dr=l(Me);so=a(dr,"Set the verbosity to the "),Ot=s(dr,"CODE",{});var Is=l(Ot);lo=a(Is,"ERROR"),Is.forEach(r),no=a(dr," level."),dr.forEach(r),mr.forEach(r),Xt=f(e),ge=s(e,"DIV",{class:!0});var vr=l(ge);p(ke.$$.fragment,vr),ao=f(vr),Be=s(vr,"P",{});var hr=l(Be);io=a(hr,"Set the verbosity to the "),Nt=s(hr,"CODE",{});var Os=l(Nt);go=a(Os,"WARNING"),Os.forEach(r),fo=a(hr," level."),hr.forEach(r),vr.forEach(r),Zt=f(e),fe=s(e,"DIV",{class:!0});var ur=l(fe);p(We.$$.fragment,ur),co=f(ur),Ye=s(ur,"P",{});var pr=l(Ye);mo=a(pr,"Set the verbosity to the "),Dt=s(pr,"CODE",{});var Ns=l(Dt);vo=a(Ns,"INFO"),Ns.forEach(r),ho=a(pr," level."),pr.forEach(r),ur.forEach(r),er=f(e),ce=s(e,"DIV",{class:!0});var _r=l(ce);p(He.$$.fragment,_r),uo=f(_r),Ue=s(_r,"P",{});var Er=l(Ue);po=a(Er,"Set the verbosity to the "),Tt=s(Er,"CODE",{});var Ds=l(Tt);_o=a(Ds,"DEBUG"),Ds.forEach(r),Eo=a(Er," level."),Er.forEach(r),_r.forEach(r),tr=f(e),me=s(e,"H2",{class:!0});var br=l(me);we=s(br,"A",{id:!0,class:!0,href:!0});var Ts=l(we);St=s(Ts,"SPAN",{});var Ss=l(St);p(je.$$.fragment,Ss),Ss.forEach(r),Ts.forEach(r),bo=f(br),Lt=s(br,"SPAN",{});var Ls=l(Lt);$o=a(Ls,"Other functions"),Ls.forEach(r),br.forEach(r),rr=f(e),M=s(e,"DIV",{class:!0});var ft=l(M);p(qe.$$.fragment,ft),yo=f(ft),Ct=s(ft,"P",{});var Cs=l(Ct);Ro=a(Cs,"Return the current level for the \u{1F917} Transformers\u2019s root logger as an int."),Cs.forEach(r),wo=f(ft),p(Ae.$$.fragment,ft),ft.forEach(r),or=f(e),de=s(e,"DIV",{class:!0});var $r=l(de);p(ze.$$.fragment,$r),Ao=f($r),Ft=s($r,"P",{});var Fs=l(Ft);Io=a(Fs,"Set the verbosity level for the \u{1F917} Transformers\u2019s root logger."),Fs.forEach(r),$r.forEach(r),sr=f(e),k=s(e,"DIV",{class:!0});var ct=l(k);p(Je.$$.fragment,ct),Oo=f(ct),Pt=s(ct,"P",{});var Ps=l(Pt);No=a(Ps,"Return a logger with the specified name."),Ps.forEach(r),Do=f(ct),Gt=s(ct,"P",{});var Gs=l(Gt);To=a(Gs,"This function is not supposed to be directly accessed unless you are writing a custom transformers module."),Gs.forEach(r),ct.forEach(r),lr=f(e),ve=s(e,"DIV",{class:!0});var yr=l(ve);p(Ke.$$.fragment,yr),So=f(yr),Vt=s(yr,"P",{});var Vs=l(Vt);Lo=a(Vs,"Enable the default handler of the HuggingFace Transformers\u2019s root logger."),Vs.forEach(r),yr.forEach(r),nr=f(e),he=s(e,"DIV",{class:!0});var Rr=l(he);p(Qe.$$.fragment,Rr),Co=f(Rr),xt=s(Rr,"P",{});var xs=l(xt);Fo=a(xs,"Disable the default handler of the HuggingFace Transformers\u2019s root logger."),xs.forEach(r),Rr.forEach(r),ar=f(e),A=s(e,"DIV",{class:!0});var Q=l(A);p(Xe.$$.fragment,Q),Po=f(Q),Mt=s(Q,"P",{});var Ms=l(Mt);Go=a(Ms,"Enable explicit formatting for every HuggingFace Transformers\u2019s logger. 
The explicit formatter is as follows:"),Ms.forEach(r),Vo=f(Q),kt=s(Q,"P",{});var ks=l(kt);xo=a(ks,"::"),ks.forEach(r),Mo=f(Q),Bt=s(Q,"P",{});var Bs=l(Bt);ko=a(Bs,"[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE"),Bs.forEach(r),Bo=f(Q),Wt=s(Q,"P",{});var Ws=l(Wt);Wo=a(Ws,"All handlers currently bound to the root logger are affected by this method."),Ws.forEach(r),Q.forEach(r),ir=f(e),B=s(e,"DIV",{class:!0});var mt=l(B);p(Ze.$$.fragment,mt),Yo=f(mt),Yt=s(mt,"P",{});var Ys=l(Yt);Ho=a(Ys,"Resets the formatting for HuggingFace Transformers\u2019s loggers."),Ys.forEach(r),Uo=f(mt),Ht=s(mt,"P",{});var Hs=l(Ht);jo=a(Hs,"All handlers currently bound to the root logger are affected by this method."),Hs.forEach(r),mt.forEach(r),this.h()},h(){m(y,"name","hf:doc:metadata"),m(y,"content",JSON.stringify(Xs)),m(d,"id","logging"),m(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(d,"href","#logging"),m(R,"class","relative group"),m(st,"href","/docs/transformers/v4.15.0/en/main_classes/logging#transformers.utils.logging.get_verbosity"),m(lt,"href","/docs/transformers/v4.15.0/en/main_classes/logging#transformers.utils.logging.set_verbosity"),m(Re,"id","transformers.utils.logging.set_verbosity_error"),m(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Re,"href","#transformers.utils.logging.set_verbosity_error"),m(ae,"class","relative group"),m(ie,"class","docstring"),m(ge,"class","docstring"),m(fe,"class","docstring"),m(ce,"class","docstring"),m(we,"id","transformers.utils.logging.get_verbosity"),m(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(we,"href","#transformers.utils.logging.get_verbosity"),m(me,"class","relative 
group"),m(M,"class","docstring"),m(de,"class","docstring"),m(k,"class","docstring"),m(ve,"class","docstring"),m(he,"class","docstring"),m(A,"class","docstring"),m(B,"class","docstring")},m(e,i){t(document.head,y),c(e,Y,i),c(e,R,i),t(R,d),t(d,I),_(P,I,null),t(R,X),t(R,Z),t(Z,Ie),c(e,H,i),c(e,U,i),t(U,Oe),c(e,G,i),c(e,S,i),t(S,ee),t(S,te),t(te,Ne),t(S,L),c(e,ue,i),c(e,C,i),t(C,De),c(e,pe,i),_(T,e,i),c(e,_e,i),c(e,v,i),t(v,j),t(v,re),t(re,oe),t(v,Te),t(v,se),t(se,q),t(v,Se),t(v,V),t(V,Le),t(v,w),t(v,F),t(F,O),t(v,le),t(v,Ee),t(Ee,tt),t(v,Ce),t(v,be),t(be,ne),t(v,rt),c(e,Fe,i),_(x,e,i),c(e,Pe,i),c(e,h,i),t(h,ot),t(h,vt),t(vt,wr),t(h,Ar),t(h,ht),t(ht,Ir),t(h,Or),t(h,ut),t(ut,Nr),t(h,Dr),t(h,pt),t(pt,Tr),t(h,Sr),c(e,qt,i),_(Ge,e,i),c(e,zt,i),c(e,z,i),t(z,Lr),t(z,st),t(st,Cr),t(z,Fr),t(z,lt),t(lt,Pr),t(z,Gr),c(e,Jt,i),c(e,N,i),t(N,$e),t($e,_t),t(_t,Vr),t($e,xr),t($e,Et),t(Et,Mr),t($e,kr),t(N,Br),t(N,nt),t(nt,bt),t(bt,Wr),t(nt,Yr),t(N,Hr),t(N,ye),t(ye,$t),t($t,Ur),t(ye,jr),t(ye,yt),t(yt,qr),t(ye,zr),t(N,Jr),t(N,at),t(at,Rt),t(Rt,Kr),t(at,Qr),t(N,Xr),t(N,it),t(it,wt),t(wt,Zr),t(it,eo),c(e,Kt,i),c(e,ae,i),t(ae,Re),t(Re,At),_(Ve,At,null),t(ae,to),t(ae,It),t(It,ro),c(e,Qt,i),c(e,ie,i),_(xe,ie,null),t(ie,oo),t(ie,Me),t(Me,so),t(Me,Ot),t(Ot,lo),t(Me,no),c(e,Xt,i),c(e,ge,i),_(ke,ge,null),t(ge,ao),t(ge,Be),t(Be,io),t(Be,Nt),t(Nt,go),t(Be,fo),c(e,Zt,i),c(e,fe,i),_(We,fe,null),t(fe,co),t(fe,Ye),t(Ye,mo),t(Ye,Dt),t(Dt,vo),t(Ye,ho),c(e,er,i),c(e,ce,i),_(He,ce,null),t(ce,uo),t(ce,Ue),t(Ue,po),t(Ue,Tt),t(Tt,_o),t(Ue,Eo),c(e,tr,i),c(e,me,i),t(me,we),t(we,St),_(je,St,null),t(me,bo),t(me,Lt),t(Lt,$o),c(e,rr,i),c(e,M,i),_(qe,M,null),t(M,yo),t(M,Ct),t(Ct,Ro),t(M,wo),_(Ae,M,null),c(e,or,i),c(e,de,i),_(ze,de,null),t(de,Ao),t(de,Ft),t(Ft,Io),c(e,sr,i),c(e,k,i),_(Je,k,null),t(k,Oo),t(k,Pt),t(Pt,No),t(k,Do),t(k,Gt),t(Gt,To),c(e,lr,i),c(e,ve,i),_(Ke,ve,null),t(ve,So),t(ve,Vt),t(Vt,Lo),c(e,nr,i),c(e,he,i),_(Qe,he,null),t(he,Co),t(he,xt),t(xt,Fo),c(e,ar,i),c(e,A,i),_(Xe,A,null),t(A,Po),t(A,Mt),t(Mt,Go),t(A,Vo),t(A,kt),t(kt,xo),t(A,Mo),t(A,Bt),t(Bt,ko),t(A,Bo),t(A,Wt),t(Wt,Wo),c(e,ir,i),c(e,B,i),_(Ze,B,null),t(B,Yo),t(B,Yt),t(Yt,Ho),t(B,Uo),t(B,Ht),t(Ht,jo),gr=!0},p(e,[i]){const et={};i&2&&(et.$$scope={dirty:i,ctx:e}),Ae.$set(et)},i(e){gr||(E(P.$$.fragment,e),E(T.$$.fragment,e),E(x.$$.fragment,e),E(Ge.$$.fragment,e),E(Ve.$$.fragment,e),E(xe.$$.fragment,e),E(ke.$$.fragment,e),E(We.$$.fragment,e),E(He.$$.fragment,e),E(je.$$.fragment,e),E(qe.$$.fragment,e),E(Ae.$$.fragment,e),E(ze.$$.fragment,e),E(Je.$$.fragment,e),E(Ke.$$.fragment,e),E(Qe.$$.fragment,e),E(Xe.$$.fragment,e),E(Ze.$$.fragment,e),gr=!0)},o(e){b(P.$$.fragment,e),b(T.$$.fragment,e),b(x.$$.fragment,e),b(Ge.$$.fragment,e),b(Ve.$$.fragment,e),b(xe.$$.fragment,e),b(ke.$$.fragment,e),b(We.$$.fragment,e),b(He.$$.fragment,e),b(je.$$.fragment,e),b(qe.$$.fragment,e),b(Ae.$$.fragment,e),b(ze.$$.fragment,e),b(Je.$$.fragment,e),b(Ke.$$.fragment,e),b(Qe.$$.fragment,e),b(Xe.$$.fragment,e),b(Ze.$$.fragment,e),gr=!1},d(e){r(y),e&&r(Y),e&&r(R),$(P),e&&r(H),e&&r(U),e&&r(G),e&&r(S),e&&r(ue),e&&r(C),e&&r(pe),$(T,e),e&&r(_e),e&&r(v),e&&r(Fe),$(x,e),e&&r(Pe),e&&r(h),e&&r(qt),$(Ge,e),e&&r(zt),e&&r(z),e&&r(Jt),e&&r(N),e&&r(Kt),e&&r(ae),$(Ve),e&&r(Qt),e&&r(ie),$(xe),e&&r(Xt),e&&r(ge),$(ke),e&&r(Zt),e&&r(fe),$(We),e&&r(er),e&&r(ce),$(He),e&&r(tr),e&&r(me),$(je),e&&r(rr),e&&r(M),$(qe),$(Ae),e&&r(or),e&&r(de),$(ze),e&&r(sr),e&&r(k),$(Je),e&&r(lr),e&&r(ve),$(Ke),e&&r(nr),e&&r(he),$(Qe),e&&r(ar),e&&r(A),$(Xe),e&&r(ir),e&&r(B),$(Ze)}}}const 
Xs={local:"logging",sections:[{local:"transformers.utils.logging.set_verbosity_error",title:"Base setters"},{local:"transformers.utils.logging.get_verbosity",title:"Other functions"}],title:"Logging"};function Zs(dt,y,Y){let{fw:R}=y;return dt.$$set=d=>{"fw"in d&&Y(0,R=d.fw)},[R]}class nl extends Us{constructor(y){super();js(this,y,Zs,Qs,qs,{fw:0})}}export{nl as default,Xs as metadata};
9,996
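The bundle above renders the `main_classes/logging` page of the transformers v4.15.0 docs. As a quick cross-check of the API those embedded docstrings describe, here is a minimal usage sketch (not part of the dump itself): the function names and level constants are taken from the docstrings in the minified source, and the calls assume transformers v4.15.0 as given in the source links. The same page also documents the environment-variable overrides `TRANSFORMERS_VERBOSITY=error ./myprogram.py` and `TRANSFORMERS_NO_ADVISORY_WARNINGS=1`.

```python
# Minimal sketch of the verbosity API documented in the logging page bundled above
# (transformers v4.15.0). Names mirror the docstrings embedded in the minified JS;
# verify against an installed transformers before relying on exact behaviour.
import transformers
from transformers.utils import logging

# Default library verbosity is WARNING (int value 30).
print(logging.get_verbosity())

# Direct setters from the "Base setters" section: error / warning / info / debug.
logging.set_verbosity_info()

# Equivalent explicit call using the documented level constants.
logging.set_verbosity(transformers.logging.DEBUG)

# Module-scoped logger, as described for custom transformers modules.
logger = logging.get_logger("transformers.models.bert")
logger.info("verbosity configured")

# Formatting helpers from the "Other functions" section:
# explicit format is [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE.
logging.enable_explicit_format()
logging.reset_format()
```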
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/main_classes/tokenizer.mdx-27b3fec7.js
import{S as zv,i as xv,s as Ev,e as r,k as d,w as h,t as n,L as $v,c as a,d as t,m as c,a as s,x as p,h as o,b as l,J as e,g as x,y as m,q as f,o as u,B as _}from"../../chunks/vendor-b1433968.js";import{T as Tv}from"../../chunks/Tip-c3840994.js";import{D as v}from"../../chunks/Docstring-ff504c58.js";import{C as wv}from"../../chunks/CodeBlock-a320dbd7.js";import{I as qi}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Pv(pt){let E,C;return{c(){E=r("p"),C=n(`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`)},l($){E=a($,"P",{});var P=s(E);C=o(P,`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`),P.forEach(t)},m($,P){x($,E,P),e(E,C)},d($){$&&t(E)}}}function qv(pt){let E,C;return{c(){E=r("p"),C=n(`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`)},l($){E=a($,"P",{});var P=s(E);C=o(P,`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`),P.forEach(t)},m($,P){x($,E,P),e(E,C)},d($){$&&t(E)}}}function Dv(pt){let E,C,$,P,Io,mt,Di,Lo,Ii,$s,Oe,Li,ft,Bi,Fi,Ps,Se,Bo,Ai,Ci,Fo,Ni,qs,N,Oi,In,Si,Wi,Ln,Ri,Ui,Bn,ji,Vi,Fn,Hi,Mi,Ds,Ee,An,Gi,Xi,Cn,Yi,Ji,Is,de,Ao,Ki,Qi,Co,Zi,ed,No,td,Ls,q,Nn,nd,od,On,rd,ad,Oo,sd,id,So,dd,cd,Wo,ld,hd,Ro,pd,md,Uo,fd,ud,ut,_d,gd,Bs,$e,We,jo,_t,kd,Vo,vd,Fs,g,gt,bd,Ho,yd,Td,kt,wd,Sn,zd,xd,Ed,Mo,$d,Pd,Go,qd,Dd,Xo,Id,Ld,F,ce,Yo,Bd,Fd,Jo,Ad,Cd,Ko,Nd,Od,Sd,O,Qo,Wd,Rd,Zo,Ud,jd,er,Vd,Hd,tr,Md,Gd,nr,Xd,Yd,Jd,G,or,Kd,Qd,rr,Zd,ec,ar,tc,nc,sr,oc,rc,ac,S,ir,sc,ic,dr,dc,cc,cr,lc,hc,lr,pc,mc,Wn,fc,uc,_c,Re,hr,gc,kc,pr,vc,bc,yc,X,mr,Tc,wc,fr,zc,xc,ur,Ec,$c,_r,Pc,qc,Dc,Ue,vt,Ic,gr,Lc,Bc,je,bt,Fc,kr,Ac,Cc,le,yt,Nc,vr,Oc,Sc,Tt,Wc,br,Rc,Uc,jc,he,wt,Vc,yr,Hc,Mc,zt,Gc,Tr,Xc,Yc,Jc,Y,xt,Kc,Et,Qc,wr,Zc,el,tl,zr,nl,ol,$t,rl,Ve,Pt,al,xr,sl,il,He,qt,dl,Er,cl,ll,Me,Dt,hl,$r,pl,ml,pe,It,fl,Pr,ul,_l,Ge,gl,me,Lt,kl,qr,vl,bl,Pe,yl,Dr,Tl,wl,Ir,zl,xl,El,fe,Bt,$l,Lr,Pl,ql,Br,Dl,As,qe,Xe,Fr,Ft,Il,Ar,Ll,Cs,J,Bl,Rn,Fl,Al,At,Cl,Nl,Un,Ol,Sl,Ns,k,Ct,Wl,Cr,Rl,Ul,Nt,jl,jn,Vl,Hl,Ml,Nr,Gl,Xl,Or,Yl,Jl,Sr,Kl,Ql,A,ue,Wr,Zl,eh,Rr,th,nh,Ur,oh,rh,ah,W,jr,sh,ih,Vr,dh,ch,Hr,lh,hh,Mr,ph,mh,Gr,fh,uh,_h,K,Xr,gh,kh,Yr,vh,bh,Jr,yh,Th,Kr,wh,zh,xh,R,Qr,Eh,$h,Zr,Ph,qh,ea,Dh,Ih,ta,Lh,Bh,Vn,Fh,Ah,Ch,Ye,na,Nh,Oh,oa,Sh,Wh,Rh,Q,ra,Uh,jh,aa,Vh,Hh,sa,Mh,Gh,ia,Xh,Yh,Jh,Je,Ot,Kh,da,Qh,Zh,Ke,St,ep,ca,tp,np,_e,Wt,op,la,rp,ap,Rt,sp,ha,ip,dp,cp,ge,Ut,lp,pa,hp,pp,jt,mp,ma,fp,up,_p,Z,Vt,gp,Ht,kp,fa,vp,bp,yp,ua,Tp,wp,Mt,zp,Qe,Gt,xp,_a,Ep,$p,Ze,Xt,Pp,ga,qp,Dp,et,Yt,Ip,ka,Lp,Bp,ke,Jt,Fp,va,Ap,Cp,tt,Np,ve,Kt,Op,ba,Sp,Wp,ya,Rp,Up,nt,Qt,jp,Ta,Vp,Os,De,ot,wa,Zt,Hp,za,Mp,Ss,b,en,Gp,Ie,Xp,Hn,Yp,Jp,xa,Kp,Qp,Zp,Ea,em,tm,U,tn,nm,$a,om,rm,Pa,am,sm,nn,Mn,qa,im,dm,cm,Gn,Da,lm,hm,pm,Ia,mm,fm,j,on,um,La,_m,gm,Ba,km,vm,rn,Xn,Fa,bm,ym,Tm,Yn,Aa,wm,zm,xm,Ca,Em,$m,rt,an,Pm,Na,qm,Dm,be,sn,Im,Oa,Lm,Bm,Le,Jn,Sa,Fm,Am,Cm,Kn,Wa,Nm,Om,Sm,Qn,Ra,Wm,Rm,Um,at,dn,jm,cn,Vm,Ua,Hm,Mm,Gm,L,ln,Xm,ja,Ym,Jm,hn,Km,Zn,Qm,Zm,ef,pn,eo,Va,tf,nf,of,to,Ha,rf,af,sf,Ma,df,cf,mn,no,Ga,lf,hf,pf,oo,Xa,mf,ff,uf,V,fn,_f,Be,gf,Ya,kf,vf,Ja,bf,yf,Tf,Ka,wf,zf,un,ro,Qa,xf,Ef,$f,ao,Za,Pf,qf,Df,es,If,Lf,H,_n,Bf,ts,Ff,Af,ns,Cf,Nf,gn,so,os,Of,Sf,Wf,io,rs,Rf,Uf,jf,as,Vf,Hf,st,kn,Mf,ss,Gf,Xf,it,vn,Yf,is,Jf,Kf,B,bn,Qf,ds,Zf,eu,cs,tu,nu,yn,ls,ou,ru,hs,au,su,ps,iu,du,Tn,co,ms,cu,lu,hu,lo,fs,pu,mu,fu,D,wn,uu,us,_u,gu,zn
,ku,ho,vu,bu,yu,xn,po,_s,Tu,wu,zu,mo,gs,xu,Eu,$u,ks,Pu,qu,En,fo,vs,Du,Iu,Lu,uo,bs,Bu,Fu,Au,ys,Cu,Nu,dt,$n,Ou,Ts,Su,Ws;return mt=new qi({}),_t=new qi({}),gt=new v({props:{name:"class transformers.PreTrainedTokenizer",anchor:"transformers.PreTrainedTokenizer",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L326",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>). padding_side &#x2014; (<code>str</code>, <em>optional</em>): The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"model_max_length"},{anchor:"transformers.PreTrainedTokenizer.model_input_names",description:`<strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.`,name:"model_input_names"},{anchor:"transformers.PreTrainedTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.`,name:"bos_token"},{anchor:"transformers.PreTrainedTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.`,name:"eos_token"},{anchor:"transformers.PreTrainedTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.`,name:"unk_token"},{anchor:"transformers.PreTrainedTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.`,name:"sep_token"},{anchor:"transformers.PreTrainedTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.`,name:"pad_token"},{anchor:"transformers.PreTrainedTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.`,name:"cls_token"},{anchor:"transformers.PreTrainedTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.`,name:"mask_token"},{anchor:"transformers.PreTrainedTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.`,name:"additional_special_tokens"}]}}),vt=new v({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2334",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. 
Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),bt=new v({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerBase.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3178",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),yt=new v({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerBase.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3211",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerBase.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),wt=new v({props:{name:"encode",anchor:"transformers.PreTrainedTokenizerBase.encode",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2143",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.encode.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.encode.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.encode.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.encode.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.encode.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.encode.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.encode.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>The tokenized ids of the text.</p> `,returnType:` <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> `}}),xt=new v({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/file_utils.py#L2482",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add tokenizer&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your tokenizer (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your tokenizer in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),$t=new wv({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to your namespace with the name "my-finetuned-bert" with no local clone. tokenizer.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the tokenizer to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. tokenizer.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. 
tokenizer.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert"),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),Pt=new v({props:{name:"convert_ids_to_tokens",anchor:"transformers.PreTrainedTokenizer.convert_ids_to_tokens",parameters:[{name:"ids",val:": typing.Union[int, typing.List[int]]"},{name:"skip_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L874",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.convert_ids_to_tokens.ids",description:`<strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizer.convert_ids_to_tokens.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"}],returnDescription:` <p>The decoded token(s).</p> `,returnType:` <p><code>str</code> or <code>List[str]</code></p> `}}),qt=new v({props:{name:"convert_tokens_to_ids",anchor:"transformers.PreTrainedTokenizer.convert_tokens_to_ids",parameters:[{name:"tokens",val:": typing.Union[str, typing.List[str]]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L553",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.convert_tokens_to_ids.tokens",description:"<strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).",name:"tokens"}],returnDescription:` <p>The token id or list of token ids.</p> `,returnType:` <p><code>int</code> or <code>List[int]</code></p> `}}),Dt=new v({props:{name:"get_added_vocab",anchor:"transformers.PreTrainedTokenizer.get_added_vocab",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L362",returnDescription:` <p>The added tokens.</p> `,returnType:` 
<p><code>Dict[str, int]</code></p> `}}),It=new v({props:{name:"num_special_tokens_to_add",anchor:"transformers.PreTrainedTokenizer.num_special_tokens_to_add",parameters:[{name:"pair",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L451",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.num_special_tokens_to_add.pair",description:`<strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.`,name:"pair"}],returnDescription:` <p>Number of special tokens added to sequences.</p> `,returnType:` <p><code>int</code></p> `}}),Ge=new Tv({props:{$$slots:{default:[Pv]},$$scope:{ctx:pt}}}),Lt=new v({props:{name:"prepare_for_tokenization",anchor:"transformers.PreTrainedTokenizer.prepare_for_tokenization",parameters:[{name:"text",val:": str"},{name:"is_split_into_words",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L812",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.prepare_for_tokenization.text",description:`<strong>text</strong> (<code>str</code>) &#x2014; The text to prepare.`,name:"text"},{anchor:"transformers.PreTrainedTokenizer.prepare_for_tokenization.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. kwargs &#x2014; Keyword arguments to use for the tokenization.`,name:"is_split_into_words"}],returnDescription:` <p>The prepared text and the unused kwargs.</p> `,returnType:` <p><code>Tuple[str, Dict[str, Any]]</code></p> `}}),Bt=new v({props:{name:"tokenize",anchor:"transformers.PreTrainedTokenizer.tokenize",parameters:[{name:"text",val:": str"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils.py#L474",parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.tokenize.text",description:`<strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.`,name:"text"},{anchor:"transformers.PreTrainedTokenizer.tokenize.*kwargs",description:`*<strong>*kwargs</strong> (additional keyword arguments) &#x2014; Passed along to the model-specific <code>prepare_for_tokenization</code> preprocessing method.`,name:"*kwargs"}],returnDescription:` <p>The list of tokens.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Ft=new qi({}),Ct=new v({props:{name:"class transformers.PreTrainedTokenizerFast",anchor:"transformers.PreTrainedTokenizerFast",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L76",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. 
When the tokenizer is loaded with <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>). padding_side &#x2014; (<code>str</code>, <em>optional</em>): The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"model_max_length"},{anchor:"transformers.PreTrainedTokenizerFast.model_input_names",description:`<strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.`,name:"model_input_names"},{anchor:"transformers.PreTrainedTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.`,name:"bos_token"},{anchor:"transformers.PreTrainedTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.`,name:"eos_token"},{anchor:"transformers.PreTrainedTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.`,name:"unk_token"},{anchor:"transformers.PreTrainedTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.`,name:"sep_token"},{anchor:"transformers.PreTrainedTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.`,name:"pad_token"},{anchor:"transformers.PreTrainedTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). 
Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.`,name:"cls_token"},{anchor:"transformers.PreTrainedTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.`,name:"mask_token"},{anchor:"transformers.PreTrainedTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.`,name:"additional_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.tokenizer_object",description:`<strong>tokenizer_object</strong> (<code>tokenizers.Tokenizer</code>) &#x2014; A <code>tokenizers.Tokenizer</code> object from &#x1F917; tokenizers to instantiate from. See <a href="../fast_tokenizers">Using tokenizers from &#x1F917; tokenizers</a> for more information.`,name:"tokenizer_object"},{anchor:"transformers.PreTrainedTokenizerFast.tokenizer_file",description:`<strong>tokenizer_file</strong> (<code>str</code>) &#x2014; A path to a local JSON file representing a previously serialized <code>tokenizers.Tokenizer</code> object from &#x1F917; tokenizers.`,name:"tokenizer_file"}]}}),Ot=new v({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2334",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),St=new v({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerBase.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3178",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Wt=new v({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerBase.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L3211",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerBase.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),Ut=new v({props:{name:"encode",anchor:"transformers.PreTrainedTokenizerBase.encode",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L2143",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.encode.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.encode.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.encode.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.encode.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.encode.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.encode.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.encode.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>The tokenized ids of the text.</p> `,returnType:` <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> `}}),Vt=new v({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/file_utils.py#L2482",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add tokenizer&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your tokenizer (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your tokenizer in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),Mt=new wv({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to your namespace with the name "my-finetuned-bert" with no local clone. tokenizer.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the tokenizer to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. tokenizer.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. 
tokenizer.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert"),`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),Gt=new v({props:{name:"convert_ids_to_tokens",anchor:"transformers.PreTrainedTokenizerFast.convert_ids_to_tokens",parameters:[{name:"ids",val:": typing.Union[int, typing.List[int]]"},{name:"skip_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L290",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.ids",description:`<strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"}],returnDescription:` <p>The decoded token(s).</p> `,returnType:` <p><code>str</code> or <code>List[str]</code></p> `}}),Xt=new v({props:{name:"convert_tokens_to_ids",anchor:"transformers.PreTrainedTokenizerFast.convert_tokens_to_ids",parameters:[{name:"tokens",val:": typing.Union[str, typing.List[str]]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L232",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.tokens",description:"<strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).",name:"tokens"}],returnDescription:` <p>The token id or list of token ids.</p> `,returnType:` <p><code>int</code> or <code>List[int]</code></p> `}}),Yt=new v({props:{name:"get_added_vocab",anchor:"transformers.PreTrainedTokenizerFast.get_added_vocab",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L153",returnDescription:` 
<p>The added tokens.</p> `,returnType:` <p><code>Dict[str, int]</code></p> `}}),Jt=new v({props:{name:"num_special_tokens_to_add",anchor:"transformers.PreTrainedTokenizerFast.num_special_tokens_to_add",parameters:[{name:"pair",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L269",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.pair",description:`<strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.`,name:"pair"}],returnDescription:` <p>Number of special tokens added to sequences.</p> `,returnType:` <p><code>int</code></p> `}}),tt=new Tv({props:{$$slots:{default:[qv]},$$scope:{ctx:pt}}}),Kt=new v({props:{name:"set_truncation_and_padding",anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding",parameters:[{name:"padding_strategy",val:": PaddingStrategy"},{name:"truncation_strategy",val:": TruncationStrategy"},{name:"max_length",val:": int"},{name:"stride",val:": int"},{name:"pad_to_multiple_of",val:": typing.Optional[int]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L319",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.padding_strategy",description:`<strong>padding_strategy</strong> (<a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>) &#x2014; The kind of padding that will be applied to the input`,name:"padding_strategy"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.truncation_strategy",description:`<strong>truncation_strategy</strong> (<a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>) &#x2014; The kind of truncation that will be applied to the input`,name:"truncation_strategy"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum size of a sequence.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.stride",description:`<strong>stride</strong> (<code>int</code>) &#x2014; The stride to use when handling overflow.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"}]}}),Qt=new v({props:{name:"train_new_from_iterator",anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator",parameters:[{name:"text_iterator",val:""},{name:"vocab_size",val:""},{name:"new_special_tokens",val:" = None"},{name:"special_tokens_map",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_fast.py#L587",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.text_iterator",description:`<strong>text_iterator</strong> (generator of <code>List[str]</code>) &#x2014; The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory.`,name:"text_iterator"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary you want for your tokenizer.`,name:"vocab_size"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.new_special_tokens",description:`<strong>new_special_tokens</strong> (list of <code>str</code> or <code>AddedToken</code>, <em>optional</em>) &#x2014; A list of new special tokens to add to the tokenizer you are training.`,name:"new_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.special_tokens_map",description:`<strong>special_tokens_map</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. 
kwargs &#x2014; Additional keyword arguments passed along to the trainer from the &#x1F917; Tokenizers library.`,name:"special_tokens_map"}],returnDescription:` <p>A new tokenizer of the same type as the original one, trained on <code>text_iterator</code>.</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast" >PreTrainedTokenizerFast</a></p> `}}),Zt=new qi({}),en=new v({props:{name:"class transformers.BatchEncoding",anchor:"transformers.BatchEncoding",parameters:[{name:"data",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"encoding",val:": typing.Union[tokenizers.Encoding, typing.Sequence[tokenizers.Encoding], NoneType] = None"},{name:"tensor_type",val:": typing.Union[NoneType, str, transformers.file_utils.TensorType] = None"},{name:"prepend_batch_axis",val:": bool = False"},{name:"n_sequences",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L163",parametersDescription:[{anchor:"transformers.BatchEncoding.data",description:`<strong>data</strong> (<code>dict</code>) &#x2014; Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods (&#x2018;input_ids&#x2019;, &#x2018;attention_mask&#x2019;, etc.).`,name:"data"},{anchor:"transformers.BatchEncoding.encoding",description:`<strong>encoding</strong> (<code>tokenizers.Encoding</code> or <code>Sequence[tokenizers.Encoding]</code>, <em>optional</em>) &#x2014; If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character space to token space the <code>tokenizers.Encoding</code> instance or list of instance (for batches) hold this information.`,name:"encoding"},{anchor:"transformers.BatchEncoding.tensor_type",description:`<strong>tensor_type</strong> (<code>Union[None, str, TensorType]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.`,name:"tensor_type"},{anchor:"transformers.BatchEncoding.prepend_batch_axis",description:`<strong>prepend_batch_axis</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add a batch axis when converting to tensors (see <code>tensor_type</code> above).`,name:"prepend_batch_axis"},{anchor:"transformers.BatchEncoding.n_sequences",description:`<strong>n_sequences</strong> (<code>Optional[int]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.`,name:"n_sequences"}]}}),tn=new v({props:{name:"char_to_token",anchor:"transformers.BatchEncoding.char_to_token",parameters:[{name:"batch_or_char_index",val:": int"},{name:"char_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L520",parametersDescription:[{anchor:"transformers.BatchEncoding.char_to_token.batch_or_char_index",description:`<strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence`,name:"batch_or_char_index"},{anchor:"transformers.BatchEncoding.char_to_token.char_index",description:`<strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.`,name:"char_index"},{anchor:"transformers.BatchEncoding.char_to_token.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.`,name:"sequence_index"}],returnDescription:` <p>Index of the token.</p> `,returnType:` <p><code>int</code></p> `}}),on=new v({props:{name:"char_to_word",anchor:"transformers.BatchEncoding.char_to_word",parameters:[{name:"batch_or_char_index",val:": int"},{name:"char_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L606",parametersDescription:[{anchor:"transformers.BatchEncoding.char_to_word.batch_or_char_index",description:`<strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the character in the original string.`,name:"batch_or_char_index"},{anchor:"transformers.BatchEncoding.char_to_word.char_index",description:`<strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the character in the original string.`,name:"char_index"},{anchor:"transformers.BatchEncoding.char_to_word.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.`,name:"sequence_index"}],returnDescription:` <p>Index or indices of the associated encoded token(s).</p> `,returnType:` <p><code>int</code> or <code>List[int]</code></p> `}}),an=new v({props:{name:"convert_to_tensors",anchor:"transformers.BatchEncoding.convert_to_tensors",parameters:[{name:"tensor_type",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"prepend_batch_axis",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L645",parametersDescription:[{anchor:"transformers.BatchEncoding.convert_to_tensors.tensor_type",description:`<strong>tensor_type</strong> (<code>str</code> or <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; The type of tensors to use. If <code>str</code>, should be one of the values of the enum <a href="/docs/transformers/v4.15.0/en/internal/file_utils#transformers.TensorType">TensorType</a>. 
If <code>None</code>, no modification is done.`,name:"tensor_type"},{anchor:"transformers.BatchEncoding.convert_to_tensors.prepend_batch_axis",description:`<strong>prepend_batch_axis</strong> (<code>int</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the batch dimension during the conversion.`,name:"prepend_batch_axis"}]}}),sn=new v({props:{name:"sequence_ids",anchor:"transformers.BatchEncoding.sequence_ids",parameters:[{name:"batch_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L298",parametersDescription:[{anchor:"transformers.BatchEncoding.sequence_ids.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],returnDescription:` <p>A list indicating the sequence id corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding sequence.</p> `,returnType:` <p><code>List[Optional[int]]</code></p> `}}),dn=new v({props:{name:"to",anchor:"transformers.BatchEncoding.to",parameters:[{name:"device",val:": typing.Union[str, ForwardRef('torch.device')]"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L728",parametersDescription:[{anchor:"transformers.BatchEncoding.to.device",description:"<strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to put the tensors on.",name:"device"}],returnDescription:` <p>The same instance after modification.</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),ln=new v({props:{name:"token_to_chars",anchor:"transformers.BatchEncoding.token_to_chars",parameters:[{name:"batch_or_token_index",val:": int"},{name:"token_index",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L484",parametersDescription:[{anchor:"transformers.BatchEncoding.token_to_chars.batch_or_token_index",description:`<strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.`,name:"batch_or_token_index"},{anchor:"transformers.BatchEncoding.token_to_chars.token_index",description:`<strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token or tokens in the sequence.`,name:"token_index"}],returnDescription:` <p>Span of characters in the original string.</p> `,returnType:` <p><a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.CharSpan" >CharSpan</a></p> `}}),fn=new v({props:{name:"token_to_sequence",anchor:"transformers.BatchEncoding.token_to_sequence",parameters:[{name:"batch_or_token_index",val:": int"},{name:"token_index",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L356",parametersDescription:[{anchor:"transformers.BatchEncoding.token_to_sequence.batch_or_token_index",description:`<strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprises one sequence, this can be the index of the token in the sequence.`,name:"batch_or_token_index"},{anchor:"transformers.BatchEncoding.token_to_sequence.token_index",description:`<strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.`,name:"token_index"}],returnDescription:` <p>Index of the word in the input sequence.</p> `,returnType:` <p><code>int</code></p> `}}),_n=new v({props:{name:"token_to_word",anchor:"transformers.BatchEncoding.token_to_word",parameters:[{name:"batch_or_token_index",val:": int"},{name:"token_index",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L395",parametersDescription:[{anchor:"transformers.BatchEncoding.token_to_word.batch_or_token_index",description:`<strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.`,name:"batch_or_token_index"},{anchor:"transformers.BatchEncoding.token_to_word.token_index",description:`<strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.`,name:"token_index"}],returnDescription:` <p>Index of the word in the input sequence.</p> `,returnType:` <p><code>int</code></p> `}}),kn=new v({props:{name:"tokens",anchor:"transformers.BatchEncoding.tokens",parameters:[{name:"batch_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L283",parametersDescription:[{anchor:"transformers.BatchEncoding.tokens.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],returnDescription:` <p>The list of tokens at that index.</p> `,returnType:` <p><code>List[str]</code></p> `}}),vn=new v({props:{name:"word_ids",anchor:"transformers.BatchEncoding.word_ids",parameters:[{name:"batch_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L340",parametersDescription:[{anchor:"transformers.BatchEncoding.word_ids.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],returnDescription:` <p>A list indicating the word corresponding to each token. 
Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> `,returnType:` <p><code>List[Optional[int]]</code></p> `}}),bn=new v({props:{name:"word_to_chars",anchor:"transformers.BatchEncoding.word_to_chars",parameters:[{name:"batch_or_word_index",val:": int"},{name:"word_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L561",parametersDescription:[{anchor:"transformers.BatchEncoding.word_to_chars.batch_or_word_index",description:`<strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence`,name:"batch_or_word_index"},{anchor:"transformers.BatchEncoding.word_to_chars.word_index",description:`<strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.`,name:"word_index"},{anchor:"transformers.BatchEncoding.word_to_chars.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.`,name:"sequence_index"}],returnDescription:` <p>Span(s) of the associated character or characters in the string. CharSpan are NamedTuple with:</p> <ul> <li>start: index of the first character associated to the token in the original string</li> <li>end: index of the character following the last character associated to the token in the original string</li> </ul> `,returnType:` <p><code>CharSpan</code> or <code>List[CharSpan]</code></p> `}}),wn=new v({props:{name:"word_to_tokens",anchor:"transformers.BatchEncoding.word_to_tokens",parameters:[{name:"batch_or_word_index",val:": int"},{name:"word_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L433",parametersDescription:[{anchor:"transformers.BatchEncoding.word_to_tokens.batch_or_word_index",description:`<strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence.`,name:"batch_or_word_index"},{anchor:"transformers.BatchEncoding.word_to_tokens.word_index",description:`<strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.`,name:"word_index"},{anchor:"transformers.BatchEncoding.word_to_tokens.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.`,name:"sequence_index"}],returnDescription:` <p>Optional <a href="/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.TokenSpan" >TokenSpan</a> Span of tokens in the encoded sequence. 
Returns <code>None</code> if no tokens correspond to the word.</p> `}}),$n=new v({props:{name:"words",anchor:"transformers.BatchEncoding.words",parameters:[{name:"batch_index",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/tokenization_utils_base.py#L319",parametersDescription:[{anchor:"transformers.BatchEncoding.words.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],returnDescription:` <p>A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> `,returnType:` <p><code>List[Optional[int]]</code></p> `}}),{c(){E=r("meta"),C=d(),$=r("h1"),P=r("a"),Io=r("span"),h(mt.$$.fragment),Di=d(),Lo=r("span"),Ii=n("Tokenizer"),$s=d(),Oe=r("p"),Li=n(`A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a \u201CFast\u201D implementation based on the Rust library `),ft=r("a"),Bi=n("tokenizers"),Fi=n(". The \u201CFast\u201D implementations allows:"),Ps=d(),Se=r("ol"),Bo=r("li"),Ai=n("a significant speed-up in particular when doing batched tokenization and"),Ci=d(),Fo=r("li"),Ni=n(`additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token). Currently no \u201CFast\u201D implementation is available for the SentencePiece-based tokenizers (for T5, ALBERT, CamemBERT, XLM-RoBERTa and XLNet models).`),qs=d(),N=r("p"),Oi=n("The base classes "),In=r("a"),Si=n("PreTrainedTokenizer"),Wi=n(" and "),Ln=r("a"),Ri=n("PreTrainedTokenizerFast"),Ui=n(` implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and \u201CFast\u201D tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository). 
They both rely on `),Bn=r("a"),ji=n("PreTrainedTokenizerBase"),Vi=n(` that contains the common methods, and `),Fn=r("a"),Hi=n("SpecialTokensMixin"),Mi=n("."),Ds=d(),Ee=r("p"),An=r("a"),Gi=n("PreTrainedTokenizer"),Xi=n(" and "),Cn=r("a"),Yi=n("PreTrainedTokenizerFast"),Ji=n(` thus implement the main methods for using all the tokenizers:`),Is=d(),de=r("ul"),Ao=r("li"),Ki=n(`Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers).`),Qi=d(),Co=r("li"),Zi=n("Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece\u2026)."),ed=d(),No=r("li"),td=n(`Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.`),Ls=d(),q=r("p"),Nn=r("a"),nd=n("BatchEncoding"),od=n(` holds the output of the `),On=r("a"),rd=n("PreTrainedTokenizerBase"),ad=n("\u2019s encoding methods ("),Oo=r("code"),sd=n("__call__"),id=n(`, `),So=r("code"),dd=n("encode_plus"),cd=n(" and "),Wo=r("code"),ld=n("batch_encode_plus"),hd=n(`) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (`),Ro=r("code"),pd=n("input_ids"),md=n(", "),Uo=r("code"),fd=n("attention_mask"),ud=n(`\u2026). When the tokenizer is a \u201CFast\u201D tokenizer (i.e., backed by HuggingFace `),ut=r("a"),_d=n("tokenizers library"),gd=n(`), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token).`),Bs=d(),$e=r("h2"),We=r("a"),jo=r("span"),h(_t.$$.fragment),kd=d(),Vo=r("span"),vd=n("PreTrainedTokenizer"),Fs=d(),g=r("div"),h(gt.$$.fragment),bd=d(),Ho=r("p"),yd=n("Base class for all slow tokenizers."),Td=d(),kt=r("p"),wd=n("Inherits from "),Sn=r("a"),zd=n("PreTrainedTokenizerBase"),xd=n("."),Ed=d(),Mo=r("p"),$d=n(`Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.`),Pd=d(),Go=r("p"),qd=n(`This class also contain the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),Dd=d(),Xo=r("p"),Id=n("Class attributes (overridden by derived classes)"),Ld=d(),F=r("ul"),ce=r("li"),Yo=r("strong"),Bd=n("vocab_files_names"),Fd=n(" ("),Jo=r("code"),Ad=n("Dict[str, str]"),Cd=n(") \u2014 A dictionary with, as keys, the "),Ko=r("code"),Nd=n("__init__"),Od=n(` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Sd=d(),O=r("li"),Qo=r("strong"),Wd=n("pretrained_vocab_files_map"),Rd=n(" ("),Zo=r("code"),Ud=n("Dict[str, Dict[str, str]]"),jd=n(`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),er=r("code"),Vd=n("__init__"),Hd=n(` keyword name of each vocabulary file required by the model, the low-level being the `),tr=r("code"),Md=n("short-cut-names"),Gd=n(` of the pretrained models with, as associated values, the 
`),nr=r("code"),Xd=n("url"),Yd=n(" to the associated pretrained vocabulary file."),Jd=d(),G=r("li"),or=r("strong"),Kd=n("max_model_input_sizes"),Qd=n(" ("),rr=r("code"),Zd=n("Dict[str, Optional[int]]"),ec=n(`) \u2014 A dictionary with, as keys, the `),ar=r("code"),tc=n("short-cut-names"),nc=n(` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),sr=r("code"),oc=n("None"),rc=n(" if the model has no maximum input size."),ac=d(),S=r("li"),ir=r("strong"),sc=n("pretrained_init_configuration"),ic=n(" ("),dr=r("code"),dc=n("Dict[str, Dict[str, Any]]"),cc=n(`) \u2014 A dictionary with, as keys, the `),cr=r("code"),lc=n("short-cut-names"),hc=n(` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),lr=r("code"),pc=n("__init__"),mc=n(` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Wn=r("a"),fc=n("from_pretrained()"),uc=n(` method.`),_c=d(),Re=r("li"),hr=r("strong"),gc=n("model_input_names"),kc=n(" ("),pr=r("code"),vc=n("List[str]"),bc=n(") \u2014 A list of inputs expected in the forward pass of the model."),yc=d(),X=r("li"),mr=r("strong"),Tc=n("padding_side"),wc=n(" ("),fr=r("code"),zc=n("str"),xc=n(`) \u2014 The default value for the side on which the model should have padding applied. Should be `),ur=r("code"),Ec=n("'right'"),$c=n(" or "),_r=r("code"),Pc=n("'left'"),qc=n("."),Dc=d(),Ue=r("div"),h(vt.$$.fragment),Ic=d(),gr=r("p"),Lc=n(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Bc=d(),je=r("div"),h(bt.$$.fragment),Fc=d(),kr=r("p"),Ac=n("Convert a list of lists of token ids into a list of strings by calling decode."),Cc=d(),le=r("div"),h(yt.$$.fragment),Nc=d(),vr=r("p"),Oc=n(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Sc=d(),Tt=r("p"),Wc=n("Similar to doing "),br=r("code"),Rc=n("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Uc=n("."),jc=d(),he=r("div"),h(wt.$$.fragment),Vc=d(),yr=r("p"),Hc=n("Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Mc=d(),zt=r("p"),Gc=n("Same as doing "),Tr=r("code"),Xc=n("self.convert_tokens_to_ids(self.tokenize(text))"),Yc=n("."),Jc=d(),Y=r("div"),h(xt.$$.fragment),Kc=d(),Et=r("p"),Qc=n(`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),wr=r("code"),Zc=n("repo_path_or_name"),el=n("."),tl=d(),zr=r("p"),nl=n("Examples:"),ol=d(),h($t.$$.fragment),rl=d(),Ve=r("div"),h(Pt.$$.fragment),al=d(),xr=r("p"),sl=n(`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),il=d(),He=r("div"),h(qt.$$.fragment),dl=d(),Er=r("p"),cl=n(`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),ll=d(),Me=r("div"),h(Dt.$$.fragment),hl=d(),$r=r("p"),pl=n("Returns the added tokens in the vocabulary as a dictionary of token to index."),ml=d(),pe=r("div"),h(It.$$.fragment),fl=d(),Pr=r("p"),ul=n("Returns the number of added tokens when encoding a sequence with special tokens."),_l=d(),h(Ge.$$.fragment),gl=d(),me=r("div"),h(Lt.$$.fragment),kl=d(),qr=r("p"),vl=n("Performs any necessary transformations before tokenization."),bl=d(),Pe=r("p"),yl=n("This method should pop the arguments from kwargs and return the 
remaining "),Dr=r("code"),Tl=n("kwargs"),wl=n(` as well. We test the `),Ir=r("code"),zl=n("kwargs"),xl=n(" at the end of the encoding process to be sure all the arguments have been used."),El=d(),fe=r("div"),h(Bt.$$.fragment),$l=d(),Lr=r("p"),Pl=n("Converts a string in a sequence of tokens, using the tokenizer."),ql=d(),Br=r("p"),Dl=n(`Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens.`),As=d(),qe=r("h2"),Xe=r("a"),Fr=r("span"),h(Ft.$$.fragment),Il=d(),Ar=r("span"),Ll=n("PreTrainedTokenizerFast"),Cs=d(),J=r("p"),Bl=n("The "),Rn=r("a"),Fl=n("PreTrainedTokenizerFast"),Al=n(" depend on the "),At=r("a"),Cl=n("tokenizers"),Nl=n(` library. The tokenizers obtained from the \u{1F917} tokenizers library can be loaded very simply into \u{1F917} transformers. Take a look at the `),Un=r("a"),Ol=n("Using tokenizers from \u{1F917} tokenizers"),Sl=n(" page to understand how this is done."),Ns=d(),k=r("div"),h(Ct.$$.fragment),Wl=d(),Cr=r("p"),Rl=n("Base class for all fast tokenizers (wrapping HuggingFace tokenizers library)."),Ul=d(),Nt=r("p"),jl=n("Inherits from "),jn=r("a"),Vl=n("PreTrainedTokenizerBase"),Hl=n("."),Ml=d(),Nr=r("p"),Gl=n(`Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.`),Xl=d(),Or=r("p"),Yl=n(`This class also contains the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),Jl=d(),Sr=r("p"),Kl=n("Class attributes (overridden by derived classes)"),Ql=d(),A=r("ul"),ue=r("li"),Wr=r("strong"),Zl=n("vocab_files_names"),eh=n(" ("),Rr=r("code"),th=n("Dict[str, str]"),nh=n(") \u2014 A dictionary with, as keys, the "),Ur=r("code"),oh=n("__init__"),rh=n(` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),ah=d(),W=r("li"),jr=r("strong"),sh=n("pretrained_vocab_files_map"),ih=n(" ("),Vr=r("code"),dh=n("Dict[str, Dict[str, str]]"),ch=n(`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),Hr=r("code"),lh=n("__init__"),hh=n(` keyword name of each vocabulary file required by the model, the low-level being the `),Mr=r("code"),ph=n("short-cut-names"),mh=n(` of the pretrained models with, as associated values, the `),Gr=r("code"),fh=n("url"),uh=n(" to the associated pretrained vocabulary file."),_h=d(),K=r("li"),Xr=r("strong"),gh=n("max_model_input_sizes"),kh=n(" ("),Yr=r("code"),vh=n("Dict[str, Optional[int]]"),bh=n(`) \u2014 A dictionary with, as keys, the `),Jr=r("code"),yh=n("short-cut-names"),Th=n(` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),Kr=r("code"),wh=n("None"),zh=n(" if the model has no maximum input size."),xh=d(),R=r("li"),Qr=r("strong"),Eh=n("pretrained_init_configuration"),$h=n(" ("),Zr=r("code"),Ph=n("Dict[str, Dict[str, Any]]"),qh=n(`) \u2014 A dictionary with, as keys, the `),ea=r("code"),Dh=n("short-cut-names"),Ih=n(` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),ta=r("code"),Lh=n("__init__"),Bh=n(` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Vn=r("a"),Fh=n("from_pretrained()"),Ah=n(` 
method.`),Ch=d(),Ye=r("li"),na=r("strong"),Nh=n("model_input_names"),Oh=n(" ("),oa=r("code"),Sh=n("List[str]"),Wh=n(") \u2014 A list of inputs expected in the forward pass of the model."),Rh=d(),Q=r("li"),ra=r("strong"),Uh=n("padding_side"),jh=n(" ("),aa=r("code"),Vh=n("str"),Hh=n(`) \u2014 The default value for the side on which the model should have padding applied. Should be `),sa=r("code"),Mh=n("'right'"),Gh=n(" or "),ia=r("code"),Xh=n("'left'"),Yh=n("."),Jh=d(),Je=r("div"),h(Ot.$$.fragment),Kh=d(),da=r("p"),Qh=n(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Zh=d(),Ke=r("div"),h(St.$$.fragment),ep=d(),ca=r("p"),tp=n("Convert a list of lists of token ids into a list of strings by calling decode."),np=d(),_e=r("div"),h(Wt.$$.fragment),op=d(),la=r("p"),rp=n(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),ap=d(),Rt=r("p"),sp=n("Similar to doing "),ha=r("code"),ip=n("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),dp=n("."),cp=d(),ge=r("div"),h(Ut.$$.fragment),lp=d(),pa=r("p"),hp=n("Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),pp=d(),jt=r("p"),mp=n("Same as doing "),ma=r("code"),fp=n("self.convert_tokens_to_ids(self.tokenize(text))"),up=n("."),_p=d(),Z=r("div"),h(Vt.$$.fragment),gp=d(),Ht=r("p"),kp=n(`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),fa=r("code"),vp=n("repo_path_or_name"),bp=n("."),yp=d(),ua=r("p"),Tp=n("Examples:"),wp=d(),h(Mt.$$.fragment),zp=d(),Qe=r("div"),h(Gt.$$.fragment),xp=d(),_a=r("p"),Ep=n(`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),$p=d(),Ze=r("div"),h(Xt.$$.fragment),Pp=d(),ga=r("p"),qp=n(`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),Dp=d(),et=r("div"),h(Yt.$$.fragment),Ip=d(),ka=r("p"),Lp=n("Returns the added tokens in the vocabulary as a dictionary of token to index."),Bp=d(),ke=r("div"),h(Jt.$$.fragment),Fp=d(),va=r("p"),Ap=n("Returns the number of added tokens when encoding a sequence with special tokens."),Cp=d(),h(tt.$$.fragment),Np=d(),ve=r("div"),h(Kt.$$.fragment),Op=d(),ba=r("p"),Sp=n(`Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.`),Wp=d(),ya=r("p"),Rp=n(`The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section.`),Up=d(),nt=r("div"),h(Qt.$$.fragment),jp=d(),Ta=r("p"),Vp=n(`Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one.`),Os=d(),De=r("h2"),ot=r("a"),wa=r("span"),h(Zt.$$.fragment),Hp=d(),za=r("span"),Mp=n("BatchEncoding"),Ss=d(),b=r("div"),h(en.$$.fragment),Gp=d(),Ie=r("p"),Xp=n("Holds the output of the "),Hn=r("a"),Yp=n("encode_plus()"),Jp=n(` and `),xa=r("code"),Kp=n("batch_encode"),Qp=n(` methods (tokens, attention_masks, etc).`),Zp=d(),Ea=r("p"),em=n(`This class is derived from a python dictionary and can be used as a dictionary. 
In addition, this class exposes utility methods to map from word/character space to token space.`),tm=d(),U=r("div"),h(tn.$$.fragment),nm=d(),$a=r("p"),om=n(`Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch.`),rm=d(),Pa=r("p"),am=n("Can be called as:"),sm=d(),nn=r("ul"),Mn=r("li"),qa=r("code"),im=n("self.char_to_token(char_index)"),dm=n(" if batch size is 1"),cm=d(),Gn=r("li"),Da=r("code"),lm=n("self.char_to_token(batch_index, char_index)"),hm=n(" if batch size is greater or equal to 1"),pm=d(),Ia=r("p"),mm=n(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),fm=d(),j=r("div"),h(on.$$.fragment),um=d(),La=r("p"),_m=n(`Get the word in the original string corresponding to a character in the original string of a sequence of the batch.`),gm=d(),Ba=r("p"),km=n("Can be called as:"),vm=d(),rn=r("ul"),Xn=r("li"),Fa=r("code"),bm=n("self.char_to_word(char_index)"),ym=n(" if batch size is 1"),Tm=d(),Yn=r("li"),Aa=r("code"),wm=n("self.char_to_word(batch_index, char_index)"),zm=n(" if batch size is greater than 1"),xm=d(),Ca=r("p"),Em=n(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),$m=d(),rt=r("div"),h(an.$$.fragment),Pm=d(),Na=r("p"),qm=n("Convert the inner content to tensors."),Dm=d(),be=r("div"),h(sn.$$.fragment),Im=d(),Oa=r("p"),Lm=n("Return a list mapping the tokens to the id of their original sentences:"),Bm=d(),Le=r("ul"),Jn=r("li"),Sa=r("code"),Fm=n("None"),Am=n(" for special tokens added around or between sequences,"),Cm=d(),Kn=r("li"),Wa=r("code"),Nm=n("0"),Om=n(" for tokens corresponding to words in the first sequence,"),Sm=d(),Qn=r("li"),Ra=r("code"),Wm=n("1"),Rm=n(` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded.`),Um=d(),at=r("div"),h(dn.$$.fragment),jm=d(),cn=r("p"),Vm=n("Send all values to device by calling "),Ua=r("code"),Hm=n("v.to(device)"),Mm=n(" (PyTorch only)."),Gm=d(),L=r("div"),h(ln.$$.fragment),Xm=d(),ja=r("p"),Ym=n("Get the character span corresponding to an encoded token in a sequence of the batch."),Jm=d(),hn=r("p"),Km=n("Character spans are returned as a "),Zn=r("a"),Qm=n("CharSpan"),Zm=n(" with:"),ef=d(),pn=r("ul"),eo=r("li"),Va=r("strong"),tf=n("start"),nf=n(" \u2014 Index of the first character in the original string associated to the token."),of=d(),to=r("li"),Ha=r("strong"),rf=n("end"),af=n(` \u2014 Index of the character following the last character in the original string associated to the token.`),sf=d(),Ma=r("p"),df=n("Can be called as:"),cf=d(),mn=r("ul"),no=r("li"),Ga=r("code"),lf=n("self.token_to_chars(token_index)"),hf=n(" if batch size is 1"),pf=d(),oo=r("li"),Xa=r("code"),mf=n("self.token_to_chars(batch_index, token_index)"),ff=n(" if batch size is greater or equal to 1"),uf=d(),V=r("div"),h(fn.$$.fragment),_f=d(),Be=r("p"),gf=n(`Get the index of the sequence represented by the given token. 
In the general use case, this method returns `),Ya=r("code"),kf=n("0"),vf=n(" for a single sequence or the first sequence of a pair, and "),Ja=r("code"),bf=n("1"),yf=n(" for the second sequence of a pair"),Tf=d(),Ka=r("p"),wf=n("Can be called as:"),zf=d(),un=r("ul"),ro=r("li"),Qa=r("code"),xf=n("self.token_to_sequence(token_index)"),Ef=n(" if batch size is 1"),$f=d(),ao=r("li"),Za=r("code"),Pf=n("self.token_to_sequence(batch_index, token_index)"),qf=n(" if batch size is greater than 1"),Df=d(),es=r("p"),If=n(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),Lf=d(),H=r("div"),h(_n.$$.fragment),Bf=d(),ts=r("p"),Ff=n("Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch."),Af=d(),ns=r("p"),Cf=n("Can be called as:"),Nf=d(),gn=r("ul"),so=r("li"),os=r("code"),Of=n("self.token_to_word(token_index)"),Sf=n(" if batch size is 1"),Wf=d(),io=r("li"),rs=r("code"),Rf=n("self.token_to_word(batch_index, token_index)"),Uf=n(" if batch size is greater than 1"),jf=d(),as=r("p"),Vf=n(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),Hf=d(),st=r("div"),h(kn.$$.fragment),Mf=d(),ss=r("p"),Gf=n(`Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer).`),Xf=d(),it=r("div"),h(vn.$$.fragment),Yf=d(),is=r("p"),Jf=n("Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),Kf=d(),B=r("div"),h(bn.$$.fragment),Qf=d(),ds=r("p"),Zf=n("Get the character span in the original string corresponding to given word in a sequence of the batch."),eu=d(),cs=r("p"),tu=n("Character spans are returned as a CharSpan NamedTuple with:"),nu=d(),yn=r("ul"),ls=r("li"),ou=n("start: index of the first character in the original string"),ru=d(),hs=r("li"),au=n("end: index of the character following the last character in the original string"),su=d(),ps=r("p"),iu=n("Can be called as:"),du=d(),Tn=r("ul"),co=r("li"),ms=r("code"),cu=n("self.word_to_chars(word_index)"),lu=n(" if batch size is 1"),hu=d(),lo=r("li"),fs=r("code"),pu=n("self.word_to_chars(batch_index, word_index)"),mu=n(" if batch size is greater or equal to 1"),fu=d(),D=r("div"),h(wn.$$.fragment),uu=d(),us=r("p"),_u=n("Get the encoded token span corresponding to a word in a sequence of the batch."),gu=d(),zn=r("p"),ku=n("Token spans are returned as a "),ho=r("a"),vu=n("TokenSpan"),bu=n(" with:"),yu=d(),xn=r("ul"),po=r("li"),_s=r("strong"),Tu=n("start"),wu=n(" \u2014 Index of the first token."),zu=d(),mo=r("li"),gs=r("strong"),xu=n("end"),Eu=n(" \u2014 Index of the token following the last token."),$u=d(),ks=r("p"),Pu=n("Can be called as:"),qu=d(),En=r("ul"),fo=r("li"),vs=r("code"),Du=n("self.word_to_tokens(word_index, sequence_index: int = 0)"),Iu=n(" if batch size is 1"),Lu=d(),uo=r("li"),bs=r("code"),Bu=n("self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)"),Fu=n(` if batch size is greater or equal to 1`),Au=d(),ys=r("p"),Cu=n(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),Nu=d(),dt=r("div"),h($n.$$.fragment),Ou=d(),Ts=r("p"),Su=n("Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),this.h()},l(i){const y=$v('[data-svelte="svelte-1phssyn"]',document.head);E=a(y,"META",{name:!0,content:!0}),y.forEach(t),C=c(i),$=a(i,"H1",{class:!0});var Pn=s($);P=a(Pn,"A",{id:!0,class:!0,href:!0});var ws=s(P);Io=a(ws,"SPAN",{});var i_=s(Io);p(mt.$$.fragment,i_),i_.forEach(t),ws.forEach(t),Di=c(Pn),Lo=a(Pn,"SPAN",{});var d_=s(Lo);Ii=o(d_,"Tokenizer"),d_.forEach(t),Pn.forEach(t),$s=c(i),Oe=a(i,"P",{});var Rs=s(Oe);Li=o(Rs,`A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a \u201CFast\u201D implementation based on the Rust library `),ft=a(Rs,"A",{href:!0,rel:!0});var c_=s(ft);Bi=o(c_,"tokenizers"),c_.forEach(t),Fi=o(Rs,". The \u201CFast\u201D implementations allows:"),Rs.forEach(t),Ps=c(i),Se=a(i,"OL",{});var Us=s(Se);Bo=a(Us,"LI",{});var l_=s(Bo);Ai=o(l_,"a significant speed-up in particular when doing batched tokenization and"),l_.forEach(t),Ci=c(Us),Fo=a(Us,"LI",{});var h_=s(Fo);Ni=o(h_,`additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token). Currently no \u201CFast\u201D implementation is available for the SentencePiece-based tokenizers (for T5, ALBERT, CamemBERT, XLM-RoBERTa and XLNet models).`),h_.forEach(t),Us.forEach(t),qs=c(i),N=a(i,"P",{});var ye=s(N);Oi=o(ye,"The base classes "),In=a(ye,"A",{href:!0});var p_=s(In);Si=o(p_,"PreTrainedTokenizer"),p_.forEach(t),Wi=o(ye," and "),Ln=a(ye,"A",{href:!0});var m_=s(Ln);Ri=o(m_,"PreTrainedTokenizerFast"),m_.forEach(t),Ui=o(ye,` implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and \u201CFast\u201D tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository). 
They both rely on `),Bn=a(ye,"A",{href:!0});var f_=s(Bn);ji=o(f_,"PreTrainedTokenizerBase"),f_.forEach(t),Vi=o(ye,` that contains the common methods, and `),Fn=a(ye,"A",{href:!0});var u_=s(Fn);Hi=o(u_,"SpecialTokensMixin"),u_.forEach(t),Mi=o(ye,"."),ye.forEach(t),Ds=c(i),Ee=a(i,"P",{});var zs=s(Ee);An=a(zs,"A",{href:!0});var __=s(An);Gi=o(__,"PreTrainedTokenizer"),__.forEach(t),Xi=o(zs," and "),Cn=a(zs,"A",{href:!0});var g_=s(Cn);Yi=o(g_,"PreTrainedTokenizerFast"),g_.forEach(t),Ji=o(zs,` thus implement the main methods for using all the tokenizers:`),zs.forEach(t),Is=c(i),de=a(i,"UL",{});var _o=s(de);Ao=a(_o,"LI",{});var k_=s(Ao);Ki=o(k_,`Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers).`),k_.forEach(t),Qi=c(_o),Co=a(_o,"LI",{});var v_=s(Co);Zi=o(v_,"Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece\u2026)."),v_.forEach(t),ed=c(_o),No=a(_o,"LI",{});var b_=s(No);td=o(b_,`Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.`),b_.forEach(t),_o.forEach(t),Ls=c(i),q=a(i,"P",{});var I=s(q);Nn=a(I,"A",{href:!0});var y_=s(Nn);nd=o(y_,"BatchEncoding"),y_.forEach(t),od=o(I,` holds the output of the `),On=a(I,"A",{href:!0});var T_=s(On);rd=o(T_,"PreTrainedTokenizerBase"),T_.forEach(t),ad=o(I,"\u2019s encoding methods ("),Oo=a(I,"CODE",{});var w_=s(Oo);sd=o(w_,"__call__"),w_.forEach(t),id=o(I,`, `),So=a(I,"CODE",{});var z_=s(So);dd=o(z_,"encode_plus"),z_.forEach(t),cd=o(I," and "),Wo=a(I,"CODE",{});var x_=s(Wo);ld=o(x_,"batch_encode_plus"),x_.forEach(t),hd=o(I,`) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (`),Ro=a(I,"CODE",{});var E_=s(Ro);pd=o(E_,"input_ids"),E_.forEach(t),md=o(I,", "),Uo=a(I,"CODE",{});var $_=s(Uo);fd=o($_,"attention_mask"),$_.forEach(t),ud=o(I,`\u2026). 
When the tokenizer is a \u201CFast\u201D tokenizer (i.e., backed by HuggingFace `),ut=a(I,"A",{href:!0,rel:!0});var P_=s(ut);_d=o(P_,"tokenizers library"),P_.forEach(t),gd=o(I,`), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token).`),I.forEach(t),Bs=c(i),$e=a(i,"H2",{class:!0});var js=s($e);We=a(js,"A",{id:!0,class:!0,href:!0});var q_=s(We);jo=a(q_,"SPAN",{});var D_=s(jo);p(_t.$$.fragment,D_),D_.forEach(t),q_.forEach(t),kd=c(js),Vo=a(js,"SPAN",{});var I_=s(Vo);vd=o(I_,"PreTrainedTokenizer"),I_.forEach(t),js.forEach(t),Fs=c(i),g=a(i,"DIV",{class:!0});var T=s(g);p(gt.$$.fragment,T),bd=c(T),Ho=a(T,"P",{});var L_=s(Ho);yd=o(L_,"Base class for all slow tokenizers."),L_.forEach(t),Td=c(T),kt=a(T,"P",{});var Vs=s(kt);wd=o(Vs,"Inherits from "),Sn=a(Vs,"A",{href:!0});var B_=s(Sn);zd=o(B_,"PreTrainedTokenizerBase"),B_.forEach(t),xd=o(Vs,"."),Vs.forEach(t),Ed=c(T),Mo=a(T,"P",{});var F_=s(Mo);$d=o(F_,`Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.`),F_.forEach(t),Pd=c(T),Go=a(T,"P",{});var A_=s(Go);qd=o(A_,`This class also contain the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),A_.forEach(t),Dd=c(T),Xo=a(T,"P",{});var C_=s(Xo);Id=o(C_,"Class attributes (overridden by derived classes)"),C_.forEach(t),Ld=c(T),F=a(T,"UL",{});var ee=s(F);ce=a(ee,"LI",{});var qn=s(ce);Yo=a(qn,"STRONG",{});var N_=s(Yo);Bd=o(N_,"vocab_files_names"),N_.forEach(t),Fd=o(qn," ("),Jo=a(qn,"CODE",{});var O_=s(Jo);Ad=o(O_,"Dict[str, str]"),O_.forEach(t),Cd=o(qn,") \u2014 A dictionary with, as keys, the "),Ko=a(qn,"CODE",{});var S_=s(Ko);Nd=o(S_,"__init__"),S_.forEach(t),Od=o(qn,` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),qn.forEach(t),Sd=c(ee),O=a(ee,"LI",{});var re=s(O);Qo=a(re,"STRONG",{});var W_=s(Qo);Wd=o(W_,"pretrained_vocab_files_map"),W_.forEach(t),Rd=o(re," ("),Zo=a(re,"CODE",{});var R_=s(Zo);Ud=o(R_,"Dict[str, Dict[str, str]]"),R_.forEach(t),jd=o(re,`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),er=a(re,"CODE",{});var U_=s(er);Vd=o(U_,"__init__"),U_.forEach(t),Hd=o(re,` keyword name of each vocabulary file required by the model, the low-level being the `),tr=a(re,"CODE",{});var j_=s(tr);Md=o(j_,"short-cut-names"),j_.forEach(t),Gd=o(re,` of the pretrained models with, as associated values, the `),nr=a(re,"CODE",{});var V_=s(nr);Xd=o(V_,"url"),V_.forEach(t),Yd=o(re," to the associated pretrained vocabulary file."),re.forEach(t),Jd=c(ee),G=a(ee,"LI",{});var Fe=s(G);or=a(Fe,"STRONG",{});var H_=s(or);Kd=o(H_,"max_model_input_sizes"),H_.forEach(t),Qd=o(Fe," ("),rr=a(Fe,"CODE",{});var M_=s(rr);Zd=o(M_,"Dict[str, Optional[int]]"),M_.forEach(t),ec=o(Fe,`) \u2014 A dictionary with, as keys, the `),ar=a(Fe,"CODE",{});var G_=s(ar);tc=o(G_,"short-cut-names"),G_.forEach(t),nc=o(Fe,` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),sr=a(Fe,"CODE",{});var X_=s(sr);oc=o(X_,"None"),X_.forEach(t),rc=o(Fe," if the model has 
no maximum input size."),Fe.forEach(t),ac=c(ee),S=a(ee,"LI",{});var ae=s(S);ir=a(ae,"STRONG",{});var Y_=s(ir);sc=o(Y_,"pretrained_init_configuration"),Y_.forEach(t),ic=o(ae," ("),dr=a(ae,"CODE",{});var J_=s(dr);dc=o(J_,"Dict[str, Dict[str, Any]]"),J_.forEach(t),cc=o(ae,`) \u2014 A dictionary with, as keys, the `),cr=a(ae,"CODE",{});var K_=s(cr);lc=o(K_,"short-cut-names"),K_.forEach(t),hc=o(ae,` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),lr=a(ae,"CODE",{});var Q_=s(lr);pc=o(Q_,"__init__"),Q_.forEach(t),mc=o(ae,` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Wn=a(ae,"A",{href:!0});var Z_=s(Wn);fc=o(Z_,"from_pretrained()"),Z_.forEach(t),uc=o(ae,` method.`),ae.forEach(t),_c=c(ee),Re=a(ee,"LI",{});var xs=s(Re);hr=a(xs,"STRONG",{});var eg=s(hr);gc=o(eg,"model_input_names"),eg.forEach(t),kc=o(xs," ("),pr=a(xs,"CODE",{});var tg=s(pr);vc=o(tg,"List[str]"),tg.forEach(t),bc=o(xs,") \u2014 A list of inputs expected in the forward pass of the model."),xs.forEach(t),yc=c(ee),X=a(ee,"LI",{});var Ae=s(X);mr=a(Ae,"STRONG",{});var ng=s(mr);Tc=o(ng,"padding_side"),ng.forEach(t),wc=o(Ae," ("),fr=a(Ae,"CODE",{});var og=s(fr);zc=o(og,"str"),og.forEach(t),xc=o(Ae,`) \u2014 The default value for the side on which the model should have padding applied. Should be `),ur=a(Ae,"CODE",{});var rg=s(ur);Ec=o(rg,"'right'"),rg.forEach(t),$c=o(Ae," or "),_r=a(Ae,"CODE",{});var ag=s(_r);Pc=o(ag,"'left'"),ag.forEach(t),qc=o(Ae,"."),Ae.forEach(t),ee.forEach(t),Dc=c(T),Ue=a(T,"DIV",{class:!0});var Hs=s(Ue);p(vt.$$.fragment,Hs),Ic=c(Hs),gr=a(Hs,"P",{});var sg=s(gr);Lc=o(sg,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),sg.forEach(t),Hs.forEach(t),Bc=c(T),je=a(T,"DIV",{class:!0});var Ms=s(je);p(bt.$$.fragment,Ms),Fc=c(Ms),kr=a(Ms,"P",{});var ig=s(kr);Ac=o(ig,"Convert a list of lists of token ids into a list of strings by calling decode."),ig.forEach(t),Ms.forEach(t),Cc=c(T),le=a(T,"DIV",{class:!0});var go=s(le);p(yt.$$.fragment,go),Nc=c(go),vr=a(go,"P",{});var dg=s(vr);Oc=o(dg,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),dg.forEach(t),Sc=c(go),Tt=a(go,"P",{});var Gs=s(Tt);Wc=o(Gs,"Similar to doing "),br=a(Gs,"CODE",{});var cg=s(br);Rc=o(cg,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),cg.forEach(t),Uc=o(Gs,"."),Gs.forEach(t),go.forEach(t),jc=c(T),he=a(T,"DIV",{class:!0});var ko=s(he);p(wt.$$.fragment,ko),Vc=c(ko),yr=a(ko,"P",{});var lg=s(yr);Hc=o(lg,"Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),lg.forEach(t),Mc=c(ko),zt=a(ko,"P",{});var Xs=s(zt);Gc=o(Xs,"Same as doing "),Tr=a(Xs,"CODE",{});var hg=s(Tr);Xc=o(hg,"self.convert_tokens_to_ids(self.tokenize(text))"),hg.forEach(t),Yc=o(Xs,"."),Xs.forEach(t),ko.forEach(t),Jc=c(T),Y=a(T,"DIV",{class:!0});var ct=s(Y);p(xt.$$.fragment,ct),Kc=c(ct),Et=a(ct,"P",{});var Ys=s(Et);Qc=o(Ys,`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),wr=a(Ys,"CODE",{});var pg=s(wr);Zc=o(pg,"repo_path_or_name"),pg.forEach(t),el=o(Ys,"."),Ys.forEach(t),tl=c(ct),zr=a(ct,"P",{});var mg=s(zr);nl=o(mg,"Examples:"),mg.forEach(t),ol=c(ct),p($t.$$.fragment,ct),ct.forEach(t),rl=c(T),Ve=a(T,"DIV",{class:!0});var Js=s(Ve);p(Pt.$$.fragment,Js),al=c(Js),xr=a(Js,"P",{});var fg=s(xr);sl=o(fg,`Converts a single index 
or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),fg.forEach(t),Js.forEach(t),il=c(T),He=a(T,"DIV",{class:!0});var Ks=s(He);p(qt.$$.fragment,Ks),dl=c(Ks),Er=a(Ks,"P",{});var ug=s(Er);cl=o(ug,`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),ug.forEach(t),Ks.forEach(t),ll=c(T),Me=a(T,"DIV",{class:!0});var Qs=s(Me);p(Dt.$$.fragment,Qs),hl=c(Qs),$r=a(Qs,"P",{});var _g=s($r);pl=o(_g,"Returns the added tokens in the vocabulary as a dictionary of token to index."),_g.forEach(t),Qs.forEach(t),ml=c(T),pe=a(T,"DIV",{class:!0});var vo=s(pe);p(It.$$.fragment,vo),fl=c(vo),Pr=a(vo,"P",{});var gg=s(Pr);ul=o(gg,"Returns the number of added tokens when encoding a sequence with special tokens."),gg.forEach(t),_l=c(vo),p(Ge.$$.fragment,vo),vo.forEach(t),gl=c(T),me=a(T,"DIV",{class:!0});var bo=s(me);p(Lt.$$.fragment,bo),kl=c(bo),qr=a(bo,"P",{});var kg=s(qr);vl=o(kg,"Performs any necessary transformations before tokenization."),kg.forEach(t),bl=c(bo),Pe=a(bo,"P",{});var yo=s(Pe);yl=o(yo,"This method should pop the arguments from kwargs and return the remaining "),Dr=a(yo,"CODE",{});var vg=s(Dr);Tl=o(vg,"kwargs"),vg.forEach(t),wl=o(yo,` as well. We test the `),Ir=a(yo,"CODE",{});var bg=s(Ir);zl=o(bg,"kwargs"),bg.forEach(t),xl=o(yo," at the end of the encoding process to be sure all the arguments have been used."),yo.forEach(t),bo.forEach(t),El=c(T),fe=a(T,"DIV",{class:!0});var To=s(fe);p(Bt.$$.fragment,To),$l=c(To),Lr=a(To,"P",{});var yg=s(Lr);Pl=o(yg,"Converts a string in a sequence of tokens, using the tokenizer."),yg.forEach(t),ql=c(To),Br=a(To,"P",{});var Tg=s(Br);Dl=o(Tg,`Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens.`),Tg.forEach(t),To.forEach(t),T.forEach(t),As=c(i),qe=a(i,"H2",{class:!0});var Zs=s(qe);Xe=a(Zs,"A",{id:!0,class:!0,href:!0});var wg=s(Xe);Fr=a(wg,"SPAN",{});var zg=s(Fr);p(Ft.$$.fragment,zg),zg.forEach(t),wg.forEach(t),Il=c(Zs),Ar=a(Zs,"SPAN",{});var xg=s(Ar);Ll=o(xg,"PreTrainedTokenizerFast"),xg.forEach(t),Zs.forEach(t),Cs=c(i),J=a(i,"P",{});var lt=s(J);Bl=o(lt,"The "),Rn=a(lt,"A",{href:!0});var Eg=s(Rn);Fl=o(Eg,"PreTrainedTokenizerFast"),Eg.forEach(t),Al=o(lt," depend on the "),At=a(lt,"A",{href:!0,rel:!0});var $g=s(At);Cl=o($g,"tokenizers"),$g.forEach(t),Nl=o(lt,` library. The tokenizers obtained from the \u{1F917} tokenizers library can be loaded very simply into \u{1F917} transformers. 
Take a look at the `),Un=a(lt,"A",{href:!0});var Pg=s(Un);Ol=o(Pg,"Using tokenizers from \u{1F917} tokenizers"),Pg.forEach(t),Sl=o(lt," page to understand how this is done."),lt.forEach(t),Ns=c(i),k=a(i,"DIV",{class:!0});var w=s(k);p(Ct.$$.fragment,w),Wl=c(w),Cr=a(w,"P",{});var qg=s(Cr);Rl=o(qg,"Base class for all fast tokenizers (wrapping HuggingFace tokenizers library)."),qg.forEach(t),Ul=c(w),Nt=a(w,"P",{});var ei=s(Nt);jl=o(ei,"Inherits from "),jn=a(ei,"A",{href:!0});var Dg=s(jn);Vl=o(Dg,"PreTrainedTokenizerBase"),Dg.forEach(t),Hl=o(ei,"."),ei.forEach(t),Ml=c(w),Nr=a(w,"P",{});var Ig=s(Nr);Gl=o(Ig,`Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.`),Ig.forEach(t),Xl=c(w),Or=a(w,"P",{});var Lg=s(Or);Yl=o(Lg,`This class also contains the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),Lg.forEach(t),Jl=c(w),Sr=a(w,"P",{});var Bg=s(Sr);Kl=o(Bg,"Class attributes (overridden by derived classes)"),Bg.forEach(t),Ql=c(w),A=a(w,"UL",{});var te=s(A);ue=a(te,"LI",{});var Dn=s(ue);Wr=a(Dn,"STRONG",{});var Fg=s(Wr);Zl=o(Fg,"vocab_files_names"),Fg.forEach(t),eh=o(Dn," ("),Rr=a(Dn,"CODE",{});var Ag=s(Rr);th=o(Ag,"Dict[str, str]"),Ag.forEach(t),nh=o(Dn,") \u2014 A dictionary with, as keys, the "),Ur=a(Dn,"CODE",{});var Cg=s(Ur);oh=o(Cg,"__init__"),Cg.forEach(t),rh=o(Dn,` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Dn.forEach(t),ah=c(te),W=a(te,"LI",{});var se=s(W);jr=a(se,"STRONG",{});var Ng=s(jr);sh=o(Ng,"pretrained_vocab_files_map"),Ng.forEach(t),ih=o(se," ("),Vr=a(se,"CODE",{});var Og=s(Vr);dh=o(Og,"Dict[str, Dict[str, str]]"),Og.forEach(t),ch=o(se,`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),Hr=a(se,"CODE",{});var Sg=s(Hr);lh=o(Sg,"__init__"),Sg.forEach(t),hh=o(se,` keyword name of each vocabulary file required by the model, the low-level being the `),Mr=a(se,"CODE",{});var Wg=s(Mr);ph=o(Wg,"short-cut-names"),Wg.forEach(t),mh=o(se,` of the pretrained models with, as associated values, the `),Gr=a(se,"CODE",{});var Rg=s(Gr);fh=o(Rg,"url"),Rg.forEach(t),uh=o(se," to the associated pretrained vocabulary file."),se.forEach(t),_h=c(te),K=a(te,"LI",{});var Ce=s(K);Xr=a(Ce,"STRONG",{});var Ug=s(Xr);gh=o(Ug,"max_model_input_sizes"),Ug.forEach(t),kh=o(Ce," ("),Yr=a(Ce,"CODE",{});var jg=s(Yr);vh=o(jg,"Dict[str, Optional[int]]"),jg.forEach(t),bh=o(Ce,`) \u2014 A dictionary with, as keys, the `),Jr=a(Ce,"CODE",{});var Vg=s(Jr);yh=o(Vg,"short-cut-names"),Vg.forEach(t),Th=o(Ce,` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),Kr=a(Ce,"CODE",{});var Hg=s(Kr);wh=o(Hg,"None"),Hg.forEach(t),zh=o(Ce," if the model has no maximum input size."),Ce.forEach(t),xh=c(te),R=a(te,"LI",{});var ie=s(R);Qr=a(ie,"STRONG",{});var Mg=s(Qr);Eh=o(Mg,"pretrained_init_configuration"),Mg.forEach(t),$h=o(ie," ("),Zr=a(ie,"CODE",{});var Gg=s(Zr);Ph=o(Gg,"Dict[str, Dict[str, Any]]"),Gg.forEach(t),qh=o(ie,`) \u2014 A dictionary with, as keys, the `),ea=a(ie,"CODE",{});var Xg=s(ea);Dh=o(Xg,"short-cut-names"),Xg.forEach(t),Ih=o(ie,` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the 
`),ta=a(ie,"CODE",{});var Yg=s(ta);Lh=o(Yg,"__init__"),Yg.forEach(t),Bh=o(ie,` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Vn=a(ie,"A",{href:!0});var Jg=s(Vn);Fh=o(Jg,"from_pretrained()"),Jg.forEach(t),Ah=o(ie,` method.`),ie.forEach(t),Ch=c(te),Ye=a(te,"LI",{});var Es=s(Ye);na=a(Es,"STRONG",{});var Kg=s(na);Nh=o(Kg,"model_input_names"),Kg.forEach(t),Oh=o(Es," ("),oa=a(Es,"CODE",{});var Qg=s(oa);Sh=o(Qg,"List[str]"),Qg.forEach(t),Wh=o(Es,") \u2014 A list of inputs expected in the forward pass of the model."),Es.forEach(t),Rh=c(te),Q=a(te,"LI",{});var Ne=s(Q);ra=a(Ne,"STRONG",{});var Zg=s(ra);Uh=o(Zg,"padding_side"),Zg.forEach(t),jh=o(Ne," ("),aa=a(Ne,"CODE",{});var ek=s(aa);Vh=o(ek,"str"),ek.forEach(t),Hh=o(Ne,`) \u2014 The default value for the side on which the model should have padding applied. Should be `),sa=a(Ne,"CODE",{});var tk=s(sa);Mh=o(tk,"'right'"),tk.forEach(t),Gh=o(Ne," or "),ia=a(Ne,"CODE",{});var nk=s(ia);Xh=o(nk,"'left'"),nk.forEach(t),Yh=o(Ne,"."),Ne.forEach(t),te.forEach(t),Jh=c(w),Je=a(w,"DIV",{class:!0});var ti=s(Je);p(Ot.$$.fragment,ti),Kh=c(ti),da=a(ti,"P",{});var ok=s(da);Qh=o(ok,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),ok.forEach(t),ti.forEach(t),Zh=c(w),Ke=a(w,"DIV",{class:!0});var ni=s(Ke);p(St.$$.fragment,ni),ep=c(ni),ca=a(ni,"P",{});var rk=s(ca);tp=o(rk,"Convert a list of lists of token ids into a list of strings by calling decode."),rk.forEach(t),ni.forEach(t),np=c(w),_e=a(w,"DIV",{class:!0});var wo=s(_e);p(Wt.$$.fragment,wo),op=c(wo),la=a(wo,"P",{});var ak=s(la);rp=o(ak,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),ak.forEach(t),ap=c(wo),Rt=a(wo,"P",{});var oi=s(Rt);sp=o(oi,"Similar to doing "),ha=a(oi,"CODE",{});var sk=s(ha);ip=o(sk,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),sk.forEach(t),dp=o(oi,"."),oi.forEach(t),wo.forEach(t),cp=c(w),ge=a(w,"DIV",{class:!0});var zo=s(ge);p(Ut.$$.fragment,zo),lp=c(zo),pa=a(zo,"P",{});var ik=s(pa);hp=o(ik,"Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),ik.forEach(t),pp=c(zo),jt=a(zo,"P",{});var ri=s(jt);mp=o(ri,"Same as doing "),ma=a(ri,"CODE",{});var dk=s(ma);fp=o(dk,"self.convert_tokens_to_ids(self.tokenize(text))"),dk.forEach(t),up=o(ri,"."),ri.forEach(t),zo.forEach(t),_p=c(w),Z=a(w,"DIV",{class:!0});var ht=s(Z);p(Vt.$$.fragment,ht),gp=c(ht),Ht=a(ht,"P",{});var ai=s(Ht);kp=o(ai,`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),fa=a(ai,"CODE",{});var ck=s(fa);vp=o(ck,"repo_path_or_name"),ck.forEach(t),bp=o(ai,"."),ai.forEach(t),yp=c(ht),ua=a(ht,"P",{});var lk=s(ua);Tp=o(lk,"Examples:"),lk.forEach(t),wp=c(ht),p(Mt.$$.fragment,ht),ht.forEach(t),zp=c(w),Qe=a(w,"DIV",{class:!0});var si=s(Qe);p(Gt.$$.fragment,si),xp=c(si),_a=a(si,"P",{});var hk=s(_a);Ep=o(hk,`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),hk.forEach(t),si.forEach(t),$p=c(w),Ze=a(w,"DIV",{class:!0});var ii=s(Ze);p(Xt.$$.fragment,ii),Pp=c(ii),ga=a(ii,"P",{});var pk=s(ga);qp=o(pk,`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),pk.forEach(t),ii.forEach(t),Dp=c(w),et=a(w,"DIV",{class:!0});var di=s(et);p(Yt.$$.fragment,di),Ip=c(di),ka=a(di,"P",{});var 
mk=s(ka);Lp=o(mk,"Returns the added tokens in the vocabulary as a dictionary of token to index."),mk.forEach(t),di.forEach(t),Bp=c(w),ke=a(w,"DIV",{class:!0});var xo=s(ke);p(Jt.$$.fragment,xo),Fp=c(xo),va=a(xo,"P",{});var fk=s(va);Ap=o(fk,"Returns the number of added tokens when encoding a sequence with special tokens."),fk.forEach(t),Cp=c(xo),p(tt.$$.fragment,xo),xo.forEach(t),Np=c(w),ve=a(w,"DIV",{class:!0});var Eo=s(ve);p(Kt.$$.fragment,Eo),Op=c(Eo),ba=a(Eo,"P",{});var uk=s(ba);Sp=o(uk,`Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.`),uk.forEach(t),Wp=c(Eo),ya=a(Eo,"P",{});var _k=s(ya);Rp=o(_k,`The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section.`),_k.forEach(t),Eo.forEach(t),Up=c(w),nt=a(w,"DIV",{class:!0});var ci=s(nt);p(Qt.$$.fragment,ci),jp=c(ci),Ta=a(ci,"P",{});var gk=s(Ta);Vp=o(gk,`Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one.`),gk.forEach(t),ci.forEach(t),w.forEach(t),Os=c(i),De=a(i,"H2",{class:!0});var li=s(De);ot=a(li,"A",{id:!0,class:!0,href:!0});var kk=s(ot);wa=a(kk,"SPAN",{});var vk=s(wa);p(Zt.$$.fragment,vk),vk.forEach(t),kk.forEach(t),Hp=c(li),za=a(li,"SPAN",{});var bk=s(za);Mp=o(bk,"BatchEncoding"),bk.forEach(t),li.forEach(t),Ss=c(i),b=a(i,"DIV",{class:!0});var z=s(b);p(en.$$.fragment,z),Gp=c(z),Ie=a(z,"P",{});var $o=s(Ie);Xp=o($o,"Holds the output of the "),Hn=a($o,"A",{href:!0});var yk=s(Hn);Yp=o(yk,"encode_plus()"),yk.forEach(t),Jp=o($o,` and `),xa=a($o,"CODE",{});var Tk=s(xa);Kp=o(Tk,"batch_encode"),Tk.forEach(t),Qp=o($o,` methods (tokens, attention_masks, etc).`),$o.forEach(t),Zp=c(z),Ea=a(z,"P",{});var wk=s(Ea);em=o(wk,`This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes utility methods to map from word/character space to token space.`),wk.forEach(t),tm=c(z),U=a(z,"DIV",{class:!0});var Te=s(U);p(tn.$$.fragment,Te),nm=c(Te),$a=a(Te,"P",{});var zk=s($a);om=o(zk,`Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch.`),zk.forEach(t),rm=c(Te),Pa=a(Te,"P",{});var xk=s(Pa);am=o(xk,"Can be called as:"),xk.forEach(t),sm=c(Te),nn=a(Te,"UL",{});var hi=s(nn);Mn=a(hi,"LI",{});var Wu=s(Mn);qa=a(Wu,"CODE",{});var Ek=s(qa);im=o(Ek,"self.char_to_token(char_index)"),Ek.forEach(t),dm=o(Wu," if batch size is 1"),Wu.forEach(t),cm=c(hi),Gn=a(hi,"LI",{});var Ru=s(Gn);Da=a(Ru,"CODE",{});var $k=s(Da);lm=o($k,"self.char_to_token(batch_index, char_index)"),$k.forEach(t),hm=o(Ru," if batch size is greater or equal to 1"),Ru.forEach(t),hi.forEach(t),pm=c(Te),Ia=a(Te,"P",{});var Pk=s(Ia);mm=o(Pk,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),Pk.forEach(t),Te.forEach(t),fm=c(z),j=a(z,"DIV",{class:!0});var we=s(j);p(on.$$.fragment,we),um=c(we),La=a(we,"P",{});var qk=s(La);_m=o(qk,`Get the word in the original string corresponding to a character in the original string of a sequence of the batch.`),qk.forEach(t),gm=c(we),Ba=a(we,"P",{});var Dk=s(Ba);km=o(Dk,"Can be called as:"),Dk.forEach(t),vm=c(we),rn=a(we,"UL",{});var pi=s(rn);Xn=a(pi,"LI",{});var Uu=s(Xn);Fa=a(Uu,"CODE",{});var Ik=s(Fa);bm=o(Ik,"self.char_to_word(char_index)"),Ik.forEach(t),ym=o(Uu," if batch size is 1"),Uu.forEach(t),Tm=c(pi),Yn=a(pi,"LI",{});var ju=s(Yn);Aa=a(ju,"CODE",{});var Lk=s(Aa);wm=o(Lk,"self.char_to_word(batch_index, char_index)"),Lk.forEach(t),zm=o(ju," if batch size is greater than 1"),ju.forEach(t),pi.forEach(t),xm=c(we),Ca=a(we,"P",{});var Bk=s(Ca);Em=o(Bk,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),Bk.forEach(t),we.forEach(t),$m=c(z),rt=a(z,"DIV",{class:!0});var mi=s(rt);p(an.$$.fragment,mi),Pm=c(mi),Na=a(mi,"P",{});var Fk=s(Na);qm=o(Fk,"Convert the inner content to tensors."),Fk.forEach(t),mi.forEach(t),Dm=c(z),be=a(z,"DIV",{class:!0});var Po=s(be);p(sn.$$.fragment,Po),Im=c(Po),Oa=a(Po,"P",{});var Ak=s(Oa);Lm=o(Ak,"Return a list mapping the tokens to the id of their original sentences:"),Ak.forEach(t),Bm=c(Po),Le=a(Po,"UL",{});var qo=s(Le);Jn=a(qo,"LI",{});var Vu=s(Jn);Sa=a(Vu,"CODE",{});var Ck=s(Sa);Fm=o(Ck,"None"),Ck.forEach(t),Am=o(Vu," for special tokens added around or between sequences,"),Vu.forEach(t),Cm=c(qo),Kn=a(qo,"LI",{});var Hu=s(Kn);Wa=a(Hu,"CODE",{});var Nk=s(Wa);Nm=o(Nk,"0"),Nk.forEach(t),Om=o(Hu," for tokens corresponding to words in the first sequence,"),Hu.forEach(t),Sm=c(qo),Qn=a(qo,"LI",{});var Mu=s(Qn);Ra=a(Mu,"CODE",{});var Ok=s(Ra);Wm=o(Ok,"1"),Ok.forEach(t),Rm=o(Mu,` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded.`),Mu.forEach(t),qo.forEach(t),Po.forEach(t),Um=c(z),at=a(z,"DIV",{class:!0});var fi=s(at);p(dn.$$.fragment,fi),jm=c(fi),cn=a(fi,"P",{});var ui=s(cn);Vm=o(ui,"Send all values to device by calling "),Ua=a(ui,"CODE",{});var Sk=s(Ua);Hm=o(Sk,"v.to(device)"),Sk.forEach(t),Mm=o(ui," (PyTorch only)."),ui.forEach(t),fi.forEach(t),Gm=c(z),L=a(z,"DIV",{class:!0});var ne=s(L);p(ln.$$.fragment,ne),Xm=c(ne),ja=a(ne,"P",{});var Wk=s(ja);Ym=o(Wk,"Get the character span corresponding to an encoded token in a sequence of the batch."),Wk.forEach(t),Jm=c(ne),hn=a(ne,"P",{});var _i=s(hn);Km=o(_i,"Character spans are returned as a "),Zn=a(_i,"A",{href:!0});var Rk=s(Zn);Qm=o(Rk,"CharSpan"),Rk.forEach(t),Zm=o(_i," with:"),_i.forEach(t),ef=c(ne),pn=a(ne,"UL",{});var gi=s(pn);eo=a(gi,"LI",{});var Gu=s(eo);Va=a(Gu,"STRONG",{});var Uk=s(Va);tf=o(Uk,"start"),Uk.forEach(t),nf=o(Gu," \u2014 Index of the first character in the original string associated to the token."),Gu.forEach(t),of=c(gi),to=a(gi,"LI",{});var Xu=s(to);Ha=a(Xu,"STRONG",{});var jk=s(Ha);rf=o(jk,"end"),jk.forEach(t),af=o(Xu,` \u2014 Index of the character following the last character in the original string associated to the token.`),Xu.forEach(t),gi.forEach(t),sf=c(ne),Ma=a(ne,"P",{});var Vk=s(Ma);df=o(Vk,"Can be called as:"),Vk.forEach(t),cf=c(ne),mn=a(ne,"UL",{});var ki=s(mn);no=a(ki,"LI",{});var Yu=s(no);Ga=a(Yu,"CODE",{});var 
Hk=s(Ga);lf=o(Hk,"self.token_to_chars(token_index)"),Hk.forEach(t),hf=o(Yu," if batch size is 1"),Yu.forEach(t),pf=c(ki),oo=a(ki,"LI",{});var Ju=s(oo);Xa=a(Ju,"CODE",{});var Mk=s(Xa);mf=o(Mk,"self.token_to_chars(batch_index, token_index)"),Mk.forEach(t),ff=o(Ju," if batch size is greater or equal to 1"),Ju.forEach(t),ki.forEach(t),ne.forEach(t),uf=c(z),V=a(z,"DIV",{class:!0});var ze=s(V);p(fn.$$.fragment,ze),_f=c(ze),Be=a(ze,"P",{});var Do=s(Be);gf=o(Do,`Get the index of the sequence represented by the given token. In the general use case, this method returns `),Ya=a(Do,"CODE",{});var Gk=s(Ya);kf=o(Gk,"0"),Gk.forEach(t),vf=o(Do," for a single sequence or the first sequence of a pair, and "),Ja=a(Do,"CODE",{});var Xk=s(Ja);bf=o(Xk,"1"),Xk.forEach(t),yf=o(Do," for the second sequence of a pair"),Do.forEach(t),Tf=c(ze),Ka=a(ze,"P",{});var Yk=s(Ka);wf=o(Yk,"Can be called as:"),Yk.forEach(t),zf=c(ze),un=a(ze,"UL",{});var vi=s(un);ro=a(vi,"LI",{});var Ku=s(ro);Qa=a(Ku,"CODE",{});var Jk=s(Qa);xf=o(Jk,"self.token_to_sequence(token_index)"),Jk.forEach(t),Ef=o(Ku," if batch size is 1"),Ku.forEach(t),$f=c(vi),ao=a(vi,"LI",{});var Qu=s(ao);Za=a(Qu,"CODE",{});var Kk=s(Za);Pf=o(Kk,"self.token_to_sequence(batch_index, token_index)"),Kk.forEach(t),qf=o(Qu," if batch size is greater than 1"),Qu.forEach(t),vi.forEach(t),Df=c(ze),es=a(ze,"P",{});var Qk=s(es);If=o(Qk,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),Qk.forEach(t),ze.forEach(t),Lf=c(z),H=a(z,"DIV",{class:!0});var xe=s(H);p(_n.$$.fragment,xe),Bf=c(xe),ts=a(xe,"P",{});var Zk=s(ts);Ff=o(Zk,"Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch."),Zk.forEach(t),Af=c(xe),ns=a(xe,"P",{});var ev=s(ns);Cf=o(ev,"Can be called as:"),ev.forEach(t),Nf=c(xe),gn=a(xe,"UL",{});var bi=s(gn);so=a(bi,"LI",{});var Zu=s(so);os=a(Zu,"CODE",{});var tv=s(os);Of=o(tv,"self.token_to_word(token_index)"),tv.forEach(t),Sf=o(Zu," if batch size is 1"),Zu.forEach(t),Wf=c(bi),io=a(bi,"LI",{});var e_=s(io);rs=a(e_,"CODE",{});var nv=s(rs);Rf=o(nv,"self.token_to_word(batch_index, token_index)"),nv.forEach(t),Uf=o(e_," if batch size is greater than 1"),e_.forEach(t),bi.forEach(t),jf=c(xe),as=a(xe,"P",{});var ov=s(as);Vf=o(ov,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),ov.forEach(t),xe.forEach(t),Hf=c(z),st=a(z,"DIV",{class:!0});var yi=s(st);p(kn.$$.fragment,yi),Mf=c(yi),ss=a(yi,"P",{});var rv=s(ss);Gf=o(rv,`Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer).`),rv.forEach(t),yi.forEach(t),Xf=c(z),it=a(z,"DIV",{class:!0});var Ti=s(it);p(vn.$$.fragment,Ti),Yf=c(Ti),is=a(Ti,"P",{});var av=s(is);Jf=o(av,"Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),av.forEach(t),Ti.forEach(t),Kf=c(z),B=a(z,"DIV",{class:!0});var oe=s(B);p(bn.$$.fragment,oe),Qf=c(oe),ds=a(oe,"P",{});var sv=s(ds);Zf=o(sv,"Get the character span in the original string corresponding to given word in a sequence of the batch."),sv.forEach(t),eu=c(oe),cs=a(oe,"P",{});var iv=s(cs);tu=o(iv,"Character spans are returned as a CharSpan NamedTuple with:"),iv.forEach(t),nu=c(oe),yn=a(oe,"UL",{});var wi=s(yn);ls=a(wi,"LI",{});var dv=s(ls);ou=o(dv,"start: index of the first character in the original string"),dv.forEach(t),ru=c(wi),hs=a(wi,"LI",{});var cv=s(hs);au=o(cv,"end: index of the character following the last character in the original string"),cv.forEach(t),wi.forEach(t),su=c(oe),ps=a(oe,"P",{});var lv=s(ps);iu=o(lv,"Can be called as:"),lv.forEach(t),du=c(oe),Tn=a(oe,"UL",{});var zi=s(Tn);co=a(zi,"LI",{});var t_=s(co);ms=a(t_,"CODE",{});var hv=s(ms);cu=o(hv,"self.word_to_chars(word_index)"),hv.forEach(t),lu=o(t_," if batch size is 1"),t_.forEach(t),hu=c(zi),lo=a(zi,"LI",{});var n_=s(lo);fs=a(n_,"CODE",{});var pv=s(fs);pu=o(pv,"self.word_to_chars(batch_index, word_index)"),pv.forEach(t),mu=o(n_," if batch size is greater or equal to 1"),n_.forEach(t),zi.forEach(t),oe.forEach(t),fu=c(z),D=a(z,"DIV",{class:!0});var M=s(D);p(wn.$$.fragment,M),uu=c(M),us=a(M,"P",{});var mv=s(us);_u=o(mv,"Get the encoded token span corresponding to a word in a sequence of the batch."),mv.forEach(t),gu=c(M),zn=a(M,"P",{});var xi=s(zn);ku=o(xi,"Token spans are returned as a "),ho=a(xi,"A",{href:!0});var fv=s(ho);vu=o(fv,"TokenSpan"),fv.forEach(t),bu=o(xi," with:"),xi.forEach(t),yu=c(M),xn=a(M,"UL",{});var Ei=s(xn);po=a(Ei,"LI",{});var o_=s(po);_s=a(o_,"STRONG",{});var uv=s(_s);Tu=o(uv,"start"),uv.forEach(t),wu=o(o_," \u2014 Index of the first token."),o_.forEach(t),zu=c(Ei),mo=a(Ei,"LI",{});var r_=s(mo);gs=a(r_,"STRONG",{});var _v=s(gs);xu=o(_v,"end"),_v.forEach(t),Eu=o(r_," \u2014 Index of the token following the last token."),r_.forEach(t),Ei.forEach(t),$u=c(M),ks=a(M,"P",{});var gv=s(ks);Pu=o(gv,"Can be called as:"),gv.forEach(t),qu=c(M),En=a(M,"UL",{});var $i=s(En);fo=a($i,"LI",{});var a_=s(fo);vs=a(a_,"CODE",{});var kv=s(vs);Du=o(kv,"self.word_to_tokens(word_index, sequence_index: int = 0)"),kv.forEach(t),Iu=o(a_," if batch size is 1"),a_.forEach(t),Lu=c($i),uo=a($i,"LI",{});var s_=s(uo);bs=a(s_,"CODE",{});var vv=s(bs);Bu=o(vv,"self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)"),vv.forEach(t),Fu=o(s_,` if batch size is greater or equal to 1`),s_.forEach(t),$i.forEach(t),Au=c(M),ys=a(M,"P",{});var bv=s(ys);Cu=o(bv,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),bv.forEach(t),M.forEach(t),Nu=c(z),dt=a(z,"DIV",{class:!0});var Pi=s(dt);p($n.$$.fragment,Pi),Ou=c(Pi),Ts=a(Pi,"P",{});var yv=s(Ts);Su=o(yv,"Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),yv.forEach(t),Pi.forEach(t),z.forEach(t),this.h()},h(){l(E,"name","hf:doc:metadata"),l(E,"content",JSON.stringify(Iv)),l(P,"id","tokenizer"),l(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(P,"href","#tokenizer"),l($,"class","relative group"),l(ft,"href","https://github.com/huggingface/tokenizers"),l(ft,"rel","nofollow"),l(In,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(Ln,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(Bn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(Fn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.SpecialTokensMixin"),l(An,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(Cn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(Nn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.BatchEncoding"),l(On,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(ut,"href","https://github.com/huggingface/tokenizers"),l(ut,"rel","nofollow"),l(We,"id","transformers.PreTrainedTokenizer"),l(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(We,"href","#transformers.PreTrainedTokenizer"),l($e,"class","relative group"),l(Sn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(Wn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained"),l(Ue,"class","docstring"),l(je,"class","docstring"),l(le,"class","docstring"),l(he,"class","docstring"),l(Y,"class","docstring"),l(Ve,"class","docstring"),l(He,"class","docstring"),l(Me,"class","docstring"),l(pe,"class","docstring"),l(me,"class","docstring"),l(fe,"class","docstring"),l(g,"class","docstring"),l(Xe,"id","transformers.PreTrainedTokenizerFast"),l(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Xe,"href","#transformers.PreTrainedTokenizerFast"),l(qe,"class","relative 
group"),l(Rn,"href","/docs/transformers/v4.15.0/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(At,"href","https://huggingface.co/docs/tokenizers"),l(At,"rel","nofollow"),l(Un,"href","/docs/transformers/v4.15.0/en/../fast_tokenizers"),l(jn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(Vn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained"),l(Je,"class","docstring"),l(Ke,"class","docstring"),l(_e,"class","docstring"),l(ge,"class","docstring"),l(Z,"class","docstring"),l(Qe,"class","docstring"),l(Ze,"class","docstring"),l(et,"class","docstring"),l(ke,"class","docstring"),l(ve,"class","docstring"),l(nt,"class","docstring"),l(k,"class","docstring"),l(ot,"id","transformers.BatchEncoding"),l(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ot,"href","#transformers.BatchEncoding"),l(De,"class","relative group"),l(Hn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode_plus"),l(U,"class","docstring"),l(j,"class","docstring"),l(rt,"class","docstring"),l(be,"class","docstring"),l(at,"class","docstring"),l(Zn,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.CharSpan"),l(L,"class","docstring"),l(V,"class","docstring"),l(H,"class","docstring"),l(st,"class","docstring"),l(it,"class","docstring"),l(B,"class","docstring"),l(ho,"href","/docs/transformers/v4.15.0/en/internal/tokenization_utils#transformers.TokenSpan"),l(D,"class","docstring"),l(dt,"class","docstring"),l(b,"class","docstring")},m(i,y){e(document.head,E),x(i,C,y),x(i,$,y),e($,P),e(P,Io),m(mt,Io,null),e($,Di),e($,Lo),e(Lo,Ii),x(i,$s,y),x(i,Oe,y),e(Oe,Li),e(Oe,ft),e(ft,Bi),e(Oe,Fi),x(i,Ps,y),x(i,Se,y),e(Se,Bo),e(Bo,Ai),e(Se,Ci),e(Se,Fo),e(Fo,Ni),x(i,qs,y),x(i,N,y),e(N,Oi),e(N,In),e(In,Si),e(N,Wi),e(N,Ln),e(Ln,Ri),e(N,Ui),e(N,Bn),e(Bn,ji),e(N,Vi),e(N,Fn),e(Fn,Hi),e(N,Mi),x(i,Ds,y),x(i,Ee,y),e(Ee,An),e(An,Gi),e(Ee,Xi),e(Ee,Cn),e(Cn,Yi),e(Ee,Ji),x(i,Is,y),x(i,de,y),e(de,Ao),e(Ao,Ki),e(de,Qi),e(de,Co),e(Co,Zi),e(de,ed),e(de,No),e(No,td),x(i,Ls,y),x(i,q,y),e(q,Nn),e(Nn,nd),e(q,od),e(q,On),e(On,rd),e(q,ad),e(q,Oo),e(Oo,sd),e(q,id),e(q,So),e(So,dd),e(q,cd),e(q,Wo),e(Wo,ld),e(q,hd),e(q,Ro),e(Ro,pd),e(q,md),e(q,Uo),e(Uo,fd),e(q,ud),e(q,ut),e(ut,_d),e(q,gd),x(i,Bs,y),x(i,$e,y),e($e,We),e(We,jo),m(_t,jo,null),e($e,kd),e($e,Vo),e(Vo,vd),x(i,Fs,y),x(i,g,y),m(gt,g,null),e(g,bd),e(g,Ho),e(Ho,yd),e(g,Td),e(g,kt),e(kt,wd),e(kt,Sn),e(Sn,zd),e(kt,xd),e(g,Ed),e(g,Mo),e(Mo,$d),e(g,Pd),e(g,Go),e(Go,qd),e(g,Dd),e(g,Xo),e(Xo,Id),e(g,Ld),e(g,F),e(F,ce),e(ce,Yo),e(Yo,Bd),e(ce,Fd),e(ce,Jo),e(Jo,Ad),e(ce,Cd),e(ce,Ko),e(Ko,Nd),e(ce,Od),e(F,Sd),e(F,O),e(O,Qo),e(Qo,Wd),e(O,Rd),e(O,Zo),e(Zo,Ud),e(O,jd),e(O,er),e(er,Vd),e(O,Hd),e(O,tr),e(tr,Md),e(O,Gd),e(O,nr),e(nr,Xd),e(O,Yd),e(F,Jd),e(F,G),e(G,or),e(or,Kd),e(G,Qd),e(G,rr),e(rr,Zd),e(G,ec),e(G,ar),e(ar,tc),e(G,nc),e(G,sr),e(sr,oc),e(G,rc),e(F,ac),e(F,S),e(S,ir),e(ir,sc),e(S,ic),e(S,dr),e(dr,dc),e(S,cc),e(S,cr),e(cr,lc),e(S,hc),e(S,lr),e(lr,pc),e(S,mc),e(S,Wn),e(Wn,fc),e(S,uc),e(F,_c),e(F,Re),e(Re,hr),e(hr,gc),e(Re,kc),e(Re,pr),e(pr,vc),e(Re,bc),e(F,yc),e(F,X),e(X,mr),e(mr,Tc),e(X,wc),e(X,fr),e(fr,zc),e(X,xc),e(X,ur),e(ur,Ec),e(X,$c),e(X,_r),e(_r,Pc),e(X,qc),e(g,Dc),e(g,Ue),m(vt,Ue,null),e(Ue,Ic),e(Ue,gr),e(gr,Lc),e(g,Bc),e(g,je),m(bt,je,null),e(je,Fc),e(je,kr),e(kr,Ac)
,e(g,Cc),e(g,le),m(yt,le,null),e(le,Nc),e(le,vr),e(vr,Oc),e(le,Sc),e(le,Tt),e(Tt,Wc),e(Tt,br),e(br,Rc),e(Tt,Uc),e(g,jc),e(g,he),m(wt,he,null),e(he,Vc),e(he,yr),e(yr,Hc),e(he,Mc),e(he,zt),e(zt,Gc),e(zt,Tr),e(Tr,Xc),e(zt,Yc),e(g,Jc),e(g,Y),m(xt,Y,null),e(Y,Kc),e(Y,Et),e(Et,Qc),e(Et,wr),e(wr,Zc),e(Et,el),e(Y,tl),e(Y,zr),e(zr,nl),e(Y,ol),m($t,Y,null),e(g,rl),e(g,Ve),m(Pt,Ve,null),e(Ve,al),e(Ve,xr),e(xr,sl),e(g,il),e(g,He),m(qt,He,null),e(He,dl),e(He,Er),e(Er,cl),e(g,ll),e(g,Me),m(Dt,Me,null),e(Me,hl),e(Me,$r),e($r,pl),e(g,ml),e(g,pe),m(It,pe,null),e(pe,fl),e(pe,Pr),e(Pr,ul),e(pe,_l),m(Ge,pe,null),e(g,gl),e(g,me),m(Lt,me,null),e(me,kl),e(me,qr),e(qr,vl),e(me,bl),e(me,Pe),e(Pe,yl),e(Pe,Dr),e(Dr,Tl),e(Pe,wl),e(Pe,Ir),e(Ir,zl),e(Pe,xl),e(g,El),e(g,fe),m(Bt,fe,null),e(fe,$l),e(fe,Lr),e(Lr,Pl),e(fe,ql),e(fe,Br),e(Br,Dl),x(i,As,y),x(i,qe,y),e(qe,Xe),e(Xe,Fr),m(Ft,Fr,null),e(qe,Il),e(qe,Ar),e(Ar,Ll),x(i,Cs,y),x(i,J,y),e(J,Bl),e(J,Rn),e(Rn,Fl),e(J,Al),e(J,At),e(At,Cl),e(J,Nl),e(J,Un),e(Un,Ol),e(J,Sl),x(i,Ns,y),x(i,k,y),m(Ct,k,null),e(k,Wl),e(k,Cr),e(Cr,Rl),e(k,Ul),e(k,Nt),e(Nt,jl),e(Nt,jn),e(jn,Vl),e(Nt,Hl),e(k,Ml),e(k,Nr),e(Nr,Gl),e(k,Xl),e(k,Or),e(Or,Yl),e(k,Jl),e(k,Sr),e(Sr,Kl),e(k,Ql),e(k,A),e(A,ue),e(ue,Wr),e(Wr,Zl),e(ue,eh),e(ue,Rr),e(Rr,th),e(ue,nh),e(ue,Ur),e(Ur,oh),e(ue,rh),e(A,ah),e(A,W),e(W,jr),e(jr,sh),e(W,ih),e(W,Vr),e(Vr,dh),e(W,ch),e(W,Hr),e(Hr,lh),e(W,hh),e(W,Mr),e(Mr,ph),e(W,mh),e(W,Gr),e(Gr,fh),e(W,uh),e(A,_h),e(A,K),e(K,Xr),e(Xr,gh),e(K,kh),e(K,Yr),e(Yr,vh),e(K,bh),e(K,Jr),e(Jr,yh),e(K,Th),e(K,Kr),e(Kr,wh),e(K,zh),e(A,xh),e(A,R),e(R,Qr),e(Qr,Eh),e(R,$h),e(R,Zr),e(Zr,Ph),e(R,qh),e(R,ea),e(ea,Dh),e(R,Ih),e(R,ta),e(ta,Lh),e(R,Bh),e(R,Vn),e(Vn,Fh),e(R,Ah),e(A,Ch),e(A,Ye),e(Ye,na),e(na,Nh),e(Ye,Oh),e(Ye,oa),e(oa,Sh),e(Ye,Wh),e(A,Rh),e(A,Q),e(Q,ra),e(ra,Uh),e(Q,jh),e(Q,aa),e(aa,Vh),e(Q,Hh),e(Q,sa),e(sa,Mh),e(Q,Gh),e(Q,ia),e(ia,Xh),e(Q,Yh),e(k,Jh),e(k,Je),m(Ot,Je,null),e(Je,Kh),e(Je,da),e(da,Qh),e(k,Zh),e(k,Ke),m(St,Ke,null),e(Ke,ep),e(Ke,ca),e(ca,tp),e(k,np),e(k,_e),m(Wt,_e,null),e(_e,op),e(_e,la),e(la,rp),e(_e,ap),e(_e,Rt),e(Rt,sp),e(Rt,ha),e(ha,ip),e(Rt,dp),e(k,cp),e(k,ge),m(Ut,ge,null),e(ge,lp),e(ge,pa),e(pa,hp),e(ge,pp),e(ge,jt),e(jt,mp),e(jt,ma),e(ma,fp),e(jt,up),e(k,_p),e(k,Z),m(Vt,Z,null),e(Z,gp),e(Z,Ht),e(Ht,kp),e(Ht,fa),e(fa,vp),e(Ht,bp),e(Z,yp),e(Z,ua),e(ua,Tp),e(Z,wp),m(Mt,Z,null),e(k,zp),e(k,Qe),m(Gt,Qe,null),e(Qe,xp),e(Qe,_a),e(_a,Ep),e(k,$p),e(k,Ze),m(Xt,Ze,null),e(Ze,Pp),e(Ze,ga),e(ga,qp),e(k,Dp),e(k,et),m(Yt,et,null),e(et,Ip),e(et,ka),e(ka,Lp),e(k,Bp),e(k,ke),m(Jt,ke,null),e(ke,Fp),e(ke,va),e(va,Ap),e(ke,Cp),m(tt,ke,null),e(k,Np),e(k,ve),m(Kt,ve,null),e(ve,Op),e(ve,ba),e(ba,Sp),e(ve,Wp),e(ve,ya),e(ya,Rp),e(k,Up),e(k,nt),m(Qt,nt,null),e(nt,jp),e(nt,Ta),e(Ta,Vp),x(i,Os,y),x(i,De,y),e(De,ot),e(ot,wa),m(Zt,wa,null),e(De,Hp),e(De,za),e(za,Mp),x(i,Ss,y),x(i,b,y),m(en,b,null),e(b,Gp),e(b,Ie),e(Ie,Xp),e(Ie,Hn),e(Hn,Yp),e(Ie,Jp),e(Ie,xa),e(xa,Kp),e(Ie,Qp),e(b,Zp),e(b,Ea),e(Ea,em),e(b,tm),e(b,U),m(tn,U,null),e(U,nm),e(U,$a),e($a,om),e(U,rm),e(U,Pa),e(Pa,am),e(U,sm),e(U,nn),e(nn,Mn),e(Mn,qa),e(qa,im),e(Mn,dm),e(nn,cm),e(nn,Gn),e(Gn,Da),e(Da,lm),e(Gn,hm),e(U,pm),e(U,Ia),e(Ia,mm),e(b,fm),e(b,j),m(on,j,null),e(j,um),e(j,La),e(La,_m),e(j,gm),e(j,Ba),e(Ba,km),e(j,vm),e(j,rn),e(rn,Xn),e(Xn,Fa),e(Fa,bm),e(Xn,ym),e(rn,Tm),e(rn,Yn),e(Yn,Aa),e(Aa,wm),e(Yn,zm),e(j,xm),e(j,Ca),e(Ca,Em),e(b,$m),e(b,rt),m(an,rt,null),e(rt,Pm),e(rt,Na),e(Na,qm),e(b,Dm),e(b,be),m(sn,be,null),e(be,Im),e(be,Oa),e(Oa,Lm),e(be,Bm),e(be,Le),e(Le,Jn),e(Jn,Sa),e(Sa,Fm),e(Jn,Am),e(Le,Cm),e(Le,Kn),e(Kn,Wa),e(Wa,Nm),e(Kn,Om),e(
Le,Sm),e(Le,Qn),e(Qn,Ra),e(Ra,Wm),e(Qn,Rm),e(b,Um),e(b,at),m(dn,at,null),e(at,jm),e(at,cn),e(cn,Vm),e(cn,Ua),e(Ua,Hm),e(cn,Mm),e(b,Gm),e(b,L),m(ln,L,null),e(L,Xm),e(L,ja),e(ja,Ym),e(L,Jm),e(L,hn),e(hn,Km),e(hn,Zn),e(Zn,Qm),e(hn,Zm),e(L,ef),e(L,pn),e(pn,eo),e(eo,Va),e(Va,tf),e(eo,nf),e(pn,of),e(pn,to),e(to,Ha),e(Ha,rf),e(to,af),e(L,sf),e(L,Ma),e(Ma,df),e(L,cf),e(L,mn),e(mn,no),e(no,Ga),e(Ga,lf),e(no,hf),e(mn,pf),e(mn,oo),e(oo,Xa),e(Xa,mf),e(oo,ff),e(b,uf),e(b,V),m(fn,V,null),e(V,_f),e(V,Be),e(Be,gf),e(Be,Ya),e(Ya,kf),e(Be,vf),e(Be,Ja),e(Ja,bf),e(Be,yf),e(V,Tf),e(V,Ka),e(Ka,wf),e(V,zf),e(V,un),e(un,ro),e(ro,Qa),e(Qa,xf),e(ro,Ef),e(un,$f),e(un,ao),e(ao,Za),e(Za,Pf),e(ao,qf),e(V,Df),e(V,es),e(es,If),e(b,Lf),e(b,H),m(_n,H,null),e(H,Bf),e(H,ts),e(ts,Ff),e(H,Af),e(H,ns),e(ns,Cf),e(H,Nf),e(H,gn),e(gn,so),e(so,os),e(os,Of),e(so,Sf),e(gn,Wf),e(gn,io),e(io,rs),e(rs,Rf),e(io,Uf),e(H,jf),e(H,as),e(as,Vf),e(b,Hf),e(b,st),m(kn,st,null),e(st,Mf),e(st,ss),e(ss,Gf),e(b,Xf),e(b,it),m(vn,it,null),e(it,Yf),e(it,is),e(is,Jf),e(b,Kf),e(b,B),m(bn,B,null),e(B,Qf),e(B,ds),e(ds,Zf),e(B,eu),e(B,cs),e(cs,tu),e(B,nu),e(B,yn),e(yn,ls),e(ls,ou),e(yn,ru),e(yn,hs),e(hs,au),e(B,su),e(B,ps),e(ps,iu),e(B,du),e(B,Tn),e(Tn,co),e(co,ms),e(ms,cu),e(co,lu),e(Tn,hu),e(Tn,lo),e(lo,fs),e(fs,pu),e(lo,mu),e(b,fu),e(b,D),m(wn,D,null),e(D,uu),e(D,us),e(us,_u),e(D,gu),e(D,zn),e(zn,ku),e(zn,ho),e(ho,vu),e(zn,bu),e(D,yu),e(D,xn),e(xn,po),e(po,_s),e(_s,Tu),e(po,wu),e(xn,zu),e(xn,mo),e(mo,gs),e(gs,xu),e(mo,Eu),e(D,$u),e(D,ks),e(ks,Pu),e(D,qu),e(D,En),e(En,fo),e(fo,vs),e(vs,Du),e(fo,Iu),e(En,Lu),e(En,uo),e(uo,bs),e(bs,Bu),e(uo,Fu),e(D,Au),e(D,ys),e(ys,Cu),e(b,Nu),e(b,dt),m($n,dt,null),e(dt,Ou),e(dt,Ts),e(Ts,Su),Ws=!0},p(i,[y]){const Pn={};y&2&&(Pn.$$scope={dirty:y,ctx:i}),Ge.$set(Pn);const ws={};y&2&&(ws.$$scope={dirty:y,ctx:i}),tt.$set(ws)},i(i){Ws||(f(mt.$$.fragment,i),f(_t.$$.fragment,i),f(gt.$$.fragment,i),f(vt.$$.fragment,i),f(bt.$$.fragment,i),f(yt.$$.fragment,i),f(wt.$$.fragment,i),f(xt.$$.fragment,i),f($t.$$.fragment,i),f(Pt.$$.fragment,i),f(qt.$$.fragment,i),f(Dt.$$.fragment,i),f(It.$$.fragment,i),f(Ge.$$.fragment,i),f(Lt.$$.fragment,i),f(Bt.$$.fragment,i),f(Ft.$$.fragment,i),f(Ct.$$.fragment,i),f(Ot.$$.fragment,i),f(St.$$.fragment,i),f(Wt.$$.fragment,i),f(Ut.$$.fragment,i),f(Vt.$$.fragment,i),f(Mt.$$.fragment,i),f(Gt.$$.fragment,i),f(Xt.$$.fragment,i),f(Yt.$$.fragment,i),f(Jt.$$.fragment,i),f(tt.$$.fragment,i),f(Kt.$$.fragment,i),f(Qt.$$.fragment,i),f(Zt.$$.fragment,i),f(en.$$.fragment,i),f(tn.$$.fragment,i),f(on.$$.fragment,i),f(an.$$.fragment,i),f(sn.$$.fragment,i),f(dn.$$.fragment,i),f(ln.$$.fragment,i),f(fn.$$.fragment,i),f(_n.$$.fragment,i),f(kn.$$.fragment,i),f(vn.$$.fragment,i),f(bn.$$.fragment,i),f(wn.$$.fragment,i),f($n.$$.fragment,i),Ws=!0)},o(i){u(mt.$$.fragment,i),u(_t.$$.fragment,i),u(gt.$$.fragment,i),u(vt.$$.fragment,i),u(bt.$$.fragment,i),u(yt.$$.fragment,i),u(wt.$$.fragment,i),u(xt.$$.fragment,i),u($t.$$.fragment,i),u(Pt.$$.fragment,i),u(qt.$$.fragment,i),u(Dt.$$.fragment,i),u(It.$$.fragment,i),u(Ge.$$.fragment,i),u(Lt.$$.fragment,i),u(Bt.$$.fragment,i),u(Ft.$$.fragment,i),u(Ct.$$.fragment,i),u(Ot.$$.fragment,i),u(St.$$.fragment,i),u(Wt.$$.fragment,i),u(Ut.$$.fragment,i),u(Vt.$$.fragment,i),u(Mt.$$.fragment,i),u(Gt.$$.fragment,i),u(Xt.$$.fragment,i),u(Yt.$$.fragment,i),u(Jt.$$.fragment,i),u(tt.$$.fragment,i),u(Kt.$$.fragment,i),u(Qt.$$.fragment,i),u(Zt.$$.fragment,i),u(en.$$.fragment,i),u(tn.$$.fragment,i),u(on.$$.fragment,i),u(an.$$.fragment,i),u(sn.$$.fragment,i),u(dn.$$.fragment,i),u(ln.$$.fragment,i),u(fn.$$.fragm
ent,i),u(_n.$$.fragment,i),u(kn.$$.fragment,i),u(vn.$$.fragment,i),u(bn.$$.fragment,i),u(wn.$$.fragment,i),u($n.$$.fragment,i),Ws=!1},d(i){t(E),i&&t(C),i&&t($),_(mt),i&&t($s),i&&t(Oe),i&&t(Ps),i&&t(Se),i&&t(qs),i&&t(N),i&&t(Ds),i&&t(Ee),i&&t(Is),i&&t(de),i&&t(Ls),i&&t(q),i&&t(Bs),i&&t($e),_(_t),i&&t(Fs),i&&t(g),_(gt),_(vt),_(bt),_(yt),_(wt),_(xt),_($t),_(Pt),_(qt),_(Dt),_(It),_(Ge),_(Lt),_(Bt),i&&t(As),i&&t(qe),_(Ft),i&&t(Cs),i&&t(J),i&&t(Ns),i&&t(k),_(Ct),_(Ot),_(St),_(Wt),_(Ut),_(Vt),_(Mt),_(Gt),_(Xt),_(Yt),_(Jt),_(tt),_(Kt),_(Qt),i&&t(Os),i&&t(De),_(Zt),i&&t(Ss),i&&t(b),_(en),_(tn),_(on),_(an),_(sn),_(dn),_(ln),_(fn),_(_n),_(kn),_(vn),_(bn),_(wn),_($n)}}}const Iv={local:"tokenizer",sections:[{local:"transformers.PreTrainedTokenizer",title:"PreTrainedTokenizer"},{local:"transformers.PreTrainedTokenizerFast",title:"PreTrainedTokenizerFast"},{local:"transformers.BatchEncoding",title:"BatchEncoding"}],title:"Tokenizer"};function Lv(pt,E,C){let{fw:$}=E;return pt.$$set=P=>{"fw"in P&&C(0,$=P.fw)},[$]}class Sv extends zv{constructor(E){super();xv(this,E,Lv,Dv,Ev,{fw:0})}}export{Sv as default,Iv as metadata};
9,997
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/main_classes/keras_callbacks.mdx-0906f637.js
import{S as aa,i as ea,s as ta,e as o,k as _,w as F,t as L,L as sa,c as n,d as e,m as w,a as h,x as G,h as W,b as i,J as s,g as m,y as Q,K as ra,q as R,o as U,B as X}from"../../chunks/vendor-b1433968.js";import{I as Y}from"../../chunks/IconCopyLink-7029626d.js";function la(K){let c,d,t,r,y,p,S,$,N,T,b,q,g,f,u,E,v,I,P,J,x,k,C;return p=new Y({}),v=new Y({}),{c(){c=o("meta"),d=_(),t=o("h1"),r=o("a"),y=o("span"),F(p.$$.fragment),S=_(),$=o("span"),N=L("Keras callbacks"),T=_(),b=o("p"),q=L(`When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:`),g=_(),f=o("h2"),u=o("a"),E=o("span"),F(v.$$.fragment),I=_(),P=o("span"),J=L("PushToHubCallback"),x=_(),k=o("div"),this.h()},l(a){const l=sa('[data-svelte="svelte-1phssyn"]',document.head);c=n(l,"META",{name:!0,content:!0}),l.forEach(e),d=w(a),t=n(a,"H1",{class:!0});var H=h(t);r=n(H,"A",{id:!0,class:!0,href:!0});var B=h(r);y=n(B,"SPAN",{});var D=h(y);G(p.$$.fragment,D),D.forEach(e),B.forEach(e),S=w(H),$=n(H,"SPAN",{});var M=h($);N=W(M,"Keras callbacks"),M.forEach(e),H.forEach(e),T=w(a),b=n(a,"P",{});var O=h(b);q=W(O,`When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:`),O.forEach(e),g=w(a),f=n(a,"H2",{class:!0});var A=h(f);u=n(A,"A",{id:!0,class:!0,href:!0});var V=h(u);E=n(V,"SPAN",{});var j=h(E);G(v.$$.fragment,j),j.forEach(e),V.forEach(e),I=w(A),P=n(A,"SPAN",{});var z=h(P);J=W(z,"PushToHubCallback"),z.forEach(e),A.forEach(e),x=w(a),k=n(a,"DIV",{class:!0});var Z=h(k);Z.forEach(e),this.h()},h(){i(c,"name","hf:doc:metadata"),i(c,"content",JSON.stringify(oa)),i(r,"id","keras-callbacks"),i(r,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(r,"href","#keras-callbacks"),i(t,"class","relative group"),i(u,"id","transformers.PushToHubCallback"),i(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(u,"href","#transformers.PushToHubCallback"),i(f,"class","relative group"),i(k,"class","docstring")},m(a,l){s(document.head,c),m(a,d,l),m(a,t,l),s(t,r),s(r,y),Q(p,y,null),s(t,S),s(t,$),s($,N),m(a,T,l),m(a,b,l),s(b,q),m(a,g,l),m(a,f,l),s(f,u),s(u,E),Q(v,E,null),s(f,I),s(f,P),s(P,J),m(a,x,l),m(a,k,l),C=!0},p:ra,i(a){C||(R(p.$$.fragment,a),R(v.$$.fragment,a),C=!0)},o(a){U(p.$$.fragment,a),U(v.$$.fragment,a),C=!1},d(a){e(c),a&&e(d),a&&e(t),X(p),a&&e(T),a&&e(b),a&&e(g),a&&e(f),X(v),a&&e(x),a&&e(k)}}}const oa={local:"keras-callbacks",sections:[{local:"transformers.PushToHubCallback",title:"PushToHubCallback"}],title:"Keras callbacks"};function na(K,c,d){let{fw:t}=c;return K.$$set=r=>{"fw"in r&&d(0,t=r.fw)},[t]}class ha extends aa{constructor(c){super();ea(this,c,na,la,ta,{fw:0})}}export{ha as default,oa as metadata};
9,998
0
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages
hf_public_repos/doc-build/transformers/v4.15.0/en/_app/pages/main_classes/optimizer_schedules.mdx-f69bb992.js
import{S as Ji,i as Ki,s as Qi,e as r,k as l,w as u,t as s,L as Xi,c as n,d as a,m as c,a as o,x as f,h as i,b as m,M as kr,J as t,g as d,y as g,K as Yi,q as _,o as w,B as v}from"../../chunks/vendor-b1433968.js";import{D as $}from"../../chunks/Docstring-ff504c58.js";import{C as Nr}from"../../chunks/CodeBlock-a320dbd7.js";import{I as Z}from"../../chunks/IconCopyLink-7029626d.js";import"../../chunks/CopyButton-f65cb278.js";function Zi(Cr){let S,dt,E,x,bt,_e,Fr,$t,Or,La,ee,Rr,At,jr,qr,Pa,I,zt,Ur,Gr,we,Vr,Et,Mr,Hr,Br,xt,Jr,Wa,k,te,Tt,ve,Kr,Dt,Qr,Sa,T,ye,Xr,be,Yr,$e,Zr,en,tn,ae,Ae,an,Lt,rn,Ia,N,re,Pt,ze,nn,Wt,on,ka,h,Ee,sn,ht,ln,xe,cn,mn,b,pn,St,dn,hn,Te,un,fn,It,gn,_n,kt,wn,vn,Nt,yn,bn,Ct,$n,An,Ft,zn,En,xn,Ot,Tn,Dn,De,Ln,Le,Pn,Wn,Sn,D,Pe,Rt,In,kn,We,jt,Nn,Cn,Se,Fn,Ie,On,Rn,jn,qt,Ut,qn,Un,Gt,Vt,Gn,Vn,Mt,Ht,Mn,Hn,Bt,Bn,Jn,ke,Kn,Jt,Qn,Xn,Ne,Yn,L,Zn,Kt,eo,to,ut,ao,ro,Qt,no,oo,so,Ce,io,Xt,lo,co,Fe,mo,ne,Oe,po,Yt,ho,Na,C,oe,Zt,Re,uo,ea,fo,Ca,z,je,go,F,_o,ta,wo,vo,qe,yo,bo,$o,aa,Ao,zo,se,Ue,Eo,ra,xo,Fa,O,Ge,To,na,Do,Oa,R,ie,oa,Ve,Lo,sa,Po,Ra,j,le,ia,Me,Wo,la,So,ja,q,He,Io,ca,ko,qa,U,Be,No,ma,Co,Ua,G,Je,Fo,pa,Oo,Ga,V,Ke,Ro,da,jo,Va,Qe,ys,Ma,M,Xe,qo,ha,Uo,Ha,Ye,bs,Ba,H,Ze,Go,ua,Vo,Ja,et,$s,Ka,B,tt,Mo,fa,Ho,Qa,at,As,Xa,P,rt,Bo,nt,Jo,ga,Ko,Qo,Xo,ce,Yo,_a,Zo,es,ot,ts,Ya,J,me,wa,st,as,va,rs,Za,K,it,ns,ya,os,er,Q,pe,ba,lt,ss,$a,is,tr,X,de,Aa,ct,ls,za,cs,ar,W,mt,ms,Y,ps,Ea,ds,hs,xa,us,fs,gs,he,pt,_s,Ta,ws,rr;return _e=new Z({}),ve=new Z({}),ye=new $({props:{name:"class transformers.AdamW",anchor:"transformers.AdamW",parameters:[{name:"params",val:": typing.Iterable[torch.nn.parameter.Parameter]"},{name:"lr",val:": float = 0.001"},{name:"betas",val:": typing.Tuple[float, float] = (0.9, 0.999)"},{name:"eps",val:": float = 1e-06"},{name:"weight_decay",val:": float = 0.0"},{name:"correct_bias",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L272",parametersDescription:[{anchor:"transformers.AdamW.params",description:`<strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.`,name:"params"},{anchor:"transformers.AdamW.lr",description:`<strong>lr</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use.`,name:"lr"},{anchor:"transformers.AdamW.betas",description:`<strong>betas</strong> (<code>Tuple[float,float]</code>, <em>optional</em>, defaults to (0.9, 0.999)) &#x2014; Adam&#x2019;s betas parameters (b1, b2).`,name:"betas"},{anchor:"transformers.AdamW.eps",description:`<strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; Adam&#x2019;s epsilon for numerical stability.`,name:"eps"},{anchor:"transformers.AdamW.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Decoupled weight decay to apply.`,name:"weight_decay"},{anchor:"transformers.AdamW.correct_bias",description:`<strong>correct_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <em>True</em>) &#x2014; Whether or not to correct bias in Adam (for instance, in Bert TF repository they use <code>False</code>).`,name:"correct_bias"}]}}),Ae=new $({props:{name:"step",anchor:"transformers.AdamW.step",parameters:[{name:"closure",val:": typing.Callable = 
None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L312",parametersDescription:[{anchor:"transformers.AdamW.step.closure",description:"<strong>closure</strong> (<code>Callable</code>, <em>optional</em>) &#x2014; A closure that reevaluates the model and returns the loss.",name:"closure"}]}}),ze=new Z({}),Ee=new $({props:{name:"class transformers.Adafactor",anchor:"transformers.Adafactor",parameters:[{name:"params",val:""},{name:"lr",val:" = None"},{name:"eps",val:" = (1e-30, 0.001)"},{name:"clip_threshold",val:" = 1.0"},{name:"decay_rate",val:" = -0.8"},{name:"beta1",val:" = None"},{name:"weight_decay",val:" = 0.0"},{name:"scale_parameter",val:" = True"},{name:"relative_step",val:" = True"},{name:"warmup_init",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L374",parametersDescription:[{anchor:"transformers.Adafactor.params",description:`<strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.`,name:"params"},{anchor:"transformers.Adafactor.lr",description:`<strong>lr</strong> (<code>float</code>, <em>optional</em>) &#x2014; The external learning rate.`,name:"lr"},{anchor:"transformers.Adafactor.eps",description:`<strong>eps</strong> (<code>Tuple[float, float]</code>, <em>optional</em>, defaults to (1e-30, 1e-3)) &#x2014; Regularization constants for square gradient and parameter scale respectively`,name:"eps"},{anchor:"transformers.Adafactor.clip_threshold",description:`<strong>clip_threshold</strong> (<code>float</code>, <em>optional</em>, defaults 1.0) &#x2014; Threshold of root mean square of final gradient update`,name:"clip_threshold"},{anchor:"transformers.Adafactor.decay_rate",description:`<strong>decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to -0.8) &#x2014; Coefficient used to compute running averages of square`,name:"decay_rate"},{anchor:"transformers.Adafactor.beta1",description:`<strong>beta1</strong> (<code>float</code>, <em>optional</em>) &#x2014; Coefficient used for computing running averages of gradient`,name:"beta1"},{anchor:"transformers.Adafactor.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Weight decay (L2 penalty)`,name:"weight_decay"},{anchor:"transformers.Adafactor.scale_parameter",description:`<strong>scale_parameter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, learning rate is scaled by root mean square`,name:"scale_parameter"},{anchor:"transformers.Adafactor.relative_step",description:`<strong>relative_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, time-dependent learning rate is computed instead of external learning rate`,name:"relative_step"},{anchor:"transformers.Adafactor.warmup_init",description:`<strong>warmup_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Time-dependent learning rate computation depends on whether warm-up initialization is being used`,name:"warmup_init"}]}}),ke=new Nr({props:{code:"Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3),",highlighted:'Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">False</span>, relative_step=<span class="hljs-literal">False</span>, warmup_init=<span 
class="hljs-literal">False</span>, lr=<span class="hljs-number">1e-3</span>)'}}),Ne=new Nr({props:{code:"Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None),",highlighted:'Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>)'}}),Ce=new Nr({props:{code:`from transformers.optimization import Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler)),`,highlighted:`<span class="hljs-keyword">from</span> transformers.optimization <span class="hljs-keyword">import</span> Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))`}}),Fe=new Nr({props:{code:`# replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False ),`,highlighted:`<span class="hljs-comment"># replace AdamW with Adafactor</span> optimizer = Adafactor( model.parameters(), lr=<span class="hljs-number">1e-3</span>, eps=(<span class="hljs-number">1e-30</span>, <span class="hljs-number">1e-3</span>), clip_threshold=<span class="hljs-number">1.0</span>, decay_rate=-<span class="hljs-number">0.8</span>, beta1=<span class="hljs-literal">None</span>, weight_decay=<span class="hljs-number">0.0</span>, relative_step=<span class="hljs-literal">False</span>, scale_parameter=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span> )`}}),Oe=new $({props:{name:"step",anchor:"transformers.Adafactor.step",parameters:[{name:"closure",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L518",parametersDescription:[{anchor:"transformers.Adafactor.step.closure",description:`<strong>closure</strong> (callable, optional) &#x2014; A closure that reevaluates the model and returns the loss.`,name:"closure"}]}}),Re=new Z({}),je=new $({props:{name:"class transformers.AdamWeightDecay",anchor:"transformers.AdamWeightDecay",parameters:[{name:"learning_rate",val:": typing.Union[float, keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule] = 0.001"},{name:"beta_1",val:": float = 0.9"},{name:"beta_2",val:": float = 0.999"},{name:"epsilon",val:": float = 1e-07"},{name:"amsgrad",val:": bool = False"},{name:"weight_decay_rate",val:": float = 0.0"},{name:"include_in_weight_decay",val:": typing.Optional[typing.List[str]] = None"},{name:"exclude_from_weight_decay",val:": typing.Optional[typing.List[str]] = None"},{name:"name",val:": str = 'AdamWeightDecay'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization_tf.py#L152",parametersDescription:[{anchor:"transformers.AdamWeightDecay.learning_rate",description:`<strong>learning_rate</strong> (<code>Union[float, 
tf.keras.optimizers.schedules.LearningRateSchedule]</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use or a schedule.`,name:"learning_rate"},{anchor:"transformers.AdamWeightDecay.beta_1",description:`<strong>beta_1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.`,name:"beta_1"},{anchor:"transformers.AdamWeightDecay.beta_2",description:`<strong>beta_2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates.`,name:"beta_2"},{anchor:"transformers.AdamWeightDecay.epsilon",description:`<strong>epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon parameter in Adam, which is a small constant for numerical stability.`,name:"epsilon"},{anchor:"transformers.AdamWeightDecay.amsgrad",description:`<strong>amsgrad</strong> (<code>bool</code>, <em>optional</em>, default to <em>False</em>) &#x2014; Whether to apply AMSGrad variant of this algorithm or not, see <a href="https://arxiv.org/abs/1904.09237" rel="nofollow">On the Convergence of Adam and Beyond</a>.`,name:"amsgrad"},{anchor:"transformers.AdamWeightDecay.weight_decay_rate",description:`<strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply.`,name:"weight_decay_rate"},{anchor:"transformers.AdamWeightDecay.include_in_weight_decay",description:`<strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters by default (unless they are in <code>exclude_from_weight_decay</code>).`,name:"include_in_weight_decay"},{anchor:"transformers.AdamWeightDecay.exclude_from_weight_decay",description:`<strong>exclude_from_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to exclude from applying weight decay to. If a <code>include_in_weight_decay</code> is passed, the names in it will supersede this list.`,name:"exclude_from_weight_decay"},{anchor:"transformers.AdamWeightDecay.name",description:`<strong>name</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;AdamWeightDecay&#x2019;) &#x2014; Optional name for the operations created when applying gradients. kwargs &#x2014; Keyword arguments. Allowed to be {<code>clipnorm</code>, <code>clipvalue</code>, <code>lr</code>, <code>decay</code>}. <code>clipnorm</code> is clip gradients by norm; <code>clipvalue</code> is clip gradients by value, <code>decay</code> is included for backward compatibility to allow time inverse decay of learning rate. 
<code>lr</code> is included for backward compatibility, recommended to use <code>learning_rate</code> instead.`,name:"name"}]}}),Ue=new $({props:{name:"from_config",anchor:"transformers.AdamWeightDecay.from_config",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization_tf.py#L207"}}),Ge=new $({props:{name:"transformers.create_optimizer",anchor:"transformers.create_optimizer",parameters:[{name:"init_lr",val:": float"},{name:"num_train_steps",val:": int"},{name:"num_warmup_steps",val:": int"},{name:"min_lr_ratio",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"weight_decay_rate",val:": float = 0.0"},{name:"power",val:": float = 1.0"},{name:"include_in_weight_decay",val:": typing.Optional[typing.List[str]] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization_tf.py#L82",parametersDescription:[{anchor:"transformers.create_optimizer.init_lr",description:`<strong>init_lr</strong> (<code>float</code>) &#x2014; The desired learning rate at the end of the warmup phase.`,name:"init_lr"},{anchor:"transformers.create_optimizer.num_train_steps",description:`<strong>num_train_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_train_steps"},{anchor:"transformers.create_optimizer.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of warmup steps.`,name:"num_warmup_steps"},{anchor:"transformers.create_optimizer.min_lr_ratio",description:`<strong>min_lr_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The final learning rate at the end of the linear decay will be <code>init_lr * min_lr_ratio</code>.`,name:"min_lr_ratio"},{anchor:"transformers.create_optimizer.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 to use in Adam.`,name:"adam_beta1"},{anchor:"transformers.create_optimizer.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 to use in Adam.`,name:"adam_beta2"},{anchor:"transformers.create_optimizer.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon to use in Adam.`,name:"adam_epsilon"},{anchor:"transformers.create_optimizer.weight_decay_rate",description:`<strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to use.`,name:"weight_decay_rate"},{anchor:"transformers.create_optimizer.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The power to use for PolynomialDecay.`,name:"power"},{anchor:"transformers.create_optimizer.include_in_weight_decay",description:`<strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. 
If none is passed, weight decay is applied to all parameters except bias and layer norm parameters.`,name:"include_in_weight_decay"}]}}),Ve=new Z({}),Me=new Z({}),He=new $({props:{name:"class transformers.SchedulerType",anchor:"transformers.SchedulerType",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/trainer_utils.py#L282"}}),Be=new $({props:{name:"transformers.get_scheduler",anchor:"transformers.get_scheduler",parameters:[{name:"name",val:": typing.Union[str, transformers.trainer_utils.SchedulerType]"},{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": typing.Optional[int] = None"},{name:"num_training_steps",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L232",parametersDescription:[{anchor:"transformers.get_scheduler.name",description:`<strong>name</strong> (<code>str</code> or <code>SchedulerType</code>) &#x2014; The name of the scheduler to use.`,name:"name"},{anchor:"transformers.get_scheduler.optimizer",description:`<strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer that will be used during training.`,name:"optimizer"},{anchor:"transformers.get_scheduler.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.`,name:"num_warmup_steps"},{anchor:"transformers.get_scheduler.num_training_steps",description:`<strong>num_training_steps</strong> (\`int&#x201C;, <em>optional</em>) &#x2014; The number of training steps to do. 
This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.`,name:"num_training_steps"}]}}),Je=new $({props:{name:"transformers.get_constant_schedule",anchor:"transformers.get_constant_schedule",parameters:[{name:"optimizer",val:": Optimizer"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L33",parametersDescription:[{anchor:"transformers.get_constant_schedule.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_constant_schedule.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),Ke=new $({props:{name:"transformers.get_constant_schedule_with_warmup",anchor:"transformers.get_constant_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L49",parametersDescription:[{anchor:"transformers.get_constant_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_constant_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_constant_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),Xe=new $({props:{name:"transformers.get_cosine_schedule_with_warmup",anchor:"transformers.get_cosine_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"num_training_steps",val:": int"},{name:"num_cycles",val:": float = 0.5"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L103",parametersDescription:[{anchor:"transformers.get_cosine_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_cycles",description:`<strong>num_cycles</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; The number of waves in the cosine schedule (the defaults 
is to just decrease from the max value to 0 following a half-cosine).`,name:"num_cycles"},{anchor:"transformers.get_cosine_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),Ze=new $({props:{name:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"num_training_steps",val:": int"},{name:"num_cycles",val:": int = 1"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L137",parametersDescription:[{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles",description:`<strong>num_cycles</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of hard restarts to use.`,name:"num_cycles"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),tt=new $({props:{name:"transformers.get_linear_schedule_with_warmup",anchor:"transformers.get_linear_schedule_with_warmup",parameters:[{name:"optimizer",val:""},{name:"num_warmup_steps",val:""},{name:"num_training_steps",val:""},{name:"last_epoch",val:" = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L74",parametersDescription:[{anchor:"transformers.get_linear_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_linear_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_linear_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_linear_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` 
<p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),rt=new $({props:{name:"transformers.get_polynomial_decay_schedule_with_warmup",anchor:"transformers.get_polynomial_decay_schedule_with_warmup",parameters:[{name:"optimizer",val:""},{name:"num_warmup_steps",val:""},{name:"num_training_steps",val:""},{name:"lr_end",val:" = 1e-07"},{name:"power",val:" = 1.0"},{name:"last_epoch",val:" = -1"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py#L172",parametersDescription:[{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.lr_end",description:`<strong>lr_end</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The end LR.`,name:"lr_end"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Power factor.`,name:"power"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),st=new Z({}),it=new $({props:{name:"class transformers.WarmUp",anchor:"transformers.WarmUp",parameters:[{name:"initial_learning_rate",val:": float"},{name:"decay_schedule_fn",val:": typing.Callable"},{name:"warmup_steps",val:": int"},{name:"power",val:": float = 1.0"},{name:"name",val:": str = None"}],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization_tf.py#L24",parametersDescription:[{anchor:"transformers.WarmUp.initial_learning_rate",description:`<strong>initial_learning_rate</strong> (<code>float</code>) &#x2014; The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end of the warmup).`,name:"initial_learning_rate"},{anchor:"transformers.WarmUp.decay_schedule_fn",description:`<strong>decay_schedule_fn</strong> (<code>Callable</code>) &#x2014; The schedule function to apply after the warmup for the rest of training.`,name:"decay_schedule_fn"},{anchor:"transformers.WarmUp.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup part of training.`,name:"warmup_steps"},{anchor:"transformers.WarmUp.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The power to use for the polynomial warmup (defaults is a linear warmup).`,name:"power"},{anchor:"transformers.WarmUp.name",description:`<strong>name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Optional name prefix for the returned tensors during the 
schedule.`,name:"name"}]}}),lt=new Z({}),ct=new Z({}),mt=new $({props:{name:"class transformers.GradientAccumulator",anchor:"transformers.GradientAccumulator",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization_tf.py#L280"}}),pt=new $({props:{name:"reset",anchor:"transformers.GradientAccumulator.reset",parameters:[],source:"https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization_tf.py#L342"}}),{c(){S=r("meta"),dt=l(),E=r("h1"),x=r("a"),bt=r("span"),u(_e.$$.fragment),Fr=l(),$t=r("span"),Or=s("Optimization"),La=l(),ee=r("p"),Rr=s("The "),At=r("code"),jr=s(".optimization"),qr=s(" module provides:"),Pa=l(),I=r("ul"),zt=r("li"),Ur=s("an optimizer with weight decay fixed that can be used to fine-tuned models, and"),Gr=l(),we=r("li"),Vr=s("several schedules in the form of schedule objects that inherit from "),Et=r("code"),Mr=s("_LRSchedule"),Hr=s(":"),Br=l(),xt=r("li"),Jr=s("a gradient accumulation class to accumulate the gradients of multiple batches"),Wa=l(),k=r("h2"),te=r("a"),Tt=r("span"),u(ve.$$.fragment),Kr=l(),Dt=r("span"),Qr=s("AdamW (PyTorch)"),Sa=l(),T=r("div"),u(ye.$$.fragment),Xr=l(),be=r("p"),Yr=s("Implements Adam algorithm with weight decay fix as introduced in "),$e=r("a"),Zr=s("Decoupled Weight Decay Regularization"),en=s("."),tn=l(),ae=r("div"),u(Ae.$$.fragment),an=l(),Lt=r("p"),rn=s("Performs a single optimization step."),Ia=l(),N=r("h2"),re=r("a"),Pt=r("span"),u(ze.$$.fragment),nn=l(),Wt=r("span"),on=s("AdaFactor (PyTorch)"),ka=l(),h=r("div"),u(Ee.$$.fragment),sn=l(),ht=r("p"),ln=s(`AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: `),xe=r("a"),cn=s("https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),mn=l(),b=r("p"),pn=s("Paper: "),St=r("em"),dn=s("Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"),hn=l(),Te=r("a"),un=s("https://arxiv.org/abs/1804.04235"),fn=s(` Note that this optimizer internally adjusts the learning rate depending on the `),It=r("code"),gn=s("scale_parameter"),_n=s(", "),kt=r("code"),wn=s("relative_step"),vn=s(` and `),Nt=r("code"),yn=s("warmup_init"),bn=s(" options. 
To use a manual (external) learning rate schedule you should set "),Ct=r("code"),$n=s("scale_parameter=False"),An=s(` and `),Ft=r("code"),zn=s("relative_step=False"),En=s("."),xn=l(),Ot=r("p"),Tn=s("This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested."),Dn=l(),De=r("p"),Ln=s("Recommended T5 finetuning settings ("),Le=r("a"),Pn=s("https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),Wn=s("):"),Sn=l(),D=r("ul"),Pe=r("li"),Rt=r("p"),In=s("Training without LR warmup or clip_threshold is not recommended."),kn=l(),We=r("ul"),jt=r("li"),Nn=s("use scheduled LR warm-up to fixed LR"),Cn=l(),Se=r("li"),Fn=s("use clip_threshold=1.0 ("),Ie=r("a"),On=s("https://arxiv.org/abs/1804.04235"),Rn=s(")"),jn=l(),qt=r("li"),Ut=r("p"),qn=s("Disable relative updates"),Un=l(),Gt=r("li"),Vt=r("p"),Gn=s("Use scale_parameter=False"),Vn=l(),Mt=r("li"),Ht=r("p"),Mn=s("Additional optimizer operations like gradient clipping should not be used alongside Adafactor"),Hn=l(),Bt=r("p"),Bn=s("Example:"),Jn=l(),u(ke.$$.fragment),Kn=l(),Jt=r("p"),Qn=s("Others reported the following combination to work well:"),Xn=l(),u(Ne.$$.fragment),Yn=l(),L=r("p"),Zn=s("When using "),Kt=r("code"),eo=s("lr=None"),to=s(" with "),ut=r("a"),ao=s("Trainer"),ro=s(" you will most likely need to use "),Qt=r("code"),no=s("AdafactorSchedule"),oo=s(" scheduler as following:"),so=l(),u(Ce.$$.fragment),io=l(),Xt=r("p"),lo=s("Usage:"),co=l(),u(Fe.$$.fragment),mo=l(),ne=r("div"),u(Oe.$$.fragment),po=l(),Yt=r("p"),ho=s("Performs a single optimization step"),Na=l(),C=r("h2"),oe=r("a"),Zt=r("span"),u(Re.$$.fragment),uo=l(),ea=r("span"),fo=s("AdamWeightDecay (TensorFlow)"),Ca=l(),z=r("div"),u(je.$$.fragment),go=l(),F=r("p"),_o=s(`Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the loss function is `),ta=r("em"),wo=s("not"),vo=s(` the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in `),qe=r("a"),yo=s("Decoupled Weight Decay Regularization"),bo=s("."),$o=l(),aa=r("p"),Ao=s(`Instead we want ot decay the weights in a manner that doesn\u2019t interact with the m/v parameters. 
This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.`),zo=l(),se=r("div"),u(Ue.$$.fragment),Eo=l(),ra=r("p"),xo=s("Creates an optimizer from its config with WarmUp custom object."),Fa=l(),O=r("div"),u(Ge.$$.fragment),To=l(),na=r("p"),Do=s("Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."),Oa=l(),R=r("h2"),ie=r("a"),oa=r("span"),u(Ve.$$.fragment),Lo=l(),sa=r("span"),Po=s("Schedules"),Ra=l(),j=r("h3"),le=r("a"),ia=r("span"),u(Me.$$.fragment),Wo=l(),la=r("span"),So=s("Learning Rate Schedules (Pytorch)"),ja=l(),q=r("div"),u(He.$$.fragment),Io=l(),ca=r("p"),ko=s("An enumeration."),qa=l(),U=r("div"),u(Be.$$.fragment),No=l(),ma=r("p"),Co=s("Unified API to get any scheduler from its name."),Ua=l(),G=r("div"),u(Je.$$.fragment),Fo=l(),pa=r("p"),Oo=s("Create a schedule with a constant learning rate, using the learning rate set in optimizer."),Ga=l(),V=r("div"),u(Ke.$$.fragment),Ro=l(),da=r("p"),jo=s(`Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.`),Va=l(),Qe=r("img"),Ma=l(),M=r("div"),u(Xe.$$.fragment),qo=l(),ha=r("p"),Uo=s(`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Ha=l(),Ye=r("img"),Ba=l(),H=r("div"),u(Ze.$$.fragment),Go=l(),ua=r("p"),Vo=s(`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Ja=l(),et=r("img"),Ka=l(),B=r("div"),u(tt.$$.fragment),Mo=l(),fa=r("p"),Ho=s(`Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Qa=l(),at=r("img"),Xa=l(),P=r("div"),u(rt.$$.fragment),Bo=l(),nt=r("p"),Jo=s(`Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `),ga=r("em"),Ko=s("lr_end"),Qo=s(`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Xo=l(),ce=r("p"),Yo=s("Note: "),_a=r("em"),Zo=s("power"),es=s(` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at `),ot=r("a"),ts=s("https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),Ya=l(),J=r("h3"),me=r("a"),wa=r("span"),u(st.$$.fragment),as=l(),va=r("span"),rs=s("Warmup (TensorFlow)"),Za=l(),K=r("div"),u(it.$$.fragment),ns=l(),ya=r("p"),os=s("Applies a warmup schedule on a given learning rate decay schedule."),er=l(),Q=r("h2"),pe=r("a"),ba=r("span"),u(lt.$$.fragment),ss=l(),$a=r("span"),is=s("Gradient Strategies"),tr=l(),X=r("h3"),de=r("a"),Aa=r("span"),u(ct.$$.fragment),ls=l(),za=r("span"),cs=s("GradientAccumulator (TensorFlow)"),ar=l(),W=r("div"),u(mt.$$.fragment),ms=l(),Y=r("p"),ps=s(`Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. 
Gradients will be accumulated locally on each replica and without synchronization. Users should then call `),Ea=r("code"),ds=s(".gradients"),hs=s(", scale the gradients if required, and pass the result to "),xa=r("code"),us=s("apply_gradients"),fs=s("."),gs=l(),he=r("div"),u(pt.$$.fragment),_s=l(),Ta=r("p"),ws=s("Resets the accumulated gradients on the current replica."),this.h()},l(e){const p=Xi('[data-svelte="svelte-1phssyn"]',document.head);S=n(p,"META",{name:!0,content:!0}),p.forEach(a),dt=c(e),E=n(e,"H1",{class:!0});var nr=o(E);x=n(nr,"A",{id:!0,class:!0,href:!0});var zs=o(x);bt=n(zs,"SPAN",{});var Es=o(bt);f(_e.$$.fragment,Es),Es.forEach(a),zs.forEach(a),Fr=c(nr),$t=n(nr,"SPAN",{});var xs=o($t);Or=i(xs,"Optimization"),xs.forEach(a),nr.forEach(a),La=c(e),ee=n(e,"P",{});var or=o(ee);Rr=i(or,"The "),At=n(or,"CODE",{});var Ts=o(At);jr=i(Ts,".optimization"),Ts.forEach(a),qr=i(or," module provides:"),or.forEach(a),Pa=c(e),I=n(e,"UL",{});var ft=o(I);zt=n(ft,"LI",{});var Ds=o(zt);Ur=i(Ds,"an optimizer with weight decay fixed that can be used to fine-tuned models, and"),Ds.forEach(a),Gr=c(ft),we=n(ft,"LI",{});var sr=o(we);Vr=i(sr,"several schedules in the form of schedule objects that inherit from "),Et=n(sr,"CODE",{});var Ls=o(Et);Mr=i(Ls,"_LRSchedule"),Ls.forEach(a),Hr=i(sr,":"),sr.forEach(a),Br=c(ft),xt=n(ft,"LI",{});var Ps=o(xt);Jr=i(Ps,"a gradient accumulation class to accumulate the gradients of multiple batches"),Ps.forEach(a),ft.forEach(a),Wa=c(e),k=n(e,"H2",{class:!0});var ir=o(k);te=n(ir,"A",{id:!0,class:!0,href:!0});var Ws=o(te);Tt=n(Ws,"SPAN",{});var Ss=o(Tt);f(ve.$$.fragment,Ss),Ss.forEach(a),Ws.forEach(a),Kr=c(ir),Dt=n(ir,"SPAN",{});var Is=o(Dt);Qr=i(Is,"AdamW (PyTorch)"),Is.forEach(a),ir.forEach(a),Sa=c(e),T=n(e,"DIV",{class:!0});var gt=o(T);f(ye.$$.fragment,gt),Xr=c(gt),be=n(gt,"P",{});var lr=o(be);Yr=i(lr,"Implements Adam algorithm with weight decay fix as introduced in "),$e=n(lr,"A",{href:!0,rel:!0});var ks=o($e);Zr=i(ks,"Decoupled Weight Decay Regularization"),ks.forEach(a),en=i(lr,"."),lr.forEach(a),tn=c(gt),ae=n(gt,"DIV",{class:!0});var cr=o(ae);f(Ae.$$.fragment,cr),an=c(cr),Lt=n(cr,"P",{});var Ns=o(Lt);rn=i(Ns,"Performs a single optimization step."),Ns.forEach(a),cr.forEach(a),gt.forEach(a),Ia=c(e),N=n(e,"H2",{class:!0});var mr=o(N);re=n(mr,"A",{id:!0,class:!0,href:!0});var Cs=o(re);Pt=n(Cs,"SPAN",{});var Fs=o(Pt);f(ze.$$.fragment,Fs),Fs.forEach(a),Cs.forEach(a),nn=c(mr),Wt=n(mr,"SPAN",{});var Os=o(Wt);on=i(Os,"AdaFactor (PyTorch)"),Os.forEach(a),mr.forEach(a),ka=c(e),h=n(e,"DIV",{class:!0});var y=o(h);f(Ee.$$.fragment,y),sn=c(y),ht=n(y,"P",{});var vs=o(ht);ln=i(vs,`AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: `),xe=n(vs,"A",{href:!0,rel:!0});var Rs=o(xe);cn=i(Rs,"https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),Rs.forEach(a),vs.forEach(a),mn=c(y),b=n(y,"P",{});var A=o(b);pn=i(A,"Paper: "),St=n(A,"EM",{});var js=o(St);dn=i(js,"Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"),js.forEach(a),hn=c(A),Te=n(A,"A",{href:!0,rel:!0});var qs=o(Te);un=i(qs,"https://arxiv.org/abs/1804.04235"),qs.forEach(a),fn=i(A,` Note that this optimizer internally adjusts the learning rate depending on the `),It=n(A,"CODE",{});var Us=o(It);gn=i(Us,"scale_parameter"),Us.forEach(a),_n=i(A,", "),kt=n(A,"CODE",{});var Gs=o(kt);wn=i(Gs,"relative_step"),Gs.forEach(a),vn=i(A,` and `),Nt=n(A,"CODE",{});var Vs=o(Nt);yn=i(Vs,"warmup_init"),Vs.forEach(a),bn=i(A," options. 
To use a manual (external) learning rate schedule you should set "),Ct=n(A,"CODE",{});var Ms=o(Ct);$n=i(Ms,"scale_parameter=False"),Ms.forEach(a),An=i(A,` and `),Ft=n(A,"CODE",{});var Hs=o(Ft);zn=i(Hs,"relative_step=False"),Hs.forEach(a),En=i(A,"."),A.forEach(a),xn=c(y),Ot=n(y,"P",{});var Bs=o(Ot);Tn=i(Bs,"This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested."),Bs.forEach(a),Dn=c(y),De=n(y,"P",{});var pr=o(De);Ln=i(pr,"Recommended T5 finetuning settings ("),Le=n(pr,"A",{href:!0,rel:!0});var Js=o(Le);Pn=i(Js,"https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),Js.forEach(a),Wn=i(pr,"):"),pr.forEach(a),Sn=c(y),D=n(y,"UL",{});var ue=o(D);Pe=n(ue,"LI",{});var dr=o(Pe);Rt=n(dr,"P",{});var Ks=o(Rt);In=i(Ks,"Training without LR warmup or clip_threshold is not recommended."),Ks.forEach(a),kn=c(dr),We=n(dr,"UL",{});var hr=o(We);jt=n(hr,"LI",{});var Qs=o(jt);Nn=i(Qs,"use scheduled LR warm-up to fixed LR"),Qs.forEach(a),Cn=c(hr),Se=n(hr,"LI",{});var ur=o(Se);Fn=i(ur,"use clip_threshold=1.0 ("),Ie=n(ur,"A",{href:!0,rel:!0});var Xs=o(Ie);On=i(Xs,"https://arxiv.org/abs/1804.04235"),Xs.forEach(a),Rn=i(ur,")"),ur.forEach(a),hr.forEach(a),dr.forEach(a),jn=c(ue),qt=n(ue,"LI",{});var Ys=o(qt);Ut=n(Ys,"P",{});var Zs=o(Ut);qn=i(Zs,"Disable relative updates"),Zs.forEach(a),Ys.forEach(a),Un=c(ue),Gt=n(ue,"LI",{});var ei=o(Gt);Vt=n(ei,"P",{});var ti=o(Vt);Gn=i(ti,"Use scale_parameter=False"),ti.forEach(a),ei.forEach(a),Vn=c(ue),Mt=n(ue,"LI",{});var ai=o(Mt);Ht=n(ai,"P",{});var ri=o(Ht);Mn=i(ri,"Additional optimizer operations like gradient clipping should not be used alongside Adafactor"),ri.forEach(a),ai.forEach(a),ue.forEach(a),Hn=c(y),Bt=n(y,"P",{});var ni=o(Bt);Bn=i(ni,"Example:"),ni.forEach(a),Jn=c(y),f(ke.$$.fragment,y),Kn=c(y),Jt=n(y,"P",{});var oi=o(Jt);Qn=i(oi,"Others reported the following combination to work well:"),oi.forEach(a),Xn=c(y),f(Ne.$$.fragment,y),Yn=c(y),L=n(y,"P",{});var fe=o(L);Zn=i(fe,"When using "),Kt=n(fe,"CODE",{});var si=o(Kt);eo=i(si,"lr=None"),si.forEach(a),to=i(fe," with "),ut=n(fe,"A",{href:!0});var ii=o(ut);ao=i(ii,"Trainer"),ii.forEach(a),ro=i(fe," you will most likely need to use "),Qt=n(fe,"CODE",{});var li=o(Qt);no=i(li,"AdafactorSchedule"),li.forEach(a),oo=i(fe," scheduler as following:"),fe.forEach(a),so=c(y),f(Ce.$$.fragment,y),io=c(y),Xt=n(y,"P",{});var ci=o(Xt);lo=i(ci,"Usage:"),ci.forEach(a),co=c(y),f(Fe.$$.fragment,y),mo=c(y),ne=n(y,"DIV",{class:!0});var fr=o(ne);f(Oe.$$.fragment,fr),po=c(fr),Yt=n(fr,"P",{});var mi=o(Yt);ho=i(mi,"Performs a single optimization step"),mi.forEach(a),fr.forEach(a),y.forEach(a),Na=c(e),C=n(e,"H2",{class:!0});var gr=o(C);oe=n(gr,"A",{id:!0,class:!0,href:!0});var pi=o(oe);Zt=n(pi,"SPAN",{});var di=o(Zt);f(Re.$$.fragment,di),di.forEach(a),pi.forEach(a),uo=c(gr),ea=n(gr,"SPAN",{});var hi=o(ea);fo=i(hi,"AdamWeightDecay (TensorFlow)"),hi.forEach(a),gr.forEach(a),Ca=c(e),z=n(e,"DIV",{class:!0});var ge=o(z);f(je.$$.fragment,ge),go=c(ge),F=n(ge,"P",{});var _t=o(F);_o=i(_t,`Adam enables L2 weight decay and clip_by_global_norm on gradients. 
Just adding the square of the weights to the loss function is `),ta=n(_t,"EM",{});var ui=o(ta);wo=i(ui,"not"),ui.forEach(a),vo=i(_t,` the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in `),qe=n(_t,"A",{href:!0,rel:!0});var fi=o(qe);yo=i(fi,"Decoupled Weight Decay Regularization"),fi.forEach(a),bo=i(_t,"."),_t.forEach(a),$o=c(ge),aa=n(ge,"P",{});var gi=o(aa);Ao=i(gi,`Instead we want ot decay the weights in a manner that doesn\u2019t interact with the m/v parameters. This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.`),gi.forEach(a),zo=c(ge),se=n(ge,"DIV",{class:!0});var _r=o(se);f(Ue.$$.fragment,_r),Eo=c(_r),ra=n(_r,"P",{});var _i=o(ra);xo=i(_i,"Creates an optimizer from its config with WarmUp custom object."),_i.forEach(a),_r.forEach(a),ge.forEach(a),Fa=c(e),O=n(e,"DIV",{class:!0});var wr=o(O);f(Ge.$$.fragment,wr),To=c(wr),na=n(wr,"P",{});var wi=o(na);Do=i(wi,"Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."),wi.forEach(a),wr.forEach(a),Oa=c(e),R=n(e,"H2",{class:!0});var vr=o(R);ie=n(vr,"A",{id:!0,class:!0,href:!0});var vi=o(ie);oa=n(vi,"SPAN",{});var yi=o(oa);f(Ve.$$.fragment,yi),yi.forEach(a),vi.forEach(a),Lo=c(vr),sa=n(vr,"SPAN",{});var bi=o(sa);Po=i(bi,"Schedules"),bi.forEach(a),vr.forEach(a),Ra=c(e),j=n(e,"H3",{class:!0});var yr=o(j);le=n(yr,"A",{id:!0,class:!0,href:!0});var $i=o(le);ia=n($i,"SPAN",{});var Ai=o(ia);f(Me.$$.fragment,Ai),Ai.forEach(a),$i.forEach(a),Wo=c(yr),la=n(yr,"SPAN",{});var zi=o(la);So=i(zi,"Learning Rate Schedules (Pytorch)"),zi.forEach(a),yr.forEach(a),ja=c(e),q=n(e,"DIV",{class:!0});var br=o(q);f(He.$$.fragment,br),Io=c(br),ca=n(br,"P",{});var Ei=o(ca);ko=i(Ei,"An enumeration."),Ei.forEach(a),br.forEach(a),qa=c(e),U=n(e,"DIV",{class:!0});var $r=o(U);f(Be.$$.fragment,$r),No=c($r),ma=n($r,"P",{});var xi=o(ma);Co=i(xi,"Unified API to get any scheduler from its name."),xi.forEach(a),$r.forEach(a),Ua=c(e),G=n(e,"DIV",{class:!0});var Ar=o(G);f(Je.$$.fragment,Ar),Fo=c(Ar),pa=n(Ar,"P",{});var Ti=o(pa);Oo=i(Ti,"Create a schedule with a constant learning rate, using the learning rate set in optimizer."),Ti.forEach(a),Ar.forEach(a),Ga=c(e),V=n(e,"DIV",{class:!0});var zr=o(V);f(Ke.$$.fragment,zr),Ro=c(zr),da=n(zr,"P",{});var Di=o(da);jo=i(Di,`Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.`),Di.forEach(a),zr.forEach(a),Va=c(e),Qe=n(e,"IMG",{alt:!0,src:!0}),Ma=c(e),M=n(e,"DIV",{class:!0});var Er=o(M);f(Xe.$$.fragment,Er),qo=c(Er),ha=n(Er,"P",{});var Li=o(ha);Uo=i(Li,`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Li.forEach(a),Er.forEach(a),Ha=c(e),Ye=n(e,"IMG",{alt:!0,src:!0}),Ba=c(e),H=n(e,"DIV",{class:!0});var xr=o(H);f(Ze.$$.fragment,xr),Go=c(xr),ua=n(xr,"P",{});var Pi=o(ua);Vo=i(Pi,`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the 
optimizer.`),Pi.forEach(a),xr.forEach(a),Ja=c(e),et=n(e,"IMG",{alt:!0,src:!0}),Ka=c(e),B=n(e,"DIV",{class:!0});var Tr=o(B);f(tt.$$.fragment,Tr),Mo=c(Tr),fa=n(Tr,"P",{});var Wi=o(fa);Ho=i(Wi,`Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Wi.forEach(a),Tr.forEach(a),Qa=c(e),at=n(e,"IMG",{alt:!0,src:!0}),Xa=c(e),P=n(e,"DIV",{class:!0});var wt=o(P);f(rt.$$.fragment,wt),Bo=c(wt),nt=n(wt,"P",{});var Dr=o(nt);Jo=i(Dr,`Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `),ga=n(Dr,"EM",{});var Si=o(ga);Ko=i(Si,"lr_end"),Si.forEach(a),Qo=i(Dr,`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Dr.forEach(a),Xo=c(wt),ce=n(wt,"P",{});var Da=o(ce);Yo=i(Da,"Note: "),_a=n(Da,"EM",{});var Ii=o(_a);Zo=i(Ii,"power"),Ii.forEach(a),es=i(Da,` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at `),ot=n(Da,"A",{href:!0,rel:!0});var ki=o(ot);ts=i(ki,"https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),ki.forEach(a),Da.forEach(a),wt.forEach(a),Ya=c(e),J=n(e,"H3",{class:!0});var Lr=o(J);me=n(Lr,"A",{id:!0,class:!0,href:!0});var Ni=o(me);wa=n(Ni,"SPAN",{});var Ci=o(wa);f(st.$$.fragment,Ci),Ci.forEach(a),Ni.forEach(a),as=c(Lr),va=n(Lr,"SPAN",{});var Fi=o(va);rs=i(Fi,"Warmup (TensorFlow)"),Fi.forEach(a),Lr.forEach(a),Za=c(e),K=n(e,"DIV",{class:!0});var Pr=o(K);f(it.$$.fragment,Pr),ns=c(Pr),ya=n(Pr,"P",{});var Oi=o(ya);os=i(Oi,"Applies a warmup schedule on a given learning rate decay schedule."),Oi.forEach(a),Pr.forEach(a),er=c(e),Q=n(e,"H2",{class:!0});var Wr=o(Q);pe=n(Wr,"A",{id:!0,class:!0,href:!0});var Ri=o(pe);ba=n(Ri,"SPAN",{});var ji=o(ba);f(lt.$$.fragment,ji),ji.forEach(a),Ri.forEach(a),ss=c(Wr),$a=n(Wr,"SPAN",{});var qi=o($a);is=i(qi,"Gradient Strategies"),qi.forEach(a),Wr.forEach(a),tr=c(e),X=n(e,"H3",{class:!0});var Sr=o(X);de=n(Sr,"A",{id:!0,class:!0,href:!0});var Ui=o(de);Aa=n(Ui,"SPAN",{});var Gi=o(Aa);f(ct.$$.fragment,Gi),Gi.forEach(a),Ui.forEach(a),ls=c(Sr),za=n(Sr,"SPAN",{});var Vi=o(za);cs=i(Vi,"GradientAccumulator (TensorFlow)"),Vi.forEach(a),Sr.forEach(a),ar=c(e),W=n(e,"DIV",{class:!0});var vt=o(W);f(mt.$$.fragment,vt),ms=c(vt),Y=n(vt,"P",{});var yt=o(Y);ps=i(yt,`Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. Gradients will be accumulated locally on each replica and without synchronization. 
Users should then call `),Ea=n(yt,"CODE",{});var Mi=o(Ea);ds=i(Mi,".gradients"),Mi.forEach(a),hs=i(yt,", scale the gradients if required, and pass the result to "),xa=n(yt,"CODE",{});var Hi=o(xa);us=i(Hi,"apply_gradients"),Hi.forEach(a),fs=i(yt,"."),yt.forEach(a),gs=c(vt),he=n(vt,"DIV",{class:!0});var Ir=o(he);f(pt.$$.fragment,Ir),_s=c(Ir),Ta=n(Ir,"P",{});var Bi=o(Ta);ws=i(Bi,"Resets the accumulated gradients on the current replica."),Bi.forEach(a),Ir.forEach(a),vt.forEach(a),this.h()},h(){m(S,"name","hf:doc:metadata"),m(S,"content",JSON.stringify(el)),m(x,"id","optimization"),m(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(x,"href","#optimization"),m(E,"class","relative group"),m(te,"id","transformers.AdamW"),m(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(te,"href","#transformers.AdamW"),m(k,"class","relative group"),m($e,"href","https://arxiv.org/abs/1711.05101"),m($e,"rel","nofollow"),m(ae,"class","docstring"),m(T,"class","docstring"),m(re,"id","transformers.Adafactor"),m(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(re,"href","#transformers.Adafactor"),m(N,"class","relative group"),m(xe,"href","https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),m(xe,"rel","nofollow"),m(Te,"href","https://arxiv.org/abs/1804.04235"),m(Te,"rel","nofollow"),m(Le,"href","https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),m(Le,"rel","nofollow"),m(Ie,"href","https://arxiv.org/abs/1804.04235"),m(Ie,"rel","nofollow"),m(ut,"href","/docs/transformers/v4.15.0/en/main_classes/trainer#transformers.Trainer"),m(ne,"class","docstring"),m(h,"class","docstring"),m(oe,"id","transformers.AdamWeightDecay"),m(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(oe,"href","#transformers.AdamWeightDecay"),m(C,"class","relative group"),m(qe,"href","https://arxiv.org/abs/1711.05101"),m(qe,"rel","nofollow"),m(se,"class","docstring"),m(z,"class","docstring"),m(O,"class","docstring"),m(ie,"id","schedules"),m(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ie,"href","#schedules"),m(R,"class","relative group"),m(le,"id","transformers.SchedulerType"),m(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(le,"href","#transformers.SchedulerType"),m(j,"class","relative 
group"),m(q,"class","docstring"),m(U,"class","docstring"),m(G,"class","docstring"),m(V,"class","docstring"),m(Qe,"alt",""),kr(Qe.src,ys="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png")||m(Qe,"src",ys),m(M,"class","docstring"),m(Ye,"alt",""),kr(Ye.src,bs="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png")||m(Ye,"src",bs),m(H,"class","docstring"),m(et,"alt",""),kr(et.src,$s="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png")||m(et,"src",$s),m(B,"class","docstring"),m(at,"alt",""),kr(at.src,As="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png")||m(at,"src",As),m(ot,"href","https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),m(ot,"rel","nofollow"),m(P,"class","docstring"),m(me,"id","transformers.WarmUp"),m(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(me,"href","#transformers.WarmUp"),m(J,"class","relative group"),m(K,"class","docstring"),m(pe,"id","gradient-strategies"),m(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(pe,"href","#gradient-strategies"),m(Q,"class","relative group"),m(de,"id","transformers.GradientAccumulator"),m(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(de,"href","#transformers.GradientAccumulator"),m(X,"class","relative 
group"),m(he,"class","docstring"),m(W,"class","docstring")},m(e,p){t(document.head,S),d(e,dt,p),d(e,E,p),t(E,x),t(x,bt),g(_e,bt,null),t(E,Fr),t(E,$t),t($t,Or),d(e,La,p),d(e,ee,p),t(ee,Rr),t(ee,At),t(At,jr),t(ee,qr),d(e,Pa,p),d(e,I,p),t(I,zt),t(zt,Ur),t(I,Gr),t(I,we),t(we,Vr),t(we,Et),t(Et,Mr),t(we,Hr),t(I,Br),t(I,xt),t(xt,Jr),d(e,Wa,p),d(e,k,p),t(k,te),t(te,Tt),g(ve,Tt,null),t(k,Kr),t(k,Dt),t(Dt,Qr),d(e,Sa,p),d(e,T,p),g(ye,T,null),t(T,Xr),t(T,be),t(be,Yr),t(be,$e),t($e,Zr),t(be,en),t(T,tn),t(T,ae),g(Ae,ae,null),t(ae,an),t(ae,Lt),t(Lt,rn),d(e,Ia,p),d(e,N,p),t(N,re),t(re,Pt),g(ze,Pt,null),t(N,nn),t(N,Wt),t(Wt,on),d(e,ka,p),d(e,h,p),g(Ee,h,null),t(h,sn),t(h,ht),t(ht,ln),t(ht,xe),t(xe,cn),t(h,mn),t(h,b),t(b,pn),t(b,St),t(St,dn),t(b,hn),t(b,Te),t(Te,un),t(b,fn),t(b,It),t(It,gn),t(b,_n),t(b,kt),t(kt,wn),t(b,vn),t(b,Nt),t(Nt,yn),t(b,bn),t(b,Ct),t(Ct,$n),t(b,An),t(b,Ft),t(Ft,zn),t(b,En),t(h,xn),t(h,Ot),t(Ot,Tn),t(h,Dn),t(h,De),t(De,Ln),t(De,Le),t(Le,Pn),t(De,Wn),t(h,Sn),t(h,D),t(D,Pe),t(Pe,Rt),t(Rt,In),t(Pe,kn),t(Pe,We),t(We,jt),t(jt,Nn),t(We,Cn),t(We,Se),t(Se,Fn),t(Se,Ie),t(Ie,On),t(Se,Rn),t(D,jn),t(D,qt),t(qt,Ut),t(Ut,qn),t(D,Un),t(D,Gt),t(Gt,Vt),t(Vt,Gn),t(D,Vn),t(D,Mt),t(Mt,Ht),t(Ht,Mn),t(h,Hn),t(h,Bt),t(Bt,Bn),t(h,Jn),g(ke,h,null),t(h,Kn),t(h,Jt),t(Jt,Qn),t(h,Xn),g(Ne,h,null),t(h,Yn),t(h,L),t(L,Zn),t(L,Kt),t(Kt,eo),t(L,to),t(L,ut),t(ut,ao),t(L,ro),t(L,Qt),t(Qt,no),t(L,oo),t(h,so),g(Ce,h,null),t(h,io),t(h,Xt),t(Xt,lo),t(h,co),g(Fe,h,null),t(h,mo),t(h,ne),g(Oe,ne,null),t(ne,po),t(ne,Yt),t(Yt,ho),d(e,Na,p),d(e,C,p),t(C,oe),t(oe,Zt),g(Re,Zt,null),t(C,uo),t(C,ea),t(ea,fo),d(e,Ca,p),d(e,z,p),g(je,z,null),t(z,go),t(z,F),t(F,_o),t(F,ta),t(ta,wo),t(F,vo),t(F,qe),t(qe,yo),t(F,bo),t(z,$o),t(z,aa),t(aa,Ao),t(z,zo),t(z,se),g(Ue,se,null),t(se,Eo),t(se,ra),t(ra,xo),d(e,Fa,p),d(e,O,p),g(Ge,O,null),t(O,To),t(O,na),t(na,Do),d(e,Oa,p),d(e,R,p),t(R,ie),t(ie,oa),g(Ve,oa,null),t(R,Lo),t(R,sa),t(sa,Po),d(e,Ra,p),d(e,j,p),t(j,le),t(le,ia),g(Me,ia,null),t(j,Wo),t(j,la),t(la,So),d(e,ja,p),d(e,q,p),g(He,q,null),t(q,Io),t(q,ca),t(ca,ko),d(e,qa,p),d(e,U,p),g(Be,U,null),t(U,No),t(U,ma),t(ma,Co),d(e,Ua,p),d(e,G,p),g(Je,G,null),t(G,Fo),t(G,pa),t(pa,Oo),d(e,Ga,p),d(e,V,p),g(Ke,V,null),t(V,Ro),t(V,da),t(da,jo),d(e,Va,p),d(e,Qe,p),d(e,Ma,p),d(e,M,p),g(Xe,M,null),t(M,qo),t(M,ha),t(ha,Uo),d(e,Ha,p),d(e,Ye,p),d(e,Ba,p),d(e,H,p),g(Ze,H,null),t(H,Go),t(H,ua),t(ua,Vo),d(e,Ja,p),d(e,et,p),d(e,Ka,p),d(e,B,p),g(tt,B,null),t(B,Mo),t(B,fa),t(fa,Ho),d(e,Qa,p),d(e,at,p),d(e,Xa,p),d(e,P,p),g(rt,P,null),t(P,Bo),t(P,nt),t(nt,Jo),t(nt,ga),t(ga,Ko),t(nt,Qo),t(P,Xo),t(P,ce),t(ce,Yo),t(ce,_a),t(_a,Zo),t(ce,es),t(ce,ot),t(ot,ts),d(e,Ya,p),d(e,J,p),t(J,me),t(me,wa),g(st,wa,null),t(J,as),t(J,va),t(va,rs),d(e,Za,p),d(e,K,p),g(it,K,null),t(K,ns),t(K,ya),t(ya,os),d(e,er,p),d(e,Q,p),t(Q,pe),t(pe,ba),g(lt,ba,null),t(Q,ss),t(Q,$a),t($a,is),d(e,tr,p),d(e,X,p),t(X,de),t(de,Aa),g(ct,Aa,null),t(X,ls),t(X,za),t(za,cs),d(e,ar,p),d(e,W,p),g(mt,W,null),t(W,ms),t(W,Y),t(Y,ps),t(Y,Ea),t(Ea,ds),t(Y,hs),t(Y,xa),t(xa,us),t(Y,fs),t(W,gs),t(W,he),g(pt,he,null),t(he,_s),t(he,Ta),t(Ta,ws),rr=!0},p:Yi,i(e){rr||(_(_e.$$.fragment,e),_(ve.$$.fragment,e),_(ye.$$.fragment,e),_(Ae.$$.fragment,e),_(ze.$$.fragment,e),_(Ee.$$.fragment,e),_(ke.$$.fragment,e),_(Ne.$$.fragment,e),_(Ce.$$.fragment,e),_(Fe.$$.fragment,e),_(Oe.$$.fragment,e),_(Re.$$.fragment,e),_(je.$$.fragment,e),_(Ue.$$.fragment,e),_(Ge.$$.fragment,e),_(Ve.$$.fragment,e),_(Me.$$.fragment,e),_(He.$$.fragment,e),_(Be.$$.fragment,e),_(Je.$$.fragment,e),_(Ke.$$.fragment,e),_(Xe.$$.fragment,e),_(Ze.$$.fragment,e),_(tt.$$.fra
gment,e),_(rt.$$.fragment,e),_(st.$$.fragment,e),_(it.$$.fragment,e),_(lt.$$.fragment,e),_(ct.$$.fragment,e),_(mt.$$.fragment,e),_(pt.$$.fragment,e),rr=!0)},o(e){w(_e.$$.fragment,e),w(ve.$$.fragment,e),w(ye.$$.fragment,e),w(Ae.$$.fragment,e),w(ze.$$.fragment,e),w(Ee.$$.fragment,e),w(ke.$$.fragment,e),w(Ne.$$.fragment,e),w(Ce.$$.fragment,e),w(Fe.$$.fragment,e),w(Oe.$$.fragment,e),w(Re.$$.fragment,e),w(je.$$.fragment,e),w(Ue.$$.fragment,e),w(Ge.$$.fragment,e),w(Ve.$$.fragment,e),w(Me.$$.fragment,e),w(He.$$.fragment,e),w(Be.$$.fragment,e),w(Je.$$.fragment,e),w(Ke.$$.fragment,e),w(Xe.$$.fragment,e),w(Ze.$$.fragment,e),w(tt.$$.fragment,e),w(rt.$$.fragment,e),w(st.$$.fragment,e),w(it.$$.fragment,e),w(lt.$$.fragment,e),w(ct.$$.fragment,e),w(mt.$$.fragment,e),w(pt.$$.fragment,e),rr=!1},d(e){a(S),e&&a(dt),e&&a(E),v(_e),e&&a(La),e&&a(ee),e&&a(Pa),e&&a(I),e&&a(Wa),e&&a(k),v(ve),e&&a(Sa),e&&a(T),v(ye),v(Ae),e&&a(Ia),e&&a(N),v(ze),e&&a(ka),e&&a(h),v(Ee),v(ke),v(Ne),v(Ce),v(Fe),v(Oe),e&&a(Na),e&&a(C),v(Re),e&&a(Ca),e&&a(z),v(je),v(Ue),e&&a(Fa),e&&a(O),v(Ge),e&&a(Oa),e&&a(R),v(Ve),e&&a(Ra),e&&a(j),v(Me),e&&a(ja),e&&a(q),v(He),e&&a(qa),e&&a(U),v(Be),e&&a(Ua),e&&a(G),v(Je),e&&a(Ga),e&&a(V),v(Ke),e&&a(Va),e&&a(Qe),e&&a(Ma),e&&a(M),v(Xe),e&&a(Ha),e&&a(Ye),e&&a(Ba),e&&a(H),v(Ze),e&&a(Ja),e&&a(et),e&&a(Ka),e&&a(B),v(tt),e&&a(Qa),e&&a(at),e&&a(Xa),e&&a(P),v(rt),e&&a(Ya),e&&a(J),v(st),e&&a(Za),e&&a(K),v(it),e&&a(er),e&&a(Q),v(lt),e&&a(tr),e&&a(X),v(ct),e&&a(ar),e&&a(W),v(mt),v(pt)}}}const el={local:"optimization",sections:[{local:"transformers.AdamW",title:"AdamW (PyTorch)"},{local:"transformers.Adafactor",title:"AdaFactor (PyTorch)"},{local:"transformers.AdamWeightDecay",title:"AdamWeightDecay (TensorFlow)"},{local:"schedules",sections:[{local:"transformers.SchedulerType",title:"Learning Rate Schedules (Pytorch)"},{local:"transformers.WarmUp",title:"Warmup (TensorFlow)"}],title:"Schedules"},{local:"gradient-strategies",sections:[{local:"transformers.GradientAccumulator",title:"GradientAccumulator (TensorFlow)"}],title:"Gradient Strategies"}],title:"Optimization"};function tl(Cr,S,dt){let{fw:E}=S;return Cr.$$set=x=>{"fw"in x&&dt(0,E=x.fw)},[E]}class il extends Ji{constructor(S){super();Ki(this,S,tl,Zi,Qi,{fw:0})}}export{il as default,el as metadata};
9,999